aboutsummaryrefslogtreecommitdiff
path: root/contrib/llvm-project/clang/lib
diff options
context:
space:
mode:
Diffstat (limited to 'contrib/llvm-project/clang/lib')
-rw-r--r--contrib/llvm-project/clang/lib/APINotes/APINotesFormat.h107
-rw-r--r--contrib/llvm-project/clang/lib/APINotes/APINotesManager.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/APINotes/APINotesReader.cpp426
-rw-r--r--contrib/llvm-project/clang/lib/APINotes/APINotesTypes.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/APINotes/APINotesWriter.cpp199
-rw-r--r--contrib/llvm-project/clang/lib/APINotes/APINotesYAMLCompiler.cpp173
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/ObjCMT.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/TransAPIUses.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/TransGCAttrs.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/AST/APValue.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTConcept.cpp36
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTContext.cpp962
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp111
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTDumper.cpp51
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTImporter.cpp402
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp45
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/AST/AttrDocTable.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/AST/Availability.cpp (renamed from contrib/llvm-project/clang/lib/ExtractAPI/AvailabilityInfo.cpp)27
-rw-r--r--contrib/llvm-project/clang/lib/AST/CommentParser.cpp171
-rw-r--r--contrib/llvm-project/clang/lib/AST/ComparisonCategories.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp43
-rw-r--r--contrib/llvm-project/clang/lib/AST/Decl.cpp304
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclBase.cpp136
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclCXX.cpp251
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclFriend.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclObjC.cpp123
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclOpenMP.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp281
-rwxr-xr-xcontrib/llvm-project/clang/lib/AST/DeclTemplate.cpp393
-rw-r--r--contrib/llvm-project/clang/lib/AST/Expr.cpp237
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprCXX.cpp123
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprClassification.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprConstShared.h10
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprConstant.cpp946
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExternalASTSource.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/AST/FormatString.cpp54
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Boolean.h15
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.cpp111
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.h9
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp3088
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeGenError.h46
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.cpp680
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h91
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Compiler.cpp5599
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Compiler.h (renamed from contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h)408
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Context.cpp169
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Context.h26
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Descriptor.cpp164
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Descriptor.h66
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp315
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/DynamicAllocator.cpp118
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/DynamicAllocator.h102
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.cpp126
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.h31
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.cpp152
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.h22
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Floating.h2
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Function.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Function.h21
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/FunctionPointer.h36
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Integral.h10
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/IntegralAP.h39
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp571
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Interp.h1267
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h50
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpBuiltin.cpp933
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.cpp62
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h16
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpShared.cpp42
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpShared.h26
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpStack.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpStack.h4
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpState.cpp33
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpState.h29
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/MemberPointer.cpp76
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/MemberPointer.h112
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Opcodes.td216
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp404
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Pointer.h456
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/PrimType.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/PrimType.h62
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Program.cpp165
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Program.h12
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Record.cpp21
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Record.h21
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Source.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/State.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp152
-rw-r--r--contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp71
-rw-r--r--contrib/llvm-project/clang/lib/AST/Linkage.h3
-rw-r--r--contrib/llvm-project/clang/lib/AST/Mangle.cpp28
-rw-r--r--contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp263
-rw-r--r--contrib/llvm-project/clang/lib/AST/NSAPI.cpp109
-rw-r--r--contrib/llvm-project/clang/lib/AST/NestedNameSpecifier.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/AST/ODRDiagsEmitter.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/AST/ODRHash.cpp25
-rw-r--r--contrib/llvm-project/clang/lib/AST/OpenACCClause.cpp552
-rw-r--r--contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/AST/ParentMap.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp25
-rw-r--r--contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp95
-rw-r--r--contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/AST/ScanfFormatString.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/AST/SelectorLocationsKind.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/AST/Stmt.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/AST/StmtOpenACC.cpp125
-rw-r--r--contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp43
-rw-r--r--contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp66
-rw-r--r--contrib/llvm-project/clang/lib/AST/StmtProfile.cpp210
-rw-r--r--contrib/llvm-project/clang/lib/AST/TemplateBase.cpp21
-rw-r--r--contrib/llvm-project/clang/lib/AST/TemplateName.cpp106
-rw-r--r--contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp321
-rw-r--r--contrib/llvm-project/clang/lib/AST/Type.cpp664
-rw-r--r--contrib/llvm-project/clang/lib/AST/TypeLoc.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/AST/TypePrinter.cpp179
-rw-r--r--contrib/llvm-project/clang/lib/AST/VTTBuilder.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp105
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h4
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/CFG.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/CalledOnceCheck.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp168
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ASTOps.cpp287
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/AdornedCFG.cpp183
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/CNFFormula.cpp303
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp121
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp73
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp991
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DebugSupport.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.cpp129
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.css10
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.html1
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Logger.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp279
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/RecordOps.cpp97
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp361
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp261
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Value.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp482
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/MacroExpansionContext.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/ObjCNoReturn.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/PathDiagnostic.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp51
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/ThreadSafetyCommon.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/UnsafeBufferUsage.cpp1173
-rw-r--r--contrib/llvm-project/clang/lib/Basic/ASTSourceDescriptor.cpp33
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Attributes.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Builtins.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Basic/CharInfo.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Cuda.cpp111
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp27
-rw-r--r--contrib/llvm-project/clang/lib/Basic/FileManager.cpp104
-rw-r--r--contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp39
-rw-r--r--contrib/llvm-project/clang/lib/Basic/LangOptions.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/LangStandards.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Module.cpp41
-rw-r--r--contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp379
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Sarif.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Basic/SourceManager.cpp45
-rw-r--r--contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp290
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h24
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp75
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h14
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/ARM.h21
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AVR.h4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/BPF.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/BPF.h4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/CSKY.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h2
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/DirectX.h1
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h10
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.cpp33
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.h5
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/M68k.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/M68k.h5
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Mips.cpp28
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Mips.h42
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp163
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h25
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h5
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp145
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/PPC.h59
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp23
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h10
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/SPIR.cpp75
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h66
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h16
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/VE.h1
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp197
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h21
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp126
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/X86.h40
-rw-r--r--contrib/llvm-project/clang/lib/Basic/TypeTraits.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/CIR/Dialect/IR/CIRDialect.cpp (renamed from contrib/llvm-project/clang/lib/AST/Interp/ByteCodeGenError.cpp)13
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ABIInfo.cpp62
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h24
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.cpp76
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.h23
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Address.h229
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/BackendConsumer.h17
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp113
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp499
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp78
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h3
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h247
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp1639
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp110
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCUDARuntime.h5
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCXX.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp21
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h31
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp819
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCall.h28
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp137
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp186
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h72
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp166
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp371
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h39
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp182
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp25
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGException.cpp23
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp703
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp355
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp228
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprComplex.cpp161
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp492
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp595
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGGPUBuiltin.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.cpp23
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.h35
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.h7
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp26
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp92
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp89
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp142
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp495
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h25
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp1575
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h3
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGPointerAuth.cpp621
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGPointerAuthInfo.h99
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h5
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp513
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp332
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp498
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGVTT.cpp35
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp119
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGVTables.h6
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGValue.h272
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp168
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp271
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h576
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp785
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h113
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp266
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.h20
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.cpp151
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.h17
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTypeCache.h2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp85
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h33
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ConstantEmitter.h12
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ConstantInitBuilder.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp637
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h19
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp528
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/LinkInModulesPass.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/LinkInModulesPass.h3
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/MCDCState.h49
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h3
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp120
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp46
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h29
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/AArch64.cpp299
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/AMDGPU.cpp56
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/ARC.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/ARM.cpp43
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/CSKY.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/Hexagon.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/LoongArch.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/MSP430.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/Mips.cpp29
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/NVPTX.cpp40
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/PNaCl.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/PPC.cpp115
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/RISCV.cpp42
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/Sparc.cpp21
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/SystemZ.cpp26
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/WebAssembly.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/X86.cpp97
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/XCore.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Distro.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Driver.cpp416
-rw-r--r--contrib/llvm-project/clang/lib/Driver/OffloadBundler.cpp237
-rw-r--r--contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp36
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChain.cpp129
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp118
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.h2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp46
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h6
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp50
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.h3
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp60
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp68
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.h2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp23
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp77
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.h4
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Sparc.h2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp26
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.cpp90
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp974
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h15
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp500
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h19
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp232
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h17
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp233
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h19
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp121
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.h7
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp17
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp23
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp89
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp51
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.h4
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp265
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.cpp66
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.h3
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Haiku.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp48
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp58
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h4
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp25
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h5
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp38
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp182
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h27
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp108
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Types.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Edit/RewriteObjCFoundationAPI.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/API.cpp551
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/DeclarationFragments.cpp398
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp119
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp988
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Format/BreakableToken.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Format/BreakableToken.h3
-rw-r--r--contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp95
-rw-r--r--contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h7
-rw-r--r--contrib/llvm-project/clang/lib/Format/Encoding.h1
-rw-r--r--contrib/llvm-project/clang/lib/Format/Format.cpp317
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatInternal.h4
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatToken.cpp54
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatToken.h92
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp59
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h5
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatTokenSource.h14
-rw-r--r--contrib/llvm-project/clang/lib/Format/MacroCallReconstructor.cpp68
-rw-r--r--contrib/llvm-project/clang/lib/Format/MacroExpander.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/Format/Macros.h40
-rw-r--r--contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp29
-rw-r--r--contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.h18
-rw-r--r--contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.h3
-rw-r--r--contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Format/TokenAnalyzer.h12
-rw-r--r--contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp1032
-rw-r--r--contrib/llvm-project/clang/lib/Format/TokenAnnotator.h13
-rw-r--r--contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp65
-rw-r--r--contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.h2
-rw-r--r--contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp436
-rw-r--r--contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h18
-rw-r--r--contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp140
-rw-r--r--contrib/llvm-project/clang/lib/Format/WhitespaceManager.h21
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp31
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp148
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp283
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp28
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/DependencyGraph.cpp31
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp27
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp62
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/FrontendOptions.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp269
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/InterfaceStubFunctionsConsumer.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/MultiplexConsumer.cpp42
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp133
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp46
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp213
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/TextDiagnosticPrinter.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_cuda_intrinsics.h4
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__stdarg_header_macro.h12
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__stddef_header_macro.h12
-rw-r--r--contrib/llvm-project/clang/lib/Headers/arm_acle.h36
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512erintrin.h271
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512fp16intrin.h76
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512pfintrin.h92
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avxintrin.h102
-rw-r--r--contrib/llvm-project/clang/lib/Headers/bmiintrin.h6
-rw-r--r--contrib/llvm-project/clang/lib/Headers/builtins.h3
-rw-r--r--contrib/llvm-project/clang/lib/Headers/cpuid.h26
-rw-r--r--contrib/llvm-project/clang/lib/Headers/cuda_wrappers/algorithm2
-rw-r--r--contrib/llvm-project/clang/lib/Headers/emmintrin.h471
-rw-r--r--contrib/llvm-project/clang/lib/Headers/float.h28
-rw-r--r--contrib/llvm-project/clang/lib/Headers/fmaintrin.h48
-rw-r--r--contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_basic_types.h13
-rw-r--r--contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_intrinsics.h1341
-rw-r--r--contrib/llvm-project/clang/lib/Headers/ia32intrin.h72
-rw-r--r--contrib/llvm-project/clang/lib/Headers/immintrin.h244
-rw-r--r--contrib/llvm-project/clang/lib/Headers/intrin.h272
-rw-r--r--contrib/llvm-project/clang/lib/Headers/intrin0.h247
-rw-r--r--contrib/llvm-project/clang/lib/Headers/inttypes.h4
-rw-r--r--contrib/llvm-project/clang/lib/Headers/iso646.h4
-rw-r--r--contrib/llvm-project/clang/lib/Headers/keylockerintrin.h13
-rw-r--r--contrib/llvm-project/clang/lib/Headers/limits.h5
-rw-r--r--contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/assert.h2
-rw-r--r--contrib/llvm-project/clang/lib/Headers/mm3dnow.h147
-rw-r--r--contrib/llvm-project/clang/lib/Headers/mmintrin.h160
-rw-r--r--contrib/llvm-project/clang/lib/Headers/module.modulemap15
-rw-r--r--contrib/llvm-project/clang/lib/Headers/opencl-c-base.h4
-rw-r--r--contrib/llvm-project/clang/lib/Headers/opencl-c.h15
-rw-r--r--contrib/llvm-project/clang/lib/Headers/prfchwintrin.h18
-rw-r--r--contrib/llvm-project/clang/lib/Headers/ptrauth.h330
-rw-r--r--contrib/llvm-project/clang/lib/Headers/sifive_vector.h102
-rw-r--r--contrib/llvm-project/clang/lib/Headers/smmintrin.h24
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stdalign.h5
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stdarg.h34
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stdatomic.h12
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stdbool.h5
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stddef.h60
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stdint.h5
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stdnoreturn.h6
-rw-r--r--contrib/llvm-project/clang/lib/Headers/tmmintrin.h36
-rw-r--r--contrib/llvm-project/clang/lib/Headers/varargs.h6
-rw-r--r--contrib/llvm-project/clang/lib/Headers/vecintrin.h28
-rw-r--r--contrib/llvm-project/clang/lib/Headers/x86gprintrin.h21
-rw-r--r--contrib/llvm-project/clang/lib/Headers/x86intrin.h32
-rw-r--r--contrib/llvm-project/clang/lib/Headers/xmmintrin.h384
-rw-r--r--contrib/llvm-project/clang/lib/Headers/yvals_core.h25
-rw-r--r--contrib/llvm-project/clang/lib/Headers/zos_wrappers/builtins.h18
-rw-r--r--contrib/llvm-project/clang/lib/Index/CommentToXML.cpp40
-rw-r--r--contrib/llvm-project/clang/lib/Index/IndexBody.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Index/IndexDecl.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Index/IndexingAction.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Index/USRGeneration.cpp32
-rw-r--r--contrib/llvm-project/clang/lib/InstallAPI/DiagnosticBuilderWrappers.cpp110
-rw-r--r--contrib/llvm-project/clang/lib/InstallAPI/DiagnosticBuilderWrappers.h49
-rw-r--r--contrib/llvm-project/clang/lib/InstallAPI/DirectoryScanner.cpp300
-rw-r--r--contrib/llvm-project/clang/lib/InstallAPI/DylibVerifier.cpp1005
-rw-r--r--contrib/llvm-project/clang/lib/InstallAPI/FileList.cpp192
-rw-r--r--contrib/llvm-project/clang/lib/InstallAPI/Frontend.cpp220
-rw-r--r--contrib/llvm-project/clang/lib/InstallAPI/HeaderFile.cpp88
-rw-r--r--contrib/llvm-project/clang/lib/InstallAPI/Library.cpp40
-rw-r--r--contrib/llvm-project/clang/lib/InstallAPI/Visitor.cpp728
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/CodeCompletion.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.cpp35
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h22
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp58
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h5
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/Interpreter.cpp522
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/Value.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/Wasm.cpp149
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/Wasm.h38
-rw-r--r--contrib/llvm-project/clang/lib/Lex/DependencyDirectivesScanner.cpp31
-rw-r--r--contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp347
-rw-r--r--contrib/llvm-project/clang/lib/Lex/Lexer.cpp68
-rw-r--r--contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp68
-rw-r--r--contrib/llvm-project/clang/lib/Lex/MacroInfo.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp66
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp552
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp57
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp192
-rw-r--r--contrib/llvm-project/clang/lib/Lex/Pragma.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PreprocessingRecord.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp138
-rw-r--r--contrib/llvm-project/clang/lib/Lex/TokenConcatenation.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseAST.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp86
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp760
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp490
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp364
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp193
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseHLSL.cpp125
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseInit.cpp49
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp438
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseOpenACC.cpp742
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp932
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp31
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp83
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp244
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp35
-rw-r--r--contrib/llvm-project/clang/lib/Parse/Parser.cpp123
-rw-r--r--contrib/llvm-project/clang/lib/Rewrite/HTMLRewrite.cpp135
-rw-r--r--contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp60
-rw-r--r--contrib/llvm-project/clang/lib/Sema/CheckExprLifetime.cpp1330
-rw-r--r--contrib/llvm-project/clang/lib/Sema/CheckExprLifetime.h39
-rw-r--r--contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Sema/DeclSpec.cpp60
-rw-r--r--contrib/llvm-project/clang/lib/Sema/HLSLExternalSemaSource.cpp84
-rw-r--r--contrib/llvm-project/clang/lib/Sema/JumpDiagnostics.cpp28
-rw-r--r--contrib/llvm-project/clang/lib/Sema/MultiplexExternalSemaSource.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td26
-rw-r--r--contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Sema/Scope.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Sema/ScopeInfo.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Sema/Sema.cpp503
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaAMDGPU.cpp311
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaAPINotes.cpp1036
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaARM.cpp1340
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaAVR.cpp49
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaAccess.cpp83
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp44
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaAvailability.cpp202
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaBPF.cpp194
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaBase.cpp90
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaBoundsSafety.cpp193
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp439
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp115
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCast.cpp120
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp7772
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp1479
-rwxr-xr-xcontrib/llvm-project/clang/lib/Sema/SemaConcept.cpp760
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp104
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp2685
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp3794
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp1970
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp1463
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp69
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp2613
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp925
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp384
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp1398
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaHLSL.cpp1103
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaHexagon.cpp290
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaInit.cpp1813
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp160
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp505
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaLoongArch.cpp515
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaM68k.cpp56
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaMIPS.cpp300
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaMSP430.cpp78
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaModule.cpp169
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaNVPTX.cpp35
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaObjC.cpp2408
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp297
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaOpenACC.cpp1710
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaOpenCL.cpp578
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp8353
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp1815
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaPPC.cpp439
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaPseudoObject.cpp309
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaRISCV.cpp1491
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp497
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp95
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp509
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp121
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaSwift.cpp765
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaSystemZ.cpp94
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp1838
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp1727
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplateDeductionGuide.cpp1450
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp543
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp559
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp106
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaType.cpp1069
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaWasm.cpp341
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaX86.cpp972
-rw-r--r--contrib/llvm-project/clang/lib/Sema/TreeTransform.h2075
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTCommon.h24
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp1317
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp389
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTReaderInternals.h10
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp180
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp1902
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp190
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp117
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/GeneratePCH.cpp87
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ModuleFile.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ModuleFileExtension.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/MultiOnDiskHashTable.h4
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/PCHContainerOperations.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp488
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp391
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp55
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp66
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp321
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CXXDeleteChecker.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp96
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp76
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp37
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugContainerModeling.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugIteratorModeling.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoChecker.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp131
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h9
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoTesterChecker.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp447
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp58
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InvalidatedIteratorChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp110
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp26
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp457
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NoOwnershipChangeVisitor.cpp118
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NoOwnershipChangeVisitor.h77
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp126
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp115
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PutenvStackArrayChecker.cpp (renamed from contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/PutenvWithAutoChecker.cpp)37
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp156
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp69
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SetgidSetuidOrderChecker.cpp196
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp140
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp1350
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StringChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h3
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.cpp21
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp230
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp35
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp56
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h11
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp401
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h32
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp304
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp142
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLocalVarsChecker.cpp198
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/InvalidPtrChecker.cpp31
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp80
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp102
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugSuppression.cpp28
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallDescription.cpp138
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp29
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp27
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp39
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp32
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SVals.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Z3CrosscheckVisitor.cpp160
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Testing/TestAST.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/AllTUsExecution.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp226
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp232
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp150
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DumpTool/APIData.h31
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.cpp271
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.h53
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DumpTool/ClangSrcLocDump.cpp155
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/EmptyNodeIntrospection.inc.in48
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Inclusions/IncludeStyle.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/CSpecialSymbolMap.inc14
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StandardLibrary.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/JSONCompilationDatabase.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/LocateToolCompilationDatabase.cpp71
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/NodeIntrospection.cpp88
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Refactoring/AtomicChange.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Syntax/BuildTree.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Tooling.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Transformer/RangeSelector.cpp51
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCode.cpp59
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp4
749 files changed, 97484 insertions, 58759 deletions
diff --git a/contrib/llvm-project/clang/lib/APINotes/APINotesFormat.h b/contrib/llvm-project/clang/lib/APINotes/APINotesFormat.h
index 615314c46f09..cd6456dbe37b 100644
--- a/contrib/llvm-project/clang/lib/APINotes/APINotesFormat.h
+++ b/contrib/llvm-project/clang/lib/APINotes/APINotesFormat.h
@@ -24,7 +24,10 @@ const uint16_t VERSION_MAJOR = 0;
/// API notes file minor version number.
///
/// When the format changes IN ANY WAY, this number should be incremented.
-const uint16_t VERSION_MINOR = 25; // SwiftImportAs
+const uint16_t VERSION_MINOR = 27; // SingleDeclTableKey
+
+const uint8_t kSwiftCopyable = 1;
+const uint8_t kSwiftNonCopyable = 2;
using IdentifierID = llvm::PointerEmbeddedInt<unsigned, 31>;
using IdentifierIDField = llvm::BCVBR<16>;
@@ -60,6 +63,10 @@ enum BlockID {
/// about the method.
OBJC_METHOD_BLOCK_ID,
+ /// The C++ method data block, which maps C++ (context id, method name) pairs
+ /// to information about the method.
+ CXX_METHOD_BLOCK_ID,
+
/// The Objective-C selector data block, which maps Objective-C
/// selector names (# of pieces, identifier IDs) to the selector ID
/// used in other tables.
@@ -129,26 +136,26 @@ using IdentifierDataLayout = llvm::BCRecordLayout<
>;
} // namespace identifier_block
-namespace objc_context_block {
+namespace context_block {
enum {
- OBJC_CONTEXT_ID_DATA = 1,
- OBJC_CONTEXT_INFO_DATA = 2,
+ CONTEXT_ID_DATA = 1,
+ CONTEXT_INFO_DATA = 2,
};
-using ObjCContextIDLayout =
- llvm::BCRecordLayout<OBJC_CONTEXT_ID_DATA, // record ID
+using ContextIDLayout =
+ llvm::BCRecordLayout<CONTEXT_ID_DATA, // record ID
llvm::BCVBR<16>, // table offset within the blob (see
// below)
llvm::BCBlob // map from ObjC class names/protocol (as
// IDs) to context IDs
>;
-using ObjCContextInfoLayout = llvm::BCRecordLayout<
- OBJC_CONTEXT_INFO_DATA, // record ID
- llvm::BCVBR<16>, // table offset within the blob (see below)
- llvm::BCBlob // map from ObjC context IDs to context information.
+using ContextInfoLayout = llvm::BCRecordLayout<
+ CONTEXT_INFO_DATA, // record ID
+ llvm::BCVBR<16>, // table offset within the blob (see below)
+ llvm::BCBlob // map from ObjC context IDs to context information.
>;
-} // namespace objc_context_block
+} // namespace context_block
namespace objc_property_block {
enum {
@@ -178,6 +185,20 @@ using ObjCMethodDataLayout =
>;
} // namespace objc_method_block
+namespace cxx_method_block {
+enum {
+ CXX_METHOD_DATA = 1,
+};
+
+using CXXMethodDataLayout =
+ llvm::BCRecordLayout<CXX_METHOD_DATA, // record ID
+ llvm::BCVBR<16>, // table offset within the blob (see
+ // below)
+ llvm::BCBlob // map from C++ (context id, name)
+ // tuples to C++ method information
+ >;
+} // namespace cxx_method_block
+
namespace objc_selector_block {
enum {
OBJC_SELECTOR_DATA = 1,
@@ -266,11 +287,16 @@ struct ContextTableKey {
: parentContextID(parentContextID), contextKind(contextKind),
contextID(contextID) {}
- ContextTableKey(std::optional<Context> context, IdentifierID nameID)
- : parentContextID(context ? context->id.Value : (uint32_t)-1),
- contextKind(context ? static_cast<uint8_t>(context->kind)
- : static_cast<uint8_t>(-1)),
- contextID(nameID) {}
+ ContextTableKey(std::optional<ContextID> ParentContextID, ContextKind Kind,
+ uint32_t ContextID)
+ : parentContextID(ParentContextID ? ParentContextID->Value : -1),
+ contextKind(static_cast<uint8_t>(Kind)), contextID(ContextID) {}
+
+ ContextTableKey(std::optional<Context> ParentContext, ContextKind Kind,
+ uint32_t ContextID)
+ : ContextTableKey(ParentContext ? std::make_optional(ParentContext->id)
+ : std::nullopt,
+ Kind, ContextID) {}
llvm::hash_code hashValue() const {
return llvm::hash_value(
@@ -283,6 +309,32 @@ inline bool operator==(const ContextTableKey &lhs, const ContextTableKey &rhs) {
lhs.contextKind == rhs.contextKind && lhs.contextID == rhs.contextID;
}
+/// A stored Objective-C or C++ declaration, represented by the ID of its parent
+/// context, and the name of the declaration.
+struct SingleDeclTableKey {
+ uint32_t parentContextID;
+ uint32_t nameID;
+
+ SingleDeclTableKey() : parentContextID(-1), nameID(-1) {}
+
+ SingleDeclTableKey(uint32_t ParentContextID, uint32_t NameID)
+ : parentContextID(ParentContextID), nameID(NameID) {}
+
+ SingleDeclTableKey(std::optional<Context> ParentCtx, IdentifierID NameID)
+ : parentContextID(ParentCtx ? ParentCtx->id.Value
+ : static_cast<uint32_t>(-1)),
+ nameID(NameID) {}
+
+ llvm::hash_code hashValue() const {
+ return llvm::hash_value(std::make_pair(parentContextID, nameID));
+ }
+};
+
+inline bool operator==(const SingleDeclTableKey &lhs,
+ const SingleDeclTableKey &rhs) {
+ return lhs.parentContextID == rhs.parentContextID && lhs.nameID == rhs.nameID;
+}
+
} // namespace api_notes
} // namespace clang
@@ -338,6 +390,29 @@ template <> struct DenseMapInfo<clang::api_notes::ContextTableKey> {
return lhs == rhs;
}
};
+
+template <> struct DenseMapInfo<clang::api_notes::SingleDeclTableKey> {
+ static inline clang::api_notes::SingleDeclTableKey getEmptyKey() {
+ return clang::api_notes::SingleDeclTableKey();
+ }
+
+ static inline clang::api_notes::SingleDeclTableKey getTombstoneKey() {
+ return clang::api_notes::SingleDeclTableKey{
+ DenseMapInfo<uint32_t>::getTombstoneKey(),
+ DenseMapInfo<uint32_t>::getTombstoneKey()};
+ }
+
+ static unsigned
+ getHashValue(const clang::api_notes::SingleDeclTableKey &value) {
+ return value.hashValue();
+ }
+
+ static bool isEqual(const clang::api_notes::SingleDeclTableKey &lhs,
+ const clang::api_notes::SingleDeclTableKey &rhs) {
+ return lhs == rhs;
+ }
+};
+
} // namespace llvm
#endif
diff --git a/contrib/llvm-project/clang/lib/APINotes/APINotesManager.cpp b/contrib/llvm-project/clang/lib/APINotes/APINotesManager.cpp
index d3aef09dac91..039d09fa7cf5 100644
--- a/contrib/llvm-project/clang/lib/APINotes/APINotesManager.cpp
+++ b/contrib/llvm-project/clang/lib/APINotes/APINotesManager.cpp
@@ -12,6 +12,7 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/SourceMgrAdapter.h"
#include "clang/Basic/Version.h"
@@ -221,10 +222,11 @@ APINotesManager::getCurrentModuleAPINotes(Module *M, bool LookInModule,
ArrayRef<std::string> SearchPaths) {
FileManager &FM = SM.getFileManager();
auto ModuleName = M->getTopLevelModuleName();
+ auto ExportedModuleName = M->getTopLevelModule()->ExportAsModule;
llvm::SmallVector<FileEntryRef, 2> APINotes;
// First, look relative to the module itself.
- if (LookInModule) {
+ if (LookInModule && M->Directory) {
// Local function to try loading an API notes file in the given directory.
auto tryAPINotes = [&](DirectoryEntryRef Dir, bool WantPublic) {
if (auto File = findAPINotesFile(Dir, ModuleName, WantPublic)) {
@@ -233,6 +235,10 @@ APINotesManager::getCurrentModuleAPINotes(Module *M, bool LookInModule,
APINotes.push_back(*File);
}
+ // If module FooCore is re-exported through module Foo, try Foo.apinotes.
+ if (!ExportedModuleName.empty())
+ if (auto File = findAPINotesFile(Dir, ExportedModuleName, WantPublic))
+ APINotes.push_back(*File);
};
if (M->IsFramework) {
diff --git a/contrib/llvm-project/clang/lib/APINotes/APINotesReader.cpp b/contrib/llvm-project/clang/lib/APINotes/APINotesReader.cpp
index ff9b95d9bf75..871f782511d5 100644
--- a/contrib/llvm-project/clang/lib/APINotes/APINotesReader.cpp
+++ b/contrib/llvm-project/clang/lib/APINotes/APINotesReader.cpp
@@ -5,7 +5,13 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-
+//
+// This file implements the \c APINotesReader class that reads source
+// API notes data providing additional information about source code as
+// a separate input, such as the non-nil/nilable annotations for
+// method parameters.
+//
+//===----------------------------------------------------------------------===//
#include "clang/APINotes/APINotesReader.h"
#include "APINotesFormat.h"
#include "llvm/ADT/Hashing.h"
@@ -24,23 +30,20 @@ namespace {
llvm::VersionTuple ReadVersionTuple(const uint8_t *&Data) {
uint8_t NumVersions = (*Data++) & 0x03;
- unsigned Major =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ unsigned Major = endian::readNext<uint32_t, llvm::endianness::little>(Data);
if (NumVersions == 0)
return llvm::VersionTuple(Major);
- unsigned Minor =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ unsigned Minor = endian::readNext<uint32_t, llvm::endianness::little>(Data);
if (NumVersions == 1)
return llvm::VersionTuple(Major, Minor);
unsigned Subminor =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint32_t, llvm::endianness::little>(Data);
if (NumVersions == 2)
return llvm::VersionTuple(Major, Minor, Subminor);
- unsigned Build =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ unsigned Build = endian::readNext<uint32_t, llvm::endianness::little>(Data);
return llvm::VersionTuple(Major, Minor, Subminor, Build);
}
@@ -65,25 +68,25 @@ public:
static std::pair<unsigned, unsigned> ReadKeyDataLength(const uint8_t *&Data) {
unsigned KeyLength =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint16_t, llvm::endianness::little>(Data);
unsigned DataLength =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint16_t, llvm::endianness::little>(Data);
return {KeyLength, DataLength};
}
static data_type ReadData(internal_key_type Key, const uint8_t *Data,
unsigned Length) {
unsigned NumElements =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint16_t, llvm::endianness::little>(Data);
data_type Result;
Result.reserve(NumElements);
for (unsigned i = 0; i != NumElements; ++i) {
auto version = ReadVersionTuple(Data);
const auto *DataBefore = Data;
(void)DataBefore;
+ auto UnversionedData = Derived::readUnversioned(Key, Data);
assert(Data != DataBefore &&
"Unversioned data reader didn't move pointer");
- auto UnversionedData = Derived::readUnversioned(Key, Data);
Result.push_back({version, UnversionedData});
}
return Result;
@@ -99,14 +102,14 @@ void ReadCommonEntityInfo(const uint8_t *&Data, CommonEntityInfo &Info) {
Info.setSwiftPrivate(static_cast<bool>((UnavailableBits >> 3) & 0x01));
unsigned MsgLength =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint16_t, llvm::endianness::little>(Data);
Info.UnavailableMsg =
std::string(reinterpret_cast<const char *>(Data),
reinterpret_cast<const char *>(Data) + MsgLength);
Data += MsgLength;
unsigned SwiftNameLength =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint16_t, llvm::endianness::little>(Data);
Info.SwiftName =
std::string(reinterpret_cast<const char *>(Data),
reinterpret_cast<const char *>(Data) + SwiftNameLength);
@@ -118,7 +121,7 @@ void ReadCommonTypeInfo(const uint8_t *&Data, CommonTypeInfo &Info) {
ReadCommonEntityInfo(Data, Info);
unsigned SwiftBridgeLength =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint16_t, llvm::endianness::little>(Data);
if (SwiftBridgeLength > 0) {
Info.setSwiftBridge(std::string(reinterpret_cast<const char *>(Data),
SwiftBridgeLength - 1));
@@ -126,7 +129,7 @@ void ReadCommonTypeInfo(const uint8_t *&Data, CommonTypeInfo &Info) {
}
unsigned ErrorDomainLength =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint16_t, llvm::endianness::little>(Data);
if (ErrorDomainLength > 0) {
Info.setNSErrorDomain(std::optional<std::string>(std::string(
reinterpret_cast<const char *>(Data), ErrorDomainLength - 1)));
@@ -148,7 +151,7 @@ public:
external_key_type GetExternalKey(internal_key_type Key) { return Key; }
hash_value_type ComputeHash(internal_key_type Key) {
- return llvm::hash_value(Key);
+ return llvm::djbHash(Key);
}
static bool EqualKey(internal_key_type LHS, internal_key_type RHS) {
@@ -157,9 +160,9 @@ public:
static std::pair<unsigned, unsigned> ReadKeyDataLength(const uint8_t *&Data) {
unsigned KeyLength =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint16_t, llvm::endianness::little>(Data);
unsigned DataLength =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint16_t, llvm::endianness::little>(Data);
return {KeyLength, DataLength};
}
@@ -169,13 +172,13 @@ public:
static data_type ReadData(internal_key_type key, const uint8_t *Data,
unsigned Length) {
- return endian::readNext<uint32_t, llvm::endianness::little, unaligned>(
- Data);
+ return endian::readNext<uint32_t, llvm::endianness::little>(Data);
}
};
-/// Used to deserialize the on-disk Objective-C class table.
-class ObjCContextIDTableInfo {
+/// Used to deserialize the on-disk table of Objective-C classes and C++
+/// namespaces.
+class ContextIDTableInfo {
public:
using internal_key_type = ContextTableKey;
using external_key_type = internal_key_type;
@@ -197,46 +200,42 @@ public:
static std::pair<unsigned, unsigned> ReadKeyDataLength(const uint8_t *&Data) {
unsigned KeyLength =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint16_t, llvm::endianness::little>(Data);
unsigned DataLength =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint16_t, llvm::endianness::little>(Data);
return {KeyLength, DataLength};
}
static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
auto ParentCtxID =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint32_t, llvm::endianness::little>(Data);
auto ContextKind =
- endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data);
- auto NameID =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint8_t, llvm::endianness::little>(Data);
+ auto NameID = endian::readNext<uint32_t, llvm::endianness::little>(Data);
return {ParentCtxID, ContextKind, NameID};
}
static data_type ReadData(internal_key_type Key, const uint8_t *Data,
unsigned Length) {
- return endian::readNext<uint32_t, llvm::endianness::little, unaligned>(
- Data);
+ return endian::readNext<uint32_t, llvm::endianness::little>(Data);
}
};
/// Used to deserialize the on-disk Objective-C property table.
-class ObjCContextInfoTableInfo
- : public VersionedTableInfo<ObjCContextInfoTableInfo, unsigned,
- ObjCContextInfo> {
+class ContextInfoTableInfo
+ : public VersionedTableInfo<ContextInfoTableInfo, unsigned, ContextInfo> {
public:
static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
- return endian::readNext<uint32_t, llvm::endianness::little, unaligned>(
- Data);
+ return endian::readNext<uint32_t, llvm::endianness::little>(Data);
}
hash_value_type ComputeHash(internal_key_type Key) {
return static_cast<size_t>(llvm::hash_value(Key));
}
- static ObjCContextInfo readUnversioned(internal_key_type Key,
- const uint8_t *&Data) {
- ObjCContextInfo Info;
+ static ContextInfo readUnversioned(internal_key_type Key,
+ const uint8_t *&Data) {
+ ContextInfo Info;
ReadCommonTypeInfo(Data, Info);
uint8_t Payload = *Data++;
@@ -267,8 +266,7 @@ void ReadVariableInfo(const uint8_t *&Data, VariableInfo &Info) {
}
++Data;
- auto TypeLen =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ auto TypeLen = endian::readNext<uint16_t, llvm::endianness::little>(Data);
Info.setType(std::string(Data, Data + TypeLen));
Data += TypeLen;
}
@@ -280,12 +278,9 @@ class ObjCPropertyTableInfo
ObjCPropertyInfo> {
public:
static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
- auto ClassID =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
- auto NameID =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
- char IsInstance =
- endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data);
+ auto ClassID = endian::readNext<uint32_t, llvm::endianness::little>(Data);
+ auto NameID = endian::readNext<uint32_t, llvm::endianness::little>(Data);
+ char IsInstance = endian::readNext<uint8_t, llvm::endianness::little>(Data);
return {ClassID, NameID, IsInstance};
}
@@ -308,8 +303,7 @@ public:
void ReadParamInfo(const uint8_t *&Data, ParamInfo &Info) {
ReadVariableInfo(Data, Info);
- uint8_t Payload =
- endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data);
+ uint8_t Payload = endian::readNext<uint8_t, llvm::endianness::little>(Data);
if (auto RawConvention = Payload & 0x7) {
auto Convention = static_cast<RetainCountConventionKind>(RawConvention - 1);
Info.setRetainCountConvention(Convention);
@@ -325,8 +319,7 @@ void ReadParamInfo(const uint8_t *&Data, ParamInfo &Info) {
void ReadFunctionInfo(const uint8_t *&Data, FunctionInfo &Info) {
ReadCommonEntityInfo(Data, Info);
- uint8_t Payload =
- endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data);
+ uint8_t Payload = endian::readNext<uint8_t, llvm::endianness::little>(Data);
if (auto RawConvention = Payload & 0x7) {
auto Convention = static_cast<RetainCountConventionKind>(RawConvention - 1);
Info.setRetainCountConvention(Convention);
@@ -337,12 +330,12 @@ void ReadFunctionInfo(const uint8_t *&Data, FunctionInfo &Info) {
assert(Payload == 0 && "Bad API notes");
Info.NumAdjustedNullable =
- endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint8_t, llvm::endianness::little>(Data);
Info.NullabilityPayload =
- endian::readNext<uint64_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint64_t, llvm::endianness::little>(Data);
unsigned NumParams =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint16_t, llvm::endianness::little>(Data);
while (NumParams > 0) {
ParamInfo pi;
ReadParamInfo(Data, pi);
@@ -351,7 +344,7 @@ void ReadFunctionInfo(const uint8_t *&Data, FunctionInfo &Info) {
}
unsigned ResultTypeLen =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint16_t, llvm::endianness::little>(Data);
Info.ResultType = std::string(Data, Data + ResultTypeLen);
Data += ResultTypeLen;
}
@@ -363,12 +356,10 @@ class ObjCMethodTableInfo
ObjCMethodInfo> {
public:
static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
- auto ClassID =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ auto ClassID = endian::readNext<uint32_t, llvm::endianness::little>(Data);
auto SelectorID =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
- auto IsInstance =
- endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint32_t, llvm::endianness::little>(Data);
+ auto IsInstance = endian::readNext<uint8_t, llvm::endianness::little>(Data);
return {ClassID, SelectorID, IsInstance};
}
@@ -413,45 +404,38 @@ public:
static std::pair<unsigned, unsigned> ReadKeyDataLength(const uint8_t *&Data) {
unsigned KeyLength =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint16_t, llvm::endianness::little>(Data);
unsigned DataLength =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint16_t, llvm::endianness::little>(Data);
return {KeyLength, DataLength};
}
static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
internal_key_type Key;
- Key.NumArgs =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ Key.NumArgs = endian::readNext<uint16_t, llvm::endianness::little>(Data);
unsigned NumIdents = (Length - sizeof(uint16_t)) / sizeof(uint32_t);
for (unsigned i = 0; i != NumIdents; ++i) {
Key.Identifiers.push_back(
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(
- Data));
+ endian::readNext<uint32_t, llvm::endianness::little>(Data));
}
return Key;
}
static data_type ReadData(internal_key_type Key, const uint8_t *Data,
unsigned Length) {
- return endian::readNext<uint32_t, llvm::endianness::little, unaligned>(
- Data);
+ return endian::readNext<uint32_t, llvm::endianness::little>(Data);
}
};
/// Used to deserialize the on-disk global variable table.
class GlobalVariableTableInfo
- : public VersionedTableInfo<GlobalVariableTableInfo, ContextTableKey,
+ : public VersionedTableInfo<GlobalVariableTableInfo, SingleDeclTableKey,
GlobalVariableInfo> {
public:
static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
- auto CtxID =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
- auto ContextKind =
- endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data);
- auto NameID =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
- return {CtxID, ContextKind, NameID};
+ auto CtxID = endian::readNext<uint32_t, llvm::endianness::little>(Data);
+ auto NameID = endian::readNext<uint32_t, llvm::endianness::little>(Data);
+ return {CtxID, NameID};
}
hash_value_type ComputeHash(internal_key_type Key) {
@@ -468,17 +452,13 @@ public:
/// Used to deserialize the on-disk global function table.
class GlobalFunctionTableInfo
- : public VersionedTableInfo<GlobalFunctionTableInfo, ContextTableKey,
+ : public VersionedTableInfo<GlobalFunctionTableInfo, SingleDeclTableKey,
GlobalFunctionInfo> {
public:
static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
- auto CtxID =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
- auto ContextKind =
- endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data);
- auto NameID =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
- return {CtxID, ContextKind, NameID};
+ auto CtxID = endian::readNext<uint32_t, llvm::endianness::little>(Data);
+ auto NameID = endian::readNext<uint32_t, llvm::endianness::little>(Data);
+ return {CtxID, NameID};
}
hash_value_type ComputeHash(internal_key_type Key) {
@@ -493,14 +473,36 @@ public:
}
};
+/// Used to deserialize the on-disk C++ method table.
+class CXXMethodTableInfo
+ : public VersionedTableInfo<CXXMethodTableInfo, SingleDeclTableKey,
+ CXXMethodInfo> {
+public:
+ static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
+ auto CtxID = endian::readNext<uint32_t, llvm::endianness::little>(Data);
+ auto NameID = endian::readNext<uint32_t, llvm::endianness::little>(Data);
+ return {CtxID, NameID};
+ }
+
+ hash_value_type ComputeHash(internal_key_type Key) {
+ return static_cast<size_t>(Key.hashValue());
+ }
+
+ static CXXMethodInfo readUnversioned(internal_key_type Key,
+ const uint8_t *&Data) {
+ CXXMethodInfo Info;
+ ReadFunctionInfo(Data, Info);
+ return Info;
+ }
+};
+
/// Used to deserialize the on-disk enumerator table.
class EnumConstantTableInfo
: public VersionedTableInfo<EnumConstantTableInfo, uint32_t,
EnumConstantInfo> {
public:
static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
- auto NameID =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ auto NameID = endian::readNext<uint32_t, llvm::endianness::little>(Data);
return NameID;
}
@@ -518,17 +520,13 @@ public:
/// Used to deserialize the on-disk tag table.
class TagTableInfo
- : public VersionedTableInfo<TagTableInfo, ContextTableKey, TagInfo> {
+ : public VersionedTableInfo<TagTableInfo, SingleDeclTableKey, TagInfo> {
public:
static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
- auto CtxID =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
- auto ContextKind =
- endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data);
+ auto CtxID = endian::readNext<uint32_t, llvm::endianness::little>(Data);
auto NameID =
- endian::readNext<IdentifierID, llvm::endianness::little, unaligned>(
- Data);
- return {CtxID, ContextKind, NameID};
+ endian::readNext<IdentifierID, llvm::endianness::little>(Data);
+ return {CtxID, NameID};
}
hash_value_type ComputeHash(internal_key_type Key) {
@@ -546,22 +544,29 @@ public:
Info.EnumExtensibility =
static_cast<EnumExtensibilityKind>((Payload & 0x3) - 1);
+ uint8_t Copyable =
+ endian::readNext<uint8_t, llvm::endianness::little>(Data);
+ if (Copyable == kSwiftNonCopyable)
+ Info.setSwiftCopyable(std::optional(false));
+ else if (Copyable == kSwiftCopyable)
+ Info.setSwiftCopyable(std::optional(true));
+
unsigned ImportAsLength =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint16_t, llvm::endianness::little>(Data);
if (ImportAsLength > 0) {
Info.SwiftImportAs =
std::string(reinterpret_cast<const char *>(Data), ImportAsLength - 1);
Data += ImportAsLength - 1;
}
unsigned RetainOpLength =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint16_t, llvm::endianness::little>(Data);
if (RetainOpLength > 0) {
Info.SwiftRetainOp =
std::string(reinterpret_cast<const char *>(Data), RetainOpLength - 1);
Data += RetainOpLength - 1;
}
unsigned ReleaseOpLength =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint16_t, llvm::endianness::little>(Data);
if (ReleaseOpLength > 0) {
Info.SwiftReleaseOp = std::string(reinterpret_cast<const char *>(Data),
ReleaseOpLength - 1);
@@ -575,18 +580,14 @@ public:
/// Used to deserialize the on-disk typedef table.
class TypedefTableInfo
- : public VersionedTableInfo<TypedefTableInfo, ContextTableKey,
+ : public VersionedTableInfo<TypedefTableInfo, SingleDeclTableKey,
TypedefInfo> {
public:
static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
- auto CtxID =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
- auto ContextKind =
- endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data);
+ auto CtxID = endian::readNext<uint32_t, llvm::endianness::little>(Data);
auto nameID =
- endian::readNext<IdentifierID, llvm::endianness::little, unaligned>(
- Data);
- return {CtxID, ContextKind, nameID};
+ endian::readNext<IdentifierID, llvm::endianness::little>(Data);
+ return {CtxID, nameID};
}
hash_value_type ComputeHash(internal_key_type Key) {
@@ -628,17 +629,17 @@ public:
/// The identifier table.
std::unique_ptr<SerializedIdentifierTable> IdentifierTable;
- using SerializedObjCContextIDTable =
- llvm::OnDiskIterableChainedHashTable<ObjCContextIDTableInfo>;
+ using SerializedContextIDTable =
+ llvm::OnDiskIterableChainedHashTable<ContextIDTableInfo>;
- /// The Objective-C context ID table.
- std::unique_ptr<SerializedObjCContextIDTable> ObjCContextIDTable;
+ /// The Objective-C / C++ context ID table.
+ std::unique_ptr<SerializedContextIDTable> ContextIDTable;
- using SerializedObjCContextInfoTable =
- llvm::OnDiskIterableChainedHashTable<ObjCContextInfoTableInfo>;
+ using SerializedContextInfoTable =
+ llvm::OnDiskIterableChainedHashTable<ContextInfoTableInfo>;
/// The Objective-C context info table.
- std::unique_ptr<SerializedObjCContextInfoTable> ObjCContextInfoTable;
+ std::unique_ptr<SerializedContextInfoTable> ContextInfoTable;
using SerializedObjCPropertyTable =
llvm::OnDiskIterableChainedHashTable<ObjCPropertyTableInfo>;
@@ -652,6 +653,12 @@ public:
/// The Objective-C method table.
std::unique_ptr<SerializedObjCMethodTable> ObjCMethodTable;
+ using SerializedCXXMethodTable =
+ llvm::OnDiskIterableChainedHashTable<CXXMethodTableInfo>;
+
+ /// The C++ method table.
+ std::unique_ptr<SerializedCXXMethodTable> CXXMethodTable;
+
using SerializedObjCSelectorTable =
llvm::OnDiskIterableChainedHashTable<ObjCSelectorTableInfo>;
@@ -699,12 +706,14 @@ public:
llvm::SmallVectorImpl<uint64_t> &Scratch);
bool readIdentifierBlock(llvm::BitstreamCursor &Cursor,
llvm::SmallVectorImpl<uint64_t> &Scratch);
- bool readObjCContextBlock(llvm::BitstreamCursor &Cursor,
- llvm::SmallVectorImpl<uint64_t> &Scratch);
+ bool readContextBlock(llvm::BitstreamCursor &Cursor,
+ llvm::SmallVectorImpl<uint64_t> &Scratch);
bool readObjCPropertyBlock(llvm::BitstreamCursor &Cursor,
llvm::SmallVectorImpl<uint64_t> &Scratch);
bool readObjCMethodBlock(llvm::BitstreamCursor &Cursor,
llvm::SmallVectorImpl<uint64_t> &Scratch);
+ bool readCXXMethodBlock(llvm::BitstreamCursor &Cursor,
+ llvm::SmallVectorImpl<uint64_t> &Scratch);
bool readObjCSelectorBlock(llvm::BitstreamCursor &Cursor,
llvm::SmallVectorImpl<uint64_t> &Scratch);
bool readGlobalVariableBlock(llvm::BitstreamCursor &Cursor,
@@ -920,7 +929,7 @@ bool APINotesReader::Implementation::readIdentifierBlock(
return false;
}
-bool APINotesReader::Implementation::readObjCContextBlock(
+bool APINotesReader::Implementation::readContextBlock(
llvm::BitstreamCursor &Cursor, llvm::SmallVectorImpl<uint64_t> &Scratch) {
if (Cursor.EnterSubBlock(OBJC_CONTEXT_BLOCK_ID))
return true;
@@ -964,31 +973,30 @@ bool APINotesReader::Implementation::readObjCContextBlock(
}
unsigned Kind = MaybeKind.get();
switch (Kind) {
- case objc_context_block::OBJC_CONTEXT_ID_DATA: {
- // Already saw Objective-C context ID table.
- if (ObjCContextIDTable)
+ case context_block::CONTEXT_ID_DATA: {
+ // Already saw Objective-C / C++ context ID table.
+ if (ContextIDTable)
return true;
uint32_t tableOffset;
- objc_context_block::ObjCContextIDLayout::readRecord(Scratch, tableOffset);
+ context_block::ContextIDLayout::readRecord(Scratch, tableOffset);
auto base = reinterpret_cast<const uint8_t *>(BlobData.data());
- ObjCContextIDTable.reset(SerializedObjCContextIDTable::Create(
+ ContextIDTable.reset(SerializedContextIDTable::Create(
base + tableOffset, base + sizeof(uint32_t), base));
break;
}
- case objc_context_block::OBJC_CONTEXT_INFO_DATA: {
- // Already saw Objective-C context info table.
- if (ObjCContextInfoTable)
+ case context_block::CONTEXT_INFO_DATA: {
+ // Already saw Objective-C / C++ context info table.
+ if (ContextInfoTable)
return true;
uint32_t tableOffset;
- objc_context_block::ObjCContextInfoLayout::readRecord(Scratch,
- tableOffset);
+ context_block::ContextInfoLayout::readRecord(Scratch, tableOffset);
auto base = reinterpret_cast<const uint8_t *>(BlobData.data());
- ObjCContextInfoTable.reset(SerializedObjCContextInfoTable::Create(
+ ContextInfoTable.reset(SerializedContextInfoTable::Create(
base + tableOffset, base + sizeof(uint32_t), base));
break;
}
@@ -1163,6 +1171,81 @@ bool APINotesReader::Implementation::readObjCMethodBlock(
return false;
}
+bool APINotesReader::Implementation::readCXXMethodBlock(
+ llvm::BitstreamCursor &Cursor, llvm::SmallVectorImpl<uint64_t> &Scratch) {
+ if (Cursor.EnterSubBlock(CXX_METHOD_BLOCK_ID))
+ return true;
+
+ llvm::Expected<llvm::BitstreamEntry> MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ llvm::BitstreamEntry Next = MaybeNext.get();
+ while (Next.Kind != llvm::BitstreamEntry::EndBlock) {
+ if (Next.Kind == llvm::BitstreamEntry::Error)
+ return true;
+
+ if (Next.Kind == llvm::BitstreamEntry::SubBlock) {
+ // Unknown sub-block, possibly for use by a future version of the
+ // API notes format.
+ if (Cursor.SkipBlock())
+ return true;
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ continue;
+ }
+
+ Scratch.clear();
+ llvm::StringRef BlobData;
+ llvm::Expected<unsigned> MaybeKind =
+ Cursor.readRecord(Next.ID, Scratch, &BlobData);
+ if (!MaybeKind) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeKind.takeError());
+ return false;
+ }
+ unsigned Kind = MaybeKind.get();
+ switch (Kind) {
+ case cxx_method_block::CXX_METHOD_DATA: {
+ // Already saw C++ method table.
+ if (CXXMethodTable)
+ return true;
+
+ uint32_t tableOffset;
+ cxx_method_block::CXXMethodDataLayout::readRecord(Scratch, tableOffset);
+ auto base = reinterpret_cast<const uint8_t *>(BlobData.data());
+
+ CXXMethodTable.reset(SerializedCXXMethodTable::Create(
+ base + tableOffset, base + sizeof(uint32_t), base));
+ break;
+ }
+
+ default:
+ // Unknown record, possibly for use by a future version of the
+ // module format.
+ break;
+ }
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ }
+
+ return false;
+}
+
bool APINotesReader::Implementation::readObjCSelectorBlock(
llvm::BitstreamCursor &Cursor, llvm::SmallVectorImpl<uint64_t> &Scratch) {
if (Cursor.EnterSubBlock(OBJC_SELECTOR_BLOCK_ID))
@@ -1692,7 +1775,7 @@ APINotesReader::APINotesReader(llvm::MemoryBuffer *InputBuffer,
case OBJC_CONTEXT_BLOCK_ID:
if (!HasValidControlBlock ||
- Implementation->readObjCContextBlock(Cursor, Scratch)) {
+ Implementation->readContextBlock(Cursor, Scratch)) {
Failed = true;
return;
}
@@ -1715,6 +1798,14 @@ APINotesReader::APINotesReader(llvm::MemoryBuffer *InputBuffer,
}
break;
+ case CXX_METHOD_BLOCK_ID:
+ if (!HasValidControlBlock ||
+ Implementation->readCXXMethodBlock(Cursor, Scratch)) {
+ Failed = true;
+ return;
+ }
+ break;
+
case OBJC_SELECTOR_BLOCK_ID:
if (!HasValidControlBlock ||
Implementation->readObjCSelectorBlock(Cursor, Scratch)) {
@@ -1797,8 +1888,8 @@ APINotesReader::Create(std::unique_ptr<llvm::MemoryBuffer> InputBuffer,
template <typename T>
APINotesReader::VersionedInfo<T>::VersionedInfo(
llvm::VersionTuple Version,
- llvm::SmallVector<std::pair<llvm::VersionTuple, T>, 1> Results)
- : Results(std::move(Results)) {
+ llvm::SmallVector<std::pair<llvm::VersionTuple, T>, 1> R)
+ : Results(std::move(R)) {
assert(!Results.empty());
assert(std::is_sorted(
@@ -1829,7 +1920,7 @@ APINotesReader::VersionedInfo<T>::VersionedInfo(
auto APINotesReader::lookupObjCClassID(llvm::StringRef Name)
-> std::optional<ContextID> {
- if (!Implementation->ObjCContextIDTable)
+ if (!Implementation->ContextIDTable)
return std::nullopt;
std::optional<IdentifierID> ClassID = Implementation->getIdentifier(Name);
@@ -1838,25 +1929,25 @@ auto APINotesReader::lookupObjCClassID(llvm::StringRef Name)
// ObjC classes can't be declared in C++ namespaces, so use -1 as the global
// context.
- auto KnownID = Implementation->ObjCContextIDTable->find(
+ auto KnownID = Implementation->ContextIDTable->find(
ContextTableKey(-1, (uint8_t)ContextKind::ObjCClass, *ClassID));
- if (KnownID == Implementation->ObjCContextIDTable->end())
+ if (KnownID == Implementation->ContextIDTable->end())
return std::nullopt;
return ContextID(*KnownID);
}
auto APINotesReader::lookupObjCClassInfo(llvm::StringRef Name)
- -> VersionedInfo<ObjCContextInfo> {
- if (!Implementation->ObjCContextInfoTable)
+ -> VersionedInfo<ContextInfo> {
+ if (!Implementation->ContextInfoTable)
return std::nullopt;
std::optional<ContextID> CtxID = lookupObjCClassID(Name);
if (!CtxID)
return std::nullopt;
- auto KnownInfo = Implementation->ObjCContextInfoTable->find(CtxID->Value);
- if (KnownInfo == Implementation->ObjCContextInfoTable->end())
+ auto KnownInfo = Implementation->ContextInfoTable->find(CtxID->Value);
+ if (KnownInfo == Implementation->ContextInfoTable->end())
return std::nullopt;
return {Implementation->SwiftVersion, *KnownInfo};
@@ -1864,7 +1955,7 @@ auto APINotesReader::lookupObjCClassInfo(llvm::StringRef Name)
auto APINotesReader::lookupObjCProtocolID(llvm::StringRef Name)
-> std::optional<ContextID> {
- if (!Implementation->ObjCContextIDTable)
+ if (!Implementation->ContextIDTable)
return std::nullopt;
std::optional<IdentifierID> classID = Implementation->getIdentifier(Name);
@@ -1873,25 +1964,25 @@ auto APINotesReader::lookupObjCProtocolID(llvm::StringRef Name)
// ObjC classes can't be declared in C++ namespaces, so use -1 as the global
// context.
- auto KnownID = Implementation->ObjCContextIDTable->find(
+ auto KnownID = Implementation->ContextIDTable->find(
ContextTableKey(-1, (uint8_t)ContextKind::ObjCProtocol, *classID));
- if (KnownID == Implementation->ObjCContextIDTable->end())
+ if (KnownID == Implementation->ContextIDTable->end())
return std::nullopt;
return ContextID(*KnownID);
}
auto APINotesReader::lookupObjCProtocolInfo(llvm::StringRef Name)
- -> VersionedInfo<ObjCContextInfo> {
- if (!Implementation->ObjCContextInfoTable)
+ -> VersionedInfo<ContextInfo> {
+ if (!Implementation->ContextInfoTable)
return std::nullopt;
std::optional<ContextID> CtxID = lookupObjCProtocolID(Name);
if (!CtxID)
return std::nullopt;
- auto KnownInfo = Implementation->ObjCContextInfoTable->find(CtxID->Value);
- if (KnownInfo == Implementation->ObjCContextInfoTable->end())
+ auto KnownInfo = Implementation->ContextInfoTable->find(CtxID->Value);
+ if (KnownInfo == Implementation->ContextInfoTable->end())
return std::nullopt;
return {Implementation->SwiftVersion, *KnownInfo};
@@ -1934,6 +2025,23 @@ auto APINotesReader::lookupObjCMethod(ContextID CtxID, ObjCSelectorRef Selector,
return {Implementation->SwiftVersion, *Known};
}
+auto APINotesReader::lookupCXXMethod(ContextID CtxID, llvm::StringRef Name)
+ -> VersionedInfo<CXXMethodInfo> {
+ if (!Implementation->CXXMethodTable)
+ return std::nullopt;
+
+ std::optional<IdentifierID> NameID = Implementation->getIdentifier(Name);
+ if (!NameID)
+ return std::nullopt;
+
+ auto Known = Implementation->CXXMethodTable->find(
+ SingleDeclTableKey(CtxID.Value, *NameID));
+ if (Known == Implementation->CXXMethodTable->end())
+ return std::nullopt;
+
+ return {Implementation->SwiftVersion, *Known};
+}
+
auto APINotesReader::lookupGlobalVariable(llvm::StringRef Name,
std::optional<Context> Ctx)
-> VersionedInfo<GlobalVariableInfo> {
@@ -1944,7 +2052,7 @@ auto APINotesReader::lookupGlobalVariable(llvm::StringRef Name,
if (!NameID)
return std::nullopt;
- ContextTableKey Key(Ctx, *NameID);
+ SingleDeclTableKey Key(Ctx, *NameID);
auto Known = Implementation->GlobalVariableTable->find(Key);
if (Known == Implementation->GlobalVariableTable->end())
@@ -1963,7 +2071,7 @@ auto APINotesReader::lookupGlobalFunction(llvm::StringRef Name,
if (!NameID)
return std::nullopt;
- ContextTableKey Key(Ctx, *NameID);
+ SingleDeclTableKey Key(Ctx, *NameID);
auto Known = Implementation->GlobalFunctionTable->find(Key);
if (Known == Implementation->GlobalFunctionTable->end())
@@ -1988,6 +2096,24 @@ auto APINotesReader::lookupEnumConstant(llvm::StringRef Name)
return {Implementation->SwiftVersion, *Known};
}
+auto APINotesReader::lookupTagID(llvm::StringRef Name,
+ std::optional<Context> ParentCtx)
+ -> std::optional<ContextID> {
+ if (!Implementation->ContextIDTable)
+ return std::nullopt;
+
+ std::optional<IdentifierID> TagID = Implementation->getIdentifier(Name);
+ if (!TagID)
+ return std::nullopt;
+
+ auto KnownID = Implementation->ContextIDTable->find(
+ ContextTableKey(ParentCtx, ContextKind::Tag, *TagID));
+ if (KnownID == Implementation->ContextIDTable->end())
+ return std::nullopt;
+
+ return ContextID(*KnownID);
+}
+
auto APINotesReader::lookupTag(llvm::StringRef Name, std::optional<Context> Ctx)
-> VersionedInfo<TagInfo> {
if (!Implementation->TagTable)
@@ -1997,7 +2123,7 @@ auto APINotesReader::lookupTag(llvm::StringRef Name, std::optional<Context> Ctx)
if (!NameID)
return std::nullopt;
- ContextTableKey Key(Ctx, *NameID);
+ SingleDeclTableKey Key(Ctx, *NameID);
auto Known = Implementation->TagTable->find(Key);
if (Known == Implementation->TagTable->end())
@@ -2016,7 +2142,7 @@ auto APINotesReader::lookupTypedef(llvm::StringRef Name,
if (!NameID)
return std::nullopt;
- ContextTableKey Key(Ctx, *NameID);
+ SingleDeclTableKey Key(Ctx, *NameID);
auto Known = Implementation->TypedefTable->find(Key);
if (Known == Implementation->TypedefTable->end())
@@ -2028,7 +2154,7 @@ auto APINotesReader::lookupTypedef(llvm::StringRef Name,
auto APINotesReader::lookupNamespaceID(
llvm::StringRef Name, std::optional<ContextID> ParentNamespaceID)
-> std::optional<ContextID> {
- if (!Implementation->ObjCContextIDTable)
+ if (!Implementation->ContextIDTable)
return std::nullopt;
std::optional<IdentifierID> NamespaceID = Implementation->getIdentifier(Name);
@@ -2037,9 +2163,9 @@ auto APINotesReader::lookupNamespaceID(
uint32_t RawParentNamespaceID =
ParentNamespaceID ? ParentNamespaceID->Value : -1;
- auto KnownID = Implementation->ObjCContextIDTable->find(
+ auto KnownID = Implementation->ContextIDTable->find(
{RawParentNamespaceID, (uint8_t)ContextKind::Namespace, *NamespaceID});
- if (KnownID == Implementation->ObjCContextIDTable->end())
+ if (KnownID == Implementation->ContextIDTable->end())
return std::nullopt;
return ContextID(*KnownID);
diff --git a/contrib/llvm-project/clang/lib/APINotes/APINotesTypes.cpp b/contrib/llvm-project/clang/lib/APINotes/APINotesTypes.cpp
index c0bb726ea72b..a87ecb3bc30e 100644
--- a/contrib/llvm-project/clang/lib/APINotes/APINotesTypes.cpp
+++ b/contrib/llvm-project/clang/lib/APINotes/APINotesTypes.cpp
@@ -32,7 +32,7 @@ LLVM_DUMP_METHOD void CommonTypeInfo::dump(llvm::raw_ostream &OS) const {
OS << '\n';
}
-LLVM_DUMP_METHOD void ObjCContextInfo::dump(llvm::raw_ostream &OS) {
+LLVM_DUMP_METHOD void ContextInfo::dump(llvm::raw_ostream &OS) {
static_cast<CommonTypeInfo &>(*this).dump(OS);
if (HasDefaultNullability)
OS << "DefaultNullability: " << DefaultNullability << ' ';
diff --git a/contrib/llvm-project/clang/lib/APINotes/APINotesWriter.cpp b/contrib/llvm-project/clang/lib/APINotes/APINotesWriter.cpp
index 62a2ab179991..2a71922746ac 100644
--- a/contrib/llvm-project/clang/lib/APINotes/APINotesWriter.cpp
+++ b/contrib/llvm-project/clang/lib/APINotes/APINotesWriter.cpp
@@ -42,8 +42,8 @@ class APINotesWriter::Implementation {
/// this context and provides both the context ID and information describing
/// the context within that module.
llvm::DenseMap<ContextTableKey,
- std::pair<unsigned, VersionedSmallVector<ObjCContextInfo>>>
- ObjCContexts;
+ std::pair<unsigned, VersionedSmallVector<ContextInfo>>>
+ Contexts;
/// Information about parent contexts for each context.
///
@@ -51,7 +51,7 @@ class APINotesWriter::Implementation {
llvm::DenseMap<uint32_t, uint32_t> ParentContexts;
/// Mapping from context IDs to the identifier ID holding the name.
- llvm::DenseMap<unsigned, unsigned> ObjCContextNames;
+ llvm::DenseMap<unsigned, unsigned> ContextNames;
/// Information about Objective-C properties.
///
@@ -70,22 +70,29 @@ class APINotesWriter::Implementation {
llvm::SmallVector<std::pair<VersionTuple, ObjCMethodInfo>, 1>>
ObjCMethods;
+ /// Information about C++ methods.
+ ///
+ /// Indexed by the context ID and name ID.
+ llvm::DenseMap<SingleDeclTableKey,
+ llvm::SmallVector<std::pair<VersionTuple, CXXMethodInfo>, 1>>
+ CXXMethods;
+
/// Mapping from selectors to selector ID.
llvm::DenseMap<StoredObjCSelector, SelectorID> SelectorIDs;
/// Information about global variables.
///
- /// Indexed by the context ID, contextKind, identifier ID.
+ /// Indexed by the context ID, identifier ID.
llvm::DenseMap<
- ContextTableKey,
+ SingleDeclTableKey,
llvm::SmallVector<std::pair<VersionTuple, GlobalVariableInfo>, 1>>
GlobalVariables;
/// Information about global functions.
///
- /// Indexed by the context ID, contextKind, identifier ID.
+ /// Indexed by the context ID, identifier ID.
llvm::DenseMap<
- ContextTableKey,
+ SingleDeclTableKey,
llvm::SmallVector<std::pair<VersionTuple, GlobalFunctionInfo>, 1>>
GlobalFunctions;
@@ -98,15 +105,15 @@ class APINotesWriter::Implementation {
/// Information about tags.
///
- /// Indexed by the context ID, contextKind, identifier ID.
- llvm::DenseMap<ContextTableKey,
+ /// Indexed by the context ID, identifier ID.
+ llvm::DenseMap<SingleDeclTableKey,
llvm::SmallVector<std::pair<VersionTuple, TagInfo>, 1>>
Tags;
/// Information about typedefs.
///
- /// Indexed by the context ID, contextKind, identifier ID.
- llvm::DenseMap<ContextTableKey,
+ /// Indexed by the context ID, identifier ID.
+ llvm::DenseMap<SingleDeclTableKey,
llvm::SmallVector<std::pair<VersionTuple, TypedefInfo>, 1>>
Typedefs;
@@ -128,6 +135,7 @@ class APINotesWriter::Implementation {
SelectorID getSelector(ObjCSelectorRef SelectorRef) {
// Translate the selector reference into a stored selector.
StoredObjCSelector Selector;
+ Selector.NumArgs = SelectorRef.NumArgs;
Selector.Identifiers.reserve(SelectorRef.Identifiers.size());
for (auto piece : SelectorRef.Identifiers)
Selector.Identifiers.push_back(getIdentifier(piece));
@@ -146,9 +154,10 @@ private:
void writeBlockInfoBlock(llvm::BitstreamWriter &Stream);
void writeControlBlock(llvm::BitstreamWriter &Stream);
void writeIdentifierBlock(llvm::BitstreamWriter &Stream);
- void writeObjCContextBlock(llvm::BitstreamWriter &Stream);
+ void writeContextBlock(llvm::BitstreamWriter &Stream);
void writeObjCPropertyBlock(llvm::BitstreamWriter &Stream);
void writeObjCMethodBlock(llvm::BitstreamWriter &Stream);
+ void writeCXXMethodBlock(llvm::BitstreamWriter &Stream);
void writeObjCSelectorBlock(llvm::BitstreamWriter &Stream);
void writeGlobalVariableBlock(llvm::BitstreamWriter &Stream);
void writeGlobalFunctionBlock(llvm::BitstreamWriter &Stream);
@@ -177,9 +186,10 @@ void APINotesWriter::Implementation::writeToStream(llvm::raw_ostream &OS) {
writeBlockInfoBlock(Stream);
writeControlBlock(Stream);
writeIdentifierBlock(Stream);
- writeObjCContextBlock(Stream);
+ writeContextBlock(Stream);
writeObjCPropertyBlock(Stream);
writeObjCMethodBlock(Stream);
+ writeCXXMethodBlock(Stream);
writeObjCSelectorBlock(Stream);
writeGlobalVariableBlock(Stream);
writeGlobalFunctionBlock(Stream);
@@ -239,7 +249,7 @@ void APINotesWriter::Implementation::writeBlockInfoBlock(
BLOCK_RECORD(identifier_block, IDENTIFIER_DATA);
BLOCK(OBJC_CONTEXT_BLOCK);
- BLOCK_RECORD(objc_context_block, OBJC_CONTEXT_ID_DATA);
+ BLOCK_RECORD(context_block, CONTEXT_ID_DATA);
BLOCK(OBJC_PROPERTY_BLOCK);
BLOCK_RECORD(objc_property_block, OBJC_PROPERTY_DATA);
@@ -336,7 +346,7 @@ void APINotesWriter::Implementation::writeIdentifierBlock(
namespace {
/// Used to serialize the on-disk Objective-C context table.
-class ObjCContextIDTableInfo {
+class ContextIDTableInfo {
public:
using key_type = ContextTableKey;
using key_type_ref = key_type;
@@ -440,7 +450,7 @@ void emitVersionedInfo(
std::sort(VI.begin(), VI.end(),
[](const std::pair<VersionTuple, T> &LHS,
const std::pair<VersionTuple, T> &RHS) -> bool {
- assert(LHS.first != RHS.first &&
+ assert((&LHS == &RHS || LHS.first != RHS.first) &&
"two entries for the same version");
return LHS.first < RHS.first;
});
@@ -551,9 +561,8 @@ void emitCommonTypeInfo(raw_ostream &OS, const CommonTypeInfo &CTI) {
}
/// Used to serialize the on-disk Objective-C property table.
-class ObjCContextInfoTableInfo
- : public VersionedTableInfo<ObjCContextInfoTableInfo, unsigned,
- ObjCContextInfo> {
+class ContextInfoTableInfo
+ : public VersionedTableInfo<ContextInfoTableInfo, unsigned, ContextInfo> {
public:
unsigned getKeyLength(key_type_ref) { return sizeof(uint32_t); }
@@ -566,11 +575,11 @@ public:
return static_cast<size_t>(llvm::hash_value(Key));
}
- unsigned getUnversionedInfoSize(const ObjCContextInfo &OCI) {
+ unsigned getUnversionedInfoSize(const ContextInfo &OCI) {
return getCommonTypeInfoSize(OCI) + 1;
}
- void emitUnversionedInfo(raw_ostream &OS, const ObjCContextInfo &OCI) {
+ void emitUnversionedInfo(raw_ostream &OS, const ContextInfo &OCI) {
emitCommonTypeInfo(OS, OCI);
uint8_t payload = 0;
@@ -589,19 +598,19 @@ public:
};
} // namespace
-void APINotesWriter::Implementation::writeObjCContextBlock(
+void APINotesWriter::Implementation::writeContextBlock(
llvm::BitstreamWriter &Stream) {
llvm::BCBlockRAII restoreBlock(Stream, OBJC_CONTEXT_BLOCK_ID, 3);
- if (ObjCContexts.empty())
+ if (Contexts.empty())
return;
{
llvm::SmallString<4096> HashTableBlob;
uint32_t Offset;
{
- llvm::OnDiskChainedHashTableGenerator<ObjCContextIDTableInfo> Generator;
- for (auto &OC : ObjCContexts)
+ llvm::OnDiskChainedHashTableGenerator<ContextIDTableInfo> Generator;
+ for (auto &OC : Contexts)
Generator.insert(OC.first, OC.second.first);
llvm::raw_svector_ostream BlobStream(HashTableBlob);
@@ -611,16 +620,16 @@ void APINotesWriter::Implementation::writeObjCContextBlock(
Offset = Generator.Emit(BlobStream);
}
- objc_context_block::ObjCContextIDLayout ObjCContextID(Stream);
- ObjCContextID.emit(Scratch, Offset, HashTableBlob);
+ context_block::ContextIDLayout ContextID(Stream);
+ ContextID.emit(Scratch, Offset, HashTableBlob);
}
{
llvm::SmallString<4096> HashTableBlob;
uint32_t Offset;
{
- llvm::OnDiskChainedHashTableGenerator<ObjCContextInfoTableInfo> Generator;
- for (auto &OC : ObjCContexts)
+ llvm::OnDiskChainedHashTableGenerator<ContextInfoTableInfo> Generator;
+ for (auto &OC : Contexts)
Generator.insert(OC.second.first, OC.second.second);
llvm::raw_svector_ostream BlobStream(HashTableBlob);
@@ -630,8 +639,8 @@ void APINotesWriter::Implementation::writeObjCContextBlock(
Offset = Generator.Emit(BlobStream);
}
- objc_context_block::ObjCContextInfoLayout ObjCContextInfo(Stream);
- ObjCContextInfo.emit(Scratch, Offset, HashTableBlob);
+ context_block::ContextInfoLayout ContextInfo(Stream);
+ ContextInfo.emit(Scratch, Offset, HashTableBlob);
}
}
@@ -765,6 +774,34 @@ public:
emitFunctionInfo(OS, OMI);
}
};
+
+/// Used to serialize the on-disk C++ method table.
+class CXXMethodTableInfo
+ : public VersionedTableInfo<CXXMethodTableInfo, SingleDeclTableKey,
+ CXXMethodInfo> {
+public:
+ unsigned getKeyLength(key_type_ref) {
+ return sizeof(uint32_t) + sizeof(uint32_t);
+ }
+
+ void EmitKey(raw_ostream &OS, key_type_ref Key, unsigned) {
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ writer.write<uint32_t>(Key.parentContextID);
+ writer.write<uint32_t>(Key.nameID);
+ }
+
+ hash_value_type ComputeHash(key_type_ref key) {
+ return static_cast<size_t>(key.hashValue());
+ }
+
+ unsigned getUnversionedInfoSize(const CXXMethodInfo &OMI) {
+ return getFunctionInfoSize(OMI);
+ }
+
+ void emitUnversionedInfo(raw_ostream &OS, const CXXMethodInfo &OMI) {
+ emitFunctionInfo(OS, OMI);
+ }
+};
} // namespace
void APINotesWriter::Implementation::writeObjCMethodBlock(
@@ -794,6 +831,33 @@ void APINotesWriter::Implementation::writeObjCMethodBlock(
}
}
+void APINotesWriter::Implementation::writeCXXMethodBlock(
+ llvm::BitstreamWriter &Stream) {
+ llvm::BCBlockRAII Scope(Stream, CXX_METHOD_BLOCK_ID, 3);
+
+ if (CXXMethods.empty())
+ return;
+
+ {
+ llvm::SmallString<4096> HashTableBlob;
+ uint32_t Offset;
+ {
+ llvm::OnDiskChainedHashTableGenerator<CXXMethodTableInfo> Generator;
+ for (auto &MD : CXXMethods)
+ Generator.insert(MD.first, MD.second);
+
+ llvm::raw_svector_ostream BlobStream(HashTableBlob);
+ // Make sure that no bucket is at offset 0
+ llvm::support::endian::write<uint32_t>(BlobStream, 0,
+ llvm::endianness::little);
+ Offset = Generator.Emit(BlobStream);
+ }
+
+ cxx_method_block::CXXMethodDataLayout CXXMethodData(Stream);
+ CXXMethodData.emit(Scratch, Offset, HashTableBlob);
+ }
+}
+
namespace {
/// Used to serialize the on-disk Objective-C selector table.
class ObjCSelectorTableInfo {
@@ -865,18 +929,17 @@ void APINotesWriter::Implementation::writeObjCSelectorBlock(
namespace {
/// Used to serialize the on-disk global variable table.
class GlobalVariableTableInfo
- : public VersionedTableInfo<GlobalVariableTableInfo, ContextTableKey,
+ : public VersionedTableInfo<GlobalVariableTableInfo, SingleDeclTableKey,
GlobalVariableInfo> {
public:
unsigned getKeyLength(key_type_ref) {
- return sizeof(uint32_t) + sizeof(uint8_t) + sizeof(uint32_t);
+ return sizeof(uint32_t) + sizeof(uint32_t);
}
void EmitKey(raw_ostream &OS, key_type_ref Key, unsigned) {
llvm::support::endian::Writer writer(OS, llvm::endianness::little);
writer.write<uint32_t>(Key.parentContextID);
- writer.write<uint8_t>(Key.contextKind);
- writer.write<uint32_t>(Key.contextID);
+ writer.write<uint32_t>(Key.nameID);
}
hash_value_type ComputeHash(key_type_ref Key) {
@@ -979,18 +1042,17 @@ void emitFunctionInfo(raw_ostream &OS, const FunctionInfo &FI) {
/// Used to serialize the on-disk global function table.
class GlobalFunctionTableInfo
- : public VersionedTableInfo<GlobalFunctionTableInfo, ContextTableKey,
+ : public VersionedTableInfo<GlobalFunctionTableInfo, SingleDeclTableKey,
GlobalFunctionInfo> {
public:
unsigned getKeyLength(key_type_ref) {
- return sizeof(uint32_t) + sizeof(uint8_t) + sizeof(uint32_t);
+ return sizeof(uint32_t) + sizeof(uint32_t);
}
void EmitKey(raw_ostream &OS, key_type_ref Key, unsigned) {
llvm::support::endian::Writer writer(OS, llvm::endianness::little);
writer.write<uint32_t>(Key.parentContextID);
- writer.write<uint8_t>(Key.contextKind);
- writer.write<uint32_t>(Key.contextID);
+ writer.write<uint32_t>(Key.nameID);
}
hash_value_type ComputeHash(key_type_ref Key) {
@@ -1091,20 +1153,20 @@ void APINotesWriter::Implementation::writeEnumConstantBlock(
namespace {
template <typename Derived, typename UnversionedDataType>
class CommonTypeTableInfo
- : public VersionedTableInfo<Derived, ContextTableKey, UnversionedDataType> {
+ : public VersionedTableInfo<Derived, SingleDeclTableKey,
+ UnversionedDataType> {
public:
using key_type_ref = typename CommonTypeTableInfo::key_type_ref;
using hash_value_type = typename CommonTypeTableInfo::hash_value_type;
unsigned getKeyLength(key_type_ref) {
- return sizeof(uint32_t) + sizeof(uint8_t) + sizeof(IdentifierID);
+ return sizeof(uint32_t) + sizeof(IdentifierID);
}
void EmitKey(raw_ostream &OS, key_type_ref Key, unsigned) {
llvm::support::endian::Writer writer(OS, llvm::endianness::little);
writer.write<uint32_t>(Key.parentContextID);
- writer.write<uint8_t>(Key.contextKind);
- writer.write<IdentifierID>(Key.contextID);
+ writer.write<IdentifierID>(Key.nameID);
}
hash_value_type ComputeHash(key_type_ref Key) {
@@ -1127,7 +1189,7 @@ public:
return 2 + (TI.SwiftImportAs ? TI.SwiftImportAs->size() : 0) +
2 + (TI.SwiftRetainOp ? TI.SwiftRetainOp->size() : 0) +
2 + (TI.SwiftReleaseOp ? TI.SwiftReleaseOp->size() : 0) +
- 1 + getCommonTypeInfoSize(TI);
+ 2 + getCommonTypeInfoSize(TI);
}
void emitUnversionedInfo(raw_ostream &OS, const TagInfo &TI) {
@@ -1145,6 +1207,11 @@ public:
writer.write<uint8_t>(Flags);
+ if (auto Copyable = TI.isSwiftCopyable())
+ writer.write<uint8_t>(*Copyable ? kSwiftCopyable : kSwiftNonCopyable);
+ else
+ writer.write<uint8_t>(0);
+
if (auto ImportAs = TI.SwiftImportAs) {
writer.write<uint16_t>(ImportAs->size() + 1);
OS.write(ImportAs->c_str(), ImportAs->size());
@@ -1257,25 +1324,25 @@ void APINotesWriter::writeToStream(llvm::raw_ostream &OS) {
Implementation->writeToStream(OS);
}
-ContextID APINotesWriter::addObjCContext(std::optional<ContextID> ParentCtxID,
- StringRef Name, ContextKind Kind,
- const ObjCContextInfo &Info,
- VersionTuple SwiftVersion) {
+ContextID APINotesWriter::addContext(std::optional<ContextID> ParentCtxID,
+ llvm::StringRef Name, ContextKind Kind,
+ const ContextInfo &Info,
+ llvm::VersionTuple SwiftVersion) {
IdentifierID NameID = Implementation->getIdentifier(Name);
uint32_t RawParentCtxID = ParentCtxID ? ParentCtxID->Value : -1;
ContextTableKey Key(RawParentCtxID, static_cast<uint8_t>(Kind), NameID);
- auto Known = Implementation->ObjCContexts.find(Key);
- if (Known == Implementation->ObjCContexts.end()) {
- unsigned NextID = Implementation->ObjCContexts.size() + 1;
+ auto Known = Implementation->Contexts.find(Key);
+ if (Known == Implementation->Contexts.end()) {
+ unsigned NextID = Implementation->Contexts.size() + 1;
- Implementation::VersionedSmallVector<ObjCContextInfo> EmptyVersionedInfo;
- Known = Implementation->ObjCContexts
+ Implementation::VersionedSmallVector<ContextInfo> EmptyVersionedInfo;
+ Known = Implementation->Contexts
.insert(std::make_pair(
Key, std::make_pair(NextID, EmptyVersionedInfo)))
.first;
- Implementation->ObjCContextNames[NextID] = NameID;
+ Implementation->ContextNames[NextID] = NameID;
Implementation->ParentContexts[NextID] = RawParentCtxID;
}
@@ -1322,9 +1389,9 @@ void APINotesWriter::addObjCMethod(ContextID CtxID, ObjCSelectorRef Selector,
uint32_t ParentCtxID = Implementation->ParentContexts[CtxID.Value];
ContextTableKey CtxKey(ParentCtxID,
static_cast<uint8_t>(ContextKind::ObjCClass),
- Implementation->ObjCContextNames[CtxID.Value]);
- assert(Implementation->ObjCContexts.contains(CtxKey));
- auto &VersionedVec = Implementation->ObjCContexts[CtxKey].second;
+ Implementation->ContextNames[CtxID.Value]);
+ assert(Implementation->Contexts.contains(CtxKey));
+ auto &VersionedVec = Implementation->Contexts[CtxKey].second;
bool Found = false;
for (auto &Versioned : VersionedVec) {
if (Versioned.first == SwiftVersion) {
@@ -1335,18 +1402,26 @@ void APINotesWriter::addObjCMethod(ContextID CtxID, ObjCSelectorRef Selector,
}
if (!Found) {
- VersionedVec.push_back({SwiftVersion, ObjCContextInfo()});
+ VersionedVec.push_back({SwiftVersion, ContextInfo()});
VersionedVec.back().second.setHasDesignatedInits(true);
}
}
}
+void APINotesWriter::addCXXMethod(ContextID CtxID, llvm::StringRef Name,
+ const CXXMethodInfo &Info,
+ VersionTuple SwiftVersion) {
+ IdentifierID NameID = Implementation->getIdentifier(Name);
+ SingleDeclTableKey Key(CtxID.Value, NameID);
+ Implementation->CXXMethods[Key].push_back({SwiftVersion, Info});
+}
+
void APINotesWriter::addGlobalVariable(std::optional<Context> Ctx,
llvm::StringRef Name,
const GlobalVariableInfo &Info,
VersionTuple SwiftVersion) {
IdentifierID VariableID = Implementation->getIdentifier(Name);
- ContextTableKey Key(Ctx, VariableID);
+ SingleDeclTableKey Key(Ctx, VariableID);
Implementation->GlobalVariables[Key].push_back({SwiftVersion, Info});
}
@@ -1355,7 +1430,7 @@ void APINotesWriter::addGlobalFunction(std::optional<Context> Ctx,
const GlobalFunctionInfo &Info,
VersionTuple SwiftVersion) {
IdentifierID NameID = Implementation->getIdentifier(Name);
- ContextTableKey Key(Ctx, NameID);
+ SingleDeclTableKey Key(Ctx, NameID);
Implementation->GlobalFunctions[Key].push_back({SwiftVersion, Info});
}
@@ -1369,7 +1444,7 @@ void APINotesWriter::addEnumConstant(llvm::StringRef Name,
void APINotesWriter::addTag(std::optional<Context> Ctx, llvm::StringRef Name,
const TagInfo &Info, VersionTuple SwiftVersion) {
IdentifierID TagID = Implementation->getIdentifier(Name);
- ContextTableKey Key(Ctx, TagID);
+ SingleDeclTableKey Key(Ctx, TagID);
Implementation->Tags[Key].push_back({SwiftVersion, Info});
}
@@ -1377,7 +1452,7 @@ void APINotesWriter::addTypedef(std::optional<Context> Ctx,
llvm::StringRef Name, const TypedefInfo &Info,
VersionTuple SwiftVersion) {
IdentifierID TypedefID = Implementation->getIdentifier(Name);
- ContextTableKey Key(Ctx, TypedefID);
+ SingleDeclTableKey Key(Ctx, TypedefID);
Implementation->Typedefs[Key].push_back({SwiftVersion, Info});
}
} // namespace api_notes
diff --git a/contrib/llvm-project/clang/lib/APINotes/APINotesYAMLCompiler.cpp b/contrib/llvm-project/clang/lib/APINotes/APINotesYAMLCompiler.cpp
index 57d6da7a1775..060e1fdaf2fd 100644
--- a/contrib/llvm-project/clang/lib/APINotes/APINotesYAMLCompiler.cpp
+++ b/contrib/llvm-project/clang/lib/APINotes/APINotesYAMLCompiler.cpp
@@ -419,6 +419,8 @@ struct Tag {
std::optional<EnumExtensibilityKind> EnumExtensibility;
std::optional<bool> FlagEnum;
std::optional<EnumConvenienceAliasKind> EnumConvenienceKind;
+ std::optional<bool> SwiftCopyable;
+ FunctionsSeq Methods;
};
typedef std::vector<Tag> TagsSeq;
@@ -452,6 +454,8 @@ template <> struct MappingTraits<Tag> {
IO.mapOptional("EnumExtensibility", T.EnumExtensibility);
IO.mapOptional("FlagEnum", T.FlagEnum);
IO.mapOptional("EnumKind", T.EnumConvenienceKind);
+ IO.mapOptional("SwiftCopyable", T.SwiftCopyable);
+ IO.mapOptional("Methods", T.Methods);
}
};
} // namespace yaml
@@ -784,7 +788,7 @@ public:
void convertContext(std::optional<ContextID> ParentContextID, const Class &C,
ContextKind Kind, VersionTuple SwiftVersion) {
// Write the class.
- ObjCContextInfo CI;
+ ContextInfo CI;
convertCommonType(C, CI, C.Name);
if (C.AuditedForNullability)
@@ -795,7 +799,7 @@ public:
CI.setSwiftObjCMembers(*C.SwiftObjCMembers);
ContextID CtxID =
- Writer.addObjCContext(ParentContextID, C.Name, Kind, CI, SwiftVersion);
+ Writer.addContext(ParentContextID, C.Name, Kind, CI, SwiftVersion);
// Write all methods.
llvm::StringMap<std::pair<bool, bool>> KnownMethods;
@@ -861,17 +865,107 @@ public:
const Namespace &TheNamespace,
VersionTuple SwiftVersion) {
// Write the namespace.
- ObjCContextInfo CI;
+ ContextInfo CI;
convertCommonEntity(TheNamespace, CI, TheNamespace.Name);
ContextID CtxID =
- Writer.addObjCContext(ParentContextID, TheNamespace.Name,
- ContextKind::Namespace, CI, SwiftVersion);
+ Writer.addContext(ParentContextID, TheNamespace.Name,
+ ContextKind::Namespace, CI, SwiftVersion);
convertTopLevelItems(Context(CtxID, ContextKind::Namespace),
TheNamespace.Items, SwiftVersion);
}
+ void convertFunction(const Function &Function, FunctionInfo &FI) {
+ convertAvailability(Function.Availability, FI, Function.Name);
+ FI.setSwiftPrivate(Function.SwiftPrivate);
+ FI.SwiftName = std::string(Function.SwiftName);
+ convertParams(Function.Params, FI);
+ convertNullability(Function.Nullability, Function.NullabilityOfRet, FI,
+ Function.Name);
+ FI.ResultType = std::string(Function.ResultType);
+ FI.setRetainCountConvention(Function.RetainCountConvention);
+ }
+
+ void convertTagContext(std::optional<Context> ParentContext, const Tag &T,
+ VersionTuple SwiftVersion) {
+ TagInfo TI;
+ std::optional<ContextID> ParentContextID =
+ ParentContext ? std::optional<ContextID>(ParentContext->id)
+ : std::nullopt;
+ convertCommonType(T, TI, T.Name);
+
+ if ((T.SwiftRetainOp || T.SwiftReleaseOp) && !T.SwiftImportAs) {
+ emitError(llvm::Twine("should declare SwiftImportAs to use "
+ "SwiftRetainOp and SwiftReleaseOp (for ") +
+ T.Name + ")");
+ return;
+ }
+ if (T.SwiftReleaseOp.has_value() != T.SwiftRetainOp.has_value()) {
+ emitError(llvm::Twine("should declare both SwiftReleaseOp and "
+ "SwiftRetainOp (for ") +
+ T.Name + ")");
+ return;
+ }
+
+ if (T.SwiftImportAs)
+ TI.SwiftImportAs = T.SwiftImportAs;
+ if (T.SwiftRetainOp)
+ TI.SwiftRetainOp = T.SwiftRetainOp;
+ if (T.SwiftReleaseOp)
+ TI.SwiftReleaseOp = T.SwiftReleaseOp;
+
+ if (T.SwiftCopyable)
+ TI.setSwiftCopyable(T.SwiftCopyable);
+
+ if (T.EnumConvenienceKind) {
+ if (T.EnumExtensibility) {
+ emitError(
+ llvm::Twine("cannot mix EnumKind and EnumExtensibility (for ") +
+ T.Name + ")");
+ return;
+ }
+ if (T.FlagEnum) {
+ emitError(llvm::Twine("cannot mix EnumKind and FlagEnum (for ") +
+ T.Name + ")");
+ return;
+ }
+ switch (*T.EnumConvenienceKind) {
+ case EnumConvenienceAliasKind::None:
+ TI.EnumExtensibility = EnumExtensibilityKind::None;
+ TI.setFlagEnum(false);
+ break;
+ case EnumConvenienceAliasKind::CFEnum:
+ TI.EnumExtensibility = EnumExtensibilityKind::Open;
+ TI.setFlagEnum(false);
+ break;
+ case EnumConvenienceAliasKind::CFOptions:
+ TI.EnumExtensibility = EnumExtensibilityKind::Open;
+ TI.setFlagEnum(true);
+ break;
+ case EnumConvenienceAliasKind::CFClosedEnum:
+ TI.EnumExtensibility = EnumExtensibilityKind::Closed;
+ TI.setFlagEnum(false);
+ break;
+ }
+ } else {
+ TI.EnumExtensibility = T.EnumExtensibility;
+ TI.setFlagEnum(T.FlagEnum);
+ }
+
+ Writer.addTag(ParentContext, T.Name, TI, SwiftVersion);
+
+ ContextInfo CI;
+ auto TagCtxID = Writer.addContext(ParentContextID, T.Name, ContextKind::Tag,
+ CI, SwiftVersion);
+
+ for (const auto &CXXMethod : T.Methods) {
+ CXXMethodInfo MI;
+ convertFunction(CXXMethod, MI);
+ Writer.addCXXMethod(TagCtxID, CXXMethod.Name, MI, SwiftVersion);
+ }
+ }
+
void convertTopLevelItems(std::optional<Context> Ctx,
const TopLevelItems &TLItems,
VersionTuple SwiftVersion) {
@@ -948,14 +1042,7 @@ public:
}
GlobalFunctionInfo GFI;
- convertAvailability(Function.Availability, GFI, Function.Name);
- GFI.setSwiftPrivate(Function.SwiftPrivate);
- GFI.SwiftName = std::string(Function.SwiftName);
- convertParams(Function.Params, GFI);
- convertNullability(Function.Nullability, Function.NullabilityOfRet, GFI,
- Function.Name);
- GFI.ResultType = std::string(Function.ResultType);
- GFI.setRetainCountConvention(Function.RetainCountConvention);
+ convertFunction(Function, GFI);
Writer.addGlobalFunction(Ctx, Function.Name, GFI, SwiftVersion);
}
@@ -986,65 +1073,7 @@ public:
continue;
}
- TagInfo TI;
- convertCommonType(Tag, TI, Tag.Name);
-
- if ((Tag.SwiftRetainOp || Tag.SwiftReleaseOp) && !Tag.SwiftImportAs) {
- emitError(llvm::Twine("should declare SwiftImportAs to use "
- "SwiftRetainOp and SwiftReleaseOp (for ") +
- Tag.Name + ")");
- continue;
- }
- if (Tag.SwiftReleaseOp.has_value() != Tag.SwiftRetainOp.has_value()) {
- emitError(llvm::Twine("should declare both SwiftReleaseOp and "
- "SwiftRetainOp (for ") +
- Tag.Name + ")");
- continue;
- }
-
- if (Tag.SwiftImportAs)
- TI.SwiftImportAs = Tag.SwiftImportAs;
- if (Tag.SwiftRetainOp)
- TI.SwiftRetainOp = Tag.SwiftRetainOp;
- if (Tag.SwiftReleaseOp)
- TI.SwiftReleaseOp = Tag.SwiftReleaseOp;
-
- if (Tag.EnumConvenienceKind) {
- if (Tag.EnumExtensibility) {
- emitError(
- llvm::Twine("cannot mix EnumKind and EnumExtensibility (for ") +
- Tag.Name + ")");
- continue;
- }
- if (Tag.FlagEnum) {
- emitError(llvm::Twine("cannot mix EnumKind and FlagEnum (for ") +
- Tag.Name + ")");
- continue;
- }
- switch (*Tag.EnumConvenienceKind) {
- case EnumConvenienceAliasKind::None:
- TI.EnumExtensibility = EnumExtensibilityKind::None;
- TI.setFlagEnum(false);
- break;
- case EnumConvenienceAliasKind::CFEnum:
- TI.EnumExtensibility = EnumExtensibilityKind::Open;
- TI.setFlagEnum(false);
- break;
- case EnumConvenienceAliasKind::CFOptions:
- TI.EnumExtensibility = EnumExtensibilityKind::Open;
- TI.setFlagEnum(true);
- break;
- case EnumConvenienceAliasKind::CFClosedEnum:
- TI.EnumExtensibility = EnumExtensibilityKind::Closed;
- TI.setFlagEnum(false);
- break;
- }
- } else {
- TI.EnumExtensibility = Tag.EnumExtensibility;
- TI.setFlagEnum(Tag.FlagEnum);
- }
-
- Writer.addTag(Ctx, Tag.Name, TI, SwiftVersion);
+ convertTagContext(Ctx, Tag, SwiftVersion);
}
// Write all typedefs.
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp
index b410d5f3b42a..5835559bff6b 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp
@@ -606,8 +606,7 @@ bool MigrationProcess::applyTransform(TransformFn trans,
llvm::raw_svector_ostream vecOS(newText);
buf.write(vecOS);
std::unique_ptr<llvm::MemoryBuffer> memBuf(
- llvm::MemoryBuffer::getMemBufferCopy(
- StringRef(newText.data(), newText.size()), newFname));
+ llvm::MemoryBuffer::getMemBufferCopy(newText.str(), newFname));
SmallString<64> filePath(file->getName());
Unit->getFileManager().FixupRelativePath(filePath);
Remapper.remap(filePath.str(), std::move(memBuf));
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/ObjCMT.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/ObjCMT.cpp
index 0786c81516b2..4357c8e3f09a 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/ObjCMT.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/ObjCMT.cpp
@@ -484,7 +484,7 @@ static void rewriteToObjCProperty(const ObjCMethodDecl *Getter,
// Short circuit 'delegate' properties that contain the name "delegate" or
// "dataSource", or have exact name "target" to have 'assign' attribute.
- if (PropertyName.equals("target") || PropertyName.contains("delegate") ||
+ if (PropertyName == "target" || PropertyName.contains("delegate") ||
PropertyName.contains("dataSource")) {
QualType QT = Getter->getReturnType();
if (!QT->isRealType())
@@ -1144,7 +1144,7 @@ static bool IsValidIdentifier(ASTContext &Ctx,
return false;
std::string NameString = Name;
NameString[0] = toLowercase(NameString[0]);
- IdentifierInfo *II = &Ctx.Idents.get(NameString);
+ const IdentifierInfo *II = &Ctx.Idents.get(NameString);
return II->getTokenID() == tok::identifier;
}
@@ -1166,7 +1166,7 @@ bool ObjCMigrateASTConsumer::migrateProperty(ASTContext &Ctx,
if (OIT_Family != OIT_None)
return false;
- IdentifierInfo *getterName = GetterSelector.getIdentifierInfoForSlot(0);
+ const IdentifierInfo *getterName = GetterSelector.getIdentifierInfoForSlot(0);
Selector SetterSelector =
SelectorTable::constructSetterSelector(PP.getIdentifierTable(),
PP.getSelectorTable(),
@@ -1311,7 +1311,8 @@ void ObjCMigrateASTConsumer::migrateFactoryMethod(ASTContext &Ctx,
std::string StringLoweredClassName = LoweredClassName.lower();
LoweredClassName = StringLoweredClassName;
- IdentifierInfo *MethodIdName = OM->getSelector().getIdentifierInfoForSlot(0);
+ const IdentifierInfo *MethodIdName =
+ OM->getSelector().getIdentifierInfoForSlot(0);
// Handle method with no name at its first selector slot; e.g. + (id):(int)x.
if (!MethodIdName)
return;
@@ -1962,8 +1963,7 @@ void ObjCMigrateASTConsumer::HandleTranslationUnit(ASTContext &Ctx) {
llvm::raw_svector_ostream vecOS(newText);
buf.write(vecOS);
std::unique_ptr<llvm::MemoryBuffer> memBuf(
- llvm::MemoryBuffer::getMemBufferCopy(
- StringRef(newText.data(), newText.size()), file->getName()));
+ llvm::MemoryBuffer::getMemBufferCopy(newText.str(), file->getName()));
SmallString<64> filePath(file->getName());
FileMgr.FixupRelativePath(filePath);
Remapper.remap(filePath.str(), std::move(memBuf));
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/TransAPIUses.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/TransAPIUses.cpp
index 638850dcf9ec..8f5d4f4bde06 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/TransAPIUses.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/TransAPIUses.cpp
@@ -41,7 +41,7 @@ public:
getReturnValueSel = sels.getUnarySelector(&ids.get("getReturnValue"));
setReturnValueSel = sels.getUnarySelector(&ids.get("setReturnValue"));
- IdentifierInfo *selIds[2];
+ const IdentifierInfo *selIds[2];
selIds[0] = &ids.get("getArgument");
selIds[1] = &ids.get("atIndex");
getArgumentSel = sels.getSelector(2, selIds);
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/TransGCAttrs.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/TransGCAttrs.cpp
index 28d1db7f4376..85e3fe77660b 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/TransGCAttrs.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/TransGCAttrs.cpp
@@ -1,4 +1,4 @@
-//===--- TransGCAttrs.cpp - Transformations to ARC mode --------------------===//
+//===--- TransGCAttrs.cpp - Transformations to ARC mode -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp
index 1e6354f71e29..7390ea17c8a4 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp
@@ -371,7 +371,7 @@ private:
Stmt *parent = E;
do {
parent = StmtMap->getParentIgnoreParenImpCasts(parent);
- } while (parent && isa<FullExpr>(parent));
+ } while (isa_and_nonnull<FullExpr>(parent));
if (ReturnStmt *retS = dyn_cast_or_null<ReturnStmt>(parent)) {
std::string note = "remove the cast and change return type of function "
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp
index 2808e35135dc..fda0e1c932fc 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp
@@ -17,6 +17,7 @@
#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaObjC.h"
using namespace clang;
using namespace arcmt;
@@ -26,8 +27,8 @@ ASTTraverser::~ASTTraverser() { }
bool MigrationPass::CFBridgingFunctionsDefined() {
if (!EnableCFBridgeFns)
- EnableCFBridgeFns = SemaRef.isKnownName("CFBridgingRetain") &&
- SemaRef.isKnownName("CFBridgingRelease");
+ EnableCFBridgeFns = SemaRef.ObjC().isKnownName("CFBridgingRetain") &&
+ SemaRef.ObjC().isKnownName("CFBridgingRelease");
return *EnableCFBridgeFns;
}
diff --git a/contrib/llvm-project/clang/lib/AST/APValue.cpp b/contrib/llvm-project/clang/lib/AST/APValue.cpp
index 4eae308ef5b3..d8e33ff421c0 100644
--- a/contrib/llvm-project/clang/lib/AST/APValue.cpp
+++ b/contrib/llvm-project/clang/lib/AST/APValue.cpp
@@ -90,7 +90,7 @@ QualType APValue::LValueBase::getType() const {
// For a materialized temporary, the type of the temporary we materialized
// may not be the type of the expression.
if (const MaterializeTemporaryExpr *MTE =
- clang::dyn_cast<MaterializeTemporaryExpr>(Base)) {
+ llvm::dyn_cast<MaterializeTemporaryExpr>(Base)) {
SmallVector<const Expr *, 2> CommaLHSs;
SmallVector<SubobjectAdjustment, 2> Adjustments;
const Expr *Temp = MTE->getSubExpr();
@@ -704,6 +704,9 @@ void APValue::printPretty(raw_ostream &Out, const PrintingPolicy &Policy,
return;
}
+ if (const auto *AT = Ty->getAs<AtomicType>())
+ Ty = AT->getValueType();
+
switch (getKind()) {
case APValue::None:
Out << "<out of lifetime>";
@@ -905,7 +908,8 @@ void APValue::printPretty(raw_ostream &Out, const PrintingPolicy &Policy,
for (const auto *FI : RD->fields()) {
if (!First)
Out << ", ";
- if (FI->isUnnamedBitfield()) continue;
+ if (FI->isUnnamedBitField())
+ continue;
getStructField(FI->getFieldIndex()).
printPretty(Out, Policy, FI->getType(), Ctx);
First = false;
diff --git a/contrib/llvm-project/clang/lib/AST/ASTConcept.cpp b/contrib/llvm-project/clang/lib/AST/ASTConcept.cpp
index b3ec99448b3e..d8efbe44dbec 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTConcept.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTConcept.cpp
@@ -15,30 +15,25 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/PrettyPrinter.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringExtras.h"
using namespace clang;
-namespace {
-void CreatUnsatisfiedConstraintRecord(
- const ASTContext &C, const UnsatisfiedConstraintRecord &Detail,
- UnsatisfiedConstraintRecord *TrailingObject) {
- if (Detail.second.is<Expr *>())
- new (TrailingObject) UnsatisfiedConstraintRecord{
- Detail.first,
- UnsatisfiedConstraintRecord::second_type(Detail.second.get<Expr *>())};
+static void
+CreateUnsatisfiedConstraintRecord(const ASTContext &C,
+ const UnsatisfiedConstraintRecord &Detail,
+ UnsatisfiedConstraintRecord *TrailingObject) {
+ if (Detail.is<Expr *>())
+ new (TrailingObject) UnsatisfiedConstraintRecord(Detail.get<Expr *>());
else {
auto &SubstitutionDiagnostic =
- *Detail.second.get<std::pair<SourceLocation, StringRef> *>();
- unsigned MessageSize = SubstitutionDiagnostic.second.size();
- char *Mem = new (C) char[MessageSize];
- memcpy(Mem, SubstitutionDiagnostic.second.data(), MessageSize);
+ *Detail.get<std::pair<SourceLocation, StringRef> *>();
+ StringRef Message = C.backupStr(SubstitutionDiagnostic.second);
auto *NewSubstDiag = new (C) std::pair<SourceLocation, StringRef>(
- SubstitutionDiagnostic.first, StringRef(Mem, MessageSize));
- new (TrailingObject) UnsatisfiedConstraintRecord{
- Detail.first, UnsatisfiedConstraintRecord::second_type(NewSubstDiag)};
+ SubstitutionDiagnostic.first, Message);
+ new (TrailingObject) UnsatisfiedConstraintRecord(NewSubstDiag);
}
}
-} // namespace
ASTConstraintSatisfaction::ASTConstraintSatisfaction(
const ASTContext &C, const ConstraintSatisfaction &Satisfaction)
@@ -46,7 +41,7 @@ ASTConstraintSatisfaction::ASTConstraintSatisfaction(
IsSatisfied{Satisfaction.IsSatisfied}, ContainsErrors{
Satisfaction.ContainsErrors} {
for (unsigned I = 0; I < NumRecords; ++I)
- CreatUnsatisfiedConstraintRecord(
+ CreateUnsatisfiedConstraintRecord(
C, Satisfaction.Details[I],
getTrailingObjects<UnsatisfiedConstraintRecord>() + I);
}
@@ -57,7 +52,7 @@ ASTConstraintSatisfaction::ASTConstraintSatisfaction(
IsSatisfied{Satisfaction.IsSatisfied},
ContainsErrors{Satisfaction.ContainsErrors} {
for (unsigned I = 0; I < NumRecords; ++I)
- CreatUnsatisfiedConstraintRecord(
+ CreateUnsatisfiedConstraintRecord(
C, *(Satisfaction.begin() + I),
getTrailingObjects<UnsatisfiedConstraintRecord>() + I);
}
@@ -106,9 +101,12 @@ void ConceptReference::print(llvm::raw_ostream &OS,
ConceptName.printName(OS, Policy);
if (hasExplicitTemplateArgs()) {
OS << "<";
+ llvm::ListSeparator Sep(", ");
// FIXME: Find corresponding parameter for argument
- for (auto &ArgLoc : ArgsAsWritten->arguments())
+ for (auto &ArgLoc : ArgsAsWritten->arguments()) {
+ OS << Sep;
ArgLoc.getArgument().print(Policy, OS, /*IncludeType*/ false);
+ }
OS << ">";
}
}
diff --git a/contrib/llvm-project/clang/lib/AST/ASTContext.cpp b/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
index cc5de9a6295e..1064507f3461 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
@@ -41,6 +41,7 @@
#include "clang/AST/RawCommentList.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtOpenACC.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
@@ -85,7 +86,9 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/SipHash.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/AArch64TargetParser.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
@@ -798,7 +801,7 @@ ASTContext::getCanonicalTemplateTemplateParmDecl(
TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create(
*this, getTranslationUnitDecl(), SourceLocation(), TTP->getDepth(),
- TTP->getPosition(), TTP->isParameterPack(), nullptr,
+ TTP->getPosition(), TTP->isParameterPack(), nullptr, /*Typename=*/false,
TemplateParameterList::Create(*this, SourceLocation(), SourceLocation(),
CanonParams, SourceLocation(),
/*RequiresClause=*/nullptr));
@@ -878,7 +881,8 @@ ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
TemplateSpecializationTypes(this_()),
DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()),
- CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
+ ArrayParameterTypes(this_()), CanonTemplateTemplateParms(this_()),
+ SourceMgr(SM), LangOpts(LOpts),
NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
LangOpts.XRayNeverInstrumentFiles,
@@ -1081,7 +1085,8 @@ void ASTContext::addModuleInitializer(Module *M, Decl *D) {
Inits->Initializers.push_back(D);
}
-void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
+void ASTContext::addLazyModuleInitializers(Module *M,
+ ArrayRef<GlobalDeclID> IDs) {
auto *&Inits = ModuleInitializers[M];
if (!Inits)
Inits = new (*this) PerModuleInitializers;
@@ -1106,6 +1111,31 @@ void ASTContext::setCurrentNamedModule(Module *M) {
CurrentCXXNamedModule = M;
}
+bool ASTContext::isInSameModule(const Module *M1, const Module *M2) {
+ if (!M1 != !M2)
+ return false;
+
+ /// Get the representative module for M. The representative module is the
+ /// first module unit for a specific primary module name. So that the module
+ /// units have the same representative module belongs to the same module.
+ ///
+ /// The process is helpful to reduce the expensive string operations.
+ auto GetRepresentativeModule = [this](const Module *M) {
+ auto Iter = SameModuleLookupSet.find(M);
+ if (Iter != SameModuleLookupSet.end())
+ return Iter->second;
+
+ const Module *RepresentativeModule =
+ PrimaryModuleNameMap.try_emplace(M->getPrimaryModuleInterfaceName(), M)
+ .first->second;
+ SameModuleLookupSet[M] = RepresentativeModule;
+ return RepresentativeModule;
+ };
+
+ assert(M1 && "Shouldn't call `isInSameModule` if both M1 and M2 are none.");
+ return GetRepresentativeModule(M1) == GetRepresentativeModule(M2);
+}
+
ExternCContextDecl *ASTContext::getExternCContextDecl() const {
if (!ExternCContext)
ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());
@@ -1304,6 +1334,9 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
// Placeholder type for bound members.
InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);
+ // Placeholder type for unresolved templates.
+ InitBuiltinType(UnresolvedTemplateTy, BuiltinType::UnresolvedTemplate);
+
// Placeholder type for pseudo-objects.
InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);
@@ -1318,16 +1351,14 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
// Placeholder type for OMP array sections.
if (LangOpts.OpenMP) {
- InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
+ InitBuiltinType(ArraySectionTy, BuiltinType::ArraySection);
InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
}
- // Placeholder type for OpenACC array sections.
- if (LangOpts.OpenACC) {
- // FIXME: Once we implement OpenACC array sections in Sema, this will either
- // be combined with the OpenMP type, or given its own type. In the meantime,
- // just use the OpenMP type so that parsing can work.
- InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
+ // Placeholder type for OpenACC array sections, if we are ALSO in OMP mode,
+ // don't bother, as we're just using the same type as OMP.
+ if (LangOpts.OpenACC && !LangOpts.OpenMP) {
+ InitBuiltinType(ArraySectionTy, BuiltinType::ArraySection);
}
if (LangOpts.MatrixTypes)
InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);
@@ -1353,7 +1384,8 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
#include "clang/Basic/OpenCLExtensionTypes.def"
}
- if (Target.hasAArch64SVETypes()) {
+ if (Target.hasAArch64SVETypes() ||
+ (AuxTarget && AuxTarget->hasAArch64SVETypes())) {
#define SVE_TYPE(Name, Id, SingletonId) \
InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64SVEACLETypes.def"
@@ -1380,6 +1412,13 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
#include "clang/Basic/WebAssemblyReferenceTypes.def"
}
+ if (Target.getTriple().isAMDGPU() ||
+ (AuxTarget && AuxTarget->getTriple().isAMDGPU())) {
+#define AMDGPU_TYPE(Name, Id, SingletonId) \
+ InitBuiltinType(SingletonId, BuiltinType::Id);
+#include "clang/Basic/AMDGPUTypes.def"
+ }
+
// Builtin type for __objc_yes and __objc_no
ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
SignedCharTy : BoolTy);
@@ -1611,15 +1650,7 @@ const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
case BuiltinType::Float16:
return Target->getHalfFormat();
case BuiltinType::Half:
- // For HLSL, when the native half type is disabled, half will be treat as
- // float.
- if (getLangOpts().HLSL)
- if (getLangOpts().NativeHalfType)
- return Target->getHalfFormat();
- else
- return Target->getFloatFormat();
- else
- return Target->getHalfFormat();
+ return Target->getHalfFormat();
case BuiltinType::Float: return Target->getFloatFormat();
case BuiltinType::Double: return Target->getDoubleFormat();
case BuiltinType::Ibm128:
@@ -1692,7 +1723,7 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
if (VD->hasGlobalStorage() && !ForAlignof) {
uint64_t TypeSize =
!BaseT->isIncompleteType() ? getTypeSize(T.getTypePtr()) : 0;
- Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
+ Align = std::max(Align, getMinGlobalAlignOfVar(TypeSize, VD));
}
// Fields can be subject to extra alignment constraints, like if
@@ -1749,7 +1780,8 @@ TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
// of a base-class subobject. We decide whether that's possible
// during class layout, so here we can just trust the layout results.
if (getLangOpts().CPlusPlus) {
- if (const auto *RT = T->getAs<RecordType>()) {
+ if (const auto *RT = T->getAs<RecordType>();
+ RT && !RT->getDecl()->isInvalidDecl()) {
const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
Info.Width = layout.getDataSize();
}
@@ -1764,7 +1796,7 @@ TypeInfoChars
static getConstantArrayInfoInChars(const ASTContext &Context,
const ConstantArrayType *CAT) {
TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType());
- uint64_t Size = CAT->getSize().getZExtValue();
+ uint64_t Size = CAT->getZExtSize();
assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <=
(uint64_t)(-1)/Size) &&
"Overflow in array type char size evaluation");
@@ -1904,11 +1936,12 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
case Type::IncompleteArray:
case Type::VariableArray:
- case Type::ConstantArray: {
+ case Type::ConstantArray:
+ case Type::ArrayParameter: {
// Model non-constant sized arrays as size zero, but track the alignment.
uint64_t Size = 0;
if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
- Size = CAT->getSize().getZExtValue();
+ Size = CAT->getZExtSize();
TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType());
assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
@@ -2202,6 +2235,13 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Align = 8; \
break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
+#define AMDGPU_OPAQUE_PTR_TYPE(NAME, MANGLEDNAME, AS, WIDTH, ALIGN, ID, \
+ SINGLETONID) \
+ case BuiltinType::ID: \
+ Width = WIDTH; \
+ Align = ALIGN; \
+ break;
+#include "clang/Basic/AMDGPUTypes.def"
}
break;
case Type::ObjCObjectPointer:
@@ -2260,9 +2300,8 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
}
case Type::BitInt: {
const auto *EIT = cast<BitIntType>(T);
- Align = std::clamp<unsigned>(llvm::PowerOf2Ceil(EIT->getNumBits()),
- getCharWidth(), Target->getLongLongAlign());
- Width = llvm::alignTo(EIT->getNumBits(), Align);
+ Align = Target->getBitIntAlign(EIT->getNumBits());
+ Width = Target->getBitIntWidth(EIT->getNumBits());
break;
}
case Type::Record:
@@ -2346,6 +2385,9 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
return getTypeInfo(
cast<AttributedType>(T)->getEquivalentType().getTypePtr());
+ case Type::CountAttributed:
+ return getTypeInfo(cast<CountAttributedType>(T)->desugar().getTypePtr());
+
case Type::BTFTagAttributed:
return getTypeInfo(
cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr());
@@ -2515,16 +2557,25 @@ unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
/// getAlignOfGlobalVar - Return the alignment in bits that should be given
/// to a global variable of the specified type.
-unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
+unsigned ASTContext::getAlignOfGlobalVar(QualType T, const VarDecl *VD) const {
uint64_t TypeSize = getTypeSize(T.getTypePtr());
return std::max(getPreferredTypeAlign(T),
- getTargetInfo().getMinGlobalAlign(TypeSize));
+ getMinGlobalAlignOfVar(TypeSize, VD));
}
/// getAlignOfGlobalVarInChars - Return the alignment in characters that
/// should be given to a global variable of the specified type.
-CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const {
- return toCharUnitsFromBits(getAlignOfGlobalVar(T));
+CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T,
+ const VarDecl *VD) const {
+ return toCharUnitsFromBits(getAlignOfGlobalVar(T, VD));
+}
+
+unsigned ASTContext::getMinGlobalAlignOfVar(uint64_t Size,
+ const VarDecl *VD) const {
+ // Make the default handling as that of a non-weak definition in the
+ // current translation unit.
+ bool HasNonWeakDef = !VD || (VD->hasDefinition() && !VD->isWeak());
+ return getTargetInfo().getMinGlobalAlign(Size, HasNonWeakDef);
}
CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
@@ -2668,7 +2719,7 @@ getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context,
if (Field->isBitField()) {
// If we have explicit padding bits, they don't contribute bits
// to the actual object representation, so return 0.
- if (Field->isUnnamedBitfield())
+ if (Field->isUnnamedBitField())
return 0;
int64_t BitfieldSize = Field->getBitWidthValue(Context);
@@ -2781,6 +2832,10 @@ bool ASTContext::hasUniqueObjectRepresentations(
return hasUniqueObjectRepresentations(getBaseElementType(Ty),
CheckIfTriviallyCopyable);
+ assert((Ty->isVoidType() || !Ty->isIncompleteType()) &&
+ "hasUniqueObjectRepresentations should not be called with an "
+ "incomplete type");
+
// (9.1) - T is trivially copyable...
if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(*this))
return false;
@@ -3045,21 +3100,27 @@ QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
if (!T.hasAddressSpace())
return T;
- // If we are composing extended qualifiers together, merge together
- // into one ExtQuals node.
QualifierCollector Quals;
const Type *TypeNode;
+ // For arrays, strip the qualifier off the element type, then reconstruct the
+ // array type
+ if (T.getTypePtr()->isArrayType()) {
+ T = getUnqualifiedArrayType(T, Quals);
+ TypeNode = T.getTypePtr();
+ } else {
+ // If we are composing extended qualifiers together, merge together
+ // into one ExtQuals node.
+ while (T.hasAddressSpace()) {
+ TypeNode = Quals.strip(T);
+
+ // If the type no longer has an address space after stripping qualifiers,
+ // jump out.
+ if (!QualType(TypeNode, 0).hasAddressSpace())
+ break;
- while (T.hasAddressSpace()) {
- TypeNode = Quals.strip(T);
-
- // If the type no longer has an address space after stripping qualifiers,
- // jump out.
- if (!QualType(TypeNode, 0).hasAddressSpace())
- break;
-
- // There might be sugar in the way. Strip it and try again.
- T = T.getSingleStepDesugaredType(*this);
+ // There might be sugar in the way. Strip it and try again.
+ T = T.getSingleStepDesugaredType(*this);
+ }
}
Quals.removeAddressSpace();
@@ -3073,6 +3134,300 @@ QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
return QualType(TypeNode, Quals.getFastQualifiers());
}
+uint16_t
+ASTContext::getPointerAuthVTablePointerDiscriminator(const CXXRecordDecl *RD) {
+ assert(RD->isPolymorphic() &&
+ "Attempted to get vtable pointer discriminator on a monomorphic type");
+ std::unique_ptr<MangleContext> MC(createMangleContext());
+ SmallString<256> Str;
+ llvm::raw_svector_ostream Out(Str);
+ MC->mangleCXXVTable(RD, Out);
+ return llvm::getPointerAuthStableSipHash(Str);
+}
+
+/// Encode a function type for use in the discriminator of a function pointer
+/// type. We can't use the itanium scheme for this since C has quite permissive
+/// rules for type compatibility that we need to be compatible with.
+///
+/// Formally, this function associates every function pointer type T with an
+/// encoded string E(T). Let the equivalence relation T1 ~ T2 be defined as
+/// E(T1) == E(T2). E(T) is part of the ABI of values of type T. C type
+/// compatibility requires equivalent treatment under the ABI, so
+/// CCompatible(T1, T2) must imply E(T1) == E(T2), that is, CCompatible must be
+/// a subset of ~. Crucially, however, it must be a proper subset because
+/// CCompatible is not an equivalence relation: for example, int[] is compatible
+/// with both int[1] and int[2], but the latter are not compatible with each
+/// other. Therefore this encoding function must be careful to only distinguish
+/// types if there is no third type with which they are both required to be
+/// compatible.
+static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx,
+ raw_ostream &OS, QualType QT) {
+ // FIXME: Consider address space qualifiers.
+ const Type *T = QT.getCanonicalType().getTypePtr();
+
+ // FIXME: Consider using the C++ type mangling when we encounter a construct
+ // that is incompatible with C.
+
+ switch (T->getTypeClass()) {
+ case Type::Atomic:
+ return encodeTypeForFunctionPointerAuth(
+ Ctx, OS, cast<AtomicType>(T)->getValueType());
+
+ case Type::LValueReference:
+ OS << "R";
+ encodeTypeForFunctionPointerAuth(Ctx, OS,
+ cast<ReferenceType>(T)->getPointeeType());
+ return;
+ case Type::RValueReference:
+ OS << "O";
+ encodeTypeForFunctionPointerAuth(Ctx, OS,
+ cast<ReferenceType>(T)->getPointeeType());
+ return;
+
+ case Type::Pointer:
+ // C11 6.7.6.1p2:
+ // For two pointer types to be compatible, both shall be identically
+ // qualified and both shall be pointers to compatible types.
+ // FIXME: we should also consider pointee types.
+ OS << "P";
+ return;
+
+ case Type::ObjCObjectPointer:
+ case Type::BlockPointer:
+ OS << "P";
+ return;
+
+ case Type::Complex:
+ OS << "C";
+ return encodeTypeForFunctionPointerAuth(
+ Ctx, OS, cast<ComplexType>(T)->getElementType());
+
+ case Type::VariableArray:
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::ArrayParameter:
+ // C11 6.7.6.2p6:
+ // For two array types to be compatible, both shall have compatible
+ // element types, and if both size specifiers are present, and are integer
+ // constant expressions, then both size specifiers shall have the same
+ // constant value [...]
+ //
+ // So since ElemType[N] has to be compatible ElemType[], we can't encode the
+ // width of the array.
+ OS << "A";
+ return encodeTypeForFunctionPointerAuth(
+ Ctx, OS, cast<ArrayType>(T)->getElementType());
+
+ case Type::ObjCInterface:
+ case Type::ObjCObject:
+ OS << "<objc_object>";
+ return;
+
+ case Type::Enum: {
+ // C11 6.7.2.2p4:
+ // Each enumerated type shall be compatible with char, a signed integer
+ // type, or an unsigned integer type.
+ //
+ // So we have to treat enum types as integers.
+ QualType UnderlyingType = cast<EnumType>(T)->getDecl()->getIntegerType();
+ return encodeTypeForFunctionPointerAuth(
+ Ctx, OS, UnderlyingType.isNull() ? Ctx.IntTy : UnderlyingType);
+ }
+
+ case Type::FunctionNoProto:
+ case Type::FunctionProto: {
+ // C11 6.7.6.3p15:
+ // For two function types to be compatible, both shall specify compatible
+ // return types. Moreover, the parameter type lists, if both are present,
+ // shall agree in the number of parameters and in the use of the ellipsis
+ // terminator; corresponding parameters shall have compatible types.
+ //
+ // That paragraph goes on to describe how unprototyped functions are to be
+ // handled, which we ignore here. Unprototyped function pointers are hashed
+ // as though they were prototyped nullary functions since thats probably
+ // what the user meant. This behavior is non-conforming.
+ // FIXME: If we add a "custom discriminator" function type attribute we
+ // should encode functions as their discriminators.
+ OS << "F";
+ const auto *FuncType = cast<FunctionType>(T);
+ encodeTypeForFunctionPointerAuth(Ctx, OS, FuncType->getReturnType());
+ if (const auto *FPT = dyn_cast<FunctionProtoType>(FuncType)) {
+ for (QualType Param : FPT->param_types()) {
+ Param = Ctx.getSignatureParameterType(Param);
+ encodeTypeForFunctionPointerAuth(Ctx, OS, Param);
+ }
+ if (FPT->isVariadic())
+ OS << "z";
+ }
+ OS << "E";
+ return;
+ }
+
+ case Type::MemberPointer: {
+ OS << "M";
+ const auto *MPT = T->getAs<MemberPointerType>();
+ encodeTypeForFunctionPointerAuth(Ctx, OS, QualType(MPT->getClass(), 0));
+ encodeTypeForFunctionPointerAuth(Ctx, OS, MPT->getPointeeType());
+ return;
+ }
+ case Type::ExtVector:
+ case Type::Vector:
+ OS << "Dv" << Ctx.getTypeSizeInChars(T).getQuantity();
+ break;
+
+ // Don't bother discriminating based on these types.
+ case Type::Pipe:
+ case Type::BitInt:
+ case Type::ConstantMatrix:
+ OS << "?";
+ return;
+
+ case Type::Builtin: {
+ const auto *BTy = T->getAs<BuiltinType>();
+ switch (BTy->getKind()) {
+#define SIGNED_TYPE(Id, SingletonId) \
+ case BuiltinType::Id: \
+ OS << "i"; \
+ return;
+#define UNSIGNED_TYPE(Id, SingletonId) \
+ case BuiltinType::Id: \
+ OS << "i"; \
+ return;
+#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
+#define BUILTIN_TYPE(Id, SingletonId)
+#include "clang/AST/BuiltinTypes.def"
+ llvm_unreachable("placeholder types should not appear here.");
+
+ case BuiltinType::Half:
+ OS << "Dh";
+ return;
+ case BuiltinType::Float:
+ OS << "f";
+ return;
+ case BuiltinType::Double:
+ OS << "d";
+ return;
+ case BuiltinType::LongDouble:
+ OS << "e";
+ return;
+ case BuiltinType::Float16:
+ OS << "DF16_";
+ return;
+ case BuiltinType::Float128:
+ OS << "g";
+ return;
+
+ case BuiltinType::Void:
+ OS << "v";
+ return;
+
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ case BuiltinType::NullPtr:
+ OS << "P";
+ return;
+
+ // Don't bother discriminating based on OpenCL types.
+ case BuiltinType::OCLSampler:
+ case BuiltinType::OCLEvent:
+ case BuiltinType::OCLClkEvent:
+ case BuiltinType::OCLQueue:
+ case BuiltinType::OCLReserveID:
+ case BuiltinType::BFloat16:
+ case BuiltinType::VectorQuad:
+ case BuiltinType::VectorPair:
+ OS << "?";
+ return;
+
+ // Don't bother discriminating based on these seldom-used types.
+ case BuiltinType::Ibm128:
+ return;
+#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
+ case BuiltinType::Id: \
+ return;
+#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id: \
+ return;
+#include "clang/Basic/OpenCLExtensionTypes.def"
+#define SVE_TYPE(Name, Id, SingletonId) \
+ case BuiltinType::Id: \
+ return;
+#include "clang/Basic/AArch64SVEACLETypes.def"
+ case BuiltinType::Dependent:
+ llvm_unreachable("should never get here");
+ case BuiltinType::AMDGPUBufferRsrc:
+ case BuiltinType::WasmExternRef:
+#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/RISCVVTypes.def"
+ llvm_unreachable("not yet implemented");
+ }
+ }
+ case Type::Record: {
+ const RecordDecl *RD = T->getAs<RecordType>()->getDecl();
+ const IdentifierInfo *II = RD->getIdentifier();
+
+ // In C++, an immediate typedef of an anonymous struct or union
+ // is considered to name it for ODR purposes, but C's specification
+ // of type compatibility does not have a similar rule. Using the typedef
+ // name in function type discriminators anyway, as we do here,
+ // therefore technically violates the C standard: two function pointer
+ // types defined in terms of two typedef'd anonymous structs with
+ // different names are formally still compatible, but we are assigning
+ // them different discriminators and therefore incompatible ABIs.
+ //
+ // This is a relatively minor violation that significantly improves
+ // discrimination in some cases and has not caused problems in
+ // practice. Regardless, it is now part of the ABI in places where
+ // function type discrimination is used, and it can no longer be
+ // changed except on new platforms.
+
+ if (!II)
+ if (const TypedefNameDecl *Typedef = RD->getTypedefNameForAnonDecl())
+ II = Typedef->getDeclName().getAsIdentifierInfo();
+
+ if (!II) {
+ OS << "<anonymous_record>";
+ return;
+ }
+ OS << II->getLength() << II->getName();
+ return;
+ }
+ case Type::DeducedTemplateSpecialization:
+ case Type::Auto:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define ABSTRACT_TYPE(Class, Base)
+#define TYPE(Class, Base)
+#include "clang/AST/TypeNodes.inc"
+ llvm_unreachable("unexpected non-canonical or dependent type!");
+ return;
+ }
+}
+
+uint16_t ASTContext::getPointerAuthTypeDiscriminator(QualType T) {
+ assert(!T->isDependentType() &&
+ "cannot compute type discriminator of a dependent type");
+
+ SmallString<256> Str;
+ llvm::raw_svector_ostream Out(Str);
+
+ if (T->isFunctionPointerType() || T->isFunctionReferenceType())
+ T = T->getPointeeType();
+
+ if (T->isFunctionType()) {
+ encodeTypeForFunctionPointerAuth(*this, Out, T);
+ } else {
+ T = T.getUnqualifiedType();
+ std::unique_ptr<MangleContext> MC(createMangleContext());
+ MC->mangleCanonicalTypeName(T, Out);
+ }
+
+ return llvm::getPointerAuthStableSipHash(Str);
+}
+
QualType ASTContext::getObjCGCQualType(QualType T,
Qualifiers::GC GCAttr) const {
QualType CanT = getCanonicalType(T);
@@ -3111,6 +3466,32 @@ QualType ASTContext::removePtrSizeAddrSpace(QualType T) const {
return T;
}
+QualType ASTContext::getCountAttributedType(
+ QualType WrappedTy, Expr *CountExpr, bool CountInBytes, bool OrNull,
+ ArrayRef<TypeCoupledDeclRefInfo> DependentDecls) const {
+ assert(WrappedTy->isPointerType() || WrappedTy->isArrayType());
+
+ llvm::FoldingSetNodeID ID;
+ CountAttributedType::Profile(ID, WrappedTy, CountExpr, CountInBytes, OrNull);
+
+ void *InsertPos = nullptr;
+ CountAttributedType *CATy =
+ CountAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (CATy)
+ return QualType(CATy, 0);
+
+ QualType CanonTy = getCanonicalType(WrappedTy);
+ size_t Size = CountAttributedType::totalSizeToAlloc<TypeCoupledDeclRefInfo>(
+ DependentDecls.size());
+ CATy = (CountAttributedType *)Allocate(Size, TypeAlignment);
+ new (CATy) CountAttributedType(WrappedTy, CanonTy, CountExpr, CountInBytes,
+ OrNull, DependentDecls);
+ Types.push_back(CATy);
+ CountAttributedTypes.InsertNode(CATy, InsertPos);
+
+ return QualType(CATy, 0);
+}
+
const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
FunctionType::ExtInfo Info) {
if (T->getExtInfo() == Info)
@@ -3356,6 +3737,37 @@ QualType ASTContext::getDecayedType(QualType T) const {
return getDecayedType(T, Decayed);
}
+QualType ASTContext::getArrayParameterType(QualType Ty) const {
+ if (Ty->isArrayParameterType())
+ return Ty;
+ assert(Ty->isConstantArrayType() && "Ty must be an array type.");
+ const auto *ATy = cast<ConstantArrayType>(Ty);
+ llvm::FoldingSetNodeID ID;
+ ATy->Profile(ID, *this, ATy->getElementType(), ATy->getZExtSize(),
+ ATy->getSizeExpr(), ATy->getSizeModifier(),
+ ATy->getIndexTypeQualifiers().getAsOpaqueValue());
+ void *InsertPos = nullptr;
+ ArrayParameterType *AT =
+ ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (AT)
+ return QualType(AT, 0);
+
+ QualType Canonical;
+ if (!Ty.isCanonical()) {
+ Canonical = getArrayParameterType(getCanonicalType(Ty));
+
+ // Get the new insert position for the node we care about.
+ AT = ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!AT && "Shouldn't be in the map!");
+ }
+
+ AT = new (*this, alignof(ArrayParameterType))
+ ArrayParameterType(ATy, Canonical);
+ Types.push_back(AT);
+ ArrayParameterTypes.InsertNode(AT, InsertPos);
+ return QualType(AT, 0);
+}
+
/// getBlockPointerType - Return the uniqued reference to the type for
/// a pointer to the specified block.
QualType ASTContext::getBlockPointerType(QualType T) const {
@@ -3520,8 +3932,8 @@ QualType ASTContext::getConstantArrayType(QualType EltTy,
ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());
llvm::FoldingSetNodeID ID;
- ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM,
- IndexTypeQuals);
+ ConstantArrayType::Profile(ID, *this, EltTy, ArySize.getZExtValue(), SizeExpr,
+ ASM, IndexTypeQuals);
void *InsertPos = nullptr;
if (ConstantArrayType *ATP =
@@ -3545,11 +3957,8 @@ QualType ASTContext::getConstantArrayType(QualType EltTy,
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- void *Mem = Allocate(
- ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0),
- alignof(ConstantArrayType));
- auto *New = new (Mem)
- ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals);
+ auto *New = ConstantArrayType::Create(*this, EltTy, Canon, ArySize, SizeExpr,
+ ASM, IndexTypeQuals);
ConstantArrayTypes.InsertNode(New, InsertPos);
Types.push_back(New);
return QualType(New, 0);
@@ -3602,8 +4011,10 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
case Type::Auto:
case Type::DeducedTemplateSpecialization:
case Type::PackExpansion:
+ case Type::PackIndexing:
case Type::BitInt:
case Type::DependentBitInt:
+ case Type::ArrayParameter:
llvm_unreachable("type should never be variably-modified");
// These types can be variably-modified but should never need to
@@ -3731,33 +4142,33 @@ QualType ASTContext::getDependentSizedArrayType(QualType elementType,
numElements->isValueDependent()) &&
"Size must be type- or value-dependent!");
+ SplitQualType canonElementType = getCanonicalType(elementType).split();
+
+ void *insertPos = nullptr;
+ llvm::FoldingSetNodeID ID;
+ DependentSizedArrayType::Profile(
+ ID, *this, numElements ? QualType(canonElementType.Ty, 0) : elementType,
+ ASM, elementTypeQuals, numElements);
+
+ // Look for an existing type with these properties.
+ DependentSizedArrayType *canonTy =
+ DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);
+
// Dependently-sized array types that do not have a specified number
// of elements will have their sizes deduced from a dependent
- // initializer. We do no canonicalization here at all, which is okay
- // because they can't be used in most locations.
+ // initializer.
if (!numElements) {
+ if (canonTy)
+ return QualType(canonTy, 0);
+
auto *newType = new (*this, alignof(DependentSizedArrayType))
DependentSizedArrayType(elementType, QualType(), numElements, ASM,
elementTypeQuals, brackets);
+ DependentSizedArrayTypes.InsertNode(newType, insertPos);
Types.push_back(newType);
return QualType(newType, 0);
}
- // Otherwise, we actually build a new type every time, but we
- // also build a canonical type.
-
- SplitQualType canonElementType = getCanonicalType(elementType).split();
-
- void *insertPos = nullptr;
- llvm::FoldingSetNodeID ID;
- DependentSizedArrayType::Profile(ID, *this,
- QualType(canonElementType.Ty, 0),
- ASM, elementTypeQuals, numElements);
-
- // Look for an existing type with these properties.
- DependentSizedArrayType *canonTy =
- DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);
-
// If we don't have one, build one.
if (!canonTy) {
canonTy = new (*this, alignof(DependentSizedArrayType))
@@ -4490,12 +4901,14 @@ QualType ASTContext::getFunctionTypeInternal(
size_t Size = FunctionProtoType::totalSizeToAlloc<
QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields,
FunctionType::FunctionTypeArmAttributes, FunctionType::ExceptionType,
- Expr *, FunctionDecl *, FunctionProtoType::ExtParameterInfo, Qualifiers>(
+ Expr *, FunctionDecl *, FunctionProtoType::ExtParameterInfo, Qualifiers,
+ FunctionEffect, EffectConditionExpr>(
NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(),
EPI.requiresFunctionProtoTypeArmAttributes(), ESH.NumExceptionType,
ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
EPI.ExtParameterInfos ? NumArgs : 0,
- EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0);
+ EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0, EPI.FunctionEffects.size(),
+ EPI.FunctionEffects.conditions().size());
auto *FTP = (FunctionProtoType *)Allocate(Size, alignof(FunctionProtoType));
FunctionProtoType::ExtProtoInfo newEPI = EPI;
@@ -4503,6 +4916,8 @@ QualType ASTContext::getFunctionTypeInternal(
Types.push_back(FTP);
if (!Unique)
FunctionProtoTypes.InsertNode(FTP, InsertPos);
+ if (!EPI.FunctionEffects.empty())
+ AnyFunctionEffects = true;
return QualType(FTP, 0);
}
@@ -4935,9 +5350,6 @@ ASTContext::getTemplateSpecializationType(TemplateName Template,
QualType Underlying) const {
assert(!Template.getAsDependentTemplateName() &&
"No dependent template names here!");
- // Look through qualified template names.
- if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
- Template = QTN->getUnderlyingTemplate();
const auto *TD = Template.getAsTemplateDecl();
bool IsTypeAlias = TD && TD->isTypeAlias();
@@ -4973,10 +5385,6 @@ QualType ASTContext::getCanonicalTemplateSpecializationType(
assert(!Template.getAsDependentTemplateName() &&
"No dependent template names here!");
- // Look through qualified template names.
- if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
- Template = TemplateName(QTN->getUnderlyingTemplate());
-
// Build the canonical template specialization type.
TemplateName CanonTemplate = getCanonicalTemplateName(Template);
bool AnyNonCanonArgs = false;
@@ -5191,10 +5599,12 @@ TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
Arg = TemplateArgument(E);
} else {
auto *TTP = cast<TemplateTemplateParmDecl>(Param);
+ TemplateName Name = getQualifiedTemplateName(
+ nullptr, /*TemplateKeyword=*/false, TemplateName(TTP));
if (TTP->isParameterPack())
- Arg = TemplateArgument(TemplateName(TTP), std::optional<unsigned>());
+ Arg = TemplateArgument(Name, std::optional<unsigned>());
else
- Arg = TemplateArgument(TemplateName(TTP));
+ Arg = TemplateArgument(Name);
}
if (Param->isTemplateParameterPack())
@@ -5617,19 +6027,19 @@ QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const {
if (Canon) {
// We already have a "canonical" version of an identical, dependent
// typeof(expr) type. Use that as our canonical type.
- toe = new (*this, alignof(TypeOfExprType))
- TypeOfExprType(tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0));
+ toe = new (*this, alignof(TypeOfExprType)) TypeOfExprType(
+ *this, tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0));
} else {
// Build a new, canonical typeof(expr) type.
Canon = new (*this, alignof(DependentTypeOfExprType))
- DependentTypeOfExprType(tofExpr, Kind);
+ DependentTypeOfExprType(*this, tofExpr, Kind);
DependentTypeOfExprTypes.InsertNode(Canon, InsertPos);
toe = Canon;
}
} else {
QualType Canonical = getCanonicalType(tofExpr->getType());
toe = new (*this, alignof(TypeOfExprType))
- TypeOfExprType(tofExpr, Kind, Canonical);
+ TypeOfExprType(*this, tofExpr, Kind, Canonical);
}
Types.push_back(toe);
return QualType(toe, 0);
@@ -5642,8 +6052,8 @@ QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const {
/// on canonical types (which are always unique).
QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const {
QualType Canonical = getCanonicalType(tofType);
- auto *tot =
- new (*this, alignof(TypeOfType)) TypeOfType(tofType, Canonical, Kind);
+ auto *tot = new (*this, alignof(TypeOfType))
+ TypeOfType(*this, tofType, Canonical, Kind);
Types.push_back(tot);
return QualType(tot, 0);
}
@@ -5705,6 +6115,39 @@ QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
return QualType(dt, 0);
}
+QualType ASTContext::getPackIndexingType(QualType Pattern, Expr *IndexExpr,
+ bool FullySubstituted,
+ ArrayRef<QualType> Expansions,
+ int Index) const {
+ QualType Canonical;
+ if (FullySubstituted && Index != -1) {
+ Canonical = getCanonicalType(Expansions[Index]);
+ } else {
+ llvm::FoldingSetNodeID ID;
+ PackIndexingType::Profile(ID, *this, Pattern, IndexExpr);
+ void *InsertPos = nullptr;
+ PackIndexingType *Canon =
+ DependentPackIndexingTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (!Canon) {
+ void *Mem = Allocate(
+ PackIndexingType::totalSizeToAlloc<QualType>(Expansions.size()),
+ TypeAlignment);
+ Canon = new (Mem)
+ PackIndexingType(*this, QualType(), Pattern, IndexExpr, Expansions);
+ DependentPackIndexingTypes.InsertNode(Canon, InsertPos);
+ }
+ Canonical = QualType(Canon, 0);
+ }
+
+ void *Mem =
+ Allocate(PackIndexingType::totalSizeToAlloc<QualType>(Expansions.size()),
+ TypeAlignment);
+ auto *T = new (Mem)
+ PackIndexingType(*this, Canonical, Pattern, IndexExpr, Expansions);
+ Types.push_back(T);
+ return QualType(T, 0);
+}
+
/// getUnaryTransformationType - We don't unique these, since the memory
/// savings are minimal and these are rare.
QualType ASTContext::getUnaryTransformType(QualType BaseType,
@@ -5812,7 +6255,8 @@ QualType ASTContext::getUnconstrainedType(QualType T) const {
if (auto *AT = CanonT->getAs<AutoType>()) {
if (!AT->isConstrained())
return T;
- return getQualifiedType(getAutoType(QualType(), AT->getKeyword(), false,
+ return getQualifiedType(getAutoType(QualType(), AT->getKeyword(),
+ AT->isDependentType(),
AT->containsUnexpandedParameterPack()),
T.getQualifiers());
}
@@ -5980,7 +6424,9 @@ CanQualType ASTContext::getCanonicalParamType(QualType T) const {
T = getVariableArrayDecayedType(T);
const Type *Ty = T.getTypePtr();
QualType Result;
- if (isa<ArrayType>(Ty)) {
+ if (getLangOpts().HLSL && isa<ConstantArrayType>(Ty)) {
+ Result = getArrayParameterType(QualType(Ty, 0));
+ } else if (isa<ArrayType>(Ty)) {
Result = getArrayDecayedType(QualType(Ty,0));
} else if (isa<FunctionType>(Ty)) {
Result = getPointerType(QualType(Ty, 0));
@@ -5992,7 +6438,7 @@ CanQualType ASTContext::getCanonicalParamType(QualType T) const {
}
QualType ASTContext::getUnqualifiedArrayType(QualType type,
- Qualifiers &quals) {
+ Qualifiers &quals) const {
SplitQualType splitType = type.getSplitUnqualifiedType();
// FIXME: getSplitUnqualifiedType() actually walks all the way to
@@ -6387,7 +6833,8 @@ bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X,
if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
return false;
- return hasSameType(TTPX->getDefaultArgument(), TTPY->getDefaultArgument());
+ return hasSameType(TTPX->getDefaultArgument().getArgument().getAsType(),
+ TTPY->getDefaultArgument().getArgument().getAsType());
}
if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
@@ -6395,8 +6842,10 @@ bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X,
if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument())
return false;
- Expr *DefaultArgumentX = NTTPX->getDefaultArgument()->IgnoreImpCasts();
- Expr *DefaultArgumentY = NTTPY->getDefaultArgument()->IgnoreImpCasts();
+ Expr *DefaultArgumentX =
+ NTTPX->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts();
+ Expr *DefaultArgumentY =
+ NTTPY->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts();
llvm::FoldingSetNodeID XID, YID;
DefaultArgumentX->Profile(XID, *this, /*Canonical=*/true);
DefaultArgumentY->Profile(YID, *this, /*Canonical=*/true);
@@ -6689,7 +7138,7 @@ bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const {
// Using shadow declarations with the same target match.
if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) {
const auto *USY = cast<UsingShadowDecl>(Y);
- return USX->getTargetDecl() == USY->getTargetDecl();
+ return declaresSameEntity(USX->getTargetDecl(), USY->getTargetDecl());
}
// Using declarations with the same qualifier match. (We already know that
@@ -6799,14 +7248,14 @@ ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
// A namespace is canonical; build a nested-name-specifier with
// this namespace and no prefix.
return NestedNameSpecifier::Create(*this, nullptr,
- NNS->getAsNamespace()->getOriginalNamespace());
+ NNS->getAsNamespace()->getFirstDecl());
case NestedNameSpecifier::NamespaceAlias:
// A namespace is canonical; build a nested-name-specifier with
// this namespace and no prefix.
- return NestedNameSpecifier::Create(*this, nullptr,
- NNS->getAsNamespaceAlias()->getNamespace()
- ->getOriginalNamespace());
+ return NestedNameSpecifier::Create(
+ *this, nullptr,
+ NNS->getAsNamespaceAlias()->getNamespace()->getFirstDecl());
// The difference between TypeSpec and TypeSpecWithTemplate is that the
// latter will have the 'template' keyword when printed.
@@ -6822,16 +7271,13 @@ ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
// typedef typename T::type T1;
// typedef typename T1::type T2;
if (const auto *DNT = T->getAs<DependentNameType>())
- return NestedNameSpecifier::Create(
- *this, DNT->getQualifier(),
- const_cast<IdentifierInfo *>(DNT->getIdentifier()));
+ return NestedNameSpecifier::Create(*this, DNT->getQualifier(),
+ DNT->getIdentifier());
if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>())
- return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true,
- const_cast<Type *>(T));
+ return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true, T);
// TODO: Set 'Template' parameter to true for other template types.
- return NestedNameSpecifier::Create(*this, nullptr, false,
- const_cast<Type *>(T));
+ return NestedNameSpecifier::Create(*this, nullptr, false, T);
}
case NestedNameSpecifier::Global:
@@ -6902,6 +7348,8 @@ const ArrayType *ASTContext::getAsArrayType(QualType T) const {
}
QualType ASTContext::getAdjustedParameterType(QualType T) const {
+ if (getLangOpts().HLSL && T->isConstantArrayType())
+ return getArrayParameterType(T);
if (T->isArrayType() || T->isFunctionType())
return getDecayedType(T);
return T;
@@ -6977,7 +7425,7 @@ uint64_t
ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const {
uint64_t ElementCount = 1;
do {
- ElementCount *= CA->getSize().getZExtValue();
+ ElementCount *= CA->getZExtSize();
CA = dyn_cast_or_null<ConstantArrayType>(
CA->getElementType()->getAsArrayTypeUnsafe());
} while (CA);
@@ -7135,6 +7583,14 @@ QualType ASTContext::isPromotableBitField(Expr *E) const {
// We perform that promotion here to match GCC and C++.
// FIXME: C does not permit promotion of an enum bit-field whose rank is
// greater than that of 'int'. We perform that promotion to match GCC.
+ //
+ // C23 6.3.1.1p2:
+ // The value from a bit-field of a bit-precise integer type is converted to
+ // the corresponding bit-precise integer type. (The rest is the same as in
+ // C11.)
+ if (QualType QT = Field->getType(); QT->isBitIntType())
+ return QT;
+
if (BitWidth < IntSize)
return IntTy;
@@ -8043,6 +8499,8 @@ static char getObjCEncodingForPrimitiveType(const ASTContext *C,
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
+#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/AMDGPUTypes.def"
{
DiagnosticsEngine &Diags = C->getDiagnostics();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
@@ -8300,7 +8758,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
S += '[';
if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
- S += llvm::utostr(CAT->getSize().getZExtValue());
+ S += llvm::utostr(CAT->getZExtSize());
else {
//Variable length arrays are encoded as a regular array with 0 elements.
assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) &&
@@ -8512,6 +8970,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
case Type::DeducedTemplateSpecialization:
return;
+ case Type::ArrayParameter:
case Type::Pipe:
#define ABSTRACT_TYPE(KIND, BASE)
#define TYPE(KIND, BASE)
@@ -9186,7 +9645,8 @@ TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const {
TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
bool TemplateKeyword,
TemplateName Template) const {
- assert(NNS && "Missing nested-name-specifier in qualified template name");
+ assert(Template.getKind() == TemplateName::Template ||
+ Template.getKind() == TemplateName::UsingTemplate);
// FIXME: Canonicalization?
llvm::FoldingSetNodeID ID;
@@ -9440,11 +9900,6 @@ static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) {
bool ASTContext::areCompatibleSveTypes(QualType FirstType,
QualType SecondType) {
- assert(
- ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) ||
- (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) &&
- "Expected SVE builtin type and vector type!");
-
auto IsValidCast = [this](QualType FirstType, QualType SecondType) {
if (const auto *BT = FirstType->getAs<BuiltinType>()) {
if (const auto *VT = SecondType->getAs<VectorType>()) {
@@ -9470,11 +9925,6 @@ bool ASTContext::areCompatibleSveTypes(QualType FirstType,
bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType,
QualType SecondType) {
- assert(
- ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) ||
- (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) &&
- "Expected SVE builtin type and vector type!");
-
auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
const auto *BT = FirstType->getAs<BuiltinType>();
if (!BT)
@@ -9529,11 +9979,11 @@ static uint64_t getRVVTypeSize(ASTContext &Context, const BuiltinType *Ty) {
ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(Ty);
- unsigned EltSize = Context.getTypeSize(Info.ElementType);
+ uint64_t EltSize = Context.getTypeSize(Info.ElementType);
if (Info.ElementType == Context.BoolTy)
EltSize = 1;
- unsigned MinElts = Info.EC.getKnownMinValue();
+ uint64_t MinElts = Info.EC.getKnownMinValue();
return VScale->first * MinElts * EltSize;
}
@@ -10391,6 +10841,8 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn);
+ std::optional<FunctionEffectSet> MergedFX;
+
if (lproto && rproto) { // two C99 style function prototypes
assert((AllowCXX ||
(!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) &&
@@ -10406,6 +10858,25 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
if (lproto->getMethodQuals() != rproto->getMethodQuals())
return {};
+ // Function effects are handled similarly to noreturn, see above.
+ FunctionEffectsRef LHSFX = lproto->getFunctionEffects();
+ FunctionEffectsRef RHSFX = rproto->getFunctionEffects();
+ if (LHSFX != RHSFX) {
+ if (IsConditionalOperator)
+ MergedFX = FunctionEffectSet::getIntersection(LHSFX, RHSFX);
+ else {
+ FunctionEffectSet::Conflicts Errs;
+ MergedFX = FunctionEffectSet::getUnion(LHSFX, RHSFX, Errs);
+ // Here we're discarding a possible error due to conflicts in the effect
+ // sets. But we're not in a context where we can report it. The
+ // operation does however guarantee maintenance of invariants.
+ }
+ if (*MergedFX != LHSFX)
+ allLTypes = false;
+ if (*MergedFX != RHSFX)
+ allRTypes = false;
+ }
+
SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos;
bool canUseLeft, canUseRight;
if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight,
@@ -10449,6 +10920,8 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
EPI.ExtInfo = einfo;
EPI.ExtParameterInfos =
newParamInfos.empty() ? nullptr : newParamInfos.data();
+ if (MergedFX)
+ EPI.FunctionEffects = *MergedFX;
return getFunctionType(retType, types, EPI);
}
@@ -10486,6 +10959,8 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo();
EPI.ExtInfo = einfo;
+ if (MergedFX)
+ EPI.FunctionEffects = *MergedFX;
return getFunctionType(retType, proto->getParamTypes(), EPI);
}
@@ -10734,7 +11209,7 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer,
{
const ConstantArrayType* LCAT = getAsConstantArrayType(LHS);
const ConstantArrayType* RCAT = getAsConstantArrayType(RHS);
- if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize())
+ if (LCAT && RCAT && RCAT->getZExtSize() != LCAT->getZExtSize())
return {};
QualType LHSElem = getAsArrayType(LHS)->getElementType();
@@ -10855,6 +11330,10 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer,
assert(LHS != RHS &&
"Equivalent pipe types should have already been handled!");
return {};
+ case Type::ArrayParameter:
+ assert(LHS != RHS &&
+ "Equivalent ArrayParameter types should have already been handled!");
+ return {};
case Type::BitInt: {
// Merge two bit-precise int types, while trying to preserve typedef info.
bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned();
@@ -11408,6 +11887,10 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
Type = Context.SveCountTy;
break;
}
+ case 'b': {
+ Type = Context.AMDGPUBufferRsrcTy;
+ break;
+ }
default:
llvm_unreachable("Unexpected target builtin type");
}
@@ -11909,8 +12392,7 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
!isMSStaticDataMemberInlineDefinition(VD))
return false;
- // Variables in other module units shouldn't be forced to be emitted.
- if (VD->isInAnotherModuleUnit())
+ if (VD->shouldEmitInExternalSource())
return false;
// Variables that can be needed in other TUs are required.
@@ -12134,8 +12616,13 @@ QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
}
void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
- if (Number > 1)
- MangleNumbers[ND] = Number;
+ if (Number <= 1)
+ return;
+
+ MangleNumbers[ND] = Number;
+
+ if (Listener)
+ Listener->AddedManglingNumber(ND, Number);
}
unsigned ASTContext::getManglingNumber(const NamedDecl *ND,
@@ -12154,8 +12641,13 @@ unsigned ASTContext::getManglingNumber(const NamedDecl *ND,
}
void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
- if (Number > 1)
- StaticLocalNumbers[VD] = Number;
+ if (Number <= 1)
+ return;
+
+ StaticLocalNumbers[VD] = Number;
+
+ if (Listener)
+ Listener->AddedStaticLocalNumbers(VD, Number);
}
unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
@@ -12746,6 +13238,18 @@ static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
getCommonArrayElementType(Ctx, AX, QX, AY, QY), AX->getSize(), SizeExpr,
getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY));
}
+ case Type::ArrayParameter: {
+ const auto *AX = cast<ArrayParameterType>(X),
+ *AY = cast<ArrayParameterType>(Y);
+ assert(AX->getSize() == AY->getSize());
+ const Expr *SizeExpr = Ctx.hasSameExpr(AX->getSizeExpr(), AY->getSizeExpr())
+ ? AX->getSizeExpr()
+ : nullptr;
+ auto ArrayTy = Ctx.getConstantArrayType(
+ getCommonArrayElementType(Ctx, AX, QX, AY, QY), AX->getSize(), SizeExpr,
+ getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY));
+ return Ctx.getArrayParameterType(ArrayTy);
+ }
case Type::Atomic: {
const auto *AX = cast<AtomicType>(X), *AY = cast<AtomicType>(Y);
return Ctx.getAtomicType(
@@ -12921,6 +13425,14 @@ static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
// As Decltype is not uniqued, building a common type would be wasteful.
return QualType(DX, 0);
}
+ case Type::PackIndexing: {
+ const auto *DX = cast<PackIndexingType>(X);
+ [[maybe_unused]] const auto *DY = cast<PackIndexingType>(Y);
+ assert(DX->isDependentType());
+ assert(DY->isDependentType());
+ assert(Ctx.hasSameExpr(DX->getIndexExpr(), DY->getIndexExpr()));
+ return QualType(DX, 0);
+ }
case Type::DependentName: {
const auto *NX = cast<DependentNameType>(X),
*NY = cast<DependentNameType>(Y);
@@ -12999,6 +13511,7 @@ static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X,
CANONICAL_TYPE(Builtin)
CANONICAL_TYPE(Complex)
CANONICAL_TYPE(ConstantArray)
+ CANONICAL_TYPE(ArrayParameter)
CANONICAL_TYPE(ConstantMatrix)
CANONICAL_TYPE(Enum)
CANONICAL_TYPE(ExtVector)
@@ -13081,6 +13594,7 @@ static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X,
return Ctx.getAutoType(Ctx.getQualifiedType(Underlying), AX->getKeyword(),
/*IsDependent=*/false, /*IsPack=*/false, CD, As);
}
+ case Type::PackIndexing:
case Type::Decltype:
return QualType();
case Type::DeducedTemplateSpecialization:
@@ -13180,6 +13694,32 @@ static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X,
return QualType();
return Ctx.getUsingType(CD, Ctx.getQualifiedType(Underlying));
}
+ case Type::CountAttributed: {
+ const auto *DX = cast<CountAttributedType>(X),
+ *DY = cast<CountAttributedType>(Y);
+ if (DX->isCountInBytes() != DY->isCountInBytes())
+ return QualType();
+ if (DX->isOrNull() != DY->isOrNull())
+ return QualType();
+ Expr *CEX = DX->getCountExpr();
+ Expr *CEY = DY->getCountExpr();
+ llvm::ArrayRef<clang::TypeCoupledDeclRefInfo> CDX = DX->getCoupledDecls();
+ if (Ctx.hasSameExpr(CEX, CEY))
+ return Ctx.getCountAttributedType(Ctx.getQualifiedType(Underlying), CEX,
+ DX->isCountInBytes(), DX->isOrNull(),
+ CDX);
+ if (!CEX->isIntegerConstantExpr(Ctx) || !CEY->isIntegerConstantExpr(Ctx))
+ return QualType();
+ // Two declarations with the same integer constant may still differ in their
+ // expression pointers, so we need to evaluate them.
+ llvm::APSInt VX = *CEX->getIntegerConstantExpr(Ctx);
+ llvm::APSInt VY = *CEY->getIntegerConstantExpr(Ctx);
+ if (VX != VY)
+ return QualType();
+ return Ctx.getCountAttributedType(Ctx.getQualifiedType(Underlying), CEX,
+ DX->isCountInBytes(), DX->isOrNull(),
+ CDX);
+ }
}
llvm_unreachable("Unhandled Type Class");
}
@@ -13260,6 +13800,42 @@ QualType ASTContext::getCommonSugaredType(QualType X, QualType Y,
return R;
}
+QualType ASTContext::getCorrespondingUnsaturatedType(QualType Ty) const {
+ assert(Ty->isFixedPointType());
+
+ if (Ty->isUnsaturatedFixedPointType())
+ return Ty;
+
+ switch (Ty->castAs<BuiltinType>()->getKind()) {
+ default:
+ llvm_unreachable("Not a saturated fixed point type!");
+ case BuiltinType::SatShortAccum:
+ return ShortAccumTy;
+ case BuiltinType::SatAccum:
+ return AccumTy;
+ case BuiltinType::SatLongAccum:
+ return LongAccumTy;
+ case BuiltinType::SatUShortAccum:
+ return UnsignedShortAccumTy;
+ case BuiltinType::SatUAccum:
+ return UnsignedAccumTy;
+ case BuiltinType::SatULongAccum:
+ return UnsignedLongAccumTy;
+ case BuiltinType::SatShortFract:
+ return ShortFractTy;
+ case BuiltinType::SatFract:
+ return FractTy;
+ case BuiltinType::SatLongFract:
+ return LongFractTy;
+ case BuiltinType::SatUShortFract:
+ return UnsignedShortFractTy;
+ case BuiltinType::SatUFract:
+ return UnsignedFractTy;
+ case BuiltinType::SatULongFract:
+ return UnsignedLongFractTy;
+ }
+}
+
QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const {
assert(Ty->isFixedPointType());
@@ -13461,17 +14037,16 @@ QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const {
}
}
-std::vector<std::string> ASTContext::filterFunctionTargetVersionAttrs(
- const TargetVersionAttr *TV) const {
- assert(TV != nullptr);
- llvm::SmallVector<StringRef, 8> Feats;
- std::vector<std::string> ResFeats;
- TV->getFeatures(Feats);
- for (auto &Feature : Feats)
- if (Target->validateCpuSupports(Feature.str()))
- // Use '?' to mark features that came from TargetVersion.
- ResFeats.push_back("?" + Feature.str());
- return ResFeats;
+// Given a list of FMV features, return a concatenated list of the
+// corresponding backend features (which may contain duplicates).
+static std::vector<std::string> getFMVBackendFeaturesFor(
+ const llvm::SmallVectorImpl<StringRef> &FMVFeatStrings) {
+ std::vector<std::string> BackendFeats;
+ for (StringRef F : FMVFeatStrings)
+ if (auto FMVExt = llvm::AArch64::parseFMVExtension(F))
+ for (StringRef F : FMVExt->getImpliedFeatures())
+ BackendFeats.push_back(F.str());
+ return BackendFeats;
}
ParsedTargetAttr
@@ -13506,10 +14081,12 @@ void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
// Make a copy of the features as passed on the command line into the
// beginning of the additional features from the function to override.
- ParsedAttr.Features.insert(
- ParsedAttr.Features.begin(),
- Target->getTargetOpts().FeaturesAsWritten.begin(),
- Target->getTargetOpts().FeaturesAsWritten.end());
+ // AArch64 handles command line option features in parseTargetAttr().
+ if (!Target->getTriple().isAArch64())
+ ParsedAttr.Features.insert(
+ ParsedAttr.Features.begin(),
+ Target->getTargetOpts().FeaturesAsWritten.begin(),
+ Target->getTargetOpts().FeaturesAsWritten.end());
if (ParsedAttr.CPU != "" && Target->isValidCPUName(ParsedAttr.CPU))
TargetCPU = ParsedAttr.CPU;
@@ -13530,35 +14107,31 @@ void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
Target->getTargetOpts().FeaturesAsWritten.end());
Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
} else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
- std::vector<std::string> Features;
- StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex());
if (Target->getTriple().isAArch64()) {
- // TargetClones for AArch64
- if (VersionStr != "default") {
- SmallVector<StringRef, 1> VersionFeatures;
- VersionStr.split(VersionFeatures, "+");
- for (auto &VFeature : VersionFeatures) {
- VFeature = VFeature.trim();
- // Use '?' to mark features that came from AArch64 TargetClones.
- Features.push_back((StringRef{"?"} + VFeature).str());
- }
- }
+ llvm::SmallVector<StringRef, 8> Feats;
+ TC->getFeatures(Feats, GD.getMultiVersionIndex());
+ std::vector<std::string> Features = getFMVBackendFeaturesFor(Feats);
Features.insert(Features.begin(),
Target->getTargetOpts().FeaturesAsWritten.begin(),
Target->getTargetOpts().FeaturesAsWritten.end());
+ Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
} else {
+ std::vector<std::string> Features;
+ StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex());
if (VersionStr.starts_with("arch="))
TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1);
else if (VersionStr != "default")
Features.push_back((StringRef{"+"} + VersionStr).str());
+ Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
}
- Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
} else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) {
- std::vector<std::string> Feats = filterFunctionTargetVersionAttrs(TV);
- Feats.insert(Feats.begin(),
- Target->getTargetOpts().FeaturesAsWritten.begin(),
- Target->getTargetOpts().FeaturesAsWritten.end());
- Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Feats);
+ llvm::SmallVector<StringRef, 8> Feats;
+ TV->getFeatures(Feats);
+ std::vector<std::string> Features = getFMVBackendFeaturesFor(Feats);
+ Features.insert(Features.begin(),
+ Target->getTargetOpts().FeaturesAsWritten.begin(),
+ Target->getTargetOpts().FeaturesAsWritten.end());
+ Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
} else {
FeatureMap = Target->getTargetOpts().FeatureMap;
}
@@ -13609,3 +14182,74 @@ StringRef ASTContext::getCUIDHash() const {
CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true);
return CUIDHash;
}
+
+const CXXRecordDecl *
+ASTContext::baseForVTableAuthentication(const CXXRecordDecl *ThisClass) {
+ assert(ThisClass);
+ assert(ThisClass->isPolymorphic());
+ const CXXRecordDecl *PrimaryBase = ThisClass;
+ while (1) {
+ assert(PrimaryBase);
+ assert(PrimaryBase->isPolymorphic());
+ auto &Layout = getASTRecordLayout(PrimaryBase);
+ auto Base = Layout.getPrimaryBase();
+ if (!Base || Base == PrimaryBase || !Base->isPolymorphic())
+ break;
+ PrimaryBase = Base;
+ }
+ return PrimaryBase;
+}
+
+bool ASTContext::useAbbreviatedThunkName(GlobalDecl VirtualMethodDecl,
+ StringRef MangledName) {
+ auto *Method = cast<CXXMethodDecl>(VirtualMethodDecl.getDecl());
+ assert(Method->isVirtual());
+ bool DefaultIncludesPointerAuth =
+ LangOpts.PointerAuthCalls || LangOpts.PointerAuthIntrinsics;
+
+ if (!DefaultIncludesPointerAuth)
+ return true;
+
+ auto Existing = ThunksToBeAbbreviated.find(VirtualMethodDecl);
+ if (Existing != ThunksToBeAbbreviated.end())
+ return Existing->second.contains(MangledName.str());
+
+ std::unique_ptr<MangleContext> Mangler(createMangleContext());
+ llvm::StringMap<llvm::SmallVector<std::string, 2>> Thunks;
+ auto VtableContext = getVTableContext();
+ if (const auto *ThunkInfos = VtableContext->getThunkInfo(VirtualMethodDecl)) {
+ auto *Destructor = dyn_cast<CXXDestructorDecl>(Method);
+ for (const auto &Thunk : *ThunkInfos) {
+ SmallString<256> ElidedName;
+ llvm::raw_svector_ostream ElidedNameStream(ElidedName);
+ if (Destructor)
+ Mangler->mangleCXXDtorThunk(Destructor, VirtualMethodDecl.getDtorType(),
+ Thunk, /* elideOverrideInfo */ true,
+ ElidedNameStream);
+ else
+ Mangler->mangleThunk(Method, Thunk, /* elideOverrideInfo */ true,
+ ElidedNameStream);
+ SmallString<256> MangledName;
+ llvm::raw_svector_ostream mangledNameStream(MangledName);
+ if (Destructor)
+ Mangler->mangleCXXDtorThunk(Destructor, VirtualMethodDecl.getDtorType(),
+ Thunk, /* elideOverrideInfo */ false,
+ mangledNameStream);
+ else
+ Mangler->mangleThunk(Method, Thunk, /* elideOverrideInfo */ false,
+ mangledNameStream);
+
+ if (Thunks.find(ElidedName) == Thunks.end())
+ Thunks[ElidedName] = {};
+ Thunks[ElidedName].push_back(std::string(MangledName));
+ }
+ }
+ llvm::StringSet<> SimplifiedThunkNames;
+ for (auto &ThunkList : Thunks) {
+ llvm::sort(ThunkList.second);
+ SimplifiedThunkNames.insert(ThunkList.second[0]);
+ }
+ bool Result = SimplifiedThunkNames.contains(MangledName);
+ ThunksToBeAbbreviated[VirtualMethodDecl] = std::move(SimplifiedThunkNames);
+ return Result;
+}
diff --git a/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp b/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp
index 7b0d5f9cc1a9..0680ff5e3a38 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp
@@ -1215,46 +1215,19 @@ class TemplateDiff {
bool &NeedAddressOf) {
if (!Iter.isEnd()) {
switch (Iter->getKind()) {
- default:
- llvm_unreachable("unknown ArgumentKind");
- case TemplateArgument::Integral:
- Value = Iter->getAsIntegral();
- HasInt = true;
- IntType = Iter->getIntegralType();
- return;
- case TemplateArgument::Declaration: {
- VD = Iter->getAsDecl();
- QualType ArgType = Iter->getParamTypeForDecl();
- QualType VDType = VD->getType();
- if (ArgType->isPointerType() &&
- Context.hasSameType(ArgType->getPointeeType(), VDType))
- NeedAddressOf = true;
- return;
- }
- case TemplateArgument::NullPtr:
- IsNullPtr = true;
- return;
- case TemplateArgument::Expression:
- E = Iter->getAsExpr();
- }
- } else if (!Default->isParameterPack()) {
- E = Default->getDefaultArgument();
- }
-
- if (!Iter.hasDesugaredTA()) return;
-
- const TemplateArgument& TA = Iter.getDesugaredTA();
- switch (TA.getKind()) {
- default:
- llvm_unreachable("unknown ArgumentKind");
+ case TemplateArgument::StructuralValue:
+ // FIXME: Diffing of structural values is not implemented.
+ // There is no possible fallback in this case, this will show up
+ // as '(no argument)'.
+ return;
case TemplateArgument::Integral:
- Value = TA.getAsIntegral();
+ Value = Iter->getAsIntegral();
HasInt = true;
- IntType = TA.getIntegralType();
+ IntType = Iter->getIntegralType();
return;
case TemplateArgument::Declaration: {
- VD = TA.getAsDecl();
- QualType ArgType = TA.getParamTypeForDecl();
+ VD = Iter->getAsDecl();
+ QualType ArgType = Iter->getParamTypeForDecl();
QualType VDType = VD->getType();
if (ArgType->isPointerType() &&
Context.hasSameType(ArgType->getPointeeType(), VDType))
@@ -1265,13 +1238,62 @@ class TemplateDiff {
IsNullPtr = true;
return;
case TemplateArgument::Expression:
- // TODO: Sometimes, the desugared template argument Expr differs from
- // the sugared template argument Expr. It may be useful in the future
- // but for now, it is just discarded.
- if (!E)
- E = TA.getAsExpr();
- return;
+ E = Iter->getAsExpr();
+ break;
+ case TemplateArgument::Null:
+ case TemplateArgument::Type:
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ llvm_unreachable("TemplateArgument kind is not expected for NTTP");
+ case TemplateArgument::Pack:
+ llvm_unreachable("TemplateArgument kind should be handled elsewhere");
+ }
+ } else if (!Default->isParameterPack()) {
+ E = Default->getDefaultArgument().getArgument().getAsExpr();
}
+
+ if (!Iter.hasDesugaredTA())
+ return;
+
+ const TemplateArgument &TA = Iter.getDesugaredTA();
+ switch (TA.getKind()) {
+ case TemplateArgument::StructuralValue:
+ // FIXME: Diffing of structural values is not implemented.
+ // Just fall back to the expression.
+ return;
+ case TemplateArgument::Integral:
+ Value = TA.getAsIntegral();
+ HasInt = true;
+ IntType = TA.getIntegralType();
+ return;
+ case TemplateArgument::Declaration: {
+ VD = TA.getAsDecl();
+ QualType ArgType = TA.getParamTypeForDecl();
+ QualType VDType = VD->getType();
+ if (ArgType->isPointerType() &&
+ Context.hasSameType(ArgType->getPointeeType(), VDType))
+ NeedAddressOf = true;
+ return;
+ }
+ case TemplateArgument::NullPtr:
+ IsNullPtr = true;
+ return;
+ case TemplateArgument::Expression:
+ // TODO: Sometimes, the desugared template argument Expr differs from
+ // the sugared template argument Expr. It may be useful in the future
+ // but for now, it is just discarded.
+ if (!E)
+ E = TA.getAsExpr();
+ return;
+ case TemplateArgument::Null:
+ case TemplateArgument::Type:
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ llvm_unreachable("TemplateArgument kind is not expected for NTTP");
+ case TemplateArgument::Pack:
+ llvm_unreachable("TemplateArgument kind should be handled elsewhere");
+ }
+ llvm_unreachable("Unexpected TemplateArgument kind");
}
/// DiffNonTypes - Handles any template parameters not handled by DiffTypes
@@ -1914,6 +1936,11 @@ class TemplateDiff {
return;
}
+ if (E) {
+ PrintExpr(E);
+ return;
+ }
+
OS << "(no argument)";
}
diff --git a/contrib/llvm-project/clang/lib/AST/ASTDumper.cpp b/contrib/llvm-project/clang/lib/AST/ASTDumper.cpp
index cc9a84eecaad..864d0393f9a7 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTDumper.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTDumper.cpp
@@ -17,7 +17,6 @@
#include "clang/AST/DeclLookups.h"
#include "clang/AST/JSONNodeDumper.h"
#include "clang/Basic/Builtins.h"
-#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/Support/raw_ostream.h"
@@ -201,6 +200,19 @@ LLVM_DUMP_METHOD void Type::dump(llvm::raw_ostream &OS,
}
//===----------------------------------------------------------------------===//
+// TypeLoc method implementations
+//===----------------------------------------------------------------------===//
+
+LLVM_DUMP_METHOD void TypeLoc::dump() const {
+ ASTDumper(llvm::errs(), /*ShowColors=*/false).Visit(*this);
+}
+
+LLVM_DUMP_METHOD void TypeLoc::dump(llvm::raw_ostream &OS,
+ const ASTContext &Context) const {
+ ASTDumper(OS, Context, Context.getDiagnostics().getShowColors()).Visit(*this);
+}
+
+//===----------------------------------------------------------------------===//
// Decl method implementations
//===----------------------------------------------------------------------===//
@@ -330,8 +342,7 @@ LLVM_DUMP_METHOD void APValue::dump() const {
LLVM_DUMP_METHOD void APValue::dump(raw_ostream &OS,
const ASTContext &Context) const {
- ASTDumper Dumper(llvm::errs(), Context,
- Context.getDiagnostics().getShowColors());
+ ASTDumper Dumper(OS, Context, Context.getDiagnostics().getShowColors());
Dumper.Visit(*this, /*Ty=*/Context.getPointerType(Context.CharTy));
}
@@ -348,3 +359,37 @@ LLVM_DUMP_METHOD void ConceptReference::dump(raw_ostream &OS) const {
ASTDumper P(OS, Ctx, Ctx.getDiagnostics().getShowColors());
P.Visit(this);
}
+
+//===----------------------------------------------------------------------===//
+// TemplateName method implementations
+//===----------------------------------------------------------------------===//
+
+// FIXME: These are actually using the TemplateArgument dumper, through
+// an implicit conversion. The dump will claim this is a template argument,
+// which is misleading.
+
+LLVM_DUMP_METHOD void TemplateName::dump() const {
+ ASTDumper Dumper(llvm::errs(), /*ShowColors=*/false);
+ Dumper.Visit(*this);
+}
+
+LLVM_DUMP_METHOD void TemplateName::dump(llvm::raw_ostream &OS,
+ const ASTContext &Context) const {
+ ASTDumper Dumper(OS, Context, Context.getDiagnostics().getShowColors());
+ Dumper.Visit(*this);
+}
+
+//===----------------------------------------------------------------------===//
+// TemplateArgument method implementations
+//===----------------------------------------------------------------------===//
+
+LLVM_DUMP_METHOD void TemplateArgument::dump() const {
+ ASTDumper Dumper(llvm::errs(), /*ShowColors=*/false);
+ Dumper.Visit(*this);
+}
+
+LLVM_DUMP_METHOD void TemplateArgument::dump(llvm::raw_ostream &OS,
+ const ASTContext &Context) const {
+ ASTDumper Dumper(OS, Context, Context.getDiagnostics().getShowColors());
+ Dumper.Visit(*this);
+}
diff --git a/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp b/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
index 12734d62ed9f..e95992b99f7e 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
@@ -443,8 +443,9 @@ namespace clang {
Expected<FunctionTemplateAndArgsTy>
ImportFunctionTemplateWithTemplateArgsFromSpecialization(
FunctionDecl *FromFD);
- Error ImportTemplateParameterLists(const DeclaratorDecl *FromD,
- DeclaratorDecl *ToD);
+
+ template <typename DeclTy>
+ Error ImportTemplateParameterLists(const DeclTy *FromD, DeclTy *ToD);
Error ImportTemplateInformation(FunctionDecl *FromFD, FunctionDecl *ToFD);
@@ -695,7 +696,7 @@ namespace clang {
// Returns true if the given function has a placeholder return type and
// that type is declared inside the body of the function.
// E.g. auto f() { struct X{}; return X(); }
- bool hasAutoReturnTypeDeclaredInside(FunctionDecl *D);
+ bool hasReturnTypeDeclaredInside(FunctionDecl *D);
};
template <typename InContainerTy>
@@ -1098,6 +1099,10 @@ ExpectedType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) {
case BuiltinType::Id: \
return Importer.getToContext().SingletonId;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
+#define AMDGPU_TYPE(Name, Id, SingletonId) \
+ case BuiltinType::Id: \
+ return Importer.getToContext().SingletonId;
+#include "clang/Basic/AMDGPUTypes.def"
#define SHARED_SINGLETON_TYPE(Expansion)
#define BUILTIN_TYPE(Id, SingletonId) \
case BuiltinType::Id: return Importer.getToContext().SingletonId;
@@ -1219,6 +1224,15 @@ ASTNodeImporter::VisitConstantArrayType(const ConstantArrayType *T) {
}
ExpectedType
+ASTNodeImporter::VisitArrayParameterType(const ArrayParameterType *T) {
+ ExpectedType ToArrayTypeOrErr = VisitConstantArrayType(T);
+ if (!ToArrayTypeOrErr)
+ return ToArrayTypeOrErr.takeError();
+
+ return Importer.getToContext().getArrayParameterType(*ToArrayTypeOrErr);
+}
+
+ExpectedType
ASTNodeImporter::VisitIncompleteArrayType(const IncompleteArrayType *T) {
ExpectedType ToElementTypeOrErr = import(T->getElementType());
if (!ToElementTypeOrErr)
@@ -1369,6 +1383,18 @@ ExpectedType ASTNodeImporter::VisitParenType(const ParenType *T) {
return Importer.getToContext().getParenType(*ToInnerTypeOrErr);
}
+ExpectedType
+ASTNodeImporter::VisitPackIndexingType(clang::PackIndexingType const *T) {
+
+ ExpectedType Pattern = import(T->getPattern());
+ if (!Pattern)
+ return Pattern.takeError();
+ ExpectedExpr Index = import(T->getIndexExpr());
+ if (!Index)
+ return Index.takeError();
+ return Importer.getToContext().getPackIndexingType(*Pattern, *Index);
+}
+
ExpectedType ASTNodeImporter::VisitTypedefType(const TypedefType *T) {
Expected<TypedefNameDecl *> ToDeclOrErr = import(T->getDecl());
if (!ToDeclOrErr)
@@ -1483,7 +1509,7 @@ ExpectedType ASTNodeImporter::VisitInjectedClassNameType(
// The InjectedClassNameType is created in VisitRecordDecl when the
// T->getDecl() is imported. Here we can return the existing type.
const Type *Ty = (*ToDeclOrErr)->getTypeForDecl();
- assert(Ty && isa<InjectedClassNameType>(Ty));
+ assert(isa_and_nonnull<InjectedClassNameType>(Ty));
return QualType(Ty, 0);
}
@@ -1515,6 +1541,28 @@ ExpectedType ASTNodeImporter::VisitAttributedType(const AttributedType *T) {
*ToModifiedTypeOrErr, *ToEquivalentTypeOrErr);
}
+ExpectedType
+ASTNodeImporter::VisitCountAttributedType(const CountAttributedType *T) {
+ ExpectedType ToWrappedTypeOrErr = import(T->desugar());
+ if (!ToWrappedTypeOrErr)
+ return ToWrappedTypeOrErr.takeError();
+
+ Error Err = Error::success();
+ Expr *CountExpr = importChecked(Err, T->getCountExpr());
+
+ SmallVector<TypeCoupledDeclRefInfo, 1> CoupledDecls;
+ for (const TypeCoupledDeclRefInfo &TI : T->dependent_decls()) {
+ Expected<ValueDecl *> ToDeclOrErr = import(TI.getDecl());
+ if (!ToDeclOrErr)
+ return ToDeclOrErr.takeError();
+ CoupledDecls.emplace_back(*ToDeclOrErr, TI.isDeref());
+ }
+
+ return Importer.getToContext().getCountAttributedType(
+ *ToWrappedTypeOrErr, CountExpr, T->isCountInBytes(), T->isOrNull(),
+ ArrayRef(CoupledDecls.data(), CoupledDecls.size()));
+}
+
ExpectedType ASTNodeImporter::VisitTemplateTypeParmType(
const TemplateTypeParmType *T) {
Expected<TemplateTypeParmDecl *> ToDeclOrErr = import(T->getDecl());
@@ -2885,7 +2933,7 @@ ExpectedDecl ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
// We may already have an enum of the same name; try to find and match it.
EnumDecl *PrevDecl = nullptr;
- if (!DC->isFunctionOrMethod() && SearchName) {
+ if (!DC->isFunctionOrMethod()) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
auto FoundDecls =
Importer.findDeclsInToCtx(DC, SearchName);
@@ -2901,7 +2949,7 @@ ExpectedDecl ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
if (auto *FoundEnum = dyn_cast<EnumDecl>(FoundDecl)) {
if (!hasSameVisibilityContextAndLinkage(FoundEnum, D))
continue;
- if (IsStructuralMatch(D, FoundEnum)) {
+ if (IsStructuralMatch(D, FoundEnum, !SearchName.isEmpty())) {
EnumDecl *FoundDef = FoundEnum->getDefinition();
if (D->isThisDeclarationADefinition() && FoundDef)
return Importer.MapImported(D, FoundDef);
@@ -2912,7 +2960,12 @@ ExpectedDecl ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
}
}
- if (!ConflictingDecls.empty()) {
+ // In case of unnamed enums, we try to find an existing similar one, if none
+ // was found, perform the import always.
+ // Structural in-equivalence is not detected in this way here, but it may
+ // be found when the parent decl is imported (if the enum is part of a
+ // class). To make this totally exact a more difficult solution is needed.
+ if (SearchName && !ConflictingDecls.empty()) {
ExpectedName NameOrErr = Importer.HandleNameConflict(
SearchName, DC, IDNS, ConflictingDecls.data(),
ConflictingDecls.size());
@@ -3279,8 +3332,9 @@ ExpectedDecl ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
return ToEnumerator;
}
-Error ASTNodeImporter::ImportTemplateParameterLists(const DeclaratorDecl *FromD,
- DeclaratorDecl *ToD) {
+template <typename DeclTy>
+Error ASTNodeImporter::ImportTemplateParameterLists(const DeclTy *FromD,
+ DeclTy *ToD) {
unsigned int Num = FromD->getNumTemplateParameterLists();
if (Num == 0)
return Error::success();
@@ -3604,15 +3658,28 @@ private:
};
} // namespace
-/// This function checks if the function has 'auto' return type that contains
+/// This function checks if the given function has a return type that contains
/// a reference (in any way) to a declaration inside the same function.
-bool ASTNodeImporter::hasAutoReturnTypeDeclaredInside(FunctionDecl *D) {
+bool ASTNodeImporter::hasReturnTypeDeclaredInside(FunctionDecl *D) {
QualType FromTy = D->getType();
const auto *FromFPT = FromTy->getAs<FunctionProtoType>();
assert(FromFPT && "Must be called on FunctionProtoType");
+ auto IsCXX11LambdaWithouTrailingReturn = [&]() {
+ if (Importer.FromContext.getLangOpts().CPlusPlus14) // C++14 or later
+ return false;
+
+ if (FromFPT->hasTrailingReturn())
+ return false;
+
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(D))
+ return cast<CXXRecordDecl>(MD->getDeclContext())->isLambda();
+
+ return false;
+ };
+
QualType RetT = FromFPT->getReturnType();
- if (isa<AutoType>(RetT.getTypePtr())) {
+ if (isa<AutoType>(RetT.getTypePtr()) || IsCXX11LambdaWithouTrailingReturn()) {
FunctionDecl *Def = D->getDefinition();
IsTypeDeclaredInsideVisitor Visitor(Def ? Def : D);
return Visitor.CheckType(RetT);
@@ -3768,7 +3835,7 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
// E.g.: auto foo() { struct X{}; return X(); }
// To avoid an infinite recursion when importing, create the FunctionDecl
// with a simplified return type.
- if (hasAutoReturnTypeDeclaredInside(D)) {
+ if (hasReturnTypeDeclaredInside(D)) {
FromReturnTy = Importer.getFromContext().VoidTy;
UsedDifferentProtoType = true;
}
@@ -3904,6 +3971,14 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
// decl and its redeclarations may be required.
}
+ StringLiteral *Msg = D->getDeletedMessage();
+ if (Msg) {
+ auto Imported = import(Msg);
+ if (!Imported)
+ return Imported.takeError();
+ Msg = *Imported;
+ }
+
ToFunction->setQualifierInfo(ToQualifierLoc);
ToFunction->setAccess(D->getAccess());
ToFunction->setLexicalDeclContext(LexicalDC);
@@ -3918,6 +3993,11 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
ToFunction->setRangeEnd(ToEndLoc);
ToFunction->setDefaultLoc(ToDefaultLoc);
+ if (Msg)
+ ToFunction->setDefaultedOrDeletedInfo(
+ FunctionDecl::DefaultedOrDeletedFunctionInfo::Create(
+ Importer.getToContext(), {}, Msg));
+
// Set the parameters.
for (auto *Param : Parameters) {
Param->setOwningFunction(ToFunction);
@@ -4498,6 +4578,10 @@ ExpectedDecl ASTNodeImporter::VisitVarDecl(VarDecl *D) {
ToVar->setQualifierInfo(ToQualifierLoc);
ToVar->setAccess(D->getAccess());
ToVar->setLexicalDeclContext(LexicalDC);
+ if (D->isInlineSpecified())
+ ToVar->setInlineSpecified();
+ if (D->isInline())
+ ToVar->setImplicitlyInline();
if (FoundByLookup) {
auto *Recent = const_cast<VarDecl *>(FoundByLookup->getMostRecentDecl());
@@ -5842,11 +5926,11 @@ ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
}
if (D->hasDefaultArgument()) {
- Expected<TypeSourceInfo *> ToDefaultArgOrErr =
- import(D->getDefaultArgumentInfo());
+ Expected<TemplateArgumentLoc> ToDefaultArgOrErr =
+ import(D->getDefaultArgument());
if (!ToDefaultArgOrErr)
return ToDefaultArgOrErr.takeError();
- ToD->setDefaultArgument(*ToDefaultArgOrErr);
+ ToD->setDefaultArgument(ToD->getASTContext(), *ToDefaultArgOrErr);
}
return ToD;
@@ -5874,10 +5958,11 @@ ASTNodeImporter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
return ToD;
if (D->hasDefaultArgument()) {
- ExpectedExpr ToDefaultArgOrErr = import(D->getDefaultArgument());
+ Expected<TemplateArgumentLoc> ToDefaultArgOrErr =
+ import(D->getDefaultArgument());
if (!ToDefaultArgOrErr)
return ToDefaultArgOrErr.takeError();
- ToD->setDefaultArgument(*ToDefaultArgOrErr);
+ ToD->setDefaultArgument(Importer.getToContext(), *ToDefaultArgOrErr);
}
return ToD;
@@ -5905,7 +5990,8 @@ ASTNodeImporter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
ToD, D, Importer.getToContext(),
Importer.getToContext().getTranslationUnitDecl(), *LocationOrErr,
D->getDepth(), D->getPosition(), D->isParameterPack(),
- (*NameOrErr).getAsIdentifierInfo(), *TemplateParamsOrErr))
+ (*NameOrErr).getAsIdentifierInfo(), D->wasDeclaredWithTypename(),
+ *TemplateParamsOrErr))
return ToD;
if (D->hasDefaultArgument()) {
@@ -6136,15 +6222,16 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl(
if (!IdLocOrErr)
return IdLocOrErr.takeError();
+ // Import TemplateArgumentListInfo.
+ TemplateArgumentListInfo ToTAInfo;
+ if (const auto *ASTTemplateArgs = D->getTemplateArgsAsWritten()) {
+ if (Error Err = ImportTemplateArgumentListInfo(*ASTTemplateArgs, ToTAInfo))
+ return std::move(Err);
+ }
+
// Create the specialization.
ClassTemplateSpecializationDecl *D2 = nullptr;
if (PartialSpec) {
- // Import TemplateArgumentListInfo.
- TemplateArgumentListInfo ToTAInfo;
- const auto &ASTTemplateArgs = *PartialSpec->getTemplateArgsAsWritten();
- if (Error Err = ImportTemplateArgumentListInfo(ASTTemplateArgs, ToTAInfo))
- return std::move(Err);
-
QualType CanonInjType;
if (Error Err = importInto(
CanonInjType, PartialSpec->getInjectedSpecializationType()))
@@ -6154,7 +6241,7 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl(
if (GetImportedOrCreateDecl<ClassTemplatePartialSpecializationDecl>(
D2, D, Importer.getToContext(), D->getTagKind(), DC, *BeginLocOrErr,
*IdLocOrErr, ToTPList, ClassTemplate,
- llvm::ArrayRef(TemplateArgs.data(), TemplateArgs.size()), ToTAInfo,
+ llvm::ArrayRef(TemplateArgs.data(), TemplateArgs.size()),
CanonInjType,
cast_or_null<ClassTemplatePartialSpecializationDecl>(PrevDecl)))
return D2;
@@ -6202,28 +6289,27 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl(
else
return BraceRangeOrErr.takeError();
+ if (Error Err = ImportTemplateParameterLists(D, D2))
+ return std::move(Err);
+
// Import the qualifier, if any.
if (auto LocOrErr = import(D->getQualifierLoc()))
D2->setQualifierInfo(*LocOrErr);
else
return LocOrErr.takeError();
- if (auto *TSI = D->getTypeAsWritten()) {
- if (auto TInfoOrErr = import(TSI))
- D2->setTypeAsWritten(*TInfoOrErr);
- else
- return TInfoOrErr.takeError();
+ if (D->getTemplateArgsAsWritten())
+ D2->setTemplateArgsAsWritten(ToTAInfo);
- if (auto LocOrErr = import(D->getTemplateKeywordLoc()))
- D2->setTemplateKeywordLoc(*LocOrErr);
- else
- return LocOrErr.takeError();
+ if (auto LocOrErr = import(D->getTemplateKeywordLoc()))
+ D2->setTemplateKeywordLoc(*LocOrErr);
+ else
+ return LocOrErr.takeError();
- if (auto LocOrErr = import(D->getExternLoc()))
- D2->setExternLoc(*LocOrErr);
- else
- return LocOrErr.takeError();
- }
+ if (auto LocOrErr = import(D->getExternKeywordLoc()))
+ D2->setExternKeywordLoc(*LocOrErr);
+ else
+ return LocOrErr.takeError();
if (D->getPointOfInstantiation().isValid()) {
if (auto POIOrErr = import(D->getPointOfInstantiation()))
@@ -6373,16 +6459,19 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
ExpectedDecl ASTNodeImporter::VisitVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *D) {
- // If this record has a definition in the translation unit we're coming from,
- // but this particular declaration is not that definition, import the
- // definition and map to that.
- VarDecl *Definition = D->getDefinition();
- if (Definition && Definition != D) {
- if (ExpectedDecl ImportedDefOrErr = import(Definition))
- return Importer.MapImported(D, *ImportedDefOrErr);
- else
- return ImportedDefOrErr.takeError();
+ // A VarTemplateSpecializationDecl inherits from VarDecl, the import is done
+ // in an analog way (but specialized for this case).
+
+ SmallVector<Decl *, 2> Redecls = getCanonicalForwardRedeclChain(D);
+ auto RedeclIt = Redecls.begin();
+ // Import the first part of the decl chain. I.e. import all previous
+ // declarations starting from the canonical decl.
+ for (; RedeclIt != Redecls.end() && *RedeclIt != D; ++RedeclIt) {
+ ExpectedDecl RedeclOrErr = import(*RedeclIt);
+ if (!RedeclOrErr)
+ return RedeclOrErr.takeError();
}
+ assert(*RedeclIt == D);
VarTemplateDecl *VarTemplate = nullptr;
if (Error Err = importInto(VarTemplate, D->getSpecializedTemplate()))
@@ -6410,116 +6499,129 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateSpecializationDecl(
// Try to find an existing specialization with these template arguments.
void *InsertPos = nullptr;
- VarTemplateSpecializationDecl *D2 = VarTemplate->findSpecialization(
- TemplateArgs, InsertPos);
- if (D2) {
- // We already have a variable template specialization with these template
- // arguments.
-
- // FIXME: Check for specialization vs. instantiation errors.
-
- if (VarDecl *FoundDef = D2->getDefinition()) {
- if (!D->isThisDeclarationADefinition() ||
- IsStructuralMatch(D, FoundDef)) {
- // The record types structurally match, or the "from" translation
- // unit only had a forward declaration anyway; call it the same
- // variable.
- return Importer.MapImported(D, FoundDef);
+ VarTemplateSpecializationDecl *FoundSpecialization =
+ VarTemplate->findSpecialization(TemplateArgs, InsertPos);
+ if (FoundSpecialization) {
+ if (IsStructuralMatch(D, FoundSpecialization)) {
+ VarDecl *FoundDef = FoundSpecialization->getDefinition();
+ if (D->getDeclContext()->isRecord()) {
+ // In a record, it is allowed only to have one optional declaration and
+ // one definition of the (static or constexpr) variable template.
+ assert(
+ FoundSpecialization->getDeclContext()->isRecord() &&
+ "Member variable template specialization imported as non-member, "
+ "inconsistent imported AST?");
+ if (FoundDef)
+ return Importer.MapImported(D, FoundDef);
+ if (!D->isThisDeclarationADefinition())
+ return Importer.MapImported(D, FoundSpecialization);
+ } else {
+ // If definition is imported and there is already one, map to it.
+ // Otherwise create a new variable and link it to the existing.
+ if (FoundDef && D->isThisDeclarationADefinition())
+ return Importer.MapImported(D, FoundDef);
}
+ } else {
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
- } else {
- TemplateArgumentListInfo ToTAInfo;
- if (const ASTTemplateArgumentListInfo *Args = D->getTemplateArgsInfo()) {
- if (Error Err = ImportTemplateArgumentListInfo(*Args, ToTAInfo))
- return std::move(Err);
- }
+ }
- using PartVarSpecDecl = VarTemplatePartialSpecializationDecl;
- // Create a new specialization.
- if (auto *FromPartial = dyn_cast<PartVarSpecDecl>(D)) {
- // Import TemplateArgumentListInfo
- TemplateArgumentListInfo ArgInfos;
- const auto *FromTAArgsAsWritten = FromPartial->getTemplateArgsAsWritten();
- // NOTE: FromTAArgsAsWritten and template parameter list are non-null.
- if (Error Err = ImportTemplateArgumentListInfo(
- *FromTAArgsAsWritten, ArgInfos))
- return std::move(Err);
+ VarTemplateSpecializationDecl *D2 = nullptr;
- auto ToTPListOrErr = import(FromPartial->getTemplateParameters());
- if (!ToTPListOrErr)
- return ToTPListOrErr.takeError();
+ TemplateArgumentListInfo ToTAInfo;
+ if (const auto *Args = D->getTemplateArgsAsWritten()) {
+ if (Error Err = ImportTemplateArgumentListInfo(*Args, ToTAInfo))
+ return std::move(Err);
+ }
- PartVarSpecDecl *ToPartial;
- if (GetImportedOrCreateDecl(ToPartial, D, Importer.getToContext(), DC,
- *BeginLocOrErr, *IdLocOrErr, *ToTPListOrErr,
- VarTemplate, QualType(), nullptr,
- D->getStorageClass(), TemplateArgs, ArgInfos))
- return ToPartial;
+ using PartVarSpecDecl = VarTemplatePartialSpecializationDecl;
+ // Create a new specialization.
+ if (auto *FromPartial = dyn_cast<PartVarSpecDecl>(D)) {
+ auto ToTPListOrErr = import(FromPartial->getTemplateParameters());
+ if (!ToTPListOrErr)
+ return ToTPListOrErr.takeError();
- if (Expected<PartVarSpecDecl *> ToInstOrErr = import(
- FromPartial->getInstantiatedFromMember()))
- ToPartial->setInstantiatedFromMember(*ToInstOrErr);
- else
- return ToInstOrErr.takeError();
-
- if (FromPartial->isMemberSpecialization())
- ToPartial->setMemberSpecialization();
-
- D2 = ToPartial;
-
- // FIXME: Use this update if VarTemplatePartialSpecializationDecl is fixed
- // to adopt template parameters.
- // updateLookupTableForTemplateParameters(**ToTPListOrErr);
- } else { // Full specialization
- if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(), DC,
- *BeginLocOrErr, *IdLocOrErr, VarTemplate,
- QualType(), nullptr, D->getStorageClass(),
- TemplateArgs))
- return D2;
- }
+ PartVarSpecDecl *ToPartial;
+ if (GetImportedOrCreateDecl(ToPartial, D, Importer.getToContext(), DC,
+ *BeginLocOrErr, *IdLocOrErr, *ToTPListOrErr,
+ VarTemplate, QualType(), nullptr,
+ D->getStorageClass(), TemplateArgs))
+ return ToPartial;
- QualType T;
- if (Error Err = importInto(T, D->getType()))
- return std::move(Err);
- D2->setType(T);
+ if (Expected<PartVarSpecDecl *> ToInstOrErr =
+ import(FromPartial->getInstantiatedFromMember()))
+ ToPartial->setInstantiatedFromMember(*ToInstOrErr);
+ else
+ return ToInstOrErr.takeError();
- auto TInfoOrErr = import(D->getTypeSourceInfo());
- if (!TInfoOrErr)
- return TInfoOrErr.takeError();
- D2->setTypeSourceInfo(*TInfoOrErr);
+ if (FromPartial->isMemberSpecialization())
+ ToPartial->setMemberSpecialization();
- if (D->getPointOfInstantiation().isValid()) {
- if (ExpectedSLoc POIOrErr = import(D->getPointOfInstantiation()))
- D2->setPointOfInstantiation(*POIOrErr);
- else
- return POIOrErr.takeError();
- }
+ D2 = ToPartial;
- D2->setSpecializationKind(D->getSpecializationKind());
- D2->setTemplateArgsInfo(ToTAInfo);
+ // FIXME: Use this update if VarTemplatePartialSpecializationDecl is fixed
+ // to adopt template parameters.
+ // updateLookupTableForTemplateParameters(**ToTPListOrErr);
+ } else { // Full specialization
+ if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(), DC,
+ *BeginLocOrErr, *IdLocOrErr, VarTemplate,
+ QualType(), nullptr, D->getStorageClass(),
+ TemplateArgs))
+ return D2;
+ }
- // Add this specialization to the class template.
+ // Update InsertPos, because preceding import calls may have invalidated
+ // it by adding new specializations.
+ if (!VarTemplate->findSpecialization(TemplateArgs, InsertPos))
VarTemplate->AddSpecialization(D2, InsertPos);
- // Import the qualifier, if any.
- if (auto LocOrErr = import(D->getQualifierLoc()))
- D2->setQualifierInfo(*LocOrErr);
+ QualType T;
+ if (Error Err = importInto(T, D->getType()))
+ return std::move(Err);
+ D2->setType(T);
+
+ auto TInfoOrErr = import(D->getTypeSourceInfo());
+ if (!TInfoOrErr)
+ return TInfoOrErr.takeError();
+ D2->setTypeSourceInfo(*TInfoOrErr);
+
+ if (D->getPointOfInstantiation().isValid()) {
+ if (ExpectedSLoc POIOrErr = import(D->getPointOfInstantiation()))
+ D2->setPointOfInstantiation(*POIOrErr);
else
- return LocOrErr.takeError();
+ return POIOrErr.takeError();
+ }
- if (D->isConstexpr())
- D2->setConstexpr(true);
+ D2->setSpecializationKind(D->getSpecializationKind());
- // Add the specialization to this context.
- D2->setLexicalDeclContext(LexicalDC);
- LexicalDC->addDeclInternal(D2);
+ if (D->getTemplateArgsAsWritten())
+ D2->setTemplateArgsAsWritten(ToTAInfo);
- D2->setAccess(D->getAccess());
- }
+ if (auto LocOrErr = import(D->getQualifierLoc()))
+ D2->setQualifierInfo(*LocOrErr);
+ else
+ return LocOrErr.takeError();
+
+ if (D->isConstexpr())
+ D2->setConstexpr(true);
+
+ D2->setAccess(D->getAccess());
if (Error Err = ImportInitializer(D, D2))
return std::move(Err);
+ if (FoundSpecialization)
+ D2->setPreviousDecl(FoundSpecialization->getMostRecentDecl());
+
+ addDeclToContexts(D, D2);
+
+ // Import the rest of the chain. I.e. import all subsequent declarations.
+ for (++RedeclIt; RedeclIt != Redecls.end(); ++RedeclIt) {
+ ExpectedDecl RedeclOrErr = import(*RedeclIt);
+ if (!RedeclOrErr)
+ return RedeclOrErr.takeError();
+ }
+
return D2;
}
@@ -8317,8 +8419,8 @@ ASTNodeImporter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
return std::move(Err);
PseudoDestructorTypeStorage Storage;
- if (IdentifierInfo *FromII = E->getDestroyedTypeIdentifier()) {
- IdentifierInfo *ToII = Importer.Import(FromII);
+ if (const IdentifierInfo *FromII = E->getDestroyedTypeIdentifier()) {
+ const IdentifierInfo *ToII = Importer.Import(FromII);
ExpectedSLoc ToDestroyedTypeLocOrErr = import(E->getDestroyedTypeLoc());
if (!ToDestroyedTypeLocOrErr)
return ToDestroyedTypeLocOrErr.takeError();
@@ -8476,13 +8578,15 @@ ASTNodeImporter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
return UnresolvedLookupExpr::Create(
Importer.getToContext(), *ToNamingClassOrErr, *ToQualifierLocOrErr,
*ToTemplateKeywordLocOrErr, ToNameInfo, E->requiresADL(), &ToTAInfo,
- ToDecls.begin(), ToDecls.end(), KnownDependent);
+ ToDecls.begin(), ToDecls.end(), KnownDependent,
+ /*KnownInstantiationDependent=*/E->isInstantiationDependent());
}
return UnresolvedLookupExpr::Create(
Importer.getToContext(), *ToNamingClassOrErr, *ToQualifierLocOrErr,
- ToNameInfo, E->requiresADL(), E->isOverloaded(), ToDecls.begin(),
- ToDecls.end());
+ ToNameInfo, E->requiresADL(), ToDecls.begin(), ToDecls.end(),
+ /*KnownDependent=*/E->isTypeDependent(),
+ /*KnownInstantiationDependent=*/E->isInstantiationDependent());
}
ExpectedStmt
@@ -9274,16 +9378,6 @@ Expected<Attr *> ASTImporter::Import(const Attr *FromAttr) {
From->args_size());
break;
}
- case attr::CountedBy: {
- AI.cloneAttr(FromAttr);
- const auto *CBA = cast<CountedByAttr>(FromAttr);
- Expected<SourceRange> SR = Import(CBA->getCountedByFieldLoc()).get();
- if (!SR)
- return SR.takeError();
- AI.castAttrAs<CountedByAttr>()->setCountedByFieldLoc(SR.get());
- break;
- }
-
default: {
// The default branch works for attributes that have no arguments to import.
// FIXME: Handle every attribute type that has arguments of type to import
@@ -10138,7 +10232,7 @@ Expected<Selector> ASTImporter::Import(Selector FromSel) {
if (FromSel.isNull())
return Selector{};
- SmallVector<IdentifierInfo *, 4> Idents;
+ SmallVector<const IdentifierInfo *, 4> Idents;
Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(0)));
for (unsigned I = 1, N = FromSel.getNumArgs(); I < N; ++I)
Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(I)));
diff --git a/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp b/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp
index be7a850a2982..37555c324282 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp
@@ -74,6 +74,7 @@
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtOpenACC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
@@ -347,6 +348,15 @@ class StmtComparer {
return true;
}
+ bool IsStmtEquivalent(const CXXDependentScopeMemberExpr *E1,
+ const CXXDependentScopeMemberExpr *E2) {
+ if (!IsStructurallyEquivalent(Context, E1->getMember(), E2->getMember())) {
+ return false;
+ }
+ return IsStructurallyEquivalent(Context, E1->getBaseType(),
+ E2->getBaseType());
+ }
+
bool IsStmtEquivalent(const UnaryExprOrTypeTraitExpr *E1,
const UnaryExprOrTypeTraitExpr *E2) {
if (E1->getKind() != E2->getKind())
@@ -839,6 +849,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
case Type::Adjusted:
case Type::Decayed:
+ case Type::ArrayParameter:
if (!IsStructurallyEquivalent(Context,
cast<AdjustedType>(T1)->getOriginalType(),
cast<AdjustedType>(T2)->getOriginalType()))
@@ -1068,6 +1079,13 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
break;
+ case Type::CountAttributed:
+ if (!IsStructurallyEquivalent(Context,
+ cast<CountAttributedType>(T1)->desugar(),
+ cast<CountAttributedType>(T2)->desugar()))
+ return false;
+ break;
+
case Type::BTFTagAttributed:
if (!IsStructurallyEquivalent(
Context, cast<BTFTagAttributedType>(T1)->getWrappedType(),
@@ -1292,6 +1310,16 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
break;
+ case Type::PackIndexing:
+ if (!IsStructurallyEquivalent(Context,
+ cast<PackIndexingType>(T1)->getPattern(),
+ cast<PackIndexingType>(T2)->getPattern()))
+ if (!IsStructurallyEquivalent(Context,
+ cast<PackIndexingType>(T1)->getIndexExpr(),
+ cast<PackIndexingType>(T2)->getIndexExpr()))
+ return false;
+ break;
+
case Type::ObjCInterface: {
const auto *Iface1 = cast<ObjCInterfaceType>(T1);
const auto *Iface2 = cast<ObjCInterfaceType>(T2);
@@ -1379,9 +1407,6 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
VarDecl *D1, VarDecl *D2) {
- if (D1->getStorageClass() != D2->getStorageClass())
- return false;
-
IdentifierInfo *Name1 = D1->getIdentifier();
IdentifierInfo *Name2 = D2->getIdentifier();
if (!::IsStructurallyEquivalent(Name1, Name2))
@@ -1390,6 +1415,15 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (!IsStructurallyEquivalent(Context, D1->getType(), D2->getType()))
return false;
+ // Compare storage class and initializer only if none or both are a
+ // definition. Like a forward-declaration matches a class definition, variable
+ // declarations that are not definitions should match with the definitions.
+ if (D1->isThisDeclarationADefinition() != D2->isThisDeclarationADefinition())
+ return true;
+
+ if (D1->getStorageClass() != D2->getStorageClass())
+ return false;
+
return IsStructurallyEquivalent(Context, D1->getInit(), D2->getInit());
}
@@ -1972,7 +2006,10 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
return false;
}
-
+ if (!Context.IgnoreTemplateParmDepth && D1->getDepth() != D2->getDepth())
+ return false;
+ if (D1->getIndex() != D2->getIndex())
+ return false;
// Check types.
if (!IsStructurallyEquivalent(Context, D1->getType(), D2->getType())) {
if (Context.Complain) {
diff --git a/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp b/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp
index 4c7496c699be..99916f523aa9 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp
@@ -228,6 +228,8 @@ void DynTypedNode::dump(llvm::raw_ostream &OS,
T->dump(OS, Context);
else if (const ConceptReference *C = get<ConceptReference>())
C->dump(OS);
+ else if (const TypeLoc *TL = get<TypeLoc>())
+ TL->dump(OS, Context);
else
OS << "Unable to dump values of type " << NodeKind.asStringRef() << "\n";
}
diff --git a/contrib/llvm-project/clang/lib/AST/AttrDocTable.cpp b/contrib/llvm-project/clang/lib/AST/AttrDocTable.cpp
index df7e3d63a6c3..56a143b9ed29 100644
--- a/contrib/llvm-project/clang/lib/AST/AttrDocTable.cpp
+++ b/contrib/llvm-project/clang/lib/AST/AttrDocTable.cpp
@@ -21,7 +21,7 @@ static const llvm::StringRef AttrDoc[] = {
};
llvm::StringRef clang::Attr::getDocumentation(clang::attr::Kind K) {
- if (K < std::size(AttrDoc))
+ if (K < (int)std::size(AttrDoc))
return AttrDoc[K];
return "";
}
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/AvailabilityInfo.cpp b/contrib/llvm-project/clang/lib/AST/Availability.cpp
index 18e4d16b45bb..238359a2dedf 100644
--- a/contrib/llvm-project/clang/lib/ExtractAPI/AvailabilityInfo.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Availability.cpp
@@ -1,11 +1,22 @@
-#include "clang/ExtractAPI/AvailabilityInfo.h"
+//===- Availability.cpp --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Availability information for Decls.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Availability.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
#include "clang/Basic/TargetInfo.h"
-#include "llvm/ADT/STLExtras.h"
-using namespace clang::extractapi;
-using namespace llvm;
+namespace clang {
AvailabilityInfo AvailabilityInfo::createFromDecl(const Decl *Decl) {
ASTContext &Context = Decl->getASTContext();
@@ -17,9 +28,9 @@ AvailabilityInfo AvailabilityInfo::createFromDecl(const Decl *Decl) {
for (const auto *A : RD->specific_attrs<AvailabilityAttr>()) {
if (A->getPlatform()->getName() != PlatformName)
continue;
- Availability =
- AvailabilityInfo(A->getPlatform()->getName(), A->getIntroduced(),
- A->getDeprecated(), A->getObsoleted(), false, false);
+ Availability = AvailabilityInfo(
+ A->getPlatform()->getName(), A->getIntroduced(), A->getDeprecated(),
+ A->getObsoleted(), A->getUnavailable(), false, false);
break;
}
@@ -33,3 +44,5 @@ AvailabilityInfo AvailabilityInfo::createFromDecl(const Decl *Decl) {
}
return Availability;
}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/AST/CommentParser.cpp b/contrib/llvm-project/clang/lib/AST/CommentParser.cpp
index 8adfd85d0160..d5e5bb27ceba 100644
--- a/contrib/llvm-project/clang/lib/AST/CommentParser.cpp
+++ b/contrib/llvm-project/clang/lib/AST/CommentParser.cpp
@@ -89,6 +89,31 @@ class TextTokenRetokenizer {
}
}
+ /// Extract a template type
+ bool lexTemplate(SmallString<32> &WordText) {
+ unsigned BracketCount = 0;
+ while (!isEnd()) {
+ const char C = peek();
+ WordText.push_back(C);
+ consumeChar();
+ switch (C) {
+ case '<': {
+ BracketCount++;
+ break;
+ }
+ case '>': {
+ BracketCount--;
+ if (!BracketCount)
+ return true;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ return false;
+ }
+
/// Add a token.
/// Returns true on success, false if there are no interesting tokens to
/// fetch from lexer.
@@ -149,6 +174,111 @@ public:
addToken();
}
+ /// Extract a type argument
+ bool lexType(Token &Tok) {
+ if (isEnd())
+ return false;
+
+ // Save current position in case we need to rollback because the type is
+ // empty.
+ Position SavedPos = Pos;
+
+ // Consume any leading whitespace.
+ consumeWhitespace();
+ SmallString<32> WordText;
+ const char *WordBegin = Pos.BufferPtr;
+ SourceLocation Loc = getSourceLocation();
+
+ while (!isEnd()) {
+ const char C = peek();
+ // For non-whitespace characters we check if it's a template or otherwise
+ // continue reading the text into a word.
+ if (!isWhitespace(C)) {
+ if (C == '<') {
+ if (!lexTemplate(WordText))
+ return false;
+ } else {
+ WordText.push_back(C);
+ consumeChar();
+ }
+ } else {
+ consumeChar();
+ break;
+ }
+ }
+
+ const unsigned Length = WordText.size();
+ if (Length == 0) {
+ Pos = SavedPos;
+ return false;
+ }
+
+ char *TextPtr = Allocator.Allocate<char>(Length + 1);
+
+ memcpy(TextPtr, WordText.c_str(), Length + 1);
+ StringRef Text = StringRef(TextPtr, Length);
+
+ formTokenWithChars(Tok, Loc, WordBegin, Length, Text);
+ return true;
+ }
+
+ // Check if this line starts with @par or \par
+ bool startsWithParCommand() {
+ unsigned Offset = 1;
+
+ // Skip all whitespace characters at the beginning.
+ // This needs to backtrack because Pos has already advanced past the
+ // actual \par or @par command by the time this function is called.
+ while (isWhitespace(*(Pos.BufferPtr - Offset)))
+ Offset++;
+
+ // Once we've reached the whitespace, backtrack and check if the previous
+ // four characters are \par or @par.
+ llvm::StringRef LineStart(Pos.BufferPtr - Offset - 3, 4);
+ return LineStart.starts_with("\\par") || LineStart.starts_with("@par");
+ }
+
+ /// Extract a par command argument-header.
+ bool lexParHeading(Token &Tok) {
+ if (isEnd())
+ return false;
+
+ Position SavedPos = Pos;
+
+ consumeWhitespace();
+ SmallString<32> WordText;
+ const char *WordBegin = Pos.BufferPtr;
+ SourceLocation Loc = getSourceLocation();
+
+ if (!startsWithParCommand())
+ return false;
+
+ // Read until the end of this token, which is effectively the end of the
+ // line. This gets us the content of the par header, if there is one.
+ while (!isEnd()) {
+ WordText.push_back(peek());
+ if (Pos.BufferPtr + 1 == Pos.BufferEnd) {
+ consumeChar();
+ break;
+ }
+ consumeChar();
+ }
+
+ unsigned Length = WordText.size();
+ if (Length == 0) {
+ Pos = SavedPos;
+ return false;
+ }
+
+ char *TextPtr = Allocator.Allocate<char>(Length + 1);
+
+ memcpy(TextPtr, WordText.c_str(), Length + 1);
+ StringRef Text = StringRef(TextPtr, Length);
+
+ formTokenWithChars(Tok, Loc, WordBegin, Length, Text);
+ return true;
+ }
+
/// Extract a word -- sequence of non-whitespace characters.
bool lexWord(Token &Tok) {
if (isEnd())
@@ -304,6 +434,41 @@ Parser::parseCommandArgs(TextTokenRetokenizer &Retokenizer, unsigned NumArgs) {
return llvm::ArrayRef(Args, ParsedArgs);
}
+ArrayRef<Comment::Argument>
+Parser::parseThrowCommandArgs(TextTokenRetokenizer &Retokenizer,
+ unsigned NumArgs) {
+ auto *Args = new (Allocator.Allocate<Comment::Argument>(NumArgs))
+ Comment::Argument[NumArgs];
+ unsigned ParsedArgs = 0;
+ Token Arg;
+
+ while (ParsedArgs < NumArgs && Retokenizer.lexType(Arg)) {
+ Args[ParsedArgs] = Comment::Argument{
+ SourceRange(Arg.getLocation(), Arg.getEndLocation()), Arg.getText()};
+ ParsedArgs++;
+ }
+
+ return llvm::ArrayRef(Args, ParsedArgs);
+}
+
+ArrayRef<Comment::Argument>
+Parser::parseParCommandArgs(TextTokenRetokenizer &Retokenizer,
+ unsigned NumArgs) {
+ assert(NumArgs > 0);
+ auto *Args = new (Allocator.Allocate<Comment::Argument>(NumArgs))
+ Comment::Argument[NumArgs];
+ unsigned ParsedArgs = 0;
+ Token Arg;
+
+ while (ParsedArgs < NumArgs && Retokenizer.lexParHeading(Arg)) {
+ Args[ParsedArgs] = Comment::Argument{
+ SourceRange(Arg.getLocation(), Arg.getEndLocation()), Arg.getText()};
+ ParsedArgs++;
+ }
+
+ return llvm::ArrayRef(Args, ParsedArgs);
+}
+
BlockCommandComment *Parser::parseBlockCommand() {
assert(Tok.is(tok::backslash_command) || Tok.is(tok::at_command));
@@ -356,6 +521,12 @@ BlockCommandComment *Parser::parseBlockCommand() {
parseParamCommandArgs(PC, Retokenizer);
else if (TPC)
parseTParamCommandArgs(TPC, Retokenizer);
+ else if (Info->IsThrowsCommand)
+ S.actOnBlockCommandArgs(
+ BC, parseThrowCommandArgs(Retokenizer, Info->NumArgs));
+ else if (Info->IsParCommand)
+ S.actOnBlockCommandArgs(BC,
+ parseParCommandArgs(Retokenizer, Info->NumArgs));
else
S.actOnBlockCommandArgs(BC, parseCommandArgs(Retokenizer, Info->NumArgs));
diff --git a/contrib/llvm-project/clang/lib/AST/ComparisonCategories.cpp b/contrib/llvm-project/clang/lib/AST/ComparisonCategories.cpp
index 58411201c3b0..28244104d663 100644
--- a/contrib/llvm-project/clang/lib/AST/ComparisonCategories.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ComparisonCategories.cpp
@@ -48,7 +48,7 @@ bool ComparisonCategoryInfo::ValueInfo::hasValidIntValue() const {
// Before we attempt to get the value of the first field, ensure that we
// actually have one (and only one) field.
- auto *Record = VD->getType()->getAsCXXRecordDecl();
+ const auto *Record = VD->getType()->getAsCXXRecordDecl();
if (std::distance(Record->field_begin(), Record->field_end()) != 1 ||
!Record->field_begin()->getType()->isIntegralOrEnumerationType())
return false;
@@ -98,13 +98,13 @@ static const NamespaceDecl *lookupStdNamespace(const ASTContext &Ctx,
return StdNS;
}
-static CXXRecordDecl *lookupCXXRecordDecl(const ASTContext &Ctx,
- const NamespaceDecl *StdNS,
- ComparisonCategoryType Kind) {
+static const CXXRecordDecl *lookupCXXRecordDecl(const ASTContext &Ctx,
+ const NamespaceDecl *StdNS,
+ ComparisonCategoryType Kind) {
StringRef Name = ComparisonCategories::getCategoryString(Kind);
DeclContextLookupResult Lookup = StdNS->lookup(&Ctx.Idents.get(Name));
if (!Lookup.empty())
- if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Lookup.front()))
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Lookup.front()))
return RD;
return nullptr;
}
@@ -116,7 +116,7 @@ ComparisonCategories::lookupInfo(ComparisonCategoryType Kind) const {
return &It->second;
if (const NamespaceDecl *NS = lookupStdNamespace(Ctx, StdNS))
- if (CXXRecordDecl *RD = lookupCXXRecordDecl(Ctx, NS, Kind))
+ if (const CXXRecordDecl *RD = lookupCXXRecordDecl(Ctx, NS, Kind))
return &Data.try_emplace((char)Kind, Ctx, RD, Kind).first->second;
return nullptr;
@@ -126,13 +126,13 @@ const ComparisonCategoryInfo *
ComparisonCategories::lookupInfoForType(QualType Ty) const {
assert(!Ty.isNull() && "type must be non-null");
using CCT = ComparisonCategoryType;
- auto *RD = Ty->getAsCXXRecordDecl();
+ const auto *RD = Ty->getAsCXXRecordDecl();
if (!RD)
return nullptr;
// Check to see if we have information for the specified type cached.
const auto *CanonRD = RD->getCanonicalDecl();
- for (auto &KV : Data) {
+ for (const auto &KV : Data) {
const ComparisonCategoryInfo &Info = KV.second;
if (CanonRD == Info.Record->getCanonicalDecl())
return &Info;
diff --git a/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp b/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp
index 584b58473294..62ca15ea398f 100644
--- a/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp
@@ -310,6 +310,16 @@ ExprDependence clang::computeDependence(CXXThisExpr *E) {
// 'this' is type-dependent if the class type of the enclosing
// member function is dependent (C++ [temp.dep.expr]p2)
auto D = toExprDependenceForImpliedType(E->getType()->getDependence());
+
+ // If a lambda with an explicit object parameter captures '*this', then
+ // 'this' now refers to the captured copy of lambda, and if the lambda
+ // is type-dependent, so is the object and thus 'this'.
+ //
+ // Note: The standard does not mention this case explicitly, but we need
+ // to do this so we can mark NSDM accesses as dependent.
+ if (E->isCapturedByCopyInLambdaWithExplicitObjectParameter())
+ D |= ExprDependence::Type;
+
assert(!(D & ExprDependence::UnexpandedPack));
return D;
}
@@ -364,6 +374,28 @@ ExprDependence clang::computeDependence(PackExpansionExpr *E) {
ExprDependence::TypeValueInstantiation;
}
+ExprDependence clang::computeDependence(PackIndexingExpr *E) {
+
+ ExprDependence PatternDep = E->getPackIdExpression()->getDependence() &
+ ~ExprDependence::UnexpandedPack;
+
+ ExprDependence D = E->getIndexExpr()->getDependence();
+ if (D & ExprDependence::TypeValueInstantiation)
+ D |= E->getIndexExpr()->getDependence() | PatternDep |
+ ExprDependence::Instantiation;
+
+ ArrayRef<Expr *> Exprs = E->getExpressions();
+ if (Exprs.empty())
+ D |= PatternDep | ExprDependence::Instantiation;
+
+ else if (!E->getIndexExpr()->isInstantiationDependent()) {
+ std::optional<unsigned> Index = E->getSelectedIndex();
+ assert(Index && *Index < Exprs.size() && "pack index out of bound");
+ D |= Exprs[*Index]->getDependence();
+ }
+ return D;
+}
+
ExprDependence clang::computeDependence(SubstNonTypeTemplateParmExpr *E) {
return E->getReplacement()->getDependence();
}
@@ -418,12 +450,17 @@ ExprDependence clang::computeDependence(ObjCIndirectCopyRestoreExpr *E) {
return E->getSubExpr()->getDependence();
}
-ExprDependence clang::computeDependence(OMPArraySectionExpr *E) {
+ExprDependence clang::computeDependence(ArraySectionExpr *E) {
auto D = E->getBase()->getDependence();
if (auto *LB = E->getLowerBound())
D |= LB->getDependence();
if (auto *Len = E->getLength())
D |= Len->getDependence();
+
+ if (E->isOMPArraySection()) {
+ if (auto *Stride = E->getStride())
+ D |= Stride->getDependence();
+ }
return D;
}
@@ -639,6 +676,9 @@ ExprDependence clang::computeDependence(MemberExpr *E) {
D |= toExprDependence(NNS->getDependence() &
~NestedNameSpecifierDependence::Dependent);
+ for (const auto &A : E->template_arguments())
+ D |= toExprDependence(A.getArgument().getDependence());
+
auto *MemberDecl = E->getMemberDecl();
if (FieldDecl *FD = dyn_cast<FieldDecl>(MemberDecl)) {
DeclContext *DC = MemberDecl->getDeclContext();
@@ -655,7 +695,6 @@ ExprDependence clang::computeDependence(MemberExpr *E) {
D |= ExprDependence::Type;
}
}
- // FIXME: move remaining dependence computation from MemberExpr::Create()
return D;
}
diff --git a/contrib/llvm-project/clang/lib/AST/Decl.cpp b/contrib/llvm-project/clang/lib/AST/Decl.cpp
index 1ee33fd7576d..bc7cce0bcd7f 100644
--- a/contrib/llvm-project/clang/lib/AST/Decl.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Decl.cpp
@@ -612,19 +612,26 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
assert(D->getDeclContext()->getRedeclContext()->isFileContext() &&
"Not a name having namespace scope");
ASTContext &Context = D->getASTContext();
+ const auto *Var = dyn_cast<VarDecl>(D);
// C++ [basic.link]p3:
// A name having namespace scope (3.3.6) has internal linkage if it
// is the name of
- if (getStorageClass(D->getCanonicalDecl()) == SC_Static) {
+ if ((getStorageClass(D->getCanonicalDecl()) == SC_Static) ||
+ (Context.getLangOpts().C23 && Var && Var->isConstexpr())) {
// - a variable, variable template, function, or function template
// that is explicitly declared static; or
// (This bullet corresponds to C99 6.2.2p3.)
+
+ // C23 6.2.2p3
+ // If the declaration of a file scope identifier for
+ // an object contains any of the storage-class specifiers static or
+ // constexpr then the identifier has internal linkage.
return LinkageInfo::internal();
}
- if (const auto *Var = dyn_cast<VarDecl>(D)) {
+ if (Var) {
// - a non-template variable of non-volatile const-qualified type, unless
// - it is explicitly declared extern, or
// - it is declared in the purview of a module interface unit
@@ -1174,13 +1181,6 @@ Linkage NamedDecl::getLinkageInternal() const {
.getLinkage();
}
-/// Determine whether D is attached to a named module.
-static bool isInNamedModule(const NamedDecl *D) {
- if (auto *M = D->getOwningModule())
- return M->isNamedModule();
- return false;
-}
-
static bool isExportedFromModuleInterfaceUnit(const NamedDecl *D) {
// FIXME: Handle isModulePrivate.
switch (D->getModuleOwnershipKind()) {
@@ -1190,7 +1190,7 @@ static bool isExportedFromModuleInterfaceUnit(const NamedDecl *D) {
return false;
case Decl::ModuleOwnershipKind::Visible:
case Decl::ModuleOwnershipKind::VisibleWhenImported:
- return isInNamedModule(D);
+ return D->isInNamedModule();
}
llvm_unreachable("unexpected module ownership kind");
}
@@ -1208,7 +1208,7 @@ Linkage NamedDecl::getFormalLinkage() const {
// [basic.namespace.general]/p2
// A namespace is never attached to a named module and never has a name with
// module linkage.
- if (isInNamedModule(this) && InternalLinkage == Linkage::External &&
+ if (isInNamedModule() && InternalLinkage == Linkage::External &&
!isExportedFromModuleInterfaceUnit(
cast<NamedDecl>(this->getCanonicalDecl())) &&
!isa<NamespaceDecl>(this))
@@ -1621,7 +1621,7 @@ LinkageInfo LinkageComputer::getDeclLinkageAndVisibility(const NamedDecl *D) {
: CK);
}
-Module *Decl::getOwningModuleForLinkage(bool IgnoreLinkage) const {
+Module *Decl::getOwningModuleForLinkage() const {
if (isa<NamespaceDecl>(this))
// Namespaces never have module linkage. It is the entities within them
// that [may] do.
@@ -1644,24 +1644,9 @@ Module *Decl::getOwningModuleForLinkage(bool IgnoreLinkage) const {
case Module::ModuleHeaderUnit:
case Module::ExplicitGlobalModuleFragment:
- case Module::ImplicitGlobalModuleFragment: {
- // External linkage declarations in the global module have no owning module
- // for linkage purposes. But internal linkage declarations in the global
- // module fragment of a particular module are owned by that module for
- // linkage purposes.
- // FIXME: p1815 removes the need for this distinction -- there are no
- // internal linkage declarations that need to be referred to from outside
- // this TU.
- if (IgnoreLinkage)
- return nullptr;
- bool InternalLinkage;
- if (auto *ND = dyn_cast<NamedDecl>(this))
- InternalLinkage = !ND->hasExternalFormalLinkage();
- else
- InternalLinkage = isInAnonymousNamespace();
- return InternalLinkage ? M->Kind == Module::ModuleHeaderUnit ? M : M->Parent
- : nullptr;
- }
+ case Module::ImplicitGlobalModuleFragment:
+ // The global module shouldn't change the linkage.
+ return nullptr;
case Module::PrivateModuleFragment:
// The private module fragment is part of its containing module for linkage
@@ -2151,7 +2136,7 @@ VarDecl *VarDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation StartL,
return new (C, DC) VarDecl(Var, C, DC, StartL, IdL, Id, T, TInfo, S);
}
-VarDecl *VarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+VarDecl *VarDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID)
VarDecl(Var, C, nullptr, SourceLocation(), SourceLocation(), nullptr,
QualType(), nullptr, SC_None);
@@ -2397,6 +2382,9 @@ bool VarDecl::hasInit() const {
if (P->hasUnparsedDefaultArg() || P->hasUninstantiatedDefaultArg())
return false;
+ if (auto *Eval = getEvaluatedStmt())
+ return Eval->Value.isValid();
+
return !Init.isNull();
}
@@ -2408,9 +2396,9 @@ Expr *VarDecl::getInit() {
return cast<Expr>(S);
auto *Eval = getEvaluatedStmt();
- return cast<Expr>(Eval->Value.isOffset()
- ? Eval->Value.get(getASTContext().getExternalSource())
- : Eval->Value.get(nullptr));
+
+ return cast<Expr>(Eval->Value.get(
+ Eval->Value.isOffset() ? getASTContext().getExternalSource() : nullptr));
}
Stmt **VarDecl::getInitAddress() {
@@ -2465,7 +2453,7 @@ bool VarDecl::mightBeUsableInConstantExpressions(const ASTContext &C) const {
// OpenCL permits const integral variables to be used in constant
// expressions, like in C++98.
- if (!Lang.CPlusPlus && !Lang.OpenCL)
+ if (!Lang.CPlusPlus && !Lang.OpenCL && !Lang.C23)
return false;
// Function parameters are never usable in constant expressions.
@@ -2487,14 +2475,19 @@ bool VarDecl::mightBeUsableInConstantExpressions(const ASTContext &C) const {
if (!getType().isConstant(C) || getType().isVolatileQualified())
return false;
- // In C++, const, non-volatile variables of integral or enumeration types
- // can be used in constant expressions.
- if (getType()->isIntegralOrEnumerationType())
+ // In C++, but not in C, const, non-volatile variables of integral or
+ // enumeration types can be used in constant expressions.
+ if (getType()->isIntegralOrEnumerationType() && !Lang.C23)
return true;
+ // C23 6.6p7: An identifier that is:
+ // ...
+ // - declared with storage-class specifier constexpr and has an object type,
+ // is a named constant, ... such a named constant is a constant expression
+ // with the type and value of the declared object.
// Additionally, in C++11, non-volatile constexpr variables can be used in
// constant expressions.
- return Lang.CPlusPlus11 && isConstexpr();
+ return (Lang.CPlusPlus11 || Lang.C23) && isConstexpr();
}
bool VarDecl::isUsableInConstantExpressions(const ASTContext &Context) const {
@@ -2510,7 +2503,8 @@ bool VarDecl::isUsableInConstantExpressions(const ASTContext &Context) const {
if (!DefVD->mightBeUsableInConstantExpressions(Context))
return false;
// ... and its initializer is a constant initializer.
- if (Context.getLangOpts().CPlusPlus && !DefVD->hasConstantInitialization())
+ if ((Context.getLangOpts().CPlusPlus || getLangOpts().C23) &&
+ !DefVD->hasConstantInitialization())
return false;
// C++98 [expr.const]p1:
// An integral constant-expression can involve only [...] const variables
@@ -2572,10 +2566,13 @@ APValue *VarDecl::evaluateValueImpl(SmallVectorImpl<PartialDiagnosticAt> &Notes,
bool Result = Init->EvaluateAsInitializer(Eval->Evaluated, Ctx, this, Notes,
IsConstantInitialization);
- // In C++, this isn't a constant initializer if we produced notes. In that
- // case, we can't keep the result, because it may only be correct under the
- // assumption that the initializer is a constant context.
- if (IsConstantInitialization && Ctx.getLangOpts().CPlusPlus &&
+ // In C++, or in C23 if we're initialising a 'constexpr' variable, this isn't
+ // a constant initializer if we produced notes. In that case, we can't keep
+ // the result, because it may only be correct under the assumption that the
+ // initializer is a constant context.
+ if (IsConstantInitialization &&
+ (Ctx.getLangOpts().CPlusPlus ||
+ (isConstexpr() && Ctx.getLangOpts().C23)) &&
!Notes.empty())
Result = false;
@@ -2614,8 +2611,11 @@ bool VarDecl::hasICEInitializer(const ASTContext &Context) const {
}
bool VarDecl::hasConstantInitialization() const {
- // In C, all globals (and only globals) have constant initialization.
- if (hasGlobalStorage() && !getASTContext().getLangOpts().CPlusPlus)
+ // In C, all globals and constexpr variables should have constant
+ // initialization. For constexpr variables in C check that initializer is a
+ // constant initializer because they can be used in constant expressions.
+ if (hasGlobalStorage() && !getASTContext().getLangOpts().CPlusPlus &&
+ !isConstexpr())
return true;
// In C++, it depends on whether the evaluation at the point of definition
@@ -2634,7 +2634,9 @@ bool VarDecl::checkForConstantInitialization(
// std::is_constant_evaluated()).
assert(!Eval->WasEvaluated &&
"already evaluated var value before checking for constant init");
- assert(getASTContext().getLangOpts().CPlusPlus && "only meaningful in C++");
+ assert((getASTContext().getLangOpts().CPlusPlus ||
+ getASTContext().getLangOpts().C23) &&
+ "only meaningful in C++/C23");
assert(!getInit()->isValueDependent());
@@ -2830,7 +2832,7 @@ bool VarDecl::hasFlexibleArrayInit(const ASTContext &Ctx) const {
auto InitTy = Ctx.getAsConstantArrayType(FlexibleInit->getType());
if (!InitTy)
return false;
- return InitTy->getSize() != 0;
+ return !InitTy->isZeroSize();
}
CharUnits VarDecl::getFlexibleArrayInitChars(const ASTContext &Ctx) const {
@@ -2903,10 +2905,10 @@ VarDecl::setInstantiationOfStaticDataMember(VarDecl *VD,
//===----------------------------------------------------------------------===//
ParmVarDecl *ParmVarDecl::Create(ASTContext &C, DeclContext *DC,
- SourceLocation StartLoc,
- SourceLocation IdLoc, IdentifierInfo *Id,
- QualType T, TypeSourceInfo *TInfo,
- StorageClass S, Expr *DefArg) {
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ const IdentifierInfo *Id, QualType T,
+ TypeSourceInfo *TInfo, StorageClass S,
+ Expr *DefArg) {
return new (C, DC) ParmVarDecl(ParmVar, C, DC, StartLoc, IdLoc, Id, T, TInfo,
S, DefArg);
}
@@ -2919,7 +2921,7 @@ QualType ParmVarDecl::getOriginalType() const {
return T;
}
-ParmVarDecl *ParmVarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ParmVarDecl *ParmVarDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID)
ParmVarDecl(ParmVar, C, nullptr, SourceLocation(), SourceLocation(),
nullptr, QualType(), nullptr, SC_None, nullptr);
@@ -3048,7 +3050,7 @@ FunctionDecl::FunctionDecl(Kind DK, ASTContext &C, DeclContext *DC,
FunctionDeclBits.IsTrivialForCall = false;
FunctionDeclBits.IsDefaulted = false;
FunctionDeclBits.IsExplicitlyDefaulted = false;
- FunctionDeclBits.HasDefaultedFunctionInfo = false;
+ FunctionDeclBits.HasDefaultedOrDeletedInfo = false;
FunctionDeclBits.IsIneligibleOrNotSelected = false;
FunctionDeclBits.HasImplicitReturnZero = false;
FunctionDeclBits.IsLateTemplateParsed = false;
@@ -3082,30 +3084,65 @@ bool FunctionDecl::isVariadic() const {
return false;
}
-FunctionDecl::DefaultedFunctionInfo *
-FunctionDecl::DefaultedFunctionInfo::Create(ASTContext &Context,
- ArrayRef<DeclAccessPair> Lookups) {
- DefaultedFunctionInfo *Info = new (Context.Allocate(
- totalSizeToAlloc<DeclAccessPair>(Lookups.size()),
- std::max(alignof(DefaultedFunctionInfo), alignof(DeclAccessPair))))
- DefaultedFunctionInfo;
+FunctionDecl::DefaultedOrDeletedFunctionInfo *
+FunctionDecl::DefaultedOrDeletedFunctionInfo::Create(
+ ASTContext &Context, ArrayRef<DeclAccessPair> Lookups,
+ StringLiteral *DeletedMessage) {
+ static constexpr size_t Alignment =
+ std::max({alignof(DefaultedOrDeletedFunctionInfo),
+ alignof(DeclAccessPair), alignof(StringLiteral *)});
+ size_t Size = totalSizeToAlloc<DeclAccessPair, StringLiteral *>(
+ Lookups.size(), DeletedMessage != nullptr);
+
+ DefaultedOrDeletedFunctionInfo *Info =
+ new (Context.Allocate(Size, Alignment)) DefaultedOrDeletedFunctionInfo;
Info->NumLookups = Lookups.size();
+ Info->HasDeletedMessage = DeletedMessage != nullptr;
+
std::uninitialized_copy(Lookups.begin(), Lookups.end(),
Info->getTrailingObjects<DeclAccessPair>());
+ if (DeletedMessage)
+ *Info->getTrailingObjects<StringLiteral *>() = DeletedMessage;
return Info;
}
-void FunctionDecl::setDefaultedFunctionInfo(DefaultedFunctionInfo *Info) {
- assert(!FunctionDeclBits.HasDefaultedFunctionInfo && "already have this");
+void FunctionDecl::setDefaultedOrDeletedInfo(
+ DefaultedOrDeletedFunctionInfo *Info) {
+ assert(!FunctionDeclBits.HasDefaultedOrDeletedInfo && "already have this");
assert(!Body && "can't replace function body with defaulted function info");
- FunctionDeclBits.HasDefaultedFunctionInfo = true;
- DefaultedInfo = Info;
+ FunctionDeclBits.HasDefaultedOrDeletedInfo = true;
+ DefaultedOrDeletedInfo = Info;
+}
+
+void FunctionDecl::setDeletedAsWritten(bool D, StringLiteral *Message) {
+ FunctionDeclBits.IsDeleted = D;
+
+ if (Message) {
+ assert(isDeletedAsWritten() && "Function must be deleted");
+ if (FunctionDeclBits.HasDefaultedOrDeletedInfo)
+ DefaultedOrDeletedInfo->setDeletedMessage(Message);
+ else
+ setDefaultedOrDeletedInfo(DefaultedOrDeletedFunctionInfo::Create(
+ getASTContext(), /*Lookups=*/{}, Message));
+ }
+}
+
+void FunctionDecl::DefaultedOrDeletedFunctionInfo::setDeletedMessage(
+ StringLiteral *Message) {
+ // We should never get here with the DefaultedOrDeletedInfo populated, but
+ // no space allocated for the deleted message, since that would require
+ // recreating this, but setDefaultedOrDeletedInfo() disallows overwriting
+ // an already existing DefaultedOrDeletedFunctionInfo.
+ assert(HasDeletedMessage &&
+ "No space to store a delete message in this DefaultedOrDeletedInfo");
+ *getTrailingObjects<StringLiteral *>() = Message;
}
-FunctionDecl::DefaultedFunctionInfo *
-FunctionDecl::getDefaultedFunctionInfo() const {
- return FunctionDeclBits.HasDefaultedFunctionInfo ? DefaultedInfo : nullptr;
+FunctionDecl::DefaultedOrDeletedFunctionInfo *
+FunctionDecl::getDefalutedOrDeletedInfo() const {
+ return FunctionDeclBits.HasDefaultedOrDeletedInfo ? DefaultedOrDeletedInfo
+ : nullptr;
}
bool FunctionDecl::hasBody(const FunctionDecl *&Definition) const {
@@ -3192,7 +3229,7 @@ Stmt *FunctionDecl::getBody(const FunctionDecl *&Definition) const {
if (!hasBody(Definition))
return nullptr;
- assert(!Definition->FunctionDeclBits.HasDefaultedFunctionInfo &&
+ assert(!Definition->FunctionDeclBits.HasDefaultedOrDeletedInfo &&
"definition should not have a body");
if (Definition->Body)
return Definition->Body.get(getASTContext().getExternalSource());
@@ -3201,7 +3238,7 @@ Stmt *FunctionDecl::getBody(const FunctionDecl *&Definition) const {
}
void FunctionDecl::setBody(Stmt *B) {
- FunctionDeclBits.HasDefaultedFunctionInfo = false;
+ FunctionDeclBits.HasDefaultedOrDeletedInfo = false;
Body = LazyDeclStmtPtr(B);
if (B)
EndRangeLoc = B->getEndLoc();
@@ -3537,10 +3574,23 @@ bool FunctionDecl::isTargetMultiVersion() const {
(hasAttr<TargetAttr>() || hasAttr<TargetVersionAttr>());
}
+bool FunctionDecl::isTargetMultiVersionDefault() const {
+ if (!isMultiVersion())
+ return false;
+ if (hasAttr<TargetAttr>())
+ return getAttr<TargetAttr>()->isDefaultVersion();
+ return hasAttr<TargetVersionAttr>() &&
+ getAttr<TargetVersionAttr>()->isDefaultVersion();
+}
+
bool FunctionDecl::isTargetClonesMultiVersion() const {
return isMultiVersion() && hasAttr<TargetClonesAttr>();
}
+bool FunctionDecl::isTargetVersionMultiVersion() const {
+ return isMultiVersion() && hasAttr<TargetVersionAttr>();
+}
+
void
FunctionDecl::setPreviousDeclaration(FunctionDecl *PrevDecl) {
redeclarable_base::setPreviousDecl(PrevDecl);
@@ -4141,14 +4191,12 @@ FunctionDecl::getTemplateSpecializationArgsAsWritten() const {
return nullptr;
}
-void
-FunctionDecl::setFunctionTemplateSpecialization(ASTContext &C,
- FunctionTemplateDecl *Template,
- const TemplateArgumentList *TemplateArgs,
- void *InsertPos,
- TemplateSpecializationKind TSK,
- const TemplateArgumentListInfo *TemplateArgsAsWritten,
- SourceLocation PointOfInstantiation) {
+void FunctionDecl::setFunctionTemplateSpecialization(
+ ASTContext &C, FunctionTemplateDecl *Template,
+ TemplateArgumentList *TemplateArgs, void *InsertPos,
+ TemplateSpecializationKind TSK,
+ const TemplateArgumentListInfo *TemplateArgsAsWritten,
+ SourceLocation PointOfInstantiation) {
assert((TemplateOrSpecialization.isNull() ||
TemplateOrSpecialization.is<MemberSpecializationInfo *>()) &&
"Member function is already a specialization");
@@ -4476,7 +4524,7 @@ unsigned FunctionDecl::getODRHash() {
}
class ODRHash Hash;
- Hash.AddFunctionDecl(this, /*SkipBody=*/shouldSkipCheckingODR());
+ Hash.AddFunctionDecl(this);
setHasODRHash(true);
ODRHash = Hash.CalculateHash();
return ODRHash;
@@ -4488,14 +4536,14 @@ unsigned FunctionDecl::getODRHash() {
FieldDecl *FieldDecl::Create(const ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
- IdentifierInfo *Id, QualType T,
+ const IdentifierInfo *Id, QualType T,
TypeSourceInfo *TInfo, Expr *BW, bool Mutable,
InClassInitStyle InitStyle) {
return new (C, DC) FieldDecl(Decl::Field, DC, StartLoc, IdLoc, Id, T, TInfo,
BW, Mutable, InitStyle);
}
-FieldDecl *FieldDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+FieldDecl *FieldDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) FieldDecl(Field, nullptr, SourceLocation(),
SourceLocation(), nullptr, QualType(), nullptr,
nullptr, false, ICIS_NoInit);
@@ -4539,7 +4587,7 @@ unsigned FieldDecl::getBitWidthValue(const ASTContext &Ctx) const {
}
bool FieldDecl::isZeroLengthBitField(const ASTContext &Ctx) const {
- return isUnnamedBitfield() && !getBitWidth()->isValueDependent() &&
+ return isUnnamedBitField() && !getBitWidth()->isValueDependent() &&
getBitWidthValue(Ctx) == 0;
}
@@ -4805,7 +4853,7 @@ EnumDecl *EnumDecl::Create(ASTContext &C, DeclContext *DC,
return Enum;
}
-EnumDecl *EnumDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+EnumDecl *EnumDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
EnumDecl *Enum =
new (C, ID) EnumDecl(C, nullptr, SourceLocation(), SourceLocation(),
nullptr, nullptr, false, false, false);
@@ -4967,7 +5015,8 @@ RecordDecl *RecordDecl::Create(const ASTContext &C, TagKind TK, DeclContext *DC,
return R;
}
-RecordDecl *RecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
+RecordDecl *RecordDecl::CreateDeserialized(const ASTContext &C,
+ GlobalDeclID ID) {
RecordDecl *R = new (C, ID)
RecordDecl(Record, TagTypeKind::Struct, C, nullptr, SourceLocation(),
SourceLocation(), nullptr, nullptr);
@@ -5029,7 +5078,13 @@ void RecordDecl::completeDefinition() {
// Layouts are dumped when computed, so if we are dumping for all complete
// types, we need to force usage to get types that wouldn't be used elsewhere.
- if (Ctx.getLangOpts().DumpRecordLayoutsComplete)
+ //
+ // If the type is dependent, then we can't compute its layout because there
+ // is no way for us to know the size or alignment of a dependent type. Also
+ // ignore declarations marked as invalid since 'getASTRecordLayout()' asserts
+ // on that.
+ if (Ctx.getLangOpts().DumpRecordLayoutsComplete && !isDependentType() &&
+ !isInvalidDecl())
(void)Ctx.getASTRecordLayout(this);
}
@@ -5210,6 +5265,13 @@ TranslationUnitDecl *TranslationUnitDecl::Create(ASTContext &C) {
return new (C, (DeclContext *)nullptr) TranslationUnitDecl(C);
}
+void TranslationUnitDecl::setAnonymousNamespace(NamespaceDecl *D) {
+ AnonymousNamespace = D;
+
+ if (ASTMutationListener *Listener = Ctx.getASTMutationListener())
+ Listener->AddedAnonymousNamespace(this, D);
+}
+
void PragmaCommentDecl::anchor() {}
PragmaCommentDecl *PragmaCommentDecl::Create(const ASTContext &C,
@@ -5226,7 +5288,7 @@ PragmaCommentDecl *PragmaCommentDecl::Create(const ASTContext &C,
}
PragmaCommentDecl *PragmaCommentDecl::CreateDeserialized(ASTContext &C,
- unsigned ID,
+ GlobalDeclID ID,
unsigned ArgSize) {
return new (C, ID, additionalSizeToAlloc<char>(ArgSize + 1))
PragmaCommentDecl(nullptr, SourceLocation(), PCK_Unknown);
@@ -5251,7 +5313,7 @@ PragmaDetectMismatchDecl::Create(const ASTContext &C, TranslationUnitDecl *DC,
}
PragmaDetectMismatchDecl *
-PragmaDetectMismatchDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+PragmaDetectMismatchDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID,
unsigned NameValueSize) {
return new (C, ID, additionalSizeToAlloc<char>(NameValueSize + 1))
PragmaDetectMismatchDecl(nullptr, SourceLocation(), 0);
@@ -5278,7 +5340,7 @@ LabelDecl *LabelDecl::Create(ASTContext &C, DeclContext *DC,
return new (C, DC) LabelDecl(DC, IdentL, II, nullptr, GnuLabelL);
}
-LabelDecl *LabelDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+LabelDecl *LabelDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) LabelDecl(nullptr, SourceLocation(), nullptr, nullptr,
SourceLocation());
}
@@ -5319,7 +5381,7 @@ ImplicitParamDecl *ImplicitParamDecl::Create(ASTContext &C, QualType Type,
}
ImplicitParamDecl *ImplicitParamDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ GlobalDeclID ID) {
return new (C, ID) ImplicitParamDecl(C, QualType(), ImplicitParamKind::Other);
}
@@ -5337,7 +5399,7 @@ FunctionDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
return New;
}
-FunctionDecl *FunctionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+FunctionDecl *FunctionDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) FunctionDecl(
Function, C, nullptr, SourceLocation(), DeclarationNameInfo(), QualType(),
nullptr, SC_None, false, false, ConstexprSpecKind::Unspecified, nullptr);
@@ -5347,7 +5409,7 @@ BlockDecl *BlockDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) {
return new (C, DC) BlockDecl(DC, L);
}
-BlockDecl *BlockDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+BlockDecl *BlockDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) BlockDecl(nullptr, SourceLocation());
}
@@ -5361,7 +5423,7 @@ CapturedDecl *CapturedDecl::Create(ASTContext &C, DeclContext *DC,
CapturedDecl(DC, NumParams);
}
-CapturedDecl *CapturedDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+CapturedDecl *CapturedDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID,
unsigned NumParams) {
return new (C, ID, additionalSizeToAlloc<ImplicitParamDecl *>(NumParams))
CapturedDecl(nullptr, NumParams);
@@ -5387,8 +5449,8 @@ EnumConstantDecl *EnumConstantDecl::Create(ASTContext &C, EnumDecl *CD,
return new (C, CD) EnumConstantDecl(C, CD, L, Id, T, E, V);
}
-EnumConstantDecl *
-EnumConstantDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+EnumConstantDecl *EnumConstantDecl::CreateDeserialized(ASTContext &C,
+ GlobalDeclID ID) {
return new (C, ID) EnumConstantDecl(C, nullptr, SourceLocation(), nullptr,
QualType(), nullptr, llvm::APSInt());
}
@@ -5409,13 +5471,13 @@ IndirectFieldDecl::IndirectFieldDecl(ASTContext &C, DeclContext *DC,
IndirectFieldDecl *
IndirectFieldDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
- IdentifierInfo *Id, QualType T,
+ const IdentifierInfo *Id, QualType T,
llvm::MutableArrayRef<NamedDecl *> CH) {
return new (C, DC) IndirectFieldDecl(C, DC, L, Id, T, CH);
}
IndirectFieldDecl *IndirectFieldDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ GlobalDeclID ID) {
return new (C, ID)
IndirectFieldDecl(C, nullptr, SourceLocation(), DeclarationName(),
QualType(), std::nullopt);
@@ -5432,7 +5494,8 @@ void TypeDecl::anchor() {}
TypedefDecl *TypedefDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
- IdentifierInfo *Id, TypeSourceInfo *TInfo) {
+ const IdentifierInfo *Id,
+ TypeSourceInfo *TInfo) {
return new (C, DC) TypedefDecl(C, DC, StartLoc, IdLoc, Id, TInfo);
}
@@ -5475,19 +5538,21 @@ bool TypedefNameDecl::isTransparentTagSlow() const {
return isTransparent;
}
-TypedefDecl *TypedefDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+TypedefDecl *TypedefDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) TypedefDecl(C, nullptr, SourceLocation(), SourceLocation(),
nullptr, nullptr);
}
TypeAliasDecl *TypeAliasDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc,
- SourceLocation IdLoc, IdentifierInfo *Id,
+ SourceLocation IdLoc,
+ const IdentifierInfo *Id,
TypeSourceInfo *TInfo) {
return new (C, DC) TypeAliasDecl(C, DC, StartLoc, IdLoc, Id, TInfo);
}
-TypeAliasDecl *TypeAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+TypeAliasDecl *TypeAliasDecl::CreateDeserialized(ASTContext &C,
+ GlobalDeclID ID) {
return new (C, ID) TypeAliasDecl(C, nullptr, SourceLocation(),
SourceLocation(), nullptr, nullptr);
}
@@ -5518,7 +5583,7 @@ FileScopeAsmDecl *FileScopeAsmDecl::Create(ASTContext &C, DeclContext *DC,
}
FileScopeAsmDecl *FileScopeAsmDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ GlobalDeclID ID) {
return new (C, ID) FileScopeAsmDecl(nullptr, nullptr, SourceLocation(),
SourceLocation());
}
@@ -5526,18 +5591,17 @@ FileScopeAsmDecl *FileScopeAsmDecl::CreateDeserialized(ASTContext &C,
void TopLevelStmtDecl::anchor() {}
TopLevelStmtDecl *TopLevelStmtDecl::Create(ASTContext &C, Stmt *Statement) {
- assert(Statement);
assert(C.getLangOpts().IncrementalExtensions &&
"Must be used only in incremental mode");
- SourceLocation BeginLoc = Statement->getBeginLoc();
+ SourceLocation Loc = Statement ? Statement->getBeginLoc() : SourceLocation();
DeclContext *DC = C.getTranslationUnitDecl();
- return new (C, DC) TopLevelStmtDecl(DC, BeginLoc, Statement);
+ return new (C, DC) TopLevelStmtDecl(DC, Loc, Statement);
}
TopLevelStmtDecl *TopLevelStmtDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ GlobalDeclID ID) {
return new (C, ID)
TopLevelStmtDecl(/*DC=*/nullptr, SourceLocation(), /*S=*/nullptr);
}
@@ -5546,13 +5610,19 @@ SourceRange TopLevelStmtDecl::getSourceRange() const {
return SourceRange(getLocation(), Statement->getEndLoc());
}
+void TopLevelStmtDecl::setStmt(Stmt *S) {
+ assert(S);
+ Statement = S;
+ setLocation(Statement->getBeginLoc());
+}
+
void EmptyDecl::anchor() {}
EmptyDecl *EmptyDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) {
return new (C, DC) EmptyDecl(DC, L);
}
-EmptyDecl *EmptyDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+EmptyDecl *EmptyDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) EmptyDecl(nullptr, SourceLocation());
}
@@ -5585,7 +5655,8 @@ HLSLBufferDecl *HLSLBufferDecl::Create(ASTContext &C,
return Result;
}
-HLSLBufferDecl *HLSLBufferDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+HLSLBufferDecl *HLSLBufferDecl::CreateDeserialized(ASTContext &C,
+ GlobalDeclID ID) {
return new (C, ID) HLSLBufferDecl(nullptr, false, SourceLocation(), nullptr,
SourceLocation(), SourceLocation());
}
@@ -5641,7 +5712,7 @@ ImportDecl *ImportDecl::CreateImplicit(ASTContext &C, DeclContext *DC,
return Import;
}
-ImportDecl *ImportDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+ImportDecl *ImportDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID,
unsigned NumLocations) {
return new (C, ID, additionalSizeToAlloc<SourceLocation>(NumLocations))
ImportDecl(EmptyShell());
@@ -5674,6 +5745,21 @@ ExportDecl *ExportDecl::Create(ASTContext &C, DeclContext *DC,
return new (C, DC) ExportDecl(DC, ExportLoc);
}
-ExportDecl *ExportDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ExportDecl *ExportDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) ExportDecl(nullptr, SourceLocation());
}
+
+bool clang::IsArmStreamingFunction(const FunctionDecl *FD,
+ bool IncludeLocallyStreaming) {
+ if (IncludeLocallyStreaming)
+ if (FD->hasAttr<ArmLocallyStreamingAttr>())
+ return true;
+
+ if (const Type *Ty = FD->getType().getTypePtrOrNull())
+ if (const auto *FPT = Ty->getAs<FunctionProtoType>())
+ if (FPT->getAArch64SMEAttributes() &
+ FunctionType::SME_PStateSMEnabledMask)
+ return true;
+
+ return false;
+}
diff --git a/contrib/llvm-project/clang/lib/AST/DeclBase.cpp b/contrib/llvm-project/clang/lib/AST/DeclBase.cpp
index 6b3c13ff206d..c4e948a38e26 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclBase.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclBase.cpp
@@ -71,21 +71,20 @@ void Decl::updateOutOfDate(IdentifierInfo &II) const {
#include "clang/AST/DeclNodes.inc"
void *Decl::operator new(std::size_t Size, const ASTContext &Context,
- unsigned ID, std::size_t Extra) {
+ GlobalDeclID ID, std::size_t Extra) {
// Allocate an extra 8 bytes worth of storage, which ensures that the
// resulting pointer will still be 8-byte aligned.
- static_assert(sizeof(unsigned) * 2 >= alignof(Decl),
- "Decl won't be misaligned");
+ static_assert(sizeof(uint64_t) >= alignof(Decl), "Decl won't be misaligned");
void *Start = Context.Allocate(Size + Extra + 8);
void *Result = (char*)Start + 8;
- unsigned *PrefixPtr = (unsigned *)Result - 2;
+ uint64_t *PrefixPtr = (uint64_t *)Result - 1;
- // Zero out the first 4 bytes; this is used to store the owning module ID.
- PrefixPtr[0] = 0;
+ *PrefixPtr = ID.getRawValue();
- // Store the global declaration ID in the second 4 bytes.
- PrefixPtr[1] = ID;
+ // We leave the upper 16 bits to store the module IDs. 48 bits should be
+ // sufficient to store a declaration ID.
+ assert(*PrefixPtr < llvm::maskTrailingOnes<uint64_t>(48));
return Result;
}
@@ -111,6 +110,29 @@ void *Decl::operator new(std::size_t Size, const ASTContext &Ctx,
return ::operator new(Size + Extra, Ctx);
}
+GlobalDeclID Decl::getGlobalID() const {
+ if (!isFromASTFile())
+ return GlobalDeclID();
+ // See the comments in `Decl::operator new` for details.
+ uint64_t ID = *((const uint64_t *)this - 1);
+ return GlobalDeclID(ID & llvm::maskTrailingOnes<uint64_t>(48));
+}
+
+unsigned Decl::getOwningModuleID() const {
+ if (!isFromASTFile())
+ return 0;
+
+ uint64_t ID = *((const uint64_t *)this - 1);
+ return ID >> 48;
+}
+
+void Decl::setOwningModuleID(unsigned ID) {
+ assert(isFromASTFile() && "Only works on a deserialized declaration");
+ uint64_t *IDAddress = (uint64_t *)this - 1;
+ *IDAddress &= llvm::maskTrailingOnes<uint64_t>(48);
+ *IDAddress |= (uint64_t)ID << 48;
+}
+
Module *Decl::getOwningModuleSlow() const {
assert(isFromASTFile() && "Not from AST file?");
return getASTContext().getExternalSource()->getModule(getOwningModuleID());
@@ -402,7 +424,7 @@ bool Decl::isInAnonymousNamespace() const {
bool Decl::isInStdNamespace() const {
const DeclContext *DC = getDeclContext();
- return DC && DC->isStdNamespace();
+ return DC && DC->getNonTransparentContext()->isStdNamespace();
}
bool Decl::isFileContextDecl() const {
@@ -666,12 +688,29 @@ static AvailabilityResult CheckAvailability(ASTContext &Context,
// Make sure that this declaration has already been introduced.
if (!A->getIntroduced().empty() &&
EnclosingVersion < A->getIntroduced()) {
- if (Message) {
- Message->clear();
- llvm::raw_string_ostream Out(*Message);
- VersionTuple VTI(A->getIntroduced());
- Out << "introduced in " << PrettyPlatformName << ' '
- << VTI << HintMessage;
+ IdentifierInfo *IIEnv = A->getEnvironment();
+ StringRef TargetEnv =
+ Context.getTargetInfo().getTriple().getEnvironmentName();
+ StringRef EnvName = llvm::Triple::getEnvironmentTypeName(
+ Context.getTargetInfo().getTriple().getEnvironment());
+ // Matching environment or no environment on attribute
+ if (!IIEnv || (!TargetEnv.empty() && IIEnv->getName() == TargetEnv)) {
+ if (Message) {
+ Message->clear();
+ llvm::raw_string_ostream Out(*Message);
+ VersionTuple VTI(A->getIntroduced());
+ Out << "introduced in " << PrettyPlatformName << " " << VTI << " "
+ << EnvName << HintMessage;
+ }
+ }
+ // Non-matching environment or no environment on target
+ else {
+ if (Message) {
+ Message->clear();
+ llvm::raw_string_ostream Out(*Message);
+ Out << "not available on " << PrettyPlatformName << " " << EnvName
+ << HintMessage;
+ }
}
return A->getStrict() ? AR_Unavailable : AR_NotYetIntroduced;
@@ -1077,7 +1116,7 @@ bool Decl::isInExportDeclContext() const {
while (DC && !isa<ExportDecl>(DC))
DC = DC->getLexicalParent();
- return DC && isa<ExportDecl>(DC);
+ return isa_and_nonnull<ExportDecl>(DC);
}
bool Decl::isInAnotherModuleUnit() const {
@@ -1086,25 +1125,48 @@ bool Decl::isInAnotherModuleUnit() const {
if (!M)
return false;
+ // FIXME or NOTE: maybe we need to be clear about the semantics
+ // of clang header modules. e.g., if this lives in a clang header
+ // module included by the current unit, should we return false
+ // here?
+ //
+ // This is clear for header units as the specification says the
+ // header units live in a synthesised translation unit. So we
+ // can return false here.
M = M->getTopLevelModule();
- // FIXME: It is problematic if the header module lives in another module
- // unit. Consider to fix this by techniques like
- // ExternalASTSource::hasExternalDefinitions.
- if (M->isHeaderLikeModule())
+ if (!M->isNamedModule())
return false;
- // A global module without parent implies that we're parsing the global
- // module. So it can't be in another module unit.
- if (M->isGlobalModule())
+ return M != getASTContext().getCurrentNamedModule();
+}
+
+bool Decl::isInCurrentModuleUnit() const {
+ auto *M = getOwningModule();
+
+ if (!M || !M->isNamedModule())
return false;
- assert(M->isNamedModule() && "New module kind?");
- return M != getASTContext().getCurrentNamedModule();
+ return M == getASTContext().getCurrentNamedModule();
+}
+
+bool Decl::shouldEmitInExternalSource() const {
+ ExternalASTSource *Source = getASTContext().getExternalSource();
+ if (!Source)
+ return false;
+
+ return Source->hasExternalDefinitions(this) == ExternalASTSource::EK_Always;
+}
+
+bool Decl::isFromExplicitGlobalModule() const {
+ return getOwningModule() && getOwningModule()->isExplicitGlobalModule();
}
-bool Decl::shouldSkipCheckingODR() const {
- return getASTContext().getLangOpts().SkipODRCheckInGMF && getOwningModule() &&
- getOwningModule()->isExplicitGlobalModule();
+bool Decl::isFromGlobalModule() const {
+ return getOwningModule() && getOwningModule()->isGlobalModule();
+}
+
+bool Decl::isInNamedModule() const {
+ return getOwningModule() && getOwningModule()->isNamedModule();
}
static Decl::Kind getKind(const Decl *D) { return D->getKind(); }
@@ -1116,7 +1178,9 @@ int64_t Decl::getID() const {
const FunctionType *Decl::getFunctionType(bool BlocksToo) const {
QualType Ty;
- if (const auto *D = dyn_cast<ValueDecl>(this))
+ if (isa<BindingDecl>(this))
+ return nullptr;
+ else if (const auto *D = dyn_cast<ValueDecl>(this))
Ty = D->getType();
else if (const auto *D = dyn_cast<TypedefNameDecl>(this))
Ty = D->getUnderlyingType();
@@ -1357,6 +1421,7 @@ DeclContext *DeclContext::getPrimaryContext() {
case Decl::ExternCContext:
case Decl::LinkageSpec:
case Decl::Export:
+ case Decl::TopLevelStmt:
case Decl::Block:
case Decl::Captured:
case Decl::OMPDeclareReduction:
@@ -1377,8 +1442,7 @@ DeclContext *DeclContext::getPrimaryContext() {
case Decl::TranslationUnit:
return static_cast<TranslationUnitDecl *>(this)->getFirstDecl();
case Decl::Namespace:
- // The original namespace is our primary context.
- return static_cast<NamespaceDecl *>(this)->getOriginalNamespace();
+ return static_cast<NamespaceDecl *>(this)->getFirstDecl();
case Decl::ObjCMethod:
return this;
@@ -1847,9 +1911,9 @@ DeclContext::lookup(DeclarationName Name) const {
DeclContext::lookup_result
DeclContext::noload_lookup(DeclarationName Name) {
- assert(getDeclKind() != Decl::LinkageSpec &&
- getDeclKind() != Decl::Export &&
- "should not perform lookups into transparent contexts");
+ // For transparent DeclContext, we should lookup in their enclosing context.
+ if (getDeclKind() == Decl::LinkageSpec || getDeclKind() == Decl::Export)
+ return getParent()->noload_lookup(Name);
DeclContext *PrimaryContext = getPrimaryContext();
if (PrimaryContext != this)
@@ -2145,3 +2209,7 @@ DependentDiagnostic *DependentDiagnostic::Create(ASTContext &C,
return DD;
}
+
+unsigned DeclIDBase::getLocalDeclIndex() const {
+ return ID & llvm::maskTrailingOnes<DeclID>(32);
+}
diff --git a/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp b/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp
index 117e802dae2d..9a3ede426e91 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp
@@ -57,7 +57,8 @@ using namespace clang;
void AccessSpecDecl::anchor() {}
-AccessSpecDecl *AccessSpecDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+AccessSpecDecl *AccessSpecDecl::CreateDeserialized(ASTContext &C,
+ GlobalDeclID ID) {
return new (C, ID) AccessSpecDecl(EmptyShell());
}
@@ -67,8 +68,8 @@ void LazyASTUnresolvedSet::getFromExternalSource(ASTContext &C) const {
assert(Source && "getFromExternalSource with no external source");
for (ASTUnresolvedSet::iterator I = Impl.begin(); I != Impl.end(); ++I)
- I.setDecl(cast<NamedDecl>(Source->GetExternalDecl(
- reinterpret_cast<uintptr_t>(I.getDecl()) >> 2)));
+ I.setDecl(
+ cast<NamedDecl>(Source->GetExternalDecl(GlobalDeclID(I.getDeclID()))));
Impl.Decls.setLazy(false);
}
@@ -160,8 +161,8 @@ CXXRecordDecl::CreateLambda(const ASTContext &C, DeclContext *DC,
return R;
}
-CXXRecordDecl *
-CXXRecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
+CXXRecordDecl *CXXRecordDecl::CreateDeserialized(const ASTContext &C,
+ GlobalDeclID ID) {
auto *R = new (C, ID)
CXXRecordDecl(CXXRecord, TagTypeKind::Struct, C, nullptr,
SourceLocation(), SourceLocation(), nullptr, nullptr);
@@ -400,10 +401,11 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// C++11 [class.ctor]p6:
// If that user-written default constructor would satisfy the
- // requirements of a constexpr constructor, the implicitly-defined
- // default constructor is constexpr.
+ // requirements of a constexpr constructor/function(C++23), the
+ // implicitly-defined default constructor is constexpr.
if (!BaseClassDecl->hasConstexprDefaultConstructor())
- data().DefaultedDefaultConstructorIsConstexpr = false;
+ data().DefaultedDefaultConstructorIsConstexpr =
+ C.getLangOpts().CPlusPlus23;
// C++1z [class.copy]p8:
// The implicitly-declared copy constructor for a class X will have
@@ -548,7 +550,8 @@ void CXXRecordDecl::addedClassSubobject(CXXRecordDecl *Subobj) {
// -- for every subobject of class type or (possibly multi-dimensional)
// array thereof, that class type shall have a constexpr destructor
if (!Subobj->hasConstexprDestructor())
- data().DefaultedDestructorIsConstexpr = false;
+ data().DefaultedDestructorIsConstexpr =
+ getASTContext().getLangOpts().CPlusPlus23;
// C++20 [temp.param]p7:
// A structural type is [...] a literal class type [for which] the types
@@ -558,6 +561,42 @@ void CXXRecordDecl::addedClassSubobject(CXXRecordDecl *Subobj) {
data().StructuralIfLiteral = false;
}
+const CXXRecordDecl *CXXRecordDecl::getStandardLayoutBaseWithFields() const {
+ assert(
+ isStandardLayout() &&
+ "getStandardLayoutBaseWithFields called on a non-standard-layout type");
+#ifdef EXPENSIVE_CHECKS
+ {
+ unsigned NumberOfBasesWithFields = 0;
+ if (!field_empty())
+ ++NumberOfBasesWithFields;
+ llvm::SmallPtrSet<const CXXRecordDecl *, 8> UniqueBases;
+ forallBases([&](const CXXRecordDecl *Base) -> bool {
+ if (!Base->field_empty())
+ ++NumberOfBasesWithFields;
+ assert(
+ UniqueBases.insert(Base->getCanonicalDecl()).second &&
+ "Standard layout struct has multiple base classes of the same type");
+ return true;
+ });
+ assert(NumberOfBasesWithFields <= 1 &&
+ "Standard layout struct has fields declared in more than one class");
+ }
+#endif
+ if (!field_empty())
+ return this;
+ const CXXRecordDecl *Result = this;
+ forallBases([&](const CXXRecordDecl *Base) -> bool {
+ if (!Base->field_empty()) {
+ // This is the base where the fields are declared; return early
+ Result = Base;
+ return false;
+ }
+ return true;
+ });
+ return Result;
+}
+
bool CXXRecordDecl::hasConstexprDestructor() const {
auto *Dtor = getDestructor();
return Dtor ? Dtor->isConstexpr() : defaultedDestructorIsConstexpr();
@@ -666,12 +705,15 @@ bool CXXRecordDecl::hasSubobjectAtOffsetZeroOfEmptyBaseType(
for (auto *FD : X->fields()) {
// FIXME: Should we really care about the type of the first non-static
// data member of a non-union if there are preceding unnamed bit-fields?
- if (FD->isUnnamedBitfield())
+ if (FD->isUnnamedBitField())
continue;
if (!IsFirstField && !FD->isZeroSize(Ctx))
continue;
+ if (FD->isInvalidDecl())
+ continue;
+
// -- If X is n array type, [visit the element type]
QualType T = Ctx.getBaseElementType(FD->getType());
if (auto *RD = T->getAsCXXRecordDecl())
@@ -945,7 +987,7 @@ void CXXRecordDecl::addedMember(Decl *D) {
// A declaration for a bit-field that omits the identifier declares an
// unnamed bit-field. Unnamed bit-fields are not members and cannot be
// initialized.
- if (Field->isUnnamedBitfield()) {
+ if (Field->isUnnamedBitField()) {
// C++ [meta.unary.prop]p4: [LWG2358]
// T is a class type [...] with [...] no unnamed bit-fields of non-zero
// length
@@ -1297,7 +1339,8 @@ void CXXRecordDecl::addedMember(Decl *D) {
!FieldRec->hasConstexprDefaultConstructor() && !isUnion())
// The standard requires any in-class initializer to be a constant
// expression. We consider this to be a defect.
- data().DefaultedDefaultConstructorIsConstexpr = false;
+ data().DefaultedDefaultConstructorIsConstexpr =
+ Context.getLangOpts().CPlusPlus23;
// C++11 [class.copy]p8:
// The implicitly-declared copy constructor for a class X will have
@@ -1517,11 +1560,11 @@ void CXXRecordDecl::setCaptures(ASTContext &Context,
auto *ToCapture = (LambdaCapture *)Context.Allocate(sizeof(LambdaCapture) *
Captures.size());
Data.AddCaptureList(Context, ToCapture);
- for (unsigned I = 0, N = Captures.size(); I != N; ++I) {
- if (Captures[I].isExplicit())
+ for (const LambdaCapture &C : Captures) {
+ if (C.isExplicit())
++Data.NumExplicitCaptures;
- new (ToCapture) LambdaCapture(Captures[I]);
+ new (ToCapture) LambdaCapture(C);
ToCapture++;
}
@@ -1564,10 +1607,9 @@ bool CXXRecordDecl::isGenericLambda() const {
#ifndef NDEBUG
static bool allLookupResultsAreTheSame(const DeclContext::lookup_result &R) {
- for (auto *D : R)
- if (!declaresSameEntity(D, R.front()))
- return false;
- return true;
+ return llvm::all_of(R, [&](NamedDecl *D) {
+ return D->isInvalidDecl() || declaresSameEntity(D, R.front());
+ });
}
#endif
@@ -2053,40 +2095,39 @@ void CXXRecordDecl::completeDefinition() {
completeDefinition(nullptr);
}
+static bool hasPureVirtualFinalOverrider(
+ const CXXRecordDecl &RD, const CXXFinalOverriderMap *FinalOverriders) {
+ if (!FinalOverriders) {
+ CXXFinalOverriderMap MyFinalOverriders;
+ RD.getFinalOverriders(MyFinalOverriders);
+ return hasPureVirtualFinalOverrider(RD, &MyFinalOverriders);
+ }
+
+ for (const CXXFinalOverriderMap::value_type &
+ OverridingMethodsEntry : *FinalOverriders) {
+ for (const auto &[_, SubobjOverrides] : OverridingMethodsEntry.second) {
+ assert(SubobjOverrides.size() > 0 &&
+ "All virtual functions have overriding virtual functions");
+
+ if (SubobjOverrides.front().Method->isPureVirtual())
+ return true;
+ }
+ }
+ return false;
+}
+
void CXXRecordDecl::completeDefinition(CXXFinalOverriderMap *FinalOverriders) {
RecordDecl::completeDefinition();
// If the class may be abstract (but hasn't been marked as such), check for
// any pure final overriders.
- if (mayBeAbstract()) {
- CXXFinalOverriderMap MyFinalOverriders;
- if (!FinalOverriders) {
- getFinalOverriders(MyFinalOverriders);
- FinalOverriders = &MyFinalOverriders;
- }
-
- bool Done = false;
- for (CXXFinalOverriderMap::iterator M = FinalOverriders->begin(),
- MEnd = FinalOverriders->end();
- M != MEnd && !Done; ++M) {
- for (OverridingMethods::iterator SO = M->second.begin(),
- SOEnd = M->second.end();
- SO != SOEnd && !Done; ++SO) {
- assert(SO->second.size() > 0 &&
- "All virtual functions have overriding virtual functions");
-
- // C++ [class.abstract]p4:
- // A class is abstract if it contains or inherits at least one
- // pure virtual function for which the final overrider is pure
- // virtual.
- if (SO->second.front().Method->isPureVirtual()) {
- data().Abstract = true;
- Done = true;
- break;
- }
- }
- }
- }
+ //
+ // C++ [class.abstract]p4:
+ // A class is abstract if it contains or inherits at least one
+ // pure virtual function for which the final overrider is pure
+ // virtual.
+ if (mayBeAbstract() && hasPureVirtualFinalOverrider(*this, FinalOverriders))
+ markAbstract();
// Set access bits correctly on the directly-declared conversions.
for (conversion_iterator I = conversion_begin(), E = conversion_end();
@@ -2160,8 +2201,8 @@ CXXDeductionGuideDecl *CXXDeductionGuideDecl::Create(
TInfo, EndLocation, Ctor, Kind);
}
-CXXDeductionGuideDecl *CXXDeductionGuideDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+CXXDeductionGuideDecl *
+CXXDeductionGuideDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) CXXDeductionGuideDecl(
C, nullptr, SourceLocation(), ExplicitSpecifier(), DeclarationNameInfo(),
QualType(), nullptr, SourceLocation(), nullptr,
@@ -2173,8 +2214,8 @@ RequiresExprBodyDecl *RequiresExprBodyDecl::Create(
return new (C, DC) RequiresExprBodyDecl(C, DC, StartLoc);
}
-RequiresExprBodyDecl *RequiresExprBodyDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+RequiresExprBodyDecl *
+RequiresExprBodyDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) RequiresExprBodyDecl(C, nullptr, SourceLocation());
}
@@ -2279,7 +2320,8 @@ CXXMethodDecl::Create(ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc,
isInline, ConstexprKind, EndLocation, TrailingRequiresClause);
}
-CXXMethodDecl *CXXMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+CXXMethodDecl *CXXMethodDecl::CreateDeserialized(ASTContext &C,
+ GlobalDeclID ID) {
return new (C, ID) CXXMethodDecl(
CXXMethod, C, nullptr, SourceLocation(), DeclarationNameInfo(),
QualType(), nullptr, SC_None, false, false,
@@ -2543,8 +2585,19 @@ QualType CXXMethodDecl::getThisType(const FunctionProtoType *FPT,
const CXXRecordDecl *Decl) {
ASTContext &C = Decl->getASTContext();
QualType ObjectTy = ::getThisObjectType(C, FPT, Decl);
- return C.getLangOpts().HLSL ? C.getLValueReferenceType(ObjectTy)
- : C.getPointerType(ObjectTy);
+
+ // Unlike 'const' and 'volatile', a '__restrict' qualifier must be
+ // attached to the pointer type, not the pointee.
+ bool Restrict = FPT->getMethodQuals().hasRestrict();
+ if (Restrict)
+ ObjectTy.removeLocalRestrict();
+
+ ObjectTy = C.getLangOpts().HLSL ? C.getLValueReferenceType(ObjectTy)
+ : C.getPointerType(ObjectTy);
+
+ if (Restrict)
+ ObjectTy.addRestrict();
+ return ObjectTy;
}
QualType CXXMethodDecl::getThisType() const {
@@ -2686,7 +2739,7 @@ CXXConstructorDecl::CXXConstructorDecl(
void CXXConstructorDecl::anchor() {}
CXXConstructorDecl *CXXConstructorDecl::CreateDeserialized(ASTContext &C,
- unsigned ID,
+ GlobalDeclID ID,
uint64_t AllocKind) {
bool hasTrailingExplicit = static_cast<bool>(AllocKind & TAKHasTailExplicit);
bool isInheritingConstructor =
@@ -2832,8 +2885,8 @@ bool CXXConstructorDecl::isSpecializationCopyingObject() const {
void CXXDestructorDecl::anchor() {}
-CXXDestructorDecl *
-CXXDestructorDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+CXXDestructorDecl *CXXDestructorDecl::CreateDeserialized(ASTContext &C,
+ GlobalDeclID ID) {
return new (C, ID) CXXDestructorDecl(
C, nullptr, SourceLocation(), DeclarationNameInfo(), QualType(), nullptr,
false, false, false, ConstexprSpecKind::Unspecified, nullptr);
@@ -2864,8 +2917,8 @@ void CXXDestructorDecl::setOperatorDelete(FunctionDecl *OD, Expr *ThisArg) {
void CXXConversionDecl::anchor() {}
-CXXConversionDecl *
-CXXConversionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+CXXConversionDecl *CXXConversionDecl::CreateDeserialized(ASTContext &C,
+ GlobalDeclID ID) {
return new (C, ID) CXXConversionDecl(
C, nullptr, SourceLocation(), DeclarationNameInfo(), QualType(), nullptr,
false, false, ExplicitSpecifier(), ConstexprSpecKind::Unspecified,
@@ -2911,7 +2964,7 @@ LinkageSpecDecl *LinkageSpecDecl::Create(ASTContext &C, DeclContext *DC,
}
LinkageSpecDecl *LinkageSpecDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ GlobalDeclID ID) {
return new (C, ID)
LinkageSpecDecl(nullptr, SourceLocation(), SourceLocation(),
LinkageSpecLanguageIDs::C, false);
@@ -2927,13 +2980,13 @@ UsingDirectiveDecl *UsingDirectiveDecl::Create(ASTContext &C, DeclContext *DC,
NamedDecl *Used,
DeclContext *CommonAncestor) {
if (auto *NS = dyn_cast_or_null<NamespaceDecl>(Used))
- Used = NS->getOriginalNamespace();
+ Used = NS->getFirstDecl();
return new (C, DC) UsingDirectiveDecl(DC, L, NamespaceLoc, QualifierLoc,
IdentLoc, Used, CommonAncestor);
}
UsingDirectiveDecl *UsingDirectiveDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ GlobalDeclID ID) {
return new (C, ID) UsingDirectiveDecl(nullptr, SourceLocation(),
SourceLocation(),
NestedNameSpecifierLoc(),
@@ -2952,16 +3005,9 @@ NamespaceDecl::NamespaceDecl(ASTContext &C, DeclContext *DC, bool Inline,
bool Nested)
: NamedDecl(Namespace, DC, IdLoc, Id), DeclContext(Namespace),
redeclarable_base(C), LocStart(StartLoc) {
- unsigned Flags = 0;
- if (Inline)
- Flags |= F_Inline;
- if (Nested)
- Flags |= F_Nested;
- AnonOrFirstNamespaceAndFlags = {nullptr, Flags};
+ setInline(Inline);
+ setNested(Nested);
setPreviousDecl(PrevDecl);
-
- if (PrevDecl)
- AnonOrFirstNamespaceAndFlags.setPointer(PrevDecl->getOriginalNamespace());
}
NamespaceDecl *NamespaceDecl::Create(ASTContext &C, DeclContext *DC,
@@ -2972,27 +3018,12 @@ NamespaceDecl *NamespaceDecl::Create(ASTContext &C, DeclContext *DC,
NamespaceDecl(C, DC, Inline, StartLoc, IdLoc, Id, PrevDecl, Nested);
}
-NamespaceDecl *NamespaceDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+NamespaceDecl *NamespaceDecl::CreateDeserialized(ASTContext &C,
+ GlobalDeclID ID) {
return new (C, ID) NamespaceDecl(C, nullptr, false, SourceLocation(),
SourceLocation(), nullptr, nullptr, false);
}
-NamespaceDecl *NamespaceDecl::getOriginalNamespace() {
- if (isFirstDecl())
- return this;
-
- return AnonOrFirstNamespaceAndFlags.getPointer();
-}
-
-const NamespaceDecl *NamespaceDecl::getOriginalNamespace() const {
- if (isFirstDecl())
- return this;
-
- return AnonOrFirstNamespaceAndFlags.getPointer();
-}
-
-bool NamespaceDecl::isOriginalNamespace() const { return isFirstDecl(); }
-
NamespaceDecl *NamespaceDecl::getNextRedeclarationImpl() {
return getNextRedeclaration();
}
@@ -3028,13 +3059,13 @@ NamespaceAliasDecl *NamespaceAliasDecl::Create(ASTContext &C, DeclContext *DC,
NamedDecl *Namespace) {
// FIXME: Preserve the aliased namespace as written.
if (auto *NS = dyn_cast_or_null<NamespaceDecl>(Namespace))
- Namespace = NS->getOriginalNamespace();
+ Namespace = NS->getFirstDecl();
return new (C, DC) NamespaceAliasDecl(C, DC, UsingLoc, AliasLoc, Alias,
QualifierLoc, IdentLoc, Namespace);
}
-NamespaceAliasDecl *
-NamespaceAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+NamespaceAliasDecl *NamespaceAliasDecl::CreateDeserialized(ASTContext &C,
+ GlobalDeclID ID) {
return new (C, ID) NamespaceAliasDecl(C, nullptr, SourceLocation(),
SourceLocation(), nullptr,
NestedNameSpecifierLoc(),
@@ -3089,8 +3120,8 @@ UsingShadowDecl::UsingShadowDecl(Kind K, ASTContext &C, EmptyShell Empty)
: NamedDecl(K, nullptr, SourceLocation(), DeclarationName()),
redeclarable_base(C) {}
-UsingShadowDecl *
-UsingShadowDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+UsingShadowDecl *UsingShadowDecl::CreateDeserialized(ASTContext &C,
+ GlobalDeclID ID) {
return new (C, ID) UsingShadowDecl(UsingShadow, C, EmptyShell());
}
@@ -3113,7 +3144,7 @@ ConstructorUsingShadowDecl::Create(ASTContext &C, DeclContext *DC,
}
ConstructorUsingShadowDecl *
-ConstructorUsingShadowDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ConstructorUsingShadowDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) ConstructorUsingShadowDecl(C, EmptyShell());
}
@@ -3161,7 +3192,7 @@ UsingDecl *UsingDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation UL,
return new (C, DC) UsingDecl(DC, UL, QualifierLoc, NameInfo, HasTypename);
}
-UsingDecl *UsingDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+UsingDecl *UsingDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) UsingDecl(nullptr, SourceLocation(),
NestedNameSpecifierLoc(), DeclarationNameInfo(),
false);
@@ -3185,7 +3216,8 @@ UsingEnumDecl *UsingEnumDecl::Create(ASTContext &C, DeclContext *DC,
UsingEnumDecl(DC, EnumType->getType()->getAsTagDecl()->getDeclName(), UL, EL, NL, EnumType);
}
-UsingEnumDecl *UsingEnumDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+UsingEnumDecl *UsingEnumDecl::CreateDeserialized(ASTContext &C,
+ GlobalDeclID ID) {
return new (C, ID)
UsingEnumDecl(nullptr, DeclarationName(), SourceLocation(),
SourceLocation(), SourceLocation(), nullptr);
@@ -3204,7 +3236,7 @@ UsingPackDecl *UsingPackDecl::Create(ASTContext &C, DeclContext *DC,
return new (C, DC, Extra) UsingPackDecl(DC, InstantiatedFrom, UsingDecls);
}
-UsingPackDecl *UsingPackDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+UsingPackDecl *UsingPackDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID,
unsigned NumExpansions) {
size_t Extra = additionalSizeToAlloc<NamedDecl *>(NumExpansions);
auto *Result =
@@ -3230,7 +3262,7 @@ UnresolvedUsingValueDecl::Create(ASTContext &C, DeclContext *DC,
}
UnresolvedUsingValueDecl *
-UnresolvedUsingValueDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+UnresolvedUsingValueDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) UnresolvedUsingValueDecl(nullptr, QualType(),
SourceLocation(),
NestedNameSpecifierLoc(),
@@ -3260,7 +3292,8 @@ UnresolvedUsingTypenameDecl::Create(ASTContext &C, DeclContext *DC,
}
UnresolvedUsingTypenameDecl *
-UnresolvedUsingTypenameDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+UnresolvedUsingTypenameDecl::CreateDeserialized(ASTContext &C,
+ GlobalDeclID ID) {
return new (C, ID) UnresolvedUsingTypenameDecl(
nullptr, SourceLocation(), SourceLocation(), NestedNameSpecifierLoc(),
SourceLocation(), nullptr, SourceLocation());
@@ -3273,7 +3306,8 @@ UnresolvedUsingIfExistsDecl::Create(ASTContext &Ctx, DeclContext *DC,
}
UnresolvedUsingIfExistsDecl *
-UnresolvedUsingIfExistsDecl::CreateDeserialized(ASTContext &Ctx, unsigned ID) {
+UnresolvedUsingIfExistsDecl::CreateDeserialized(ASTContext &Ctx,
+ GlobalDeclID ID) {
return new (Ctx, ID)
UnresolvedUsingIfExistsDecl(nullptr, SourceLocation(), DeclarationName());
}
@@ -3297,7 +3331,7 @@ StaticAssertDecl *StaticAssertDecl::Create(ASTContext &C, DeclContext *DC,
}
StaticAssertDecl *StaticAssertDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ GlobalDeclID ID) {
return new (C, ID) StaticAssertDecl(nullptr, SourceLocation(), nullptr,
nullptr, SourceLocation(), false);
}
@@ -3319,7 +3353,7 @@ BindingDecl *BindingDecl::Create(ASTContext &C, DeclContext *DC,
return new (C, DC) BindingDecl(DC, IdLoc, Id);
}
-BindingDecl *BindingDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+BindingDecl *BindingDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) BindingDecl(nullptr, SourceLocation(), nullptr);
}
@@ -3350,7 +3384,7 @@ DecompositionDecl *DecompositionDecl::Create(ASTContext &C, DeclContext *DC,
}
DecompositionDecl *DecompositionDecl::CreateDeserialized(ASTContext &C,
- unsigned ID,
+ GlobalDeclID ID,
unsigned NumBindings) {
size_t Extra = additionalSizeToAlloc<BindingDecl *>(NumBindings);
auto *Result = new (C, ID, Extra)
@@ -3389,7 +3423,7 @@ MSPropertyDecl *MSPropertyDecl::Create(ASTContext &C, DeclContext *DC,
}
MSPropertyDecl *MSPropertyDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ GlobalDeclID ID) {
return new (C, ID) MSPropertyDecl(nullptr, SourceLocation(),
DeclarationName(), QualType(), nullptr,
SourceLocation(), nullptr, nullptr);
@@ -3406,7 +3440,7 @@ MSGuidDecl *MSGuidDecl::Create(const ASTContext &C, QualType T, Parts P) {
return new (C, DC) MSGuidDecl(DC, T, P);
}
-MSGuidDecl *MSGuidDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+MSGuidDecl *MSGuidDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) MSGuidDecl(nullptr, QualType(), Parts());
}
@@ -3456,7 +3490,8 @@ static bool isValidStructGUID(ASTContext &Ctx, QualType T) {
return false;
auto MatcherIt = Fields.begin();
for (const FieldDecl *FD : RD->fields()) {
- if (FD->isUnnamedBitfield()) continue;
+ if (FD->isUnnamedBitField())
+ continue;
if (FD->isBitField() || MatcherIt == Fields.end() ||
!(*MatcherIt)(FD->getType()))
return false;
@@ -3515,7 +3550,7 @@ UnnamedGlobalConstantDecl::Create(const ASTContext &C, QualType T,
}
UnnamedGlobalConstantDecl *
-UnnamedGlobalConstantDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+UnnamedGlobalConstantDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID)
UnnamedGlobalConstantDecl(C, nullptr, QualType(), APValue());
}
diff --git a/contrib/llvm-project/clang/lib/AST/DeclFriend.cpp b/contrib/llvm-project/clang/lib/AST/DeclFriend.cpp
index 8ec1dea84df5..04b9b93699f3 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclFriend.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclFriend.cpp
@@ -62,7 +62,7 @@ FriendDecl *FriendDecl::Create(ASTContext &C, DeclContext *DC,
return FD;
}
-FriendDecl *FriendDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+FriendDecl *FriendDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID,
unsigned FriendTypeNumTPLists) {
std::size_t Extra =
additionalSizeToAlloc<TemplateParameterList *>(FriendTypeNumTPLists);
diff --git a/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp b/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp
index 962f503306a0..83062b0e6887 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp
@@ -66,7 +66,8 @@ void ObjCProtocolList::set(ObjCProtocolDecl* const* InList, unsigned Elts,
//===----------------------------------------------------------------------===//
ObjCContainerDecl::ObjCContainerDecl(Kind DK, DeclContext *DC,
- IdentifierInfo *Id, SourceLocation nameLoc,
+ const IdentifierInfo *Id,
+ SourceLocation nameLoc,
SourceLocation atStartLoc)
: NamedDecl(DK, DC, nameLoc, Id), DeclContext(DK) {
setAtStartLoc(atStartLoc);
@@ -378,10 +379,8 @@ SourceLocation ObjCInterfaceDecl::getSuperClassLoc() const {
/// FindPropertyVisibleInPrimaryClass - Finds declaration of the property
/// with name 'PropertyId' in the primary class; including those in protocols
/// (direct or indirect) used by the primary class.
-ObjCPropertyDecl *
-ObjCInterfaceDecl::FindPropertyVisibleInPrimaryClass(
- IdentifierInfo *PropertyId,
- ObjCPropertyQueryKind QueryKind) const {
+ObjCPropertyDecl *ObjCInterfaceDecl::FindPropertyVisibleInPrimaryClass(
+ const IdentifierInfo *PropertyId, ObjCPropertyQueryKind QueryKind) const {
// FIXME: Should make sure no callers ever do this.
if (!hasDefinition())
return nullptr;
@@ -863,7 +862,8 @@ ObjCMethodDecl *ObjCMethodDecl::Create(
isImplicitlyDeclared, isDefined, impControl, HasRelatedResultType);
}
-ObjCMethodDecl *ObjCMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ObjCMethodDecl *ObjCMethodDecl::CreateDeserialized(ASTContext &C,
+ GlobalDeclID ID) {
return new (C, ID) ObjCMethodDecl(SourceLocation(), SourceLocation(),
Selector(), QualType(), nullptr, nullptr);
}
@@ -1487,7 +1487,7 @@ ObjCTypeParamDecl *ObjCTypeParamDecl::Create(ASTContext &ctx, DeclContext *dc,
}
ObjCTypeParamDecl *ObjCTypeParamDecl::CreateDeserialized(ASTContext &ctx,
- unsigned ID) {
+ GlobalDeclID ID) {
return new (ctx, ID) ObjCTypeParamDecl(ctx, nullptr,
ObjCTypeParamVariance::Invariant,
SourceLocation(), 0, SourceLocation(),
@@ -1539,14 +1539,10 @@ void ObjCTypeParamList::gatherDefaultTypeArgs(
// ObjCInterfaceDecl
//===----------------------------------------------------------------------===//
-ObjCInterfaceDecl *ObjCInterfaceDecl::Create(const ASTContext &C,
- DeclContext *DC,
- SourceLocation atLoc,
- IdentifierInfo *Id,
- ObjCTypeParamList *typeParamList,
- ObjCInterfaceDecl *PrevDecl,
- SourceLocation ClassLoc,
- bool isInternal){
+ObjCInterfaceDecl *ObjCInterfaceDecl::Create(
+ const ASTContext &C, DeclContext *DC, SourceLocation atLoc,
+ const IdentifierInfo *Id, ObjCTypeParamList *typeParamList,
+ ObjCInterfaceDecl *PrevDecl, SourceLocation ClassLoc, bool isInternal) {
auto *Result = new (C, DC)
ObjCInterfaceDecl(C, DC, atLoc, Id, typeParamList, ClassLoc, PrevDecl,
isInternal);
@@ -1556,7 +1552,7 @@ ObjCInterfaceDecl *ObjCInterfaceDecl::Create(const ASTContext &C,
}
ObjCInterfaceDecl *ObjCInterfaceDecl::CreateDeserialized(const ASTContext &C,
- unsigned ID) {
+ GlobalDeclID ID) {
auto *Result = new (C, ID)
ObjCInterfaceDecl(C, nullptr, SourceLocation(), nullptr, nullptr,
SourceLocation(), nullptr, false);
@@ -1564,12 +1560,10 @@ ObjCInterfaceDecl *ObjCInterfaceDecl::CreateDeserialized(const ASTContext &C,
return Result;
}
-ObjCInterfaceDecl::ObjCInterfaceDecl(const ASTContext &C, DeclContext *DC,
- SourceLocation AtLoc, IdentifierInfo *Id,
- ObjCTypeParamList *typeParamList,
- SourceLocation CLoc,
- ObjCInterfaceDecl *PrevDecl,
- bool IsInternal)
+ObjCInterfaceDecl::ObjCInterfaceDecl(
+ const ASTContext &C, DeclContext *DC, SourceLocation AtLoc,
+ const IdentifierInfo *Id, ObjCTypeParamList *typeParamList,
+ SourceLocation CLoc, ObjCInterfaceDecl *PrevDecl, bool IsInternal)
: ObjCContainerDecl(ObjCInterface, DC, Id, CLoc, AtLoc),
redeclarable_base(C) {
setPreviousDecl(PrevDecl);
@@ -1751,8 +1745,8 @@ ObjCIvarDecl *ObjCInterfaceDecl::all_declared_ivar_begin() {
/// categories for this class and returns it. Name of the category is passed
/// in 'CategoryId'. If category not found, return 0;
///
-ObjCCategoryDecl *
-ObjCInterfaceDecl::FindCategoryDeclaration(IdentifierInfo *CategoryId) const {
+ObjCCategoryDecl *ObjCInterfaceDecl::FindCategoryDeclaration(
+ const IdentifierInfo *CategoryId) const {
// FIXME: Should make sure no callers ever do this.
if (!hasDefinition())
return nullptr;
@@ -1838,10 +1832,10 @@ void ObjCIvarDecl::anchor() {}
ObjCIvarDecl *ObjCIvarDecl::Create(ASTContext &C, ObjCContainerDecl *DC,
SourceLocation StartLoc,
- SourceLocation IdLoc, IdentifierInfo *Id,
- QualType T, TypeSourceInfo *TInfo,
- AccessControl ac, Expr *BW,
- bool synthesized) {
+ SourceLocation IdLoc,
+ const IdentifierInfo *Id, QualType T,
+ TypeSourceInfo *TInfo, AccessControl ac,
+ Expr *BW, bool synthesized) {
if (DC) {
// Ivar's can only appear in interfaces, implementations (via synthesized
// properties), and class extensions (via direct declaration, or synthesized
@@ -1872,7 +1866,7 @@ ObjCIvarDecl *ObjCIvarDecl::Create(ASTContext &C, ObjCContainerDecl *DC,
synthesized);
}
-ObjCIvarDecl *ObjCIvarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ObjCIvarDecl *ObjCIvarDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) ObjCIvarDecl(nullptr, SourceLocation(), SourceLocation(),
nullptr, QualType(), nullptr,
ObjCIvarDecl::None, nullptr, false);
@@ -1921,7 +1915,7 @@ ObjCAtDefsFieldDecl
}
ObjCAtDefsFieldDecl *ObjCAtDefsFieldDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ GlobalDeclID ID) {
return new (C, ID) ObjCAtDefsFieldDecl(nullptr, SourceLocation(),
SourceLocation(), nullptr, QualType(),
nullptr);
@@ -1956,7 +1950,7 @@ ObjCProtocolDecl *ObjCProtocolDecl::Create(ASTContext &C, DeclContext *DC,
}
ObjCProtocolDecl *ObjCProtocolDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ GlobalDeclID ID) {
ObjCProtocolDecl *Result =
new (C, ID) ObjCProtocolDecl(C, nullptr, nullptr, SourceLocation(),
SourceLocation(), nullptr);
@@ -2120,28 +2114,23 @@ void ObjCProtocolDecl::setHasODRHash(bool HasHash) {
void ObjCCategoryDecl::anchor() {}
-ObjCCategoryDecl::ObjCCategoryDecl(DeclContext *DC, SourceLocation AtLoc,
- SourceLocation ClassNameLoc,
- SourceLocation CategoryNameLoc,
- IdentifierInfo *Id, ObjCInterfaceDecl *IDecl,
- ObjCTypeParamList *typeParamList,
- SourceLocation IvarLBraceLoc,
- SourceLocation IvarRBraceLoc)
+ObjCCategoryDecl::ObjCCategoryDecl(
+ DeclContext *DC, SourceLocation AtLoc, SourceLocation ClassNameLoc,
+ SourceLocation CategoryNameLoc, const IdentifierInfo *Id,
+ ObjCInterfaceDecl *IDecl, ObjCTypeParamList *typeParamList,
+ SourceLocation IvarLBraceLoc, SourceLocation IvarRBraceLoc)
: ObjCContainerDecl(ObjCCategory, DC, Id, ClassNameLoc, AtLoc),
ClassInterface(IDecl), CategoryNameLoc(CategoryNameLoc),
IvarLBraceLoc(IvarLBraceLoc), IvarRBraceLoc(IvarRBraceLoc) {
setTypeParamList(typeParamList);
}
-ObjCCategoryDecl *ObjCCategoryDecl::Create(ASTContext &C, DeclContext *DC,
- SourceLocation AtLoc,
- SourceLocation ClassNameLoc,
- SourceLocation CategoryNameLoc,
- IdentifierInfo *Id,
- ObjCInterfaceDecl *IDecl,
- ObjCTypeParamList *typeParamList,
- SourceLocation IvarLBraceLoc,
- SourceLocation IvarRBraceLoc) {
+ObjCCategoryDecl *ObjCCategoryDecl::Create(
+ ASTContext &C, DeclContext *DC, SourceLocation AtLoc,
+ SourceLocation ClassNameLoc, SourceLocation CategoryNameLoc,
+ const IdentifierInfo *Id, ObjCInterfaceDecl *IDecl,
+ ObjCTypeParamList *typeParamList, SourceLocation IvarLBraceLoc,
+ SourceLocation IvarRBraceLoc) {
auto *CatDecl =
new (C, DC) ObjCCategoryDecl(DC, AtLoc, ClassNameLoc, CategoryNameLoc, Id,
IDecl, typeParamList, IvarLBraceLoc,
@@ -2160,7 +2149,7 @@ ObjCCategoryDecl *ObjCCategoryDecl::Create(ASTContext &C, DeclContext *DC,
}
ObjCCategoryDecl *ObjCCategoryDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ GlobalDeclID ID) {
return new (C, ID) ObjCCategoryDecl(nullptr, SourceLocation(),
SourceLocation(), SourceLocation(),
nullptr, nullptr, nullptr);
@@ -2190,21 +2179,18 @@ void ObjCCategoryDecl::setTypeParamList(ObjCTypeParamList *TPL) {
void ObjCCategoryImplDecl::anchor() {}
-ObjCCategoryImplDecl *
-ObjCCategoryImplDecl::Create(ASTContext &C, DeclContext *DC,
- IdentifierInfo *Id,
- ObjCInterfaceDecl *ClassInterface,
- SourceLocation nameLoc,
- SourceLocation atStartLoc,
- SourceLocation CategoryNameLoc) {
+ObjCCategoryImplDecl *ObjCCategoryImplDecl::Create(
+ ASTContext &C, DeclContext *DC, const IdentifierInfo *Id,
+ ObjCInterfaceDecl *ClassInterface, SourceLocation nameLoc,
+ SourceLocation atStartLoc, SourceLocation CategoryNameLoc) {
if (ClassInterface && ClassInterface->hasDefinition())
ClassInterface = ClassInterface->getDefinition();
return new (C, DC) ObjCCategoryImplDecl(DC, Id, ClassInterface, nameLoc,
atStartLoc, CategoryNameLoc);
}
-ObjCCategoryImplDecl *ObjCCategoryImplDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ObjCCategoryImplDecl *
+ObjCCategoryImplDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) ObjCCategoryImplDecl(nullptr, nullptr, nullptr,
SourceLocation(), SourceLocation(),
SourceLocation());
@@ -2311,7 +2297,7 @@ ObjCImplementationDecl::Create(ASTContext &C, DeclContext *DC,
}
ObjCImplementationDecl *
-ObjCImplementationDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ObjCImplementationDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) ObjCImplementationDecl(nullptr, nullptr, nullptr,
SourceLocation(), SourceLocation());
}
@@ -2354,7 +2340,7 @@ ObjCCompatibleAliasDecl::Create(ASTContext &C, DeclContext *DC,
}
ObjCCompatibleAliasDecl *
-ObjCCompatibleAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ObjCCompatibleAliasDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) ObjCCompatibleAliasDecl(nullptr, SourceLocation(),
nullptr, nullptr);
}
@@ -2365,20 +2351,17 @@ ObjCCompatibleAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
void ObjCPropertyDecl::anchor() {}
-ObjCPropertyDecl *ObjCPropertyDecl::Create(ASTContext &C, DeclContext *DC,
- SourceLocation L,
- IdentifierInfo *Id,
- SourceLocation AtLoc,
- SourceLocation LParenLoc,
- QualType T,
- TypeSourceInfo *TSI,
- PropertyControl propControl) {
+ObjCPropertyDecl *
+ObjCPropertyDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
+ const IdentifierInfo *Id, SourceLocation AtLoc,
+ SourceLocation LParenLoc, QualType T,
+ TypeSourceInfo *TSI, PropertyControl propControl) {
return new (C, DC) ObjCPropertyDecl(DC, L, Id, AtLoc, LParenLoc, T, TSI,
propControl);
}
ObjCPropertyDecl *ObjCPropertyDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ GlobalDeclID ID) {
return new (C, ID) ObjCPropertyDecl(nullptr, SourceLocation(), nullptr,
SourceLocation(), SourceLocation(),
QualType(), nullptr, None);
@@ -2410,8 +2393,8 @@ ObjCPropertyImplDecl *ObjCPropertyImplDecl::Create(ASTContext &C,
ivarLoc);
}
-ObjCPropertyImplDecl *ObjCPropertyImplDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ObjCPropertyImplDecl *
+ObjCPropertyImplDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) ObjCPropertyImplDecl(nullptr, SourceLocation(),
SourceLocation(), nullptr, Dynamic,
nullptr, SourceLocation());
diff --git a/contrib/llvm-project/clang/lib/AST/DeclOpenMP.cpp b/contrib/llvm-project/clang/lib/AST/DeclOpenMP.cpp
index ac5780f82dbb..81ca48e60942 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclOpenMP.cpp
@@ -36,7 +36,7 @@ OMPThreadPrivateDecl *OMPThreadPrivateDecl::Create(ASTContext &C,
}
OMPThreadPrivateDecl *OMPThreadPrivateDecl::CreateDeserialized(ASTContext &C,
- unsigned ID,
+ GlobalDeclID ID,
unsigned N) {
return OMPDeclarativeDirective::createEmptyDirective<OMPThreadPrivateDecl>(
C, ID, 0, N);
@@ -63,7 +63,8 @@ OMPAllocateDecl *OMPAllocateDecl::Create(ASTContext &C, DeclContext *DC,
return D;
}
-OMPAllocateDecl *OMPAllocateDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+OMPAllocateDecl *OMPAllocateDecl::CreateDeserialized(ASTContext &C,
+ GlobalDeclID ID,
unsigned NVars,
unsigned NClauses) {
return OMPDeclarativeDirective::createEmptyDirective<OMPAllocateDecl>(
@@ -89,7 +90,8 @@ OMPRequiresDecl *OMPRequiresDecl::Create(ASTContext &C, DeclContext *DC,
L);
}
-OMPRequiresDecl *OMPRequiresDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+OMPRequiresDecl *OMPRequiresDecl::CreateDeserialized(ASTContext &C,
+ GlobalDeclID ID,
unsigned N) {
return OMPDeclarativeDirective::createEmptyDirective<OMPRequiresDecl>(
C, ID, N, 0, SourceLocation());
@@ -117,7 +119,7 @@ OMPDeclareReductionDecl *OMPDeclareReductionDecl::Create(
}
OMPDeclareReductionDecl *
-OMPDeclareReductionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+OMPDeclareReductionDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) OMPDeclareReductionDecl(
OMPDeclareReduction, /*DC=*/nullptr, SourceLocation(), DeclarationName(),
QualType(), /*PrevDeclInScope=*/nullptr);
@@ -148,7 +150,7 @@ OMPDeclareMapperDecl *OMPDeclareMapperDecl::Create(
}
OMPDeclareMapperDecl *OMPDeclareMapperDecl::CreateDeserialized(ASTContext &C,
- unsigned ID,
+ GlobalDeclID ID,
unsigned N) {
return OMPDeclarativeDirective::createEmptyDirective<OMPDeclareMapperDecl>(
C, ID, N, 1, SourceLocation(), DeclarationName(), QualType(),
@@ -179,7 +181,7 @@ OMPCapturedExprDecl *OMPCapturedExprDecl::Create(ASTContext &C, DeclContext *DC,
}
OMPCapturedExprDecl *OMPCapturedExprDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ GlobalDeclID ID) {
return new (C, ID) OMPCapturedExprDecl(C, nullptr, nullptr, QualType(),
/*TInfo=*/nullptr, SourceLocation());
}
diff --git a/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp b/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp
index 822ac12c4c7d..26773a69ab9a 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/Basic/Module.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -49,18 +50,6 @@ namespace {
void PrintObjCTypeParams(ObjCTypeParamList *Params);
- enum class AttrPrintLoc {
- None = 0,
- Left = 1,
- Right = 2,
- Any = Left | Right,
-
- LLVM_MARK_AS_BITMASK_ENUM(/*DefaultValue=*/Any)
- };
-
- void prettyPrintAttributes(Decl *D, raw_ostream &out,
- AttrPrintLoc loc = AttrPrintLoc::Any);
-
public:
DeclPrinter(raw_ostream &Out, const PrintingPolicy &Policy,
const ASTContext &Context, unsigned Indentation = 0,
@@ -129,11 +118,10 @@ namespace {
const TemplateParameterList *Params);
void printTemplateArguments(llvm::ArrayRef<TemplateArgumentLoc> Args,
const TemplateParameterList *Params);
-
- inline void prettyPrintAttributes(Decl *D) {
- prettyPrintAttributes(D, Out);
- }
-
+ enum class AttrPosAsWritten { Default = 0, Left, Right };
+ bool
+ prettyPrintAttributes(const Decl *D,
+ AttrPosAsWritten Pos = AttrPosAsWritten::Default);
void prettyPrintPragmas(Decl *D);
void printDeclType(QualType T, StringRef DeclName, bool Pack = false);
};
@@ -250,89 +238,55 @@ raw_ostream& DeclPrinter::Indent(unsigned Indentation) {
return Out;
}
-// For CLANG_ATTR_LIST_CanPrintOnLeft macro.
-#include "clang/Basic/AttrLeftSideCanPrintList.inc"
-
-// For CLANG_ATTR_LIST_PrintOnLeft macro.
-#include "clang/Basic/AttrLeftSideMustPrintList.inc"
-
-static bool canPrintOnLeftSide(attr::Kind kind) {
-#ifdef CLANG_ATTR_LIST_CanPrintOnLeft
- switch (kind) {
- CLANG_ATTR_LIST_CanPrintOnLeft
- return true;
- default:
- return false;
- }
-#else
- return false;
-#endif
-}
-
-static bool canPrintOnLeftSide(const Attr *A) {
- if (A->isStandardAttributeSyntax())
- return false;
+static DeclPrinter::AttrPosAsWritten getPosAsWritten(const Attr *A,
+ const Decl *D) {
+ SourceLocation ALoc = A->getLoc();
+ SourceLocation DLoc = D->getLocation();
+ const ASTContext &C = D->getASTContext();
+ if (ALoc.isInvalid() || DLoc.isInvalid())
+ return DeclPrinter::AttrPosAsWritten::Left;
- return canPrintOnLeftSide(A->getKind());
-}
+ if (C.getSourceManager().isBeforeInTranslationUnit(ALoc, DLoc))
+ return DeclPrinter::AttrPosAsWritten::Left;
-static bool mustPrintOnLeftSide(attr::Kind kind) {
-#ifdef CLANG_ATTR_LIST_PrintOnLeft
- switch (kind) {
- CLANG_ATTR_LIST_PrintOnLeft
- return true;
- default:
- return false;
- }
-#else
- return false;
-#endif
+ return DeclPrinter::AttrPosAsWritten::Right;
}
-static bool mustPrintOnLeftSide(const Attr *A) {
- if (A->isDeclspecAttribute())
- return true;
-
- return mustPrintOnLeftSide(A->getKind());
-}
-
-void DeclPrinter::prettyPrintAttributes(Decl *D, llvm::raw_ostream &Out,
- AttrPrintLoc Loc) {
- if (Policy.PolishForDeclaration)
- return;
+// returns true if an attribute was printed.
+bool DeclPrinter::prettyPrintAttributes(const Decl *D,
+ AttrPosAsWritten Pos /*=Default*/) {
+ bool hasPrinted = false;
if (D->hasAttrs()) {
- AttrVec &Attrs = D->getAttrs();
+ const AttrVec &Attrs = D->getAttrs();
for (auto *A : Attrs) {
if (A->isInherited() || A->isImplicit())
continue;
-
- AttrPrintLoc AttrLoc = AttrPrintLoc::Right;
- if (mustPrintOnLeftSide(A)) {
- // If we must always print on left side (e.g. declspec), then mark as
- // so.
- AttrLoc = AttrPrintLoc::Left;
- } else if (canPrintOnLeftSide(A)) {
- // For functions with body defined we print the attributes on the left
- // side so that GCC accept our dumps as well.
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
- FD && FD->isThisDeclarationADefinition())
- // In case Decl is a function with a body, then attrs should be print
- // on the left side.
- AttrLoc = AttrPrintLoc::Left;
-
- // In case it is a variable declaration with a ctor, then allow
- // printing on the left side for readbility.
- else if (const VarDecl *VD = dyn_cast<VarDecl>(D);
- VD && VD->getInit() &&
- VD->getInitStyle() == VarDecl::CallInit)
- AttrLoc = AttrPrintLoc::Left;
+ // Print out the keyword attributes, they aren't regular attributes.
+ if (Policy.PolishForDeclaration && !A->isKeywordAttribute())
+ continue;
+ switch (A->getKind()) {
+#define ATTR(X)
+#define PRAGMA_SPELLING_ATTR(X) case attr::X:
+#include "clang/Basic/AttrList.inc"
+ break;
+ default:
+ AttrPosAsWritten APos = getPosAsWritten(A, D);
+ assert(APos != AttrPosAsWritten::Default &&
+ "Default not a valid for an attribute location");
+ if (Pos == AttrPosAsWritten::Default || Pos == APos) {
+ if (Pos != AttrPosAsWritten::Left)
+ Out << ' ';
+ A->printPretty(Out, Policy);
+ hasPrinted = true;
+ if (Pos == AttrPosAsWritten::Left)
+ Out << ' ';
+ }
+ break;
}
- // Only print the side matches the user requested.
- if ((Loc & AttrLoc) != AttrPrintLoc::None)
- A->printPretty(Out, Policy);
}
}
+ return hasPrinted;
}
void DeclPrinter::prettyPrintPragmas(Decl *D) {
@@ -679,10 +633,22 @@ static void printExplicitSpecifier(ExplicitSpecifier ES, llvm::raw_ostream &Out,
Out << Proto;
}
+static void MaybePrintTagKeywordIfSupressingScopes(PrintingPolicy &Policy,
+ QualType T,
+ llvm::raw_ostream &Out) {
+ StringRef prefix = T->isClassType() ? "class "
+ : T->isStructureType() ? "struct "
+ : T->isUnionType() ? "union "
+ : "";
+ Out << prefix;
+}
+
void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
if (!D->getDescribedFunctionTemplate() &&
- !D->isFunctionTemplateSpecialization())
+ !D->isFunctionTemplateSpecialization()) {
prettyPrintPragmas(D);
+ prettyPrintAttributes(D, AttrPosAsWritten::Left);
+ }
if (D->isFunctionTemplateSpecialization())
Out << "template<> ";
@@ -692,22 +658,6 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
printTemplateParameters(D->getTemplateParameterList(I));
}
- std::string LeftsideAttrs;
- llvm::raw_string_ostream LSAS(LeftsideAttrs);
-
- prettyPrintAttributes(D, LSAS, AttrPrintLoc::Left);
-
- // prettyPrintAttributes print a space on left side of the attribute.
- if (LeftsideAttrs[0] == ' ') {
- // Skip the space prettyPrintAttributes generated.
- LeftsideAttrs.erase(0, LeftsideAttrs.find_first_not_of(' '));
-
- // Add a single space between the attribute and the Decl name.
- LSAS << ' ';
- }
-
- Out << LeftsideAttrs;
-
CXXConstructorDecl *CDecl = dyn_cast<CXXConstructorDecl>(D);
CXXConversionDecl *ConversionDecl = dyn_cast<CXXConversionDecl>(D);
CXXDeductionGuideDecl *GuideDecl = dyn_cast<CXXDeductionGuideDecl>(D);
@@ -855,6 +805,10 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
Out << Proto << " -> ";
Proto.clear();
}
+ if (!Policy.SuppressTagKeyword && Policy.SuppressScope &&
+ !Policy.SuppressUnwrittenScope)
+ MaybePrintTagKeywordIfSupressingScopes(Policy, AFT->getReturnType(),
+ Out);
AFT->getReturnType().print(Out, Policy, Proto);
Proto.clear();
}
@@ -869,13 +823,18 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
Ty.print(Out, Policy, Proto);
}
- prettyPrintAttributes(D, Out, AttrPrintLoc::Right);
+ prettyPrintAttributes(D, AttrPosAsWritten::Right);
if (D->isPureVirtual())
Out << " = 0";
- else if (D->isDeletedAsWritten())
+ else if (D->isDeletedAsWritten()) {
Out << " = delete";
- else if (D->isExplicitlyDefaulted())
+ if (const StringLiteral *M = D->getDeletedMessage()) {
+ Out << "(";
+ M->outputString(Out);
+ Out << ")";
+ }
+ } else if (D->isExplicitlyDefaulted())
Out << " = default";
else if (D->doesThisDeclarationHaveABody()) {
if (!Policy.TerseOutput) {
@@ -962,27 +921,12 @@ void DeclPrinter::VisitLabelDecl(LabelDecl *D) {
void DeclPrinter::VisitVarDecl(VarDecl *D) {
prettyPrintPragmas(D);
+ prettyPrintAttributes(D, AttrPosAsWritten::Left);
+
if (const auto *Param = dyn_cast<ParmVarDecl>(D);
Param && Param->isExplicitObjectParameter())
Out << "this ";
- std::string LeftSide;
- llvm::raw_string_ostream LeftSideStream(LeftSide);
-
- // Print attributes that should be placed on the left, such as __declspec.
- prettyPrintAttributes(D, LeftSideStream, AttrPrintLoc::Left);
-
- // prettyPrintAttributes print a space on left side of the attribute.
- if (LeftSide[0] == ' ') {
- // Skip the space prettyPrintAttributes generated.
- LeftSide.erase(0, LeftSide.find_first_not_of(' '));
-
- // Add a single space between the attribute and the Decl name.
- LeftSideStream << ' ';
- }
-
- Out << LeftSide;
-
QualType T = D->getTypeSourceInfo()
? D->getTypeSourceInfo()->getType()
: D->getASTContext().getUnqualifiedObjCPointerType(D->getType());
@@ -1015,18 +959,16 @@ void DeclPrinter::VisitVarDecl(VarDecl *D) {
}
}
- StringRef Name;
-
- Name = (isa<ParmVarDecl>(D) && Policy.CleanUglifiedParameters &&
- D->getIdentifier())
- ? D->getIdentifier()->deuglifiedName()
- : D->getName();
+ if (!Policy.SuppressTagKeyword && Policy.SuppressScope &&
+ !Policy.SuppressUnwrittenScope)
+ MaybePrintTagKeywordIfSupressingScopes(Policy, T, Out);
- printDeclType(T, Name);
+ printDeclType(T, (isa<ParmVarDecl>(D) && Policy.CleanUglifiedParameters &&
+ D->getIdentifier())
+ ? D->getIdentifier()->deuglifiedName()
+ : D->getName());
- // Print the attributes that should be placed right before the end of the
- // decl.
- prettyPrintAttributes(D, Out, AttrPrintLoc::Right);
+ prettyPrintAttributes(D, AttrPosAsWritten::Right);
Expr *Init = D->getInit();
if (!Policy.SuppressInitializers && Init) {
@@ -1128,38 +1070,38 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
// FIXME: add printing of pragma attributes if required.
if (!Policy.SuppressSpecifiers && D->isModulePrivate())
Out << "__module_private__ ";
- Out << D->getKindName();
- prettyPrintAttributes(D);
+ Out << D->getKindName() << ' ';
- if (D->getIdentifier()) {
+ // FIXME: Move before printing the decl kind to match the behavior of the
+ // attribute printing for variables and function where they are printed first.
+ if (prettyPrintAttributes(D, AttrPosAsWritten::Left))
Out << ' ';
+
+ if (D->getIdentifier()) {
if (auto *NNS = D->getQualifier())
NNS->print(Out, Policy);
Out << *D;
- if (auto S = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
- ArrayRef<TemplateArgument> Args = S->getTemplateArgs().asArray();
- if (!Policy.PrintCanonicalTypes)
- if (const auto* TSI = S->getTypeAsWritten())
- if (const auto *TST =
- dyn_cast<TemplateSpecializationType>(TSI->getType()))
- Args = TST->template_arguments();
- printTemplateArguments(
- Args, S->getSpecializedTemplate()->getTemplateParameters());
+ if (auto *S = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
+ const TemplateParameterList *TParams =
+ S->getSpecializedTemplate()->getTemplateParameters();
+ const ASTTemplateArgumentListInfo *TArgAsWritten =
+ S->getTemplateArgsAsWritten();
+ if (TArgAsWritten && !Policy.PrintCanonicalTypes)
+ printTemplateArguments(TArgAsWritten->arguments(), TParams);
+ else
+ printTemplateArguments(S->getTemplateArgs().asArray(), TParams);
}
}
- if (D->hasDefinition()) {
- if (D->hasAttr<FinalAttr>()) {
- Out << " final";
- }
- }
+ prettyPrintAttributes(D, AttrPosAsWritten::Right);
if (D->isCompleteDefinition()) {
+ Out << ' ';
// Print the base classes
if (D->getNumBases()) {
- Out << " : ";
+ Out << ": ";
for (CXXRecordDecl::base_class_iterator Base = D->bases_begin(),
BaseEnd = D->bases_end(); Base != BaseEnd; ++Base) {
if (Base != D->bases_begin())
@@ -1178,14 +1120,15 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
if (Base->isPackExpansion())
Out << "...";
}
+ Out << ' ';
}
// Print the class definition
// FIXME: Doesn't print access specifiers, e.g., "public:"
if (Policy.TerseOutput) {
- Out << " {}";
+ Out << "{}";
} else {
- Out << " {\n";
+ Out << "{\n";
VisitDeclContext(D);
Indent() << "}";
}
@@ -1215,6 +1158,10 @@ void DeclPrinter::printTemplateParameters(const TemplateParameterList *Params,
bool OmitTemplateKW) {
assert(Params);
+ // Don't print invented template parameter lists.
+ if (!Params->empty() && Params->getParam(0)->isImplicit())
+ return;
+
if (!OmitTemplateKW)
Out << "template ";
Out << '<';
@@ -1240,6 +1187,13 @@ void DeclPrinter::printTemplateParameters(const TemplateParameterList *Params,
}
Out << '>';
+
+ if (const Expr *RequiresClause = Params->getRequiresClause()) {
+ Out << " requires ";
+ RequiresClause->printPretty(Out, nullptr, Policy, Indentation, "\n",
+ &Context);
+ }
+
if (!OmitTemplateKW)
Out << ' ';
}
@@ -1282,7 +1236,10 @@ void DeclPrinter::VisitTemplateDecl(const TemplateDecl *D) {
if (const TemplateTemplateParmDecl *TTP =
dyn_cast<TemplateTemplateParmDecl>(D)) {
- Out << "class";
+ if (TTP->wasDeclaredWithTypename())
+ Out << "typename";
+ else
+ Out << "class";
if (TTP->isParameterPack())
Out << " ...";
@@ -1513,6 +1470,11 @@ void DeclPrinter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *OID) {
return;
}
bool eolnOut = false;
+ if (OID->hasAttrs()) {
+ prettyPrintAttributes(OID);
+ Out << "\n";
+ }
+
Out << "@interface " << I;
if (auto TypeParams = OID->getTypeParamListAsWritten()) {
@@ -1928,7 +1890,8 @@ void DeclPrinter::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *TTP) {
if (TTP->hasDefaultArgument()) {
Out << " = ";
- Out << TTP->getDefaultArgument().getAsString(Policy);
+ TTP->getDefaultArgument().getArgument().print(Policy, Out,
+ /*IncludeType=*/false);
}
}
@@ -1942,7 +1905,7 @@ void DeclPrinter::VisitNonTypeTemplateParmDecl(
if (NTTP->hasDefaultArgument()) {
Out << " = ";
- NTTP->getDefaultArgument()->printPretty(Out, nullptr, Policy, Indentation,
- "\n", &Context);
+ NTTP->getDefaultArgument().getArgument().print(Policy, Out,
+ /*IncludeType=*/false);
}
}
diff --git a/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp b/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp
index 7d7556e670f9..722c7fcf0b0d 100755
--- a/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp
@@ -337,9 +337,10 @@ void RedeclarableTemplateDecl::loadLazySpecializationsImpl() const {
CommonBase *CommonBasePtr = getMostRecentDecl()->getCommonPtr();
if (CommonBasePtr->LazySpecializations) {
ASTContext &Context = getASTContext();
- uint32_t *Specs = CommonBasePtr->LazySpecializations;
+ GlobalDeclID *Specs = CommonBasePtr->LazySpecializations;
CommonBasePtr->LazySpecializations = nullptr;
- for (uint32_t I = 0, N = *Specs++; I != N; ++I)
+ unsigned SpecSize = (*Specs++).getRawValue();
+ for (unsigned I = 0; I != SpecSize; ++I)
(void)Context.getExternalSource()->GetExternalDecl(Specs[I]);
}
}
@@ -417,8 +418,8 @@ FunctionTemplateDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
return TD;
}
-FunctionTemplateDecl *FunctionTemplateDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+FunctionTemplateDecl *
+FunctionTemplateDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) FunctionTemplateDecl(C, nullptr, SourceLocation(),
DeclarationName(), nullptr, nullptr);
}
@@ -503,7 +504,7 @@ ClassTemplateDecl *ClassTemplateDecl::Create(ASTContext &C, DeclContext *DC,
}
ClassTemplateDecl *ClassTemplateDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ GlobalDeclID ID) {
return new (C, ID) ClassTemplateDecl(C, nullptr, SourceLocation(),
DeclarationName(), nullptr, nullptr);
}
@@ -626,9 +627,10 @@ ClassTemplateDecl::getInjectedClassNameSpecialization() {
TemplateParameterList *Params = getTemplateParameters();
SmallVector<TemplateArgument, 16> TemplateArgs;
Context.getInjectedTemplateArgs(Params, TemplateArgs);
- CommonPtr->InjectedClassNameType
- = Context.getTemplateSpecializationType(TemplateName(this),
- TemplateArgs);
+ TemplateName Name = Context.getQualifiedTemplateName(
+ /*NNS=*/nullptr, /*TemplateKeyword=*/false, TemplateName(this));
+ CommonPtr->InjectedClassNameType =
+ Context.getTemplateSpecializationType(Name, TemplateArgs);
return CommonPtr->InjectedClassNameType;
}
@@ -652,14 +654,14 @@ TemplateTypeParmDecl *TemplateTypeParmDecl::Create(
}
TemplateTypeParmDecl *
-TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
+TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, GlobalDeclID ID) {
return new (C, ID)
TemplateTypeParmDecl(nullptr, SourceLocation(), SourceLocation(), nullptr,
false, false, std::nullopt);
}
TemplateTypeParmDecl *
-TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, unsigned ID,
+TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, GlobalDeclID ID,
bool HasTypeConstraint) {
return new (C, ID,
additionalSizeToAlloc<TypeConstraint>(HasTypeConstraint ? 1 : 0))
@@ -668,23 +670,30 @@ TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, unsigned ID,
}
SourceLocation TemplateTypeParmDecl::getDefaultArgumentLoc() const {
- return hasDefaultArgument()
- ? getDefaultArgumentInfo()->getTypeLoc().getBeginLoc()
- : SourceLocation();
+ return hasDefaultArgument() ? getDefaultArgument().getLocation()
+ : SourceLocation();
}
SourceRange TemplateTypeParmDecl::getSourceRange() const {
if (hasDefaultArgument() && !defaultArgumentWasInherited())
return SourceRange(getBeginLoc(),
- getDefaultArgumentInfo()->getTypeLoc().getEndLoc());
+ getDefaultArgument().getSourceRange().getEnd());
// TypeDecl::getSourceRange returns a range containing name location, which is
// wrong for unnamed template parameters. e.g:
// it will return <[[typename>]] instead of <[[typename]]>
- else if (getDeclName().isEmpty())
+ if (getDeclName().isEmpty())
return SourceRange(getBeginLoc());
return TypeDecl::getSourceRange();
}
+void TemplateTypeParmDecl::setDefaultArgument(
+ const ASTContext &C, const TemplateArgumentLoc &DefArg) {
+ if (DefArg.getArgument().isNull())
+ DefaultArgument.set(nullptr);
+ else
+ DefaultArgument.set(new (C) TemplateArgumentLoc(DefArg));
+}
+
unsigned TemplateTypeParmDecl::getDepth() const {
return getTypeForDecl()->castAs<TemplateTypeParmType>()->getDepth();
}
@@ -715,7 +724,7 @@ void TemplateTypeParmDecl::setTypeConstraint(
NonTypeTemplateParmDecl::NonTypeTemplateParmDecl(
DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, unsigned D,
- unsigned P, IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
+ unsigned P, const IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
ArrayRef<QualType> ExpandedTypes, ArrayRef<TypeSourceInfo *> ExpandedTInfos)
: DeclaratorDecl(NonTypeTemplateParm, DC, IdLoc, Id, T, TInfo, StartLoc),
TemplateParmPosition(D, P), ParameterPack(true),
@@ -730,12 +739,10 @@ NonTypeTemplateParmDecl::NonTypeTemplateParmDecl(
}
}
-NonTypeTemplateParmDecl *
-NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
- SourceLocation StartLoc, SourceLocation IdLoc,
- unsigned D, unsigned P, IdentifierInfo *Id,
- QualType T, bool ParameterPack,
- TypeSourceInfo *TInfo) {
+NonTypeTemplateParmDecl *NonTypeTemplateParmDecl::Create(
+ const ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, unsigned D, unsigned P, const IdentifierInfo *Id,
+ QualType T, bool ParameterPack, TypeSourceInfo *TInfo) {
AutoType *AT =
C.getLangOpts().CPlusPlus20 ? T->getContainedAutoType() : nullptr;
return new (C, DC,
@@ -748,7 +755,7 @@ NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
NonTypeTemplateParmDecl *NonTypeTemplateParmDecl::Create(
const ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
- SourceLocation IdLoc, unsigned D, unsigned P, IdentifierInfo *Id,
+ SourceLocation IdLoc, unsigned D, unsigned P, const IdentifierInfo *Id,
QualType T, TypeSourceInfo *TInfo, ArrayRef<QualType> ExpandedTypes,
ArrayRef<TypeSourceInfo *> ExpandedTInfos) {
AutoType *AT = TInfo->getType()->getContainedAutoType();
@@ -761,7 +768,7 @@ NonTypeTemplateParmDecl *NonTypeTemplateParmDecl::Create(
}
NonTypeTemplateParmDecl *
-NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID,
bool HasTypeConstraint) {
return new (C, ID, additionalSizeToAlloc<std::pair<QualType,
TypeSourceInfo *>,
@@ -772,7 +779,7 @@ NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
}
NonTypeTemplateParmDecl *
-NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID,
unsigned NumExpandedTypes,
bool HasTypeConstraint) {
auto *NTTP =
@@ -789,14 +796,21 @@ NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
SourceRange NonTypeTemplateParmDecl::getSourceRange() const {
if (hasDefaultArgument() && !defaultArgumentWasInherited())
return SourceRange(getOuterLocStart(),
- getDefaultArgument()->getSourceRange().getEnd());
+ getDefaultArgument().getSourceRange().getEnd());
return DeclaratorDecl::getSourceRange();
}
SourceLocation NonTypeTemplateParmDecl::getDefaultArgumentLoc() const {
- return hasDefaultArgument()
- ? getDefaultArgument()->getSourceRange().getBegin()
- : SourceLocation();
+ return hasDefaultArgument() ? getDefaultArgument().getSourceRange().getBegin()
+ : SourceLocation();
+}
+
+void NonTypeTemplateParmDecl::setDefaultArgument(
+ const ASTContext &C, const TemplateArgumentLoc &DefArg) {
+ if (DefArg.getArgument().isNull())
+ DefaultArgument.set(nullptr);
+ else
+ DefaultArgument.set(new (C) TemplateArgumentLoc(DefArg));
}
//===----------------------------------------------------------------------===//
@@ -807,10 +821,10 @@ void TemplateTemplateParmDecl::anchor() {}
TemplateTemplateParmDecl::TemplateTemplateParmDecl(
DeclContext *DC, SourceLocation L, unsigned D, unsigned P,
- IdentifierInfo *Id, TemplateParameterList *Params,
+ IdentifierInfo *Id, bool Typename, TemplateParameterList *Params,
ArrayRef<TemplateParameterList *> Expansions)
: TemplateDecl(TemplateTemplateParm, DC, L, Id, Params),
- TemplateParmPosition(D, P), ParameterPack(true),
+ TemplateParmPosition(D, P), Typename(Typename), ParameterPack(true),
ExpandedParameterPack(true), NumExpandedParams(Expansions.size()) {
if (!Expansions.empty())
std::uninitialized_copy(Expansions.begin(), Expansions.end(),
@@ -821,35 +835,35 @@ TemplateTemplateParmDecl *
TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
SourceLocation L, unsigned D, unsigned P,
bool ParameterPack, IdentifierInfo *Id,
- TemplateParameterList *Params) {
+ bool Typename, TemplateParameterList *Params) {
return new (C, DC) TemplateTemplateParmDecl(DC, L, D, P, ParameterPack, Id,
- Params);
+ Typename, Params);
}
TemplateTemplateParmDecl *
TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
SourceLocation L, unsigned D, unsigned P,
- IdentifierInfo *Id,
+ IdentifierInfo *Id, bool Typename,
TemplateParameterList *Params,
ArrayRef<TemplateParameterList *> Expansions) {
return new (C, DC,
additionalSizeToAlloc<TemplateParameterList *>(Expansions.size()))
- TemplateTemplateParmDecl(DC, L, D, P, Id, Params, Expansions);
+ TemplateTemplateParmDecl(DC, L, D, P, Id, Typename, Params, Expansions);
}
TemplateTemplateParmDecl *
-TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) TemplateTemplateParmDecl(nullptr, SourceLocation(), 0, 0,
- false, nullptr, nullptr);
+ false, nullptr, false, nullptr);
}
TemplateTemplateParmDecl *
-TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID,
unsigned NumExpansions) {
auto *TTP =
new (C, ID, additionalSizeToAlloc<TemplateParameterList *>(NumExpansions))
TemplateTemplateParmDecl(nullptr, SourceLocation(), 0, 0, nullptr,
- nullptr, std::nullopt);
+ false, nullptr, std::nullopt);
TTP->NumExpandedParams = NumExpansions;
return TTP;
}
@@ -871,8 +885,7 @@ void TemplateTemplateParmDecl::setDefaultArgument(
// TemplateArgumentList Implementation
//===----------------------------------------------------------------------===//
TemplateArgumentList::TemplateArgumentList(ArrayRef<TemplateArgument> Args)
- : Arguments(getTrailingObjects<TemplateArgument>()),
- NumArguments(Args.size()) {
+ : NumArguments(Args.size()) {
std::uninitialized_copy(Args.begin(), Args.end(),
getTrailingObjects<TemplateArgument>());
}
@@ -886,7 +899,7 @@ TemplateArgumentList::CreateCopy(ASTContext &Context,
FunctionTemplateSpecializationInfo *FunctionTemplateSpecializationInfo::Create(
ASTContext &C, FunctionDecl *FD, FunctionTemplateDecl *Template,
- TemplateSpecializationKind TSK, const TemplateArgumentList *TemplateArgs,
+ TemplateSpecializationKind TSK, TemplateArgumentList *TemplateArgs,
const TemplateArgumentListInfo *TemplateArgsAsWritten, SourceLocation POI,
MemberSpecializationInfo *MSInfo) {
const ASTTemplateArgumentListInfo *ArgsAsWritten = nullptr;
@@ -952,7 +965,7 @@ ClassTemplateSpecializationDecl::Create(ASTContext &Context, TagKind TK,
ClassTemplateSpecializationDecl *
ClassTemplateSpecializationDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ GlobalDeclID ID) {
auto *Result =
new (C, ID) ClassTemplateSpecializationDecl(C, ClassTemplateSpecialization);
Result->setMayHaveOutOfDateDef(false);
@@ -987,41 +1000,67 @@ ClassTemplateSpecializationDecl::getSpecializedTemplate() const {
SourceRange
ClassTemplateSpecializationDecl::getSourceRange() const {
- if (ExplicitInfo) {
- SourceLocation Begin = getTemplateKeywordLoc();
- if (Begin.isValid()) {
- // Here we have an explicit (partial) specialization or instantiation.
- assert(getSpecializationKind() == TSK_ExplicitSpecialization ||
- getSpecializationKind() == TSK_ExplicitInstantiationDeclaration ||
- getSpecializationKind() == TSK_ExplicitInstantiationDefinition);
- if (getExternLoc().isValid())
- Begin = getExternLoc();
- SourceLocation End = getBraceRange().getEnd();
- if (End.isInvalid())
- End = getTypeAsWritten()->getTypeLoc().getEndLoc();
- return SourceRange(Begin, End);
- }
- // An implicit instantiation of a class template partial specialization
- // uses ExplicitInfo to record the TypeAsWritten, but the source
- // locations should be retrieved from the instantiation pattern.
- using CTPSDecl = ClassTemplatePartialSpecializationDecl;
- auto *ctpsd = const_cast<CTPSDecl *>(cast<CTPSDecl>(this));
- CTPSDecl *inst_from = ctpsd->getInstantiatedFromMember();
- assert(inst_from != nullptr);
- return inst_from->getSourceRange();
- }
- else {
- // No explicit info available.
+ switch (getSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ImplicitInstantiation: {
llvm::PointerUnion<ClassTemplateDecl *,
ClassTemplatePartialSpecializationDecl *>
- inst_from = getInstantiatedFrom();
- if (inst_from.isNull())
- return getSpecializedTemplate()->getSourceRange();
- if (const auto *ctd = inst_from.dyn_cast<ClassTemplateDecl *>())
- return ctd->getSourceRange();
- return inst_from.get<ClassTemplatePartialSpecializationDecl *>()
- ->getSourceRange();
+ Pattern = getSpecializedTemplateOrPartial();
+ assert(!Pattern.isNull() &&
+ "Class template specialization without pattern?");
+ if (const auto *CTPSD =
+ Pattern.dyn_cast<ClassTemplatePartialSpecializationDecl *>())
+ return CTPSD->getSourceRange();
+ return Pattern.get<ClassTemplateDecl *>()->getSourceRange();
}
+ case TSK_ExplicitSpecialization: {
+ SourceRange Range = CXXRecordDecl::getSourceRange();
+ if (const ASTTemplateArgumentListInfo *Args = getTemplateArgsAsWritten();
+ !isThisDeclarationADefinition() && Args)
+ Range.setEnd(Args->getRAngleLoc());
+ return Range;
+ }
+ case TSK_ExplicitInstantiationDeclaration:
+ case TSK_ExplicitInstantiationDefinition: {
+ SourceRange Range = CXXRecordDecl::getSourceRange();
+ if (SourceLocation ExternKW = getExternKeywordLoc(); ExternKW.isValid())
+ Range.setBegin(ExternKW);
+ else if (SourceLocation TemplateKW = getTemplateKeywordLoc();
+ TemplateKW.isValid())
+ Range.setBegin(TemplateKW);
+ if (const ASTTemplateArgumentListInfo *Args = getTemplateArgsAsWritten())
+ Range.setEnd(Args->getRAngleLoc());
+ return Range;
+ }
+ }
+ llvm_unreachable("unhandled template specialization kind");
+}
+
+void ClassTemplateSpecializationDecl::setExternKeywordLoc(SourceLocation Loc) {
+ auto *Info = ExplicitInfo.dyn_cast<ExplicitInstantiationInfo *>();
+ if (!Info) {
+ // Don't allocate if the location is invalid.
+ if (Loc.isInvalid())
+ return;
+ Info = new (getASTContext()) ExplicitInstantiationInfo;
+ Info->TemplateArgsAsWritten = getTemplateArgsAsWritten();
+ ExplicitInfo = Info;
+ }
+ Info->ExternKeywordLoc = Loc;
+}
+
+void ClassTemplateSpecializationDecl::setTemplateKeywordLoc(
+ SourceLocation Loc) {
+ auto *Info = ExplicitInfo.dyn_cast<ExplicitInstantiationInfo *>();
+ if (!Info) {
+ // Don't allocate if the location is invalid.
+ if (Loc.isInvalid())
+ return;
+ Info = new (getASTContext()) ExplicitInstantiationInfo;
+ Info->TemplateArgsAsWritten = getTemplateArgsAsWritten();
+ ExplicitInfo = Info;
+ }
+ Info->TemplateKeywordLoc = Loc;
}
//===----------------------------------------------------------------------===//
@@ -1038,8 +1077,7 @@ ConceptDecl *ConceptDecl::Create(ASTContext &C, DeclContext *DC,
return TD;
}
-ConceptDecl *ConceptDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ConceptDecl *ConceptDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
ConceptDecl *Result = new (C, ID) ConceptDecl(nullptr, SourceLocation(),
DeclarationName(),
nullptr, nullptr);
@@ -1073,7 +1111,7 @@ ImplicitConceptSpecializationDecl *ImplicitConceptSpecializationDecl::Create(
ImplicitConceptSpecializationDecl *
ImplicitConceptSpecializationDecl::CreateDeserialized(
- const ASTContext &C, unsigned ID, unsigned NumTemplateArgs) {
+ const ASTContext &C, GlobalDeclID ID, unsigned NumTemplateArgs) {
return new (C, ID, additionalSizeToAlloc<TemplateArgument>(NumTemplateArgs))
ImplicitConceptSpecializationDecl(EmptyShell{}, NumTemplateArgs);
}
@@ -1090,43 +1128,29 @@ void ImplicitConceptSpecializationDecl::setTemplateArguments(
//===----------------------------------------------------------------------===//
void ClassTemplatePartialSpecializationDecl::anchor() {}
-ClassTemplatePartialSpecializationDecl::
-ClassTemplatePartialSpecializationDecl(ASTContext &Context, TagKind TK,
- DeclContext *DC,
- SourceLocation StartLoc,
- SourceLocation IdLoc,
- TemplateParameterList *Params,
- ClassTemplateDecl *SpecializedTemplate,
- ArrayRef<TemplateArgument> Args,
- const ASTTemplateArgumentListInfo *ArgInfos,
- ClassTemplatePartialSpecializationDecl *PrevDecl)
- : ClassTemplateSpecializationDecl(Context,
- ClassTemplatePartialSpecialization,
- TK, DC, StartLoc, IdLoc,
- SpecializedTemplate, Args, PrevDecl),
- TemplateParams(Params), ArgsAsWritten(ArgInfos),
- InstantiatedFromMember(nullptr, false) {
+ClassTemplatePartialSpecializationDecl::ClassTemplatePartialSpecializationDecl(
+ ASTContext &Context, TagKind TK, DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, TemplateParameterList *Params,
+ ClassTemplateDecl *SpecializedTemplate, ArrayRef<TemplateArgument> Args,
+ ClassTemplatePartialSpecializationDecl *PrevDecl)
+ : ClassTemplateSpecializationDecl(
+ Context, ClassTemplatePartialSpecialization, TK, DC, StartLoc, IdLoc,
+ SpecializedTemplate, Args, PrevDecl),
+ TemplateParams(Params), InstantiatedFromMember(nullptr, false) {
if (AdoptTemplateParameterList(Params, this))
setInvalidDecl();
}
ClassTemplatePartialSpecializationDecl *
-ClassTemplatePartialSpecializationDecl::
-Create(ASTContext &Context, TagKind TK,DeclContext *DC,
- SourceLocation StartLoc, SourceLocation IdLoc,
- TemplateParameterList *Params,
- ClassTemplateDecl *SpecializedTemplate,
- ArrayRef<TemplateArgument> Args,
- const TemplateArgumentListInfo &ArgInfos,
- QualType CanonInjectedType,
- ClassTemplatePartialSpecializationDecl *PrevDecl) {
- const ASTTemplateArgumentListInfo *ASTArgInfos =
- ASTTemplateArgumentListInfo::Create(Context, ArgInfos);
-
- auto *Result = new (Context, DC)
- ClassTemplatePartialSpecializationDecl(Context, TK, DC, StartLoc, IdLoc,
- Params, SpecializedTemplate, Args,
- ASTArgInfos, PrevDecl);
+ClassTemplatePartialSpecializationDecl::Create(
+ ASTContext &Context, TagKind TK, DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, TemplateParameterList *Params,
+ ClassTemplateDecl *SpecializedTemplate, ArrayRef<TemplateArgument> Args,
+ QualType CanonInjectedType,
+ ClassTemplatePartialSpecializationDecl *PrevDecl) {
+ auto *Result = new (Context, DC) ClassTemplatePartialSpecializationDecl(
+ Context, TK, DC, StartLoc, IdLoc, Params, SpecializedTemplate, Args,
+ PrevDecl);
Result->setSpecializationKind(TSK_ExplicitSpecialization);
Result->setMayHaveOutOfDateDef(false);
@@ -1136,12 +1160,24 @@ Create(ASTContext &Context, TagKind TK,DeclContext *DC,
ClassTemplatePartialSpecializationDecl *
ClassTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ GlobalDeclID ID) {
auto *Result = new (C, ID) ClassTemplatePartialSpecializationDecl(C);
Result->setMayHaveOutOfDateDef(false);
return Result;
}
+SourceRange ClassTemplatePartialSpecializationDecl::getSourceRange() const {
+ if (const ClassTemplatePartialSpecializationDecl *MT =
+ getInstantiatedFromMember();
+ MT && !isMemberSpecialization())
+ return MT->getSourceRange();
+ SourceRange Range = ClassTemplateSpecializationDecl::getSourceRange();
+ if (const TemplateParameterList *TPL = getTemplateParameters();
+ TPL && !getNumTemplateParameterLists())
+ Range.setBegin(TPL->getTemplateLoc());
+ return Range;
+}
+
//===----------------------------------------------------------------------===//
// FriendTemplateDecl Implementation
//===----------------------------------------------------------------------===//
@@ -1163,7 +1199,7 @@ FriendTemplateDecl::Create(ASTContext &Context, DeclContext *DC,
}
FriendTemplateDecl *FriendTemplateDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ GlobalDeclID ID) {
return new (C, ID) FriendTemplateDecl(EmptyShell());
}
@@ -1182,8 +1218,8 @@ TypeAliasTemplateDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
return TD;
}
-TypeAliasTemplateDecl *TypeAliasTemplateDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+TypeAliasTemplateDecl *
+TypeAliasTemplateDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
return new (C, ID) TypeAliasTemplateDecl(C, nullptr, SourceLocation(),
DeclarationName(), nullptr, nullptr);
}
@@ -1221,7 +1257,7 @@ VarTemplateDecl *VarTemplateDecl::Create(ASTContext &C, DeclContext *DC,
}
VarTemplateDecl *VarTemplateDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ GlobalDeclID ID) {
return new (C, ID) VarTemplateDecl(C, nullptr, SourceLocation(),
DeclarationName(), nullptr, nullptr);
}
@@ -1343,7 +1379,8 @@ VarTemplateSpecializationDecl *VarTemplateSpecializationDecl::Create(
}
VarTemplateSpecializationDecl *
-VarTemplateSpecializationDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+VarTemplateSpecializationDecl::CreateDeserialized(ASTContext &C,
+ GlobalDeclID ID) {
return new (C, ID)
VarTemplateSpecializationDecl(VarTemplateSpecialization, C);
}
@@ -1373,27 +1410,74 @@ VarTemplateDecl *VarTemplateSpecializationDecl::getSpecializedTemplate() const {
return SpecializedTemplate.get<VarTemplateDecl *>();
}
-void VarTemplateSpecializationDecl::setTemplateArgsInfo(
- const TemplateArgumentListInfo &ArgsInfo) {
- TemplateArgsInfo =
- ASTTemplateArgumentListInfo::Create(getASTContext(), ArgsInfo);
-}
-
-void VarTemplateSpecializationDecl::setTemplateArgsInfo(
- const ASTTemplateArgumentListInfo *ArgsInfo) {
- TemplateArgsInfo =
- ASTTemplateArgumentListInfo::Create(getASTContext(), ArgsInfo);
-}
-
SourceRange VarTemplateSpecializationDecl::getSourceRange() const {
- if (isExplicitSpecialization() && !hasInit()) {
- if (const ASTTemplateArgumentListInfo *Info = getTemplateArgsInfo())
- return SourceRange(getOuterLocStart(), Info->getRAngleLoc());
+ switch (getSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ImplicitInstantiation: {
+ llvm::PointerUnion<VarTemplateDecl *,
+ VarTemplatePartialSpecializationDecl *>
+ Pattern = getSpecializedTemplateOrPartial();
+ assert(!Pattern.isNull() &&
+ "Variable template specialization without pattern?");
+ if (const auto *VTPSD =
+ Pattern.dyn_cast<VarTemplatePartialSpecializationDecl *>())
+ return VTPSD->getSourceRange();
+ VarTemplateDecl *VTD = Pattern.get<VarTemplateDecl *>();
+ if (hasInit()) {
+ if (VarTemplateDecl *Definition = VTD->getDefinition())
+ return Definition->getSourceRange();
+ }
+ return VTD->getCanonicalDecl()->getSourceRange();
+ }
+ case TSK_ExplicitSpecialization: {
+ SourceRange Range = VarDecl::getSourceRange();
+ if (const ASTTemplateArgumentListInfo *Args = getTemplateArgsAsWritten();
+ !hasInit() && Args)
+ Range.setEnd(Args->getRAngleLoc());
+ return Range;
+ }
+ case TSK_ExplicitInstantiationDeclaration:
+ case TSK_ExplicitInstantiationDefinition: {
+ SourceRange Range = VarDecl::getSourceRange();
+ if (SourceLocation ExternKW = getExternKeywordLoc(); ExternKW.isValid())
+ Range.setBegin(ExternKW);
+ else if (SourceLocation TemplateKW = getTemplateKeywordLoc();
+ TemplateKW.isValid())
+ Range.setBegin(TemplateKW);
+ if (const ASTTemplateArgumentListInfo *Args = getTemplateArgsAsWritten())
+ Range.setEnd(Args->getRAngleLoc());
+ return Range;
+ }
+ }
+ llvm_unreachable("unhandled template specialization kind");
+}
+
+void VarTemplateSpecializationDecl::setExternKeywordLoc(SourceLocation Loc) {
+ auto *Info = ExplicitInfo.dyn_cast<ExplicitInstantiationInfo *>();
+ if (!Info) {
+ // Don't allocate if the location is invalid.
+ if (Loc.isInvalid())
+ return;
+ Info = new (getASTContext()) ExplicitInstantiationInfo;
+ Info->TemplateArgsAsWritten = getTemplateArgsAsWritten();
+ ExplicitInfo = Info;
}
- return VarDecl::getSourceRange();
+ Info->ExternKeywordLoc = Loc;
+}
+
+void VarTemplateSpecializationDecl::setTemplateKeywordLoc(SourceLocation Loc) {
+ auto *Info = ExplicitInfo.dyn_cast<ExplicitInstantiationInfo *>();
+ if (!Info) {
+ // Don't allocate if the location is invalid.
+ if (Loc.isInvalid())
+ return;
+ Info = new (getASTContext()) ExplicitInstantiationInfo;
+ Info->TemplateArgsAsWritten = getTemplateArgsAsWritten();
+ ExplicitInfo = Info;
+ }
+ Info->TemplateKeywordLoc = Loc;
}
-
//===----------------------------------------------------------------------===//
// VarTemplatePartialSpecializationDecl Implementation
//===----------------------------------------------------------------------===//
@@ -1404,13 +1488,11 @@ VarTemplatePartialSpecializationDecl::VarTemplatePartialSpecializationDecl(
ASTContext &Context, DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, TemplateParameterList *Params,
VarTemplateDecl *SpecializedTemplate, QualType T, TypeSourceInfo *TInfo,
- StorageClass S, ArrayRef<TemplateArgument> Args,
- const ASTTemplateArgumentListInfo *ArgInfos)
+ StorageClass S, ArrayRef<TemplateArgument> Args)
: VarTemplateSpecializationDecl(VarTemplatePartialSpecialization, Context,
DC, StartLoc, IdLoc, SpecializedTemplate, T,
TInfo, S, Args),
- TemplateParams(Params), ArgsAsWritten(ArgInfos),
- InstantiatedFromMember(nullptr, false) {
+ TemplateParams(Params), InstantiatedFromMember(nullptr, false) {
if (AdoptTemplateParameterList(Params, DC))
setInvalidDecl();
}
@@ -1420,31 +1502,30 @@ VarTemplatePartialSpecializationDecl::Create(
ASTContext &Context, DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, TemplateParameterList *Params,
VarTemplateDecl *SpecializedTemplate, QualType T, TypeSourceInfo *TInfo,
- StorageClass S, ArrayRef<TemplateArgument> Args,
- const TemplateArgumentListInfo &ArgInfos) {
- const ASTTemplateArgumentListInfo *ASTArgInfos
- = ASTTemplateArgumentListInfo::Create(Context, ArgInfos);
-
- auto *Result =
- new (Context, DC) VarTemplatePartialSpecializationDecl(
- Context, DC, StartLoc, IdLoc, Params, SpecializedTemplate, T, TInfo,
- S, Args, ASTArgInfos);
+ StorageClass S, ArrayRef<TemplateArgument> Args) {
+ auto *Result = new (Context, DC) VarTemplatePartialSpecializationDecl(
+ Context, DC, StartLoc, IdLoc, Params, SpecializedTemplate, T, TInfo, S,
+ Args);
Result->setSpecializationKind(TSK_ExplicitSpecialization);
return Result;
}
VarTemplatePartialSpecializationDecl *
VarTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
+ GlobalDeclID ID) {
return new (C, ID) VarTemplatePartialSpecializationDecl(C);
}
SourceRange VarTemplatePartialSpecializationDecl::getSourceRange() const {
- if (isExplicitSpecialization() && !hasInit()) {
- if (const ASTTemplateArgumentListInfo *Info = getTemplateArgsAsWritten())
- return SourceRange(getOuterLocStart(), Info->getRAngleLoc());
- }
- return VarDecl::getSourceRange();
+ if (const VarTemplatePartialSpecializationDecl *MT =
+ getInstantiatedFromMember();
+ MT && !isMemberSpecialization())
+ return MT->getSourceRange();
+ SourceRange Range = VarTemplateSpecializationDecl::getSourceRange();
+ if (const TemplateParameterList *TPL = getTemplateParameters();
+ TPL && !getNumTemplateParameterLists())
+ Range.setBegin(TPL->getTemplateLoc());
+ return Range;
}
static TemplateParameterList *
@@ -1472,7 +1553,7 @@ createMakeIntegerSeqParameterList(const ASTContext &C, DeclContext *DC) {
// template <typename T, ...Ints> class IntSeq
auto *TemplateTemplateParm = TemplateTemplateParmDecl::Create(
C, DC, SourceLocation(), /*Depth=*/0, /*Position=*/0,
- /*ParameterPack=*/false, /*Id=*/nullptr, TPL);
+ /*ParameterPack=*/false, /*Id=*/nullptr, /*Typename=*/false, TPL);
TemplateTemplateParm->setImplicit(true);
// typename T
@@ -1549,7 +1630,7 @@ TemplateParamObjectDecl *TemplateParamObjectDecl::Create(const ASTContext &C,
}
TemplateParamObjectDecl *
-TemplateParamObjectDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+TemplateParamObjectDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) {
auto *TPOD = new (C, ID) TemplateParamObjectDecl(nullptr, QualType(), APValue());
C.addDestruction(&TPOD->Value);
return TPOD;
@@ -1583,6 +1664,10 @@ void TemplateParamObjectDecl::printAsInit(llvm::raw_ostream &OS,
TemplateParameterList *clang::getReplacedTemplateParameterList(Decl *D) {
switch (D->getKind()) {
+ case Decl::Kind::CXXRecord:
+ return cast<CXXRecordDecl>(D)
+ ->getDescribedTemplate()
+ ->getTemplateParameters();
case Decl::Kind::ClassTemplate:
return cast<ClassTemplateDecl>(D)->getTemplateParameters();
case Decl::Kind::ClassTemplateSpecialization: {
diff --git a/contrib/llvm-project/clang/lib/AST/Expr.cpp b/contrib/llvm-project/clang/lib/AST/Expr.cpp
index f1efa98e175e..9d5b8167d0ee 100644
--- a/contrib/llvm-project/clang/lib/AST/Expr.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Expr.cpp
@@ -86,12 +86,12 @@ const Expr *Expr::skipRValueSubobjectAdjustments(
while (true) {
E = E->IgnoreParens();
- if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if (const auto *CE = dyn_cast<CastExpr>(E)) {
if ((CE->getCastKind() == CK_DerivedToBase ||
CE->getCastKind() == CK_UncheckedDerivedToBase) &&
E->getType()->isRecordType()) {
E = CE->getSubExpr();
- auto *Derived =
+ const auto *Derived =
cast<CXXRecordDecl>(E->getType()->castAs<RecordType>()->getDecl());
Adjustments.push_back(SubobjectAdjustment(CE, Derived));
continue;
@@ -101,10 +101,10 @@ const Expr *Expr::skipRValueSubobjectAdjustments(
E = CE->getSubExpr();
continue;
}
- } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ } else if (const auto *ME = dyn_cast<MemberExpr>(E)) {
if (!ME->isArrow()) {
- assert(ME->getBase()->getType()->isRecordType());
- if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ assert(ME->getBase()->getType()->getAsRecordDecl());
+ if (const auto *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
if (!Field->isBitField() && !Field->getType()->isReferenceType()) {
E = ME->getBase();
Adjustments.push_back(SubobjectAdjustment(Field));
@@ -112,12 +112,11 @@ const Expr *Expr::skipRValueSubobjectAdjustments(
}
}
}
- } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ } else if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
if (BO->getOpcode() == BO_PtrMemD) {
assert(BO->getRHS()->isPRValue());
E = BO->getLHS();
- const MemberPointerType *MPT =
- BO->getRHS()->getType()->getAs<MemberPointerType>();
+ const auto *MPT = BO->getRHS()->getType()->getAs<MemberPointerType>();
Adjustments.push_back(SubobjectAdjustment(MPT, BO->getRHS()));
continue;
}
@@ -264,6 +263,17 @@ namespace {
}
}
+QualType Expr::getEnumCoercedType(const ASTContext &Ctx) const {
+ if (isa<EnumType>(getType()))
+ return getType();
+ if (const auto *ECD = getEnumConstantDecl()) {
+ const auto *ED = cast<EnumDecl>(ECD->getDeclContext());
+ if (ED->isCompleteDefinition())
+ return Ctx.getTypeDeclType(ED);
+ }
+ return getType();
+}
+
SourceLocation Expr::getExprLoc() const {
switch (getStmtClass()) {
case Stmt::NoStmtClass: llvm_unreachable("statement without class");
@@ -666,7 +676,8 @@ StringRef PredefinedExpr::getIdentKindName(PredefinedIdentKind IK) {
// FIXME: Maybe this should use DeclPrinter with a special "print predefined
// expr" policy instead.
std::string PredefinedExpr::ComputeName(PredefinedIdentKind IK,
- const Decl *CurrentDecl) {
+ const Decl *CurrentDecl,
+ bool ForceElaboratedPrinting) {
ASTContext &Context = CurrentDecl->getASTContext();
if (IK == PredefinedIdentKind::FuncDName) {
@@ -714,10 +725,21 @@ std::string PredefinedExpr::ComputeName(PredefinedIdentKind IK,
return std::string(Out.str());
}
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurrentDecl)) {
- if (IK != PredefinedIdentKind::PrettyFunction &&
+ const auto &LO = Context.getLangOpts();
+ bool IsFuncOrFunctionInNonMSVCCompatEnv =
+ ((IK == PredefinedIdentKind::Func ||
+ IK == PredefinedIdentKind ::Function) &&
+ !LO.MSVCCompat);
+ bool IsLFunctionInMSVCCommpatEnv =
+ IK == PredefinedIdentKind::LFunction && LO.MSVCCompat;
+ bool IsFuncOrFunctionOrLFunctionOrFuncDName =
+ IK != PredefinedIdentKind::PrettyFunction &&
IK != PredefinedIdentKind::PrettyFunctionNoVirtual &&
IK != PredefinedIdentKind::FuncSig &&
- IK != PredefinedIdentKind::LFuncSig)
+ IK != PredefinedIdentKind::LFuncSig;
+ if ((ForceElaboratedPrinting &&
+ (IsFuncOrFunctionInNonMSVCCompatEnv || IsLFunctionInMSVCCommpatEnv)) ||
+ (!ForceElaboratedPrinting && IsFuncOrFunctionOrLFunctionOrFuncDName))
return FD->getNameAsString();
SmallString<256> Name;
@@ -745,6 +767,8 @@ std::string PredefinedExpr::ComputeName(PredefinedIdentKind IK,
PrintingPolicy Policy(Context.getLangOpts());
PrettyCallbacks PrettyCB(Context.getLangOpts());
Policy.Callbacks = &PrettyCB;
+ if (IK == PredefinedIdentKind::Function && ForceElaboratedPrinting)
+ Policy.SuppressTagKeyword = !LO.MSVCCompat;
std::string Proto;
llvm::raw_string_ostream POut(Proto);
@@ -772,6 +796,12 @@ std::string PredefinedExpr::ComputeName(PredefinedIdentKind IK,
FD->printQualifiedName(POut, Policy);
+ if (IK == PredefinedIdentKind::Function) {
+ POut.flush();
+ Out << Proto;
+ return std::string(Name);
+ }
+
POut << "(";
if (FT) {
for (unsigned i = 0, e = Decl->getNumParams(); i != e; ++i) {
@@ -807,7 +837,7 @@ std::string PredefinedExpr::ComputeName(PredefinedIdentKind IK,
typedef SmallVector<const ClassTemplateSpecializationDecl *, 8> SpecsTy;
SpecsTy Specs;
const DeclContext *Ctx = FD->getDeclContext();
- while (Ctx && isa<NamedDecl>(Ctx)) {
+ while (isa_and_nonnull<NamedDecl>(Ctx)) {
const ClassTemplateSpecializationDecl *Spec
= dyn_cast<ClassTemplateSpecializationDecl>(Ctx);
if (Spec && !Spec->isExplicitSpecialization())
@@ -1682,8 +1712,11 @@ UnaryExprOrTypeTraitExpr::UnaryExprOrTypeTraitExpr(
}
MemberExpr::MemberExpr(Expr *Base, bool IsArrow, SourceLocation OperatorLoc,
- ValueDecl *MemberDecl,
- const DeclarationNameInfo &NameInfo, QualType T,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc, ValueDecl *MemberDecl,
+ DeclAccessPair FoundDecl,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs, QualType T,
ExprValueKind VK, ExprObjectKind OK,
NonOdrUseReason NOUR)
: Expr(MemberExprClass, T, VK, OK), Base(Base), MemberDecl(MemberDecl),
@@ -1691,11 +1724,30 @@ MemberExpr::MemberExpr(Expr *Base, bool IsArrow, SourceLocation OperatorLoc,
assert(!NameInfo.getName() ||
MemberDecl->getDeclName() == NameInfo.getName());
MemberExprBits.IsArrow = IsArrow;
- MemberExprBits.HasQualifierOrFoundDecl = false;
- MemberExprBits.HasTemplateKWAndArgsInfo = false;
+ MemberExprBits.HasQualifier = QualifierLoc.hasQualifier();
+ MemberExprBits.HasFoundDecl =
+ FoundDecl.getDecl() != MemberDecl ||
+ FoundDecl.getAccess() != MemberDecl->getAccess();
+ MemberExprBits.HasTemplateKWAndArgsInfo =
+ TemplateArgs || TemplateKWLoc.isValid();
MemberExprBits.HadMultipleCandidates = false;
MemberExprBits.NonOdrUseReason = NOUR;
MemberExprBits.OperatorLoc = OperatorLoc;
+
+ if (hasQualifier())
+ new (getTrailingObjects<NestedNameSpecifierLoc>())
+ NestedNameSpecifierLoc(QualifierLoc);
+ if (hasFoundDecl())
+ *getTrailingObjects<DeclAccessPair>() = FoundDecl;
+ if (TemplateArgs) {
+ auto Deps = TemplateArgumentDependence::None;
+ getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
+ TemplateKWLoc, *TemplateArgs, getTrailingObjects<TemplateArgumentLoc>(),
+ Deps);
+ } else if (TemplateKWLoc.isValid()) {
+ getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
+ TemplateKWLoc);
+ }
setDependence(computeDependence(this));
}
@@ -1705,48 +1757,20 @@ MemberExpr *MemberExpr::Create(
ValueDecl *MemberDecl, DeclAccessPair FoundDecl,
DeclarationNameInfo NameInfo, const TemplateArgumentListInfo *TemplateArgs,
QualType T, ExprValueKind VK, ExprObjectKind OK, NonOdrUseReason NOUR) {
- bool HasQualOrFound = QualifierLoc || FoundDecl.getDecl() != MemberDecl ||
- FoundDecl.getAccess() != MemberDecl->getAccess();
+ bool HasQualifier = QualifierLoc.hasQualifier();
+ bool HasFoundDecl = FoundDecl.getDecl() != MemberDecl ||
+ FoundDecl.getAccess() != MemberDecl->getAccess();
bool HasTemplateKWAndArgsInfo = TemplateArgs || TemplateKWLoc.isValid();
std::size_t Size =
- totalSizeToAlloc<MemberExprNameQualifier, ASTTemplateKWAndArgsInfo,
- TemplateArgumentLoc>(
- HasQualOrFound ? 1 : 0, HasTemplateKWAndArgsInfo ? 1 : 0,
+ totalSizeToAlloc<NestedNameSpecifierLoc, DeclAccessPair,
+ ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
+ HasQualifier, HasFoundDecl, HasTemplateKWAndArgsInfo,
TemplateArgs ? TemplateArgs->size() : 0);
void *Mem = C.Allocate(Size, alignof(MemberExpr));
- MemberExpr *E = new (Mem) MemberExpr(Base, IsArrow, OperatorLoc, MemberDecl,
- NameInfo, T, VK, OK, NOUR);
-
- if (HasQualOrFound) {
- E->MemberExprBits.HasQualifierOrFoundDecl = true;
-
- MemberExprNameQualifier *NQ =
- E->getTrailingObjects<MemberExprNameQualifier>();
- NQ->QualifierLoc = QualifierLoc;
- NQ->FoundDecl = FoundDecl;
- }
-
- E->MemberExprBits.HasTemplateKWAndArgsInfo =
- TemplateArgs || TemplateKWLoc.isValid();
-
- // FIXME: remove remaining dependence computation to computeDependence().
- auto Deps = E->getDependence();
- if (TemplateArgs) {
- auto TemplateArgDeps = TemplateArgumentDependence::None;
- E->getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
- TemplateKWLoc, *TemplateArgs,
- E->getTrailingObjects<TemplateArgumentLoc>(), TemplateArgDeps);
- for (const TemplateArgumentLoc &ArgLoc : TemplateArgs->arguments()) {
- Deps |= toExprDependence(ArgLoc.getArgument().getDependence());
- }
- } else if (TemplateKWLoc.isValid()) {
- E->getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
- TemplateKWLoc);
- }
- E->setDependence(Deps);
-
- return E;
+ return new (Mem) MemberExpr(Base, IsArrow, OperatorLoc, QualifierLoc,
+ TemplateKWLoc, MemberDecl, FoundDecl, NameInfo,
+ TemplateArgs, T, VK, OK, NOUR);
}
MemberExpr *MemberExpr::CreateEmpty(const ASTContext &Context,
@@ -1755,12 +1779,11 @@ MemberExpr *MemberExpr::CreateEmpty(const ASTContext &Context,
unsigned NumTemplateArgs) {
assert((!NumTemplateArgs || HasTemplateKWAndArgsInfo) &&
"template args but no template arg info?");
- bool HasQualOrFound = HasQualifier || HasFoundDecl;
std::size_t Size =
- totalSizeToAlloc<MemberExprNameQualifier, ASTTemplateKWAndArgsInfo,
- TemplateArgumentLoc>(HasQualOrFound ? 1 : 0,
- HasTemplateKWAndArgsInfo ? 1 : 0,
- NumTemplateArgs);
+ totalSizeToAlloc<NestedNameSpecifierLoc, DeclAccessPair,
+ ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
+ HasQualifier, HasFoundDecl, HasTemplateKWAndArgsInfo,
+ NumTemplateArgs);
void *Mem = Context.Allocate(Size, alignof(MemberExpr));
return new (Mem) MemberExpr(EmptyShell());
}
@@ -1898,6 +1921,7 @@ bool CastExpr::CastConsistency() const {
case CK_FixedPointToIntegral:
case CK_IntegralToFixedPoint:
case CK_MatrixCast:
+ case CK_HLSLVectorTruncation:
assert(!getType()->isBooleanType() && "unheralded conversion to bool");
goto CheckNoBasePath;
@@ -1917,6 +1941,7 @@ bool CastExpr::CastConsistency() const {
case CK_UserDefinedConversion: // operator bool()
case CK_BuiltinFnToFnPtr:
case CK_FixedPointToBoolean:
+ case CK_HLSLArrayRValue:
CheckNoBasePath:
assert(path_empty() && "Cast kind should not have a base path!");
break;
@@ -2019,7 +2044,7 @@ const FieldDecl *CastExpr::getTargetFieldForToUnionCast(const RecordDecl *RD,
for (Field = RD->field_begin(), FieldEnd = RD->field_end();
Field != FieldEnd; ++Field) {
if (Ctx.hasSameUnqualifiedType(Field->getType(), OpType) &&
- !Field->isUnnamedBitfield()) {
+ !Field->isUnnamedBitField()) {
return *Field;
}
}
@@ -2348,6 +2373,17 @@ APValue SourceLocExpr::EvaluateInContext(const ASTContext &Ctx,
llvm_unreachable("unhandled case");
}
+EmbedExpr::EmbedExpr(const ASTContext &Ctx, SourceLocation Loc,
+ EmbedDataStorage *Data, unsigned Begin,
+ unsigned NumOfElements)
+ : Expr(EmbedExprClass, Ctx.IntTy, VK_PRValue, OK_Ordinary),
+ EmbedKeywordLoc(Loc), Ctx(&Ctx), Data(Data), Begin(Begin),
+ NumOfElements(NumOfElements) {
+ setDependence(ExprDependence::None);
+ FakeChildNode = IntegerLiteral::Create(
+ Ctx, llvm::APInt::getZero(Ctx.getTypeSize(getType())), getType(), Loc);
+}
+
InitListExpr::InitListExpr(const ASTContext &C, SourceLocation lbraceloc,
ArrayRef<Expr *> initExprs, SourceLocation rbraceloc)
: Expr(InitListExprClass, QualType(), VK_PRValue, OK_Ordinary),
@@ -3042,7 +3078,7 @@ Expr *Expr::IgnoreParenCasts() {
Expr *Expr::IgnoreConversionOperatorSingleStep() {
if (auto *MCE = dyn_cast<CXXMemberCallExpr>(this)) {
- if (MCE->getMethodDecl() && isa<CXXConversionDecl>(MCE->getMethodDecl()))
+ if (isa_and_nonnull<CXXConversionDecl>(MCE->getMethodDecl()))
return MCE->getImplicitObjectArgument();
}
return this;
@@ -3329,6 +3365,12 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
DIUE->getUpdater()->isConstantInitializer(Ctx, false, Culprit);
}
case InitListExprClass: {
+ // C++ [dcl.init.aggr]p2:
+ // The elements of an aggregate are:
+ // - for an array, the array elements in increasing subscript order, or
+ // - for a class, the direct base classes in declaration order, followed
+ // by the direct non-static data members (11.4) that are not members of
+ // an anonymous union, in declaration order.
const InitListExpr *ILE = cast<InitListExpr>(this);
assert(ILE->isSemanticForm() && "InitListExpr must be in semantic form");
if (ILE->getType()->isArrayType()) {
@@ -3343,13 +3385,26 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
if (ILE->getType()->isRecordType()) {
unsigned ElementNo = 0;
RecordDecl *RD = ILE->getType()->castAs<RecordType>()->getDecl();
+
+ // In C++17, bases were added to the list of members used by aggregate
+ // initialization.
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (unsigned i = 0, e = CXXRD->getNumBases(); i < e; i++) {
+ if (ElementNo < ILE->getNumInits()) {
+ const Expr *Elt = ILE->getInit(ElementNo++);
+ if (!Elt->isConstantInitializer(Ctx, false, Culprit))
+ return false;
+ }
+ }
+ }
+
for (const auto *Field : RD->fields()) {
// If this is a union, skip all the fields that aren't being initialized.
if (RD->isUnion() && ILE->getInitializedFieldInUnion() != Field)
continue;
// Don't emit anonymous bitfields, they just affect layout.
- if (Field->isUnnamedBitfield())
+ if (Field->isUnnamedBitField())
continue;
if (ElementNo < ILE->getNumInits()) {
@@ -3397,6 +3452,11 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
return Exp->getSubExpr()->isConstantInitializer(Ctx, false, Culprit);
break;
}
+ case PackIndexingExprClass: {
+ return cast<PackIndexingExpr>(this)
+ ->getSelectedExpr()
+ ->isConstantInitializer(Ctx, false, Culprit);
+ }
case CXXFunctionalCastExprClass:
case CXXStaticCastExprClass:
case ImplicitCastExprClass:
@@ -3566,9 +3626,11 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case CXXUuidofExprClass:
case OpaqueValueExprClass:
case SourceLocExprClass:
+ case EmbedExprClass:
case ConceptSpecializationExprClass:
case RequiresExprClass:
case SYCLUniqueStableNameExprClass:
+ case PackIndexingExprClass:
// These never have a side-effect.
return false;
@@ -3628,7 +3690,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case ParenExprClass:
case ArraySubscriptExprClass:
case MatrixSubscriptExprClass:
- case OMPArraySectionExprClass:
+ case ArraySectionExprClass:
case OMPArrayShapingExprClass:
case OMPIteratorExprClass:
case MemberExprClass:
@@ -3719,10 +3781,18 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
break;
}
- case CXXTypeidExprClass:
- // typeid might throw if its subexpression is potentially-evaluated, so has
- // side-effects in that case whether or not its subexpression does.
- return cast<CXXTypeidExpr>(this)->isPotentiallyEvaluated();
+ case CXXTypeidExprClass: {
+ const auto *TE = cast<CXXTypeidExpr>(this);
+ if (!TE->isPotentiallyEvaluated())
+ return false;
+
+ // If this type id expression can throw because of a null pointer, that is a
+ // side-effect independent of if the operand has a side-effect
+ if (IncludePossibleEffects && TE->hasNullCheck())
+ return true;
+
+ break;
+ }
case CXXConstructExprClass:
case CXXTemporaryObjectExprClass: {
@@ -3841,9 +3911,14 @@ namespace {
}
void VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *E) {
- if (E->getTemporary()->getDestructor()->isTrivial()) {
- Inherited::VisitStmt(E);
- return;
+ // Destructor of the temporary might be null if destructor declaration
+ // is not valid.
+ if (const CXXDestructorDecl *DtorDecl =
+ E->getTemporary()->getDestructor()) {
+ if (DtorDecl->isTrivial()) {
+ Inherited::VisitStmt(E);
+ return;
+ }
}
NonTrivial = true;
@@ -4071,6 +4146,13 @@ FieldDecl *Expr::getSourceBitField() {
return nullptr;
}
+EnumConstantDecl *Expr::getEnumConstantDecl() {
+ Expr *E = this->IgnoreParenImpCasts();
+ if (auto *DRE = dyn_cast<DeclRefExpr>(E))
+ return dyn_cast<EnumConstantDecl>(DRE->getDecl());
+ return nullptr;
+}
+
bool Expr::refersToVectorElement() const {
// FIXME: Why do we not just look at the ObjectKind here?
const Expr *E = this->IgnoreParens();
@@ -4559,8 +4641,17 @@ SourceRange DesignatedInitExpr::getDesignatorsSourceRange() const {
SourceLocation DesignatedInitExpr::getBeginLoc() const {
auto *DIE = const_cast<DesignatedInitExpr *>(this);
Designator &First = *DIE->getDesignator(0);
- if (First.isFieldDesignator())
- return GNUSyntax ? First.getFieldLoc() : First.getDotLoc();
+ if (First.isFieldDesignator()) {
+ // Skip past implicit designators for anonymous structs/unions, since
+ // these do not have valid source locations.
+ for (unsigned int i = 0; i < DIE->size(); i++) {
+ Designator &Des = *DIE->getDesignator(i);
+ SourceLocation retval = GNUSyntax ? Des.getFieldLoc() : Des.getDotLoc();
+ if (!retval.isValid())
+ continue;
+ return retval;
+ }
+ }
return First.getLBracketLoc();
}
@@ -4992,9 +5083,9 @@ QualType AtomicExpr::getValueType() const {
return T;
}
-QualType OMPArraySectionExpr::getBaseOriginalType(const Expr *Base) {
+QualType ArraySectionExpr::getBaseOriginalType(const Expr *Base) {
unsigned ArraySectionCount = 0;
- while (auto *OASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParens())) {
+ while (auto *OASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParens())) {
Base = OASE->getBase();
++ArraySectionCount;
}
diff --git a/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp b/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp
index e61c11dffd88..45e2badf2ddd 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp
@@ -166,6 +166,53 @@ QualType CXXTypeidExpr::getTypeOperand(ASTContext &Context) const {
Operand.get<TypeSourceInfo *>()->getType().getNonReferenceType(), Quals);
}
+static bool isGLValueFromPointerDeref(const Expr *E) {
+ E = E->IgnoreParens();
+
+ if (const auto *CE = dyn_cast<CastExpr>(E)) {
+ if (!CE->getSubExpr()->isGLValue())
+ return false;
+ return isGLValueFromPointerDeref(CE->getSubExpr());
+ }
+
+ if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
+ return isGLValueFromPointerDeref(OVE->getSourceExpr());
+
+ if (const auto *BO = dyn_cast<BinaryOperator>(E))
+ if (BO->getOpcode() == BO_Comma)
+ return isGLValueFromPointerDeref(BO->getRHS());
+
+ if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(E))
+ return isGLValueFromPointerDeref(ACO->getTrueExpr()) ||
+ isGLValueFromPointerDeref(ACO->getFalseExpr());
+
+ // C++11 [expr.sub]p1:
+ // The expression E1[E2] is identical (by definition) to *((E1)+(E2))
+ if (isa<ArraySubscriptExpr>(E))
+ return true;
+
+ if (const auto *UO = dyn_cast<UnaryOperator>(E))
+ if (UO->getOpcode() == UO_Deref)
+ return true;
+
+ return false;
+}
+
+bool CXXTypeidExpr::hasNullCheck() const {
+ if (!isPotentiallyEvaluated())
+ return false;
+
+ // C++ [expr.typeid]p2:
+ // If the glvalue expression is obtained by applying the unary * operator to
+ // a pointer and the pointer is a null pointer value, the typeid expression
+ // throws the std::bad_typeid exception.
+ //
+ // However, this paragraph's intent is not clear. We choose a very generous
+ // interpretation which implores us to consider comma operators, conditional
+ // operators, parentheses and other such constructs.
+ return isGLValueFromPointerDeref(getExprOperand());
+}
+
QualType CXXUuidofExpr::getTypeOperand(ASTContext &Context) const {
assert(isTypeOperand() && "Cannot call getTypeOperand for __uuidof(expr)");
Qualifiers Quals;
@@ -353,15 +400,15 @@ SourceLocation CXXPseudoDestructorExpr::getEndLoc() const {
UnresolvedLookupExpr::UnresolvedLookupExpr(
const ASTContext &Context, CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc,
- const DeclarationNameInfo &NameInfo, bool RequiresADL, bool Overloaded,
+ const DeclarationNameInfo &NameInfo, bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs, UnresolvedSetIterator Begin,
- UnresolvedSetIterator End, bool KnownDependent)
+ UnresolvedSetIterator End, bool KnownDependent,
+ bool KnownInstantiationDependent)
: OverloadExpr(UnresolvedLookupExprClass, Context, QualifierLoc,
TemplateKWLoc, NameInfo, TemplateArgs, Begin, End,
- KnownDependent, false, false),
+ KnownDependent, KnownInstantiationDependent, false),
NamingClass(NamingClass) {
UnresolvedLookupExprBits.RequiresADL = RequiresADL;
- UnresolvedLookupExprBits.Overloaded = Overloaded;
}
UnresolvedLookupExpr::UnresolvedLookupExpr(EmptyShell Empty,
@@ -373,15 +420,17 @@ UnresolvedLookupExpr::UnresolvedLookupExpr(EmptyShell Empty,
UnresolvedLookupExpr *UnresolvedLookupExpr::Create(
const ASTContext &Context, CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo,
- bool RequiresADL, bool Overloaded, UnresolvedSetIterator Begin,
- UnresolvedSetIterator End) {
+ bool RequiresADL, UnresolvedSetIterator Begin, UnresolvedSetIterator End,
+ bool KnownDependent, bool KnownInstantiationDependent) {
unsigned NumResults = End - Begin;
unsigned Size = totalSizeToAlloc<DeclAccessPair, ASTTemplateKWAndArgsInfo,
TemplateArgumentLoc>(NumResults, 0, 0);
void *Mem = Context.Allocate(Size, alignof(UnresolvedLookupExpr));
- return new (Mem) UnresolvedLookupExpr(Context, NamingClass, QualifierLoc,
- SourceLocation(), NameInfo, RequiresADL,
- Overloaded, nullptr, Begin, End, false);
+ return new (Mem) UnresolvedLookupExpr(
+ Context, NamingClass, QualifierLoc,
+ /*TemplateKWLoc=*/SourceLocation(), NameInfo, RequiresADL,
+ /*TemplateArgs=*/nullptr, Begin, End, KnownDependent,
+ KnownInstantiationDependent);
}
UnresolvedLookupExpr *UnresolvedLookupExpr::Create(
@@ -389,17 +438,18 @@ UnresolvedLookupExpr *UnresolvedLookupExpr::Create(
NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo, bool RequiresADL,
const TemplateArgumentListInfo *Args, UnresolvedSetIterator Begin,
- UnresolvedSetIterator End, bool KnownDependent) {
- assert(Args || TemplateKWLoc.isValid());
+ UnresolvedSetIterator End, bool KnownDependent,
+ bool KnownInstantiationDependent) {
unsigned NumResults = End - Begin;
+ bool HasTemplateKWAndArgsInfo = Args || TemplateKWLoc.isValid();
unsigned NumTemplateArgs = Args ? Args->size() : 0;
- unsigned Size =
- totalSizeToAlloc<DeclAccessPair, ASTTemplateKWAndArgsInfo,
- TemplateArgumentLoc>(NumResults, 1, NumTemplateArgs);
+ unsigned Size = totalSizeToAlloc<DeclAccessPair, ASTTemplateKWAndArgsInfo,
+ TemplateArgumentLoc>(
+ NumResults, HasTemplateKWAndArgsInfo, NumTemplateArgs);
void *Mem = Context.Allocate(Size, alignof(UnresolvedLookupExpr));
return new (Mem) UnresolvedLookupExpr(
Context, NamingClass, QualifierLoc, TemplateKWLoc, NameInfo, RequiresADL,
- /*Overloaded=*/true, Args, Begin, End, KnownDependent);
+ Args, Begin, End, KnownDependent, KnownInstantiationDependent);
}
UnresolvedLookupExpr *UnresolvedLookupExpr::CreateEmpty(
@@ -511,14 +561,14 @@ DependentScopeDeclRefExpr::CreateEmpty(const ASTContext &Context,
}
SourceLocation CXXConstructExpr::getBeginLoc() const {
- if (isa<CXXTemporaryObjectExpr>(this))
- return cast<CXXTemporaryObjectExpr>(this)->getBeginLoc();
+ if (const auto *TOE = dyn_cast<CXXTemporaryObjectExpr>(this))
+ return TOE->getBeginLoc();
return getLocation();
}
SourceLocation CXXConstructExpr::getEndLoc() const {
- if (isa<CXXTemporaryObjectExpr>(this))
- return cast<CXXTemporaryObjectExpr>(this)->getEndLoc();
+ if (const auto *TOE = dyn_cast<CXXTemporaryObjectExpr>(this))
+ return TOE->getEndLoc();
if (ParenOrBraceRange.isValid())
return ParenOrBraceRange.getEnd();
@@ -1665,6 +1715,41 @@ NonTypeTemplateParmDecl *SubstNonTypeTemplateParmExpr::getParameter() const {
getReplacedTemplateParameterList(getAssociatedDecl())->asArray()[Index]);
}
+PackIndexingExpr *PackIndexingExpr::Create(
+ ASTContext &Context, SourceLocation EllipsisLoc, SourceLocation RSquareLoc,
+ Expr *PackIdExpr, Expr *IndexExpr, std::optional<int64_t> Index,
+ ArrayRef<Expr *> SubstitutedExprs, bool ExpandedToEmptyPack) {
+ QualType Type;
+ if (Index && !SubstitutedExprs.empty())
+ Type = SubstitutedExprs[*Index]->getType();
+ else
+ Type = Context.DependentTy;
+
+ void *Storage =
+ Context.Allocate(totalSizeToAlloc<Expr *>(SubstitutedExprs.size()));
+ return new (Storage)
+ PackIndexingExpr(Type, EllipsisLoc, RSquareLoc, PackIdExpr, IndexExpr,
+ SubstitutedExprs, ExpandedToEmptyPack);
+}
+
+NamedDecl *PackIndexingExpr::getPackDecl() const {
+ if (auto *D = dyn_cast<DeclRefExpr>(getPackIdExpression()); D) {
+ NamedDecl *ND = dyn_cast<NamedDecl>(D->getDecl());
+ assert(ND && "exected a named decl");
+ return ND;
+ }
+ assert(false && "invalid declaration kind in pack indexing expression");
+ return nullptr;
+}
+
+PackIndexingExpr *
+PackIndexingExpr::CreateDeserialized(ASTContext &Context,
+ unsigned NumTransformedExprs) {
+ void *Storage =
+ Context.Allocate(totalSizeToAlloc<Expr *>(NumTransformedExprs));
+ return new (Storage) PackIndexingExpr(EmptyShell{});
+}
+
QualType SubstNonTypeTemplateParmExpr::getParameterType(
const ASTContext &Context) const {
// Note that, for a class type NTTP, we will have an lvalue of type 'const
diff --git a/contrib/llvm-project/clang/lib/AST/ExprClassification.cpp b/contrib/llvm-project/clang/lib/AST/ExprClassification.cpp
index ffa7c6802ea6..6482cb6d39ac 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprClassification.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprClassification.cpp
@@ -145,7 +145,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::FunctionParmPackExprClass:
case Expr::MSPropertyRefExprClass:
case Expr::MSPropertySubscriptExprClass:
- case Expr::OMPArraySectionExprClass:
+ case Expr::ArraySectionExprClass:
case Expr::OMPArrayShapingExprClass:
case Expr::OMPIteratorExprClass:
return Cl::CL_LValue;
@@ -204,6 +204,11 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::RequiresExprClass:
return Cl::CL_PRValue;
+ case Expr::EmbedExprClass:
+ // Nominally, this just goes through as a PRValue until we actually expand
+ // it and check it.
+ return Cl::CL_PRValue;
+
// Make HLSL this reference-like
case Expr::CXXThisExprClass:
return Lang.HLSL ? Cl::CL_LValue : Cl::CL_PRValue;
@@ -216,6 +221,14 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
return ClassifyInternal(Ctx,
cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
+ case Expr::PackIndexingExprClass: {
+ // A pack-index-expression always expands to an id-expression.
+ // Consider it as an LValue expression.
+ if (cast<PackIndexingExpr>(E)->isInstantiationDependent())
+ return Cl::CL_LValue;
+ return ClassifyInternal(Ctx, cast<PackIndexingExpr>(E)->getSelectedExpr());
+ }
+
// C, C++98 [expr.sub]p1: The result is an lvalue of type "T".
// C++11 (DR1213): in the case of an array operand, the result is an lvalue
// if that operand is an lvalue and an xvalue otherwise.
diff --git a/contrib/llvm-project/clang/lib/AST/ExprConstShared.h b/contrib/llvm-project/clang/lib/AST/ExprConstShared.h
index a97eac85abc6..2a7088e4e371 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprConstShared.h
+++ b/contrib/llvm-project/clang/lib/AST/ExprConstShared.h
@@ -14,6 +14,9 @@
#ifndef LLVM_CLANG_LIB_AST_EXPRCONSTSHARED_H
#define LLVM_CLANG_LIB_AST_EXPRCONSTSHARED_H
+namespace llvm {
+class APFloat;
+}
namespace clang {
class QualType;
class LangOptions;
@@ -56,4 +59,11 @@ enum class GCCTypeClass {
GCCTypeClass EvaluateBuiltinClassifyType(QualType T,
const LangOptions &LangOpts);
+void HandleComplexComplexMul(llvm::APFloat A, llvm::APFloat B, llvm::APFloat C,
+ llvm::APFloat D, llvm::APFloat &ResR,
+ llvm::APFloat &ResI);
+void HandleComplexComplexDiv(llvm::APFloat A, llvm::APFloat B, llvm::APFloat C,
+ llvm::APFloat D, llvm::APFloat &ResR,
+ llvm::APFloat &ResI);
+
#endif
diff --git a/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp b/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
index edf9b5e2d52b..5e57b5e8bc8f 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
@@ -58,6 +58,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/SaveAndRestore.h"
+#include "llvm/Support/SipHash.h"
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>
@@ -209,7 +210,7 @@ namespace {
IsArray = true;
if (auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
- ArraySize = CAT->getSize().getZExtValue();
+ ArraySize = CAT->getZExtSize();
} else {
assert(I == 0 && "unexpected unsized array designator");
FirstEntryIsUnsizedArray = true;
@@ -240,15 +241,19 @@ namespace {
/// True if the subobject was named in a manner not supported by C++11. Such
/// lvalues can still be folded, but they are not core constant expressions
/// and we cannot perform lvalue-to-rvalue conversions on them.
+ LLVM_PREFERRED_TYPE(bool)
unsigned Invalid : 1;
/// Is this a pointer one past the end of an object?
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsOnePastTheEnd : 1;
/// Indicator of whether the first entry is an unsized array.
+ LLVM_PREFERRED_TYPE(bool)
unsigned FirstEntryIsAnUnsizedArray : 1;
/// Indicator of whether the most-derived object is an array element.
+ LLVM_PREFERRED_TYPE(bool)
unsigned MostDerivedIsArrayElement : 1;
/// The length of the path to the most-derived object of which this is a
@@ -397,7 +402,7 @@ namespace {
// This is a most-derived object.
MostDerivedType = CAT->getElementType();
MostDerivedIsArrayElement = true;
- MostDerivedArraySize = CAT->getSize().getZExtValue();
+ MostDerivedArraySize = CAT->getZExtSize();
MostDerivedPathLength = Entries.size();
}
/// Update this designator to refer to the first element within the array of
@@ -1880,7 +1885,8 @@ static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result,
EvalInfo &Info);
static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result);
static bool EvaluateBuiltinStrLen(const Expr *E, uint64_t &Result,
- EvalInfo &Info);
+ EvalInfo &Info,
+ std::string *StringResult = nullptr);
/// Evaluate an integer or fixed point expression into an APResult.
static bool EvaluateFixedPointOrInteger(const Expr *E, APFixedPoint &Result,
@@ -2037,6 +2043,7 @@ static bool IsNoOpCall(const CallExpr *E) {
unsigned Builtin = E->getBuiltinCallee();
return (Builtin == Builtin::BI__builtin___CFStringMakeConstantString ||
Builtin == Builtin::BI__builtin___NSStringMakeConstantString ||
+ Builtin == Builtin::BI__builtin_ptrauth_sign_constant ||
Builtin == Builtin::BI__builtin_function_start);
}
@@ -2126,7 +2133,7 @@ static bool IsWeakLValue(const LValue &Value) {
static bool isZeroSized(const LValue &Value) {
const ValueDecl *Decl = GetLValueBaseDecl(Value);
- if (Decl && isa<VarDecl>(Decl)) {
+ if (isa_and_nonnull<VarDecl>(Decl)) {
QualType Ty = Decl->getType();
if (Ty->isArrayType())
return Ty->isIncompleteType() ||
@@ -2488,7 +2495,7 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
}
}
for (const auto *I : RD->fields()) {
- if (I->isUnnamedBitfield())
+ if (I->isUnnamedBitField())
continue;
if (!CheckEvaluationResult(CERK, Info, DiagLoc, I->getType(),
@@ -2702,7 +2709,11 @@ static bool checkFloatingPointResult(EvalInfo &Info, const Expr *E,
static bool HandleFloatToFloatCast(EvalInfo &Info, const Expr *E,
QualType SrcType, QualType DestType,
APFloat &Result) {
- assert(isa<CastExpr>(E) || isa<CompoundAssignOperator>(E));
+ assert((isa<CastExpr>(E) || isa<CompoundAssignOperator>(E) ||
+ isa<ConvertVectorExpr>(E)) &&
+ "HandleFloatToFloatCast has been checked with only CastExpr, "
+ "CompoundAssignOperator and ConvertVectorExpr. Please either validate "
+ "the new expression or address the root cause of this usage.");
llvm::RoundingMode RM = getActiveRoundingMode(Info, E);
APFloat::opStatus St;
APFloat Value = Result;
@@ -2774,7 +2785,9 @@ static bool CheckedIntArithmetic(EvalInfo &Info, const Expr *E,
if (Info.checkingForUndefinedBehavior())
Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
diag::warn_integer_constant_overflow)
- << toString(Result, 10) << E->getType() << E->getSourceRange();
+ << toString(Result, 10, Result.isSigned(), /*formatAsCLiteral=*/false,
+ /*UpperCase=*/true, /*InsertSeparators=*/true)
+ << E->getType() << E->getSourceRange();
return HandleOverflow(Info, E, Value, E->getType());
}
return true;
@@ -2826,6 +2839,8 @@ static bool handleIntIntBinOp(EvalInfo &Info, const BinaryOperator *E,
// During constant-folding, a negative shift is an opposite shift. Such
// a shift is not a constant expression.
Info.CCEDiag(E, diag::note_constexpr_negative_shift) << RHS;
+ if (!Info.noteUndefinedBehavior())
+ return false;
RHS = -RHS;
goto shift_right;
}
@@ -2836,15 +2851,22 @@ static bool handleIntIntBinOp(EvalInfo &Info, const BinaryOperator *E,
if (SA != RHS) {
Info.CCEDiag(E, diag::note_constexpr_large_shift)
<< RHS << E->getType() << LHS.getBitWidth();
+ if (!Info.noteUndefinedBehavior())
+ return false;
} else if (LHS.isSigned() && !Info.getLangOpts().CPlusPlus20) {
// C++11 [expr.shift]p2: A signed left shift must have a non-negative
// operand, and must not overflow the corresponding unsigned type.
// C++2a [expr.shift]p2: E1 << E2 is the unique value congruent to
// E1 x 2^E2 module 2^N.
- if (LHS.isNegative())
+ if (LHS.isNegative()) {
Info.CCEDiag(E, diag::note_constexpr_lshift_of_negative) << LHS;
- else if (LHS.countl_zero() < SA)
+ if (!Info.noteUndefinedBehavior())
+ return false;
+ } else if (LHS.countl_zero() < SA) {
Info.CCEDiag(E, diag::note_constexpr_lshift_discards);
+ if (!Info.noteUndefinedBehavior())
+ return false;
+ }
}
Result = LHS << SA;
return true;
@@ -2859,6 +2881,8 @@ static bool handleIntIntBinOp(EvalInfo &Info, const BinaryOperator *E,
// During constant-folding, a negative shift is an opposite shift. Such a
// shift is not a constant expression.
Info.CCEDiag(E, diag::note_constexpr_negative_shift) << RHS;
+ if (!Info.noteUndefinedBehavior())
+ return false;
RHS = -RHS;
goto shift_left;
}
@@ -2866,9 +2890,13 @@ static bool handleIntIntBinOp(EvalInfo &Info, const BinaryOperator *E,
// C++11 [expr.shift]p1: Shift width must be less than the bit width of the
// shifted type.
unsigned SA = (unsigned) RHS.getLimitedValue(LHS.getBitWidth()-1);
- if (SA != RHS)
+ if (SA != RHS) {
Info.CCEDiag(E, diag::note_constexpr_large_shift)
<< RHS << E->getType() << LHS.getBitWidth();
+ if (!Info.noteUndefinedBehavior())
+ return false;
+ }
+
Result = LHS >> SA;
return true;
}
@@ -3470,7 +3498,7 @@ static void expandStringLiteral(EvalInfo &Info, const StringLiteral *S,
QualType CharType = CAT->getElementType();
assert(CharType->isIntegerType() && "unexpected character type");
- unsigned Elts = CAT->getSize().getZExtValue();
+ unsigned Elts = CAT->getZExtSize();
Result = APValue(APValue::UninitArray(),
std::min(S->getLength(), Elts), Elts);
APSInt Value(Info.Ctx.getTypeSize(CharType),
@@ -3523,7 +3551,7 @@ static bool isReadByLvalueToRvalueConversion(const CXXRecordDecl *RD) {
return false;
for (auto *Field : RD->fields())
- if (!Field->isUnnamedBitfield() &&
+ if (!Field->isUnnamedBitField() &&
isReadByLvalueToRvalueConversion(Field->getType()))
return true;
@@ -3613,7 +3641,7 @@ static bool CheckArraySize(EvalInfo &Info, const ConstantArrayType *CAT,
SourceLocation CallLoc = {}) {
return Info.CheckArraySize(
CAT->getSizeExpr() ? CAT->getSizeExpr()->getBeginLoc() : CallLoc,
- CAT->getNumAddressingBits(Info.Ctx), CAT->getSize().getZExtValue(),
+ CAT->getNumAddressingBits(Info.Ctx), CAT->getZExtSize(),
/*Diag=*/true);
}
@@ -4127,6 +4155,10 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
}
bool IsConstant = BaseType.isConstant(Info.Ctx);
+ bool ConstexprVar = false;
+ if (const auto *VD = dyn_cast_if_present<VarDecl>(
+ Info.EvaluatingDecl.dyn_cast<const ValueDecl *>()))
+ ConstexprVar = VD->isConstexpr();
// Unless we're looking at a local variable or argument in a constexpr call,
// the variable we're reading must be const.
@@ -4146,6 +4178,9 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
return CompleteObject();
} else if (VD->isConstexpr()) {
// OK, we can read this variable.
+ } else if (Info.getLangOpts().C23 && ConstexprVar) {
+ Info.FFDiag(E);
+ return CompleteObject();
} else if (BaseType->isIntegralOrEnumerationType()) {
if (!IsConstant) {
if (!IsAccess)
@@ -4885,7 +4920,7 @@ static bool handleDefaultInitValue(QualType T, APValue &Result) {
handleDefaultInitValue(I->getType(), Result.getStructBase(Index));
for (const auto *I : RD->fields()) {
- if (I->isUnnamedBitfield())
+ if (I->isUnnamedBitField())
continue;
Success &= handleDefaultInitValue(
I->getType(), Result.getStructField(I->getFieldIndex()));
@@ -4895,7 +4930,7 @@ static bool handleDefaultInitValue(QualType T, APValue &Result) {
if (auto *AT =
dyn_cast_or_null<ConstantArrayType>(T->getAsArrayTypeUnsafe())) {
- Result = APValue(APValue::UninitArray(), 0, AT->getSize().getZExtValue());
+ Result = APValue(APValue::UninitArray(), 0, AT->getZExtSize());
if (Result.hasArrayFiller())
Success &=
handleDefaultInitValue(AT->getElementType(), Result.getArrayFiller());
@@ -5569,6 +5604,32 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
MSConstexprContextRAII ConstexprContext(
*Info.CurrentCall, hasSpecificAttr<MSConstexprAttr>(AS->getAttrs()) &&
isa<ReturnStmt>(SS));
+
+ auto LO = Info.getCtx().getLangOpts();
+ if (LO.CXXAssumptions && !LO.MSVCCompat) {
+ for (auto *Attr : AS->getAttrs()) {
+ auto *AA = dyn_cast<CXXAssumeAttr>(Attr);
+ if (!AA)
+ continue;
+
+ auto *Assumption = AA->getAssumption();
+ if (Assumption->isValueDependent())
+ return ESR_Failed;
+
+ if (Assumption->HasSideEffects(Info.getCtx()))
+ continue;
+
+ bool Value;
+ if (!EvaluateAsBooleanCondition(Assumption, Value, Info))
+ return ESR_Failed;
+ if (!Value) {
+ Info.CCEDiag(Assumption->getExprLoc(),
+ diag::note_constexpr_assumption_failed);
+ return ESR_Failed;
+ }
+ }
+ }
+
return EvaluateStmt(Result, Info, SS, Case);
}
@@ -6397,7 +6458,7 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
// Default-initialize any fields with no explicit initializer.
for (; !declaresSameEntity(*FieldIt, FD); ++FieldIt) {
assert(FieldIt != RD->field_end() && "missing field?");
- if (!FieldIt->isUnnamedBitfield())
+ if (!FieldIt->isUnnamedBitField())
Success &= handleDefaultInitValue(
FieldIt->getType(),
Result.getStructField(FieldIt->getFieldIndex()));
@@ -6417,7 +6478,7 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
// Non-virtual base classes are initialized in the order in the class
// definition. We have already checked for virtual base classes.
assert(!BaseIt->isVirtual() && "virtual base for literal type");
- assert(Info.Ctx.hasSameType(BaseIt->getType(), BaseType) &&
+ assert(Info.Ctx.hasSameUnqualifiedType(BaseIt->getType(), BaseType) &&
"base class initializers not in expected order");
++BaseIt;
#endif
@@ -6507,7 +6568,7 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
// Default-initialize any remaining fields.
if (!RD->isUnion()) {
for (; FieldIt != RD->field_end(); ++FieldIt) {
- if (!FieldIt->isUnnamedBitfield())
+ if (!FieldIt->isUnnamedBitField())
Success &= handleDefaultInitValue(
FieldIt->getType(),
Result.getStructField(FieldIt->getFieldIndex()));
@@ -6556,7 +6617,7 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceRange CallRange,
// For arrays, destroy elements right-to-left.
if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(T)) {
- uint64_t Size = CAT->getSize().getZExtValue();
+ uint64_t Size = CAT->getZExtSize();
QualType ElemT = CAT->getElementType();
if (!CheckArraySize(Info, CAT, CallRange.getBegin()))
@@ -6669,7 +6730,7 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceRange CallRange,
// fields first and then walk them backwards.
SmallVector<FieldDecl*, 16> Fields(RD->fields());
for (const FieldDecl *FD : llvm::reverse(Fields)) {
- if (FD->isUnnamedBitfield())
+ if (FD->isUnnamedBitField())
continue;
LValue Subobject = This;
@@ -7308,9 +7369,6 @@ class BufferToAPValueConverter {
for (size_t I = 0, E = CXXRD->getNumBases(); I != E; ++I) {
const CXXBaseSpecifier &BS = CXXRD->bases_begin()[I];
CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
- if (BaseDecl->isEmpty() ||
- Info.Ctx.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
- continue;
std::optional<APValue> SubObj = visitType(
BS.getType(), Layout.getBaseClassOffset(BaseDecl) + Offset);
@@ -7360,7 +7418,7 @@ class BufferToAPValueConverter {
}
std::optional<APValue> visit(const ConstantArrayType *Ty, CharUnits Offset) {
- size_t Size = Ty->getSize().getLimitedValue();
+ size_t Size = Ty->getLimitedSize();
CharUnits ElementWidth = Info.Ctx.getTypeSizeInChars(Ty->getElementType());
APValue ArrayValue(APValue::UninitArray(), Size, Size);
@@ -7687,6 +7745,11 @@ public:
return Error(E);
}
+ bool VisitEmbedExpr(const EmbedExpr *E) {
+ const auto It = E->begin();
+ return StmtVisitorTy::Visit(*It);
+ }
+
bool VisitPredefinedExpr(const PredefinedExpr *E) {
return StmtVisitorTy::Visit(E->getFunctionName());
}
@@ -8006,7 +8069,8 @@ public:
assert(CorrespondingCallOpSpecialization &&
"We must always have a function call operator specialization "
"that corresponds to our static invoker specialization");
- FD = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
+ assert(isa<CXXMethodDecl>(CorrespondingCallOpSpecialization));
+ FD = CorrespondingCallOpSpecialization;
} else
FD = LambdaCallOp;
} else if (FD->isReplaceableGlobalAllocationFunction()) {
@@ -8259,6 +8323,10 @@ public:
llvm_unreachable("Return from function from the loop above.");
}
+ bool VisitPackIndexingExpr(const PackIndexingExpr *E) {
+ return StmtVisitorTy::Visit(E->getSelectedExpr());
+ }
+
/// Visit a value which is evaluated, but whose value is ignored.
void VisitIgnoredValue(const Expr *E) {
EvaluateIgnoredValue(Info, E);
@@ -8476,6 +8544,53 @@ public:
};
} // end anonymous namespace
+/// Get an lvalue to a field of a lambda's closure type.
+static bool HandleLambdaCapture(EvalInfo &Info, const Expr *E, LValue &Result,
+ const CXXMethodDecl *MD, const FieldDecl *FD,
+ bool LValueToRValueConversion) {
+ // Static lambda function call operators can't have captures. We already
+ // diagnosed this, so bail out here.
+ if (MD->isStatic()) {
+ assert(Info.CurrentCall->This == nullptr &&
+ "This should not be set for a static call operator");
+ return false;
+ }
+
+ // Start with 'Result' referring to the complete closure object...
+ if (MD->isExplicitObjectMemberFunction()) {
+ // Self may be passed by reference or by value.
+ const ParmVarDecl *Self = MD->getParamDecl(0);
+ if (Self->getType()->isReferenceType()) {
+ APValue *RefValue = Info.getParamSlot(Info.CurrentCall->Arguments, Self);
+ Result.setFrom(Info.Ctx, *RefValue);
+ } else {
+ const ParmVarDecl *VD = Info.CurrentCall->Arguments.getOrigParam(Self);
+ CallStackFrame *Frame =
+ Info.getCallFrameAndDepth(Info.CurrentCall->Arguments.CallIndex)
+ .first;
+ unsigned Version = Info.CurrentCall->Arguments.Version;
+ Result.set({VD, Frame->Index, Version});
+ }
+ } else
+ Result = *Info.CurrentCall->This;
+
+ // ... then update it to refer to the field of the closure object
+ // that represents the capture.
+ if (!HandleLValueMember(Info, E, Result, FD))
+ return false;
+
+ // And if the field is of reference type (or if we captured '*this' by
+ // reference), update 'Result' to refer to what
+ // the field refers to.
+ if (LValueToRValueConversion) {
+ APValue RVal;
+ if (!handleLValueToRValueConversion(Info, E, FD->getType(), Result, RVal))
+ return false;
+ Result.setFrom(Info.Ctx, RVal);
+ }
+ return true;
+}
+
/// Evaluate an expression as an lvalue. This can be legitimately called on
/// expressions which are not glvalues, in three cases:
/// * function designators in C, and
@@ -8520,37 +8635,8 @@ bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) {
if (auto *FD = Info.CurrentCall->LambdaCaptureFields.lookup(VD)) {
const auto *MD = cast<CXXMethodDecl>(Info.CurrentCall->Callee);
-
- // Static lambda function call operators can't have captures. We already
- // diagnosed this, so bail out here.
- if (MD->isStatic()) {
- assert(Info.CurrentCall->This == nullptr &&
- "This should not be set for a static call operator");
- return false;
- }
-
- // Start with 'Result' referring to the complete closure object...
- if (MD->isExplicitObjectMemberFunction()) {
- APValue *RefValue =
- Info.getParamSlot(Info.CurrentCall->Arguments, MD->getParamDecl(0));
- Result.setFrom(Info.Ctx, *RefValue);
- } else
- Result = *Info.CurrentCall->This;
-
- // ... then update it to refer to the field of the closure object
- // that represents the capture.
- if (!HandleLValueMember(Info, E, Result, FD))
- return false;
- // And if the field is of reference type, update 'Result' to refer to what
- // the field refers to.
- if (FD->getType()->isReferenceType()) {
- APValue RVal;
- if (!handleLValueToRValueConversion(Info, E, FD->getType(), Result,
- RVal))
- return false;
- Result.setFrom(Info.Ctx, RVal);
- }
- return true;
+ return HandleLambdaCapture(Info, E, Result, MD, FD,
+ FD->getType()->isReferenceType());
}
}
@@ -9028,45 +9114,46 @@ public:
return Error(E);
}
bool VisitCXXThisExpr(const CXXThisExpr *E) {
- // Can't look at 'this' when checking a potential constant expression.
- if (Info.checkingPotentialConstantExpression())
- return false;
- if (!Info.CurrentCall->This) {
+ auto DiagnoseInvalidUseOfThis = [&] {
if (Info.getLangOpts().CPlusPlus11)
Info.FFDiag(E, diag::note_constexpr_this) << E->isImplicit();
else
Info.FFDiag(E);
+ };
+
+ // Can't look at 'this' when checking a potential constant expression.
+ if (Info.checkingPotentialConstantExpression())
return false;
+
+ bool IsExplicitLambda =
+ isLambdaCallWithExplicitObjectParameter(Info.CurrentCall->Callee);
+ if (!IsExplicitLambda) {
+ if (!Info.CurrentCall->This) {
+ DiagnoseInvalidUseOfThis();
+ return false;
+ }
+
+ Result = *Info.CurrentCall->This;
}
- Result = *Info.CurrentCall->This;
if (isLambdaCallOperator(Info.CurrentCall->Callee)) {
// Ensure we actually have captured 'this'. If something was wrong with
// 'this' capture, the error would have been previously reported.
// Otherwise we can be inside of a default initialization of an object
// declared by lambda's body, so no need to return false.
- if (!Info.CurrentCall->LambdaThisCaptureField)
- return true;
-
- // If we have captured 'this', the 'this' expression refers
- // to the enclosing '*this' object (either by value or reference) which is
- // either copied into the closure object's field that represents the
- // '*this' or refers to '*this'.
- // Update 'Result' to refer to the data member/field of the closure object
- // that represents the '*this' capture.
- if (!HandleLValueMember(Info, E, Result,
- Info.CurrentCall->LambdaThisCaptureField))
- return false;
- // If we captured '*this' by reference, replace the field with its referent.
- if (Info.CurrentCall->LambdaThisCaptureField->getType()
- ->isPointerType()) {
- APValue RVal;
- if (!handleLValueToRValueConversion(Info, E, E->getType(), Result,
- RVal))
+ if (!Info.CurrentCall->LambdaThisCaptureField) {
+ if (IsExplicitLambda && !Info.CurrentCall->This) {
+ DiagnoseInvalidUseOfThis();
return false;
+ }
- Result.setFrom(Info.Ctx, RVal);
+ return true;
}
+
+ const auto *MD = cast<CXXMethodDecl>(Info.CurrentCall->Callee);
+ return HandleLambdaCapture(
+ Info, E, Result, MD, Info.CurrentCall->LambdaThisCaptureField,
+ Info.CurrentCall->LambdaThisCaptureField->getType()->isPointerType());
}
return true;
}
@@ -9081,6 +9168,11 @@ public:
return true;
}
+ bool VisitEmbedExpr(const EmbedExpr *E) {
+ llvm::report_fatal_error("Not yet implemented for ExprConstant.cpp");
+ return true;
+ }
+
bool VisitSYCLUniqueStableNameExpr(const SYCLUniqueStableNameExpr *E) {
std::string ResultStr = E->ComputeName(Info.Ctx);
@@ -9177,9 +9269,10 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
bool HasValidResult = !Result.InvalidBase && !Result.Designator.Invalid &&
!Result.IsNullPtr;
bool VoidPtrCastMaybeOK =
- HasValidResult &&
- Info.Ctx.hasSameUnqualifiedType(Result.Designator.getType(Info.Ctx),
- E->getType()->getPointeeType());
+ Result.IsNullPtr ||
+ (HasValidResult &&
+ Info.Ctx.hasSimilarType(Result.Designator.getType(Info.Ctx),
+ E->getType()->getPointeeType()));
// 1. We'll allow it in std::allocator::allocate, and anything which that
// calls.
// 2. HACK 2022-03-28: Work around an issue with libstdc++'s
@@ -9193,7 +9286,8 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
Info.getLangOpts().CPlusPlus26)) {
// Permitted.
} else {
- if (SubExpr->getType()->isVoidPointerType()) {
+ if (SubExpr->getType()->isVoidPointerType() &&
+ Info.getLangOpts().CPlusPlus) {
if (HasValidResult)
CCEDiag(E, diag::note_constexpr_invalid_void_star_cast)
<< SubExpr->getType() << Info.getLangOpts().CPlusPlus26
@@ -9259,6 +9353,13 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
Result.IsNullPtr = false;
return true;
} else {
+ // In rare instances, the value isn't an lvalue.
+ // For example, when the value is the difference between the addresses of
+ // two labels. We reject that as a constant expression because we can't
+ // compute a valid offset to convert into a pointer.
+ if (!Value.isLValue())
+ return false;
+
// Cast is of an lvalue, no need to change value.
Result.setFrom(Info.Ctx, Value);
return true;
@@ -9890,7 +9991,7 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
assert(CAT && "unexpected type for array initializer");
unsigned Bits =
- std::max(CAT->getSize().getBitWidth(), ArrayBound.getBitWidth());
+ std::max(CAT->getSizeBitWidth(), ArrayBound.getBitWidth());
llvm::APInt InitBound = CAT->getSize().zext(Bits);
llvm::APInt AllocBound = ArrayBound.zext(Bits);
if (InitBound.ugt(AllocBound)) {
@@ -10159,7 +10260,7 @@ static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
for (const auto *I : RD->fields()) {
// -- if T is a reference type, no initialization is performed.
- if (I->isUnnamedBitfield() || I->getType()->isReferenceType())
+ if (I->isUnnamedBitField() || I->getType()->isReferenceType())
continue;
LValue Subobject = This;
@@ -10182,7 +10283,7 @@ bool RecordExprEvaluator::ZeroInitialization(const Expr *E, QualType T) {
// C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the
// object's first non-static named data member is zero-initialized
RecordDecl::field_iterator I = RD->field_begin();
- while (I != RD->field_end() && (*I)->isUnnamedBitfield())
+ while (I != RD->field_end() && (*I)->isUnnamedBitField())
++I;
if (I == RD->field_end()) {
Result = APValue((const FieldDecl*)nullptr);
@@ -10329,7 +10430,7 @@ bool RecordExprEvaluator::VisitCXXParenListOrInitListExpr(
for (const auto *Field : RD->fields()) {
// Anonymous bit-fields are not considered members of the class for
// purposes of aggregate initialization.
- if (Field->isUnnamedBitfield())
+ if (Field->isUnnamedBitField())
continue;
LValue Subobject = This;
@@ -10349,7 +10450,7 @@ bool RecordExprEvaluator::VisitCXXParenListOrInitListExpr(
if (Field->getType()->isIncompleteArrayType()) {
if (auto *CAT = Info.Ctx.getAsConstantArrayType(Init->getType())) {
- if (!CAT->getSize().isZero()) {
+ if (!CAT->isZeroSize()) {
// Bail out for now. This might sort of "work", but the rest of the
// code isn't really prepared to handle it.
Info.FFDiag(Init, diag::note_constexpr_unsupported_flexible_array);
@@ -10462,48 +10563,37 @@ bool RecordExprEvaluator::VisitCXXStdInitializerListExpr(
// Get a pointer to the first element of the array.
Array.addArray(Info, E, ArrayType);
- auto InvalidType = [&] {
- Info.FFDiag(E, diag::note_constexpr_unsupported_layout)
- << E->getType();
- return false;
- };
-
- // FIXME: Perform the checks on the field types in SemaInit.
- RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
- RecordDecl::field_iterator Field = Record->field_begin();
- if (Field == Record->field_end())
- return InvalidType();
-
- // Start pointer.
- if (!Field->getType()->isPointerType() ||
- !Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
- ArrayType->getElementType()))
- return InvalidType();
-
// FIXME: What if the initializer_list type has base classes, etc?
Result = APValue(APValue::UninitStruct(), 0, 2);
Array.moveInto(Result.getStructField(0));
- if (++Field == Record->field_end())
- return InvalidType();
-
- if (Field->getType()->isPointerType() &&
- Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
- ArrayType->getElementType())) {
+ RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl::field_iterator Field = Record->field_begin();
+ assert(Field != Record->field_end() &&
+ Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
+ ArrayType->getElementType()) &&
+ "Expected std::initializer_list first field to be const E *");
+ ++Field;
+ assert(Field != Record->field_end() &&
+ "Expected std::initializer_list to have two fields");
+
+ if (Info.Ctx.hasSameType(Field->getType(), Info.Ctx.getSizeType())) {
+ // Length.
+ Result.getStructField(1) = APValue(APSInt(ArrayType->getSize()));
+ } else {
// End pointer.
+ assert(Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
+ ArrayType->getElementType()) &&
+ "Expected std::initializer_list second field to be const E *");
if (!HandleLValueArrayAdjustment(Info, E, Array,
ArrayType->getElementType(),
- ArrayType->getSize().getZExtValue()))
+ ArrayType->getZExtSize()))
return false;
Array.moveInto(Result.getStructField(1));
- } else if (Info.Ctx.hasSameType(Field->getType(), Info.Ctx.getSizeType()))
- // Length.
- Result.getStructField(1) = APValue(APSInt(ArrayType->getSize()));
- else
- return InvalidType();
+ }
- if (++Field != Record->field_end())
- return InvalidType();
+ assert(++Field == Record->field_end() &&
+ "Expected std::initializer_list to only have two fields");
return true;
}
@@ -10648,8 +10738,11 @@ namespace {
bool VisitUnaryImag(const UnaryOperator *E);
bool VisitBinaryOperator(const BinaryOperator *E);
bool VisitUnaryOperator(const UnaryOperator *E);
+ bool VisitConvertVectorExpr(const ConvertVectorExpr *E);
+ bool VisitShuffleVectorExpr(const ShuffleVectorExpr *E);
+
// FIXME: Missing: conditional operator (for GNU
- // conditional select), shufflevector, ExtVectorElementExpr
+ // conditional select), ExtVectorElementExpr
};
} // end anonymous namespace
@@ -10900,6 +10993,122 @@ bool VectorExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
return Success(APValue(ResultElements.data(), ResultElements.size()), E);
}
+static bool handleVectorElementCast(EvalInfo &Info, const FPOptions FPO,
+ const Expr *E, QualType SourceTy,
+ QualType DestTy, APValue const &Original,
+ APValue &Result) {
+ if (SourceTy->isIntegerType()) {
+ if (DestTy->isRealFloatingType()) {
+ Result = APValue(APFloat(0.0));
+ return HandleIntToFloatCast(Info, E, FPO, SourceTy, Original.getInt(),
+ DestTy, Result.getFloat());
+ }
+ if (DestTy->isIntegerType()) {
+ Result = APValue(
+ HandleIntToIntCast(Info, E, DestTy, SourceTy, Original.getInt()));
+ return true;
+ }
+ } else if (SourceTy->isRealFloatingType()) {
+ if (DestTy->isRealFloatingType()) {
+ Result = Original;
+ return HandleFloatToFloatCast(Info, E, SourceTy, DestTy,
+ Result.getFloat());
+ }
+ if (DestTy->isIntegerType()) {
+ Result = APValue(APSInt());
+ return HandleFloatToIntCast(Info, E, SourceTy, Original.getFloat(),
+ DestTy, Result.getInt());
+ }
+ }
+
+ Info.FFDiag(E, diag::err_convertvector_constexpr_unsupported_vector_cast)
+ << SourceTy << DestTy;
+ return false;
+}
+
+bool VectorExprEvaluator::VisitConvertVectorExpr(const ConvertVectorExpr *E) {
+ APValue Source;
+ QualType SourceVecType = E->getSrcExpr()->getType();
+ if (!EvaluateAsRValue(Info, E->getSrcExpr(), Source))
+ return false;
+
+ QualType DestTy = E->getType()->castAs<VectorType>()->getElementType();
+ QualType SourceTy = SourceVecType->castAs<VectorType>()->getElementType();
+
+ const FPOptions FPO = E->getFPFeaturesInEffect(Info.Ctx.getLangOpts());
+
+ auto SourceLen = Source.getVectorLength();
+ SmallVector<APValue, 4> ResultElements;
+ ResultElements.reserve(SourceLen);
+ for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
+ APValue Elt;
+ if (!handleVectorElementCast(Info, FPO, E, SourceTy, DestTy,
+ Source.getVectorElt(EltNum), Elt))
+ return false;
+ ResultElements.push_back(std::move(Elt));
+ }
+
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+}
+
+static bool handleVectorShuffle(EvalInfo &Info, const ShuffleVectorExpr *E,
+ QualType ElemType, APValue const &VecVal1,
+ APValue const &VecVal2, unsigned EltNum,
+ APValue &Result) {
+ unsigned const TotalElementsInInputVector1 = VecVal1.getVectorLength();
+ unsigned const TotalElementsInInputVector2 = VecVal2.getVectorLength();
+
+ APSInt IndexVal = E->getShuffleMaskIdx(Info.Ctx, EltNum);
+ int64_t index = IndexVal.getExtValue();
+ // The spec says that -1 should be treated as undef for optimizations,
+ // but in constexpr we'd have to produce an APValue::Indeterminate,
+ // which is prohibited from being a top-level constant value. Emit a
+ // diagnostic instead.
+ if (index == -1) {
+ Info.FFDiag(
+ E, diag::err_shufflevector_minus_one_is_undefined_behavior_constexpr)
+ << EltNum;
+ return false;
+ }
+
+ if (index < 0 ||
+ index >= TotalElementsInInputVector1 + TotalElementsInInputVector2)
+ llvm_unreachable("Out of bounds shuffle index");
+
+ if (index >= TotalElementsInInputVector1)
+ Result = VecVal2.getVectorElt(index - TotalElementsInInputVector1);
+ else
+ Result = VecVal1.getVectorElt(index);
+ return true;
+}
+
+bool VectorExprEvaluator::VisitShuffleVectorExpr(const ShuffleVectorExpr *E) {
+ APValue VecVal1;
+ const Expr *Vec1 = E->getExpr(0);
+ if (!EvaluateAsRValue(Info, Vec1, VecVal1))
+ return false;
+ APValue VecVal2;
+ const Expr *Vec2 = E->getExpr(1);
+ if (!EvaluateAsRValue(Info, Vec2, VecVal2))
+ return false;
+
+ VectorType const *DestVecTy = E->getType()->castAs<VectorType>();
+ QualType DestElTy = DestVecTy->getElementType();
+
+ auto TotalElementsInOutputVector = DestVecTy->getNumElements();
+
+ SmallVector<APValue, 4> ResultElements;
+ ResultElements.reserve(TotalElementsInOutputVector);
+ for (unsigned EltNum = 0; EltNum < TotalElementsInOutputVector; ++EltNum) {
+ APValue Elt;
+ if (!handleVectorShuffle(Info, E, DestElTy, VecVal1, VecVal2, EltNum, Elt))
+ return false;
+ ResultElements.push_back(std::move(Elt));
+ }
+
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+}
+
//===----------------------------------------------------------------------===//
// Array Evaluation
//===----------------------------------------------------------------------===//
@@ -10935,8 +11144,7 @@ namespace {
return Error(E);
}
- Result = APValue(APValue::UninitArray(), 0,
- CAT->getSize().getZExtValue());
+ Result = APValue(APValue::UninitArray(), 0, CAT->getZExtSize());
if (!Result.hasArrayFiller())
return true;
@@ -11061,12 +11269,21 @@ bool ArrayExprEvaluator::VisitCXXParenListOrInitListExpr(
Filler = Result.getArrayFiller();
unsigned NumEltsToInit = Args.size();
- unsigned NumElts = CAT->getSize().getZExtValue();
+ unsigned NumElts = CAT->getZExtSize();
// If the initializer might depend on the array index, run it for each
// array element.
- if (NumEltsToInit != NumElts && MaybeElementDependentArrayFiller(ArrayFiller))
+ if (NumEltsToInit != NumElts &&
+ MaybeElementDependentArrayFiller(ArrayFiller)) {
NumEltsToInit = NumElts;
+ } else {
+ for (auto *Init : Args) {
+ if (auto *EmbedS = dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts()))
+ NumEltsToInit += EmbedS->getDataElementCount() - 1;
+ }
+ if (NumEltsToInit > NumElts)
+ NumEltsToInit = NumElts;
+ }
LLVM_DEBUG(llvm::dbgs() << "The number of elements to initialize: "
<< NumEltsToInit << ".\n");
@@ -11084,16 +11301,49 @@ bool ArrayExprEvaluator::VisitCXXParenListOrInitListExpr(
LValue Subobject = This;
Subobject.addArray(Info, ExprToVisit, CAT);
- for (unsigned Index = 0; Index != NumEltsToInit; ++Index) {
- const Expr *Init = Index < Args.size() ? Args[Index] : ArrayFiller;
- if (!EvaluateInPlace(Result.getArrayInitializedElt(Index),
- Info, Subobject, Init) ||
+ auto Eval = [&](const Expr *Init, unsigned ArrayIndex) {
+ if (!EvaluateInPlace(Result.getArrayInitializedElt(ArrayIndex), Info,
+ Subobject, Init) ||
!HandleLValueArrayAdjustment(Info, Init, Subobject,
CAT->getElementType(), 1)) {
if (!Info.noteFailure())
return false;
Success = false;
}
+ return true;
+ };
+ unsigned ArrayIndex = 0;
+ QualType DestTy = CAT->getElementType();
+ APSInt Value(Info.Ctx.getTypeSize(DestTy), DestTy->isUnsignedIntegerType());
+ for (unsigned Index = 0; Index != NumEltsToInit; ++Index) {
+ const Expr *Init = Index < Args.size() ? Args[Index] : ArrayFiller;
+ if (ArrayIndex >= NumEltsToInit)
+ break;
+ if (auto *EmbedS = dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
+ StringLiteral *SL = EmbedS->getDataStringLiteral();
+ for (unsigned I = EmbedS->getStartingElementPos(),
+ N = EmbedS->getDataElementCount();
+ I != EmbedS->getStartingElementPos() + N; ++I) {
+ Value = SL->getCodeUnit(I);
+ if (DestTy->isIntegerType()) {
+ Result.getArrayInitializedElt(ArrayIndex) = APValue(Value);
+ } else {
+ assert(DestTy->isFloatingType() && "unexpected type");
+ const FPOptions FPO =
+ Init->getFPFeaturesInEffect(Info.Ctx.getLangOpts());
+ APFloat FValue(0.0);
+ if (!HandleIntToFloatCast(Info, Init, FPO, EmbedS->getType(), Value,
+ DestTy, FValue))
+ return false;
+ Result.getArrayInitializedElt(ArrayIndex) = APValue(FValue);
+ }
+ ArrayIndex++;
+ }
+ } else {
+ if (!Eval(Init, ArrayIndex))
+ return false;
+ ++ArrayIndex;
+ }
}
if (!Result.hasArrayFiller())
@@ -11119,7 +11369,7 @@ bool ArrayExprEvaluator::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) {
auto *CAT = cast<ConstantArrayType>(E->getType()->castAsArrayTypeUnsafe());
- uint64_t Elements = CAT->getSize().getZExtValue();
+ uint64_t Elements = CAT->getZExtSize();
Result = APValue(APValue::UninitArray(), Elements, Elements);
LValue Subobject = This;
@@ -11164,7 +11414,7 @@ bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
bool HadZeroInit = Value->hasValue();
if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(Type)) {
- unsigned FinalSize = CAT->getSize().getZExtValue();
+ unsigned FinalSize = CAT->getZExtSize();
// Preserve the array filler if we had prior zero-initialization.
APValue Filler =
@@ -11236,7 +11486,7 @@ bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
bool ArrayExprEvaluator::VisitCXXParenListInitExpr(
const CXXParenListInitExpr *E) {
- assert(dyn_cast<ConstantArrayType>(E->getType()) &&
+ assert(E->getType()->isConstantArrayType() &&
"Expression result is not a constant array type");
return VisitCXXParenListOrInitListExpr(E, E->getInitExprs(),
@@ -11423,6 +11673,10 @@ class FixedPointExprEvaluator
return true;
}
+ bool ZeroInitialization(const Expr *E) {
+ return Success(0, E);
+ }
+
//===--------------------------------------------------------------------===//
// Visitor Methods
//===--------------------------------------------------------------------===//
@@ -11619,6 +11873,8 @@ GCCTypeClass EvaluateBuiltinClassifyType(QualType T,
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
+#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/AMDGPUTypes.def"
return GCCTypeClass::None;
case BuiltinType::Dependent:
@@ -11635,6 +11891,7 @@ GCCTypeClass EvaluateBuiltinClassifyType(QualType T,
case Type::IncompleteArray:
case Type::FunctionNoProto:
case Type::FunctionProto:
+ case Type::ArrayParameter:
return GCCTypeClass::Pointer;
case Type::MemberPointer:
@@ -11794,8 +12051,8 @@ static QualType getObjectType(APValue::LValueBase B) {
static const Expr *ignorePointerCastsAndParens(const Expr *E) {
assert(E->isPRValue() && E->getType()->hasPointerRepresentation());
- auto *NoParens = E->IgnoreParens();
- auto *Cast = dyn_cast<CastExpr>(NoParens);
+ const Expr *NoParens = E->IgnoreParens();
+ const auto *Cast = dyn_cast<CastExpr>(NoParens);
if (Cast == nullptr)
return NoParens;
@@ -11806,7 +12063,7 @@ static const Expr *ignorePointerCastsAndParens(const Expr *E) {
CastKind != CK_AddressSpaceConversion)
return NoParens;
- auto *SubExpr = Cast->getSubExpr();
+ const auto *SubExpr = Cast->getSubExpr();
if (!SubExpr->getType()->hasPointerRepresentation() || !SubExpr->isPRValue())
return NoParens;
return ignorePointerCastsAndParens(SubExpr);
@@ -11875,7 +12132,7 @@ static bool isDesignatorAtObjectEnd(const ASTContext &Ctx, const LValue &LVal) {
return true;
const auto *CAT = cast<ConstantArrayType>(Ctx.getAsArrayType(BaseType));
uint64_t Index = Entry.getAsArrayIndex();
- if (Index + 1 != CAT->getSize())
+ if (Index + 1 != CAT->getZExtSize())
return false;
BaseType = CAT->getElementType();
} else if (BaseType->isAnyComplexType()) {
@@ -12289,6 +12546,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case Builtin::BI__builtin_clzl:
case Builtin::BI__builtin_clzll:
case Builtin::BI__builtin_clzs:
+ case Builtin::BI__builtin_clzg:
case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
case Builtin::BI__lzcnt:
case Builtin::BI__lzcnt64: {
@@ -12296,14 +12554,28 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (!EvaluateInteger(E->getArg(0), Val, Info))
return false;
- // When the argument is 0, the result of GCC builtins is undefined, whereas
- // for Microsoft intrinsics, the result is the bit-width of the argument.
- bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
- BuiltinOp != Builtin::BI__lzcnt &&
- BuiltinOp != Builtin::BI__lzcnt64;
+ std::optional<APSInt> Fallback;
+ if (BuiltinOp == Builtin::BI__builtin_clzg && E->getNumArgs() > 1) {
+ APSInt FallbackTemp;
+ if (!EvaluateInteger(E->getArg(1), FallbackTemp, Info))
+ return false;
+ Fallback = FallbackTemp;
+ }
+
+ if (!Val) {
+ if (Fallback)
+ return Success(*Fallback, E);
- if (ZeroIsUndefined && !Val)
- return Error(E);
+ // When the argument is 0, the result of GCC builtins is undefined,
+ // whereas for Microsoft intrinsics, the result is the bit-width of the
+ // argument.
+ bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
+ BuiltinOp != Builtin::BI__lzcnt &&
+ BuiltinOp != Builtin::BI__lzcnt64;
+
+ if (ZeroIsUndefined)
+ return Error(E);
+ }
return Success(Val.countl_zero(), E);
}
@@ -12345,12 +12617,26 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case Builtin::BI__builtin_ctz:
case Builtin::BI__builtin_ctzl:
case Builtin::BI__builtin_ctzll:
- case Builtin::BI__builtin_ctzs: {
+ case Builtin::BI__builtin_ctzs:
+ case Builtin::BI__builtin_ctzg: {
APSInt Val;
if (!EvaluateInteger(E->getArg(0), Val, Info))
return false;
- if (!Val)
+
+ std::optional<APSInt> Fallback;
+ if (BuiltinOp == Builtin::BI__builtin_ctzg && E->getNumArgs() > 1) {
+ APSInt FallbackTemp;
+ if (!EvaluateInteger(E->getArg(1), FallbackTemp, Info))
+ return false;
+ Fallback = FallbackTemp;
+ }
+
+ if (!Val) {
+ if (Fallback)
+ return Success(*Fallback, E);
+
return Error(E);
+ }
return Success(Val.countr_zero(), E);
}
@@ -12365,6 +12651,13 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case Builtin::BI__builtin_expect_with_probability:
return Visit(E->getArg(0));
+ case Builtin::BI__builtin_ptrauth_string_discriminator: {
+ const auto *Literal =
+ cast<StringLiteral>(E->getArg(0)->IgnoreParenImpCasts());
+ uint64_t Result = getPointerAuthStableSipHash(Literal->getString());
+ return Success(Result, E);
+ }
+
case Builtin::BI__builtin_ffs:
case Builtin::BI__builtin_ffsl:
case Builtin::BI__builtin_ffsll: {
@@ -12461,6 +12754,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case Builtin::BI__builtin_popcount:
case Builtin::BI__builtin_popcountl:
case Builtin::BI__builtin_popcountll:
+ case Builtin::BI__builtin_popcountg:
case Builtin::BI__popcnt16: // Microsoft variants of popcount
case Builtin::BI__popcnt:
case Builtin::BI__popcnt64: {
@@ -12663,19 +12957,35 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
Info.Ctx.getTargetInfo().getMaxAtomicInlineWidth();
if (Size <= Info.Ctx.toCharUnitsFromBits(InlineWidthBits)) {
if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
- Size == CharUnits::One() ||
- E->getArg(1)->isNullPointerConstant(Info.Ctx,
- Expr::NPC_NeverValueDependent))
- // OK, we will inline appropriately-aligned operations of this size,
- // and _Atomic(T) is appropriately-aligned.
+ Size == CharUnits::One())
return Success(1, E);
- QualType PointeeType = E->getArg(1)->IgnoreImpCasts()->getType()->
- castAs<PointerType>()->getPointeeType();
- if (!PointeeType->isIncompleteType() &&
- Info.Ctx.getTypeAlignInChars(PointeeType) >= Size) {
- // OK, we will inline operations on this object.
+ // If the pointer argument can be evaluated to a compile-time constant
+ // integer (or nullptr), check if that value is appropriately aligned.
+ const Expr *PtrArg = E->getArg(1);
+ Expr::EvalResult ExprResult;
+ APSInt IntResult;
+ if (PtrArg->EvaluateAsRValue(ExprResult, Info.Ctx) &&
+ ExprResult.Val.toIntegralConstant(IntResult, PtrArg->getType(),
+ Info.Ctx) &&
+ IntResult.isAligned(Size.getAsAlign()))
return Success(1, E);
+
+ // Otherwise, check the type's alignment against Size.
+ if (auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
+ // Drop the potential implicit-cast to 'const volatile void*', getting
+ // the underlying type.
+ if (ICE->getCastKind() == CK_BitCast)
+ PtrArg = ICE->getSubExpr();
+ }
+
+ if (auto PtrTy = PtrArg->getType()->getAs<PointerType>()) {
+ QualType PointeeType = PtrTy->getPointeeType();
+ if (!PointeeType->isIncompleteType() &&
+ Info.Ctx.getTypeAlignInChars(PointeeType) >= Size) {
+ // OK, we will inline operations on this object.
+ return Success(1, E);
+ }
}
}
}
@@ -12683,6 +12993,59 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return BuiltinOp == Builtin::BI__atomic_always_lock_free ?
Success(0, E) : Error(E);
}
+ case Builtin::BI__builtin_addcb:
+ case Builtin::BI__builtin_addcs:
+ case Builtin::BI__builtin_addc:
+ case Builtin::BI__builtin_addcl:
+ case Builtin::BI__builtin_addcll:
+ case Builtin::BI__builtin_subcb:
+ case Builtin::BI__builtin_subcs:
+ case Builtin::BI__builtin_subc:
+ case Builtin::BI__builtin_subcl:
+ case Builtin::BI__builtin_subcll: {
+ LValue CarryOutLValue;
+ APSInt LHS, RHS, CarryIn, CarryOut, Result;
+ QualType ResultType = E->getArg(0)->getType();
+ if (!EvaluateInteger(E->getArg(0), LHS, Info) ||
+ !EvaluateInteger(E->getArg(1), RHS, Info) ||
+ !EvaluateInteger(E->getArg(2), CarryIn, Info) ||
+ !EvaluatePointer(E->getArg(3), CarryOutLValue, Info))
+ return false;
+ // Copy the number of bits and sign.
+ Result = LHS;
+ CarryOut = LHS;
+
+ bool FirstOverflowed = false;
+ bool SecondOverflowed = false;
+ switch (BuiltinOp) {
+ default:
+ llvm_unreachable("Invalid value for BuiltinOp");
+ case Builtin::BI__builtin_addcb:
+ case Builtin::BI__builtin_addcs:
+ case Builtin::BI__builtin_addc:
+ case Builtin::BI__builtin_addcl:
+ case Builtin::BI__builtin_addcll:
+ Result =
+ LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
+ break;
+ case Builtin::BI__builtin_subcb:
+ case Builtin::BI__builtin_subcs:
+ case Builtin::BI__builtin_subc:
+ case Builtin::BI__builtin_subcl:
+ case Builtin::BI__builtin_subcll:
+ Result =
+ LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
+ break;
+ }
+
+ // It is possible for both overflows to happen but CGBuiltin uses an OR so
+ // this is consistent.
+ CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);
+ APValue APV{CarryOut};
+ if (!handleAssignment(Info, E, CarryOutLValue, ResultType, APV))
+ return false;
+ return Success(Result, E);
+ }
case Builtin::BI__builtin_add_overflow:
case Builtin::BI__builtin_sub_overflow:
case Builtin::BI__builtin_mul_overflow:
@@ -12824,6 +13187,10 @@ static bool isOnePastTheEndOfCompleteObject(const ASTContext &Ctx,
if (Ty->isIncompleteType())
return true;
+ // Can't be past the end of an invalid object.
+ if (LV.getLValueDesignator().Invalid)
+ return false;
+
// We're a past-the-end pointer if we point to the byte after the object,
// no matter what our type or path is.
auto Size = Ctx.getTypeSizeInChars(Ty);
@@ -13695,6 +14062,12 @@ bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
E);
}
+ case UETT_PtrAuthTypeDiscriminator: {
+ if (E->getArgumentType()->isDependentType())
+ return false;
+ return Success(
+ Info.Ctx.getPointerAuthTypeDiscriminator(E->getArgumentType()), E);
+ }
case UETT_VecStep: {
QualType Ty = E->getTypeOfArgument();
@@ -13738,8 +14111,8 @@ bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
QualType Ty = E->getTypeOfArgument();
// If the vector has a fixed size, we can determine the number of elements
// at compile time.
- if (Ty->isVectorType())
- return Success(Ty->castAs<VectorType>()->getNumElements(), E);
+ if (const auto *VT = Ty->getAs<VectorType>())
+ return Success(VT->getNumElements(), E);
assert(Ty->isSizelessVectorType());
if (Info.InConstantContext)
@@ -13844,7 +14217,9 @@ bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
if (Info.checkingForUndefinedBehavior())
Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
diag::warn_integer_constant_overflow)
- << toString(Value, 10) << E->getType() << E->getSourceRange();
+ << toString(Value, 10, Value.isSigned(), /*formatAsCLiteral=*/false,
+ /*UpperCase=*/true, /*InsertSeparators=*/true)
+ << E->getType() << E->getSourceRange();
if (!HandleOverflow(Info, E, -Value.extend(Value.getBitWidth() + 1),
E->getType()))
@@ -13914,6 +14289,7 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_FixedPointCast:
case CK_IntegralToFixedPoint:
case CK_MatrixCast:
+ case CK_HLSLVectorTruncation:
llvm_unreachable("invalid cast kind for integral value");
case CK_BitCast:
@@ -13931,6 +14307,7 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_AtomicToNonAtomic:
case CK_NoOp:
case CK_LValueToRValueBitCast:
+ case CK_HLSLArrayRValue:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_MemberPointerToBoolean:
@@ -14752,12 +15129,14 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_FixedPointToIntegral:
case CK_IntegralToFixedPoint:
case CK_MatrixCast:
+ case CK_HLSLVectorTruncation:
llvm_unreachable("invalid cast kind for complex value");
case CK_LValueToRValue:
case CK_AtomicToNonAtomic:
case CK_NoOp:
case CK_LValueToRValueBitCast:
+ case CK_HLSLArrayRValue:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_Dependent:
@@ -14844,6 +15223,104 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
llvm_unreachable("unknown cast resulting in complex value");
}
+void HandleComplexComplexMul(APFloat A, APFloat B, APFloat C, APFloat D,
+ APFloat &ResR, APFloat &ResI) {
+ // This is an implementation of complex multiplication according to the
+ // constraints laid out in C11 Annex G. The implementation uses the
+ // following naming scheme:
+ // (a + ib) * (c + id)
+
+ APFloat AC = A * C;
+ APFloat BD = B * D;
+ APFloat AD = A * D;
+ APFloat BC = B * C;
+ ResR = AC - BD;
+ ResI = AD + BC;
+ if (ResR.isNaN() && ResI.isNaN()) {
+ bool Recalc = false;
+ if (A.isInfinity() || B.isInfinity()) {
+ A = APFloat::copySign(APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0),
+ A);
+ B = APFloat::copySign(APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0),
+ B);
+ if (C.isNaN())
+ C = APFloat::copySign(APFloat(C.getSemantics()), C);
+ if (D.isNaN())
+ D = APFloat::copySign(APFloat(D.getSemantics()), D);
+ Recalc = true;
+ }
+ if (C.isInfinity() || D.isInfinity()) {
+ C = APFloat::copySign(APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0),
+ C);
+ D = APFloat::copySign(APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0),
+ D);
+ if (A.isNaN())
+ A = APFloat::copySign(APFloat(A.getSemantics()), A);
+ if (B.isNaN())
+ B = APFloat::copySign(APFloat(B.getSemantics()), B);
+ Recalc = true;
+ }
+ if (!Recalc && (AC.isInfinity() || BD.isInfinity() || AD.isInfinity() ||
+ BC.isInfinity())) {
+ if (A.isNaN())
+ A = APFloat::copySign(APFloat(A.getSemantics()), A);
+ if (B.isNaN())
+ B = APFloat::copySign(APFloat(B.getSemantics()), B);
+ if (C.isNaN())
+ C = APFloat::copySign(APFloat(C.getSemantics()), C);
+ if (D.isNaN())
+ D = APFloat::copySign(APFloat(D.getSemantics()), D);
+ Recalc = true;
+ }
+ if (Recalc) {
+ ResR = APFloat::getInf(A.getSemantics()) * (A * C - B * D);
+ ResI = APFloat::getInf(A.getSemantics()) * (A * D + B * C);
+ }
+ }
+}
+
+void HandleComplexComplexDiv(APFloat A, APFloat B, APFloat C, APFloat D,
+ APFloat &ResR, APFloat &ResI) {
+ // This is an implementation of complex division according to the
+ // constraints laid out in C11 Annex G. The implementation uses the
+ // following naming scheme:
+ // (a + ib) / (c + id)
+
+ int DenomLogB = 0;
+ APFloat MaxCD = maxnum(abs(C), abs(D));
+ if (MaxCD.isFinite()) {
+ DenomLogB = ilogb(MaxCD);
+ C = scalbn(C, -DenomLogB, APFloat::rmNearestTiesToEven);
+ D = scalbn(D, -DenomLogB, APFloat::rmNearestTiesToEven);
+ }
+ APFloat Denom = C * C + D * D;
+ ResR =
+ scalbn((A * C + B * D) / Denom, -DenomLogB, APFloat::rmNearestTiesToEven);
+ ResI =
+ scalbn((B * C - A * D) / Denom, -DenomLogB, APFloat::rmNearestTiesToEven);
+ if (ResR.isNaN() && ResI.isNaN()) {
+ if (Denom.isPosZero() && (!A.isNaN() || !B.isNaN())) {
+ ResR = APFloat::getInf(ResR.getSemantics(), C.isNegative()) * A;
+ ResI = APFloat::getInf(ResR.getSemantics(), C.isNegative()) * B;
+ } else if ((A.isInfinity() || B.isInfinity()) && C.isFinite() &&
+ D.isFinite()) {
+ A = APFloat::copySign(APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0),
+ A);
+ B = APFloat::copySign(APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0),
+ B);
+ ResR = APFloat::getInf(ResR.getSemantics()) * (A * C + B * D);
+ ResI = APFloat::getInf(ResI.getSemantics()) * (B * C - A * D);
+ } else if (MaxCD.isInfinity() && A.isFinite() && B.isFinite()) {
+ C = APFloat::copySign(APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0),
+ C);
+ D = APFloat::copySign(APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0),
+ D);
+ ResR = APFloat::getZero(ResR.getSemantics()) * (A * C + B * D);
+ ResI = APFloat::getZero(ResI.getSemantics()) * (B * C - A * D);
+ }
+ }
+}
+
bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
@@ -14927,61 +15404,23 @@ bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
APFloat &ResI = Result.getComplexFloatImag();
if (LHSReal) {
assert(!RHSReal && "Cannot have two real operands for a complex op!");
- ResR = A * C;
- ResI = A * D;
+ ResR = A;
+ ResI = A;
+ // ResR = A * C;
+ // ResI = A * D;
+ if (!handleFloatFloatBinOp(Info, E, ResR, BO_Mul, C) ||
+ !handleFloatFloatBinOp(Info, E, ResI, BO_Mul, D))
+ return false;
} else if (RHSReal) {
- ResR = C * A;
- ResI = C * B;
+ // ResR = C * A;
+ // ResI = C * B;
+ ResR = C;
+ ResI = C;
+ if (!handleFloatFloatBinOp(Info, E, ResR, BO_Mul, A) ||
+ !handleFloatFloatBinOp(Info, E, ResI, BO_Mul, B))
+ return false;
} else {
- // In the fully general case, we need to handle NaNs and infinities
- // robustly.
- APFloat AC = A * C;
- APFloat BD = B * D;
- APFloat AD = A * D;
- APFloat BC = B * C;
- ResR = AC - BD;
- ResI = AD + BC;
- if (ResR.isNaN() && ResI.isNaN()) {
- bool Recalc = false;
- if (A.isInfinity() || B.isInfinity()) {
- A = APFloat::copySign(
- APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0), A);
- B = APFloat::copySign(
- APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0), B);
- if (C.isNaN())
- C = APFloat::copySign(APFloat(C.getSemantics()), C);
- if (D.isNaN())
- D = APFloat::copySign(APFloat(D.getSemantics()), D);
- Recalc = true;
- }
- if (C.isInfinity() || D.isInfinity()) {
- C = APFloat::copySign(
- APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0), C);
- D = APFloat::copySign(
- APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0), D);
- if (A.isNaN())
- A = APFloat::copySign(APFloat(A.getSemantics()), A);
- if (B.isNaN())
- B = APFloat::copySign(APFloat(B.getSemantics()), B);
- Recalc = true;
- }
- if (!Recalc && (AC.isInfinity() || BD.isInfinity() ||
- AD.isInfinity() || BC.isInfinity())) {
- if (A.isNaN())
- A = APFloat::copySign(APFloat(A.getSemantics()), A);
- if (B.isNaN())
- B = APFloat::copySign(APFloat(B.getSemantics()), B);
- if (C.isNaN())
- C = APFloat::copySign(APFloat(C.getSemantics()), C);
- if (D.isNaN())
- D = APFloat::copySign(APFloat(D.getSemantics()), D);
- Recalc = true;
- }
- if (Recalc) {
- ResR = APFloat::getInf(A.getSemantics()) * (A * C - B * D);
- ResI = APFloat::getInf(A.getSemantics()) * (A * D + B * C);
- }
- }
+ HandleComplexComplexMul(A, B, C, D, ResR, ResI);
}
} else {
ComplexValue LHS = Result;
@@ -15007,46 +15446,19 @@ bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
APFloat &ResR = Result.getComplexFloatReal();
APFloat &ResI = Result.getComplexFloatImag();
if (RHSReal) {
- ResR = A / C;
- ResI = B / C;
+ ResR = A;
+ ResI = B;
+ // ResR = A / C;
+ // ResI = B / C;
+ if (!handleFloatFloatBinOp(Info, E, ResR, BO_Div, C) ||
+ !handleFloatFloatBinOp(Info, E, ResI, BO_Div, C))
+ return false;
} else {
if (LHSReal) {
// No real optimizations we can do here, stub out with zero.
B = APFloat::getZero(A.getSemantics());
}
- int DenomLogB = 0;
- APFloat MaxCD = maxnum(abs(C), abs(D));
- if (MaxCD.isFinite()) {
- DenomLogB = ilogb(MaxCD);
- C = scalbn(C, -DenomLogB, APFloat::rmNearestTiesToEven);
- D = scalbn(D, -DenomLogB, APFloat::rmNearestTiesToEven);
- }
- APFloat Denom = C * C + D * D;
- ResR = scalbn((A * C + B * D) / Denom, -DenomLogB,
- APFloat::rmNearestTiesToEven);
- ResI = scalbn((B * C - A * D) / Denom, -DenomLogB,
- APFloat::rmNearestTiesToEven);
- if (ResR.isNaN() && ResI.isNaN()) {
- if (Denom.isPosZero() && (!A.isNaN() || !B.isNaN())) {
- ResR = APFloat::getInf(ResR.getSemantics(), C.isNegative()) * A;
- ResI = APFloat::getInf(ResR.getSemantics(), C.isNegative()) * B;
- } else if ((A.isInfinity() || B.isInfinity()) && C.isFinite() &&
- D.isFinite()) {
- A = APFloat::copySign(
- APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0), A);
- B = APFloat::copySign(
- APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0), B);
- ResR = APFloat::getInf(ResR.getSemantics()) * (A * C + B * D);
- ResI = APFloat::getInf(ResI.getSemantics()) * (B * C - A * D);
- } else if (MaxCD.isInfinity() && A.isFinite() && B.isFinite()) {
- C = APFloat::copySign(
- APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0), C);
- D = APFloat::copySign(
- APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0), D);
- ResR = APFloat::getZero(ResR.getSemantics()) * (A * C + B * D);
- ResI = APFloat::getZero(ResI.getSemantics()) * (B * C - A * D);
- }
- }
+ HandleComplexComplexDiv(A, B, C, D, ResR, ResI);
}
} else {
if (RHS.getComplexIntReal() == 0 && RHS.getComplexIntImag() == 0)
@@ -15484,9 +15896,12 @@ static bool FastEvaluateAsRValue(const Expr *Exp, Expr::EvalResult &Result,
if (const auto *CE = dyn_cast<ConstantExpr>(Exp)) {
if (CE->hasAPValueResult()) {
- Result.Val = CE->getAPValueResult();
- IsConst = true;
- return true;
+ APValue APV = CE->getAPValueResult();
+ if (!APV.isLValue()) {
+ Result.Val = std::move(APV);
+ IsConst = true;
+ return true;
+ }
}
// The SubExpr is usually just an IntegerLiteral.
@@ -15754,7 +16169,8 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
EStatus.Diag = &Notes;
EvalInfo Info(Ctx, EStatus,
- (IsConstantInitialization && Ctx.getLangOpts().CPlusPlus)
+ (IsConstantInitialization &&
+ (Ctx.getLangOpts().CPlusPlus || Ctx.getLangOpts().C23))
? EvalInfo::EM_ConstantExpression
: EvalInfo::EM_ConstantFold);
Info.setEvaluatingDecl(VD, Value);
@@ -15971,7 +16387,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::StringLiteralClass:
case Expr::ArraySubscriptExprClass:
case Expr::MatrixSubscriptExprClass:
- case Expr::OMPArraySectionExprClass:
+ case Expr::ArraySectionExprClass:
case Expr::OMPArrayShapingExprClass:
case Expr::OMPIteratorExprClass:
case Expr::MemberExprClass:
@@ -16065,8 +16481,12 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::SizeOfPackExprClass:
case Expr::GNUNullExprClass:
case Expr::SourceLocExprClass:
+ case Expr::EmbedExprClass:
return NoDiag();
+ case Expr::PackIndexingExprClass:
+ return CheckICE(cast<PackIndexingExpr>(E)->getSelectedExpr(), Ctx);
+
case Expr::SubstNonTypeTemplateParmExprClass:
return
CheckICE(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(), Ctx);
@@ -16630,7 +17050,7 @@ bool Expr::tryEvaluateObjectSize(uint64_t &Result, ASTContext &Ctx,
}
static bool EvaluateBuiltinStrLen(const Expr *E, uint64_t &Result,
- EvalInfo &Info) {
+ EvalInfo &Info, std::string *StringResult) {
if (!E->getType()->hasPointerRepresentation() || !E->isPRValue())
return false;
@@ -16657,6 +17077,8 @@ static bool EvaluateBuiltinStrLen(const Expr *E, uint64_t &Result,
Str = Str.substr(0, Pos);
Result = Str.size();
+ if (StringResult)
+ *StringResult = Str;
return true;
}
@@ -16672,12 +17094,24 @@ static bool EvaluateBuiltinStrLen(const Expr *E, uint64_t &Result,
if (!Char.getInt()) {
Result = Strlen;
return true;
- }
+ } else if (StringResult)
+ StringResult->push_back(Char.getInt().getExtValue());
if (!HandleLValueArrayAdjustment(Info, E, String, CharTy, 1))
return false;
}
}
+std::optional<std::string> Expr::tryEvaluateString(ASTContext &Ctx) const {
+ Expr::EvalStatus Status;
+ EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantFold);
+ uint64_t Result;
+ std::string StringResult;
+
+ if (EvaluateBuiltinStrLen(this, Result, Info, &StringResult))
+ return StringResult;
+ return {};
+}
+
bool Expr::EvaluateCharRangeAsString(std::string &Result,
const Expr *SizeExpression,
const Expr *PtrExpression, ASTContext &Ctx,
@@ -16691,13 +17125,13 @@ bool Expr::EvaluateCharRangeAsString(std::string &Result,
if (!::EvaluateInteger(SizeExpression, SizeValue, Info))
return false;
- int64_t Size = SizeValue.getExtValue();
+ uint64_t Size = SizeValue.getZExtValue();
if (!::EvaluatePointer(PtrExpression, String, Info))
return false;
QualType CharTy = PtrExpression->getType()->getPointeeType();
- for (int64_t I = 0; I < Size; ++I) {
+ for (uint64_t I = 0; I < Size; ++I) {
APValue Char;
if (!handleLValueToRValueConversion(Info, PtrExpression, CharTy, String,
Char))
diff --git a/contrib/llvm-project/clang/lib/AST/ExternalASTSource.cpp b/contrib/llvm-project/clang/lib/AST/ExternalASTSource.cpp
index 090ef02aa422..a5b6f80bde69 100644
--- a/contrib/llvm-project/clang/lib/AST/ExternalASTSource.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExternalASTSource.cpp
@@ -15,10 +15,10 @@
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclarationName.h"
+#include "clang/Basic/ASTSourceDescriptor.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
-#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/Support/ErrorHandling.h"
#include <cstdint>
@@ -68,9 +68,7 @@ bool ExternalASTSource::layoutRecordType(
return false;
}
-Decl *ExternalASTSource::GetExternalDecl(uint32_t ID) {
- return nullptr;
-}
+Decl *ExternalASTSource::GetExternalDecl(GlobalDeclID ID) { return nullptr; }
Selector ExternalASTSource::GetExternalSelector(uint32_t ID) {
return Selector();
diff --git a/contrib/llvm-project/clang/lib/AST/FormatString.cpp b/contrib/llvm-project/clang/lib/AST/FormatString.cpp
index c5d14b4af7ff..da8164bad518 100644
--- a/contrib/llvm-project/clang/lib/AST/FormatString.cpp
+++ b/contrib/llvm-project/clang/lib/AST/FormatString.cpp
@@ -403,13 +403,17 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
else if (ETy->isUnscopedEnumerationType())
argTy = ETy->getDecl()->getIntegerType();
}
+
+ if (argTy->isSaturatedFixedPointType())
+ argTy = C.getCorrespondingUnsaturatedType(argTy);
+
argTy = C.getCanonicalType(argTy).getUnqualifiedType();
if (T == argTy)
return Match;
if (const auto *BT = argTy->getAs<BuiltinType>()) {
// Check if the only difference between them is signed vs unsigned
- // if true, we consider they are compatible.
+ // if true, return NoMatchSignedness (signedness-only mismatch).
switch (BT->getKind()) {
default:
break;
@@ -419,44 +423,53 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
[[fallthrough]];
case BuiltinType::Char_S:
case BuiltinType::SChar:
+ if (T == C.UnsignedShortTy || T == C.ShortTy)
+ return NoMatchTypeConfusion;
+ if (T == C.UnsignedCharTy)
+ return NoMatchSignedness;
+ if (T == C.SignedCharTy)
+ return Match;
+ break;
case BuiltinType::Char_U:
case BuiltinType::UChar:
if (T == C.UnsignedShortTy || T == C.ShortTy)
return NoMatchTypeConfusion;
- if (T == C.UnsignedCharTy || T == C.SignedCharTy)
+ if (T == C.UnsignedCharTy)
return Match;
+ if (T == C.SignedCharTy)
+ return NoMatchSignedness;
break;
case BuiltinType::Short:
if (T == C.UnsignedShortTy)
- return Match;
+ return NoMatchSignedness;
break;
case BuiltinType::UShort:
if (T == C.ShortTy)
- return Match;
+ return NoMatchSignedness;
break;
case BuiltinType::Int:
if (T == C.UnsignedIntTy)
- return Match;
+ return NoMatchSignedness;
break;
case BuiltinType::UInt:
if (T == C.IntTy)
- return Match;
+ return NoMatchSignedness;
break;
case BuiltinType::Long:
if (T == C.UnsignedLongTy)
- return Match;
+ return NoMatchSignedness;
break;
case BuiltinType::ULong:
if (T == C.LongTy)
- return Match;
+ return NoMatchSignedness;
break;
case BuiltinType::LongLong:
if (T == C.UnsignedLongLongTy)
- return Match;
+ return NoMatchSignedness;
break;
case BuiltinType::ULongLong:
if (T == C.LongLongTy)
- return Match;
+ return NoMatchSignedness;
break;
}
// "Partially matched" because of promotions?
@@ -761,6 +774,16 @@ const char *ConversionSpecifier::toString() const {
// MS specific specifiers.
case ZArg: return "Z";
+
+ // ISO/IEC TR 18037 (fixed-point) specific specifiers.
+ case rArg:
+ return "r";
+ case RArg:
+ return "R";
+ case kArg:
+ return "k";
+ case KArg:
+ return "K";
}
return nullptr;
}
@@ -825,6 +848,9 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target,
if (LO.OpenCL && CS.isDoubleArg())
return !VectorNumElts.isInvalid();
+ if (CS.isFixedPointArg())
+ return true;
+
if (Target.getTriple().isOSMSVCRT()) {
switch (CS.getKind()) {
case ConversionSpecifier::cArg:
@@ -877,6 +903,9 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target,
return true;
}
+ if (CS.isFixedPointArg())
+ return true;
+
switch (CS.getKind()) {
case ConversionSpecifier::bArg:
case ConversionSpecifier::BArg:
@@ -1043,6 +1072,11 @@ bool FormatSpecifier::hasStandardConversionSpecifier(
case ConversionSpecifier::UArg:
case ConversionSpecifier::ZArg:
return false;
+ case ConversionSpecifier::rArg:
+ case ConversionSpecifier::RArg:
+ case ConversionSpecifier::kArg:
+ case ConversionSpecifier::KArg:
+ return LangOpt.FixedPoint;
}
llvm_unreachable("Invalid ConversionSpecifier Kind!");
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Boolean.h b/contrib/llvm-project/clang/lib/AST/Interp/Boolean.h
index 336f7941dfc4..23f728603676 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Boolean.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Boolean.h
@@ -45,15 +45,10 @@ class Boolean final {
Boolean operator-(const Boolean &Other) const { return Boolean(V - Other.V); }
Boolean operator~() const { return Boolean(true); }
- explicit operator int8_t() const { return V; }
- explicit operator uint8_t() const { return V; }
- explicit operator int16_t() const { return V; }
- explicit operator uint16_t() const { return V; }
- explicit operator int32_t() const { return V; }
- explicit operator uint32_t() const { return V; }
- explicit operator int64_t() const { return V; }
- explicit operator uint64_t() const { return V; }
- explicit operator bool() const { return V; }
+ template <typename Ty, typename = std::enable_if_t<std::is_integral_v<Ty>>>
+ explicit operator Ty() const {
+ return V;
+ }
APSInt toAPSInt() const {
return APSInt(APInt(1, static_cast<uint64_t>(V), false), true);
@@ -61,7 +56,7 @@ class Boolean final {
APSInt toAPSInt(unsigned NumBits) const {
return APSInt(toAPSInt().zextOrTrunc(NumBits), true);
}
- APValue toAPValue() const { return APValue(toAPSInt()); }
+ APValue toAPValue(const ASTContext &) const { return APValue(toAPSInt()); }
Boolean toUnsigned() const { return *this; }
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.cpp b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.cpp
index fd2a92d9d3f9..fee4432a8f66 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.cpp
@@ -7,12 +7,13 @@
//===----------------------------------------------------------------------===//
#include "ByteCodeEmitter.h"
-#include "ByteCodeGenError.h"
#include "Context.h"
#include "Floating.h"
+#include "IntegralAP.h"
#include "Opcode.h"
#include "Program.h"
#include "clang/AST/ASTLambda.h"
+#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/Basic/Builtins.h"
#include <type_traits>
@@ -20,7 +21,51 @@
using namespace clang;
using namespace clang::interp;
+/// Unevaluated builtins don't get their arguments put on the stack
+/// automatically. They instead operate on the AST of their Call
+/// Expression.
+/// Similar information is available via ASTContext::BuiltinInfo,
+/// but that is not correct for our use cases.
+static bool isUnevaluatedBuiltin(unsigned BuiltinID) {
+ return BuiltinID == Builtin::BI__builtin_classify_type ||
+ BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size;
+}
+
Function *ByteCodeEmitter::compileFunc(const FunctionDecl *FuncDecl) {
+
+ // Manually created functions that haven't been assigned proper
+ // parameters yet.
+ if (!FuncDecl->param_empty() && !FuncDecl->param_begin())
+ return nullptr;
+
+ bool IsLambdaStaticInvoker = false;
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FuncDecl);
+ MD && MD->isLambdaStaticInvoker()) {
+ // For a lambda static invoker, we might have to pick a specialized
+ // version if the lambda is generic. In that case, the picked function
+ // will *NOT* be a static invoker anymore. However, it will still
+ // be a non-static member function, this (usually) requiring an
+ // instance pointer. We suppress that later in this function.
+ IsLambdaStaticInvoker = true;
+
+ const CXXRecordDecl *ClosureClass = MD->getParent();
+ assert(ClosureClass->captures_begin() == ClosureClass->captures_end());
+ if (ClosureClass->isGenericLambda()) {
+ const CXXMethodDecl *LambdaCallOp = ClosureClass->getLambdaCallOperator();
+ assert(MD->isFunctionTemplateSpecialization() &&
+ "A generic lambda's static-invoker function must be a "
+ "template specialization");
+ const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
+ FunctionTemplateDecl *CallOpTemplate =
+ LambdaCallOp->getDescribedFunctionTemplate();
+ void *InsertPos = nullptr;
+ const FunctionDecl *CorrespondingCallOpSpecialization =
+ CallOpTemplate->findSpecialization(TAL->asArray(), InsertPos);
+ assert(CorrespondingCallOpSpecialization);
+ FuncDecl = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
+ }
+ }
+
// Set up argument indices.
unsigned ParamOffset = 0;
SmallVector<PrimType, 8> ParamTypes;
@@ -44,15 +89,22 @@ Function *ByteCodeEmitter::compileFunc(const FunctionDecl *FuncDecl) {
// InterpStack when calling the function.
bool HasThisPointer = false;
if (const auto *MD = dyn_cast<CXXMethodDecl>(FuncDecl)) {
- if (MD->isImplicitObjectMemberFunction()) {
- HasThisPointer = true;
- ParamTypes.push_back(PT_Ptr);
- ParamOffsets.push_back(ParamOffset);
- ParamOffset += align(primSize(PT_Ptr));
+ if (!IsLambdaStaticInvoker) {
+ HasThisPointer = MD->isInstance();
+ if (MD->isImplicitObjectMemberFunction()) {
+ ParamTypes.push_back(PT_Ptr);
+ ParamOffsets.push_back(ParamOffset);
+ ParamOffset += align(primSize(PT_Ptr));
+ }
}
// Set up lambda capture to closure record field mapping.
if (isLambdaCallOperator(MD)) {
+ // The parent record needs to be complete, we need to know about all
+ // the lambda captures.
+ if (!MD->getParent()->isCompleteDefinition())
+ return nullptr;
+
const Record *R = P.getOrCreateRecord(MD->getParent());
llvm::DenseMap<const ValueDecl *, FieldDecl *> LC;
FieldDecl *LTC;
@@ -69,8 +121,12 @@ Function *ByteCodeEmitter::compileFunc(const FunctionDecl *FuncDecl) {
this->LambdaCaptures[Cap.first] = {
Offset, Cap.second->getType()->isReferenceType()};
}
- if (LTC)
- this->LambdaThisCapture = R->getField(LTC)->Offset;
+ if (LTC) {
+ QualType CaptureType = R->getField(LTC)->Decl->getType();
+ this->LambdaThisCapture = {R->getField(LTC)->Offset,
+ CaptureType->isReferenceType() ||
+ CaptureType->isPointerType()};
+ }
}
}
@@ -92,7 +148,7 @@ Function *ByteCodeEmitter::compileFunc(const FunctionDecl *FuncDecl) {
if (!Func) {
bool IsUnevaluatedBuiltin = false;
if (unsigned BI = FuncDecl->getBuiltinID())
- IsUnevaluatedBuiltin = Ctx.getASTContext().BuiltinInfo.isUnevaluated(BI);
+ IsUnevaluatedBuiltin = isUnevaluatedBuiltin(BI);
Func =
P.createFunction(FuncDecl, ParamOffset, std::move(ParamTypes),
@@ -103,7 +159,8 @@ Function *ByteCodeEmitter::compileFunc(const FunctionDecl *FuncDecl) {
assert(Func);
// For not-yet-defined functions, we only create a Function instance and
// compile their body later.
- if (!FuncDecl->isDefined()) {
+ if (!FuncDecl->isDefined() ||
+ (FuncDecl->willHaveBody() && !FuncDecl->hasBody())) {
Func->setDefined(false);
return Func;
}
@@ -115,7 +172,8 @@ Function *ByteCodeEmitter::compileFunc(const FunctionDecl *FuncDecl) {
if (const auto *MD = dyn_cast<CXXMethodDecl>(FuncDecl))
IsEligibleForCompilation = MD->isLambdaStaticInvoker();
if (!IsEligibleForCompilation)
- IsEligibleForCompilation = FuncDecl->isConstexpr();
+ IsEligibleForCompilation =
+ FuncDecl->isConstexpr() || FuncDecl->hasAttr<MSConstexprAttr>();
// Compile the function body.
if (!IsEligibleForCompilation || !visitFunc(FuncDecl)) {
@@ -209,9 +267,11 @@ static void emit(Program &P, std::vector<std::byte> &Code, const T &Val,
}
}
-template <>
-void emit(Program &P, std::vector<std::byte> &Code, const Floating &Val,
- bool &Success) {
+/// Emits a serializable value. These usually (potentially) contain
+/// heap-allocated memory and aren't trivially copyable.
+template <typename T>
+static void emitSerialized(std::vector<std::byte> &Code, const T &Val,
+ bool &Success) {
size_t Size = Val.bytesToSerialize();
if (Code.size() + Size > std::numeric_limits<unsigned>::max()) {
@@ -228,6 +288,24 @@ void emit(Program &P, std::vector<std::byte> &Code, const Floating &Val,
Val.serialize(Code.data() + ValPos);
}
+template <>
+void emit(Program &P, std::vector<std::byte> &Code, const Floating &Val,
+ bool &Success) {
+ emitSerialized(Code, Val, Success);
+}
+
+template <>
+void emit(Program &P, std::vector<std::byte> &Code,
+ const IntegralAP<false> &Val, bool &Success) {
+ emitSerialized(Code, Val, Success);
+}
+
+template <>
+void emit(Program &P, std::vector<std::byte> &Code, const IntegralAP<true> &Val,
+ bool &Success) {
+ emitSerialized(Code, Val, Success);
+}
+
template <typename... Tys>
bool ByteCodeEmitter::emitOp(Opcode Op, const Tys &... Args, const SourceInfo &SI) {
bool Success = true;
@@ -238,10 +316,7 @@ bool ByteCodeEmitter::emitOp(Opcode Op, const Tys &... Args, const SourceInfo &S
if (SI)
SrcMap.emplace_back(Code.size(), SI);
- // The initializer list forces the expression to be evaluated
- // for each argument in the variadic template, in order.
- (void)std::initializer_list<int>{(emit(P, Code, Args, Success), 0)...};
-
+ (..., emit(P, Code, Args, Success));
return Success;
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.h b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.h
index 03de286582c9..a19a25c2f9e8 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.h
@@ -1,4 +1,4 @@
-//===--- ByteCodeEmitter.h - Instruction emitter for the VM ---------*- C++ -*-===//
+//===--- ByteCodeEmitter.h - Instruction emitter for the VM -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -46,7 +46,7 @@ protected:
/// Methods implemented by the compiler.
virtual bool visitFunc(const FunctionDecl *E) = 0;
virtual bool visitExpr(const Expr *E) = 0;
- virtual bool visitDecl(const VarDecl *E) = 0;
+ virtual bool visitDeclAndReturn(const VarDecl *E, bool ConstantContext) = 0;
/// Emits jumps.
bool jumpTrue(const LabelTy &Label);
@@ -54,6 +54,9 @@ protected:
bool jump(const LabelTy &Label);
bool fallthrough(const LabelTy &Label);
+ /// We're always emitting bytecode.
+ bool isActive() const { return true; }
+
/// Callback for local registration.
Local createLocal(Descriptor *D);
@@ -62,7 +65,7 @@ protected:
/// Lambda captures.
llvm::DenseMap<const ValueDecl *, ParamOffset> LambdaCaptures;
/// Offset of the This parameter in a lambda record.
- unsigned LambdaThisCapture = 0;
+ ParamOffset LambdaThisCapture{0, false};
/// Local descriptors.
llvm::SmallVector<SmallVector<Local, 8>, 2> Descriptors;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp
deleted file mode 100644
index cfcef067b92b..000000000000
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp
+++ /dev/null
@@ -1,3088 +0,0 @@
-//===--- ByteCodeExprGen.cpp - Code generator for expressions ---*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "ByteCodeExprGen.h"
-#include "ByteCodeEmitter.h"
-#include "ByteCodeGenError.h"
-#include "ByteCodeStmtGen.h"
-#include "Context.h"
-#include "Floating.h"
-#include "Function.h"
-#include "PrimType.h"
-#include "Program.h"
-
-using namespace clang;
-using namespace clang::interp;
-
-using APSInt = llvm::APSInt;
-
-namespace clang {
-namespace interp {
-
-/// Scope used to handle temporaries in toplevel variable declarations.
-template <class Emitter> class DeclScope final : public VariableScope<Emitter> {
-public:
- DeclScope(ByteCodeExprGen<Emitter> *Ctx, const ValueDecl *VD)
- : VariableScope<Emitter>(Ctx), Scope(Ctx->P, VD),
- OldGlobalDecl(Ctx->GlobalDecl) {
- Ctx->GlobalDecl = Context::shouldBeGloballyIndexed(VD);
- }
-
- void addExtended(const Scope::Local &Local) override {
- return this->addLocal(Local);
- }
-
- ~DeclScope() { this->Ctx->GlobalDecl = OldGlobalDecl; }
-
-private:
- Program::DeclScope Scope;
- bool OldGlobalDecl;
-};
-
-/// Scope used to handle initialization methods.
-template <class Emitter> class OptionScope final {
-public:
- /// Root constructor, compiling or discarding primitives.
- OptionScope(ByteCodeExprGen<Emitter> *Ctx, bool NewDiscardResult,
- bool NewInitializing)
- : Ctx(Ctx), OldDiscardResult(Ctx->DiscardResult),
- OldInitializing(Ctx->Initializing) {
- Ctx->DiscardResult = NewDiscardResult;
- Ctx->Initializing = NewInitializing;
- }
-
- ~OptionScope() {
- Ctx->DiscardResult = OldDiscardResult;
- Ctx->Initializing = OldInitializing;
- }
-
-private:
- /// Parent context.
- ByteCodeExprGen<Emitter> *Ctx;
- /// Old discard flag to restore.
- bool OldDiscardResult;
- bool OldInitializing;
-};
-
-} // namespace interp
-} // namespace clang
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitCastExpr(const CastExpr *CE) {
- const Expr *SubExpr = CE->getSubExpr();
- switch (CE->getCastKind()) {
-
- case CK_LValueToRValue: {
- return dereference(
- SubExpr, DerefKind::Read,
- [](PrimType) {
- // Value loaded - nothing to do here.
- return true;
- },
- [this, CE](PrimType T) {
- // Pointer on stack - dereference it.
- if (!this->emitLoadPop(T, CE))
- return false;
- return DiscardResult ? this->emitPop(T, CE) : true;
- });
- }
-
- case CK_UncheckedDerivedToBase:
- case CK_DerivedToBase: {
- if (!this->visit(SubExpr))
- return false;
-
- unsigned DerivedOffset = collectBaseOffset(getRecordTy(CE->getType()),
- getRecordTy(SubExpr->getType()));
-
- return this->emitGetPtrBasePop(DerivedOffset, CE);
- }
-
- case CK_BaseToDerived: {
- if (!this->visit(SubExpr))
- return false;
-
- unsigned DerivedOffset = collectBaseOffset(getRecordTy(SubExpr->getType()),
- getRecordTy(CE->getType()));
-
- return this->emitGetPtrDerivedPop(DerivedOffset, CE);
- }
-
- case CK_FloatingCast: {
- if (DiscardResult)
- return this->discard(SubExpr);
- if (!this->visit(SubExpr))
- return false;
- const auto *TargetSemantics = &Ctx.getFloatSemantics(CE->getType());
- return this->emitCastFP(TargetSemantics, getRoundingMode(CE), CE);
- }
-
- case CK_IntegralToFloating: {
- if (DiscardResult)
- return this->discard(SubExpr);
- std::optional<PrimType> FromT = classify(SubExpr->getType());
- if (!FromT)
- return false;
-
- if (!this->visit(SubExpr))
- return false;
-
- const auto *TargetSemantics = &Ctx.getFloatSemantics(CE->getType());
- llvm::RoundingMode RM = getRoundingMode(CE);
- return this->emitCastIntegralFloating(*FromT, TargetSemantics, RM, CE);
- }
-
- case CK_FloatingToBoolean:
- case CK_FloatingToIntegral: {
- if (DiscardResult)
- return this->discard(SubExpr);
-
- std::optional<PrimType> ToT = classify(CE->getType());
-
- if (!ToT)
- return false;
-
- if (!this->visit(SubExpr))
- return false;
-
- if (ToT == PT_IntAP)
- return this->emitCastFloatingIntegralAP(Ctx.getBitWidth(CE->getType()),
- CE);
- if (ToT == PT_IntAPS)
- return this->emitCastFloatingIntegralAPS(Ctx.getBitWidth(CE->getType()),
- CE);
-
- return this->emitCastFloatingIntegral(*ToT, CE);
- }
-
- case CK_NullToPointer:
- if (DiscardResult)
- return true;
- return this->emitNull(classifyPrim(CE->getType()), CE);
-
- case CK_PointerToIntegral: {
- // TODO: Discard handling.
- if (!this->visit(SubExpr))
- return false;
-
- PrimType T = classifyPrim(CE->getType());
- return this->emitCastPointerIntegral(T, CE);
- }
-
- case CK_ArrayToPointerDecay: {
- if (!this->visit(SubExpr))
- return false;
- if (!this->emitArrayDecay(CE))
- return false;
- if (DiscardResult)
- return this->emitPopPtr(CE);
- return true;
- }
-
- case CK_AtomicToNonAtomic:
- case CK_ConstructorConversion:
- case CK_FunctionToPointerDecay:
- case CK_NonAtomicToAtomic:
- case CK_NoOp:
- case CK_UserDefinedConversion:
- case CK_BitCast:
- return this->delegate(SubExpr);
-
- case CK_IntegralToBoolean:
- case CK_IntegralCast: {
- if (DiscardResult)
- return this->discard(SubExpr);
- std::optional<PrimType> FromT = classify(SubExpr->getType());
- std::optional<PrimType> ToT = classify(CE->getType());
-
- if (!FromT || !ToT)
- return false;
-
- if (!this->visit(SubExpr))
- return false;
-
- if (ToT == PT_IntAP)
- return this->emitCastAP(*FromT, Ctx.getBitWidth(CE->getType()), CE);
- if (ToT == PT_IntAPS)
- return this->emitCastAPS(*FromT, Ctx.getBitWidth(CE->getType()), CE);
-
- if (FromT == ToT)
- return true;
- return this->emitCast(*FromT, *ToT, CE);
- }
-
- case CK_PointerToBoolean: {
- PrimType PtrT = classifyPrim(SubExpr->getType());
-
- // Just emit p != nullptr for this.
- if (!this->visit(SubExpr))
- return false;
-
- if (!this->emitNull(PtrT, CE))
- return false;
-
- return this->emitNE(PtrT, CE);
- }
-
- case CK_IntegralComplexToBoolean:
- case CK_FloatingComplexToBoolean: {
- std::optional<PrimType> ElemT =
- classifyComplexElementType(SubExpr->getType());
- if (!ElemT)
- return false;
- // We emit the expression (__real(E) != 0 || __imag(E) != 0)
- // for us, that means (bool)E[0] || (bool)E[1]
- if (!this->visit(SubExpr))
- return false;
- if (!this->emitConstUint8(0, CE))
- return false;
- if (!this->emitArrayElemPtrUint8(CE))
- return false;
- if (!this->emitLoadPop(*ElemT, CE))
- return false;
- if (*ElemT == PT_Float) {
- if (!this->emitCastFloatingIntegral(PT_Bool, CE))
- return false;
- } else {
- if (!this->emitCast(*ElemT, PT_Bool, CE))
- return false;
- }
-
- // We now have the bool value of E[0] on the stack.
- LabelTy LabelTrue = this->getLabel();
- if (!this->jumpTrue(LabelTrue))
- return false;
-
- if (!this->emitConstUint8(1, CE))
- return false;
- if (!this->emitArrayElemPtrPopUint8(CE))
- return false;
- if (!this->emitLoadPop(*ElemT, CE))
- return false;
- if (*ElemT == PT_Float) {
- if (!this->emitCastFloatingIntegral(PT_Bool, CE))
- return false;
- } else {
- if (!this->emitCast(*ElemT, PT_Bool, CE))
- return false;
- }
- // Leave the boolean value of E[1] on the stack.
- LabelTy EndLabel = this->getLabel();
- this->jump(EndLabel);
-
- this->emitLabel(LabelTrue);
- if (!this->emitPopPtr(CE))
- return false;
- if (!this->emitConstBool(true, CE))
- return false;
-
- this->fallthrough(EndLabel);
- this->emitLabel(EndLabel);
-
- return true;
- }
-
- case CK_IntegralComplexToReal:
- case CK_FloatingComplexToReal:
- return this->emitComplexReal(SubExpr);
-
- case CK_IntegralRealToComplex:
- case CK_FloatingRealToComplex: {
- // We're creating a complex value here, so we need to
- // allocate storage for it.
- if (!Initializing) {
- std::optional<unsigned> LocalIndex =
- allocateLocal(CE, /*IsExtended=*/true);
- if (!LocalIndex)
- return false;
- if (!this->emitGetPtrLocal(*LocalIndex, CE))
- return false;
- }
-
- // Init the complex value to {SubExpr, 0}.
- if (!this->visitArrayElemInit(0, SubExpr))
- return false;
- // Zero-init the second element.
- PrimType T = classifyPrim(SubExpr->getType());
- if (!this->visitZeroInitializer(T, SubExpr->getType(), SubExpr))
- return false;
- return this->emitInitElem(T, 1, SubExpr);
- }
-
- case CK_ToVoid:
- return discard(SubExpr);
-
- default:
- assert(false && "Cast not implemented");
- }
- llvm_unreachable("Unhandled clang::CastKind enum");
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitIntegerLiteral(const IntegerLiteral *LE) {
- if (DiscardResult)
- return true;
-
- return this->emitConst(LE->getValue(), LE);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitFloatingLiteral(const FloatingLiteral *E) {
- if (DiscardResult)
- return true;
-
- return this->emitConstFloat(E->getValue(), E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitParenExpr(const ParenExpr *E) {
- return this->delegate(E->getSubExpr());
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitBinaryOperator(const BinaryOperator *BO) {
- // Need short-circuiting for these.
- if (BO->isLogicalOp())
- return this->VisitLogicalBinOp(BO);
-
- if (BO->getType()->isAnyComplexType())
- return this->VisitComplexBinOp(BO);
-
- const Expr *LHS = BO->getLHS();
- const Expr *RHS = BO->getRHS();
-
- if (BO->isPtrMemOp())
- return this->visit(RHS);
-
- // Typecheck the args.
- std::optional<PrimType> LT = classify(LHS->getType());
- std::optional<PrimType> RT = classify(RHS->getType());
- std::optional<PrimType> T = classify(BO->getType());
-
- // Deal with operations which have composite or void types.
- if (BO->isCommaOp()) {
- if (!this->discard(LHS))
- return false;
- if (RHS->getType()->isVoidType())
- return this->discard(RHS);
-
- return this->delegate(RHS);
- }
-
- // Special case for C++'s three-way/spaceship operator <=>, which
- // returns a std::{strong,weak,partial}_ordering (which is a class, so doesn't
- // have a PrimType).
- if (!T) {
- if (DiscardResult)
- return true;
- const ComparisonCategoryInfo *CmpInfo =
- Ctx.getASTContext().CompCategories.lookupInfoForType(BO->getType());
- assert(CmpInfo);
-
- // We need a temporary variable holding our return value.
- if (!Initializing) {
- std::optional<unsigned> ResultIndex = this->allocateLocal(BO, false);
- if (!this->emitGetPtrLocal(*ResultIndex, BO))
- return false;
- }
-
- if (!visit(LHS) || !visit(RHS))
- return false;
-
- return this->emitCMP3(*LT, CmpInfo, BO);
- }
-
- if (!LT || !RT || !T)
- return false;
-
- // Pointer arithmetic special case.
- if (BO->getOpcode() == BO_Add || BO->getOpcode() == BO_Sub) {
- if (T == PT_Ptr || (LT == PT_Ptr && RT == PT_Ptr))
- return this->VisitPointerArithBinOp(BO);
- }
-
- if (!visit(LHS) || !visit(RHS))
- return false;
-
- // For languages such as C, cast the result of one
- // of our comparision opcodes to T (which is usually int).
- auto MaybeCastToBool = [this, T, BO](bool Result) {
- if (!Result)
- return false;
- if (DiscardResult)
- return this->emitPop(*T, BO);
- if (T != PT_Bool)
- return this->emitCast(PT_Bool, *T, BO);
- return true;
- };
-
- auto Discard = [this, T, BO](bool Result) {
- if (!Result)
- return false;
- return DiscardResult ? this->emitPop(*T, BO) : true;
- };
-
- switch (BO->getOpcode()) {
- case BO_EQ:
- return MaybeCastToBool(this->emitEQ(*LT, BO));
- case BO_NE:
- return MaybeCastToBool(this->emitNE(*LT, BO));
- case BO_LT:
- return MaybeCastToBool(this->emitLT(*LT, BO));
- case BO_LE:
- return MaybeCastToBool(this->emitLE(*LT, BO));
- case BO_GT:
- return MaybeCastToBool(this->emitGT(*LT, BO));
- case BO_GE:
- return MaybeCastToBool(this->emitGE(*LT, BO));
- case BO_Sub:
- if (BO->getType()->isFloatingType())
- return Discard(this->emitSubf(getRoundingMode(BO), BO));
- return Discard(this->emitSub(*T, BO));
- case BO_Add:
- if (BO->getType()->isFloatingType())
- return Discard(this->emitAddf(getRoundingMode(BO), BO));
- return Discard(this->emitAdd(*T, BO));
- case BO_Mul:
- if (BO->getType()->isFloatingType())
- return Discard(this->emitMulf(getRoundingMode(BO), BO));
- return Discard(this->emitMul(*T, BO));
- case BO_Rem:
- return Discard(this->emitRem(*T, BO));
- case BO_Div:
- if (BO->getType()->isFloatingType())
- return Discard(this->emitDivf(getRoundingMode(BO), BO));
- return Discard(this->emitDiv(*T, BO));
- case BO_Assign:
- if (DiscardResult)
- return LHS->refersToBitField() ? this->emitStoreBitFieldPop(*T, BO)
- : this->emitStorePop(*T, BO);
- return LHS->refersToBitField() ? this->emitStoreBitField(*T, BO)
- : this->emitStore(*T, BO);
- case BO_And:
- return Discard(this->emitBitAnd(*T, BO));
- case BO_Or:
- return Discard(this->emitBitOr(*T, BO));
- case BO_Shl:
- return Discard(this->emitShl(*LT, *RT, BO));
- case BO_Shr:
- return Discard(this->emitShr(*LT, *RT, BO));
- case BO_Xor:
- return Discard(this->emitBitXor(*T, BO));
- case BO_LOr:
- case BO_LAnd:
- llvm_unreachable("Already handled earlier");
- default:
- return false;
- }
-
- llvm_unreachable("Unhandled binary op");
-}
-
-/// Perform addition/subtraction of a pointer and an integer or
-/// subtraction of two pointers.
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitPointerArithBinOp(const BinaryOperator *E) {
- BinaryOperatorKind Op = E->getOpcode();
- const Expr *LHS = E->getLHS();
- const Expr *RHS = E->getRHS();
-
- if ((Op != BO_Add && Op != BO_Sub) ||
- (!LHS->getType()->isPointerType() && !RHS->getType()->isPointerType()))
- return false;
-
- std::optional<PrimType> LT = classify(LHS);
- std::optional<PrimType> RT = classify(RHS);
-
- if (!LT || !RT)
- return false;
-
- if (LHS->getType()->isPointerType() && RHS->getType()->isPointerType()) {
- if (Op != BO_Sub)
- return false;
-
- assert(E->getType()->isIntegerType());
- if (!visit(RHS) || !visit(LHS))
- return false;
-
- return this->emitSubPtr(classifyPrim(E->getType()), E);
- }
-
- PrimType OffsetType;
- if (LHS->getType()->isIntegerType()) {
- if (!visit(RHS) || !visit(LHS))
- return false;
- OffsetType = *LT;
- } else if (RHS->getType()->isIntegerType()) {
- if (!visit(LHS) || !visit(RHS))
- return false;
- OffsetType = *RT;
- } else {
- return false;
- }
-
- if (Op == BO_Add)
- return this->emitAddOffset(OffsetType, E);
- else if (Op == BO_Sub)
- return this->emitSubOffset(OffsetType, E);
-
- return false;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitLogicalBinOp(const BinaryOperator *E) {
- assert(E->isLogicalOp());
- BinaryOperatorKind Op = E->getOpcode();
- const Expr *LHS = E->getLHS();
- const Expr *RHS = E->getRHS();
- std::optional<PrimType> T = classify(E->getType());
-
- if (Op == BO_LOr) {
- // Logical OR. Visit LHS and only evaluate RHS if LHS was FALSE.
- LabelTy LabelTrue = this->getLabel();
- LabelTy LabelEnd = this->getLabel();
-
- if (!this->visitBool(LHS))
- return false;
- if (!this->jumpTrue(LabelTrue))
- return false;
-
- if (!this->visitBool(RHS))
- return false;
- if (!this->jump(LabelEnd))
- return false;
-
- this->emitLabel(LabelTrue);
- this->emitConstBool(true, E);
- this->fallthrough(LabelEnd);
- this->emitLabel(LabelEnd);
-
- } else {
- assert(Op == BO_LAnd);
- // Logical AND.
- // Visit LHS. Only visit RHS if LHS was TRUE.
- LabelTy LabelFalse = this->getLabel();
- LabelTy LabelEnd = this->getLabel();
-
- if (!this->visitBool(LHS))
- return false;
- if (!this->jumpFalse(LabelFalse))
- return false;
-
- if (!this->visitBool(RHS))
- return false;
- if (!this->jump(LabelEnd))
- return false;
-
- this->emitLabel(LabelFalse);
- this->emitConstBool(false, E);
- this->fallthrough(LabelEnd);
- this->emitLabel(LabelEnd);
- }
-
- if (DiscardResult)
- return this->emitPopBool(E);
-
- // For C, cast back to integer type.
- assert(T);
- if (T != PT_Bool)
- return this->emitCast(PT_Bool, *T, E);
- return true;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitComplexBinOp(const BinaryOperator *E) {
- assert(Initializing);
-
- const Expr *LHS = E->getLHS();
- const Expr *RHS = E->getRHS();
- PrimType LHSElemT = *this->classifyComplexElementType(LHS->getType());
- PrimType RHSElemT = *this->classifyComplexElementType(RHS->getType());
-
- unsigned LHSOffset = this->allocateLocalPrimitive(LHS, PT_Ptr, true, false);
- unsigned RHSOffset = this->allocateLocalPrimitive(RHS, PT_Ptr, true, false);
- unsigned ResultOffset = ~0u;
- if (!this->DiscardResult)
- ResultOffset = this->allocateLocalPrimitive(E, PT_Ptr, true, false);
-
- assert(LHSElemT == RHSElemT);
-
- // Save result pointer in ResultOffset
- if (!this->DiscardResult) {
- if (!this->emitDupPtr(E))
- return false;
- if (!this->emitSetLocal(PT_Ptr, ResultOffset, E))
- return false;
- }
-
- // Evaluate LHS and save value to LHSOffset.
- if (!this->visit(LHS))
- return false;
- if (!this->emitSetLocal(PT_Ptr, LHSOffset, E))
- return false;
-
- // Same with RHS.
- if (!this->visit(RHS))
- return false;
- if (!this->emitSetLocal(PT_Ptr, RHSOffset, E))
- return false;
-
- // Now we can get pointers to the LHS and RHS from the offsets above.
- BinaryOperatorKind Op = E->getOpcode();
- for (unsigned ElemIndex = 0; ElemIndex != 2; ++ElemIndex) {
- // Result pointer for the store later.
- if (!this->DiscardResult) {
- if (!this->emitGetLocal(PT_Ptr, ResultOffset, E))
- return false;
- }
-
- if (!this->emitGetLocal(PT_Ptr, LHSOffset, E))
- return false;
- if (!this->emitConstUint8(ElemIndex, E))
- return false;
- if (!this->emitArrayElemPtrPopUint8(E))
- return false;
- if (!this->emitLoadPop(LHSElemT, E))
- return false;
-
- if (!this->emitGetLocal(PT_Ptr, RHSOffset, E))
- return false;
- if (!this->emitConstUint8(ElemIndex, E))
- return false;
- if (!this->emitArrayElemPtrPopUint8(E))
- return false;
- if (!this->emitLoadPop(RHSElemT, E))
- return false;
-
- // The actual operation.
- switch (Op) {
- case BO_Add:
- if (LHSElemT == PT_Float) {
- if (!this->emitAddf(getRoundingMode(E), E))
- return false;
- } else {
- if (!this->emitAdd(LHSElemT, E))
- return false;
- }
- break;
- case BO_Sub:
- if (LHSElemT == PT_Float) {
- if (!this->emitSubf(getRoundingMode(E), E))
- return false;
- } else {
- if (!this->emitSub(LHSElemT, E))
- return false;
- }
- break;
-
- default:
- return false;
- }
-
- if (!this->DiscardResult) {
- // Initialize array element with the value we just computed.
- if (!this->emitInitElemPop(LHSElemT, ElemIndex, E))
- return false;
- } else {
- if (!this->emitPop(LHSElemT, E))
- return false;
- }
- }
- return true;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
- QualType QT = E->getType();
-
- if (std::optional<PrimType> T = classify(QT))
- return this->visitZeroInitializer(*T, QT, E);
-
- if (QT->isRecordType())
- return false;
-
- if (QT->isIncompleteArrayType())
- return true;
-
- if (QT->isArrayType()) {
- const ArrayType *AT = QT->getAsArrayTypeUnsafe();
- assert(AT);
- const auto *CAT = cast<ConstantArrayType>(AT);
- size_t NumElems = CAT->getSize().getZExtValue();
- PrimType ElemT = classifyPrim(CAT->getElementType());
-
- for (size_t I = 0; I != NumElems; ++I) {
- if (!this->visitZeroInitializer(ElemT, CAT->getElementType(), E))
- return false;
- if (!this->emitInitElem(ElemT, I, E))
- return false;
- }
-
- return true;
- }
-
- return false;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitArraySubscriptExpr(
- const ArraySubscriptExpr *E) {
- const Expr *Base = E->getBase();
- const Expr *Index = E->getIdx();
-
- if (DiscardResult)
- return this->discard(Base) && this->discard(Index);
-
- // Take pointer of LHS, add offset from RHS.
- // What's left on the stack after this is a pointer.
- if (!this->visit(Base))
- return false;
-
- if (!this->visit(Index))
- return false;
-
- PrimType IndexT = classifyPrim(Index->getType());
- return this->emitArrayElemPtrPop(IndexT, E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
- const Expr *E) {
- assert(E->getType()->isRecordType());
- const Record *R = getRecord(E->getType());
-
- unsigned InitIndex = 0;
- for (const Expr *Init : Inits) {
- if (!this->emitDupPtr(E))
- return false;
-
- if (std::optional<PrimType> T = classify(Init)) {
- const Record::Field *FieldToInit = R->getField(InitIndex);
- if (!this->visit(Init))
- return false;
-
- if (FieldToInit->isBitField()) {
- if (!this->emitInitBitField(*T, FieldToInit, E))
- return false;
- } else {
- if (!this->emitInitField(*T, FieldToInit->Offset, E))
- return false;
- }
-
- if (!this->emitPopPtr(E))
- return false;
- ++InitIndex;
- } else {
- // Initializer for a direct base class.
- if (const Record::Base *B = R->getBase(Init->getType())) {
- if (!this->emitGetPtrBasePop(B->Offset, Init))
- return false;
-
- if (!this->visitInitializer(Init))
- return false;
-
- if (!this->emitInitPtrPop(E))
- return false;
- // Base initializers don't increase InitIndex, since they don't count
- // into the Record's fields.
- } else {
- const Record::Field *FieldToInit = R->getField(InitIndex);
- // Non-primitive case. Get a pointer to the field-to-initialize
- // on the stack and recurse into visitInitializer().
- if (!this->emitGetPtrField(FieldToInit->Offset, Init))
- return false;
-
- if (!this->visitInitializer(Init))
- return false;
-
- if (!this->emitPopPtr(E))
- return false;
- ++InitIndex;
- }
- }
- }
- return true;
-}
-
-/// Pointer to the array(not the element!) must be on the stack when calling
-/// this.
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::visitArrayElemInit(unsigned ElemIndex,
- const Expr *Init) {
- if (std::optional<PrimType> T = classify(Init->getType())) {
- // Visit the primitive element like normal.
- if (!this->visit(Init))
- return false;
- return this->emitInitElem(*T, ElemIndex, Init);
- }
-
- // Advance the pointer currently on the stack to the given
- // dimension.
- if (!this->emitConstUint32(ElemIndex, Init))
- return false;
- if (!this->emitArrayElemPtrUint32(Init))
- return false;
- if (!this->visitInitializer(Init))
- return false;
- return this->emitPopPtr(Init);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitInitListExpr(const InitListExpr *E) {
- // Handle discarding first.
- if (DiscardResult) {
- for (const Expr *Init : E->inits()) {
- if (!this->discard(Init))
- return false;
- }
- return true;
- }
-
- // Primitive values.
- if (std::optional<PrimType> T = classify(E->getType())) {
- assert(!DiscardResult);
- if (E->getNumInits() == 0)
- return this->visitZeroInitializer(*T, E->getType(), E);
- assert(E->getNumInits() == 1);
- return this->delegate(E->inits()[0]);
- }
-
- QualType T = E->getType();
- if (T->isRecordType())
- return this->visitInitList(E->inits(), E);
-
- if (T->isArrayType()) {
- // FIXME: Array fillers.
- unsigned ElementIndex = 0;
- for (const Expr *Init : E->inits()) {
- if (!this->visitArrayElemInit(ElementIndex, Init))
- return false;
- ++ElementIndex;
- }
- return true;
- }
-
- if (T->isAnyComplexType()) {
- unsigned NumInits = E->getNumInits();
-
- if (NumInits == 1)
- return this->delegate(E->inits()[0]);
-
- QualType ElemQT = E->getType()->getAs<ComplexType>()->getElementType();
- PrimType ElemT = classifyPrim(ElemQT);
- if (NumInits == 0) {
- // Zero-initialize both elements.
- for (unsigned I = 0; I < 2; ++I) {
- if (!this->visitZeroInitializer(ElemT, ElemQT, E))
- return false;
- if (!this->emitInitElem(ElemT, I, E))
- return false;
- }
- } else if (NumInits == 2) {
- unsigned InitIndex = 0;
- for (const Expr *Init : E->inits()) {
- if (!this->visit(Init))
- return false;
-
- if (!this->emitInitElem(ElemT, InitIndex, E))
- return false;
- ++InitIndex;
- }
- }
- return true;
- }
-
- return false;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitCXXParenListInitExpr(
- const CXXParenListInitExpr *E) {
- if (DiscardResult) {
- for (const Expr *Init : E->getInitExprs()) {
- if (!this->discard(Init))
- return false;
- }
- return true;
- }
-
- assert(E->getType()->isRecordType());
- return this->visitInitList(E->getInitExprs(), E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitSubstNonTypeTemplateParmExpr(
- const SubstNonTypeTemplateParmExpr *E) {
- return this->delegate(E->getReplacement());
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitConstantExpr(const ConstantExpr *E) {
- // Try to emit the APValue directly, without visiting the subexpr.
- // This will only fail if we can't emit the APValue, so won't emit any
- // diagnostics or any double values.
- std::optional<PrimType> T = classify(E->getType());
- if (T && E->hasAPValueResult() &&
- this->visitAPValue(E->getAPValueResult(), *T, E))
- return true;
-
- return this->delegate(E->getSubExpr());
-}
-
-static CharUnits AlignOfType(QualType T, const ASTContext &ASTCtx,
- UnaryExprOrTypeTrait Kind) {
- bool AlignOfReturnsPreferred =
- ASTCtx.getLangOpts().getClangABICompat() <= LangOptions::ClangABI::Ver7;
-
- // C++ [expr.alignof]p3:
- // When alignof is applied to a reference type, the result is the
- // alignment of the referenced type.
- if (const auto *Ref = T->getAs<ReferenceType>())
- T = Ref->getPointeeType();
-
- // __alignof is defined to return the preferred alignment.
- // Before 8, clang returned the preferred alignment for alignof and
- // _Alignof as well.
- if (Kind == UETT_PreferredAlignOf || AlignOfReturnsPreferred)
- return ASTCtx.toCharUnitsFromBits(ASTCtx.getPreferredTypeAlign(T));
-
- return ASTCtx.getTypeAlignInChars(T);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitUnaryExprOrTypeTraitExpr(
- const UnaryExprOrTypeTraitExpr *E) {
- UnaryExprOrTypeTrait Kind = E->getKind();
- ASTContext &ASTCtx = Ctx.getASTContext();
-
- if (Kind == UETT_SizeOf) {
- QualType ArgType = E->getTypeOfArgument();
- CharUnits Size;
- if (ArgType->isVoidType() || ArgType->isFunctionType())
- Size = CharUnits::One();
- else {
- if (ArgType->isDependentType() || !ArgType->isConstantSizeType())
- return false;
-
- Size = ASTCtx.getTypeSizeInChars(ArgType);
- }
-
- if (DiscardResult)
- return true;
-
- return this->emitConst(Size.getQuantity(), E);
- }
-
- if (Kind == UETT_AlignOf || Kind == UETT_PreferredAlignOf) {
- CharUnits Size;
-
- if (E->isArgumentType()) {
- QualType ArgType = E->getTypeOfArgument();
-
- Size = AlignOfType(ArgType, ASTCtx, Kind);
- } else {
- // Argument is an expression, not a type.
- const Expr *Arg = E->getArgumentExpr()->IgnoreParens();
-
- // The kinds of expressions that we have special-case logic here for
- // should be kept up to date with the special checks for those
- // expressions in Sema.
-
- // alignof decl is always accepted, even if it doesn't make sense: we
- // default to 1 in those cases.
- if (const auto *DRE = dyn_cast<DeclRefExpr>(Arg))
- Size = ASTCtx.getDeclAlign(DRE->getDecl(),
- /*RefAsPointee*/ true);
- else if (const auto *ME = dyn_cast<MemberExpr>(Arg))
- Size = ASTCtx.getDeclAlign(ME->getMemberDecl(),
- /*RefAsPointee*/ true);
- else
- Size = AlignOfType(Arg->getType(), ASTCtx, Kind);
- }
-
- if (DiscardResult)
- return true;
-
- return this->emitConst(Size.getQuantity(), E);
- }
-
- return false;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitMemberExpr(const MemberExpr *E) {
- // 'Base.Member'
- const Expr *Base = E->getBase();
-
- if (DiscardResult)
- return this->discard(Base);
-
- if (!this->visit(Base))
- return false;
-
- // Base above gives us a pointer on the stack.
- // TODO: Implement non-FieldDecl members.
- const ValueDecl *Member = E->getMemberDecl();
- if (const auto *FD = dyn_cast<FieldDecl>(Member)) {
- const RecordDecl *RD = FD->getParent();
- const Record *R = getRecord(RD);
- const Record::Field *F = R->getField(FD);
- // Leave a pointer to the field on the stack.
- if (F->Decl->getType()->isReferenceType())
- return this->emitGetFieldPop(PT_Ptr, F->Offset, E);
- return this->emitGetPtrField(F->Offset, E);
- }
-
- return false;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitArrayInitIndexExpr(
- const ArrayInitIndexExpr *E) {
- // ArrayIndex might not be set if a ArrayInitIndexExpr is being evaluated
- // stand-alone, e.g. via EvaluateAsInt().
- if (!ArrayIndex)
- return false;
- return this->emitConst(*ArrayIndex, E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitArrayInitLoopExpr(
- const ArrayInitLoopExpr *E) {
- assert(Initializing);
- assert(!DiscardResult);
- // TODO: This compiles to quite a lot of bytecode if the array is larger.
- // Investigate compiling this to a loop.
-
- const Expr *SubExpr = E->getSubExpr();
- const Expr *CommonExpr = E->getCommonExpr();
- size_t Size = E->getArraySize().getZExtValue();
-
- // If the common expression is an opaque expression, we visit it
- // here once so we have its value cached.
- // FIXME: This might be necessary (or useful) for all expressions.
- if (isa<OpaqueValueExpr>(CommonExpr)) {
- if (!this->discard(CommonExpr))
- return false;
- }
-
- // So, every iteration, we execute an assignment here
- // where the LHS is on the stack (the target array)
- // and the RHS is our SubExpr.
- for (size_t I = 0; I != Size; ++I) {
- ArrayIndexScope<Emitter> IndexScope(this, I);
- BlockScope<Emitter> BS(this);
-
- if (!this->visitArrayElemInit(I, SubExpr))
- return false;
- }
- return true;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
- if (Initializing)
- return this->visitInitializer(E->getSourceExpr());
-
- PrimType SubExprT = classify(E->getSourceExpr()).value_or(PT_Ptr);
- if (auto It = OpaqueExprs.find(E); It != OpaqueExprs.end())
- return this->emitGetLocal(SubExprT, It->second, E);
-
- if (!this->visit(E->getSourceExpr()))
- return false;
-
- // At this point we either have the evaluated source expression or a pointer
- // to an object on the stack. We want to create a local variable that stores
- // this value.
- std::optional<unsigned> LocalIndex =
- allocateLocalPrimitive(E, SubExprT, /*IsConst=*/true);
- if (!LocalIndex)
- return false;
- if (!this->emitSetLocal(SubExprT, *LocalIndex, E))
- return false;
-
- // Here the local variable is created but the value is removed from the stack,
- // so we put it back, because the caller might need it.
- if (!DiscardResult) {
- if (!this->emitGetLocal(SubExprT, *LocalIndex, E))
- return false;
- }
-
- // FIXME: Ideally the cached value should be cleaned up later.
- OpaqueExprs.insert({E, *LocalIndex});
-
- return true;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitAbstractConditionalOperator(
- const AbstractConditionalOperator *E) {
- const Expr *Condition = E->getCond();
- const Expr *TrueExpr = E->getTrueExpr();
- const Expr *FalseExpr = E->getFalseExpr();
-
- LabelTy LabelEnd = this->getLabel(); // Label after the operator.
- LabelTy LabelFalse = this->getLabel(); // Label for the false expr.
-
- if (!this->visitBool(Condition))
- return false;
-
- if (!this->jumpFalse(LabelFalse))
- return false;
-
- if (!this->delegate(TrueExpr))
- return false;
- if (!this->jump(LabelEnd))
- return false;
-
- this->emitLabel(LabelFalse);
-
- if (!this->delegate(FalseExpr))
- return false;
-
- this->fallthrough(LabelEnd);
- this->emitLabel(LabelEnd);
-
- return true;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitStringLiteral(const StringLiteral *E) {
- if (DiscardResult)
- return true;
-
- if (!Initializing) {
- unsigned StringIndex = P.createGlobalString(E);
- return this->emitGetPtrGlobal(StringIndex, E);
- }
-
- // We are initializing an array on the stack.
- const ConstantArrayType *CAT =
- Ctx.getASTContext().getAsConstantArrayType(E->getType());
- assert(CAT && "a string literal that's not a constant array?");
-
- // If the initializer string is too long, a diagnostic has already been
- // emitted. Read only the array length from the string literal.
- unsigned ArraySize = CAT->getSize().getZExtValue();
- unsigned N = std::min(ArraySize, E->getLength());
- size_t CharWidth = E->getCharByteWidth();
-
- for (unsigned I = 0; I != N; ++I) {
- uint32_t CodeUnit = E->getCodeUnit(I);
-
- if (CharWidth == 1) {
- this->emitConstSint8(CodeUnit, E);
- this->emitInitElemSint8(I, E);
- } else if (CharWidth == 2) {
- this->emitConstUint16(CodeUnit, E);
- this->emitInitElemUint16(I, E);
- } else if (CharWidth == 4) {
- this->emitConstUint32(CodeUnit, E);
- this->emitInitElemUint32(I, E);
- } else {
- llvm_unreachable("unsupported character width");
- }
- }
-
- // Fill up the rest of the char array with NUL bytes.
- for (unsigned I = N; I != ArraySize; ++I) {
- if (CharWidth == 1) {
- this->emitConstSint8(0, E);
- this->emitInitElemSint8(I, E);
- } else if (CharWidth == 2) {
- this->emitConstUint16(0, E);
- this->emitInitElemUint16(I, E);
- } else if (CharWidth == 4) {
- this->emitConstUint32(0, E);
- this->emitInitElemUint32(I, E);
- } else {
- llvm_unreachable("unsupported character width");
- }
- }
-
- return true;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitCharacterLiteral(
- const CharacterLiteral *E) {
- if (DiscardResult)
- return true;
- return this->emitConst(E->getValue(), E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitFloatCompoundAssignOperator(
- const CompoundAssignOperator *E) {
-
- const Expr *LHS = E->getLHS();
- const Expr *RHS = E->getRHS();
- QualType LHSType = LHS->getType();
- QualType LHSComputationType = E->getComputationLHSType();
- QualType ResultType = E->getComputationResultType();
- std::optional<PrimType> LT = classify(LHSComputationType);
- std::optional<PrimType> RT = classify(ResultType);
-
- assert(ResultType->isFloatingType());
-
- if (!LT || !RT)
- return false;
-
- PrimType LHST = classifyPrim(LHSType);
-
- // C++17 onwards require that we evaluate the RHS first.
- // Compute RHS and save it in a temporary variable so we can
- // load it again later.
- if (!visit(RHS))
- return false;
-
- unsigned TempOffset = this->allocateLocalPrimitive(E, *RT, /*IsConst=*/true);
- if (!this->emitSetLocal(*RT, TempOffset, E))
- return false;
-
- // First, visit LHS.
- if (!visit(LHS))
- return false;
- if (!this->emitLoad(LHST, E))
- return false;
-
- // If necessary, convert LHS to its computation type.
- if (!this->emitPrimCast(LHST, classifyPrim(LHSComputationType),
- LHSComputationType, E))
- return false;
-
- // Now load RHS.
- if (!this->emitGetLocal(*RT, TempOffset, E))
- return false;
-
- llvm::RoundingMode RM = getRoundingMode(E);
- switch (E->getOpcode()) {
- case BO_AddAssign:
- if (!this->emitAddf(RM, E))
- return false;
- break;
- case BO_SubAssign:
- if (!this->emitSubf(RM, E))
- return false;
- break;
- case BO_MulAssign:
- if (!this->emitMulf(RM, E))
- return false;
- break;
- case BO_DivAssign:
- if (!this->emitDivf(RM, E))
- return false;
- break;
- default:
- return false;
- }
-
- if (!this->emitPrimCast(classifyPrim(ResultType), LHST, LHS->getType(), E))
- return false;
-
- if (DiscardResult)
- return this->emitStorePop(LHST, E);
- return this->emitStore(LHST, E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitPointerCompoundAssignOperator(
- const CompoundAssignOperator *E) {
- BinaryOperatorKind Op = E->getOpcode();
- const Expr *LHS = E->getLHS();
- const Expr *RHS = E->getRHS();
- std::optional<PrimType> LT = classify(LHS->getType());
- std::optional<PrimType> RT = classify(RHS->getType());
-
- if (Op != BO_AddAssign && Op != BO_SubAssign)
- return false;
-
- if (!LT || !RT)
- return false;
- assert(*LT == PT_Ptr);
-
- if (!visit(LHS))
- return false;
-
- if (!this->emitLoadPtr(LHS))
- return false;
-
- if (!visit(RHS))
- return false;
-
- if (Op == BO_AddAssign)
- this->emitAddOffset(*RT, E);
- else
- this->emitSubOffset(*RT, E);
-
- if (DiscardResult)
- return this->emitStorePopPtr(E);
- return this->emitStorePtr(E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitCompoundAssignOperator(
- const CompoundAssignOperator *E) {
-
- const Expr *LHS = E->getLHS();
- const Expr *RHS = E->getRHS();
- std::optional<PrimType> LHSComputationT =
- classify(E->getComputationLHSType());
- std::optional<PrimType> LT = classify(LHS->getType());
- std::optional<PrimType> RT = classify(E->getComputationResultType());
- std::optional<PrimType> ResultT = classify(E->getType());
-
- if (!LT || !RT || !ResultT || !LHSComputationT)
- return false;
-
- // Handle floating point operations separately here, since they
- // require special care.
-
- if (ResultT == PT_Float || RT == PT_Float)
- return VisitFloatCompoundAssignOperator(E);
-
- if (E->getType()->isPointerType())
- return VisitPointerCompoundAssignOperator(E);
-
- assert(!E->getType()->isPointerType() && "Handled above");
- assert(!E->getType()->isFloatingType() && "Handled above");
-
- // C++17 onwards require that we evaluate the RHS first.
- // Compute RHS and save it in a temporary variable so we can
- // load it again later.
- // FIXME: Compound assignments are unsequenced in C, so we might
- // have to figure out how to reject them.
- if (!visit(RHS))
- return false;
-
- unsigned TempOffset = this->allocateLocalPrimitive(E, *RT, /*IsConst=*/true);
-
- if (!this->emitSetLocal(*RT, TempOffset, E))
- return false;
-
- // Get LHS pointer, load its value and cast it to the
- // computation type if necessary.
- if (!visit(LHS))
- return false;
- if (!this->emitLoad(*LT, E))
- return false;
- if (*LT != *LHSComputationT) {
- if (!this->emitCast(*LT, *LHSComputationT, E))
- return false;
- }
-
- // Get the RHS value on the stack.
- if (!this->emitGetLocal(*RT, TempOffset, E))
- return false;
-
- // Perform operation.
- switch (E->getOpcode()) {
- case BO_AddAssign:
- if (!this->emitAdd(*LHSComputationT, E))
- return false;
- break;
- case BO_SubAssign:
- if (!this->emitSub(*LHSComputationT, E))
- return false;
- break;
- case BO_MulAssign:
- if (!this->emitMul(*LHSComputationT, E))
- return false;
- break;
- case BO_DivAssign:
- if (!this->emitDiv(*LHSComputationT, E))
- return false;
- break;
- case BO_RemAssign:
- if (!this->emitRem(*LHSComputationT, E))
- return false;
- break;
- case BO_ShlAssign:
- if (!this->emitShl(*LHSComputationT, *RT, E))
- return false;
- break;
- case BO_ShrAssign:
- if (!this->emitShr(*LHSComputationT, *RT, E))
- return false;
- break;
- case BO_AndAssign:
- if (!this->emitBitAnd(*LHSComputationT, E))
- return false;
- break;
- case BO_XorAssign:
- if (!this->emitBitXor(*LHSComputationT, E))
- return false;
- break;
- case BO_OrAssign:
- if (!this->emitBitOr(*LHSComputationT, E))
- return false;
- break;
- default:
- llvm_unreachable("Unimplemented compound assign operator");
- }
-
- // And now cast from LHSComputationT to ResultT.
- if (*ResultT != *LHSComputationT) {
- if (!this->emitCast(*LHSComputationT, *ResultT, E))
- return false;
- }
-
- // And store the result in LHS.
- if (DiscardResult) {
- if (LHS->refersToBitField())
- return this->emitStoreBitFieldPop(*ResultT, E);
- return this->emitStorePop(*ResultT, E);
- }
- if (LHS->refersToBitField())
- return this->emitStoreBitField(*ResultT, E);
- return this->emitStore(*ResultT, E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitExprWithCleanups(
- const ExprWithCleanups *E) {
- const Expr *SubExpr = E->getSubExpr();
-
- assert(E->getNumObjects() == 0 && "TODO: Implement cleanups");
-
- return this->delegate(SubExpr);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitMaterializeTemporaryExpr(
- const MaterializeTemporaryExpr *E) {
- const Expr *SubExpr = E->getSubExpr();
-
- if (Initializing) {
- // We already have a value, just initialize that.
- return this->visitInitializer(SubExpr);
- }
- // If we don't end up using the materialized temporary anyway, don't
- // bother creating it.
- if (DiscardResult)
- return this->discard(SubExpr);
-
- // When we're initializing a global variable *or* the storage duration of
- // the temporary is explicitly static, create a global variable.
- std::optional<PrimType> SubExprT = classify(SubExpr);
- bool IsStatic = E->getStorageDuration() == SD_Static;
- if (GlobalDecl || IsStatic) {
- std::optional<unsigned> GlobalIndex = P.createGlobal(E);
- if (!GlobalIndex)
- return false;
-
- const LifetimeExtendedTemporaryDecl *TempDecl =
- E->getLifetimeExtendedTemporaryDecl();
- if (IsStatic)
- assert(TempDecl);
-
- if (SubExprT) {
- if (!this->visit(SubExpr))
- return false;
- if (IsStatic) {
- if (!this->emitInitGlobalTemp(*SubExprT, *GlobalIndex, TempDecl, E))
- return false;
- } else {
- if (!this->emitInitGlobal(*SubExprT, *GlobalIndex, E))
- return false;
- }
- return this->emitGetPtrGlobal(*GlobalIndex, E);
- }
-
- // Non-primitive values.
- if (!this->emitGetPtrGlobal(*GlobalIndex, E))
- return false;
- if (!this->visitInitializer(SubExpr))
- return false;
- if (IsStatic)
- return this->emitInitGlobalTempComp(TempDecl, E);
- return true;
- }
-
- // For everyhing else, use local variables.
- if (SubExprT) {
- if (std::optional<unsigned> LocalIndex = allocateLocalPrimitive(
- SubExpr, *SubExprT, /*IsConst=*/true, /*IsExtended=*/true)) {
- if (!this->visit(SubExpr))
- return false;
- this->emitSetLocal(*SubExprT, *LocalIndex, E);
- return this->emitGetPtrLocal(*LocalIndex, E);
- }
- } else {
- if (std::optional<unsigned> LocalIndex =
- allocateLocal(SubExpr, /*IsExtended=*/true)) {
- if (!this->emitGetPtrLocal(*LocalIndex, E))
- return false;
- return this->visitInitializer(SubExpr);
- }
- }
- return false;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitCXXBindTemporaryExpr(
- const CXXBindTemporaryExpr *E) {
- return this->delegate(E->getSubExpr());
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitCompoundLiteralExpr(
- const CompoundLiteralExpr *E) {
- const Expr *Init = E->getInitializer();
- if (Initializing) {
- // We already have a value, just initialize that.
- return this->visitInitializer(Init);
- }
-
- std::optional<PrimType> T = classify(E->getType());
- if (E->isFileScope()) {
- if (std::optional<unsigned> GlobalIndex = P.createGlobal(E)) {
- if (classify(E->getType()))
- return this->visit(Init);
- if (!this->emitGetPtrGlobal(*GlobalIndex, E))
- return false;
- return this->visitInitializer(Init);
- }
- }
-
- // Otherwise, use a local variable.
- if (T) {
- // For primitive types, we just visit the initializer.
- return this->delegate(Init);
- } else {
- if (std::optional<unsigned> LocalIndex = allocateLocal(Init)) {
- if (!this->emitGetPtrLocal(*LocalIndex, E))
- return false;
- if (!this->visitInitializer(Init))
- return false;
- if (DiscardResult)
- return this->emitPopPtr(E);
- return true;
- }
- }
-
- return false;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitTypeTraitExpr(const TypeTraitExpr *E) {
- if (DiscardResult)
- return true;
- return this->emitConstBool(E->getValue(), E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitLambdaExpr(const LambdaExpr *E) {
- assert(Initializing);
- const Record *R = P.getOrCreateRecord(E->getLambdaClass());
-
- auto *CaptureInitIt = E->capture_init_begin();
- // Initialize all fields (which represent lambda captures) of the
- // record with their initializers.
- for (const Record::Field &F : R->fields()) {
- const Expr *Init = *CaptureInitIt;
- ++CaptureInitIt;
-
- if (std::optional<PrimType> T = classify(Init)) {
- if (!this->visit(Init))
- return false;
-
- if (!this->emitSetField(*T, F.Offset, E))
- return false;
- } else {
- if (!this->emitDupPtr(E))
- return false;
-
- if (!this->emitGetPtrField(F.Offset, E))
- return false;
-
- if (!this->visitInitializer(Init))
- return false;
-
- if (!this->emitPopPtr(E))
- return false;
- }
- }
-
- return true;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitPredefinedExpr(const PredefinedExpr *E) {
- if (DiscardResult)
- return true;
-
- assert(!Initializing);
- return this->visit(E->getFunctionName());
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitCXXThrowExpr(const CXXThrowExpr *E) {
- if (E->getSubExpr() && !this->discard(E->getSubExpr()))
- return false;
-
- return this->emitInvalid(E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitCXXReinterpretCastExpr(
- const CXXReinterpretCastExpr *E) {
- if (!this->discard(E->getSubExpr()))
- return false;
-
- return this->emitInvalidCast(CastKind::Reinterpret, E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
- assert(E->getType()->isBooleanType());
-
- if (DiscardResult)
- return true;
- return this->emitConstBool(E->getValue(), E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitCXXConstructExpr(
- const CXXConstructExpr *E) {
- QualType T = E->getType();
- assert(!classify(T));
-
- if (T->isRecordType()) {
- const CXXConstructorDecl *Ctor = E->getConstructor();
-
- // Trivial zero initialization.
- if (E->requiresZeroInitialization() && Ctor->isTrivial()) {
- const Record *R = getRecord(E->getType());
- return this->visitZeroRecordInitializer(R, E);
- }
-
- const Function *Func = getFunction(Ctor);
-
- if (!Func)
- return false;
-
- assert(Func->hasThisPointer());
- assert(!Func->hasRVO());
-
- // If we're discarding a construct expression, we still need
- // to allocate a variable and call the constructor and destructor.
- if (DiscardResult) {
- assert(!Initializing);
- std::optional<unsigned> LocalIndex =
- allocateLocal(E, /*IsExtended=*/true);
-
- if (!LocalIndex)
- return false;
-
- if (!this->emitGetPtrLocal(*LocalIndex, E))
- return false;
- }
-
- // The This pointer is already on the stack because this is an initializer,
- // but we need to dup() so the call() below has its own copy.
- if (!this->emitDupPtr(E))
- return false;
-
- // Constructor arguments.
- for (const auto *Arg : E->arguments()) {
- if (!this->visit(Arg))
- return false;
- }
-
- if (!this->emitCall(Func, E))
- return false;
-
- // Immediately call the destructor if we have to.
- if (DiscardResult) {
- if (!this->emitPopPtr(E))
- return false;
- }
- return true;
- }
-
- if (T->isArrayType()) {
- const ConstantArrayType *CAT =
- Ctx.getASTContext().getAsConstantArrayType(E->getType());
- assert(CAT);
- size_t NumElems = CAT->getSize().getZExtValue();
- const Function *Func = getFunction(E->getConstructor());
- if (!Func || !Func->isConstexpr())
- return false;
-
- // FIXME(perf): We're calling the constructor once per array element here,
- // in the old intepreter we had a special-case for trivial constructors.
- for (size_t I = 0; I != NumElems; ++I) {
- if (!this->emitConstUint64(I, E))
- return false;
- if (!this->emitArrayElemPtrUint64(E))
- return false;
-
- // Constructor arguments.
- for (const auto *Arg : E->arguments()) {
- if (!this->visit(Arg))
- return false;
- }
-
- if (!this->emitCall(Func, E))
- return false;
- }
- return true;
- }
-
- return false;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitSourceLocExpr(const SourceLocExpr *E) {
- if (DiscardResult)
- return true;
-
- const APValue Val =
- E->EvaluateInContext(Ctx.getASTContext(), SourceLocDefaultExpr);
-
- // Things like __builtin_LINE().
- if (E->getType()->isIntegerType()) {
- assert(Val.isInt());
- const APSInt &I = Val.getInt();
- return this->emitConst(I, E);
- }
- // Otherwise, the APValue is an LValue, with only one element.
- // Theoretically, we don't need the APValue at all of course.
- assert(E->getType()->isPointerType());
- assert(Val.isLValue());
- const APValue::LValueBase &Base = Val.getLValueBase();
- if (const Expr *LValueExpr = Base.dyn_cast<const Expr *>())
- return this->visit(LValueExpr);
-
- // Otherwise, we have a decl (which is the case for
- // __builtin_source_location).
- assert(Base.is<const ValueDecl *>());
- assert(Val.getLValuePath().size() == 0);
- const auto *BaseDecl = Base.dyn_cast<const ValueDecl *>();
- assert(BaseDecl);
-
- auto *UGCD = cast<UnnamedGlobalConstantDecl>(BaseDecl);
-
- std::optional<unsigned> GlobalIndex = P.getOrCreateGlobal(UGCD);
- if (!GlobalIndex)
- return false;
-
- if (!this->emitGetPtrGlobal(*GlobalIndex, E))
- return false;
-
- const Record *R = getRecord(E->getType());
- const APValue &V = UGCD->getValue();
- for (unsigned I = 0, N = R->getNumFields(); I != N; ++I) {
- const Record::Field *F = R->getField(I);
- const APValue &FieldValue = V.getStructField(I);
-
- PrimType FieldT = classifyPrim(F->Decl->getType());
-
- if (!this->visitAPValue(FieldValue, FieldT, E))
- return false;
- if (!this->emitInitField(FieldT, F->Offset, E))
- return false;
- }
-
- // Leave the pointer to the global on the stack.
- return true;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitOffsetOfExpr(const OffsetOfExpr *E) {
- unsigned N = E->getNumComponents();
- if (N == 0)
- return false;
-
- for (unsigned I = 0; I != N; ++I) {
- const OffsetOfNode &Node = E->getComponent(I);
- if (Node.getKind() == OffsetOfNode::Array) {
- const Expr *ArrayIndexExpr = E->getIndexExpr(Node.getArrayExprIndex());
- PrimType IndexT = classifyPrim(ArrayIndexExpr->getType());
-
- if (DiscardResult) {
- if (!this->discard(ArrayIndexExpr))
- return false;
- continue;
- }
-
- if (!this->visit(ArrayIndexExpr))
- return false;
- // Cast to Sint64.
- if (IndexT != PT_Sint64) {
- if (!this->emitCast(IndexT, PT_Sint64, E))
- return false;
- }
- }
- }
-
- if (DiscardResult)
- return true;
-
- PrimType T = classifyPrim(E->getType());
- return this->emitOffsetOf(T, E, E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitCXXScalarValueInitExpr(
- const CXXScalarValueInitExpr *E) {
- QualType Ty = E->getType();
-
- if (Ty->isVoidType())
- return true;
-
- return this->visitZeroInitializer(classifyPrim(Ty), Ty, E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitSizeOfPackExpr(const SizeOfPackExpr *E) {
- return this->emitConst(E->getPackLength(), E);
-}
-
-template <class Emitter> bool ByteCodeExprGen<Emitter>::discard(const Expr *E) {
- if (E->containsErrors())
- return false;
-
- OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/true,
- /*NewInitializing=*/false);
- return this->Visit(E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::delegate(const Expr *E) {
- if (E->containsErrors())
- return false;
-
- // We're basically doing:
- // OptionScope<Emitter> Scope(this, DicardResult, Initializing);
- // but that's unnecessary of course.
- return this->Visit(E);
-}
-
-template <class Emitter> bool ByteCodeExprGen<Emitter>::visit(const Expr *E) {
- if (E->containsErrors())
- return false;
-
- if (E->getType()->isVoidType())
- return this->discard(E);
-
- // Create local variable to hold the return value.
- if (!E->isGLValue() && !E->getType()->isAnyComplexType() &&
- !classify(E->getType())) {
- std::optional<unsigned> LocalIndex = allocateLocal(E, /*IsExtended=*/true);
- if (!LocalIndex)
- return false;
-
- if (!this->emitGetPtrLocal(*LocalIndex, E))
- return false;
- return this->visitInitializer(E);
- }
-
- // Otherwise,we have a primitive return value, produce the value directly
- // and push it on the stack.
- OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/false,
- /*NewInitializing=*/false);
- return this->Visit(E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::visitInitializer(const Expr *E) {
- assert(!classify(E->getType()));
-
- if (E->containsErrors())
- return false;
-
- OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/false,
- /*NewInitializing=*/true);
- return this->Visit(E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::visitBool(const Expr *E) {
- std::optional<PrimType> T = classify(E->getType());
- if (!T)
- return false;
-
- if (!this->visit(E))
- return false;
-
- if (T == PT_Bool)
- return true;
-
- // Convert pointers to bool.
- if (T == PT_Ptr || T == PT_FnPtr) {
- if (!this->emitNull(*T, E))
- return false;
- return this->emitNE(*T, E);
- }
-
- // Or Floats.
- if (T == PT_Float)
- return this->emitCastFloatingIntegralBool(E);
-
- // Or anything else we can.
- return this->emitCast(*T, PT_Bool, E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::visitZeroInitializer(PrimType T, QualType QT,
- const Expr *E) {
- switch (T) {
- case PT_Bool:
- return this->emitZeroBool(E);
- case PT_Sint8:
- return this->emitZeroSint8(E);
- case PT_Uint8:
- return this->emitZeroUint8(E);
- case PT_Sint16:
- return this->emitZeroSint16(E);
- case PT_Uint16:
- return this->emitZeroUint16(E);
- case PT_Sint32:
- return this->emitZeroSint32(E);
- case PT_Uint32:
- return this->emitZeroUint32(E);
- case PT_Sint64:
- return this->emitZeroSint64(E);
- case PT_Uint64:
- return this->emitZeroUint64(E);
- case PT_IntAP:
- return this->emitZeroIntAP(Ctx.getBitWidth(QT), E);
- case PT_IntAPS:
- return this->emitZeroIntAPS(Ctx.getBitWidth(QT), E);
- case PT_Ptr:
- return this->emitNullPtr(E);
- case PT_FnPtr:
- return this->emitNullFnPtr(E);
- case PT_Float: {
- return this->emitConstFloat(APFloat::getZero(Ctx.getFloatSemantics(QT)), E);
- }
- }
- llvm_unreachable("unknown primitive type");
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::visitZeroRecordInitializer(const Record *R,
- const Expr *E) {
- assert(E);
- assert(R);
- // Fields
- for (const Record::Field &Field : R->fields()) {
- const Descriptor *D = Field.Desc;
- if (D->isPrimitive()) {
- QualType QT = D->getType();
- PrimType T = classifyPrim(D->getType());
- if (!this->visitZeroInitializer(T, QT, E))
- return false;
- if (!this->emitInitField(T, Field.Offset, E))
- return false;
- continue;
- }
-
- // TODO: Add GetPtrFieldPop and get rid of this dup.
- if (!this->emitDupPtr(E))
- return false;
- if (!this->emitGetPtrField(Field.Offset, E))
- return false;
-
- if (D->isPrimitiveArray()) {
- QualType ET = D->getElemQualType();
- PrimType T = classifyPrim(ET);
- for (uint32_t I = 0, N = D->getNumElems(); I != N; ++I) {
- if (!this->visitZeroInitializer(T, ET, E))
- return false;
- if (!this->emitInitElem(T, I, E))
- return false;
- }
- } else if (D->isCompositeArray()) {
- const Record *ElemRecord = D->ElemDesc->ElemRecord;
- assert(D->ElemDesc->ElemRecord);
- for (uint32_t I = 0, N = D->getNumElems(); I != N; ++I) {
- if (!this->emitConstUint32(I, E))
- return false;
- if (!this->emitArrayElemPtr(PT_Uint32, E))
- return false;
- if (!this->visitZeroRecordInitializer(ElemRecord, E))
- return false;
- if (!this->emitPopPtr(E))
- return false;
- }
- } else if (D->isRecord()) {
- if (!this->visitZeroRecordInitializer(D->ElemRecord, E))
- return false;
- } else {
- assert(false);
- }
-
- if (!this->emitPopPtr(E))
- return false;
- }
-
- for (const Record::Base &B : R->bases()) {
- if (!this->emitGetPtrBase(B.Offset, E))
- return false;
- if (!this->visitZeroRecordInitializer(B.R, E))
- return false;
- if (!this->emitInitPtrPop(E))
- return false;
- }
-
- // FIXME: Virtual bases.
-
- return true;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::dereference(
- const Expr *LV, DerefKind AK, llvm::function_ref<bool(PrimType)> Direct,
- llvm::function_ref<bool(PrimType)> Indirect) {
- if (std::optional<PrimType> T = classify(LV->getType())) {
- if (!LV->refersToBitField()) {
- // Only primitive, non bit-field types can be dereferenced directly.
- if (const auto *DE = dyn_cast<DeclRefExpr>(LV)) {
- if (!DE->getDecl()->getType()->isReferenceType()) {
- if (const auto *PD = dyn_cast<ParmVarDecl>(DE->getDecl()))
- return dereferenceParam(LV, *T, PD, AK, Direct, Indirect);
- if (const auto *VD = dyn_cast<VarDecl>(DE->getDecl()))
- return dereferenceVar(LV, *T, VD, AK, Direct, Indirect);
- }
- }
- }
-
- if (!visit(LV))
- return false;
- return Indirect(*T);
- }
-
- if (LV->getType()->isAnyComplexType())
- return this->delegate(LV);
-
- return false;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::dereferenceParam(
- const Expr *LV, PrimType T, const ParmVarDecl *PD, DerefKind AK,
- llvm::function_ref<bool(PrimType)> Direct,
- llvm::function_ref<bool(PrimType)> Indirect) {
- auto It = this->Params.find(PD);
- if (It != this->Params.end()) {
- unsigned Idx = It->second.Offset;
- switch (AK) {
- case DerefKind::Read:
- return DiscardResult ? true : this->emitGetParam(T, Idx, LV);
-
- case DerefKind::Write:
- if (!Direct(T))
- return false;
- if (!this->emitSetParam(T, Idx, LV))
- return false;
- return DiscardResult ? true : this->emitGetPtrParam(Idx, LV);
-
- case DerefKind::ReadWrite:
- if (!this->emitGetParam(T, Idx, LV))
- return false;
- if (!Direct(T))
- return false;
- if (!this->emitSetParam(T, Idx, LV))
- return false;
- return DiscardResult ? true : this->emitGetPtrParam(Idx, LV);
- }
- return true;
- }
-
- // If the param is a pointer, we can dereference a dummy value.
- if (!DiscardResult && T == PT_Ptr && AK == DerefKind::Read) {
- if (auto Idx = P.getOrCreateDummy(PD))
- return this->emitGetPtrGlobal(*Idx, PD);
- return false;
- }
-
- // Value cannot be produced - try to emit pointer and do stuff with it.
- return visit(LV) && Indirect(T);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::dereferenceVar(
- const Expr *LV, PrimType T, const VarDecl *VD, DerefKind AK,
- llvm::function_ref<bool(PrimType)> Direct,
- llvm::function_ref<bool(PrimType)> Indirect) {
- auto It = Locals.find(VD);
- if (It != Locals.end()) {
- const auto &L = It->second;
- switch (AK) {
- case DerefKind::Read:
- if (!this->emitGetLocal(T, L.Offset, LV))
- return false;
- return DiscardResult ? this->emitPop(T, LV) : true;
-
- case DerefKind::Write:
- if (!Direct(T))
- return false;
- if (!this->emitSetLocal(T, L.Offset, LV))
- return false;
- return DiscardResult ? true : this->emitGetPtrLocal(L.Offset, LV);
-
- case DerefKind::ReadWrite:
- if (!this->emitGetLocal(T, L.Offset, LV))
- return false;
- if (!Direct(T))
- return false;
- if (!this->emitSetLocal(T, L.Offset, LV))
- return false;
- return DiscardResult ? true : this->emitGetPtrLocal(L.Offset, LV);
- }
- } else if (auto Idx = P.getGlobal(VD)) {
- switch (AK) {
- case DerefKind::Read:
- if (!this->emitGetGlobal(T, *Idx, LV))
- return false;
- return DiscardResult ? this->emitPop(T, LV) : true;
-
- case DerefKind::Write:
- if (!Direct(T))
- return false;
- if (!this->emitSetGlobal(T, *Idx, LV))
- return false;
- return DiscardResult ? true : this->emitGetPtrGlobal(*Idx, LV);
-
- case DerefKind::ReadWrite:
- if (!this->emitGetGlobal(T, *Idx, LV))
- return false;
- if (!Direct(T))
- return false;
- if (!this->emitSetGlobal(T, *Idx, LV))
- return false;
- return DiscardResult ? true : this->emitGetPtrGlobal(*Idx, LV);
- }
- }
-
- // If the declaration is a constant value, emit it here even
- // though the declaration was not evaluated in the current scope.
- // The access mode can only be read in this case.
- if (!DiscardResult && AK == DerefKind::Read) {
- if (VD->hasLocalStorage() && VD->hasInit() && !VD->isConstexpr()) {
- QualType VT = VD->getType();
- if (VT.isConstQualified() && VT->isFundamentalType())
- return this->visit(VD->getInit());
- }
- }
-
- // Value cannot be produced - try to emit pointer.
- return visit(LV) && Indirect(T);
-}
-
-template <class Emitter>
-template <typename T>
-bool ByteCodeExprGen<Emitter>::emitConst(T Value, PrimType Ty, const Expr *E) {
- switch (Ty) {
- case PT_Sint8:
- return this->emitConstSint8(Value, E);
- case PT_Uint8:
- return this->emitConstUint8(Value, E);
- case PT_Sint16:
- return this->emitConstSint16(Value, E);
- case PT_Uint16:
- return this->emitConstUint16(Value, E);
- case PT_Sint32:
- return this->emitConstSint32(Value, E);
- case PT_Uint32:
- return this->emitConstUint32(Value, E);
- case PT_Sint64:
- return this->emitConstSint64(Value, E);
- case PT_Uint64:
- return this->emitConstUint64(Value, E);
- case PT_IntAP:
- case PT_IntAPS:
- assert(false);
- return false;
- case PT_Bool:
- return this->emitConstBool(Value, E);
- case PT_Ptr:
- case PT_FnPtr:
- case PT_Float:
- llvm_unreachable("Invalid integral type");
- break;
- }
- llvm_unreachable("unknown primitive type");
-}
-
-template <class Emitter>
-template <typename T>
-bool ByteCodeExprGen<Emitter>::emitConst(T Value, const Expr *E) {
- return this->emitConst(Value, classifyPrim(E->getType()), E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::emitConst(const APSInt &Value, PrimType Ty,
- const Expr *E) {
- if (Value.isSigned())
- return this->emitConst(Value.getSExtValue(), Ty, E);
- return this->emitConst(Value.getZExtValue(), Ty, E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::emitConst(const APSInt &Value, const Expr *E) {
- return this->emitConst(Value, classifyPrim(E->getType()), E);
-}
-
-template <class Emitter>
-unsigned ByteCodeExprGen<Emitter>::allocateLocalPrimitive(DeclTy &&Src,
- PrimType Ty,
- bool IsConst,
- bool IsExtended) {
- // Make sure we don't accidentally register the same decl twice.
- if (const auto *VD =
- dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) {
- assert(!P.getGlobal(VD));
- assert(!Locals.contains(VD));
- }
-
- // FIXME: There are cases where Src.is<Expr*>() is wrong, e.g.
- // (int){12} in C. Consider using Expr::isTemporaryObject() instead
- // or isa<MaterializeTemporaryExpr>().
- Descriptor *D = P.createDescriptor(Src, Ty, Descriptor::InlineDescMD, IsConst,
- Src.is<const Expr *>());
- Scope::Local Local = this->createLocal(D);
- if (auto *VD = dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>()))
- Locals.insert({VD, Local});
- VarScope->add(Local, IsExtended);
- return Local.Offset;
-}
-
-template <class Emitter>
-std::optional<unsigned>
-ByteCodeExprGen<Emitter>::allocateLocal(DeclTy &&Src, bool IsExtended) {
- // Make sure we don't accidentally register the same decl twice.
- if ([[maybe_unused]] const auto *VD =
- dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) {
- assert(!P.getGlobal(VD));
- assert(!Locals.contains(VD));
- }
-
- QualType Ty;
- const ValueDecl *Key = nullptr;
- const Expr *Init = nullptr;
- bool IsTemporary = false;
- if (auto *VD = dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) {
- Key = VD;
- Ty = VD->getType();
-
- if (const auto *VarD = dyn_cast<VarDecl>(VD))
- Init = VarD->getInit();
- }
- if (auto *E = Src.dyn_cast<const Expr *>()) {
- IsTemporary = true;
- Ty = E->getType();
- }
-
- Descriptor *D = P.createDescriptor(
- Src, Ty.getTypePtr(), Descriptor::InlineDescMD, Ty.isConstQualified(),
- IsTemporary, /*IsMutable=*/false, Init);
- if (!D)
- return {};
-
- Scope::Local Local = this->createLocal(D);
- if (Key)
- Locals.insert({Key, Local});
- VarScope->add(Local, IsExtended);
- return Local.Offset;
-}
-
-template <class Emitter>
-const RecordType *ByteCodeExprGen<Emitter>::getRecordTy(QualType Ty) {
- if (const PointerType *PT = dyn_cast<PointerType>(Ty))
- return PT->getPointeeType()->getAs<RecordType>();
- return Ty->getAs<RecordType>();
-}
-
-template <class Emitter>
-Record *ByteCodeExprGen<Emitter>::getRecord(QualType Ty) {
- if (const auto *RecordTy = getRecordTy(Ty))
- return getRecord(RecordTy->getDecl());
- return nullptr;
-}
-
-template <class Emitter>
-Record *ByteCodeExprGen<Emitter>::getRecord(const RecordDecl *RD) {
- return P.getOrCreateRecord(RD);
-}
-
-template <class Emitter>
-const Function *ByteCodeExprGen<Emitter>::getFunction(const FunctionDecl *FD) {
- return Ctx.getOrCreateFunction(FD);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::visitExpr(const Expr *E) {
- ExprScope<Emitter> RootScope(this);
- // Void expressions.
- if (E->getType()->isVoidType()) {
- if (!visit(E))
- return false;
- return this->emitRetVoid(E);
- }
-
- // Expressions with a primitive return type.
- if (std::optional<PrimType> T = classify(E)) {
- if (!visit(E))
- return false;
- return this->emitRet(*T, E);
- }
-
- // Expressions with a composite return type.
- // For us, that means everything we don't
- // have a PrimType for.
- if (std::optional<unsigned> LocalOffset = this->allocateLocal(E)) {
- if (!this->visitLocalInitializer(E, *LocalOffset))
- return false;
-
- if (!this->emitGetPtrLocal(*LocalOffset, E))
- return false;
- return this->emitRetValue(E);
- }
-
- return false;
-}
-
-/// Toplevel visitDecl().
-/// We get here from evaluateAsInitializer().
-/// We need to evaluate the initializer and return its value.
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::visitDecl(const VarDecl *VD) {
- assert(!VD->isInvalidDecl() && "Trying to constant evaluate an invalid decl");
-
- // Create and initialize the variable.
- if (!this->visitVarDecl(VD))
- return false;
-
- std::optional<PrimType> VarT = classify(VD->getType());
- // Get a pointer to the variable
- if (Context::shouldBeGloballyIndexed(VD)) {
- auto GlobalIndex = P.getGlobal(VD);
- assert(GlobalIndex); // visitVarDecl() didn't return false.
- if (VarT) {
- if (!this->emitGetGlobalUnchecked(*VarT, *GlobalIndex, VD))
- return false;
- } else {
- if (!this->emitGetPtrGlobal(*GlobalIndex, VD))
- return false;
- }
- } else {
- auto Local = Locals.find(VD);
- assert(Local != Locals.end()); // Same here.
- if (VarT) {
- if (!this->emitGetLocal(*VarT, Local->second.Offset, VD))
- return false;
- } else {
- if (!this->emitGetPtrLocal(Local->second.Offset, VD))
- return false;
- }
- }
-
- // Return the value
- if (VarT)
- return this->emitRet(*VarT, VD);
-
- // Return non-primitive values as pointers here.
- return this->emitRet(PT_Ptr, VD);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::visitVarDecl(const VarDecl *VD) {
- // We don't know what to do with these, so just return false.
- if (VD->getType().isNull())
- return false;
-
- const Expr *Init = VD->getInit();
- std::optional<PrimType> VarT = classify(VD->getType());
-
- if (Context::shouldBeGloballyIndexed(VD)) {
- // We've already seen and initialized this global.
- if (P.getGlobal(VD))
- return true;
-
- std::optional<unsigned> GlobalIndex = P.createGlobal(VD, Init);
-
- if (!GlobalIndex)
- return false;
-
- assert(Init);
- {
- DeclScope<Emitter> LocalScope(this, VD);
-
- if (VarT) {
- if (!this->visit(Init))
- return false;
- return this->emitInitGlobal(*VarT, *GlobalIndex, VD);
- }
- return this->visitGlobalInitializer(Init, *GlobalIndex);
- }
- } else {
- VariableScope<Emitter> LocalScope(this);
- if (VarT) {
- unsigned Offset = this->allocateLocalPrimitive(
- VD, *VarT, VD->getType().isConstQualified());
- if (Init) {
- // Compile the initializer in its own scope.
- ExprScope<Emitter> Scope(this);
- if (!this->visit(Init))
- return false;
-
- return this->emitSetLocal(*VarT, Offset, VD);
- }
- } else {
- if (std::optional<unsigned> Offset = this->allocateLocal(VD)) {
- if (Init)
- return this->visitLocalInitializer(Init, *Offset);
- }
- }
- return true;
- }
-
- return false;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::visitAPValue(const APValue &Val,
- PrimType ValType, const Expr *E) {
- assert(!DiscardResult);
- if (Val.isInt())
- return this->emitConst(Val.getInt(), ValType, E);
-
- if (Val.isLValue()) {
- APValue::LValueBase Base = Val.getLValueBase();
- if (const Expr *BaseExpr = Base.dyn_cast<const Expr *>())
- return this->visit(BaseExpr);
- }
-
- return false;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitBuiltinCallExpr(const CallExpr *E) {
- const Function *Func = getFunction(E->getDirectCallee());
- if (!Func)
- return false;
-
- if (!Func->isUnevaluatedBuiltin()) {
- // Put arguments on the stack.
- for (const auto *Arg : E->arguments()) {
- if (!this->visit(Arg))
- return false;
- }
- }
-
- if (!this->emitCallBI(Func, E, E))
- return false;
-
- QualType ReturnType = E->getCallReturnType(Ctx.getASTContext());
- if (DiscardResult && !ReturnType->isVoidType()) {
- PrimType T = classifyPrim(ReturnType);
- return this->emitPop(T, E);
- }
-
- return true;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitCallExpr(const CallExpr *E) {
- if (E->getBuiltinCallee())
- return VisitBuiltinCallExpr(E);
-
- QualType ReturnType = E->getCallReturnType(Ctx.getASTContext());
- std::optional<PrimType> T = classify(ReturnType);
- bool HasRVO = !ReturnType->isVoidType() && !T;
-
- if (HasRVO) {
- if (DiscardResult) {
- // If we need to discard the return value but the function returns its
- // value via an RVO pointer, we need to create one such pointer just
- // for this call.
- if (std::optional<unsigned> LocalIndex = allocateLocal(E)) {
- if (!this->emitGetPtrLocal(*LocalIndex, E))
- return false;
- }
- } else {
- assert(Initializing);
- if (!this->emitDupPtr(E))
- return false;
- }
- }
-
- // Add the (optional, implicit) This pointer.
- if (const auto *MC = dyn_cast<CXXMemberCallExpr>(E)) {
- if (!this->visit(MC->getImplicitObjectArgument()))
- return false;
- }
-
- // Put arguments on the stack.
- for (const auto *Arg : E->arguments()) {
- if (!this->visit(Arg))
- return false;
- }
-
- if (const FunctionDecl *FuncDecl = E->getDirectCallee()) {
- const Function *Func = getFunction(FuncDecl);
- if (!Func)
- return false;
- // If the function is being compiled right now, this is a recursive call.
- // In that case, the function can't be valid yet, even though it will be
- // later.
- // If the function is already fully compiled but not constexpr, it was
- // found to be faulty earlier on, so bail out.
- if (Func->isFullyCompiled() && !Func->isConstexpr())
- return false;
-
- assert(HasRVO == Func->hasRVO());
-
- bool HasQualifier = false;
- if (const auto *ME = dyn_cast<MemberExpr>(E->getCallee()))
- HasQualifier = ME->hasQualifier();
-
- bool IsVirtual = false;
- if (const auto *MD = dyn_cast<CXXMethodDecl>(FuncDecl))
- IsVirtual = MD->isVirtual();
-
- // In any case call the function. The return value will end up on the stack
- // and if the function has RVO, we already have the pointer on the stack to
- // write the result into.
- if (IsVirtual && !HasQualifier) {
- if (!this->emitCallVirt(Func, E))
- return false;
- } else {
- if (!this->emitCall(Func, E))
- return false;
- }
- } else {
- // Indirect call. Visit the callee, which will leave a FunctionPointer on
- // the stack. Cleanup of the returned value if necessary will be done after
- // the function call completed.
- if (!this->visit(E->getCallee()))
- return false;
-
- if (!this->emitCallPtr(E))
- return false;
- }
-
- // Cleanup for discarded return values.
- if (DiscardResult && !ReturnType->isVoidType() && T)
- return this->emitPop(*T, E);
-
- return true;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitCXXDefaultInitExpr(
- const CXXDefaultInitExpr *E) {
- SourceLocScope<Emitter> SLS(this, E);
- if (Initializing)
- return this->visitInitializer(E->getExpr());
-
- assert(classify(E->getType()));
- return this->visit(E->getExpr());
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitCXXDefaultArgExpr(
- const CXXDefaultArgExpr *E) {
- SourceLocScope<Emitter> SLS(this, E);
-
- const Expr *SubExpr = E->getExpr();
- if (std::optional<PrimType> T = classify(E->getExpr()))
- return this->visit(SubExpr);
-
- assert(Initializing);
- return this->visitInitializer(SubExpr);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitCXXBoolLiteralExpr(
- const CXXBoolLiteralExpr *E) {
- if (DiscardResult)
- return true;
-
- return this->emitConstBool(E->getValue(), E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitCXXNullPtrLiteralExpr(
- const CXXNullPtrLiteralExpr *E) {
- if (DiscardResult)
- return true;
-
- return this->emitNullPtr(E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitGNUNullExpr(const GNUNullExpr *E) {
- if (DiscardResult)
- return true;
-
- assert(E->getType()->isIntegerType());
-
- PrimType T = classifyPrim(E->getType());
- return this->emitZero(T, E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitCXXThisExpr(const CXXThisExpr *E) {
- if (DiscardResult)
- return true;
-
- if (this->LambdaThisCapture > 0)
- return this->emitGetThisFieldPtr(this->LambdaThisCapture, E);
-
- return this->emitThis(E);
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
- const Expr *SubExpr = E->getSubExpr();
- std::optional<PrimType> T = classify(SubExpr->getType());
-
- switch (E->getOpcode()) {
- case UO_PostInc: { // x++
- if (!this->visit(SubExpr))
- return false;
-
- if (T == PT_Ptr) {
- if (!this->emitIncPtr(E))
- return false;
-
- return DiscardResult ? this->emitPopPtr(E) : true;
- }
-
- if (T == PT_Float) {
- return DiscardResult ? this->emitIncfPop(getRoundingMode(E), E)
- : this->emitIncf(getRoundingMode(E), E);
- }
-
- return DiscardResult ? this->emitIncPop(*T, E) : this->emitInc(*T, E);
- }
- case UO_PostDec: { // x--
- if (!this->visit(SubExpr))
- return false;
-
- if (T == PT_Ptr) {
- if (!this->emitDecPtr(E))
- return false;
-
- return DiscardResult ? this->emitPopPtr(E) : true;
- }
-
- if (T == PT_Float) {
- return DiscardResult ? this->emitDecfPop(getRoundingMode(E), E)
- : this->emitDecf(getRoundingMode(E), E);
- }
-
- return DiscardResult ? this->emitDecPop(*T, E) : this->emitDec(*T, E);
- }
- case UO_PreInc: { // ++x
- if (!this->visit(SubExpr))
- return false;
-
- if (T == PT_Ptr) {
- if (!this->emitLoadPtr(E))
- return false;
- if (!this->emitConstUint8(1, E))
- return false;
- if (!this->emitAddOffsetUint8(E))
- return false;
- return DiscardResult ? this->emitStorePopPtr(E) : this->emitStorePtr(E);
- }
-
- // Post-inc and pre-inc are the same if the value is to be discarded.
- if (DiscardResult) {
- if (T == PT_Float)
- return this->emitIncfPop(getRoundingMode(E), E);
- return this->emitIncPop(*T, E);
- }
-
- if (T == PT_Float) {
- const auto &TargetSemantics = Ctx.getFloatSemantics(E->getType());
- if (!this->emitLoadFloat(E))
- return false;
- if (!this->emitConstFloat(llvm::APFloat(TargetSemantics, 1), E))
- return false;
- if (!this->emitAddf(getRoundingMode(E), E))
- return false;
- return this->emitStoreFloat(E);
- }
- if (!this->emitLoad(*T, E))
- return false;
- if (!this->emitConst(1, E))
- return false;
- if (!this->emitAdd(*T, E))
- return false;
- return this->emitStore(*T, E);
- }
- case UO_PreDec: { // --x
- if (!this->visit(SubExpr))
- return false;
-
- if (T == PT_Ptr) {
- if (!this->emitLoadPtr(E))
- return false;
- if (!this->emitConstUint8(1, E))
- return false;
- if (!this->emitSubOffsetUint8(E))
- return false;
- return DiscardResult ? this->emitStorePopPtr(E) : this->emitStorePtr(E);
- }
-
- // Post-dec and pre-dec are the same if the value is to be discarded.
- if (DiscardResult) {
- if (T == PT_Float)
- return this->emitDecfPop(getRoundingMode(E), E);
- return this->emitDecPop(*T, E);
- }
-
- if (T == PT_Float) {
- const auto &TargetSemantics = Ctx.getFloatSemantics(E->getType());
- if (!this->emitLoadFloat(E))
- return false;
- if (!this->emitConstFloat(llvm::APFloat(TargetSemantics, 1), E))
- return false;
- if (!this->emitSubf(getRoundingMode(E), E))
- return false;
- return this->emitStoreFloat(E);
- }
- if (!this->emitLoad(*T, E))
- return false;
- if (!this->emitConst(1, E))
- return false;
- if (!this->emitSub(*T, E))
- return false;
- return this->emitStore(*T, E);
- }
- case UO_LNot: // !x
- if (DiscardResult)
- return this->discard(SubExpr);
-
- if (!this->visitBool(SubExpr))
- return false;
-
- if (!this->emitInvBool(E))
- return false;
-
- if (PrimType ET = classifyPrim(E->getType()); ET != PT_Bool)
- return this->emitCast(PT_Bool, ET, E);
- return true;
- case UO_Minus: // -x
- if (!this->visit(SubExpr))
- return false;
- return DiscardResult ? this->emitPop(*T, E) : this->emitNeg(*T, E);
- case UO_Plus: // +x
- if (!this->visit(SubExpr)) // noop
- return false;
- return DiscardResult ? this->emitPop(*T, E) : true;
- case UO_AddrOf: // &x
- // We should already have a pointer when we get here.
- return this->delegate(SubExpr);
- case UO_Deref: // *x
- return dereference(
- SubExpr, DerefKind::Read,
- [](PrimType) {
- llvm_unreachable("Dereferencing requires a pointer");
- return false;
- },
- [this, E](PrimType T) {
- return DiscardResult ? this->emitPop(T, E) : true;
- });
- case UO_Not: // ~x
- if (!this->visit(SubExpr))
- return false;
- return DiscardResult ? this->emitPop(*T, E) : this->emitComp(*T, E);
- case UO_Real: // __real x
- if (T)
- return this->delegate(SubExpr);
- return this->emitComplexReal(SubExpr);
- case UO_Imag: { // __imag x
- if (T) {
- if (!this->discard(SubExpr))
- return false;
- return this->visitZeroInitializer(*T, SubExpr->getType(), SubExpr);
- }
- if (!this->visit(SubExpr))
- return false;
- if (!this->emitConstUint8(1, E))
- return false;
- if (!this->emitArrayElemPtrPopUint8(E))
- return false;
-
- // Since our _Complex implementation does not map to a primitive type,
- // we sometimes have to do the lvalue-to-rvalue conversion here manually.
- if (!SubExpr->isLValue())
- return this->emitLoadPop(classifyPrim(E->getType()), E);
- return true;
- }
- case UO_Extension:
- return this->delegate(SubExpr);
- case UO_Coawait:
- assert(false && "Unhandled opcode");
- }
-
- return false;
-}
-
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitDeclRefExpr(const DeclRefExpr *E) {
- if (DiscardResult)
- return true;
-
- const auto *D = E->getDecl();
-
- if (const auto *ECD = dyn_cast<EnumConstantDecl>(D)) {
- return this->emitConst(ECD->getInitVal(), E);
- } else if (const auto *BD = dyn_cast<BindingDecl>(D)) {
- return this->visit(BD->getBinding());
- } else if (const auto *FuncDecl = dyn_cast<FunctionDecl>(D)) {
- const Function *F = getFunction(FuncDecl);
- return F && this->emitGetFnPtr(F, E);
- }
-
- // References are implemented via pointers, so when we see a DeclRefExpr
- // pointing to a reference, we need to get its value directly (i.e. the
- // pointer to the actual value) instead of a pointer to the pointer to the
- // value.
- bool IsReference = D->getType()->isReferenceType();
-
- // Check for local/global variables and parameters.
- if (auto It = Locals.find(D); It != Locals.end()) {
- const unsigned Offset = It->second.Offset;
-
- if (IsReference)
- return this->emitGetLocal(PT_Ptr, Offset, E);
- return this->emitGetPtrLocal(Offset, E);
- } else if (auto GlobalIndex = P.getGlobal(D)) {
- if (IsReference)
- return this->emitGetGlobalPtr(*GlobalIndex, E);
-
- return this->emitGetPtrGlobal(*GlobalIndex, E);
- } else if (const auto *PVD = dyn_cast<ParmVarDecl>(D)) {
- if (auto It = this->Params.find(PVD); It != this->Params.end()) {
- if (IsReference || !It->second.IsPtr)
- return this->emitGetParamPtr(It->second.Offset, E);
-
- return this->emitGetPtrParam(It->second.Offset, E);
- }
- }
-
- // Handle lambda captures.
- if (auto It = this->LambdaCaptures.find(D);
- It != this->LambdaCaptures.end()) {
- auto [Offset, IsPtr] = It->second;
-
- if (IsPtr)
- return this->emitGetThisFieldPtr(Offset, E);
- return this->emitGetPtrThisField(Offset, E);
- }
-
- // Lazily visit global declarations we haven't seen yet.
- // This happens in C.
- if (!Ctx.getLangOpts().CPlusPlus) {
- if (const auto *VD = dyn_cast<VarDecl>(D);
- VD && VD->hasGlobalStorage() && VD->getAnyInitializer() &&
- VD->getType().isConstQualified()) {
- if (!this->visitVarDecl(VD))
- return false;
- // Retry.
- return this->VisitDeclRefExpr(E);
- }
-
- if (std::optional<unsigned> I = P.getOrCreateDummy(D))
- return this->emitGetPtrGlobal(*I, E);
- }
-
- return this->emitInvalidDeclRef(E, E);
-}
-
-template <class Emitter>
-void ByteCodeExprGen<Emitter>::emitCleanup() {
- for (VariableScope<Emitter> *C = VarScope; C; C = C->getParent())
- C->emitDestruction();
-}
-
-template <class Emitter>
-unsigned
-ByteCodeExprGen<Emitter>::collectBaseOffset(const RecordType *BaseType,
- const RecordType *DerivedType) {
- const auto *FinalDecl = cast<CXXRecordDecl>(BaseType->getDecl());
- const RecordDecl *CurDecl = DerivedType->getDecl();
- const Record *CurRecord = getRecord(CurDecl);
- assert(CurDecl && FinalDecl);
-
- unsigned OffsetSum = 0;
- for (;;) {
- assert(CurRecord->getNumBases() > 0);
- // One level up
- for (const Record::Base &B : CurRecord->bases()) {
- const auto *BaseDecl = cast<CXXRecordDecl>(B.Decl);
-
- if (BaseDecl == FinalDecl || BaseDecl->isDerivedFrom(FinalDecl)) {
- OffsetSum += B.Offset;
- CurRecord = B.R;
- CurDecl = BaseDecl;
- break;
- }
- }
- if (CurDecl == FinalDecl)
- break;
- }
-
- assert(OffsetSum > 0);
- return OffsetSum;
-}
-
-/// Emit casts from a PrimType to another PrimType.
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::emitPrimCast(PrimType FromT, PrimType ToT,
- QualType ToQT, const Expr *E) {
-
- if (FromT == PT_Float) {
- // Floating to floating.
- if (ToT == PT_Float) {
- const llvm::fltSemantics *ToSem = &Ctx.getFloatSemantics(ToQT);
- return this->emitCastFP(ToSem, getRoundingMode(E), E);
- }
-
- // Float to integral.
- if (isIntegralType(ToT) || ToT == PT_Bool)
- return this->emitCastFloatingIntegral(ToT, E);
- }
-
- if (isIntegralType(FromT) || FromT == PT_Bool) {
- // Integral to integral.
- if (isIntegralType(ToT) || ToT == PT_Bool)
- return FromT != ToT ? this->emitCast(FromT, ToT, E) : true;
-
- if (ToT == PT_Float) {
- // Integral to floating.
- const llvm::fltSemantics *ToSem = &Ctx.getFloatSemantics(ToQT);
- return this->emitCastIntegralFloating(FromT, ToSem, getRoundingMode(E),
- E);
- }
- }
-
- return false;
-}
-
-/// Emits __real(SubExpr)
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::emitComplexReal(const Expr *SubExpr) {
- assert(SubExpr->getType()->isAnyComplexType());
-
- if (DiscardResult)
- return this->discard(SubExpr);
-
- if (!this->visit(SubExpr))
- return false;
- if (!this->emitConstUint8(0, SubExpr))
- return false;
- if (!this->emitArrayElemPtrPopUint8(SubExpr))
- return false;
-
- // Since our _Complex implementation does not map to a primitive type,
- // we sometimes have to do the lvalue-to-rvalue conversion here manually.
- if (!SubExpr->isLValue())
- return this->emitLoadPop(*classifyComplexElementType(SubExpr->getType()),
- SubExpr);
- return true;
-}
-
-/// When calling this, we have a pointer of the local-to-destroy
-/// on the stack.
-/// Emit destruction of record types (or arrays of record types).
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::emitRecordDestruction(const Descriptor *Desc) {
- assert(Desc);
- assert(!Desc->isPrimitive());
- assert(!Desc->isPrimitiveArray());
-
- // Arrays.
- if (Desc->isArray()) {
- const Descriptor *ElemDesc = Desc->ElemDesc;
- assert(ElemDesc);
-
- // Don't need to do anything for these.
- if (ElemDesc->isPrimitiveArray())
- return this->emitPopPtr(SourceInfo{});
-
- // If this is an array of record types, check if we need
- // to call the element destructors at all. If not, try
- // to save the work.
- if (const Record *ElemRecord = ElemDesc->ElemRecord) {
- if (const CXXDestructorDecl *Dtor = ElemRecord->getDestructor();
- !Dtor || Dtor->isTrivial())
- return this->emitPopPtr(SourceInfo{});
- }
-
- for (ssize_t I = Desc->getNumElems() - 1; I >= 0; --I) {
- if (!this->emitConstUint64(I, SourceInfo{}))
- return false;
- if (!this->emitArrayElemPtrUint64(SourceInfo{}))
- return false;
- if (!this->emitRecordDestruction(ElemDesc))
- return false;
- }
- return this->emitPopPtr(SourceInfo{});
- }
-
- const Record *R = Desc->ElemRecord;
- assert(R);
- // First, destroy all fields.
- for (const Record::Field &Field : llvm::reverse(R->fields())) {
- const Descriptor *D = Field.Desc;
- if (!D->isPrimitive() && !D->isPrimitiveArray()) {
- if (!this->emitDupPtr(SourceInfo{}))
- return false;
- if (!this->emitGetPtrField(Field.Offset, SourceInfo{}))
- return false;
- if (!this->emitRecordDestruction(D))
- return false;
- }
- }
-
- // FIXME: Unions need to be handled differently here. We don't want to
- // call the destructor of its members.
-
- // Now emit the destructor and recurse into base classes.
- if (const CXXDestructorDecl *Dtor = R->getDestructor();
- Dtor && !Dtor->isTrivial()) {
- if (const Function *DtorFunc = getFunction(Dtor)) {
- assert(DtorFunc->hasThisPointer());
- assert(DtorFunc->getNumParams() == 1);
- if (!this->emitDupPtr(SourceInfo{}))
- return false;
- if (!this->emitCall(DtorFunc, SourceInfo{}))
- return false;
- }
- }
-
- for (const Record::Base &Base : llvm::reverse(R->bases())) {
- if (!this->emitGetPtrBase(Base.Offset, SourceInfo{}))
- return false;
- if (!this->emitRecordDestruction(Base.Desc))
- return false;
- }
- // FIXME: Virtual bases.
-
- // Remove the instance pointer.
- return this->emitPopPtr(SourceInfo{});
-}
-
-namespace clang {
-namespace interp {
-
-template class ByteCodeExprGen<ByteCodeEmitter>;
-template class ByteCodeExprGen<EvalEmitter>;
-
-} // namespace interp
-} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeGenError.h b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeGenError.h
deleted file mode 100644
index af464b5ed4ab..000000000000
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeGenError.h
+++ /dev/null
@@ -1,46 +0,0 @@
-//===--- ByteCodeGenError.h - Byte code generation error ----------*- C -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_AST_INTERP_BYTECODEGENERROR_H
-#define LLVM_CLANG_AST_INTERP_BYTECODEGENERROR_H
-
-#include "clang/AST/Decl.h"
-#include "clang/AST/Stmt.h"
-#include "clang/Basic/SourceLocation.h"
-#include "llvm/Support/Error.h"
-
-namespace clang {
-namespace interp {
-
-/// Error thrown by the compiler.
-struct ByteCodeGenError : public llvm::ErrorInfo<ByteCodeGenError> {
-public:
- ByteCodeGenError(SourceRange Range) : Range(Range) {}
- ByteCodeGenError(const Stmt *S) : ByteCodeGenError(S->getSourceRange()) {}
- ByteCodeGenError(const Decl *D) : ByteCodeGenError(D->getSourceRange()) {}
-
- void log(raw_ostream &OS) const override { OS << "unimplemented feature"; }
-
- const SourceRange &getRange() const { return Range; }
-
- static char ID;
-
-private:
- // Range of the item where the error occurred.
- SourceRange Range;
-
- // Users are not expected to use error_code.
- std::error_code convertToErrorCode() const override {
- return llvm::inconvertibleErrorCode();
- }
-};
-
-} // namespace interp
-} // namespace clang
-
-#endif
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.cpp b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.cpp
deleted file mode 100644
index a2d8c4e13010..000000000000
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.cpp
+++ /dev/null
@@ -1,680 +0,0 @@
-//===--- ByteCodeStmtGen.cpp - Code generator for expressions ---*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "ByteCodeStmtGen.h"
-#include "ByteCodeEmitter.h"
-#include "ByteCodeGenError.h"
-#include "Context.h"
-#include "Function.h"
-#include "PrimType.h"
-
-using namespace clang;
-using namespace clang::interp;
-
-namespace clang {
-namespace interp {
-
-/// Scope managing label targets.
-template <class Emitter> class LabelScope {
-public:
- virtual ~LabelScope() { }
-
-protected:
- LabelScope(ByteCodeStmtGen<Emitter> *Ctx) : Ctx(Ctx) {}
- /// ByteCodeStmtGen instance.
- ByteCodeStmtGen<Emitter> *Ctx;
-};
-
-/// Sets the context for break/continue statements.
-template <class Emitter> class LoopScope final : public LabelScope<Emitter> {
-public:
- using LabelTy = typename ByteCodeStmtGen<Emitter>::LabelTy;
- using OptLabelTy = typename ByteCodeStmtGen<Emitter>::OptLabelTy;
-
- LoopScope(ByteCodeStmtGen<Emitter> *Ctx, LabelTy BreakLabel,
- LabelTy ContinueLabel)
- : LabelScope<Emitter>(Ctx), OldBreakLabel(Ctx->BreakLabel),
- OldContinueLabel(Ctx->ContinueLabel) {
- this->Ctx->BreakLabel = BreakLabel;
- this->Ctx->ContinueLabel = ContinueLabel;
- }
-
- ~LoopScope() {
- this->Ctx->BreakLabel = OldBreakLabel;
- this->Ctx->ContinueLabel = OldContinueLabel;
- }
-
-private:
- OptLabelTy OldBreakLabel;
- OptLabelTy OldContinueLabel;
-};
-
-// Sets the context for a switch scope, mapping labels.
-template <class Emitter> class SwitchScope final : public LabelScope<Emitter> {
-public:
- using LabelTy = typename ByteCodeStmtGen<Emitter>::LabelTy;
- using OptLabelTy = typename ByteCodeStmtGen<Emitter>::OptLabelTy;
- using CaseMap = typename ByteCodeStmtGen<Emitter>::CaseMap;
-
- SwitchScope(ByteCodeStmtGen<Emitter> *Ctx, CaseMap &&CaseLabels,
- LabelTy BreakLabel, OptLabelTy DefaultLabel)
- : LabelScope<Emitter>(Ctx), OldBreakLabel(Ctx->BreakLabel),
- OldDefaultLabel(this->Ctx->DefaultLabel),
- OldCaseLabels(std::move(this->Ctx->CaseLabels)) {
- this->Ctx->BreakLabel = BreakLabel;
- this->Ctx->DefaultLabel = DefaultLabel;
- this->Ctx->CaseLabels = std::move(CaseLabels);
- }
-
- ~SwitchScope() {
- this->Ctx->BreakLabel = OldBreakLabel;
- this->Ctx->DefaultLabel = OldDefaultLabel;
- this->Ctx->CaseLabels = std::move(OldCaseLabels);
- }
-
-private:
- OptLabelTy OldBreakLabel;
- OptLabelTy OldDefaultLabel;
- CaseMap OldCaseLabels;
-};
-
-} // namespace interp
-} // namespace clang
-
-template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::emitLambdaStaticInvokerBody(
- const CXXMethodDecl *MD) {
- assert(MD->isLambdaStaticInvoker());
- assert(MD->hasBody());
- assert(cast<CompoundStmt>(MD->getBody())->body_empty());
-
- const CXXRecordDecl *ClosureClass = MD->getParent();
- const CXXMethodDecl *LambdaCallOp = ClosureClass->getLambdaCallOperator();
- assert(ClosureClass->captures_begin() == ClosureClass->captures_end());
- const Function *Func = this->getFunction(LambdaCallOp);
- if (!Func)
- return false;
- assert(Func->hasThisPointer());
- assert(Func->getNumParams() == (MD->getNumParams() + 1 + Func->hasRVO()));
-
- if (Func->hasRVO()) {
- if (!this->emitRVOPtr(MD))
- return false;
- }
-
- // The lambda call operator needs an instance pointer, but we don't have
- // one here, and we don't need one either because the lambda cannot have
- // any captures, as verified above. Emit a null pointer. This is then
- // special-cased when interpreting to not emit any misleading diagnostics.
- if (!this->emitNullPtr(MD))
- return false;
-
- // Forward all arguments from the static invoker to the lambda call operator.
- for (const ParmVarDecl *PVD : MD->parameters()) {
- auto It = this->Params.find(PVD);
- assert(It != this->Params.end());
-
- // We do the lvalue-to-rvalue conversion manually here, so no need
- // to care about references.
- PrimType ParamType = this->classify(PVD->getType()).value_or(PT_Ptr);
- if (!this->emitGetParam(ParamType, It->second.Offset, MD))
- return false;
- }
-
- if (!this->emitCall(Func, LambdaCallOp))
- return false;
-
- this->emitCleanup();
- if (ReturnType)
- return this->emitRet(*ReturnType, MD);
-
- // Nothing to do, since we emitted the RVO pointer above.
- return this->emitRetVoid(MD);
-}
-
-template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::visitFunc(const FunctionDecl *F) {
- // Classify the return type.
- ReturnType = this->classify(F->getReturnType());
-
- auto emitFieldInitializer = [&](const Record::Field *F, unsigned FieldOffset,
- const Expr *InitExpr) -> bool {
- if (std::optional<PrimType> T = this->classify(InitExpr)) {
- if (!this->visit(InitExpr))
- return false;
-
- if (F->isBitField())
- return this->emitInitThisBitField(*T, F, FieldOffset, InitExpr);
- return this->emitInitThisField(*T, FieldOffset, InitExpr);
- }
- // Non-primitive case. Get a pointer to the field-to-initialize
- // on the stack and call visitInitialzer() for it.
- if (!this->emitGetPtrThisField(FieldOffset, InitExpr))
- return false;
-
- if (!this->visitInitializer(InitExpr))
- return false;
-
- return this->emitPopPtr(InitExpr);
- };
-
- // Emit custom code if this is a lambda static invoker.
- if (const auto *MD = dyn_cast<CXXMethodDecl>(F);
- MD && MD->isLambdaStaticInvoker())
- return this->emitLambdaStaticInvokerBody(MD);
-
- // Constructor. Set up field initializers.
- if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(F)) {
- const RecordDecl *RD = Ctor->getParent();
- const Record *R = this->getRecord(RD);
- if (!R)
- return false;
-
- for (const auto *Init : Ctor->inits()) {
- // Scope needed for the initializers.
- BlockScope<Emitter> Scope(this);
-
- const Expr *InitExpr = Init->getInit();
- if (const FieldDecl *Member = Init->getMember()) {
- const Record::Field *F = R->getField(Member);
-
- if (!emitFieldInitializer(F, F->Offset, InitExpr))
- return false;
- } else if (const Type *Base = Init->getBaseClass()) {
- // Base class initializer.
- // Get This Base and call initializer on it.
- const auto *BaseDecl = Base->getAsCXXRecordDecl();
- assert(BaseDecl);
- const Record::Base *B = R->getBase(BaseDecl);
- assert(B);
- if (!this->emitGetPtrThisBase(B->Offset, InitExpr))
- return false;
- if (!this->visitInitializer(InitExpr))
- return false;
- if (!this->emitInitPtrPop(InitExpr))
- return false;
- } else if (const IndirectFieldDecl *IFD = Init->getIndirectMember()) {
- assert(IFD->getChainingSize() >= 2);
-
- unsigned NestedFieldOffset = 0;
- const Record::Field *NestedField = nullptr;
- for (const NamedDecl *ND : IFD->chain()) {
- const auto *FD = cast<FieldDecl>(ND);
- const Record *FieldRecord =
- this->P.getOrCreateRecord(FD->getParent());
- assert(FieldRecord);
-
- NestedField = FieldRecord->getField(FD);
- assert(NestedField);
-
- NestedFieldOffset += NestedField->Offset;
- }
- assert(NestedField);
-
- if (!emitFieldInitializer(NestedField, NestedFieldOffset, InitExpr))
- return false;
- } else {
- assert(Init->isDelegatingInitializer());
- if (!this->emitThis(InitExpr))
- return false;
- if (!this->visitInitializer(Init->getInit()))
- return false;
- if (!this->emitPopPtr(InitExpr))
- return false;
- }
- }
- }
-
- if (const auto *Body = F->getBody())
- if (!visitStmt(Body))
- return false;
-
- // Emit a guard return to protect against a code path missing one.
- if (F->getReturnType()->isVoidType())
- return this->emitRetVoid(SourceInfo{});
- else
- return this->emitNoRet(SourceInfo{});
-}
-
-template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::visitStmt(const Stmt *S) {
- switch (S->getStmtClass()) {
- case Stmt::CompoundStmtClass:
- return visitCompoundStmt(cast<CompoundStmt>(S));
- case Stmt::DeclStmtClass:
- return visitDeclStmt(cast<DeclStmt>(S));
- case Stmt::ReturnStmtClass:
- return visitReturnStmt(cast<ReturnStmt>(S));
- case Stmt::IfStmtClass:
- return visitIfStmt(cast<IfStmt>(S));
- case Stmt::WhileStmtClass:
- return visitWhileStmt(cast<WhileStmt>(S));
- case Stmt::DoStmtClass:
- return visitDoStmt(cast<DoStmt>(S));
- case Stmt::ForStmtClass:
- return visitForStmt(cast<ForStmt>(S));
- case Stmt::CXXForRangeStmtClass:
- return visitCXXForRangeStmt(cast<CXXForRangeStmt>(S));
- case Stmt::BreakStmtClass:
- return visitBreakStmt(cast<BreakStmt>(S));
- case Stmt::ContinueStmtClass:
- return visitContinueStmt(cast<ContinueStmt>(S));
- case Stmt::SwitchStmtClass:
- return visitSwitchStmt(cast<SwitchStmt>(S));
- case Stmt::CaseStmtClass:
- return visitCaseStmt(cast<CaseStmt>(S));
- case Stmt::DefaultStmtClass:
- return visitDefaultStmt(cast<DefaultStmt>(S));
- case Stmt::GCCAsmStmtClass:
- case Stmt::MSAsmStmtClass:
- return visitAsmStmt(cast<AsmStmt>(S));
- case Stmt::AttributedStmtClass:
- return visitAttributedStmt(cast<AttributedStmt>(S));
- case Stmt::CXXTryStmtClass:
- return visitCXXTryStmt(cast<CXXTryStmt>(S));
- case Stmt::NullStmtClass:
- return true;
- default: {
- if (auto *Exp = dyn_cast<Expr>(S))
- return this->discard(Exp);
- return false;
- }
- }
-}
-
-/// Visits the given statment without creating a variable
-/// scope for it in case it is a compound statement.
-template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::visitLoopBody(const Stmt *S) {
- if (isa<NullStmt>(S))
- return true;
-
- if (const auto *CS = dyn_cast<CompoundStmt>(S)) {
- for (auto *InnerStmt : CS->body())
- if (!visitStmt(InnerStmt))
- return false;
- return true;
- }
-
- return this->visitStmt(S);
-}
-
-template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::visitCompoundStmt(
- const CompoundStmt *CompoundStmt) {
- BlockScope<Emitter> Scope(this);
- for (auto *InnerStmt : CompoundStmt->body())
- if (!visitStmt(InnerStmt))
- return false;
- return true;
-}
-
-template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::visitDeclStmt(const DeclStmt *DS) {
- for (auto *D : DS->decls()) {
- if (isa<StaticAssertDecl, TagDecl, TypedefNameDecl>(D))
- continue;
-
- const auto *VD = dyn_cast<VarDecl>(D);
- if (!VD)
- return false;
- if (!this->visitVarDecl(VD))
- return false;
- }
-
- return true;
-}
-
-template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::visitReturnStmt(const ReturnStmt *RS) {
- if (const Expr *RE = RS->getRetValue()) {
- ExprScope<Emitter> RetScope(this);
- if (ReturnType) {
- // Primitive types are simply returned.
- if (!this->visit(RE))
- return false;
- this->emitCleanup();
- return this->emitRet(*ReturnType, RS);
- } else if (RE->getType()->isVoidType()) {
- if (!this->visit(RE))
- return false;
- } else {
- // RVO - construct the value in the return location.
- if (!this->emitRVOPtr(RE))
- return false;
- if (!this->visitInitializer(RE))
- return false;
- if (!this->emitPopPtr(RE))
- return false;
-
- this->emitCleanup();
- return this->emitRetVoid(RS);
- }
- }
-
- // Void return.
- this->emitCleanup();
- return this->emitRetVoid(RS);
-}
-
-template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::visitIfStmt(const IfStmt *IS) {
- BlockScope<Emitter> IfScope(this);
-
- if (IS->isNonNegatedConsteval())
- return visitStmt(IS->getThen());
- if (IS->isNegatedConsteval())
- return IS->getElse() ? visitStmt(IS->getElse()) : true;
-
- if (auto *CondInit = IS->getInit())
- if (!visitStmt(CondInit))
- return false;
-
- if (const DeclStmt *CondDecl = IS->getConditionVariableDeclStmt())
- if (!visitDeclStmt(CondDecl))
- return false;
-
- if (!this->visitBool(IS->getCond()))
- return false;
-
- if (const Stmt *Else = IS->getElse()) {
- LabelTy LabelElse = this->getLabel();
- LabelTy LabelEnd = this->getLabel();
- if (!this->jumpFalse(LabelElse))
- return false;
- if (!visitStmt(IS->getThen()))
- return false;
- if (!this->jump(LabelEnd))
- return false;
- this->emitLabel(LabelElse);
- if (!visitStmt(Else))
- return false;
- this->emitLabel(LabelEnd);
- } else {
- LabelTy LabelEnd = this->getLabel();
- if (!this->jumpFalse(LabelEnd))
- return false;
- if (!visitStmt(IS->getThen()))
- return false;
- this->emitLabel(LabelEnd);
- }
-
- return true;
-}
-
-template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::visitWhileStmt(const WhileStmt *S) {
- const Expr *Cond = S->getCond();
- const Stmt *Body = S->getBody();
-
- LabelTy CondLabel = this->getLabel(); // Label before the condition.
- LabelTy EndLabel = this->getLabel(); // Label after the loop.
- LoopScope<Emitter> LS(this, EndLabel, CondLabel);
-
- this->emitLabel(CondLabel);
- if (!this->visitBool(Cond))
- return false;
- if (!this->jumpFalse(EndLabel))
- return false;
-
- LocalScope<Emitter> Scope(this);
- {
- DestructorScope<Emitter> DS(Scope);
- if (!this->visitLoopBody(Body))
- return false;
- }
-
- if (!this->jump(CondLabel))
- return false;
- this->emitLabel(EndLabel);
-
- return true;
-}
-
-template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::visitDoStmt(const DoStmt *S) {
- const Expr *Cond = S->getCond();
- const Stmt *Body = S->getBody();
-
- LabelTy StartLabel = this->getLabel();
- LabelTy EndLabel = this->getLabel();
- LabelTy CondLabel = this->getLabel();
- LoopScope<Emitter> LS(this, EndLabel, CondLabel);
- LocalScope<Emitter> Scope(this);
-
- this->emitLabel(StartLabel);
- {
- DestructorScope<Emitter> DS(Scope);
-
- if (!this->visitLoopBody(Body))
- return false;
- this->emitLabel(CondLabel);
- if (!this->visitBool(Cond))
- return false;
- }
- if (!this->jumpTrue(StartLabel))
- return false;
-
- this->emitLabel(EndLabel);
- return true;
-}
-
-template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::visitForStmt(const ForStmt *S) {
- // for (Init; Cond; Inc) { Body }
- const Stmt *Init = S->getInit();
- const Expr *Cond = S->getCond();
- const Expr *Inc = S->getInc();
- const Stmt *Body = S->getBody();
-
- LabelTy EndLabel = this->getLabel();
- LabelTy CondLabel = this->getLabel();
- LabelTy IncLabel = this->getLabel();
- LoopScope<Emitter> LS(this, EndLabel, IncLabel);
- LocalScope<Emitter> Scope(this);
-
- if (Init && !this->visitStmt(Init))
- return false;
- this->emitLabel(CondLabel);
- if (Cond) {
- if (!this->visitBool(Cond))
- return false;
- if (!this->jumpFalse(EndLabel))
- return false;
- }
-
- {
- DestructorScope<Emitter> DS(Scope);
-
- if (Body && !this->visitLoopBody(Body))
- return false;
- this->emitLabel(IncLabel);
- if (Inc && !this->discard(Inc))
- return false;
- }
-
- if (!this->jump(CondLabel))
- return false;
- this->emitLabel(EndLabel);
- return true;
-}
-
-template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::visitCXXForRangeStmt(const CXXForRangeStmt *S) {
- const Stmt *Init = S->getInit();
- const Expr *Cond = S->getCond();
- const Expr *Inc = S->getInc();
- const Stmt *Body = S->getBody();
- const Stmt *BeginStmt = S->getBeginStmt();
- const Stmt *RangeStmt = S->getRangeStmt();
- const Stmt *EndStmt = S->getEndStmt();
- const VarDecl *LoopVar = S->getLoopVariable();
-
- LabelTy EndLabel = this->getLabel();
- LabelTy CondLabel = this->getLabel();
- LabelTy IncLabel = this->getLabel();
- LoopScope<Emitter> LS(this, EndLabel, IncLabel);
-
- // Emit declarations needed in the loop.
- if (Init && !this->visitStmt(Init))
- return false;
- if (!this->visitStmt(RangeStmt))
- return false;
- if (!this->visitStmt(BeginStmt))
- return false;
- if (!this->visitStmt(EndStmt))
- return false;
-
- // Now the condition as well as the loop variable assignment.
- this->emitLabel(CondLabel);
- if (!this->visitBool(Cond))
- return false;
- if (!this->jumpFalse(EndLabel))
- return false;
-
- if (!this->visitVarDecl(LoopVar))
- return false;
-
- // Body.
- LocalScope<Emitter> Scope(this);
- {
- DestructorScope<Emitter> DS(Scope);
-
- if (!this->visitLoopBody(Body))
- return false;
- this->emitLabel(IncLabel);
- if (!this->discard(Inc))
- return false;
- }
- if (!this->jump(CondLabel))
- return false;
-
- this->emitLabel(EndLabel);
- return true;
-}
-
-template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::visitBreakStmt(const BreakStmt *S) {
- if (!BreakLabel)
- return false;
-
- this->VarScope->emitDestructors();
- return this->jump(*BreakLabel);
-}
-
-template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::visitContinueStmt(const ContinueStmt *S) {
- if (!ContinueLabel)
- return false;
-
- this->VarScope->emitDestructors();
- return this->jump(*ContinueLabel);
-}
-
-template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::visitSwitchStmt(const SwitchStmt *S) {
- const Expr *Cond = S->getCond();
- PrimType CondT = this->classifyPrim(Cond->getType());
-
- LabelTy EndLabel = this->getLabel();
- OptLabelTy DefaultLabel = std::nullopt;
- unsigned CondVar = this->allocateLocalPrimitive(Cond, CondT, true, false);
-
- if (const auto *CondInit = S->getInit())
- if (!visitStmt(CondInit))
- return false;
-
- // Initialize condition variable.
- if (!this->visit(Cond))
- return false;
- if (!this->emitSetLocal(CondT, CondVar, S))
- return false;
-
- CaseMap CaseLabels;
- // Create labels and comparison ops for all case statements.
- for (const SwitchCase *SC = S->getSwitchCaseList(); SC;
- SC = SC->getNextSwitchCase()) {
- if (const auto *CS = dyn_cast<CaseStmt>(SC)) {
- // FIXME: Implement ranges.
- if (CS->caseStmtIsGNURange())
- return false;
- CaseLabels[SC] = this->getLabel();
-
- const Expr *Value = CS->getLHS();
- PrimType ValueT = this->classifyPrim(Value->getType());
-
- // Compare the case statement's value to the switch condition.
- if (!this->emitGetLocal(CondT, CondVar, CS))
- return false;
- if (!this->visit(Value))
- return false;
-
- // Compare and jump to the case label.
- if (!this->emitEQ(ValueT, S))
- return false;
- if (!this->jumpTrue(CaseLabels[CS]))
- return false;
- } else {
- assert(!DefaultLabel);
- DefaultLabel = this->getLabel();
- }
- }
-
- // If none of the conditions above were true, fall through to the default
- // statement or jump after the switch statement.
- if (DefaultLabel) {
- if (!this->jump(*DefaultLabel))
- return false;
- } else {
- if (!this->jump(EndLabel))
- return false;
- }
-
- SwitchScope<Emitter> SS(this, std::move(CaseLabels), EndLabel, DefaultLabel);
- if (!this->visitStmt(S->getBody()))
- return false;
- this->emitLabel(EndLabel);
- return true;
-}
-
-template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::visitCaseStmt(const CaseStmt *S) {
- this->emitLabel(CaseLabels[S]);
- return this->visitStmt(S->getSubStmt());
-}
-
-template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::visitDefaultStmt(const DefaultStmt *S) {
- this->emitLabel(*DefaultLabel);
- return this->visitStmt(S->getSubStmt());
-}
-
-template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::visitAsmStmt(const AsmStmt *S) {
- return this->emitInvalid(S);
-}
-
-template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::visitAttributedStmt(const AttributedStmt *S) {
- // Ignore all attributes.
- return this->visitStmt(S->getSubStmt());
-}
-
-template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::visitCXXTryStmt(const CXXTryStmt *S) {
- // Ignore all handlers.
- return this->visitStmt(S->getTryBlock());
-}
-
-namespace clang {
-namespace interp {
-
-template class ByteCodeStmtGen<ByteCodeEmitter>;
-
-} // namespace interp
-} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h
deleted file mode 100644
index 64e03587ab21..000000000000
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h
+++ /dev/null
@@ -1,91 +0,0 @@
-//===--- ByteCodeStmtGen.h - Code generator for expressions -----*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// Defines the constexpr bytecode compiler.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_AST_INTERP_BYTECODESTMTGEN_H
-#define LLVM_CLANG_AST_INTERP_BYTECODESTMTGEN_H
-
-#include "ByteCodeEmitter.h"
-#include "ByteCodeExprGen.h"
-#include "EvalEmitter.h"
-#include "PrimType.h"
-#include "clang/AST/StmtVisitor.h"
-
-namespace clang {
-namespace interp {
-
-template <class Emitter> class LoopScope;
-template <class Emitter> class SwitchScope;
-template <class Emitter> class LabelScope;
-
-/// Compilation context for statements.
-template <class Emitter>
-class ByteCodeStmtGen final : public ByteCodeExprGen<Emitter> {
- using LabelTy = typename Emitter::LabelTy;
- using AddrTy = typename Emitter::AddrTy;
- using OptLabelTy = std::optional<LabelTy>;
- using CaseMap = llvm::DenseMap<const SwitchCase *, LabelTy>;
-
-public:
- template<typename... Tys>
- ByteCodeStmtGen(Tys&&... Args)
- : ByteCodeExprGen<Emitter>(std::forward<Tys>(Args)...) {}
-
-protected:
- bool visitFunc(const FunctionDecl *F) override;
-
-private:
- friend class LabelScope<Emitter>;
- friend class LoopScope<Emitter>;
- friend class SwitchScope<Emitter>;
-
- // Statement visitors.
- bool visitStmt(const Stmt *S);
- bool visitCompoundStmt(const CompoundStmt *S);
- bool visitLoopBody(const Stmt *S);
- bool visitDeclStmt(const DeclStmt *DS);
- bool visitReturnStmt(const ReturnStmt *RS);
- bool visitIfStmt(const IfStmt *IS);
- bool visitWhileStmt(const WhileStmt *S);
- bool visitDoStmt(const DoStmt *S);
- bool visitForStmt(const ForStmt *S);
- bool visitCXXForRangeStmt(const CXXForRangeStmt *S);
- bool visitBreakStmt(const BreakStmt *S);
- bool visitContinueStmt(const ContinueStmt *S);
- bool visitSwitchStmt(const SwitchStmt *S);
- bool visitCaseStmt(const CaseStmt *S);
- bool visitDefaultStmt(const DefaultStmt *S);
- bool visitAsmStmt(const AsmStmt *S);
- bool visitAttributedStmt(const AttributedStmt *S);
- bool visitCXXTryStmt(const CXXTryStmt *S);
-
- bool emitLambdaStaticInvokerBody(const CXXMethodDecl *MD);
-
- /// Type of the expression returned by the function.
- std::optional<PrimType> ReturnType;
-
- /// Switch case mapping.
- CaseMap CaseLabels;
-
- /// Point to break to.
- OptLabelTy BreakLabel;
- /// Point to continue to.
- OptLabelTy ContinueLabel;
- /// Default case label.
- OptLabelTy DefaultLabel;
-};
-
-extern template class ByteCodeExprGen<EvalEmitter>;
-
-} // namespace interp
-} // namespace clang
-
-#endif
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Compiler.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Compiler.cpp
new file mode 100644
index 000000000000..0fc93c14131e
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Compiler.cpp
@@ -0,0 +1,5599 @@
+//===--- Compiler.cpp - Code generator for expressions ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "Compiler.h"
+#include "ByteCodeEmitter.h"
+#include "Context.h"
+#include "Floating.h"
+#include "Function.h"
+#include "InterpShared.h"
+#include "PrimType.h"
+#include "Program.h"
+#include "clang/AST/Attr.h"
+
+using namespace clang;
+using namespace clang::interp;
+
+using APSInt = llvm::APSInt;
+
+namespace clang {
+namespace interp {
+
+/// Scope used to handle temporaries in toplevel variable declarations.
+template <class Emitter> class DeclScope final : public LocalScope<Emitter> {
+public:
+ DeclScope(Compiler<Emitter> *Ctx, const ValueDecl *VD)
+ : LocalScope<Emitter>(Ctx, VD), Scope(Ctx->P, VD),
+ OldGlobalDecl(Ctx->GlobalDecl),
+ OldInitializingDecl(Ctx->InitializingDecl) {
+ Ctx->GlobalDecl = Context::shouldBeGloballyIndexed(VD);
+ Ctx->InitializingDecl = VD;
+ Ctx->InitStack.push_back(InitLink::Decl(VD));
+ }
+
+ void addExtended(const Scope::Local &Local) override {
+ return this->addLocal(Local);
+ }
+
+ ~DeclScope() {
+ this->Ctx->GlobalDecl = OldGlobalDecl;
+ this->Ctx->InitializingDecl = OldInitializingDecl;
+ this->Ctx->InitStack.pop_back();
+ }
+
+private:
+ Program::DeclScope Scope;
+ bool OldGlobalDecl;
+ const ValueDecl *OldInitializingDecl;
+};
+
+/// Scope used to handle initialization methods.
+template <class Emitter> class OptionScope final {
+public:
+ /// Root constructor, compiling or discarding primitives.
+ OptionScope(Compiler<Emitter> *Ctx, bool NewDiscardResult,
+ bool NewInitializing)
+ : Ctx(Ctx), OldDiscardResult(Ctx->DiscardResult),
+ OldInitializing(Ctx->Initializing) {
+ Ctx->DiscardResult = NewDiscardResult;
+ Ctx->Initializing = NewInitializing;
+ }
+
+ ~OptionScope() {
+ Ctx->DiscardResult = OldDiscardResult;
+ Ctx->Initializing = OldInitializing;
+ }
+
+private:
+ /// Parent context.
+ Compiler<Emitter> *Ctx;
+ /// Old discard flag to restore.
+ bool OldDiscardResult;
+ bool OldInitializing;
+};
+
+template <class Emitter>
+bool InitLink::emit(Compiler<Emitter> *Ctx, const Expr *E) const {
+ switch (Kind) {
+ case K_This:
+ return Ctx->emitThis(E);
+ case K_Field:
+ // We're assuming there's a base pointer on the stack already.
+ return Ctx->emitGetPtrFieldPop(Offset, E);
+ case K_Temp:
+ return Ctx->emitGetPtrLocal(Offset, E);
+ case K_Decl:
+ return Ctx->visitDeclRef(D, E);
+ default:
+ llvm_unreachable("Unhandled InitLink kind");
+ }
+ return true;
+}
+
+/// Scope managing label targets.
+template <class Emitter> class LabelScope {
+public:
+ virtual ~LabelScope() {}
+
+protected:
+ LabelScope(Compiler<Emitter> *Ctx) : Ctx(Ctx) {}
+ /// Compiler instance.
+ Compiler<Emitter> *Ctx;
+};
+
+/// Sets the context for break/continue statements.
+template <class Emitter> class LoopScope final : public LabelScope<Emitter> {
+public:
+ using LabelTy = typename Compiler<Emitter>::LabelTy;
+ using OptLabelTy = typename Compiler<Emitter>::OptLabelTy;
+
+ LoopScope(Compiler<Emitter> *Ctx, LabelTy BreakLabel, LabelTy ContinueLabel)
+ : LabelScope<Emitter>(Ctx), OldBreakLabel(Ctx->BreakLabel),
+ OldContinueLabel(Ctx->ContinueLabel) {
+ this->Ctx->BreakLabel = BreakLabel;
+ this->Ctx->ContinueLabel = ContinueLabel;
+ }
+
+ ~LoopScope() {
+ this->Ctx->BreakLabel = OldBreakLabel;
+ this->Ctx->ContinueLabel = OldContinueLabel;
+ }
+
+private:
+ OptLabelTy OldBreakLabel;
+ OptLabelTy OldContinueLabel;
+};
+
+// Sets the context for a switch scope, mapping labels.
+template <class Emitter> class SwitchScope final : public LabelScope<Emitter> {
+public:
+ using LabelTy = typename Compiler<Emitter>::LabelTy;
+ using OptLabelTy = typename Compiler<Emitter>::OptLabelTy;
+ using CaseMap = typename Compiler<Emitter>::CaseMap;
+
+ SwitchScope(Compiler<Emitter> *Ctx, CaseMap &&CaseLabels, LabelTy BreakLabel,
+ OptLabelTy DefaultLabel)
+ : LabelScope<Emitter>(Ctx), OldBreakLabel(Ctx->BreakLabel),
+ OldDefaultLabel(this->Ctx->DefaultLabel),
+ OldCaseLabels(std::move(this->Ctx->CaseLabels)) {
+ this->Ctx->BreakLabel = BreakLabel;
+ this->Ctx->DefaultLabel = DefaultLabel;
+ this->Ctx->CaseLabels = std::move(CaseLabels);
+ }
+
+ ~SwitchScope() {
+ this->Ctx->BreakLabel = OldBreakLabel;
+ this->Ctx->DefaultLabel = OldDefaultLabel;
+ this->Ctx->CaseLabels = std::move(OldCaseLabels);
+ }
+
+private:
+ OptLabelTy OldBreakLabel;
+ OptLabelTy OldDefaultLabel;
+ CaseMap OldCaseLabels;
+};
+
+template <class Emitter> class StmtExprScope final {
+public:
+ StmtExprScope(Compiler<Emitter> *Ctx) : Ctx(Ctx), OldFlag(Ctx->InStmtExpr) {
+ Ctx->InStmtExpr = true;
+ }
+
+ ~StmtExprScope() { Ctx->InStmtExpr = OldFlag; }
+
+private:
+ Compiler<Emitter> *Ctx;
+ bool OldFlag;
+};
+
+} // namespace interp
+} // namespace clang
+
+template <class Emitter>
+bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) {
+ const Expr *SubExpr = CE->getSubExpr();
+ switch (CE->getCastKind()) {
+
+ case CK_LValueToRValue: {
+ if (DiscardResult)
+ return this->discard(SubExpr);
+
+ std::optional<PrimType> SubExprT = classify(SubExpr->getType());
+ // Prepare storage for the result.
+ if (!Initializing && !SubExprT) {
+ std::optional<unsigned> LocalIndex = allocateLocal(SubExpr);
+ if (!LocalIndex)
+ return false;
+ if (!this->emitGetPtrLocal(*LocalIndex, CE))
+ return false;
+ }
+
+ if (!this->visit(SubExpr))
+ return false;
+
+ if (SubExprT)
+ return this->emitLoadPop(*SubExprT, CE);
+
+ // If the subexpr type is not primitive, we need to perform a copy here.
+ // This happens for example in C when dereferencing a pointer of struct
+ // type.
+ return this->emitMemcpy(CE);
+ }
+
+ case CK_DerivedToBaseMemberPointer: {
+ assert(classifyPrim(CE->getType()) == PT_MemberPtr);
+ assert(classifyPrim(SubExpr->getType()) == PT_MemberPtr);
+ const auto *FromMP = SubExpr->getType()->getAs<MemberPointerType>();
+ const auto *ToMP = CE->getType()->getAs<MemberPointerType>();
+
+ unsigned DerivedOffset = collectBaseOffset(QualType(ToMP->getClass(), 0),
+ QualType(FromMP->getClass(), 0));
+
+ if (!this->visit(SubExpr))
+ return false;
+
+ return this->emitGetMemberPtrBasePop(DerivedOffset, CE);
+ }
+
+ case CK_BaseToDerivedMemberPointer: {
+ assert(classifyPrim(CE) == PT_MemberPtr);
+ assert(classifyPrim(SubExpr) == PT_MemberPtr);
+ const auto *FromMP = SubExpr->getType()->getAs<MemberPointerType>();
+ const auto *ToMP = CE->getType()->getAs<MemberPointerType>();
+
+ unsigned DerivedOffset = collectBaseOffset(QualType(FromMP->getClass(), 0),
+ QualType(ToMP->getClass(), 0));
+
+ if (!this->visit(SubExpr))
+ return false;
+ return this->emitGetMemberPtrBasePop(-DerivedOffset, CE);
+ }
+
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBase: {
+ if (!this->visit(SubExpr))
+ return false;
+
+ const auto extractRecordDecl = [](QualType Ty) -> const CXXRecordDecl * {
+ if (const auto *PT = dyn_cast<PointerType>(Ty))
+ return PT->getPointeeType()->getAsCXXRecordDecl();
+ return Ty->getAsCXXRecordDecl();
+ };
+
+ // FIXME: We can express a series of non-virtual casts as a single
+ // GetPtrBasePop op.
+ QualType CurType = SubExpr->getType();
+ for (const CXXBaseSpecifier *B : CE->path()) {
+ if (B->isVirtual()) {
+ if (!this->emitGetPtrVirtBasePop(extractRecordDecl(B->getType()), CE))
+ return false;
+ CurType = B->getType();
+ } else {
+ unsigned DerivedOffset = collectBaseOffset(B->getType(), CurType);
+ if (!this->emitGetPtrBasePop(DerivedOffset, CE))
+ return false;
+ CurType = B->getType();
+ }
+ }
+
+ return true;
+ }
+
+ case CK_BaseToDerived: {
+ if (!this->visit(SubExpr))
+ return false;
+
+ unsigned DerivedOffset =
+ collectBaseOffset(SubExpr->getType(), CE->getType());
+
+ return this->emitGetPtrDerivedPop(DerivedOffset, CE);
+ }
+
+ case CK_FloatingCast: {
+ // HLSL uses CK_FloatingCast to cast between vectors.
+ if (!SubExpr->getType()->isFloatingType() ||
+ !CE->getType()->isFloatingType())
+ return false;
+ if (DiscardResult)
+ return this->discard(SubExpr);
+ if (!this->visit(SubExpr))
+ return false;
+ const auto *TargetSemantics = &Ctx.getFloatSemantics(CE->getType());
+ return this->emitCastFP(TargetSemantics, getRoundingMode(CE), CE);
+ }
+
+ case CK_IntegralToFloating: {
+ if (DiscardResult)
+ return this->discard(SubExpr);
+ std::optional<PrimType> FromT = classify(SubExpr->getType());
+ if (!FromT)
+ return false;
+
+ if (!this->visit(SubExpr))
+ return false;
+
+ const auto *TargetSemantics = &Ctx.getFloatSemantics(CE->getType());
+ llvm::RoundingMode RM = getRoundingMode(CE);
+ return this->emitCastIntegralFloating(*FromT, TargetSemantics, RM, CE);
+ }
+
+ case CK_FloatingToBoolean:
+ case CK_FloatingToIntegral: {
+ if (DiscardResult)
+ return this->discard(SubExpr);
+
+ std::optional<PrimType> ToT = classify(CE->getType());
+
+ if (!ToT)
+ return false;
+
+ if (!this->visit(SubExpr))
+ return false;
+
+ if (ToT == PT_IntAP)
+ return this->emitCastFloatingIntegralAP(Ctx.getBitWidth(CE->getType()),
+ CE);
+ if (ToT == PT_IntAPS)
+ return this->emitCastFloatingIntegralAPS(Ctx.getBitWidth(CE->getType()),
+ CE);
+
+ return this->emitCastFloatingIntegral(*ToT, CE);
+ }
+
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer: {
+ if (DiscardResult)
+ return true;
+
+ const Descriptor *Desc = nullptr;
+ const QualType PointeeType = CE->getType()->getPointeeType();
+ if (!PointeeType.isNull()) {
+ if (std::optional<PrimType> T = classify(PointeeType))
+ Desc = P.createDescriptor(SubExpr, *T);
+ }
+ return this->emitNull(classifyPrim(CE->getType()), Desc, CE);
+ }
+
+ case CK_PointerToIntegral: {
+ if (DiscardResult)
+ return this->discard(SubExpr);
+
+ if (!this->visit(SubExpr))
+ return false;
+
+ // If SubExpr doesn't result in a pointer, make it one.
+ if (PrimType FromT = classifyPrim(SubExpr->getType()); FromT != PT_Ptr) {
+ assert(isPtrType(FromT));
+ if (!this->emitDecayPtr(FromT, PT_Ptr, CE))
+ return false;
+ }
+
+ PrimType T = classifyPrim(CE->getType());
+ if (T == PT_IntAP)
+ return this->emitCastPointerIntegralAP(Ctx.getBitWidth(CE->getType()),
+ CE);
+ if (T == PT_IntAPS)
+ return this->emitCastPointerIntegralAPS(Ctx.getBitWidth(CE->getType()),
+ CE);
+ return this->emitCastPointerIntegral(T, CE);
+ }
+
+ case CK_ArrayToPointerDecay: {
+ if (!this->visit(SubExpr))
+ return false;
+ if (!this->emitArrayDecay(CE))
+ return false;
+ if (DiscardResult)
+ return this->emitPopPtr(CE);
+ return true;
+ }
+
+ case CK_IntegralToPointer: {
+ QualType IntType = SubExpr->getType();
+ assert(IntType->isIntegralOrEnumerationType());
+ if (!this->visit(SubExpr))
+ return false;
+ // FIXME: I think the discard is wrong since the int->ptr cast might cause a
+ // diagnostic.
+ PrimType T = classifyPrim(IntType);
+ if (DiscardResult)
+ return this->emitPop(T, CE);
+
+ QualType PtrType = CE->getType();
+ assert(PtrType->isPointerType());
+
+ const Descriptor *Desc;
+ if (std::optional<PrimType> T = classify(PtrType->getPointeeType()))
+ Desc = P.createDescriptor(SubExpr, *T);
+ else if (PtrType->getPointeeType()->isVoidType())
+ Desc = nullptr;
+ else
+ Desc = P.createDescriptor(CE, PtrType->getPointeeType().getTypePtr(),
+ Descriptor::InlineDescMD, true, false,
+ /*IsMutable=*/false, nullptr);
+
+ if (!this->emitGetIntPtr(T, Desc, CE))
+ return false;
+
+ PrimType DestPtrT = classifyPrim(PtrType);
+ if (DestPtrT == PT_Ptr)
+ return true;
+
+ // In case we're converting the integer to a non-Pointer.
+ return this->emitDecayPtr(PT_Ptr, DestPtrT, CE);
+ }
+
+ case CK_AtomicToNonAtomic:
+ case CK_ConstructorConversion:
+ case CK_FunctionToPointerDecay:
+ case CK_NonAtomicToAtomic:
+ case CK_NoOp:
+ case CK_UserDefinedConversion:
+ case CK_AddressSpaceConversion:
+ return this->delegate(SubExpr);
+
+ case CK_BitCast: {
+ // Reject bitcasts to atomic types.
+ if (CE->getType()->isAtomicType()) {
+ if (!this->discard(SubExpr))
+ return false;
+ return this->emitInvalidCast(CastKind::Reinterpret, CE);
+ }
+
+ if (DiscardResult)
+ return this->discard(SubExpr);
+
+ QualType SubExprTy = SubExpr->getType();
+ std::optional<PrimType> FromT = classify(SubExprTy);
+ std::optional<PrimType> ToT = classify(CE->getType());
+ if (!FromT || !ToT)
+ return false;
+
+ assert(isPtrType(*FromT));
+ assert(isPtrType(*ToT));
+ if (FromT == ToT) {
+ if (CE->getType()->isVoidPointerType())
+ return this->delegate(SubExpr);
+
+ if (!this->visit(SubExpr))
+ return false;
+ if (FromT == PT_Ptr)
+ return this->emitPtrPtrCast(SubExprTy->isVoidPointerType(), CE);
+ return true;
+ }
+
+ if (!this->visit(SubExpr))
+ return false;
+ return this->emitDecayPtr(*FromT, *ToT, CE);
+ }
+
+ case CK_IntegralToBoolean:
+ case CK_BooleanToSignedIntegral:
+ case CK_IntegralCast: {
+ if (DiscardResult)
+ return this->discard(SubExpr);
+ std::optional<PrimType> FromT = classify(SubExpr->getType());
+ std::optional<PrimType> ToT = classify(CE->getType());
+
+ if (!FromT || !ToT)
+ return false;
+
+ if (!this->visit(SubExpr))
+ return false;
+
+ // Possibly diagnose casts to enum types if the target type does not
+ // have a fixed size.
+ if (Ctx.getLangOpts().CPlusPlus && CE->getType()->isEnumeralType()) {
+ if (const auto *ET = CE->getType().getCanonicalType()->getAs<EnumType>();
+ ET && !ET->getDecl()->isFixed()) {
+ if (!this->emitCheckEnumValue(*FromT, ET->getDecl(), CE))
+ return false;
+ }
+ }
+
+ if (ToT == PT_IntAP)
+ return this->emitCastAP(*FromT, Ctx.getBitWidth(CE->getType()), CE);
+ if (ToT == PT_IntAPS)
+ return this->emitCastAPS(*FromT, Ctx.getBitWidth(CE->getType()), CE);
+
+ if (FromT == ToT)
+ return true;
+ if (!this->emitCast(*FromT, *ToT, CE))
+ return false;
+
+ if (CE->getCastKind() == CK_BooleanToSignedIntegral)
+ return this->emitNeg(*ToT, CE);
+ return true;
+ }
+
+ case CK_PointerToBoolean:
+ case CK_MemberPointerToBoolean: {
+ PrimType PtrT = classifyPrim(SubExpr->getType());
+
+ // Just emit p != nullptr for this.
+ if (!this->visit(SubExpr))
+ return false;
+
+ if (!this->emitNull(PtrT, nullptr, CE))
+ return false;
+
+ return this->emitNE(PtrT, CE);
+ }
+
+ case CK_IntegralComplexToBoolean:
+ case CK_FloatingComplexToBoolean: {
+ if (DiscardResult)
+ return this->discard(SubExpr);
+ if (!this->visit(SubExpr))
+ return false;
+ return this->emitComplexBoolCast(SubExpr);
+ }
+
+ case CK_IntegralComplexToReal:
+ case CK_FloatingComplexToReal:
+ return this->emitComplexReal(SubExpr);
+
+ case CK_IntegralRealToComplex:
+ case CK_FloatingRealToComplex: {
+ // We're creating a complex value here, so we need to
+ // allocate storage for it.
+ if (!Initializing) {
+ std::optional<unsigned> LocalIndex = allocateLocal(CE);
+ if (!LocalIndex)
+ return false;
+ if (!this->emitGetPtrLocal(*LocalIndex, CE))
+ return false;
+ }
+
+ // Init the complex value to {SubExpr, 0}.
+ if (!this->visitArrayElemInit(0, SubExpr))
+ return false;
+ // Zero-init the second element.
+ PrimType T = classifyPrim(SubExpr->getType());
+ if (!this->visitZeroInitializer(T, SubExpr->getType(), SubExpr))
+ return false;
+ return this->emitInitElem(T, 1, SubExpr);
+ }
+
+ case CK_IntegralComplexCast:
+ case CK_FloatingComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_FloatingComplexToIntegralComplex: {
+ assert(CE->getType()->isAnyComplexType());
+ assert(SubExpr->getType()->isAnyComplexType());
+ if (DiscardResult)
+ return this->discard(SubExpr);
+
+ if (!Initializing) {
+ std::optional<unsigned> LocalIndex = allocateLocal(CE);
+ if (!LocalIndex)
+ return false;
+ if (!this->emitGetPtrLocal(*LocalIndex, CE))
+ return false;
+ }
+
+ // Location for the SubExpr.
+ // Since SubExpr is of complex type, visiting it results in a pointer
+ // anyway, so we just create a temporary pointer variable.
+ unsigned SubExprOffset = allocateLocalPrimitive(
+ SubExpr, PT_Ptr, /*IsConst=*/true, /*IsExtended=*/false);
+ if (!this->visit(SubExpr))
+ return false;
+ if (!this->emitSetLocal(PT_Ptr, SubExprOffset, CE))
+ return false;
+
+ PrimType SourceElemT = classifyComplexElementType(SubExpr->getType());
+ QualType DestElemType =
+ CE->getType()->getAs<ComplexType>()->getElementType();
+ PrimType DestElemT = classifyPrim(DestElemType);
+ // Cast both elements individually.
+ for (unsigned I = 0; I != 2; ++I) {
+ if (!this->emitGetLocal(PT_Ptr, SubExprOffset, CE))
+ return false;
+ if (!this->emitArrayElemPop(SourceElemT, I, CE))
+ return false;
+
+ // Do the cast.
+ if (!this->emitPrimCast(SourceElemT, DestElemT, DestElemType, CE))
+ return false;
+
+ // Save the value.
+ if (!this->emitInitElem(DestElemT, I, CE))
+ return false;
+ }
+ return true;
+ }
+
+ case CK_VectorSplat: {
+ assert(!classify(CE->getType()));
+ assert(classify(SubExpr->getType()));
+ assert(CE->getType()->isVectorType());
+
+ if (DiscardResult)
+ return this->discard(SubExpr);
+
+ if (!Initializing) {
+ std::optional<unsigned> LocalIndex = allocateLocal(CE);
+ if (!LocalIndex)
+ return false;
+ if (!this->emitGetPtrLocal(*LocalIndex, CE))
+ return false;
+ }
+
+ const auto *VT = CE->getType()->getAs<VectorType>();
+ PrimType ElemT = classifyPrim(SubExpr->getType());
+ unsigned ElemOffset = allocateLocalPrimitive(
+ SubExpr, ElemT, /*IsConst=*/true, /*IsExtended=*/false);
+
+ // Prepare a local variable for the scalar value.
+ if (!this->visit(SubExpr))
+ return false;
+ if (classifyPrim(SubExpr) == PT_Ptr && !this->emitLoadPop(ElemT, CE))
+ return false;
+
+ if (!this->emitSetLocal(ElemT, ElemOffset, CE))
+ return false;
+
+ for (unsigned I = 0; I != VT->getNumElements(); ++I) {
+ if (!this->emitGetLocal(ElemT, ElemOffset, CE))
+ return false;
+ if (!this->emitInitElem(ElemT, I, CE))
+ return false;
+ }
+
+ return true;
+ }
+
+ case CK_ToVoid:
+ return discard(SubExpr);
+
+ default:
+ return this->emitInvalid(CE);
+ }
+ llvm_unreachable("Unhandled clang::CastKind enum");
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::VisitIntegerLiteral(const IntegerLiteral *LE) {
+  // A discarded integer literal has no side effects, so emit nothing;
+  // otherwise push its APInt value as a constant.
+  return DiscardResult ? true : this->emitConst(LE->getValue(), LE);
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::VisitFloatingLiteral(const FloatingLiteral *E) {
+  // A discarded floating literal has no side effects, so emit nothing;
+  // otherwise push its APFloat value as a constant.
+  return DiscardResult ? true : this->emitConstFloat(E->getValue(), E);
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::VisitImaginaryLiteral(const ImaginaryLiteral *E) {
+  assert(E->getType()->isAnyComplexType());
+  if (DiscardResult)
+    return true;
+
+  // Unless a complex object is already being initialized, allocate a
+  // local to hold the {0, imag} pair and leave a pointer to it on the
+  // stack for the element stores below.
+  if (!Initializing) {
+    std::optional<unsigned> LocalIndex = allocateLocal(E);
+    if (!LocalIndex)
+      return false;
+    if (!this->emitGetPtrLocal(*LocalIndex, E))
+      return false;
+  }
+
+  const Expr *Imag = E->getSubExpr();
+  PrimType ElemT = classifyPrim(Imag->getType());
+
+  // The real component of an imaginary literal is always zero ...
+  if (!this->visitZeroInitializer(ElemT, Imag->getType(), Imag))
+    return false;
+  if (!this->emitInitElem(ElemT, 0, Imag))
+    return false;
+  // ... and the subexpression supplies the imaginary component.
+  return this->visitArrayElemInit(1, Imag);
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::VisitParenExpr(const ParenExpr *E) {
+  // Parentheses carry no semantics of their own: compile the wrapped
+  // expression with the same DiscardResult/Initializing flags.
+  const Expr *Inner = E->getSubExpr();
+  return this->delegate(Inner);
+}
+
+/// Compile a binary operator.
+///
+/// Logical &&/|| (short-circuiting), the comma operator, complex
+/// arithmetic, pointer-to-member binding and the three-way comparison
+/// all take special paths; every remaining operator is classified to
+/// primitive types and lowered to a single arithmetic/comparison op.
+template <class Emitter>
+bool Compiler<Emitter>::VisitBinaryOperator(const BinaryOperator *BO) {
+  // Need short-circuiting for these.
+  if (BO->isLogicalOp())
+    return this->VisitLogicalBinOp(BO);
+
+  const Expr *LHS = BO->getLHS();
+  const Expr *RHS = BO->getRHS();
+
+  // Handle comma operators. Just discard the LHS
+  // and delegate to RHS.
+  if (BO->isCommaOp()) {
+    if (!this->discard(LHS))
+      return false;
+    if (RHS->getType()->isVoidType())
+      return this->discard(RHS);
+
+    return this->delegate(RHS);
+  }
+
+  if (BO->getType()->isAnyComplexType())
+    return this->VisitComplexBinOp(BO);
+  if ((LHS->getType()->isAnyComplexType() ||
+       RHS->getType()->isAnyComplexType()) &&
+      BO->isComparisonOp())
+    return this->emitComplexComparison(LHS, RHS, BO);
+
+  if (BO->isPtrMemOp()) {
+    if (!this->visit(LHS))
+      return false;
+
+    if (!this->visit(RHS))
+      return false;
+
+    if (!this->emitToMemberPtr(BO))
+      return false;
+
+    if (classifyPrim(BO) == PT_MemberPtr)
+      return true;
+
+    if (!this->emitCastMemberPtrPtr(BO))
+      return false;
+    return DiscardResult ? this->emitPopPtr(BO) : true;
+  }
+
+  // Typecheck the args.
+  std::optional<PrimType> LT = classify(LHS->getType());
+  std::optional<PrimType> RT = classify(RHS->getType());
+  std::optional<PrimType> T = classify(BO->getType());
+
+  // Special case for C++'s three-way/spaceship operator <=>, which
+  // returns a std::{strong,weak,partial}_ordering (which is a class, so doesn't
+  // have a PrimType).
+  if (!T && BO->getOpcode() == BO_Cmp) {
+    if (DiscardResult)
+      return true;
+    const ComparisonCategoryInfo *CmpInfo =
+        Ctx.getASTContext().CompCategories.lookupInfoForType(BO->getType());
+    assert(CmpInfo);
+
+    // We need a temporary variable holding our return value.
+    if (!Initializing) {
+      std::optional<unsigned> ResultIndex = this->allocateLocal(BO);
+      // allocateLocal() can fail; don't dereference an empty optional
+      // (all other call sites of allocateLocal() check this too).
+      if (!ResultIndex)
+        return false;
+      if (!this->emitGetPtrLocal(*ResultIndex, BO))
+        return false;
+    }
+
+    // Guard the *LT dereference below; classify() may have failed for
+    // the operand types.
+    if (!LT || !RT)
+      return false;
+
+    if (!visit(LHS) || !visit(RHS))
+      return false;
+
+    return this->emitCMP3(*LT, CmpInfo, BO);
+  }
+
+  if (!LT || !RT || !T)
+    return false;
+
+  // Pointer arithmetic special case.
+  if (BO->getOpcode() == BO_Add || BO->getOpcode() == BO_Sub) {
+    if (isPtrType(*T) || (isPtrType(*LT) && isPtrType(*RT)))
+      return this->VisitPointerArithBinOp(BO);
+  }
+
+  if (!visit(LHS) || !visit(RHS))
+    return false;
+
+  // For languages such as C, cast the result of one
+  // of our comparision opcodes to T (which is usually int).
+  auto MaybeCastToBool = [this, T, BO](bool Result) {
+    if (!Result)
+      return false;
+    if (DiscardResult)
+      return this->emitPop(*T, BO);
+    if (T != PT_Bool)
+      return this->emitCast(PT_Bool, *T, BO);
+    return true;
+  };
+
+  auto Discard = [this, T, BO](bool Result) {
+    if (!Result)
+      return false;
+    return DiscardResult ? this->emitPop(*T, BO) : true;
+  };
+
+  switch (BO->getOpcode()) {
+  case BO_EQ:
+    return MaybeCastToBool(this->emitEQ(*LT, BO));
+  case BO_NE:
+    return MaybeCastToBool(this->emitNE(*LT, BO));
+  case BO_LT:
+    return MaybeCastToBool(this->emitLT(*LT, BO));
+  case BO_LE:
+    return MaybeCastToBool(this->emitLE(*LT, BO));
+  case BO_GT:
+    return MaybeCastToBool(this->emitGT(*LT, BO));
+  case BO_GE:
+    return MaybeCastToBool(this->emitGE(*LT, BO));
+  case BO_Sub:
+    if (BO->getType()->isFloatingType())
+      return Discard(this->emitSubf(getRoundingMode(BO), BO));
+    return Discard(this->emitSub(*T, BO));
+  case BO_Add:
+    if (BO->getType()->isFloatingType())
+      return Discard(this->emitAddf(getRoundingMode(BO), BO));
+    return Discard(this->emitAdd(*T, BO));
+  case BO_Mul:
+    if (BO->getType()->isFloatingType())
+      return Discard(this->emitMulf(getRoundingMode(BO), BO));
+    return Discard(this->emitMul(*T, BO));
+  case BO_Rem:
+    return Discard(this->emitRem(*T, BO));
+  case BO_Div:
+    if (BO->getType()->isFloatingType())
+      return Discard(this->emitDivf(getRoundingMode(BO), BO));
+    return Discard(this->emitDiv(*T, BO));
+  case BO_Assign:
+    if (DiscardResult)
+      return LHS->refersToBitField() ? this->emitStoreBitFieldPop(*T, BO)
+                                     : this->emitStorePop(*T, BO);
+    if (LHS->refersToBitField()) {
+      if (!this->emitStoreBitField(*T, BO))
+        return false;
+    } else {
+      if (!this->emitStore(*T, BO))
+        return false;
+    }
+    // Assignments aren't necessarily lvalues in C.
+    // Load from them in that case.
+    if (!BO->isLValue())
+      return this->emitLoadPop(*T, BO);
+    return true;
+  case BO_And:
+    return Discard(this->emitBitAnd(*T, BO));
+  case BO_Or:
+    return Discard(this->emitBitOr(*T, BO));
+  case BO_Shl:
+    return Discard(this->emitShl(*LT, *RT, BO));
+  case BO_Shr:
+    return Discard(this->emitShr(*LT, *RT, BO));
+  case BO_Xor:
+    return Discard(this->emitBitXor(*T, BO));
+  case BO_LOr:
+  case BO_LAnd:
+    llvm_unreachable("Already handled earlier");
+  default:
+    return false;
+  }
+
+  llvm_unreachable("Unhandled binary op");
+}
+
+/// Perform addition/subtraction of a pointer and an integer, or
+/// subtraction of two pointers.
+template <class Emitter>
+bool Compiler<Emitter>::VisitPointerArithBinOp(const BinaryOperator *E) {
+  const BinaryOperatorKind Opc = E->getOpcode();
+  const Expr *LHS = E->getLHS();
+  const Expr *RHS = E->getRHS();
+  const bool LHSIsPtr = LHS->getType()->isPointerType();
+  const bool RHSIsPtr = RHS->getType()->isPointerType();
+
+  // Only + and - count as pointer arithmetic, and at least one side
+  // must actually be a pointer.
+  if ((Opc != BO_Add && Opc != BO_Sub) || (!LHSIsPtr && !RHSIsPtr))
+    return false;
+
+  std::optional<PrimType> LT = classify(LHS);
+  std::optional<PrimType> RT = classify(RHS);
+  if (!LT || !RT)
+    return false;
+
+  // Pointer difference: only valid for subtraction, yields an integer.
+  if (LHSIsPtr && RHSIsPtr) {
+    if (Opc != BO_Sub)
+      return false;
+
+    assert(E->getType()->isIntegerType());
+    if (!visit(RHS) || !visit(LHS))
+      return false;
+
+    return this->emitSubPtr(classifyPrim(E->getType()), E);
+  }
+
+  // Pointer +/- integer. The evaluation order below places the integer
+  // offset on top of the stack for the AddOffset/SubOffset op.
+  PrimType OffsetT;
+  if (LHS->getType()->isIntegerType()) {
+    if (!visit(RHS) || !visit(LHS))
+      return false;
+    OffsetT = *LT;
+  } else if (RHS->getType()->isIntegerType()) {
+    if (!visit(LHS) || !visit(RHS))
+      return false;
+    OffsetT = *RT;
+  } else {
+    return false;
+  }
+
+  if (Opc == BO_Add)
+    return this->emitAddOffset(OffsetT, E);
+  if (Opc == BO_Sub)
+    return this->emitSubOffset(OffsetT, E);
+  return false;
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::VisitLogicalBinOp(const BinaryOperator *E) {
+  assert(E->isLogicalOp());
+  const Expr *LHS = E->getLHS();
+  const Expr *RHS = E->getRHS();
+  std::optional<PrimType> T = classify(E->getType());
+  const bool IsOr = E->getOpcode() == BO_LOr;
+  assert(IsOr || E->getOpcode() == BO_LAnd);
+
+  // Short-circuit target: for || we jump there when the LHS is already
+  // true (result is true); for && when the LHS is already false (result
+  // is false). Otherwise the RHS's boolean value becomes the result.
+  LabelTy LabelShortCircuit = this->getLabel();
+  LabelTy LabelEnd = this->getLabel();
+
+  if (!this->visitBool(LHS))
+    return false;
+  bool Jumped = IsOr ? this->jumpTrue(LabelShortCircuit)
+                     : this->jumpFalse(LabelShortCircuit);
+  if (!Jumped)
+    return false;
+
+  if (!this->visitBool(RHS))
+    return false;
+  if (!this->jump(LabelEnd))
+    return false;
+
+  // The short-circuited result is simply the constant that triggered
+  // the jump: true for ||, false for &&.
+  this->emitLabel(LabelShortCircuit);
+  this->emitConstBool(IsOr, E);
+  this->fallthrough(LabelEnd);
+  this->emitLabel(LabelEnd);
+
+  if (DiscardResult)
+    return this->emitPopBool(E);
+
+  // For C, cast back to integer type.
+  assert(T);
+  if (T != PT_Bool)
+    return this->emitCast(PT_Bool, *T, E);
+  return true;
+}
+
+/// Compile a binary operator where at least one operand (or the result)
+/// is of complex type. The other operand may be a plain real/integer
+/// value, which is then broadcast into the element-wise computation.
+/// Emission order is significant: this is a stack machine, and every
+/// emit*/visit call below pushes or pops operands in a fixed sequence.
+template <class Emitter>
+bool Compiler<Emitter>::VisitComplexBinOp(const BinaryOperator *E) {
+  // Prepare storage for result.
+  if (!Initializing) {
+    std::optional<unsigned> LocalIndex = allocateLocal(E);
+    if (!LocalIndex)
+      return false;
+    if (!this->emitGetPtrLocal(*LocalIndex, E))
+      return false;
+  }
+
+  // Both LHS and RHS might _not_ be of complex type, but one of them
+  // needs to be.
+  const Expr *LHS = E->getLHS();
+  const Expr *RHS = E->getRHS();
+
+  // ~0u is a sentinel: ResultOffset is only allocated (and only read
+  // below) when the result is not being discarded.
+  PrimType ResultElemT = this->classifyComplexElementType(E->getType());
+  unsigned ResultOffset = ~0u;
+  if (!DiscardResult)
+    ResultOffset = this->allocateLocalPrimitive(E, PT_Ptr, true, false);
+
+  // Save result pointer in ResultOffset
+  if (!this->DiscardResult) {
+    if (!this->emitDupPtr(E))
+      return false;
+    if (!this->emitSetLocal(PT_Ptr, ResultOffset, E))
+      return false;
+  }
+  // Strip atomic wrappers; the value type drives classification below.
+  QualType LHSType = LHS->getType();
+  if (const auto *AT = LHSType->getAs<AtomicType>())
+    LHSType = AT->getValueType();
+  QualType RHSType = RHS->getType();
+  if (const auto *AT = RHSType->getAs<AtomicType>())
+    RHSType = AT->getValueType();
+
+  bool LHSIsComplex = LHSType->isAnyComplexType();
+  unsigned LHSOffset;
+  bool RHSIsComplex = RHSType->isAnyComplexType();
+
+  // For ComplexComplex Mul, we have special ops to make their implementation
+  // easier.
+  BinaryOperatorKind Op = E->getOpcode();
+  if (Op == BO_Mul && LHSIsComplex && RHSIsComplex) {
+    assert(classifyPrim(LHSType->getAs<ComplexType>()->getElementType()) ==
+           classifyPrim(RHSType->getAs<ComplexType>()->getElementType()));
+    PrimType ElemT =
+        classifyPrim(LHSType->getAs<ComplexType>()->getElementType());
+    if (!this->visit(LHS))
+      return false;
+    if (!this->visit(RHS))
+      return false;
+    return this->emitMulc(ElemT, E);
+  }
+
+  // Complex divisor: use the dedicated Divc op. A real LHS is first
+  // widened into a temporary complex value {LHS, 0}.
+  if (Op == BO_Div && RHSIsComplex) {
+    QualType ElemQT = RHSType->getAs<ComplexType>()->getElementType();
+    PrimType ElemT = classifyPrim(ElemQT);
+    // If the LHS is not complex, we still need to do the full complex
+    // division, so just stub create a complex value and stub it out with
+    // the LHS and a zero.
+
+    if (!LHSIsComplex) {
+      // This is using the RHS type for the fake-complex LHS.
+      if (auto LHSO = allocateLocal(RHS))
+        LHSOffset = *LHSO;
+      else
+        return false;
+
+      if (!this->emitGetPtrLocal(LHSOffset, E))
+        return false;
+
+      if (!this->visit(LHS))
+        return false;
+      // real is LHS
+      if (!this->emitInitElem(ElemT, 0, E))
+        return false;
+      // imag is zero
+      if (!this->visitZeroInitializer(ElemT, ElemQT, E))
+        return false;
+      if (!this->emitInitElem(ElemT, 1, E))
+        return false;
+    } else {
+      if (!this->visit(LHS))
+        return false;
+    }
+
+    if (!this->visit(RHS))
+      return false;
+    return this->emitDivc(ElemT, E);
+  }
+
+  // Evaluate LHS and save value to LHSOffset.
+  // Complex operands leave a pointer on the stack; scalar operands
+  // leave their primitive value. Either way, stash it in a local so
+  // the element loop below can re-read it twice.
+  if (LHSType->isAnyComplexType()) {
+    LHSOffset = this->allocateLocalPrimitive(LHS, PT_Ptr, true, false);
+    if (!this->visit(LHS))
+      return false;
+    if (!this->emitSetLocal(PT_Ptr, LHSOffset, E))
+      return false;
+  } else {
+    PrimType LHST = classifyPrim(LHSType);
+    LHSOffset = this->allocateLocalPrimitive(LHS, LHST, true, false);
+    if (!this->visit(LHS))
+      return false;
+    if (!this->emitSetLocal(LHST, LHSOffset, E))
+      return false;
+  }
+
+  // Same with RHS.
+  unsigned RHSOffset;
+  if (RHSType->isAnyComplexType()) {
+    RHSOffset = this->allocateLocalPrimitive(RHS, PT_Ptr, true, false);
+    if (!this->visit(RHS))
+      return false;
+    if (!this->emitSetLocal(PT_Ptr, RHSOffset, E))
+      return false;
+  } else {
+    PrimType RHST = classifyPrim(RHSType);
+    RHSOffset = this->allocateLocalPrimitive(RHS, RHST, true, false);
+    if (!this->visit(RHS))
+      return false;
+    if (!this->emitSetLocal(RHST, RHSOffset, E))
+      return false;
+  }
+
+  // For both LHS and RHS, either load the value from the complex pointer, or
+  // directly from the local variable. For index 1 (i.e. the imaginary part),
+  // just load 0 and do the operation anyway.
+  // LoadZero is true for +/- (a real operand's imaginary part is 0);
+  // for * and / the scalar is re-loaded for both elements, since a
+  // scalar multiplies/divides each component.
+  auto loadComplexValue = [this](bool IsComplex, bool LoadZero,
+                                 unsigned ElemIndex, unsigned Offset,
+                                 const Expr *E) -> bool {
+    if (IsComplex) {
+      if (!this->emitGetLocal(PT_Ptr, Offset, E))
+        return false;
+      return this->emitArrayElemPop(classifyComplexElementType(E->getType()),
+                                    ElemIndex, E);
+    }
+    if (ElemIndex == 0 || !LoadZero)
+      return this->emitGetLocal(classifyPrim(E->getType()), Offset, E);
+    return this->visitZeroInitializer(classifyPrim(E->getType()), E->getType(),
+                                      E);
+  };
+
+  // Now we can get pointers to the LHS and RHS from the offsets above.
+  for (unsigned ElemIndex = 0; ElemIndex != 2; ++ElemIndex) {
+    // Result pointer for the store later.
+    if (!this->DiscardResult) {
+      if (!this->emitGetLocal(PT_Ptr, ResultOffset, E))
+        return false;
+    }
+
+    // The actual operation.
+    switch (Op) {
+    case BO_Add:
+      if (!loadComplexValue(LHSIsComplex, true, ElemIndex, LHSOffset, LHS))
+        return false;
+
+      if (!loadComplexValue(RHSIsComplex, true, ElemIndex, RHSOffset, RHS))
+        return false;
+      if (ResultElemT == PT_Float) {
+        if (!this->emitAddf(getRoundingMode(E), E))
+          return false;
+      } else {
+        if (!this->emitAdd(ResultElemT, E))
+          return false;
+      }
+      break;
+    case BO_Sub:
+      if (!loadComplexValue(LHSIsComplex, true, ElemIndex, LHSOffset, LHS))
+        return false;
+
+      if (!loadComplexValue(RHSIsComplex, true, ElemIndex, RHSOffset, RHS))
+        return false;
+      if (ResultElemT == PT_Float) {
+        if (!this->emitSubf(getRoundingMode(E), E))
+          return false;
+      } else {
+        if (!this->emitSub(ResultElemT, E))
+          return false;
+      }
+      break;
+    case BO_Mul:
+      if (!loadComplexValue(LHSIsComplex, false, ElemIndex, LHSOffset, LHS))
+        return false;
+
+      if (!loadComplexValue(RHSIsComplex, false, ElemIndex, RHSOffset, RHS))
+        return false;
+
+      if (ResultElemT == PT_Float) {
+        if (!this->emitMulf(getRoundingMode(E), E))
+          return false;
+      } else {
+        if (!this->emitMul(ResultElemT, E))
+          return false;
+      }
+      break;
+    case BO_Div:
+      // A complex RHS was already handled by the Divc path above.
+      assert(!RHSIsComplex);
+      if (!loadComplexValue(LHSIsComplex, false, ElemIndex, LHSOffset, LHS))
+        return false;
+
+      if (!loadComplexValue(RHSIsComplex, false, ElemIndex, RHSOffset, RHS))
+        return false;
+
+      if (ResultElemT == PT_Float) {
+        if (!this->emitDivf(getRoundingMode(E), E))
+          return false;
+      } else {
+        if (!this->emitDiv(ResultElemT, E))
+          return false;
+      }
+      break;
+
+    default:
+      return false;
+    }
+
+    if (!this->DiscardResult) {
+      // Initialize array element with the value we just computed.
+      if (!this->emitInitElemPop(ResultElemT, ElemIndex, E))
+        return false;
+    } else {
+      if (!this->emitPop(ResultElemT, E))
+        return false;
+    }
+  }
+  return true;
+}
+
+/// Compile an implicit value-initialization: produce a zero/empty value
+/// of the expression's type. Dispatches on the type category; returning
+/// false means this type is not (yet) supported here.
+template <class Emitter>
+bool Compiler<Emitter>::VisitImplicitValueInitExpr(
+    const ImplicitValueInitExpr *E) {
+  QualType QT = E->getType();
+
+  // Primitive types: push a single zero value of the classified type.
+  if (std::optional<PrimType> T = classify(QT))
+    return this->visitZeroInitializer(*T, QT, E);
+
+  if (QT->isRecordType()) {
+    const RecordDecl *RD = QT->getAsRecordDecl();
+    assert(RD);
+    if (RD->isInvalidDecl())
+      return false;
+    if (RD->isUnion()) {
+      // C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the
+      // object's first non-static named data member is zero-initialized
+      // FIXME
+      return false;
+    }
+
+    // Virtual bases are not supported in this path yet; bail out.
+    if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
+        CXXRD && CXXRD->getNumVBases() > 0) {
+      // TODO: Diagnose.
+      return false;
+    }
+
+    const Record *R = getRecord(QT);
+    if (!R)
+      return false;
+
+    // Zero-init every field of the record pointed to by the
+    // initialization target on the stack.
+    assert(Initializing);
+    return this->visitZeroRecordInitializer(R, E);
+  }
+
+  // An incomplete array has no elements to initialize.
+  if (QT->isIncompleteArrayType())
+    return true;
+
+  // Constant-size arrays: zero-init each element individually.
+  // (Checked after the incomplete-array case above, so the cast to
+  // ConstantArrayType below is safe.)
+  if (QT->isArrayType()) {
+    const ArrayType *AT = QT->getAsArrayTypeUnsafe();
+    assert(AT);
+    const auto *CAT = cast<ConstantArrayType>(AT);
+    size_t NumElems = CAT->getZExtSize();
+    PrimType ElemT = classifyPrim(CAT->getElementType());
+
+    for (size_t I = 0; I != NumElems; ++I) {
+      if (!this->visitZeroInitializer(ElemT, CAT->getElementType(), E))
+        return false;
+      if (!this->emitInitElem(ElemT, I, E))
+        return false;
+    }
+
+    return true;
+  }
+
+  // Complex: zero both the real (index 0) and imaginary (index 1) part.
+  if (const auto *ComplexTy = E->getType()->getAs<ComplexType>()) {
+    assert(Initializing);
+    QualType ElemQT = ComplexTy->getElementType();
+    PrimType ElemT = classifyPrim(ElemQT);
+    for (unsigned I = 0; I < 2; ++I) {
+      if (!this->visitZeroInitializer(ElemT, ElemQT, E))
+        return false;
+      if (!this->emitInitElem(ElemT, I, E))
+        return false;
+    }
+    return true;
+  }
+
+  // Vectors: zero every lane.
+  if (const auto *VecT = E->getType()->getAs<VectorType>()) {
+    unsigned NumVecElements = VecT->getNumElements();
+    QualType ElemQT = VecT->getElementType();
+    PrimType ElemT = classifyPrim(ElemQT);
+
+    for (unsigned I = 0; I < NumVecElements; ++I) {
+      if (!this->visitZeroInitializer(ElemT, ElemQT, E))
+        return false;
+      if (!this->emitInitElem(ElemT, I, E))
+        return false;
+    }
+    return true;
+  }
+
+  return false;
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
+  const Expr *Base = E->getBase();
+  const Expr *Index = E->getIdx();
+
+  // A discarded subscript still evaluates both operands for their
+  // side effects.
+  if (DiscardResult)
+    return this->discard(Base) && this->discard(Index);
+
+  // Push the base pointer, push the index value, then collapse both
+  // into a single pointer to the selected element.
+  if (!this->visit(Base) || !this->visit(Index))
+    return false;
+
+  return this->emitArrayElemPtrPop(classifyPrim(Index->getType()), E);
+}
+
+/// Compile an initializer list (or paren-list init) for the object the
+/// current initialization target points to. \p Inits are the explicit
+/// initializers, \p ArrayFiller (may be null) fills remaining array
+/// elements, and \p E is the enclosing init expression.
+/// Handles primitives, records (incl. unions and base subobjects),
+/// arrays (incl. #embed expansion), complex values and vectors.
+template <class Emitter>
+bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
+                                      const Expr *ArrayFiller, const Expr *E) {
+
+  QualType QT = E->getType();
+
+  if (const auto *AT = QT->getAs<AtomicType>())
+    QT = AT->getValueType();
+
+  if (QT->isVoidType())
+    return this->emitInvalid(E);
+
+  // Handle discarding first.
+  if (DiscardResult) {
+    for (const Expr *Init : Inits) {
+      if (!this->discard(Init))
+        return false;
+    }
+    return true;
+  }
+
+  // Primitive values.
+  if (std::optional<PrimType> T = classify(QT)) {
+    assert(!DiscardResult);
+    if (Inits.size() == 0)
+      return this->visitZeroInitializer(*T, QT, E);
+    assert(Inits.size() == 1);
+    return this->delegate(Inits[0]);
+  }
+
+  if (QT->isRecordType()) {
+    const Record *R = getRecord(QT);
+    // getRecord() can fail (e.g. for incomplete types); bail out like
+    // VisitImplicitValueInitExpr() does instead of dereferencing null.
+    if (!R)
+      return false;
+
+    if (Inits.size() == 1 && E->getType() == Inits[0]->getType())
+      return this->delegate(Inits[0]);
+
+    auto initPrimitiveField = [=](const Record::Field *FieldToInit,
+                                  const Expr *Init, PrimType T) -> bool {
+      InitStackScope<Emitter> ISS(this, isa<CXXDefaultInitExpr>(Init));
+      if (!this->visit(Init))
+        return false;
+
+      if (FieldToInit->isBitField())
+        return this->emitInitBitField(T, FieldToInit, E);
+      return this->emitInitField(T, FieldToInit->Offset, E);
+    };
+
+    auto initCompositeField = [=](const Record::Field *FieldToInit,
+                                  const Expr *Init) -> bool {
+      InitStackScope<Emitter> ISS(this, isa<CXXDefaultInitExpr>(Init));
+      InitLinkScope<Emitter> ILS(this, InitLink::Field(FieldToInit->Offset));
+      // Non-primitive case. Get a pointer to the field-to-initialize
+      // on the stack and recurse into visitInitializer().
+      if (!this->emitGetPtrField(FieldToInit->Offset, Init))
+        return false;
+      if (!this->visitInitializer(Init))
+        return false;
+      return this->emitPopPtr(E);
+    };
+
+    if (R->isUnion()) {
+      if (Inits.size() == 0) {
+        // Zero-initialize the first union field.
+        if (R->getNumFields() == 0)
+          return this->emitFinishInit(E);
+        const Record::Field *FieldToInit = R->getField(0u);
+        QualType FieldType = FieldToInit->Desc->getType();
+        if (std::optional<PrimType> T = classify(FieldType)) {
+          if (!this->visitZeroInitializer(*T, FieldType, E))
+            return false;
+          if (!this->emitInitField(*T, FieldToInit->Offset, E))
+            return false;
+        }
+        // FIXME: Non-primitive case?
+      } else {
+        const Expr *Init = Inits[0];
+        const FieldDecl *FToInit = nullptr;
+        if (const auto *ILE = dyn_cast<InitListExpr>(E))
+          FToInit = ILE->getInitializedFieldInUnion();
+        else
+          FToInit = cast<CXXParenListInitExpr>(E)->getInitializedFieldInUnion();
+
+        const Record::Field *FieldToInit = R->getField(FToInit);
+        if (std::optional<PrimType> T = classify(Init)) {
+          if (!initPrimitiveField(FieldToInit, Init, *T))
+            return false;
+        } else {
+          if (!initCompositeField(FieldToInit, Init))
+            return false;
+        }
+      }
+      return this->emitFinishInit(E);
+    }
+
+    assert(!R->isUnion());
+    unsigned InitIndex = 0;
+    for (const Expr *Init : Inits) {
+      // Skip unnamed bitfields.
+      while (InitIndex < R->getNumFields() &&
+             R->getField(InitIndex)->Decl->isUnnamedBitField())
+        ++InitIndex;
+
+      if (std::optional<PrimType> T = classify(Init)) {
+        const Record::Field *FieldToInit = R->getField(InitIndex);
+        if (!initPrimitiveField(FieldToInit, Init, *T))
+          return false;
+        ++InitIndex;
+      } else {
+        // Initializer for a direct base class.
+        if (const Record::Base *B = R->getBase(Init->getType())) {
+          if (!this->emitGetPtrBase(B->Offset, Init))
+            return false;
+
+          if (!this->visitInitializer(Init))
+            return false;
+
+          if (!this->emitFinishInitPop(E))
+            return false;
+          // Base initializers don't increase InitIndex, since they don't count
+          // into the Record's fields.
+        } else {
+          const Record::Field *FieldToInit = R->getField(InitIndex);
+          if (!initCompositeField(FieldToInit, Init))
+            return false;
+          ++InitIndex;
+        }
+      }
+    }
+    return this->emitFinishInit(E);
+  }
+
+  if (QT->isArrayType()) {
+    if (Inits.size() == 1 && QT == Inits[0]->getType())
+      return this->delegate(Inits[0]);
+
+    unsigned ElementIndex = 0;
+    for (const Expr *Init : Inits) {
+      // #embed data expands into multiple elements; each one is cast to
+      // the target element type if needed.
+      if (const auto *EmbedS =
+              dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
+        PrimType TargetT = classifyPrim(Init->getType());
+
+        auto Eval = [&](const Expr *Init, unsigned ElemIndex) {
+          PrimType InitT = classifyPrim(Init->getType());
+          if (!this->visit(Init))
+            return false;
+          if (InitT != TargetT) {
+            if (!this->emitCast(InitT, TargetT, E))
+              return false;
+          }
+          return this->emitInitElem(TargetT, ElemIndex, Init);
+        };
+        if (!EmbedS->doForEachDataElement(Eval, ElementIndex))
+          return false;
+      } else {
+        if (!this->visitArrayElemInit(ElementIndex, Init))
+          return false;
+        ++ElementIndex;
+      }
+    }
+
+    // Expand the filler expression.
+    // FIXME: This should go away.
+    if (ArrayFiller) {
+      const ConstantArrayType *CAT =
+          Ctx.getASTContext().getAsConstantArrayType(QT);
+      uint64_t NumElems = CAT->getZExtSize();
+
+      for (; ElementIndex != NumElems; ++ElementIndex) {
+        if (!this->visitArrayElemInit(ElementIndex, ArrayFiller))
+          return false;
+      }
+    }
+
+    return this->emitFinishInit(E);
+  }
+
+  if (const auto *ComplexTy = QT->getAs<ComplexType>()) {
+    unsigned NumInits = Inits.size();
+
+    if (NumInits == 1)
+      return this->delegate(Inits[0]);
+
+    QualType ElemQT = ComplexTy->getElementType();
+    PrimType ElemT = classifyPrim(ElemQT);
+    if (NumInits == 0) {
+      // Zero-initialize both elements.
+      for (unsigned I = 0; I < 2; ++I) {
+        if (!this->visitZeroInitializer(ElemT, ElemQT, E))
+          return false;
+        if (!this->emitInitElem(ElemT, I, E))
+          return false;
+      }
+    } else if (NumInits == 2) {
+      unsigned InitIndex = 0;
+      for (const Expr *Init : Inits) {
+        if (!this->visit(Init))
+          return false;
+
+        if (!this->emitInitElem(ElemT, InitIndex, E))
+          return false;
+        ++InitIndex;
+      }
+    }
+    return true;
+  }
+
+  if (const auto *VecT = QT->getAs<VectorType>()) {
+    unsigned NumVecElements = VecT->getNumElements();
+    assert(NumVecElements >= Inits.size());
+
+    QualType ElemQT = VecT->getElementType();
+    PrimType ElemT = classifyPrim(ElemQT);
+
+    // All initializer elements.
+    unsigned InitIndex = 0;
+    for (const Expr *Init : Inits) {
+      if (!this->visit(Init))
+        return false;
+
+      // If the initializer is of vector type itself, we have to deconstruct
+      // that and initialize all the target fields from the initializer fields.
+      if (const auto *InitVecT = Init->getType()->getAs<VectorType>()) {
+        if (!this->emitCopyArray(ElemT, 0, InitIndex,
+                                 InitVecT->getNumElements(), E))
+          return false;
+        InitIndex += InitVecT->getNumElements();
+      } else {
+        if (!this->emitInitElem(ElemT, InitIndex, E))
+          return false;
+        ++InitIndex;
+      }
+    }
+
+    assert(InitIndex <= NumVecElements);
+
+    // Fill the rest with zeroes.
+    for (; InitIndex != NumVecElements; ++InitIndex) {
+      if (!this->visitZeroInitializer(ElemT, ElemQT, E))
+        return false;
+      if (!this->emitInitElem(ElemT, InitIndex, E))
+        return false;
+    }
+    return true;
+  }
+
+  return false;
+}
+
+/// Pointer to the array(not the element!) must be on the stack when calling
+/// this.
+///
+/// Initializes element \p ElemIndex of that array from \p Init. Primitive
+/// elements are evaluated and stored with InitElem; composite elements get
+/// an element pointer pushed, are initialized in place, and the pointer is
+/// popped again via FinishInitPop.
+template <class Emitter>
+bool Compiler<Emitter>::visitArrayElemInit(unsigned ElemIndex,
+                                           const Expr *Init) {
+  if (std::optional<PrimType> T = classify(Init->getType())) {
+    // Visit the primitive element like normal.
+    if (!this->visit(Init))
+      return false;
+    return this->emitInitElem(*T, ElemIndex, Init);
+  }
+
+  // Advance the pointer currently on the stack to the given
+  // dimension.
+  if (!this->emitConstUint32(ElemIndex, Init))
+    return false;
+  if (!this->emitArrayElemPtrUint32(Init))
+    return false;
+  if (!this->visitInitializer(Init))
+    return false;
+  return this->emitFinishInitPop(Init);
+}
+
+// Braced initializer list; shares all the real work with
+// CXXParenListInitExpr via visitInitList().
+template <class Emitter>
+bool Compiler<Emitter>::VisitInitListExpr(const InitListExpr *E) {
+  return this->visitInitList(E->inits(), E->getArrayFiller(), E);
+}
+
+// C++20 parenthesized aggregate initialization, e.g. Agg(1, 2).
+// Same handling as InitListExpr, just a different init-expression source.
+template <class Emitter>
+bool Compiler<Emitter>::VisitCXXParenListInitExpr(
+    const CXXParenListInitExpr *E) {
+  return this->visitInitList(E->getInitExprs(), E->getArrayFiller(), E);
+}
+
+// A non-type template parameter that has been substituted; simply compile
+// the replacement expression in the current compilation context.
+template <class Emitter>
+bool Compiler<Emitter>::VisitSubstNonTypeTemplateParmExpr(
+    const SubstNonTypeTemplateParmExpr *E) {
+  return this->delegate(E->getReplacement());
+}
+
+// A ConstantExpr wraps an already-evaluated constant. If Sema cached an
+// APValue result for it and the type is primitive, emit that value directly
+// instead of re-compiling the subexpression; otherwise fall back to the
+// subexpression.
+template <class Emitter>
+bool Compiler<Emitter>::VisitConstantExpr(const ConstantExpr *E) {
+  std::optional<PrimType> T = classify(E->getType());
+  if (T && E->hasAPValueResult()) {
+    // Try to emit the APValue directly, without visiting the subexpr.
+    // This will only fail if we can't emit the APValue, so won't emit any
+    // diagnostics or any double values.
+    if (DiscardResult)
+      return true;
+
+    if (this->visitAPValue(E->getAPValueResult(), *T, E))
+      return true;
+  }
+  return this->delegate(E->getSubExpr());
+}
+
+// #embed used as a single expression: evaluate only the first data element.
+// (Multi-element expansion inside initializer lists is handled separately in
+// visitInitList.)
+template <class Emitter>
+bool Compiler<Emitter>::VisitEmbedExpr(const EmbedExpr *E) {
+  auto It = E->begin();
+  return this->visit(*It);
+}
+
+/// Computes the alignment value for alignof/_Alignof/__alignof on a type,
+/// honoring references, the 'unaligned' qualifier, and pre-Clang-8 ABI
+/// compatibility (where alignof also returned the preferred alignment).
+static CharUnits AlignOfType(QualType T, const ASTContext &ASTCtx,
+                             UnaryExprOrTypeTrait Kind) {
+  bool AlignOfReturnsPreferred =
+      ASTCtx.getLangOpts().getClangABICompat() <= LangOptions::ClangABI::Ver7;
+
+  // C++ [expr.alignof]p3:
+  //     When alignof is applied to a reference type, the result is the
+  //     alignment of the referenced type.
+  if (const auto *Ref = T->getAs<ReferenceType>())
+    T = Ref->getPointeeType();
+
+  // __unaligned-qualified types always report an alignment of one byte.
+  if (T.getQualifiers().hasUnaligned())
+    return CharUnits::One();
+
+  // __alignof is defined to return the preferred alignment.
+  // Before 8, clang returned the preferred alignment for alignof and
+  // _Alignof as well.
+  if (Kind == UETT_PreferredAlignOf || AlignOfReturnsPreferred)
+    return ASTCtx.toCharUnitsFromBits(ASTCtx.getPreferredTypeAlign(T));
+
+  return ASTCtx.getTypeAlignInChars(T);
+}
+
+// Handles sizeof, __datasizeof, alignof/__alignof, __builtin_vectorelements
+// and vec_step. All of these fold to a compile-time constant emitted via
+// emitConst; unsupported kinds return false.
+template <class Emitter>
+bool Compiler<Emitter>::VisitUnaryExprOrTypeTraitExpr(
+    const UnaryExprOrTypeTraitExpr *E) {
+  UnaryExprOrTypeTrait Kind = E->getKind();
+  const ASTContext &ASTCtx = Ctx.getASTContext();
+
+  if (Kind == UETT_SizeOf || Kind == UETT_DataSizeOf) {
+    QualType ArgType = E->getTypeOfArgument();
+
+    // C++ [expr.sizeof]p2: "When applied to a reference or a reference type,
+    //   the result is the size of the referenced type."
+    if (const auto *Ref = ArgType->getAs<ReferenceType>())
+      ArgType = Ref->getPointeeType();
+
+    CharUnits Size;
+    if (ArgType->isVoidType() || ArgType->isFunctionType())
+      // GNU extension: sizeof(void) and sizeof(function) are 1.
+      Size = CharUnits::One();
+    else {
+      // VLAs and dependent types have no constant size we can emit.
+      if (ArgType->isDependentType() || !ArgType->isConstantSizeType())
+        return false;
+
+      if (Kind == UETT_SizeOf)
+        Size = ASTCtx.getTypeSizeInChars(ArgType);
+      else
+        // __datasizeof excludes tail padding.
+        Size = ASTCtx.getTypeInfoDataSizeInChars(ArgType).Width;
+    }
+
+    if (DiscardResult)
+      return true;
+
+    return this->emitConst(Size.getQuantity(), E);
+  }
+
+  if (Kind == UETT_AlignOf || Kind == UETT_PreferredAlignOf) {
+    CharUnits Size;
+
+    if (E->isArgumentType()) {
+      QualType ArgType = E->getTypeOfArgument();
+
+      Size = AlignOfType(ArgType, ASTCtx, Kind);
+    } else {
+      // Argument is an expression, not a type.
+      const Expr *Arg = E->getArgumentExpr()->IgnoreParens();
+
+      // The kinds of expressions that we have special-case logic here for
+      // should be kept up to date with the special checks for those
+      // expressions in Sema.
+
+      // alignof decl is always accepted, even if it doesn't make sense: we
+      // default to 1 in those cases.
+      if (const auto *DRE = dyn_cast<DeclRefExpr>(Arg))
+        Size = ASTCtx.getDeclAlign(DRE->getDecl(),
+                                   /*RefAsPointee*/ true);
+      else if (const auto *ME = dyn_cast<MemberExpr>(Arg))
+        Size = ASTCtx.getDeclAlign(ME->getMemberDecl(),
+                                   /*RefAsPointee*/ true);
+      else
+        Size = AlignOfType(Arg->getType(), ASTCtx, Kind);
+    }
+
+    if (DiscardResult)
+      return true;
+
+    return this->emitConst(Size.getQuantity(), E);
+  }
+
+  if (Kind == UETT_VectorElements) {
+    if (const auto *VT = E->getTypeOfArgument()->getAs<VectorType>())
+      return this->emitConst(VT->getNumElements(), E);
+    // Scalable (sizeless) vectors need a runtime-diagnosed opcode.
+    assert(E->getTypeOfArgument()->isSizelessVectorType());
+    return this->emitSizelessVectorElementSize(E);
+  }
+
+  if (Kind == UETT_VecStep) {
+    if (const auto *VT = E->getTypeOfArgument()->getAs<VectorType>()) {
+      unsigned N = VT->getNumElements();
+
+      // The vec_step built-in functions that take a 3-component
+      // vector return 4. (OpenCL 1.1 spec 6.11.12)
+      if (N == 3)
+        N = 4;
+
+      return this->emitConst(N, E);
+    }
+    // Non-vector types have a vec_step of 1.
+    return this->emitConst(1, E);
+  }
+
+  return false;
+}
+
+// 'Base.Member' access. Leaves a pointer to the member field on the stack
+// (or the loaded value, for rvalue MemberExprs). Static data members are
+// resolved as globals; non-field members (e.g. enumerators) are handled via
+// visitDeclRef.
+template <class Emitter>
+bool Compiler<Emitter>::VisitMemberExpr(const MemberExpr *E) {
+  // 'Base.Member'
+  const Expr *Base = E->getBase();
+  const ValueDecl *Member = E->getMemberDecl();
+
+  if (DiscardResult)
+    return this->discard(Base);
+
+  // MemberExprs are almost always lvalues, in which case we don't need to
+  // do the load. But sometimes they aren't.
+  const auto maybeLoadValue = [&]() -> bool {
+    if (E->isGLValue())
+      return true;
+    if (std::optional<PrimType> T = classify(E))
+      return this->emitLoadPop(*T, E);
+    return false;
+  };
+
+  if (const auto *VD = dyn_cast<VarDecl>(Member)) {
+    // I am almost confident in saying that a var decl must be static
+    // and therefore registered as a global variable. But this will probably
+    // turn out to be wrong some time in the future, as always.
+    if (auto GlobalIndex = P.getGlobal(VD))
+      return this->emitGetPtrGlobal(*GlobalIndex, E) && maybeLoadValue();
+    return false;
+  }
+
+  if (!isa<FieldDecl>(Member))
+    return this->discard(Base) && this->visitDeclRef(Member, E);
+
+  // When initializing, the base pointer is already on the stack; otherwise
+  // compile the base expression to produce it.
+  if (Initializing) {
+    if (!this->delegate(Base))
+      return false;
+  } else {
+    if (!this->visit(Base))
+      return false;
+  }
+
+  // Base above gives us a pointer on the stack.
+  const auto *FD = cast<FieldDecl>(Member);
+  const RecordDecl *RD = FD->getParent();
+  const Record *R = getRecord(RD);
+  if (!R)
+    return false;
+  const Record::Field *F = R->getField(FD);
+  // Leave a pointer to the field on the stack.
+  // Reference-typed fields store a pointer, so load that pointer instead of
+  // pointing at the field slot itself.
+  if (F->Decl->getType()->isReferenceType())
+    return this->emitGetFieldPop(PT_Ptr, F->Offset, E) && maybeLoadValue();
+  return this->emitGetPtrFieldPop(F->Offset, E) && maybeLoadValue();
+}
+
+// The implicit current-index placeholder inside an ArrayInitLoopExpr body.
+// Emits the index cached in the ArrayIndex member by ArrayIndexScope.
+template <class Emitter>
+bool Compiler<Emitter>::VisitArrayInitIndexExpr(const ArrayInitIndexExpr *E) {
+  // ArrayIndex might not be set if a ArrayInitIndexExpr is being evaluated
+  // stand-alone, e.g. via EvaluateAsInt().
+  if (!ArrayIndex)
+    return false;
+  return this->emitConst(*ArrayIndex, E);
+}
+
+// Per-element array initialization loop (e.g. implicit copies of array
+// members). The loop is fully unrolled at compile time: the subexpression
+// is compiled once per element with ArrayIndex bound to that element.
+// Expects the target array pointer on the stack (Initializing).
+template <class Emitter>
+bool Compiler<Emitter>::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) {
+  assert(Initializing);
+  assert(!DiscardResult);
+
+  // We visit the common opaque expression here once so we have its value
+  // cached.
+  if (!this->discard(E->getCommonExpr()))
+    return false;
+
+  // TODO: This compiles to quite a lot of bytecode if the array is larger.
+  // Investigate compiling this to a loop.
+  const Expr *SubExpr = E->getSubExpr();
+  size_t Size = E->getArraySize().getZExtValue();
+
+  // So, every iteration, we execute an assignment here
+  // where the LHS is on the stack (the target array)
+  // and the RHS is our SubExpr.
+  for (size_t I = 0; I != Size; ++I) {
+    ArrayIndexScope<Emitter> IndexScope(this, I);
+    BlockScope<Emitter> BS(this);
+
+    if (!this->visitArrayElemInit(I, SubExpr))
+      return false;
+    // Destroy any temporaries created for this element before moving on.
+    if (!BS.destroyLocals())
+      return false;
+  }
+  return true;
+}
+
+// An OpaqueValueExpr stands for a value that may be referenced multiple
+// times (e.g. the common expression of a binary conditional). The first
+// visit evaluates the source expression into a local; subsequent visits of
+// the same OVE reuse that local via the OpaqueExprs cache.
+template <class Emitter>
+bool Compiler<Emitter>::VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
+  const Expr *SourceExpr = E->getSourceExpr();
+  if (!SourceExpr)
+    return false;
+
+  if (Initializing)
+    return this->visitInitializer(SourceExpr);
+
+  // Non-primitive values are represented by a pointer on the stack.
+  PrimType SubExprT = classify(SourceExpr).value_or(PT_Ptr);
+  if (auto It = OpaqueExprs.find(E); It != OpaqueExprs.end())
+    return this->emitGetLocal(SubExprT, It->second, E);
+
+  if (!this->visit(SourceExpr))
+    return false;
+
+  // At this point we either have the evaluated source expression or a pointer
+  // to an object on the stack. We want to create a local variable that stores
+  // this value.
+  unsigned LocalIndex = allocateLocalPrimitive(E, SubExprT, /*IsConst=*/true);
+  if (!this->emitSetLocal(SubExprT, LocalIndex, E))
+    return false;
+
+  // Here the local variable is created but the value is removed from the stack,
+  // so we put it back if the caller needs it.
+  if (!DiscardResult) {
+    if (!this->emitGetLocal(SubExprT, LocalIndex, E))
+      return false;
+  }
+
+  // This is cleaned up when the local variable is destroyed.
+  OpaqueExprs.insert({E, LocalIndex});
+
+  return true;
+}
+
+// Ternary (?:) and GNU binary conditional operators, compiled as:
+//   <cond>; jumpFalse LFalse; <true>; jump LEnd; LFalse: <false>; LEnd:
+// Both arms are compiled with delegate() so the caller's result-handling
+// mode (discard/initialize/value) applies to whichever arm is taken.
+template <class Emitter>
+bool Compiler<Emitter>::VisitAbstractConditionalOperator(
+    const AbstractConditionalOperator *E) {
+  const Expr *Condition = E->getCond();
+  const Expr *TrueExpr = E->getTrueExpr();
+  const Expr *FalseExpr = E->getFalseExpr();
+
+  LabelTy LabelEnd = this->getLabel();   // Label after the operator.
+  LabelTy LabelFalse = this->getLabel(); // Label for the false expr.
+
+  if (!this->visitBool(Condition))
+    return false;
+
+  if (!this->jumpFalse(LabelFalse))
+    return false;
+
+  if (!this->delegate(TrueExpr))
+    return false;
+  if (!this->jump(LabelEnd))
+    return false;
+
+  this->emitLabel(LabelFalse);
+
+  if (!this->delegate(FalseExpr))
+    return false;
+
+  this->fallthrough(LabelEnd);
+  this->emitLabel(LabelEnd);
+
+  return true;
+}
+
+// String literals. As a standalone value, the literal becomes a global and
+// we push a pointer to it. When initializing a char array on the stack, the
+// code units are written element by element, NUL-padding up to the array
+// size and truncating if the literal is longer than the array.
+template <class Emitter>
+bool Compiler<Emitter>::VisitStringLiteral(const StringLiteral *E) {
+  if (DiscardResult)
+    return true;
+
+  if (!Initializing) {
+    unsigned StringIndex = P.createGlobalString(E);
+    return this->emitGetPtrGlobal(StringIndex, E);
+  }
+
+  // We are initializing an array on the stack.
+  const ConstantArrayType *CAT =
+      Ctx.getASTContext().getAsConstantArrayType(E->getType());
+  assert(CAT && "a string literal that's not a constant array?");
+
+  // If the initializer string is too long, a diagnostic has already been
+  // emitted. Read only the array length from the string literal.
+  unsigned ArraySize = CAT->getZExtSize();
+  unsigned N = std::min(ArraySize, E->getLength());
+  size_t CharWidth = E->getCharByteWidth();
+
+  // Store each code unit with the element type matching its byte width.
+  for (unsigned I = 0; I != N; ++I) {
+    uint32_t CodeUnit = E->getCodeUnit(I);
+
+    if (CharWidth == 1) {
+      this->emitConstSint8(CodeUnit, E);
+      this->emitInitElemSint8(I, E);
+    } else if (CharWidth == 2) {
+      this->emitConstUint16(CodeUnit, E);
+      this->emitInitElemUint16(I, E);
+    } else if (CharWidth == 4) {
+      this->emitConstUint32(CodeUnit, E);
+      this->emitInitElemUint32(I, E);
+    } else {
+      llvm_unreachable("unsupported character width");
+    }
+  }
+
+  // Fill up the rest of the char array with NUL bytes.
+  for (unsigned I = N; I != ArraySize; ++I) {
+    if (CharWidth == 1) {
+      this->emitConstSint8(0, E);
+      this->emitInitElemSint8(I, E);
+    } else if (CharWidth == 2) {
+      this->emitConstUint16(0, E);
+      this->emitInitElemUint16(I, E);
+    } else if (CharWidth == 4) {
+      this->emitConstUint32(0, E);
+      this->emitInitElemUint32(I, E);
+    } else {
+      llvm_unreachable("unsupported character width");
+    }
+  }
+
+  return true;
+}
+
+// @"..." literal: compile the underlying C string literal.
+template <class Emitter>
+bool Compiler<Emitter>::VisitObjCStringLiteral(const ObjCStringLiteral *E) {
+  return this->delegate(E->getString());
+}
+
+// @encode(T): materialize the Objective-C type encoding as an ordinary
+// string literal and compile that.
+template <class Emitter>
+bool Compiler<Emitter>::VisitObjCEncodeExpr(const ObjCEncodeExpr *E) {
+  auto &A = Ctx.getASTContext();
+  std::string Str;
+  A.getObjCEncodingForType(E->getEncodedType(), Str);
+  StringLiteral *SL =
+      StringLiteral::Create(A, Str, StringLiteralKind::Ordinary,
+                            /*Pascal=*/false, E->getType(), E->getAtLoc());
+  return this->delegate(SL);
+}
+
+// __builtin_sycl_unique_stable_name: compute the mangled name string, wrap
+// it in a synthetic const-char-array string literal stored as a global, and
+// push a pointer to it.
+template <class Emitter>
+bool Compiler<Emitter>::VisitSYCLUniqueStableNameExpr(
+    const SYCLUniqueStableNameExpr *E) {
+  if (DiscardResult)
+    return true;
+
+  assert(!Initializing);
+
+  auto &A = Ctx.getASTContext();
+  std::string ResultStr = E->ComputeName(A);
+
+  // Build the array type: const char[len + 1] (room for the terminating NUL).
+  QualType CharTy = A.CharTy.withConst();
+  APInt Size(A.getTypeSize(A.getSizeType()), ResultStr.size() + 1);
+  QualType ArrayTy = A.getConstantArrayType(CharTy, Size, nullptr,
+                                            ArraySizeModifier::Normal, 0);
+
+  StringLiteral *SL =
+      StringLiteral::Create(A, ResultStr, StringLiteralKind::Ordinary,
+                            /*Pascal=*/false, ArrayTy, E->getLocation());
+
+  unsigned StringIndex = P.createGlobalString(SL);
+  return this->emitGetPtrGlobal(StringIndex, E);
+}
+
+// Character literal: just push its value as a constant.
+template <class Emitter>
+bool Compiler<Emitter>::VisitCharacterLiteral(const CharacterLiteral *E) {
+  if (DiscardResult)
+    return true;
+  return this->emitConst(E->getValue(), E);
+}
+
+// Floating-point compound assignment (+=, -=, *=, /=). Split out from the
+// integer path because the arithmetic opcodes take a rounding mode and the
+// LHS must be converted to/from the computation type explicitly.
+// Stack discipline: RHS is evaluated first (C++17 sequencing) into a temp
+// local, then the LHS pointer is produced, its value loaded, the operation
+// applied, and the result stored back through the still-present pointer.
+template <class Emitter>
+bool Compiler<Emitter>::VisitFloatCompoundAssignOperator(
+    const CompoundAssignOperator *E) {
+
+  const Expr *LHS = E->getLHS();
+  const Expr *RHS = E->getRHS();
+  QualType LHSType = LHS->getType();
+  QualType LHSComputationType = E->getComputationLHSType();
+  QualType ResultType = E->getComputationResultType();
+  std::optional<PrimType> LT = classify(LHSComputationType);
+  std::optional<PrimType> RT = classify(ResultType);
+
+  assert(ResultType->isFloatingType());
+
+  if (!LT || !RT)
+    return false;
+
+  PrimType LHST = classifyPrim(LHSType);
+
+  // C++17 onwards require that we evaluate the RHS first.
+  // Compute RHS and save it in a temporary variable so we can
+  // load it again later.
+  if (!visit(RHS))
+    return false;
+
+  unsigned TempOffset = this->allocateLocalPrimitive(E, *RT, /*IsConst=*/true);
+  if (!this->emitSetLocal(*RT, TempOffset, E))
+    return false;
+
+  // First, visit LHS.
+  if (!visit(LHS))
+    return false;
+  if (!this->emitLoad(LHST, E))
+    return false;
+
+  // If necessary, convert LHS to its computation type.
+  if (!this->emitPrimCast(LHST, classifyPrim(LHSComputationType),
+                          LHSComputationType, E))
+    return false;
+
+  // Now load RHS.
+  if (!this->emitGetLocal(*RT, TempOffset, E))
+    return false;
+
+  llvm::RoundingMode RM = getRoundingMode(E);
+  switch (E->getOpcode()) {
+  case BO_AddAssign:
+    if (!this->emitAddf(RM, E))
+      return false;
+    break;
+  case BO_SubAssign:
+    if (!this->emitSubf(RM, E))
+      return false;
+    break;
+  case BO_MulAssign:
+    if (!this->emitMulf(RM, E))
+      return false;
+    break;
+  case BO_DivAssign:
+    if (!this->emitDivf(RM, E))
+      return false;
+    break;
+  default:
+    return false;
+  }
+
+  // Convert the result back to the LHS's declared type before storing.
+  if (!this->emitPrimCast(classifyPrim(ResultType), LHST, LHS->getType(), E))
+    return false;
+
+  // Store keeps the pointer on the stack when the result is used; StorePop
+  // drops it when the expression result is discarded.
+  if (DiscardResult)
+    return this->emitStorePop(LHST, E);
+  return this->emitStore(LHST, E);
+}
+
+// Pointer compound assignment: only ptr += int and ptr -= int are valid.
+// Loads the pointer, applies AddOffset/SubOffset with the integer RHS, and
+// stores the adjusted pointer back.
+template <class Emitter>
+bool Compiler<Emitter>::VisitPointerCompoundAssignOperator(
+    const CompoundAssignOperator *E) {
+  BinaryOperatorKind Op = E->getOpcode();
+  const Expr *LHS = E->getLHS();
+  const Expr *RHS = E->getRHS();
+  std::optional<PrimType> LT = classify(LHS->getType());
+  std::optional<PrimType> RT = classify(RHS->getType());
+
+  if (Op != BO_AddAssign && Op != BO_SubAssign)
+    return false;
+
+  if (!LT || !RT)
+    return false;
+
+  if (!visit(LHS))
+    return false;
+
+  if (!this->emitLoad(*LT, LHS))
+    return false;
+
+  if (!visit(RHS))
+    return false;
+
+  if (Op == BO_AddAssign) {
+    if (!this->emitAddOffset(*RT, E))
+      return false;
+  } else {
+    if (!this->emitSubOffset(*RT, E))
+      return false;
+  }
+
+  // StorePopPtr drops the LHS pointer when the result is unused.
+  if (DiscardResult)
+    return this->emitStorePopPtr(E);
+  return this->emitStorePtr(E);
+}
+
+// General compound assignment dispatcher. Floating-point and pointer forms
+// are routed to their dedicated handlers; the remaining (integral) case
+// evaluates RHS first into a temp local (C++17 sequencing), loads the LHS,
+// casts to the computation type, applies the opcode, casts to the result
+// type, and stores back — with bit-field-aware store variants.
+template <class Emitter>
+bool Compiler<Emitter>::VisitCompoundAssignOperator(
+    const CompoundAssignOperator *E) {
+
+  const Expr *LHS = E->getLHS();
+  const Expr *RHS = E->getRHS();
+  std::optional<PrimType> LHSComputationT =
+      classify(E->getComputationLHSType());
+  std::optional<PrimType> LT = classify(LHS->getType());
+  std::optional<PrimType> RT = classify(RHS->getType());
+  std::optional<PrimType> ResultT = classify(E->getType());
+
+  // Pre-C++14 constant expressions cannot contain assignments; still
+  // evaluate the operands for diagnostics, then emit an error.
+  if (!Ctx.getLangOpts().CPlusPlus14)
+    return this->visit(RHS) && this->visit(LHS) && this->emitError(E);
+
+  if (!LT || !RT || !ResultT || !LHSComputationT)
+    return false;
+
+  // Handle floating point operations separately here, since they
+  // require special care.
+
+  if (ResultT == PT_Float || RT == PT_Float)
+    return VisitFloatCompoundAssignOperator(E);
+
+  if (E->getType()->isPointerType())
+    return VisitPointerCompoundAssignOperator(E);
+
+  assert(!E->getType()->isPointerType() && "Handled above");
+  assert(!E->getType()->isFloatingType() && "Handled above");
+
+  // C++17 onwards require that we evaluate the RHS first.
+  // Compute RHS and save it in a temporary variable so we can
+  // load it again later.
+  // FIXME: Compound assignments are unsequenced in C, so we might
+  //   have to figure out how to reject them.
+  if (!visit(RHS))
+    return false;
+
+  unsigned TempOffset = this->allocateLocalPrimitive(E, *RT, /*IsConst=*/true);
+
+  if (!this->emitSetLocal(*RT, TempOffset, E))
+    return false;
+
+  // Get LHS pointer, load its value and cast it to the
+  // computation type if necessary.
+  if (!visit(LHS))
+    return false;
+  if (!this->emitLoad(*LT, E))
+    return false;
+  if (LT != LHSComputationT) {
+    if (!this->emitCast(*LT, *LHSComputationT, E))
+      return false;
+  }
+
+  // Get the RHS value on the stack.
+  if (!this->emitGetLocal(*RT, TempOffset, E))
+    return false;
+
+  // Perform operation.
+  switch (E->getOpcode()) {
+  case BO_AddAssign:
+    if (!this->emitAdd(*LHSComputationT, E))
+      return false;
+    break;
+  case BO_SubAssign:
+    if (!this->emitSub(*LHSComputationT, E))
+      return false;
+    break;
+  case BO_MulAssign:
+    if (!this->emitMul(*LHSComputationT, E))
+      return false;
+    break;
+  case BO_DivAssign:
+    if (!this->emitDiv(*LHSComputationT, E))
+      return false;
+    break;
+  case BO_RemAssign:
+    if (!this->emitRem(*LHSComputationT, E))
+      return false;
+    break;
+  case BO_ShlAssign:
+    // Shifts keep the RHS in its own type rather than converting it.
+    if (!this->emitShl(*LHSComputationT, *RT, E))
+      return false;
+    break;
+  case BO_ShrAssign:
+    if (!this->emitShr(*LHSComputationT, *RT, E))
+      return false;
+    break;
+  case BO_AndAssign:
+    if (!this->emitBitAnd(*LHSComputationT, E))
+      return false;
+    break;
+  case BO_XorAssign:
+    if (!this->emitBitXor(*LHSComputationT, E))
+      return false;
+    break;
+  case BO_OrAssign:
+    if (!this->emitBitOr(*LHSComputationT, E))
+      return false;
+    break;
+  default:
+    llvm_unreachable("Unimplemented compound assign operator");
+  }
+
+  // And now cast from LHSComputationT to ResultT.
+  if (ResultT != LHSComputationT) {
+    if (!this->emitCast(*LHSComputationT, *ResultT, E))
+      return false;
+  }
+
+  // And store the result in LHS.
+  if (DiscardResult) {
+    if (LHS->refersToBitField())
+      return this->emitStoreBitFieldPop(*ResultT, E);
+    return this->emitStorePop(*ResultT, E);
+  }
+  if (LHS->refersToBitField())
+    return this->emitStoreBitField(*ResultT, E);
+  return this->emitStore(*ResultT, E);
+}
+
+// ExprWithCleanups: compile the subexpression inside a local scope so any
+// temporaries it creates are destroyed at the end of the full-expression.
+template <class Emitter>
+bool Compiler<Emitter>::VisitExprWithCleanups(const ExprWithCleanups *E) {
+  LocalScope<Emitter> ES(this);
+  const Expr *SubExpr = E->getSubExpr();
+
+  // Explicit cleanup objects (e.g. block captures) are not handled yet.
+  assert(E->getNumObjects() == 0 && "TODO: Implement cleanups");
+
+  return this->delegate(SubExpr) && ES.destroyLocals();
+}
+
+// Materialize a temporary object and leave a pointer to it on the stack.
+// Storage choice: reuse the in-flight object when Initializing; a Program
+// global when compiling a global initializer or the temporary has static
+// storage duration; otherwise a (possibly lifetime-extended) local.
+template <class Emitter>
+bool Compiler<Emitter>::VisitMaterializeTemporaryExpr(
+    const MaterializeTemporaryExpr *E) {
+  const Expr *SubExpr = E->getSubExpr();
+
+  if (Initializing) {
+    // We already have a value, just initialize that.
+    return this->delegate(SubExpr);
+  }
+  // If we don't end up using the materialized temporary anyway, don't
+  // bother creating it.
+  if (DiscardResult)
+    return this->discard(SubExpr);
+
+  // When we're initializing a global variable *or* the storage duration of
+  // the temporary is explicitly static, create a global variable.
+  std::optional<PrimType> SubExprT = classify(SubExpr);
+  bool IsStatic = E->getStorageDuration() == SD_Static;
+  if (GlobalDecl || IsStatic) {
+    std::optional<unsigned> GlobalIndex = P.createGlobal(E);
+    if (!GlobalIndex)
+      return false;
+
+    const LifetimeExtendedTemporaryDecl *TempDecl =
+        E->getLifetimeExtendedTemporaryDecl();
+    if (IsStatic)
+      assert(TempDecl);
+
+    if (SubExprT) {
+      // Primitive temporary: evaluate, store into the global, then push a
+      // pointer to it.
+      if (!this->visit(SubExpr))
+        return false;
+      if (IsStatic) {
+        if (!this->emitInitGlobalTemp(*SubExprT, *GlobalIndex, TempDecl, E))
+          return false;
+      } else {
+        if (!this->emitInitGlobal(*SubExprT, *GlobalIndex, E))
+          return false;
+      }
+      return this->emitGetPtrGlobal(*GlobalIndex, E);
+    }
+
+    // Non-primitive values.
+    if (!this->emitGetPtrGlobal(*GlobalIndex, E))
+      return false;
+    if (!this->visitInitializer(SubExpr))
+      return false;
+    if (IsStatic)
+      return this->emitInitGlobalTempComp(TempDecl, E);
+    return true;
+  }
+
+  // For everyhing else, use local variables.
+  if (SubExprT) {
+    unsigned LocalIndex = allocateLocalPrimitive(
+        SubExpr, *SubExprT, /*IsConst=*/true, /*IsExtended=*/true);
+    if (!this->visit(SubExpr))
+      return false;
+    if (!this->emitSetLocal(*SubExprT, LocalIndex, E))
+      return false;
+    return this->emitGetPtrLocal(LocalIndex, E);
+  } else {
+    // Composite temporary: allocate a local (possibly lifetime-extended by
+    // the extending declaration) and initialize it in place.
+    const Expr *Inner = E->getSubExpr()->skipRValueSubobjectAdjustments();
+    if (std::optional<unsigned> LocalIndex =
+            allocateLocal(Inner, E->getExtendingDecl())) {
+      InitLinkScope<Emitter> ILS(this, InitLink::Temp(*LocalIndex));
+      if (!this->emitGetPtrLocal(*LocalIndex, E))
+        return false;
+      return this->visitInitializer(SubExpr);
+    }
+  }
+  return false;
+}
+
+// Temporary binding (destructor bookkeeping node): nothing extra to emit,
+// just compile the wrapped expression.
+template <class Emitter>
+bool Compiler<Emitter>::VisitCXXBindTemporaryExpr(
+    const CXXBindTemporaryExpr *E) {
+  return this->delegate(E->getSubExpr());
+}
+
+// Compound literal (T){...}. Storage depends on context: the in-flight
+// object when Initializing, a Program global for file-scope literals,
+// otherwise a fresh local. Primitive rvalue literals skip the variable
+// entirely and just produce the value.
+template <class Emitter>
+bool Compiler<Emitter>::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
+  const Expr *Init = E->getInitializer();
+  if (Initializing) {
+    // We already have a value, just initialize that.
+    return this->visitInitializer(Init) && this->emitFinishInit(E);
+  }
+
+  std::optional<PrimType> T = classify(E->getType());
+  if (E->isFileScope()) {
+    // Avoid creating a variable if this is a primitive RValue anyway.
+    if (T && !E->isLValue())
+      return this->delegate(Init);
+
+    if (std::optional<unsigned> GlobalIndex = P.createGlobal(E)) {
+      if (!this->emitGetPtrGlobal(*GlobalIndex, E))
+        return false;
+
+      if (T) {
+        if (!this->visit(Init))
+          return false;
+        return this->emitInitGlobal(*T, *GlobalIndex, E);
+      }
+
+      return this->visitInitializer(Init) && this->emitFinishInit(E);
+    }
+
+    return false;
+  }
+
+  // Otherwise, use a local variable.
+  if (T && !E->isLValue()) {
+    // For primitive types, we just visit the initializer.
+    return this->delegate(Init);
+  } else {
+    unsigned LocalIndex;
+
+    if (T)
+      LocalIndex = this->allocateLocalPrimitive(Init, *T, false, false);
+    else if (std::optional<unsigned> MaybeIndex = this->allocateLocal(Init))
+      LocalIndex = *MaybeIndex;
+    else
+      return false;
+
+    if (!this->emitGetPtrLocal(LocalIndex, E))
+      return false;
+
+    if (T) {
+      if (!this->visit(Init)) {
+        return false;
+      }
+      return this->emitInit(*T, E);
+    } else {
+      if (!this->visitInitializer(Init) || !this->emitFinishInit(E))
+        return false;
+    }
+
+    // Drop the pointer again if the literal's value is unused.
+    if (DiscardResult)
+      return this->emitPopPtr(E);
+    return true;
+  }
+
+  return false;
+}
+
+// Type trait (e.g. __is_trivial): the value was computed by Sema, so emit it
+// as a constant — boolean traits use ConstBool, others a plain constant.
+template <class Emitter>
+bool Compiler<Emitter>::VisitTypeTraitExpr(const TypeTraitExpr *E) {
+  if (DiscardResult)
+    return true;
+  if (E->getType()->isBooleanType())
+    return this->emitConstBool(E->getValue(), E);
+  return this->emitConst(E->getValue(), E);
+}
+
+// Array type trait (__array_rank / __array_extent): emit Sema's precomputed
+// value as a constant.
+template <class Emitter>
+bool Compiler<Emitter>::VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
+  if (DiscardResult)
+    return true;
+  return this->emitConst(E->getValue(), E);
+}
+
+// Lambda expression: the closure object is a record whose fields are the
+// captures. Initialize each field from the corresponding capture-init
+// expression; a null init (e.g. an unevaluated capture) is skipped.
+// Expects the closure object pointer on the stack (Initializing).
+template <class Emitter>
+bool Compiler<Emitter>::VisitLambdaExpr(const LambdaExpr *E) {
+  if (DiscardResult)
+    return true;
+
+  assert(Initializing);
+  const Record *R = P.getOrCreateRecord(E->getLambdaClass());
+
+  auto *CaptureInitIt = E->capture_init_begin();
+  // Initialize all fields (which represent lambda captures) of the
+  // record with their initializers.
+  for (const Record::Field &F : R->fields()) {
+    const Expr *Init = *CaptureInitIt;
+    ++CaptureInitIt;
+
+    if (!Init)
+      continue;
+
+    if (std::optional<PrimType> T = classify(Init)) {
+      // Primitive capture: evaluate and store into the field directly.
+      if (!this->visit(Init))
+        return false;
+
+      if (!this->emitInitField(*T, F.Offset, E))
+        return false;
+    } else {
+      // Composite capture: point at the field and initialize it in place.
+      if (!this->emitGetPtrField(F.Offset, E))
+        return false;
+
+      if (!this->visitInitializer(Init))
+        return false;
+
+      if (!this->emitPopPtr(E))
+        return false;
+    }
+  }
+
+  return true;
+}
+
+// __func__ / __FUNCTION__ / __PRETTY_FUNCTION__: compile the string literal
+// the AST already synthesized for the function name.
+template <class Emitter>
+bool Compiler<Emitter>::VisitPredefinedExpr(const PredefinedExpr *E) {
+  if (DiscardResult)
+    return true;
+
+  return this->delegate(E->getFunctionName());
+}
+
+// throw-expression: never valid in a constant expression. Evaluate the
+// operand for side diagnostics, then emit the Invalid opcode.
+template <class Emitter>
+bool Compiler<Emitter>::VisitCXXThrowExpr(const CXXThrowExpr *E) {
+  if (E->getSubExpr() && !this->discard(E->getSubExpr()))
+    return false;
+
+  return this->emitInvalid(E);
+}
+
+// reinterpret_cast: not permitted in constant expressions. Discard the
+// operand, then emit an InvalidCast that diagnoses at evaluation time.
+template <class Emitter>
+bool Compiler<Emitter>::VisitCXXReinterpretCastExpr(
+    const CXXReinterpretCastExpr *E) {
+  if (!this->discard(E->getSubExpr()))
+    return false;
+
+  return this->emitInvalidCast(CastKind::Reinterpret, E);
+}
+
+// noexcept(expr): the boolean answer was computed by Sema; emit it as a
+// constant.
+template <class Emitter>
+bool Compiler<Emitter>::VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
+  assert(E->getType()->isBooleanType());
+
+  if (DiscardResult)
+    return true;
+  return this->emitConstBool(E->getValue(), E);
+}
+
+// Constructor call. For record types, calls the constructor function on the
+// object whose pointer is on the stack (or a fresh local when the result is
+// discarded), with trivial-copy elision and zero-initialization handling.
+// For arrays of records, the constructor is invoked once per element.
+template <class Emitter>
+bool Compiler<Emitter>::VisitCXXConstructExpr(const CXXConstructExpr *E) {
+  QualType T = E->getType();
+  assert(!classify(T));
+
+  if (T->isRecordType()) {
+    const CXXConstructorDecl *Ctor = E->getConstructor();
+
+    // Trivial copy/move constructor. Avoid copy.
+    if (Ctor->isDefaulted() && Ctor->isCopyOrMoveConstructor() &&
+        Ctor->isTrivial() &&
+        E->getArg(0)->isTemporaryObject(Ctx.getASTContext(),
+                                        T->getAsCXXRecordDecl()))
+      return this->visitInitializer(E->getArg(0));
+
+    // If we're discarding a construct expression, we still need
+    // to allocate a variable and call the constructor and destructor.
+    if (DiscardResult) {
+      if (Ctor->isTrivial())
+        return true;
+      assert(!Initializing);
+      std::optional<unsigned> LocalIndex = allocateLocal(E);
+
+      if (!LocalIndex)
+        return false;
+
+      if (!this->emitGetPtrLocal(*LocalIndex, E))
+        return false;
+    }
+
+    // Zero initialization.
+    if (E->requiresZeroInitialization()) {
+      const Record *R = getRecord(E->getType());
+
+      if (!this->visitZeroRecordInitializer(R, E))
+        return false;
+
+      // If the constructor is trivial anyway, we're done.
+      if (Ctor->isTrivial())
+        return true;
+    }
+
+    const Function *Func = getFunction(Ctor);
+
+    if (!Func)
+      return false;
+
+    assert(Func->hasThisPointer());
+    assert(!Func->hasRVO());
+
+    // The This pointer is already on the stack because this is an initializer,
+    // but we need to dup() so the call() below has its own copy.
+    if (!this->emitDupPtr(E))
+      return false;
+
+    // Constructor arguments.
+    for (const auto *Arg : E->arguments()) {
+      if (!this->visit(Arg))
+        return false;
+    }
+
+    if (Func->isVariadic()) {
+      // Sum the stack size of the arguments beyond the declared parameters
+      // so the callee can clean them up.
+      uint32_t VarArgSize = 0;
+      unsigned NumParams = Func->getNumWrittenParams();
+      for (unsigned I = NumParams, N = E->getNumArgs(); I != N; ++I) {
+        VarArgSize +=
+            align(primSize(classify(E->getArg(I)->getType()).value_or(PT_Ptr)));
+      }
+      if (!this->emitCallVar(Func, VarArgSize, E))
+        return false;
+    } else {
+      if (!this->emitCall(Func, 0, E))
+        return false;
+    }
+
+    // Immediately call the destructor if we have to.
+    if (DiscardResult) {
+      if (!this->emitRecordDestruction(getRecord(E->getType())))
+        return false;
+      if (!this->emitPopPtr(E))
+        return false;
+    }
+    return true;
+  }
+
+  if (T->isArrayType()) {
+    const ConstantArrayType *CAT =
+        Ctx.getASTContext().getAsConstantArrayType(E->getType());
+    if (!CAT)
+      return false;
+
+    size_t NumElems = CAT->getZExtSize();
+    const Function *Func = getFunction(E->getConstructor());
+    if (!Func || !Func->isConstexpr())
+      return false;
+
+    // FIXME(perf): We're calling the constructor once per array element here,
+    //   in the old intepreter we had a special-case for trivial constructors.
+    for (size_t I = 0; I != NumElems; ++I) {
+      // Produce the element pointer that becomes the 'this' of the call.
+      if (!this->emitConstUint64(I, E))
+        return false;
+      if (!this->emitArrayElemPtrUint64(E))
+        return false;
+
+      // Constructor arguments.
+      for (const auto *Arg : E->arguments()) {
+        if (!this->visit(Arg))
+          return false;
+      }
+
+      if (!this->emitCall(Func, 0, E))
+        return false;
+    }
+    return true;
+  }
+
+  return false;
+}
+
+// __builtin_LINE/FILE/FUNCTION/source_location. The APValue is evaluated in
+// the default-argument/default-init context. Integer results become
+// constants; __builtin_source_location produces a pointer to a global
+// struct whose fields are filled from the UnnamedGlobalConstantDecl value.
+template <class Emitter>
+bool Compiler<Emitter>::VisitSourceLocExpr(const SourceLocExpr *E) {
+  if (DiscardResult)
+    return true;
+
+  const APValue Val =
+      E->EvaluateInContext(Ctx.getASTContext(), SourceLocDefaultExpr);
+
+  // Things like __builtin_LINE().
+  if (E->getType()->isIntegerType()) {
+    assert(Val.isInt());
+    const APSInt &I = Val.getInt();
+    return this->emitConst(I, E);
+  }
+  // Otherwise, the APValue is an LValue, with only one element.
+  // Theoretically, we don't need the APValue at all of course.
+  assert(E->getType()->isPointerType());
+  assert(Val.isLValue());
+  const APValue::LValueBase &Base = Val.getLValueBase();
+  if (const Expr *LValueExpr = Base.dyn_cast<const Expr *>())
+    return this->visit(LValueExpr);
+
+  // Otherwise, we have a decl (which is the case for
+  // __builtin_source_location).
+  assert(Base.is<const ValueDecl *>());
+  assert(Val.getLValuePath().size() == 0);
+  const auto *BaseDecl = Base.dyn_cast<const ValueDecl *>();
+  assert(BaseDecl);
+
+  auto *UGCD = cast<UnnamedGlobalConstantDecl>(BaseDecl);
+
+  std::optional<unsigned> GlobalIndex = P.getOrCreateGlobal(UGCD);
+  if (!GlobalIndex)
+    return false;
+
+  if (!this->emitGetPtrGlobal(*GlobalIndex, E))
+    return false;
+
+  // Copy the struct fields of the APValue into the global, one primitive
+  // field at a time.
+  const Record *R = getRecord(E->getType());
+  const APValue &V = UGCD->getValue();
+  for (unsigned I = 0, N = R->getNumFields(); I != N; ++I) {
+    const Record::Field *F = R->getField(I);
+    const APValue &FieldValue = V.getStructField(I);
+
+    PrimType FieldT = classifyPrim(F->Decl->getType());
+
+    if (!this->visitAPValue(FieldValue, FieldT, E))
+      return false;
+    if (!this->emitInitField(FieldT, F->Offset, E))
+      return false;
+  }
+
+  // Leave the pointer to the global on the stack.
+  return true;
+}
+
+// offsetof(...). Array-index components are evaluated on the stack (cast to
+// Sint64); the OffsetOf opcode then consumes them and computes the final
+// offset from the expression's component list.
+template <class Emitter>
+bool Compiler<Emitter>::VisitOffsetOfExpr(const OffsetOfExpr *E) {
+  unsigned N = E->getNumComponents();
+  if (N == 0)
+    return false;
+
+  for (unsigned I = 0; I != N; ++I) {
+    const OffsetOfNode &Node = E->getComponent(I);
+    if (Node.getKind() == OffsetOfNode::Array) {
+      const Expr *ArrayIndexExpr = E->getIndexExpr(Node.getArrayExprIndex());
+      PrimType IndexT = classifyPrim(ArrayIndexExpr->getType());
+
+      // Even when discarding, index expressions must be evaluated for their
+      // side effects.
+      if (DiscardResult) {
+        if (!this->discard(ArrayIndexExpr))
+          return false;
+        continue;
+      }
+
+      if (!this->visit(ArrayIndexExpr))
+        return false;
+      // Cast to Sint64.
+      if (IndexT != PT_Sint64) {
+        if (!this->emitCast(IndexT, PT_Sint64, E))
+          return false;
+      }
+    }
+  }
+
+  if (DiscardResult)
+    return true;
+
+  PrimType T = classifyPrim(E->getType());
+  return this->emitOffsetOf(T, E, E);
+}
+
+/// Compile a value-initialization of a scalar type, e.g. T() — the result is
+/// the zero value of T. Handles primitives directly and _Complex/vector types
+/// element-wise; any other type is rejected (returns false).
+template <class Emitter>
+bool Compiler<Emitter>::VisitCXXScalarValueInitExpr(
+    const CXXScalarValueInitExpr *E) {
+  QualType Ty = E->getType();
+
+  if (DiscardResult || Ty->isVoidType())
+    return true;
+
+  // Primitive scalar: push its zero value directly.
+  if (std::optional<PrimType> T = classify(Ty))
+    return this->visitZeroInitializer(*T, Ty, E);
+
+  if (const auto *CT = Ty->getAs<ComplexType>()) {
+    // If nothing is being initialized, materialize a local to hold the
+    // result and leave a pointer to it on the stack.
+    if (!Initializing) {
+      std::optional<unsigned> LocalIndex = allocateLocal(E);
+      if (!LocalIndex)
+        return false;
+      if (!this->emitGetPtrLocal(*LocalIndex, E))
+        return false;
+    }
+
+    // Initialize both fields to 0.
+    QualType ElemQT = CT->getElementType();
+    PrimType ElemT = classifyPrim(ElemQT);
+
+    for (unsigned I = 0; I != 2; ++I) {
+      if (!this->visitZeroInitializer(ElemT, ElemQT, E))
+        return false;
+      if (!this->emitInitElem(ElemT, I, E))
+        return false;
+    }
+    return true;
+  }
+
+  if (const auto *VT = Ty->getAs<VectorType>()) {
+    // FIXME: Code duplication with the _Complex case above.
+    if (!Initializing) {
+      std::optional<unsigned> LocalIndex = allocateLocal(E);
+      if (!LocalIndex)
+        return false;
+      if (!this->emitGetPtrLocal(*LocalIndex, E))
+        return false;
+    }
+
+    // Initialize all fields to 0.
+    QualType ElemQT = VT->getElementType();
+    PrimType ElemT = classifyPrim(ElemQT);
+
+    for (unsigned I = 0, N = VT->getNumElements(); I != N; ++I) {
+      if (!this->visitZeroInitializer(ElemT, ElemQT, E))
+        return false;
+      if (!this->emitInitElem(ElemT, I, E))
+        return false;
+    }
+    return true;
+  }
+
+  return false;
+}
+
+/// sizeof...(Pack): the pack length is a compile-time constant; just emit it.
+template <class Emitter>
+bool Compiler<Emitter>::VisitSizeOfPackExpr(const SizeOfPackExpr *E) {
+  return this->emitConst(E->getPackLength(), E);
+}
+
+/// _Generic(...): compile only the selected result expression.
+template <class Emitter>
+bool Compiler<Emitter>::VisitGenericSelectionExpr(
+    const GenericSelectionExpr *E) {
+  return this->delegate(E->getResultExpr());
+}
+
+/// __builtin_choose_expr: compile only the chosen sub-expression.
+template <class Emitter>
+bool Compiler<Emitter>::VisitChooseExpr(const ChooseExpr *E) {
+  return this->delegate(E->getChosenSubExpr());
+}
+
+/// Objective-C @YES/@NO literal: emit its boolean value as a constant.
+template <class Emitter>
+bool Compiler<Emitter>::VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
+  if (DiscardResult)
+    return true;
+
+  return this->emitConst(E->getValue(), E);
+}
+
+/// Compile a call to an inherited constructor (`using Base::Base;`).
+/// The caller has already pushed the pointer to the correct base subobject;
+/// we duplicate it as the instance pointer, re-push all parameters of the
+/// current (inheriting) constructor, and call the inherited ctor.
+template <class Emitter>
+bool Compiler<Emitter>::VisitCXXInheritedCtorInitExpr(
+    const CXXInheritedCtorInitExpr *E) {
+  const CXXConstructorDecl *Ctor = E->getConstructor();
+  assert(!Ctor->isTrivial() &&
+         "Trivial CXXInheritedCtorInitExpr, implement. (possible?)");
+  const Function *F = this->getFunction(Ctor);
+  assert(F);
+  assert(!F->hasRVO());
+  assert(F->hasThisPointer());
+
+  if (!this->emitDupPtr(SourceInfo{}))
+    return false;
+
+  // Forward all arguments of the current function (which should be a
+  // constructor itself) to the inherited ctor.
+  // This is necessary because the calling code has pushed the pointer
+  // of the correct base for us already, but the arguments need
+  // to come after.
+  unsigned Offset = align(primSize(PT_Ptr)); // instance pointer.
+  for (const ParmVarDecl *PD : Ctor->parameters()) {
+    // Non-primitive parameters are passed as pointers.
+    PrimType PT = this->classify(PD->getType()).value_or(PT_Ptr);
+
+    if (!this->emitGetParam(PT, Offset, E))
+      return false;
+    Offset += align(primSize(PT));
+  }
+
+  return this->emitCall(F, 0, E);
+}
+
+/// Compile a new-expression. Supports scalar and array allocations, with or
+/// without an initializer; the only accepted placement argument is
+/// std::nothrow. Leaves the allocated pointer on the stack unless the result
+/// is discarded.
+template <class Emitter>
+bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) {
+  assert(classifyPrim(E->getType()) == PT_Ptr);
+  const Expr *Init = E->getInitializer();
+  QualType ElementType = E->getAllocatedType();
+  std::optional<PrimType> ElemT = classify(ElementType);
+  unsigned PlacementArgs = E->getNumPlacementArgs();
+  bool IsNoThrow = false;
+
+  // FIXME: Better diagnostic. diag::note_constexpr_new_placement
+  if (PlacementArgs != 0) {
+    // The only new-placement list we support is of the form (std::nothrow).
+    //
+    // FIXME: There is no restriction on this, but it's not clear that any
+    // other form makes any sense. We get here for cases such as:
+    //
+    //   new (std::align_val_t{N}) X(int)
+    //
+    // (which should presumably be valid only if N is a multiple of
+    // alignof(int), and in any case can't be deallocated unless N is
+    // alignof(X) and X has new-extended alignment).
+    if (PlacementArgs != 1 || !E->getPlacementArg(0)->getType()->isNothrowT())
+      return this->emitInvalid(E);
+
+    // Evaluate the nothrow argument for side effects only.
+    if (!this->discard(E->getPlacementArg(0)))
+      return false;
+    IsNoThrow = true;
+  }
+
+  // Pick the element descriptor: primitive non-array allocations get a
+  // primitive descriptor; composite (or composite-array) allocations get a
+  // type-based one.
+  const Descriptor *Desc;
+  if (ElemT) {
+    if (E->isArray())
+      Desc = nullptr; // We're not going to use it in this case.
+    else
+      Desc = P.createDescriptor(E, *ElemT, Descriptor::InlineDescMD,
+                                /*IsConst=*/false, /*IsTemporary=*/false,
+                                /*IsMutable=*/false);
+  } else {
+    Desc = P.createDescriptor(
+        E, ElementType.getTypePtr(),
+        E->isArray() ? std::nullopt : Descriptor::InlineDescMD,
+        /*IsConst=*/false, /*IsTemporary=*/false, /*IsMutable=*/false, Init);
+  }
+
+  if (E->isArray()) {
+    std::optional<const Expr *> ArraySizeExpr = E->getArraySize();
+    if (!ArraySizeExpr)
+      return false;
+
+    // Strip away implicit no-op/integral casts around the size expression so
+    // we classify the underlying integer type.
+    const Expr *Stripped = *ArraySizeExpr;
+    for (; auto *ICE = dyn_cast<ImplicitCastExpr>(Stripped);
+         Stripped = ICE->getSubExpr())
+      if (ICE->getCastKind() != CK_NoOp &&
+          ICE->getCastKind() != CK_IntegralCast)
+        break;
+
+    PrimType SizeT = classifyPrim(Stripped->getType());
+
+    if (!this->visit(Stripped))
+      return false;
+
+    if (ElemT) {
+      // N primitive elements.
+      if (!this->emitAllocN(SizeT, *ElemT, E, IsNoThrow, E))
+        return false;
+    } else {
+      // N Composite elements.
+      if (!this->emitAllocCN(SizeT, Desc, IsNoThrow, E))
+        return false;
+    }
+
+    if (Init && !this->visitInitializer(Init))
+      return false;
+
+  } else {
+    // Allocate just one element.
+    if (!this->emitAlloc(Desc, E))
+      return false;
+
+    if (Init) {
+      if (ElemT) {
+        // Primitive element: compute the value, then store it through the
+        // allocated pointer.
+        if (!this->visit(Init))
+          return false;
+
+        if (!this->emitInit(*ElemT, E))
+          return false;
+      } else {
+        // Composite.
+        if (!this->visitInitializer(Init))
+          return false;
+      }
+    }
+  }
+
+  if (DiscardResult)
+    return this->emitPopPtr(E);
+
+  return true;
+}
+
+/// Compile a delete-expression: evaluate the pointer operand, then emit the
+/// Free op (array form flagged for delete[]).
+template <class Emitter>
+bool Compiler<Emitter>::VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
+  const Expr *Arg = E->getArgument();
+
+  // Arg must be an lvalue.
+  if (!this->visit(Arg))
+    return false;
+
+  return this->emitFree(E->isArrayForm(), E);
+}
+
+/// Expression trait (e.g. __is_lvalue_expr): already evaluated by Sema,
+/// just emit the boolean result.
+template <class Emitter>
+bool Compiler<Emitter>::VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
+  assert(Ctx.getLangOpts().CPlusPlus);
+  return this->emitConstBool(E->getValue(), E);
+}
+
+/// __uuidof(...): materialize the MSGuidDecl as a global and initialize it
+/// from its APValue, leaving a pointer to the global on the stack.
+template <class Emitter>
+bool Compiler<Emitter>::VisitCXXUuidofExpr(const CXXUuidofExpr *E) {
+  if (DiscardResult)
+    return true;
+  assert(!Initializing);
+
+  const MSGuidDecl *GuidDecl = E->getGuidDecl();
+  const RecordDecl *RD = GuidDecl->getType()->getAsRecordDecl();
+  assert(RD);
+  // If the definition of the result type is incomplete, just return a dummy.
+  // If (and when) that is read from, we will fail, but not now.
+  if (!RD->isCompleteDefinition()) {
+    if (std::optional<unsigned> I = P.getOrCreateDummy(GuidDecl))
+      return this->emitGetPtrGlobal(*I, E);
+    return false;
+  }
+
+  std::optional<unsigned> GlobalIndex = P.getOrCreateGlobal(GuidDecl);
+  if (!GlobalIndex)
+    return false;
+  if (!this->emitGetPtrGlobal(*GlobalIndex, E))
+    return false;
+
+  assert(this->getRecord(E->getType()));
+
+  const APValue &V = GuidDecl->getAsAPValue();
+  // No value available: leave the (uninitialized) global pointer as-is.
+  if (V.getKind() == APValue::None)
+    return true;
+
+  assert(V.isStruct());
+  assert(V.getStructNumBases() == 0);
+  if (!this->visitAPValueInitializer(V, E))
+    return false;
+
+  return this->emitFinishInit(E);
+}
+
+/// requires-expression: satisfaction was already computed; emit it as a bool.
+template <class Emitter>
+bool Compiler<Emitter>::VisitRequiresExpr(const RequiresExpr *E) {
+  assert(classifyPrim(E->getType()) == PT_Bool);
+  if (DiscardResult)
+    return true;
+  return this->emitConstBool(E->isSatisfied(), E);
+}
+
+/// Concept specialization (e.g. `Concept<T>`): satisfaction was already
+/// computed; emit it as a bool.
+template <class Emitter>
+bool Compiler<Emitter>::VisitConceptSpecializationExpr(
+    const ConceptSpecializationExpr *E) {
+  assert(classifyPrim(E->getType()) == PT_Bool);
+  if (DiscardResult)
+    return true;
+  return this->emitConstBool(E->isSatisfied(), E);
+}
+
+/// Rewritten comparison (C++20 operator<=> rewrites): compile the semantic
+/// form Sema produced.
+template <class Emitter>
+bool Compiler<Emitter>::VisitCXXRewrittenBinaryOperator(
+    const CXXRewrittenBinaryOperator *E) {
+  return this->delegate(E->getSemanticForm());
+}
+
+/// Compile a pseudo-object expression by walking its semantic expressions:
+/// the result expression is delegated (its value is the result of E), every
+/// other semantic expression is evaluated for side effects and discarded.
+/// An OpaqueValueExpr as the result is unsupported and fails.
+template <class Emitter>
+bool Compiler<Emitter>::VisitPseudoObjectExpr(const PseudoObjectExpr *E) {
+
+  for (const Expr *SemE : E->semantics()) {
+    if (auto *OVE = dyn_cast<OpaqueValueExpr>(SemE)) {
+      // Unsupported: the result itself is an opaque value.
+      if (SemE == E->getResultExpr())
+        return false;
+
+      // Unique OVEs are evaluated where they are used; nothing to do here.
+      if (OVE->isUnique())
+        continue;
+
+      if (!this->discard(OVE))
+        return false;
+    } else if (SemE == E->getResultExpr()) {
+      if (!this->delegate(SemE))
+        return false;
+    } else {
+      if (!this->discard(SemE))
+        return false;
+    }
+  }
+  return true;
+}
+
+/// Pack indexing (C++26 Pack...[I]): compile the selected pack element.
+template <class Emitter>
+bool Compiler<Emitter>::VisitPackIndexingExpr(const PackIndexingExpr *E) {
+  return this->delegate(E->getSelectedExpr());
+}
+
+/// Recovery expression (contains errors): always emit the error op.
+template <class Emitter>
+bool Compiler<Emitter>::VisitRecoveryExpr(const RecoveryExpr *E) {
+  return this->emitError(E);
+}
+
+/// GNU address-of-label (&&label): allocate a dummy pointer-typed local keyed
+/// by the label and load it, so the expression yields a pointer value.
+template <class Emitter>
+bool Compiler<Emitter>::VisitAddrLabelExpr(const AddrLabelExpr *E) {
+  assert(E->getType()->isVoidPointerType());
+
+  unsigned Offset = allocateLocalPrimitive(
+      E->getLabel(), PT_Ptr, /*IsConst=*/true, /*IsExtended=*/false);
+
+  return this->emitGetLocal(PT_Ptr, Offset, E);
+}
+
+/// __builtin_convertvector: evaluate the source vector into a pointer local,
+/// then read each element, cast it to the destination element type if needed,
+/// and store it into the vector being initialized.
+template <class Emitter>
+bool Compiler<Emitter>::VisitConvertVectorExpr(const ConvertVectorExpr *E) {
+  assert(Initializing);
+  const auto *VT = E->getType()->castAs<VectorType>();
+  QualType ElemType = VT->getElementType();
+  PrimType ElemT = classifyPrim(ElemType);
+  const Expr *Src = E->getSrcExpr();
+  PrimType SrcElemT =
+      classifyPrim(Src->getType()->castAs<VectorType>()->getElementType());
+
+  // Save the source vector pointer to a local so we can index it repeatedly.
+  unsigned SrcOffset = this->allocateLocalPrimitive(Src, PT_Ptr, true, false);
+  if (!this->visit(Src))
+    return false;
+  if (!this->emitSetLocal(PT_Ptr, SrcOffset, E))
+    return false;
+
+  for (unsigned I = 0; I != VT->getNumElements(); ++I) {
+    if (!this->emitGetLocal(PT_Ptr, SrcOffset, E))
+      return false;
+    if (!this->emitArrayElemPop(SrcElemT, I, E))
+      return false;
+    // Convert element type if source and destination differ.
+    if (SrcElemT != ElemT) {
+      if (!this->emitPrimCast(SrcElemT, ElemT, ElemType, E))
+        return false;
+    }
+    if (!this->emitInitElem(ElemT, I, E))
+      return false;
+  }
+
+  return true;
+}
+
+/// __builtin_shufflevector: save both input vectors to locals, then for each
+/// output element pick the source vector and element indicated by the shuffle
+/// mask and store it into the vector being initialized. A mask index of -1
+/// (undefined element) is rejected as invalid in a constant expression.
+template <class Emitter>
+bool Compiler<Emitter>::VisitShuffleVectorExpr(const ShuffleVectorExpr *E) {
+  assert(Initializing);
+  assert(E->getNumSubExprs() > 2);
+
+  const Expr *Vecs[] = {E->getExpr(0), E->getExpr(1)};
+  const VectorType *VT = Vecs[0]->getType()->castAs<VectorType>();
+  PrimType ElemT = classifyPrim(VT->getElementType());
+  unsigned NumInputElems = VT->getNumElements();
+  // Sub-exprs 0 and 1 are the vectors; the rest are mask indices.
+  unsigned NumOutputElems = E->getNumSubExprs() - 2;
+  assert(NumOutputElems > 0);
+
+  // Save both input vectors to a local variable.
+  unsigned VectorOffsets[2];
+  for (unsigned I = 0; I != 2; ++I) {
+    VectorOffsets[I] = this->allocateLocalPrimitive(
+        Vecs[I], PT_Ptr, /*IsConst=*/true, /*IsExtended=*/false);
+    if (!this->visit(Vecs[I]))
+      return false;
+    if (!this->emitSetLocal(PT_Ptr, VectorOffsets[I], E))
+      return false;
+  }
+  for (unsigned I = 0; I != NumOutputElems; ++I) {
+    APSInt ShuffleIndex = E->getShuffleMaskIdx(Ctx.getASTContext(), I);
+    if (ShuffleIndex == -1)
+      return this->emitInvalid(E); // FIXME: Better diagnostic.
+
+    assert(ShuffleIndex < (NumInputElems * 2));
+    // Indices >= NumInputElems refer to the second input vector.
+    if (!this->emitGetLocal(PT_Ptr,
+                            VectorOffsets[ShuffleIndex >= NumInputElems], E))
+      return false;
+    unsigned InputVectorIndex = ShuffleIndex.getZExtValue() % NumInputElems;
+    if (!this->emitArrayElemPop(ElemT, InputVectorIndex, E))
+      return false;
+
+    if (!this->emitInitElem(ElemT, I, E))
+      return false;
+  }
+
+  return true;
+}
+
+/// OpenCL/ext_vector element access like v.xyz / v.x. A single-element
+/// access yields either an element pointer (glvalue) or the element value;
+/// a multi-element swizzle builds a new vector from the selected elements.
+template <class Emitter>
+bool Compiler<Emitter>::VisitExtVectorElementExpr(
+    const ExtVectorElementExpr *E) {
+  const Expr *Base = E->getBase();
+  assert(
+      Base->getType()->isVectorType() ||
+      Base->getType()->getAs<PointerType>()->getPointeeType()->isVectorType());
+
+  SmallVector<uint32_t, 4> Indices;
+  E->getEncodedElementAccess(Indices);
+
+  if (Indices.size() == 1) {
+    if (!this->visit(Base))
+      return false;
+
+    if (E->isGLValue()) {
+      // Produce a pointer to the selected element.
+      if (!this->emitConstUint32(Indices[0], E))
+        return false;
+      return this->emitArrayElemPtrPop(PT_Uint32, E);
+    }
+    // Else, also load the value.
+    return this->emitArrayElemPop(classifyPrim(E->getType()), Indices[0], E);
+  }
+
+  // Create a local variable for the base.
+  unsigned BaseOffset = allocateLocalPrimitive(Base, PT_Ptr, /*IsConst=*/true,
+                                               /*IsExtended=*/false);
+  if (!this->visit(Base))
+    return false;
+  if (!this->emitSetLocal(PT_Ptr, BaseOffset, E))
+    return false;
+
+  // Now the vector variable for the return value.
+  if (!Initializing) {
+    std::optional<unsigned> ResultIndex;
+    ResultIndex = allocateLocal(E);
+    if (!ResultIndex)
+      return false;
+    if (!this->emitGetPtrLocal(*ResultIndex, E))
+      return false;
+  }
+
+  assert(Indices.size() == E->getType()->getAs<VectorType>()->getNumElements());
+
+  // Copy the selected source elements into the result vector, in order.
+  PrimType ElemT =
+      classifyPrim(E->getType()->getAs<VectorType>()->getElementType());
+  uint32_t DstIndex = 0;
+  for (uint32_t I : Indices) {
+    if (!this->emitGetLocal(PT_Ptr, BaseOffset, E))
+      return false;
+    if (!this->emitArrayElemPop(ElemT, I, E))
+      return false;
+    if (!this->emitInitElem(ElemT, DstIndex, E))
+      return false;
+    ++DstIndex;
+  }
+
+  // Leave the result pointer on the stack.
+  assert(!DiscardResult);
+  return true;
+}
+
+/// Objective-C boxed expression (@(...)): only supported when it can act as a
+/// constant initializer; otherwise invalid in a constant expression.
+template <class Emitter>
+bool Compiler<Emitter>::VisitObjCBoxedExpr(const ObjCBoxedExpr *E) {
+  if (!E->isExpressibleAsConstantInitializer())
+    return this->emitInvalid(E);
+
+  return this->delegate(E->getSubExpr());
+}
+
+/// Initialize a std::initializer_list<T> from its backing constant array.
+/// Field 0 is set to the array start; field 1 is either the length (integral
+/// "size" layout) or a pointer one past the last element ("end" layout).
+template <class Emitter>
+bool Compiler<Emitter>::VisitCXXStdInitializerListExpr(
+    const CXXStdInitializerListExpr *E) {
+  const Expr *SubExpr = E->getSubExpr();
+  const ConstantArrayType *ArrayType =
+      Ctx.getASTContext().getAsConstantArrayType(SubExpr->getType());
+  const Record *R = getRecord(E->getType());
+  assert(Initializing);
+  assert(SubExpr->isGLValue());
+
+  // First field: pointer to the first element of the backing array.
+  if (!this->visit(SubExpr))
+    return false;
+  if (!this->emitInitFieldPtr(R->getField(0u)->Offset, E))
+    return false;
+
+  PrimType SecondFieldT = classifyPrim(R->getField(1u)->Decl->getType());
+  if (isIntegralType(SecondFieldT)) {
+    // (begin, size) layout: second field is the element count.
+    if (!this->emitConst(static_cast<APSInt>(ArrayType->getSize()),
+                         SecondFieldT, E))
+      return false;
+    return this->emitInitField(SecondFieldT, R->getField(1u)->Offset, E);
+  }
+  assert(SecondFieldT == PT_Ptr);
+
+  // (begin, end) layout: second field points past the last element.
+  if (!this->emitGetFieldPtr(R->getField(0u)->Offset, E))
+    return false;
+  if (!this->emitConst(static_cast<APSInt>(ArrayType->getSize()), PT_Uint64, E))
+    return false;
+  if (!this->emitArrayElemPtrPop(PT_Uint64, E))
+    return false;
+  return this->emitInitFieldPtr(R->getField(1u)->Offset, E);
+}
+
+/// GNU statement expression ({ ...; result; }): compile all statements; the
+/// designated result statement produces the expression's value (delegated or
+/// discarded depending on DiscardResult).
+template <class Emitter>
+bool Compiler<Emitter>::VisitStmtExpr(const StmtExpr *E) {
+  BlockScope<Emitter> BS(this);
+  StmtExprScope<Emitter> SS(this);
+
+  const CompoundStmt *CS = E->getSubStmt();
+  const Stmt *Result = CS->getStmtExprResult();
+  for (const Stmt *S : CS->body()) {
+    if (S != Result) {
+      if (!this->visitStmt(S))
+        return false;
+      continue;
+    }
+
+    assert(S == Result);
+    if (const Expr *ResultExpr = dyn_cast<Expr>(S)) {
+      if (DiscardResult)
+        return this->discard(ResultExpr);
+      return this->delegate(ResultExpr);
+    }
+
+    // Result statement is not an expression (e.g. a declaration).
+    return this->visitStmt(S);
+  }
+
+  // No result statement was reached; just clean up the block's locals.
+  return BS.destroyLocals();
+}
+
+/// Compile E for its side effects only; no value is left on the stack.
+template <class Emitter> bool Compiler<Emitter>::discard(const Expr *E) {
+  OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/true,
+                             /*NewInitializing=*/false);
+  return this->Visit(E);
+}
+
+/// Compile E while inheriting the current DiscardResult/Initializing flags
+/// (in contrast to visit()/discard(), which set them explicitly).
+template <class Emitter> bool Compiler<Emitter>::delegate(const Expr *E) {
+  if (E->containsErrors())
+    return this->emitError(E);
+
+  // We're basically doing:
+  //   OptionScope<Emitter> Scope(this, DicardResult, Initializing);
+  // but that's unnecessary of course.
+  return this->Visit(E);
+}
+
+/// Compile E so that its value ends up on the stack. Composite-typed rvalues
+/// (no PrimType) get a temporary local allocated and are compiled as an
+/// initializer of that local, leaving a pointer to it on the stack.
+template <class Emitter> bool Compiler<Emitter>::visit(const Expr *E) {
+  if (E->getType().isNull())
+    return false;
+
+  if (E->getType()->isVoidType())
+    return this->discard(E);
+
+  // Create local variable to hold the return value.
+  if (!E->isGLValue() && !E->getType()->isAnyComplexType() &&
+      !classify(E->getType())) {
+    std::optional<unsigned> LocalIndex = allocateLocal(E);
+    if (!LocalIndex)
+      return false;
+
+    if (!this->emitGetPtrLocal(*LocalIndex, E))
+      return false;
+    return this->visitInitializer(E);
+  }
+
+  // Otherwise,we have a primitive return value, produce the value directly
+  // and push it on the stack.
+  OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/false,
+                             /*NewInitializing=*/false);
+  return this->Visit(E);
+}
+
+/// Compile E as the initializer of the composite object whose pointer is
+/// already on top of the stack (sets Initializing = true).
+template <class Emitter>
+bool Compiler<Emitter>::visitInitializer(const Expr *E) {
+  // Only composite (non-primitive) types are initialized this way.
+  assert(!classify(E->getType()));
+
+  if (E->containsErrors())
+    return this->emitError(E);
+
+  OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/false,
+                             /*NewInitializing=*/true);
+  return this->Visit(E);
+}
+
+/// Compile E and convert the result to a boolean on the stack (used for
+/// conditions). Handles complex, pointer, float and integral sources.
+template <class Emitter> bool Compiler<Emitter>::visitBool(const Expr *E) {
+  std::optional<PrimType> T = classify(E->getType());
+  if (!T) {
+    // Convert complex values to bool.
+    if (E->getType()->isAnyComplexType()) {
+      if (!this->visit(E))
+        return false;
+      return this->emitComplexBoolCast(E);
+    }
+    // Other composite types cannot be converted to bool.
+    return false;
+  }
+
+  if (!this->visit(E))
+    return false;
+
+  if (T == PT_Bool)
+    return true;
+
+  // Convert pointers to bool: compare against the null pointer.
+  if (T == PT_Ptr || T == PT_FnPtr) {
+    if (!this->emitNull(*T, nullptr, E))
+      return false;
+    return this->emitNE(*T, E);
+  }
+
+  // Or Floats.
+  if (T == PT_Float)
+    return this->emitCastFloatingIntegralBool(E);
+
+  // Or anything else we can.
+  return this->emitCast(*T, PT_Bool, E);
+}
+
+/// Push the zero value of primitive type T onto the stack. QT supplies the
+/// bit width for arbitrary-precision ints and the semantics for floats.
+template <class Emitter>
+bool Compiler<Emitter>::visitZeroInitializer(PrimType T, QualType QT,
+                                             const Expr *E) {
+  switch (T) {
+  case PT_Bool:
+    return this->emitZeroBool(E);
+  case PT_Sint8:
+    return this->emitZeroSint8(E);
+  case PT_Uint8:
+    return this->emitZeroUint8(E);
+  case PT_Sint16:
+    return this->emitZeroSint16(E);
+  case PT_Uint16:
+    return this->emitZeroUint16(E);
+  case PT_Sint32:
+    return this->emitZeroSint32(E);
+  case PT_Uint32:
+    return this->emitZeroUint32(E);
+  case PT_Sint64:
+    return this->emitZeroSint64(E);
+  case PT_Uint64:
+    return this->emitZeroUint64(E);
+  case PT_IntAP:
+    // Arbitrary-precision ints need the width from the QualType.
+    return this->emitZeroIntAP(Ctx.getBitWidth(QT), E);
+  case PT_IntAPS:
+    return this->emitZeroIntAPS(Ctx.getBitWidth(QT), E);
+  case PT_Ptr:
+    return this->emitNullPtr(nullptr, E);
+  case PT_FnPtr:
+    return this->emitNullFnPtr(nullptr, E);
+  case PT_MemberPtr:
+    return this->emitNullMemberPtr(nullptr, E);
+  case PT_Float: {
+    // Zero in the float semantics of the given type.
+    return this->emitConstFloat(APFloat::getZero(Ctx.getFloatSemantics(QT)), E);
+  }
+  }
+  llvm_unreachable("unknown primitive type");
+}
+
+/// Zero-initialize the record whose pointer is on top of the stack:
+/// primitive fields, primitive/composite arrays, nested records, then all
+/// (non-virtual) bases, recursively.
+template <class Emitter>
+bool Compiler<Emitter>::visitZeroRecordInitializer(const Record *R,
+                                                   const Expr *E) {
+  assert(E);
+  assert(R);
+  // Fields
+  for (const Record::Field &Field : R->fields()) {
+    const Descriptor *D = Field.Desc;
+    if (D->isPrimitive()) {
+      // Primitive field: push zero, store into the field.
+      QualType QT = D->getType();
+      PrimType T = classifyPrim(D->getType());
+      if (!this->visitZeroInitializer(T, QT, E))
+        return false;
+      if (!this->emitInitField(T, Field.Offset, E))
+        return false;
+      continue;
+    }
+
+    // Composite field: navigate into it first.
+    if (!this->emitGetPtrField(Field.Offset, E))
+      return false;
+
+    if (D->isPrimitiveArray()) {
+      QualType ET = D->getElemQualType();
+      PrimType T = classifyPrim(ET);
+      for (uint32_t I = 0, N = D->getNumElems(); I != N; ++I) {
+        if (!this->visitZeroInitializer(T, ET, E))
+          return false;
+        if (!this->emitInitElem(T, I, E))
+          return false;
+      }
+    } else if (D->isCompositeArray()) {
+      const Record *ElemRecord = D->ElemDesc->ElemRecord;
+      assert(D->ElemDesc->ElemRecord);
+      for (uint32_t I = 0, N = D->getNumElems(); I != N; ++I) {
+        // Navigate to element I, zero-init it, pop back to the array.
+        if (!this->emitConstUint32(I, E))
+          return false;
+        if (!this->emitArrayElemPtr(PT_Uint32, E))
+          return false;
+        if (!this->visitZeroRecordInitializer(ElemRecord, E))
+          return false;
+        if (!this->emitPopPtr(E))
+          return false;
+      }
+    } else if (D->isRecord()) {
+      if (!this->visitZeroRecordInitializer(D->ElemRecord, E))
+        return false;
+    } else {
+      assert(false);
+    }
+
+    // Pop the field pointer pushed above.
+    if (!this->emitPopPtr(E))
+      return false;
+  }
+
+  for (const Record::Base &B : R->bases()) {
+    if (!this->emitGetPtrBase(B.Offset, E))
+      return false;
+    if (!this->visitZeroRecordInitializer(B.R, E))
+      return false;
+    if (!this->emitFinishInitPop(E))
+      return false;
+  }
+
+  // FIXME: Virtual bases.
+
+  return true;
+}
+
+/// Emit the integral constant Value as primitive type Ty. Only fixed-width
+/// integral/bool types are valid here; pointer, float and arbitrary-precision
+/// types must go through their dedicated paths.
+template <class Emitter>
+template <typename T>
+bool Compiler<Emitter>::emitConst(T Value, PrimType Ty, const Expr *E) {
+  switch (Ty) {
+  case PT_Sint8:
+    return this->emitConstSint8(Value, E);
+  case PT_Uint8:
+    return this->emitConstUint8(Value, E);
+  case PT_Sint16:
+    return this->emitConstSint16(Value, E);
+  case PT_Uint16:
+    return this->emitConstUint16(Value, E);
+  case PT_Sint32:
+    return this->emitConstSint32(Value, E);
+  case PT_Uint32:
+    return this->emitConstUint32(Value, E);
+  case PT_Sint64:
+    return this->emitConstSint64(Value, E);
+  case PT_Uint64:
+    return this->emitConstUint64(Value, E);
+  case PT_Bool:
+    return this->emitConstBool(Value, E);
+  case PT_Ptr:
+  case PT_FnPtr:
+  case PT_MemberPtr:
+  case PT_Float:
+  case PT_IntAP:
+  case PT_IntAPS:
+    llvm_unreachable("Invalid integral type");
+    break;
+  }
+  llvm_unreachable("unknown primitive type");
+}
+
+/// Convenience overload: derive the primitive type from E's type.
+template <class Emitter>
+template <typename T>
+bool Compiler<Emitter>::emitConst(T Value, const Expr *E) {
+  return this->emitConst(Value, classifyPrim(E->getType()), E);
+}
+
+/// Emit an APSInt constant as primitive type Ty. Arbitrary-precision types
+/// take the APSInt directly; fixed-width types extract a 64-bit value with
+/// the matching signedness.
+template <class Emitter>
+bool Compiler<Emitter>::emitConst(const APSInt &Value, PrimType Ty,
+                                  const Expr *E) {
+  if (Ty == PT_IntAPS)
+    return this->emitConstIntAPS(Value, E);
+  if (Ty == PT_IntAP)
+    return this->emitConstIntAP(Value, E);
+
+  if (Value.isSigned())
+    return this->emitConst(Value.getSExtValue(), Ty, E);
+  return this->emitConst(Value.getZExtValue(), Ty, E);
+}
+
+/// Convenience overload: derive the primitive type from E's type.
+template <class Emitter>
+bool Compiler<Emitter>::emitConst(const APSInt &Value, const Expr *E) {
+  return this->emitConst(Value, classifyPrim(E->getType()), E);
+}
+
+/// Allocate a local slot for a primitive value of type Ty. If Src is a
+/// ValueDecl, it is registered in the Locals map; the local is added to the
+/// current variable scope. Returns the local's stack offset.
+template <class Emitter>
+unsigned Compiler<Emitter>::allocateLocalPrimitive(DeclTy &&Src, PrimType Ty,
+                                                   bool IsConst,
+                                                   bool IsExtended) {
+  // Make sure we don't accidentally register the same decl twice.
+  if (const auto *VD =
+          dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) {
+    assert(!P.getGlobal(VD));
+    assert(!Locals.contains(VD));
+    (void)VD;
+  }
+
+  // FIXME: There are cases where Src.is<Expr*>() is wrong, e.g.
+  // (int){12} in C. Consider using Expr::isTemporaryObject() instead
+  // or isa<MaterializeTemporaryExpr>().
+  Descriptor *D = P.createDescriptor(Src, Ty, Descriptor::InlineDescMD, IsConst,
+                                     Src.is<const Expr *>());
+  Scope::Local Local = this->createLocal(D);
+  if (auto *VD = dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>()))
+    Locals.insert({VD, Local});
+  VarScope->add(Local, IsExtended);
+  return Local.Offset;
+}
+
+/// Allocate a local slot for a composite value described by Src (either a
+/// ValueDecl or an Expr-created temporary). If ExtendingDecl is given, the
+/// local's lifetime is extended to that declaration's scope. Returns the
+/// local's offset, or nullopt if no descriptor could be created.
+template <class Emitter>
+std::optional<unsigned>
+Compiler<Emitter>::allocateLocal(DeclTy &&Src, const ValueDecl *ExtendingDecl) {
+  // Make sure we don't accidentally register the same decl twice.
+  if ([[maybe_unused]] const auto *VD =
+          dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) {
+    assert(!P.getGlobal(VD));
+    assert(!Locals.contains(VD));
+  }
+
+  QualType Ty;
+  const ValueDecl *Key = nullptr;
+  const Expr *Init = nullptr;
+  bool IsTemporary = false;
+  if (auto *VD = dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) {
+    Key = VD;
+    Ty = VD->getType();
+
+    if (const auto *VarD = dyn_cast<VarDecl>(VD))
+      Init = VarD->getInit();
+  }
+  if (auto *E = Src.dyn_cast<const Expr *>()) {
+    // Expr-backed locals are temporaries.
+    IsTemporary = true;
+    Ty = E->getType();
+  }
+
+  Descriptor *D = P.createDescriptor(
+      Src, Ty.getTypePtr(), Descriptor::InlineDescMD, Ty.isConstQualified(),
+      IsTemporary, /*IsMutable=*/false, Init);
+  if (!D)
+    return std::nullopt;
+
+  Scope::Local Local = this->createLocal(D);
+  if (Key)
+    Locals.insert({Key, Local});
+  if (ExtendingDecl)
+    VarScope->addExtended(Local, ExtendingDecl);
+  else
+    VarScope->add(Local, false);
+  return Local.Offset;
+}
+
+/// Return the RecordType of Ty, looking through one level of pointer;
+/// nullptr if Ty is not (a pointer to) a record.
+template <class Emitter>
+const RecordType *Compiler<Emitter>::getRecordTy(QualType Ty) {
+  if (const PointerType *PT = dyn_cast<PointerType>(Ty))
+    return PT->getPointeeType()->getAs<RecordType>();
+  return Ty->getAs<RecordType>();
+}
+
+/// Get (or lazily create) the interpreter Record for Ty; nullptr if Ty is
+/// not a record type.
+template <class Emitter> Record *Compiler<Emitter>::getRecord(QualType Ty) {
+  if (const auto *RecordTy = getRecordTy(Ty))
+    return getRecord(RecordTy->getDecl());
+  return nullptr;
+}
+
+/// Get (or lazily create) the interpreter Record for a RecordDecl.
+template <class Emitter>
+Record *Compiler<Emitter>::getRecord(const RecordDecl *RD) {
+  return P.getOrCreateRecord(RD);
+}
+
+/// Get (or lazily compile) the interpreter Function for FD.
+template <class Emitter>
+const Function *Compiler<Emitter>::getFunction(const FunctionDecl *FD) {
+  return Ctx.getOrCreateFunction(FD);
+}
+
+/// Toplevel expression entry point: compile E and a matching Ret op
+/// (RetVoid / Ret / RetValue depending on E's type), destroying root-scope
+/// locals afterwards.
+template <class Emitter> bool Compiler<Emitter>::visitExpr(const Expr *E) {
+  LocalScope<Emitter> RootScope(this);
+  // Void expressions.
+  if (E->getType()->isVoidType()) {
+    if (!visit(E))
+      return false;
+    return this->emitRetVoid(E) && RootScope.destroyLocals();
+  }
+
+  // Expressions with a primitive return type.
+  if (std::optional<PrimType> T = classify(E)) {
+    if (!visit(E))
+      return false;
+    return this->emitRet(*T, E) && RootScope.destroyLocals();
+  }
+
+  // Expressions with a composite return type.
+  // For us, that means everything we don't
+  // have a PrimType for.
+  if (std::optional<unsigned> LocalOffset = this->allocateLocal(E)) {
+    if (!this->emitGetPtrLocal(*LocalOffset, E))
+      return false;
+
+    if (!visitInitializer(E))
+      return false;
+
+    if (!this->emitFinishInit(E))
+      return false;
+    // We are destroying the locals AFTER the Ret op.
+    // The Ret op needs to copy the (alive) values, but the
+    // destructors may still turn the entire expression invalid.
+    return this->emitRetValue(E) && RootScope.destroyLocals();
+  }
+
+  // Could not allocate a local for the result; still clean up.
+  RootScope.destroyLocals();
+  return false;
+}
+
+/// Toplevel variable declaration entry point. On failure of a globally
+/// indexed variable, marks the global's init state as failed and runs its
+/// destructor so later evaluation doesn't see a half-initialized value.
+template <class Emitter>
+VarCreationState Compiler<Emitter>::visitDecl(const VarDecl *VD) {
+
+  auto R = this->visitVarDecl(VD, /*Toplevel=*/true);
+
+  if (R.notCreated())
+    return R;
+
+  if (R)
+    return true;
+
+  // Initialization failed: invalidate the global, if there is one.
+  if (!R && Context::shouldBeGloballyIndexed(VD)) {
+    if (auto GlobalIndex = P.getGlobal(VD)) {
+      Block *GlobalBlock = P.getGlobal(*GlobalIndex);
+      GlobalInlineDescriptor &GD =
+          *reinterpret_cast<GlobalInlineDescriptor *>(GlobalBlock->rawData());
+
+      GD.InitState = GlobalInitState::InitializerFailed;
+      GlobalBlock->invokeDtor();
+    }
+  }
+
+  return R;
+}
+
+/// Toplevel visitDeclAndReturn().
+/// We get here from evaluateAsInitializer().
+/// We need to evaluate the initializer and return its value.
+template <class Emitter>
+bool Compiler<Emitter>::visitDeclAndReturn(const VarDecl *VD,
+                                           bool ConstantContext) {
+  std::optional<PrimType> VarT = classify(VD->getType());
+
+  // We only create variables if we're evaluating in a constant context.
+  // Otherwise, just evaluate the initializer and return it.
+  if (!ConstantContext) {
+    DeclScope<Emitter> LS(this, VD);
+    if (!this->visit(VD->getAnyInitializer()))
+      return false;
+    return this->emitRet(VarT.value_or(PT_Ptr), VD) && LS.destroyLocals();
+  }
+
+  LocalScope<Emitter> VDScope(this, VD);
+  if (!this->visitVarDecl(VD, /*Toplevel=*/true))
+    return false;
+
+  // Push the initialized variable's value (or a pointer to it, for
+  // composites) so the Ret op below can return it.
+  if (Context::shouldBeGloballyIndexed(VD)) {
+    auto GlobalIndex = P.getGlobal(VD);
+    assert(GlobalIndex); // visitVarDecl() didn't return false.
+    if (VarT) {
+      if (!this->emitGetGlobalUnchecked(*VarT, *GlobalIndex, VD))
+        return false;
+    } else {
+      if (!this->emitGetPtrGlobal(*GlobalIndex, VD))
+        return false;
+    }
+  } else {
+    auto Local = Locals.find(VD);
+    assert(Local != Locals.end()); // Same here.
+    if (VarT) {
+      if (!this->emitGetLocal(*VarT, Local->second.Offset, VD))
+        return false;
+    } else {
+      if (!this->emitGetPtrLocal(Local->second.Offset, VD))
+        return false;
+    }
+  }
+
+  // Return the value.
+  if (!this->emitRet(VarT.value_or(PT_Ptr), VD)) {
+    // If the Ret above failed and this is a global variable, mark it as
+    // uninitialized, even everything else succeeded.
+    if (Context::shouldBeGloballyIndexed(VD)) {
+      auto GlobalIndex = P.getGlobal(VD);
+      assert(GlobalIndex);
+      Block *GlobalBlock = P.getGlobal(*GlobalIndex);
+      GlobalInlineDescriptor &GD =
+          *reinterpret_cast<GlobalInlineDescriptor *>(GlobalBlock->rawData());
+
+      GD.InitState = GlobalInitState::InitializerFailed;
+      GlobalBlock->invokeDtor();
+    }
+    return false;
+  }
+
+  return VDScope.destroyLocals();
+}
+
+/// Create and initialize a variable. Globally indexed variables (statics,
+/// globals) get a Program global slot and are initialized via InitGlobal;
+/// everything else gets a local slot. Toplevel controls whether the
+/// initializer runs in its own scope and whether a CheckDecl op is needed
+/// for static locals.
+template <class Emitter>
+VarCreationState Compiler<Emitter>::visitVarDecl(const VarDecl *VD, bool Toplevel) {
+  // We don't know what to do with these, so just return false.
+  if (VD->getType().isNull())
+    return false;
+
+  // This case is EvalEmitter-only. If we won't create any instructions for the
+  // initializer anyway, don't bother creating the variable in the first place.
+  if (!this->isActive())
+    return VarCreationState::NotCreated();
+
+  const Expr *Init = VD->getInit();
+  std::optional<PrimType> VarT = classify(VD->getType());
+
+  if (Context::shouldBeGloballyIndexed(VD)) {
+    // Static locals reached mid-function need a CheckDecl op before use.
+    auto checkDecl = [&]() -> bool {
+      bool NeedsOp = !Toplevel && VD->isLocalVarDecl() && VD->isStaticLocal();
+      return !NeedsOp || this->emitCheckDecl(VD, VD);
+    };
+
+    // Emit the initializer for an already-created global slot.
+    auto initGlobal = [&](unsigned GlobalIndex) -> bool {
+      assert(Init);
+      DeclScope<Emitter> LocalScope(this, VD);
+
+      if (VarT) {
+        // Note: visit the init even if checkDecl will fail, so diagnostics
+        // from the initializer are still produced.
+        if (!this->visit(Init))
+          return checkDecl() && false;
+
+        return checkDecl() && this->emitInitGlobal(*VarT, GlobalIndex, VD);
+      }
+
+      if (!checkDecl())
+        return false;
+
+      if (!this->emitGetPtrGlobal(GlobalIndex, Init))
+        return false;
+
+      if (!visitInitializer(Init))
+        return false;
+
+      if (!this->emitFinishInit(Init))
+        return false;
+
+      return this->emitPopPtr(Init);
+    };
+
+    // We've already seen and initialized this global.
+    if (std::optional<unsigned> GlobalIndex = P.getGlobal(VD)) {
+      if (P.getPtrGlobal(*GlobalIndex).isInitialized())
+        return checkDecl();
+
+      // The previous attempt at initialization might've been unsuccessful,
+      // so let's try this one.
+      return Init && checkDecl() && initGlobal(*GlobalIndex);
+    }
+
+    std::optional<unsigned> GlobalIndex = P.createGlobal(VD, Init);
+
+    if (!GlobalIndex)
+      return false;
+
+    return !Init || (checkDecl() && initGlobal(*GlobalIndex));
+  } else {
+    InitLinkScope<Emitter> ILS(this, InitLink::Decl(VD));
+
+    if (VarT) {
+      // Primitive local: compute the value, then SetLocal.
+      unsigned Offset = this->allocateLocalPrimitive(
+          VD, *VarT, VD->getType().isConstQualified());
+      if (Init) {
+        // If this is a toplevel declaration, create a scope for the
+        // initializer.
+        if (Toplevel) {
+          LocalScope<Emitter> Scope(this);
+          if (!this->visit(Init))
+            return false;
+          return this->emitSetLocal(*VarT, Offset, VD) && Scope.destroyLocals();
+        } else {
+          if (!this->visit(Init))
+            return false;
+          return this->emitSetLocal(*VarT, Offset, VD);
+        }
+      }
+    } else {
+      // Composite local: initialize in place through a pointer.
+      if (std::optional<unsigned> Offset = this->allocateLocal(VD)) {
+        if (!Init)
+          return true;
+
+        if (!this->emitGetPtrLocal(*Offset, Init))
+          return false;
+
+        if (!visitInitializer(Init))
+          return false;
+
+        if (!this->emitFinishInit(Init))
+          return false;
+
+        return this->emitPopPtr(Init);
+      }
+      return false;
+    }
+    return true;
+  }
+
+  return false;
+}
+
+/// Compile a pre-computed primitive APValue (int, float, lvalue or member
+/// pointer) so its value ends up on the stack as ValType. Returns false for
+/// APValue kinds not handled here.
+template <class Emitter>
+bool Compiler<Emitter>::visitAPValue(const APValue &Val, PrimType ValType,
+                                     const Expr *E) {
+  assert(!DiscardResult);
+  if (Val.isInt())
+    return this->emitConst(Val.getInt(), ValType, E);
+  else if (Val.isFloat())
+    return this->emitConstFloat(Val.getFloat(), E);
+
+  if (Val.isLValue()) {
+    if (Val.isNullPointer())
+      return this->emitNull(ValType, nullptr, E);
+    // Re-visit the lvalue base to produce the pointer.
+    APValue::LValueBase Base = Val.getLValueBase();
+    if (const Expr *BaseExpr = Base.dyn_cast<const Expr *>())
+      return this->visit(BaseExpr);
+    else if (const auto *VD = Base.dyn_cast<const ValueDecl *>()) {
+      return this->visitDeclRef(VD, E);
+    }
+  } else if (Val.isMemberPointer()) {
+    if (const ValueDecl *MemberDecl = Val.getMemberPointerDecl())
+      return this->emitGetMemberPtr(MemberDecl, E);
+    return this->emitNullMemberPtr(nullptr, E);
+  }
+
+  return false;
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::visitAPValueInitializer(const APValue &Val,
+                                                const Expr *E) {
+  // Emit initialization ops for a composite APValue (struct or union) into
+  // the pointer currently on top of the stack. E serves as the source-info
+  // anchor and provides the record type for structs. Returns false for
+  // value kinds not handled yet (see TODO below).
+
+  if (Val.isStruct()) {
+    const Record *R = this->getRecord(E->getType());
+    assert(R);
+    for (unsigned I = 0, N = Val.getStructNumFields(); I != N; ++I) {
+      const APValue &F = Val.getStructField(I);
+      const Record::Field *RF = R->getField(I);
+
+      if (F.isInt() || F.isFloat() || F.isLValue() || F.isMemberPointer()) {
+        // Primitive field: push the value, then initialize the field.
+        PrimType T = classifyPrim(RF->Decl->getType());
+        if (!this->visitAPValue(F, T, E))
+          return false;
+        if (!this->emitInitField(T, RF->Offset, E))
+          return false;
+      } else if (F.isArray()) {
+        assert(RF->Desc->isPrimitiveArray());
+        const auto *ArrType = RF->Decl->getType()->getAsArrayTypeUnsafe();
+        // Check the pointer before the first dereference; the previous code
+        // only asserted after ArrType had already been used.
+        assert(ArrType);
+        PrimType ElemT = classifyPrim(ArrType->getElementType());
+
+        if (!this->emitGetPtrField(RF->Offset, E))
+          return false;
+
+        // Initialize every element of the primitive array in place.
+        for (unsigned A = 0, AN = F.getArraySize(); A != AN; ++A) {
+          if (!this->visitAPValue(F.getArrayInitializedElt(A), ElemT, E))
+            return false;
+          if (!this->emitInitElem(ElemT, A, E))
+            return false;
+        }
+
+        // Pop the field pointer pushed by GetPtrField above.
+        if (!this->emitPopPtr(E))
+          return false;
+      } else if (F.isStruct() || F.isUnion()) {
+        // Composite field: recurse with a pointer to the field on top.
+        if (!this->emitGetPtrField(RF->Offset, E))
+          return false;
+        if (!this->visitAPValueInitializer(F, E))
+          return false;
+        if (!this->emitPopPtr(E))
+          return false;
+      } else {
+        assert(false && "I don't think this should be possible");
+      }
+    }
+    return true;
+  } else if (Val.isUnion()) {
+    // Only the single active union member gets initialized.
+    const FieldDecl *UnionField = Val.getUnionField();
+    const Record *R = this->getRecord(UnionField->getParent());
+    assert(R);
+    const APValue &F = Val.getUnionValue();
+    const Record::Field *RF = R->getField(UnionField);
+    PrimType T = classifyPrim(RF->Decl->getType());
+    if (!this->visitAPValue(F, T, E))
+      return false;
+    return this->emitInitField(T, RF->Offset, E);
+  }
+  // TODO: Other types.
+
+  return false;
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::VisitBuiltinCallExpr(const CallExpr *E) {
+  // Compile a call to a compiler builtin. Arguments are pushed (unless the
+  // builtin is unevaluated), then a CallBI op performs the builtin.
+  const Function *Func = getFunction(E->getDirectCallee());
+  if (!Func)
+    return false;
+
+  // For these, we're expected to ultimately return an APValue pointing
+  // to the CallExpr. This is needed to get the correct codegen.
+  unsigned Builtin = E->getBuiltinCallee();
+  if (Builtin == Builtin::BI__builtin___CFStringMakeConstantString ||
+      Builtin == Builtin::BI__builtin___NSStringMakeConstantString ||
+      Builtin == Builtin::BI__builtin_ptrauth_sign_constant ||
+      Builtin == Builtin::BI__builtin_function_start) {
+    if (std::optional<unsigned> GlobalOffset = P.createGlobal(E)) {
+      if (!this->emitGetPtrGlobal(*GlobalOffset, E))
+        return false;
+
+      // Decay the generic pointer if the call has a different pointer type.
+      if (PrimType PT = classifyPrim(E); PT != PT_Ptr && isPtrType(PT))
+        return this->emitDecayPtr(PT_Ptr, PT, E);
+      return true;
+    }
+    return false;
+  }
+
+  QualType ReturnType = E->getType();
+  std::optional<PrimType> ReturnT = classify(E);
+
+  // Non-primitive return type. Prepare storage.
+  if (!Initializing && !ReturnT && !ReturnType->isVoidType()) {
+    std::optional<unsigned> LocalIndex = allocateLocal(E);
+    if (!LocalIndex)
+      return false;
+    if (!this->emitGetPtrLocal(*LocalIndex, E))
+      return false;
+  }
+
+  // Unevaluated builtins don't get their arguments compiled onto the stack.
+  if (!Func->isUnevaluatedBuiltin()) {
+    // Put arguments on the stack.
+    for (const auto *Arg : E->arguments()) {
+      if (!this->visit(Arg))
+        return false;
+    }
+  }
+
+  if (!this->emitCallBI(Func, E, E))
+    return false;
+
+  // A discarded primitive result must be popped off the stack again.
+  if (DiscardResult && !ReturnType->isVoidType()) {
+    assert(ReturnT);
+    return this->emitPop(*ReturnT, E);
+  }
+
+  return true;
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::VisitCallExpr(const CallExpr *E) {
+  // Compile a call expression: set up an RVO pointer if needed, push the
+  // implicit 'this' (for member calls) and all arguments, then emit the
+  // matching call op (direct, virtual, variadic, or indirect via pointer).
+  if (E->getBuiltinCallee())
+    return VisitBuiltinCallExpr(E);
+
+  QualType ReturnType = E->getCallReturnType(Ctx.getASTContext());
+  std::optional<PrimType> T = classify(ReturnType);
+  // Non-void and non-primitive means the callee writes its result through
+  // an RVO pointer we must provide.
+  bool HasRVO = !ReturnType->isVoidType() && !T;
+  const FunctionDecl *FuncDecl = E->getDirectCallee();
+
+  if (HasRVO) {
+    if (DiscardResult) {
+      // If we need to discard the return value but the function returns its
+      // value via an RVO pointer, we need to create one such pointer just
+      // for this call.
+      if (std::optional<unsigned> LocalIndex = allocateLocal(E)) {
+        if (!this->emitGetPtrLocal(*LocalIndex, E))
+          return false;
+      }
+    } else {
+      // We need the result. Prepare a pointer to return or
+      // dup the current one.
+      if (!Initializing) {
+        if (std::optional<unsigned> LocalIndex = allocateLocal(E)) {
+          if (!this->emitGetPtrLocal(*LocalIndex, E))
+            return false;
+        }
+      }
+      if (!this->emitDupPtr(E))
+        return false;
+    }
+  }
+
+  auto Args = llvm::ArrayRef(E->getArgs(), E->getNumArgs());
+  // Calling a static operator will still
+  // pass the instance, but we don't need it.
+  // Discard it here.
+  if (isa<CXXOperatorCallExpr>(E)) {
+    if (const auto *MD = dyn_cast_if_present<CXXMethodDecl>(FuncDecl);
+        MD && MD->isStatic()) {
+      if (!this->discard(E->getArg(0)))
+        return false;
+      Args = Args.drop_front();
+    }
+  }
+
+  std::optional<unsigned> CalleeOffset;
+  // Add the (optional, implicit) This pointer.
+  if (const auto *MC = dyn_cast<CXXMemberCallExpr>(E)) {
+    if (!FuncDecl && classifyPrim(E->getCallee()) == PT_MemberPtr) {
+      // If we end up creating a CallPtr op for this, we need the base of the
+      // member pointer as the instance pointer, and later extract the function
+      // decl as the function pointer.
+      const Expr *Callee = E->getCallee();
+      CalleeOffset =
+          this->allocateLocalPrimitive(Callee, PT_MemberPtr, true, false);
+      if (!this->visit(Callee))
+        return false;
+      if (!this->emitSetLocal(PT_MemberPtr, *CalleeOffset, E))
+        return false;
+      if (!this->emitGetLocal(PT_MemberPtr, *CalleeOffset, E))
+        return false;
+      if (!this->emitGetMemberPtrBase(E))
+        return false;
+    } else if (!this->visit(MC->getImplicitObjectArgument())) {
+      return false;
+    }
+  }
+
+  llvm::BitVector NonNullArgs = collectNonNullArgs(FuncDecl, Args);
+  // Put arguments on the stack.
+  unsigned ArgIndex = 0;
+  for (const auto *Arg : Args) {
+    if (!this->visit(Arg))
+      return false;
+
+    // If we know the callee already, check the known parameters for
+    // nullability.
+    if (FuncDecl && NonNullArgs[ArgIndex]) {
+      PrimType ArgT = classify(Arg).value_or(PT_Ptr);
+      if (ArgT == PT_Ptr || ArgT == PT_FnPtr) {
+        if (!this->emitCheckNonNullArg(ArgT, Arg))
+          return false;
+      }
+    }
+    ++ArgIndex;
+  }
+
+  if (FuncDecl) {
+    const Function *Func = getFunction(FuncDecl);
+    if (!Func)
+      return false;
+    assert(HasRVO == Func->hasRVO());
+
+    bool HasQualifier = false;
+    if (const auto *ME = dyn_cast<MemberExpr>(E->getCallee()))
+      HasQualifier = ME->hasQualifier();
+
+    bool IsVirtual = false;
+    if (const auto *MD = dyn_cast<CXXMethodDecl>(FuncDecl))
+      IsVirtual = MD->isVirtual();
+
+    // In any case call the function. The return value will end up on the stack
+    // and if the function has RVO, we already have the pointer on the stack to
+    // write the result into.
+    if (IsVirtual && !HasQualifier) {
+      // Unqualified virtual call: needs dynamic dispatch. Any arguments past
+      // the declared parameters are vararg material whose size must be known.
+      uint32_t VarArgSize = 0;
+      unsigned NumParams =
+          Func->getNumWrittenParams() + isa<CXXOperatorCallExpr>(E);
+      for (unsigned I = NumParams, N = E->getNumArgs(); I != N; ++I)
+        VarArgSize += align(primSize(classify(E->getArg(I)).value_or(PT_Ptr)));
+
+      if (!this->emitCallVirt(Func, VarArgSize, E))
+        return false;
+    } else if (Func->isVariadic()) {
+      uint32_t VarArgSize = 0;
+      unsigned NumParams =
+          Func->getNumWrittenParams() + isa<CXXOperatorCallExpr>(E);
+      for (unsigned I = NumParams, N = E->getNumArgs(); I != N; ++I)
+        VarArgSize += align(primSize(classify(E->getArg(I)).value_or(PT_Ptr)));
+      if (!this->emitCallVar(Func, VarArgSize, E))
+        return false;
+    } else {
+      if (!this->emitCall(Func, 0, E))
+        return false;
+    }
+  } else {
+    // Indirect call. Visit the callee, which will leave a FunctionPointer on
+    // the stack. Cleanup of the returned value if necessary will be done after
+    // the function call completed.
+
+    // Sum the size of all args from the call expr.
+    uint32_t ArgSize = 0;
+    for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+      ArgSize += align(primSize(classify(E->getArg(I)).value_or(PT_Ptr)));
+
+    // Get the callee, either from a member pointer saved in CalleeOffset,
+    // or by just visiting the Callee expr.
+    if (CalleeOffset) {
+      if (!this->emitGetLocal(PT_MemberPtr, *CalleeOffset, E))
+        return false;
+      if (!this->emitGetMemberPtrDecl(E))
+        return false;
+      if (!this->emitCallPtr(ArgSize, E, E))
+        return false;
+    } else {
+      if (!this->visit(E->getCallee()))
+        return false;
+
+      if (!this->emitCallPtr(ArgSize, E, E))
+        return false;
+    }
+  }
+
+  // Cleanup for discarded return values.
+  if (DiscardResult && !ReturnType->isVoidType() && T)
+    return this->emitPop(*T, E);
+
+  return true;
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E) {
+  // Compile the field's in-class default initializer. The scope object maps
+  // source locations of the stored expression to this use site.
+  SourceLocScope<Emitter> DefaultInitScope(this, E);
+  const Expr *InitExpr = E->getExpr();
+  return this->delegate(InitExpr);
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E) {
+  // Compile a use of a parameter's default argument. The scope object maps
+  // source locations of the stored default expression to this call site.
+  SourceLocScope<Emitter> SLS(this, E);
+
+  const Expr *SubExpr = E->getExpr();
+  // Primitive default arguments produce a value on the stack. (The previous
+  // code bound the classification to an unused local and re-evaluated
+  // E->getExpr() instead of reusing SubExpr.)
+  if (classify(SubExpr))
+    return this->visit(SubExpr);
+
+  // Composite default arguments initialize the object we are currently
+  // initializing into.
+  assert(Initializing);
+  return this->visitInitializer(SubExpr);
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
+  // A bool literal has no side effects; a discarded use emits nothing.
+  return DiscardResult ? true : this->emitConstBool(E->getValue(), E);
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::VisitCXXNullPtrLiteralExpr(
+    const CXXNullPtrLiteralExpr *E) {
+  // nullptr has no side effects; a discarded use emits nothing.
+  return DiscardResult ? true : this->emitNullPtr(nullptr, E);
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::VisitGNUNullExpr(const GNUNullExpr *E) {
+  // GNU __null is an integer constant zero of some integer type.
+  if (DiscardResult)
+    return true;
+
+  assert(E->getType()->isIntegerType());
+  return this->emitZero(classifyPrim(E->getType()), E);
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::VisitCXXThisExpr(const CXXThisExpr *E) {
+  // Push the 'this' pointer for the current context.
+  if (DiscardResult)
+    return true;
+
+  // Inside a lambda call operator, 'this' refers to the captured pointer,
+  // which lives in a field of the closure object.
+  if (this->LambdaThisCapture.Offset > 0) {
+    if (this->LambdaThisCapture.IsPtr)
+      return this->emitGetThisFieldPtr(this->LambdaThisCapture.Offset, E);
+    return this->emitGetPtrThisField(this->LambdaThisCapture.Offset, E);
+  }
+
+  // In some circumstances, the 'this' pointer does not actually refer to the
+  // instance pointer of the current function frame, but e.g. to the declaration
+  // currently being initialized. Here we emit the necessary instruction(s) for
+  // this scenario.
+  if (!InitStackActive || !E->isImplicit())
+    return this->emitThis(E);
+
+  // NOTE(review): InitStackActive is necessarily true past the early return
+  // above, so the first half of this condition is redundant.
+  if (InitStackActive && !InitStack.empty()) {
+    unsigned StartIndex = 0;
+    // Walk down from the top of the init stack past consecutive K_Field
+    // links; emission starts at the first non-field link encountered.
+    for (StartIndex = InitStack.size() - 1; StartIndex > 0; --StartIndex) {
+      if (InitStack[StartIndex].Kind != InitLink::K_Field)
+        break;
+    }
+
+    // Emit the chain of links that reconstructs the object being initialized.
+    for (unsigned I = StartIndex, N = InitStack.size(); I != N; ++I) {
+      if (!InitStack[I].template emit<Emitter>(this, E))
+        return false;
+    }
+    return true;
+  }
+  return this->emitThis(E);
+}
+
+template <class Emitter> bool Compiler<Emitter>::visitStmt(const Stmt *S) {
+  // Dispatch a statement to its specific visitor. Expression statements are
+  // compiled with their result discarded; statements that can never appear
+  // in constant evaluation emit an Invalid op.
+  switch (S->getStmtClass()) {
+  case Stmt::CompoundStmtClass:
+    return visitCompoundStmt(cast<CompoundStmt>(S));
+  case Stmt::DeclStmtClass:
+    return visitDeclStmt(cast<DeclStmt>(S));
+  case Stmt::ReturnStmtClass:
+    return visitReturnStmt(cast<ReturnStmt>(S));
+  case Stmt::IfStmtClass:
+    return visitIfStmt(cast<IfStmt>(S));
+  case Stmt::WhileStmtClass:
+    return visitWhileStmt(cast<WhileStmt>(S));
+  case Stmt::DoStmtClass:
+    return visitDoStmt(cast<DoStmt>(S));
+  case Stmt::ForStmtClass:
+    return visitForStmt(cast<ForStmt>(S));
+  case Stmt::CXXForRangeStmtClass:
+    return visitCXXForRangeStmt(cast<CXXForRangeStmt>(S));
+  case Stmt::BreakStmtClass:
+    return visitBreakStmt(cast<BreakStmt>(S));
+  case Stmt::ContinueStmtClass:
+    return visitContinueStmt(cast<ContinueStmt>(S));
+  case Stmt::SwitchStmtClass:
+    return visitSwitchStmt(cast<SwitchStmt>(S));
+  case Stmt::CaseStmtClass:
+    return visitCaseStmt(cast<CaseStmt>(S));
+  case Stmt::DefaultStmtClass:
+    return visitDefaultStmt(cast<DefaultStmt>(S));
+  case Stmt::AttributedStmtClass:
+    return visitAttributedStmt(cast<AttributedStmt>(S));
+  case Stmt::CXXTryStmtClass:
+    return visitCXXTryStmt(cast<CXXTryStmt>(S));
+  case Stmt::NullStmtClass:
+    return true;
+  // Always invalid statements.
+  case Stmt::GCCAsmStmtClass:
+  case Stmt::MSAsmStmtClass:
+  case Stmt::GotoStmtClass:
+    return this->emitInvalid(S);
+  case Stmt::LabelStmtClass:
+    // The label itself is ignored; only its sub-statement is compiled.
+    return this->visitStmt(cast<LabelStmt>(S)->getSubStmt());
+  default: {
+    // Anything else must be an expression statement, evaluated for its
+    // side effects only.
+    if (const auto *E = dyn_cast<Expr>(S))
+      return this->discard(E);
+    return false;
+  }
+  }
+}
+
+/// Visits the given statement without creating a variable
+/// scope for it in case it is a compound statement.
+template <class Emitter> bool Compiler<Emitter>::visitLoopBody(const Stmt *S) {
+  if (isa<NullStmt>(S))
+    return true;
+
+  // Inline compound bodies directly so their locals live in the scope the
+  // caller (the loop visitor) has already set up.
+  if (const auto *CS = dyn_cast<CompoundStmt>(S)) {
+    for (const auto *InnerStmt : CS->body())
+      if (!visitStmt(InnerStmt))
+        return false;
+    return true;
+  }
+
+  return this->visitStmt(S);
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::visitCompoundStmt(const CompoundStmt *S) {
+  // A compound statement opens a fresh block scope; locals declared inside
+  // are destroyed when the scope ends.
+  BlockScope<Emitter> BS(this);
+  for (const Stmt *Inner : S->body()) {
+    if (!visitStmt(Inner))
+      return false;
+  }
+  return BS.destroyLocals();
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::visitDeclStmt(const DeclStmt *DS) {
+  // Compile every declaration of the statement. Declarations that create no
+  // runtime state (types, static asserts, functions, ...) are skipped.
+  for (const Decl *D : DS->decls()) {
+    if (isa<StaticAssertDecl, TagDecl, TypedefNameDecl, UsingEnumDecl,
+            FunctionDecl>(D))
+      continue;
+
+    // Everything else must be a variable declaration we can compile.
+    const auto *VD = dyn_cast<VarDecl>(D);
+    if (!VD || !this->visitVarDecl(VD))
+      return false;
+  }
+
+  return true;
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::visitReturnStmt(const ReturnStmt *RS) {
+  // Returning out of a statement expression is not supported.
+  if (this->InStmtExpr)
+    return this->emitUnsupported(RS);
+
+  if (const Expr *RE = RS->getRetValue()) {
+    LocalScope<Emitter> RetScope(this);
+    if (ReturnType) {
+      // Primitive types are simply returned.
+      if (!this->visit(RE))
+        return false;
+      this->emitCleanup();
+      return this->emitRet(*ReturnType, RS);
+    } else if (RE->getType()->isVoidType()) {
+      // 'return f()' where f returns void: evaluate for side effects and
+      // fall through to the void return below.
+      if (!this->visit(RE))
+        return false;
+    } else {
+      // RVO - construct the value in the return location.
+      if (!this->emitRVOPtr(RE))
+        return false;
+      if (!this->visitInitializer(RE))
+        return false;
+      if (!this->emitPopPtr(RE))
+        return false;
+
+      this->emitCleanup();
+      return this->emitRetVoid(RS);
+    }
+  }
+
+  // Void return.
+  this->emitCleanup();
+  return this->emitRetVoid(RS);
+}
+
+template <class Emitter> bool Compiler<Emitter>::visitIfStmt(const IfStmt *IS) {
+  // Compile an if statement as conditional jumps between fresh labels.
+  BlockScope<Emitter> IfScope(this);
+
+  // 'if consteval' always takes the 'then' branch during constant
+  // evaluation; 'if !consteval' always takes the 'else' branch.
+  if (IS->isNonNegatedConsteval())
+    return visitStmt(IS->getThen());
+  if (IS->isNegatedConsteval())
+    return IS->getElse() ? visitStmt(IS->getElse()) : true;
+
+  // Optional C++17 init statement and condition variable come first.
+  if (auto *CondInit = IS->getInit())
+    if (!visitStmt(CondInit))
+      return false;
+
+  if (const DeclStmt *CondDecl = IS->getConditionVariableDeclStmt())
+    if (!visitDeclStmt(CondDecl))
+      return false;
+
+  if (!this->visitBool(IS->getCond()))
+    return false;
+
+  if (const Stmt *Else = IS->getElse()) {
+    LabelTy LabelElse = this->getLabel();
+    LabelTy LabelEnd = this->getLabel();
+    if (!this->jumpFalse(LabelElse))
+      return false;
+    if (!visitStmt(IS->getThen()))
+      return false;
+    // Skip over the else branch after the then branch completes.
+    if (!this->jump(LabelEnd))
+      return false;
+    this->emitLabel(LabelElse);
+    if (!visitStmt(Else))
+      return false;
+    this->emitLabel(LabelEnd);
+  } else {
+    LabelTy LabelEnd = this->getLabel();
+    if (!this->jumpFalse(LabelEnd))
+      return false;
+    if (!visitStmt(IS->getThen()))
+      return false;
+    this->emitLabel(LabelEnd);
+  }
+
+  return IfScope.destroyLocals();
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::visitWhileStmt(const WhileStmt *S) {
+  // Layout: CondLabel -> cond -> jumpFalse EndLabel -> body -> jump CondLabel.
+  // 'continue' targets CondLabel, 'break' targets EndLabel (via LoopScope).
+  const Expr *Cond = S->getCond();
+  const Stmt *Body = S->getBody();
+
+  LabelTy CondLabel = this->getLabel(); // Label before the condition.
+  LabelTy EndLabel = this->getLabel();  // Label after the loop.
+  LoopScope<Emitter> LS(this, EndLabel, CondLabel);
+
+  this->fallthrough(CondLabel);
+  this->emitLabel(CondLabel);
+
+  // The condition variable is re-declared on every iteration.
+  if (const DeclStmt *CondDecl = S->getConditionVariableDeclStmt())
+    if (!visitDeclStmt(CondDecl))
+      return false;
+
+  if (!this->visitBool(Cond))
+    return false;
+  if (!this->jumpFalse(EndLabel))
+    return false;
+
+  LocalScope<Emitter> Scope(this);
+  {
+    // Destructors for body-local variables run at the end of each iteration.
+    DestructorScope<Emitter> DS(Scope);
+    if (!this->visitLoopBody(Body))
+      return false;
+  }
+
+  if (!this->jump(CondLabel))
+    return false;
+  this->emitLabel(EndLabel);
+
+  return true;
+}
+
+template <class Emitter> bool Compiler<Emitter>::visitDoStmt(const DoStmt *S) {
+  // Layout: StartLabel -> body -> CondLabel -> cond -> jumpTrue StartLabel.
+  // 'continue' targets CondLabel, 'break' targets EndLabel (via LoopScope).
+  const Expr *Cond = S->getCond();
+  const Stmt *Body = S->getBody();
+
+  LabelTy StartLabel = this->getLabel();
+  LabelTy EndLabel = this->getLabel();
+  LabelTy CondLabel = this->getLabel();
+  LoopScope<Emitter> LS(this, EndLabel, CondLabel);
+  LocalScope<Emitter> Scope(this);
+
+  this->fallthrough(StartLabel);
+  this->emitLabel(StartLabel);
+  {
+    // Destructors for body-local variables run after the condition check,
+    // at the end of each iteration.
+    DestructorScope<Emitter> DS(Scope);
+
+    if (!this->visitLoopBody(Body))
+      return false;
+    this->fallthrough(CondLabel);
+    this->emitLabel(CondLabel);
+    if (!this->visitBool(Cond))
+      return false;
+  }
+  if (!this->jumpTrue(StartLabel))
+    return false;
+
+  this->fallthrough(EndLabel);
+  this->emitLabel(EndLabel);
+  return true;
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::visitForStmt(const ForStmt *S) {
+  // for (Init; Cond; Inc) { Body }
+  // Layout: init -> CondLabel -> cond -> jumpFalse EndLabel -> body ->
+  // IncLabel -> inc -> jump CondLabel. 'continue' targets IncLabel.
+  const Stmt *Init = S->getInit();
+  const Expr *Cond = S->getCond();
+  const Expr *Inc = S->getInc();
+  const Stmt *Body = S->getBody();
+
+  LabelTy EndLabel = this->getLabel();
+  LabelTy CondLabel = this->getLabel();
+  LabelTy IncLabel = this->getLabel();
+  LoopScope<Emitter> LS(this, EndLabel, IncLabel);
+  LocalScope<Emitter> Scope(this);
+
+  if (Init && !this->visitStmt(Init))
+    return false;
+  this->fallthrough(CondLabel);
+  this->emitLabel(CondLabel);
+
+  // The condition variable is re-declared on every iteration.
+  if (const DeclStmt *CondDecl = S->getConditionVariableDeclStmt())
+    if (!visitDeclStmt(CondDecl))
+      return false;
+
+  // The condition is optional; without one the loop only exits via break.
+  if (Cond) {
+    if (!this->visitBool(Cond))
+      return false;
+    if (!this->jumpFalse(EndLabel))
+      return false;
+  }
+
+  {
+    // Destructors for body-local variables run after the increment,
+    // at the end of each iteration.
+    DestructorScope<Emitter> DS(Scope);
+
+    if (Body && !this->visitLoopBody(Body))
+      return false;
+    this->fallthrough(IncLabel);
+    this->emitLabel(IncLabel);
+    if (Inc && !this->discard(Inc))
+      return false;
+  }
+
+  if (!this->jump(CondLabel))
+    return false;
+  this->fallthrough(EndLabel);
+  this->emitLabel(EndLabel);
+  return true;
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::visitCXXForRangeStmt(const CXXForRangeStmt *S) {
+  // Compile a range-based for using the statements the AST has already
+  // synthesized: range/begin/end declarations, a condition, the loop
+  // variable assignment, and the increment.
+  const Stmt *Init = S->getInit();
+  const Expr *Cond = S->getCond();
+  const Expr *Inc = S->getInc();
+  const Stmt *Body = S->getBody();
+  const Stmt *BeginStmt = S->getBeginStmt();
+  const Stmt *RangeStmt = S->getRangeStmt();
+  const Stmt *EndStmt = S->getEndStmt();
+  const VarDecl *LoopVar = S->getLoopVariable();
+
+  LabelTy EndLabel = this->getLabel();
+  LabelTy CondLabel = this->getLabel();
+  LabelTy IncLabel = this->getLabel();
+  LoopScope<Emitter> LS(this, EndLabel, IncLabel);
+
+  // Emit declarations needed in the loop.
+  if (Init && !this->visitStmt(Init))
+    return false;
+  if (!this->visitStmt(RangeStmt))
+    return false;
+  if (!this->visitStmt(BeginStmt))
+    return false;
+  if (!this->visitStmt(EndStmt))
+    return false;
+
+  // Now the condition as well as the loop variable assignment.
+  this->fallthrough(CondLabel);
+  this->emitLabel(CondLabel);
+  if (!this->visitBool(Cond))
+    return false;
+  if (!this->jumpFalse(EndLabel))
+    return false;
+
+  // The loop variable is re-initialized from the iterator each iteration.
+  if (!this->visitVarDecl(LoopVar))
+    return false;
+
+  // Body.
+  LocalScope<Emitter> Scope(this);
+  {
+    // Destructors for body-local variables run after the increment,
+    // at the end of each iteration.
+    DestructorScope<Emitter> DS(Scope);
+
+    if (!this->visitLoopBody(Body))
+      return false;
+    this->fallthrough(IncLabel);
+    this->emitLabel(IncLabel);
+    if (!this->discard(Inc))
+      return false;
+  }
+
+  if (!this->jump(CondLabel))
+    return false;
+
+  this->fallthrough(EndLabel);
+  this->emitLabel(EndLabel);
+  return true;
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::visitBreakStmt(const BreakStmt *S) {
+  // 'break' is only valid while a breakable construct has set BreakLabel.
+  // Locals of the current scope are destroyed before jumping out of it.
+  if (BreakLabel) {
+    this->VarScope->emitDestructors();
+    return this->jump(*BreakLabel);
+  }
+  return false;
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::visitContinueStmt(const ContinueStmt *S) {
+  // 'continue' is only valid while a loop has set ContinueLabel. Locals of
+  // the current scope are destroyed before jumping back.
+  if (ContinueLabel) {
+    this->VarScope->emitDestructors();
+    return this->jump(*ContinueLabel);
+  }
+  return false;
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::visitSwitchStmt(const SwitchStmt *S) {
+  // Lower a switch into: evaluate the condition once into a local, then a
+  // chain of equality comparisons jumping to per-case labels, followed by
+  // the body with those labels materialized by visitCaseStmt/visitDefaultStmt.
+  const Expr *Cond = S->getCond();
+  PrimType CondT = this->classifyPrim(Cond->getType());
+
+  LabelTy EndLabel = this->getLabel();
+  OptLabelTy DefaultLabel = std::nullopt;
+  // The condition value is stored in a local so each case comparison can
+  // reload it without re-evaluating the expression.
+  unsigned CondVar = this->allocateLocalPrimitive(Cond, CondT, true, false);
+
+  if (const auto *CondInit = S->getInit())
+    if (!visitStmt(CondInit))
+      return false;
+
+  if (const DeclStmt *CondDecl = S->getConditionVariableDeclStmt())
+    if (!visitDeclStmt(CondDecl))
+      return false;
+
+  // Initialize condition variable.
+  if (!this->visit(Cond))
+    return false;
+  if (!this->emitSetLocal(CondT, CondVar, S))
+    return false;
+
+  CaseMap CaseLabels;
+  // Create labels and comparison ops for all case statements.
+  for (const SwitchCase *SC = S->getSwitchCaseList(); SC;
+       SC = SC->getNextSwitchCase()) {
+    if (const auto *CS = dyn_cast<CaseStmt>(SC)) {
+      // FIXME: Implement ranges.
+      if (CS->caseStmtIsGNURange())
+        return false;
+      CaseLabels[SC] = this->getLabel();
+
+      const Expr *Value = CS->getLHS();
+      PrimType ValueT = this->classifyPrim(Value->getType());
+
+      // Compare the case statement's value to the switch condition.
+      if (!this->emitGetLocal(CondT, CondVar, CS))
+        return false;
+      if (!this->visit(Value))
+        return false;
+
+      // Compare and jump to the case label.
+      if (!this->emitEQ(ValueT, S))
+        return false;
+      if (!this->jumpTrue(CaseLabels[CS]))
+        return false;
+    } else {
+      assert(!DefaultLabel);
+      DefaultLabel = this->getLabel();
+    }
+  }
+
+  // If none of the conditions above were true, fall through to the default
+  // statement or jump after the switch statement.
+  if (DefaultLabel) {
+    if (!this->jump(*DefaultLabel))
+      return false;
+  } else {
+    if (!this->jump(EndLabel))
+      return false;
+  }
+
+  // The SwitchScope exposes the labels so 'break' and the case/default
+  // visitors inside the body can find them.
+  SwitchScope<Emitter> SS(this, std::move(CaseLabels), EndLabel, DefaultLabel);
+  if (!this->visitStmt(S->getBody()))
+    return false;
+  this->emitLabel(EndLabel);
+  return true;
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::visitCaseStmt(const CaseStmt *S) {
+  // Materialize the label that visitSwitchStmt created for this case, then
+  // compile the case's sub-statement.
+  this->emitLabel(CaseLabels[S]);
+  return this->visitStmt(S->getSubStmt());
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::visitDefaultStmt(const DefaultStmt *S) {
+  // Materialize the default label created by visitSwitchStmt, then compile
+  // the default case's sub-statement.
+  this->emitLabel(*DefaultLabel);
+  return this->visitStmt(S->getSubStmt());
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::visitAttributedStmt(const AttributedStmt *S) {
+  // [[assume(...)]] attributes are checked during constant evaluation when
+  // the language mode enables them; all other attributes are ignored.
+  if (this->Ctx.getLangOpts().CXXAssumptions &&
+      !this->Ctx.getLangOpts().MSVCCompat) {
+    for (const Attr *A : S->getAttrs()) {
+      auto *AA = dyn_cast<CXXAssumeAttr>(A);
+      if (!AA)
+        continue;
+
+      assert(isa<NullStmt>(S->getSubStmt()));
+
+      const Expr *Assumption = AA->getAssumption();
+      if (Assumption->isValueDependent())
+        return false;
+
+      // Assumptions with side effects are skipped rather than evaluated.
+      if (Assumption->HasSideEffects(this->Ctx.getASTContext()))
+        continue;
+
+      // Evaluate assumption.
+      if (!this->visitBool(Assumption))
+        return false;
+
+      if (!this->emitAssume(Assumption))
+        return false;
+    }
+  }
+
+  // Ignore other attributes.
+  return this->visitStmt(S->getSubStmt());
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::visitCXXTryStmt(const CXXTryStmt *S) {
+  // Only the try block itself is compiled; the handlers are skipped.
+  const Stmt *TryBlock = S->getTryBlock();
+  return this->visitStmt(TryBlock);
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::emitLambdaStaticInvokerBody(const CXXMethodDecl *MD) {
+  // Synthesize the body of a lambda's static invoker: forward all parameters
+  // to the lambda call operator and return its result. The invoker's AST
+  // body is empty, so the ops are generated here by hand.
+  assert(MD->isLambdaStaticInvoker());
+  assert(MD->hasBody());
+  assert(cast<CompoundStmt>(MD->getBody())->body_empty());
+
+  const CXXRecordDecl *ClosureClass = MD->getParent();
+  const CXXMethodDecl *LambdaCallOp = ClosureClass->getLambdaCallOperator();
+  // A lambda with a static invoker cannot have captures.
+  assert(ClosureClass->captures_begin() == ClosureClass->captures_end());
+  const Function *Func = this->getFunction(LambdaCallOp);
+  if (!Func)
+    return false;
+  assert(Func->hasThisPointer());
+  assert(Func->getNumParams() == (MD->getNumParams() + 1 + Func->hasRVO()));
+
+  // The call operator's RVO pointer is simply our own RVO pointer.
+  if (Func->hasRVO()) {
+    if (!this->emitRVOPtr(MD))
+      return false;
+  }
+
+  // The lambda call operator needs an instance pointer, but we don't have
+  // one here, and we don't need one either because the lambda cannot have
+  // any captures, as verified above. Emit a null pointer. This is then
+  // special-cased when interpreting to not emit any misleading diagnostics.
+  if (!this->emitNullPtr(nullptr, MD))
+    return false;
+
+  // Forward all arguments from the static invoker to the lambda call operator.
+  for (const ParmVarDecl *PVD : MD->parameters()) {
+    auto It = this->Params.find(PVD);
+    assert(It != this->Params.end());
+
+    // We do the lvalue-to-rvalue conversion manually here, so no need
+    // to care about references.
+    PrimType ParamType = this->classify(PVD->getType()).value_or(PT_Ptr);
+    if (!this->emitGetParam(ParamType, It->second.Offset, MD))
+      return false;
+  }
+
+  if (!this->emitCall(Func, 0, LambdaCallOp))
+    return false;
+
+  this->emitCleanup();
+  if (ReturnType)
+    return this->emitRet(*ReturnType, MD);
+
+  // Nothing to do, since we emitted the RVO pointer above.
+  return this->emitRetVoid(MD);
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::visitFunc(const FunctionDecl *F) {
+  // Compile an entire function body: member-initializer emission for
+  // constructors, special handling for lambda static invokers, then the
+  // body statements, ending with a guard return.
+  // Classify the return type.
+  ReturnType = this->classify(F->getReturnType());
+
+  // Emit the ops that initialize one field of 'this' from InitExpr.
+  // NOTE: the parameter F here shadows the FunctionDecl F above.
+  auto emitFieldInitializer = [&](const Record::Field *F, unsigned FieldOffset,
+                                  const Expr *InitExpr) -> bool {
+    // We don't know what to do with these, so just return false.
+    if (InitExpr->getType().isNull())
+      return false;
+
+    if (std::optional<PrimType> T = this->classify(InitExpr)) {
+      if (!this->visit(InitExpr))
+        return false;
+
+      if (F->isBitField())
+        return this->emitInitThisBitField(*T, F, FieldOffset, InitExpr);
+      return this->emitInitThisField(*T, FieldOffset, InitExpr);
+    }
+    // Non-primitive case. Get a pointer to the field-to-initialize
+    // on the stack and call visitInitialzer() for it.
+    InitLinkScope<Emitter> FieldScope(this, InitLink::Field(F->Offset));
+    if (!this->emitGetPtrThisField(FieldOffset, InitExpr))
+      return false;
+
+    if (!this->visitInitializer(InitExpr))
+      return false;
+
+    return this->emitPopPtr(InitExpr);
+  };
+
+  // Emit custom code if this is a lambda static invoker.
+  if (const auto *MD = dyn_cast<CXXMethodDecl>(F);
+      MD && MD->isLambdaStaticInvoker())
+    return this->emitLambdaStaticInvokerBody(MD);
+
+  // Constructor. Set up field initializers.
+  if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(F)) {
+    const RecordDecl *RD = Ctor->getParent();
+    const Record *R = this->getRecord(RD);
+    if (!R)
+      return false;
+
+    InitLinkScope<Emitter> InitScope(this, InitLink::This());
+    for (const auto *Init : Ctor->inits()) {
+      // Scope needed for the initializers.
+      BlockScope<Emitter> Scope(this);
+
+      const Expr *InitExpr = Init->getInit();
+      if (const FieldDecl *Member = Init->getMember()) {
+        // Direct member initializer.
+        const Record::Field *F = R->getField(Member);
+
+        if (!emitFieldInitializer(F, F->Offset, InitExpr))
+          return false;
+      } else if (const Type *Base = Init->getBaseClass()) {
+        const auto *BaseDecl = Base->getAsCXXRecordDecl();
+        assert(BaseDecl);
+
+        if (Init->isBaseVirtual()) {
+          assert(R->getVirtualBase(BaseDecl));
+          if (!this->emitGetPtrThisVirtBase(BaseDecl, InitExpr))
+            return false;
+
+        } else {
+          // Base class initializer.
+          // Get This Base and call initializer on it.
+          const Record::Base *B = R->getBase(BaseDecl);
+          assert(B);
+          if (!this->emitGetPtrThisBase(B->Offset, InitExpr))
+            return false;
+        }
+
+        if (!this->visitInitializer(InitExpr))
+          return false;
+        if (!this->emitFinishInitPop(InitExpr))
+          return false;
+      } else if (const IndirectFieldDecl *IFD = Init->getIndirectMember()) {
+        // Initializer of a member of an anonymous struct/union: accumulate
+        // the offsets along the chain of fields.
+        assert(IFD->getChainingSize() >= 2);
+
+        unsigned NestedFieldOffset = 0;
+        const Record::Field *NestedField = nullptr;
+        for (const NamedDecl *ND : IFD->chain()) {
+          const auto *FD = cast<FieldDecl>(ND);
+          const Record *FieldRecord =
+              this->P.getOrCreateRecord(FD->getParent());
+          assert(FieldRecord);
+
+          NestedField = FieldRecord->getField(FD);
+          assert(NestedField);
+
+          NestedFieldOffset += NestedField->Offset;
+        }
+        assert(NestedField);
+
+        if (!emitFieldInitializer(NestedField, NestedFieldOffset, InitExpr))
+          return false;
+      } else {
+        // Delegating constructor: initialize 'this' via the target ctor.
+        assert(Init->isDelegatingInitializer());
+        if (!this->emitThis(InitExpr))
+          return false;
+        if (!this->visitInitializer(Init->getInit()))
+          return false;
+        if (!this->emitPopPtr(InitExpr))
+          return false;
+      }
+
+      if (!Scope.destroyLocals())
+        return false;
+    }
+  }
+
+  if (const auto *Body = F->getBody())
+    if (!visitStmt(Body))
+      return false;
+
+  // Emit a guard return to protect against a code path missing one.
+  if (F->getReturnType()->isVoidType())
+    return this->emitRetVoid(SourceInfo{});
+  return this->emitNoRet(SourceInfo{});
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
+ const Expr *SubExpr = E->getSubExpr();
+ if (SubExpr->getType()->isAnyComplexType())
+ return this->VisitComplexUnaryOperator(E);
+ std::optional<PrimType> T = classify(SubExpr->getType());
+
+ switch (E->getOpcode()) {
+ case UO_PostInc: { // x++
+ if (!Ctx.getLangOpts().CPlusPlus14)
+ return this->emitInvalid(E);
+ if (!T)
+ return this->emitError(E);
+
+ if (!this->visit(SubExpr))
+ return false;
+
+ if (T == PT_Ptr || T == PT_FnPtr) {
+ if (!this->emitIncPtr(E))
+ return false;
+
+ return DiscardResult ? this->emitPopPtr(E) : true;
+ }
+
+ if (T == PT_Float) {
+ return DiscardResult ? this->emitIncfPop(getRoundingMode(E), E)
+ : this->emitIncf(getRoundingMode(E), E);
+ }
+
+ return DiscardResult ? this->emitIncPop(*T, E) : this->emitInc(*T, E);
+ }
+ case UO_PostDec: { // x--
+ if (!Ctx.getLangOpts().CPlusPlus14)
+ return this->emitInvalid(E);
+ if (!T)
+ return this->emitError(E);
+
+ if (!this->visit(SubExpr))
+ return false;
+
+ if (T == PT_Ptr || T == PT_FnPtr) {
+ if (!this->emitDecPtr(E))
+ return false;
+
+ return DiscardResult ? this->emitPopPtr(E) : true;
+ }
+
+ if (T == PT_Float) {
+ return DiscardResult ? this->emitDecfPop(getRoundingMode(E), E)
+ : this->emitDecf(getRoundingMode(E), E);
+ }
+
+ return DiscardResult ? this->emitDecPop(*T, E) : this->emitDec(*T, E);
+ }
+ case UO_PreInc: { // ++x
+ if (!Ctx.getLangOpts().CPlusPlus14)
+ return this->emitInvalid(E);
+ if (!T)
+ return this->emitError(E);
+
+ if (!this->visit(SubExpr))
+ return false;
+
+ if (T == PT_Ptr || T == PT_FnPtr) {
+ if (!this->emitLoadPtr(E))
+ return false;
+ if (!this->emitConstUint8(1, E))
+ return false;
+ if (!this->emitAddOffsetUint8(E))
+ return false;
+ return DiscardResult ? this->emitStorePopPtr(E) : this->emitStorePtr(E);
+ }
+
+ // Post-inc and pre-inc are the same if the value is to be discarded.
+ if (DiscardResult) {
+ if (T == PT_Float)
+ return this->emitIncfPop(getRoundingMode(E), E);
+ return this->emitIncPop(*T, E);
+ }
+
+ if (T == PT_Float) {
+ const auto &TargetSemantics = Ctx.getFloatSemantics(E->getType());
+ if (!this->emitLoadFloat(E))
+ return false;
+ if (!this->emitConstFloat(llvm::APFloat(TargetSemantics, 1), E))
+ return false;
+ if (!this->emitAddf(getRoundingMode(E), E))
+ return false;
+ if (!this->emitStoreFloat(E))
+ return false;
+ } else {
+ assert(isIntegralType(*T));
+ if (!this->emitLoad(*T, E))
+ return false;
+ if (!this->emitConst(1, E))
+ return false;
+ if (!this->emitAdd(*T, E))
+ return false;
+ if (!this->emitStore(*T, E))
+ return false;
+ }
+ return E->isGLValue() || this->emitLoadPop(*T, E);
+ }
+ case UO_PreDec: { // --x
+ if (!Ctx.getLangOpts().CPlusPlus14)
+ return this->emitInvalid(E);
+ if (!T)
+ return this->emitError(E);
+
+ if (!this->visit(SubExpr))
+ return false;
+
+ if (T == PT_Ptr || T == PT_FnPtr) {
+ if (!this->emitLoadPtr(E))
+ return false;
+ if (!this->emitConstUint8(1, E))
+ return false;
+ if (!this->emitSubOffsetUint8(E))
+ return false;
+ return DiscardResult ? this->emitStorePopPtr(E) : this->emitStorePtr(E);
+ }
+
+ // Post-dec and pre-dec are the same if the value is to be discarded.
+ if (DiscardResult) {
+ if (T == PT_Float)
+ return this->emitDecfPop(getRoundingMode(E), E);
+ return this->emitDecPop(*T, E);
+ }
+
+ if (T == PT_Float) {
+ const auto &TargetSemantics = Ctx.getFloatSemantics(E->getType());
+ if (!this->emitLoadFloat(E))
+ return false;
+ if (!this->emitConstFloat(llvm::APFloat(TargetSemantics, 1), E))
+ return false;
+ if (!this->emitSubf(getRoundingMode(E), E))
+ return false;
+ if (!this->emitStoreFloat(E))
+ return false;
+ } else {
+ assert(isIntegralType(*T));
+ if (!this->emitLoad(*T, E))
+ return false;
+ if (!this->emitConst(1, E))
+ return false;
+ if (!this->emitSub(*T, E))
+ return false;
+ if (!this->emitStore(*T, E))
+ return false;
+ }
+ return E->isGLValue() || this->emitLoadPop(*T, E);
+ }
+ case UO_LNot: // !x
+ if (!T)
+ return this->emitError(E);
+
+ if (DiscardResult)
+ return this->discard(SubExpr);
+
+ if (!this->visitBool(SubExpr))
+ return false;
+
+ if (!this->emitInvBool(E))
+ return false;
+
+ if (PrimType ET = classifyPrim(E->getType()); ET != PT_Bool)
+ return this->emitCast(PT_Bool, ET, E);
+ return true;
+ case UO_Minus: // -x
+ if (!T)
+ return this->emitError(E);
+
+ if (!this->visit(SubExpr))
+ return false;
+ return DiscardResult ? this->emitPop(*T, E) : this->emitNeg(*T, E);
+ case UO_Plus: // +x
+ if (!T)
+ return this->emitError(E);
+
+ if (!this->visit(SubExpr)) // noop
+ return false;
+ return DiscardResult ? this->emitPop(*T, E) : true;
+ case UO_AddrOf: // &x
+ if (E->getType()->isMemberPointerType()) {
+ // C++11 [expr.unary.op]p3 has very strict rules on how the address of a
+ // member can be formed.
+ return this->emitGetMemberPtr(cast<DeclRefExpr>(SubExpr)->getDecl(), E);
+ }
+ // We should already have a pointer when we get here.
+ return this->delegate(SubExpr);
+ case UO_Deref: // *x
+ if (DiscardResult)
+ return this->discard(SubExpr);
+ return this->visit(SubExpr);
+ case UO_Not: // ~x
+ if (!T)
+ return this->emitError(E);
+
+ if (!this->visit(SubExpr))
+ return false;
+ return DiscardResult ? this->emitPop(*T, E) : this->emitComp(*T, E);
+ case UO_Real: // __real x
+ assert(T);
+ return this->delegate(SubExpr);
+ case UO_Imag: { // __imag x
+ assert(T);
+ if (!this->discard(SubExpr))
+ return false;
+ return this->visitZeroInitializer(*T, SubExpr->getType(), SubExpr);
+ }
+ case UO_Extension:
+ return this->delegate(SubExpr);
+ case UO_Coawait:
+ assert(false && "Unhandled opcode");
+ }
+
+ return false;
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::VisitComplexUnaryOperator(const UnaryOperator *E) {
+ const Expr *SubExpr = E->getSubExpr();
+ assert(SubExpr->getType()->isAnyComplexType());
+
+ if (DiscardResult)
+ return this->discard(SubExpr);
+
+ std::optional<PrimType> ResT = classify(E);
+ auto prepareResult = [=]() -> bool {
+ if (!ResT && !Initializing) {
+ std::optional<unsigned> LocalIndex = allocateLocal(SubExpr);
+ if (!LocalIndex)
+ return false;
+ return this->emitGetPtrLocal(*LocalIndex, E);
+ }
+
+ return true;
+ };
+
+ // The offset of the temporary, if we created one.
+ unsigned SubExprOffset = ~0u;
+ auto createTemp = [=, &SubExprOffset]() -> bool {
+ SubExprOffset = this->allocateLocalPrimitive(SubExpr, PT_Ptr, true, false);
+ if (!this->visit(SubExpr))
+ return false;
+ return this->emitSetLocal(PT_Ptr, SubExprOffset, E);
+ };
+
+ PrimType ElemT = classifyComplexElementType(SubExpr->getType());
+ auto getElem = [=](unsigned Offset, unsigned Index) -> bool {
+ if (!this->emitGetLocal(PT_Ptr, Offset, E))
+ return false;
+ return this->emitArrayElemPop(ElemT, Index, E);
+ };
+
+ switch (E->getOpcode()) {
+ case UO_Minus:
+ if (!prepareResult())
+ return false;
+ if (!createTemp())
+ return false;
+ for (unsigned I = 0; I != 2; ++I) {
+ if (!getElem(SubExprOffset, I))
+ return false;
+ if (!this->emitNeg(ElemT, E))
+ return false;
+ if (!this->emitInitElem(ElemT, I, E))
+ return false;
+ }
+ break;
+
+ case UO_Plus: // +x
+ case UO_AddrOf: // &x
+ case UO_Deref: // *x
+ return this->delegate(SubExpr);
+
+ case UO_LNot:
+ if (!this->visit(SubExpr))
+ return false;
+ if (!this->emitComplexBoolCast(SubExpr))
+ return false;
+ if (!this->emitInvBool(E))
+ return false;
+ if (PrimType ET = classifyPrim(E->getType()); ET != PT_Bool)
+ return this->emitCast(PT_Bool, ET, E);
+ return true;
+
+ case UO_Real:
+ return this->emitComplexReal(SubExpr);
+
+ case UO_Imag:
+ if (!this->visit(SubExpr))
+ return false;
+
+ if (SubExpr->isLValue()) {
+ if (!this->emitConstUint8(1, E))
+ return false;
+ return this->emitArrayElemPtrPopUint8(E);
+ }
+
+ // Since our _Complex implementation does not map to a primitive type,
+ // we sometimes have to do the lvalue-to-rvalue conversion here manually.
+ return this->emitArrayElemPop(classifyPrim(E->getType()), 1, E);
+
+ case UO_Not: // ~x
+ if (!this->visit(SubExpr))
+ return false;
+ // Negate the imaginary component.
+ if (!this->emitArrayElem(ElemT, 1, E))
+ return false;
+ if (!this->emitNeg(ElemT, E))
+ return false;
+ if (!this->emitInitElem(ElemT, 1, E))
+ return false;
+ return DiscardResult ? this->emitPopPtr(E) : true;
+
+ case UO_Extension:
+ return this->delegate(SubExpr);
+
+ default:
+ return this->emitInvalid(E);
+ }
+
+ return true;
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::visitDeclRef(const ValueDecl *D, const Expr *E) {
+ if (DiscardResult)
+ return true;
+
+ if (const auto *ECD = dyn_cast<EnumConstantDecl>(D)) {
+ return this->emitConst(ECD->getInitVal(), E);
+ } else if (const auto *BD = dyn_cast<BindingDecl>(D)) {
+ return this->visit(BD->getBinding());
+ } else if (const auto *FuncDecl = dyn_cast<FunctionDecl>(D)) {
+ const Function *F = getFunction(FuncDecl);
+ return F && this->emitGetFnPtr(F, E);
+ } else if (const auto *TPOD = dyn_cast<TemplateParamObjectDecl>(D)) {
+ if (std::optional<unsigned> Index = P.getOrCreateGlobal(D)) {
+ if (!this->emitGetPtrGlobal(*Index, E))
+ return false;
+ if (std::optional<PrimType> T = classify(E->getType())) {
+ if (!this->visitAPValue(TPOD->getValue(), *T, E))
+ return false;
+ return this->emitInitGlobal(*T, *Index, E);
+ }
+ return this->visitAPValueInitializer(TPOD->getValue(), E);
+ }
+ return false;
+ }
+
+ // References are implemented via pointers, so when we see a DeclRefExpr
+ // pointing to a reference, we need to get its value directly (i.e. the
+ // pointer to the actual value) instead of a pointer to the pointer to the
+ // value.
+ bool IsReference = D->getType()->isReferenceType();
+
+ // Check for local/global variables and parameters.
+ if (auto It = Locals.find(D); It != Locals.end()) {
+ const unsigned Offset = It->second.Offset;
+ if (IsReference)
+ return this->emitGetLocal(PT_Ptr, Offset, E);
+ return this->emitGetPtrLocal(Offset, E);
+ } else if (auto GlobalIndex = P.getGlobal(D)) {
+ if (IsReference) {
+ if (!Ctx.getLangOpts().CPlusPlus11)
+ return this->emitGetGlobal(classifyPrim(E), *GlobalIndex, E);
+ return this->emitGetGlobalUnchecked(classifyPrim(E), *GlobalIndex, E);
+ }
+
+ return this->emitGetPtrGlobal(*GlobalIndex, E);
+ } else if (const auto *PVD = dyn_cast<ParmVarDecl>(D)) {
+ if (auto It = this->Params.find(PVD); It != this->Params.end()) {
+ if (IsReference || !It->second.IsPtr)
+ return this->emitGetParam(classifyPrim(E), It->second.Offset, E);
+
+ return this->emitGetPtrParam(It->second.Offset, E);
+ }
+ }
+
+ // In case we need to re-visit a declaration.
+ auto revisit = [&](const VarDecl *VD) -> bool {
+ auto VarState = this->visitDecl(VD);
+
+ if (VarState.notCreated())
+ return true;
+ if (!VarState)
+ return false;
+ // Retry.
+ return this->visitDeclRef(D, E);
+ };
+
+ // Handle lambda captures.
+ if (auto It = this->LambdaCaptures.find(D);
+ It != this->LambdaCaptures.end()) {
+ auto [Offset, IsPtr] = It->second;
+
+ if (IsPtr)
+ return this->emitGetThisFieldPtr(Offset, E);
+ return this->emitGetPtrThisField(Offset, E);
+ } else if (const auto *DRE = dyn_cast<DeclRefExpr>(E);
+ DRE && DRE->refersToEnclosingVariableOrCapture()) {
+ if (const auto *VD = dyn_cast<VarDecl>(D); VD && VD->isInitCapture())
+ return revisit(VD);
+ }
+
+ if (D != InitializingDecl) {
+ // Try to lazily visit (or emit dummy pointers for) declarations
+ // we haven't seen yet.
+ if (Ctx.getLangOpts().CPlusPlus) {
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ const auto typeShouldBeVisited = [&](QualType T) -> bool {
+ if (T.isConstant(Ctx.getASTContext()))
+ return true;
+ if (const auto *RT = T->getAs<ReferenceType>())
+ return RT->getPointeeType().isConstQualified();
+ return false;
+ };
+
+ // Visit local const variables like normal.
+ if ((VD->hasGlobalStorage() || VD->isLocalVarDecl() ||
+ VD->isStaticDataMember()) &&
+ typeShouldBeVisited(VD->getType()))
+ return revisit(VD);
+ }
+ } else {
+ if (const auto *VD = dyn_cast<VarDecl>(D);
+ VD && VD->getAnyInitializer() &&
+ VD->getType().isConstant(Ctx.getASTContext()) && !VD->isWeak())
+ return revisit(VD);
+ }
+ }
+
+ if (std::optional<unsigned> I = P.getOrCreateDummy(D)) {
+ if (!this->emitGetPtrGlobal(*I, E))
+ return false;
+ if (E->getType()->isVoidType())
+ return true;
+ // Convert the dummy pointer to another pointer type if we have to.
+ if (PrimType PT = classifyPrim(E); PT != PT_Ptr) {
+ if (isPtrType(PT))
+ return this->emitDecayPtr(PT_Ptr, PT, E);
+ return false;
+ }
+ return true;
+ }
+
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
+ return this->emitInvalidDeclRef(DRE, E);
+ return false;
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::VisitDeclRefExpr(const DeclRefExpr *E) {
+ const auto *D = E->getDecl();
+ return this->visitDeclRef(D, E);
+}
+
+template <class Emitter> void Compiler<Emitter>::emitCleanup() {
+ for (VariableScope<Emitter> *C = VarScope; C; C = C->getParent())
+ C->emitDestruction();
+}
+
+template <class Emitter>
+unsigned Compiler<Emitter>::collectBaseOffset(const QualType BaseType,
+ const QualType DerivedType) {
+ const auto extractRecordDecl = [](QualType Ty) -> const CXXRecordDecl * {
+ if (const auto *PT = dyn_cast<PointerType>(Ty))
+ return PT->getPointeeType()->getAsCXXRecordDecl();
+ return Ty->getAsCXXRecordDecl();
+ };
+ const CXXRecordDecl *BaseDecl = extractRecordDecl(BaseType);
+ const CXXRecordDecl *DerivedDecl = extractRecordDecl(DerivedType);
+
+ return Ctx.collectBaseOffset(BaseDecl, DerivedDecl);
+}
+
+/// Emit casts from a PrimType to another PrimType.
+template <class Emitter>
+bool Compiler<Emitter>::emitPrimCast(PrimType FromT, PrimType ToT,
+ QualType ToQT, const Expr *E) {
+
+ if (FromT == PT_Float) {
+ // Floating to floating.
+ if (ToT == PT_Float) {
+ const llvm::fltSemantics *ToSem = &Ctx.getFloatSemantics(ToQT);
+ return this->emitCastFP(ToSem, getRoundingMode(E), E);
+ }
+
+ if (ToT == PT_IntAP)
+ return this->emitCastFloatingIntegralAP(Ctx.getBitWidth(ToQT), E);
+ if (ToT == PT_IntAPS)
+ return this->emitCastFloatingIntegralAPS(Ctx.getBitWidth(ToQT), E);
+
+ // Float to integral.
+ if (isIntegralType(ToT) || ToT == PT_Bool)
+ return this->emitCastFloatingIntegral(ToT, E);
+ }
+
+ if (isIntegralType(FromT) || FromT == PT_Bool) {
+ if (ToT == PT_IntAP)
+ return this->emitCastAP(FromT, Ctx.getBitWidth(ToQT), E);
+ if (ToT == PT_IntAPS)
+ return this->emitCastAPS(FromT, Ctx.getBitWidth(ToQT), E);
+
+ // Integral to integral.
+ if (isIntegralType(ToT) || ToT == PT_Bool)
+ return FromT != ToT ? this->emitCast(FromT, ToT, E) : true;
+
+ if (ToT == PT_Float) {
+ // Integral to floating.
+ const llvm::fltSemantics *ToSem = &Ctx.getFloatSemantics(ToQT);
+ return this->emitCastIntegralFloating(FromT, ToSem, getRoundingMode(E),
+ E);
+ }
+ }
+
+ return false;
+}
+
+/// Emits __real(SubExpr)
+template <class Emitter>
+bool Compiler<Emitter>::emitComplexReal(const Expr *SubExpr) {
+ assert(SubExpr->getType()->isAnyComplexType());
+
+ if (DiscardResult)
+ return this->discard(SubExpr);
+
+ if (!this->visit(SubExpr))
+ return false;
+ if (SubExpr->isLValue()) {
+ if (!this->emitConstUint8(0, SubExpr))
+ return false;
+ return this->emitArrayElemPtrPopUint8(SubExpr);
+ }
+
+ // Rvalue, load the actual element.
+ return this->emitArrayElemPop(classifyComplexElementType(SubExpr->getType()),
+ 0, SubExpr);
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::emitComplexBoolCast(const Expr *E) {
+ assert(!DiscardResult);
+ PrimType ElemT = classifyComplexElementType(E->getType());
+ // We emit the expression (__real(E) != 0 || __imag(E) != 0)
+  // For us, that means (bool)E[0] || (bool)E[1]
+ if (!this->emitArrayElem(ElemT, 0, E))
+ return false;
+ if (ElemT == PT_Float) {
+ if (!this->emitCastFloatingIntegral(PT_Bool, E))
+ return false;
+ } else {
+ if (!this->emitCast(ElemT, PT_Bool, E))
+ return false;
+ }
+
+ // We now have the bool value of E[0] on the stack.
+ LabelTy LabelTrue = this->getLabel();
+ if (!this->jumpTrue(LabelTrue))
+ return false;
+
+ if (!this->emitArrayElemPop(ElemT, 1, E))
+ return false;
+ if (ElemT == PT_Float) {
+ if (!this->emitCastFloatingIntegral(PT_Bool, E))
+ return false;
+ } else {
+ if (!this->emitCast(ElemT, PT_Bool, E))
+ return false;
+ }
+ // Leave the boolean value of E[1] on the stack.
+ LabelTy EndLabel = this->getLabel();
+ this->jump(EndLabel);
+
+ this->emitLabel(LabelTrue);
+ if (!this->emitPopPtr(E))
+ return false;
+ if (!this->emitConstBool(true, E))
+ return false;
+
+ this->fallthrough(EndLabel);
+ this->emitLabel(EndLabel);
+
+ return true;
+}
+
+template <class Emitter>
+bool Compiler<Emitter>::emitComplexComparison(const Expr *LHS, const Expr *RHS,
+ const BinaryOperator *E) {
+ assert(E->isComparisonOp());
+ assert(!Initializing);
+ assert(!DiscardResult);
+
+ PrimType ElemT;
+ bool LHSIsComplex;
+ unsigned LHSOffset;
+ if (LHS->getType()->isAnyComplexType()) {
+ LHSIsComplex = true;
+ ElemT = classifyComplexElementType(LHS->getType());
+ LHSOffset = allocateLocalPrimitive(LHS, PT_Ptr, /*IsConst=*/true,
+ /*IsExtended=*/false);
+ if (!this->visit(LHS))
+ return false;
+ if (!this->emitSetLocal(PT_Ptr, LHSOffset, E))
+ return false;
+ } else {
+ LHSIsComplex = false;
+ PrimType LHST = classifyPrim(LHS->getType());
+ LHSOffset = this->allocateLocalPrimitive(LHS, LHST, true, false);
+ if (!this->visit(LHS))
+ return false;
+ if (!this->emitSetLocal(LHST, LHSOffset, E))
+ return false;
+ }
+
+ bool RHSIsComplex;
+ unsigned RHSOffset;
+ if (RHS->getType()->isAnyComplexType()) {
+ RHSIsComplex = true;
+ ElemT = classifyComplexElementType(RHS->getType());
+ RHSOffset = allocateLocalPrimitive(RHS, PT_Ptr, /*IsConst=*/true,
+ /*IsExtended=*/false);
+ if (!this->visit(RHS))
+ return false;
+ if (!this->emitSetLocal(PT_Ptr, RHSOffset, E))
+ return false;
+ } else {
+ RHSIsComplex = false;
+ PrimType RHST = classifyPrim(RHS->getType());
+ RHSOffset = this->allocateLocalPrimitive(RHS, RHST, true, false);
+ if (!this->visit(RHS))
+ return false;
+ if (!this->emitSetLocal(RHST, RHSOffset, E))
+ return false;
+ }
+
+ auto getElem = [&](unsigned LocalOffset, unsigned Index,
+ bool IsComplex) -> bool {
+ if (IsComplex) {
+ if (!this->emitGetLocal(PT_Ptr, LocalOffset, E))
+ return false;
+ return this->emitArrayElemPop(ElemT, Index, E);
+ }
+ return this->emitGetLocal(ElemT, LocalOffset, E);
+ };
+
+ for (unsigned I = 0; I != 2; ++I) {
+ // Get both values.
+ if (!getElem(LHSOffset, I, LHSIsComplex))
+ return false;
+ if (!getElem(RHSOffset, I, RHSIsComplex))
+ return false;
+ // And compare them.
+ if (!this->emitEQ(ElemT, E))
+ return false;
+
+ if (!this->emitCastBoolUint8(E))
+ return false;
+ }
+
+ // We now have two bool values on the stack. Compare those.
+ if (!this->emitAddUint8(E))
+ return false;
+ if (!this->emitConstUint8(2, E))
+ return false;
+
+ if (E->getOpcode() == BO_EQ) {
+ if (!this->emitEQUint8(E))
+ return false;
+ } else if (E->getOpcode() == BO_NE) {
+ if (!this->emitNEUint8(E))
+ return false;
+ } else
+ return false;
+
+ // In C, this returns an int.
+ if (PrimType ResT = classifyPrim(E->getType()); ResT != PT_Bool)
+ return this->emitCast(PT_Bool, ResT, E);
+ return true;
+}
+
+/// When calling this, we have a pointer to the local-to-destroy
+/// on the stack.
+/// Emit destruction of record types (or arrays of record types).
+template <class Emitter>
+bool Compiler<Emitter>::emitRecordDestruction(const Record *R) {
+ assert(R);
+ // First, destroy all fields.
+ for (const Record::Field &Field : llvm::reverse(R->fields())) {
+ const Descriptor *D = Field.Desc;
+ if (!D->isPrimitive() && !D->isPrimitiveArray()) {
+ if (!this->emitGetPtrField(Field.Offset, SourceInfo{}))
+ return false;
+ if (!this->emitDestruction(D))
+ return false;
+ if (!this->emitPopPtr(SourceInfo{}))
+ return false;
+ }
+ }
+
+ // FIXME: Unions need to be handled differently here. We don't want to
+ // call the destructor of its members.
+
+ // Now emit the destructor and recurse into base classes.
+ if (const CXXDestructorDecl *Dtor = R->getDestructor();
+ Dtor && !Dtor->isTrivial()) {
+ const Function *DtorFunc = getFunction(Dtor);
+ if (!DtorFunc)
+ return false;
+ assert(DtorFunc->hasThisPointer());
+ assert(DtorFunc->getNumParams() == 1);
+ if (!this->emitDupPtr(SourceInfo{}))
+ return false;
+ if (!this->emitCall(DtorFunc, 0, SourceInfo{}))
+ return false;
+ }
+
+ for (const Record::Base &Base : llvm::reverse(R->bases())) {
+ if (!this->emitGetPtrBase(Base.Offset, SourceInfo{}))
+ return false;
+ if (!this->emitRecordDestruction(Base.R))
+ return false;
+ if (!this->emitPopPtr(SourceInfo{}))
+ return false;
+ }
+
+ // FIXME: Virtual bases.
+ return true;
+}
+/// When calling this, we have a pointer to the local-to-destroy
+/// on the stack.
+/// Emit destruction of record types (or arrays of record types).
+template <class Emitter>
+bool Compiler<Emitter>::emitDestruction(const Descriptor *Desc) {
+ assert(Desc);
+ assert(!Desc->isPrimitive());
+ assert(!Desc->isPrimitiveArray());
+
+ // Arrays.
+ if (Desc->isArray()) {
+ const Descriptor *ElemDesc = Desc->ElemDesc;
+ assert(ElemDesc);
+
+ // Don't need to do anything for these.
+ if (ElemDesc->isPrimitiveArray())
+ return true;
+
+ // If this is an array of record types, check if we need
+ // to call the element destructors at all. If not, try
+ // to save the work.
+ if (const Record *ElemRecord = ElemDesc->ElemRecord) {
+ if (const CXXDestructorDecl *Dtor = ElemRecord->getDestructor();
+ !Dtor || Dtor->isTrivial())
+ return true;
+ }
+
+ for (ssize_t I = Desc->getNumElems() - 1; I >= 0; --I) {
+ if (!this->emitConstUint64(I, SourceInfo{}))
+ return false;
+ if (!this->emitArrayElemPtrUint64(SourceInfo{}))
+ return false;
+ if (!this->emitDestruction(ElemDesc))
+ return false;
+ if (!this->emitPopPtr(SourceInfo{}))
+ return false;
+ }
+ return true;
+ }
+
+ assert(Desc->ElemRecord);
+ return this->emitRecordDestruction(Desc->ElemRecord);
+}
+
+namespace clang {
+namespace interp {
+
+template class Compiler<ByteCodeEmitter>;
+template class Compiler<EvalEmitter>;
+
+} // namespace interp
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h b/contrib/llvm-project/clang/lib/AST/Interp/Compiler.h
index df4cb736299c..084f5aef25f8 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Compiler.h
@@ -1,4 +1,4 @@
-//===--- ByteCodeExprGen.h - Code generator for expressions -----*- C++ -*-===//
+//===--- Compiler.h - Code generator for expressions -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -30,21 +30,78 @@ namespace interp {
template <class Emitter> class LocalScope;
template <class Emitter> class DestructorScope;
-template <class Emitter> class RecordScope;
template <class Emitter> class VariableScope;
template <class Emitter> class DeclScope;
+template <class Emitter> class InitLinkScope;
+template <class Emitter> class InitStackScope;
template <class Emitter> class OptionScope;
template <class Emitter> class ArrayIndexScope;
template <class Emitter> class SourceLocScope;
+template <class Emitter> class LoopScope;
+template <class Emitter> class LabelScope;
+template <class Emitter> class SwitchScope;
+template <class Emitter> class StmtExprScope;
+
+template <class Emitter> class Compiler;
+struct InitLink {
+public:
+ enum {
+ K_This = 0,
+ K_Field = 1,
+ K_Temp = 2,
+ K_Decl = 3,
+ };
+
+ static InitLink This() { return InitLink{K_This}; }
+ static InitLink Field(unsigned Offset) {
+ InitLink IL{K_Field};
+ IL.Offset = Offset;
+ return IL;
+ }
+ static InitLink Temp(unsigned Offset) {
+ InitLink IL{K_Temp};
+ IL.Offset = Offset;
+ return IL;
+ }
+ static InitLink Decl(const ValueDecl *D) {
+ InitLink IL{K_Decl};
+ IL.D = D;
+ return IL;
+ }
+
+ InitLink(uint8_t Kind) : Kind(Kind) {}
+ template <class Emitter>
+ bool emit(Compiler<Emitter> *Ctx, const Expr *E) const;
+
+ uint32_t Kind;
+ union {
+ unsigned Offset;
+ const ValueDecl *D;
+ };
+};
+
+/// State encapsulating whether the variable creation has been successful,
+/// unsuccessful, or no variable has been created at all.
+struct VarCreationState {
+ std::optional<bool> S = std::nullopt;
+ VarCreationState() = default;
+ VarCreationState(bool b) : S(b) {}
+ static VarCreationState NotCreated() { return VarCreationState(); }
+
+ operator bool() const { return S && *S; }
+ bool notCreated() const { return !S; }
+};
/// Compilation context for expressions.
template <class Emitter>
-class ByteCodeExprGen : public ConstStmtVisitor<ByteCodeExprGen<Emitter>, bool>,
- public Emitter {
+class Compiler : public ConstStmtVisitor<Compiler<Emitter>, bool>,
+ public Emitter {
protected:
// Aliases for types defined in the emitter.
using LabelTy = typename Emitter::LabelTy;
using AddrTy = typename Emitter::AddrTy;
+ using OptLabelTy = std::optional<LabelTy>;
+ using CaseMap = llvm::DenseMap<const SwitchCase *, LabelTy>;
/// Current compilation context.
Context &Ctx;
@@ -54,13 +111,14 @@ protected:
public:
/// Initializes the compiler and the backend emitter.
template <typename... Tys>
- ByteCodeExprGen(Context &Ctx, Program &P, Tys &&... Args)
+ Compiler(Context &Ctx, Program &P, Tys &&...Args)
: Emitter(Ctx, P, Args...), Ctx(Ctx), P(P) {}
- // Expression visitors - result returned on interp stack.
+ // Expressions.
bool VisitCastExpr(const CastExpr *E);
bool VisitIntegerLiteral(const IntegerLiteral *E);
bool VisitFloatingLiteral(const FloatingLiteral *E);
+ bool VisitImaginaryLiteral(const ImaginaryLiteral *E);
bool VisitParenExpr(const ParenExpr *E);
bool VisitBinaryOperator(const BinaryOperator *E);
bool VisitLogicalBinOp(const BinaryOperator *E);
@@ -75,6 +133,7 @@ public:
bool VisitGNUNullExpr(const GNUNullExpr *E);
bool VisitCXXThisExpr(const CXXThisExpr *E);
bool VisitUnaryOperator(const UnaryOperator *E);
+ bool VisitComplexUnaryOperator(const UnaryOperator *E);
bool VisitDeclRefExpr(const DeclRefExpr *E);
bool VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E);
bool VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E);
@@ -89,6 +148,9 @@ public:
bool VisitOpaqueValueExpr(const OpaqueValueExpr *E);
bool VisitAbstractConditionalOperator(const AbstractConditionalOperator *E);
bool VisitStringLiteral(const StringLiteral *E);
+ bool VisitObjCStringLiteral(const ObjCStringLiteral *E);
+ bool VisitObjCEncodeExpr(const ObjCEncodeExpr *E);
+ bool VisitSYCLUniqueStableNameExpr(const SYCLUniqueStableNameExpr *E);
bool VisitCharacterLiteral(const CharacterLiteral *E);
bool VisitCompoundAssignOperator(const CompoundAssignOperator *E);
bool VisitFloatCompoundAssignOperator(const CompoundAssignOperator *E);
@@ -98,6 +160,7 @@ public:
bool VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *E);
bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
bool VisitTypeTraitExpr(const TypeTraitExpr *E);
+ bool VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E);
bool VisitLambdaExpr(const LambdaExpr *E);
bool VisitPredefinedExpr(const PredefinedExpr *E);
bool VisitCXXThrowExpr(const CXXThrowExpr *E);
@@ -108,10 +171,53 @@ public:
bool VisitOffsetOfExpr(const OffsetOfExpr *E);
bool VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E);
bool VisitSizeOfPackExpr(const SizeOfPackExpr *E);
+ bool VisitGenericSelectionExpr(const GenericSelectionExpr *E);
+ bool VisitChooseExpr(const ChooseExpr *E);
+ bool VisitEmbedExpr(const EmbedExpr *E);
+ bool VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E);
+ bool VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
+ bool VisitExpressionTraitExpr(const ExpressionTraitExpr *E);
+ bool VisitCXXUuidofExpr(const CXXUuidofExpr *E);
+ bool VisitRequiresExpr(const RequiresExpr *E);
+ bool VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E);
+ bool VisitCXXRewrittenBinaryOperator(const CXXRewrittenBinaryOperator *E);
+ bool VisitPseudoObjectExpr(const PseudoObjectExpr *E);
+ bool VisitPackIndexingExpr(const PackIndexingExpr *E);
+ bool VisitRecoveryExpr(const RecoveryExpr *E);
+ bool VisitAddrLabelExpr(const AddrLabelExpr *E);
+ bool VisitConvertVectorExpr(const ConvertVectorExpr *E);
+ bool VisitShuffleVectorExpr(const ShuffleVectorExpr *E);
+ bool VisitExtVectorElementExpr(const ExtVectorElementExpr *E);
+ bool VisitObjCBoxedExpr(const ObjCBoxedExpr *E);
+ bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E);
+ bool VisitStmtExpr(const StmtExpr *E);
+ bool VisitCXXNewExpr(const CXXNewExpr *E);
+ bool VisitCXXDeleteExpr(const CXXDeleteExpr *E);
+
+ // Statements.
+ bool visitCompoundStmt(const CompoundStmt *S);
+ bool visitLoopBody(const Stmt *S);
+ bool visitDeclStmt(const DeclStmt *DS);
+ bool visitReturnStmt(const ReturnStmt *RS);
+ bool visitIfStmt(const IfStmt *IS);
+ bool visitWhileStmt(const WhileStmt *S);
+ bool visitDoStmt(const DoStmt *S);
+ bool visitForStmt(const ForStmt *S);
+ bool visitCXXForRangeStmt(const CXXForRangeStmt *S);
+ bool visitBreakStmt(const BreakStmt *S);
+ bool visitContinueStmt(const ContinueStmt *S);
+ bool visitSwitchStmt(const SwitchStmt *S);
+ bool visitCaseStmt(const CaseStmt *S);
+ bool visitDefaultStmt(const DefaultStmt *S);
+ bool visitAttributedStmt(const AttributedStmt *S);
+ bool visitCXXTryStmt(const CXXTryStmt *S);
protected:
+ bool visitStmt(const Stmt *S);
bool visitExpr(const Expr *E) override;
- bool visitDecl(const VarDecl *VD) override;
+ bool visitFunc(const FunctionDecl *F) override;
+
+ bool visitDeclAndReturn(const VarDecl *VD, bool ConstantContext) override;
protected:
/// Emits scope cleanup instructions.
@@ -124,31 +230,31 @@ protected:
Record *getRecord(QualType Ty);
Record *getRecord(const RecordDecl *RD);
- // Returns a function for the given FunctionDecl.
- // If the function does not exist yet, it is compiled.
+ /// Returns a function for the given FunctionDecl.
+ /// If the function does not exist yet, it is compiled.
const Function *getFunction(const FunctionDecl *FD);
- /// Classifies a type.
std::optional<PrimType> classify(const Expr *E) const {
- if (E->isGLValue()) {
- if (E->getType()->isFunctionType())
- return PT_FnPtr;
- return PT_Ptr;
- }
-
- return classify(E->getType());
+ return Ctx.classify(E);
}
std::optional<PrimType> classify(QualType Ty) const {
return Ctx.classify(Ty);
}
- /// Classifies a known primitive type
+ /// Classifies a known primitive type.
PrimType classifyPrim(QualType Ty) const {
if (auto T = classify(Ty)) {
return *T;
}
llvm_unreachable("not a primitive type");
}
+ /// Classifies a known primitive expression.
+ PrimType classifyPrim(const Expr *E) const {
+ if (auto T = classify(E))
+ return *T;
+ llvm_unreachable("not a primitive type");
+ }
+
/// Evaluates an expression and places the result on the stack. If the
/// expression is of composite type, a local variable will be created
/// and a pointer to said variable will be placed on the stack.
@@ -163,49 +269,20 @@ protected:
/// Just pass evaluation on to \p E. This leaves all the parsing flags
/// intact.
bool delegate(const Expr *E);
-
/// Creates and initializes a variable from the given decl.
- bool visitVarDecl(const VarDecl *VD);
+ VarCreationState visitVarDecl(const VarDecl *VD, bool Toplevel = false);
+ VarCreationState visitDecl(const VarDecl *VD);
/// Visit an APValue.
bool visitAPValue(const APValue &Val, PrimType ValType, const Expr *E);
+ bool visitAPValueInitializer(const APValue &Val, const Expr *E);
+ /// Visit the given decl as if we have a reference to it.
+ bool visitDeclRef(const ValueDecl *D, const Expr *E);
/// Visits an expression and converts it to a boolean.
bool visitBool(const Expr *E);
- /// Visits an initializer for a local.
- bool visitLocalInitializer(const Expr *Init, unsigned I) {
- if (!this->emitGetPtrLocal(I, Init))
- return false;
-
- if (!visitInitializer(Init))
- return false;
-
- return this->emitPopPtr(Init);
- }
-
- /// Visits an initializer for a global.
- bool visitGlobalInitializer(const Expr *Init, unsigned I) {
- if (!this->emitGetPtrGlobal(I, Init))
- return false;
-
- if (!visitInitializer(Init))
- return false;
-
- return this->emitPopPtr(Init);
- }
-
- /// Visits a delegated initializer.
- bool visitThisInitializer(const Expr *I) {
- if (!this->emitThis(I))
- return false;
-
- if (!visitInitializer(I))
- return false;
-
- return this->emitPopPtr(I);
- }
-
- bool visitInitList(ArrayRef<const Expr *> Inits, const Expr *E);
+ bool visitInitList(ArrayRef<const Expr *> Inits, const Expr *ArrayFiller,
+ const Expr *E);
bool visitArrayElemInit(unsigned ElemIndex, const Expr *Init);
/// Creates a local primitive value.
@@ -213,45 +290,29 @@ protected:
bool IsExtended = false);
/// Allocates a space storing a local given its type.
- std::optional<unsigned> allocateLocal(DeclTy &&Decl, bool IsExtended = false);
+ std::optional<unsigned>
+ allocateLocal(DeclTy &&Decl, const ValueDecl *ExtendingDecl = nullptr);
private:
friend class VariableScope<Emitter>;
friend class LocalScope<Emitter>;
friend class DestructorScope<Emitter>;
- friend class RecordScope<Emitter>;
friend class DeclScope<Emitter>;
+ friend class InitLinkScope<Emitter>;
+ friend class InitStackScope<Emitter>;
friend class OptionScope<Emitter>;
friend class ArrayIndexScope<Emitter>;
friend class SourceLocScope<Emitter>;
+ friend struct InitLink;
+ friend class LoopScope<Emitter>;
+ friend class LabelScope<Emitter>;
+ friend class SwitchScope<Emitter>;
+ friend class StmtExprScope<Emitter>;
/// Emits a zero initializer.
bool visitZeroInitializer(PrimType T, QualType QT, const Expr *E);
bool visitZeroRecordInitializer(const Record *R, const Expr *E);
- enum class DerefKind {
- /// Value is read and pushed to stack.
- Read,
- /// Direct method generates a value which is written. Returns pointer.
- Write,
- /// Direct method receives the value, pushes mutated value. Returns pointer.
- ReadWrite,
- };
-
- /// Method to directly load a value. If the value can be fetched directly,
- /// the direct handler is called. Otherwise, a pointer is left on the stack
- /// and the indirect handler is expected to operate on that.
- bool dereference(const Expr *LV, DerefKind AK,
- llvm::function_ref<bool(PrimType)> Direct,
- llvm::function_ref<bool(PrimType)> Indirect);
- bool dereferenceParam(const Expr *LV, PrimType T, const ParmVarDecl *PD,
- DerefKind AK,
- llvm::function_ref<bool(PrimType)> Direct,
- llvm::function_ref<bool(PrimType)> Indirect);
- bool dereferenceVar(const Expr *LV, PrimType T, const VarDecl *PD,
- DerefKind AK, llvm::function_ref<bool(PrimType)> Direct,
- llvm::function_ref<bool(PrimType)> Indirect);
-
/// Emits an APSInt constant.
bool emitConst(const llvm::APSInt &Value, PrimType Ty, const Expr *E);
bool emitConst(const llvm::APSInt &Value, const Expr *E);
@@ -263,15 +324,6 @@ private:
template <typename T> bool emitConst(T Value, PrimType Ty, const Expr *E);
template <typename T> bool emitConst(T Value, const Expr *E);
- /// Returns the CXXRecordDecl for the type of the given expression,
- /// or nullptr if no such decl exists.
- const CXXRecordDecl *getRecordDecl(const Expr *E) const {
- QualType T = E->getType();
- if (const auto *RD = T->getPointeeCXXRecordDecl())
- return RD;
- return T->getAsCXXRecordDecl();
- }
-
llvm::RoundingMode getRoundingMode(const Expr *E) const {
FPOptions FPO = E->getFPFeaturesInEffect(Ctx.getLangOpts());
@@ -282,19 +334,24 @@ private:
}
bool emitPrimCast(PrimType FromT, PrimType ToT, QualType ToQT, const Expr *E);
- std::optional<PrimType> classifyComplexElementType(QualType T) const {
+ PrimType classifyComplexElementType(QualType T) const {
assert(T->isAnyComplexType());
QualType ElemType = T->getAs<ComplexType>()->getElementType();
- return this->classify(ElemType);
+ return *this->classify(ElemType);
}
bool emitComplexReal(const Expr *SubExpr);
+ bool emitComplexBoolCast(const Expr *E);
+ bool emitComplexComparison(const Expr *LHS, const Expr *RHS,
+ const BinaryOperator *E);
- bool emitRecordDestruction(const Descriptor *Desc);
- unsigned collectBaseOffset(const RecordType *BaseType,
- const RecordType *DerivedType);
+ bool emitRecordDestruction(const Record *R);
+ bool emitDestruction(const Descriptor *Desc);
+ unsigned collectBaseOffset(const QualType BaseType,
+ const QualType DerivedType);
+ bool emitLambdaStaticInvokerBody(const CXXMethodDecl *MD);
protected:
/// Variable to storage mapping.
@@ -315,22 +372,41 @@ protected:
/// Flag indicating if return value is to be discarded.
bool DiscardResult = false;
+ bool InStmtExpr = false;
+
/// Flag inidicating if we're initializing an already created
/// variable. This is set in visitInitializer().
bool Initializing = false;
+ const ValueDecl *InitializingDecl = nullptr;
+
+ llvm::SmallVector<InitLink> InitStack;
+ bool InitStackActive = false;
/// Flag indicating if we're initializing a global variable.
bool GlobalDecl = false;
+
+ /// Type of the expression returned by the function.
+ std::optional<PrimType> ReturnType;
+
+ /// Switch case mapping.
+ CaseMap CaseLabels;
+
+ /// Point to break to.
+ OptLabelTy BreakLabel;
+ /// Point to continue to.
+ OptLabelTy ContinueLabel;
+ /// Default case label.
+ OptLabelTy DefaultLabel;
};
-extern template class ByteCodeExprGen<ByteCodeEmitter>;
-extern template class ByteCodeExprGen<EvalEmitter>;
+extern template class Compiler<ByteCodeEmitter>;
+extern template class Compiler<EvalEmitter>;
/// Scope chain managing the variable lifetimes.
template <class Emitter> class VariableScope {
public:
- VariableScope(ByteCodeExprGen<Emitter> *Ctx)
- : Ctx(Ctx), Parent(Ctx->VarScope) {
+ VariableScope(Compiler<Emitter> *Ctx, const ValueDecl *VD)
+ : Ctx(Ctx), Parent(Ctx->VarScope), ValDecl(VD) {
Ctx->VarScope = this;
}
@@ -353,36 +429,64 @@ public:
this->Parent->addExtended(Local);
}
+ void addExtended(const Scope::Local &Local, const ValueDecl *ExtendingDecl) {
+ // Walk up the chain of scopes until we find the one for ExtendingDecl.
+ // If there is no such scope, attach it to the parent one.
+ VariableScope *P = this;
+ while (P) {
+ if (P->ValDecl == ExtendingDecl) {
+ P->addLocal(Local);
+ return;
+ }
+ P = P->Parent;
+ if (!P)
+ break;
+ }
+
+ // Use the parent scope.
+ addExtended(Local);
+ }
+
virtual void emitDestruction() {}
- virtual void emitDestructors() {}
+ virtual bool emitDestructors() { return true; }
VariableScope *getParent() const { return Parent; }
protected:
- /// ByteCodeExprGen instance.
- ByteCodeExprGen<Emitter> *Ctx;
+ /// Compiler instance.
+ Compiler<Emitter> *Ctx;
/// Link to the parent scope.
VariableScope *Parent;
+ const ValueDecl *ValDecl = nullptr;
};
/// Generic scope for local variables.
template <class Emitter> class LocalScope : public VariableScope<Emitter> {
public:
- LocalScope(ByteCodeExprGen<Emitter> *Ctx) : VariableScope<Emitter>(Ctx) {}
+ LocalScope(Compiler<Emitter> *Ctx) : VariableScope<Emitter>(Ctx, nullptr) {}
+ LocalScope(Compiler<Emitter> *Ctx, const ValueDecl *VD)
+ : VariableScope<Emitter>(Ctx, VD) {}
/// Emit a Destroy op for this scope.
~LocalScope() override {
if (!Idx)
return;
this->Ctx->emitDestroy(*Idx, SourceInfo{});
+ removeStoredOpaqueValues();
}
/// Overriden to support explicit destruction.
- void emitDestruction() override {
+ void emitDestruction() override { destroyLocals(); }
+
+ /// Explicit destruction of local variables.
+ bool destroyLocals() {
if (!Idx)
- return;
- this->emitDestructors();
+ return true;
+
+ bool Success = this->emitDestructors();
this->Ctx->emitDestroy(*Idx, SourceInfo{});
+ removeStoredOpaqueValues();
this->Idx = std::nullopt;
+ return Success;
}
void addLocal(const Scope::Local &Local) override {
@@ -394,17 +498,43 @@ public:
this->Ctx->Descriptors[*Idx].emplace_back(Local);
}
- void emitDestructors() override {
+ bool emitDestructors() override {
if (!Idx)
- return;
+ return true;
// Emit destructor calls for local variables of record
// type with a destructor.
for (Scope::Local &Local : this->Ctx->Descriptors[*Idx]) {
if (!Local.Desc->isPrimitive() && !Local.Desc->isPrimitiveArray()) {
- this->Ctx->emitGetPtrLocal(Local.Offset, SourceInfo{});
- this->Ctx->emitRecordDestruction(Local.Desc);
+ if (!this->Ctx->emitGetPtrLocal(Local.Offset, SourceInfo{}))
+ return false;
+
+ if (!this->Ctx->emitDestruction(Local.Desc))
+ return false;
+
+ if (!this->Ctx->emitPopPtr(SourceInfo{}))
+ return false;
+ removeIfStoredOpaqueValue(Local);
}
}
+ return true;
+ }
+
+ void removeStoredOpaqueValues() {
+ if (!Idx)
+ return;
+
+ for (const Scope::Local &Local : this->Ctx->Descriptors[*Idx]) {
+ removeIfStoredOpaqueValue(Local);
+ }
+ }
+
+ void removeIfStoredOpaqueValue(const Scope::Local &Local) {
+ if (const auto *OVE =
+ llvm::dyn_cast_if_present<OpaqueValueExpr>(Local.Desc->asExpr())) {
+ if (auto It = this->Ctx->OpaqueExprs.find(OVE);
+ It != this->Ctx->OpaqueExprs.end())
+ this->Ctx->OpaqueExprs.erase(It);
+ };
}
/// Index of the scope in the chain.
@@ -424,21 +554,10 @@ private:
LocalScope<Emitter> &OtherScope;
};
-/// Like a regular LocalScope, except that the destructors of all local
-/// variables are automatically emitted when the AutoScope is destroyed.
-template <class Emitter> class AutoScope : public LocalScope<Emitter> {
-public:
- AutoScope(ByteCodeExprGen<Emitter> *Ctx)
- : LocalScope<Emitter>(Ctx), DS(*this) {}
-
-private:
- DestructorScope<Emitter> DS;
-};
-
/// Scope for storage declared in a compound statement.
-template <class Emitter> class BlockScope final : public AutoScope<Emitter> {
+template <class Emitter> class BlockScope final : public LocalScope<Emitter> {
public:
- BlockScope(ByteCodeExprGen<Emitter> *Ctx) : AutoScope<Emitter>(Ctx) {}
+ BlockScope(Compiler<Emitter> *Ctx) : LocalScope<Emitter>(Ctx) {}
void addExtended(const Scope::Local &Local) override {
// If we to this point, just add the variable as a normal local
@@ -448,21 +567,9 @@ public:
}
};
-/// Expression scope which tracks potentially lifetime extended
-/// temporaries which are hoisted to the parent scope on exit.
-template <class Emitter> class ExprScope final : public AutoScope<Emitter> {
-public:
- ExprScope(ByteCodeExprGen<Emitter> *Ctx) : AutoScope<Emitter>(Ctx) {}
-
- void addExtended(const Scope::Local &Local) override {
- if (this->Parent)
- this->Parent->addLocal(Local);
- }
-};
-
template <class Emitter> class ArrayIndexScope final {
public:
- ArrayIndexScope(ByteCodeExprGen<Emitter> *Ctx, uint64_t Index) : Ctx(Ctx) {
+ ArrayIndexScope(Compiler<Emitter> *Ctx, uint64_t Index) : Ctx(Ctx) {
OldArrayIndex = Ctx->ArrayIndex;
Ctx->ArrayIndex = Index;
}
@@ -470,14 +577,13 @@ public:
~ArrayIndexScope() { Ctx->ArrayIndex = OldArrayIndex; }
private:
- ByteCodeExprGen<Emitter> *Ctx;
+ Compiler<Emitter> *Ctx;
std::optional<uint64_t> OldArrayIndex;
};
template <class Emitter> class SourceLocScope final {
public:
- SourceLocScope(ByteCodeExprGen<Emitter> *Ctx, const Expr *DefaultExpr)
- : Ctx(Ctx) {
+ SourceLocScope(Compiler<Emitter> *Ctx, const Expr *DefaultExpr) : Ctx(Ctx) {
assert(DefaultExpr);
// We only switch if the current SourceLocDefaultExpr is null.
if (!Ctx->SourceLocDefaultExpr) {
@@ -492,10 +598,36 @@ public:
}
private:
- ByteCodeExprGen<Emitter> *Ctx;
+ Compiler<Emitter> *Ctx;
bool Enabled = false;
};
+template <class Emitter> class InitLinkScope final {
+public:
+ InitLinkScope(Compiler<Emitter> *Ctx, InitLink &&Link) : Ctx(Ctx) {
+ Ctx->InitStack.push_back(std::move(Link));
+ }
+
+ ~InitLinkScope() { this->Ctx->InitStack.pop_back(); }
+
+private:
+ Compiler<Emitter> *Ctx;
+};
+
+template <class Emitter> class InitStackScope final {
+public:
+ InitStackScope(Compiler<Emitter> *Ctx, bool Active)
+ : Ctx(Ctx), OldValue(Ctx->InitStackActive) {
+ Ctx->InitStackActive = Active;
+ }
+
+ ~InitStackScope() { this->Ctx->InitStackActive = OldValue; }
+
+private:
+ Compiler<Emitter> *Ctx;
+ bool OldValue;
+};
+
} // namespace interp
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Context.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Context.cpp
index 75a300bcbace..b5e992c5a9ac 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Context.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Context.cpp
@@ -8,9 +8,7 @@
#include "Context.h"
#include "ByteCodeEmitter.h"
-#include "ByteCodeExprGen.h"
-#include "ByteCodeGenError.h"
-#include "ByteCodeStmtGen.h"
+#include "Compiler.h"
#include "EvalEmitter.h"
#include "Interp.h"
#include "InterpFrame.h"
@@ -31,102 +29,96 @@ bool Context::isPotentialConstantExpr(State &Parent, const FunctionDecl *FD) {
assert(Stk.empty());
Function *Func = P->getFunction(FD);
if (!Func || !Func->hasBody())
- Func = ByteCodeStmtGen<ByteCodeEmitter>(*this, *P).compileFunc(FD);
+ Func = Compiler<ByteCodeEmitter>(*this, *P).compileFunc(FD);
+
+ if (!Func)
+ return false;
APValue DummyResult;
- if (!Run(Parent, Func, DummyResult)) {
+ if (!Run(Parent, Func, DummyResult))
return false;
- }
return Func->isConstexpr();
}
bool Context::evaluateAsRValue(State &Parent, const Expr *E, APValue &Result) {
- assert(Stk.empty());
- ByteCodeExprGen<EvalEmitter> C(*this, *P, Parent, Stk, Result);
+ ++EvalID;
+ bool Recursing = !Stk.empty();
+ Compiler<EvalEmitter> C(*this, *P, Parent, Stk);
- auto Res = C.interpretExpr(E);
+ auto Res = C.interpretExpr(E, /*ConvertResultToRValue=*/E->isGLValue());
if (Res.isInvalid()) {
+ C.cleanup();
Stk.clear();
return false;
}
- assert(Stk.empty());
+ if (!Recursing) {
+ assert(Stk.empty());
#ifndef NDEBUG
- // Make sure we don't rely on some value being still alive in
- // InterpStack memory.
- Stk.clear();
+ // Make sure we don't rely on some value being still alive in
+ // InterpStack memory.
+ Stk.clear();
#endif
-
- // Implicit lvalue-to-rvalue conversion.
- if (E->isGLValue()) {
- std::optional<APValue> RValueResult = Res.toRValue();
- if (!RValueResult) {
- return false;
- }
- Result = *RValueResult;
- } else {
- Result = Res.toAPValue();
}
+ Result = Res.toAPValue();
+
return true;
}
bool Context::evaluate(State &Parent, const Expr *E, APValue &Result) {
- assert(Stk.empty());
- ByteCodeExprGen<EvalEmitter> C(*this, *P, Parent, Stk, Result);
+ ++EvalID;
+ bool Recursing = !Stk.empty();
+ Compiler<EvalEmitter> C(*this, *P, Parent, Stk);
auto Res = C.interpretExpr(E);
if (Res.isInvalid()) {
+ C.cleanup();
Stk.clear();
return false;
}
- assert(Stk.empty());
+ if (!Recursing) {
+ assert(Stk.empty());
#ifndef NDEBUG
- // Make sure we don't rely on some value being still alive in
- // InterpStack memory.
- Stk.clear();
+ // Make sure we don't rely on some value being still alive in
+ // InterpStack memory.
+ Stk.clear();
#endif
+ }
+
Result = Res.toAPValue();
return true;
}
bool Context::evaluateAsInitializer(State &Parent, const VarDecl *VD,
APValue &Result) {
- assert(Stk.empty());
- ByteCodeExprGen<EvalEmitter> C(*this, *P, Parent, Stk, Result);
-
- auto Res = C.interpretDecl(VD);
+ ++EvalID;
+ bool Recursing = !Stk.empty();
+ Compiler<EvalEmitter> C(*this, *P, Parent, Stk);
+
+ bool CheckGlobalInitialized =
+ shouldBeGloballyIndexed(VD) &&
+ (VD->getType()->isRecordType() || VD->getType()->isArrayType());
+ auto Res = C.interpretDecl(VD, CheckGlobalInitialized);
if (Res.isInvalid()) {
+ C.cleanup();
Stk.clear();
return false;
}
- assert(Stk.empty());
+ if (!Recursing) {
+ assert(Stk.empty());
#ifndef NDEBUG
- // Make sure we don't rely on some value being still alive in
- // InterpStack memory.
- Stk.clear();
+ // Make sure we don't rely on some value being still alive in
+ // InterpStack memory.
+ Stk.clear();
#endif
+ }
- // Ensure global variables are fully initialized.
- if (shouldBeGloballyIndexed(VD) && !Res.isInvalid() &&
- (VD->getType()->isRecordType() || VD->getType()->isArrayType())) {
- assert(Res.isLValue());
-
- if (!Res.checkFullyInitialized(C.getState()))
- return false;
-
- // lvalue-to-rvalue conversion.
- std::optional<APValue> RValueResult = Res.toRValue();
- if (!RValueResult)
- return false;
- Result = *RValueResult;
-
- } else
- Result = Res.toAPValue();
+ Result = Res.toAPValue();
return true;
}
@@ -136,7 +128,8 @@ std::optional<PrimType> Context::classify(QualType T) const {
if (T->isBooleanType())
return PT_Bool;
- if (T->isAnyComplexType())
+ // We map these to primitive arrays.
+ if (T->isAnyComplexType() || T->isVectorType())
return std::nullopt;
if (T->isSignedIntegerOrEnumerationType()) {
@@ -175,22 +168,24 @@ std::optional<PrimType> Context::classify(QualType T) const {
if (T->isFloatingType())
return PT_Float;
+ if (T->isSpecificBuiltinType(BuiltinType::BoundMember) ||
+ T->isMemberPointerType())
+ return PT_MemberPtr;
+
if (T->isFunctionPointerType() || T->isFunctionReferenceType() ||
- T->isFunctionType() || T->isSpecificBuiltinType(BuiltinType::BoundMember))
+ T->isFunctionType())
return PT_FnPtr;
- if (T->isReferenceType() || T->isPointerType())
+ if (T->isReferenceType() || T->isPointerType() ||
+ T->isObjCObjectPointerType())
return PT_Ptr;
- if (const auto *AT = dyn_cast<AtomicType>(T))
+ if (const auto *AT = T->getAs<AtomicType>())
return classify(AT->getValueType());
if (const auto *DT = dyn_cast<DecltypeType>(T))
return classify(DT->getUnderlyingType());
- if (const auto *DT = dyn_cast<MemberPointerType>(T))
- return classify(DT->getPointeeType());
-
return std::nullopt;
}
@@ -208,7 +203,8 @@ bool Context::Run(State &Parent, const Function *Func, APValue &Result) {
{
InterpState State(Parent, *P, Stk, *this);
- State.Current = new InterpFrame(State, Func, /*Caller=*/nullptr, {});
+ State.Current = new InterpFrame(State, Func, /*Caller=*/nullptr, CodePtr(),
+ Func->getArgSize());
if (Interpret(State, Result)) {
assert(Stk.empty());
return true;
@@ -222,22 +218,14 @@ bool Context::Run(State &Parent, const Function *Func, APValue &Result) {
return false;
}
-bool Context::Check(State &Parent, llvm::Expected<bool> &&Flag) {
- if (Flag)
- return *Flag;
- handleAllErrors(Flag.takeError(), [&Parent](ByteCodeGenError &Err) {
- Parent.FFDiag(Err.getRange().getBegin(),
- diag::err_experimental_clang_interp_failed)
- << Err.getRange();
- });
- return false;
-}
-
// TODO: Virtual bases?
const CXXMethodDecl *
Context::getOverridingFunction(const CXXRecordDecl *DynamicDecl,
const CXXRecordDecl *StaticDecl,
const CXXMethodDecl *InitialFunction) const {
+ assert(DynamicDecl);
+ assert(StaticDecl);
+ assert(InitialFunction);
const CXXRecordDecl *CurRecord = DynamicDecl;
const CXXMethodDecl *FoundFunction = InitialFunction;
@@ -278,9 +266,44 @@ const Function *Context::getOrCreateFunction(const FunctionDecl *FD) {
return Func;
if (!Func || WasNotDefined) {
- if (auto F = ByteCodeStmtGen<ByteCodeEmitter>(*this, *P).compileFunc(FD))
+ if (auto F = Compiler<ByteCodeEmitter>(*this, *P).compileFunc(FD))
Func = F;
}
return Func;
}
+
+unsigned Context::collectBaseOffset(const RecordDecl *BaseDecl,
+ const RecordDecl *DerivedDecl) const {
+ assert(BaseDecl);
+ assert(DerivedDecl);
+ const auto *FinalDecl = cast<CXXRecordDecl>(BaseDecl);
+ const RecordDecl *CurDecl = DerivedDecl;
+ const Record *CurRecord = P->getOrCreateRecord(CurDecl);
+ assert(CurDecl && FinalDecl);
+
+ unsigned OffsetSum = 0;
+ for (;;) {
+ assert(CurRecord->getNumBases() > 0);
+ // One level up
+ for (const Record::Base &B : CurRecord->bases()) {
+ const auto *BaseDecl = cast<CXXRecordDecl>(B.Decl);
+
+ if (BaseDecl == FinalDecl || BaseDecl->isDerivedFrom(FinalDecl)) {
+ OffsetSum += B.Offset;
+ CurRecord = B.R;
+ CurDecl = BaseDecl;
+ break;
+ }
+ }
+ if (CurDecl == FinalDecl)
+ break;
+ }
+
+ assert(OffsetSum > 0);
+ return OffsetSum;
+}
+
+const Record *Context::getRecord(const RecordDecl *D) const {
+ return P->getOrCreateRecord(D);
+}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Context.h b/contrib/llvm-project/clang/lib/AST/Interp/Context.h
index ab83a8d13224..b8ea4ad6b3b4 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Context.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Context.h
@@ -70,9 +70,21 @@ public:
/// Return the size of T in bits.
uint32_t getBitWidth(QualType T) const { return Ctx.getIntWidth(T); }
- /// Classifies an expression.
+ /// Classifies a type.
std::optional<PrimType> classify(QualType T) const;
+ /// Classifies an expression.
+ std::optional<PrimType> classify(const Expr *E) const {
+ assert(E);
+ if (E->isGLValue()) {
+ if (E->getType()->isFunctionType())
+ return PT_FnPtr;
+ return PT_Ptr;
+ }
+
+ return classify(E->getType());
+ }
+
const CXXMethodDecl *
getOverridingFunction(const CXXRecordDecl *DynamicDecl,
const CXXRecordDecl *StaticDecl,
@@ -92,19 +104,25 @@ public:
/// Returns the program. This is only needed for unittests.
Program &getProgram() const { return *P.get(); }
+ unsigned collectBaseOffset(const RecordDecl *BaseDecl,
+ const RecordDecl *DerivedDecl) const;
+
+ const Record *getRecord(const RecordDecl *D) const;
+
+ unsigned getEvalID() const { return EvalID; }
+
private:
/// Runs a function.
bool Run(State &Parent, const Function *Func, APValue &Result);
- /// Checks a result from the interpreter.
- bool Check(State &Parent, llvm::Expected<bool> &&R);
-
/// Current compilation context.
ASTContext &Ctx;
/// Interpreter stack, shared across invocations.
InterpStack Stk;
/// Constexpr program.
std::unique_ptr<Program> P;
+ /// ID identifying an evaluation.
+ unsigned EvalID = 0;
};
} // namespace interp
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.cpp
index b330e54baf33..4f7e9eac76a3 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.cpp
@@ -11,6 +11,7 @@
#include "Floating.h"
#include "FunctionPointer.h"
#include "IntegralAP.h"
+#include "MemberPointer.h"
#include "Pointer.h"
#include "PrimType.h"
#include "Record.h"
@@ -63,6 +64,16 @@ static void dtorArrayTy(Block *, std::byte *Ptr, const Descriptor *D) {
template <typename T>
static void moveArrayTy(Block *, const std::byte *Src, std::byte *Dst,
const Descriptor *D) {
+ // FIXME: Get rid of the const_cast.
+ InitMapPtr &SrcIMP =
+ *reinterpret_cast<InitMapPtr *>(const_cast<std::byte *>(Src));
+ if (SrcIMP) {
+ // We only ever invoke the moveFunc when moving block contents to a
+ // DeadBlock. DeadBlocks don't need InitMaps, so we destroy them here.
+ SrcIMP = std::nullopt;
+ }
+ Src += sizeof(InitMapPtr);
+ Dst += sizeof(InitMapPtr);
for (unsigned I = 0, NE = D->getNumElems(); I < NE; ++I) {
const auto *SrcPtr = &reinterpret_cast<const T *>(Src)[I];
auto *DstPtr = &reinterpret_cast<T *>(Dst)[I];
@@ -133,41 +144,82 @@ static void moveArrayDesc(Block *B, const std::byte *Src, std::byte *Dst,
}
}
+static void initField(Block *B, std::byte *Ptr, bool IsConst, bool IsMutable,
+ bool IsActive, bool IsUnion, const Descriptor *D,
+ unsigned FieldOffset) {
+ auto *Desc = reinterpret_cast<InlineDescriptor *>(Ptr + FieldOffset) - 1;
+ Desc->Offset = FieldOffset;
+ Desc->Desc = D;
+ Desc->IsInitialized = D->IsArray;
+ Desc->IsBase = false;
+ Desc->IsActive = IsActive && !IsUnion;
+ Desc->IsConst = IsConst || D->IsConst;
+ Desc->IsFieldMutable = IsMutable || D->IsMutable;
+
+ if (auto Fn = D->CtorFn)
+ Fn(B, Ptr + FieldOffset, Desc->IsConst, Desc->IsFieldMutable,
+ Desc->IsActive, D);
+}
+
+static void initBase(Block *B, std::byte *Ptr, bool IsConst, bool IsMutable,
+ bool IsActive, const Descriptor *D, unsigned FieldOffset,
+ bool IsVirtualBase) {
+ assert(D);
+ assert(D->ElemRecord);
+
+ bool IsUnion = D->ElemRecord->isUnion();
+ auto *Desc = reinterpret_cast<InlineDescriptor *>(Ptr + FieldOffset) - 1;
+ Desc->Offset = FieldOffset;
+ Desc->Desc = D;
+ Desc->IsInitialized = D->IsArray;
+ Desc->IsBase = true;
+ Desc->IsVirtualBase = IsVirtualBase;
+ Desc->IsActive = IsActive && !IsUnion;
+ Desc->IsConst = IsConst || D->IsConst;
+ Desc->IsFieldMutable = IsMutable || D->IsMutable;
+
+ for (const auto &V : D->ElemRecord->bases())
+ initBase(B, Ptr + FieldOffset, IsConst, IsMutable, IsActive, V.Desc,
+ V.Offset, false);
+ for (const auto &F : D->ElemRecord->fields())
+ initField(B, Ptr + FieldOffset, IsConst, IsMutable, IsActive, IsUnion,
+ F.Desc, F.Offset);
+}
+
static void ctorRecord(Block *B, std::byte *Ptr, bool IsConst, bool IsMutable,
bool IsActive, const Descriptor *D) {
- const bool IsUnion = D->ElemRecord->isUnion();
- auto CtorSub = [=](unsigned SubOff, Descriptor *F, bool IsBase) {
- auto *Desc = reinterpret_cast<InlineDescriptor *>(Ptr + SubOff) - 1;
- Desc->Offset = SubOff;
- Desc->Desc = F;
- Desc->IsInitialized = F->IsArray && !IsBase;
- Desc->IsBase = IsBase;
- Desc->IsActive = IsActive && !IsUnion;
- Desc->IsConst = IsConst || F->IsConst;
- Desc->IsFieldMutable = IsMutable || F->IsMutable;
- if (auto Fn = F->CtorFn)
- Fn(B, Ptr + SubOff, Desc->IsConst, Desc->IsFieldMutable, Desc->IsActive,
- F);
- };
- for (const auto &B : D->ElemRecord->bases())
- CtorSub(B.Offset, B.Desc, /*isBase=*/true);
+ for (const auto &V : D->ElemRecord->bases())
+ initBase(B, Ptr, IsConst, IsMutable, IsActive, V.Desc, V.Offset, false);
for (const auto &F : D->ElemRecord->fields())
- CtorSub(F.Offset, F.Desc, /*isBase=*/false);
+ initField(B, Ptr, IsConst, IsMutable, IsActive, D->ElemRecord->isUnion(), F.Desc, F.Offset);
for (const auto &V : D->ElemRecord->virtual_bases())
- CtorSub(V.Offset, V.Desc, /*isBase=*/true);
+ initBase(B, Ptr, IsConst, IsMutable, IsActive, V.Desc, V.Offset, true);
+}
+
+static void destroyField(Block *B, std::byte *Ptr, const Descriptor *D,
+ unsigned FieldOffset) {
+ if (auto Fn = D->DtorFn)
+ Fn(B, Ptr + FieldOffset, D);
+}
+
+static void destroyBase(Block *B, std::byte *Ptr, const Descriptor *D,
+ unsigned FieldOffset) {
+ assert(D);
+ assert(D->ElemRecord);
+
+ for (const auto &V : D->ElemRecord->bases())
+ destroyBase(B, Ptr + FieldOffset, V.Desc, V.Offset);
+ for (const auto &F : D->ElemRecord->fields())
+ destroyField(B, Ptr + FieldOffset, F.Desc, F.Offset);
}
static void dtorRecord(Block *B, std::byte *Ptr, const Descriptor *D) {
- auto DtorSub = [=](unsigned SubOff, Descriptor *F) {
- if (auto Fn = F->DtorFn)
- Fn(B, Ptr + SubOff, F);
- };
for (const auto &F : D->ElemRecord->bases())
- DtorSub(F.Offset, F.Desc);
+ destroyBase(B, Ptr, F.Desc, F.Offset);
for (const auto &F : D->ElemRecord->fields())
- DtorSub(F.Offset, F.Desc);
+ destroyField(B, Ptr, F.Desc, F.Offset);
for (const auto &F : D->ElemRecord->virtual_bases())
- DtorSub(F.Offset, F.Desc);
+ destroyBase(B, Ptr, F.Desc, F.Offset);
}
static void moveRecord(Block *B, const std::byte *Src, std::byte *Dst,
@@ -190,6 +242,8 @@ static BlockCtorFn getCtorPrim(PrimType Type) {
return ctorTy<PrimConv<PT_IntAP>::T>;
if (Type == PT_IntAPS)
return ctorTy<PrimConv<PT_IntAPS>::T>;
+ if (Type == PT_MemberPtr)
+ return ctorTy<PrimConv<PT_MemberPtr>::T>;
COMPOSITE_TYPE_SWITCH(Type, return ctorTy<T>, return nullptr);
}
@@ -203,6 +257,8 @@ static BlockDtorFn getDtorPrim(PrimType Type) {
return dtorTy<PrimConv<PT_IntAP>::T>;
if (Type == PT_IntAPS)
return dtorTy<PrimConv<PT_IntAPS>::T>;
+ if (Type == PT_MemberPtr)
+ return dtorTy<PrimConv<PT_MemberPtr>::T>;
COMPOSITE_TYPE_SWITCH(Type, return dtorTy<T>, return nullptr);
}
@@ -230,9 +286,10 @@ static BlockMoveFn getMoveArrayPrim(PrimType Type) {
Descriptor::Descriptor(const DeclTy &D, PrimType Type, MetadataSize MD,
bool IsConst, bool IsTemporary, bool IsMutable)
: Source(D), ElemSize(primSize(Type)), Size(ElemSize),
- MDSize(MD.value_or(0)), AllocSize(align(Size + MDSize)), IsConst(IsConst),
- IsMutable(IsMutable), IsTemporary(IsTemporary), CtorFn(getCtorPrim(Type)),
- DtorFn(getDtorPrim(Type)), MoveFn(getMovePrim(Type)) {
+ MDSize(MD.value_or(0)), AllocSize(align(Size + MDSize)), PrimT(Type),
+ IsConst(IsConst), IsMutable(IsMutable), IsTemporary(IsTemporary),
+ CtorFn(getCtorPrim(Type)), DtorFn(getDtorPrim(Type)),
+ MoveFn(getMovePrim(Type)) {
assert(AllocSize >= Size);
assert(Source && "Missing source");
}
@@ -243,18 +300,20 @@ Descriptor::Descriptor(const DeclTy &D, PrimType Type, MetadataSize MD,
bool IsMutable)
: Source(D), ElemSize(primSize(Type)), Size(ElemSize * NumElems),
MDSize(MD.value_or(0)),
- AllocSize(align(Size) + sizeof(InitMapPtr) + MDSize), IsConst(IsConst),
- IsMutable(IsMutable), IsTemporary(IsTemporary), IsArray(true),
- CtorFn(getCtorArrayPrim(Type)), DtorFn(getDtorArrayPrim(Type)),
- MoveFn(getMoveArrayPrim(Type)) {
+ AllocSize(align(MDSize) + align(Size) + sizeof(InitMapPtr)), PrimT(Type),
+ IsConst(IsConst), IsMutable(IsMutable), IsTemporary(IsTemporary),
+ IsArray(true), CtorFn(getCtorArrayPrim(Type)),
+ DtorFn(getDtorArrayPrim(Type)), MoveFn(getMoveArrayPrim(Type)) {
assert(Source && "Missing source");
+ assert(NumElems <= (MaxArrayElemBytes / ElemSize));
}
/// Primitive unknown-size arrays.
-Descriptor::Descriptor(const DeclTy &D, PrimType Type, bool IsTemporary,
- UnknownSize)
- : Source(D), ElemSize(primSize(Type)), Size(UnknownSizeMark), MDSize(0),
- AllocSize(alignof(void *) + sizeof(InitMapPtr)), IsConst(true),
+Descriptor::Descriptor(const DeclTy &D, PrimType Type, MetadataSize MD,
+ bool IsTemporary, UnknownSize)
+ : Source(D), ElemSize(primSize(Type)), Size(UnknownSizeMark),
+ MDSize(MD.value_or(0)),
+ AllocSize(MDSize + sizeof(InitMapPtr) + alignof(void *)), IsConst(true),
IsMutable(false), IsTemporary(IsTemporary), IsArray(true),
CtorFn(getCtorArrayPrim(Type)), DtorFn(getDtorArrayPrim(Type)),
MoveFn(getMoveArrayPrim(Type)) {
@@ -275,12 +334,12 @@ Descriptor::Descriptor(const DeclTy &D, const Descriptor *Elem, MetadataSize MD,
}
/// Unknown-size arrays of composite elements.
-Descriptor::Descriptor(const DeclTy &D, const Descriptor *Elem,
+Descriptor::Descriptor(const DeclTy &D, const Descriptor *Elem, MetadataSize MD,
bool IsTemporary, UnknownSize)
: Source(D), ElemSize(Elem->getAllocSize() + sizeof(InlineDescriptor)),
- Size(UnknownSizeMark), MDSize(0),
- AllocSize(alignof(void *) + sizeof(InitMapPtr)), ElemDesc(Elem),
- IsConst(true), IsMutable(false), IsTemporary(IsTemporary), IsArray(true),
+ Size(UnknownSizeMark), MDSize(MD.value_or(0)),
+ AllocSize(MDSize + alignof(void *)), ElemDesc(Elem), IsConst(true),
+ IsMutable(false), IsTemporary(IsTemporary), IsArray(true),
CtorFn(ctorArrayDesc), DtorFn(dtorArrayDesc), MoveFn(moveArrayDesc) {
assert(Source && "Missing source");
}
@@ -296,27 +355,34 @@ Descriptor::Descriptor(const DeclTy &D, const Record *R, MetadataSize MD,
assert(Source && "Missing source");
}
-Descriptor::Descriptor(const DeclTy &D, MetadataSize MD)
- : Source(D), ElemSize(1), Size(ElemSize), MDSize(MD.value_or(0)),
- AllocSize(Size + MDSize), ElemRecord(nullptr), IsConst(true),
- IsMutable(false), IsTemporary(false), IsDummy(true) {
+/// Dummy.
+Descriptor::Descriptor(const DeclTy &D)
+ : Source(D), ElemSize(1), Size(1), MDSize(0), AllocSize(MDSize),
+ ElemRecord(nullptr), IsConst(true), IsMutable(false), IsTemporary(false),
+ IsDummy(true) {
assert(Source && "Missing source");
}
QualType Descriptor::getType() const {
- if (auto *E = asExpr())
+ if (const auto *E = asExpr())
return E->getType();
- if (auto *D = asValueDecl())
+ if (const auto *D = asValueDecl())
return D->getType();
- if (auto *T = dyn_cast<TypeDecl>(asDecl()))
+ if (const auto *T = dyn_cast<TypeDecl>(asDecl()))
return QualType(T->getTypeForDecl(), 0);
llvm_unreachable("Invalid descriptor type");
}
QualType Descriptor::getElemQualType() const {
assert(isArray());
- const auto *AT = cast<ArrayType>(getType());
- return AT->getElementType();
+ QualType T = getType();
+ if (const auto *AT = T->getAsArrayTypeUnsafe())
+ return AT->getElementType();
+ if (const auto *CT = T->getAs<ComplexType>())
+ return CT->getElementType();
+ if (const auto *CT = T->getAs<VectorType>())
+ return CT->getElementType();
+ llvm_unreachable("Array that's not an array/complex/vector type?");
}
SourceLocation Descriptor::getLocation() const {
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.h b/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.h
index 580c200f9095..0cc5d77c407e 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.h
@@ -13,6 +13,7 @@
#ifndef LLVM_CLANG_AST_INTERP_DESCRIPTOR_H
#define LLVM_CLANG_AST_INTERP_DESCRIPTOR_H
+#include "PrimType.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
@@ -47,6 +48,18 @@ using BlockMoveFn = void (*)(Block *Storage, const std::byte *SrcFieldPtr,
std::byte *DstFieldPtr,
const Descriptor *FieldDesc);
+enum class GlobalInitState {
+ Initialized,
+ NoInitializer,
+ InitializerFailed,
+};
+
+/// Descriptor used for global variables.
+struct alignas(void *) GlobalInlineDescriptor {
+ GlobalInitState InitState = GlobalInitState::InitializerFailed;
+};
+static_assert(sizeof(GlobalInlineDescriptor) == sizeof(void *), "");
+
/// Inline descriptor embedded in structures and arrays.
///
/// Such descriptors precede all composite array elements and structure fields.
@@ -59,21 +72,36 @@ struct InlineDescriptor {
/// Flag indicating if the storage is constant or not.
/// Relevant for primitive fields.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsConst : 1;
/// For primitive fields, it indicates if the field was initialized.
/// Primitive fields in static storage are always initialized.
/// Arrays are always initialized, even though their elements might not be.
/// Base classes are initialized after the constructor is invoked.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsInitialized : 1;
/// Flag indicating if the field is an embedded base class.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsBase : 1;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsVirtualBase : 1;
/// Flag indicating if the field is the active member of a union.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsActive : 1;
/// Flag indicating if the field is mutable (if in a record).
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsFieldMutable : 1;
const Descriptor *Desc;
+
+ InlineDescriptor(const Descriptor *D)
+ : Offset(sizeof(InlineDescriptor)), IsConst(false), IsInitialized(false),
+ IsBase(false), IsActive(false), IsFieldMutable(false), Desc(D) {}
+
+ void dump() const { dump(llvm::errs()); }
+ void dump(llvm::raw_ostream &OS) const;
};
+static_assert(sizeof(GlobalInlineDescriptor) != sizeof(InlineDescriptor), "");
/// Describes a memory block created by an allocation site.
struct Descriptor final {
@@ -98,11 +126,21 @@ public:
using MetadataSize = std::optional<unsigned>;
static constexpr MetadataSize InlineDescMD = sizeof(InlineDescriptor);
+ static constexpr MetadataSize GlobalMD = sizeof(GlobalInlineDescriptor);
+
+ /// Maximum number of bytes to be used for array elements.
+ static constexpr unsigned MaxArrayElemBytes =
+ std::numeric_limits<decltype(AllocSize)>::max() - sizeof(InitMapPtr) -
+ align(std::max(*InlineDescMD, *GlobalMD));
/// Pointer to the record, if block contains records.
const Record *const ElemRecord = nullptr;
/// Descriptor of the array element.
const Descriptor *const ElemDesc = nullptr;
+ /// The primitive type this descriptor was created for,
+ /// or the primitive element type in case this is
+ /// a primitive array.
+ const std::optional<PrimType> PrimT = std::nullopt;
/// Flag indicating if the block is mutable.
const bool IsConst = false;
/// Flag indicating if a field is mutable.
@@ -112,7 +150,7 @@ public:
/// Flag indicating if the block is an array.
const bool IsArray = false;
/// Flag indicating if this is a dummy descriptor.
- const bool IsDummy = false;
+ bool IsDummy = false;
/// Storage management methods.
const BlockCtorFn CtorFn = nullptr;
@@ -128,21 +166,26 @@ public:
bool IsConst, bool IsTemporary, bool IsMutable);
/// Allocates a descriptor for an array of primitives of unknown size.
- Descriptor(const DeclTy &D, PrimType Type, bool IsTemporary, UnknownSize);
+ Descriptor(const DeclTy &D, PrimType Type, MetadataSize MDSize,
+ bool IsTemporary, UnknownSize);
/// Allocates a descriptor for an array of composites.
Descriptor(const DeclTy &D, const Descriptor *Elem, MetadataSize MD,
unsigned NumElems, bool IsConst, bool IsTemporary, bool IsMutable);
/// Allocates a descriptor for an array of composites of unknown size.
- Descriptor(const DeclTy &D, const Descriptor *Elem, bool IsTemporary,
- UnknownSize);
+ Descriptor(const DeclTy &D, const Descriptor *Elem, MetadataSize MD,
+ bool IsTemporary, UnknownSize);
/// Allocates a descriptor for a record.
Descriptor(const DeclTy &D, const Record *R, MetadataSize MD, bool IsConst,
bool IsTemporary, bool IsMutable);
- Descriptor(const DeclTy &D, MetadataSize MD);
+ /// Allocates a dummy descriptor.
+ Descriptor(const DeclTy &D);
+
+ /// Make this descriptor a dummy descriptor.
+ void makeDummy() { IsDummy = true; }
QualType getType() const;
QualType getElemQualType() const;
@@ -150,11 +193,16 @@ public:
const Decl *asDecl() const { return Source.dyn_cast<const Decl *>(); }
const Expr *asExpr() const { return Source.dyn_cast<const Expr *>(); }
+ const DeclTy &getSource() const { return Source; }
const ValueDecl *asValueDecl() const {
return dyn_cast_if_present<ValueDecl>(asDecl());
}
+ const VarDecl *asVarDecl() const {
+ return dyn_cast_if_present<VarDecl>(asDecl());
+ }
+
const FieldDecl *asFieldDecl() const {
return dyn_cast_if_present<FieldDecl>(asDecl());
}
@@ -169,6 +217,11 @@ public:
return Size;
}
+ PrimType getPrimType() const {
+ assert(isPrimitiveArray() || isPrimitive());
+ return *PrimT;
+ }
+
/// Returns the allocated size, including metadata.
unsigned getAllocSize() const { return AllocSize; }
/// returns the size of an element when the structure is viewed as an array.
@@ -199,6 +252,9 @@ public:
bool isRecord() const { return !IsArray && ElemRecord; }
/// Checks if this is a dummy descriptor.
bool isDummy() const { return IsDummy; }
+
+ void dump() const;
+ void dump(llvm::raw_ostream &OS) const;
};
/// Bitfield tracking the initialisation status of elements of primitive arrays.
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp
index d276df8f2926..867284ecf7f4 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp
@@ -10,12 +10,22 @@
//
//===----------------------------------------------------------------------===//
+#include "Boolean.h"
+#include "Context.h"
+#include "EvaluationResult.h"
#include "Floating.h"
#include "Function.h"
+#include "FunctionPointer.h"
+#include "Integral.h"
+#include "IntegralAP.h"
+#include "InterpFrame.h"
+#include "MemberPointer.h"
#include "Opcode.h"
#include "PrimType.h"
#include "Program.h"
+#include "clang/AST/ASTDumperUtils.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Format.h"
@@ -37,10 +47,27 @@ template <> inline Floating ReadArg<Floating>(Program &P, CodePtr &OpPC) {
return F;
}
+template <>
+inline IntegralAP<false> ReadArg<IntegralAP<false>>(Program &P, CodePtr &OpPC) {
+ IntegralAP<false> I = IntegralAP<false>::deserialize(*OpPC);
+ OpPC += align(I.bytesToSerialize());
+ return I;
+}
+
+template <>
+inline IntegralAP<true> ReadArg<IntegralAP<true>>(Program &P, CodePtr &OpPC) {
+ IntegralAP<true> I = IntegralAP<true>::deserialize(*OpPC);
+ OpPC += align(I.bytesToSerialize());
+ return I;
+}
+
LLVM_DUMP_METHOD void Function::dump() const { dump(llvm::errs()); }
LLVM_DUMP_METHOD void Function::dump(llvm::raw_ostream &OS) const {
- OS << getName() << " " << (const void *)this << "\n";
+ {
+ ColorScope SC(OS, true, {llvm::raw_ostream::BRIGHT_GREEN, true});
+ OS << getName() << " " << (const void *)this << "\n";
+ }
OS << "frame size: " << getFrameSize() << "\n";
OS << "arg size: " << getArgSize() << "\n";
OS << "rvo: " << hasRVO() << "\n";
@@ -67,15 +94,289 @@ LLVM_DUMP_METHOD void Function::dump(llvm::raw_ostream &OS) const {
LLVM_DUMP_METHOD void Program::dump() const { dump(llvm::errs()); }
+static const char *primTypeToString(PrimType T) {
+ switch (T) {
+ case PT_Sint8:
+ return "Sint8";
+ case PT_Uint8:
+ return "Uint8";
+ case PT_Sint16:
+ return "Sint16";
+ case PT_Uint16:
+ return "Uint16";
+ case PT_Sint32:
+ return "Sint32";
+ case PT_Uint32:
+ return "Uint32";
+ case PT_Sint64:
+ return "Sint64";
+ case PT_Uint64:
+ return "Uint64";
+ case PT_IntAP:
+ return "IntAP";
+ case PT_IntAPS:
+ return "IntAPS";
+ case PT_Bool:
+ return "Bool";
+ case PT_Float:
+ return "Float";
+ case PT_Ptr:
+ return "Ptr";
+ case PT_FnPtr:
+ return "FnPtr";
+ case PT_MemberPtr:
+ return "MemberPtr";
+ }
+ llvm_unreachable("Unhandled PrimType");
+}
+
LLVM_DUMP_METHOD void Program::dump(llvm::raw_ostream &OS) const {
- OS << ":: Program\n";
- OS << "Global Variables: " << Globals.size() << "\n";
- OS << "Functions: " << Funcs.size() << "\n";
- OS << "\n";
- for (auto &Func : Funcs) {
+ {
+ ColorScope SC(OS, true, {llvm::raw_ostream::BRIGHT_RED, true});
+ OS << "\n:: Program\n";
+ }
+
+ {
+ ColorScope SC(OS, true, {llvm::raw_ostream::WHITE, true});
+ OS << "Total memory : " << Allocator.getTotalMemory() << " bytes\n";
+ OS << "Global Variables: " << Globals.size() << "\n";
+ }
+ unsigned GI = 0;
+ for (const Global *G : Globals) {
+ const Descriptor *Desc = G->block()->getDescriptor();
+ Pointer GP = getPtrGlobal(GI);
+
+ OS << GI << ": " << (const void *)G->block() << " ";
+ {
+ ColorScope SC(OS, true,
+ GP.isInitialized()
+ ? TerminalColor{llvm::raw_ostream::GREEN, false}
+ : TerminalColor{llvm::raw_ostream::RED, false});
+ OS << (GP.isInitialized() ? "initialized " : "uninitialized ");
+ }
+ Desc->dump(OS);
+
+ if (GP.isInitialized() && Desc->IsTemporary) {
+ if (const auto *MTE =
+ dyn_cast_if_present<MaterializeTemporaryExpr>(Desc->asExpr());
+ MTE && MTE->getLifetimeExtendedTemporaryDecl()) {
+ if (const APValue *V =
+ MTE->getLifetimeExtendedTemporaryDecl()->getValue()) {
+ OS << " (global temporary value: ";
+ {
+ ColorScope SC(OS, true, {llvm::raw_ostream::BRIGHT_MAGENTA, true});
+ std::string VStr;
+ llvm::raw_string_ostream SS(VStr);
+ V->dump(SS, Ctx.getASTContext());
+
+ for (unsigned I = 0; I != VStr.size(); ++I) {
+ if (VStr[I] == '\n')
+ VStr[I] = ' ';
+ }
+ VStr.pop_back(); // Remove the newline (or now space) at the end.
+ OS << VStr;
+ }
+ OS << ')';
+ }
+ }
+ }
+
+ OS << "\n";
+ if (GP.isInitialized() && Desc->isPrimitive() && !Desc->isDummy()) {
+ OS << " ";
+ {
+ ColorScope SC(OS, true, {llvm::raw_ostream::BRIGHT_CYAN, false});
+ OS << primTypeToString(Desc->getPrimType()) << " ";
+ }
+ TYPE_SWITCH(Desc->getPrimType(), { GP.deref<T>().print(OS); });
+ OS << "\n";
+ }
+ ++GI;
+ }
+
+ {
+ ColorScope SC(OS, true, {llvm::raw_ostream::WHITE, true});
+ OS << "Functions: " << Funcs.size() << "\n";
+ }
+ for (const auto &Func : Funcs) {
Func.second->dump();
}
- for (auto &Anon : AnonFuncs) {
+ for (const auto &Anon : AnonFuncs) {
Anon->dump();
}
}
+
+LLVM_DUMP_METHOD void Descriptor::dump() const {
+ dump(llvm::errs());
+ llvm::errs() << '\n';
+}
+
+LLVM_DUMP_METHOD void Descriptor::dump(llvm::raw_ostream &OS) const {
+ // Source
+ {
+ ColorScope SC(OS, true, {llvm::raw_ostream::BLUE, true});
+ if (const auto *ND = dyn_cast_if_present<NamedDecl>(asDecl()))
+ ND->printQualifiedName(OS);
+ else if (asExpr())
+ OS << "Expr " << (const void *)asExpr();
+ }
+
+ // Print a few interesting bits about the descriptor.
+ if (isPrimitiveArray())
+ OS << " primitive-array";
+ else if (isCompositeArray())
+ OS << " composite-array";
+ else if (isRecord())
+ OS << " record";
+ else if (isPrimitive())
+ OS << " primitive";
+
+ if (isZeroSizeArray())
+ OS << " zero-size-array";
+ else if (isUnknownSizeArray())
+ OS << " unknown-size-array";
+
+ if (isDummy())
+ OS << " dummy";
+}
+
+LLVM_DUMP_METHOD void InlineDescriptor::dump(llvm::raw_ostream &OS) const {
+ {
+ ColorScope SC(OS, true, {llvm::raw_ostream::BLUE, true});
+ OS << "InlineDescriptor " << (const void *)this << "\n";
+ }
+ OS << "Offset: " << Offset << "\n";
+ OS << "IsConst: " << IsConst << "\n";
+ OS << "IsInitialized: " << IsInitialized << "\n";
+ OS << "IsBase: " << IsBase << "\n";
+ OS << "IsActive: " << IsActive << "\n";
+ OS << "IsFieldMutable: " << IsFieldMutable << "\n";
+ OS << "Desc: ";
+ if (Desc)
+ Desc->dump(OS);
+ else
+ OS << "nullptr";
+ OS << "\n";
+}
+
+LLVM_DUMP_METHOD void InterpFrame::dump(llvm::raw_ostream &OS,
+ unsigned Indent) const {
+ unsigned Spaces = Indent * 2;
+ {
+ ColorScope SC(OS, true, {llvm::raw_ostream::BLUE, true});
+ OS.indent(Spaces);
+ if (getCallee())
+ describe(OS);
+ else
+ OS << "Frame (Depth: " << getDepth() << ")";
+ OS << "\n";
+ }
+ OS.indent(Spaces) << "Function: " << getFunction();
+ if (const Function *F = getFunction()) {
+ OS << " (" << F->getName() << ")";
+ }
+ OS << "\n";
+ OS.indent(Spaces) << "This: " << getThis() << "\n";
+ OS.indent(Spaces) << "RVO: " << getRVOPtr() << "\n";
+
+ while (const InterpFrame *F = this->Caller) {
+ F->dump(OS, Indent + 1);
+ F = F->Caller;
+ }
+}
+
+LLVM_DUMP_METHOD void Record::dump(llvm::raw_ostream &OS, unsigned Indentation,
+ unsigned Offset) const {
+ unsigned Indent = Indentation * 2;
+ OS.indent(Indent);
+ {
+ ColorScope SC(OS, true, {llvm::raw_ostream::BLUE, true});
+ OS << getName() << "\n";
+ }
+
+ unsigned I = 0;
+ for (const Record::Base &B : bases()) {
+ OS.indent(Indent) << "- Base " << I << ". Offset " << (Offset + B.Offset)
+ << "\n";
+ B.R->dump(OS, Indentation + 1, Offset + B.Offset);
+ ++I;
+ }
+
+ I = 0;
+ for (const Record::Field &F : fields()) {
+ OS.indent(Indent) << "- Field " << I << ": ";
+ {
+ ColorScope SC(OS, true, {llvm::raw_ostream::BRIGHT_RED, true});
+ OS << F.Decl->getName();
+ }
+ OS << ". Offset " << (Offset + F.Offset) << "\n";
+ ++I;
+ }
+
+ I = 0;
+ for (const Record::Base &B : virtual_bases()) {
+ OS.indent(Indent) << "- Virtual Base " << I << ". Offset "
+ << (Offset + B.Offset) << "\n";
+ B.R->dump(OS, Indentation + 1, Offset + B.Offset);
+ ++I;
+ }
+}
+
+LLVM_DUMP_METHOD void Block::dump(llvm::raw_ostream &OS) const {
+ {
+ ColorScope SC(OS, true, {llvm::raw_ostream::BRIGHT_BLUE, true});
+ OS << "Block " << (const void *)this;
+ }
+ OS << " (";
+ Desc->dump(OS);
+ OS << ")\n";
+ unsigned NPointers = 0;
+ for (const Pointer *P = Pointers; P; P = P->Next) {
+ ++NPointers;
+ }
+ OS << " Pointers: " << NPointers << "\n";
+ OS << " Dead: " << IsDead << "\n";
+ OS << " Static: " << IsStatic << "\n";
+ OS << " Extern: " << IsExtern << "\n";
+ OS << " Initialized: " << IsInitialized << "\n";
+}
+
+LLVM_DUMP_METHOD void EvaluationResult::dump() const {
+ assert(Ctx);
+ auto &OS = llvm::errs();
+ const ASTContext &ASTCtx = Ctx->getASTContext();
+
+ switch (Kind) {
+ case Empty:
+ OS << "Empty\n";
+ break;
+ case RValue:
+ OS << "RValue: ";
+ std::get<APValue>(Value).dump(OS, ASTCtx);
+ break;
+ case LValue: {
+ assert(Source);
+ QualType SourceType;
+ if (const auto *D = Source.dyn_cast<const Decl *>()) {
+ if (const auto *VD = dyn_cast<ValueDecl>(D))
+ SourceType = VD->getType();
+ } else if (const auto *E = Source.dyn_cast<const Expr *>()) {
+ SourceType = E->getType();
+ }
+
+ OS << "LValue: ";
+ if (const auto *P = std::get_if<Pointer>(&Value))
+ P->toAPValue(ASTCtx).printPretty(OS, ASTCtx, SourceType);
+ else if (const auto *FP = std::get_if<FunctionPointer>(&Value)) // Nope
+ FP->toAPValue(ASTCtx).printPretty(OS, ASTCtx, SourceType);
+ OS << "\n";
+ break;
+ }
+ case Invalid:
+ OS << "Invalid\n";
+ break;
+ case Valid:
+ OS << "Valid\n";
+ break;
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/DynamicAllocator.cpp b/contrib/llvm-project/clang/lib/AST/Interp/DynamicAllocator.cpp
new file mode 100644
index 000000000000..a51599774078
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/DynamicAllocator.cpp
@@ -0,0 +1,118 @@
+//==-------- DynamicAllocator.cpp - Dynamic allocations ----------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "DynamicAllocator.h"
+#include "InterpBlock.h"
+#include "InterpState.h"
+
+using namespace clang;
+using namespace clang::interp;
+
+DynamicAllocator::~DynamicAllocator() { cleanup(); }
+
+void DynamicAllocator::cleanup() {
+ // Invoke destructors of all the blocks and as a last resort,
+ // reset all the pointers pointing to them to null pointees.
+ // This should never show up in diagnostics, but it's necessary
+ // for us to not cause use-after-free problems.
+ for (auto &Iter : AllocationSites) {
+ auto &AllocSite = Iter.second;
+ for (auto &Alloc : AllocSite.Allocations) {
+ Block *B = reinterpret_cast<Block *>(Alloc.Memory.get());
+ B->invokeDtor();
+ if (B->hasPointers()) {
+ while (B->Pointers) {
+ Pointer *Next = B->Pointers->Next;
+ B->Pointers->PointeeStorage.BS.Pointee = nullptr;
+ B->Pointers = Next;
+ }
+ B->Pointers = nullptr;
+ }
+ }
+ }
+
+ AllocationSites.clear();
+}
+
+Block *DynamicAllocator::allocate(const Expr *Source, PrimType T,
+ size_t NumElements, unsigned EvalID) {
+ // Create a new descriptor for an array of the specified size and
+ // element type.
+ const Descriptor *D = allocateDescriptor(
+ Source, T, Descriptor::InlineDescMD, NumElements, /*IsConst=*/false,
+ /*IsTemporary=*/false, /*IsMutable=*/false);
+
+ return allocate(D, EvalID);
+}
+
+Block *DynamicAllocator::allocate(const Descriptor *ElementDesc,
+ size_t NumElements, unsigned EvalID) {
+ // Create a new descriptor for an array of the specified size and
+ // element type.
+ const Descriptor *D = allocateDescriptor(
+ ElementDesc->asExpr(), ElementDesc, Descriptor::InlineDescMD, NumElements,
+ /*IsConst=*/false, /*IsTemporary=*/false, /*IsMutable=*/false);
+ return allocate(D, EvalID);
+}
+
+Block *DynamicAllocator::allocate(const Descriptor *D, unsigned EvalID) {
+ assert(D);
+ assert(D->asExpr());
+
+ auto Memory =
+ std::make_unique<std::byte[]>(sizeof(Block) + D->getAllocSize());
+ auto *B = new (Memory.get()) Block(EvalID, D, /*isStatic=*/false);
+ B->invokeCtor();
+
+ InlineDescriptor *ID = reinterpret_cast<InlineDescriptor *>(B->rawData());
+ ID->Desc = D;
+ ID->IsActive = true;
+ ID->Offset = sizeof(InlineDescriptor);
+ ID->IsBase = false;
+ ID->IsFieldMutable = false;
+ ID->IsConst = false;
+ ID->IsInitialized = false;
+
+ B->IsDynamic = true;
+
+ if (auto It = AllocationSites.find(D->asExpr()); It != AllocationSites.end())
+ It->second.Allocations.emplace_back(std::move(Memory));
+ else
+ AllocationSites.insert(
+ {D->asExpr(), AllocationSite(std::move(Memory), D->isArray())});
+ return B;
+}
+
+bool DynamicAllocator::deallocate(const Expr *Source,
+ const Block *BlockToDelete, InterpState &S) {
+ auto It = AllocationSites.find(Source);
+ if (It == AllocationSites.end())
+ return false;
+
+ auto &Site = It->second;
+ assert(Site.size() > 0);
+
+ // Find the Block to delete.
+ auto AllocIt = llvm::find_if(Site.Allocations, [&](const Allocation &A) {
+ const Block *B = reinterpret_cast<const Block *>(A.Memory.get());
+ return BlockToDelete == B;
+ });
+
+ assert(AllocIt != Site.Allocations.end());
+
+ Block *B = reinterpret_cast<Block *>(AllocIt->Memory.get());
+ B->invokeDtor();
+
+ S.deallocate(B);
+ Site.Allocations.erase(AllocIt);
+
+ if (Site.size() == 0)
+ AllocationSites.erase(It);
+
+ return true;
+}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/DynamicAllocator.h b/contrib/llvm-project/clang/lib/AST/Interp/DynamicAllocator.h
new file mode 100644
index 000000000000..a84600aa54cc
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/DynamicAllocator.h
@@ -0,0 +1,102 @@
+//==--------- DynamicAllocator.h - Dynamic allocations ------------*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_INTERP_DYNAMIC_ALLOCATOR_H
+#define LLVM_CLANG_AST_INTERP_DYNAMIC_ALLOCATOR_H
+
+#include "Descriptor.h"
+#include "InterpBlock.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Allocator.h"
+
+namespace clang {
+class Expr;
+namespace interp {
+class Block;
+class InterpState;
+
+/// Manages dynamic memory allocations done during bytecode interpretation.
+///
+/// We manage allocations as a map from their new-expression to a list
+/// of allocations. This is called an AllocationSite. For each site, we
+/// record whether it was allocated using new or new[], the
+/// IsArrayAllocation flag.
+///
+/// For all array allocations, we need to allocate new Descriptor instances,
+/// so the DynamicAllocator has a llvm::BumpPtrAllocator similar to Program.
+class DynamicAllocator final {
+ struct Allocation {
+ std::unique_ptr<std::byte[]> Memory;
+ Allocation(std::unique_ptr<std::byte[]> Memory)
+ : Memory(std::move(Memory)) {}
+ };
+
+ struct AllocationSite {
+ llvm::SmallVector<Allocation> Allocations;
+ bool IsArrayAllocation = false;
+
+ AllocationSite(std::unique_ptr<std::byte[]> Memory, bool Array)
+ : IsArrayAllocation(Array) {
+ Allocations.push_back({std::move(Memory)});
+ }
+
+ size_t size() const { return Allocations.size(); }
+ };
+
+public:
+ DynamicAllocator() = default;
+ ~DynamicAllocator();
+
+ void cleanup();
+
+ unsigned getNumAllocations() const { return AllocationSites.size(); }
+
+ /// Allocate ONE element of the given descriptor.
+ Block *allocate(const Descriptor *D, unsigned EvalID);
+ /// Allocate \p NumElements primitive elements of the given type.
+ Block *allocate(const Expr *Source, PrimType T, size_t NumElements,
+ unsigned EvalID);
+ /// Allocate \p NumElements elements of the given descriptor.
+ Block *allocate(const Descriptor *D, size_t NumElements, unsigned EvalID);
+
+ /// Deallocate the given source+block combination.
+ /// Returns \c true if anything has been deallocated, \c false otherwise.
+ bool deallocate(const Expr *Source, const Block *BlockToDelete,
+ InterpState &S);
+
+ /// Checks whether the allocation done at the given source is an array
+ /// allocation.
+ bool isArrayAllocation(const Expr *Source) const {
+ if (auto It = AllocationSites.find(Source); It != AllocationSites.end())
+ return It->second.IsArrayAllocation;
+ return false;
+ }
+
+ /// Allocation site iterator.
+ using const_virtual_iter =
+ llvm::DenseMap<const Expr *, AllocationSite>::const_iterator;
+ llvm::iterator_range<const_virtual_iter> allocation_sites() const {
+ return llvm::make_range(AllocationSites.begin(), AllocationSites.end());
+ }
+
+private:
+ llvm::DenseMap<const Expr *, AllocationSite> AllocationSites;
+
+ using PoolAllocTy = llvm::BumpPtrAllocatorImpl<llvm::MallocAllocator>;
+ PoolAllocTy DescAllocator;
+
+ /// Allocates a new descriptor.
+ template <typename... Ts> Descriptor *allocateDescriptor(Ts &&...Args) {
+ return new (DescAllocator) Descriptor(std::forward<Ts>(Args)...);
+ }
+};
+
+} // namespace interp
+} // namespace clang
+#endif
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.cpp b/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.cpp
index a60f893de8bd..08536536ac3c 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "EvalEmitter.h"
-#include "ByteCodeGenError.h"
#include "Context.h"
#include "IntegralAP.h"
#include "Interp.h"
@@ -18,11 +17,11 @@ using namespace clang;
using namespace clang::interp;
EvalEmitter::EvalEmitter(Context &Ctx, Program &P, State &Parent,
- InterpStack &Stk, APValue &Result)
+ InterpStack &Stk)
: Ctx(Ctx), P(P), S(Parent, P, Stk, Ctx, this), EvalResult(&Ctx) {
// Create a dummy frame for the interpreter which does not have locals.
S.Current =
- new InterpFrame(S, /*Func=*/nullptr, /*Caller=*/nullptr, CodePtr());
+ new InterpFrame(S, /*Func=*/nullptr, /*Caller=*/nullptr, CodePtr(), 0);
}
EvalEmitter::~EvalEmitter() {
@@ -33,21 +32,47 @@ EvalEmitter::~EvalEmitter() {
}
}
-EvaluationResult EvalEmitter::interpretExpr(const Expr *E) {
+/// Clean up all our resources. This needs to be done in failed evaluations
+/// we call InterpStack::clear(), because there might be a Pointer on the stack
+/// pointing into a Block in the EvalEmitter.
+void EvalEmitter::cleanup() { S.cleanup(); }
+
+EvaluationResult EvalEmitter::interpretExpr(const Expr *E,
+ bool ConvertResultToRValue) {
+ S.setEvalLocation(E->getExprLoc());
+ this->ConvertResultToRValue = ConvertResultToRValue && !isa<ConstantExpr>(E);
+ this->CheckFullyInitialized = isa<ConstantExpr>(E);
EvalResult.setSource(E);
- if (!this->visitExpr(E))
+ if (!this->visitExpr(E)) {
+ // EvalResult may already have a result set, but something failed
+ // after that (e.g. evaluating destructors).
EvalResult.setInvalid();
+ }
return std::move(this->EvalResult);
}
-EvaluationResult EvalEmitter::interpretDecl(const VarDecl *VD) {
+EvaluationResult EvalEmitter::interpretDecl(const VarDecl *VD,
+ bool CheckFullyInitialized) {
+ this->CheckFullyInitialized = CheckFullyInitialized;
+ S.EvaluatingDecl = VD;
+ EvalResult.setSource(VD);
+
+ if (const Expr *Init = VD->getAnyInitializer()) {
+ QualType T = VD->getType();
+ this->ConvertResultToRValue = !Init->isGLValue() && !T->isPointerType() &&
+ !T->isObjCObjectPointerType();
+ } else
+ this->ConvertResultToRValue = false;
+
EvalResult.setSource(VD);
- if (!this->visitDecl(VD))
+ if (!this->visitDeclAndReturn(VD, S.inConstantContext()))
EvalResult.setInvalid();
+ S.EvaluatingDecl = nullptr;
+ updateGlobalTemporaries();
return std::move(this->EvalResult);
}
@@ -60,7 +85,7 @@ EvalEmitter::LabelTy EvalEmitter::getLabel() { return NextLabel++; }
Scope::Local EvalEmitter::createLocal(Descriptor *D) {
// Allocate memory for a local.
auto Memory = std::make_unique<char[]>(sizeof(Block) + D->getAllocSize());
- auto *B = new (Memory.get()) Block(D, /*isStatic=*/false);
+ auto *B = new (Memory.get()) Block(Ctx.getEvalID(), D, /*isStatic=*/false);
B->invokeCtor();
// Initialize local variable inline descriptor.
@@ -108,35 +133,90 @@ bool EvalEmitter::fallthrough(const LabelTy &Label) {
return true;
}
+static bool checkReturnState(InterpState &S) {
+ return S.maybeDiagnoseDanglingAllocations();
+}
+
template <PrimType OpType> bool EvalEmitter::emitRet(const SourceInfo &Info) {
if (!isActive())
return true;
+
+ if (!checkReturnState(S))
+ return false;
+
using T = typename PrimConv<OpType>::T;
- EvalResult.setValue(S.Stk.pop<T>().toAPValue());
+ EvalResult.setValue(S.Stk.pop<T>().toAPValue(Ctx.getASTContext()));
return true;
}
template <> bool EvalEmitter::emitRet<PT_Ptr>(const SourceInfo &Info) {
if (!isActive())
return true;
- EvalResult.setPointer(S.Stk.pop<Pointer>());
+
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!EvalResult.checkReturnValue(S, Ctx, Ptr, Info))
+ return false;
+ if (CheckFullyInitialized && !EvalResult.checkFullyInitialized(S, Ptr))
+ return false;
+
+ if (!checkReturnState(S))
+ return false;
+
+ // Implicitly convert lvalue to rvalue, if requested.
+ if (ConvertResultToRValue) {
+ if (!Ptr.isZero() && !Ptr.isDereferencable())
+ return false;
+ // Never allow reading from a non-const pointer, unless the memory
+ // has been created in this evaluation.
+ if (!Ptr.isZero() && Ptr.isBlockPointer() &&
+ Ptr.block()->getEvalID() != Ctx.getEvalID() &&
+ (!CheckLoad(S, OpPC, Ptr, AK_Read) || !Ptr.isConst()))
+ return false;
+
+ if (std::optional<APValue> V =
+ Ptr.toRValue(Ctx, EvalResult.getSourceType())) {
+ EvalResult.setValue(*V);
+ } else {
+ return false;
+ }
+ } else {
+ EvalResult.setValue(Ptr.toAPValue(Ctx.getASTContext()));
+ }
+
return true;
}
template <> bool EvalEmitter::emitRet<PT_FnPtr>(const SourceInfo &Info) {
if (!isActive())
return true;
+
+ if (!checkReturnState(S))
+ return false;
+ // Function pointers cannot be converted to rvalues.
EvalResult.setFunctionPointer(S.Stk.pop<FunctionPointer>());
return true;
}
bool EvalEmitter::emitRetVoid(const SourceInfo &Info) {
+ if (!checkReturnState(S))
+ return false;
EvalResult.setValid();
return true;
}
bool EvalEmitter::emitRetValue(const SourceInfo &Info) {
const auto &Ptr = S.Stk.pop<Pointer>();
- if (std::optional<APValue> APV = Ptr.toRValue(S.getCtx())) {
+
+ if (!EvalResult.checkReturnValue(S, Ctx, Ptr, Info))
+ return false;
+ if (CheckFullyInitialized && !EvalResult.checkFullyInitialized(S, Ptr))
+ return false;
+
+ if (!checkReturnState(S))
+ return false;
+
+ if (std::optional<APValue> APV =
+ Ptr.toRValue(S.getCtx(), EvalResult.getSourceType())) {
EvalResult.setValue(*APV);
return true;
}
@@ -193,6 +273,30 @@ bool EvalEmitter::emitDestroy(uint32_t I, const SourceInfo &Info) {
return true;
}
+/// Global temporaries (LifetimeExtendedTemporary) carry their value
+/// around as an APValue, which codegen accesses.
+/// We set their value once when creating them, but we don't update it
+/// afterwards when code changes it later.
+/// This is what we do here.
+void EvalEmitter::updateGlobalTemporaries() {
+ for (const auto &[E, Temp] : S.SeenGlobalTemporaries) {
+ if (std::optional<unsigned> GlobalIndex = P.getGlobal(E)) {
+ const Pointer &Ptr = P.getPtrGlobal(*GlobalIndex);
+ APValue *Cached = Temp->getOrCreateValue(true);
+
+ if (std::optional<PrimType> T = Ctx.classify(E->getType())) {
+ TYPE_SWITCH(
+ *T, { *Cached = Ptr.deref<T>().toAPValue(Ctx.getASTContext()); });
+ } else {
+ if (std::optional<APValue> APV =
+ Ptr.toRValue(Ctx, Temp->getTemporaryExpr()->getType()))
+ *Cached = *APV;
+ }
+ }
+ }
+ S.SeenGlobalTemporaries.clear();
+}
+
//===----------------------------------------------------------------------===//
// Opcode evaluators
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.h b/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.h
index deb2ebc4e61f..338786d3dea9 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.h
@@ -34,14 +34,17 @@ public:
using AddrTy = uintptr_t;
using Local = Scope::Local;
- EvaluationResult interpretExpr(const Expr *E);
- EvaluationResult interpretDecl(const VarDecl *VD);
+ EvaluationResult interpretExpr(const Expr *E,
+ bool ConvertResultToRValue = false);
+ EvaluationResult interpretDecl(const VarDecl *VD, bool CheckFullyInitialized);
+
+ /// Clean up all resources.
+ void cleanup();
InterpState &getState() { return S; }
protected:
- EvalEmitter(Context &Ctx, Program &P, State &Parent, InterpStack &Stk,
- APValue &Result);
+ EvalEmitter(Context &Ctx, Program &P, State &Parent, InterpStack &Stk);
virtual ~EvalEmitter();
@@ -52,7 +55,8 @@ protected:
/// Methods implemented by the compiler.
virtual bool visitExpr(const Expr *E) = 0;
- virtual bool visitDecl(const VarDecl *VD) = 0;
+ virtual bool visitDeclAndReturn(const VarDecl *VD, bool ConstantContext) = 0;
+ virtual bool visitFunc(const FunctionDecl *F) = 0;
/// Emits jumps.
bool jumpTrue(const LabelTy &Label);
@@ -60,6 +64,10 @@ protected:
bool jump(const LabelTy &Label);
bool fallthrough(const LabelTy &Label);
+ /// Since expressions can only jump forward, predicated execution is
+ /// used to deal with if-else statements.
+ bool isActive() const { return CurrentLabel == ActiveLabel; }
+
/// Callback for registering a local.
Local createLocal(Descriptor *D);
@@ -73,7 +81,7 @@ protected:
/// Lambda captures.
llvm::DenseMap<const ValueDecl *, ParamOffset> LambdaCaptures;
/// Offset of the This parameter in a lambda record.
- unsigned LambdaThisCapture = 0;
+ ParamOffset LambdaThisCapture{0, false};
/// Local descriptors.
llvm::SmallVector<SmallVector<Local, 8>, 2> Descriptors;
@@ -86,6 +94,11 @@ private:
InterpState S;
/// Location to write the result to.
EvaluationResult EvalResult;
+ /// Whether the result should be converted to an RValue.
+ bool ConvertResultToRValue = false;
+ /// Whether we should check if the result has been fully
+ /// initialized.
+ bool CheckFullyInitialized = false;
/// Temporaries which require storage.
llvm::DenseMap<unsigned, std::unique_ptr<char[]>> Locals;
@@ -96,6 +109,8 @@ private:
return reinterpret_cast<Block *>(It->second.get());
}
+ void updateGlobalTemporaries();
+
// The emitter always tracks the current instruction and sets OpPC to a token
// value which is mapped to the location of the opcode being evaluated.
CodePtr OpPC;
@@ -109,10 +124,6 @@ private:
/// Active block which should be executed.
LabelTy ActiveLabel = 0;
- /// Since expressions can only jump forward, predicated execution is
- /// used to deal with if-else statements.
- bool isActive() const { return CurrentLabel == ActiveLabel; }
-
protected:
#define GET_EVAL_PROTO
#include "Opcodes.inc"
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.cpp b/contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.cpp
index a14dc87f1dfd..1b255711c7b3 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.cpp
@@ -7,10 +7,10 @@
//===----------------------------------------------------------------------===//
#include "EvaluationResult.h"
-#include "Context.h"
#include "InterpState.h"
#include "Record.h"
#include "clang/AST/ExprCXX.h"
+#include "llvm/ADT/SetVector.h"
namespace clang {
namespace interp {
@@ -21,9 +21,9 @@ APValue EvaluationResult::toAPValue() const {
case LValue:
// Either a pointer or a function pointer.
if (const auto *P = std::get_if<Pointer>(&Value))
- return P->toAPValue();
+ return P->toAPValue(Ctx->getASTContext());
else if (const auto *FP = std::get_if<FunctionPointer>(&Value))
- return FP->toAPValue();
+ return FP->toAPValue(Ctx->getASTContext());
else
llvm_unreachable("Unhandled LValue type");
break;
@@ -44,9 +44,9 @@ std::optional<APValue> EvaluationResult::toRValue() const {
// We have a pointer and want an RValue.
if (const auto *P = std::get_if<Pointer>(&Value))
- return P->toRValue(*Ctx);
+ return P->toRValue(*Ctx, getSourceType());
else if (const auto *FP = std::get_if<FunctionPointer>(&Value)) // Nope
- return FP->toAPValue();
+ return FP->toAPValue(Ctx->getASTContext());
llvm_unreachable("Unhandled lvalue kind");
}
@@ -66,7 +66,7 @@ static bool CheckArrayInitialized(InterpState &S, SourceLocation Loc,
const Pointer &BasePtr,
const ConstantArrayType *CAT) {
bool Result = true;
- size_t NumElems = CAT->getSize().getZExtValue();
+ size_t NumElems = CAT->getZExtSize();
QualType ElemType = CAT->getElementType();
if (ElemType->isRecordType()) {
@@ -101,10 +101,16 @@ static bool CheckFieldsInitialized(InterpState &S, SourceLocation Loc,
Pointer FieldPtr = BasePtr.atField(F.Offset);
QualType FieldType = F.Decl->getType();
+ // Don't check inactive union members.
+ if (R->isUnion() && !FieldPtr.isActive())
+ continue;
+
if (FieldType->isRecordType()) {
Result &= CheckFieldsInitialized(S, Loc, FieldPtr, FieldPtr.getRecord());
} else if (FieldType->isIncompleteArrayType()) {
// Nothing to do here.
+ } else if (F.Decl->isUnnamedBitField()) {
+ // Nothing do do here.
} else if (FieldType->isArrayType()) {
const auto *CAT =
cast<ConstantArrayType>(FieldType->getAsArrayTypeUnsafe());
@@ -119,9 +125,16 @@ static bool CheckFieldsInitialized(InterpState &S, SourceLocation Loc,
for (const Record::Base &B : R->bases()) {
Pointer P = BasePtr.atField(B.Offset);
if (!P.isInitialized()) {
- S.FFDiag(BasePtr.getDeclDesc()->asDecl()->getLocation(),
- diag::note_constexpr_uninitialized_base)
- << B.Desc->getType();
+ const Descriptor *Desc = BasePtr.getDeclDesc();
+ if (Desc->asDecl())
+ S.FFDiag(BasePtr.getDeclDesc()->asDecl()->getLocation(),
+ diag::note_constexpr_uninitialized_base)
+ << B.Desc->getType();
+ else
+ S.FFDiag(BasePtr.getDeclDesc()->asExpr()->getExprLoc(),
+ diag::note_constexpr_uninitialized_base)
+ << B.Desc->getType();
+
return false;
}
Result &= CheckFieldsInitialized(S, Loc, P, B.R);
@@ -132,64 +145,99 @@ static bool CheckFieldsInitialized(InterpState &S, SourceLocation Loc,
return Result;
}
-bool EvaluationResult::checkFullyInitialized(InterpState &S) const {
+bool EvaluationResult::checkFullyInitialized(InterpState &S,
+ const Pointer &Ptr) const {
assert(Source);
- assert(isLValue());
+ assert(empty());
- // Our Source must be a VarDecl.
- const Decl *SourceDecl = Source.dyn_cast<const Decl *>();
- assert(SourceDecl);
- const auto *VD = cast<VarDecl>(SourceDecl);
- assert(VD->getType()->isRecordType() || VD->getType()->isArrayType());
- SourceLocation InitLoc = VD->getAnyInitializer()->getExprLoc();
+ if (Ptr.isZero())
+ return true;
- const Pointer &Ptr = *std::get_if<Pointer>(&Value);
- assert(!Ptr.isZero());
+ // We can't inspect dead pointers at all. Return true here so we can
+ // diagnose them later.
+ if (!Ptr.isLive())
+ return true;
+
+ SourceLocation InitLoc;
+ if (const auto *D = Source.dyn_cast<const Decl *>())
+ InitLoc = cast<VarDecl>(D)->getAnyInitializer()->getExprLoc();
+ else if (const auto *E = Source.dyn_cast<const Expr *>())
+ InitLoc = E->getExprLoc();
if (const Record *R = Ptr.getRecord())
return CheckFieldsInitialized(S, InitLoc, Ptr, R);
- const auto *CAT =
- cast<ConstantArrayType>(Ptr.getType()->getAsArrayTypeUnsafe());
- return CheckArrayInitialized(S, InitLoc, Ptr, CAT);
+
+ if (const auto *CAT = dyn_cast_if_present<ConstantArrayType>(
+ Ptr.getType()->getAsArrayTypeUnsafe()))
+ return CheckArrayInitialized(S, InitLoc, Ptr, CAT);
return true;
}
-void EvaluationResult::dump() const {
- assert(Ctx);
- auto &OS = llvm::errs();
- const ASTContext &ASTCtx = Ctx->getASTContext();
+static void collectBlocks(const Pointer &Ptr,
+ llvm::SetVector<const Block *> &Blocks) {
+ auto isUsefulPtr = [](const Pointer &P) -> bool {
+ return P.isLive() && !P.isZero() && !P.isDummy() &&
+ !P.isUnknownSizeArray() && !P.isOnePastEnd() && P.isBlockPointer();
+ };
- switch (Kind) {
- case Empty:
- OS << "Empty\n";
- break;
- case RValue:
- OS << "RValue: ";
- std::get<APValue>(Value).dump(OS, ASTCtx);
- break;
- case LValue: {
- assert(Source);
- QualType SourceType;
- if (const auto *D = Source.dyn_cast<const Decl *>()) {
- if (const auto *VD = dyn_cast<ValueDecl>(D))
- SourceType = VD->getType();
- } else if (const auto *E = Source.dyn_cast<const Expr *>()) {
- SourceType = E->getType();
- }
+ if (!isUsefulPtr(Ptr))
+ return;
- OS << "LValue: ";
- if (const auto *P = std::get_if<Pointer>(&Value))
- P->toAPValue().printPretty(OS, ASTCtx, SourceType);
- else if (const auto *FP = std::get_if<FunctionPointer>(&Value)) // Nope
- FP->toAPValue().printPretty(OS, ASTCtx, SourceType);
- OS << "\n";
- break;
+ Blocks.insert(Ptr.block());
+
+ const Descriptor *Desc = Ptr.getFieldDesc();
+ if (!Desc)
+ return;
+
+ if (const Record *R = Desc->ElemRecord) {
+ for (const Record::Field &F : R->fields()) {
+ const Pointer &FieldPtr = Ptr.atField(F.Offset);
+ assert(FieldPtr.block() == Ptr.block());
+ collectBlocks(FieldPtr, Blocks);
+ }
+ } else if (Desc->isPrimitive() && Desc->getPrimType() == PT_Ptr) {
+ const Pointer &Pointee = Ptr.deref<Pointer>();
+ if (isUsefulPtr(Pointee) && !Blocks.contains(Pointee.block()))
+ collectBlocks(Pointee, Blocks);
+
+ } else if (Desc->isPrimitiveArray() && Desc->getPrimType() == PT_Ptr) {
+ for (unsigned I = 0; I != Desc->getNumElems(); ++I) {
+ const Pointer &ElemPointee = Ptr.atIndex(I).deref<Pointer>();
+ if (isUsefulPtr(ElemPointee) && !Blocks.contains(ElemPointee.block()))
+ collectBlocks(ElemPointee, Blocks);
+ }
+ } else if (Desc->isCompositeArray()) {
+ for (unsigned I = 0; I != Desc->getNumElems(); ++I) {
+ const Pointer &ElemPtr = Ptr.atIndex(I).narrow();
+ collectBlocks(ElemPtr, Blocks);
+ }
}
+}
- default:
- llvm_unreachable("Can't print that.");
+bool EvaluationResult::checkReturnValue(InterpState &S, const Context &Ctx,
+ const Pointer &Ptr,
+ const SourceInfo &Info) {
+ // Collect all blocks that this pointer (transitively) points to and
+ // return false if any of them is a dynamic block.
+ llvm::SetVector<const Block *> Blocks;
+
+ collectBlocks(Ptr, Blocks);
+
+ for (const Block *B : Blocks) {
+ if (B->isDynamic()) {
+ assert(B->getDescriptor());
+ assert(B->getDescriptor()->asExpr());
+
+ S.FFDiag(Info, diag::note_constexpr_dynamic_alloc)
+ << Ptr.getType()->isReferenceType() << !Ptr.isRoot();
+ S.Note(B->getDescriptor()->asExpr()->getExprLoc(),
+ diag::note_constexpr_dynamic_alloc_here);
+ return false;
+ }
}
+
+ return true;
}
} // namespace interp
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.h b/contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.h
index 2b9fc16f1a0a..ef662e3779bc 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.h
@@ -56,8 +56,8 @@ private:
void setSource(DeclTy D) { Source = D; }
void setValue(const APValue &V) {
+ // V could still be an LValue.
assert(empty());
- assert(!V.isLValue());
Value = std::move(V);
Kind = RValue;
}
@@ -72,7 +72,8 @@ private:
Kind = LValue;
}
void setInvalid() {
- assert(empty());
+ // We are NOT asserting empty() here, since setting it to invalid
+ // is allowed even if there is already a result.
Kind = Invalid;
}
void setValid() {
@@ -97,12 +98,27 @@ public:
/// LValue and we can't read from it.
std::optional<APValue> toRValue() const;
- bool checkFullyInitialized(InterpState &S) const;
+ /// Check that all subobjects of the given pointer have been initialized.
+ bool checkFullyInitialized(InterpState &S, const Pointer &Ptr) const;
+ /// Check that none of the blocks the given pointer (transitively) points
+ /// to are dynamically allocated.
+ bool checkReturnValue(InterpState &S, const Context &Ctx, const Pointer &Ptr,
+ const SourceInfo &Info);
+
+ QualType getSourceType() const {
+ if (const auto *D =
+ dyn_cast_if_present<ValueDecl>(Source.dyn_cast<const Decl *>()))
+ return D->getType();
+ else if (const auto *E = Source.dyn_cast<const Expr *>())
+ return E->getType();
+ return QualType();
+ }
/// Dump to stderr.
void dump() const;
friend class EvalEmitter;
+ friend class InterpState;
};
} // namespace interp
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Floating.h b/contrib/llvm-project/clang/lib/AST/Interp/Floating.h
index e4ac76d8509f..114487821880 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Floating.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Floating.h
@@ -69,7 +69,7 @@ public:
APSInt toAPSInt(unsigned NumBits = 0) const {
return APSInt(F.bitcastToAPInt());
}
- APValue toAPValue() const { return APValue(F); }
+ APValue toAPValue(const ASTContext &) const { return APValue(F); }
void print(llvm::raw_ostream &OS) const {
// Can't use APFloat::print() since it appends a newline.
SmallVector<char, 16> Buffer;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Function.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Function.cpp
index 1d04998d5dd1..00f5a1fced53 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Function.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Function.cpp
@@ -40,7 +40,8 @@ SourceInfo Function::getSource(CodePtr PC) const {
unsigned Offset = PC - getCodeBegin();
using Elem = std::pair<unsigned, SourceInfo>;
auto It = llvm::lower_bound(SrcMap, Elem{Offset, {}}, llvm::less_first());
- assert(It != SrcMap.end());
+ if (It == SrcMap.end())
+ return SrcMap.back().second;
return It->second;
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Function.h b/contrib/llvm-project/clang/lib/AST/Interp/Function.h
index 7c3e0f630249..92bcd9692791 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Function.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Function.h
@@ -15,9 +15,10 @@
#ifndef LLVM_CLANG_AST_INTERP_FUNCTION_H
#define LLVM_CLANG_AST_INTERP_FUNCTION_H
-#include "Source.h"
#include "Descriptor.h"
+#include "Source.h"
#include "clang/AST/ASTLambda.h"
+#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "llvm/Support/raw_ostream.h"
@@ -108,6 +109,8 @@ public:
/// Checks if the first argument is a RVO pointer.
bool hasRVO() const { return HasRVO; }
+ bool hasNonNullAttr() const { return getDecl()->hasAttr<NonNullAttr>(); }
+
/// Range over the scope blocks.
llvm::iterator_range<llvm::SmallVector<Scope, 2>::const_iterator>
scopes() const {
@@ -183,6 +186,22 @@ public:
unsigned getNumParams() const { return ParamTypes.size(); }
+  /// Returns the number of parameters this function takes when it's called,
+  /// i.e. excluding the instance pointer and the RVO pointer.
+ unsigned getNumWrittenParams() const {
+ assert(getNumParams() >= (unsigned)(hasThisPointer() + hasRVO()));
+ return getNumParams() - hasThisPointer() - hasRVO();
+ }
+ unsigned getWrittenArgSize() const {
+ return ArgSize - (align(primSize(PT_Ptr)) * (hasThisPointer() + hasRVO()));
+ }
+
+ bool isThisPointerExplicit() const {
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(F))
+ return MD->isExplicitObjectMemberFunction();
+ return false;
+ }
+
unsigned getParamOffset(unsigned ParamIndex) const {
return ParamOffsets[ParamIndex];
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/FunctionPointer.h b/contrib/llvm-project/clang/lib/AST/Interp/FunctionPointer.h
index 4a3f993d4882..0f2c6e571a1d 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/FunctionPointer.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/FunctionPointer.h
@@ -1,4 +1,4 @@
-//===--- FunctionPointer.h - Types for the constexpr VM ----------*- C++ -*-===//
+//===--- FunctionPointer.h - Types for the constexpr VM ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -20,26 +20,46 @@ namespace interp {
class FunctionPointer final {
private:
const Function *Func;
+ bool Valid;
public:
- FunctionPointer() : Func(nullptr) {}
- FunctionPointer(const Function *Func) : Func(Func) { assert(Func); }
+ FunctionPointer(const Function *Func) : Func(Func), Valid(true) {
+ assert(Func);
+ }
+
+ FunctionPointer(uintptr_t IntVal = 0, const Descriptor *Desc = nullptr)
+ : Func(reinterpret_cast<const Function *>(IntVal)), Valid(false) {}
const Function *getFunction() const { return Func; }
+ bool isZero() const { return !Func; }
+ bool isValid() const { return Valid; }
+ bool isWeak() const {
+ if (!Func || !Valid)
+ return false;
+
+ return Func->getDecl()->isWeak();
+ }
- APValue toAPValue() const {
+ APValue toAPValue(const ASTContext &) const {
if (!Func)
return APValue(static_cast<Expr *>(nullptr), CharUnits::Zero(), {},
/*OnePastTheEnd=*/false, /*IsNull=*/true);
+ if (!Valid)
+ return APValue(static_cast<Expr *>(nullptr),
+ CharUnits::fromQuantity(getIntegerRepresentation()), {},
+ /*OnePastTheEnd=*/false, /*IsNull=*/false);
+
return APValue(Func->getDecl(), CharUnits::Zero(), {},
/*OnePastTheEnd=*/false, /*IsNull=*/false);
}
void print(llvm::raw_ostream &OS) const {
OS << "FnPtr(";
- if (Func)
+ if (Func && Valid)
OS << Func->getName();
+ else if (Func)
+ OS << reinterpret_cast<uintptr_t>(Func);
else
OS << "nullptr";
OS << ")";
@@ -49,7 +69,11 @@ public:
if (!Func)
return "nullptr";
- return toAPValue().getAsString(Ctx, Func->getDecl()->getType());
+ return toAPValue(Ctx).getAsString(Ctx, Func->getDecl()->getType());
+ }
+
+ uint64_t getIntegerRepresentation() const {
+ return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(Func));
}
ComparisonCategoryResult compare(const FunctionPointer &RHS) const {
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Integral.h b/contrib/llvm-project/clang/lib/AST/Interp/Integral.h
index cc1cab8f39fb..aafdd02676c9 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Integral.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Integral.h
@@ -98,10 +98,10 @@ public:
return Integral<DstBits, DstSign>(V);
}
- explicit operator unsigned() const { return V; }
- explicit operator int64_t() const { return V; }
- explicit operator uint64_t() const { return V; }
- explicit operator int32_t() const { return V; }
+ template <typename Ty, typename = std::enable_if_t<std::is_integral_v<Ty>>>
+ explicit operator Ty() const {
+ return V;
+ }
APSInt toAPSInt() const {
return APSInt(APInt(Bits, static_cast<uint64_t>(V), Signed), !Signed);
@@ -112,7 +112,7 @@ public:
else
return APSInt(toAPSInt().zextOrTrunc(NumBits), !Signed);
}
- APValue toAPValue() const { return APValue(toAPSInt()); }
+ APValue toAPValue(const ASTContext &) const { return APValue(toAPSInt()); }
Integral<Bits, false> toUnsigned() const {
return Integral<Bits, false>(*this);
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/IntegralAP.h b/contrib/llvm-project/clang/lib/AST/Interp/IntegralAP.h
index 55e29caa1cd7..b8aa21038256 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/IntegralAP.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/IntegralAP.h
@@ -61,7 +61,7 @@ public:
IntegralAP(APInt V) : V(V) {}
/// Arbitrary value for uninitialized variables.
- IntegralAP() : IntegralAP(-1, 1024) {}
+ IntegralAP() : IntegralAP(-1, 3) {}
IntegralAP operator-() const { return IntegralAP(-V); }
IntegralAP operator-(const IntegralAP &Other) const {
@@ -133,7 +133,7 @@ public:
else
return APSInt(V.zext(Bits), !Signed);
}
- APValue toAPValue() const { return APValue(toAPSInt()); }
+ APValue toAPValue(const ASTContext &) const { return APValue(toAPSInt()); }
bool isZero() const { return V.isZero(); }
bool isPositive() const { return V.isNonNegative(); }
@@ -154,7 +154,10 @@ public:
}
IntegralAP truncate(unsigned BitWidth) const {
- return IntegralAP(V.trunc(BitWidth));
+ if constexpr (Signed)
+ return IntegralAP(V.trunc(BitWidth).sextOrTrunc(this->bitWidth()));
+ else
+ return IntegralAP(V.trunc(BitWidth).zextOrTrunc(this->bitWidth()));
}
IntegralAP<false> toUnsigned() const {
@@ -263,6 +266,31 @@ public:
*R = IntegralAP(A.V.lshr(ShiftAmount));
}
+ // === Serialization support ===
+ size_t bytesToSerialize() const {
+ // 4 bytes for the BitWidth followed by N bytes for the actual APInt.
+ return sizeof(uint32_t) + (V.getBitWidth() / CHAR_BIT);
+ }
+
+ void serialize(std::byte *Buff) const {
+ assert(V.getBitWidth() < std::numeric_limits<uint8_t>::max());
+ uint32_t BitWidth = V.getBitWidth();
+
+ std::memcpy(Buff, &BitWidth, sizeof(uint32_t));
+ llvm::StoreIntToMemory(V, (uint8_t *)(Buff + sizeof(uint32_t)),
+ BitWidth / CHAR_BIT);
+ }
+
+ static IntegralAP<Signed> deserialize(const std::byte *Buff) {
+ uint32_t BitWidth;
+ std::memcpy(&BitWidth, Buff, sizeof(uint32_t));
+ IntegralAP<Signed> Val(APInt(BitWidth, 0ull, !Signed));
+
+ llvm::LoadIntFromMemory(Val.V, (const uint8_t *)Buff + sizeof(uint32_t),
+ BitWidth / CHAR_BIT);
+ return Val;
+ }
+
private:
template <template <typename T> class Op>
static bool CheckAddSubMulUB(const IntegralAP &A, const IntegralAP &B,
@@ -289,6 +317,11 @@ inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
return OS;
}
+template <bool Signed>
+IntegralAP<Signed> getSwappedBytes(IntegralAP<Signed> F) {
+ return F;
+}
+
} // namespace interp
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp
index 807b860f3565..0f9eedc3f38e 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp
@@ -7,10 +7,9 @@
//===----------------------------------------------------------------------===//
#include "Interp.h"
-#include <limits>
-#include <vector>
#include "Function.h"
#include "InterpFrame.h"
+#include "InterpShared.h"
#include "InterpStack.h"
#include "Opcode.h"
#include "PrimType.h"
@@ -19,9 +18,15 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/StringExtras.h"
+#include <limits>
+#include <vector>
+
+using namespace clang;
using namespace clang;
using namespace clang::interp;
@@ -53,22 +58,67 @@ static bool Jf(InterpState &S, CodePtr &PC, int32_t Offset) {
return true;
}
+static void diagnoseMissingInitializer(InterpState &S, CodePtr OpPC,
+ const ValueDecl *VD) {
+ const SourceInfo &E = S.Current->getSource(OpPC);
+ S.FFDiag(E, diag::note_constexpr_var_init_unknown, 1) << VD;
+ S.Note(VD->getLocation(), diag::note_declared_at) << VD->getSourceRange();
+}
+
+static void diagnoseNonConstVariable(InterpState &S, CodePtr OpPC,
+ const ValueDecl *VD);
+static bool diagnoseUnknownDecl(InterpState &S, CodePtr OpPC,
+ const ValueDecl *D) {
+ const SourceInfo &E = S.Current->getSource(OpPC);
+
+ if (isa<ParmVarDecl>(D)) {
+ if (S.getLangOpts().CPlusPlus11) {
+ S.FFDiag(E, diag::note_constexpr_function_param_value_unknown) << D;
+ S.Note(D->getLocation(), diag::note_declared_at) << D->getSourceRange();
+ } else {
+ S.FFDiag(E);
+ }
+ return false;
+ }
+
+ if (!D->getType().isConstQualified())
+ diagnoseNonConstVariable(S, OpPC, D);
+ else if (const auto *VD = dyn_cast<VarDecl>(D);
+ VD && !VD->getAnyInitializer())
+ diagnoseMissingInitializer(S, OpPC, VD);
+
+ return false;
+}
+
static void diagnoseNonConstVariable(InterpState &S, CodePtr OpPC,
const ValueDecl *VD) {
if (!S.getLangOpts().CPlusPlus)
return;
const SourceInfo &Loc = S.Current->getSource(OpPC);
+ if (const auto *VarD = dyn_cast<VarDecl>(VD);
+ VarD && VarD->getType().isConstQualified() &&
+ !VarD->getAnyInitializer()) {
+ diagnoseMissingInitializer(S, OpPC, VD);
+ return;
+ }
- if (VD->getType()->isIntegralOrEnumerationType())
+ // Rather random, but this is to match the diagnostic output of the current
+ // interpreter.
+ if (isa<ObjCIvarDecl>(VD))
+ return;
+
+ if (VD->getType()->isIntegralOrEnumerationType()) {
S.FFDiag(Loc, diag::note_constexpr_ltor_non_const_int, 1) << VD;
- else
- S.FFDiag(Loc,
- S.getLangOpts().CPlusPlus11
- ? diag::note_constexpr_ltor_non_constexpr
- : diag::note_constexpr_ltor_non_integral,
- 1)
- << VD << VD->getType();
+ S.Note(VD->getLocation(), diag::note_declared_at);
+ return;
+ }
+
+ S.FFDiag(Loc,
+ S.getLangOpts().CPlusPlus11 ? diag::note_constexpr_ltor_non_constexpr
+ : diag::note_constexpr_ltor_non_integral,
+ 1)
+ << VD << VD->getType();
S.Note(VD->getLocation(), diag::note_declared_at);
}
@@ -141,7 +191,7 @@ static bool CheckGlobal(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
namespace clang {
namespace interp {
static void popArg(InterpState &S, const Expr *Arg) {
- PrimType Ty = S.getContext().classify(Arg->getType()).value_or(PT_Ptr);
+ PrimType Ty = S.getContext().classify(Arg).value_or(PT_Ptr);
TYPE_SWITCH(Ty, S.Stk.discard<T>());
}
@@ -169,16 +219,27 @@ void cleanupAfterFunctionCall(InterpState &S, CodePtr OpPC) {
// CallExpr we're look for is at the return PC of the current function, i.e.
// in the caller.
// This code path should be executed very rarely.
- const auto *CE =
- cast<CallExpr>(S.Current->Caller->getExpr(S.Current->getRetPC()));
- unsigned FixedParams = CurFunc->getNumParams();
- int32_t ArgsToPop = CE->getNumArgs() - FixedParams;
- assert(ArgsToPop >= 0);
- for (int32_t I = ArgsToPop - 1; I >= 0; --I) {
- const Expr *A = CE->getArg(FixedParams + I);
+ unsigned NumVarArgs;
+ const Expr *const *Args = nullptr;
+ unsigned NumArgs = 0;
+ const Expr *CallSite = S.Current->Caller->getExpr(S.Current->getRetPC());
+ if (const auto *CE = dyn_cast<CallExpr>(CallSite)) {
+ Args = CE->getArgs();
+ NumArgs = CE->getNumArgs();
+ } else if (const auto *CE = dyn_cast<CXXConstructExpr>(CallSite)) {
+ Args = CE->getArgs();
+ NumArgs = CE->getNumArgs();
+ } else
+ assert(false && "Can't get arguments from that expression type");
+
+ assert(NumArgs >= CurFunc->getNumWrittenParams());
+ NumVarArgs = NumArgs - CurFunc->getNumWrittenParams();
+ for (unsigned I = 0; I != NumVarArgs; ++I) {
+ const Expr *A = Args[NumArgs - 1 - I];
popArg(S, A);
}
}
+
// And in any case, remove the fixed parameters (the non-variadic ones)
// at the end.
S.Current->popArgs();
@@ -188,6 +249,10 @@ bool CheckExtern(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
if (!Ptr.isExtern())
return true;
+ if (Ptr.isInitialized() ||
+ (Ptr.getDeclDesc()->asVarDecl() == S.EvaluatingDecl))
+ return true;
+
if (!S.checkingPotentialConstantExpression() && S.getLangOpts().CPlusPlus) {
const auto *VD = Ptr.getDeclDesc()->asValueDecl();
diagnoseNonConstVariable(S, OpPC, VD);
@@ -240,10 +305,12 @@ bool CheckConstant(InterpState &S, CodePtr OpPC, const Descriptor *Desc) {
if (VD->isConstexpr())
return true;
+ QualType T = VD->getType();
if (S.getLangOpts().CPlusPlus && !S.getLangOpts().CPlusPlus11)
- return false;
+ return (T->isSignedIntegerOrEnumerationType() ||
+ T->isUnsignedIntegerOrEnumerationType()) &&
+ T.isConstQualified();
- QualType T = VD->getType();
if (T.isConstQualified())
return true;
@@ -256,31 +323,29 @@ bool CheckConstant(InterpState &S, CodePtr OpPC, const Descriptor *Desc) {
return false;
};
- if (const auto *D = Desc->asValueDecl()) {
- if (const auto *VD = dyn_cast<VarDecl>(D);
- VD && VD->hasGlobalStorage() && !IsConstType(VD)) {
- diagnoseNonConstVariable(S, OpPC, VD);
- return S.inConstantContext();
- }
+ if (const auto *D = Desc->asVarDecl();
+ D && D->hasGlobalStorage() && D != S.EvaluatingDecl && !IsConstType(D)) {
+ diagnoseNonConstVariable(S, OpPC, D);
+ return S.inConstantContext();
}
return true;
}
static bool CheckConstant(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
+ if (Ptr.isIntegralPointer())
+ return true;
return CheckConstant(S, OpPC, Ptr.getDeclDesc());
}
-bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
- return !Ptr.isZero() && !Ptr.isDummy();
-}
-
bool CheckNull(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
CheckSubobjectKind CSK) {
if (!Ptr.isZero())
return true;
const SourceInfo &Loc = S.Current->getSource(OpPC);
- S.FFDiag(Loc, diag::note_constexpr_null_subobject) << CSK;
+ S.FFDiag(Loc, diag::note_constexpr_null_subobject)
+ << CSK << S.Current->getRange(OpPC);
+
return false;
}
@@ -289,7 +354,8 @@ bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
if (!Ptr.isOnePastEnd())
return true;
const SourceInfo &Loc = S.Current->getSource(OpPC);
- S.FFDiag(Loc, diag::note_constexpr_access_past_end) << AK;
+ S.FFDiag(Loc, diag::note_constexpr_access_past_end)
+ << AK << S.Current->getRange(OpPC);
return false;
}
@@ -298,7 +364,8 @@ bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
if (!Ptr.isElementPastEnd())
return true;
const SourceInfo &Loc = S.Current->getSource(OpPC);
- S.FFDiag(Loc, diag::note_constexpr_past_end_subobject) << CSK;
+ S.FFDiag(Loc, diag::note_constexpr_past_end_subobject)
+ << CSK << S.Current->getRange(OpPC);
return false;
}
@@ -308,23 +375,53 @@ bool CheckSubobject(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
return true;
const SourceInfo &Loc = S.Current->getSource(OpPC);
- S.FFDiag(Loc, diag::note_constexpr_past_end_subobject) << CSK;
+ S.FFDiag(Loc, diag::note_constexpr_past_end_subobject)
+ << CSK << S.Current->getRange(OpPC);
+ return false;
+}
+
+bool CheckDowncast(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ uint32_t Offset) {
+ uint32_t MinOffset = Ptr.getDeclDesc()->getMetadataSize();
+ uint32_t PtrOffset = Ptr.getByteOffset();
+
+ // We subtract Offset from PtrOffset. The result must be at least
+ // MinOffset.
+ if (Offset < PtrOffset && (PtrOffset - Offset) >= MinOffset)
+ return true;
+
+ const auto *E = cast<CastExpr>(S.Current->getExpr(OpPC));
+ QualType TargetQT = E->getType()->getPointeeType();
+ QualType MostDerivedQT = Ptr.getDeclPtr().getType();
+
+ S.CCEDiag(E, diag::note_constexpr_invalid_downcast)
+ << MostDerivedQT << TargetQT;
+
return false;
}
bool CheckConst(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
assert(Ptr.isLive() && "Pointer is not live");
- if (!Ptr.isConst())
+ if (!Ptr.isConst() || Ptr.isMutable())
return true;
// The This pointer is writable in constructors and destructors,
// even if isConst() returns true.
- if (const Function *Func = S.Current->getFunction();
- Func && (Func->isConstructor() || Func->isDestructor()) &&
- Ptr.block() == S.Current->getThis().block()) {
- return true;
+ // TODO(perf): We could be hitting this code path quite a lot in complex
+ // constructors. Is there a better way to do this?
+ if (S.Current->getFunction()) {
+ for (const InterpFrame *Frame = S.Current; Frame; Frame = Frame->Caller) {
+ if (const Function *Func = Frame->getFunction();
+ Func && (Func->isConstructor() || Func->isDestructor()) &&
+ Ptr.block() == Frame->getThis().block()) {
+ return true;
+ }
+ }
}
+ if (!Ptr.isBlockPointer())
+ return false;
+
const QualType Ty = Ptr.getType();
const SourceInfo &Loc = S.Current->getSource(OpPC);
S.FFDiag(Loc, diag::note_constexpr_modify_const_type) << Ty;
@@ -333,9 +430,14 @@ bool CheckConst(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
assert(Ptr.isLive() && "Pointer is not live");
- if (!Ptr.isMutable()) {
+ if (!Ptr.isMutable())
+ return true;
+
+ // In C++14 onwards, it is permitted to read a mutable member whose
+ // lifetime began within the evaluation.
+ if (S.getLangOpts().CPlusPlus14 &&
+ Ptr.block()->getEvalID() == S.Ctx.getEvalID())
return true;
- }
const SourceInfo &Loc = S.Current->getSource(OpPC);
const FieldDecl *Field = Ptr.getField();
@@ -344,11 +446,46 @@ bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
return false;
}
+bool CheckVolatile(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK) {
+ assert(Ptr.isLive());
+
+ // FIXME: This check here might be kinda expensive. Maybe it would be better
+ // to have another field in InlineDescriptor for this?
+ if (!Ptr.isBlockPointer())
+ return true;
+
+ QualType PtrType = Ptr.getType();
+ if (!PtrType.isVolatileQualified())
+ return true;
+
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ if (S.getLangOpts().CPlusPlus)
+ S.FFDiag(Loc, diag::note_constexpr_access_volatile_type) << AK << PtrType;
+ else
+ S.FFDiag(Loc);
+ return false;
+}
+
bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
AccessKinds AK) {
+ assert(Ptr.isLive());
+
if (Ptr.isInitialized())
return true;
+ if (const auto *VD = Ptr.getDeclDesc()->asVarDecl();
+ VD && VD->hasGlobalStorage()) {
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ if (VD->getAnyInitializer()) {
+ S.FFDiag(Loc, diag::note_constexpr_var_init_non_constant, 1) << VD;
+ S.Note(VD->getLocation(), diag::note_declared_at);
+ } else {
+ diagnoseMissingInitializer(S, OpPC, VD);
+ }
+ return false;
+ }
+
if (!S.checkingPotentialConstantExpression()) {
S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_uninit)
<< AK << /*uninitialized=*/true << S.Current->getRange(OpPC);
@@ -356,32 +493,54 @@ bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
return false;
}
-bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
- if (!CheckLive(S, OpPC, Ptr, AK_Read))
+bool CheckGlobalInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
+ if (Ptr.isInitialized())
+ return true;
+
+ assert(S.getLangOpts().CPlusPlus);
+ const auto *VD = cast<VarDecl>(Ptr.getDeclDesc()->asValueDecl());
+ if ((!VD->hasConstantInitialization() &&
+ VD->mightBeUsableInConstantExpressions(S.getCtx())) ||
+ (S.getLangOpts().OpenCL && !S.getLangOpts().CPlusPlus11 &&
+ !VD->hasICEInitializer(S.getCtx()))) {
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_var_init_non_constant, 1) << VD;
+ S.Note(VD->getLocation(), diag::note_declared_at);
+ }
+ return false;
+}
+
+bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK) {
+ if (!CheckLive(S, OpPC, Ptr, AK))
return false;
if (!CheckConstant(S, OpPC, Ptr))
return false;
- if (!CheckDummy(S, OpPC, Ptr))
+ if (!CheckDummy(S, OpPC, Ptr, AK))
return false;
if (!CheckExtern(S, OpPC, Ptr))
return false;
- if (!CheckRange(S, OpPC, Ptr, AK_Read))
+ if (!CheckRange(S, OpPC, Ptr, AK))
return false;
- if (!CheckInitialized(S, OpPC, Ptr, AK_Read))
+ if (!CheckActive(S, OpPC, Ptr, AK))
return false;
- if (!CheckActive(S, OpPC, Ptr, AK_Read))
+ if (!CheckInitialized(S, OpPC, Ptr, AK))
return false;
- if (!CheckTemporary(S, OpPC, Ptr, AK_Read))
+ if (!CheckTemporary(S, OpPC, Ptr, AK))
return false;
if (!CheckMutable(S, OpPC, Ptr))
return false;
+ if (!CheckVolatile(S, OpPC, Ptr, AK))
+ return false;
return true;
}
bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
if (!CheckLive(S, OpPC, Ptr, AK_Assign))
return false;
+ if (!CheckDummy(S, OpPC, Ptr, AK_Assign))
+ return false;
if (!CheckExtern(S, OpPC, Ptr))
return false;
if (!CheckRange(S, OpPC, Ptr, AK_Assign))
@@ -396,10 +555,12 @@ bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
bool CheckInvoke(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
if (!CheckLive(S, OpPC, Ptr, AK_MemberCall))
return false;
- if (!CheckExtern(S, OpPC, Ptr))
- return false;
- if (!CheckRange(S, OpPC, Ptr, AK_MemberCall))
- return false;
+ if (!Ptr.isDummy()) {
+ if (!CheckExtern(S, OpPC, Ptr))
+ return false;
+ if (!CheckRange(S, OpPC, Ptr, AK_MemberCall))
+ return false;
+ }
return true;
}
@@ -419,45 +580,62 @@ bool CheckCallable(InterpState &S, CodePtr OpPC, const Function *F) {
return false;
}
- if (!F->isConstexpr()) {
- const SourceLocation &Loc = S.Current->getLocation(OpPC);
- if (S.getLangOpts().CPlusPlus11) {
- const FunctionDecl *DiagDecl = F->getDecl();
-
- // If this function is not constexpr because it is an inherited
- // non-constexpr constructor, diagnose that directly.
- const auto *CD = dyn_cast<CXXConstructorDecl>(DiagDecl);
- if (CD && CD->isInheritingConstructor()) {
- const auto *Inherited = CD->getInheritedConstructor().getConstructor();
- if (!Inherited->isConstexpr())
- DiagDecl = CD = Inherited;
- }
+ if (F->isConstexpr() && F->hasBody() &&
+ (F->getDecl()->isConstexpr() || F->getDecl()->hasAttr<MSConstexprAttr>()))
+ return true;
- // FIXME: If DiagDecl is an implicitly-declared special member function
- // or an inheriting constructor, we should be much more explicit about why
- // it's not constexpr.
- if (CD && CD->isInheritingConstructor()) {
- S.FFDiag(Loc, diag::note_constexpr_invalid_inhctor, 1)
- << CD->getInheritedConstructor().getConstructor()->getParent();
- S.Note(DiagDecl->getLocation(), diag::note_declared_at);
- } else {
- // Don't emit anything if the function isn't defined and we're checking
- // for a constant expression. It might be defined at the point we're
- // actually calling it.
- if (!DiagDecl->isDefined() && S.checkingPotentialConstantExpression())
- return false;
+ // Implicitly constexpr.
+ if (F->isLambdaStaticInvoker())
+ return true;
- S.FFDiag(Loc, diag::note_constexpr_invalid_function, 1)
- << DiagDecl->isConstexpr() << (bool)CD << DiagDecl;
- S.Note(DiagDecl->getLocation(), diag::note_declared_at);
- }
+ const SourceLocation &Loc = S.Current->getLocation(OpPC);
+ if (S.getLangOpts().CPlusPlus11) {
+ const FunctionDecl *DiagDecl = F->getDecl();
+
+ // Invalid decls have been diagnosed before.
+ if (DiagDecl->isInvalidDecl())
+ return false;
+
+ // If this function is not constexpr because it is an inherited
+ // non-constexpr constructor, diagnose that directly.
+ const auto *CD = dyn_cast<CXXConstructorDecl>(DiagDecl);
+ if (CD && CD->isInheritingConstructor()) {
+ const auto *Inherited = CD->getInheritedConstructor().getConstructor();
+ if (!Inherited->isConstexpr())
+ DiagDecl = CD = Inherited;
+ }
+
+ // FIXME: If DiagDecl is an implicitly-declared special member function
+ // or an inheriting constructor, we should be much more explicit about why
+ // it's not constexpr.
+ if (CD && CD->isInheritingConstructor()) {
+ S.FFDiag(Loc, diag::note_constexpr_invalid_inhctor, 1)
+ << CD->getInheritedConstructor().getConstructor()->getParent();
+ S.Note(DiagDecl->getLocation(), diag::note_declared_at);
} else {
- S.FFDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
+ // Don't emit anything if the function isn't defined and we're checking
+ // for a constant expression. It might be defined at the point we're
+ // actually calling it.
+ bool IsExtern = DiagDecl->getStorageClass() == SC_Extern;
+ if (!DiagDecl->isDefined() && !IsExtern && DiagDecl->isConstexpr() &&
+ S.checkingPotentialConstantExpression())
+ return false;
+
+ // If the declaration is defined, declared 'constexpr' _and_ has a body,
+ // the below diagnostic doesn't add anything useful.
+ if (DiagDecl->isDefined() && DiagDecl->isConstexpr() &&
+ DiagDecl->hasBody())
+ return false;
+
+ S.FFDiag(Loc, diag::note_constexpr_invalid_function, 1)
+ << DiagDecl->isConstexpr() << (bool)CD << DiagDecl;
+ S.Note(DiagDecl->getLocation(), diag::note_declared_at);
}
- return false;
+ } else {
+ S.FFDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
}
- return true;
+ return false;
}
bool CheckCallDepth(InterpState &S, CodePtr OpPC) {
@@ -498,17 +676,6 @@ bool CheckPure(InterpState &S, CodePtr OpPC, const CXXMethodDecl *MD) {
return false;
}
-bool CheckPotentialReinterpretCast(InterpState &S, CodePtr OpPC,
- const Pointer &Ptr) {
- if (!S.inConstantContext())
- return true;
-
- const SourceInfo &E = S.Current->getSource(OpPC);
- S.CCEDiag(E, diag::note_constexpr_invalid_cast)
- << 2 << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC);
- return false;
-}
-
bool CheckFloatResult(InterpState &S, CodePtr OpPC, const Floating &Result,
APFloat::opStatus Status) {
const SourceInfo &E = S.Current->getSource(OpPC);
@@ -556,36 +723,212 @@ bool CheckFloatResult(InterpState &S, CodePtr OpPC, const Floating &Result,
return true;
}
+bool CheckDynamicMemoryAllocation(InterpState &S, CodePtr OpPC) {
+ if (S.getLangOpts().CPlusPlus20)
+ return true;
+
+ const SourceInfo &E = S.Current->getSource(OpPC);
+ S.CCEDiag(E, diag::note_constexpr_new);
+ return true;
+}
+
+bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC, bool NewWasArray,
+ bool DeleteIsArray, const Descriptor *D,
+ const Expr *NewExpr) {
+ if (NewWasArray == DeleteIsArray)
+ return true;
+
+ QualType TypeToDiagnose;
+ // We need to shuffle things around a bit here to get a better diagnostic,
+ // because the expression we allocated the block for was of type int*,
+ // but we want to get the array size right.
+ if (D->isArray()) {
+ QualType ElemQT = D->getType()->getPointeeType();
+ TypeToDiagnose = S.getCtx().getConstantArrayType(
+ ElemQT, APInt(64, static_cast<uint64_t>(D->getNumElems()), false),
+ nullptr, ArraySizeModifier::Normal, 0);
+ } else
+ TypeToDiagnose = D->getType()->getPointeeType();
+
+ const SourceInfo &E = S.Current->getSource(OpPC);
+ S.FFDiag(E, diag::note_constexpr_new_delete_mismatch)
+ << DeleteIsArray << 0 << TypeToDiagnose;
+ S.Note(NewExpr->getExprLoc(), diag::note_constexpr_dynamic_alloc_here)
+ << NewExpr->getSourceRange();
+ return false;
+}
+
+bool CheckDeleteSource(InterpState &S, CodePtr OpPC, const Expr *Source,
+ const Pointer &Ptr) {
+ if (Source && isa<CXXNewExpr>(Source))
+ return true;
+
+ // Whatever this is, we didn't heap allocate it.
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_delete_not_heap_alloc)
+ << Ptr.toDiagnosticString(S.getCtx());
+
+ if (Ptr.isTemporary())
+ S.Note(Ptr.getDeclLoc(), diag::note_constexpr_temporary_here);
+ else
+ S.Note(Ptr.getDeclLoc(), diag::note_declared_at);
+ return false;
+}
+
/// We aleady know the given DeclRefExpr is invalid for some reason,
/// now figure out why and print appropriate diagnostics.
bool CheckDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR) {
const ValueDecl *D = DR->getDecl();
- const SourceInfo &E = S.Current->getSource(OpPC);
+ return diagnoseUnknownDecl(S, OpPC, D);
+}
- if (isa<ParmVarDecl>(D)) {
- if (S.getLangOpts().CPlusPlus11) {
- S.FFDiag(E, diag::note_constexpr_function_param_value_unknown) << D;
- S.Note(D->getLocation(), diag::note_declared_at) << D->getSourceRange();
- } else {
- S.FFDiag(E);
+bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK) {
+ if (!Ptr.isDummy())
+ return true;
+
+ const Descriptor *Desc = Ptr.getDeclDesc();
+ const ValueDecl *D = Desc->asValueDecl();
+ if (!D)
+ return false;
+
+ if (AK == AK_Read || AK == AK_Increment || AK == AK_Decrement)
+ return diagnoseUnknownDecl(S, OpPC, D);
+
+ assert(AK == AK_Assign);
+ if (S.getLangOpts().CPlusPlus11) {
+ const SourceInfo &E = S.Current->getSource(OpPC);
+ S.FFDiag(E, diag::note_constexpr_modify_global);
+ }
+ return false;
+}
+
+bool CheckNonNullArgs(InterpState &S, CodePtr OpPC, const Function *F,
+ const CallExpr *CE, unsigned ArgSize) {
+ auto Args = llvm::ArrayRef(CE->getArgs(), CE->getNumArgs());
+ auto NonNullArgs = collectNonNullArgs(F->getDecl(), Args);
+ unsigned Offset = 0;
+ unsigned Index = 0;
+ for (const Expr *Arg : Args) {
+ if (NonNullArgs[Index] && Arg->getType()->isPointerType()) {
+ const Pointer &ArgPtr = S.Stk.peek<Pointer>(ArgSize - Offset);
+ if (ArgPtr.isZero()) {
+ const SourceLocation &Loc = S.Current->getLocation(OpPC);
+ S.CCEDiag(Loc, diag::note_non_null_attribute_failed);
+ return false;
+ }
}
- } else if (const auto *VD = dyn_cast<VarDecl>(D)) {
- if (!VD->getType().isConstQualified()) {
- diagnoseNonConstVariable(S, OpPC, VD);
- return false;
+
+ Offset += align(primSize(S.Ctx.classify(Arg).value_or(PT_Ptr)));
+ ++Index;
+ }
+ return true;
+}
+
+// FIXME: This is similar to code we already have in Compiler.cpp.
+// I think it makes sense to instead add the field and base destruction stuff
+// to the destructor Function itself. Then destroying a record would really
+// _just_ be calling its destructor. That would also help with the diagnostic
+// difference when the destructor or a field/base fails.
+static bool runRecordDestructor(InterpState &S, CodePtr OpPC,
+ const Pointer &BasePtr,
+ const Descriptor *Desc) {
+ assert(Desc->isRecord());
+ const Record *R = Desc->ElemRecord;
+ assert(R);
+
+ // Fields.
+ for (const Record::Field &Field : llvm::reverse(R->fields())) {
+ const Descriptor *D = Field.Desc;
+ if (D->isRecord()) {
+ if (!runRecordDestructor(S, OpPC, BasePtr.atField(Field.Offset), D))
+ return false;
+ } else if (D->isCompositeArray()) {
+ const Descriptor *ElemDesc = Desc->ElemDesc;
+ assert(ElemDesc->isRecord());
+ for (unsigned I = 0; I != Desc->getNumElems(); ++I) {
+ if (!runRecordDestructor(S, OpPC, BasePtr.atIndex(I).narrow(),
+ ElemDesc))
+ return false;
+ }
}
+ }
+
+ // Destructor of this record.
+ if (const CXXDestructorDecl *Dtor = R->getDestructor();
+ Dtor && !Dtor->isTrivial()) {
+ const Function *DtorFunc = S.getContext().getOrCreateFunction(Dtor);
+ if (!DtorFunc)
+ return false;
- // const, but no initializer.
- if (!VD->getAnyInitializer()) {
- S.FFDiag(E, diag::note_constexpr_var_init_unknown, 1) << VD;
- S.Note(VD->getLocation(), diag::note_declared_at) << VD->getSourceRange();
+ S.Stk.push<Pointer>(BasePtr);
+ if (!Call(S, OpPC, DtorFunc, 0))
return false;
+ }
+
+ // Bases.
+ for (const Record::Base &Base : llvm::reverse(R->bases())) {
+ if (!runRecordDestructor(S, OpPC, BasePtr.atField(Base.Offset), Base.Desc))
+ return false;
+ }
+
+ return true;
+}
+
+bool RunDestructors(InterpState &S, CodePtr OpPC, const Block *B) {
+ assert(B);
+ const Descriptor *Desc = B->getDescriptor();
+
+ if (Desc->isPrimitive() || Desc->isPrimitiveArray())
+ return true;
+
+ assert(Desc->isRecord() || Desc->isCompositeArray());
+
+ if (Desc->isCompositeArray()) {
+ const Descriptor *ElemDesc = Desc->ElemDesc;
+ assert(ElemDesc->isRecord());
+
+ Pointer RP(const_cast<Block *>(B));
+ for (unsigned I = 0; I != Desc->getNumElems(); ++I) {
+ if (!runRecordDestructor(S, OpPC, RP.atIndex(I).narrow(), ElemDesc))
+ return false;
}
+ return true;
}
- return false;
+ assert(Desc->isRecord());
+ return runRecordDestructor(S, OpPC, Pointer(const_cast<Block *>(B)), Desc);
+}
+
+void diagnoseEnumValue(InterpState &S, CodePtr OpPC, const EnumDecl *ED,
+ const APSInt &Value) {
+ llvm::APInt Min;
+ llvm::APInt Max;
+
+ if (S.EvaluatingDecl && !S.EvaluatingDecl->isConstexpr())
+ return;
+
+ ED->getValueRange(Max, Min);
+ --Max;
+
+ if (ED->getNumNegativeBits() &&
+ (Max.slt(Value.getSExtValue()) || Min.sgt(Value.getSExtValue()))) {
+ const SourceLocation &Loc = S.Current->getLocation(OpPC);
+ S.report(Loc, diag::warn_constexpr_unscoped_enum_out_of_range)
+ << llvm::toString(Value, 10) << Min.getSExtValue() << Max.getSExtValue()
+ << ED;
+ } else if (!ED->getNumNegativeBits() && Max.ult(Value.getZExtValue())) {
+ const SourceLocation &Loc = S.Current->getLocation(OpPC);
+ S.report(Loc, diag::warn_constexpr_unscoped_enum_out_of_range)
+ << llvm::toString(Value, 10) << Min.getZExtValue() << Max.getZExtValue()
+ << ED;
+ }
}
+// https://github.com/llvm/llvm-project/issues/102513
+#if defined(_WIN32) && !defined(__clang__) && !defined(NDEBUG)
+#pragma optimize("", off)
+#endif
bool Interpret(InterpState &S, APValue &Result) {
// The current stack frame when we started Interpret().
// This is being used by the ops to determine wheter
@@ -610,6 +953,10 @@ bool Interpret(InterpState &S, APValue &Result) {
}
}
}
+// https://github.com/llvm/llvm-project/issues/102513
+#if defined(_WIN32) && !defined(__clang__) && !defined(NDEBUG)
+#pragma optimize("", on)
+#endif
} // namespace interp
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Interp.h b/contrib/llvm-project/clang/lib/AST/Interp/Interp.h
index 65c54ed9c89b..253a433e7340 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Interp.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Interp.h
@@ -13,25 +13,24 @@
#ifndef LLVM_CLANG_AST_INTERP_INTERP_H
#define LLVM_CLANG_AST_INTERP_INTERP_H
+#include "../ExprConstShared.h"
#include "Boolean.h"
+#include "DynamicAllocator.h"
#include "Floating.h"
#include "Function.h"
#include "FunctionPointer.h"
#include "InterpFrame.h"
#include "InterpStack.h"
#include "InterpState.h"
+#include "MemberPointer.h"
#include "Opcode.h"
#include "PrimType.h"
#include "Program.h"
#include "State.h"
#include "clang/AST/ASTContext.h"
-#include "clang/AST/ASTDiagnostic.h"
-#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Expr.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
-#include "llvm/Support/Endian.h"
-#include <limits>
#include <type_traits>
namespace clang {
@@ -40,8 +39,9 @@ namespace interp {
using APSInt = llvm::APSInt;
/// Convert a value to an APValue.
-template <typename T> bool ReturnValue(const T &V, APValue &R) {
- R = V.toAPValue();
+template <typename T>
+bool ReturnValue(const InterpState &S, const T &V, APValue &R) {
+ R = V.toAPValue(S.getCtx());
return true;
}
@@ -56,7 +56,8 @@ bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
AccessKinds AK);
/// Checks if a pointer is a dummy pointer.
-bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
+bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK);
/// Checks if a pointer is null.
bool CheckNull(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
@@ -74,6 +75,11 @@ bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
bool CheckSubobject(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
CheckSubobjectKind CSK);
+/// Checks if the downcast using the given offset is possible with the given
+/// pointer.
+bool CheckDowncast(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ uint32_t Offset);
+
/// Checks if a pointer points to const storage.
bool CheckConst(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
@@ -84,10 +90,13 @@ bool CheckConstant(InterpState &S, CodePtr OpPC, const Descriptor *Desc);
bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
/// Checks if a value can be loaded from a block.
-bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
+bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK = AK_Read);
bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
AccessKinds AK);
+/// Check if a global variable is initialized.
+bool CheckGlobalInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
/// Checks if a value can be stored in a block.
bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
@@ -111,15 +120,32 @@ bool CheckThis(InterpState &S, CodePtr OpPC, const Pointer &This);
/// Checks if a method is pure virtual.
bool CheckPure(InterpState &S, CodePtr OpPC, const CXXMethodDecl *MD);
-/// Checks if reinterpret casts are legal in the current context.
-bool CheckPotentialReinterpretCast(InterpState &S, CodePtr OpPC,
- const Pointer &Ptr);
+/// Checks if all the arguments annotated as 'nonnull' are in fact not null.
+bool CheckNonNullArgs(InterpState &S, CodePtr OpPC, const Function *F,
+ const CallExpr *CE, unsigned ArgSize);
+
+/// Checks if dynamic memory allocation is available in the current
+/// language mode.
+bool CheckDynamicMemoryAllocation(InterpState &S, CodePtr OpPC);
+
+/// Diagnose mismatched new[]/delete or new/delete[] pairs.
+bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC, bool NewWasArray,
+ bool DeleteIsArray, const Descriptor *D,
+ const Expr *NewExpr);
+
+/// Check that the source of the pointer passed to delete/delete[] has actually
+/// been heap allocated by us.
+bool CheckDeleteSource(InterpState &S, CodePtr OpPC, const Expr *Source,
+ const Pointer &Ptr);
/// Sets the given integral value to the pointer, which is of
/// a std::{weak,partial,strong}_ordering type.
bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
const Pointer &Ptr, const APSInt &IntValue);
+/// Copy the contents of Src into Dest.
+bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest);
+
/// Checks if the shift operation is legal.
template <typename LT, typename RT>
bool CheckShift(InterpState &S, CodePtr OpPC, const LT &LHS, const RT &RHS,
@@ -127,7 +153,8 @@ bool CheckShift(InterpState &S, CodePtr OpPC, const LT &LHS, const RT &RHS,
if (RHS.isNegative()) {
const SourceInfo &Loc = S.Current->getSource(OpPC);
S.CCEDiag(Loc, diag::note_constexpr_negative_shift) << RHS.toAPSInt();
- return false;
+ if (!S.noteUndefinedBehavior())
+ return false;
}
// C++11 [expr.shift]p1: Shift width must be less than the bit width of
@@ -137,17 +164,24 @@ bool CheckShift(InterpState &S, CodePtr OpPC, const LT &LHS, const RT &RHS,
const APSInt Val = RHS.toAPSInt();
QualType Ty = E->getType();
S.CCEDiag(E, diag::note_constexpr_large_shift) << Val << Ty << Bits;
- return false;
+ if (!S.noteUndefinedBehavior())
+ return false;
}
if (LHS.isSigned() && !S.getLangOpts().CPlusPlus20) {
const Expr *E = S.Current->getExpr(OpPC);
// C++11 [expr.shift]p2: A signed left shift must have a non-negative
// operand, and must not overflow the corresponding unsigned type.
- if (LHS.isNegative())
+ if (LHS.isNegative()) {
S.CCEDiag(E, diag::note_constexpr_lshift_of_negative) << LHS.toAPSInt();
- else if (LHS.toUnsigned().countLeadingZeros() < static_cast<unsigned>(RHS))
+ if (!S.noteUndefinedBehavior())
+ return false;
+ } else if (LHS.toUnsigned().countLeadingZeros() <
+ static_cast<unsigned>(RHS)) {
S.CCEDiag(E, diag::note_constexpr_lshift_discards);
+ if (!S.noteUndefinedBehavior())
+ return false;
+ }
}
// C++2a [expr.shift]p2: [P0907R4]:
@@ -161,6 +195,12 @@ template <typename T>
bool CheckDivRem(InterpState &S, CodePtr OpPC, const T &LHS, const T &RHS) {
if (RHS.isZero()) {
const auto *Op = cast<BinaryOperator>(S.Current->getExpr(OpPC));
+ if constexpr (std::is_same_v<T, Floating>) {
+ S.CCEDiag(Op, diag::note_expr_divide_by_zero)
+ << Op->getRHS()->getSourceRange();
+ return true;
+ }
+
S.FFDiag(Op, diag::note_expr_divide_by_zero)
<< Op->getRHS()->getSourceRange();
return false;
@@ -178,6 +218,30 @@ bool CheckDivRem(InterpState &S, CodePtr OpPC, const T &LHS, const T &RHS) {
return true;
}
+template <typename SizeT>
+bool CheckArraySize(InterpState &S, CodePtr OpPC, SizeT *NumElements,
+ unsigned ElemSize, bool IsNoThrow) {
+ // FIXME: Both the SizeT::from() as well as the
+ // NumElements.toAPSInt() in this function are rather expensive.
+
+ // FIXME: GH63562
+ // APValue stores array extents as unsigned,
+ // so anything that is greater that unsigned would overflow when
+ // constructing the array, we catch this here.
+ SizeT MaxElements = SizeT::from(Descriptor::MaxArrayElemBytes / ElemSize);
+ if (NumElements->toAPSInt().getActiveBits() >
+ ConstantArrayType::getMaxSizeBits(S.getCtx()) ||
+ *NumElements > MaxElements) {
+ if (!IsNoThrow) {
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_new_too_large)
+ << NumElements->toDiagnosticString(S.getCtx());
+ }
+ return false;
+ }
+ return true;
+}
+
/// Checks if the result of a floating-point operation is valid
/// in the current context.
bool CheckFloatResult(InterpState &S, CodePtr OpPC, const Floating &Result,
@@ -197,6 +261,8 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
llvm::ArrayRef<int64_t> ArrayIndices, int64_t &Result);
+inline bool Invalid(InterpState &S, CodePtr OpPC);
+
enum class ArithOp { Add, Sub };
//===----------------------------------------------------------------------===//
@@ -234,7 +300,7 @@ bool Ret(InterpState &S, CodePtr &PC, APValue &Result) {
} else {
delete S.Current;
S.Current = nullptr;
- if (!ReturnValue<T>(Ret, Result))
+ if (!ReturnValue<T>(S, Ret, Result))
return false;
}
return true;
@@ -283,19 +349,22 @@ bool AddSubMulHelper(InterpState &S, CodePtr OpPC, unsigned Bits, const T &LHS,
QualType Type = E->getType();
if (S.checkingForUndefinedBehavior()) {
SmallString<32> Trunc;
- Value.trunc(Result.bitWidth()).toString(Trunc, 10);
+ Value.trunc(Result.bitWidth())
+ .toString(Trunc, 10, Result.isSigned(), /*formatAsCLiteral=*/false,
+ /*UpperCase=*/true, /*InsertSeparators=*/true);
auto Loc = E->getExprLoc();
S.report(Loc, diag::warn_integer_constant_overflow)
<< Trunc << Type << E->getSourceRange();
- return true;
- } else {
- S.CCEDiag(E, diag::note_constexpr_overflow) << Value << Type;
- if (!S.noteUndefinedBehavior()) {
- S.Stk.pop<T>();
- return false;
- }
- return true;
}
+
+ S.CCEDiag(E, diag::note_constexpr_overflow) << Value << Type;
+
+ if (!S.noteUndefinedBehavior()) {
+ S.Stk.pop<T>();
+ return false;
+ }
+
+ return true;
}
template <PrimType Name, class T = typename PrimConv<Name>::T>
@@ -351,6 +420,134 @@ inline bool Mulf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
S.Stk.push<Floating>(Result);
return CheckFloatResult(S, OpPC, Result, Status);
}
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+inline bool Mulc(InterpState &S, CodePtr OpPC) {
+ const Pointer &RHS = S.Stk.pop<Pointer>();
+ const Pointer &LHS = S.Stk.pop<Pointer>();
+ const Pointer &Result = S.Stk.peek<Pointer>();
+
+ if constexpr (std::is_same_v<T, Floating>) {
+ APFloat A = LHS.atIndex(0).deref<Floating>().getAPFloat();
+ APFloat B = LHS.atIndex(1).deref<Floating>().getAPFloat();
+ APFloat C = RHS.atIndex(0).deref<Floating>().getAPFloat();
+ APFloat D = RHS.atIndex(1).deref<Floating>().getAPFloat();
+
+ APFloat ResR(A.getSemantics());
+ APFloat ResI(A.getSemantics());
+ HandleComplexComplexMul(A, B, C, D, ResR, ResI);
+
+ // Copy into the result.
+ Result.atIndex(0).deref<Floating>() = Floating(ResR);
+ Result.atIndex(0).initialize();
+ Result.atIndex(1).deref<Floating>() = Floating(ResI);
+ Result.atIndex(1).initialize();
+ Result.initialize();
+ } else {
+ // Integer element type.
+ const T &LHSR = LHS.atIndex(0).deref<T>();
+ const T &LHSI = LHS.atIndex(1).deref<T>();
+ const T &RHSR = RHS.atIndex(0).deref<T>();
+ const T &RHSI = RHS.atIndex(1).deref<T>();
+ unsigned Bits = LHSR.bitWidth();
+
+ // real(Result) = (real(LHS) * real(RHS)) - (imag(LHS) * imag(RHS))
+ T A;
+ if (T::mul(LHSR, RHSR, Bits, &A))
+ return false;
+ T B;
+ if (T::mul(LHSI, RHSI, Bits, &B))
+ return false;
+ if (T::sub(A, B, Bits, &Result.atIndex(0).deref<T>()))
+ return false;
+ Result.atIndex(0).initialize();
+
+ // imag(Result) = (real(LHS) * imag(RHS)) + (imag(LHS) * real(RHS))
+ if (T::mul(LHSR, RHSI, Bits, &A))
+ return false;
+ if (T::mul(LHSI, RHSR, Bits, &B))
+ return false;
+ if (T::add(A, B, Bits, &Result.atIndex(1).deref<T>()))
+ return false;
+ Result.atIndex(1).initialize();
+ Result.initialize();
+ }
+
+ return true;
+}
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+inline bool Divc(InterpState &S, CodePtr OpPC) {
+ const Pointer &RHS = S.Stk.pop<Pointer>();
+ const Pointer &LHS = S.Stk.pop<Pointer>();
+ const Pointer &Result = S.Stk.peek<Pointer>();
+
+ if constexpr (std::is_same_v<T, Floating>) {
+ APFloat A = LHS.atIndex(0).deref<Floating>().getAPFloat();
+ APFloat B = LHS.atIndex(1).deref<Floating>().getAPFloat();
+ APFloat C = RHS.atIndex(0).deref<Floating>().getAPFloat();
+ APFloat D = RHS.atIndex(1).deref<Floating>().getAPFloat();
+
+ APFloat ResR(A.getSemantics());
+ APFloat ResI(A.getSemantics());
+ HandleComplexComplexDiv(A, B, C, D, ResR, ResI);
+
+ // Copy into the result.
+ Result.atIndex(0).deref<Floating>() = Floating(ResR);
+ Result.atIndex(0).initialize();
+ Result.atIndex(1).deref<Floating>() = Floating(ResI);
+ Result.atIndex(1).initialize();
+ Result.initialize();
+ } else {
+ // Integer element type.
+ const T &LHSR = LHS.atIndex(0).deref<T>();
+ const T &LHSI = LHS.atIndex(1).deref<T>();
+ const T &RHSR = RHS.atIndex(0).deref<T>();
+ const T &RHSI = RHS.atIndex(1).deref<T>();
+ unsigned Bits = LHSR.bitWidth();
+ const T Zero = T::from(0, Bits);
+
+ if (Compare(RHSR, Zero) == ComparisonCategoryResult::Equal &&
+ Compare(RHSI, Zero) == ComparisonCategoryResult::Equal) {
+ const SourceInfo &E = S.Current->getSource(OpPC);
+ S.FFDiag(E, diag::note_expr_divide_by_zero);
+ return false;
+ }
+
+ // Den = real(RHS)² + imag(RHS)²
+ T A, B;
+ if (T::mul(RHSR, RHSR, Bits, &A) || T::mul(RHSI, RHSI, Bits, &B))
+ return false;
+ T Den;
+ if (T::add(A, B, Bits, &Den))
+ return false;
+
+ // real(Result) = ((real(LHS) * real(RHS)) + (imag(LHS) * imag(RHS))) / Den
+ T &ResultR = Result.atIndex(0).deref<T>();
+ T &ResultI = Result.atIndex(1).deref<T>();
+
+ if (T::mul(LHSR, RHSR, Bits, &A) || T::mul(LHSI, RHSI, Bits, &B))
+ return false;
+ if (T::add(A, B, Bits, &ResultR))
+ return false;
+ if (T::div(ResultR, Den, Bits, &ResultR))
+ return false;
+ Result.atIndex(0).initialize();
+
+ // imag(Result) = ((imag(LHS) * real(RHS)) - (real(LHS) * imag(RHS))) / Den
+ if (T::mul(LHSI, RHSR, Bits, &A) || T::mul(LHSR, RHSI, Bits, &B))
+ return false;
+ if (T::sub(A, B, Bits, &ResultI))
+ return false;
+ if (T::div(ResultI, Den, Bits, &ResultI))
+ return false;
+ Result.atIndex(1).initialize();
+ Result.initialize();
+ }
+
+ return true;
+}
+
/// 1) Pops the RHS from the stack.
/// 2) Pops the LHS from the stack.
/// 3) Pushes 'LHS & RHS' on the stack
@@ -495,7 +692,9 @@ bool Neg(InterpState &S, CodePtr OpPC) {
if (S.checkingForUndefinedBehavior()) {
SmallString<32> Trunc;
- NegatedValue.trunc(Result.bitWidth()).toString(Trunc, 10);
+ NegatedValue.trunc(Result.bitWidth())
+ .toString(Trunc, 10, Result.isSigned(), /*formatAsCLiteral=*/false,
+ /*UpperCase=*/true, /*InsertSeparators=*/true);
auto Loc = E->getExprLoc();
S.report(Loc, diag::warn_integer_constant_overflow)
<< Trunc << Type << E->getSourceRange();
@@ -517,6 +716,13 @@ enum class IncDecOp {
template <typename T, IncDecOp Op, PushVal DoPush>
bool IncDecHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
+ assert(!Ptr.isDummy());
+
+ if constexpr (std::is_same_v<T, Boolean>) {
+ if (!S.getLangOpts().CPlusPlus14)
+ return Invalid(S, OpPC);
+ }
+
const T &Value = Ptr.deref<T>();
T Result;
@@ -549,7 +755,9 @@ bool IncDecHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
QualType Type = E->getType();
if (S.checkingForUndefinedBehavior()) {
SmallString<32> Trunc;
- APResult.trunc(Result.bitWidth()).toString(Trunc, 10);
+ APResult.trunc(Result.bitWidth())
+ .toString(Trunc, 10, Result.isSigned(), /*formatAsCLiteral=*/false,
+ /*UpperCase=*/true, /*InsertSeparators=*/true);
auto Loc = E->getExprLoc();
S.report(Loc, diag::warn_integer_constant_overflow)
<< Trunc << Type << E->getSourceRange();
@@ -567,8 +775,7 @@ bool IncDecHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool Inc(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
- if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
+ if (!CheckLoad(S, OpPC, Ptr, AK_Increment))
return false;
return IncDecHelper<T, IncDecOp::Inc, PushVal::Yes>(S, OpPC, Ptr);
@@ -580,8 +787,7 @@ bool Inc(InterpState &S, CodePtr OpPC) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool IncPop(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
- if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
+ if (!CheckLoad(S, OpPC, Ptr, AK_Increment))
return false;
return IncDecHelper<T, IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr);
@@ -594,8 +800,7 @@ bool IncPop(InterpState &S, CodePtr OpPC) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool Dec(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
- if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
+ if (!CheckLoad(S, OpPC, Ptr, AK_Decrement))
return false;
return IncDecHelper<T, IncDecOp::Dec, PushVal::Yes>(S, OpPC, Ptr);
@@ -607,8 +812,7 @@ bool Dec(InterpState &S, CodePtr OpPC) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool DecPop(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
- if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
+ if (!CheckLoad(S, OpPC, Ptr, AK_Decrement))
return false;
return IncDecHelper<T, IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr);
@@ -636,8 +840,7 @@ bool IncDecFloatHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
inline bool Incf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
- if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
+ if (!CheckLoad(S, OpPC, Ptr, AK_Increment))
return false;
return IncDecFloatHelper<IncDecOp::Inc, PushVal::Yes>(S, OpPC, Ptr, RM);
@@ -645,8 +848,7 @@ inline bool Incf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
inline bool IncfPop(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
- if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
+ if (!CheckLoad(S, OpPC, Ptr, AK_Increment))
return false;
return IncDecFloatHelper<IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr, RM);
@@ -654,8 +856,7 @@ inline bool IncfPop(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
inline bool Decf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
- if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
+ if (!CheckLoad(S, OpPC, Ptr, AK_Decrement))
return false;
return IncDecFloatHelper<IncDecOp::Dec, PushVal::Yes>(S, OpPC, Ptr, RM);
@@ -663,8 +864,7 @@ inline bool Decf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
inline bool DecfPop(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
-
- if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
+ if (!CheckLoad(S, OpPC, Ptr, AK_Decrement))
return false;
return IncDecFloatHelper<IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr, RM);
@@ -692,6 +892,9 @@ using CompareFn = llvm::function_ref<bool(ComparisonCategoryResult)>;
template <typename T>
bool CmpHelper(InterpState &S, CodePtr OpPC, CompareFn Fn) {
+ assert((!std::is_same_v<T, MemberPointer>) &&
+ "Non-equality comparisons on member pointer types should already be "
+ "rejected in Sema.");
using BoolT = PrimConv<PT_Bool>::T;
const T &RHS = S.Stk.pop<T>();
const T &LHS = S.Stk.pop<T>();
@@ -723,6 +926,17 @@ inline bool CmpHelperEQ<FunctionPointer>(InterpState &S, CodePtr OpPC,
CompareFn Fn) {
const auto &RHS = S.Stk.pop<FunctionPointer>();
const auto &LHS = S.Stk.pop<FunctionPointer>();
+
+ // We cannot compare against weak declarations at compile time.
+ for (const auto &FP : {LHS, RHS}) {
+ if (FP.isWeak()) {
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_pointer_weak_comparison)
+ << FP.toDiagnosticString(S.getCtx());
+ return false;
+ }
+ }
+
S.Stk.push<Boolean>(Boolean::from(Fn(LHS.compare(RHS))));
return true;
}
@@ -758,7 +972,33 @@ inline bool CmpHelperEQ<Pointer>(InterpState &S, CodePtr OpPC, CompareFn Fn) {
return true;
}
+ // Reject comparisons to weak pointers.
+ for (const auto &P : {LHS, RHS}) {
+ if (P.isZero())
+ continue;
+ if (P.isWeak()) {
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_pointer_weak_comparison)
+ << P.toDiagnosticString(S.getCtx());
+ return false;
+ }
+ }
+
if (!Pointer::hasSameBase(LHS, RHS)) {
+ if (LHS.isOnePastEnd() && !RHS.isOnePastEnd() && !RHS.isZero() &&
+ RHS.getOffset() == 0) {
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_pointer_comparison_past_end)
+ << LHS.toDiagnosticString(S.getCtx());
+ return false;
+ } else if (RHS.isOnePastEnd() && !LHS.isOnePastEnd() && !LHS.isZero() &&
+ LHS.getOffset() == 0) {
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_pointer_comparison_past_end)
+ << RHS.toDiagnosticString(S.getCtx());
+ return false;
+ }
+
S.Stk.push<BoolT>(BoolT::from(Fn(ComparisonCategoryResult::Unordered)));
return true;
} else {
@@ -769,9 +1009,9 @@ inline bool CmpHelperEQ<Pointer>(InterpState &S, CodePtr OpPC, CompareFn Fn) {
// element in the same array are NOT equal. They have the same Base value,
// but a different Offset. This is a pretty rare case, so we fix this here
// by comparing pointers to the first elements.
- if (LHS.isArrayRoot())
+ if (!LHS.isZero() && LHS.isArrayRoot())
VL = LHS.atIndex(0).getByteOffset();
- if (RHS.isArrayRoot())
+ if (!RHS.isZero() && RHS.isArrayRoot())
VR = RHS.atIndex(0).getByteOffset();
S.Stk.push<BoolT>(BoolT::from(Fn(Compare(VL, VR))));
@@ -779,6 +1019,47 @@ inline bool CmpHelperEQ<Pointer>(InterpState &S, CodePtr OpPC, CompareFn Fn) {
}
}
+template <>
+inline bool CmpHelperEQ<MemberPointer>(InterpState &S, CodePtr OpPC,
+ CompareFn Fn) {
+ const auto &RHS = S.Stk.pop<MemberPointer>();
+ const auto &LHS = S.Stk.pop<MemberPointer>();
+
+ // If either operand is a pointer to a weak function, the comparison is not
+ // constant.
+ for (const auto &MP : {LHS, RHS}) {
+ if (const CXXMethodDecl *MD = MP.getMemberFunction(); MD && MD->isWeak()) {
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_mem_pointer_weak_comparison) << MD;
+ return false;
+ }
+ }
+
+ // C++11 [expr.eq]p2:
+ // If both operands are null, they compare equal. Otherwise if only one is
+ // null, they compare unequal.
+ if (LHS.isZero() && RHS.isZero()) {
+ S.Stk.push<Boolean>(Fn(ComparisonCategoryResult::Equal));
+ return true;
+ }
+ if (LHS.isZero() || RHS.isZero()) {
+ S.Stk.push<Boolean>(Fn(ComparisonCategoryResult::Unordered));
+ return true;
+ }
+
+ // We cannot compare against virtual declarations at compile time.
+ for (const auto &MP : {LHS, RHS}) {
+ if (const CXXMethodDecl *MD = MP.getMemberFunction();
+ MD && MD->isVirtual()) {
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.CCEDiag(Loc, diag::note_constexpr_compare_virtual_mem_ptr) << MD;
+ }
+ }
+
+ S.Stk.push<Boolean>(Boolean::from(Fn(LHS.compare(RHS))));
+ return true;
+}
+
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool EQ(InterpState &S, CodePtr OpPC) {
return CmpHelperEQ<T>(S, OpPC, [](ComparisonCategoryResult R) {
@@ -803,11 +1084,11 @@ bool CMP3(InterpState &S, CodePtr OpPC, const ComparisonCategoryInfo *CmpInfo) {
}
assert(CmpInfo);
- const auto *CmpValueInfo = CmpInfo->getValueInfo(CmpResult);
+ const auto *CmpValueInfo =
+ CmpInfo->getValueInfo(CmpInfo->makeWeakResult(CmpResult));
assert(CmpValueInfo);
assert(CmpValueInfo->hasValidIntValue());
- APSInt IntValue = CmpValueInfo->getIntValue();
- return SetThreeWayComparisonField(S, OpPC, P, IntValue);
+ return SetThreeWayComparisonField(S, OpPC, P, CmpValueInfo->getIntValue());
}
template <PrimType Name, class T = typename PrimConv<Name>::T>
@@ -1003,21 +1284,28 @@ bool SetThisField(InterpState &S, CodePtr OpPC, uint32_t I) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool GetGlobal(InterpState &S, CodePtr OpPC, uint32_t I) {
- const Block *B = S.P.getGlobal(I);
-
- if (!CheckConstant(S, OpPC, B->getDescriptor()))
+ const Pointer &Ptr = S.P.getPtrGlobal(I);
+ if (!CheckConstant(S, OpPC, Ptr.getFieldDesc()))
+ return false;
+ if (Ptr.isExtern())
return false;
- if (B->isExtern())
+
+ // If a global variable is uninitialized, that means the initializer we've
+ // compiled for it wasn't a constant expression. Diagnose that.
+ if (!CheckGlobalInitialized(S, OpPC, Ptr))
return false;
- S.Stk.push<T>(B->deref<T>());
+
+ S.Stk.push<T>(Ptr.deref<T>());
return true;
}
/// Same as GetGlobal, but without the checks.
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool GetGlobalUnchecked(InterpState &S, CodePtr OpPC, uint32_t I) {
- auto *B = S.P.getGlobal(I);
- S.Stk.push<T>(B->deref<T>());
+ const Pointer &Ptr = S.P.getPtrGlobal(I);
+ if (!Ptr.isInitialized())
+ return false;
+ S.Stk.push<T>(Ptr.deref<T>());
return true;
}
@@ -1029,23 +1317,32 @@ bool SetGlobal(InterpState &S, CodePtr OpPC, uint32_t I) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool InitGlobal(InterpState &S, CodePtr OpPC, uint32_t I) {
- S.P.getGlobal(I)->deref<T>() = S.Stk.pop<T>();
+ const Pointer &P = S.P.getGlobal(I);
+ P.deref<T>() = S.Stk.pop<T>();
+ P.initialize();
return true;
}
/// 1) Converts the value on top of the stack to an APValue
/// 2) Sets that APValue on \Temp
-/// 3) Initialized global with index \I with that
+/// 3) Initializes global with index \I with that
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool InitGlobalTemp(InterpState &S, CodePtr OpPC, uint32_t I,
const LifetimeExtendedTemporaryDecl *Temp) {
- assert(Temp);
+ const Pointer &Ptr = S.P.getGlobal(I);
+
const T Value = S.Stk.peek<T>();
- APValue APV = Value.toAPValue();
+ APValue APV = Value.toAPValue(S.getCtx());
APValue *Cached = Temp->getOrCreateValue(true);
*Cached = APV;
- S.P.getGlobal(I)->deref<T>() = S.Stk.pop<T>();
+ assert(Ptr.getDeclDesc()->asExpr());
+
+ S.SeenGlobalTemporaries.push_back(
+ std::make_pair(Ptr.getDeclDesc()->asExpr(), Temp));
+
+ Ptr.deref<T>() = S.Stk.pop<T>();
+ Ptr.initialize();
return true;
}
@@ -1058,7 +1355,11 @@ inline bool InitGlobalTempComp(InterpState &S, CodePtr OpPC,
const Pointer &P = S.Stk.peek<Pointer>();
APValue *Cached = Temp->getOrCreateValue(true);
- if (std::optional<APValue> APV = P.toRValue(S.getCtx())) {
+ S.SeenGlobalTemporaries.push_back(
+ std::make_pair(P.getDeclDesc()->asExpr(), Temp));
+
+ if (std::optional<APValue> APV =
+ P.toRValue(S.getCtx(), Temp->getTemporaryExpr()->getType())) {
*Cached = *APV;
return true;
}
@@ -1168,19 +1469,49 @@ inline bool GetPtrGlobal(InterpState &S, CodePtr OpPC, uint32_t I) {
return true;
}
-/// 1) Pops a Pointer from the stack
+/// 1) Peeks a Pointer
/// 2) Pushes Pointer.atField(Off) on the stack
inline bool GetPtrField(InterpState &S, CodePtr OpPC, uint32_t Off) {
+ const Pointer &Ptr = S.Stk.peek<Pointer>();
+
+ if (S.getLangOpts().CPlusPlus && S.inConstantContext() &&
+ !CheckNull(S, OpPC, Ptr, CSK_Field))
+ return false;
+
+ if (!CheckExtern(S, OpPC, Ptr))
+ return false;
+ if (!CheckRange(S, OpPC, Ptr, CSK_Field))
+ return false;
+ if (!CheckArray(S, OpPC, Ptr))
+ return false;
+ if (!CheckSubobject(S, OpPC, Ptr, CSK_Field))
+ return false;
+
+ if (Ptr.isBlockPointer() && Off > Ptr.block()->getSize())
+ return false;
+ S.Stk.push<Pointer>(Ptr.atField(Off));
+ return true;
+}
+
+inline bool GetPtrFieldPop(InterpState &S, CodePtr OpPC, uint32_t Off) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
- if (S.inConstantContext() && !CheckNull(S, OpPC, Ptr, CSK_Field))
+
+ if (S.getLangOpts().CPlusPlus && S.inConstantContext() &&
+ !CheckNull(S, OpPC, Ptr, CSK_Field))
return false;
+
if (!CheckExtern(S, OpPC, Ptr))
return false;
if (!CheckRange(S, OpPC, Ptr, CSK_Field))
return false;
+ if (!CheckArray(S, OpPC, Ptr))
+ return false;
if (!CheckSubobject(S, OpPC, Ptr, CSK_Field))
return false;
+ if (Ptr.isBlockPointer() && Off > Ptr.block()->getSize())
+ return false;
+
S.Stk.push<Pointer>(Ptr.atField(Off));
return true;
}
@@ -1227,6 +1558,9 @@ inline bool GetPtrDerivedPop(InterpState &S, CodePtr OpPC, uint32_t Off) {
return false;
if (!CheckSubobject(S, OpPC, Ptr, CSK_Derived))
return false;
+ if (!CheckDowncast(S, OpPC, Ptr, Off))
+ return false;
+
S.Stk.push<Pointer>(Ptr.atFieldSub(Off));
return true;
}
@@ -1251,6 +1585,12 @@ inline bool GetPtrBasePop(InterpState &S, CodePtr OpPC, uint32_t Off) {
return true;
}
+inline bool GetMemberPtrBasePop(InterpState &S, CodePtr OpPC, int32_t Off) {
+ const auto &Ptr = S.Stk.pop<MemberPointer>();
+ S.Stk.push<MemberPointer>(Ptr.atInstanceBase(Off));
+ return true;
+}
+
inline bool GetPtrThisBase(InterpState &S, CodePtr OpPC, uint32_t Off) {
if (S.checkingPotentialConstantExpression())
return false;
@@ -1261,9 +1601,26 @@ inline bool GetPtrThisBase(InterpState &S, CodePtr OpPC, uint32_t Off) {
return true;
}
-inline bool InitPtrPop(InterpState &S, CodePtr OpPC) {
+inline bool FinishInitPop(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
- Ptr.initialize();
+ if (Ptr.canBeInitialized()) {
+ Ptr.initialize();
+ Ptr.activate();
+ }
+ return true;
+}
+
+inline bool FinishInit(InterpState &S, CodePtr OpPC) {
+ const Pointer &Ptr = S.Stk.peek<Pointer>();
+ if (Ptr.canBeInitialized()) {
+ Ptr.initialize();
+ Ptr.activate();
+ }
+ return true;
+}
+
+inline bool Dump(InterpState &S, CodePtr OpPC) {
+ S.Stk.dump();
return true;
}
@@ -1273,12 +1630,14 @@ inline bool VirtBaseHelper(InterpState &S, CodePtr OpPC, const RecordDecl *Decl,
while (Base.isBaseClass())
Base = Base.getBase();
- auto *Field = Base.getRecord()->getVirtualBase(Decl);
- S.Stk.push<Pointer>(Base.atField(Field->Offset));
+ const Record::Base *VirtBase = Base.getRecord()->getVirtualBase(Decl);
+ S.Stk.push<Pointer>(Base.atField(VirtBase->Offset));
return true;
}
-inline bool GetPtrVirtBase(InterpState &S, CodePtr OpPC, const RecordDecl *D) {
+inline bool GetPtrVirtBasePop(InterpState &S, CodePtr OpPC,
+ const RecordDecl *D) {
+ assert(D);
const Pointer &Ptr = S.Stk.pop<Pointer>();
if (!CheckNull(S, OpPC, Ptr, CSK_Base))
return false;
@@ -1287,6 +1646,7 @@ inline bool GetPtrVirtBase(InterpState &S, CodePtr OpPC, const RecordDecl *D) {
inline bool GetPtrThisVirtBase(InterpState &S, CodePtr OpPC,
const RecordDecl *D) {
+ assert(D);
if (S.checkingPotentialConstantExpression())
return false;
const Pointer &This = S.Current->getThis();
@@ -1304,6 +1664,8 @@ bool Load(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.peek<Pointer>();
if (!CheckLoad(S, OpPC, Ptr))
return false;
+ if (!Ptr.isBlockPointer())
+ return false;
S.Stk.push<T>(Ptr.deref<T>());
return true;
}
@@ -1313,6 +1675,8 @@ bool LoadPop(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
if (!CheckLoad(S, OpPC, Ptr))
return false;
+ if (!Ptr.isBlockPointer())
+ return false;
S.Stk.push<T>(Ptr.deref<T>());
return true;
}
@@ -1323,7 +1687,7 @@ bool Store(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.peek<Pointer>();
if (!CheckStore(S, OpPC, Ptr))
return false;
- if (!Ptr.isRoot())
+ if (Ptr.canBeInitialized())
Ptr.initialize();
Ptr.deref<T>() = Value;
return true;
@@ -1335,7 +1699,7 @@ bool StorePop(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
if (!CheckStore(S, OpPC, Ptr))
return false;
- if (!Ptr.isRoot())
+ if (Ptr.canBeInitialized())
Ptr.initialize();
Ptr.deref<T>() = Value;
return true;
@@ -1347,7 +1711,7 @@ bool StoreBitField(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.peek<Pointer>();
if (!CheckStore(S, OpPC, Ptr))
return false;
- if (!Ptr.isRoot())
+ if (Ptr.canBeInitialized())
Ptr.initialize();
if (const auto *FD = Ptr.getField())
Ptr.deref<T>() = Value.truncate(FD->getBitWidthValue(S.getCtx()));
@@ -1362,7 +1726,7 @@ bool StoreBitFieldPop(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
if (!CheckStore(S, OpPC, Ptr))
return false;
- if (!Ptr.isRoot())
+ if (Ptr.canBeInitialized())
Ptr.initialize();
if (const auto *FD = Ptr.getField())
Ptr.deref<T>() = Value.truncate(FD->getBitWidthValue(S.getCtx()));
@@ -1372,6 +1736,19 @@ bool StoreBitFieldPop(InterpState &S, CodePtr OpPC) {
}
template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool Init(InterpState &S, CodePtr OpPC) {
+ const T &Value = S.Stk.pop<T>();
+ const Pointer &Ptr = S.Stk.peek<Pointer>();
+ if (!CheckInit(S, OpPC, Ptr)) {
+ assert(false);
+ return false;
+ }
+ Ptr.initialize();
+ new (&Ptr.deref<T>()) T(Value);
+ return true;
+}
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
bool InitPop(InterpState &S, CodePtr OpPC) {
const T &Value = S.Stk.pop<T>();
const Pointer &Ptr = S.Stk.pop<Pointer>();
@@ -1389,6 +1766,8 @@ template <PrimType Name, class T = typename PrimConv<Name>::T>
bool InitElem(InterpState &S, CodePtr OpPC, uint32_t Idx) {
const T &Value = S.Stk.pop<T>();
const Pointer &Ptr = S.Stk.peek<Pointer>().atIndex(Idx);
+ if (Ptr.isUnknownSizeArray())
+ return false;
if (!CheckInit(S, OpPC, Ptr))
return false;
Ptr.initialize();
@@ -1401,6 +1780,8 @@ template <PrimType Name, class T = typename PrimConv<Name>::T>
bool InitElemPop(InterpState &S, CodePtr OpPC, uint32_t Idx) {
const T &Value = S.Stk.pop<T>();
const Pointer &Ptr = S.Stk.pop<Pointer>().atIndex(Idx);
+ if (Ptr.isUnknownSizeArray())
+ return false;
if (!CheckInit(S, OpPC, Ptr))
return false;
Ptr.initialize();
@@ -1408,6 +1789,34 @@ bool InitElemPop(InterpState &S, CodePtr OpPC, uint32_t Idx) {
return true;
}
+inline bool Memcpy(InterpState &S, CodePtr OpPC) {
+ const Pointer &Src = S.Stk.pop<Pointer>();
+ Pointer &Dest = S.Stk.peek<Pointer>();
+
+ if (!CheckLoad(S, OpPC, Src))
+ return false;
+
+ return DoMemcpy(S, OpPC, Src, Dest);
+}
+
+inline bool ToMemberPtr(InterpState &S, CodePtr OpPC) {
+ const auto &Member = S.Stk.pop<MemberPointer>();
+ const auto &Base = S.Stk.pop<Pointer>();
+
+ S.Stk.push<MemberPointer>(Member.takeInstance(Base));
+ return true;
+}
+
+inline bool CastMemberPtrPtr(InterpState &S, CodePtr OpPC) {
+ const auto &MP = S.Stk.pop<MemberPointer>();
+
+ if (std::optional<Pointer> Ptr = MP.toPointer(S.Ctx)) {
+ S.Stk.push<Pointer>(*Ptr);
+ return true;
+ }
+ return false;
+}
+
//===----------------------------------------------------------------------===//
// AddOffset, SubOffset
//===----------------------------------------------------------------------===//
@@ -1415,62 +1824,68 @@ bool InitElemPop(InterpState &S, CodePtr OpPC, uint32_t Idx) {
template <class T, ArithOp Op>
bool OffsetHelper(InterpState &S, CodePtr OpPC, const T &Offset,
const Pointer &Ptr) {
- if (!CheckRange(S, OpPC, Ptr, CSK_ArrayToPointer))
- return false;
-
// A zero offset does not change the pointer.
if (Offset.isZero()) {
S.Stk.push<Pointer>(Ptr);
return true;
}
- if (!CheckNull(S, OpPC, Ptr, CSK_ArrayIndex))
- return false;
+ if (!CheckNull(S, OpPC, Ptr, CSK_ArrayIndex)) {
+ // The CheckNull will have emitted a note already, but we only
+ // abort in C++, since this is fine in C.
+ if (S.getLangOpts().CPlusPlus)
+ return false;
+ }
// Arrays of unknown bounds cannot have pointers into them.
if (!CheckArray(S, OpPC, Ptr))
return false;
- // Get a version of the index comparable to the type.
- T Index = T::from(Ptr.getIndex(), Offset.bitWidth());
- // Compute the largest index into the array.
- T MaxIndex = T::from(Ptr.getNumElems(), Offset.bitWidth());
+ uint64_t MaxIndex = static_cast<uint64_t>(Ptr.getNumElems());
+ uint64_t Index;
+ if (Ptr.isOnePastEnd())
+ Index = MaxIndex;
+ else
+ Index = Ptr.getIndex();
bool Invalid = false;
// Helper to report an invalid offset, computed as APSInt.
auto DiagInvalidOffset = [&]() -> void {
const unsigned Bits = Offset.bitWidth();
- APSInt APOffset(Offset.toAPSInt().extend(Bits + 2), false);
- APSInt APIndex(Index.toAPSInt().extend(Bits + 2), false);
+ APSInt APOffset(Offset.toAPSInt().extend(Bits + 2), /*IsUnsigned=*/false);
+ APSInt APIndex(APInt(Bits + 2, Index, /*IsSigned=*/true),
+ /*IsUnsigned=*/false);
APSInt NewIndex =
(Op == ArithOp::Add) ? (APIndex + APOffset) : (APIndex - APOffset);
S.CCEDiag(S.Current->getSource(OpPC), diag::note_constexpr_array_index)
- << NewIndex
- << /*array*/ static_cast<int>(!Ptr.inArray())
- << static_cast<unsigned>(MaxIndex);
+ << NewIndex << /*array*/ static_cast<int>(!Ptr.inArray()) << MaxIndex;
Invalid = true;
};
- T MaxOffset = T::from(MaxIndex - Index, Offset.bitWidth());
- if constexpr (Op == ArithOp::Add) {
- // If the new offset would be negative, bail out.
- if (Offset.isNegative() && (Offset.isMin() || -Offset > Index))
- DiagInvalidOffset();
-
- // If the new offset would be out of bounds, bail out.
- if (Offset.isPositive() && Offset > MaxOffset)
- DiagInvalidOffset();
- } else {
- // If the new offset would be negative, bail out.
- if (Offset.isPositive() && Index < Offset)
- DiagInvalidOffset();
-
- // If the new offset would be out of bounds, bail out.
- if (Offset.isNegative() && (Offset.isMin() || -Offset > MaxOffset))
- DiagInvalidOffset();
+ if (Ptr.isBlockPointer()) {
+ uint64_t IOffset = static_cast<uint64_t>(Offset);
+ uint64_t MaxOffset = MaxIndex - Index;
+
+ if constexpr (Op == ArithOp::Add) {
+ // If the new offset would be negative, bail out.
+ if (Offset.isNegative() && (Offset.isMin() || -IOffset > Index))
+ DiagInvalidOffset();
+
+ // If the new offset would be out of bounds, bail out.
+ if (Offset.isPositive() && IOffset > MaxOffset)
+ DiagInvalidOffset();
+ } else {
+ // If the new offset would be negative, bail out.
+ if (Offset.isPositive() && Index < IOffset)
+ DiagInvalidOffset();
+
+ // If the new offset would be out of bounds, bail out.
+ if (Offset.isNegative() && (Offset.isMin() || -IOffset > MaxOffset))
+ DiagInvalidOffset();
+ }
}
- if (Invalid && !Ptr.isDummy() && S.getLangOpts().CPlusPlus)
+ if (Invalid && S.getLangOpts().CPlusPlus)
return false;
// Offset is valid - compute it on unsigned.
@@ -1482,7 +1897,16 @@ bool OffsetHelper(InterpState &S, CodePtr OpPC, const T &Offset,
else
Result = WideIndex - WideOffset;
- S.Stk.push<Pointer>(Ptr.atIndex(static_cast<unsigned>(Result)));
+ // When the pointer is one-past-end, going back to index 0 is the only
+ // useful thing we can do. Any other index has been diagnosed before and
+ // we don't get here.
+ if (Result == 0 && Ptr.isOnePastEnd()) {
+ S.Stk.push<Pointer>(Ptr.asBlockPointer().Pointee,
+ Ptr.asBlockPointer().Base);
+ return true;
+ }
+
+ S.Stk.push<Pointer>(Ptr.atIndex(static_cast<uint64_t>(Result)));
return true;
}
@@ -1503,6 +1927,9 @@ bool SubOffset(InterpState &S, CodePtr OpPC) {
template <ArithOp Op>
static inline bool IncDecPtrHelper(InterpState &S, CodePtr OpPC,
const Pointer &Ptr) {
+ if (Ptr.isDummy())
+ return false;
+
using OneT = Integral<8, false>;
const Pointer &P = Ptr.deref<Pointer>();
@@ -1548,13 +1975,25 @@ inline bool SubPtr(InterpState &S, CodePtr OpPC) {
const Pointer &LHS = S.Stk.pop<Pointer>();
const Pointer &RHS = S.Stk.pop<Pointer>();
+ if (RHS.isZero()) {
+ S.Stk.push<T>(T::from(LHS.getIndex()));
+ return true;
+ }
+
if (!Pointer::hasSameBase(LHS, RHS) && S.getLangOpts().CPlusPlus) {
// TODO: Diagnose.
return false;
}
- T A = T::from(LHS.getIndex());
- T B = T::from(RHS.getIndex());
+ if (LHS.isZero() && RHS.isZero()) {
+ S.Stk.push<T>();
+ return true;
+ }
+
+ T A = LHS.isElementPastEnd() ? T::from(LHS.getNumElems())
+ : T::from(LHS.getIndex());
+ T B = RHS.isElementPastEnd() ? T::from(RHS.getNumElems())
+ : T::from(RHS.getIndex());
return AddSubMulHelper<T, T::sub, std::minus>(S, OpPC, A.bitWidth(), A, B);
}
@@ -1631,7 +2070,7 @@ bool CastFloatingIntegral(InterpState &S, CodePtr OpPC) {
auto Status = F.convertToInteger(Result);
// Float-to-Integral overflow check.
- if ((Status & APFloat::opStatus::opInvalidOp) && F.isFinite()) {
+ if ((Status & APFloat::opStatus::opInvalidOp)) {
const Expr *E = S.Current->getExpr(OpPC);
QualType Type = E->getType();
@@ -1692,13 +2131,71 @@ template <PrimType Name, class T = typename PrimConv<Name>::T>
bool CastPointerIntegral(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
- if (!CheckPotentialReinterpretCast(S, OpPC, Ptr))
+ if (Ptr.isDummy())
return false;
+ const SourceInfo &E = S.Current->getSource(OpPC);
+ S.CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << 2 << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC);
+
S.Stk.push<T>(T::from(Ptr.getIntegerRepresentation()));
return true;
}
+static inline bool CastPointerIntegralAP(InterpState &S, CodePtr OpPC,
+ uint32_t BitWidth) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (Ptr.isDummy())
+ return false;
+
+ const SourceInfo &E = S.Current->getSource(OpPC);
+ S.CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << 2 << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC);
+
+ S.Stk.push<IntegralAP<false>>(
+ IntegralAP<false>::from(Ptr.getIntegerRepresentation(), BitWidth));
+ return true;
+}
+
+static inline bool CastPointerIntegralAPS(InterpState &S, CodePtr OpPC,
+ uint32_t BitWidth) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (Ptr.isDummy())
+ return false;
+
+ const SourceInfo &E = S.Current->getSource(OpPC);
+ S.CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << 2 << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC);
+
+ S.Stk.push<IntegralAP<true>>(
+ IntegralAP<true>::from(Ptr.getIntegerRepresentation(), BitWidth));
+ return true;
+}
+
+static inline bool PtrPtrCast(InterpState &S, CodePtr OpPC, bool SrcIsVoidPtr) {
+ const auto &Ptr = S.Stk.peek<Pointer>();
+
+ if (SrcIsVoidPtr && S.getLangOpts().CPlusPlus) {
+ bool HasValidResult = !Ptr.isZero();
+
+ if (HasValidResult) {
+ // FIXME: note_constexpr_invalid_void_star_cast
+ } else if (!S.getLangOpts().CPlusPlus26) {
+ const SourceInfo &E = S.Current->getSource(OpPC);
+ S.CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << 3 << "'void *'" << S.Current->getRange(OpPC);
+ }
+ } else {
+ const SourceInfo &E = S.Current->getSource(OpPC);
+ S.CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << 2 << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC);
+ }
+
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// Zero, Nullptr
//===----------------------------------------------------------------------===//
@@ -1720,8 +2217,9 @@ static inline bool ZeroIntAPS(InterpState &S, CodePtr OpPC, uint32_t BitWidth) {
}
template <PrimType Name, class T = typename PrimConv<Name>::T>
-inline bool Null(InterpState &S, CodePtr OpPC) {
- S.Stk.push<T>();
+inline bool Null(InterpState &S, CodePtr OpPC, const Descriptor *Desc) {
+ // Note: Desc can be null.
+ S.Stk.push<T>(0, Desc);
return true;
}
@@ -1739,6 +2237,15 @@ inline bool This(InterpState &S, CodePtr OpPC) {
if (!CheckThis(S, OpPC, This))
return false;
+ // Ensure the This pointer has been cast to the correct base.
+ if (!This.isDummy()) {
+ assert(isa<CXXMethodDecl>(S.Current->getFunction()->getDecl()));
+ assert(This.getRecord());
+ assert(
+ This.getRecord()->getDecl() ==
+ cast<CXXMethodDecl>(S.Current->getFunction()->getDecl())->getParent());
+ }
+
S.Stk.push<Pointer>(This);
return true;
}
@@ -1754,42 +2261,88 @@ inline bool RVOPtr(InterpState &S, CodePtr OpPC) {
//===----------------------------------------------------------------------===//
// Shr, Shl
//===----------------------------------------------------------------------===//
+enum class ShiftDir { Left, Right };
-template <PrimType NameL, PrimType NameR>
-inline bool Shr(InterpState &S, CodePtr OpPC) {
- using LT = typename PrimConv<NameL>::T;
- using RT = typename PrimConv<NameR>::T;
- const auto &RHS = S.Stk.pop<RT>();
- const auto &LHS = S.Stk.pop<LT>();
+template <class LT, class RT, ShiftDir Dir>
+inline bool DoShift(InterpState &S, CodePtr OpPC, LT &LHS, RT &RHS) {
const unsigned Bits = LHS.bitWidth();
+ // OpenCL 6.3j: shift values are effectively % word size of LHS.
+ if (S.getLangOpts().OpenCL)
+ RT::bitAnd(RHS, RT::from(LHS.bitWidth() - 1, RHS.bitWidth()),
+ RHS.bitWidth(), &RHS);
+
+ if (RHS.isNegative()) {
+ // During constant-folding, a negative shift is an opposite shift. Such a
+ // shift is not a constant expression.
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.CCEDiag(Loc, diag::note_constexpr_negative_shift) << RHS.toAPSInt();
+ if (!S.noteUndefinedBehavior())
+ return false;
+ RHS = -RHS;
+ return DoShift < LT, RT,
+ Dir == ShiftDir::Left ? ShiftDir::Right
+ : ShiftDir::Left > (S, OpPC, LHS, RHS);
+ }
+
+ if constexpr (Dir == ShiftDir::Left) {
+ if (LHS.isNegative() && !S.getLangOpts().CPlusPlus20) {
+ // C++11 [expr.shift]p2: A signed left shift must have a non-negative
+ // operand, and must not overflow the corresponding unsigned type.
+ // C++2a [expr.shift]p2: E1 << E2 is the unique value congruent to
+ // E1 x 2^E2 modulo 2^N.
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.CCEDiag(Loc, diag::note_constexpr_lshift_of_negative) << LHS.toAPSInt();
+ if (!S.noteUndefinedBehavior())
+ return false;
+ }
+ }
+
if (!CheckShift(S, OpPC, LHS, RHS, Bits))
return false;
+ // Limit the shift amount to Bits - 1. If this happened,
+ // it has already been diagnosed by CheckShift() above,
+ // but we still need to handle it.
typename LT::AsUnsigned R;
- LT::AsUnsigned::shiftRight(LT::AsUnsigned::from(LHS),
- LT::AsUnsigned::from(RHS), Bits, &R);
- S.Stk.push<LT>(LT::from(R));
+ if constexpr (Dir == ShiftDir::Left) {
+ if (RHS > RT::from(Bits - 1, RHS.bitWidth()))
+ LT::AsUnsigned::shiftLeft(LT::AsUnsigned::from(LHS),
+ LT::AsUnsigned::from(Bits - 1), Bits, &R);
+ else
+ LT::AsUnsigned::shiftLeft(LT::AsUnsigned::from(LHS),
+ LT::AsUnsigned::from(RHS, Bits), Bits, &R);
+ } else {
+ if (RHS > RT::from(Bits - 1, RHS.bitWidth()))
+ LT::AsUnsigned::shiftRight(LT::AsUnsigned::from(LHS),
+ LT::AsUnsigned::from(Bits - 1), Bits, &R);
+ else
+ LT::AsUnsigned::shiftRight(LT::AsUnsigned::from(LHS),
+ LT::AsUnsigned::from(RHS, Bits), Bits, &R);
+ }
+ S.Stk.push<LT>(LT::from(R));
return true;
}
template <PrimType NameL, PrimType NameR>
-inline bool Shl(InterpState &S, CodePtr OpPC) {
+inline bool Shr(InterpState &S, CodePtr OpPC) {
using LT = typename PrimConv<NameL>::T;
using RT = typename PrimConv<NameR>::T;
- const auto &RHS = S.Stk.pop<RT>();
- const auto &LHS = S.Stk.pop<LT>();
- const unsigned Bits = LHS.bitWidth();
+ auto RHS = S.Stk.pop<RT>();
+ auto LHS = S.Stk.pop<LT>();
- if (!CheckShift(S, OpPC, LHS, RHS, Bits))
- return false;
+ return DoShift<LT, RT, ShiftDir::Right>(S, OpPC, LHS, RHS);
+}
- typename LT::AsUnsigned R;
- LT::AsUnsigned::shiftLeft(LT::AsUnsigned::from(LHS),
- LT::AsUnsigned::from(RHS, Bits), Bits, &R);
- S.Stk.push<LT>(LT::from(R));
- return true;
+template <PrimType NameL, PrimType NameR>
+inline bool Shl(InterpState &S, CodePtr OpPC) {
+ using LT = typename PrimConv<NameL>::T;
+ using RT = typename PrimConv<NameR>::T;
+ auto RHS = S.Stk.pop<RT>();
+ auto LHS = S.Stk.pop<LT>();
+
+ return DoShift<LT, RT, ShiftDir::Left>(S, OpPC, LHS, RHS);
}
//===----------------------------------------------------------------------===//
@@ -1831,18 +2384,87 @@ inline bool ArrayElemPtr(InterpState &S, CodePtr OpPC) {
const T &Offset = S.Stk.pop<T>();
const Pointer &Ptr = S.Stk.peek<Pointer>();
+ if (!Ptr.isZero()) {
+ if (!CheckArray(S, OpPC, Ptr))
+ return false;
+ }
+
if (!OffsetHelper<T, ArithOp::Add>(S, OpPC, Offset, Ptr))
return false;
return NarrowPtr(S, OpPC);
}
-/// Just takes a pointer and checks if its' an incomplete
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+inline bool ArrayElemPtrPop(InterpState &S, CodePtr OpPC) {
+ const T &Offset = S.Stk.pop<T>();
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!Ptr.isZero()) {
+ if (!CheckArray(S, OpPC, Ptr))
+ return false;
+ }
+
+ if (!OffsetHelper<T, ArithOp::Add>(S, OpPC, Offset, Ptr))
+ return false;
+
+ return NarrowPtr(S, OpPC);
+}
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+inline bool ArrayElem(InterpState &S, CodePtr OpPC, uint32_t Index) {
+ const Pointer &Ptr = S.Stk.peek<Pointer>();
+
+ if (!CheckLoad(S, OpPC, Ptr))
+ return false;
+
+ S.Stk.push<T>(Ptr.atIndex(Index).deref<T>());
+ return true;
+}
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+inline bool ArrayElemPop(InterpState &S, CodePtr OpPC, uint32_t Index) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!CheckLoad(S, OpPC, Ptr))
+ return false;
+
+ S.Stk.push<T>(Ptr.atIndex(Index).deref<T>());
+ return true;
+}
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+inline bool CopyArray(InterpState &S, CodePtr OpPC, uint32_t SrcIndex, uint32_t DestIndex, uint32_t Size) {
+ const auto &SrcPtr = S.Stk.pop<Pointer>();
+ const auto &DestPtr = S.Stk.peek<Pointer>();
+
+ for (uint32_t I = 0; I != Size; ++I) {
+ const Pointer &SP = SrcPtr.atIndex(SrcIndex + I);
+
+ if (!CheckLoad(S, OpPC, SP))
+ return false;
+
+ const Pointer &DP = DestPtr.atIndex(DestIndex + I);
+ DP.deref<T>() = SP.deref<T>();
+ DP.initialize();
+ }
+ return true;
+}
+
+/// Just takes a pointer and checks if it's an incomplete
/// array type.
inline bool ArrayDecay(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
- if (!Ptr.isUnknownSizeArray()) {
+ if (Ptr.isZero()) {
+ S.Stk.push<Pointer>(Ptr);
+ return true;
+ }
+
+ if (!CheckRange(S, OpPC, Ptr, CSK_ArrayToPointer))
+ return false;
+
+ if (Ptr.isRoot() || !Ptr.isUnknownSizeArray() || Ptr.isDummy()) {
S.Stk.push<Pointer>(Ptr.atIndex(0));
return true;
}
@@ -1853,21 +2475,61 @@ inline bool ArrayDecay(InterpState &S, CodePtr OpPC) {
return false;
}
-template <PrimType Name, class T = typename PrimConv<Name>::T>
-inline bool ArrayElemPtrPop(InterpState &S, CodePtr OpPC) {
- const T &Offset = S.Stk.pop<T>();
- const Pointer &Ptr = S.Stk.pop<Pointer>();
+inline bool CallVar(InterpState &S, CodePtr OpPC, const Function *Func,
+ uint32_t VarArgSize) {
+ if (Func->hasThisPointer()) {
+ size_t ArgSize = Func->getArgSize() + VarArgSize;
+ size_t ThisOffset = ArgSize - (Func->hasRVO() ? primSize(PT_Ptr) : 0);
+ const Pointer &ThisPtr = S.Stk.peek<Pointer>(ThisOffset);
- if (!OffsetHelper<T, ArithOp::Add>(S, OpPC, Offset, Ptr))
+ // If the current function is a lambda static invoker and
+ // the function we're about to call is a lambda call operator,
+ // skip the CheckInvoke, since the ThisPtr is a null pointer
+ // anyway.
+ if (!(S.Current->getFunction() &&
+ S.Current->getFunction()->isLambdaStaticInvoker() &&
+ Func->isLambdaCallOperator())) {
+ if (!CheckInvoke(S, OpPC, ThisPtr))
+ return false;
+ }
+
+ if (S.checkingPotentialConstantExpression())
+ return false;
+ }
+
+ if (!CheckCallable(S, OpPC, Func))
return false;
- return NarrowPtr(S, OpPC);
+ if (!CheckCallDepth(S, OpPC))
+ return false;
+
+ auto NewFrame = std::make_unique<InterpFrame>(S, Func, OpPC, VarArgSize);
+ InterpFrame *FrameBefore = S.Current;
+ S.Current = NewFrame.get();
+
+ APValue CallResult;
+ // Note that we cannot assert(CallResult.hasValue()) here since
+ // Ret() above only sets the APValue if the current frame doesn't
+ // have a caller set.
+ if (Interpret(S, CallResult)) {
+ NewFrame.release(); // Frame was delete'd already.
+ assert(S.Current == FrameBefore);
+ return true;
+ }
+
+ // Interpreting the function failed somehow. Reset to
+ // previous state.
+ S.Current = FrameBefore;
+ return false;
+
+ return false;
}
-inline bool Call(InterpState &S, CodePtr OpPC, const Function *Func) {
+inline bool Call(InterpState &S, CodePtr OpPC, const Function *Func,
+ uint32_t VarArgSize) {
if (Func->hasThisPointer()) {
- size_t ThisOffset =
- Func->getArgSize() - (Func->hasRVO() ? primSize(PT_Ptr) : 0);
+ size_t ArgSize = Func->getArgSize() + VarArgSize;
+ size_t ThisOffset = ArgSize - (Func->hasRVO() ? primSize(PT_Ptr) : 0);
const Pointer &ThisPtr = S.Stk.peek<Pointer>(ThisOffset);
@@ -1881,18 +2543,18 @@ inline bool Call(InterpState &S, CodePtr OpPC, const Function *Func) {
if (!CheckInvoke(S, OpPC, ThisPtr))
return false;
}
-
- if (S.checkingPotentialConstantExpression())
- return false;
}
if (!CheckCallable(S, OpPC, Func))
return false;
+ if (Func->hasThisPointer() && S.checkingPotentialConstantExpression())
+ return false;
+
if (!CheckCallDepth(S, OpPC))
return false;
- auto NewFrame = std::make_unique<InterpFrame>(S, Func, OpPC);
+ auto NewFrame = std::make_unique<InterpFrame>(S, Func, OpPC, VarArgSize);
InterpFrame *FrameBefore = S.Current;
S.Current = NewFrame.get();
@@ -1912,15 +2574,20 @@ inline bool Call(InterpState &S, CodePtr OpPC, const Function *Func) {
return false;
}
-inline bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func) {
+inline bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
+ uint32_t VarArgSize) {
assert(Func->hasThisPointer());
assert(Func->isVirtual());
- size_t ThisOffset =
- Func->getArgSize() - (Func->hasRVO() ? primSize(PT_Ptr) : 0);
+ size_t ArgSize = Func->getArgSize() + VarArgSize;
+ size_t ThisOffset = ArgSize - (Func->hasRVO() ? primSize(PT_Ptr) : 0);
Pointer &ThisPtr = S.Stk.peek<Pointer>(ThisOffset);
- const CXXRecordDecl *DynamicDecl =
- ThisPtr.getDeclDesc()->getType()->getAsCXXRecordDecl();
+ QualType DynamicType = ThisPtr.getDeclDesc()->getType();
+ const CXXRecordDecl *DynamicDecl;
+ if (DynamicType->isPointerType() || DynamicType->isReferenceType())
+ DynamicDecl = DynamicType->getPointeeCXXRecordDecl();
+ else
+ DynamicDecl = ThisPtr.getDeclDesc()->getType()->getAsCXXRecordDecl();
const auto *StaticDecl = cast<CXXRecordDecl>(Func->getParentDecl());
const auto *InitialFunction = cast<CXXMethodDecl>(Func->getDecl());
const CXXMethodDecl *Overrider = S.getContext().getOverridingFunction(
@@ -1947,7 +2614,7 @@ inline bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func) {
}
}
- return Call(S, OpPC, Func);
+ return Call(S, OpPC, Func, VarArgSize);
}
inline bool CallBI(InterpState &S, CodePtr &PC, const Function *Func,
@@ -1965,17 +2632,47 @@ inline bool CallBI(InterpState &S, CodePtr &PC, const Function *Func,
return false;
}
-inline bool CallPtr(InterpState &S, CodePtr OpPC) {
+inline bool CallPtr(InterpState &S, CodePtr OpPC, uint32_t ArgSize,
+ const CallExpr *CE) {
const FunctionPointer &FuncPtr = S.Stk.pop<FunctionPointer>();
const Function *F = FuncPtr.getFunction();
- if (!F || !F->isConstexpr())
+ if (!F) {
+ const Expr *E = S.Current->getExpr(OpPC);
+ S.FFDiag(E, diag::note_constexpr_null_callee)
+ << const_cast<Expr *>(E) << E->getSourceRange();
+ return false;
+ }
+
+ if (!FuncPtr.isValid())
+ return false;
+
+ assert(F);
+
+ // This happens when the call expression has been cast to
+ // something else, but we don't support that.
+ if (S.Ctx.classify(F->getDecl()->getReturnType()) !=
+ S.Ctx.classify(CE->getType()))
return false;
+ // Check argument nullability state.
+ if (F->hasNonNullAttr()) {
+ if (!CheckNonNullArgs(S, OpPC, F, CE, ArgSize))
+ return false;
+ }
+
+ assert(ArgSize >= F->getWrittenArgSize());
+ uint32_t VarArgSize = ArgSize - F->getWrittenArgSize();
+
+ // We need to do this explicitly here since we don't have the necessary
+ // information to do it automatically.
+ if (F->isThisPointerExplicit())
+ VarArgSize -= align(primSize(PT_Ptr));
+
if (F->isVirtual())
- return CallVirt(S, OpPC, F);
+ return CallVirt(S, OpPC, F, VarArgSize);
- return Call(S, OpPC, F);
+ return Call(S, OpPC, F, VarArgSize);
}
inline bool GetFnPtr(InterpState &S, CodePtr OpPC, const Function *Func) {
@@ -1984,6 +2681,36 @@ inline bool GetFnPtr(InterpState &S, CodePtr OpPC, const Function *Func) {
return true;
}
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+inline bool GetIntPtr(InterpState &S, CodePtr OpPC, const Descriptor *Desc) {
+ const T &IntVal = S.Stk.pop<T>();
+
+ S.Stk.push<Pointer>(static_cast<uint64_t>(IntVal), Desc);
+ return true;
+}
+
+inline bool GetMemberPtr(InterpState &S, CodePtr OpPC, const Decl *D) {
+ S.Stk.push<MemberPointer>(D);
+ return true;
+}
+
+inline bool GetMemberPtrBase(InterpState &S, CodePtr OpPC) {
+ const auto &MP = S.Stk.pop<MemberPointer>();
+
+ S.Stk.push<Pointer>(MP.getBase());
+ return true;
+}
+
+inline bool GetMemberPtrDecl(InterpState &S, CodePtr OpPC) {
+ const auto &MP = S.Stk.pop<MemberPointer>();
+
+ const auto *FD = cast<FunctionDecl>(MP.getDecl());
+ const auto *Func = S.getContext().getOrCreateFunction(FD);
+
+ S.Stk.push<FunctionPointer>(Func);
+ return true;
+}
+
/// Just emit a diagnostic. The expression that caused emission of this
/// op is not valid in a constant context.
inline bool Invalid(InterpState &S, CodePtr OpPC) {
@@ -1993,11 +2720,24 @@ inline bool Invalid(InterpState &S, CodePtr OpPC) {
return false;
}
+inline bool Unsupported(InterpState &S, CodePtr OpPC) {
+ const SourceLocation &Loc = S.Current->getLocation(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_stmt_expr_unsupported)
+ << S.Current->getRange(OpPC);
+ return false;
+}
+
+/// Do nothing and just abort execution.
+inline bool Error(InterpState &S, CodePtr OpPC) { return false; }
+
/// Same here, but only for casts.
inline bool InvalidCast(InterpState &S, CodePtr OpPC, CastKind Kind) {
const SourceLocation &Loc = S.Current->getLocation(OpPC);
- S.FFDiag(Loc, diag::note_constexpr_invalid_cast)
- << static_cast<unsigned>(Kind) << S.Current->getRange(OpPC);
+
+ // FIXME: Support diagnosing other invalid cast kinds.
+ if (Kind == CastKind::Reinterpret)
+ S.FFDiag(Loc, diag::note_constexpr_invalid_cast)
+ << static_cast<unsigned>(Kind) << S.Current->getRange(OpPC);
return false;
}
@@ -2007,6 +2747,27 @@ inline bool InvalidDeclRef(InterpState &S, CodePtr OpPC,
return CheckDeclRef(S, OpPC, DR);
}
+inline bool SizelessVectorElementSize(InterpState &S, CodePtr OpPC) {
+ if (S.inConstantContext()) {
+ const SourceRange &ArgRange = S.Current->getRange(OpPC);
+ const Expr *E = S.Current->getExpr(OpPC);
+ S.CCEDiag(E, diag::note_constexpr_non_const_vectorelements) << ArgRange;
+ }
+ return false;
+}
+
+inline bool Assume(InterpState &S, CodePtr OpPC) {
+ const auto Val = S.Stk.pop<Boolean>();
+
+ if (Val)
+ return true;
+
+ // Else, diagnose.
+ const SourceLocation &Loc = S.Current->getLocation(OpPC);
+ S.CCEDiag(Loc, diag::note_constexpr_assumption_failed);
+ return false;
+}
+
template <PrimType Name, class T = typename PrimConv<Name>::T>
inline bool OffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E) {
llvm::SmallVector<int64_t> ArrayIndices;
@@ -2022,6 +2783,180 @@ inline bool OffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E) {
return true;
}
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+inline bool CheckNonNullArg(InterpState &S, CodePtr OpPC) {
+ const T &Arg = S.Stk.peek<T>();
+ if (!Arg.isZero())
+ return true;
+
+ const SourceLocation &Loc = S.Current->getLocation(OpPC);
+ S.CCEDiag(Loc, diag::note_non_null_attribute_failed);
+
+ return false;
+}
+
+void diagnoseEnumValue(InterpState &S, CodePtr OpPC, const EnumDecl *ED,
+ const APSInt &Value);
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+inline bool CheckEnumValue(InterpState &S, CodePtr OpPC, const EnumDecl *ED) {
+ assert(ED);
+ assert(!ED->isFixed());
+ const APSInt Val = S.Stk.peek<T>().toAPSInt();
+
+ if (S.inConstantContext())
+ diagnoseEnumValue(S, OpPC, ED, Val);
+ return true;
+}
+
+/// OldPtr -> Integer -> NewPtr.
+template <PrimType TIn, PrimType TOut>
+inline bool DecayPtr(InterpState &S, CodePtr OpPC) {
+ static_assert(isPtrType(TIn) && isPtrType(TOut));
+ using FromT = typename PrimConv<TIn>::T;
+ using ToT = typename PrimConv<TOut>::T;
+
+ const FromT &OldPtr = S.Stk.pop<FromT>();
+ S.Stk.push<ToT>(ToT(OldPtr.getIntegerRepresentation(), nullptr));
+ return true;
+}
+
+inline bool CheckDecl(InterpState &S, CodePtr OpPC, const VarDecl *VD) {
+ // An expression E is a core constant expression unless the evaluation of E
+ // would evaluate one of the following: [C++23] - a control flow that passes
+ // through a declaration of a variable with static or thread storage duration
+ // unless that variable is usable in constant expressions.
+ assert(VD->isLocalVarDecl() &&
+ VD->isStaticLocal()); // Checked before emitting this.
+
+ if (VD == S.EvaluatingDecl)
+ return true;
+
+ if (!VD->isUsableInConstantExpressions(S.getCtx())) {
+ S.CCEDiag(VD->getLocation(), diag::note_constexpr_static_local)
+ << (VD->getTSCSpec() == TSCS_unspecified ? 0 : 1) << VD;
+ return false;
+ }
+ return true;
+}
+
+inline bool Alloc(InterpState &S, CodePtr OpPC, const Descriptor *Desc) {
+ assert(Desc);
+
+ if (!CheckDynamicMemoryAllocation(S, OpPC))
+ return false;
+
+ DynamicAllocator &Allocator = S.getAllocator();
+ Block *B = Allocator.allocate(Desc, S.Ctx.getEvalID());
+ assert(B);
+
+ S.Stk.push<Pointer>(B, sizeof(InlineDescriptor));
+
+ return true;
+}
+
+template <PrimType Name, class SizeT = typename PrimConv<Name>::T>
+inline bool AllocN(InterpState &S, CodePtr OpPC, PrimType T, const Expr *Source,
+ bool IsNoThrow) {
+ if (!CheckDynamicMemoryAllocation(S, OpPC))
+ return false;
+
+ SizeT NumElements = S.Stk.pop<SizeT>();
+ if (!CheckArraySize(S, OpPC, &NumElements, primSize(T), IsNoThrow)) {
+ if (!IsNoThrow)
+ return false;
+
+ // If this failed and is nothrow, just return a null ptr.
+ S.Stk.push<Pointer>(0, nullptr);
+ return true;
+ }
+
+ DynamicAllocator &Allocator = S.getAllocator();
+ Block *B = Allocator.allocate(Source, T, static_cast<size_t>(NumElements),
+ S.Ctx.getEvalID());
+ assert(B);
+ S.Stk.push<Pointer>(B, sizeof(InlineDescriptor));
+
+ return true;
+}
+
+template <PrimType Name, class SizeT = typename PrimConv<Name>::T>
+inline bool AllocCN(InterpState &S, CodePtr OpPC, const Descriptor *ElementDesc,
+ bool IsNoThrow) {
+ if (!CheckDynamicMemoryAllocation(S, OpPC))
+ return false;
+
+ SizeT NumElements = S.Stk.pop<SizeT>();
+ if (!CheckArraySize(S, OpPC, &NumElements, ElementDesc->getSize(),
+ IsNoThrow)) {
+ if (!IsNoThrow)
+ return false;
+
+ // If this failed and is nothrow, just return a null ptr.
+ S.Stk.push<Pointer>(0, ElementDesc);
+ return true;
+ }
+
+ DynamicAllocator &Allocator = S.getAllocator();
+ Block *B = Allocator.allocate(ElementDesc, static_cast<size_t>(NumElements),
+ S.Ctx.getEvalID());
+ assert(B);
+
+ S.Stk.push<Pointer>(B, sizeof(InlineDescriptor));
+
+ return true;
+}
+
+bool RunDestructors(InterpState &S, CodePtr OpPC, const Block *B);
+static inline bool Free(InterpState &S, CodePtr OpPC, bool DeleteIsArrayForm) {
+ if (!CheckDynamicMemoryAllocation(S, OpPC))
+ return false;
+
+ const Expr *Source = nullptr;
+ const Block *BlockToDelete = nullptr;
+ {
+ // Extra scope for this so the block doesn't have this pointer
+ // pointing to it when we destroy it.
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ // Deleteing nullptr is always fine.
+ if (Ptr.isZero())
+ return true;
+
+ if (!Ptr.isRoot() || Ptr.isOnePastEnd() || Ptr.isArrayElement()) {
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_delete_subobject)
+ << Ptr.toDiagnosticString(S.getCtx()) << Ptr.isOnePastEnd();
+ return false;
+ }
+
+ Source = Ptr.getDeclDesc()->asExpr();
+ BlockToDelete = Ptr.block();
+
+ if (!CheckDeleteSource(S, OpPC, Source, Ptr))
+ return false;
+ }
+ assert(Source);
+ assert(BlockToDelete);
+
+ // Invoke destructors before deallocating the memory.
+ if (!RunDestructors(S, OpPC, BlockToDelete))
+ return false;
+
+ DynamicAllocator &Allocator = S.getAllocator();
+ bool WasArrayAlloc = Allocator.isArrayAllocation(Source);
+ const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
+
+ if (!Allocator.deallocate(Source, BlockToDelete, S)) {
+ // Nothing has been deallocated, this must be a double-delete.
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_double_delete);
+ return false;
+ }
+ return CheckNewDeleteForms(S, OpPC, WasArrayAlloc, DeleteIsArrayForm,
+ BlockDesc, Source);
+}
+
//===----------------------------------------------------------------------===//
// Read opcode arguments
//===----------------------------------------------------------------------===//
@@ -2041,6 +2976,22 @@ template <> inline Floating ReadArg<Floating>(InterpState &S, CodePtr &OpPC) {
return F;
}
+template <>
+inline IntegralAP<false> ReadArg<IntegralAP<false>>(InterpState &S,
+ CodePtr &OpPC) {
+ IntegralAP<false> I = IntegralAP<false>::deserialize(*OpPC);
+ OpPC += align(I.bytesToSerialize());
+ return I;
+}
+
+template <>
+inline IntegralAP<true> ReadArg<IntegralAP<true>>(InterpState &S,
+ CodePtr &OpPC) {
+ IntegralAP<true> I = IntegralAP<true>::deserialize(*OpPC);
+ OpPC += align(I.bytesToSerialize());
+ return I;
+}
+
} // namespace interp
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp
index a62128d9cfae..5ac778aeb607 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp
@@ -73,7 +73,7 @@ void Block::replacePointer(Pointer *Old, Pointer *New) {
removePointer(Old);
addPointer(New);
- Old->Pointee = nullptr;
+ Old->PointeeStorage.BS.Pointee = nullptr;
#ifndef NDEBUG
assert(!hasPointer(Old));
@@ -92,7 +92,8 @@ bool Block::hasPointer(const Pointer *P) const {
#endif
DeadBlock::DeadBlock(DeadBlock *&Root, Block *Blk)
- : Root(Root), B(Blk->Desc, Blk->IsStatic, Blk->IsExtern, /*isDead=*/true) {
+ : Root(Root),
+ B(~0u, Blk->Desc, Blk->IsStatic, Blk->IsExtern, /*isDead=*/true) {
// Add the block to the chain of dead blocks.
if (Root)
Root->Prev = this;
@@ -104,10 +105,14 @@ DeadBlock::DeadBlock(DeadBlock *&Root, Block *Blk)
// Transfer pointers.
B.Pointers = Blk->Pointers;
for (Pointer *P = Blk->Pointers; P; P = P->Next)
- P->Pointee = &B;
+ P->PointeeStorage.BS.Pointee = &B;
+ Blk->Pointers = nullptr;
}
void DeadBlock::free() {
+ if (B.IsInitialized)
+ B.invokeDtor();
+
if (Prev)
Prev->Next = Next;
if (Next)
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h
index 9db82567d2d5..3760ded7b13f 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h
@@ -49,13 +49,19 @@ enum PrimType : unsigned;
class Block final {
public:
/// Creates a new block.
- Block(const std::optional<unsigned> &DeclID, const Descriptor *Desc,
- bool IsStatic = false, bool IsExtern = false)
- : DeclID(DeclID), IsStatic(IsStatic), IsExtern(IsExtern), Desc(Desc) {}
+ Block(unsigned EvalID, const std::optional<unsigned> &DeclID,
+ const Descriptor *Desc, bool IsStatic = false, bool IsExtern = false)
+ : EvalID(EvalID), DeclID(DeclID), IsStatic(IsStatic), IsExtern(IsExtern),
+ IsDynamic(false), Desc(Desc) {
+ assert(Desc);
+ }
- Block(const Descriptor *Desc, bool IsStatic = false, bool IsExtern = false)
- : DeclID((unsigned)-1), IsStatic(IsStatic), IsExtern(IsExtern),
- Desc(Desc) {}
+ Block(unsigned EvalID, const Descriptor *Desc, bool IsStatic = false,
+ bool IsExtern = false)
+ : EvalID(EvalID), DeclID((unsigned)-1), IsStatic(IsStatic),
+ IsExtern(IsExtern), IsDynamic(false), Desc(Desc) {
+ assert(Desc);
+ }
/// Returns the block's descriptor.
const Descriptor *getDescriptor() const { return Desc; }
@@ -67,11 +73,16 @@ public:
bool isStatic() const { return IsStatic; }
/// Checks if the block is temporary.
bool isTemporary() const { return Desc->IsTemporary; }
+ bool isDynamic() const { return IsDynamic; }
/// Returns the size of the block.
unsigned getSize() const { return Desc->getAllocSize(); }
/// Returns the declaration ID.
std::optional<unsigned> getDeclID() const { return DeclID; }
+ /// Returns whether the data of this block has been initialized via
+ /// invoking the Ctor func.
bool isInitialized() const { return IsInitialized; }
+ /// The Evaluation ID this block was created in.
+ unsigned getEvalID() const { return EvalID; }
/// Returns a pointer to the stored data.
/// You are allowed to read Desc->getSize() bytes from this address.
@@ -95,15 +106,9 @@ public:
return reinterpret_cast<const std::byte *>(this) + sizeof(Block);
}
- /// Returns a view over the data.
- template <typename T>
- T &deref() { return *reinterpret_cast<T *>(data()); }
- template <typename T> const T &deref() const {
- return *reinterpret_cast<const T *>(data());
- }
-
/// Invokes the constructor.
void invokeCtor() {
+ assert(!IsInitialized);
std::memset(rawData(), 0, Desc->getAllocSize());
if (Desc->CtorFn)
Desc->CtorFn(this, data(), Desc->IsConst, Desc->IsMutable,
@@ -113,18 +118,27 @@ public:
/// Invokes the Destructor.
void invokeDtor() {
+ assert(IsInitialized);
if (Desc->DtorFn)
Desc->DtorFn(this, data(), Desc);
IsInitialized = false;
}
-protected:
+ void dump() const { dump(llvm::errs()); }
+ void dump(llvm::raw_ostream &OS) const;
+
+private:
friend class Pointer;
friend class DeadBlock;
friend class InterpState;
+ friend class DynamicAllocator;
- Block(const Descriptor *Desc, bool IsExtern, bool IsStatic, bool IsDead)
- : IsStatic(IsStatic), IsExtern(IsExtern), IsDead(true), Desc(Desc) {}
+ Block(unsigned EvalID, const Descriptor *Desc, bool IsExtern, bool IsStatic,
+ bool IsDead)
+ : EvalID(EvalID), IsStatic(IsStatic), IsExtern(IsExtern), IsDead(true),
+ IsDynamic(false), Desc(Desc) {
+ assert(Desc);
+ }
/// Deletes a dead block at the end of its lifetime.
void cleanup();
@@ -137,6 +151,7 @@ protected:
bool hasPointer(const Pointer *P) const;
#endif
+ const unsigned EvalID = ~0u;
/// Start of the chain of pointers.
Pointer *Pointers = nullptr;
/// Unique identifier of the declaration.
@@ -151,6 +166,9 @@ protected:
/// Flag indicating if the block contents have been initialized
/// via invokeCtor.
bool IsInitialized = false;
+ /// Flag indicating if this block has been allocated via dynamic
+ /// memory allocation (e.g. malloc).
+ bool IsDynamic = false;
/// Pointer to the stack slot descriptor.
const Descriptor *Desc;
};
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpBuiltin.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpBuiltin.cpp
index 754ca96b0c64..98928b3c22d7 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpBuiltin.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpBuiltin.cpp
@@ -9,13 +9,25 @@
#include "Boolean.h"
#include "Interp.h"
#include "PrimType.h"
+#include "clang/AST/OSLog.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/SipHash.h"
namespace clang {
namespace interp {
+static unsigned callArgSize(const InterpState &S, const CallExpr *C) {
+ unsigned O = 0;
+
+ for (const Expr *E : C->arguments()) {
+ O += align(primSize(*S.getContext().classify(E)));
+ }
+
+ return O;
+}
+
template <typename T>
static T getParam(const InterpFrame *Frame, unsigned Index) {
assert(Frame->getFunction()->getNumParams() > Index);
@@ -53,106 +65,43 @@ static APSInt peekToAPSInt(InterpStack &Stk, PrimType T, size_t Offset = 0) {
Offset = align(primSize(T));
APSInt R;
- INT_TYPE_SWITCH(T, {
- T Val = Stk.peek<T>(Offset);
- R = APSInt(
- APInt(Val.bitWidth(), static_cast<uint64_t>(Val), T::isSigned()));
- });
+ INT_TYPE_SWITCH(T, R = Stk.peek<T>(Offset).toAPSInt());
return R;
}
-/// Pushes \p Val to the stack, as a target-dependent 'int'.
-static void pushInt(InterpState &S, int32_t Val) {
- PrimType IntType = getIntPrimType(S);
- if (IntType == PT_Sint32)
- S.Stk.push<Integral<32, true>>(Integral<32, true>::from(Val));
- else if (IntType == PT_Sint16)
- S.Stk.push<Integral<16, true>>(Integral<16, true>::from(Val));
- else
- llvm_unreachable("Int isn't 16 or 32 bit?");
-}
-
-static void pushAPSInt(InterpState &S, const APSInt &Val) {
- bool Signed = Val.isSigned();
+/// Pushes \p Val on the stack as the type given by \p QT.
+static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
+ assert(QT->isSignedIntegerOrEnumerationType() ||
+ QT->isUnsignedIntegerOrEnumerationType());
+ std::optional<PrimType> T = S.getContext().classify(QT);
+ assert(T);
- if (Signed) {
- switch (Val.getBitWidth()) {
- case 64:
- S.Stk.push<Integral<64, true>>(
- Integral<64, true>::from(Val.getSExtValue()));
- break;
- case 32:
- S.Stk.push<Integral<32, true>>(
- Integral<32, true>::from(Val.getSExtValue()));
- break;
- case 16:
- S.Stk.push<Integral<16, true>>(
- Integral<16, true>::from(Val.getSExtValue()));
- break;
- case 8:
- S.Stk.push<Integral<8, true>>(
- Integral<8, true>::from(Val.getSExtValue()));
- break;
- default:
- llvm_unreachable("Invalid integer bitwidth");
- }
- return;
- }
-
- // Unsigned.
- switch (Val.getBitWidth()) {
- case 64:
- S.Stk.push<Integral<64, false>>(
- Integral<64, false>::from(Val.getZExtValue()));
- break;
- case 32:
- S.Stk.push<Integral<32, false>>(
- Integral<32, false>::from(Val.getZExtValue()));
- break;
- case 16:
- S.Stk.push<Integral<16, false>>(
- Integral<16, false>::from(Val.getZExtValue()));
- break;
- case 8:
- S.Stk.push<Integral<8, false>>(
- Integral<8, false>::from(Val.getZExtValue()));
- break;
- default:
- llvm_unreachable("Invalid integer bitwidth");
+ if (QT->isSignedIntegerOrEnumerationType()) {
+ int64_t V = Val.getSExtValue();
+ INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V)); });
+ } else {
+ assert(QT->isUnsignedIntegerOrEnumerationType());
+ uint64_t V = Val.getZExtValue();
+ INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V)); });
}
}
-/// Pushes \p Val to the stack, as a target-dependent 'long'.
-static void pushLong(InterpState &S, int64_t Val) {
- PrimType LongType = getLongPrimType(S);
- if (LongType == PT_Sint64)
- S.Stk.push<Integral<64, true>>(Integral<64, true>::from(Val));
- else if (LongType == PT_Sint32)
- S.Stk.push<Integral<32, true>>(Integral<32, true>::from(Val));
- else if (LongType == PT_Sint16)
- S.Stk.push<Integral<16, true>>(Integral<16, true>::from(Val));
+template <typename T>
+static void pushInteger(InterpState &S, T Val, QualType QT) {
+ if constexpr (std::is_same_v<T, APInt>)
+ pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
else
- llvm_unreachable("Long isn't 16, 32 or 64 bit?");
+ pushInteger(S,
+ APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
+ std::is_signed_v<T>),
+ !std::is_signed_v<T>),
+ QT);
}
-static void pushSizeT(InterpState &S, uint64_t Val) {
- const TargetInfo &TI = S.getCtx().getTargetInfo();
- unsigned SizeTWidth = TI.getTypeWidth(TI.getSizeType());
-
- switch (SizeTWidth) {
- case 64:
- S.Stk.push<Integral<64, false>>(Integral<64, false>::from(Val));
- break;
- case 32:
- S.Stk.push<Integral<32, false>>(Integral<32, false>::from(Val));
- break;
- case 16:
- S.Stk.push<Integral<16, false>>(Integral<16, false>::from(Val));
- break;
- default:
- llvm_unreachable("We don't handle this size_t size.");
- }
+static void assignInteger(Pointer &Dest, PrimType ValueT, const APSInt &Value) {
+ INT_TYPE_SWITCH_NO_BOOL(
+ ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
}
static bool retPrimValue(InterpState &S, CodePtr OpPC, APValue &Result,
@@ -182,14 +131,48 @@ static bool retPrimValue(InterpState &S, CodePtr OpPC, APValue &Result,
#undef RET_CASE
}
+static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const CallExpr *Call) {
+ // The current frame is the one for __builtin_is_constant_evaluated.
+ // The one above that, potentially the one for std::is_constant_evaluated().
+ if (S.inConstantContext() && !S.checkingPotentialConstantExpression() &&
+ Frame->Caller && S.getEvalStatus().Diag) {
+ auto isStdCall = [](const FunctionDecl *F) -> bool {
+ return F && F->isInStdNamespace() && F->getIdentifier() &&
+ F->getIdentifier()->isStr("is_constant_evaluated");
+ };
+ const InterpFrame *Caller = Frame->Caller;
+
+ if (Caller->Caller && isStdCall(Caller->getCallee())) {
+ const Expr *E = Caller->Caller->getExpr(Caller->getRetPC());
+ S.report(E->getExprLoc(),
+ diag::warn_is_constant_evaluated_always_true_constexpr)
+ << "std::is_constant_evaluated" << E->getSourceRange();
+ } else {
+ const Expr *E = Frame->Caller->getExpr(Frame->getRetPC());
+ S.report(E->getExprLoc(),
+ diag::warn_is_constant_evaluated_always_true_constexpr)
+ << "__builtin_is_constant_evaluated" << E->getSourceRange();
+ }
+ }
+
+ S.Stk.push<Boolean>(Boolean::from(S.inConstantContext()));
+ return true;
+}
+
static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
- const InterpFrame *Frame) {
+ const InterpFrame *Frame,
+ const CallExpr *Call) {
const Pointer &A = getParam<Pointer>(Frame, 0);
const Pointer &B = getParam<Pointer>(Frame, 1);
if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
return false;
+ if (A.isDummy() || B.isDummy())
+ return false;
+
assert(A.getFieldDesc()->isPrimitiveArray());
assert(B.getFieldDesc()->isPrimitiveArray());
@@ -217,12 +200,13 @@ static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
break;
}
- pushInt(S, Result);
+ pushInteger(S, Result, Call->getType());
return true;
}
static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
- const InterpFrame *Frame) {
+ const InterpFrame *Frame,
+ const CallExpr *Call) {
const Pointer &StrPtr = getParam<Pointer>(Frame, 0);
if (!CheckArray(S, OpPC, StrPtr))
@@ -231,7 +215,7 @@ static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
if (!CheckLive(S, OpPC, StrPtr, AK_Read))
return false;
- if (!CheckDummy(S, OpPC, StrPtr))
+ if (!CheckDummy(S, OpPC, StrPtr, AK_Read))
return false;
assert(StrPtr.getFieldDesc()->isPrimitiveArray());
@@ -248,7 +232,8 @@ static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
break;
}
- pushSizeT(S, Len);
+ pushInteger(S, Len, Call->getType());
+
return true;
}
@@ -378,68 +363,71 @@ static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC,
/// take a float, double, long double, etc.
/// But for us, that's all a Floating anyway.
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC,
- const InterpFrame *Frame, const Function *F) {
+ const InterpFrame *Frame, const Function *F,
+ const CallExpr *Call) {
const Floating &Arg = S.Stk.peek<Floating>();
- pushInt(S, Arg.isNan());
+ pushInteger(S, Arg.isNan(), Call->getType());
return true;
}
static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
- const Function *F) {
+ const Function *F,
+ const CallExpr *Call) {
const Floating &Arg = S.Stk.peek<Floating>();
- pushInt(S, Arg.isSignaling());
+ pushInteger(S, Arg.isSignaling(), Call->getType());
return true;
}
static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame, const Function *F,
- bool CheckSign) {
+ bool CheckSign, const CallExpr *Call) {
const Floating &Arg = S.Stk.peek<Floating>();
bool IsInf = Arg.isInf();
if (CheckSign)
- pushInt(S, IsInf ? (Arg.isNegative() ? -1 : 1) : 0);
+ pushInteger(S, IsInf ? (Arg.isNegative() ? -1 : 1) : 0, Call->getType());
else
- pushInt(S, Arg.isInf());
+ pushInteger(S, Arg.isInf(), Call->getType());
return true;
}
static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
- const Function *F) {
+ const Function *F, const CallExpr *Call) {
const Floating &Arg = S.Stk.peek<Floating>();
- pushInt(S, Arg.isFinite());
+ pushInteger(S, Arg.isFinite(), Call->getType());
return true;
}
static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
- const Function *F) {
+ const Function *F, const CallExpr *Call) {
const Floating &Arg = S.Stk.peek<Floating>();
- pushInt(S, Arg.isNormal());
+ pushInteger(S, Arg.isNormal(), Call->getType());
return true;
}
static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
- const Function *F) {
+ const Function *F,
+ const CallExpr *Call) {
const Floating &Arg = S.Stk.peek<Floating>();
- pushInt(S, Arg.isDenormal());
+ pushInteger(S, Arg.isDenormal(), Call->getType());
return true;
}
static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC,
- const InterpFrame *Frame,
- const Function *F) {
+ const InterpFrame *Frame, const Function *F,
+ const CallExpr *Call) {
const Floating &Arg = S.Stk.peek<Floating>();
- pushInt(S, Arg.isZero());
+ pushInteger(S, Arg.isZero(), Call->getType());
return true;
}
@@ -456,7 +444,7 @@ static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC,
int32_t Result =
static_cast<int32_t>((F.classify() & FPClassArg).getZExtValue());
- pushInt(S, Result);
+ pushInteger(S, Result, Call->getType());
return true;
}
@@ -464,7 +452,8 @@ static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC,
/// Five int values followed by one floating value.
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
- const Function *Func) {
+ const Function *Func,
+ const CallExpr *Call) {
const Floating &Val = S.Stk.peek<Floating>();
unsigned Index;
@@ -490,7 +479,7 @@ static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC,
align(primSize(PT_Float)) + ((1 + (4 - Index)) * align(IntSize));
APSInt I = peekToAPSInt(S.Stk, getIntPrimType(S), Offset);
- pushInt(S, I.getZExtValue());
+ pushInteger(S, I, Call->getType());
return true;
}
@@ -514,7 +503,7 @@ static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
const CallExpr *Call) {
PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
APSInt Val = peekToAPSInt(S.Stk, ArgT);
- pushInt(S, Val.popcount());
+ pushInteger(S, Val.popcount(), Call->getType());
return true;
}
@@ -523,7 +512,7 @@ static bool interp__builtin_parity(InterpState &S, CodePtr OpPC,
const Function *Func, const CallExpr *Call) {
PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
APSInt Val = peekToAPSInt(S.Stk, ArgT);
- pushInt(S, Val.popcount() % 2);
+ pushInteger(S, Val.popcount() % 2, Call->getType());
return true;
}
@@ -532,7 +521,7 @@ static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC,
const Function *Func, const CallExpr *Call) {
PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
APSInt Val = peekToAPSInt(S.Stk, ArgT);
- pushInt(S, Val.getBitWidth() - Val.getSignificantBits());
+ pushInteger(S, Val.getBitWidth() - Val.getSignificantBits(), Call->getType());
return true;
}
@@ -542,7 +531,7 @@ static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC,
const CallExpr *Call) {
PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
APSInt Val = peekToAPSInt(S.Stk, ArgT);
- pushAPSInt(S, APSInt(Val.reverseBits(), /*IsUnsigned=*/true));
+ pushInteger(S, Val.reverseBits(), Call->getType());
return true;
}
@@ -557,7 +546,7 @@ static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC,
GCCTypeClass ResultClass =
EvaluateBuiltinClassifyType(Arg->getType(), S.getLangOpts());
int32_t ReturnVal = static_cast<int32_t>(ResultClass);
- pushInt(S, ReturnVal);
+ pushInteger(S, ReturnVal, Call->getType());
return true;
}
@@ -577,7 +566,7 @@ static bool interp__builtin_expect(InterpState &S, CodePtr OpPC,
Offset += align(primSize(PT_Float));
APSInt Val = peekToAPSInt(S.Stk, ArgT, Offset);
- pushLong(S, Val.getSExtValue());
+ pushInteger(S, Val, Call->getType());
return true;
}
@@ -586,11 +575,12 @@ static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
const Function *Func, const CallExpr *Call,
bool Right) {
- PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
- assert(ArgT == *S.getContext().classify(Call->getArg(1)->getType()));
+ PrimType AmountT = *S.getContext().classify(Call->getArg(1)->getType());
+ PrimType ValueT = *S.getContext().classify(Call->getArg(0)->getType());
- APSInt Amount = peekToAPSInt(S.Stk, ArgT);
- APSInt Value = peekToAPSInt(S.Stk, ArgT, align(primSize(ArgT)) * 2);
+ APSInt Amount = peekToAPSInt(S.Stk, AmountT);
+ APSInt Value = peekToAPSInt(
+ S.Stk, ValueT, align(primSize(AmountT)) + align(primSize(ValueT)));
APSInt Result;
if (Right)
@@ -600,7 +590,7 @@ static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC,
Result = APSInt(Value.rotl(Amount.urem(Value.getBitWidth())),
/*IsUnsigned=*/true);
- pushAPSInt(S, Result);
+ pushInteger(S, Result, Call->getType());
return true;
}
@@ -611,7 +601,7 @@ static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC,
APSInt Value = peekToAPSInt(S.Stk, ArgT);
uint64_t N = Value.countr_zero();
- pushInt(S, N == Value.getBitWidth() ? 0 : N + 1);
+ pushInteger(S, N == Value.getBitWidth() ? 0 : N + 1, Call->getType());
return true;
}
@@ -619,8 +609,8 @@ static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
const Function *Func,
const CallExpr *Call) {
- PrimType PtrT =
- S.getContext().classify(Call->getArg(0)->getType()).value_or(PT_Ptr);
+ assert(Call->getArg(0)->isLValue());
+ PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
if (PtrT == PT_FnPtr) {
const FunctionPointer &Arg = S.Stk.peek<FunctionPointer>();
@@ -634,28 +624,516 @@ static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC,
return true;
}
+static bool interp__builtin_move(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame, const Function *Func,
+ const CallExpr *Call) {
+
+ PrimType ArgT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
+
+ TYPE_SWITCH(ArgT, const T &Arg = S.Stk.peek<T>(); S.Stk.push<T>(Arg););
+
+ return Func->getDecl()->isConstexpr();
+}
+
+static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *Func,
+ const CallExpr *Call) {
+ PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+ APSInt Arg = peekToAPSInt(S.Stk, ArgT);
+
+ int Result =
+ S.getCtx().getTargetInfo().getEHDataRegisterNumber(Arg.getZExtValue());
+ pushInteger(S, Result, Call->getType());
+ return true;
+}
+
+/// Just takes the first argument to the call and puts it on the stack.
+static bool noopPointer(InterpState &S, CodePtr OpPC, const InterpFrame *Frame,
+ const Function *Func, const CallExpr *Call) {
+ const Pointer &Arg = S.Stk.peek<Pointer>();
+ S.Stk.push<Pointer>(Arg);
+ return true;
+}
+
+// Two integral values followed by a pointer (lhs, rhs, resultOut)
+static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *Func,
+ const CallExpr *Call) {
+ Pointer &ResultPtr = S.Stk.peek<Pointer>();
+ if (ResultPtr.isDummy())
+ return false;
+
+ unsigned BuiltinOp = Func->getBuiltinID();
+ PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
+ PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
+ APSInt RHS = peekToAPSInt(S.Stk, RHST,
+ align(primSize(PT_Ptr)) + align(primSize(RHST)));
+ APSInt LHS = peekToAPSInt(S.Stk, LHST,
+ align(primSize(PT_Ptr)) + align(primSize(RHST)) +
+ align(primSize(LHST)));
+ QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
+ PrimType ResultT = *S.getContext().classify(ResultType);
+ bool Overflow;
+
+ APSInt Result;
+ if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
+ BuiltinOp == Builtin::BI__builtin_sub_overflow ||
+ BuiltinOp == Builtin::BI__builtin_mul_overflow) {
+ bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
+ ResultType->isSignedIntegerOrEnumerationType();
+ bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
+ ResultType->isSignedIntegerOrEnumerationType();
+ uint64_t LHSSize = LHS.getBitWidth();
+ uint64_t RHSSize = RHS.getBitWidth();
+ uint64_t ResultSize = S.getCtx().getTypeSize(ResultType);
+ uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);
+
+ // Add an additional bit if the signedness isn't uniformly agreed to. We
+ // could do this ONLY if there is a signed and an unsigned that both have
+ // MaxBits, but the code to check that is pretty nasty. The issue will be
+ // caught in the shrink-to-result later anyway.
+ if (IsSigned && !AllSigned)
+ ++MaxBits;
+
+ LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
+ RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
+ Result = APSInt(MaxBits, !IsSigned);
+ }
+
+ // Find largest int.
+ switch (BuiltinOp) {
+ default:
+ llvm_unreachable("Invalid value for BuiltinOp");
+ case Builtin::BI__builtin_add_overflow:
+ case Builtin::BI__builtin_sadd_overflow:
+ case Builtin::BI__builtin_saddl_overflow:
+ case Builtin::BI__builtin_saddll_overflow:
+ case Builtin::BI__builtin_uadd_overflow:
+ case Builtin::BI__builtin_uaddl_overflow:
+ case Builtin::BI__builtin_uaddll_overflow:
+ Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
+ : LHS.uadd_ov(RHS, Overflow);
+ break;
+ case Builtin::BI__builtin_sub_overflow:
+ case Builtin::BI__builtin_ssub_overflow:
+ case Builtin::BI__builtin_ssubl_overflow:
+ case Builtin::BI__builtin_ssubll_overflow:
+ case Builtin::BI__builtin_usub_overflow:
+ case Builtin::BI__builtin_usubl_overflow:
+ case Builtin::BI__builtin_usubll_overflow:
+ Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
+ : LHS.usub_ov(RHS, Overflow);
+ break;
+ case Builtin::BI__builtin_mul_overflow:
+ case Builtin::BI__builtin_smul_overflow:
+ case Builtin::BI__builtin_smull_overflow:
+ case Builtin::BI__builtin_smulll_overflow:
+ case Builtin::BI__builtin_umul_overflow:
+ case Builtin::BI__builtin_umull_overflow:
+ case Builtin::BI__builtin_umulll_overflow:
+ Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
+ : LHS.umul_ov(RHS, Overflow);
+ break;
+ }
+
+ // In the case where multiple sizes are allowed, truncate and see if
+ // the values are the same.
+ if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
+ BuiltinOp == Builtin::BI__builtin_sub_overflow ||
+ BuiltinOp == Builtin::BI__builtin_mul_overflow) {
+ // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
+ // since it will give us the behavior of a TruncOrSelf in the case where
+ // its parameter <= its size. We previously set Result to be at least the
+  // type-size of the result, so getTypeSize(ResultType) <= Result.getBitWidth().
+ APSInt Temp = Result.extOrTrunc(S.getCtx().getTypeSize(ResultType));
+ Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());
+
+ if (!APSInt::isSameValue(Temp, Result))
+ Overflow = true;
+ Result = Temp;
+ }
+
+  // Write Result to ResultPtr and put Overflow on the stack.
+ assignInteger(ResultPtr, ResultT, Result);
+ ResultPtr.initialize();
+ assert(Func->getDecl()->getReturnType()->isBooleanType());
+ S.Stk.push<Boolean>(Overflow);
+ return true;
+}
+
+/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
+static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *Func,
+ const CallExpr *Call) {
+ unsigned BuiltinOp = Func->getBuiltinID();
+ PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
+ PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
+ PrimType CarryT = *S.getContext().classify(Call->getArg(2)->getType());
+ APSInt RHS = peekToAPSInt(S.Stk, RHST,
+ align(primSize(PT_Ptr)) + align(primSize(CarryT)) +
+ align(primSize(RHST)));
+ APSInt LHS =
+ peekToAPSInt(S.Stk, LHST,
+ align(primSize(PT_Ptr)) + align(primSize(RHST)) +
+ align(primSize(CarryT)) + align(primSize(LHST)));
+ APSInt CarryIn = peekToAPSInt(
+ S.Stk, LHST, align(primSize(PT_Ptr)) + align(primSize(CarryT)));
+ APSInt CarryOut;
+
+ APSInt Result;
+ // Copy the number of bits and sign.
+ Result = LHS;
+ CarryOut = LHS;
+
+ bool FirstOverflowed = false;
+ bool SecondOverflowed = false;
+ switch (BuiltinOp) {
+ default:
+ llvm_unreachable("Invalid value for BuiltinOp");
+ case Builtin::BI__builtin_addcb:
+ case Builtin::BI__builtin_addcs:
+ case Builtin::BI__builtin_addc:
+ case Builtin::BI__builtin_addcl:
+ case Builtin::BI__builtin_addcll:
+ Result =
+ LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
+ break;
+ case Builtin::BI__builtin_subcb:
+ case Builtin::BI__builtin_subcs:
+ case Builtin::BI__builtin_subc:
+ case Builtin::BI__builtin_subcl:
+ case Builtin::BI__builtin_subcll:
+ Result =
+ LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
+ break;
+ }
+ // It is possible for both overflows to happen but CGBuiltin uses an OR so
+ // this is consistent.
+ CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);
+
+ Pointer &CarryOutPtr = S.Stk.peek<Pointer>();
+ QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
+ PrimType CarryOutT = *S.getContext().classify(CarryOutType);
+ assignInteger(CarryOutPtr, CarryOutT, CarryOut);
+ CarryOutPtr.initialize();
+
+ assert(Call->getType() == Call->getArg(0)->getType());
+ pushInteger(S, Result, Call->getType());
+ return true;
+}
+
+static bool interp__builtin_clz(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame, const Function *Func,
+ const CallExpr *Call) {
+ unsigned CallSize = callArgSize(S, Call);
+ unsigned BuiltinOp = Func->getBuiltinID();
+ PrimType ValT = *S.getContext().classify(Call->getArg(0));
+ const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize);
+
+ // When the argument is 0, the result of GCC builtins is undefined, whereas
+ // for Microsoft intrinsics, the result is the bit-width of the argument.
+ bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
+ BuiltinOp != Builtin::BI__lzcnt &&
+ BuiltinOp != Builtin::BI__lzcnt64;
+
+ if (Val == 0) {
+ if (Func->getBuiltinID() == Builtin::BI__builtin_clzg &&
+ Call->getNumArgs() == 2) {
+ // We have a fallback parameter.
+ PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
+ const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT);
+ pushInteger(S, Fallback, Call->getType());
+ return true;
+ }
+
+ if (ZeroIsUndefined)
+ return false;
+ }
+
+ pushInteger(S, Val.countl_zero(), Call->getType());
+ return true;
+}
+
+static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame, const Function *Func,
+ const CallExpr *Call) {
+ unsigned CallSize = callArgSize(S, Call);
+ PrimType ValT = *S.getContext().classify(Call->getArg(0));
+ const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize);
+
+ if (Val == 0) {
+ if (Func->getBuiltinID() == Builtin::BI__builtin_ctzg &&
+ Call->getNumArgs() == 2) {
+ // We have a fallback parameter.
+ PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
+ const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT);
+ pushInteger(S, Fallback, Call->getType());
+ return true;
+ }
+ return false;
+ }
+
+ pushInteger(S, Val.countr_zero(), Call->getType());
+ return true;
+}
+
+static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *Func, const CallExpr *Call) {
+ PrimType ReturnT = *S.getContext().classify(Call->getType());
+ PrimType ValT = *S.getContext().classify(Call->getArg(0));
+ const APSInt &Val = peekToAPSInt(S.Stk, ValT);
+ assert(Val.getActiveBits() <= 64);
+
+ INT_TYPE_SWITCH(ReturnT,
+ { S.Stk.push<T>(T::from(Val.byteSwap().getZExtValue())); });
+ return true;
+}
+
+/// bool __atomic_always_lock_free(size_t, void const volatile*)
+/// bool __atomic_is_lock_free(size_t, void const volatile*)
+/// bool __c11_atomic_is_lock_free(size_t)
+static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *Func,
+ const CallExpr *Call) {
+ unsigned BuiltinOp = Func->getBuiltinID();
+
+ PrimType ValT = *S.getContext().classify(Call->getArg(0));
+ unsigned SizeValOffset = 0;
+ if (BuiltinOp != Builtin::BI__c11_atomic_is_lock_free)
+ SizeValOffset = align(primSize(ValT)) + align(primSize(PT_Ptr));
+ const APSInt &SizeVal = peekToAPSInt(S.Stk, ValT, SizeValOffset);
+
+ auto returnBool = [&S](bool Value) -> bool {
+ S.Stk.push<Boolean>(Value);
+ return true;
+ };
+
+ // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
+ // of two less than or equal to the maximum inline atomic width, we know it
+ // is lock-free. If the size isn't a power of two, or greater than the
+ // maximum alignment where we promote atomics, we know it is not lock-free
+ // (at least not in the sense of atomic_is_lock_free). Otherwise,
+ // the answer can only be determined at runtime; for example, 16-byte
+ // atomics have lock-free implementations on some, but not all,
+ // x86-64 processors.
+
+ // Check power-of-two.
+ CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
+ if (Size.isPowerOfTwo()) {
+ // Check against inlining width.
+ unsigned InlineWidthBits =
+ S.getCtx().getTargetInfo().getMaxAtomicInlineWidth();
+ if (Size <= S.getCtx().toCharUnitsFromBits(InlineWidthBits)) {
+
+ // OK, we will inline appropriately-aligned operations of this size,
+ // and _Atomic(T) is appropriately-aligned.
+ if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
+ Size == CharUnits::One())
+ return returnBool(true);
+
+ // Same for null pointers.
+ assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
+ const Pointer &Ptr = S.Stk.peek<Pointer>();
+ if (Ptr.isZero())
+ return returnBool(true);
+
+ QualType PointeeType = Call->getArg(1)
+ ->IgnoreImpCasts()
+ ->getType()
+ ->castAs<PointerType>()
+ ->getPointeeType();
+ // OK, we will inline operations on this object.
+ if (!PointeeType->isIncompleteType() &&
+ S.getCtx().getTypeAlignInChars(PointeeType) >= Size)
+ return returnBool(true);
+ }
+ }
+
+ if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
+ return returnBool(false);
+
+ return false;
+}
+
+/// __builtin_complex(float A, float B);
+static bool interp__builtin_complex(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *Func,
+ const CallExpr *Call) {
+ const Floating &Arg2 = S.Stk.peek<Floating>();
+ const Floating &Arg1 = S.Stk.peek<Floating>(align(primSize(PT_Float)) * 2);
+ Pointer &Result = S.Stk.peek<Pointer>(align(primSize(PT_Float)) * 2 +
+ align(primSize(PT_Ptr)));
+
+ Result.atIndex(0).deref<Floating>() = Arg1;
+ Result.atIndex(0).initialize();
+ Result.atIndex(1).deref<Floating>() = Arg2;
+ Result.atIndex(1).initialize();
+ Result.initialize();
+
+ return true;
+}
+
+/// __builtin_is_aligned()
+/// __builtin_align_up()
+/// __builtin_align_down()
+/// The first parameter is either an integer or a pointer.
+/// The second parameter is the requested alignment as an integer.
+static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *Func,
+ const CallExpr *Call) {
+ unsigned BuiltinOp = Func->getBuiltinID();
+ unsigned CallSize = callArgSize(S, Call);
+
+ PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
+ const APSInt &Alignment = peekToAPSInt(S.Stk, AlignmentT);
+
+ if (Alignment < 0 || !Alignment.isPowerOf2()) {
+ S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
+ return false;
+ }
+ unsigned SrcWidth = S.getCtx().getIntWidth(Call->getArg(0)->getType());
+ APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
+ if (APSInt::compareValues(Alignment, MaxValue) > 0) {
+ S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
+ << MaxValue << Call->getArg(0)->getType() << Alignment;
+ return false;
+ }
+
+ // The first parameter is either an integer or a pointer (but not a function
+ // pointer).
+ PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));
+
+ if (isIntegralType(FirstArgT)) {
+ const APSInt &Src = peekToAPSInt(S.Stk, FirstArgT, CallSize);
+ APSInt Align = Alignment.extOrTrunc(Src.getBitWidth());
+ if (BuiltinOp == Builtin::BI__builtin_align_up) {
+ APSInt AlignedVal =
+ APSInt((Src + (Align - 1)) & ~(Align - 1), Src.isUnsigned());
+ pushInteger(S, AlignedVal, Call->getType());
+ } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
+ APSInt AlignedVal = APSInt(Src & ~(Align - 1), Src.isUnsigned());
+ pushInteger(S, AlignedVal, Call->getType());
+ } else {
+ assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
+ S.Stk.push<Boolean>((Src & (Align - 1)) == 0);
+ }
+ return true;
+ }
+
+ assert(FirstArgT == PT_Ptr);
+ const Pointer &Ptr = S.Stk.peek<Pointer>(CallSize);
+
+ unsigned PtrOffset = Ptr.getByteOffset();
+ PtrOffset = Ptr.getIndex();
+ CharUnits BaseAlignment =
+ S.getCtx().getDeclAlign(Ptr.getDeclDesc()->asValueDecl());
+ CharUnits PtrAlign =
+ BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));
+
+ if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
+ if (PtrAlign.getQuantity() >= Alignment) {
+ S.Stk.push<Boolean>(true);
+ return true;
+ }
+ // If the alignment is not known to be sufficient, some cases could still
+ // be aligned at run time. However, if the requested alignment is less or
+ // equal to the base alignment and the offset is not aligned, we know that
+ // the run-time value can never be aligned.
+ if (BaseAlignment.getQuantity() >= Alignment &&
+ PtrAlign.getQuantity() < Alignment) {
+ S.Stk.push<Boolean>(false);
+ return true;
+ }
+
+ S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
+ << Alignment;
+ return false;
+ }
+
+ assert(BuiltinOp == Builtin::BI__builtin_align_down ||
+ BuiltinOp == Builtin::BI__builtin_align_up);
+
+ // For align_up/align_down, we can return the same value if the alignment
+ // is known to be greater or equal to the requested value.
+ if (PtrAlign.getQuantity() >= Alignment) {
+ S.Stk.push<Pointer>(Ptr);
+ return true;
+ }
+
+ // The alignment could be greater than the minimum at run-time, so we cannot
+ // infer much about the resulting pointer value. One case is possible:
+ // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
+ // can infer the correct index if the requested alignment is smaller than
+ // the base alignment so we can perform the computation on the offset.
+ if (BaseAlignment.getQuantity() >= Alignment) {
+ assert(Alignment.getBitWidth() <= 64 &&
+ "Cannot handle > 64-bit address-space");
+ uint64_t Alignment64 = Alignment.getZExtValue();
+ CharUnits NewOffset =
+ CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
+ ? llvm::alignDown(PtrOffset, Alignment64)
+ : llvm::alignTo(PtrOffset, Alignment64));
+
+ S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
+ return true;
+ }
+
+ // Otherwise, we cannot constant-evaluate the result.
+ S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
+ return false;
+}
+
+static bool interp__builtin_os_log_format_buffer_size(InterpState &S,
+ CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *Func,
+ const CallExpr *Call) {
+ analyze_os_log::OSLogBufferLayout Layout;
+ analyze_os_log::computeOSLogBufferLayout(S.getCtx(), Call, Layout);
+ pushInteger(S, Layout.size().getQuantity(), Call->getType());
+ return true;
+}
+
+static bool interp__builtin_ptrauth_string_discriminator(
+ InterpState &S, CodePtr OpPC, const InterpFrame *Frame,
+ const Function *Func, const CallExpr *Call) {
+ const auto &Ptr = S.Stk.peek<Pointer>();
+ assert(Ptr.getFieldDesc()->isPrimitiveArray());
+
+ StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
+ uint64_t Result = getPointerAuthStableSipHash(R);
+ pushInteger(S, Result, Call->getType());
+ return true;
+}
+
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
const CallExpr *Call) {
- InterpFrame *Frame = S.Current;
+ const InterpFrame *Frame = S.Current;
APValue Dummy;
- std::optional<PrimType> ReturnT = S.getContext().classify(Call->getType());
-
- // If classify failed, we assume void.
- assert(ReturnT || Call->getType()->isVoidType());
+ std::optional<PrimType> ReturnT = S.getContext().classify(Call);
switch (F->getBuiltinID()) {
case Builtin::BI__builtin_is_constant_evaluated:
- S.Stk.push<Boolean>(Boolean::from(S.inConstantContext()));
+ if (!interp__builtin_is_constant_evaluated(S, OpPC, Frame, Call))
+ return false;
break;
case Builtin::BI__builtin_assume:
+ case Builtin::BI__assume:
break;
case Builtin::BI__builtin_strcmp:
- if (!interp__builtin_strcmp(S, OpPC, Frame))
+ if (!interp__builtin_strcmp(S, OpPC, Frame, Call))
return false;
break;
case Builtin::BI__builtin_strlen:
- if (!interp__builtin_strlen(S, OpPC, Frame))
+ if (!interp__builtin_strlen(S, OpPC, Frame, Call))
return false;
break;
case Builtin::BI__builtin_nan:
@@ -715,38 +1193,38 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
break;
case Builtin::BI__builtin_isnan:
- if (!interp__builtin_isnan(S, OpPC, Frame, F))
+ if (!interp__builtin_isnan(S, OpPC, Frame, F, Call))
return false;
break;
case Builtin::BI__builtin_issignaling:
- if (!interp__builtin_issignaling(S, OpPC, Frame, F))
+ if (!interp__builtin_issignaling(S, OpPC, Frame, F, Call))
return false;
break;
case Builtin::BI__builtin_isinf:
- if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/false))
+ if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/false, Call))
return false;
break;
case Builtin::BI__builtin_isinf_sign:
- if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/true))
+ if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/true, Call))
return false;
break;
case Builtin::BI__builtin_isfinite:
- if (!interp__builtin_isfinite(S, OpPC, Frame, F))
+ if (!interp__builtin_isfinite(S, OpPC, Frame, F, Call))
return false;
break;
case Builtin::BI__builtin_isnormal:
- if (!interp__builtin_isnormal(S, OpPC, Frame, F))
+ if (!interp__builtin_isnormal(S, OpPC, Frame, F, Call))
return false;
break;
case Builtin::BI__builtin_issubnormal:
- if (!interp__builtin_issubnormal(S, OpPC, Frame, F))
+ if (!interp__builtin_issubnormal(S, OpPC, Frame, F, Call))
return false;
break;
case Builtin::BI__builtin_iszero:
- if (!interp__builtin_iszero(S, OpPC, Frame, F))
+ if (!interp__builtin_iszero(S, OpPC, Frame, F, Call))
return false;
break;
case Builtin::BI__builtin_isfpclass:
@@ -754,7 +1232,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
return false;
break;
case Builtin::BI__builtin_fpclassify:
- if (!interp__builtin_fpclassify(S, OpPC, Frame, F))
+ if (!interp__builtin_fpclassify(S, OpPC, Frame, F, Call))
return false;
break;
@@ -769,6 +1247,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
case Builtin::BI__builtin_popcount:
case Builtin::BI__builtin_popcountl:
case Builtin::BI__builtin_popcountll:
+ case Builtin::BI__builtin_popcountg:
case Builtin::BI__popcnt16: // Microsoft variants of popcount
case Builtin::BI__popcnt:
case Builtin::BI__popcnt64:
@@ -848,7 +1327,126 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
return false;
break;
+ case Builtin::BIas_const:
+ case Builtin::BIforward:
+ case Builtin::BIforward_like:
+ case Builtin::BImove:
+ case Builtin::BImove_if_noexcept:
+ if (!interp__builtin_move(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_eh_return_data_regno:
+ if (!interp__builtin_eh_return_data_regno(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_launder:
+ if (!noopPointer(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_add_overflow:
+ case Builtin::BI__builtin_sub_overflow:
+ case Builtin::BI__builtin_mul_overflow:
+ case Builtin::BI__builtin_sadd_overflow:
+ case Builtin::BI__builtin_uadd_overflow:
+ case Builtin::BI__builtin_uaddl_overflow:
+ case Builtin::BI__builtin_uaddll_overflow:
+ case Builtin::BI__builtin_usub_overflow:
+ case Builtin::BI__builtin_usubl_overflow:
+ case Builtin::BI__builtin_usubll_overflow:
+ case Builtin::BI__builtin_umul_overflow:
+ case Builtin::BI__builtin_umull_overflow:
+ case Builtin::BI__builtin_umulll_overflow:
+ case Builtin::BI__builtin_saddl_overflow:
+ case Builtin::BI__builtin_saddll_overflow:
+ case Builtin::BI__builtin_ssub_overflow:
+ case Builtin::BI__builtin_ssubl_overflow:
+ case Builtin::BI__builtin_ssubll_overflow:
+ case Builtin::BI__builtin_smul_overflow:
+ case Builtin::BI__builtin_smull_overflow:
+ case Builtin::BI__builtin_smulll_overflow:
+ if (!interp__builtin_overflowop(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_addcb:
+ case Builtin::BI__builtin_addcs:
+ case Builtin::BI__builtin_addc:
+ case Builtin::BI__builtin_addcl:
+ case Builtin::BI__builtin_addcll:
+ case Builtin::BI__builtin_subcb:
+ case Builtin::BI__builtin_subcs:
+ case Builtin::BI__builtin_subc:
+ case Builtin::BI__builtin_subcl:
+ case Builtin::BI__builtin_subcll:
+ if (!interp__builtin_carryop(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_clz:
+ case Builtin::BI__builtin_clzl:
+ case Builtin::BI__builtin_clzll:
+ case Builtin::BI__builtin_clzs:
+ case Builtin::BI__builtin_clzg:
+ case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
+ case Builtin::BI__lzcnt:
+ case Builtin::BI__lzcnt64:
+ if (!interp__builtin_clz(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_ctz:
+ case Builtin::BI__builtin_ctzl:
+ case Builtin::BI__builtin_ctzll:
+ case Builtin::BI__builtin_ctzs:
+ case Builtin::BI__builtin_ctzg:
+ if (!interp__builtin_ctz(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_bswap16:
+ case Builtin::BI__builtin_bswap32:
+ case Builtin::BI__builtin_bswap64:
+ if (!interp__builtin_bswap(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
+ case Builtin::BI__atomic_always_lock_free:
+ case Builtin::BI__atomic_is_lock_free:
+ case Builtin::BI__c11_atomic_is_lock_free:
+ if (!interp__builtin_atomic_lock_free(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_complex:
+ if (!interp__builtin_complex(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_is_aligned:
+ case Builtin::BI__builtin_align_up:
+ case Builtin::BI__builtin_align_down:
+ if (!interp__builtin_is_aligned_up_down(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_os_log_format_buffer_size:
+ if (!interp__builtin_os_log_format_buffer_size(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_ptrauth_string_discriminator:
+ if (!interp__builtin_ptrauth_string_discriminator(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
default:
+ S.FFDiag(S.Current->getLocation(OpPC),
+ diag::note_invalid_subexpr_in_const_expr)
+ << S.Current->getRange(OpPC);
+
return false;
}
@@ -872,7 +1470,7 @@ bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
const RecordType *RT = CurrentType->getAs<RecordType>();
if (!RT)
return false;
- RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getDecl();
if (RD->isInvalidDecl())
return false;
const ASTRecordLayout &RL = S.getCtx().getASTRecordLayout(RD);
@@ -946,5 +1544,50 @@ bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
return true;
}
+bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
+ assert(Src.isLive() && Dest.isLive());
+
+ [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
+ const Descriptor *DestDesc = Dest.getFieldDesc();
+
+ assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());
+
+ if (DestDesc->isPrimitiveArray()) {
+ assert(SrcDesc->isPrimitiveArray());
+ assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
+ PrimType ET = DestDesc->getPrimType();
+ for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
+ Pointer DestElem = Dest.atIndex(I);
+ TYPE_SWITCH(ET, {
+ DestElem.deref<T>() = Src.atIndex(I).deref<T>();
+ DestElem.initialize();
+ });
+ }
+ return true;
+ }
+
+ if (DestDesc->isRecord()) {
+ assert(SrcDesc->isRecord());
+ assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
+ const Record *R = DestDesc->ElemRecord;
+ for (const Record::Field &F : R->fields()) {
+ Pointer DestField = Dest.atField(F.Offset);
+ if (std::optional<PrimType> FT = S.Ctx.classify(F.Decl->getType())) {
+ TYPE_SWITCH(*FT, {
+ DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
+ DestField.initialize();
+ });
+ } else {
+ return Invalid(S, OpPC);
+ }
+ }
+ return true;
+ }
+
+ // FIXME: Composite types.
+
+ return Invalid(S, OpPC);
+}
+
} // namespace interp
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.cpp
index d460d7ea3710..1c37450ae1c6 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.cpp
@@ -12,6 +12,7 @@
#include "Function.h"
#include "InterpStack.h"
#include "InterpState.h"
+#include "MemberPointer.h"
#include "Pointer.h"
#include "PrimType.h"
#include "Program.h"
@@ -22,10 +23,10 @@ using namespace clang;
using namespace clang::interp;
InterpFrame::InterpFrame(InterpState &S, const Function *Func,
- InterpFrame *Caller, CodePtr RetPC)
+ InterpFrame *Caller, CodePtr RetPC, unsigned ArgSize)
: Caller(Caller), S(S), Depth(Caller ? Caller->Depth + 1 : 0), Func(Func),
- RetPC(RetPC), ArgSize(Func ? Func->getArgSize() : 0),
- Args(static_cast<char *>(S.Stk.top())), FrameOffset(S.Stk.size()) {
+ RetPC(RetPC), ArgSize(ArgSize), Args(static_cast<char *>(S.Stk.top())),
+ FrameOffset(S.Stk.size()) {
if (!Func)
return;
@@ -36,22 +37,17 @@ InterpFrame::InterpFrame(InterpState &S, const Function *Func,
Locals = std::make_unique<char[]>(FrameSize);
for (auto &Scope : Func->scopes()) {
for (auto &Local : Scope.locals()) {
- Block *B = new (localBlock(Local.Offset)) Block(Local.Desc);
+ Block *B =
+ new (localBlock(Local.Offset)) Block(S.Ctx.getEvalID(), Local.Desc);
B->invokeCtor();
- InlineDescriptor *ID = localInlineDesc(Local.Offset);
- ID->Desc = Local.Desc;
- ID->IsActive = true;
- ID->Offset = sizeof(InlineDescriptor);
- ID->IsBase = false;
- ID->IsFieldMutable = false;
- ID->IsConst = false;
- ID->IsInitialized = false;
+ new (localInlineDesc(Local.Offset)) InlineDescriptor(Local.Desc);
}
}
}
-InterpFrame::InterpFrame(InterpState &S, const Function *Func, CodePtr RetPC)
- : InterpFrame(S, Func, S.Current, RetPC) {
+InterpFrame::InterpFrame(InterpState &S, const Function *Func, CodePtr RetPC,
+ unsigned VarArgSize)
+ : InterpFrame(S, Func, S.Current, RetPC, Func->getArgSize() + VarArgSize) {
// As per our calling convention, the this pointer is
// part of the ArgSize.
// If the function has RVO, the RVO pointer is first.
@@ -158,13 +154,25 @@ void print(llvm::raw_ostream &OS, const Pointer &P, ASTContext &Ctx,
}
void InterpFrame::describe(llvm::raw_ostream &OS) const {
+ // We create frames for builtin functions as well, but we can't reliably
+ // diagnose them. The 'in call to' diagnostics for them add no value to the
+ // user _and_ it doesn't generally work since the argument types don't always
+ // match the function prototype. Just ignore them.
+ // Similarly, for lambda static invokers, we would just print __invoke().
+ if (const auto *F = getFunction();
+ F && (F->isBuiltin() || F->isLambdaStaticInvoker()))
+ return;
+
const FunctionDecl *F = getCallee();
if (const auto *M = dyn_cast<CXXMethodDecl>(F);
M && M->isInstance() && !isa<CXXConstructorDecl>(F)) {
print(OS, This, S.getCtx(), S.getCtx().getRecordType(M->getParent()));
OS << "->";
}
- OS << *F << "(";
+
+ F->getNameForDiagnostic(OS, S.getCtx().getPrintingPolicy(),
+ /*Qualified=*/false);
+ OS << '(';
unsigned Off = 0;
Off += Func->hasRVO() ? primSize(PT_Ptr) : 0;
@@ -190,32 +198,36 @@ Frame *InterpFrame::getCaller() const {
}
SourceRange InterpFrame::getCallRange() const {
- if (!Caller->Func)
- return S.getRange(nullptr, {});
+ if (!Caller->Func) {
+ if (SourceRange NullRange = S.getRange(nullptr, {}); NullRange.isValid())
+ return NullRange;
+ return S.EvalLocation;
+ }
return S.getRange(Caller->Func, RetPC - sizeof(uintptr_t));
}
const FunctionDecl *InterpFrame::getCallee() const {
+ if (!Func)
+ return nullptr;
return Func->getDecl();
}
Pointer InterpFrame::getLocalPointer(unsigned Offset) const {
assert(Offset < Func->getFrameSize() && "Invalid local offset.");
- return Pointer(localBlock(Offset), sizeof(InlineDescriptor));
+ return Pointer(localBlock(Offset));
}
Pointer InterpFrame::getParamPointer(unsigned Off) {
// Return the block if it was created previously.
- auto Pt = Params.find(Off);
- if (Pt != Params.end()) {
+ if (auto Pt = Params.find(Off); Pt != Params.end())
return Pointer(reinterpret_cast<Block *>(Pt->second.get()));
- }
// Allocate memory to store the parameter and the block metadata.
const auto &Desc = Func->getParamDescriptor(Off);
size_t BlockSize = sizeof(Block) + Desc.second->getAllocSize();
auto Memory = std::make_unique<char[]>(BlockSize);
- auto *B = new (Memory.get()) Block(Desc.second);
+ auto *B = new (Memory.get()) Block(S.Ctx.getEvalID(), Desc.second);
+ B->invokeCtor();
// Copy the initial value.
TYPE_SWITCH(Desc.first, new (B->data()) T(stackRef<T>(Off)));
@@ -235,10 +247,16 @@ SourceInfo InterpFrame::getSource(CodePtr PC) const {
}
const Expr *InterpFrame::getExpr(CodePtr PC) const {
+ if (Func && (!Func->hasBody() || Func->getDecl()->isImplicit()) && Caller)
+ return Caller->getExpr(RetPC);
+
return S.getExpr(Func, PC);
}
SourceLocation InterpFrame::getLocation(CodePtr PC) const {
+ if (Func && (!Func->hasBody() || Func->getDecl()->isImplicit()) && Caller)
+ return Caller->getLocation(RetPC);
+
return S.getLocation(Func, PC);
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h
index cba4f9560bf5..4a312a71bcf1 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h
@@ -15,8 +15,6 @@
#include "Frame.h"
#include "Program.h"
-#include <cstdint>
-#include <vector>
namespace clang {
namespace interp {
@@ -32,13 +30,14 @@ public:
/// Creates a new frame for a method call.
InterpFrame(InterpState &S, const Function *Func, InterpFrame *Caller,
- CodePtr RetPC);
+ CodePtr RetPC, unsigned ArgSize);
/// Creates a new frame with the values that make sense.
/// I.e., the caller is the current frame of S,
/// the This() pointer is the current Pointer on the top of S's stack,
/// and the RVO pointer is before that.
- InterpFrame(InterpState &S, const Function *Func, CodePtr RetPC);
+ InterpFrame(InterpState &S, const Function *Func, CodePtr RetPC,
+ unsigned VarArgSize = 0);
/// Destroys the frame, killing all live pointers to stack slots.
~InterpFrame();
@@ -84,11 +83,9 @@ public:
/// Returns the value of an argument.
template <typename T> const T &getParam(unsigned Offset) const {
auto Pt = Params.find(Offset);
- if (Pt == Params.end()) {
+ if (Pt == Params.end())
return stackRef<T>(Offset);
- } else {
- return Pointer(reinterpret_cast<Block *>(Pt->second.get())).deref<T>();
- }
+ return Pointer(reinterpret_cast<Block *>(Pt->second.get())).deref<T>();
}
/// Mutates a local copy of a parameter.
@@ -122,6 +119,9 @@ public:
unsigned getDepth() const { return Depth; }
+ void dump() const { dump(llvm::errs(), 0); }
+ void dump(llvm::raw_ostream &OS, unsigned Indent = 0) const;
+
private:
/// Returns an original argument from the stack.
template <typename T> const T &stackRef(unsigned Offset) const {
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpShared.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpShared.cpp
new file mode 100644
index 000000000000..6af03691f1b2
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpShared.cpp
@@ -0,0 +1,42 @@
+//===--- InterpShared.cpp ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "InterpShared.h"
+#include "clang/AST/Attr.h"
+#include "llvm/ADT/BitVector.h"
+
+namespace clang {
+namespace interp {
+
+llvm::BitVector collectNonNullArgs(const FunctionDecl *F,
+ const llvm::ArrayRef<const Expr *> &Args) {
+ llvm::BitVector NonNullArgs;
+ if (!F)
+ return NonNullArgs;
+
+ assert(F);
+ NonNullArgs.resize(Args.size());
+
+ for (const auto *Attr : F->specific_attrs<NonNullAttr>()) {
+ if (!Attr->args_size()) {
+ NonNullArgs.set();
+ break;
+ } else
+ for (auto Idx : Attr->args()) {
+ unsigned ASTIdx = Idx.getASTIndex();
+ if (ASTIdx >= Args.size())
+ continue;
+ NonNullArgs[ASTIdx] = true;
+ }
+ }
+
+ return NonNullArgs;
+}
+
+} // namespace interp
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpShared.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpShared.h
new file mode 100644
index 000000000000..8c5e0bee22c9
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpShared.h
@@ -0,0 +1,26 @@
+//===--- InterpShared.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_AST_INTERP_SHARED_H
+#define LLVM_CLANG_LIB_AST_INTERP_SHARED_H
+
+#include "llvm/ADT/BitVector.h"
+
+namespace clang {
+class FunctionDecl;
+class Expr;
+
+namespace interp {
+
+llvm::BitVector collectNonNullArgs(const FunctionDecl *F,
+ const llvm::ArrayRef<const Expr *> &Args);
+
+} // namespace interp
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.cpp
index 91fe40feb767..c7024740d322 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.cpp
@@ -10,6 +10,7 @@
#include "Boolean.h"
#include "Floating.h"
#include "Integral.h"
+#include "MemberPointer.h"
#include "Pointer.h"
#include <cassert>
#include <cstdlib>
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.h
index 3fd0f63c781f..4966e2870de6 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.h
@@ -15,6 +15,7 @@
#include "FunctionPointer.h"
#include "IntegralAP.h"
+#include "MemberPointer.h"
#include "PrimType.h"
#include <memory>
#include <vector>
@@ -47,7 +48,6 @@ public:
#endif
T *Ptr = &peekInternal<T>();
T Value = std::move(*Ptr);
- Ptr->~T();
shrink(aligned_size<T>());
return Value;
}
@@ -188,6 +188,8 @@ private:
return PT_IntAP;
else if constexpr (std::is_same_v<T, IntegralAP<false>>)
return PT_IntAP;
+ else if constexpr (std::is_same_v<T, MemberPointer>)
+ return PT_MemberPtr;
llvm_unreachable("unknown type push()'ed into InterpStack");
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpState.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpState.cpp
index 2cb87ef07fe5..4ea05305540e 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpState.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpState.cpp
@@ -33,6 +33,18 @@ InterpState::~InterpState() {
}
}
+void InterpState::cleanup() {
+ // As a last resort, make sure all pointers still pointing to a dead block
+ // don't point to it anymore.
+ for (DeadBlock *DB = DeadBlocks; DB; DB = DB->Next) {
+ for (Pointer *P = DB->B.Pointers; P; P = P->Next) {
+ P->PointeeStorage.BS.Pointee = nullptr;
+ }
+ }
+
+ Alloc.cleanup();
+}
+
Frame *InterpState::getCurrentFrame() {
if (Current && Current->Caller)
return Current;
@@ -57,17 +69,34 @@ void InterpState::deallocate(Block *B) {
char *Memory =
reinterpret_cast<char *>(std::malloc(sizeof(DeadBlock) + Size));
auto *D = new (Memory) DeadBlock(DeadBlocks, B);
+ std::memset(D->B.rawData(), 0, D->B.getSize());
// Move data and metadata from the old block to the new (dead)block.
- if (Desc->MoveFn) {
+ if (B->IsInitialized && Desc->MoveFn) {
Desc->MoveFn(B, B->data(), D->data(), Desc);
if (Desc->getMetadataSize() > 0)
std::memcpy(D->rawData(), B->rawData(), Desc->getMetadataSize());
}
+ D->B.IsInitialized = B->IsInitialized;
// We moved the contents over to the DeadBlock.
B->IsInitialized = false;
- } else {
+ } else if (B->IsInitialized) {
B->invokeDtor();
}
}
+
+bool InterpState::maybeDiagnoseDanglingAllocations() {
+ bool NoAllocationsLeft = (Alloc.getNumAllocations() == 0);
+
+ if (!checkingPotentialConstantExpression()) {
+ for (const auto &It : Alloc.allocation_sites()) {
+ assert(It.second.size() > 0);
+
+ const Expr *Source = It.first;
+ CCEDiag(Source->getExprLoc(), diag::note_constexpr_memory_leak)
+ << (It.second.size() - 1) << Source->getSourceRange();
+ }
+ }
+ return NoAllocationsLeft;
+}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpState.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpState.h
index 8f84bf6ed2ea..61ee54331c65 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpState.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpState.h
@@ -14,6 +14,7 @@
#define LLVM_CLANG_AST_INTERP_INTERPSTATE_H
#include "Context.h"
+#include "DynamicAllocator.h"
#include "Function.h"
#include "InterpFrame.h"
#include "InterpStack.h"
@@ -39,6 +40,8 @@ public:
~InterpState();
+ void cleanup();
+
InterpState(const InterpState &) = delete;
InterpState &operator=(const InterpState &) = delete;
@@ -89,18 +92,34 @@ public:
/// Delegates source mapping to the mapper.
SourceInfo getSource(const Function *F, CodePtr PC) const override {
- return M ? M->getSource(F, PC) : F->getSource(PC);
+ if (M)
+ return M->getSource(F, PC);
+
+ assert(F && "Function cannot be null");
+ return F->getSource(PC);
}
Context &getContext() const { return Ctx; }
+ void setEvalLocation(SourceLocation SL) { this->EvalLocation = SL; }
+
+ DynamicAllocator &getAllocator() { return Alloc; }
+
+ /// Diagnose any dynamic allocations that haven't been freed yet.
+ /// Will return \c false if there were any allocations to diagnose,
+ /// \c true otherwise.
+ bool maybeDiagnoseDanglingAllocations();
+
private:
+ friend class EvaluationResult;
/// AST Walker state.
State &Parent;
/// Dead block chain.
DeadBlock *DeadBlocks = nullptr;
/// Reference to the offset-source mapping.
SourceMapper *M;
+ /// Allocator used for dynamic allocations performed via the program.
+ DynamicAllocator Alloc;
public:
/// Reference to the module containing all bytecode.
@@ -111,6 +130,14 @@ public:
Context &Ctx;
/// The current frame.
InterpFrame *Current = nullptr;
+ /// Source location of the evaluating expression
+ SourceLocation EvalLocation;
+ /// Declaration we're initializing/evaluting, if any.
+ const VarDecl *EvaluatingDecl = nullptr;
+
+ llvm::SmallVector<
+ std::pair<const Expr *, const LifetimeExtendedTemporaryDecl *>>
+ SeenGlobalTemporaries;
};
} // namespace interp
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/MemberPointer.cpp b/contrib/llvm-project/clang/lib/AST/Interp/MemberPointer.cpp
new file mode 100644
index 000000000000..0c1b6edc5f7e
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/MemberPointer.cpp
@@ -0,0 +1,76 @@
+//===------------------------- MemberPointer.cpp ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "MemberPointer.h"
+#include "Context.h"
+#include "FunctionPointer.h"
+#include "Program.h"
+#include "Record.h"
+
+namespace clang {
+namespace interp {
+
+std::optional<Pointer> MemberPointer::toPointer(const Context &Ctx) const {
+ if (!Dcl || isa<FunctionDecl>(Dcl))
+ return Base;
+ const FieldDecl *FD = cast<FieldDecl>(Dcl);
+ assert(FD);
+
+ if (!Base.isBlockPointer())
+ return std::nullopt;
+
+ Pointer CastedBase =
+ (PtrOffset < 0 ? Base.atField(-PtrOffset) : Base.atFieldSub(PtrOffset));
+
+ const Record *BaseRecord = CastedBase.getRecord();
+ if (!BaseRecord)
+ return std::nullopt;
+
+ assert(BaseRecord);
+ if (FD->getParent() == BaseRecord->getDecl())
+ return CastedBase.atField(BaseRecord->getField(FD)->Offset);
+
+ const RecordDecl *FieldParent = FD->getParent();
+ const Record *FieldRecord = Ctx.getRecord(FieldParent);
+
+ unsigned Offset = 0;
+ Offset += FieldRecord->getField(FD)->Offset;
+ Offset += CastedBase.block()->getDescriptor()->getMetadataSize();
+
+ if (Offset > CastedBase.block()->getSize())
+ return std::nullopt;
+
+ if (const RecordDecl *BaseDecl = Base.getDeclPtr().getRecord()->getDecl();
+ BaseDecl != FieldParent)
+ Offset += Ctx.collectBaseOffset(FieldParent, BaseDecl);
+
+ if (Offset > CastedBase.block()->getSize())
+ return std::nullopt;
+
+ assert(Offset <= CastedBase.block()->getSize());
+ return Pointer(const_cast<Block *>(Base.block()), Offset, Offset);
+}
+
+FunctionPointer MemberPointer::toFunctionPointer(const Context &Ctx) const {
+ return FunctionPointer(Ctx.getProgram().getFunction(cast<FunctionDecl>(Dcl)));
+}
+
+APValue MemberPointer::toAPValue(const ASTContext &ASTCtx) const {
+ if (isZero())
+ return APValue(static_cast<ValueDecl *>(nullptr), /*IsDerivedMember=*/false,
+ /*Path=*/{});
+
+ if (hasBase())
+ return Base.toAPValue(ASTCtx);
+
+ return APValue(cast<ValueDecl>(getDecl()), /*IsDerivedMember=*/false,
+ /*Path=*/{});
+}
+
+} // namespace interp
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/MemberPointer.h b/contrib/llvm-project/clang/lib/AST/Interp/MemberPointer.h
new file mode 100644
index 000000000000..2b3be124db42
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/MemberPointer.h
@@ -0,0 +1,112 @@
+//===------------------------- MemberPointer.h ------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_INTERP_MEMBER_POINTER_H
+#define LLVM_CLANG_AST_INTERP_MEMBER_POINTER_H
+
+#include "Pointer.h"
+#include <optional>
+
+namespace clang {
+class ASTContext;
+namespace interp {
+
+class Context;
+class FunctionPointer;
+
+class MemberPointer final {
+private:
+ Pointer Base;
+ const Decl *Dcl = nullptr;
+ int32_t PtrOffset = 0;
+
+ MemberPointer(Pointer Base, const Decl *Dcl, int32_t PtrOffset)
+ : Base(Base), Dcl(Dcl), PtrOffset(PtrOffset) {}
+
+public:
+ MemberPointer() = default;
+ MemberPointer(Pointer Base, const Decl *Dcl) : Base(Base), Dcl(Dcl) {}
+ MemberPointer(uint32_t Address, const Descriptor *D) {
+ // We only reach this for Address == 0, when creating a null member pointer.
+ assert(Address == 0);
+ }
+
+ MemberPointer(const Decl *D) : Dcl(D) {
+ assert((isa<FieldDecl, IndirectFieldDecl, CXXMethodDecl>(D)));
+ }
+
+ uint64_t getIntegerRepresentation() const {
+ assert(
+ false &&
+ "getIntegerRepresentation() shouldn't be reachable for MemberPointers");
+ return 17;
+ }
+
+ std::optional<Pointer> toPointer(const Context &Ctx) const;
+
+ FunctionPointer toFunctionPointer(const Context &Ctx) const;
+
+ Pointer getBase() const {
+ if (PtrOffset < 0)
+ return Base.atField(-PtrOffset);
+ return Base.atFieldSub(PtrOffset);
+ }
+ bool isMemberFunctionPointer() const {
+ return isa_and_nonnull<CXXMethodDecl>(Dcl);
+ }
+ const CXXMethodDecl *getMemberFunction() const {
+ return dyn_cast_if_present<CXXMethodDecl>(Dcl);
+ }
+ const FieldDecl *getField() const {
+ return dyn_cast_if_present<FieldDecl>(Dcl);
+ }
+
+ bool hasDecl() const { return Dcl; }
+ const Decl *getDecl() const { return Dcl; }
+
+ MemberPointer atInstanceBase(unsigned Offset) const {
+ if (Base.isZero())
+ return MemberPointer(Base, Dcl, Offset);
+ return MemberPointer(this->Base, Dcl, Offset + PtrOffset);
+ }
+
+ MemberPointer takeInstance(Pointer Instance) const {
+ assert(this->Base.isZero());
+ return MemberPointer(Instance, this->Dcl, this->PtrOffset);
+ }
+
+ APValue toAPValue(const ASTContext &) const;
+
+ bool isZero() const { return Base.isZero() && !Dcl; }
+ bool hasBase() const { return !Base.isZero(); }
+
+ void print(llvm::raw_ostream &OS) const {
+ OS << "MemberPtr(" << Base << " " << (const void *)Dcl << " + " << PtrOffset
+ << ")";
+ }
+
+ std::string toDiagnosticString(const ASTContext &Ctx) const {
+ return "FIXME";
+ }
+
+ ComparisonCategoryResult compare(const MemberPointer &RHS) const {
+ if (this->Dcl == RHS.Dcl)
+ return ComparisonCategoryResult::Equal;
+ return ComparisonCategoryResult::Unordered;
+ }
+};
+
+inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, MemberPointer FP) {
+ FP.print(OS);
+ return OS;
+}
+
+} // namespace interp
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Opcodes.td b/contrib/llvm-project/clang/lib/AST/Interp/Opcodes.td
index 24747b6b98c1..9f29fa927271 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Opcodes.td
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Opcodes.td
@@ -30,12 +30,13 @@ def IntAPS : Type;
def Float : Type;
def Ptr : Type;
def FnPtr : Type;
+def MemberPtr : Type;
//===----------------------------------------------------------------------===//
// Types transferred to the interpreter.
//===----------------------------------------------------------------------===//
-class ArgType { string Name = ?; }
+class ArgType { string Name = ?; bit AsRef = false; }
def ArgSint8 : ArgType { let Name = "int8_t"; }
def ArgUint8 : ArgType { let Name = "uint8_t"; }
def ArgSint16 : ArgType { let Name = "int16_t"; }
@@ -44,7 +45,9 @@ def ArgSint32 : ArgType { let Name = "int32_t"; }
def ArgUint32 : ArgType { let Name = "uint32_t"; }
def ArgSint64 : ArgType { let Name = "int64_t"; }
def ArgUint64 : ArgType { let Name = "uint64_t"; }
-def ArgFloat : ArgType { let Name = "Floating"; }
+def ArgIntAP : ArgType { let Name = "IntegralAP<false>"; let AsRef = true; }
+def ArgIntAPS : ArgType { let Name = "IntegralAP<true>"; let AsRef = true; }
+def ArgFloat : ArgType { let Name = "Floating"; let AsRef = true; }
def ArgBool : ArgType { let Name = "bool"; }
def ArgFunction : ArgType { let Name = "const Function *"; }
@@ -55,9 +58,15 @@ def ArgRoundingMode : ArgType { let Name = "llvm::RoundingMode"; }
def ArgLETD: ArgType { let Name = "const LifetimeExtendedTemporaryDecl *"; }
def ArgCastKind : ArgType { let Name = "CastKind"; }
def ArgCallExpr : ArgType { let Name = "const CallExpr *"; }
+def ArgExpr : ArgType { let Name = "const Expr *"; }
def ArgOffsetOfExpr : ArgType { let Name = "const OffsetOfExpr *"; }
def ArgDeclRef : ArgType { let Name = "const DeclRefExpr *"; }
def ArgCCI : ArgType { let Name = "const ComparisonCategoryInfo *"; }
+def ArgDecl : ArgType { let Name = "const Decl*"; }
+def ArgVarDecl : ArgType { let Name = "const VarDecl*"; }
+def ArgDesc : ArgType { let Name = "const Descriptor *"; }
+def ArgPrimType : ArgType { let Name = "PrimType"; }
+def ArgEnumDecl : ArgType { let Name = "const EnumDecl *"; }
//===----------------------------------------------------------------------===//
// Classes of types instructions operate on.
@@ -90,7 +99,7 @@ def AluTypeClass : TypeClass {
}
def PtrTypeClass : TypeClass {
- let Types = [Ptr, FnPtr];
+ let Types = [Ptr, FnPtr, MemberPtr];
}
def BoolTypeClass : TypeClass {
@@ -134,7 +143,6 @@ class AluOpcode : Opcode {
}
class FloatOpcode : Opcode {
- let Types = [];
let Args = [ArgRoundingMode];
}
@@ -189,23 +197,23 @@ def NoRet : Opcode {}
def Call : Opcode {
- let Args = [ArgFunction];
- let Types = [];
+ let Args = [ArgFunction, ArgUint32];
}
def CallVirt : Opcode {
- let Args = [ArgFunction];
- let Types = [];
+ let Args = [ArgFunction, ArgUint32];
}
def CallBI : Opcode {
let Args = [ArgFunction, ArgCallExpr];
- let Types = [];
}
def CallPtr : Opcode {
- let Args = [];
- let Types = [];
+ let Args = [ArgUint32, ArgCallExpr];
+}
+
+def CallVar : Opcode {
+ let Args = [ArgFunction, ArgUint32];
}
def OffsetOf : Opcode {
@@ -244,6 +252,8 @@ def ConstUint32 : ConstOpcode<Uint32, ArgUint32>;
def ConstSint64 : ConstOpcode<Sint64, ArgSint64>;
def ConstUint64 : ConstOpcode<Uint64, ArgUint64>;
def ConstFloat : ConstOpcode<Float, ArgFloat>;
+def constIntAP : ConstOpcode<IntAP, ArgIntAP>;
+def constIntAPS : ConstOpcode<IntAPS, ArgIntAPS>;
def ConstBool : ConstOpcode<Bool, ArgBool>;
// [] -> [Integer]
@@ -263,70 +273,53 @@ def ZeroIntAPS : Opcode {
// [] -> [Pointer]
def Null : Opcode {
let Types = [PtrTypeClass];
+ let Args = [ArgDesc];
let HasGroup = 1;
}
//===----------------------------------------------------------------------===//
// Pointer generation
//===----------------------------------------------------------------------===//
+class OffsetOpcode : Opcode {
+ let Args = [ArgUint32];
+}
// [] -> [Pointer]
-def GetPtrLocal : Opcode {
- // Offset of local.
- let Args = [ArgUint32];
+def GetPtrLocal : OffsetOpcode {
bit HasCustomEval = 1;
}
// [] -> [Pointer]
-def GetPtrParam : Opcode {
- // Offset of parameter.
- let Args = [ArgUint32];
-}
+def GetPtrParam : OffsetOpcode;
// [] -> [Pointer]
-def GetPtrGlobal : Opcode {
- // Index of global.
- let Args = [ArgUint32];
-}
+def GetPtrGlobal : OffsetOpcode;
// [Pointer] -> [Pointer]
-def GetPtrField : Opcode {
- // Offset of field.
- let Args = [ArgUint32];
-}
+def GetPtrField : OffsetOpcode;
+def GetPtrFieldPop : OffsetOpcode;
// [Pointer] -> [Pointer]
-def GetPtrActiveField : Opcode {
- // Offset of field.
- let Args = [ArgUint32];
-}
+def GetPtrActiveField : OffsetOpcode;
// [] -> [Pointer]
-def GetPtrActiveThisField : Opcode {
- // Offset of field.
- let Args = [ArgUint32];
-}
+def GetPtrActiveThisField : OffsetOpcode;
// [] -> [Pointer]
-def GetPtrThisField : Opcode {
- // Offset of field.
- let Args = [ArgUint32];
-}
+def GetPtrThisField : OffsetOpcode;
// [Pointer] -> [Pointer]
-def GetPtrBase : Opcode {
- // Offset of field, which is a base.
- let Args = [ArgUint32];
-}
+def GetPtrBase : OffsetOpcode;
// [Pointer] -> [Pointer]
-def GetPtrBasePop : Opcode {
+def GetPtrBasePop : OffsetOpcode;
+def GetMemberPtrBasePop : Opcode {
// Offset of field, which is a base.
- let Args = [ArgUint32];
+ let Args = [ArgSint32];
}
-def InitPtrPop : Opcode {
- let Args = [];
-}
+
+def FinishInitPop : Opcode;
+def FinishInit : Opcode;
def GetPtrDerivedPop : Opcode {
let Args = [ArgUint32];
}
// [Pointer] -> [Pointer]
-def GetPtrVirtBase : Opcode {
+def GetPtrVirtBasePop : Opcode {
// RecordDecl of base class.
let Args = [ArgRecordDecl];
}
@@ -354,6 +347,24 @@ def ExpandPtr : Opcode;
def ArrayElemPtr : AluOpcode;
def ArrayElemPtrPop : AluOpcode;
+def ArrayElemPop : Opcode {
+ let Args = [ArgUint32];
+ let Types = [AllTypeClass];
+ let HasGroup = 1;
+}
+
+def ArrayElem : Opcode {
+ let Args = [ArgUint32];
+ let Types = [AllTypeClass];
+ let HasGroup = 1;
+}
+
+def CopyArray : Opcode {
+ let Args = [ArgUint32, ArgUint32, ArgUint32];
+ let Types = [AllTypeClass];
+ let HasGroup = 1;
+}
+
//===----------------------------------------------------------------------===//
// Direct field accessors
//===----------------------------------------------------------------------===//
@@ -375,6 +386,16 @@ def GetLocal : AccessOpcode { let HasCustomEval = 1; }
// [] -> [Pointer]
def SetLocal : AccessOpcode { let HasCustomEval = 1; }
+def CheckDecl : Opcode {
+ let Args = [ArgVarDecl];
+}
+
+def CheckEnumValue : Opcode {
+ let Args = [ArgEnumDecl];
+ let Types = [FixedSizeIntegralTypeClass];
+ let HasGroup = 1;
+}
+
// [] -> [Value]
def GetGlobal : AccessOpcode;
def GetGlobalUnchecked : AccessOpcode;
@@ -387,8 +408,6 @@ def InitGlobalTemp : AccessOpcode {
// [Pointer] -> [Pointer]
def InitGlobalTempComp : Opcode {
let Args = [ArgLETD];
- let Types = [];
- let HasGroup = 0;
}
// [Value] -> []
def SetGlobal : AccessOpcode;
@@ -462,6 +481,7 @@ def StoreBitField : StoreBitFieldOpcode {}
def StoreBitFieldPop : StoreBitFieldOpcode {}
// [Pointer, Value] -> []
+def Init : StoreOpcode {}
def InitPop : StoreOpcode {}
// [Pointer, Value] -> [Pointer]
def InitElem : Opcode {
@@ -492,13 +512,9 @@ def SubPtr : Opcode {
}
// [Pointer] -> [Pointer]
-def IncPtr : Opcode {
- let HasGroup = 0;
-}
+def IncPtr : Opcode;
// [Pointer] -> [Pointer]
-def DecPtr : Opcode {
- let HasGroup = 0;
-}
+def DecPtr : Opcode;
//===----------------------------------------------------------------------===//
// Function pointers.
@@ -507,6 +523,11 @@ def GetFnPtr : Opcode {
let Args = [ArgFunction];
}
+def GetIntPtr : Opcode {
+ let Types = [AluTypeClass];
+ let Args = [ArgDesc];
+ let HasGroup = 1;
+}
//===----------------------------------------------------------------------===//
// Binary operators.
@@ -519,9 +540,17 @@ def Sub : AluOpcode;
def Subf : FloatOpcode;
def Mul : AluOpcode;
def Mulf : FloatOpcode;
+def Mulc : Opcode {
+ let Types = [NumberTypeClass];
+ let HasGroup = 1;
+}
def Rem : IntegerOpcode;
def Div : IntegerOpcode;
def Divf : FloatOpcode;
+def Divc : Opcode {
+ let Types = [NumberTypeClass];
+ let HasGroup = 1;
+}
def BitAnd : IntegerOpcode;
def BitOr : IntegerOpcode;
@@ -548,10 +577,10 @@ def Inv: Opcode {
}
// Increment and decrement.
-def Inc: IntegerOpcode;
-def IncPop : IntegerOpcode;
-def Dec: IntegerOpcode;
-def DecPop: IntegerOpcode;
+def Inc: AluOpcode;
+def IncPop : AluOpcode;
+def Dec: AluOpcode;
+def DecPop: AluOpcode;
// Float increment and decrement.
def Incf: FloatOpcode;
@@ -589,7 +618,6 @@ def Cast: Opcode {
}
def CastFP : Opcode {
- let Types = [];
let Args = [ArgFltSemantics, ArgRoundingMode];
}
@@ -624,18 +652,30 @@ def CastFloatingIntegral : Opcode {
}
def CastFloatingIntegralAP : Opcode {
- let Types = [];
let Args = [ArgUint32];
}
def CastFloatingIntegralAPS : Opcode {
- let Types = [];
let Args = [ArgUint32];
}
def CastPointerIntegral : Opcode {
- let Types = [AluTypeClass];
- let Args = [];
+ let Types = [FixedSizeIntegralTypeClass];
+ let HasGroup = 1;
+}
+def CastPointerIntegralAP : Opcode {
+ let Args = [ArgUint32];
+}
+def CastPointerIntegralAPS : Opcode {
+ let Args = [ArgUint32];
+}
+def PtrPtrCast : Opcode {
+ let Args = [ArgBool];
+
+}
+
+def DecayPtr : Opcode {
+ let Types = [PtrTypeClass, PtrTypeClass];
let HasGroup = 1;
}
@@ -683,6 +723,8 @@ def Dup : Opcode {
// [] -> []
def Invalid : Opcode {}
+def Unsupported : Opcode {}
+def Error : Opcode {}
def InvalidCast : Opcode {
let Args = [ArgCastKind];
}
@@ -691,4 +733,48 @@ def InvalidDeclRef : Opcode {
let Args = [ArgDeclRef];
}
+def SizelessVectorElementSize : Opcode;
+
+def Assume : Opcode;
+
def ArrayDecay : Opcode;
+
+def CheckNonNullArg : Opcode {
+ let Types = [PtrTypeClass];
+ let HasGroup = 1;
+}
+
+def Memcpy : Opcode;
+
+def ToMemberPtr : Opcode;
+def CastMemberPtrPtr : Opcode;
+def GetMemberPtr : Opcode {
+ let Args = [ArgDecl];
+}
+def GetMemberPtrBase : Opcode;
+def GetMemberPtrDecl : Opcode;
+
+//===----------------------------------------------------------------------===//
+// Debugging.
+//===----------------------------------------------------------------------===//
+def Dump : Opcode;
+
+def Alloc : Opcode {
+ let Args = [ArgDesc];
+}
+
+def AllocN : Opcode {
+ let Types = [IntegerTypeClass];
+ let Args = [ArgPrimType, ArgExpr, ArgBool];
+ let HasGroup = 1;
+}
+
+def AllocCN : Opcode {
+ let Types = [IntegerTypeClass];
+ let Args = [ArgDesc, ArgBool];
+ let HasGroup = 1;
+}
+
+def Free : Opcode {
+ let Args = [ArgBool];
+}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp
index 5af1d6d52e93..29579f5db40b 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp
@@ -13,156 +13,271 @@
#include "Function.h"
#include "Integral.h"
#include "InterpBlock.h"
+#include "MemberPointer.h"
#include "PrimType.h"
#include "Record.h"
+#include "clang/AST/RecordLayout.h"
using namespace clang;
using namespace clang::interp;
-Pointer::Pointer(Block *Pointee) : Pointer(Pointee, 0, 0) {}
+Pointer::Pointer(Block *Pointee)
+ : Pointer(Pointee, Pointee->getDescriptor()->getMetadataSize(),
+ Pointee->getDescriptor()->getMetadataSize()) {}
-Pointer::Pointer(Block *Pointee, unsigned BaseAndOffset)
+Pointer::Pointer(Block *Pointee, uint64_t BaseAndOffset)
: Pointer(Pointee, BaseAndOffset, BaseAndOffset) {}
-Pointer::Pointer(const Pointer &P) : Pointer(P.Pointee, P.Base, P.Offset) {}
+Pointer::Pointer(const Pointer &P)
+ : Offset(P.Offset), PointeeStorage(P.PointeeStorage),
+ StorageKind(P.StorageKind) {
-Pointer::Pointer(Pointer &&P)
- : Pointee(P.Pointee), Base(P.Base), Offset(P.Offset) {
- if (Pointee)
- Pointee->replacePointer(&P, this);
+ if (isBlockPointer() && PointeeStorage.BS.Pointee)
+ PointeeStorage.BS.Pointee->addPointer(this);
}
-Pointer::Pointer(Block *Pointee, unsigned Base, unsigned Offset)
- : Pointee(Pointee), Base(Base), Offset(Offset) {
+Pointer::Pointer(Block *Pointee, unsigned Base, uint64_t Offset)
+ : Offset(Offset), StorageKind(Storage::Block) {
assert((Base == RootPtrMark || Base % alignof(void *) == 0) && "wrong base");
+
+ PointeeStorage.BS = {Pointee, Base};
+
if (Pointee)
Pointee->addPointer(this);
}
+Pointer::Pointer(Pointer &&P)
+ : Offset(P.Offset), PointeeStorage(P.PointeeStorage),
+ StorageKind(P.StorageKind) {
+
+ if (StorageKind == Storage::Block && PointeeStorage.BS.Pointee)
+ PointeeStorage.BS.Pointee->replacePointer(&P, this);
+}
+
Pointer::~Pointer() {
- if (Pointee) {
+ if (isIntegralPointer())
+ return;
+
+ if (Block *Pointee = PointeeStorage.BS.Pointee) {
Pointee->removePointer(this);
Pointee->cleanup();
}
}
void Pointer::operator=(const Pointer &P) {
- Block *Old = Pointee;
-
- if (Pointee)
- Pointee->removePointer(this);
+ // If the current storage type is Block, we need to remove
+ // this pointer from the block.
+ bool WasBlockPointer = isBlockPointer();
+ if (StorageKind == Storage::Block) {
+ Block *Old = PointeeStorage.BS.Pointee;
+ if (WasBlockPointer && Old) {
+ PointeeStorage.BS.Pointee->removePointer(this);
+ Old->cleanup();
+ }
+ }
+ StorageKind = P.StorageKind;
Offset = P.Offset;
- Base = P.Base;
- Pointee = P.Pointee;
- if (Pointee)
- Pointee->addPointer(this);
+ if (P.isBlockPointer()) {
+ PointeeStorage.BS = P.PointeeStorage.BS;
+ PointeeStorage.BS.Pointee = P.PointeeStorage.BS.Pointee;
- if (Old)
- Old->cleanup();
+ if (PointeeStorage.BS.Pointee)
+ PointeeStorage.BS.Pointee->addPointer(this);
+ } else if (P.isIntegralPointer()) {
+ PointeeStorage.Int = P.PointeeStorage.Int;
+ } else {
+ assert(false && "Unhandled storage kind");
+ }
}
void Pointer::operator=(Pointer &&P) {
- Block *Old = Pointee;
-
- if (Pointee)
- Pointee->removePointer(this);
+ // If the current storage type is Block, we need to remove
+ // this pointer from the block.
+ bool WasBlockPointer = isBlockPointer();
+ if (StorageKind == Storage::Block) {
+ Block *Old = PointeeStorage.BS.Pointee;
+ if (WasBlockPointer && Old) {
+ PointeeStorage.BS.Pointee->removePointer(this);
+ Old->cleanup();
+ }
+ }
+ StorageKind = P.StorageKind;
Offset = P.Offset;
- Base = P.Base;
- Pointee = P.Pointee;
- if (Pointee)
- Pointee->replacePointer(&P, this);
+ if (P.isBlockPointer()) {
+ PointeeStorage.BS = P.PointeeStorage.BS;
+ PointeeStorage.BS.Pointee = P.PointeeStorage.BS.Pointee;
- if (Old)
- Old->cleanup();
+ if (PointeeStorage.BS.Pointee)
+ PointeeStorage.BS.Pointee->addPointer(this);
+ } else if (P.isIntegralPointer()) {
+ PointeeStorage.Int = P.PointeeStorage.Int;
+ } else {
+ assert(false && "Unhandled storage kind");
+ }
}
-APValue Pointer::toAPValue() const {
- APValue::LValueBase Base;
+APValue Pointer::toAPValue(const ASTContext &ASTCtx) const {
llvm::SmallVector<APValue::LValuePathEntry, 5> Path;
- CharUnits Offset;
- bool IsNullPtr;
- bool IsOnePastEnd;
-
- if (isZero()) {
- Base = static_cast<const Expr *>(nullptr);
- IsNullPtr = true;
- IsOnePastEnd = false;
- Offset = CharUnits::Zero();
- } else {
- // Build the lvalue base from the block.
- const Descriptor *Desc = getDeclDesc();
- if (auto *VD = Desc->asValueDecl())
- Base = VD;
- else if (auto *E = Desc->asExpr())
- Base = E;
- else
- llvm_unreachable("Invalid allocation type");
-
- // Not a null pointer.
- IsNullPtr = false;
-
- if (isUnknownSizeArray()) {
- IsOnePastEnd = false;
- Offset = CharUnits::Zero();
- } else if (Desc->asExpr()) {
- // Pointer pointing to a an expression.
- IsOnePastEnd = false;
- Offset = CharUnits::Zero();
+
+ if (isZero())
+ return APValue(static_cast<const Expr *>(nullptr), CharUnits::Zero(), Path,
+ /*IsOnePastEnd=*/false, /*IsNullPtr=*/true);
+ if (isIntegralPointer())
+ return APValue(static_cast<const Expr *>(nullptr),
+ CharUnits::fromQuantity(asIntPointer().Value + this->Offset),
+ Path,
+ /*IsOnePastEnd=*/false, /*IsNullPtr=*/false);
+
+ // Build the lvalue base from the block.
+ const Descriptor *Desc = getDeclDesc();
+ APValue::LValueBase Base;
+ if (const auto *VD = Desc->asValueDecl())
+ Base = VD;
+ else if (const auto *E = Desc->asExpr())
+ Base = E;
+ else
+ llvm_unreachable("Invalid allocation type");
+
+ if (isUnknownSizeArray() || Desc->asExpr())
+ return APValue(Base, CharUnits::Zero(), Path,
+ /*IsOnePastEnd=*/isOnePastEnd(), /*IsNullPtr=*/false);
+
+ CharUnits Offset = CharUnits::Zero();
+
+ auto getFieldOffset = [&](const FieldDecl *FD) -> CharUnits {
+ // This shouldn't happen, but if it does, don't crash inside
+ // getASTRecordLayout.
+ if (FD->getParent()->isInvalidDecl())
+ return CharUnits::Zero();
+ const ASTRecordLayout &Layout = ASTCtx.getASTRecordLayout(FD->getParent());
+ unsigned FieldIndex = FD->getFieldIndex();
+ return ASTCtx.toCharUnitsFromBits(Layout.getFieldOffset(FieldIndex));
+ };
+
+ // Build the path into the object.
+ Pointer Ptr = *this;
+ while (Ptr.isField() || Ptr.isArrayElement()) {
+ if (Ptr.isArrayRoot()) {
+ Path.push_back(APValue::LValuePathEntry(
+ {Ptr.getFieldDesc()->asDecl(), /*IsVirtual=*/false}));
+
+ if (const auto *FD = dyn_cast<FieldDecl>(Ptr.getFieldDesc()->asDecl()))
+ Offset += getFieldOffset(FD);
+
+ Ptr = Ptr.getBase();
+ } else if (Ptr.isArrayElement()) {
+ unsigned Index;
+ if (Ptr.isOnePastEnd())
+ Index = Ptr.getArray().getNumElems();
+ else
+ Index = Ptr.getIndex();
+
+ Offset += (Index * ASTCtx.getTypeSizeInChars(Ptr.getType()));
+ Path.push_back(APValue::LValuePathEntry::ArrayIndex(Index));
+ Ptr = Ptr.getArray();
} else {
- // TODO: compute the offset into the object.
- Offset = CharUnits::Zero();
-
- // Build the path into the object.
- Pointer Ptr = *this;
- while (Ptr.isField() || Ptr.isArrayElement()) {
- if (Ptr.isArrayElement()) {
- Path.push_back(APValue::LValuePathEntry::ArrayIndex(Ptr.getIndex()));
- Ptr = Ptr.getArray();
+ bool IsVirtual = false;
+
+ // Create a path entry for the field.
+ const Descriptor *Desc = Ptr.getFieldDesc();
+ if (const auto *BaseOrMember = Desc->asDecl()) {
+ if (const auto *FD = dyn_cast<FieldDecl>(BaseOrMember)) {
+ Ptr = Ptr.getBase();
+ Offset += getFieldOffset(FD);
+ } else if (const auto *RD = dyn_cast<CXXRecordDecl>(BaseOrMember)) {
+ IsVirtual = Ptr.isVirtualBaseClass();
+ Ptr = Ptr.getBase();
+ const Record *BaseRecord = Ptr.getRecord();
+
+ const ASTRecordLayout &Layout = ASTCtx.getASTRecordLayout(
+ cast<CXXRecordDecl>(BaseRecord->getDecl()));
+ if (IsVirtual)
+ Offset += Layout.getVBaseClassOffset(RD);
+ else
+ Offset += Layout.getBaseClassOffset(RD);
+
} else {
- // TODO: figure out if base is virtual
- bool IsVirtual = false;
-
- // Create a path entry for the field.
- const Descriptor *Desc = Ptr.getFieldDesc();
- if (const auto *BaseOrMember = Desc->asDecl()) {
- Path.push_back(APValue::LValuePathEntry({BaseOrMember, IsVirtual}));
- Ptr = Ptr.getBase();
- continue;
- }
- llvm_unreachable("Invalid field type");
+ Ptr = Ptr.getBase();
}
+ Path.push_back(APValue::LValuePathEntry({BaseOrMember, IsVirtual}));
+ continue;
}
-
- IsOnePastEnd = isOnePastEnd();
+ llvm_unreachable("Invalid field type");
}
}
+ // FIXME(perf): We compute the lvalue path above, but we can't supply it
+ // for dummy pointers (that causes crashes later in CheckConstantExpression).
+ if (isDummy())
+ Path.clear();
+
// We assemble the LValuePath starting from the innermost pointer to the
// outermost one. SO in a.b.c, the first element in Path will refer to
// the field 'c', while later code expects it to refer to 'a'.
// Just invert the order of the elements.
std::reverse(Path.begin(), Path.end());
- return APValue(Base, Offset, Path, IsOnePastEnd, IsNullPtr);
+ return APValue(Base, Offset, Path, /*IsOnePastEnd=*/isOnePastEnd(),
+ /*IsNullPtr=*/false);
+}
+
+void Pointer::print(llvm::raw_ostream &OS) const {
+ OS << PointeeStorage.BS.Pointee << " (";
+ if (isBlockPointer()) {
+ const Block *B = PointeeStorage.BS.Pointee;
+ OS << "Block) {";
+
+ if (isRoot())
+ OS << "rootptr(" << PointeeStorage.BS.Base << "), ";
+ else
+ OS << PointeeStorage.BS.Base << ", ";
+
+ if (isElementPastEnd())
+ OS << "pastend, ";
+ else
+ OS << Offset << ", ";
+
+ if (B)
+ OS << B->getSize();
+ else
+ OS << "nullptr";
+ } else {
+ OS << "Int) {";
+ OS << PointeeStorage.Int.Value << ", " << PointeeStorage.Int.Desc;
+ }
+ OS << "}";
}
std::string Pointer::toDiagnosticString(const ASTContext &Ctx) const {
- if (!Pointee)
+ if (isZero())
return "nullptr";
- return toAPValue().getAsString(Ctx, getType());
+ if (isIntegralPointer())
+ return (Twine("&(") + Twine(asIntPointer().Value + Offset) + ")").str();
+
+ return toAPValue(Ctx).getAsString(Ctx, getType());
}
bool Pointer::isInitialized() const {
- assert(Pointee && "Cannot check if null pointer was initialized");
+ if (isIntegralPointer())
+ return true;
+
+ if (isRoot() && PointeeStorage.BS.Base == sizeof(GlobalInlineDescriptor)) {
+ const GlobalInlineDescriptor &GD =
+ *reinterpret_cast<const GlobalInlineDescriptor *>(block()->rawData());
+ return GD.InitState == GlobalInitState::Initialized;
+ }
+
+ assert(PointeeStorage.BS.Pointee &&
+ "Cannot check if null pointer was initialized");
const Descriptor *Desc = getFieldDesc();
assert(Desc);
if (Desc->isPrimitiveArray()) {
- if (isStatic() && Base == 0)
+ if (isStatic() && PointeeStorage.BS.Base == 0)
return true;
InitMapPtr &IM = getInitMap();
@@ -176,18 +291,35 @@ bool Pointer::isInitialized() const {
return IM->second->isElementInitialized(getIndex());
}
+ if (asBlockPointer().Base == 0)
+ return true;
+
// Field has its bit in an inline descriptor.
- return Base == 0 || getInlineDesc()->IsInitialized;
+ return getInlineDesc()->IsInitialized;
}
void Pointer::initialize() const {
- assert(Pointee && "Cannot initialize null pointer");
+ if (isIntegralPointer())
+ return;
+
+ assert(PointeeStorage.BS.Pointee && "Cannot initialize null pointer");
const Descriptor *Desc = getFieldDesc();
+ if (isRoot() && PointeeStorage.BS.Base == sizeof(GlobalInlineDescriptor)) {
+ GlobalInlineDescriptor &GD = *reinterpret_cast<GlobalInlineDescriptor *>(
+ asBlockPointer().Pointee->rawData());
+ GD.InitState = GlobalInitState::Initialized;
+ return;
+ }
+
assert(Desc);
if (Desc->isPrimitiveArray()) {
// Primitive global arrays don't have an initmap.
- if (isStatic() && Base == 0)
+ if (isStatic() && PointeeStorage.BS.Base == 0)
+ return;
+
+ // Nothing to do for these.
+ if (Desc->getNumElems() == 0)
return;
InitMapPtr &IM = getInitMap();
@@ -209,13 +341,19 @@ void Pointer::initialize() const {
}
// Field has its bit in an inline descriptor.
- assert(Base != 0 && "Only composite fields can be initialised");
+ assert(PointeeStorage.BS.Base != 0 &&
+ "Only composite fields can be initialised");
getInlineDesc()->IsInitialized = true;
}
void Pointer::activate() const {
// Field has its bit in an inline descriptor.
- assert(Base != 0 && "Only composite fields can be initialised");
+ assert(PointeeStorage.BS.Base != 0 &&
+ "Only composite fields can be initialised");
+
+ if (isRoot() && PointeeStorage.BS.Base == sizeof(GlobalInlineDescriptor))
+ return;
+
getInlineDesc()->IsActive = true;
}
@@ -224,32 +362,44 @@ void Pointer::deactivate() const {
}
bool Pointer::hasSameBase(const Pointer &A, const Pointer &B) {
- return A.Pointee == B.Pointee;
+ // Two null pointers always have the same base.
+ if (A.isZero() && B.isZero())
+ return true;
+
+ if (A.isIntegralPointer() && B.isIntegralPointer())
+ return true;
+
+ if (A.isIntegralPointer() || B.isIntegralPointer())
+ return A.getSource() == B.getSource();
+
+ return A.asBlockPointer().Pointee == B.asBlockPointer().Pointee;
}
bool Pointer::hasSameArray(const Pointer &A, const Pointer &B) {
- return hasSameBase(A, B) && A.Base == B.Base && A.getFieldDesc()->IsArray;
+ return hasSameBase(A, B) &&
+ A.PointeeStorage.BS.Base == B.PointeeStorage.BS.Base &&
+ A.getFieldDesc()->IsArray;
}
-std::optional<APValue> Pointer::toRValue(const Context &Ctx) const {
+std::optional<APValue> Pointer::toRValue(const Context &Ctx,
+ QualType ResultType) const {
+ const ASTContext &ASTCtx = Ctx.getASTContext();
+ assert(!ResultType.isNull());
// Method to recursively traverse composites.
std::function<bool(QualType, const Pointer &, APValue &)> Composite;
- Composite = [&Composite, &Ctx](QualType Ty, const Pointer &Ptr, APValue &R) {
+ Composite = [&Composite, &Ctx, &ASTCtx](QualType Ty, const Pointer &Ptr,
+ APValue &R) {
if (const auto *AT = Ty->getAs<AtomicType>())
Ty = AT->getValueType();
// Invalid pointers.
- if (Ptr.isDummy() || !Ptr.isLive() ||
- (!Ptr.isUnknownSizeArray() && Ptr.isOnePastEnd()))
+ if (Ptr.isDummy() || !Ptr.isLive() || !Ptr.isBlockPointer() ||
+ Ptr.isPastEnd())
return false;
// Primitive values.
if (std::optional<PrimType> T = Ctx.classify(Ty)) {
- if (T == PT_Ptr || T == PT_FnPtr) {
- R = Ptr.toAPValue();
- } else {
- TYPE_SWITCH(*T, R = Ptr.deref<T>().toAPValue());
- }
+ TYPE_SWITCH(*T, R = Ptr.deref<T>().toAPValue(ASTCtx));
return true;
}
@@ -266,10 +416,11 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx) const {
QualType FieldTy = F.Decl->getType();
if (FP.isActive()) {
if (std::optional<PrimType> T = Ctx.classify(FieldTy)) {
- TYPE_SWITCH(*T, Value = FP.deref<T>().toAPValue());
+ TYPE_SWITCH(*T, Value = FP.deref<T>().toAPValue(ASTCtx));
} else {
Ok &= Composite(FieldTy, FP, Value);
}
+ ActiveField = FP.getFieldDesc()->asFieldDecl();
break;
}
}
@@ -288,7 +439,7 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx) const {
APValue &Value = R.getStructField(I);
if (std::optional<PrimType> T = Ctx.classify(FieldTy)) {
- TYPE_SWITCH(*T, Value = FP.deref<T>().toAPValue());
+ TYPE_SWITCH(*T, Value = FP.deref<T>().toAPValue(ASTCtx));
} else {
Ok &= Composite(FieldTy, FP, Value);
}
@@ -326,7 +477,7 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx) const {
APValue &Slot = R.getArrayInitializedElt(I);
const Pointer &EP = Ptr.atIndex(I);
if (std::optional<PrimType> T = Ctx.classify(ElemTy)) {
- TYPE_SWITCH(*T, Slot = EP.deref<T>().toAPValue());
+ TYPE_SWITCH(*T, Slot = EP.deref<T>().toAPValue(ASTCtx));
} else {
Ok &= Composite(ElemTy, EP.narrow(), Slot);
}
@@ -337,10 +488,10 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx) const {
// Complex types.
if (const auto *CT = Ty->getAs<ComplexType>()) {
QualType ElemTy = CT->getElementType();
- std::optional<PrimType> ElemT = Ctx.classify(ElemTy);
- assert(ElemT);
if (ElemTy->isIntegerType()) {
+ std::optional<PrimType> ElemT = Ctx.classify(ElemTy);
+ assert(ElemT);
INT_TYPE_SWITCH(*ElemT, {
auto V1 = Ptr.atIndex(0).deref<T>();
auto V2 = Ptr.atIndex(1).deref<T>();
@@ -355,16 +506,41 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx) const {
return false;
}
+ // Vector types.
+ if (const auto *VT = Ty->getAs<VectorType>()) {
+ assert(Ptr.getFieldDesc()->isPrimitiveArray());
+ QualType ElemTy = VT->getElementType();
+ PrimType ElemT = *Ctx.classify(ElemTy);
+
+ SmallVector<APValue> Values;
+ Values.reserve(VT->getNumElements());
+ for (unsigned I = 0; I != VT->getNumElements(); ++I) {
+ TYPE_SWITCH(ElemT, {
+ Values.push_back(Ptr.atIndex(I).deref<T>().toAPValue(ASTCtx));
+ });
+ }
+
+ assert(Values.size() == VT->getNumElements());
+ R = APValue(Values.data(), Values.size());
+ return true;
+ }
+
llvm_unreachable("invalid value to return");
};
- if (isZero())
- return APValue(static_cast<Expr *>(nullptr), CharUnits::Zero(), {}, false,
- true);
-
- if (isDummy() || !isLive())
+ // Invalid to read from.
+ if (isDummy() || !isLive() || isPastEnd())
return std::nullopt;
+ // We can return these as rvalues, but we can't deref() them.
+ if (isZero() || isIntegralPointer())
+ return toAPValue(ASTCtx);
+
+ // Just load primitive types.
+ if (std::optional<PrimType> T = Ctx.classify(ResultType)) {
+ TYPE_SWITCH(*T, return this->deref<T>().toAPValue(ASTCtx));
+ }
+
// Return the composite type.
APValue Result;
if (!Composite(getType(), *this, Result))
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h
index 8ccaff41ded8..e351699023ba 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h
@@ -19,7 +19,6 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
-#include "llvm/ADT/PointerUnion.h"
#include "llvm/Support/raw_ostream.h"
namespace clang {
@@ -28,11 +27,26 @@ class Block;
class DeadBlock;
class Pointer;
class Context;
+template <unsigned A, bool B> class Integral;
enum PrimType : unsigned;
class Pointer;
inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Pointer &P);
+struct BlockPointer {
+ /// The block the pointer is pointing to.
+ Block *Pointee;
+ /// Start of the current subfield.
+ unsigned Base;
+};
+
+struct IntPointer {
+ const Descriptor *Desc;
+ uint64_t Value;
+};
+
+enum class Storage { Block, Int };
+
/// A pointer to a memory block, live or dead.
///
/// This object can be allocated into interpreter stack frames. If pointing to
@@ -68,11 +82,20 @@ private:
static constexpr unsigned RootPtrMark = ~0u;
public:
- Pointer() {}
+ Pointer() {
+ StorageKind = Storage::Int;
+ PointeeStorage.Int.Value = 0;
+ PointeeStorage.Int.Desc = nullptr;
+ }
Pointer(Block *B);
- Pointer(Block *B, unsigned BaseAndOffset);
+ Pointer(Block *B, uint64_t BaseAndOffset);
Pointer(const Pointer &P);
Pointer(Pointer &&P);
+ Pointer(uint64_t Address, const Descriptor *Desc, uint64_t Offset = 0)
+ : Offset(Offset), StorageKind(Storage::Int) {
+ PointeeStorage.Int.Value = Address;
+ PointeeStorage.Int.Desc = Desc;
+ }
~Pointer();
void operator=(const Pointer &P);
@@ -80,42 +103,59 @@ public:
/// Equality operators are just for tests.
bool operator==(const Pointer &P) const {
- return Pointee == P.Pointee && Base == P.Base && Offset == P.Offset;
- }
+ if (P.StorageKind != StorageKind)
+ return false;
+ if (isIntegralPointer())
+ return P.asIntPointer().Value == asIntPointer().Value &&
+ Offset == P.Offset;
- bool operator!=(const Pointer &P) const {
- return Pointee != P.Pointee || Base != P.Base || Offset != P.Offset;
+ assert(isBlockPointer());
+ return P.asBlockPointer().Pointee == asBlockPointer().Pointee &&
+ P.asBlockPointer().Base == asBlockPointer().Base &&
+ Offset == P.Offset;
}
+ bool operator!=(const Pointer &P) const { return !(P == *this); }
+
/// Converts the pointer to an APValue.
- APValue toAPValue() const;
+ APValue toAPValue(const ASTContext &ASTCtx) const;
/// Converts the pointer to a string usable in diagnostics.
std::string toDiagnosticString(const ASTContext &Ctx) const;
- unsigned getIntegerRepresentation() const {
- return reinterpret_cast<uintptr_t>(Pointee) + Offset;
+ uint64_t getIntegerRepresentation() const {
+ if (isIntegralPointer())
+ return asIntPointer().Value + (Offset * elemSize());
+ return reinterpret_cast<uint64_t>(asBlockPointer().Pointee) + Offset;
}
/// Converts the pointer to an APValue that is an rvalue.
- std::optional<APValue> toRValue(const Context &Ctx) const;
+ std::optional<APValue> toRValue(const Context &Ctx,
+ QualType ResultType) const;
/// Offsets a pointer inside an array.
- [[nodiscard]] Pointer atIndex(unsigned Idx) const {
- if (Base == RootPtrMark)
- return Pointer(Pointee, RootPtrMark, getDeclDesc()->getSize());
- unsigned Off = Idx * elemSize();
+ [[nodiscard]] Pointer atIndex(uint64_t Idx) const {
+ if (isIntegralPointer())
+ return Pointer(asIntPointer().Value, asIntPointer().Desc, Idx);
+
+ if (asBlockPointer().Base == RootPtrMark)
+ return Pointer(asBlockPointer().Pointee, RootPtrMark,
+ getDeclDesc()->getSize());
+ uint64_t Off = Idx * elemSize();
if (getFieldDesc()->ElemDesc)
Off += sizeof(InlineDescriptor);
else
Off += sizeof(InitMapPtr);
- return Pointer(Pointee, Base, Base + Off);
+ return Pointer(asBlockPointer().Pointee, asBlockPointer().Base,
+ asBlockPointer().Base + Off);
}
/// Creates a pointer to a field.
[[nodiscard]] Pointer atField(unsigned Off) const {
unsigned Field = Offset + Off;
- return Pointer(Pointee, Field, Field);
+ if (isIntegralPointer())
+ return Pointer(asIntPointer().Value + Field, asIntPointer().Desc);
+ return Pointer(asBlockPointer().Pointee, Field, Field);
}
/// Subtract the given offset from the current Base and Offset
@@ -123,47 +163,56 @@ public:
[[nodiscard]] Pointer atFieldSub(unsigned Off) const {
assert(Offset >= Off);
unsigned O = Offset - Off;
- return Pointer(Pointee, O, O);
+ return Pointer(asBlockPointer().Pointee, O, O);
}
/// Restricts the scope of an array element pointer.
[[nodiscard]] Pointer narrow() const {
+ if (!isBlockPointer())
+ return *this;
+ assert(isBlockPointer());
// Null pointers cannot be narrowed.
if (isZero() || isUnknownSizeArray())
return *this;
// Pointer to an array of base types - enter block.
- if (Base == RootPtrMark)
- return Pointer(Pointee, 0, Offset == 0 ? Offset : PastEndMark);
+ if (asBlockPointer().Base == RootPtrMark)
+ return Pointer(asBlockPointer().Pointee, sizeof(InlineDescriptor),
+ Offset == 0 ? Offset : PastEndMark);
// Pointer is one past end - magic offset marks that.
if (isOnePastEnd())
- return Pointer(Pointee, Base, PastEndMark);
+ return Pointer(asBlockPointer().Pointee, asBlockPointer().Base,
+ PastEndMark);
// Primitive arrays are a bit special since they do not have inline
// descriptors. If Offset != Base, then the pointer already points to
// an element and there is nothing to do. Otherwise, the pointer is
// adjusted to the first element of the array.
if (inPrimitiveArray()) {
- if (Offset != Base)
+ if (Offset != asBlockPointer().Base)
return *this;
- return Pointer(Pointee, Base, Offset + sizeof(InitMapPtr));
+ return Pointer(asBlockPointer().Pointee, asBlockPointer().Base,
+ Offset + sizeof(InitMapPtr));
}
// Pointer is to a field or array element - enter it.
- if (Offset != Base)
- return Pointer(Pointee, Offset, Offset);
+ if (Offset != asBlockPointer().Base)
+ return Pointer(asBlockPointer().Pointee, Offset, Offset);
// Enter the first element of an array.
if (!getFieldDesc()->isArray())
return *this;
- const unsigned NewBase = Base + sizeof(InlineDescriptor);
- return Pointer(Pointee, NewBase, NewBase);
+ const unsigned NewBase = asBlockPointer().Base + sizeof(InlineDescriptor);
+ return Pointer(asBlockPointer().Pointee, NewBase, NewBase);
}
/// Expands a pointer to the containing array, undoing narrowing.
[[nodiscard]] Pointer expand() const {
+ assert(isBlockPointer());
+ Block *Pointee = asBlockPointer().Pointee;
+
if (isElementPastEnd()) {
// Revert to an outer one-past-end pointer.
unsigned Adjust;
@@ -171,119 +220,209 @@ public:
Adjust = sizeof(InitMapPtr);
else
Adjust = sizeof(InlineDescriptor);
- return Pointer(Pointee, Base, Base + getSize() + Adjust);
+ return Pointer(Pointee, asBlockPointer().Base,
+ asBlockPointer().Base + getSize() + Adjust);
}
// Do not step out of array elements.
- if (Base != Offset)
+ if (asBlockPointer().Base != Offset)
return *this;
// If at base, point to an array of base types.
- if (Base == 0)
+ if (isRoot())
return Pointer(Pointee, RootPtrMark, 0);
// Step into the containing array, if inside one.
- unsigned Next = Base - getInlineDesc()->Offset;
+ unsigned Next = asBlockPointer().Base - getInlineDesc()->Offset;
const Descriptor *Desc =
- Next == 0 ? getDeclDesc() : getDescriptor(Next)->Desc;
+ (Next == Pointee->getDescriptor()->getMetadataSize())
+ ? getDeclDesc()
+ : getDescriptor(Next)->Desc;
if (!Desc->IsArray)
return *this;
return Pointer(Pointee, Next, Offset);
}
/// Checks if the pointer is null.
- bool isZero() const { return Pointee == nullptr; }
+ bool isZero() const {
+ if (isBlockPointer())
+ return asBlockPointer().Pointee == nullptr;
+ assert(isIntegralPointer());
+ return asIntPointer().Value == 0 && Offset == 0;
+ }
/// Checks if the pointer is live.
- bool isLive() const { return Pointee && !Pointee->IsDead; }
+ bool isLive() const {
+ if (isIntegralPointer())
+ return true;
+ return asBlockPointer().Pointee && !asBlockPointer().Pointee->IsDead;
+ }
/// Checks if the item is a field in an object.
- bool isField() const { return Base != 0 && Base != RootPtrMark; }
+ bool isField() const {
+ if (isIntegralPointer())
+ return false;
+
+ return !isRoot() && getFieldDesc()->asDecl();
+ }
/// Accessor for information about the declaration site.
const Descriptor *getDeclDesc() const {
- assert(Pointee);
- return Pointee->Desc;
+ if (isIntegralPointer())
+ return asIntPointer().Desc;
+
+ assert(isBlockPointer());
+ assert(asBlockPointer().Pointee);
+ return asBlockPointer().Pointee->Desc;
}
SourceLocation getDeclLoc() const { return getDeclDesc()->getLocation(); }
+ /// Returns the expression or declaration the pointer has been created for.
+ DeclTy getSource() const {
+ if (isBlockPointer())
+ return getDeclDesc()->getSource();
+
+ assert(isIntegralPointer());
+ return asIntPointer().Desc ? asIntPointer().Desc->getSource() : DeclTy();
+ }
+
/// Returns a pointer to the object of which this pointer is a field.
[[nodiscard]] Pointer getBase() const {
- if (Base == RootPtrMark) {
+ if (asBlockPointer().Base == RootPtrMark) {
assert(Offset == PastEndMark && "cannot get base of a block");
- return Pointer(Pointee, Base, 0);
+ return Pointer(asBlockPointer().Pointee, asBlockPointer().Base, 0);
}
- assert(Offset == Base && "not an inner field");
- unsigned NewBase = Base - getInlineDesc()->Offset;
- return Pointer(Pointee, NewBase, NewBase);
+ unsigned NewBase = asBlockPointer().Base - getInlineDesc()->Offset;
+ return Pointer(asBlockPointer().Pointee, NewBase, NewBase);
}
/// Returns the parent array.
[[nodiscard]] Pointer getArray() const {
- if (Base == RootPtrMark) {
+ if (asBlockPointer().Base == RootPtrMark) {
assert(Offset != 0 && Offset != PastEndMark && "not an array element");
- return Pointer(Pointee, Base, 0);
+ return Pointer(asBlockPointer().Pointee, asBlockPointer().Base, 0);
}
- assert(Offset != Base && "not an array element");
- return Pointer(Pointee, Base, Base);
+ assert(Offset != asBlockPointer().Base && "not an array element");
+ return Pointer(asBlockPointer().Pointee, asBlockPointer().Base,
+ asBlockPointer().Base);
}
/// Accessors for information about the innermost field.
const Descriptor *getFieldDesc() const {
- if (Base == 0 || Base == RootPtrMark)
+ if (isIntegralPointer())
+ return asIntPointer().Desc;
+
+ if (isRoot())
return getDeclDesc();
return getInlineDesc()->Desc;
}
/// Returns the type of the innermost field.
QualType getType() const {
- if (inPrimitiveArray() && Offset != Base)
- return getFieldDesc()->getType()->getAsArrayTypeUnsafe()->getElementType();
+ if (inPrimitiveArray() && Offset != asBlockPointer().Base) {
+ // Unfortunately, complex and vector types are not array types in clang,
+ // but they are for us.
+ if (const auto *AT = getFieldDesc()->getType()->getAsArrayTypeUnsafe())
+ return AT->getElementType();
+ if (const auto *CT = getFieldDesc()->getType()->getAs<ComplexType>())
+ return CT->getElementType();
+ if (const auto *CT = getFieldDesc()->getType()->getAs<VectorType>())
+ return CT->getElementType();
+ }
return getFieldDesc()->getType();
}
- [[nodiscard]] Pointer getDeclPtr() const { return Pointer(Pointee); }
+ [[nodiscard]] Pointer getDeclPtr() const {
+ return Pointer(asBlockPointer().Pointee);
+ }
/// Returns the element size of the innermost field.
size_t elemSize() const {
- if (Base == RootPtrMark)
+ if (isIntegralPointer()) {
+ if (!asIntPointer().Desc)
+ return 1;
+ return asIntPointer().Desc->getElemSize();
+ }
+
+ if (asBlockPointer().Base == RootPtrMark)
return getDeclDesc()->getSize();
return getFieldDesc()->getElemSize();
}
/// Returns the total size of the innermost field.
- size_t getSize() const { return getFieldDesc()->getSize(); }
+ size_t getSize() const {
+ assert(isBlockPointer());
+ return getFieldDesc()->getSize();
+ }
/// Returns the offset into an array.
unsigned getOffset() const {
assert(Offset != PastEndMark && "invalid offset");
- if (Base == RootPtrMark)
+ if (asBlockPointer().Base == RootPtrMark)
return Offset;
unsigned Adjust = 0;
- if (Offset != Base) {
+ if (Offset != asBlockPointer().Base) {
if (getFieldDesc()->ElemDesc)
Adjust = sizeof(InlineDescriptor);
else
Adjust = sizeof(InitMapPtr);
}
- return Offset - Base - Adjust;
+ return Offset - asBlockPointer().Base - Adjust;
}
/// Whether this array refers to an array, but not
/// to the first element.
- bool isArrayRoot() const { return inArray() && Offset == Base; }
+ bool isArrayRoot() const {
+ return inArray() && Offset == asBlockPointer().Base;
+ }
/// Checks if the innermost field is an array.
- bool inArray() const { return getFieldDesc()->IsArray; }
+ bool inArray() const {
+ if (isBlockPointer())
+ return getFieldDesc()->IsArray;
+ return false;
+ }
/// Checks if the structure is a primitive array.
- bool inPrimitiveArray() const { return getFieldDesc()->isPrimitiveArray(); }
+ bool inPrimitiveArray() const {
+ if (isBlockPointer())
+ return getFieldDesc()->isPrimitiveArray();
+ return false;
+ }
/// Checks if the structure is an array of unknown size.
bool isUnknownSizeArray() const {
+ if (!isBlockPointer())
+ return false;
return getFieldDesc()->isUnknownSizeArray();
}
/// Checks if the pointer points to an array.
- bool isArrayElement() const { return inArray() && Base != Offset; }
+ bool isArrayElement() const {
+ if (isBlockPointer())
+ return inArray() && asBlockPointer().Base != Offset;
+ return false;
+ }
/// Pointer points directly to a block.
bool isRoot() const {
- return (Base == 0 || Base == RootPtrMark) && Offset == 0;
+ if (isZero() || isIntegralPointer())
+ return true;
+ return (asBlockPointer().Base ==
+ asBlockPointer().Pointee->getDescriptor()->getMetadataSize() ||
+ asBlockPointer().Base == 0);
+ }
+ /// If this pointer has an InlineDescriptor we can use to initialize.
+ bool canBeInitialized() const {
+ if (!isBlockPointer())
+ return false;
+
+ return asBlockPointer().Pointee && asBlockPointer().Base > 0;
+ }
+
+ [[nodiscard]] const BlockPointer &asBlockPointer() const {
+ assert(isBlockPointer());
+ return PointeeStorage.BS;
}
+ [[nodiscard]] const IntPointer &asIntPointer() const {
+ assert(isIntegralPointer());
+ return PointeeStorage.Int;
+ }
+ bool isBlockPointer() const { return StorageKind == Storage::Block; }
+ bool isIntegralPointer() const { return StorageKind == Storage::Int; }
/// Returns the record descriptor of a class.
const Record *getRecord() const { return getFieldDesc()->ElemRecord; }
@@ -299,61 +438,114 @@ public:
bool isUnion() const;
/// Checks if the storage is extern.
- bool isExtern() const { return Pointee && Pointee->isExtern(); }
+ bool isExtern() const {
+ if (isBlockPointer())
+ return asBlockPointer().Pointee && asBlockPointer().Pointee->isExtern();
+ return false;
+ }
/// Checks if the storage is static.
bool isStatic() const {
- assert(Pointee);
- return Pointee->isStatic();
+ if (isIntegralPointer())
+ return true;
+ assert(asBlockPointer().Pointee);
+ return asBlockPointer().Pointee->isStatic();
}
/// Checks if the storage is temporary.
bool isTemporary() const {
- assert(Pointee);
- return Pointee->isTemporary();
+ if (isBlockPointer()) {
+ assert(asBlockPointer().Pointee);
+ return asBlockPointer().Pointee->isTemporary();
+ }
+ return false;
}
/// Checks if the storage is a static temporary.
bool isStaticTemporary() const { return isStatic() && isTemporary(); }
/// Checks if the field is mutable.
bool isMutable() const {
- return Base != 0 && getInlineDesc()->IsFieldMutable;
+ if (!isBlockPointer())
+ return false;
+ return !isRoot() && getInlineDesc()->IsFieldMutable;
+ }
+
+ bool isWeak() const {
+ if (isIntegralPointer())
+ return false;
+
+ assert(isBlockPointer());
+ if (const ValueDecl *VD = getDeclDesc()->asValueDecl())
+ return VD->isWeak();
+ return false;
}
/// Checks if an object was initialized.
bool isInitialized() const;
/// Checks if the object is active.
- bool isActive() const { return Base == 0 || getInlineDesc()->IsActive; }
+ bool isActive() const {
+ if (!isBlockPointer())
+ return true;
+ return isRoot() || getInlineDesc()->IsActive;
+ }
/// Checks if a structure is a base class.
bool isBaseClass() const { return isField() && getInlineDesc()->IsBase; }
- /// Checks if the pointer pointers to a dummy value.
- bool isDummy() const { return getDeclDesc()->isDummy(); }
+ bool isVirtualBaseClass() const {
+ return isField() && getInlineDesc()->IsVirtualBase;
+ }
+ /// Checks if the pointer points to a dummy value.
+ bool isDummy() const {
+ if (!isBlockPointer())
+ return false;
+
+ if (!asBlockPointer().Pointee)
+ return false;
+
+ return getDeclDesc()->isDummy();
+ }
/// Checks if an object or a subfield is mutable.
bool isConst() const {
- return Base == 0 ? getDeclDesc()->IsConst : getInlineDesc()->IsConst;
+ if (isIntegralPointer())
+ return true;
+ return isRoot() ? getDeclDesc()->IsConst : getInlineDesc()->IsConst;
}
/// Returns the declaration ID.
std::optional<unsigned> getDeclID() const {
- assert(Pointee);
- return Pointee->getDeclID();
+ if (isBlockPointer()) {
+ assert(asBlockPointer().Pointee);
+ return asBlockPointer().Pointee->getDeclID();
+ }
+ return std::nullopt;
}
/// Returns the byte offset from the start.
unsigned getByteOffset() const {
+ if (isIntegralPointer())
+ return asIntPointer().Value + Offset;
+ if (isOnePastEnd())
+ return PastEndMark;
return Offset;
}
/// Returns the number of elements.
- unsigned getNumElems() const { return getSize() / elemSize(); }
+ unsigned getNumElems() const {
+ if (isIntegralPointer())
+ return ~unsigned(0);
+ return getSize() / elemSize();
+ }
- const Block *block() const { return Pointee; }
+ const Block *block() const { return asBlockPointer().Pointee; }
/// Returns the index into an array.
int64_t getIndex() const {
- if (isElementPastEnd())
- return 1;
+ if (!isBlockPointer())
+ return 0;
+
+ if (isZero())
+ return 0;
// narrow()ed element in a composite array.
- if (Base > 0 && Base == Offset)
+ if (asBlockPointer().Base > sizeof(InlineDescriptor) &&
+ asBlockPointer().Base == Offset)
return 0;
if (auto ElemSize = elemSize())
@@ -363,31 +555,67 @@ public:
/// Checks if the index is one past end.
bool isOnePastEnd() const {
- if (!Pointee)
+ if (isIntegralPointer())
+ return false;
+
+ if (!asBlockPointer().Pointee)
+ return false;
+
+ if (isUnknownSizeArray())
+ return false;
+
+ return isElementPastEnd() || isPastEnd() ||
+ (getSize() == getOffset() && !isZeroSizeArray());
+ }
+
+ /// Checks if the pointer points past the end of the object.
+ bool isPastEnd() const {
+ if (isIntegralPointer())
return false;
- return isElementPastEnd() || getSize() == getOffset();
+
+ return !isZero() && Offset > PointeeStorage.BS.Pointee->getSize();
}
/// Checks if the pointer is an out-of-bounds element pointer.
bool isElementPastEnd() const { return Offset == PastEndMark; }
+ /// Checks if the pointer is pointing to a zero-size array.
+ bool isZeroSizeArray() const { return getFieldDesc()->isZeroSizeArray(); }
+
/// Dereferences the pointer, if it's live.
template <typename T> T &deref() const {
assert(isLive() && "Invalid pointer");
- assert(Pointee);
+ assert(isBlockPointer());
+ assert(asBlockPointer().Pointee);
+ assert(isDereferencable());
+ assert(Offset + sizeof(T) <=
+ asBlockPointer().Pointee->getDescriptor()->getAllocSize());
+
if (isArrayRoot())
- return *reinterpret_cast<T *>(Pointee->rawData() + Base +
- sizeof(InitMapPtr));
+ return *reinterpret_cast<T *>(asBlockPointer().Pointee->rawData() +
+ asBlockPointer().Base + sizeof(InitMapPtr));
- assert(Offset + sizeof(T) <= Pointee->getDescriptor()->getAllocSize());
- return *reinterpret_cast<T *>(Pointee->rawData() + Offset);
+ return *reinterpret_cast<T *>(asBlockPointer().Pointee->rawData() + Offset);
}
/// Dereferences a primitive element.
template <typename T> T &elem(unsigned I) const {
assert(I < getNumElems());
- assert(Pointee);
- return reinterpret_cast<T *>(Pointee->data() + sizeof(InitMapPtr))[I];
+ assert(isBlockPointer());
+ assert(asBlockPointer().Pointee);
+ return reinterpret_cast<T *>(asBlockPointer().Pointee->data() +
+ sizeof(InitMapPtr))[I];
+ }
+
+ /// Whether this block can be read from at all. This is only true for
+ /// block pointers that point to a valid location inside that block.
+ bool isDereferencable() const {
+ if (!isBlockPointer())
+ return false;
+ if (isPastEnd())
+ return false;
+
+ return true;
}
/// Initializes a field.
@@ -416,60 +644,56 @@ public:
static bool hasSameArray(const Pointer &A, const Pointer &B);
/// Prints the pointer.
- void print(llvm::raw_ostream &OS) const {
- OS << Pointee << " {";
- if (Base == RootPtrMark)
- OS << "rootptr, ";
- else
- OS << Base << ", ";
-
- if (Offset == PastEndMark)
- OS << "pastend, ";
- else
- OS << Offset << ", ";
-
- if (Pointee)
- OS << Pointee->getSize();
- else
- OS << "nullptr";
- OS << "}";
- }
+ void print(llvm::raw_ostream &OS) const;
private:
friend class Block;
friend class DeadBlock;
+ friend class MemberPointer;
+ friend class InterpState;
friend struct InitMap;
+ friend class DynamicAllocator;
- Pointer(Block *Pointee, unsigned Base, unsigned Offset);
+ Pointer(Block *Pointee, unsigned Base, uint64_t Offset);
/// Returns the embedded descriptor preceding a field.
- InlineDescriptor *getInlineDesc() const { return getDescriptor(Base); }
+ InlineDescriptor *getInlineDesc() const {
+ assert(asBlockPointer().Base != sizeof(GlobalInlineDescriptor));
+ assert(asBlockPointer().Base <= asBlockPointer().Pointee->getSize());
+ return getDescriptor(asBlockPointer().Base);
+ }
/// Returns a descriptor at a given offset.
InlineDescriptor *getDescriptor(unsigned Offset) const {
assert(Offset != 0 && "Not a nested pointer");
- assert(Pointee);
- return reinterpret_cast<InlineDescriptor *>(Pointee->rawData() + Offset) -
+ assert(isBlockPointer());
+ assert(!isZero());
+ return reinterpret_cast<InlineDescriptor *>(
+ asBlockPointer().Pointee->rawData() + Offset) -
1;
}
/// Returns a reference to the InitMapPtr which stores the initialization map.
InitMapPtr &getInitMap() const {
- assert(Pointee);
- return *reinterpret_cast<InitMapPtr *>(Pointee->rawData() + Base);
+ assert(isBlockPointer());
+ assert(!isZero());
+ return *reinterpret_cast<InitMapPtr *>(asBlockPointer().Pointee->rawData() +
+ asBlockPointer().Base);
}
- /// The block the pointer is pointing to.
- Block *Pointee = nullptr;
- /// Start of the current subfield.
- unsigned Base = 0;
- /// Offset into the block.
- unsigned Offset = 0;
+ /// Offset into the storage.
+ uint64_t Offset = 0;
/// Previous link in the pointer chain.
Pointer *Prev = nullptr;
/// Next link in the pointer chain.
Pointer *Next = nullptr;
+
+ union {
+ BlockPointer BS;
+ IntPointer Int;
+ } PointeeStorage;
+ Storage StorageKind = Storage::Int;
};
inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Pointer &P) {
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/PrimType.cpp b/contrib/llvm-project/clang/lib/AST/Interp/PrimType.cpp
index 9b96dcfe6a27..3054e67d5c49 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/PrimType.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/PrimType.cpp
@@ -11,6 +11,7 @@
#include "Floating.h"
#include "FunctionPointer.h"
#include "IntegralAP.h"
+#include "MemberPointer.h"
#include "Pointer.h"
using namespace clang;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/PrimType.h b/contrib/llvm-project/clang/lib/AST/Interp/PrimType.h
index 8c5e87f37be1..20fb5e81774d 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/PrimType.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/PrimType.h
@@ -1,4 +1,4 @@
-//===--- PrimType.h - Types for the constexpr VM --------------------*- C++ -*-===//
+//===--- PrimType.h - Types for the constexpr VM ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -25,29 +25,36 @@ class Pointer;
class Boolean;
class Floating;
class FunctionPointer;
+class MemberPointer;
template <bool Signed> class IntegralAP;
template <unsigned Bits, bool Signed> class Integral;
/// Enumeration of the primitive types of the VM.
enum PrimType : unsigned {
- PT_Sint8,
- PT_Uint8,
- PT_Sint16,
- PT_Uint16,
- PT_Sint32,
- PT_Uint32,
- PT_Sint64,
- PT_Uint64,
- PT_IntAP,
- PT_IntAPS,
- PT_Bool,
- PT_Float,
- PT_Ptr,
- PT_FnPtr,
+ PT_Sint8 = 0,
+ PT_Uint8 = 1,
+ PT_Sint16 = 2,
+ PT_Uint16 = 3,
+ PT_Sint32 = 4,
+ PT_Uint32 = 5,
+ PT_Sint64 = 6,
+ PT_Uint64 = 7,
+ PT_IntAP = 8,
+ PT_IntAPS = 9,
+ PT_Bool = 10,
+ PT_Float = 11,
+ PT_Ptr = 12,
+ PT_FnPtr = 13,
+ PT_MemberPtr = 14,
};
+inline constexpr bool isPtrType(PrimType T) {
+ return T == PT_Ptr || T == PT_FnPtr || T == PT_MemberPtr;
+}
+
enum class CastKind : uint8_t {
Reinterpret,
+ Atomic,
};
inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
interp::CastKind CK) {
@@ -55,6 +62,9 @@ inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
case interp::CastKind::Reinterpret:
OS << "reinterpret_cast";
break;
+ case interp::CastKind::Atomic:
+ OS << "atomic";
+ break;
}
return OS;
}
@@ -83,6 +93,9 @@ template <> struct PrimConv<PT_Ptr> { using T = Pointer; };
template <> struct PrimConv<PT_FnPtr> {
using T = FunctionPointer;
};
+template <> struct PrimConv<PT_MemberPtr> {
+ using T = MemberPointer;
+};
/// Returns the size of a primitive type in bytes.
size_t primSize(PrimType Type);
@@ -123,6 +136,7 @@ static inline bool aligned(const void *P) {
TYPE_SWITCH_CASE(PT_Bool, B) \
TYPE_SWITCH_CASE(PT_Ptr, B) \
TYPE_SWITCH_CASE(PT_FnPtr, B) \
+ TYPE_SWITCH_CASE(PT_MemberPtr, B) \
} \
} while (0)
@@ -145,6 +159,24 @@ static inline bool aligned(const void *P) {
} \
} while (0)
+#define INT_TYPE_SWITCH_NO_BOOL(Expr, B) \
+ do { \
+ switch (Expr) { \
+ TYPE_SWITCH_CASE(PT_Sint8, B) \
+ TYPE_SWITCH_CASE(PT_Uint8, B) \
+ TYPE_SWITCH_CASE(PT_Sint16, B) \
+ TYPE_SWITCH_CASE(PT_Uint16, B) \
+ TYPE_SWITCH_CASE(PT_Sint32, B) \
+ TYPE_SWITCH_CASE(PT_Uint32, B) \
+ TYPE_SWITCH_CASE(PT_Sint64, B) \
+ TYPE_SWITCH_CASE(PT_Uint64, B) \
+ TYPE_SWITCH_CASE(PT_IntAP, B) \
+ TYPE_SWITCH_CASE(PT_IntAPS, B) \
+ default: \
+ llvm_unreachable("Not an integer value"); \
+ } \
+ } while (0)
+
#define COMPOSITE_TYPE_SWITCH(Expr, B, D) \
do { \
switch (Expr) { \
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Program.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Program.cpp
index 1daefab4dcda..5dd59d969853 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Program.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Program.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "Program.h"
-#include "ByteCodeStmtGen.h"
#include "Context.h"
#include "Function.h"
#include "Integral.h"
@@ -55,7 +54,7 @@ unsigned Program::createGlobalString(const StringLiteral *S) {
// Create a descriptor for the string.
Descriptor *Desc =
- allocateDescriptor(S, CharType, std::nullopt, S->getLength() + 1,
+ allocateDescriptor(S, CharType, Descriptor::GlobalMD, S->getLength() + 1,
/*isConst=*/true,
/*isTemporary=*/false,
/*isMutable=*/false);
@@ -64,9 +63,11 @@ unsigned Program::createGlobalString(const StringLiteral *S) {
// The byte length does not include the null terminator.
unsigned I = Globals.size();
unsigned Sz = Desc->getAllocSize();
- auto *G = new (Allocator, Sz) Global(Desc, /*isStatic=*/true,
+ auto *G = new (Allocator, Sz) Global(Ctx.getEvalID(), Desc, /*isStatic=*/true,
/*isExtern=*/false);
G->block()->invokeCtor();
+
+ new (G->block()->rawData()) InlineDescriptor(Desc);
Globals.push_back(G);
// Construct the string in storage.
@@ -78,16 +79,19 @@ unsigned Program::createGlobalString(const StringLiteral *S) {
case PT_Sint8: {
using T = PrimConv<PT_Sint8>::T;
Field.deref<T>() = T::from(CodePoint, BitWidth);
+ Field.initialize();
break;
}
case PT_Uint16: {
using T = PrimConv<PT_Uint16>::T;
Field.deref<T>() = T::from(CodePoint, BitWidth);
+ Field.initialize();
break;
}
case PT_Uint32: {
using T = PrimConv<PT_Uint32>::T;
Field.deref<T>() = T::from(CodePoint, BitWidth);
+ Field.initialize();
break;
}
default:
@@ -97,33 +101,35 @@ unsigned Program::createGlobalString(const StringLiteral *S) {
return I;
}
-Pointer Program::getPtrGlobal(unsigned Idx) {
+Pointer Program::getPtrGlobal(unsigned Idx) const {
assert(Idx < Globals.size());
return Pointer(Globals[Idx]->block());
}
std::optional<unsigned> Program::getGlobal(const ValueDecl *VD) {
- auto It = GlobalIndices.find(VD);
- if (It != GlobalIndices.end())
+ if (auto It = GlobalIndices.find(VD); It != GlobalIndices.end())
return It->second;
// Find any previous declarations which were already evaluated.
std::optional<unsigned> Index;
- for (const Decl *P = VD; P; P = P->getPreviousDecl()) {
- auto It = GlobalIndices.find(P);
- if (It != GlobalIndices.end()) {
+ for (const Decl *P = VD->getPreviousDecl(); P; P = P->getPreviousDecl()) {
+ if (auto It = GlobalIndices.find(P); It != GlobalIndices.end()) {
Index = It->second;
break;
}
}
// Map the decl to the existing index.
- if (Index) {
+ if (Index)
GlobalIndices[VD] = *Index;
- return std::nullopt;
- }
- return Index;
+ return std::nullopt;
+}
+
+std::optional<unsigned> Program::getGlobal(const Expr *E) {
+ if (auto It = GlobalIndices.find(E); It != GlobalIndices.end())
+ return It->second;
+ return std::nullopt;
}
std::optional<unsigned> Program::getOrCreateGlobal(const ValueDecl *VD,
@@ -140,31 +146,47 @@ std::optional<unsigned> Program::getOrCreateGlobal(const ValueDecl *VD,
std::optional<unsigned> Program::getOrCreateDummy(const ValueDecl *VD) {
// Dedup blocks since they are immutable and pointers cannot be compared.
- if (auto It = DummyParams.find(VD); It != DummyParams.end())
+ if (auto It = DummyVariables.find(VD); It != DummyVariables.end())
return It->second;
- // Create dummy descriptor.
- Descriptor *Desc = allocateDescriptor(VD, std::nullopt);
+ QualType QT = VD->getType();
+ if (const auto *RT = QT->getAs<ReferenceType>())
+ QT = RT->getPointeeType();
+
+ Descriptor *Desc;
+ if (std::optional<PrimType> T = Ctx.classify(QT))
+ Desc = createDescriptor(VD, *T, std::nullopt, true, false);
+ else
+ Desc = createDescriptor(VD, QT.getTypePtr(), std::nullopt, true, false);
+ if (!Desc)
+ Desc = allocateDescriptor(VD);
+
+ assert(Desc);
+ Desc->makeDummy();
+
+ assert(Desc->isDummy());
+
// Allocate a block for storage.
unsigned I = Globals.size();
auto *G = new (Allocator, Desc->getAllocSize())
- Global(getCurrentDecl(), Desc, /*IsStatic=*/true, /*IsExtern=*/false);
+ Global(Ctx.getEvalID(), getCurrentDecl(), Desc, /*IsStatic=*/true,
+ /*IsExtern=*/false);
G->block()->invokeCtor();
Globals.push_back(G);
- DummyParams[VD] = I;
+ DummyVariables[VD] = I;
return I;
}
std::optional<unsigned> Program::createGlobal(const ValueDecl *VD,
const Expr *Init) {
- assert(!getGlobal(VD));
bool IsStatic, IsExtern;
if (const auto *Var = dyn_cast<VarDecl>(VD)) {
IsStatic = Context::shouldBeGloballyIndexed(VD);
- IsExtern = !Var->getAnyInitializer();
- } else if (isa<UnnamedGlobalConstantDecl>(VD)) {
+ IsExtern = Var->hasExternalStorage();
+ } else if (isa<UnnamedGlobalConstantDecl, MSGuidDecl,
+ TemplateParamObjectDecl>(VD)) {
IsStatic = true;
IsExtern = false;
} else {
@@ -180,7 +202,14 @@ std::optional<unsigned> Program::createGlobal(const ValueDecl *VD,
}
std::optional<unsigned> Program::createGlobal(const Expr *E) {
- return createGlobal(E, E->getType(), /*isStatic=*/true, /*isExtern=*/false);
+ if (auto Idx = getGlobal(E))
+ return Idx;
+ if (auto Idx = createGlobal(E, E->getType(), /*isStatic=*/true,
+ /*isExtern=*/false)) {
+ GlobalIndices[E] = *Idx;
+ return *Idx;
+ }
+ return std::nullopt;
}
std::optional<unsigned> Program::createGlobal(const DeclTy &D, QualType Ty,
@@ -190,12 +219,12 @@ std::optional<unsigned> Program::createGlobal(const DeclTy &D, QualType Ty,
Descriptor *Desc;
const bool IsConst = Ty.isConstQualified();
const bool IsTemporary = D.dyn_cast<const Expr *>();
- if (auto T = Ctx.classify(Ty)) {
- Desc = createDescriptor(D, *T, std::nullopt, IsConst, IsTemporary);
- } else {
- Desc = createDescriptor(D, Ty.getTypePtr(), std::nullopt, IsConst,
+ if (std::optional<PrimType> T = Ctx.classify(Ty))
+ Desc = createDescriptor(D, *T, Descriptor::GlobalMD, IsConst, IsTemporary);
+ else
+ Desc = createDescriptor(D, Ty.getTypePtr(), Descriptor::GlobalMD, IsConst,
IsTemporary);
- }
+
if (!Desc)
return std::nullopt;
@@ -203,9 +232,13 @@ std::optional<unsigned> Program::createGlobal(const DeclTy &D, QualType Ty,
unsigned I = Globals.size();
auto *G = new (Allocator, Desc->getAllocSize())
- Global(getCurrentDecl(), Desc, IsStatic, IsExtern);
+ Global(Ctx.getEvalID(), getCurrentDecl(), Desc, IsStatic, IsExtern);
G->block()->invokeCtor();
+ // Initialize InlineDescriptor fields.
+ auto *GD = new (G->block()->rawData()) GlobalInlineDescriptor();
+ if (!Init)
+ GD->InitState = GlobalInitState::NoInitializer;
Globals.push_back(G);
return I;
@@ -224,6 +257,9 @@ Record *Program::getOrCreateRecord(const RecordDecl *RD) {
if (!RD)
return nullptr;
+ if (!RD->isCompleteDefinition())
+ return nullptr;
+
// Deduplicate records.
if (auto It = Records.find(RD); It != Records.end())
return It->second;
@@ -239,7 +275,8 @@ Record *Program::getOrCreateRecord(const RecordDecl *RD) {
unsigned VirtSize = 0;
// Helper to get a base descriptor.
- auto GetBaseDesc = [this](const RecordDecl *BD, Record *BR) -> Descriptor * {
+ auto GetBaseDesc = [this](const RecordDecl *BD,
+ const Record *BR) -> const Descriptor * {
if (!BR)
return nullptr;
return allocateDescriptor(BD, BR, std::nullopt, /*isConst=*/false,
@@ -250,39 +287,52 @@ Record *Program::getOrCreateRecord(const RecordDecl *RD) {
// Reserve space for base classes.
Record::BaseList Bases;
Record::VirtualBaseList VirtBases;
- if (auto *CD = dyn_cast<CXXRecordDecl>(RD)) {
+ if (const auto *CD = dyn_cast<CXXRecordDecl>(RD)) {
for (const CXXBaseSpecifier &Spec : CD->bases()) {
if (Spec.isVirtual())
continue;
- const RecordDecl *BD = Spec.getType()->castAs<RecordType>()->getDecl();
- Record *BR = getOrCreateRecord(BD);
- if (Descriptor *Desc = GetBaseDesc(BD, BR)) {
- BaseSize += align(sizeof(InlineDescriptor));
- Bases.push_back({BD, BaseSize, Desc, BR});
- BaseSize += align(BR->getSize());
- continue;
- }
- return nullptr;
+ // In error cases, the base might not be a RecordType.
+ const auto *RT = Spec.getType()->getAs<RecordType>();
+ if (!RT)
+ return nullptr;
+ const RecordDecl *BD = RT->getDecl();
+ const Record *BR = getOrCreateRecord(BD);
+
+ const Descriptor *Desc = GetBaseDesc(BD, BR);
+ if (!Desc)
+ return nullptr;
+
+ BaseSize += align(sizeof(InlineDescriptor));
+ Bases.push_back({BD, BaseSize, Desc, BR});
+ BaseSize += align(BR->getSize());
}
for (const CXXBaseSpecifier &Spec : CD->vbases()) {
- const RecordDecl *BD = Spec.getType()->castAs<RecordType>()->getDecl();
- Record *BR = getOrCreateRecord(BD);
+ const auto *RT = Spec.getType()->getAs<RecordType>();
+ if (!RT)
+ return nullptr;
- if (Descriptor *Desc = GetBaseDesc(BD, BR)) {
- VirtSize += align(sizeof(InlineDescriptor));
- VirtBases.push_back({BD, VirtSize, Desc, BR});
- VirtSize += align(BR->getSize());
- continue;
- }
- return nullptr;
+ const RecordDecl *BD = RT->getDecl();
+ const Record *BR = getOrCreateRecord(BD);
+
+ const Descriptor *Desc = GetBaseDesc(BD, BR);
+ if (!Desc)
+ return nullptr;
+
+ VirtSize += align(sizeof(InlineDescriptor));
+ VirtBases.push_back({BD, VirtSize, Desc, BR});
+ VirtSize += align(BR->getSize());
}
}
// Reserve space for fields.
Record::FieldList Fields;
for (const FieldDecl *FD : RD->fields()) {
+ // Note that we DO create fields and descriptors
+ // for unnamed bitfields here, even though we later ignore
+ // them everywhere. That's so the FieldDecl's getFieldIndex() matches.
+
// Reserve space for the field's descriptor and the offset.
BaseSize += align(sizeof(InlineDescriptor));
@@ -290,7 +340,7 @@ Record *Program::getOrCreateRecord(const RecordDecl *RD) {
QualType FT = FD->getType();
const bool IsConst = FT.isConstQualified();
const bool IsMutable = FD->isMutable();
- Descriptor *Desc;
+ const Descriptor *Desc;
if (std::optional<PrimType> T = Ctx.classify(FT)) {
Desc = createDescriptor(FD, *T, std::nullopt, IsConst,
/*isTemporary=*/false, IsMutable);
@@ -314,6 +364,7 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty,
Descriptor::MetadataSize MDSize,
bool IsConst, bool IsTemporary,
bool IsMutable, const Expr *Init) {
+
// Classes and structures.
if (const auto *RT = Ty->getAs<RecordType>()) {
if (const auto *Record = getOrCreateRecord(RT->getDecl()))
@@ -326,7 +377,7 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty,
QualType ElemTy = ArrayType->getElementType();
// Array of well-known bounds.
if (auto CAT = dyn_cast<ConstantArrayType>(ArrayType)) {
- size_t NumElems = CAT->getSize().getZExtValue();
+ size_t NumElems = CAT->getZExtSize();
if (std::optional<PrimType> T = Ctx.classify(ElemTy)) {
// Arrays of primitives.
unsigned ElemSize = primSize(*T);
@@ -353,16 +404,17 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty,
// Array of unknown bounds - cannot be accessed and pointer arithmetic
// is forbidden on pointers to such objects.
- if (isa<IncompleteArrayType>(ArrayType)) {
+ if (isa<IncompleteArrayType>(ArrayType) ||
+ isa<VariableArrayType>(ArrayType)) {
if (std::optional<PrimType> T = Ctx.classify(ElemTy)) {
- return allocateDescriptor(D, *T, IsTemporary,
+ return allocateDescriptor(D, *T, MDSize, IsTemporary,
Descriptor::UnknownSize{});
} else {
const Descriptor *Desc = createDescriptor(D, ElemTy.getTypePtr(),
MDSize, IsConst, IsTemporary);
if (!Desc)
return nullptr;
- return allocateDescriptor(D, Desc, IsTemporary,
+ return allocateDescriptor(D, Desc, MDSize, IsTemporary,
Descriptor::UnknownSize{});
}
}
@@ -382,5 +434,12 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty,
IsMutable);
}
+ // Same with vector types.
+ if (const auto *VT = Ty->getAs<VectorType>()) {
+ PrimType ElemTy = *Ctx.classify(VT->getElementType());
+ return allocateDescriptor(D, ElemTy, MDSize, VT->getNumElements(), IsConst,
+ IsTemporary, IsMutable);
+ }
+
return nullptr;
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Program.h b/contrib/llvm-project/clang/lib/AST/Interp/Program.h
index 17342680102c..1cabc5212180 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Program.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Program.h
@@ -34,7 +34,6 @@ class VarDecl;
namespace interp {
class Context;
-class Record;
/// The program contains and links the bytecode for all functions.
class Program final {
@@ -46,7 +45,8 @@ public:
// but primitive arrays might have an InitMap* heap allocated and
// that needs to be freed.
for (Global *G : Globals)
- G->block()->invokeDtor();
+ if (Block *B = G->block(); B->isInitialized())
+ B->invokeDtor();
// Records might actually allocate memory themselves, but they
// are allocated using a BumpPtrAllocator. Call their desctructors
@@ -67,7 +67,7 @@ public:
unsigned createGlobalString(const StringLiteral *S);
/// Returns a pointer to a global.
- Pointer getPtrGlobal(unsigned Idx);
+ Pointer getPtrGlobal(unsigned Idx) const;
/// Returns the value of a global.
Block *getGlobal(unsigned Idx) {
@@ -77,6 +77,7 @@ public:
/// Finds a global's index.
std::optional<unsigned> getGlobal(const ValueDecl *VD);
+ std::optional<unsigned> getGlobal(const Expr *E);
/// Returns or creates a global an creates an index to it.
std::optional<unsigned> getOrCreateGlobal(const ValueDecl *VD,
@@ -86,7 +87,7 @@ public:
std::optional<unsigned> getOrCreateDummy(const ValueDecl *VD);
/// Creates a global and returns its index.
- std::optional<unsigned> createGlobal(const ValueDecl *VD, const Expr *E);
+ std::optional<unsigned> createGlobal(const ValueDecl *VD, const Expr *Init);
/// Creates a global from a lifetime-extended temporary.
std::optional<unsigned> createGlobal(const Expr *E);
@@ -190,6 +191,7 @@ private:
std::byte *data() { return B.data(); }
/// Return a pointer to the block.
Block *block() { return &B; }
+ const Block *block() const { return &B; }
private:
/// Required metadata - does not actually track pointers.
@@ -208,7 +210,7 @@ private:
llvm::DenseMap<const RecordDecl *, Record *> Records;
/// Dummy parameter to generate pointers from.
- llvm::DenseMap<const ValueDecl *, unsigned> DummyParams;
+ llvm::DenseMap<const ValueDecl *, unsigned> DummyVariables;
/// Creates a new descriptor.
template <typename... Ts>
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Record.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Record.cpp
index 909416e6e1a1..ac01524e1caf 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Record.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Record.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "Record.h"
+#include "clang/AST/ASTContext.h"
using namespace clang;
using namespace clang::interp;
@@ -15,7 +16,7 @@ Record::Record(const RecordDecl *Decl, BaseList &&SrcBases,
FieldList &&SrcFields, VirtualBaseList &&SrcVirtualBases,
unsigned VirtualSize, unsigned BaseSize)
: Decl(Decl), Bases(std::move(SrcBases)), Fields(std::move(SrcFields)),
- BaseSize(BaseSize), VirtualSize(VirtualSize) {
+ BaseSize(BaseSize), VirtualSize(VirtualSize), IsUnion(Decl->isUnion()) {
for (Base &V : SrcVirtualBases)
VirtualBases.push_back({ V.Decl, V.Offset + BaseSize, V.Desc, V.R });
@@ -27,6 +28,14 @@ Record::Record(const RecordDecl *Decl, BaseList &&SrcBases,
VirtualBaseMap[V.Decl] = &V;
}
+const std::string Record::getName() const {
+ std::string Ret;
+ llvm::raw_string_ostream OS(Ret);
+ Decl->getNameForDiagnostic(OS, Decl->getASTContext().getPrintingPolicy(),
+ /*Qualified=*/true);
+ return Ret;
+}
+
const Record::Field *Record::getField(const FieldDecl *FD) const {
auto It = FieldMap.find(FD);
assert(It != FieldMap.end() && "Missing field");
@@ -40,11 +49,11 @@ const Record::Base *Record::getBase(const RecordDecl *FD) const {
}
const Record::Base *Record::getBase(QualType T) const {
- if (!T->isRecordType())
- return nullptr;
-
- const RecordDecl *RD = T->getAs<RecordType>()->getDecl();
- return BaseMap.lookup(RD);
+ if (auto *RT = T->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ return BaseMap.lookup(RD);
+ }
+ return nullptr;
}
const Record::Base *Record::getVirtualBase(const RecordDecl *FD) const {
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Record.h b/contrib/llvm-project/clang/lib/AST/Interp/Record.h
index b0952af2d1ac..83e15b125f77 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Record.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Record.h
@@ -28,7 +28,7 @@ public:
struct Field {
const FieldDecl *Decl;
unsigned Offset;
- Descriptor *Desc;
+ const Descriptor *Desc;
bool isBitField() const { return Decl->isBitField(); }
};
@@ -36,8 +36,8 @@ public:
struct Base {
const RecordDecl *Decl;
unsigned Offset;
- Descriptor *Desc;
- Record *R;
+ const Descriptor *Desc;
+ const Record *R;
};
/// Mapping from identifiers to field descriptors.
@@ -51,9 +51,9 @@ public:
/// Returns the underlying declaration.
const RecordDecl *getDecl() const { return Decl; }
/// Returns the name of the underlying declaration.
- const std::string getName() const { return Decl->getNameAsString(); }
+ const std::string getName() const;
/// Checks if the record is a union.
- bool isUnion() const { return getDecl()->isUnion(); }
+ bool isUnion() const { return IsUnion; }
/// Returns the size of the record.
unsigned getSize() const { return BaseSize; }
/// Returns the full size of the record, including records.
@@ -80,7 +80,6 @@ public:
unsigned getNumFields() const { return Fields.size(); }
const Field *getField(unsigned I) const { return &Fields[I]; }
- Field *getField(unsigned I) { return &Fields[I]; }
using const_base_iter = BaseList::const_iterator;
llvm::iterator_range<const_base_iter> bases() const {
@@ -101,6 +100,10 @@ public:
unsigned getNumVirtualBases() const { return VirtualBases.size(); }
const Base *getVirtualBase(unsigned I) const { return &VirtualBases[I]; }
+ void dump(llvm::raw_ostream &OS, unsigned Indentation = 0,
+ unsigned Offset = 0) const;
+ void dump() const { dump(llvm::errs()); }
+
private:
/// Constructor used by Program to create record descriptors.
Record(const RecordDecl *, BaseList &&Bases, FieldList &&Fields,
@@ -120,15 +123,17 @@ private:
VirtualBaseList VirtualBases;
/// Mapping from declarations to bases.
- llvm::DenseMap<const RecordDecl *, Base *> BaseMap;
+ llvm::DenseMap<const RecordDecl *, const Base *> BaseMap;
/// Mapping from field identifiers to descriptors.
- llvm::DenseMap<const FieldDecl *, Field *> FieldMap;
+ llvm::DenseMap<const FieldDecl *, const Field *> FieldMap;
/// Mapping from declarations to virtual bases.
llvm::DenseMap<const RecordDecl *, Base *> VirtualBaseMap;
/// Size of the structure.
unsigned BaseSize;
/// Size of all virtual bases.
unsigned VirtualSize;
+ /// If this record is a union.
+ bool IsUnion;
};
} // namespace interp
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Source.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Source.cpp
index 4e032c92d26d..45cd0ad4fd42 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Source.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Source.cpp
@@ -33,7 +33,7 @@ SourceRange SourceInfo::getRange() const {
}
const Expr *SourceInfo::asExpr() const {
- if (auto *S = Source.dyn_cast<const Stmt *>())
+ if (const auto *S = Source.dyn_cast<const Stmt *>())
return dyn_cast<Expr>(S);
return nullptr;
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/State.cpp b/contrib/llvm-project/clang/lib/AST/Interp/State.cpp
index 47fbf5145cd4..0d9dadec4b95 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/State.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/State.cpp
@@ -155,7 +155,8 @@ void State::addCallStack(unsigned Limit) {
SmallString<128> Buffer;
llvm::raw_svector_ostream Out(Buffer);
F->describe(Out);
- addDiag(CallRange.getBegin(), diag::note_constexpr_call_here)
- << Out.str() << CallRange;
+ if (!Buffer.empty())
+ addDiag(CallRange.getBegin(), diag::note_constexpr_call_here)
+ << Out.str() << CallRange;
}
}
diff --git a/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp b/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
index 688141b30441..d46d621d4c7d 100644
--- a/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
@@ -99,11 +99,10 @@ public:
}
void mangleCXXName(GlobalDecl GD, raw_ostream &) override;
- void mangleThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk,
+ void mangleThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk, bool,
raw_ostream &) override;
void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
- const ThisAdjustment &ThisAdjustment,
- raw_ostream &) override;
+ const ThunkInfo &Thunk, bool, raw_ostream &) override;
void mangleReferenceTemporary(const VarDecl *D, unsigned ManglingNumber,
raw_ostream &) override;
void mangleCXXVTable(const CXXRecordDecl *RD, raw_ostream &) override;
@@ -468,6 +467,7 @@ public:
void mangleNameOrStandardSubstitution(const NamedDecl *ND);
void mangleLambdaSig(const CXXRecordDecl *Lambda);
void mangleModuleNamePrefix(StringRef Name, bool IsPartition = false);
+ void mangleVendorQualifier(StringRef Name);
private:
@@ -559,7 +559,6 @@ private:
StringRef Prefix = "");
void mangleOperatorName(DeclarationName Name, unsigned Arity);
void mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity);
- void mangleVendorQualifier(StringRef qualifier);
void mangleQualifiers(Qualifiers Quals, const DependentAddressSpaceType *DAST = nullptr);
void mangleRefQualifier(RefQualifierKind RefQualifier);
@@ -962,7 +961,7 @@ bool CXXNameMangler::isStd(const NamespaceDecl *NS) {
if (!Context.getEffectiveParentContext(NS)->isTranslationUnit())
return false;
- const IdentifierInfo *II = NS->getOriginalNamespace()->getIdentifier();
+ const IdentifierInfo *II = NS->getFirstDecl()->getIdentifier();
return II && II->isStr("std");
}
@@ -1062,26 +1061,23 @@ void CXXNameMangler::mangleNameWithAbiTags(GlobalDecl GD,
// ::= <local-name>
//
const DeclContext *DC = Context.getEffectiveDeclContext(ND);
+ bool IsLambda = isLambda(ND);
// If this is an extern variable declared locally, the relevant DeclContext
// is that of the containing namespace, or the translation unit.
// FIXME: This is a hack; extern variables declared locally should have
// a proper semantic declaration context!
- if (isLocalContainerContext(DC) && ND->hasLinkage() && !isLambda(ND))
+ if (isLocalContainerContext(DC) && ND->hasLinkage() && !IsLambda)
while (!DC->isNamespace() && !DC->isTranslationUnit())
DC = Context.getEffectiveParentContext(DC);
- else if (GetLocalClassDecl(ND)) {
+ else if (GetLocalClassDecl(ND) &&
+ (!IsLambda || isCompatibleWith(LangOptions::ClangABI::Ver18))) {
mangleLocalName(GD, AdditionalAbiTags);
return;
}
assert(!isa<LinkageSpecDecl>(DC) && "context cannot be LinkageSpecDecl");
- if (isLocalContainerContext(DC)) {
- mangleLocalName(GD, AdditionalAbiTags);
- return;
- }
-
// Closures can require a nested-name mangling even if they're semantically
// in the global namespace.
if (const NamedDecl *PrefixND = getClosurePrefix(ND)) {
@@ -1089,6 +1085,11 @@ void CXXNameMangler::mangleNameWithAbiTags(GlobalDecl GD,
return;
}
+ if (isLocalContainerContext(DC)) {
+ mangleLocalName(GD, AdditionalAbiTags);
+ return;
+ }
+
if (DC->isTranslationUnit() || isStdNamespace(DC)) {
// Check if we have a template.
const TemplateArgumentList *TemplateArgs = nullptr;
@@ -2201,8 +2202,6 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
if (NoFunction && isLocalContainerContext(DC))
return;
- assert(!isLocalContainerContext(DC));
-
const NamedDecl *ND = cast<NamedDecl>(DC);
if (mangleSubstitution(ND))
return;
@@ -2398,6 +2397,7 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::Complex:
case Type::Adjusted:
case Type::Decayed:
+ case Type::ArrayParameter:
case Type::Pointer:
case Type::BlockPointer:
case Type::LValueReference:
@@ -2431,6 +2431,7 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::MacroQualified:
case Type::BitInt:
case Type::DependentBitInt:
+ case Type::CountAttributed:
llvm_unreachable("type is illegal as a nested name specifier");
case Type::SubstTemplateTypeParmPack:
@@ -2448,6 +2449,7 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::TypeOfExpr:
case Type::TypeOf:
case Type::Decltype:
+ case Type::PackIndexing:
case Type::TemplateTypeParm:
case Type::UnaryTransform:
case Type::SubstTemplateTypeParm:
@@ -3420,6 +3422,12 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
Out << 'u' << type_name.size() << type_name; \
break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
+#define AMDGPU_TYPE(Name, Id, SingletonId) \
+ case BuiltinType::Id: \
+ type_name = Name; \
+ Out << 'u' << type_name.size() << type_name; \
+ break;
+#include "clang/Basic/AMDGPUTypes.def"
}
}
@@ -3442,6 +3450,8 @@ StringRef CXXNameMangler::getCallingConvQualifierName(CallingConv CC) {
case CC_PreserveMost:
case CC_PreserveAll:
case CC_M68kRTD:
+ case CC_PreserveNone:
+ case CC_RISCVVectorCall:
// FIXME: we should be mangling all of the above.
return "";
@@ -4202,6 +4212,13 @@ void CXXNameMangler::mangleType(const PackExpansionType *T) {
mangleType(T->getPattern());
}
+void CXXNameMangler::mangleType(const PackIndexingType *T) {
+ if (!T->hasSelectedType())
+ mangleType(T->getPattern());
+ else
+ mangleType(T->getSelectedType());
+}
+
void CXXNameMangler::mangleType(const ObjCInterfaceType *T) {
mangleSourceName(T->getDecl()->getIdentifier());
}
@@ -4435,6 +4452,10 @@ void CXXNameMangler::mangleType(const DependentBitIntType *T) {
Out << "_";
}
+void CXXNameMangler::mangleType(const ArrayParameterType *T) {
+ mangleType(cast<ConstantArrayType>(T));
+}
+
void CXXNameMangler::mangleIntegerLiteral(QualType T,
const llvm::APSInt &Value) {
// <expr-primary> ::= L <type> <value number> E # integer literal
@@ -4699,11 +4720,12 @@ recurse:
case Expr::MSPropertySubscriptExprClass:
case Expr::TypoExprClass: // This should no longer exist in the AST by now.
case Expr::RecoveryExprClass:
- case Expr::OMPArraySectionExprClass:
+ case Expr::ArraySectionExprClass:
case Expr::OMPArrayShapingExprClass:
case Expr::OMPIteratorExprClass:
case Expr::CXXInheritedCtorInitExprClass:
case Expr::CXXParenListInitExprClass:
+ case Expr::PackIndexingExprClass:
llvm_unreachable("unexpected statement kind");
case Expr::ConstantExprClass:
@@ -4743,6 +4765,7 @@ recurse:
case Expr::PseudoObjectExprClass:
case Expr::AtomicExprClass:
case Expr::SourceLocExprClass:
+ case Expr::EmbedExprClass:
case Expr::BuiltinBitCastExprClass:
{
NotPrimaryExpr();
@@ -5156,6 +5179,14 @@ recurse:
Diags.Report(DiagID);
return;
}
+ case UETT_PtrAuthTypeDiscriminator: {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "cannot yet mangle __builtin_ptrauth_type_discriminator expression");
+ Diags.Report(E->getExprLoc(), DiagID);
+ return;
+ }
case UETT_VecStep: {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
@@ -6159,7 +6190,7 @@ static bool isZeroInitialized(QualType T, const APValue &V) {
}
I = 0;
for (const FieldDecl *FD : RD->fields()) {
- if (!FD->isUnnamedBitfield() &&
+ if (!FD->isUnnamedBitField() &&
!isZeroInitialized(FD->getType(), V.getStructField(I)))
return false;
++I;
@@ -6172,7 +6203,7 @@ static bool isZeroInitialized(QualType T, const APValue &V) {
assert(RD && "unexpected type for union value");
// Zero-initialization zeroes the first non-unnamed-bitfield field, if any.
for (const FieldDecl *FD : RD->fields()) {
- if (!FD->isUnnamedBitfield())
+ if (!FD->isUnnamedBitField())
return V.getUnionField() && declaresSameEntity(FD, V.getUnionField()) &&
isZeroInitialized(FD->getType(), V.getUnionValue());
}
@@ -6314,7 +6345,7 @@ void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V,
llvm::SmallVector<const FieldDecl *, 16> Fields(RD->fields());
while (
!Fields.empty() &&
- (Fields.back()->isUnnamedBitfield() ||
+ (Fields.back()->isUnnamedBitField() ||
isZeroInitialized(Fields.back()->getType(),
V.getStructField(Fields.back()->getFieldIndex())))) {
Fields.pop_back();
@@ -6334,7 +6365,7 @@ void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V,
for (unsigned I = 0, N = Bases.size(); I != N; ++I)
mangleValueInTemplateArg(Bases[I].getType(), V.getStructBase(I), false);
for (unsigned I = 0, N = Fields.size(); I != N; ++I) {
- if (Fields[I]->isUnnamedBitfield())
+ if (Fields[I]->isUnnamedBitField())
continue;
mangleValueInTemplateArg(Fields[I]->getType(),
V.getStructField(Fields[I]->getFieldIndex()),
@@ -7020,8 +7051,78 @@ void ItaniumMangleContextImpl::mangleCXXDtorComdat(const CXXDestructorDecl *D,
Mangler.mangle(GlobalDecl(D, Dtor_Comdat));
}
+/// Mangles the pointer authentication override attribute for classes
+/// that have explicit overrides for the vtable authentication schema.
+///
+/// The override is mangled as a parameterized vendor extension as follows
+///
+/// <type> ::= U "__vtptrauth" I
+/// <key>
+/// <addressDiscriminated>
+/// <extraDiscriminator>
+/// E
+///
+/// The extra discriminator encodes the explicit value derived from the
+/// override schema, e.g. if the override has specified type based
+/// discrimination the encoded value will be the discriminator derived from the
+/// type name.
+static void mangleOverrideDiscrimination(CXXNameMangler &Mangler,
+ ASTContext &Context,
+ const ThunkInfo &Thunk) {
+ auto &LangOpts = Context.getLangOpts();
+ const CXXRecordDecl *ThisRD = Thunk.ThisType->getPointeeCXXRecordDecl();
+ const CXXRecordDecl *PtrauthClassRD =
+ Context.baseForVTableAuthentication(ThisRD);
+ unsigned TypedDiscriminator =
+ Context.getPointerAuthVTablePointerDiscriminator(ThisRD);
+ Mangler.mangleVendorQualifier("__vtptrauth");
+ auto &ManglerStream = Mangler.getStream();
+ ManglerStream << "I";
+ if (const auto *ExplicitAuth =
+ PtrauthClassRD->getAttr<VTablePointerAuthenticationAttr>()) {
+ ManglerStream << "Lj" << ExplicitAuth->getKey();
+
+ if (ExplicitAuth->getAddressDiscrimination() ==
+ VTablePointerAuthenticationAttr::DefaultAddressDiscrimination)
+ ManglerStream << "Lb" << LangOpts.PointerAuthVTPtrAddressDiscrimination;
+ else
+ ManglerStream << "Lb"
+ << (ExplicitAuth->getAddressDiscrimination() ==
+ VTablePointerAuthenticationAttr::AddressDiscrimination);
+
+ switch (ExplicitAuth->getExtraDiscrimination()) {
+ case VTablePointerAuthenticationAttr::DefaultExtraDiscrimination: {
+ if (LangOpts.PointerAuthVTPtrTypeDiscrimination)
+ ManglerStream << "Lj" << TypedDiscriminator;
+ else
+ ManglerStream << "Lj" << 0;
+ break;
+ }
+ case VTablePointerAuthenticationAttr::TypeDiscrimination:
+ ManglerStream << "Lj" << TypedDiscriminator;
+ break;
+ case VTablePointerAuthenticationAttr::CustomDiscrimination:
+ ManglerStream << "Lj" << ExplicitAuth->getCustomDiscriminationValue();
+ break;
+ case VTablePointerAuthenticationAttr::NoExtraDiscrimination:
+ ManglerStream << "Lj" << 0;
+ break;
+ }
+ } else {
+ ManglerStream << "Lj"
+ << (unsigned)VTablePointerAuthenticationAttr::DefaultKey;
+ ManglerStream << "Lb" << LangOpts.PointerAuthVTPtrAddressDiscrimination;
+ if (LangOpts.PointerAuthVTPtrTypeDiscrimination)
+ ManglerStream << "Lj" << TypedDiscriminator;
+ else
+ ManglerStream << "Lj" << 0;
+ }
+ ManglerStream << "E";
+}
+
void ItaniumMangleContextImpl::mangleThunk(const CXXMethodDecl *MD,
const ThunkInfo &Thunk,
+ bool ElideOverrideInfo,
raw_ostream &Out) {
// <special-name> ::= T <call-offset> <base encoding>
// # base is the nominal target function of thunk
@@ -7047,21 +7148,28 @@ void ItaniumMangleContextImpl::mangleThunk(const CXXMethodDecl *MD,
Thunk.Return.Virtual.Itanium.VBaseOffsetOffset);
Mangler.mangleFunctionEncoding(MD);
+ if (!ElideOverrideInfo)
+ mangleOverrideDiscrimination(Mangler, getASTContext(), Thunk);
}
-void ItaniumMangleContextImpl::mangleCXXDtorThunk(
- const CXXDestructorDecl *DD, CXXDtorType Type,
- const ThisAdjustment &ThisAdjustment, raw_ostream &Out) {
+void ItaniumMangleContextImpl::mangleCXXDtorThunk(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const ThunkInfo &Thunk,
+ bool ElideOverrideInfo,
+ raw_ostream &Out) {
// <special-name> ::= T <call-offset> <base encoding>
// # base is the nominal target function of thunk
CXXNameMangler Mangler(*this, Out, DD, Type);
Mangler.getStream() << "_ZT";
+ auto &ThisAdjustment = Thunk.This;
// Mangle the 'this' pointer adjustment.
Mangler.mangleCallOffset(ThisAdjustment.NonVirtual,
ThisAdjustment.Virtual.Itanium.VCallOffsetOffset);
Mangler.mangleFunctionEncoding(GlobalDecl(DD, Type));
+ if (!ElideOverrideInfo)
+ mangleOverrideDiscrimination(Mangler, getASTContext(), Thunk);
}
/// Returns the mangled name for a guard variable for the passed in VarDecl.
diff --git a/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp b/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp
index 3c11b75d7472..eeb314b8d32b 100644
--- a/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp
+++ b/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp
@@ -77,7 +77,7 @@ void JSONNodeDumper::Visit(const Type *T) {
return;
JOS.attribute("kind", (llvm::Twine(T->getTypeClassName()) + "Type").str());
- JOS.attribute("type", createQualType(QualType(T, 0), /*Desugar*/ false));
+ JOS.attribute("type", createQualType(QualType(T, 0), /*Desugar=*/false));
attributeOnlyIfTrue("containsErrors", T->containsErrors());
attributeOnlyIfTrue("isDependent", T->isDependentType());
attributeOnlyIfTrue("isInstantiationDependent",
@@ -96,6 +96,21 @@ void JSONNodeDumper::Visit(QualType T) {
JOS.attribute("qualifiers", T.split().Quals.getAsString());
}
+void JSONNodeDumper::Visit(TypeLoc TL) {
+ if (TL.isNull())
+ return;
+ JOS.attribute("kind",
+ (llvm::Twine(TL.getTypeLocClass() == TypeLoc::Qualified
+ ? "Qualified"
+ : TL.getTypePtr()->getTypeClassName()) +
+ "TypeLoc")
+ .str());
+ JOS.attribute("type",
+ createQualType(QualType(TL.getType()), /*Desugar=*/false));
+ JOS.attributeObject("range",
+ [TL, this] { writeSourceRange(TL.getSourceRange()); });
+}
+
void JSONNodeDumper::Visit(const Decl *D) {
JOS.attribute("id", createPointerRepresentation(D));
@@ -172,6 +187,8 @@ void JSONNodeDumper::Visit(const CXXCtorInitializer *Init) {
llvm_unreachable("Unknown initializer type");
}
+void JSONNodeDumper::Visit(const OpenACCClause *C) {}
+
void JSONNodeDumper::Visit(const OMPClause *C) {}
void JSONNodeDumper::Visit(const BlockDecl::Capture &C) {
@@ -220,7 +237,23 @@ void JSONNodeDumper::Visit(const APValue &Value, QualType Ty) {
std::string Str;
llvm::raw_string_ostream OS(Str);
Value.printPretty(OS, Ctx, Ty);
- JOS.attribute("value", OS.str());
+ JOS.attribute("value", Str);
+}
+
+void JSONNodeDumper::Visit(const ConceptReference *CR) {
+ JOS.attribute("kind", "ConceptReference");
+ JOS.attribute("id", createPointerRepresentation(CR->getNamedConcept()));
+ if (const auto *Args = CR->getTemplateArgsAsWritten()) {
+ JOS.attributeArray("templateArgsAsWritten", [Args, this] {
+ for (const TemplateArgumentLoc &TAL : Args->arguments())
+ JOS.object(
+ [&TAL, this] { Visit(TAL.getArgument(), TAL.getSourceRange()); });
+ });
+ }
+ JOS.attributeObject("loc",
+ [CR, this] { writeSourceLocation(CR->getLocation()); });
+ JOS.attributeObject("range",
+ [CR, this] { writeSourceRange(CR->getSourceRange()); });
}
void JSONNodeDumper::writeIncludeStack(PresumedLoc Loc, bool JustFirst) {
@@ -664,7 +697,7 @@ void JSONNodeDumper::VisitArrayType(const ArrayType *AT) {
void JSONNodeDumper::VisitConstantArrayType(const ConstantArrayType *CAT) {
// FIXME: this should use ZExt instead of SExt, but JSON doesn't allow a
// narrowing conversion to int64_t so it cannot be expressed.
- JOS.attribute("size", CAT->getSize().getSExtValue());
+ JOS.attribute("size", CAT->getSExtSize());
VisitArrayType(CAT);
}
@@ -769,7 +802,7 @@ void JSONNodeDumper::VisitTemplateSpecializationType(
std::string Str;
llvm::raw_string_ostream OS(Str);
TST->getTemplateName().print(OS, PrintPolicy);
- JOS.attribute("templateName", OS.str());
+ JOS.attribute("templateName", Str);
}
void JSONNodeDumper::VisitInjectedClassNameType(
@@ -791,7 +824,7 @@ void JSONNodeDumper::VisitElaboratedType(const ElaboratedType *ET) {
std::string Str;
llvm::raw_string_ostream OS(Str);
NNS->print(OS, PrintPolicy, /*ResolveTemplateArgs*/ true);
- JOS.attribute("qualifier", OS.str());
+ JOS.attribute("qualifier", Str);
}
if (const TagDecl *TD = ET->getOwnedTagDecl())
JOS.attribute("ownedTagDecl", createBareDeclRef(TD));
@@ -850,9 +883,8 @@ void JSONNodeDumper::VisitNamespaceDecl(const NamespaceDecl *ND) {
VisitNamedDecl(ND);
attributeOnlyIfTrue("isInline", ND->isInline());
attributeOnlyIfTrue("isNested", ND->isNested());
- if (!ND->isOriginalNamespace())
- JOS.attribute("originalNamespace",
- createBareDeclRef(ND->getOriginalNamespace()));
+ if (!ND->isFirstDecl())
+ JOS.attribute("originalNamespace", createBareDeclRef(ND->getFirstDecl()));
}
void JSONNodeDumper::VisitUsingDirectiveDecl(const UsingDirectiveDecl *UDD) {
@@ -942,6 +974,9 @@ void JSONNodeDumper::VisitFunctionDecl(const FunctionDecl *FD) {
if (FD->isDefaulted())
JOS.attribute("explicitlyDefaulted",
FD->isDeleted() ? "deleted" : "default");
+
+ if (StringLiteral *Msg = FD->getDeletedMessage())
+ JOS.attribute("deletedMessage", Msg->getString());
}
void JSONNodeDumper::VisitEnumDecl(const EnumDecl *ED) {
@@ -992,7 +1027,7 @@ void JSONNodeDumper::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) {
if (D->hasDefaultArgument())
JOS.attributeObject("defaultArg", [=] {
- Visit(D->getDefaultArgument(), SourceRange(),
+ Visit(D->getDefaultArgument().getArgument(), SourceRange(),
D->getDefaultArgStorage().getInheritedFrom(),
D->defaultArgumentWasInherited() ? "inherited from" : "previous");
});
@@ -1008,7 +1043,7 @@ void JSONNodeDumper::VisitNonTypeTemplateParmDecl(
if (D->hasDefaultArgument())
JOS.attributeObject("defaultArg", [=] {
- Visit(D->getDefaultArgument(), SourceRange(),
+ Visit(D->getDefaultArgument().getArgument(), SourceRange(),
D->getDefaultArgStorage().getInheritedFrom(),
D->defaultArgumentWasInherited() ? "inherited from" : "previous");
});
@@ -1210,7 +1245,7 @@ void JSONNodeDumper::VisitObjCMessageExpr(const ObjCMessageExpr *OME) {
llvm::raw_string_ostream OS(Str);
OME->getSelector().print(OS);
- JOS.attribute("selector", OS.str());
+ JOS.attribute("selector", Str);
switch (OME->getReceiverKind()) {
case ObjCMessageExpr::Instance:
@@ -1241,7 +1276,7 @@ void JSONNodeDumper::VisitObjCBoxedExpr(const ObjCBoxedExpr *OBE) {
llvm::raw_string_ostream OS(Str);
MD->getSelector().print(OS);
- JOS.attribute("selector", OS.str());
+ JOS.attribute("selector", Str);
}
}
@@ -1250,7 +1285,7 @@ void JSONNodeDumper::VisitObjCSelectorExpr(const ObjCSelectorExpr *OSE) {
llvm::raw_string_ostream OS(Str);
OSE->getSelector().print(OS);
- JOS.attribute("selector", OS.str());
+ JOS.attribute("selector", Str);
}
void JSONNodeDumper::VisitObjCProtocolExpr(const ObjCProtocolExpr *OPE) {
@@ -1543,6 +1578,14 @@ void JSONNodeDumper::VisitMaterializeTemporaryExpr(
attributeOnlyIfTrue("boundToLValueRef", MTE->isBoundToLvalueReference());
}
+void JSONNodeDumper::VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *Node) {
+ attributeOnlyIfTrue("hasRewrittenInit", Node->hasRewrittenInit());
+}
+
+void JSONNodeDumper::VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *Node) {
+ attributeOnlyIfTrue("hasRewrittenInit", Node->hasRewrittenInit());
+}
+
void JSONNodeDumper::VisitCXXDependentScopeMemberExpr(
const CXXDependentScopeMemberExpr *DSME) {
JOS.attribute("isArrow", DSME->isArrow());
@@ -1590,7 +1633,7 @@ void JSONNodeDumper::VisitStringLiteral(const StringLiteral *SL) {
std::string Buffer;
llvm::raw_string_ostream SS(Buffer);
SL->outputString(SS);
- JOS.attribute("value", SS.str());
+ JOS.attribute("value", Buffer);
}
void JSONNodeDumper::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *BLE) {
JOS.attribute("value", BLE->getValue());
diff --git a/contrib/llvm-project/clang/lib/AST/Linkage.h b/contrib/llvm-project/clang/lib/AST/Linkage.h
index 31f384eb75d0..e4dcb5e53261 100644
--- a/contrib/llvm-project/clang/lib/AST/Linkage.h
+++ b/contrib/llvm-project/clang/lib/AST/Linkage.h
@@ -29,12 +29,15 @@ namespace clang {
struct LVComputationKind {
/// The kind of entity whose visibility is ultimately being computed;
/// visibility computations for types and non-types follow different rules.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ExplicitKind : 1;
/// Whether explicit visibility attributes should be ignored. When set,
/// visibility may only be restricted by the visibility of template arguments.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IgnoreExplicitVisibility : 1;
/// Whether all visibility should be ignored. When set, we're only interested
/// in computing linkage.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IgnoreAllVisibility : 1;
enum { NumLVComputationKindBits = 3 };
diff --git a/contrib/llvm-project/clang/lib/AST/Mangle.cpp b/contrib/llvm-project/clang/lib/AST/Mangle.cpp
index 30cff1ba2e6f..75f6e2161a63 100644
--- a/contrib/llvm-project/clang/lib/AST/Mangle.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Mangle.cpp
@@ -301,9 +301,8 @@ void MangleContext::mangleBlock(const DeclContext *DC, const BlockDecl *BD,
} else {
assert((isa<NamedDecl>(DC) || isa<BlockDecl>(DC)) &&
"expected a NamedDecl or BlockDecl");
- if (isa<BlockDecl>(DC))
- for (; DC && isa<BlockDecl>(DC); DC = DC->getParent())
- (void) getBlockId(cast<BlockDecl>(DC), true);
+ for (; isa_and_nonnull<BlockDecl>(DC); DC = DC->getParent())
+ (void)getBlockId(cast<BlockDecl>(DC), true);
assert((isa<TranslationUnitDecl>(DC) || isa<NamedDecl>(DC)) &&
"expected a TranslationUnitDecl or a NamedDecl");
if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
@@ -514,10 +513,20 @@ public:
}
} else if (const auto *MD = dyn_cast_or_null<CXXMethodDecl>(ND)) {
Manglings.emplace_back(getName(ND));
- if (MD->isVirtual())
- if (const auto *TIV = Ctx.getVTableContext()->getThunkInfo(MD))
- for (const auto &T : *TIV)
- Manglings.emplace_back(getMangledThunk(MD, T));
+ if (MD->isVirtual()) {
+ if (const auto *TIV = Ctx.getVTableContext()->getThunkInfo(MD)) {
+ for (const auto &T : *TIV) {
+ std::string ThunkName;
+ std::string ContextualizedName =
+ getMangledThunk(MD, T, /* ElideOverrideInfo */ false);
+ if (Ctx.useAbbreviatedThunkName(MD, ContextualizedName))
+ ThunkName = getMangledThunk(MD, T, /* ElideOverrideInfo */ true);
+ else
+ ThunkName = ContextualizedName;
+ Manglings.emplace_back(ThunkName);
+ }
+ }
+ }
}
return Manglings;
@@ -570,11 +579,12 @@ private:
return BOS.str();
}
- std::string getMangledThunk(const CXXMethodDecl *MD, const ThunkInfo &T) {
+ std::string getMangledThunk(const CXXMethodDecl *MD, const ThunkInfo &T,
+ bool ElideOverrideInfo) {
std::string FrontendBuf;
llvm::raw_string_ostream FOS(FrontendBuf);
- MC->mangleThunk(MD, T, FOS);
+ MC->mangleThunk(MD, T, ElideOverrideInfo, FOS);
std::string BackendBuf;
llvm::raw_string_ostream BOS(BackendBuf);
diff --git a/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp b/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp
index 36b5bf64f675..4016043df62e 100644
--- a/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp
+++ b/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp
@@ -159,9 +159,9 @@ public:
const MethodVFTableLocation &ML,
raw_ostream &Out) override;
void mangleThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk,
- raw_ostream &) override;
+ bool ElideOverrideInfo, raw_ostream &) override;
void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
- const ThisAdjustment &ThisAdjustment,
+ const ThunkInfo &Thunk, bool ElideOverrideInfo,
raw_ostream &) override;
void mangleCXXVFTable(const CXXRecordDecl *Derived,
ArrayRef<const CXXRecordDecl *> BasePath,
@@ -169,6 +169,8 @@ public:
void mangleCXXVBTable(const CXXRecordDecl *Derived,
ArrayRef<const CXXRecordDecl *> BasePath,
raw_ostream &Out) override;
+
+ void mangleCXXVTable(const CXXRecordDecl *, raw_ostream &) override;
void mangleCXXVirtualDisplacementMap(const CXXRecordDecl *SrcRD,
const CXXRecordDecl *DstRD,
raw_ostream &Out) override;
@@ -366,12 +368,21 @@ public:
void mangleFunctionEncoding(GlobalDecl GD, bool ShouldMangle);
void mangleVariableEncoding(const VarDecl *VD);
void mangleMemberDataPointer(const CXXRecordDecl *RD, const ValueDecl *VD,
+ const NonTypeTemplateParmDecl *PD,
+ QualType TemplateArgType,
StringRef Prefix = "$");
void mangleMemberDataPointerInClassNTTP(const CXXRecordDecl *,
const ValueDecl *);
void mangleMemberFunctionPointer(const CXXRecordDecl *RD,
const CXXMethodDecl *MD,
+ const NonTypeTemplateParmDecl *PD,
+ QualType TemplateArgType,
StringRef Prefix = "$");
+ void mangleFunctionPointer(const FunctionDecl *FD,
+ const NonTypeTemplateParmDecl *PD,
+ QualType TemplateArgType);
+ void mangleVarDecl(const VarDecl *VD, const NonTypeTemplateParmDecl *PD,
+ QualType TemplateArgType);
void mangleMemberFunctionPointerInClassNTTP(const CXXRecordDecl *RD,
const CXXMethodDecl *MD);
void mangleVirtualMemPtrThunk(const CXXMethodDecl *MD,
@@ -390,6 +401,7 @@ public:
const FunctionDecl *D = nullptr,
bool ForceThisQuals = false,
bool MangleExceptionSpec = true);
+ void mangleSourceName(StringRef Name);
void mangleNestedName(GlobalDecl GD);
private:
@@ -408,7 +420,6 @@ private:
mangleUnqualifiedName(GD, cast<NamedDecl>(GD.getDecl())->getDeclName());
}
void mangleUnqualifiedName(GlobalDecl GD, DeclarationName Name);
- void mangleSourceName(StringRef Name);
void mangleOperatorName(OverloadedOperatorKind OO, SourceLocation Loc);
void mangleCXXDtorType(CXXDtorType T);
void mangleQualifiers(Qualifiers Quals, bool IsMember);
@@ -442,8 +453,8 @@ private:
void mangleDecayedArrayType(const ArrayType *T);
void mangleArrayType(const ArrayType *T);
void mangleFunctionClass(const FunctionDecl *FD);
- void mangleCallingConvention(CallingConv CC);
- void mangleCallingConvention(const FunctionType *T);
+ void mangleCallingConvention(CallingConv CC, SourceRange Range);
+ void mangleCallingConvention(const FunctionType *T, SourceRange Range);
void mangleIntegerLiteral(const llvm::APSInt &Number,
const NonTypeTemplateParmDecl *PD = nullptr,
QualType TemplateArgType = QualType());
@@ -666,12 +677,17 @@ void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) {
}
}
-void MicrosoftCXXNameMangler::mangleMemberDataPointer(const CXXRecordDecl *RD,
- const ValueDecl *VD,
- StringRef Prefix) {
+void MicrosoftCXXNameMangler::mangleMemberDataPointer(
+ const CXXRecordDecl *RD, const ValueDecl *VD,
+ const NonTypeTemplateParmDecl *PD, QualType TemplateArgType,
+ StringRef Prefix) {
// <member-data-pointer> ::= <integer-literal>
// ::= $F <number> <number>
// ::= $G <number> <number> <number>
+ //
+ // <auto-nttp> ::= $ M <type> <integer-literal>
+ // <auto-nttp> ::= $ M <type> F <name> <number>
+ // <auto-nttp> ::= $ M <type> G <name> <number> <number>
int64_t FieldOffset;
int64_t VBTableOffset;
@@ -700,7 +716,18 @@ void MicrosoftCXXNameMangler::mangleMemberDataPointer(const CXXRecordDecl *RD,
case MSInheritanceModel::Unspecified: Code = 'G'; break;
}
- Out << Prefix << Code;
+ Out << Prefix;
+
+ if (VD &&
+ getASTContext().getLangOpts().isCompatibleWithMSVC(
+ LangOptions::MSVC2019) &&
+ PD && PD->getType()->getTypeClass() == Type::Auto &&
+ !TemplateArgType.isNull()) {
+ Out << "M";
+ mangleType(TemplateArgType, SourceRange(), QMM_Drop);
+ }
+
+ Out << Code;
mangleNumber(FieldOffset);
@@ -721,7 +748,7 @@ void MicrosoftCXXNameMangler::mangleMemberDataPointerInClassNTTP(
// ::= 8 <postfix> @ <unqualified-name> @
if (IM != MSInheritanceModel::Single && IM != MSInheritanceModel::Multiple)
- return mangleMemberDataPointer(RD, VD, "");
+ return mangleMemberDataPointer(RD, VD, nullptr, QualType(), "");
if (!VD) {
Out << 'N';
@@ -735,14 +762,19 @@ void MicrosoftCXXNameMangler::mangleMemberDataPointerInClassNTTP(
Out << '@';
}
-void
-MicrosoftCXXNameMangler::mangleMemberFunctionPointer(const CXXRecordDecl *RD,
- const CXXMethodDecl *MD,
- StringRef Prefix) {
+void MicrosoftCXXNameMangler::mangleMemberFunctionPointer(
+ const CXXRecordDecl *RD, const CXXMethodDecl *MD,
+ const NonTypeTemplateParmDecl *PD, QualType TemplateArgType,
+ StringRef Prefix) {
// <member-function-pointer> ::= $1? <name>
// ::= $H? <name> <number>
// ::= $I? <name> <number> <number>
// ::= $J? <name> <number> <number> <number>
+ //
+ // <auto-nttp> ::= $ M <type> 1? <name>
+ // <auto-nttp> ::= $ M <type> H? <name> <number>
+ // <auto-nttp> ::= $ M <type> I? <name> <number> <number>
+ // <auto-nttp> ::= $ M <type> J? <name> <number> <number> <number>
MSInheritanceModel IM = RD->getMSInheritanceModel();
@@ -760,7 +792,17 @@ MicrosoftCXXNameMangler::mangleMemberFunctionPointer(const CXXRecordDecl *RD,
uint64_t VBTableOffset = 0;
uint64_t VBPtrOffset = 0;
if (MD) {
- Out << Prefix << Code << '?';
+ Out << Prefix;
+
+ if (getASTContext().getLangOpts().isCompatibleWithMSVC(
+ LangOptions::MSVC2019) &&
+ PD && PD->getType()->getTypeClass() == Type::Auto &&
+ !TemplateArgType.isNull()) {
+ Out << "M";
+ mangleType(TemplateArgType, SourceRange(), QMM_Drop);
+ }
+
+ Out << Code << '?';
if (MD->isVirtual()) {
MicrosoftVTableContext *VTContext =
cast<MicrosoftVTableContext>(getASTContext().getVTableContext());
@@ -799,6 +841,50 @@ MicrosoftCXXNameMangler::mangleMemberFunctionPointer(const CXXRecordDecl *RD,
mangleNumber(VBTableOffset);
}
+void MicrosoftCXXNameMangler::mangleFunctionPointer(
+ const FunctionDecl *FD, const NonTypeTemplateParmDecl *PD,
+ QualType TemplateArgType) {
+ // <func-ptr> ::= $1? <mangled-name>
+ // <func-ptr> ::= <auto-nttp>
+ //
+ // <auto-nttp> ::= $ M <type> 1? <mangled-name>
+ Out << '$';
+
+ if (getASTContext().getLangOpts().isCompatibleWithMSVC(
+ LangOptions::MSVC2019) &&
+ PD && PD->getType()->getTypeClass() == Type::Auto &&
+ !TemplateArgType.isNull()) {
+ Out << "M";
+ mangleType(TemplateArgType, SourceRange(), QMM_Drop);
+ }
+
+ Out << "1?";
+ mangleName(FD);
+ mangleFunctionEncoding(FD, /*ShouldMangle=*/true);
+}
+
+void MicrosoftCXXNameMangler::mangleVarDecl(const VarDecl *VD,
+ const NonTypeTemplateParmDecl *PD,
+ QualType TemplateArgType) {
+ // <var-ptr> ::= $1? <mangled-name>
+ // <var-ptr> ::= <auto-nttp>
+ //
+ // <auto-nttp> ::= $ M <type> 1? <mangled-name>
+ Out << '$';
+
+ if (getASTContext().getLangOpts().isCompatibleWithMSVC(
+ LangOptions::MSVC2019) &&
+ PD && PD->getType()->getTypeClass() == Type::Auto &&
+ !TemplateArgType.isNull()) {
+ Out << "M";
+ mangleType(TemplateArgType, SourceRange(), QMM_Drop);
+ }
+
+ Out << "1?";
+ mangleName(VD);
+ mangleVariableEncoding(VD);
+}
+
void MicrosoftCXXNameMangler::mangleMemberFunctionPointerInClassNTTP(
const CXXRecordDecl *RD, const CXXMethodDecl *MD) {
// <nttp-class-member-function-pointer> ::= <member-function-pointer>
@@ -808,7 +894,7 @@ void MicrosoftCXXNameMangler::mangleMemberFunctionPointerInClassNTTP(
if (!MD) {
if (RD->getMSInheritanceModel() != MSInheritanceModel::Single)
- return mangleMemberFunctionPointer(RD, MD, "");
+ return mangleMemberFunctionPointer(RD, MD, nullptr, QualType(), "");
Out << 'N';
return;
@@ -839,7 +925,8 @@ void MicrosoftCXXNameMangler::mangleVirtualMemPtrThunk(
Out << "$B";
mangleNumber(OffsetInVFTable);
Out << 'A';
- mangleCallingConvention(MD->getType()->castAs<FunctionProtoType>());
+ mangleCallingConvention(MD->getType()->castAs<FunctionProtoType>(),
+ MD->getSourceRange());
}
void MicrosoftCXXNameMangler::mangleName(GlobalDecl GD) {
@@ -894,11 +981,15 @@ void MicrosoftCXXNameMangler::mangleFloat(llvm::APFloat Number) {
case APFloat::S_IEEEquad: Out << 'Y'; break;
case APFloat::S_PPCDoubleDouble: Out << 'Z'; break;
case APFloat::S_Float8E5M2:
+ case APFloat::S_Float8E4M3:
case APFloat::S_Float8E4M3FN:
case APFloat::S_Float8E5M2FNUZ:
case APFloat::S_Float8E4M3FNUZ:
case APFloat::S_Float8E4M3B11FNUZ:
case APFloat::S_FloatTF32:
+ case APFloat::S_Float6E3M2FN:
+ case APFloat::S_Float6E2M3FN:
+ case APFloat::S_Float4E2M1FN:
llvm_unreachable("Tried to mangle unexpected APFloat semantics");
}
@@ -1552,6 +1643,9 @@ void MicrosoftCXXNameMangler::mangleIntegerLiteral(
const llvm::APSInt &Value, const NonTypeTemplateParmDecl *PD,
QualType TemplateArgType) {
// <integer-literal> ::= $0 <number>
+ // <integer-literal> ::= <auto-nttp>
+ //
+ // <auto-nttp> ::= $ M <type> 0 <number>
Out << "$";
// Since MSVC 2019, add 'M[<type>]' after '$' for auto template parameter when
@@ -1629,8 +1723,11 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
// ::= <member-data-pointer>
// ::= <member-function-pointer>
// ::= $ <constant-value>
+ // ::= $ <auto-nttp-constant-value>
// ::= <template-args>
//
+ // <auto-nttp-constant-value> ::= M <type> <constant-value>
+ //
// <constant-value> ::= 0 <number> # integer
// ::= 1 <mangled-name> # address of D
// ::= 2 <type> <typed-constant-value>* @ # struct
@@ -1671,22 +1768,27 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
if (isa<FieldDecl>(ND) || isa<IndirectFieldDecl>(ND)) {
mangleMemberDataPointer(cast<CXXRecordDecl>(ND->getDeclContext())
->getMostRecentNonInjectedDecl(),
- cast<ValueDecl>(ND));
+ cast<ValueDecl>(ND),
+ cast<NonTypeTemplateParmDecl>(Parm),
+ TA.getParamTypeForDecl());
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
if (MD && MD->isInstance()) {
mangleMemberFunctionPointer(
- MD->getParent()->getMostRecentNonInjectedDecl(), MD);
+ MD->getParent()->getMostRecentNonInjectedDecl(), MD,
+ cast<NonTypeTemplateParmDecl>(Parm), TA.getParamTypeForDecl());
} else {
- Out << "$1?";
- mangleName(FD);
- mangleFunctionEncoding(FD, /*ShouldMangle=*/true);
+ mangleFunctionPointer(FD, cast<NonTypeTemplateParmDecl>(Parm),
+ TA.getParamTypeForDecl());
}
} else if (TA.getParamTypeForDecl()->isRecordType()) {
Out << "$";
auto *TPO = cast<TemplateParamObjectDecl>(ND);
mangleTemplateArgValue(TPO->getType().getUnqualifiedType(),
TPO->getValue(), TplArgKind::ClassNTTP);
+ } else if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
+ mangleVarDecl(VD, cast<NonTypeTemplateParmDecl>(Parm),
+ TA.getParamTypeForDecl());
} else {
mangle(ND, "$1?");
}
@@ -1704,12 +1806,12 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
if (MPT->isMemberFunctionPointerType() &&
!isa<FunctionTemplateDecl>(TD)) {
- mangleMemberFunctionPointer(RD, nullptr);
+ mangleMemberFunctionPointer(RD, nullptr, nullptr, QualType());
return;
}
if (MPT->isMemberDataPointer()) {
if (!isa<FunctionTemplateDecl>(TD)) {
- mangleMemberDataPointer(RD, nullptr);
+ mangleMemberDataPointer(RD, nullptr, nullptr, QualType());
return;
}
// nullptr data pointers are always represented with a single field
@@ -1916,9 +2018,10 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T,
cast_or_null<CXXMethodDecl>(D));
} else {
if (T->isMemberDataPointerType())
- mangleMemberDataPointer(RD, D, "");
+ mangleMemberDataPointer(RD, D, nullptr, QualType(), "");
else
- mangleMemberFunctionPointer(RD, cast_or_null<CXXMethodDecl>(D), "");
+ mangleMemberFunctionPointer(RD, cast_or_null<CXXMethodDecl>(D), nullptr,
+ QualType(), "");
}
return;
}
@@ -1933,7 +2036,7 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T,
for (const CXXBaseSpecifier &B : RD->bases())
mangleTemplateArgValue(B.getType(), V.getStructBase(BaseIndex++), TAK);
for (const FieldDecl *FD : RD->fields())
- if (!FD->isUnnamedBitfield())
+ if (!FD->isUnnamedBitField())
mangleTemplateArgValue(FD->getType(),
V.getStructField(FD->getFieldIndex()), TAK,
/*WithScalarType*/ true);
@@ -2609,6 +2712,8 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
+#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/AMDGPUTypes.def"
case BuiltinType::ShortAccum:
case BuiltinType::Accum:
case BuiltinType::LongAccum:
@@ -2706,7 +2811,7 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
mangleQualifiers(Quals, /*IsMember=*/false);
}
- mangleCallingConvention(CC);
+ mangleCallingConvention(CC, Range);
// <return-type> ::= <type>
// ::= @ # structors (they have no declared return type)
@@ -2748,7 +2853,7 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
return;
}
Out << '@';
- } else if (IsInLambda && D && isa<CXXConversionDecl>(D)) {
+ } else if (IsInLambda && isa_and_nonnull<CXXConversionDecl>(D)) {
// The only lambda conversion operators are to function pointers, which
// can differ by their calling convention and are typically deduced. So
// we make sure that this type gets mangled properly.
@@ -2887,7 +2992,8 @@ void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) {
Out << 'Y';
}
}
-void MicrosoftCXXNameMangler::mangleCallingConvention(CallingConv CC) {
+void MicrosoftCXXNameMangler::mangleCallingConvention(CallingConv CC,
+ SourceRange Range) {
// <calling-convention> ::= A # __cdecl
// ::= B # __export __cdecl
// ::= C # __pascal
@@ -2900,7 +3006,10 @@ void MicrosoftCXXNameMangler::mangleCallingConvention(CallingConv CC) {
// ::= J # __export __fastcall
// ::= Q # __vectorcall
// ::= S # __attribute__((__swiftcall__)) // Clang-only
- // ::= T # __attribute__((__swiftasynccall__))
+ // ::= W # __attribute__((__swiftasynccall__))
+ // ::= U # __attribute__((__preserve_most__))
+ // ::= V # __attribute__((__preserve_none__)) //
+ // Clang-only
// // Clang-only
// ::= w # __regcall
// ::= x # __regcall4
@@ -2912,28 +3021,55 @@ void MicrosoftCXXNameMangler::mangleCallingConvention(CallingConv CC) {
switch (CC) {
default:
- llvm_unreachable("Unsupported CC for mangling");
+ break;
case CC_Win64:
case CC_X86_64SysV:
- case CC_C: Out << 'A'; break;
- case CC_X86Pascal: Out << 'C'; break;
- case CC_X86ThisCall: Out << 'E'; break;
- case CC_X86StdCall: Out << 'G'; break;
- case CC_X86FastCall: Out << 'I'; break;
- case CC_X86VectorCall: Out << 'Q'; break;
- case CC_Swift: Out << 'S'; break;
- case CC_SwiftAsync: Out << 'W'; break;
- case CC_PreserveMost: Out << 'U'; break;
+ case CC_C:
+ Out << 'A';
+ return;
+ case CC_X86Pascal:
+ Out << 'C';
+ return;
+ case CC_X86ThisCall:
+ Out << 'E';
+ return;
+ case CC_X86StdCall:
+ Out << 'G';
+ return;
+ case CC_X86FastCall:
+ Out << 'I';
+ return;
+ case CC_X86VectorCall:
+ Out << 'Q';
+ return;
+ case CC_Swift:
+ Out << 'S';
+ return;
+ case CC_SwiftAsync:
+ Out << 'W';
+ return;
+ case CC_PreserveMost:
+ Out << 'U';
+ return;
+ case CC_PreserveNone:
+ Out << 'V';
+ return;
case CC_X86RegCall:
if (getASTContext().getLangOpts().RegCall4)
Out << "x";
else
Out << "w";
- break;
+ return;
}
+
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error, "cannot mangle this calling convention yet");
+ Diags.Report(Range.getBegin(), DiagID) << Range;
}
-void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) {
- mangleCallingConvention(T->getCallConv());
+void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T,
+ SourceRange Range) {
+ mangleCallingConvention(T->getCallConv(), Range);
}
void MicrosoftCXXNameMangler::mangleThrowSpecification(
@@ -3079,6 +3215,11 @@ void MicrosoftCXXNameMangler::mangleArrayType(const ArrayType *T) {
mangleType(ElementTy, SourceRange(), QMM_Escape);
}
+void MicrosoftCXXNameMangler::mangleType(const ArrayParameterType *T,
+ Qualifiers, SourceRange) {
+ mangleArrayType(cast<ConstantArrayType>(T));
+}
+
// <type> ::= <pointer-to-member-type>
// <pointer-to-member-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers>
// <class name> <type>
@@ -3408,6 +3549,12 @@ void MicrosoftCXXNameMangler::mangleType(const PackExpansionType *T, Qualifiers,
<< Range;
}
+void MicrosoftCXXNameMangler::mangleType(const PackIndexingType *T,
+ Qualifiers Quals, SourceRange Range) {
+ manglePointerCVQualifiers(Quals);
+ mangleType(T->getSelectedType(), Range);
+}
+
void MicrosoftCXXNameMangler::mangleType(const TypeOfType *T, Qualifiers,
SourceRange Range) {
DiagnosticsEngine &Diags = Context.getDiags();
@@ -3642,6 +3789,7 @@ void MicrosoftMangleContextImpl::mangleVirtualMemPtrThunk(
void MicrosoftMangleContextImpl::mangleThunk(const CXXMethodDecl *MD,
const ThunkInfo &Thunk,
+ bool /*ElideOverrideInfo*/,
raw_ostream &Out) {
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO);
@@ -3663,9 +3811,11 @@ void MicrosoftMangleContextImpl::mangleThunk(const CXXMethodDecl *MD,
DeclForFPT->getType()->castAs<FunctionProtoType>(), MD);
}
-void MicrosoftMangleContextImpl::mangleCXXDtorThunk(
- const CXXDestructorDecl *DD, CXXDtorType Type,
- const ThisAdjustment &Adjustment, raw_ostream &Out) {
+void MicrosoftMangleContextImpl::mangleCXXDtorThunk(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const ThunkInfo &Thunk,
+ bool /*ElideOverrideInfo*/,
+ raw_ostream &Out) {
// FIXME: Actually, the dtor thunk should be emitted for vector deleting
// dtors rather than scalar deleting dtors. Just use the vector deleting dtor
// mangling manually until we support both deleting dtor types.
@@ -3674,6 +3824,7 @@ void MicrosoftMangleContextImpl::mangleCXXDtorThunk(
MicrosoftCXXNameMangler Mangler(*this, MHO, DD, Type);
Mangler.getStream() << "??_E";
Mangler.mangleName(DD->getParent());
+ auto &Adjustment = Thunk.This;
mangleThunkThisAdjustment(DD->getAccess(), Adjustment, Mangler, MHO);
Mangler.mangleFunctionType(DD->getType()->castAs<FunctionProtoType>(), DD);
}
@@ -3698,6 +3849,12 @@ void MicrosoftMangleContextImpl::mangleCXXVFTable(
Mangler.getStream() << '@';
}
+void MicrosoftMangleContextImpl::mangleCXXVTable(const CXXRecordDecl *Derived,
+ raw_ostream &Out) {
+ // TODO: Determine appropriate mangling for MSABI
+ mangleCXXVFTable(Derived, {}, Out);
+}
+
void MicrosoftMangleContextImpl::mangleCXXVBTable(
const CXXRecordDecl *Derived, ArrayRef<const CXXRecordDecl *> BasePath,
raw_ostream &Out) {
@@ -3905,7 +4062,8 @@ void MicrosoftMangleContextImpl::mangleReferenceTemporary(
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO);
- Mangler.getStream() << "?$RT" << ManglingNumber << '@';
+ Mangler.getStream() << "?";
+ Mangler.mangleSourceName("$RT" + llvm::utostr(ManglingNumber));
Mangler.mangle(VD, "");
}
@@ -3914,7 +4072,8 @@ void MicrosoftMangleContextImpl::mangleThreadSafeStaticGuardVariable(
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO);
- Mangler.getStream() << "?$TSS" << GuardNum << '@';
+ Mangler.getStream() << "?";
+ Mangler.mangleSourceName("$TSS" + llvm::utostr(GuardNum));
Mangler.mangleNestedName(VD);
Mangler.getStream() << "@4HA";
}
@@ -4015,10 +4174,8 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
// char bar[42] = "foobar";
// Where it is truncated or zero-padded to fit the array. This is the length
// used for mangling, and any trailing null-bytes also need to be mangled.
- unsigned StringLength = getASTContext()
- .getAsConstantArrayType(SL->getType())
- ->getSize()
- .getZExtValue();
+ unsigned StringLength =
+ getASTContext().getAsConstantArrayType(SL->getType())->getZExtSize();
unsigned StringByteLength = StringLength * SL->getCharByteWidth();
// <char-type>: The "kind" of string literal is encoded into the mangled name.
diff --git a/contrib/llvm-project/clang/lib/AST/NSAPI.cpp b/contrib/llvm-project/clang/lib/AST/NSAPI.cpp
index 86dee540e9e2..48d1763125e6 100644
--- a/contrib/llvm-project/clang/lib/AST/NSAPI.cpp
+++ b/contrib/llvm-project/clang/lib/AST/NSAPI.cpp
@@ -56,10 +56,8 @@ Selector NSAPI::getNSStringSelector(NSStringMethodKind MK) const {
&Ctx.Idents.get("initWithUTF8String"));
break;
case NSStr_stringWithCStringEncoding: {
- IdentifierInfo *KeyIdents[] = {
- &Ctx.Idents.get("stringWithCString"),
- &Ctx.Idents.get("encoding")
- };
+ const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("stringWithCString"),
+ &Ctx.Idents.get("encoding")};
Sel = Ctx.Selectors.getSelector(2, KeyIdents);
break;
}
@@ -93,10 +91,8 @@ Selector NSAPI::getNSArraySelector(NSArrayMethodKind MK) const {
Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithObjects"));
break;
case NSArr_arrayWithObjectsCount: {
- IdentifierInfo *KeyIdents[] = {
- &Ctx.Idents.get("arrayWithObjects"),
- &Ctx.Idents.get("count")
- };
+ const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("arrayWithObjects"),
+ &Ctx.Idents.get("count")};
Sel = Ctx.Selectors.getSelector(2, KeyIdents);
break;
}
@@ -110,10 +106,9 @@ Selector NSAPI::getNSArraySelector(NSArrayMethodKind MK) const {
Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("objectAtIndex"));
break;
case NSMutableArr_replaceObjectAtIndex: {
- IdentifierInfo *KeyIdents[] = {
- &Ctx.Idents.get("replaceObjectAtIndex"),
- &Ctx.Idents.get("withObject")
- };
+ const IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("replaceObjectAtIndex"),
+ &Ctx.Idents.get("withObject")};
Sel = Ctx.Selectors.getSelector(2, KeyIdents);
break;
}
@@ -121,18 +116,14 @@ Selector NSAPI::getNSArraySelector(NSArrayMethodKind MK) const {
Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("addObject"));
break;
case NSMutableArr_insertObjectAtIndex: {
- IdentifierInfo *KeyIdents[] = {
- &Ctx.Idents.get("insertObject"),
- &Ctx.Idents.get("atIndex")
- };
+ const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("insertObject"),
+ &Ctx.Idents.get("atIndex")};
Sel = Ctx.Selectors.getSelector(2, KeyIdents);
break;
}
case NSMutableArr_setObjectAtIndexedSubscript: {
- IdentifierInfo *KeyIdents[] = {
- &Ctx.Idents.get("setObject"),
- &Ctx.Idents.get("atIndexedSubscript")
- };
+ const IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("setObject"), &Ctx.Idents.get("atIndexedSubscript")};
Sel = Ctx.Selectors.getSelector(2, KeyIdents);
break;
}
@@ -167,27 +158,21 @@ Selector NSAPI::getNSDictionarySelector(
&Ctx.Idents.get("dictionaryWithDictionary"));
break;
case NSDict_dictionaryWithObjectForKey: {
- IdentifierInfo *KeyIdents[] = {
- &Ctx.Idents.get("dictionaryWithObject"),
- &Ctx.Idents.get("forKey")
- };
+ const IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("dictionaryWithObject"), &Ctx.Idents.get("forKey")};
Sel = Ctx.Selectors.getSelector(2, KeyIdents);
break;
}
case NSDict_dictionaryWithObjectsForKeys: {
- IdentifierInfo *KeyIdents[] = {
- &Ctx.Idents.get("dictionaryWithObjects"),
- &Ctx.Idents.get("forKeys")
- };
+ const IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("dictionaryWithObjects"), &Ctx.Idents.get("forKeys")};
Sel = Ctx.Selectors.getSelector(2, KeyIdents);
break;
}
case NSDict_dictionaryWithObjectsForKeysCount: {
- IdentifierInfo *KeyIdents[] = {
- &Ctx.Idents.get("dictionaryWithObjects"),
- &Ctx.Idents.get("forKeys"),
- &Ctx.Idents.get("count")
- };
+ const IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("dictionaryWithObjects"), &Ctx.Idents.get("forKeys"),
+ &Ctx.Idents.get("count")};
Sel = Ctx.Selectors.getSelector(3, KeyIdents);
break;
}
@@ -204,10 +189,8 @@ Selector NSAPI::getNSDictionarySelector(
&Ctx.Idents.get("initWithObjectsAndKeys"));
break;
case NSDict_initWithObjectsForKeys: {
- IdentifierInfo *KeyIdents[] = {
- &Ctx.Idents.get("initWithObjects"),
- &Ctx.Idents.get("forKeys")
- };
+ const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("initWithObjects"),
+ &Ctx.Idents.get("forKeys")};
Sel = Ctx.Selectors.getSelector(2, KeyIdents);
break;
}
@@ -215,26 +198,20 @@ Selector NSAPI::getNSDictionarySelector(
Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("objectForKey"));
break;
case NSMutableDict_setObjectForKey: {
- IdentifierInfo *KeyIdents[] = {
- &Ctx.Idents.get("setObject"),
- &Ctx.Idents.get("forKey")
- };
+ const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("setObject"),
+ &Ctx.Idents.get("forKey")};
Sel = Ctx.Selectors.getSelector(2, KeyIdents);
break;
}
case NSMutableDict_setObjectForKeyedSubscript: {
- IdentifierInfo *KeyIdents[] = {
- &Ctx.Idents.get("setObject"),
- &Ctx.Idents.get("forKeyedSubscript")
- };
+ const IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("setObject"), &Ctx.Idents.get("forKeyedSubscript")};
Sel = Ctx.Selectors.getSelector(2, KeyIdents);
break;
}
case NSMutableDict_setValueForKey: {
- IdentifierInfo *KeyIdents[] = {
- &Ctx.Idents.get("setValue"),
- &Ctx.Idents.get("forKey")
- };
+ const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("setValue"),
+ &Ctx.Idents.get("forKey")};
Sel = Ctx.Selectors.getSelector(2, KeyIdents);
break;
}
@@ -264,34 +241,27 @@ Selector NSAPI::getNSSetSelector(NSSetMethodKind MK) const {
Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("addObject"));
break;
case NSOrderedSet_insertObjectAtIndex: {
- IdentifierInfo *KeyIdents[] = {
- &Ctx.Idents.get("insertObject"),
- &Ctx.Idents.get("atIndex")
- };
+ const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("insertObject"),
+ &Ctx.Idents.get("atIndex")};
Sel = Ctx.Selectors.getSelector(2, KeyIdents);
break;
}
case NSOrderedSet_setObjectAtIndex: {
- IdentifierInfo *KeyIdents[] = {
- &Ctx.Idents.get("setObject"),
- &Ctx.Idents.get("atIndex")
- };
+ const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("setObject"),
+ &Ctx.Idents.get("atIndex")};
Sel = Ctx.Selectors.getSelector(2, KeyIdents);
break;
}
case NSOrderedSet_setObjectAtIndexedSubscript: {
- IdentifierInfo *KeyIdents[] = {
- &Ctx.Idents.get("setObject"),
- &Ctx.Idents.get("atIndexedSubscript")
- };
+ const IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("setObject"), &Ctx.Idents.get("atIndexedSubscript")};
Sel = Ctx.Selectors.getSelector(2, KeyIdents);
break;
}
case NSOrderedSet_replaceObjectAtIndexWithObject: {
- IdentifierInfo *KeyIdents[] = {
- &Ctx.Idents.get("replaceObjectAtIndex"),
- &Ctx.Idents.get("withObject")
- };
+ const IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("replaceObjectAtIndex"),
+ &Ctx.Idents.get("withObject")};
Sel = Ctx.Selectors.getSelector(2, KeyIdents);
break;
}
@@ -483,7 +453,10 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
+#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/AMDGPUTypes.def"
case BuiltinType::BoundMember:
+ case BuiltinType::UnresolvedTemplate:
case BuiltinType::Dependent:
case BuiltinType::Overload:
case BuiltinType::UnknownAny:
@@ -492,7 +465,7 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
case BuiltinType::PseudoObject:
case BuiltinType::BuiltinFn:
case BuiltinType::IncompleteMatrixIdx:
- case BuiltinType::OMPArraySection:
+ case BuiltinType::ArraySection:
case BuiltinType::OMPArrayShaping:
case BuiltinType::OMPIterator:
case BuiltinType::BFloat16:
@@ -606,7 +579,7 @@ bool NSAPI::isObjCEnumerator(const Expr *E,
Selector NSAPI::getOrInitSelector(ArrayRef<StringRef> Ids,
Selector &Sel) const {
if (Sel.isNull()) {
- SmallVector<IdentifierInfo *, 4> Idents;
+ SmallVector<const IdentifierInfo *, 4> Idents;
for (ArrayRef<StringRef>::const_iterator
I = Ids.begin(), E = Ids.end(); I != E; ++I)
Idents.push_back(&Ctx.Idents.get(*I));
@@ -617,7 +590,7 @@ Selector NSAPI::getOrInitSelector(ArrayRef<StringRef> Ids,
Selector NSAPI::getOrInitNullarySelector(StringRef Id, Selector &Sel) const {
if (Sel.isNull()) {
- IdentifierInfo *Ident = &Ctx.Idents.get(Id);
+ const IdentifierInfo *Ident = &Ctx.Idents.get(Id);
Sel = Ctx.Selectors.getSelector(0, &Ident);
}
return Sel;
diff --git a/contrib/llvm-project/clang/lib/AST/NestedNameSpecifier.cpp b/contrib/llvm-project/clang/lib/AST/NestedNameSpecifier.cpp
index 36f2c47b3000..785c46e86a77 100644
--- a/contrib/llvm-project/clang/lib/AST/NestedNameSpecifier.cpp
+++ b/contrib/llvm-project/clang/lib/AST/NestedNameSpecifier.cpp
@@ -55,16 +55,16 @@ NestedNameSpecifier::FindOrInsert(const ASTContext &Context,
return NNS;
}
-NestedNameSpecifier *
-NestedNameSpecifier::Create(const ASTContext &Context,
- NestedNameSpecifier *Prefix, IdentifierInfo *II) {
+NestedNameSpecifier *NestedNameSpecifier::Create(const ASTContext &Context,
+ NestedNameSpecifier *Prefix,
+ const IdentifierInfo *II) {
assert(II && "Identifier cannot be NULL");
assert((!Prefix || Prefix->isDependent()) && "Prefix must be dependent");
NestedNameSpecifier Mockup;
Mockup.Prefix.setPointer(Prefix);
Mockup.Prefix.setInt(StoredIdentifier);
- Mockup.Specifier = II;
+ Mockup.Specifier = const_cast<IdentifierInfo *>(II);
return FindOrInsert(Context, Mockup);
}
@@ -87,7 +87,7 @@ NestedNameSpecifier::Create(const ASTContext &Context,
NestedNameSpecifier *
NestedNameSpecifier::Create(const ASTContext &Context,
NestedNameSpecifier *Prefix,
- NamespaceAliasDecl *Alias) {
+ const NamespaceAliasDecl *Alias) {
assert(Alias && "Namespace alias cannot be NULL");
assert((!Prefix ||
(Prefix->getAsType() == nullptr &&
@@ -96,7 +96,7 @@ NestedNameSpecifier::Create(const ASTContext &Context,
NestedNameSpecifier Mockup;
Mockup.Prefix.setPointer(Prefix);
Mockup.Prefix.setInt(StoredDecl);
- Mockup.Specifier = Alias;
+ Mockup.Specifier = const_cast<NamespaceAliasDecl *>(Alias);
return FindOrInsert(Context, Mockup);
}
@@ -112,13 +112,13 @@ NestedNameSpecifier::Create(const ASTContext &Context,
return FindOrInsert(Context, Mockup);
}
-NestedNameSpecifier *
-NestedNameSpecifier::Create(const ASTContext &Context, IdentifierInfo *II) {
+NestedNameSpecifier *NestedNameSpecifier::Create(const ASTContext &Context,
+ const IdentifierInfo *II) {
assert(II && "Identifier cannot be NULL");
NestedNameSpecifier Mockup;
Mockup.Prefix.setPointer(nullptr);
Mockup.Prefix.setInt(StoredIdentifier);
- Mockup.Specifier = II;
+ Mockup.Specifier = const_cast<IdentifierInfo *>(II);
return FindOrInsert(Context, Mockup);
}
diff --git a/contrib/llvm-project/clang/lib/AST/ODRDiagsEmitter.cpp b/contrib/llvm-project/clang/lib/AST/ODRDiagsEmitter.cpp
index 5b1cdc16e2ea..37f0f68c9235 100644
--- a/contrib/llvm-project/clang/lib/AST/ODRDiagsEmitter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ODRDiagsEmitter.cpp
@@ -1409,13 +1409,15 @@ bool ODRDiagsEmitter::diagnoseMismatch(
}
if (HasFirstDefaultArgument && HasSecondDefaultArgument) {
- QualType FirstType = FirstTTPD->getDefaultArgument();
- QualType SecondType = SecondTTPD->getDefaultArgument();
- if (computeODRHash(FirstType) != computeODRHash(SecondType)) {
+ TemplateArgument FirstTA =
+ FirstTTPD->getDefaultArgument().getArgument();
+ TemplateArgument SecondTA =
+ SecondTTPD->getDefaultArgument().getArgument();
+ if (computeODRHash(FirstTA) != computeODRHash(SecondTA)) {
DiagTemplateError(FunctionTemplateParameterDifferentDefaultArgument)
- << (i + 1) << FirstType;
+ << (i + 1) << FirstTA;
DiagTemplateNote(FunctionTemplateParameterDifferentDefaultArgument)
- << (i + 1) << SecondType;
+ << (i + 1) << SecondTA;
return true;
}
}
@@ -1521,8 +1523,11 @@ bool ODRDiagsEmitter::diagnoseMismatch(
}
if (HasFirstDefaultArgument && HasSecondDefaultArgument) {
- Expr *FirstDefaultArgument = FirstNTTPD->getDefaultArgument();
- Expr *SecondDefaultArgument = SecondNTTPD->getDefaultArgument();
+ TemplateArgument FirstDefaultArgument =
+ FirstNTTPD->getDefaultArgument().getArgument();
+ TemplateArgument SecondDefaultArgument =
+ SecondNTTPD->getDefaultArgument().getArgument();
+
if (computeODRHash(FirstDefaultArgument) !=
computeODRHash(SecondDefaultArgument)) {
DiagTemplateError(FunctionTemplateParameterDifferentDefaultArgument)
diff --git a/contrib/llvm-project/clang/lib/AST/ODRHash.cpp b/contrib/llvm-project/clang/lib/AST/ODRHash.cpp
index 2dbc259138a8..fbfe92318dc5 100644
--- a/contrib/llvm-project/clang/lib/AST/ODRHash.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ODRHash.cpp
@@ -146,10 +146,17 @@ void ODRHash::AddTemplateName(TemplateName Name) {
case TemplateName::Template:
AddDecl(Name.getAsTemplateDecl());
break;
+ case TemplateName::QualifiedTemplate: {
+ QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName();
+ if (NestedNameSpecifier *NNS = QTN->getQualifier())
+ AddNestedNameSpecifier(NNS);
+ AddBoolean(QTN->hasTemplateKeyword());
+ AddTemplateName(QTN->getUnderlyingTemplate());
+ break;
+ }
// TODO: Support these cases.
case TemplateName::OverloadedTemplate:
case TemplateName::AssumedTemplate:
- case TemplateName::QualifiedTemplate:
case TemplateName::DependentTemplate:
case TemplateName::SubstTemplateTemplateParm:
case TemplateName::SubstTemplateTemplateParmPack:
@@ -244,7 +251,7 @@ unsigned ODRHash::CalculateHash() {
assert(I == Bools.rend());
Bools.clear();
- return ID.ComputeHash();
+ return ID.computeStableHash();
}
namespace {
@@ -462,7 +469,7 @@ public:
D->hasDefaultArgument() && !D->defaultArgumentWasInherited();
Hash.AddBoolean(hasDefaultArgument);
if (hasDefaultArgument) {
- AddTemplateArgument(D->getDefaultArgument());
+ AddTemplateArgument(D->getDefaultArgument().getArgument());
}
Hash.AddBoolean(D->isParameterPack());
@@ -480,7 +487,7 @@ public:
D->hasDefaultArgument() && !D->defaultArgumentWasInherited();
Hash.AddBoolean(hasDefaultArgument);
if (hasDefaultArgument) {
- AddStmt(D->getDefaultArgument());
+ AddTemplateArgument(D->getDefaultArgument().getArgument());
}
Hash.AddBoolean(D->isParameterPack());
@@ -696,6 +703,12 @@ void ODRHash::AddFunctionDecl(const FunctionDecl *Function,
AddBoolean(Function->isDeletedAsWritten());
AddBoolean(Function->isExplicitlyDefaulted());
+ StringLiteral *DeletedMessage = Function->getDeletedMessage();
+ AddBoolean(DeletedMessage);
+
+ if (DeletedMessage)
+ ID.AddString(DeletedMessage->getBytes());
+
AddDecl(Function);
AddQualType(Function->getReturnType());
@@ -944,6 +957,10 @@ public:
VisitArrayType(T);
}
+ void VisitArrayParameterType(const ArrayParameterType *T) {
+ VisitConstantArrayType(T);
+ }
+
void VisitDependentSizedArrayType(const DependentSizedArrayType *T) {
AddStmt(T->getSizeExpr());
VisitArrayType(T);
diff --git a/contrib/llvm-project/clang/lib/AST/OpenACCClause.cpp b/contrib/llvm-project/clang/lib/AST/OpenACCClause.cpp
new file mode 100644
index 000000000000..95089a9b79e2
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/OpenACCClause.cpp
@@ -0,0 +1,552 @@
+//===---- OpenACCClause.cpp - Classes for OpenACC Clauses ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the subclasses of the OpenACCClause class declared in
+// OpenACCClause.h
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/OpenACCClause.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Expr.h"
+
+using namespace clang;
+
+bool OpenACCClauseWithParams::classof(const OpenACCClause *C) {
+ return OpenACCDeviceTypeClause::classof(C) ||
+ OpenACCClauseWithCondition::classof(C) ||
+ OpenACCClauseWithExprs::classof(C);
+}
+bool OpenACCClauseWithExprs::classof(const OpenACCClause *C) {
+ return OpenACCWaitClause::classof(C) || OpenACCNumGangsClause::classof(C) ||
+ OpenACCClauseWithSingleIntExpr::classof(C) ||
+ OpenACCClauseWithVarList::classof(C);
+}
+bool OpenACCClauseWithVarList::classof(const OpenACCClause *C) {
+ return OpenACCPrivateClause::classof(C) ||
+ OpenACCFirstPrivateClause::classof(C) ||
+ OpenACCDevicePtrClause::classof(C) ||
+ OpenACCDevicePtrClause::classof(C) ||
+ OpenACCAttachClause::classof(C) || OpenACCNoCreateClause::classof(C) ||
+ OpenACCPresentClause::classof(C) || OpenACCCopyClause::classof(C) ||
+ OpenACCCopyInClause::classof(C) || OpenACCCopyOutClause::classof(C) ||
+ OpenACCReductionClause::classof(C) || OpenACCCreateClause::classof(C);
+}
+bool OpenACCClauseWithCondition::classof(const OpenACCClause *C) {
+ return OpenACCIfClause::classof(C) || OpenACCSelfClause::classof(C);
+}
+bool OpenACCClauseWithSingleIntExpr::classof(const OpenACCClause *C) {
+ return OpenACCNumWorkersClause::classof(C) ||
+ OpenACCVectorLengthClause::classof(C) ||
+ OpenACCAsyncClause::classof(C);
+}
+OpenACCDefaultClause *OpenACCDefaultClause::Create(const ASTContext &C,
+ OpenACCDefaultClauseKind K,
+ SourceLocation BeginLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ void *Mem =
+ C.Allocate(sizeof(OpenACCDefaultClause), alignof(OpenACCDefaultClause));
+
+ return new (Mem) OpenACCDefaultClause(K, BeginLoc, LParenLoc, EndLoc);
+}
+
+OpenACCIfClause *OpenACCIfClause::Create(const ASTContext &C,
+ SourceLocation BeginLoc,
+ SourceLocation LParenLoc,
+ Expr *ConditionExpr,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(sizeof(OpenACCIfClause), alignof(OpenACCIfClause));
+ return new (Mem) OpenACCIfClause(BeginLoc, LParenLoc, ConditionExpr, EndLoc);
+}
+
+OpenACCIfClause::OpenACCIfClause(SourceLocation BeginLoc,
+ SourceLocation LParenLoc, Expr *ConditionExpr,
+ SourceLocation EndLoc)
+ : OpenACCClauseWithCondition(OpenACCClauseKind::If, BeginLoc, LParenLoc,
+ ConditionExpr, EndLoc) {
+ assert(ConditionExpr && "if clause requires condition expr");
+ assert((ConditionExpr->isInstantiationDependent() ||
+ ConditionExpr->getType()->isScalarType()) &&
+ "Condition expression type not scalar/dependent");
+}
+
+OpenACCSelfClause *OpenACCSelfClause::Create(const ASTContext &C,
+ SourceLocation BeginLoc,
+ SourceLocation LParenLoc,
+ Expr *ConditionExpr,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(sizeof(OpenACCIfClause), alignof(OpenACCIfClause));
+ return new (Mem)
+ OpenACCSelfClause(BeginLoc, LParenLoc, ConditionExpr, EndLoc);
+}
+
+OpenACCSelfClause::OpenACCSelfClause(SourceLocation BeginLoc,
+ SourceLocation LParenLoc,
+ Expr *ConditionExpr, SourceLocation EndLoc)
+ : OpenACCClauseWithCondition(OpenACCClauseKind::Self, BeginLoc, LParenLoc,
+ ConditionExpr, EndLoc) {
+ assert((!ConditionExpr || ConditionExpr->isInstantiationDependent() ||
+ ConditionExpr->getType()->isScalarType()) &&
+ "Condition expression type not scalar/dependent");
+}
+
+OpenACCClause::child_range OpenACCClause::children() {
+ switch (getClauseKind()) {
+ default:
+ assert(false && "Clause children function not implemented");
+ break;
+#define VISIT_CLAUSE(CLAUSE_NAME) \
+ case OpenACCClauseKind::CLAUSE_NAME: \
+ return cast<OpenACC##CLAUSE_NAME##Clause>(this)->children();
+#define CLAUSE_ALIAS(ALIAS_NAME, CLAUSE_NAME, DEPRECATED) \
+ case OpenACCClauseKind::ALIAS_NAME: \
+ return cast<OpenACC##CLAUSE_NAME##Clause>(this)->children();
+
+#include "clang/Basic/OpenACCClauses.def"
+ }
+ return child_range(child_iterator(), child_iterator());
+}
+
+OpenACCNumWorkersClause::OpenACCNumWorkersClause(SourceLocation BeginLoc,
+ SourceLocation LParenLoc,
+ Expr *IntExpr,
+ SourceLocation EndLoc)
+ : OpenACCClauseWithSingleIntExpr(OpenACCClauseKind::NumWorkers, BeginLoc,
+ LParenLoc, IntExpr, EndLoc) {
+ assert((!IntExpr || IntExpr->isInstantiationDependent() ||
+ IntExpr->getType()->isIntegerType()) &&
+ "Condition expression type not scalar/dependent");
+}
+
+OpenACCNumWorkersClause *
+OpenACCNumWorkersClause::Create(const ASTContext &C, SourceLocation BeginLoc,
+ SourceLocation LParenLoc, Expr *IntExpr,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(sizeof(OpenACCNumWorkersClause),
+ alignof(OpenACCNumWorkersClause));
+ return new (Mem)
+ OpenACCNumWorkersClause(BeginLoc, LParenLoc, IntExpr, EndLoc);
+}
+
+OpenACCVectorLengthClause::OpenACCVectorLengthClause(SourceLocation BeginLoc,
+ SourceLocation LParenLoc,
+ Expr *IntExpr,
+ SourceLocation EndLoc)
+ : OpenACCClauseWithSingleIntExpr(OpenACCClauseKind::VectorLength, BeginLoc,
+ LParenLoc, IntExpr, EndLoc) {
+ assert((!IntExpr || IntExpr->isInstantiationDependent() ||
+ IntExpr->getType()->isIntegerType()) &&
+ "Condition expression type not scalar/dependent");
+}
+
+OpenACCVectorLengthClause *
+OpenACCVectorLengthClause::Create(const ASTContext &C, SourceLocation BeginLoc,
+ SourceLocation LParenLoc, Expr *IntExpr,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(sizeof(OpenACCVectorLengthClause),
+ alignof(OpenACCVectorLengthClause));
+ return new (Mem)
+ OpenACCVectorLengthClause(BeginLoc, LParenLoc, IntExpr, EndLoc);
+}
+
+OpenACCAsyncClause::OpenACCAsyncClause(SourceLocation BeginLoc,
+ SourceLocation LParenLoc, Expr *IntExpr,
+ SourceLocation EndLoc)
+ : OpenACCClauseWithSingleIntExpr(OpenACCClauseKind::Async, BeginLoc,
+ LParenLoc, IntExpr, EndLoc) {
+ assert((!IntExpr || IntExpr->isInstantiationDependent() ||
+ IntExpr->getType()->isIntegerType()) &&
+ "Condition expression type not scalar/dependent");
+}
+
+OpenACCAsyncClause *OpenACCAsyncClause::Create(const ASTContext &C,
+ SourceLocation BeginLoc,
+ SourceLocation LParenLoc,
+ Expr *IntExpr,
+ SourceLocation EndLoc) {
+ void *Mem =
+ C.Allocate(sizeof(OpenACCAsyncClause), alignof(OpenACCAsyncClause));
+ return new (Mem) OpenACCAsyncClause(BeginLoc, LParenLoc, IntExpr, EndLoc);
+}
+
+OpenACCWaitClause *OpenACCWaitClause::Create(
+ const ASTContext &C, SourceLocation BeginLoc, SourceLocation LParenLoc,
+ Expr *DevNumExpr, SourceLocation QueuesLoc, ArrayRef<Expr *> QueueIdExprs,
+ SourceLocation EndLoc) {
+ // Allocates enough room in trailing storage for all the int-exprs, plus a
+ // placeholder for the devnum.
+ void *Mem = C.Allocate(
+ OpenACCWaitClause::totalSizeToAlloc<Expr *>(QueueIdExprs.size() + 1));
+ return new (Mem) OpenACCWaitClause(BeginLoc, LParenLoc, DevNumExpr, QueuesLoc,
+ QueueIdExprs, EndLoc);
+}
+
+OpenACCNumGangsClause *OpenACCNumGangsClause::Create(const ASTContext &C,
+ SourceLocation BeginLoc,
+ SourceLocation LParenLoc,
+ ArrayRef<Expr *> IntExprs,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(
+ OpenACCNumGangsClause::totalSizeToAlloc<Expr *>(IntExprs.size()));
+ return new (Mem) OpenACCNumGangsClause(BeginLoc, LParenLoc, IntExprs, EndLoc);
+}
+
+OpenACCPrivateClause *OpenACCPrivateClause::Create(const ASTContext &C,
+ SourceLocation BeginLoc,
+ SourceLocation LParenLoc,
+ ArrayRef<Expr *> VarList,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(
+ OpenACCPrivateClause::totalSizeToAlloc<Expr *>(VarList.size()));
+ return new (Mem) OpenACCPrivateClause(BeginLoc, LParenLoc, VarList, EndLoc);
+}
+
+OpenACCFirstPrivateClause *OpenACCFirstPrivateClause::Create(
+ const ASTContext &C, SourceLocation BeginLoc, SourceLocation LParenLoc,
+ ArrayRef<Expr *> VarList, SourceLocation EndLoc) {
+ void *Mem = C.Allocate(
+ OpenACCFirstPrivateClause::totalSizeToAlloc<Expr *>(VarList.size()));
+ return new (Mem)
+ OpenACCFirstPrivateClause(BeginLoc, LParenLoc, VarList, EndLoc);
+}
+
+OpenACCAttachClause *OpenACCAttachClause::Create(const ASTContext &C,
+ SourceLocation BeginLoc,
+ SourceLocation LParenLoc,
+ ArrayRef<Expr *> VarList,
+ SourceLocation EndLoc) {
+ void *Mem =
+ C.Allocate(OpenACCAttachClause::totalSizeToAlloc<Expr *>(VarList.size()));
+ return new (Mem) OpenACCAttachClause(BeginLoc, LParenLoc, VarList, EndLoc);
+}
+
+OpenACCDevicePtrClause *OpenACCDevicePtrClause::Create(const ASTContext &C,
+ SourceLocation BeginLoc,
+ SourceLocation LParenLoc,
+ ArrayRef<Expr *> VarList,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(
+ OpenACCDevicePtrClause::totalSizeToAlloc<Expr *>(VarList.size()));
+ return new (Mem) OpenACCDevicePtrClause(BeginLoc, LParenLoc, VarList, EndLoc);
+}
+
+OpenACCNoCreateClause *OpenACCNoCreateClause::Create(const ASTContext &C,
+ SourceLocation BeginLoc,
+ SourceLocation LParenLoc,
+ ArrayRef<Expr *> VarList,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(
+ OpenACCNoCreateClause::totalSizeToAlloc<Expr *>(VarList.size()));
+ return new (Mem) OpenACCNoCreateClause(BeginLoc, LParenLoc, VarList, EndLoc);
+}
+
+OpenACCPresentClause *OpenACCPresentClause::Create(const ASTContext &C,
+ SourceLocation BeginLoc,
+ SourceLocation LParenLoc,
+ ArrayRef<Expr *> VarList,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(
+ OpenACCPresentClause::totalSizeToAlloc<Expr *>(VarList.size()));
+ return new (Mem) OpenACCPresentClause(BeginLoc, LParenLoc, VarList, EndLoc);
+}
+
+OpenACCCopyClause *
+OpenACCCopyClause::Create(const ASTContext &C, OpenACCClauseKind Spelling,
+ SourceLocation BeginLoc, SourceLocation LParenLoc,
+ ArrayRef<Expr *> VarList, SourceLocation EndLoc) {
+ void *Mem =
+ C.Allocate(OpenACCCopyClause::totalSizeToAlloc<Expr *>(VarList.size()));
+ return new (Mem)
+ OpenACCCopyClause(Spelling, BeginLoc, LParenLoc, VarList, EndLoc);
+}
+
+OpenACCCopyInClause *
+OpenACCCopyInClause::Create(const ASTContext &C, OpenACCClauseKind Spelling,
+ SourceLocation BeginLoc, SourceLocation LParenLoc,
+ bool IsReadOnly, ArrayRef<Expr *> VarList,
+ SourceLocation EndLoc) {
+ void *Mem =
+ C.Allocate(OpenACCCopyInClause::totalSizeToAlloc<Expr *>(VarList.size()));
+ return new (Mem) OpenACCCopyInClause(Spelling, BeginLoc, LParenLoc,
+ IsReadOnly, VarList, EndLoc);
+}
+
+OpenACCCopyOutClause *
+OpenACCCopyOutClause::Create(const ASTContext &C, OpenACCClauseKind Spelling,
+ SourceLocation BeginLoc, SourceLocation LParenLoc,
+ bool IsZero, ArrayRef<Expr *> VarList,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(
+ OpenACCCopyOutClause::totalSizeToAlloc<Expr *>(VarList.size()));
+ return new (Mem) OpenACCCopyOutClause(Spelling, BeginLoc, LParenLoc, IsZero,
+ VarList, EndLoc);
+}
+
+OpenACCCreateClause *
+OpenACCCreateClause::Create(const ASTContext &C, OpenACCClauseKind Spelling,
+ SourceLocation BeginLoc, SourceLocation LParenLoc,
+ bool IsZero, ArrayRef<Expr *> VarList,
+ SourceLocation EndLoc) {
+ void *Mem =
+ C.Allocate(OpenACCCreateClause::totalSizeToAlloc<Expr *>(VarList.size()));
+ return new (Mem) OpenACCCreateClause(Spelling, BeginLoc, LParenLoc, IsZero,
+ VarList, EndLoc);
+}
+
+OpenACCDeviceTypeClause *OpenACCDeviceTypeClause::Create(
+ const ASTContext &C, OpenACCClauseKind K, SourceLocation BeginLoc,
+ SourceLocation LParenLoc, ArrayRef<DeviceTypeArgument> Archs,
+ SourceLocation EndLoc) {
+ void *Mem =
+ C.Allocate(OpenACCDeviceTypeClause::totalSizeToAlloc<DeviceTypeArgument>(
+ Archs.size()));
+ return new (Mem)
+ OpenACCDeviceTypeClause(K, BeginLoc, LParenLoc, Archs, EndLoc);
+}
+
+OpenACCReductionClause *OpenACCReductionClause::Create(
+ const ASTContext &C, SourceLocation BeginLoc, SourceLocation LParenLoc,
+ OpenACCReductionOperator Operator, ArrayRef<Expr *> VarList,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(
+ OpenACCReductionClause::totalSizeToAlloc<Expr *>(VarList.size()));
+ return new (Mem)
+ OpenACCReductionClause(BeginLoc, LParenLoc, Operator, VarList, EndLoc);
+}
+
+OpenACCAutoClause *OpenACCAutoClause::Create(const ASTContext &C,
+ SourceLocation BeginLoc,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(sizeof(OpenACCAutoClause));
+ return new (Mem) OpenACCAutoClause(BeginLoc, EndLoc);
+}
+
+OpenACCIndependentClause *
+OpenACCIndependentClause::Create(const ASTContext &C, SourceLocation BeginLoc,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(sizeof(OpenACCIndependentClause));
+ return new (Mem) OpenACCIndependentClause(BeginLoc, EndLoc);
+}
+
+OpenACCSeqClause *OpenACCSeqClause::Create(const ASTContext &C,
+ SourceLocation BeginLoc,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(sizeof(OpenACCSeqClause));
+ return new (Mem) OpenACCSeqClause(BeginLoc, EndLoc);
+}
+
+OpenACCGangClause *OpenACCGangClause::Create(const ASTContext &C,
+ SourceLocation BeginLoc,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(sizeof(OpenACCGangClause));
+ return new (Mem) OpenACCGangClause(BeginLoc, EndLoc);
+}
+
+OpenACCWorkerClause *OpenACCWorkerClause::Create(const ASTContext &C,
+ SourceLocation BeginLoc,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(sizeof(OpenACCWorkerClause));
+ return new (Mem) OpenACCWorkerClause(BeginLoc, EndLoc);
+}
+
+OpenACCVectorClause *OpenACCVectorClause::Create(const ASTContext &C,
+ SourceLocation BeginLoc,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(sizeof(OpenACCVectorClause));
+ return new (Mem) OpenACCVectorClause(BeginLoc, EndLoc);
+}
+
+//===----------------------------------------------------------------------===//
+// OpenACC clauses printing methods
+//===----------------------------------------------------------------------===//
+
+void OpenACCClausePrinter::printExpr(const Expr *E) {
+ E->printPretty(OS, nullptr, Policy, 0);
+}
+
+void OpenACCClausePrinter::VisitDefaultClause(const OpenACCDefaultClause &C) {
+ OS << "default(" << C.getDefaultClauseKind() << ")";
+}
+
+void OpenACCClausePrinter::VisitIfClause(const OpenACCIfClause &C) {
+ OS << "if(";
+ printExpr(C.getConditionExpr());
+ OS << ")";
+}
+
+void OpenACCClausePrinter::VisitSelfClause(const OpenACCSelfClause &C) {
+ OS << "self";
+ if (const Expr *CondExpr = C.getConditionExpr()) {
+ OS << "(";
+ printExpr(CondExpr);
+ OS << ")";
+ }
+}
+
+void OpenACCClausePrinter::VisitNumGangsClause(const OpenACCNumGangsClause &C) {
+ OS << "num_gangs(";
+ llvm::interleaveComma(C.getIntExprs(), OS,
+ [&](const Expr *E) { printExpr(E); });
+ OS << ")";
+}
+
+void OpenACCClausePrinter::VisitNumWorkersClause(
+ const OpenACCNumWorkersClause &C) {
+ OS << "num_workers(";
+ printExpr(C.getIntExpr());
+ OS << ")";
+}
+
+void OpenACCClausePrinter::VisitVectorLengthClause(
+ const OpenACCVectorLengthClause &C) {
+ OS << "vector_length(";
+ printExpr(C.getIntExpr());
+ OS << ")";
+}
+
+void OpenACCClausePrinter::VisitAsyncClause(const OpenACCAsyncClause &C) {
+ OS << "async";
+ if (C.hasIntExpr()) {
+ OS << "(";
+ printExpr(C.getIntExpr());
+ OS << ")";
+ }
+}
+
+void OpenACCClausePrinter::VisitPrivateClause(const OpenACCPrivateClause &C) {
+ OS << "private(";
+ llvm::interleaveComma(C.getVarList(), OS,
+ [&](const Expr *E) { printExpr(E); });
+ OS << ")";
+}
+
+void OpenACCClausePrinter::VisitFirstPrivateClause(
+ const OpenACCFirstPrivateClause &C) {
+ OS << "firstprivate(";
+ llvm::interleaveComma(C.getVarList(), OS,
+ [&](const Expr *E) { printExpr(E); });
+ OS << ")";
+}
+
+void OpenACCClausePrinter::VisitAttachClause(const OpenACCAttachClause &C) {
+ OS << "attach(";
+ llvm::interleaveComma(C.getVarList(), OS,
+ [&](const Expr *E) { printExpr(E); });
+ OS << ")";
+}
+
+void OpenACCClausePrinter::VisitDevicePtrClause(
+ const OpenACCDevicePtrClause &C) {
+ OS << "deviceptr(";
+ llvm::interleaveComma(C.getVarList(), OS,
+ [&](const Expr *E) { printExpr(E); });
+ OS << ")";
+}
+
+void OpenACCClausePrinter::VisitNoCreateClause(const OpenACCNoCreateClause &C) {
+ OS << "no_create(";
+ llvm::interleaveComma(C.getVarList(), OS,
+ [&](const Expr *E) { printExpr(E); });
+ OS << ")";
+}
+
+void OpenACCClausePrinter::VisitPresentClause(const OpenACCPresentClause &C) {
+ OS << "present(";
+ llvm::interleaveComma(C.getVarList(), OS,
+ [&](const Expr *E) { printExpr(E); });
+ OS << ")";
+}
+
+void OpenACCClausePrinter::VisitCopyClause(const OpenACCCopyClause &C) {
+ OS << C.getClauseKind() << '(';
+ llvm::interleaveComma(C.getVarList(), OS,
+ [&](const Expr *E) { printExpr(E); });
+ OS << ")";
+}
+
+void OpenACCClausePrinter::VisitCopyInClause(const OpenACCCopyInClause &C) {
+ OS << C.getClauseKind() << '(';
+ if (C.isReadOnly())
+ OS << "readonly: ";
+ llvm::interleaveComma(C.getVarList(), OS,
+ [&](const Expr *E) { printExpr(E); });
+ OS << ")";
+}
+
+void OpenACCClausePrinter::VisitCopyOutClause(const OpenACCCopyOutClause &C) {
+ OS << C.getClauseKind() << '(';
+ if (C.isZero())
+ OS << "zero: ";
+ llvm::interleaveComma(C.getVarList(), OS,
+ [&](const Expr *E) { printExpr(E); });
+ OS << ")";
+}
+
+void OpenACCClausePrinter::VisitCreateClause(const OpenACCCreateClause &C) {
+ OS << C.getClauseKind() << '(';
+ if (C.isZero())
+ OS << "zero: ";
+ llvm::interleaveComma(C.getVarList(), OS,
+ [&](const Expr *E) { printExpr(E); });
+ OS << ")";
+}
+
+void OpenACCClausePrinter::VisitReductionClause(
+ const OpenACCReductionClause &C) {
+ OS << "reduction(" << C.getReductionOp() << ": ";
+ llvm::interleaveComma(C.getVarList(), OS,
+ [&](const Expr *E) { printExpr(E); });
+ OS << ")";
+}
+
+void OpenACCClausePrinter::VisitWaitClause(const OpenACCWaitClause &C) {
+ OS << "wait";
+ if (!C.getLParenLoc().isInvalid()) {
+ OS << "(";
+ if (C.hasDevNumExpr()) {
+ OS << "devnum: ";
+ printExpr(C.getDevNumExpr());
+ OS << " : ";
+ }
+
+ if (C.hasQueuesTag())
+ OS << "queues: ";
+
+ llvm::interleaveComma(C.getQueueIdExprs(), OS,
+ [&](const Expr *E) { printExpr(E); });
+ OS << ")";
+ }
+}
+
+void OpenACCClausePrinter::VisitDeviceTypeClause(
+ const OpenACCDeviceTypeClause &C) {
+ OS << C.getClauseKind();
+ OS << "(";
+ llvm::interleaveComma(C.getArchitectures(), OS,
+ [&](const DeviceTypeArgument &Arch) {
+ if (Arch.first == nullptr)
+ OS << "*";
+ else
+ OS << Arch.first->getName();
+ });
+ OS << ")";
+}
+
+void OpenACCClausePrinter::VisitAutoClause(const OpenACCAutoClause &C) {
+ OS << "auto";
+}
+
+void OpenACCClausePrinter::VisitIndependentClause(
+ const OpenACCIndependentClause &C) {
+ OS << "independent";
+}
+
+void OpenACCClausePrinter::VisitSeqClause(const OpenACCSeqClause &C) {
+ OS << "seq";
+}
diff --git a/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp b/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp
index 04f680a8f5c9..042a5df5906c 100644
--- a/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp
+++ b/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp
@@ -1957,6 +1957,8 @@ void OMPClausePrinter::VisitOMPRelaxedClause(OMPRelaxedClause *) {
OS << "relaxed";
}
+void OMPClausePrinter::VisitOMPWeakClause(OMPWeakClause *) { OS << "weak"; }
+
void OMPClausePrinter::VisitOMPThreadsClause(OMPThreadsClause *) {
OS << "threads";
}
diff --git a/contrib/llvm-project/clang/lib/AST/ParentMap.cpp b/contrib/llvm-project/clang/lib/AST/ParentMap.cpp
index 3d6a1cc84c7b..e97cb5e226f5 100644
--- a/contrib/llvm-project/clang/lib/AST/ParentMap.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ParentMap.cpp
@@ -139,7 +139,9 @@ Stmt* ParentMap::getParent(Stmt* S) const {
}
Stmt *ParentMap::getParentIgnoreParens(Stmt *S) const {
- do { S = getParent(S); } while (S && isa<ParenExpr>(S));
+ do {
+ S = getParent(S);
+ } while (isa_and_nonnull<ParenExpr>(S));
return S;
}
@@ -155,7 +157,8 @@ Stmt *ParentMap::getParentIgnoreParenCasts(Stmt *S) const {
Stmt *ParentMap::getParentIgnoreParenImpCasts(Stmt *S) const {
do {
S = getParent(S);
- } while (S && isa<Expr>(S) && cast<Expr>(S)->IgnoreParenImpCasts() != S);
+ } while (isa_and_nonnull<Expr>(S) &&
+ cast<Expr>(S)->IgnoreParenImpCasts() != S);
return S;
}
diff --git a/contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp b/contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp
index 21cfd5b1de6e..9723c0cfa83b 100644
--- a/contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp
@@ -61,7 +61,26 @@ class ParentMapContext::ParentMap {
template <typename, typename...> friend struct ::MatchParents;
/// Contains parents of a node.
- using ParentVector = llvm::SmallVector<DynTypedNode, 2>;
+ class ParentVector {
+ public:
+ ParentVector() = default;
+ explicit ParentVector(size_t N, const DynTypedNode &Value) {
+ Items.reserve(N);
+ for (; N > 0; --N)
+ push_back(Value);
+ }
+ bool contains(const DynTypedNode &Value) {
+ return Seen.contains(Value);
+ }
+ void push_back(const DynTypedNode &Value) {
+ if (!Value.getMemoizationData() || Seen.insert(Value).second)
+ Items.push_back(Value);
+ }
+ llvm::ArrayRef<DynTypedNode> view() const { return Items; }
+ private:
+ llvm::SmallVector<DynTypedNode, 2> Items;
+ llvm::SmallDenseSet<DynTypedNode, 2> Seen;
+ };
/// Maps from a node to its parents. This is used for nodes that have
/// pointer identity only, which are more common and we can save space by
@@ -99,7 +118,7 @@ class ParentMapContext::ParentMap {
return llvm::ArrayRef<DynTypedNode>();
}
if (const auto *V = I->second.template dyn_cast<ParentVector *>()) {
- return llvm::ArrayRef(*V);
+ return V->view();
}
return getSingleDynTypedNodeFromParentMap(I->second);
}
@@ -252,7 +271,7 @@ public:
const auto *S = It->second.dyn_cast<const Stmt *>();
if (!S) {
if (auto *Vec = It->second.dyn_cast<ParentVector *>())
- return llvm::ArrayRef(*Vec);
+ return Vec->view();
return getSingleDynTypedNodeFromParentMap(It->second);
}
const auto *P = dyn_cast<Expr>(S);
diff --git a/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp b/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp
index a4bb0d999d99..5fd120bc745b 100644
--- a/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp
+++ b/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp
@@ -146,13 +146,13 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
if (Warn && (Size == 0 || Size > 8))
H.handleInvalidMaskType(MaskType);
FS.setMaskType(MaskType);
- } else if (MatchedStr.equals("sensitive"))
+ } else if (MatchedStr == "sensitive")
PrivacyFlags = clang::analyze_os_log::OSLogBufferItem::IsSensitive;
else if (PrivacyFlags !=
- clang::analyze_os_log::OSLogBufferItem::IsSensitive &&
- MatchedStr.equals("private"))
+ clang::analyze_os_log::OSLogBufferItem::IsSensitive &&
+ MatchedStr == "private")
PrivacyFlags = clang::analyze_os_log::OSLogBufferItem::IsPrivate;
- else if (PrivacyFlags == 0 && MatchedStr.equals("public"))
+ else if (PrivacyFlags == 0 && MatchedStr == "public")
PrivacyFlags = clang::analyze_os_log::OSLogBufferItem::IsPublic;
} else {
size_t CommaOrBracePos =
@@ -348,6 +348,8 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
case 'r':
if (isFreeBSDKPrintf)
k = ConversionSpecifier::FreeBSDrArg; // int
+ else if (LO.FixedPoint)
+ k = ConversionSpecifier::rArg;
break;
case 'y':
if (isFreeBSDKPrintf)
@@ -373,6 +375,20 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
if (Target.getTriple().isOSMSVCRT())
k = ConversionSpecifier::ZArg;
break;
+ // ISO/IEC TR 18037 (fixed-point) specific.
+ // NOTE: 'r' is handled up above since FreeBSD also supports %r.
+ case 'k':
+ if (LO.FixedPoint)
+ k = ConversionSpecifier::kArg;
+ break;
+ case 'K':
+ if (LO.FixedPoint)
+ k = ConversionSpecifier::KArg;
+ break;
+ case 'R':
+ if (LO.FixedPoint)
+ k = ConversionSpecifier::RArg;
+ break;
}
// Check to see if we used the Objective-C modifier flags with
@@ -627,6 +643,9 @@ ArgType PrintfSpecifier::getScalarArgType(ASTContext &Ctx,
}
}
+ if (CS.isFixedPointArg() && !Ctx.getLangOpts().FixedPoint)
+ return ArgType::Invalid();
+
switch (CS.getKind()) {
case ConversionSpecifier::sArg:
if (LM.getKind() == LengthModifier::AsWideChar) {
@@ -658,6 +677,50 @@ ArgType PrintfSpecifier::getScalarArgType(ASTContext &Ctx,
return ArgType::CPointerTy;
case ConversionSpecifier::ObjCObjArg:
return ArgType::ObjCPointerTy;
+ case ConversionSpecifier::kArg:
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return Ctx.AccumTy;
+ case LengthModifier::AsShort:
+ return Ctx.ShortAccumTy;
+ case LengthModifier::AsLong:
+ return Ctx.LongAccumTy;
+ default:
+ return ArgType::Invalid();
+ }
+ case ConversionSpecifier::KArg:
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return Ctx.UnsignedAccumTy;
+ case LengthModifier::AsShort:
+ return Ctx.UnsignedShortAccumTy;
+ case LengthModifier::AsLong:
+ return Ctx.UnsignedLongAccumTy;
+ default:
+ return ArgType::Invalid();
+ }
+ case ConversionSpecifier::rArg:
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return Ctx.FractTy;
+ case LengthModifier::AsShort:
+ return Ctx.ShortFractTy;
+ case LengthModifier::AsLong:
+ return Ctx.LongFractTy;
+ default:
+ return ArgType::Invalid();
+ }
+ case ConversionSpecifier::RArg:
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return Ctx.UnsignedFractTy;
+ case LengthModifier::AsShort:
+ return Ctx.UnsignedShortFractTy;
+ case LengthModifier::AsLong:
+ return Ctx.UnsignedLongFractTy;
+ default:
+ return ArgType::Invalid();
+ }
default:
break;
}
@@ -802,6 +865,8 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
+#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/AMDGPUTypes.def"
#define SIGNED_TYPE(Id, SingletonId)
#define UNSIGNED_TYPE(Id, SingletonId)
#define FLOATING_TYPE(Id, SingletonId)
@@ -955,6 +1020,8 @@ bool PrintfSpecifier::hasValidPlusPrefix() const {
case ConversionSpecifier::AArg:
case ConversionSpecifier::FreeBSDrArg:
case ConversionSpecifier::FreeBSDyArg:
+ case ConversionSpecifier::rArg:
+ case ConversionSpecifier::kArg:
return true;
default:
@@ -966,7 +1033,7 @@ bool PrintfSpecifier::hasValidAlternativeForm() const {
if (!HasAlternativeForm)
return true;
- // Alternate form flag only valid with the bBoxXaAeEfFgG conversions
+ // Alternate form flag only valid with the bBoxXaAeEfFgGrRkK conversions
switch (CS.getKind()) {
case ConversionSpecifier::bArg:
case ConversionSpecifier::BArg:
@@ -984,6 +1051,10 @@ bool PrintfSpecifier::hasValidAlternativeForm() const {
case ConversionSpecifier::GArg:
case ConversionSpecifier::FreeBSDrArg:
case ConversionSpecifier::FreeBSDyArg:
+ case ConversionSpecifier::rArg:
+ case ConversionSpecifier::RArg:
+ case ConversionSpecifier::kArg:
+ case ConversionSpecifier::KArg:
return true;
default:
@@ -995,7 +1066,7 @@ bool PrintfSpecifier::hasValidLeadingZeros() const {
if (!HasLeadingZeroes)
return true;
- // Leading zeroes flag only valid with the bBdiouxXaAeEfFgG conversions
+ // Leading zeroes flag only valid with the bBdiouxXaAeEfFgGrRkK conversions
switch (CS.getKind()) {
case ConversionSpecifier::bArg:
case ConversionSpecifier::BArg:
@@ -1018,6 +1089,10 @@ bool PrintfSpecifier::hasValidLeadingZeros() const {
case ConversionSpecifier::GArg:
case ConversionSpecifier::FreeBSDrArg:
case ConversionSpecifier::FreeBSDyArg:
+ case ConversionSpecifier::rArg:
+ case ConversionSpecifier::RArg:
+ case ConversionSpecifier::kArg:
+ case ConversionSpecifier::KArg:
return true;
default:
@@ -1044,6 +1119,8 @@ bool PrintfSpecifier::hasValidSpacePrefix() const {
case ConversionSpecifier::AArg:
case ConversionSpecifier::FreeBSDrArg:
case ConversionSpecifier::FreeBSDyArg:
+ case ConversionSpecifier::rArg:
+ case ConversionSpecifier::kArg:
return true;
default:
@@ -1089,7 +1166,7 @@ bool PrintfSpecifier::hasValidPrecision() const {
if (Precision.getHowSpecified() == OptionalAmount::NotSpecified)
return true;
- // Precision is only valid with the bBdiouxXaAeEfFgGsP conversions
+ // Precision is only valid with the bBdiouxXaAeEfFgGsPrRkK conversions
switch (CS.getKind()) {
case ConversionSpecifier::bArg:
case ConversionSpecifier::BArg:
@@ -1114,6 +1191,10 @@ bool PrintfSpecifier::hasValidPrecision() const {
case ConversionSpecifier::FreeBSDrArg:
case ConversionSpecifier::FreeBSDyArg:
case ConversionSpecifier::PArg:
+ case ConversionSpecifier::rArg:
+ case ConversionSpecifier::RArg:
+ case ConversionSpecifier::kArg:
+ case ConversionSpecifier::KArg:
return true;
default:
diff --git a/contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp b/contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp
index 066377423df7..4e1243ef79e8 100644
--- a/contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp
+++ b/contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp
@@ -65,8 +65,9 @@ static bool getFullyQualifiedTemplateName(const ASTContext &Ctx,
assert(ArgTDecl != nullptr);
QualifiedTemplateName *QTName = TName.getAsQualifiedTemplateName();
- if (QTName && !QTName->hasTemplateKeyword()) {
- NNS = QTName->getQualifier();
+ if (QTName &&
+ !QTName->hasTemplateKeyword() &&
+ (NNS = QTName->getQualifier())) {
NestedNameSpecifier *QNNS = getFullyQualifiedNestedNameSpecifier(
Ctx, NNS, WithGlobalNsPrefix);
if (QNNS != NNS) {
@@ -269,8 +270,8 @@ static NestedNameSpecifier *createNestedNameSpecifierForScopeOf(
assert(Decl);
const DeclContext *DC = Decl->getDeclContext()->getRedeclContext();
- const auto *Outer = dyn_cast_or_null<NamedDecl>(DC);
- const auto *OuterNS = dyn_cast_or_null<NamespaceDecl>(DC);
+ const auto *Outer = dyn_cast<NamedDecl>(DC);
+ const auto *OuterNS = dyn_cast<NamespaceDecl>(DC);
if (Outer && !(OuterNS && OuterNS->isAnonymousNamespace())) {
if (const auto *CxxDecl = dyn_cast<CXXRecordDecl>(DC)) {
if (ClassTemplateDecl *ClassTempl =
diff --git a/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp b/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp
index 6dfaadd92e79..d9bf62c2bbb0 100644
--- a/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -602,21 +602,28 @@ protected:
/// Whether the external AST source has provided a layout for this
/// record.
+ LLVM_PREFERRED_TYPE(bool)
unsigned UseExternalLayout : 1;
/// Whether we need to infer alignment, even when we have an
/// externally-provided layout.
+ LLVM_PREFERRED_TYPE(bool)
unsigned InferAlignment : 1;
/// Packed - Whether the record is packed or not.
+ LLVM_PREFERRED_TYPE(bool)
unsigned Packed : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsUnion : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsMac68kAlign : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsNaturalAlign : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsMsStruct : 1;
/// UnfilledBitsInLastUnit - If the last field laid out was a bitfield,
@@ -2451,6 +2458,11 @@ static bool mustSkipTailPadding(TargetCXXABI ABI, const CXXRecordDecl *RD) {
}
static bool isMsLayout(const ASTContext &Context) {
+ // Check if it's CUDA device compilation; ensure layout consistency with host.
+ if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice &&
+ Context.getAuxTargetInfo())
+ return Context.getAuxTargetInfo()->getCXXABI().isMicrosoft();
+
return Context.getTargetInfo().getCXXABI().isMicrosoft();
}
diff --git a/contrib/llvm-project/clang/lib/AST/ScanfFormatString.cpp b/contrib/llvm-project/clang/lib/AST/ScanfFormatString.cpp
index 64c430e623b5..7ee21c8c6195 100644
--- a/contrib/llvm-project/clang/lib/AST/ScanfFormatString.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ScanfFormatString.cpp
@@ -448,9 +448,7 @@ bool ScanfSpecifier::fixType(QualType QT, QualType RawQT,
if (const ConstantArrayType *CAT = Ctx.getAsConstantArrayType(RawQT)) {
if (CAT->getSizeModifier() == ArraySizeModifier::Normal)
FieldWidth = OptionalAmount(OptionalAmount::Constant,
- CAT->getSize().getZExtValue() - 1,
- "", 0, false);
-
+ CAT->getZExtSize() - 1, "", 0, false);
}
return true;
}
diff --git a/contrib/llvm-project/clang/lib/AST/SelectorLocationsKind.cpp b/contrib/llvm-project/clang/lib/AST/SelectorLocationsKind.cpp
index 2c34c9c60c2b..ebe6324f904c 100644
--- a/contrib/llvm-project/clang/lib/AST/SelectorLocationsKind.cpp
+++ b/contrib/llvm-project/clang/lib/AST/SelectorLocationsKind.cpp
@@ -26,7 +26,7 @@ static SourceLocation getStandardSelLoc(unsigned Index,
assert(Index == 0);
if (EndLoc.isInvalid())
return SourceLocation();
- IdentifierInfo *II = Sel.getIdentifierInfoForSlot(0);
+ const IdentifierInfo *II = Sel.getIdentifierInfoForSlot(0);
unsigned Len = II ? II->getLength() : 0;
return EndLoc.getLocWithOffset(-Len);
}
@@ -34,7 +34,7 @@ static SourceLocation getStandardSelLoc(unsigned Index,
assert(Index < NumSelArgs);
if (ArgLoc.isInvalid())
return SourceLocation();
- IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Index);
+ const IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Index);
unsigned Len = /* selector id */ (II ? II->getLength() : 0) + /* ':' */ 1;
if (WithArgSpace)
++Len;
diff --git a/contrib/llvm-project/clang/lib/AST/Stmt.cpp b/contrib/llvm-project/clang/lib/AST/Stmt.cpp
index afd05881cb16..fe59d6070b3e 100644
--- a/contrib/llvm-project/clang/lib/AST/Stmt.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Stmt.cpp
@@ -23,6 +23,7 @@
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtOpenACC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/Type.h"
#include "clang/Basic/CharInfo.h"
diff --git a/contrib/llvm-project/clang/lib/AST/StmtOpenACC.cpp b/contrib/llvm-project/clang/lib/AST/StmtOpenACC.cpp
new file mode 100644
index 000000000000..2d864a288579
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/StmtOpenACC.cpp
@@ -0,0 +1,125 @@
+//===--- StmtOpenACC.cpp - Classes for OpenACC Constructs -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the subclasses of Stmt class declared in StmtOpenACC.h
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtOpenACC.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/StmtCXX.h"
+using namespace clang;
+
+OpenACCComputeConstruct *
+OpenACCComputeConstruct::CreateEmpty(const ASTContext &C, unsigned NumClauses) {
+ void *Mem = C.Allocate(
+ OpenACCComputeConstruct::totalSizeToAlloc<const OpenACCClause *>(
+ NumClauses));
+ auto *Inst = new (Mem) OpenACCComputeConstruct(NumClauses);
+ return Inst;
+}
+
+OpenACCComputeConstruct *OpenACCComputeConstruct::Create(
+ const ASTContext &C, OpenACCDirectiveKind K, SourceLocation BeginLoc,
+ SourceLocation DirLoc, SourceLocation EndLoc,
+ ArrayRef<const OpenACCClause *> Clauses, Stmt *StructuredBlock,
+ ArrayRef<OpenACCLoopConstruct *> AssociatedLoopConstructs) {
+ void *Mem = C.Allocate(
+ OpenACCComputeConstruct::totalSizeToAlloc<const OpenACCClause *>(
+ Clauses.size()));
+ auto *Inst = new (Mem) OpenACCComputeConstruct(K, BeginLoc, DirLoc, EndLoc,
+ Clauses, StructuredBlock);
+
+ llvm::for_each(AssociatedLoopConstructs, [&](OpenACCLoopConstruct *C) {
+ C->setParentComputeConstruct(Inst);
+ });
+
+ return Inst;
+}
+
+void OpenACCComputeConstruct::findAndSetChildLoops() {
+ struct LoopConstructFinder : RecursiveASTVisitor<LoopConstructFinder> {
+ OpenACCComputeConstruct *Construct = nullptr;
+
+ LoopConstructFinder(OpenACCComputeConstruct *Construct)
+ : Construct(Construct) {}
+
+ bool TraverseOpenACCComputeConstruct(OpenACCComputeConstruct *C) {
+ // Stop searching if we find a compute construct.
+ return true;
+ }
+ bool TraverseOpenACCLoopConstruct(OpenACCLoopConstruct *C) {
+ // Stop searching if we find a loop construct, after taking ownership of
+ // it.
+ C->setParentComputeConstruct(Construct);
+ return true;
+ }
+ };
+
+ LoopConstructFinder f(this);
+ f.TraverseStmt(getAssociatedStmt());
+}
+
+OpenACCLoopConstruct::OpenACCLoopConstruct(unsigned NumClauses)
+ : OpenACCAssociatedStmtConstruct(
+ OpenACCLoopConstructClass, OpenACCDirectiveKind::Loop,
+ SourceLocation{}, SourceLocation{}, SourceLocation{},
+ /*AssociatedStmt=*/nullptr) {
+ std::uninitialized_value_construct(
+ getTrailingObjects<const OpenACCClause *>(),
+ getTrailingObjects<const OpenACCClause *>() + NumClauses);
+ setClauseList(
+ MutableArrayRef(getTrailingObjects<const OpenACCClause *>(), NumClauses));
+}
+
+OpenACCLoopConstruct::OpenACCLoopConstruct(
+ SourceLocation Start, SourceLocation DirLoc, SourceLocation End,
+ ArrayRef<const OpenACCClause *> Clauses, Stmt *Loop)
+ : OpenACCAssociatedStmtConstruct(OpenACCLoopConstructClass,
+ OpenACCDirectiveKind::Loop, Start, DirLoc,
+ End, Loop) {
+ // accept 'nullptr' for the loop. This is diagnosed somewhere, but this gives
+ // us some level of AST fidelity in the error case.
+ assert((Loop == nullptr || isa<ForStmt, CXXForRangeStmt>(Loop)) &&
+ "Associated Loop not a for loop?");
+ // Initialize the trailing storage.
+ std::uninitialized_copy(Clauses.begin(), Clauses.end(),
+ getTrailingObjects<const OpenACCClause *>());
+
+ setClauseList(MutableArrayRef(getTrailingObjects<const OpenACCClause *>(),
+ Clauses.size()));
+}
+
+void OpenACCLoopConstruct::setLoop(Stmt *Loop) {
+ assert((isa<ForStmt, CXXForRangeStmt>(Loop)) &&
+ "Associated Loop not a for loop?");
+ setAssociatedStmt(Loop);
+}
+
+OpenACCLoopConstruct *OpenACCLoopConstruct::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses) {
+ void *Mem =
+ C.Allocate(OpenACCLoopConstruct::totalSizeToAlloc<const OpenACCClause *>(
+ NumClauses));
+ auto *Inst = new (Mem) OpenACCLoopConstruct(NumClauses);
+ return Inst;
+}
+
+OpenACCLoopConstruct *
+OpenACCLoopConstruct::Create(const ASTContext &C, SourceLocation BeginLoc,
+ SourceLocation DirLoc, SourceLocation EndLoc,
+ ArrayRef<const OpenACCClause *> Clauses,
+ Stmt *Loop) {
+ void *Mem =
+ C.Allocate(OpenACCLoopConstruct::totalSizeToAlloc<const OpenACCClause *>(
+ Clauses.size()));
+ auto *Inst =
+ new (Mem) OpenACCLoopConstruct(BeginLoc, DirLoc, EndLoc, Clauses, Loop);
+ return Inst;
+}
diff --git a/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp b/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp
index 426b35848cb5..a2325b177d41 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements the subclesses of Stmt class declared in StmtOpenMP.h
+// This file implements the subclasses of Stmt class declared in StmtOpenMP.h
//
//===----------------------------------------------------------------------===//
@@ -449,6 +449,44 @@ OMPUnrollDirective *OMPUnrollDirective::CreateEmpty(const ASTContext &C,
SourceLocation(), SourceLocation());
}
+OMPReverseDirective *
+OMPReverseDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc, Stmt *AssociatedStmt,
+ Stmt *TransformedStmt, Stmt *PreInits) {
+ OMPReverseDirective *Dir = createDirective<OMPReverseDirective>(
+ C, std::nullopt, AssociatedStmt, TransformedStmtOffset + 1, StartLoc,
+ EndLoc);
+ Dir->setTransformedStmt(TransformedStmt);
+ Dir->setPreInits(PreInits);
+ return Dir;
+}
+
+OMPReverseDirective *OMPReverseDirective::CreateEmpty(const ASTContext &C) {
+ return createEmptyDirective<OMPReverseDirective>(
+ C, /*NumClauses=*/0, /*HasAssociatedStmt=*/true,
+ TransformedStmtOffset + 1, SourceLocation(), SourceLocation());
+}
+
+OMPInterchangeDirective *OMPInterchangeDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, unsigned NumLoops, Stmt *AssociatedStmt,
+ Stmt *TransformedStmt, Stmt *PreInits) {
+ OMPInterchangeDirective *Dir = createDirective<OMPInterchangeDirective>(
+ C, Clauses, AssociatedStmt, TransformedStmtOffset + 1, StartLoc, EndLoc,
+ NumLoops);
+ Dir->setTransformedStmt(TransformedStmt);
+ Dir->setPreInits(PreInits);
+ return Dir;
+}
+
+OMPInterchangeDirective *
+OMPInterchangeDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
+ unsigned NumLoops) {
+ return createEmptyDirective<OMPInterchangeDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true, TransformedStmtOffset + 1,
+ SourceLocation(), SourceLocation(), NumLoops);
+}
+
OMPForSimdDirective *
OMPForSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
@@ -2431,7 +2469,7 @@ OMPTeamsGenericLoopDirective::CreateEmpty(const ASTContext &C,
OMPTargetTeamsGenericLoopDirective *OMPTargetTeamsGenericLoopDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs) {
+ const HelperExprs &Exprs, bool CanBeParallelFor) {
auto *Dir = createDirective<OMPTargetTeamsGenericLoopDirective>(
C, Clauses, AssociatedStmt,
numLoopChildren(CollapsedNum, OMPD_target_teams_loop), StartLoc, EndLoc,
@@ -2473,6 +2511,7 @@ OMPTargetTeamsGenericLoopDirective *OMPTargetTeamsGenericLoopDirective::Create(
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
+ Dir->setCanBeParallelFor(CanBeParallelFor);
return Dir;
}
diff --git a/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp b/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp
index 9d4aa07ec4da..69e0b763e8dd 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp
@@ -84,7 +84,7 @@ namespace {
void PrintStmt(Stmt *S, int SubIndent) {
IndentLevel += SubIndent;
- if (S && isa<Expr>(S)) {
+ if (isa_and_nonnull<Expr>(S)) {
// If this is an expr used in a stmt context, indent and newline it.
Indent();
Visit(S);
@@ -292,8 +292,11 @@ void StmtPrinter::VisitLabelStmt(LabelStmt *Node) {
}
void StmtPrinter::VisitAttributedStmt(AttributedStmt *Node) {
- for (const auto *Attr : Node->getAttrs()) {
+ llvm::ArrayRef<const Attr *> Attrs = Node->getAttrs();
+ for (const auto *Attr : Attrs) {
Attr->printPretty(OS, Policy);
+ if (Attr != Attrs.back())
+ OS << ' ';
}
PrintStmt(Node->getSubStmt(), 0);
@@ -760,6 +763,16 @@ void StmtPrinter::VisitOMPUnrollDirective(OMPUnrollDirective *Node) {
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPReverseDirective(OMPReverseDirective *Node) {
+ Indent() << "#pragma omp reverse";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPInterchangeDirective(OMPInterchangeDirective *Node) {
+ Indent() << "#pragma omp interchange";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPForDirective(OMPForDirective *Node) {
Indent() << "#pragma omp for";
PrintOMPExecutableDirective(Node);
@@ -1138,6 +1151,35 @@ void StmtPrinter::VisitOMPTargetParallelGenericLoopDirective(
}
//===----------------------------------------------------------------------===//
+// OpenACC construct printing methods
+//===----------------------------------------------------------------------===//
+void StmtPrinter::VisitOpenACCComputeConstruct(OpenACCComputeConstruct *S) {
+ Indent() << "#pragma acc " << S->getDirectiveKind();
+
+ if (!S->clauses().empty()) {
+ OS << ' ';
+ OpenACCClausePrinter Printer(OS, Policy);
+ Printer.VisitClauseList(S->clauses());
+ }
+ OS << '\n';
+
+ PrintStmt(S->getStructuredBlock());
+}
+
+void StmtPrinter::VisitOpenACCLoopConstruct(OpenACCLoopConstruct *S) {
+ Indent() << "#pragma acc loop";
+
+ if (!S->clauses().empty()) {
+ OS << ' ';
+ OpenACCClausePrinter Printer(OS, Policy);
+ Printer.VisitClauseList(S->clauses());
+ }
+ OS << '\n';
+
+ PrintStmt(S->getLoop());
+}
+
+//===----------------------------------------------------------------------===//
// Expr printing methods.
//===----------------------------------------------------------------------===//
@@ -1145,6 +1187,10 @@ void StmtPrinter::VisitSourceLocExpr(SourceLocExpr *Node) {
OS << Node->getBuiltinStr() << "()";
}
+void StmtPrinter::VisitEmbedExpr(EmbedExpr *Node) {
+ llvm::report_fatal_error("Not implemented");
+}
+
void StmtPrinter::VisitConstantExpr(ConstantExpr *Node) {
PrintExpr(Node->getSubExpr());
}
@@ -1429,7 +1475,7 @@ void StmtPrinter::VisitOffsetOfExpr(OffsetOfExpr *Node) {
continue;
// Field or identifier node.
- IdentifierInfo *Id = ON.getFieldName();
+ const IdentifierInfo *Id = ON.getFieldName();
if (!Id)
continue;
@@ -1503,7 +1549,7 @@ void StmtPrinter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *Node) {
OS << "]";
}
-void StmtPrinter::VisitOMPArraySectionExpr(OMPArraySectionExpr *Node) {
+void StmtPrinter::VisitArraySectionExpr(ArraySectionExpr *Node) {
PrintExpr(Node->getBase());
OS << "[";
if (Node->getLowerBound())
@@ -1513,7 +1559,7 @@ void StmtPrinter::VisitOMPArraySectionExpr(OMPArraySectionExpr *Node) {
if (Node->getLength())
PrintExpr(Node->getLength());
}
- if (Node->getColonLocSecond().isValid()) {
+ if (Node->isOMPArraySection() && Node->getColonLocSecond().isValid()) {
OS << ":";
if (Node->getStride())
PrintExpr(Node->getStride());
@@ -1833,7 +1879,7 @@ void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) {
case AtomicExpr::AO ## ID: \
Name = #ID "("; \
break;
-#include "clang/Basic/Builtins.def"
+#include "clang/Basic/Builtins.inc"
}
OS << Name;
@@ -1907,7 +1953,7 @@ void StmtPrinter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *Node) {
void StmtPrinter::VisitCXXMemberCallExpr(CXXMemberCallExpr *Node) {
// If we have a conversion operator call only print the argument.
CXXMethodDecl *MD = Node->getMethodDecl();
- if (MD && isa<CXXConversionDecl>(MD)) {
+ if (isa_and_nonnull<CXXConversionDecl>(MD)) {
PrintExpr(Node->getImplicitObjectArgument());
return;
}
@@ -2330,7 +2376,7 @@ void StmtPrinter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
E->getQualifier()->print(OS, Policy);
OS << "~";
- if (IdentifierInfo *II = E->getDestroyedTypeIdentifier())
+ if (const IdentifierInfo *II = E->getDestroyedTypeIdentifier())
OS << II->getName();
else
E->getDestroyedType().print(OS, Policy);
@@ -2449,6 +2495,10 @@ void StmtPrinter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
OS << "sizeof...(" << *E->getPack() << ")";
}
+void StmtPrinter::VisitPackIndexingExpr(PackIndexingExpr *E) {
+ OS << E->getPackIdExpression() << "...[" << E->getIndexExpr() << "]";
+}
+
void StmtPrinter::VisitSubstNonTypeTemplateParmPackExpr(
SubstNonTypeTemplateParmPackExpr *Node) {
OS << *Node->getParameterPack();
diff --git a/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp b/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp
index dd0838edab7b..89d2a422509d 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp
@@ -61,7 +61,7 @@ namespace {
virtual void VisitName(DeclarationName Name, bool TreatAsDecl = false) = 0;
/// Visit identifiers that are not in Decl's or Type's.
- virtual void VisitIdentifierInfo(IdentifierInfo *II) = 0;
+ virtual void VisitIdentifierInfo(const IdentifierInfo *II) = 0;
/// Visit a nested-name-specifier that occurs within an expression
/// or statement.
@@ -163,7 +163,7 @@ namespace {
ID.AddPointer(Name.getAsOpaquePtr());
}
- void VisitIdentifierInfo(IdentifierInfo *II) override {
+ void VisitIdentifierInfo(const IdentifierInfo *II) override {
ID.AddPointer(II);
}
@@ -211,7 +211,7 @@ namespace {
}
Hash.AddDeclarationName(Name, TreatAsDecl);
}
- void VisitIdentifierInfo(IdentifierInfo *II) override {
+ void VisitIdentifierInfo(const IdentifierInfo *II) override {
ID.AddBoolean(II);
if (II) {
Hash.AddIdentifierInfo(II);
@@ -594,6 +594,8 @@ void OMPClauseProfiler::VisitOMPReleaseClause(const OMPReleaseClause *) {}
void OMPClauseProfiler::VisitOMPRelaxedClause(const OMPRelaxedClause *) {}
+void OMPClauseProfiler::VisitOMPWeakClause(const OMPWeakClause *) {}
+
void OMPClauseProfiler::VisitOMPThreadsClause(const OMPThreadsClause *) {}
void OMPClauseProfiler::VisitOMPSIMDClause(const OMPSIMDClause *) {}
@@ -983,6 +985,15 @@ void StmtProfiler::VisitOMPUnrollDirective(const OMPUnrollDirective *S) {
VisitOMPLoopTransformationDirective(S);
}
+void StmtProfiler::VisitOMPReverseDirective(const OMPReverseDirective *S) {
+ VisitOMPLoopTransformationDirective(S);
+}
+
+void StmtProfiler::VisitOMPInterchangeDirective(
+ const OMPInterchangeDirective *S) {
+ VisitOMPLoopTransformationDirective(S);
+}
+
void StmtProfiler::VisitOMPForDirective(const OMPForDirective *S) {
VisitOMPLoopDirective(S);
}
@@ -1433,7 +1444,7 @@ void StmtProfiler::VisitMatrixSubscriptExpr(const MatrixSubscriptExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitOMPArraySectionExpr(const OMPArraySectionExpr *S) {
+void StmtProfiler::VisitArraySectionExpr(const ArraySectionExpr *S) {
VisitExpr(S);
}
@@ -2009,6 +2020,7 @@ void StmtProfiler::VisitMSPropertySubscriptExpr(
void StmtProfiler::VisitCXXThisExpr(const CXXThisExpr *S) {
VisitExpr(S);
ID.AddBoolean(S->isImplicit());
+ ID.AddBoolean(S->isCapturedByCopyInLambdaWithExplicitObjectParameter());
}
void StmtProfiler::VisitCXXThrowExpr(const CXXThrowExpr *S) {
@@ -2068,13 +2080,31 @@ StmtProfiler::VisitLambdaExpr(const LambdaExpr *S) {
}
CXXRecordDecl *Lambda = S->getLambdaClass();
- ID.AddInteger(Lambda->getODRHash());
-
for (const auto &Capture : Lambda->captures()) {
ID.AddInteger(Capture.getCaptureKind());
if (Capture.capturesVariable())
VisitDecl(Capture.getCapturedVar());
}
+
+ // Profiling the body of the lambda may be dangerous during deserialization.
+ // So we'd like only to profile the signature here.
+ ODRHash Hasher;
+ // FIXME: We can't get the operator call easily by
+ // `CXXRecordDecl::getLambdaCallOperator()` if we're in deserialization.
+ // So we have to do something raw here.
+ for (auto *SubDecl : Lambda->decls()) {
+ FunctionDecl *Call = nullptr;
+ if (auto *FTD = dyn_cast<FunctionTemplateDecl>(SubDecl))
+ Call = FTD->getTemplatedDecl();
+ else if (auto *FD = dyn_cast<FunctionDecl>(SubDecl))
+ Call = FD;
+
+ if (!Call)
+ continue;
+
+ Hasher.AddFunctionDecl(Call, /*SkipBody=*/true);
+ }
+ ID.AddInteger(Hasher.CalculateHash());
}
void
@@ -2219,6 +2249,12 @@ void StmtProfiler::VisitSizeOfPackExpr(const SizeOfPackExpr *S) {
}
}
+void StmtProfiler::VisitPackIndexingExpr(const PackIndexingExpr *E) {
+ VisitExpr(E);
+ VisitExpr(E->getPackIdExpression());
+ VisitExpr(E->getIndexExpr());
+}
+
void StmtProfiler::VisitSubstNonTypeTemplateParmPackExpr(
const SubstNonTypeTemplateParmPackExpr *S) {
VisitExpr(S);
@@ -2286,6 +2322,8 @@ void StmtProfiler::VisitSourceLocExpr(const SourceLocExpr *E) {
VisitExpr(E);
}
+void StmtProfiler::VisitEmbedExpr(const EmbedExpr *E) { VisitExpr(E); }
+
void StmtProfiler::VisitRecoveryExpr(const RecoveryExpr *E) { VisitExpr(E); }
void StmtProfiler::VisitObjCStringLiteral(const ObjCStringLiteral *S) {
@@ -2433,6 +2471,166 @@ void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) {
}
}
+namespace {
+class OpenACCClauseProfiler
+ : public OpenACCClauseVisitor<OpenACCClauseProfiler> {
+ StmtProfiler &Profiler;
+
+public:
+ OpenACCClauseProfiler(StmtProfiler &P) : Profiler(P) {}
+
+ void VisitOpenACCClauseList(ArrayRef<const OpenACCClause *> Clauses) {
+ for (const OpenACCClause *Clause : Clauses) {
+ // TODO OpenACC: When we have clauses with expressions, we should
+ // profile them too.
+ Visit(Clause);
+ }
+ }
+
+#define VISIT_CLAUSE(CLAUSE_NAME) \
+ void Visit##CLAUSE_NAME##Clause(const OpenACC##CLAUSE_NAME##Clause &Clause);
+
+#include "clang/Basic/OpenACCClauses.def"
+};
+
+/// Nothing to do here, there are no sub-statements.
+void OpenACCClauseProfiler::VisitDefaultClause(
+ const OpenACCDefaultClause &Clause) {}
+
+void OpenACCClauseProfiler::VisitIfClause(const OpenACCIfClause &Clause) {
+ assert(Clause.hasConditionExpr() &&
+ "if clause requires a valid condition expr");
+ Profiler.VisitStmt(Clause.getConditionExpr());
+}
+
+void OpenACCClauseProfiler::VisitCopyClause(const OpenACCCopyClause &Clause) {
+ for (auto *E : Clause.getVarList())
+ Profiler.VisitStmt(E);
+}
+void OpenACCClauseProfiler::VisitCopyInClause(
+ const OpenACCCopyInClause &Clause) {
+ for (auto *E : Clause.getVarList())
+ Profiler.VisitStmt(E);
+}
+
+void OpenACCClauseProfiler::VisitCopyOutClause(
+ const OpenACCCopyOutClause &Clause) {
+ for (auto *E : Clause.getVarList())
+ Profiler.VisitStmt(E);
+}
+
+void OpenACCClauseProfiler::VisitCreateClause(
+ const OpenACCCreateClause &Clause) {
+ for (auto *E : Clause.getVarList())
+ Profiler.VisitStmt(E);
+}
+
+void OpenACCClauseProfiler::VisitSelfClause(const OpenACCSelfClause &Clause) {
+ if (Clause.hasConditionExpr())
+ Profiler.VisitStmt(Clause.getConditionExpr());
+}
+
+void OpenACCClauseProfiler::VisitNumGangsClause(
+ const OpenACCNumGangsClause &Clause) {
+ for (auto *E : Clause.getIntExprs())
+ Profiler.VisitStmt(E);
+}
+
+void OpenACCClauseProfiler::VisitNumWorkersClause(
+ const OpenACCNumWorkersClause &Clause) {
+ assert(Clause.hasIntExpr() && "num_workers clause requires a valid int expr");
+ Profiler.VisitStmt(Clause.getIntExpr());
+}
+
+void OpenACCClauseProfiler::VisitPrivateClause(
+ const OpenACCPrivateClause &Clause) {
+ for (auto *E : Clause.getVarList())
+ Profiler.VisitStmt(E);
+}
+
+void OpenACCClauseProfiler::VisitFirstPrivateClause(
+ const OpenACCFirstPrivateClause &Clause) {
+ for (auto *E : Clause.getVarList())
+ Profiler.VisitStmt(E);
+}
+
+void OpenACCClauseProfiler::VisitAttachClause(
+ const OpenACCAttachClause &Clause) {
+ for (auto *E : Clause.getVarList())
+ Profiler.VisitStmt(E);
+}
+
+void OpenACCClauseProfiler::VisitDevicePtrClause(
+ const OpenACCDevicePtrClause &Clause) {
+ for (auto *E : Clause.getVarList())
+ Profiler.VisitStmt(E);
+}
+
+void OpenACCClauseProfiler::VisitNoCreateClause(
+ const OpenACCNoCreateClause &Clause) {
+ for (auto *E : Clause.getVarList())
+ Profiler.VisitStmt(E);
+}
+
+void OpenACCClauseProfiler::VisitPresentClause(
+ const OpenACCPresentClause &Clause) {
+ for (auto *E : Clause.getVarList())
+ Profiler.VisitStmt(E);
+}
+
+void OpenACCClauseProfiler::VisitVectorLengthClause(
+ const OpenACCVectorLengthClause &Clause) {
+ assert(Clause.hasIntExpr() &&
+ "vector_length clause requires a valid int expr");
+ Profiler.VisitStmt(Clause.getIntExpr());
+}
+
+void OpenACCClauseProfiler::VisitAsyncClause(const OpenACCAsyncClause &Clause) {
+ if (Clause.hasIntExpr())
+ Profiler.VisitStmt(Clause.getIntExpr());
+}
+
+void OpenACCClauseProfiler::VisitWaitClause(const OpenACCWaitClause &Clause) {
+ if (Clause.hasDevNumExpr())
+ Profiler.VisitStmt(Clause.getDevNumExpr());
+ for (auto *E : Clause.getQueueIdExprs())
+ Profiler.VisitStmt(E);
+}
+/// Nothing to do here, there are no sub-statements.
+void OpenACCClauseProfiler::VisitDeviceTypeClause(
+ const OpenACCDeviceTypeClause &Clause) {}
+
+void OpenACCClauseProfiler::VisitAutoClause(const OpenACCAutoClause &Clause) {}
+
+void OpenACCClauseProfiler::VisitIndependentClause(
+ const OpenACCIndependentClause &Clause) {}
+
+void OpenACCClauseProfiler::VisitSeqClause(const OpenACCSeqClause &Clause) {}
+
+void OpenACCClauseProfiler::VisitReductionClause(
+ const OpenACCReductionClause &Clause) {
+ for (auto *E : Clause.getVarList())
+ Profiler.VisitStmt(E);
+}
+} // namespace
+
+void StmtProfiler::VisitOpenACCComputeConstruct(
+ const OpenACCComputeConstruct *S) {
+ // VisitStmt handles children, so the AssociatedStmt is handled.
+ VisitStmt(S);
+
+ OpenACCClauseProfiler P{*this};
+ P.VisitOpenACCClauseList(S->clauses());
+}
+
+void StmtProfiler::VisitOpenACCLoopConstruct(const OpenACCLoopConstruct *S) {
+ // VisitStmt handles children, so the Loop is handled.
+ VisitStmt(S);
+
+ OpenACCClauseProfiler P{*this};
+ P.VisitOpenACCClauseList(S->clauses());
+}
+
void Stmt::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical, bool ProfileLambdaExpr) const {
StmtProfilerWithPointers Profiler(ID, Context, Canonical, ProfileLambdaExpr);
diff --git a/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp b/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp
index 3310d7dc24c5..2e6839e948d9 100644
--- a/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp
@@ -221,8 +221,13 @@ static const ValueDecl *getAsSimpleValueDeclRef(const ASTContext &Ctx,
// We model class non-type template parameters as their template parameter
// object declaration.
- if (V.isStruct() || V.isUnion())
+ if (V.isStruct() || V.isUnion()) {
+ // Dependent types are not supposed to be described as
+ // TemplateParamObjectDecls.
+ if (T->isDependentType() || T->isInstantiationDependentType())
+ return nullptr;
return Ctx.getTemplateParamObjectDecl(T, V);
+ }
// Pointers and references with an empty path use the special 'Declaration'
// representation.
@@ -538,9 +543,10 @@ void TemplateArgument::print(const PrintingPolicy &Policy, raw_ostream &Out,
Out << "nullptr";
break;
- case Template:
- getAsTemplate().print(Out, Policy, TemplateName::Qualified::Fully);
+ case Template: {
+ getAsTemplate().print(Out, Policy);
break;
+ }
case TemplateExpansion:
getAsTemplateOrTemplatePattern().print(Out, Policy);
@@ -571,15 +577,6 @@ void TemplateArgument::print(const PrintingPolicy &Policy, raw_ostream &Out,
}
}
-void TemplateArgument::dump(raw_ostream &Out) const {
- LangOptions LO; // FIXME! see also TemplateName::dump().
- LO.CPlusPlus = true;
- LO.Bool = true;
- print(PrintingPolicy(LO), Out, /*IncludeType*/ true);
-}
-
-LLVM_DUMP_METHOD void TemplateArgument::dump() const { dump(llvm::errs()); }
-
//===----------------------------------------------------------------------===//
// TemplateArgumentLoc Implementation
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/clang/lib/AST/TemplateName.cpp b/contrib/llvm-project/clang/lib/AST/TemplateName.cpp
index 2f0e4181e940..11544dbb56e3 100644
--- a/contrib/llvm-project/clang/lib/AST/TemplateName.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TemplateName.cpp
@@ -214,29 +214,12 @@ UsingShadowDecl *TemplateName::getAsUsingShadowDecl() const {
return nullptr;
}
-TemplateName TemplateName::getNameToSubstitute() const {
- TemplateDecl *Decl = getAsTemplateDecl();
-
- // Substituting a dependent template name: preserve it as written.
- if (!Decl)
- return *this;
-
- // If we have a template declaration, use the most recent non-friend
- // declaration of that template.
- Decl = cast<TemplateDecl>(Decl->getMostRecentDecl());
- while (Decl->getFriendObjectKind()) {
- Decl = cast<TemplateDecl>(Decl->getPreviousDecl());
- assert(Decl && "all declarations of template are friends");
- }
- return TemplateName(Decl);
-}
-
TemplateNameDependence TemplateName::getDependence() const {
auto D = TemplateNameDependence::None;
switch (getKind()) {
case TemplateName::NameKind::QualifiedTemplate:
- D |= toTemplateNameDependence(
- getAsQualifiedTemplateName()->getQualifier()->getDependence());
+ if (NestedNameSpecifier *NNS = getAsQualifiedTemplateName()->getQualifier())
+ D |= toTemplateNameDependence(NNS->getDependence());
break;
case TemplateName::NameKind::DependentTemplate:
D |= toTemplateNameDependence(
@@ -292,9 +275,16 @@ void TemplateName::Profile(llvm::FoldingSetNodeID &ID) {
void TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy,
Qualified Qual) const {
- auto Kind = getKind();
- TemplateDecl *Template = nullptr;
- if (Kind == TemplateName::Template || Kind == TemplateName::UsingTemplate) {
+ auto handleAnonymousTTP = [](TemplateDecl *TD, raw_ostream &OS) {
+ if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(TD);
+ TTP && TTP->getIdentifier() == nullptr) {
+ OS << "template-parameter-" << TTP->getDepth() << "-" << TTP->getIndex();
+ return true;
+ }
+ return false;
+ };
+ if (NameKind Kind = getKind();
+ Kind == TemplateName::Template || Kind == TemplateName::UsingTemplate) {
// After `namespace ns { using std::vector }`, what is the fully-qualified
// name of the UsingTemplateName `vector` within ns?
//
@@ -304,46 +294,49 @@ void TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy,
// Similar to the UsingType behavior, using declarations are used to import
// names more often than to export them, thus using the original name is
// most useful in this case.
- Template = getAsTemplateDecl();
- }
-
- if (Template)
- if (Policy.CleanUglifiedParameters &&
- isa<TemplateTemplateParmDecl>(Template) && Template->getIdentifier())
- OS << Template->getIdentifier()->deuglifiedName();
- else if (Qual == Qualified::Fully &&
- getDependence() !=
- TemplateNameDependenceScope::DependentInstantiation)
- Template->printQualifiedName(OS, Policy);
- else
- OS << *Template;
- else if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) {
- if (Qual == Qualified::Fully &&
- getDependence() !=
- TemplateNameDependenceScope::DependentInstantiation) {
- QTN->getUnderlyingTemplate().getAsTemplateDecl()->printQualifiedName(
- OS, Policy);
+ TemplateDecl *Template = getAsTemplateDecl();
+ if (handleAnonymousTTP(Template, OS))
return;
- }
- if (Qual == Qualified::AsWritten)
- QTN->getQualifier()->print(OS, Policy);
+ if (Qual == Qualified::None)
+ OS << *Template;
+ else
+ Template->printQualifiedName(OS, Policy);
+ } else if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) {
+ if (NestedNameSpecifier *NNS = QTN->getQualifier();
+ Qual != Qualified::None && NNS)
+ NNS->print(OS, Policy);
if (QTN->hasTemplateKeyword())
OS << "template ";
- OS << *QTN->getUnderlyingTemplate().getAsTemplateDecl();
+
+ TemplateName Underlying = QTN->getUnderlyingTemplate();
+ assert(Underlying.getKind() == TemplateName::Template ||
+ Underlying.getKind() == TemplateName::UsingTemplate);
+
+ TemplateDecl *UTD = Underlying.getAsTemplateDecl();
+
+ if (handleAnonymousTTP(UTD, OS))
+ return;
+
+ if (IdentifierInfo *II = UTD->getIdentifier();
+ Policy.CleanUglifiedParameters && II &&
+ isa<TemplateTemplateParmDecl>(UTD))
+ OS << II->deuglifiedName();
+ else
+ OS << *UTD;
} else if (DependentTemplateName *DTN = getAsDependentTemplateName()) {
- if (Qual == Qualified::AsWritten && DTN->getQualifier())
- DTN->getQualifier()->print(OS, Policy);
+ if (NestedNameSpecifier *NNS = DTN->getQualifier())
+ NNS->print(OS, Policy);
OS << "template ";
if (DTN->isIdentifier())
OS << DTN->getIdentifier()->getName();
else
OS << "operator " << getOperatorSpelling(DTN->getOperator());
- } else if (SubstTemplateTemplateParmStorage *subst
- = getAsSubstTemplateTemplateParm()) {
+ } else if (SubstTemplateTemplateParmStorage *subst =
+ getAsSubstTemplateTemplateParm()) {
subst->getReplacement().print(OS, Policy, Qual);
- } else if (SubstTemplateTemplateParmPackStorage *SubstPack
- = getAsSubstTemplateTemplateParmPack())
+ } else if (SubstTemplateTemplateParmPackStorage *SubstPack =
+ getAsSubstTemplateTemplateParmPack())
OS << *SubstPack->getParameterPack();
else if (AssumedTemplateStorage *Assumed = getAsAssumedTemplateName()) {
Assumed->getDeclName().print(OS, Policy);
@@ -367,14 +360,3 @@ const StreamingDiagnostic &clang::operator<<(const StreamingDiagnostic &DB,
OS.flush();
return DB << NameStr;
}
-
-void TemplateName::dump(raw_ostream &OS) const {
- LangOptions LO; // FIXME!
- LO.CPlusPlus = true;
- LO.Bool = true;
- print(OS, PrintingPolicy(LO));
-}
-
-LLVM_DUMP_METHOD void TemplateName::dump() const {
- dump(llvm::errs());
-}
diff --git a/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp b/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp
index ecf5de0be543..5ba952350425 100644
--- a/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/LocInfoType.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/Type.h"
+#include "clang/AST/TypeLocVisitor.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
@@ -240,6 +241,27 @@ void TextNodeDumper::Visit(QualType T) {
OS << " " << T.split().Quals.getAsString();
}
+void TextNodeDumper::Visit(TypeLoc TL) {
+ if (!TL) {
+ ColorScope Color(OS, ShowColors, NullColor);
+ OS << "<<<NULL>>>";
+ return;
+ }
+
+ {
+ ColorScope Color(OS, ShowColors, TypeColor);
+ OS << (TL.getTypeLocClass() == TypeLoc::Qualified
+ ? "Qualified"
+ : TL.getType()->getTypeClassName())
+ << "TypeLoc";
+ }
+ dumpSourceRange(TL.getSourceRange());
+ OS << ' ';
+ dumpBareType(TL.getType(), /*Desugar=*/false);
+
+ TypeLocVisitor<TextNodeDumper>::Visit(TL);
+}
+
void TextNodeDumper::Visit(const Decl *D) {
if (!D) {
ColorScope Color(OS, ShowColors, NullColor);
@@ -359,6 +381,98 @@ void TextNodeDumper::Visit(const OMPClause *C) {
OS << " <implicit>";
}
+void TextNodeDumper::Visit(const OpenACCClause *C) {
+ if (!C) {
+ ColorScope Color(OS, ShowColors, NullColor);
+ OS << "<<<NULL>>> OpenACCClause";
+ return;
+ }
+ {
+ ColorScope Color(OS, ShowColors, AttrColor);
+ OS << C->getClauseKind();
+
+ // Handle clauses with parens for types that have no children, likely
+ // because there is no sub expression.
+ switch (C->getClauseKind()) {
+ case OpenACCClauseKind::Default:
+ OS << '(' << cast<OpenACCDefaultClause>(C)->getDefaultClauseKind() << ')';
+ break;
+ case OpenACCClauseKind::Async:
+ case OpenACCClauseKind::Auto:
+ case OpenACCClauseKind::Attach:
+ case OpenACCClauseKind::Copy:
+ case OpenACCClauseKind::PCopy:
+ case OpenACCClauseKind::PresentOrCopy:
+ case OpenACCClauseKind::If:
+ case OpenACCClauseKind::Independent:
+ case OpenACCClauseKind::DevicePtr:
+ case OpenACCClauseKind::FirstPrivate:
+ case OpenACCClauseKind::NoCreate:
+ case OpenACCClauseKind::NumGangs:
+ case OpenACCClauseKind::NumWorkers:
+ case OpenACCClauseKind::Present:
+ case OpenACCClauseKind::Private:
+ case OpenACCClauseKind::Self:
+ case OpenACCClauseKind::Seq:
+ case OpenACCClauseKind::VectorLength:
+ // The condition expression will be printed as a part of the 'children',
+ // but print 'clause' here so it is clear what is happening from the dump.
+ OS << " clause";
+ break;
+ case OpenACCClauseKind::CopyIn:
+ case OpenACCClauseKind::PCopyIn:
+ case OpenACCClauseKind::PresentOrCopyIn:
+ OS << " clause";
+ if (cast<OpenACCCopyInClause>(C)->isReadOnly())
+ OS << " : readonly";
+ break;
+ case OpenACCClauseKind::CopyOut:
+ case OpenACCClauseKind::PCopyOut:
+ case OpenACCClauseKind::PresentOrCopyOut:
+ OS << " clause";
+ if (cast<OpenACCCopyOutClause>(C)->isZero())
+ OS << " : zero";
+ break;
+ case OpenACCClauseKind::Create:
+ case OpenACCClauseKind::PCreate:
+ case OpenACCClauseKind::PresentOrCreate:
+ OS << " clause";
+ if (cast<OpenACCCreateClause>(C)->isZero())
+ OS << " : zero";
+ break;
+ case OpenACCClauseKind::Wait:
+ OS << " clause";
+ if (cast<OpenACCWaitClause>(C)->hasDevNumExpr())
+ OS << " has devnum";
+ if (cast<OpenACCWaitClause>(C)->hasQueuesTag())
+ OS << " has queues tag";
+ break;
+ case OpenACCClauseKind::DeviceType:
+ case OpenACCClauseKind::DType:
+ OS << "(";
+ llvm::interleaveComma(
+ cast<OpenACCDeviceTypeClause>(C)->getArchitectures(), OS,
+ [&](const DeviceTypeArgument &Arch) {
+ if (Arch.first == nullptr)
+ OS << "*";
+ else
+ OS << Arch.first->getName();
+ });
+ OS << ")";
+ break;
+ case OpenACCClauseKind::Reduction:
+ OS << " clause Operator: "
+ << cast<OpenACCReductionClause>(C)->getReductionOp();
+ break;
+ default:
+ // Nothing to do here.
+ break;
+ }
+ }
+ dumpPointer(C);
+ dumpSourceRange(SourceRange(C->getBeginLoc(), C->getEndLoc()));
+}
+
void TextNodeDumper::Visit(const GenericSelectionExpr::ConstAssociation &A) {
const TypeSourceInfo *TSI = A.getTypeSourceInfo();
if (TSI) {
@@ -836,6 +950,29 @@ void TextNodeDumper::dumpDeclRef(const Decl *D, StringRef Label) {
});
}
+void TextNodeDumper::dumpTemplateArgument(const TemplateArgument &TA) {
+ llvm::SmallString<128> Str;
+ {
+ llvm::raw_svector_ostream SS(Str);
+ TA.print(PrintPolicy, SS, /*IncludeType=*/true);
+ }
+ OS << " '" << Str << "'";
+
+ if (!Context)
+ return;
+
+ if (TemplateArgument CanonTA = Context->getCanonicalTemplateArgument(TA);
+ !CanonTA.structurallyEquals(TA)) {
+ llvm::SmallString<128> CanonStr;
+ {
+ llvm::raw_svector_ostream SS(CanonStr);
+ CanonTA.print(PrintPolicy, SS, /*IncludeType=*/true);
+ }
+ if (CanonStr != Str)
+ OS << ":'" << CanonStr << "'";
+ }
+}
+
const char *TextNodeDumper::getCommandName(unsigned CommandID) {
if (Traits)
return Traits->getCommandInfo(CommandID)->Name;
@@ -975,45 +1112,128 @@ void TextNodeDumper::VisitNullTemplateArgument(const TemplateArgument &) {
void TextNodeDumper::VisitTypeTemplateArgument(const TemplateArgument &TA) {
OS << " type";
- dumpType(TA.getAsType());
+ dumpTemplateArgument(TA);
}
void TextNodeDumper::VisitDeclarationTemplateArgument(
const TemplateArgument &TA) {
OS << " decl";
+ dumpTemplateArgument(TA);
dumpDeclRef(TA.getAsDecl());
}
-void TextNodeDumper::VisitNullPtrTemplateArgument(const TemplateArgument &) {
+void TextNodeDumper::VisitNullPtrTemplateArgument(const TemplateArgument &TA) {
OS << " nullptr";
+ dumpTemplateArgument(TA);
}
void TextNodeDumper::VisitIntegralTemplateArgument(const TemplateArgument &TA) {
- OS << " integral " << TA.getAsIntegral();
+ OS << " integral";
+ dumpTemplateArgument(TA);
+}
+
+void TextNodeDumper::dumpTemplateName(TemplateName TN, StringRef Label) {
+ AddChild(Label, [=] {
+ {
+ llvm::SmallString<128> Str;
+ {
+ llvm::raw_svector_ostream SS(Str);
+ TN.print(SS, PrintPolicy);
+ }
+ OS << "'" << Str << "'";
+
+ if (Context) {
+ if (TemplateName CanonTN = Context->getCanonicalTemplateName(TN);
+ CanonTN != TN) {
+ llvm::SmallString<128> CanonStr;
+ {
+ llvm::raw_svector_ostream SS(CanonStr);
+ CanonTN.print(SS, PrintPolicy);
+ }
+ if (CanonStr != Str)
+ OS << ":'" << CanonStr << "'";
+ }
+ }
+ }
+ dumpBareTemplateName(TN);
+ });
+}
+
+void TextNodeDumper::dumpBareTemplateName(TemplateName TN) {
+ switch (TN.getKind()) {
+ case TemplateName::Template:
+ AddChild([=] { Visit(TN.getAsTemplateDecl()); });
+ return;
+ case TemplateName::UsingTemplate: {
+ const UsingShadowDecl *USD = TN.getAsUsingShadowDecl();
+ AddChild([=] { Visit(USD); });
+ AddChild("target", [=] { Visit(USD->getTargetDecl()); });
+ return;
+ }
+ case TemplateName::QualifiedTemplate: {
+ OS << " qualified";
+ const QualifiedTemplateName *QTN = TN.getAsQualifiedTemplateName();
+ if (QTN->hasTemplateKeyword())
+ OS << " keyword";
+ dumpNestedNameSpecifier(QTN->getQualifier());
+ dumpBareTemplateName(QTN->getUnderlyingTemplate());
+ return;
+ }
+ case TemplateName::DependentTemplate: {
+ OS << " dependent";
+ const DependentTemplateName *DTN = TN.getAsDependentTemplateName();
+ dumpNestedNameSpecifier(DTN->getQualifier());
+ return;
+ }
+ case TemplateName::SubstTemplateTemplateParm: {
+ OS << " subst";
+ const SubstTemplateTemplateParmStorage *STS =
+ TN.getAsSubstTemplateTemplateParm();
+ OS << " index " << STS->getIndex();
+ if (std::optional<unsigned int> PackIndex = STS->getPackIndex())
+ OS << " pack_index " << *PackIndex;
+ if (const TemplateTemplateParmDecl *P = STS->getParameter())
+ AddChild("parameter", [=] { Visit(P); });
+ dumpDeclRef(STS->getAssociatedDecl(), "associated");
+ dumpTemplateName(STS->getReplacement(), "replacement");
+ return;
+ }
+ // FIXME: Implement these.
+ case TemplateName::OverloadedTemplate:
+ OS << " overloaded";
+ return;
+ case TemplateName::AssumedTemplate:
+ OS << " assumed";
+ return;
+ case TemplateName::SubstTemplateTemplateParmPack:
+ OS << " subst_pack";
+ return;
+ }
+ llvm_unreachable("Unexpected TemplateName Kind");
}
void TextNodeDumper::VisitTemplateTemplateArgument(const TemplateArgument &TA) {
- if (TA.getAsTemplate().getKind() == TemplateName::UsingTemplate)
- OS << " using";
- OS << " template ";
- TA.getAsTemplate().dump(OS);
+ OS << " template";
+ dumpTemplateArgument(TA);
+ dumpBareTemplateName(TA.getAsTemplate());
}
void TextNodeDumper::VisitTemplateExpansionTemplateArgument(
const TemplateArgument &TA) {
- if (TA.getAsTemplateOrTemplatePattern().getKind() ==
- TemplateName::UsingTemplate)
- OS << " using";
- OS << " template expansion ";
- TA.getAsTemplateOrTemplatePattern().dump(OS);
+ OS << " template expansion";
+ dumpTemplateArgument(TA);
+ dumpBareTemplateName(TA.getAsTemplateOrTemplatePattern());
}
-void TextNodeDumper::VisitExpressionTemplateArgument(const TemplateArgument &) {
+void TextNodeDumper::VisitExpressionTemplateArgument(
+ const TemplateArgument &TA) {
OS << " expr";
+ dumpTemplateArgument(TA);
}
-void TextNodeDumper::VisitPackTemplateArgument(const TemplateArgument &) {
+void TextNodeDumper::VisitPackTemplateArgument(const TemplateArgument &TA) {
OS << " pack";
+ dumpTemplateArgument(TA);
}
static void dumpBasePath(raw_ostream &OS, const CastExpr *Node) {
@@ -1158,8 +1378,11 @@ void TextNodeDumper::VisitDeclRefExpr(const DeclRefExpr *Node) {
case NOUR_Constant: OS << " non_odr_use_constant"; break;
case NOUR_Discarded: OS << " non_odr_use_discarded"; break;
}
- if (Node->refersToEnclosingVariableOrCapture())
+ if (Node->isCapturedByCopyInLambdaWithExplicitObjectParameter())
+ OS << " dependent_capture";
+ else if (Node->refersToEnclosingVariableOrCapture())
OS << " refers_to_enclosing_variable_or_capture";
+
if (Node->isImmediateEscalating())
OS << " immediate-escalating";
}
@@ -1315,6 +1538,8 @@ void TextNodeDumper::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *Node) {
void TextNodeDumper::VisitCXXThisExpr(const CXXThisExpr *Node) {
if (Node->isImplicit())
OS << " implicit";
+ if (Node->isCapturedByCopyInLambdaWithExplicitObjectParameter())
+ OS << " dependent_capture";
OS << " this";
}
@@ -1397,6 +1622,16 @@ void TextNodeDumper::VisitExpressionTraitExpr(const ExpressionTraitExpr *Node) {
OS << " " << getTraitSpelling(Node->getTrait());
}
+void TextNodeDumper::VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *Node) {
+ if (Node->hasRewrittenInit())
+ OS << " has rewritten init";
+}
+
+void TextNodeDumper::VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *Node) {
+ if (Node->hasRewrittenInit())
+ OS << " has rewritten init";
+}
+
void TextNodeDumper::VisitMaterializeTemporaryExpr(
const MaterializeTemporaryExpr *Node) {
if (const ValueDecl *VD = Node->getExtendingDecl()) {
@@ -1781,27 +2016,20 @@ void TextNodeDumper::VisitAutoType(const AutoType *T) {
OS << " decltype(auto)";
if (!T->isDeduced())
OS << " undeduced";
- if (T->isConstrained()) {
+ if (T->isConstrained())
dumpDeclRef(T->getTypeConstraintConcept());
- for (const auto &Arg : T->getTypeConstraintArguments())
- VisitTemplateArgument(Arg);
- }
}
void TextNodeDumper::VisitDeducedTemplateSpecializationType(
const DeducedTemplateSpecializationType *T) {
- if (T->getTemplateName().getKind() == TemplateName::UsingTemplate)
- OS << " using";
+ dumpTemplateName(T->getTemplateName(), "name");
}
void TextNodeDumper::VisitTemplateSpecializationType(
const TemplateSpecializationType *T) {
if (T->isTypeAlias())
OS << " alias";
- if (T->getTemplateName().getKind() == TemplateName::UsingTemplate)
- OS << " using";
- OS << " ";
- T->getTemplateName().dump(OS);
+ dumpTemplateName(T->getTemplateName(), "name");
}
void TextNodeDumper::VisitInjectedClassNameType(
@@ -1818,6 +2046,13 @@ void TextNodeDumper::VisitPackExpansionType(const PackExpansionType *T) {
OS << " expansions " << *N;
}
+void TextNodeDumper::VisitTypeLoc(TypeLoc TL) {
+ // By default, add extra Type details with no extra loc info.
+ TypeVisitor<TextNodeDumper>::Visit(TL.getTypePtr());
+}
+// FIXME: override behavior for TypeLocs that have interesting location
+// information, such as the qualifier in ElaboratedTypeLoc.
+
void TextNodeDumper::VisitLabelDecl(const LabelDecl *D) { dumpName(D); }
void TextNodeDumper::VisitTypedefDecl(const TypedefDecl *D) {
@@ -1890,6 +2125,9 @@ void TextNodeDumper::VisitFunctionDecl(const FunctionDecl *D) {
if (D->isTrivial())
OS << " trivial";
+ if (const StringLiteral *M = D->getDeletedMessage())
+ AddChild("delete message", [=] { Visit(M); });
+
if (D->isIneligibleOrNotSelected())
OS << (isa<CXXDestructorDecl>(D) ? " not_selected" : " ineligible");
@@ -1944,6 +2182,19 @@ void TextNodeDumper::VisitFunctionDecl(const FunctionDecl *D) {
}
}
+void TextNodeDumper::VisitCXXDeductionGuideDecl(
+ const CXXDeductionGuideDecl *D) {
+ VisitFunctionDecl(D);
+ switch (D->getDeductionCandidateKind()) {
+ case DeductionCandidate::Normal:
+ case DeductionCandidate::Copy:
+ return;
+ case DeductionCandidate::Aggregate:
+ OS << " aggregate ";
+ break;
+ }
+}
+
void TextNodeDumper::VisitLifetimeExtendedTemporaryDecl(
const LifetimeExtendedTemporaryDecl *D) {
OS << " extended by ";
@@ -2135,8 +2386,8 @@ void TextNodeDumper::VisitNamespaceDecl(const NamespaceDecl *D) {
OS << " inline";
if (D->isNested())
OS << " nested";
- if (!D->isOriginalNamespace())
- dumpDeclRef(D->getOriginalNamespace(), "original");
+ if (!D->isFirstDecl())
+ dumpDeclRef(D->getFirstDecl(), "original");
}
void TextNodeDumper::VisitUsingDirectiveDecl(const UsingDirectiveDecl *D) {
@@ -2622,3 +2873,19 @@ void TextNodeDumper::VisitHLSLBufferDecl(const HLSLBufferDecl *D) {
OS << " tbuffer";
dumpName(D);
}
+
+void TextNodeDumper::VisitOpenACCConstructStmt(const OpenACCConstructStmt *S) {
+ OS << " " << S->getDirectiveKind();
+}
+void TextNodeDumper::VisitOpenACCLoopConstruct(const OpenACCLoopConstruct *S) {
+
+ if (S->isOrphanedLoopConstruct())
+ OS << " <orphan>";
+ else
+ OS << " parent: " << S->getParentComputeConstruct();
+}
+
+void TextNodeDumper::VisitEmbedExpr(const EmbedExpr *S) {
+ AddChild("begin", [=] { OS << S->getStartingElementPos(); });
+ AddChild("number of elements", [=] { OS << S->getDataElementCount(); });
+}
diff --git a/contrib/llvm-project/clang/lib/AST/Type.cpp b/contrib/llvm-project/clang/lib/AST/Type.cpp
index d4103025591e..fdaab8e43459 100644
--- a/contrib/llvm-project/clang/lib/AST/Type.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Type.cpp
@@ -159,6 +159,22 @@ ArrayType::ArrayType(TypeClass tc, QualType et, QualType can,
ArrayTypeBits.SizeModifier = llvm::to_underlying(sm);
}
+ConstantArrayType *
+ConstantArrayType::Create(const ASTContext &Ctx, QualType ET, QualType Can,
+ const llvm::APInt &Sz, const Expr *SzExpr,
+ ArraySizeModifier SzMod, unsigned Qual) {
+ bool NeedsExternalSize = SzExpr != nullptr || Sz.ugt(0x0FFFFFFFFFFFFFFF) ||
+ Sz.getBitWidth() > 0xFF;
+ if (!NeedsExternalSize)
+ return new (Ctx, alignof(ConstantArrayType)) ConstantArrayType(
+ ET, Can, Sz.getBitWidth(), Sz.getZExtValue(), SzMod, Qual);
+
+ auto *SzPtr = new (Ctx, alignof(ConstantArrayType::ExternalSize))
+ ConstantArrayType::ExternalSize(Sz, SzExpr);
+ return new (Ctx, alignof(ConstantArrayType))
+ ConstantArrayType(ET, Can, SzPtr, SzMod, Qual);
+}
+
unsigned ConstantArrayType::getNumAddressingBits(const ASTContext &Context,
QualType ElementType,
const llvm::APInt &NumElements) {
@@ -213,11 +229,10 @@ unsigned ConstantArrayType::getMaxSizeBits(const ASTContext &Context) {
void ConstantArrayType::Profile(llvm::FoldingSetNodeID &ID,
const ASTContext &Context, QualType ET,
- const llvm::APInt &ArraySize,
- const Expr *SizeExpr, ArraySizeModifier SizeMod,
- unsigned TypeQuals) {
+ uint64_t ArraySize, const Expr *SizeExpr,
+ ArraySizeModifier SizeMod, unsigned TypeQuals) {
ID.AddPointer(ET.getAsOpaquePtr());
- ID.AddInteger(ArraySize.getZExtValue());
+ ID.AddInteger(ArraySize);
ID.AddInteger(llvm::to_underlying(SizeMod));
ID.AddInteger(TypeQuals);
ID.AddBoolean(SizeExpr != nullptr);
@@ -241,7 +256,8 @@ void DependentSizedArrayType::Profile(llvm::FoldingSetNodeID &ID,
ID.AddPointer(ET.getAsOpaquePtr());
ID.AddInteger(llvm::to_underlying(SizeMod));
ID.AddInteger(TypeQuals);
- E->Profile(ID, Context, true);
+ if (E)
+ E->Profile(ID, Context, true);
}
DependentVectorType::DependentVectorType(QualType ElementType,
@@ -385,6 +401,26 @@ void DependentBitIntType::Profile(llvm::FoldingSetNodeID &ID,
NumBitsExpr->Profile(ID, Context, true);
}
+bool BoundsAttributedType::referencesFieldDecls() const {
+ return llvm::any_of(dependent_decls(),
+ [](const TypeCoupledDeclRefInfo &Info) {
+ return isa<FieldDecl>(Info.getDecl());
+ });
+}
+
+void CountAttributedType::Profile(llvm::FoldingSetNodeID &ID,
+ QualType WrappedTy, Expr *CountExpr,
+ bool CountInBytes, bool OrNull) {
+ ID.AddPointer(WrappedTy.getAsOpaquePtr());
+ ID.AddBoolean(CountInBytes);
+ ID.AddBoolean(OrNull);
+ // We profile it as a pointer as the StmtProfiler considers parameter
+ // expressions on function declaration and function definition as the
+ // same, resulting in count expression being evaluated with ParamDecl
+ // not in the function scope.
+ ID.AddPointer(CountExpr);
+}
+
/// getArrayElementTypeNoTypeQual - If this is an array type, return the
/// element type of the array, potentially with type qualifiers missing.
/// This method should never be used when type qualifiers are meaningful.
@@ -432,12 +468,8 @@ QualType QualType::getSingleStepDesugaredTypeImpl(QualType type,
// Check that no type class has a non-trival destructor. Types are
// allocated with the BumpPtrAllocator from ASTContext and therefore
// their destructor is not executed.
-//
-// FIXME: ConstantArrayType is not trivially destructible because of its
-// APInt member. It should be replaced in favor of ASTContext allocation.
#define TYPE(CLASS, BASE) \
- static_assert(std::is_trivially_destructible<CLASS##Type>::value || \
- std::is_same<CLASS##Type, ConstantArrayType>::value, \
+ static_assert(std::is_trivially_destructible<CLASS##Type>::value, \
#CLASS "Type should be trivially destructible!");
#include "clang/AST/TypeNodes.inc"
@@ -559,6 +591,14 @@ template <> const AttributedType *Type::getAs() const {
return getAsSugar<AttributedType>(this);
}
+template <> const BoundsAttributedType *Type::getAs() const {
+ return getAsSugar<BoundsAttributedType>(this);
+}
+
+template <> const CountAttributedType *Type::getAs() const {
+ return getAsSugar<CountAttributedType>(this);
+}
+
/// getUnqualifiedDesugaredType - Pull any qualifiers and syntactic
/// sugar off the given type. This should produce an object of the
/// same dynamic type as the canonical type.
@@ -592,6 +632,16 @@ bool Type::isStructureType() const {
return false;
}
+bool Type::isStructureTypeWithFlexibleArrayMember() const {
+ const auto *RT = getAs<RecordType>();
+ if (!RT)
+ return false;
+ const auto *Decl = RT->getDecl();
+ if (!Decl->isStruct())
+ return false;
+ return Decl->hasFlexibleArrayMember();
+}
+
bool Type::isObjCBoxableRecordType() const {
if (const auto *RT = getAs<RecordType>())
return RT->getDecl()->hasAttr<ObjCBoxableAttr>();
@@ -641,6 +691,10 @@ bool Type::isScopedEnumeralType() const {
return false;
}
+bool Type::isCountAttributedType() const {
+ return getAs<CountAttributedType>();
+}
+
const ComplexType *Type::getAsComplexIntegerType() const {
if (const auto *Complex = getAs<ComplexType>())
if (Complex->getElementType()->isIntegerType())
@@ -1154,6 +1208,14 @@ public:
return Ctx.getDecayedType(originalType);
}
+ QualType VisitArrayParameterType(const ArrayParameterType *T) {
+ QualType ArrTy = VisitConstantArrayType(T);
+ if (ArrTy.isNull())
+ return {};
+
+ return Ctx.getArrayParameterType(ArrTy);
+ }
+
SUGARED_TYPE_CLASS(TypeOfExpr)
SUGARED_TYPE_CLASS(TypeOf)
SUGARED_TYPE_CLASS(Decltype)
@@ -1565,9 +1627,10 @@ QualType QualType::stripObjCKindOfType(const ASTContext &constCtx) const {
}
QualType QualType::getAtomicUnqualifiedType() const {
- if (const auto AT = getTypePtr()->getAs<AtomicType>())
- return AT->getValueType().getUnqualifiedType();
- return getUnqualifiedType();
+ QualType T = *this;
+ if (const auto AT = T.getTypePtr()->getAs<AtomicType>())
+ T = AT->getValueType();
+ return T.getUnqualifiedType();
}
std::optional<ArrayRef<QualType>>
@@ -2320,6 +2383,14 @@ bool Type::isIncompleteType(NamedDecl **Def) const {
*Def = Rec;
return !Rec->isCompleteDefinition();
}
+ case InjectedClassName: {
+ CXXRecordDecl *Rec = cast<InjectedClassNameType>(CanonicalType)->getDecl();
+ if (!Rec->isBeingDefined())
+ return false;
+ if (Def)
+ *Def = Rec;
+ return true;
+ }
case ConstantArray:
case VariableArray:
// An array is incomplete if its element type is incomplete
@@ -2459,6 +2530,18 @@ bool Type::isSveVLSBuiltinType() const {
return false;
}
+QualType Type::getSizelessVectorEltType(const ASTContext &Ctx) const {
+ assert(isSizelessVectorType() && "Must be sizeless vector type");
+ // Currently supports SVE and RVV
+ if (isSVESizelessBuiltinType())
+ return getSveEltType(Ctx);
+
+ if (isRVVSizelessBuiltinType())
+ return getRVVEltType(Ctx);
+
+ llvm_unreachable("Unhandled type");
+}
+
QualType Type::getSveEltType(const ASTContext &Ctx) const {
assert(isSveVLSBuiltinType() && "unsupported type!");
@@ -2667,6 +2750,43 @@ bool QualType::isTriviallyCopyableType(const ASTContext &Context) const {
/*IsCopyConstructible=*/false);
}
+// FIXME: each call will trigger a full computation, cache the result.
+bool QualType::isBitwiseCloneableType(const ASTContext &Context) const {
+ auto CanonicalType = getCanonicalType();
+ if (CanonicalType.hasNonTrivialObjCLifetime())
+ return false;
+ if (CanonicalType->isArrayType())
+ return Context.getBaseElementType(CanonicalType)
+ .isBitwiseCloneableType(Context);
+
+ if (CanonicalType->isIncompleteType())
+ return false;
+ const auto *RD = CanonicalType->getAsRecordDecl(); // struct/union/class
+ if (!RD)
+ return true;
+
+ // Never allow memcpy when we're adding poisoned padding bits to the struct.
+  // Accessing these poisoned bits will trigger false alarms on
+ // SanitizeAddressFieldPadding etc.
+ if (RD->mayInsertExtraPadding())
+ return false;
+
+ for (auto *const Field : RD->fields()) {
+ if (!Field->getType().isBitwiseCloneableType(Context))
+ return false;
+ }
+
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (auto Base : CXXRD->bases())
+ if (!Base.getType().isBitwiseCloneableType(Context))
+ return false;
+ for (auto VBase : CXXRD->vbases())
+ if (!VBase.getType().isBitwiseCloneableType(Context))
+ return false;
+ }
+ return true;
+}
+
bool QualType::isTriviallyCopyConstructibleType(
const ASTContext &Context) const {
return isTriviallyCopyableTypeImpl(*this, Context,
@@ -2682,6 +2802,8 @@ bool QualType::isTriviallyRelocatableType(const ASTContext &Context) const {
return false;
} else if (const auto *RD = BaseElementType->getAsRecordDecl()) {
return RD->canPassInRegisters();
+ } else if (BaseElementType.isTriviallyCopyableType(Context)) {
+ return true;
} else {
switch (isNonTrivialToPrimitiveDestructiveMove()) {
case PCK_Trivial:
@@ -2694,66 +2816,6 @@ bool QualType::isTriviallyRelocatableType(const ASTContext &Context) const {
}
}
-static bool
-HasNonDeletedDefaultedEqualityComparison(const CXXRecordDecl *Decl) {
- if (Decl->isUnion())
- return false;
- if (Decl->isLambda())
- return Decl->isCapturelessLambda();
-
- auto IsDefaultedOperatorEqualEqual = [&](const FunctionDecl *Function) {
- return Function->getOverloadedOperator() ==
- OverloadedOperatorKind::OO_EqualEqual &&
- Function->isDefaulted() && Function->getNumParams() > 0 &&
- (Function->getParamDecl(0)->getType()->isReferenceType() ||
- Decl->isTriviallyCopyable());
- };
-
- if (llvm::none_of(Decl->methods(), IsDefaultedOperatorEqualEqual) &&
- llvm::none_of(Decl->friends(), [&](const FriendDecl *Friend) {
- if (NamedDecl *ND = Friend->getFriendDecl()) {
- return ND->isFunctionOrFunctionTemplate() &&
- IsDefaultedOperatorEqualEqual(ND->getAsFunction());
- }
- return false;
- }))
- return false;
-
- return llvm::all_of(Decl->bases(),
- [](const CXXBaseSpecifier &BS) {
- if (const auto *RD = BS.getType()->getAsCXXRecordDecl())
- return HasNonDeletedDefaultedEqualityComparison(RD);
- return true;
- }) &&
- llvm::all_of(Decl->fields(), [](const FieldDecl *FD) {
- auto Type = FD->getType();
- if (Type->isArrayType())
- Type = Type->getBaseElementTypeUnsafe()->getCanonicalTypeUnqualified();
-
- if (Type->isReferenceType() || Type->isEnumeralType())
- return false;
- if (const auto *RD = Type->getAsCXXRecordDecl())
- return HasNonDeletedDefaultedEqualityComparison(RD);
- return true;
- });
-}
-
-bool QualType::isTriviallyEqualityComparableType(
- const ASTContext &Context) const {
- QualType CanonicalType = getCanonicalType();
- if (CanonicalType->isIncompleteType() || CanonicalType->isDependentType() ||
- CanonicalType->isEnumeralType() || CanonicalType->isArrayType())
- return false;
-
- if (const auto *RD = CanonicalType->getAsCXXRecordDecl()) {
- if (!HasNonDeletedDefaultedEqualityComparison(RD))
- return false;
- }
-
- return Context.hasUniqueObjectRepresentations(
- CanonicalType, /*CheckIfTriviallyCopyable=*/false);
-}
-
bool QualType::isNonWeakInMRRWithObjCWeak(const ASTContext &Context) const {
return !Context.getLangOpts().ObjCAutoRefCount &&
Context.getLangOpts().ObjCWeak &&
@@ -3328,6 +3390,8 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
return "<overloaded function type>";
case BoundMember:
return "<bound member function type>";
+ case UnresolvedTemplate:
+ return "<unresolved template type>";
case PseudoObject:
return "<pseudo-object type>";
case Dependent:
@@ -3360,8 +3424,8 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
return "reserve_id_t";
case IncompleteMatrixIdx:
return "<incomplete matrix index type>";
- case OMPArraySection:
- return "<OpenMP array section type>";
+ case ArraySection:
+ return "<array section type>";
case OMPArrayShaping:
return "<OpenMP array shaping type>";
case OMPIterator:
@@ -3386,6 +3450,10 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
case Id: \
return Name;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
+#define AMDGPU_TYPE(Name, Id, SingletonId) \
+ case Id: \
+ return Name;
+#include "clang/Basic/AMDGPUTypes.def"
}
llvm_unreachable("Invalid builtin type.");
@@ -3438,6 +3506,10 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) {
case CC_PreserveMost: return "preserve_most";
case CC_PreserveAll: return "preserve_all";
case CC_M68kRTD: return "m68k_rtd";
+ case CC_PreserveNone: return "preserve_none";
+ // clang-format off
+ case CC_RISCVVectorCall: return "riscv_vector_cc";
+ // clang-format on
}
llvm_unreachable("Invalid calling convention.");
@@ -3580,6 +3652,34 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
auto &EllipsisLoc = *getTrailingObjects<SourceLocation>();
EllipsisLoc = epi.EllipsisLoc;
}
+
+ if (!epi.FunctionEffects.empty()) {
+ auto &ExtraBits = *getTrailingObjects<FunctionTypeExtraBitfields>();
+ size_t EffectsCount = epi.FunctionEffects.size();
+ ExtraBits.NumFunctionEffects = EffectsCount;
+ assert(ExtraBits.NumFunctionEffects == EffectsCount &&
+ "effect bitfield overflow");
+
+ ArrayRef<FunctionEffect> SrcFX = epi.FunctionEffects.effects();
+ auto *DestFX = getTrailingObjects<FunctionEffect>();
+ std::uninitialized_copy(SrcFX.begin(), SrcFX.end(), DestFX);
+
+ ArrayRef<EffectConditionExpr> SrcConds = epi.FunctionEffects.conditions();
+ if (!SrcConds.empty()) {
+ ExtraBits.EffectsHaveConditions = true;
+ auto *DestConds = getTrailingObjects<EffectConditionExpr>();
+ std::uninitialized_copy(SrcConds.begin(), SrcConds.end(), DestConds);
+ assert(std::any_of(SrcConds.begin(), SrcConds.end(),
+ [](const EffectConditionExpr &EC) {
+ if (const Expr *E = EC.getCondition())
+ return E->isTypeDependent() ||
+ E->isValueDependent();
+ return false;
+ }) &&
+ "expected a dependent expression among the conditions");
+ addDependence(TypeDependence::DependentInstantiation);
+ }
+ }
}
bool FunctionProtoType::hasDependentExceptionSpec() const {
@@ -3663,6 +3763,7 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
// Finally we have a trailing return type flag (bool)
// combined with AArch64 SME Attributes, to save space:
// int
+ // combined with any FunctionEffects
//
// There is no ambiguity between the consumed arguments and an empty EH
// spec because of the leading 'bool' which unambiguously indicates
@@ -3697,7 +3798,18 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
}
epi.ExtInfo.Profile(ID);
- ID.AddInteger((epi.AArch64SMEAttributes << 1) | epi.HasTrailingReturn);
+
+ unsigned EffectCount = epi.FunctionEffects.size();
+ bool HasConds = !epi.FunctionEffects.Conditions.empty();
+
+ ID.AddInteger((EffectCount << 3) | (HasConds << 2) |
+ (epi.AArch64SMEAttributes << 1) | epi.HasTrailingReturn);
+
+ for (unsigned Idx = 0; Idx != EffectCount; ++Idx) {
+ ID.AddInteger(epi.FunctionEffects.Effects[Idx].toOpaqueInt32());
+ if (HasConds)
+ ID.AddPointer(epi.FunctionEffects.Conditions[Idx].getCondition());
+ }
}
void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID,
@@ -3706,6 +3818,43 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID,
getExtProtoInfo(), Ctx, isCanonicalUnqualified());
}
+TypeCoupledDeclRefInfo::TypeCoupledDeclRefInfo(ValueDecl *D, bool Deref)
+ : Data(D, Deref << DerefShift) {}
+
+bool TypeCoupledDeclRefInfo::isDeref() const {
+ return Data.getInt() & DerefMask;
+}
+ValueDecl *TypeCoupledDeclRefInfo::getDecl() const { return Data.getPointer(); }
+unsigned TypeCoupledDeclRefInfo::getInt() const { return Data.getInt(); }
+void *TypeCoupledDeclRefInfo::getOpaqueValue() const {
+ return Data.getOpaqueValue();
+}
+bool TypeCoupledDeclRefInfo::operator==(
+ const TypeCoupledDeclRefInfo &Other) const {
+ return getOpaqueValue() == Other.getOpaqueValue();
+}
+void TypeCoupledDeclRefInfo::setFromOpaqueValue(void *V) {
+ Data.setFromOpaqueValue(V);
+}
+
+BoundsAttributedType::BoundsAttributedType(TypeClass TC, QualType Wrapped,
+ QualType Canon)
+ : Type(TC, Canon, Wrapped->getDependence()), WrappedTy(Wrapped) {}
+
+CountAttributedType::CountAttributedType(
+ QualType Wrapped, QualType Canon, Expr *CountExpr, bool CountInBytes,
+ bool OrNull, ArrayRef<TypeCoupledDeclRefInfo> CoupledDecls)
+ : BoundsAttributedType(CountAttributed, Wrapped, Canon),
+ CountExpr(CountExpr) {
+ CountAttributedTypeBits.NumCoupledDecls = CoupledDecls.size();
+ CountAttributedTypeBits.CountInBytes = CountInBytes;
+ CountAttributedTypeBits.OrNull = OrNull;
+ auto *DeclSlot = getTrailingObjects<TypeCoupledDeclRefInfo>();
+ Decls = llvm::ArrayRef(DeclSlot, CoupledDecls.size());
+ for (unsigned i = 0; i != CoupledDecls.size(); ++i)
+ DeclSlot[i] = CoupledDecls[i];
+}
+
TypedefType::TypedefType(TypeClass tc, const TypedefNameDecl *D,
QualType Underlying, QualType can)
: Type(tc, can, toSemanticDependence(can->getDependence())),
@@ -3751,18 +3900,19 @@ QualType MacroQualifiedType::getModifiedType() const {
return Inner;
}
-TypeOfExprType::TypeOfExprType(Expr *E, TypeOfKind Kind, QualType Can)
+TypeOfExprType::TypeOfExprType(const ASTContext &Context, Expr *E,
+ TypeOfKind Kind, QualType Can)
: Type(TypeOfExpr,
// We have to protect against 'Can' being invalid through its
// default argument.
Kind == TypeOfKind::Unqualified && !Can.isNull()
- ? Can.getAtomicUnqualifiedType()
+ ? Context.getUnqualifiedArrayType(Can).getAtomicUnqualifiedType()
: Can,
toTypeDependence(E->getDependence()) |
(E->getType()->getDependence() &
TypeDependence::VariablyModified)),
- TOExpr(E) {
- TypeOfBits.IsUnqual = Kind == TypeOfKind::Unqualified;
+ TOExpr(E), Context(Context) {
+ TypeOfBits.Kind = static_cast<unsigned>(Kind);
}
bool TypeOfExprType::isSugared() const {
@@ -3772,7 +3922,9 @@ bool TypeOfExprType::isSugared() const {
QualType TypeOfExprType::desugar() const {
if (isSugared()) {
QualType QT = getUnderlyingExpr()->getType();
- return TypeOfBits.IsUnqual ? QT.getAtomicUnqualifiedType() : QT;
+ return getKind() == TypeOfKind::Unqualified
+ ? Context.getUnqualifiedArrayType(QT).getAtomicUnqualifiedType()
+ : QT;
}
return QualType(this, 0);
}
@@ -3784,6 +3936,24 @@ void DependentTypeOfExprType::Profile(llvm::FoldingSetNodeID &ID,
ID.AddBoolean(IsUnqual);
}
+TypeOfType::TypeOfType(const ASTContext &Context, QualType T, QualType Can,
+ TypeOfKind Kind)
+ : Type(TypeOf,
+ Kind == TypeOfKind::Unqualified
+ ? Context.getUnqualifiedArrayType(Can).getAtomicUnqualifiedType()
+ : Can,
+ T->getDependence()),
+ TOType(T), Context(Context) {
+ TypeOfBits.Kind = static_cast<unsigned>(Kind);
+}
+
+QualType TypeOfType::desugar() const {
+ QualType QT = getUnmodifiedType();
+ return getKind() == TypeOfKind::Unqualified
+ ? Context.getUnqualifiedArrayType(QT).getAtomicUnqualifiedType()
+ : QT;
+}
+
DecltypeType::DecltypeType(Expr *E, QualType underlyingType, QualType can)
// C++11 [temp.type]p2: "If an expression e involves a template parameter,
// decltype(e) denotes a unique dependent type." Hence a decltype type is
@@ -3813,6 +3983,63 @@ void DependentDecltypeType::Profile(llvm::FoldingSetNodeID &ID,
E->Profile(ID, Context, true);
}
+PackIndexingType::PackIndexingType(const ASTContext &Context,
+ QualType Canonical, QualType Pattern,
+ Expr *IndexExpr,
+ ArrayRef<QualType> Expansions)
+ : Type(PackIndexing, Canonical,
+ computeDependence(Pattern, IndexExpr, Expansions)),
+ Context(Context), Pattern(Pattern), IndexExpr(IndexExpr),
+ Size(Expansions.size()) {
+
+ std::uninitialized_copy(Expansions.begin(), Expansions.end(),
+ getTrailingObjects<QualType>());
+}
+
+std::optional<unsigned> PackIndexingType::getSelectedIndex() const {
+ if (isInstantiationDependentType())
+ return std::nullopt;
+ // Should only be not a constant for error recovery.
+ ConstantExpr *CE = dyn_cast<ConstantExpr>(getIndexExpr());
+ if (!CE)
+ return std::nullopt;
+ auto Index = CE->getResultAsAPSInt();
+ assert(Index.isNonNegative() && "Invalid index");
+ return static_cast<unsigned>(Index.getExtValue());
+}
+
+TypeDependence
+PackIndexingType::computeDependence(QualType Pattern, Expr *IndexExpr,
+ ArrayRef<QualType> Expansions) {
+ TypeDependence IndexD = toTypeDependence(IndexExpr->getDependence());
+
+ TypeDependence TD = IndexD | (IndexExpr->isInstantiationDependent()
+ ? TypeDependence::DependentInstantiation
+ : TypeDependence::None);
+ if (Expansions.empty())
+ TD |= Pattern->getDependence() & TypeDependence::DependentInstantiation;
+ else
+ for (const QualType &T : Expansions)
+ TD |= T->getDependence();
+
+ if (!(IndexD & TypeDependence::UnexpandedPack))
+ TD &= ~TypeDependence::UnexpandedPack;
+
+  // If the pattern does not contain an unexpanded pack,
+  // the type is still dependent, and invalid
+ if (!Pattern->containsUnexpandedParameterPack())
+ TD |= TypeDependence::Error | TypeDependence::DependentInstantiation;
+
+ return TD;
+}
+
+void PackIndexingType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context, QualType Pattern,
+ Expr *E) {
+ Pattern.Profile(ID);
+ E->Profile(ID, Context, true);
+}
+
UnaryTransformType::UnaryTransformType(QualType BaseType,
QualType UnderlyingType, UTTKind UKind,
QualType CanonicalType)
@@ -3933,6 +4160,8 @@ bool AttributedType::isCallingConv() const {
case attr::PreserveMost:
case attr::PreserveAll:
case attr::M68kRTD:
+ case attr::PreserveNone:
+ case attr::RISCVVectorCC:
return true;
}
llvm_unreachable("invalid attr kind");
@@ -4065,7 +4294,8 @@ TemplateSpecializationType::TemplateSpecializationType(
assert((T.getKind() == TemplateName::Template ||
T.getKind() == TemplateName::SubstTemplateTemplateParm ||
T.getKind() == TemplateName::SubstTemplateTemplateParmPack ||
- T.getKind() == TemplateName::UsingTemplate) &&
+ T.getKind() == TemplateName::UsingTemplate ||
+ T.getKind() == TemplateName::QualifiedTemplate) &&
"Unexpected template name for TemplateSpecializationType");
auto *TemplateArgs = reinterpret_cast<TemplateArgument *>(this + 1);
@@ -4257,7 +4487,6 @@ static CachedProperties computeCachedProperties(const Type *T) {
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class,Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
// Treat instantiation-dependent types as external.
- if (!T->isInstantiationDependentType()) T->dump();
assert(T->isInstantiationDependentType());
return CachedProperties(Linkage::External, false);
@@ -4309,6 +4538,7 @@ static CachedProperties computeCachedProperties(const Type *T) {
case Type::ConstantArray:
case Type::IncompleteArray:
case Type::VariableArray:
+ case Type::ArrayParameter:
return Cache::get(cast<ArrayType>(T)->getElementType());
case Type::Vector:
case Type::ExtVector:
@@ -4397,6 +4627,7 @@ LinkageInfo LinkageComputer::computeTypeLinkageInfo(const Type *T) {
case Type::ConstantArray:
case Type::IncompleteArray:
case Type::VariableArray:
+ case Type::ArrayParameter:
return computeTypeLinkageInfo(cast<ArrayType>(T)->getElementType());
case Type::Vector:
case Type::ExtVector:
@@ -4488,6 +4719,7 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
case Type::TypeOfExpr:
case Type::TypeOf:
case Type::Decltype:
+ case Type::PackIndexing:
case Type::UnaryTransform:
case Type::TemplateTypeParm:
case Type::SubstTemplateTypeParmPack:
@@ -4496,16 +4728,15 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
case Type::Auto:
return ResultIfUnknown;
- // Dependent template specializations can instantiate to pointer
- // types unless they're known to be specializations of a class
- // template.
+ // Dependent template specializations could instantiate to pointer types.
case Type::TemplateSpecialization:
- if (TemplateDecl *templateDecl
- = cast<TemplateSpecializationType>(type.getTypePtr())
- ->getTemplateName().getAsTemplateDecl()) {
- if (isa<ClassTemplateDecl>(templateDecl))
- return false;
- }
+ // If it's a known class template, we can already check if it's nullable.
+ if (TemplateDecl *templateDecl =
+ cast<TemplateSpecializationType>(type.getTypePtr())
+ ->getTemplateName()
+ .getAsTemplateDecl())
+ if (auto *CTD = dyn_cast<ClassTemplateDecl>(templateDecl))
+ return CTD->getTemplatedDecl()->hasAttr<TypeNullableAttr>();
return ResultIfUnknown;
case Type::Builtin:
@@ -4518,6 +4749,7 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
#include "clang/AST/BuiltinTypes.def"
return false;
+ case BuiltinType::UnresolvedTemplate:
// Dependent types that could instantiate to a pointer type.
case BuiltinType::Dependent:
case BuiltinType::Overload:
@@ -4552,16 +4784,29 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
+#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/AMDGPUTypes.def"
case BuiltinType::BuiltinFn:
case BuiltinType::NullPtr:
case BuiltinType::IncompleteMatrixIdx:
- case BuiltinType::OMPArraySection:
+ case BuiltinType::ArraySection:
case BuiltinType::OMPArrayShaping:
case BuiltinType::OMPIterator:
return false;
}
llvm_unreachable("unknown builtin type");
+ case Type::Record: {
+ const RecordDecl *RD = cast<RecordType>(type)->getDecl();
+ // For template specializations, look only at primary template attributes.
+    // This is consistent regardless of whether the instantiation is known.
+ if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(RD))
+ return CTSD->getSpecializedTemplate()
+ ->getTemplatedDecl()
+ ->hasAttr<TypeNullableAttr>();
+ return RD->hasAttr<TypeNullableAttr>();
+ }
+
// Non-pointer types.
case Type::Complex:
case Type::LValueReference:
@@ -4579,7 +4824,6 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
case Type::DependentAddressSpace:
case Type::FunctionProto:
case Type::FunctionNoProto:
- case Type::Record:
case Type::DeducedTemplateSpecialization:
case Type::Enum:
case Type::InjectedClassName:
@@ -4590,6 +4834,7 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
case Type::Pipe:
case Type::BitInt:
case Type::DependentBitInt:
+ case Type::ArrayParameter:
return false;
}
llvm_unreachable("bad type kind!");
@@ -4850,3 +5095,234 @@ void AutoType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
Profile(ID, Context, getDeducedType(), getKeyword(), isDependentType(),
getTypeConstraintConcept(), getTypeConstraintArguments());
}
+
+FunctionEffect::Kind FunctionEffect::oppositeKind() const {
+ switch (kind()) {
+ case Kind::NonBlocking:
+ return Kind::Blocking;
+ case Kind::Blocking:
+ return Kind::NonBlocking;
+ case Kind::NonAllocating:
+ return Kind::Allocating;
+ case Kind::Allocating:
+ return Kind::NonAllocating;
+ case Kind::None:
+ return Kind::None;
+ }
+ llvm_unreachable("unknown effect kind");
+}
+
+StringRef FunctionEffect::name() const {
+ switch (kind()) {
+ case Kind::NonBlocking:
+ return "nonblocking";
+ case Kind::NonAllocating:
+ return "nonallocating";
+ case Kind::Blocking:
+ return "blocking";
+ case Kind::Allocating:
+ return "allocating";
+ case Kind::None:
+ return "(none)";
+ }
+ llvm_unreachable("unknown effect kind");
+}
+
+bool FunctionEffect::canInferOnFunction(const Decl &Callee) const {
+ switch (kind()) {
+ case Kind::NonAllocating:
+ case Kind::NonBlocking: {
+ FunctionEffectsRef CalleeFX;
+ if (auto *FD = Callee.getAsFunction())
+ CalleeFX = FD->getFunctionEffects();
+ else if (auto *BD = dyn_cast<BlockDecl>(&Callee))
+ CalleeFX = BD->getFunctionEffects();
+ else
+ return false;
+ for (const FunctionEffectWithCondition &CalleeEC : CalleeFX) {
+ // nonblocking/nonallocating cannot call allocating.
+ if (CalleeEC.Effect.kind() == Kind::Allocating)
+ return false;
+ // nonblocking cannot call blocking.
+ if (kind() == Kind::NonBlocking &&
+ CalleeEC.Effect.kind() == Kind::Blocking)
+ return false;
+ }
+ return true;
+ }
+
+ case Kind::Allocating:
+ case Kind::Blocking:
+ return false;
+
+ case Kind::None:
+ assert(0 && "canInferOnFunction with None");
+ break;
+ }
+ llvm_unreachable("unknown effect kind");
+}
+
+bool FunctionEffect::shouldDiagnoseFunctionCall(
+ bool Direct, ArrayRef<FunctionEffect> CalleeFX) const {
+ switch (kind()) {
+ case Kind::NonAllocating:
+ case Kind::NonBlocking: {
+ const Kind CallerKind = kind();
+ for (const auto &Effect : CalleeFX) {
+ const Kind EK = Effect.kind();
+ // Does callee have same or stronger constraint?
+ if (EK == CallerKind ||
+ (CallerKind == Kind::NonAllocating && EK == Kind::NonBlocking)) {
+ return false; // no diagnostic
+ }
+ }
+ return true; // warning
+ }
+ case Kind::Allocating:
+ case Kind::Blocking:
+ return false;
+ case Kind::None:
+ assert(0 && "shouldDiagnoseFunctionCall with None");
+ break;
+ }
+ llvm_unreachable("unknown effect kind");
+}
+
+// =====
+
+bool FunctionEffectSet::insert(const FunctionEffectWithCondition &NewEC,
+ Conflicts &Errs) {
+ FunctionEffect::Kind NewOppositeKind = NewEC.Effect.oppositeKind();
+ Expr *NewCondition = NewEC.Cond.getCondition();
+
+ // The index at which insertion will take place; default is at end
+ // but we might find an earlier insertion point.
+ unsigned InsertIdx = Effects.size();
+ unsigned Idx = 0;
+ for (const FunctionEffectWithCondition &EC : *this) {
+ // Note about effects with conditions: They are considered distinct from
+ // those without conditions; they are potentially unique, redundant, or
+ // in conflict, but we can't tell which until the condition is evaluated.
+ if (EC.Cond.getCondition() == nullptr && NewCondition == nullptr) {
+ if (EC.Effect.kind() == NewEC.Effect.kind()) {
+ // There is no condition, and the effect kind is already present,
+ // so just fail to insert the new one (creating a duplicate),
+ // and return success.
+ return true;
+ }
+
+ if (EC.Effect.kind() == NewOppositeKind) {
+ Errs.push_back({EC, NewEC});
+ return false;
+ }
+ }
+
+ if (NewEC.Effect.kind() < EC.Effect.kind() && InsertIdx > Idx)
+ InsertIdx = Idx;
+
+ ++Idx;
+ }
+
+ if (NewCondition || !Conditions.empty()) {
+ if (Conditions.empty() && !Effects.empty())
+ Conditions.resize(Effects.size());
+ Conditions.insert(Conditions.begin() + InsertIdx,
+ NewEC.Cond.getCondition());
+ }
+ Effects.insert(Effects.begin() + InsertIdx, NewEC.Effect);
+ return true;
+}
+
+bool FunctionEffectSet::insert(const FunctionEffectsRef &Set, Conflicts &Errs) {
+ for (const auto &Item : Set)
+ insert(Item, Errs);
+ return Errs.empty();
+}
+
+FunctionEffectSet FunctionEffectSet::getIntersection(FunctionEffectsRef LHS,
+ FunctionEffectsRef RHS) {
+ FunctionEffectSet Result;
+ FunctionEffectSet::Conflicts Errs;
+
+ // We could use std::set_intersection but that would require expanding the
+ // container interface to include push_back, making it available to clients
+ // who might fail to maintain invariants.
+ auto IterA = LHS.begin(), EndA = LHS.end();
+ auto IterB = RHS.begin(), EndB = RHS.end();
+
+ auto FEWCLess = [](const FunctionEffectWithCondition &LHS,
+ const FunctionEffectWithCondition &RHS) {
+ return std::tuple(LHS.Effect, uintptr_t(LHS.Cond.getCondition())) <
+ std::tuple(RHS.Effect, uintptr_t(RHS.Cond.getCondition()));
+ };
+
+ while (IterA != EndA && IterB != EndB) {
+ FunctionEffectWithCondition A = *IterA;
+ FunctionEffectWithCondition B = *IterB;
+ if (FEWCLess(A, B))
+ ++IterA;
+ else if (FEWCLess(B, A))
+ ++IterB;
+ else {
+ Result.insert(A, Errs);
+ ++IterA;
+ ++IterB;
+ }
+ }
+
+ // Insertion shouldn't be able to fail; that would mean both input
+ // sets contained conflicts.
+ assert(Errs.empty() && "conflict shouldn't be possible in getIntersection");
+
+ return Result;
+}
+
+FunctionEffectSet FunctionEffectSet::getUnion(FunctionEffectsRef LHS,
+ FunctionEffectsRef RHS,
+ Conflicts &Errs) {
+ // Optimize for either of the two sets being empty (very common).
+ if (LHS.empty())
+ return FunctionEffectSet(RHS);
+
+ FunctionEffectSet Combined(LHS);
+ Combined.insert(RHS, Errs);
+ return Combined;
+}
+
+LLVM_DUMP_METHOD void FunctionEffectsRef::dump(llvm::raw_ostream &OS) const {
+ OS << "Effects{";
+ bool First = true;
+ for (const auto &CFE : *this) {
+ if (!First)
+ OS << ", ";
+ else
+ First = false;
+ OS << CFE.Effect.name();
+ if (Expr *E = CFE.Cond.getCondition()) {
+ OS << '(';
+ E->dump();
+ OS << ')';
+ }
+ }
+ OS << "}";
+}
+
+LLVM_DUMP_METHOD void FunctionEffectSet::dump(llvm::raw_ostream &OS) const {
+ FunctionEffectsRef(*this).dump(OS);
+}
+
+FunctionEffectsRef
+FunctionEffectsRef::create(ArrayRef<FunctionEffect> FX,
+ ArrayRef<EffectConditionExpr> Conds) {
+ assert(std::is_sorted(FX.begin(), FX.end()) && "effects should be sorted");
+ assert((Conds.empty() || Conds.size() == FX.size()) &&
+ "effects size should match conditions size");
+ return FunctionEffectsRef(FX, Conds);
+}
+
+std::string FunctionEffectWithCondition::description() const {
+ std::string Result(Effect.name().str());
+ if (Cond.getCondition() != nullptr)
+ Result += "(expr)";
+ return Result;
+}
diff --git a/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp b/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp
index 66732bba18e2..33e6ccbadc12 100644
--- a/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp
@@ -399,6 +399,7 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
case BuiltinType::NullPtr:
case BuiltinType::Overload:
case BuiltinType::Dependent:
+ case BuiltinType::UnresolvedTemplate:
case BuiltinType::BoundMember:
case BuiltinType::UnknownAny:
case BuiltinType::ARCUnbridgedCast:
@@ -427,9 +428,11 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
+#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/AMDGPUTypes.def"
case BuiltinType::BuiltinFn:
case BuiltinType::IncompleteMatrixIdx:
- case BuiltinType::OMPArraySection:
+ case BuiltinType::ArraySection:
case BuiltinType::OMPArrayShaping:
case BuiltinType::OMPIterator:
return TST_unspecified;
@@ -516,6 +519,10 @@ SourceRange AttributedTypeLoc::getLocalSourceRange() const {
return getAttr() ? getAttr()->getRange() : SourceRange();
}
+SourceRange CountAttributedTypeLoc::getLocalSourceRange() const {
+ return getCountExpr() ? getCountExpr()->getSourceRange() : SourceRange();
+}
+
SourceRange BTFTagAttributedTypeLoc::getLocalSourceRange() const {
return getAttr() ? getAttr()->getRange() : SourceRange();
}
@@ -738,3 +745,12 @@ AutoTypeLoc TypeLoc::getContainedAutoTypeLoc() const {
return AutoTypeLoc();
return Res.getAs<AutoTypeLoc>();
}
+
+SourceLocation TypeLoc::getTemplateKeywordLoc() const {
+ if (const auto TSTL = getAsAdjusted<TemplateSpecializationTypeLoc>())
+ return TSTL.getTemplateKeywordLoc();
+ if (const auto DTSTL =
+ getAsAdjusted<DependentTemplateSpecializationTypeLoc>())
+ return DTSTL.getTemplateKeywordLoc();
+ return SourceLocation();
+}
diff --git a/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp b/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
index e9b6e810b02e..ffec3ef9d226 100644
--- a/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
@@ -268,6 +268,7 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
case Type::Adjusted:
case Type::Decayed:
+ case Type::ArrayParameter:
case Type::Pointer:
case Type::BlockPointer:
case Type::LValueReference:
@@ -286,6 +287,7 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
case Type::PackExpansion:
case Type::SubstTemplateTypeParm:
case Type::MacroQualified:
+ case Type::CountAttributed:
CanPrefixQualifiers = false;
break;
@@ -296,6 +298,11 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
CanPrefixQualifiers = AttrTy->getAttrKind() == attr::AddressSpace;
break;
}
+ case Type::PackIndexing: {
+ return canPrefixQualifiers(
+ cast<PackIndexingType>(UnderlyingType)->getPattern().getTypePtr(),
+ NeedARCStrongQualifier);
+ }
}
return CanPrefixQualifiers;
@@ -532,7 +539,7 @@ void TypePrinter::printConstantArrayAfter(const ConstantArrayType *T,
if (T->getSizeModifier() == ArraySizeModifier::Static)
OS << "static ";
- OS << T->getSize().getZExtValue() << ']';
+ OS << T->getZExtSize() << ']';
printAfter(T->getElementType(), OS);
}
@@ -589,6 +596,16 @@ void TypePrinter::printDecayedBefore(const DecayedType *T, raw_ostream &OS) {
printAdjustedBefore(T, OS);
}
+void TypePrinter::printArrayParameterAfter(const ArrayParameterType *T,
+ raw_ostream &OS) {
+ printConstantArrayAfter(T, OS);
+}
+
+void TypePrinter::printArrayParameterBefore(const ArrayParameterType *T,
+ raw_ostream &OS) {
+ printConstantArrayBefore(T, OS);
+}
+
void TypePrinter::printDecayedAfter(const DecayedType *T, raw_ostream &OS) {
printAdjustedAfter(T, OS);
}
@@ -627,16 +644,25 @@ void TypePrinter::printDependentAddressSpaceAfter(
void TypePrinter::printDependentSizedExtVectorBefore(
const DependentSizedExtVectorType *T,
raw_ostream &OS) {
+ if (Policy.UseHLSLTypes)
+ OS << "vector<";
printBefore(T->getElementType(), OS);
}
void TypePrinter::printDependentSizedExtVectorAfter(
const DependentSizedExtVectorType *T,
raw_ostream &OS) {
- OS << " __attribute__((ext_vector_type(";
- if (T->getSizeExpr())
- T->getSizeExpr()->printPretty(OS, nullptr, Policy);
- OS << ")))";
+ if (Policy.UseHLSLTypes) {
+ OS << ", ";
+ if (T->getSizeExpr())
+ T->getSizeExpr()->printPretty(OS, nullptr, Policy);
+ OS << ">";
+ } else {
+ OS << " __attribute__((ext_vector_type(";
+ if (T->getSizeExpr())
+ T->getSizeExpr()->printPretty(OS, nullptr, Policy);
+ OS << ")))";
+ }
printAfter(T->getElementType(), OS);
}
@@ -798,14 +824,23 @@ void TypePrinter::printDependentVectorAfter(
void TypePrinter::printExtVectorBefore(const ExtVectorType *T,
raw_ostream &OS) {
+ if (Policy.UseHLSLTypes)
+ OS << "vector<";
printBefore(T->getElementType(), OS);
}
void TypePrinter::printExtVectorAfter(const ExtVectorType *T, raw_ostream &OS) {
printAfter(T->getElementType(), OS);
- OS << " __attribute__((ext_vector_type(";
- OS << T->getNumElements();
- OS << ")))";
+
+ if (Policy.UseHLSLTypes) {
+ OS << ", ";
+ OS << T->getNumElements();
+ OS << ">";
+ } else {
+ OS << " __attribute__((ext_vector_type(";
+ OS << T->getNumElements();
+ OS << ")))";
+ }
}
void TypePrinter::printConstantMatrixBefore(const ConstantMatrixType *T,
@@ -981,6 +1016,17 @@ void TypePrinter::printFunctionProtoAfter(const FunctionProtoType *T,
}
T->printExceptionSpecification(OS, Policy);
+ const FunctionEffectsRef FX = T->getFunctionEffects();
+ for (const auto &CFE : FX) {
+ OS << " __attribute__((" << CFE.Effect.name();
+ if (const Expr *E = CFE.Cond.getCondition()) {
+ OS << '(';
+ E->printPretty(OS, nullptr, Policy);
+ OS << ')';
+ }
+ OS << "))";
+ }
+
if (T->hasTrailingReturn()) {
OS << " -> ";
print(T->getReturnType(), OS, StringRef());
@@ -1062,6 +1108,12 @@ void TypePrinter::printFunctionAfter(const FunctionType::ExtInfo &Info,
case CC_M68kRTD:
OS << " __attribute__((m68k_rtd))";
break;
+ case CC_PreserveNone:
+ OS << " __attribute__((preserve_none))";
+ break;
+ case CC_RISCVVectorCall:
+ OS << "__attribute__((riscv_vector_cc))";
+ break;
}
}
@@ -1188,6 +1240,21 @@ void TypePrinter::printDecltypeBefore(const DecltypeType *T, raw_ostream &OS) {
spaceBeforePlaceHolder(OS);
}
+void TypePrinter::printPackIndexingBefore(const PackIndexingType *T,
+ raw_ostream &OS) {
+ if (T->hasSelectedType()) {
+ OS << T->getSelectedType();
+ } else {
+ OS << T->getPattern() << "...[";
+ T->getIndexExpr()->printPretty(OS, nullptr, Policy);
+ OS << "]";
+ }
+ spaceBeforePlaceHolder(OS);
+}
+
+void TypePrinter::printPackIndexingAfter(const PackIndexingType *T,
+ raw_ostream &OS) {}
+
void TypePrinter::printDecltypeAfter(const DecltypeType *T, raw_ostream &OS) {}
void TypePrinter::printUnaryTransformBefore(const UnaryTransformType *T,
@@ -1434,21 +1501,18 @@ void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) {
// If this is a class template specialization, print the template
// arguments.
- if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
- ArrayRef<TemplateArgument> Args;
- TypeSourceInfo *TAW = Spec->getTypeAsWritten();
- if (!Policy.PrintCanonicalTypes && TAW) {
- const TemplateSpecializationType *TST =
- cast<TemplateSpecializationType>(TAW->getType());
- Args = TST->template_arguments();
- } else {
- const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
- Args = TemplateArgs.asArray();
- }
+ if (auto *S = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
+ const TemplateParameterList *TParams =
+ S->getSpecializedTemplate()->getTemplateParameters();
+ const ASTTemplateArgumentListInfo *TArgAsWritten =
+ S->getTemplateArgsAsWritten();
IncludeStrongLifetimeRAII Strong(Policy);
- printTemplateArgumentList(
- OS, Args, Policy,
- Spec->getSpecializedTemplate()->getTemplateParameters());
+ if (TArgAsWritten && !Policy.PrintCanonicalTypes)
+ printTemplateArgumentList(OS, TArgAsWritten->arguments(), Policy,
+ TParams);
+ else
+ printTemplateArgumentList(OS, S->getTemplateArgs().asArray(), Policy,
+ TParams);
}
spaceBeforePlaceHolder(OS);
@@ -1551,14 +1615,14 @@ void TypePrinter::printTemplateId(const TemplateSpecializationType *T,
IncludeStrongLifetimeRAII Strong(Policy);
TemplateDecl *TD = T->getTemplateName().getAsTemplateDecl();
- // FIXME: Null TD never excercised in test suite.
+ // FIXME: Null TD never exercised in test suite.
if (FullyQualify && TD) {
if (!Policy.SuppressScope)
AppendScope(TD->getDeclContext(), OS, TD->getDeclName());
OS << TD->getName();
} else {
- T->getTemplateName().print(OS, Policy);
+ T->getTemplateName().print(OS, Policy, TemplateName::Qualified::None);
}
DefaultTemplateArgsPolicyRAII TemplateArgs(Policy);
@@ -1615,6 +1679,17 @@ void TypePrinter::printElaboratedBefore(const ElaboratedType *T,
if (T->getKeyword() != ElaboratedTypeKeyword::None)
OS << " ";
NestedNameSpecifier *Qualifier = T->getQualifier();
+ if (!Policy.SuppressTagKeyword && Policy.SuppressScope &&
+ !Policy.SuppressUnwrittenScope) {
+ bool OldTagKeyword = Policy.SuppressTagKeyword;
+ bool OldSupressScope = Policy.SuppressScope;
+ Policy.SuppressTagKeyword = true;
+ Policy.SuppressScope = false;
+ printBefore(T->getNamedType(), OS);
+ Policy.SuppressTagKeyword = OldTagKeyword;
+ Policy.SuppressScope = OldSupressScope;
+ return;
+ }
if (Qualifier)
Qualifier->print(OS, Policy);
}
@@ -1697,6 +1772,37 @@ void TypePrinter::printPackExpansionAfter(const PackExpansionType *T,
OS << "...";
}
+static void printCountAttributedImpl(const CountAttributedType *T,
+ raw_ostream &OS,
+ const PrintingPolicy &Policy) {
+ OS << ' ';
+ if (T->isCountInBytes() && T->isOrNull())
+ OS << "__sized_by_or_null(";
+ else if (T->isCountInBytes())
+ OS << "__sized_by(";
+ else if (T->isOrNull())
+ OS << "__counted_by_or_null(";
+ else
+ OS << "__counted_by(";
+ if (T->getCountExpr())
+ T->getCountExpr()->printPretty(OS, nullptr, Policy);
+ OS << ')';
+}
+
+void TypePrinter::printCountAttributedBefore(const CountAttributedType *T,
+ raw_ostream &OS) {
+ printBefore(T->desugar(), OS);
+ if (!T->isArrayType())
+ printCountAttributedImpl(T, OS, Policy);
+}
+
+void TypePrinter::printCountAttributedAfter(const CountAttributedType *T,
+ raw_ostream &OS) {
+ printAfter(T->desugar(), OS);
+ if (T->isArrayType())
+ printCountAttributedImpl(T, OS, Policy);
+}
+
void TypePrinter::printAttributedBefore(const AttributedType *T,
raw_ostream &OS) {
// FIXME: Generate this with TableGen.
@@ -1827,6 +1933,10 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
// AttributedType nodes for them.
break;
+ case attr::CountedBy:
+ case attr::CountedByOrNull:
+ case attr::SizedBy:
+ case attr::SizedByOrNull:
case attr::LifetimeBound:
case attr::TypeNonNull:
case attr::TypeNullable:
@@ -1850,6 +1960,10 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::ArmOut:
case attr::ArmInOut:
case attr::ArmPreserves:
+ case attr::NonBlocking:
+ case attr::NonAllocating:
+ case attr::Blocking:
+ case attr::Allocating:
llvm_unreachable("This attribute should have been handled already");
case attr::NSReturnsRetained:
@@ -1894,6 +2008,12 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::M68kRTD:
OS << "m68k_rtd";
break;
+ case attr::PreserveNone:
+ OS << "preserve_none";
+ break;
+ case attr::RISCVVectorCC:
+ OS << "riscv_vector_cc";
+ break;
case attr::NoDeref:
OS << "noderef";
break;
@@ -2189,16 +2309,17 @@ bool clang::isSubstitutedDefaultArgument(ASTContext &Ctx, TemplateArgument Arg,
if (auto *TTPD = dyn_cast<TemplateTypeParmDecl>(Param)) {
return TTPD->hasDefaultArgument() &&
- isSubstitutedTemplateArgument(Ctx, Arg, TTPD->getDefaultArgument(),
- Args, Depth);
+ isSubstitutedTemplateArgument(
+ Ctx, Arg, TTPD->getDefaultArgument().getArgument(), Args, Depth);
} else if (auto *TTPD = dyn_cast<TemplateTemplateParmDecl>(Param)) {
return TTPD->hasDefaultArgument() &&
isSubstitutedTemplateArgument(
Ctx, Arg, TTPD->getDefaultArgument().getArgument(), Args, Depth);
} else if (auto *NTTPD = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
return NTTPD->hasDefaultArgument() &&
- isSubstitutedTemplateArgument(Ctx, Arg, NTTPD->getDefaultArgument(),
- Args, Depth);
+ isSubstitutedTemplateArgument(
+ Ctx, Arg, NTTPD->getDefaultArgument().getArgument(), Args,
+ Depth);
}
return false;
}
@@ -2247,7 +2368,7 @@ printTo(raw_ostream &OS, ArrayRef<TA> Args, const PrintingPolicy &Policy,
// If this is the first argument and its string representation
// begins with the global scope specifier ('::foo'), add a space
// to avoid printing the diagraph '<:'.
- if (FirstArg && !ArgString.empty() && ArgString[0] == ':')
+ if (FirstArg && ArgString.starts_with(":"))
OS << ' ';
OS << ArgString;
diff --git a/contrib/llvm-project/clang/lib/AST/VTTBuilder.cpp b/contrib/llvm-project/clang/lib/AST/VTTBuilder.cpp
index d58e87517785..464a2014c430 100644
--- a/contrib/llvm-project/clang/lib/AST/VTTBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/AST/VTTBuilder.cpp
@@ -189,7 +189,7 @@ void VTTBuilder::LayoutVTT(BaseSubobject Base, bool BaseIsVirtual) {
if (!IsPrimaryVTT) {
// Remember the sub-VTT index.
- SubVTTIndicies[Base] = VTTComponents.size();
+ SubVTTIndices[Base] = VTTComponents.size();
}
uint64_t VTableIndex = VTTVTables.size();
diff --git a/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp b/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp
index a956ca5b37ac..e941c3bedb0a 100644
--- a/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp
@@ -1147,11 +1147,41 @@ void ItaniumVTableBuilder::ComputeThisAdjustments() {
continue;
// Add it.
- VTableThunks[VTableIndex].This = ThisAdjustment;
+ auto SetThisAdjustmentThunk = [&](uint64_t Idx) {
+ // If a this pointer adjustment is required, record the method that
+ // created the vtable entry. MD is not necessarily the method that
+ // created the entry since derived classes overwrite base class
+ // information in MethodInfoMap, hence findOriginalMethodInMap is called
+ // here.
+ //
+ // For example, in the following class hierarchy, if MD = D1::m and
+ // Overrider = D2:m, the original method that created the entry is B0:m,
+ // which is what findOriginalMethodInMap(MD) returns:
+ //
+ // struct B0 { int a; virtual void m(); };
+ // struct D0 : B0 { int a; void m() override; };
+ // struct D1 : B0 { int a; void m() override; };
+ // struct D2 : D0, D1 { int a; void m() override; };
+ //
+ // We need to record the method because we cannot
+ // call findOriginalMethod to find the method that created the entry if
+ // the method in the entry requires adjustment.
+ //
+ // Do not set ThunkInfo::Method if Idx is already in VTableThunks. This
+ // can happen when covariant return adjustment is required too.
+ if (!VTableThunks.count(Idx)) {
+ const CXXMethodDecl *Method = VTables.findOriginalMethodInMap(MD);
+ VTableThunks[Idx].Method = Method;
+ VTableThunks[Idx].ThisType = Method->getThisType().getTypePtr();
+ }
+ VTableThunks[Idx].This = ThisAdjustment;
+ };
+
+ SetThisAdjustmentThunk(VTableIndex);
if (isa<CXXDestructorDecl>(MD)) {
// Add an adjustment for the deleting destructor as well.
- VTableThunks[VTableIndex + 1].This = ThisAdjustment;
+ SetThisAdjustmentThunk(VTableIndex + 1);
}
}
@@ -1509,6 +1539,8 @@ void ItaniumVTableBuilder::AddMethods(
FindNearestOverriddenMethod(MD, PrimaryBases)) {
if (ComputeReturnAdjustmentBaseOffset(Context, MD,
OverriddenMD).isEmpty()) {
+ VTables.setOriginalMethod(MD, OverriddenMD);
+
// Replace the method info of the overridden method with our own
// method.
assert(MethodInfoMap.count(OverriddenMD) &&
@@ -1547,7 +1579,8 @@ void ItaniumVTableBuilder::AddMethods(
// This is a virtual thunk for the most derived class, add it.
AddThunk(Overrider.Method,
- ThunkInfo(ThisAdjustment, ReturnAdjustment));
+ ThunkInfo(ThisAdjustment, ReturnAdjustment,
+ OverriddenMD->getThisType().getTypePtr()));
}
}
@@ -1615,6 +1648,15 @@ void ItaniumVTableBuilder::AddMethods(
ReturnAdjustment ReturnAdjustment =
ComputeReturnAdjustment(ReturnAdjustmentOffset);
+ // If a return adjustment is required, record the method that created the
+ // vtable entry. We need to record the method because we cannot call
+ // findOriginalMethod to find the method that created the entry if the
+ // method in the entry requires adjustment.
+ if (!ReturnAdjustment.isEmpty()) {
+ VTableThunks[Components.size()].Method = MD;
+ VTableThunks[Components.size()].ThisType = MD->getThisType().getTypePtr();
+ }
+
AddMethod(Overrider.Method, ReturnAdjustment);
}
}
@@ -1890,11 +1932,31 @@ void ItaniumVTableBuilder::LayoutVTablesForVirtualBases(
}
}
+static void printThunkMethod(const ThunkInfo &Info, raw_ostream &Out) {
+ if (!Info.Method)
+ return;
+ std::string Str = PredefinedExpr::ComputeName(
+ PredefinedIdentKind::PrettyFunctionNoVirtual, Info.Method);
+ Out << " method: " << Str;
+}
+
/// dumpLayout - Dump the vtable layout.
void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
// FIXME: write more tests that actually use the dumpLayout output to prevent
// ItaniumVTableBuilder regressions.
+ Out << "Original map\n";
+
+ for (const auto &P : VTables.getOriginalMethodMap()) {
+ std::string Str0 =
+ PredefinedExpr::ComputeName(PredefinedIdentKind::PrettyFunctionNoVirtual,
+ P.first);
+ std::string Str1 =
+ PredefinedExpr::ComputeName(PredefinedIdentKind::PrettyFunctionNoVirtual,
+ P.second);
+ Out << " " << Str0 << " -> " << Str1 << "\n";
+ }
+
if (isBuildingConstructorVTable()) {
Out << "Construction vtable for ('";
MostDerivedClass->printQualifiedName(Out);
@@ -1978,6 +2040,7 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
}
Out << ']';
+ printThunkMethod(Thunk, Out);
}
// If this function pointer has a 'this' pointer adjustment, dump it.
@@ -1991,6 +2054,7 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
}
Out << ']';
+ printThunkMethod(Thunk, Out);
}
}
@@ -2027,6 +2091,7 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
Out << ']';
}
+ printThunkMethod(Thunk, Out);
}
break;
@@ -2125,7 +2190,6 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
ThunkInfoVectorTy ThunksVector = Thunks[MD];
llvm::sort(ThunksVector, [](const ThunkInfo &LHS, const ThunkInfo &RHS) {
- assert(LHS.Method == nullptr && RHS.Method == nullptr);
return std::tie(LHS.This, LHS.Return) < std::tie(RHS.This, RHS.Return);
});
@@ -2314,6 +2378,35 @@ ItaniumVTableContext::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
return I->second;
}
+GlobalDecl ItaniumVTableContext::findOriginalMethod(GlobalDecl GD) {
+ const auto *MD = cast<CXXMethodDecl>(GD.getDecl());
+ computeVTableRelatedInformation(MD->getParent());
+ const CXXMethodDecl *OriginalMD = findOriginalMethodInMap(MD);
+
+ if (const auto *DD = dyn_cast<CXXDestructorDecl>(OriginalMD))
+ return GlobalDecl(DD, GD.getDtorType());
+ return OriginalMD;
+}
+
+const CXXMethodDecl *
+ItaniumVTableContext::findOriginalMethodInMap(const CXXMethodDecl *MD) const {
+ // Traverse the chain of virtual methods until we find the method that added
+ // the v-table slot.
+ while (true) {
+ auto I = OriginalMethodMap.find(MD);
+
+ // MD doesn't exist in OriginalMethodMap, so it must be the method we are
+ // looking for.
+ if (I == OriginalMethodMap.end())
+ break;
+
+ // Set MD to the overridden method.
+ MD = I->second;
+ }
+
+ return MD;
+}
+
static std::unique_ptr<VTableLayout>
CreateVTableLayout(const ItaniumVTableBuilder &Builder) {
SmallVector<VTableLayout::VTableThunkTy, 1>
@@ -3094,9 +3187,9 @@ void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth,
ReturnAdjustmentOffset.VirtualBase);
}
}
-
+ auto ThisType = (OverriddenMD ? OverriddenMD : MD)->getThisType().getTypePtr();
AddMethod(FinalOverriderMD,
- ThunkInfo(ThisAdjustmentOffset, ReturnAdjustment,
+ ThunkInfo(ThisAdjustmentOffset, ReturnAdjustment, ThisType,
ForceReturnAdjustmentMangling ? MD : nullptr));
}
}
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp
index cf9ae7c974a6..37c91abb5c83 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp
@@ -21,7 +21,7 @@ getBestGuess(llvm::StringRef Search, llvm::ArrayRef<llvm::StringRef> Allowed,
llvm::StringRef Res;
for (const llvm::StringRef &Item : Allowed) {
if (Item.equals_insensitive(Search)) {
- assert(!Item.equals(Search) && "This should be handled earlier on.");
+ assert(Item != Search && "This should be handled earlier on.");
MaxEditDistance = 1;
Res = Item;
continue;
@@ -41,7 +41,7 @@ getBestGuess(llvm::StringRef Search, llvm::ArrayRef<llvm::StringRef> Allowed,
if (!NoPrefix.consume_front(DropPrefix))
continue;
if (NoPrefix.equals_insensitive(Search)) {
- if (NoPrefix.equals(Search))
+ if (NoPrefix == Search)
return Item.str();
MaxEditDistance = 1;
Res = Item;
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
index c76ddf17b719..0e640cbada72 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
@@ -937,7 +937,7 @@ class MapAnyOfMatcherDescriptor : public MatcherDescriptor {
public:
MapAnyOfMatcherDescriptor(ASTNodeKind CladeNodeKind,
std::vector<ASTNodeKind> NodeKinds)
- : CladeNodeKind(CladeNodeKind), NodeKinds(NodeKinds) {}
+ : CladeNodeKind(CladeNodeKind), NodeKinds(std::move(NodeKinds)) {}
VariantMatcher create(SourceRange NameRange, ArrayRef<ParserValue> Args,
Diagnostics *Error) const override {
@@ -1026,7 +1026,7 @@ public:
}
return std::make_unique<MapAnyOfMatcherDescriptor>(CladeNodeKind,
- NodeKinds);
+ std::move(NodeKinds));
}
bool isVariadic() const override { return true; }
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
index 15dad022df5f..2c75e6beb743 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -432,6 +432,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isExpansionInMainFile);
REGISTER_MATCHER(isExpansionInSystemHeader);
REGISTER_MATCHER(isExplicit);
+ REGISTER_MATCHER(isExplicitObjectMemberFunction);
REGISTER_MATCHER(isExplicitTemplateSpecialization);
REGISTER_MATCHER(isExpr);
REGISTER_MATCHER(isExternC);
diff --git a/contrib/llvm-project/clang/lib/Analysis/CFG.cpp b/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
index 03ab4c6fdf29..64e6155de090 100644
--- a/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
@@ -1788,10 +1788,7 @@ static QualType getReferenceInitTemporaryType(const Expr *Init,
}
// Skip sub-object accesses into rvalues.
- SmallVector<const Expr *, 2> CommaLHSs;
- SmallVector<SubobjectAdjustment, 2> Adjustments;
- const Expr *SkippedInit =
- Init->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
+ const Expr *SkippedInit = Init->skipRValueSubobjectAdjustments();
if (SkippedInit != Init) {
Init = SkippedInit;
continue;
@@ -2042,7 +2039,7 @@ void CFGBuilder::addImplicitDtorsForDestructor(const CXXDestructorDecl *DD) {
QualType QT = FI->getType();
// It may be a multidimensional array.
while (const ConstantArrayType *AT = Context->getAsConstantArrayType(QT)) {
- if (AT->getSize() == 0)
+ if (AT->isZeroSize())
break;
QT = AT->getElementType();
}
@@ -2136,7 +2133,7 @@ bool CFGBuilder::hasTrivialDestructor(const VarDecl *VD) const {
// Check for constant size array. Set type to array element type.
while (const ConstantArrayType *AT = Context->getAsConstantArrayType(QT)) {
- if (AT->getSize() == 0)
+ if (AT->isZeroSize())
return true;
QT = AT->getElementType();
}
diff --git a/contrib/llvm-project/clang/lib/Analysis/CalledOnceCheck.cpp b/contrib/llvm-project/clang/lib/Analysis/CalledOnceCheck.cpp
index 04c5f6aa9c74..30cbd257b65e 100644
--- a/contrib/llvm-project/clang/lib/Analysis/CalledOnceCheck.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/CalledOnceCheck.cpp
@@ -163,7 +163,7 @@ public:
NotVisited = 0x8, /* 1000 */
// We already reported a violation and stopped tracking calls for this
// parameter.
- Reported = 0x15, /* 1111 */
+ Reported = 0xF, /* 1111 */
LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ Reported)
};
@@ -932,7 +932,8 @@ private:
ParameterStatus &CurrentParamStatus = CurrentState.getStatusFor(Index);
// Escape overrides whatever error we think happened.
- if (CurrentParamStatus.isErrorStatus()) {
+ if (CurrentParamStatus.isErrorStatus() &&
+ CurrentParamStatus.getKind() != ParameterStatus::Kind::Reported) {
CurrentParamStatus = ParameterStatus::Escaped;
}
}
diff --git a/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp b/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp
index bb042760d297..6d726ae44104 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp
@@ -186,9 +186,10 @@ template <> struct NodeID<Decl> { static constexpr StringRef value = "decl"; };
constexpr StringRef NodeID<Expr>::value;
constexpr StringRef NodeID<Decl>::value;
-template <class T, class F = const Stmt *(ExprMutationAnalyzer::*)(const T *)>
+template <class T,
+ class F = const Stmt *(ExprMutationAnalyzer::Analyzer::*)(const T *)>
const Stmt *tryEachMatch(ArrayRef<ast_matchers::BoundNodes> Matches,
- ExprMutationAnalyzer *Analyzer, F Finder) {
+ ExprMutationAnalyzer::Analyzer *Analyzer, F Finder) {
const StringRef ID = NodeID<T>::value;
for (const auto &Nodes : Matches) {
if (const Stmt *S = (Analyzer->*Finder)(Nodes.getNodeAs<T>(ID)))
@@ -199,50 +200,57 @@ const Stmt *tryEachMatch(ArrayRef<ast_matchers::BoundNodes> Matches,
} // namespace
-const Stmt *ExprMutationAnalyzer::findMutation(const Expr *Exp) {
- return findMutationMemoized(Exp,
- {&ExprMutationAnalyzer::findDirectMutation,
- &ExprMutationAnalyzer::findMemberMutation,
- &ExprMutationAnalyzer::findArrayElementMutation,
- &ExprMutationAnalyzer::findCastMutation,
- &ExprMutationAnalyzer::findRangeLoopMutation,
- &ExprMutationAnalyzer::findReferenceMutation,
- &ExprMutationAnalyzer::findFunctionArgMutation},
- Results);
+const Stmt *ExprMutationAnalyzer::Analyzer::findMutation(const Expr *Exp) {
+ return findMutationMemoized(
+ Exp,
+ {&ExprMutationAnalyzer::Analyzer::findDirectMutation,
+ &ExprMutationAnalyzer::Analyzer::findMemberMutation,
+ &ExprMutationAnalyzer::Analyzer::findArrayElementMutation,
+ &ExprMutationAnalyzer::Analyzer::findCastMutation,
+ &ExprMutationAnalyzer::Analyzer::findRangeLoopMutation,
+ &ExprMutationAnalyzer::Analyzer::findReferenceMutation,
+ &ExprMutationAnalyzer::Analyzer::findFunctionArgMutation},
+ Memorized.Results);
}
-const Stmt *ExprMutationAnalyzer::findMutation(const Decl *Dec) {
- return tryEachDeclRef(Dec, &ExprMutationAnalyzer::findMutation);
+const Stmt *ExprMutationAnalyzer::Analyzer::findMutation(const Decl *Dec) {
+ return tryEachDeclRef(Dec, &ExprMutationAnalyzer::Analyzer::findMutation);
}
-const Stmt *ExprMutationAnalyzer::findPointeeMutation(const Expr *Exp) {
- return findMutationMemoized(Exp, {/*TODO*/}, PointeeResults);
+const Stmt *
+ExprMutationAnalyzer::Analyzer::findPointeeMutation(const Expr *Exp) {
+ return findMutationMemoized(Exp, {/*TODO*/}, Memorized.PointeeResults);
}
-const Stmt *ExprMutationAnalyzer::findPointeeMutation(const Decl *Dec) {
- return tryEachDeclRef(Dec, &ExprMutationAnalyzer::findPointeeMutation);
+const Stmt *
+ExprMutationAnalyzer::Analyzer::findPointeeMutation(const Decl *Dec) {
+ return tryEachDeclRef(Dec,
+ &ExprMutationAnalyzer::Analyzer::findPointeeMutation);
}
-const Stmt *ExprMutationAnalyzer::findMutationMemoized(
+const Stmt *ExprMutationAnalyzer::Analyzer::findMutationMemoized(
const Expr *Exp, llvm::ArrayRef<MutationFinder> Finders,
- ResultMap &MemoizedResults) {
+ Memoized::ResultMap &MemoizedResults) {
const auto Memoized = MemoizedResults.find(Exp);
if (Memoized != MemoizedResults.end())
return Memoized->second;
+ // Assume Exp is not mutated before analyzing Exp.
+ MemoizedResults[Exp] = nullptr;
if (isUnevaluated(Exp))
- return MemoizedResults[Exp] = nullptr;
+ return nullptr;
for (const auto &Finder : Finders) {
if (const Stmt *S = (this->*Finder)(Exp))
return MemoizedResults[Exp] = S;
}
- return MemoizedResults[Exp] = nullptr;
+ return nullptr;
}
-const Stmt *ExprMutationAnalyzer::tryEachDeclRef(const Decl *Dec,
- MutationFinder Finder) {
+const Stmt *
+ExprMutationAnalyzer::Analyzer::tryEachDeclRef(const Decl *Dec,
+ MutationFinder Finder) {
const auto Refs = match(
findAll(
declRefExpr(to(
@@ -261,8 +269,9 @@ const Stmt *ExprMutationAnalyzer::tryEachDeclRef(const Decl *Dec,
return nullptr;
}
-bool ExprMutationAnalyzer::isUnevaluated(const Stmt *Exp, const Stmt &Stm,
- ASTContext &Context) {
+bool ExprMutationAnalyzer::Analyzer::isUnevaluated(const Stmt *Exp,
+ const Stmt &Stm,
+ ASTContext &Context) {
return selectFirst<Stmt>(
NodeID<Expr>::value,
match(
@@ -293,33 +302,36 @@ bool ExprMutationAnalyzer::isUnevaluated(const Stmt *Exp, const Stmt &Stm,
Stm, Context)) != nullptr;
}
-bool ExprMutationAnalyzer::isUnevaluated(const Expr *Exp) {
+bool ExprMutationAnalyzer::Analyzer::isUnevaluated(const Expr *Exp) {
return isUnevaluated(Exp, Stm, Context);
}
const Stmt *
-ExprMutationAnalyzer::findExprMutation(ArrayRef<BoundNodes> Matches) {
- return tryEachMatch<Expr>(Matches, this, &ExprMutationAnalyzer::findMutation);
+ExprMutationAnalyzer::Analyzer::findExprMutation(ArrayRef<BoundNodes> Matches) {
+ return tryEachMatch<Expr>(Matches, this,
+ &ExprMutationAnalyzer::Analyzer::findMutation);
}
const Stmt *
-ExprMutationAnalyzer::findDeclMutation(ArrayRef<BoundNodes> Matches) {
- return tryEachMatch<Decl>(Matches, this, &ExprMutationAnalyzer::findMutation);
+ExprMutationAnalyzer::Analyzer::findDeclMutation(ArrayRef<BoundNodes> Matches) {
+ return tryEachMatch<Decl>(Matches, this,
+ &ExprMutationAnalyzer::Analyzer::findMutation);
}
-const Stmt *ExprMutationAnalyzer::findExprPointeeMutation(
+const Stmt *ExprMutationAnalyzer::Analyzer::findExprPointeeMutation(
ArrayRef<ast_matchers::BoundNodes> Matches) {
- return tryEachMatch<Expr>(Matches, this,
- &ExprMutationAnalyzer::findPointeeMutation);
+ return tryEachMatch<Expr>(
+ Matches, this, &ExprMutationAnalyzer::Analyzer::findPointeeMutation);
}
-const Stmt *ExprMutationAnalyzer::findDeclPointeeMutation(
+const Stmt *ExprMutationAnalyzer::Analyzer::findDeclPointeeMutation(
ArrayRef<ast_matchers::BoundNodes> Matches) {
- return tryEachMatch<Decl>(Matches, this,
- &ExprMutationAnalyzer::findPointeeMutation);
+ return tryEachMatch<Decl>(
+ Matches, this, &ExprMutationAnalyzer::Analyzer::findPointeeMutation);
}
-const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
+const Stmt *
+ExprMutationAnalyzer::Analyzer::findDirectMutation(const Expr *Exp) {
// LHS of any assignment operators.
const auto AsAssignmentLhs =
binaryOperator(isAssignmentOperator(), hasLHS(canResolveToExpr(Exp)));
@@ -392,25 +404,24 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
memberExpr(hasObjectExpression(canResolveToExpr(Exp)))),
nonConstReferenceType());
const auto NotInstantiated = unless(hasDeclaration(isInstantiated()));
- const auto TypeDependentCallee =
- callee(expr(anyOf(unresolvedLookupExpr(), unresolvedMemberExpr(),
- cxxDependentScopeMemberExpr(),
- hasType(templateTypeParmType()), isTypeDependent())));
-
- const auto AsNonConstRefArg = anyOf(
- callExpr(NonConstRefParam, NotInstantiated),
- cxxConstructExpr(NonConstRefParam, NotInstantiated),
- callExpr(TypeDependentCallee, hasAnyArgument(canResolveToExpr(Exp))),
- cxxUnresolvedConstructExpr(hasAnyArgument(canResolveToExpr(Exp))),
- // Previous False Positive in the following Code:
- // `template <typename T> void f() { int i = 42; new Type<T>(i); }`
- // Where the constructor of `Type` takes its argument as reference.
- // The AST does not resolve in a `cxxConstructExpr` because it is
- // type-dependent.
- parenListExpr(hasDescendant(expr(canResolveToExpr(Exp)))),
- // If the initializer is for a reference type, there is no cast for
- // the variable. Values are cast to RValue first.
- initListExpr(hasAnyInit(expr(canResolveToExpr(Exp)))));
+
+ const auto AsNonConstRefArg =
+ anyOf(callExpr(NonConstRefParam, NotInstantiated),
+ cxxConstructExpr(NonConstRefParam, NotInstantiated),
+ // If the call is type-dependent, we can't properly process any
+ // argument because required type conversions and implicit casts
+ // will be inserted only after specialization.
+ callExpr(isTypeDependent(), hasAnyArgument(canResolveToExpr(Exp))),
+ cxxUnresolvedConstructExpr(hasAnyArgument(canResolveToExpr(Exp))),
+ // Previous False Positive in the following Code:
+ // `template <typename T> void f() { int i = 42; new Type<T>(i); }`
+ // Where the constructor of `Type` takes its argument as reference.
+ // The AST does not resolve in a `cxxConstructExpr` because it is
+ // type-dependent.
+ parenListExpr(hasDescendant(expr(canResolveToExpr(Exp)))),
+ // If the initializer is for a reference type, there is no cast for
+ // the variable. Values are cast to RValue first.
+ initListExpr(hasAnyInit(expr(canResolveToExpr(Exp)))));
// Captured by a lambda by reference.
// If we're initializing a capture with 'Exp' directly then we're initializing
@@ -426,7 +437,7 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
const auto AsNonConstRefReturn =
returnStmt(hasReturnValue(canResolveToExpr(Exp)));
- // It is used as a non-const-reference for initalizing a range-for loop.
+ // It is used as a non-const-reference for initializing a range-for loop.
const auto AsNonConstRefRangeInit = cxxForRangeStmt(hasRangeInit(declRefExpr(
allOf(canResolveToExpr(Exp), hasType(nonConstReferenceType())))));
@@ -443,7 +454,8 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
return selectFirst<Stmt>("stmt", Matches);
}
-const Stmt *ExprMutationAnalyzer::findMemberMutation(const Expr *Exp) {
+const Stmt *
+ExprMutationAnalyzer::Analyzer::findMemberMutation(const Expr *Exp) {
// Check whether any member of 'Exp' is mutated.
const auto MemberExprs = match(
findAll(expr(anyOf(memberExpr(hasObjectExpression(canResolveToExpr(Exp))),
@@ -456,7 +468,8 @@ const Stmt *ExprMutationAnalyzer::findMemberMutation(const Expr *Exp) {
return findExprMutation(MemberExprs);
}
-const Stmt *ExprMutationAnalyzer::findArrayElementMutation(const Expr *Exp) {
+const Stmt *
+ExprMutationAnalyzer::Analyzer::findArrayElementMutation(const Expr *Exp) {
// Check whether any element of an array is mutated.
const auto SubscriptExprs = match(
findAll(arraySubscriptExpr(
@@ -469,7 +482,7 @@ const Stmt *ExprMutationAnalyzer::findArrayElementMutation(const Expr *Exp) {
return findExprMutation(SubscriptExprs);
}
-const Stmt *ExprMutationAnalyzer::findCastMutation(const Expr *Exp) {
+const Stmt *ExprMutationAnalyzer::Analyzer::findCastMutation(const Expr *Exp) {
// If the 'Exp' is explicitly casted to a non-const reference type the
// 'Exp' is considered to be modified.
const auto ExplicitCast =
@@ -504,7 +517,8 @@ const Stmt *ExprMutationAnalyzer::findCastMutation(const Expr *Exp) {
return findExprMutation(Calls);
}
-const Stmt *ExprMutationAnalyzer::findRangeLoopMutation(const Expr *Exp) {
+const Stmt *
+ExprMutationAnalyzer::Analyzer::findRangeLoopMutation(const Expr *Exp) {
// Keep the ordering for the specific initialization matches to happen first,
// because it is cheaper to match all potential modifications of the loop
// variable.
@@ -567,7 +581,8 @@ const Stmt *ExprMutationAnalyzer::findRangeLoopMutation(const Expr *Exp) {
return findDeclMutation(LoopVars);
}
-const Stmt *ExprMutationAnalyzer::findReferenceMutation(const Expr *Exp) {
+const Stmt *
+ExprMutationAnalyzer::Analyzer::findReferenceMutation(const Expr *Exp) {
// Follow non-const reference returned by `operator*()` of move-only classes.
// These are typically smart pointers with unique ownership so we treat
// mutation of pointee as mutation of the smart pointer itself.
@@ -599,7 +614,8 @@ const Stmt *ExprMutationAnalyzer::findReferenceMutation(const Expr *Exp) {
return findDeclMutation(Refs);
}
-const Stmt *ExprMutationAnalyzer::findFunctionArgMutation(const Expr *Exp) {
+const Stmt *
+ExprMutationAnalyzer::Analyzer::findFunctionArgMutation(const Expr *Exp) {
const auto NonConstRefParam = forEachArgumentWithParam(
canResolveToExpr(Exp),
parmVarDecl(hasType(nonConstReferenceType())).bind("parm"));
@@ -637,10 +653,9 @@ const Stmt *ExprMutationAnalyzer::findFunctionArgMutation(const Expr *Exp) {
if (const auto *RefType = ParmType->getAs<RValueReferenceType>()) {
if (!RefType->getPointeeType().getQualifiers() &&
RefType->getPointeeType()->getAs<TemplateTypeParmType>()) {
- std::unique_ptr<FunctionParmMutationAnalyzer> &Analyzer =
- FuncParmAnalyzer[Func];
- if (!Analyzer)
- Analyzer.reset(new FunctionParmMutationAnalyzer(*Func, Context));
+ FunctionParmMutationAnalyzer *Analyzer =
+ FunctionParmMutationAnalyzer::getFunctionParmMutationAnalyzer(
+ *Func, Context, Memorized);
if (Analyzer->findMutation(Parm))
return Exp;
continue;
@@ -653,13 +668,15 @@ const Stmt *ExprMutationAnalyzer::findFunctionArgMutation(const Expr *Exp) {
}
FunctionParmMutationAnalyzer::FunctionParmMutationAnalyzer(
- const FunctionDecl &Func, ASTContext &Context)
- : BodyAnalyzer(*Func.getBody(), Context) {
+ const FunctionDecl &Func, ASTContext &Context,
+ ExprMutationAnalyzer::Memoized &Memorized)
+ : BodyAnalyzer(*Func.getBody(), Context, Memorized) {
if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(&Func)) {
// CXXCtorInitializer might also mutate Param but they're not part of
// function body, check them eagerly here since they're typically trivial.
for (const CXXCtorInitializer *Init : Ctor->inits()) {
- ExprMutationAnalyzer InitAnalyzer(*Init->getInit(), Context);
+ ExprMutationAnalyzer::Analyzer InitAnalyzer(*Init->getInit(), Context,
+ Memorized);
for (const ParmVarDecl *Parm : Ctor->parameters()) {
if (Results.contains(Parm))
continue;
@@ -675,11 +692,14 @@ FunctionParmMutationAnalyzer::findMutation(const ParmVarDecl *Parm) {
const auto Memoized = Results.find(Parm);
if (Memoized != Results.end())
return Memoized->second;
-
+  // To handle call A -> call B -> call A, assume the parameters of A are not
+  // mutated before analyzing the parameters of A. Then, when analyzing the
+  // second "call A", FunctionParmMutationAnalyzer can use this memoized value
+  // to avoid infinite recursion.
+ Results[Parm] = nullptr;
if (const Stmt *S = BodyAnalyzer.findMutation(Parm))
return Results[Parm] = S;
-
- return Results[Parm] = nullptr;
+ return Results[Parm];
}
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ASTOps.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ASTOps.cpp
new file mode 100644
index 000000000000..27d42a7b5085
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ASTOps.cpp
@@ -0,0 +1,287 @@
+//===-- ASTOps.cpp ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Operations on AST nodes that are used in flow-sensitive analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/ASTOps.h"
+#include "clang/AST/ComputeDependence.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/Type.h"
+#include "clang/Analysis/FlowSensitive/StorageLocation.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include <cassert>
+#include <iterator>
+#include <vector>
+
+#define DEBUG_TYPE "dataflow"
+
+namespace clang::dataflow {
+
+const Expr &ignoreCFGOmittedNodes(const Expr &E) {
+ const Expr *Current = &E;
+ const Expr *Last = nullptr;
+ while (Current != Last) {
+ Last = Current;
+ if (auto *EWC = dyn_cast<ExprWithCleanups>(Current)) {
+ Current = EWC->getSubExpr();
+ assert(Current != nullptr);
+ }
+ if (auto *CE = dyn_cast<ConstantExpr>(Current)) {
+ Current = CE->getSubExpr();
+ assert(Current != nullptr);
+ }
+ Current = Current->IgnoreParens();
+ assert(Current != nullptr);
+ }
+ return *Current;
+}
+
+const Stmt &ignoreCFGOmittedNodes(const Stmt &S) {
+ if (auto *E = dyn_cast<Expr>(&S))
+ return ignoreCFGOmittedNodes(*E);
+ return S;
+}
+
+// FIXME: Does not precisely handle non-virtual diamond inheritance. A single
+// field decl will be modeled for all instances of the inherited field.
+static void getFieldsFromClassHierarchy(QualType Type, FieldSet &Fields) {
+ if (Type->isIncompleteType() || Type->isDependentType() ||
+ !Type->isRecordType())
+ return;
+
+ for (const FieldDecl *Field : Type->getAsRecordDecl()->fields())
+ Fields.insert(Field);
+ if (auto *CXXRecord = Type->getAsCXXRecordDecl())
+ for (const CXXBaseSpecifier &Base : CXXRecord->bases())
+ getFieldsFromClassHierarchy(Base.getType(), Fields);
+}
+
+/// Gets the set of all fields in the type.
+FieldSet getObjectFields(QualType Type) {
+ FieldSet Fields;
+ getFieldsFromClassHierarchy(Type, Fields);
+ return Fields;
+}
+
+bool containsSameFields(const FieldSet &Fields,
+ const RecordStorageLocation::FieldToLoc &FieldLocs) {
+ if (Fields.size() != FieldLocs.size())
+ return false;
+ for ([[maybe_unused]] auto [Field, Loc] : FieldLocs)
+ if (!Fields.contains(cast_or_null<FieldDecl>(Field)))
+ return false;
+ return true;
+}
+
+/// Returns the fields of a `RecordDecl` that are initialized by an
+/// `InitListExpr` or `CXXParenListInitExpr`, in the order in which they appear
+/// in `InitListExpr::inits()` / `CXXParenListInitExpr::getInitExprs()`.
+/// `InitList->getType()` must be a record type.
+template <class InitListT>
+static std::vector<const FieldDecl *>
+getFieldsForInitListExpr(const InitListT *InitList) {
+ const RecordDecl *RD = InitList->getType()->getAsRecordDecl();
+ assert(RD != nullptr);
+
+ std::vector<const FieldDecl *> Fields;
+
+ if (InitList->getType()->isUnionType()) {
+ if (const FieldDecl *Field = InitList->getInitializedFieldInUnion())
+ Fields.push_back(Field);
+ return Fields;
+ }
+
+ // Unnamed bitfields are only used for padding and do not appear in
+ // `InitListExpr`'s inits. However, those fields do appear in `RecordDecl`'s
+ // field list, and we thus need to remove them before mapping inits to
+  // fields to avoid mapping inits to the wrong fields.
+ llvm::copy_if(
+ RD->fields(), std::back_inserter(Fields),
+ [](const FieldDecl *Field) { return !Field->isUnnamedBitField(); });
+ return Fields;
+}
+
+RecordInitListHelper::RecordInitListHelper(const InitListExpr *InitList)
+ : RecordInitListHelper(InitList->getType(),
+ getFieldsForInitListExpr(InitList),
+ InitList->inits()) {}
+
+RecordInitListHelper::RecordInitListHelper(
+ const CXXParenListInitExpr *ParenInitList)
+ : RecordInitListHelper(ParenInitList->getType(),
+ getFieldsForInitListExpr(ParenInitList),
+ ParenInitList->getInitExprs()) {}
+
+RecordInitListHelper::RecordInitListHelper(
+ QualType Ty, std::vector<const FieldDecl *> Fields,
+ ArrayRef<Expr *> Inits) {
+ auto *RD = Ty->getAsCXXRecordDecl();
+ assert(RD != nullptr);
+
+ // Unions initialized with an empty initializer list need special treatment.
+ // For structs/classes initialized with an empty initializer list, Clang
+ // puts `ImplicitValueInitExpr`s in `InitListExpr::inits()`, but for unions,
+ // it doesn't do this -- so we create an `ImplicitValueInitExpr` ourselves.
+ SmallVector<Expr *> InitsForUnion;
+ if (Ty->isUnionType() && Inits.empty()) {
+ assert(Fields.size() <= 1);
+ if (!Fields.empty()) {
+ ImplicitValueInitForUnion.emplace(Fields.front()->getType());
+ InitsForUnion.push_back(&*ImplicitValueInitForUnion);
+ }
+ Inits = InitsForUnion;
+ }
+
+ size_t InitIdx = 0;
+
+ assert(Fields.size() + RD->getNumBases() == Inits.size());
+ for (const CXXBaseSpecifier &Base : RD->bases()) {
+ assert(InitIdx < Inits.size());
+ Expr *Init = Inits[InitIdx++];
+ BaseInits.emplace_back(&Base, Init);
+ }
+
+ assert(Fields.size() == Inits.size() - InitIdx);
+ for (const FieldDecl *Field : Fields) {
+ assert(InitIdx < Inits.size());
+ Expr *Init = Inits[InitIdx++];
+ FieldInits.emplace_back(Field, Init);
+ }
+}
+
+static void insertIfGlobal(const Decl &D,
+ llvm::DenseSet<const VarDecl *> &Globals) {
+ if (auto *V = dyn_cast<VarDecl>(&D))
+ if (V->hasGlobalStorage())
+ Globals.insert(V);
+}
+
+static void insertIfFunction(const Decl &D,
+ llvm::DenseSet<const FunctionDecl *> &Funcs) {
+ if (auto *FD = dyn_cast<FunctionDecl>(&D))
+ Funcs.insert(FD);
+}
+
+static MemberExpr *getMemberForAccessor(const CXXMemberCallExpr &C) {
+ // Use getCalleeDecl instead of getMethodDecl in order to handle
+ // pointer-to-member calls.
+ const auto *MethodDecl = dyn_cast_or_null<CXXMethodDecl>(C.getCalleeDecl());
+ if (!MethodDecl)
+ return nullptr;
+ auto *Body = dyn_cast_or_null<CompoundStmt>(MethodDecl->getBody());
+ if (!Body || Body->size() != 1)
+ return nullptr;
+ if (auto *RS = dyn_cast<ReturnStmt>(*Body->body_begin()))
+ if (auto *Return = RS->getRetValue())
+ return dyn_cast<MemberExpr>(Return->IgnoreParenImpCasts());
+ return nullptr;
+}
+
+class ReferencedDeclsVisitor
+ : public AnalysisASTVisitor<ReferencedDeclsVisitor> {
+public:
+ ReferencedDeclsVisitor(ReferencedDecls &Referenced)
+ : Referenced(Referenced) {}
+
+ void TraverseConstructorInits(const CXXConstructorDecl *Ctor) {
+ for (const CXXCtorInitializer *Init : Ctor->inits()) {
+ if (Init->isMemberInitializer()) {
+ Referenced.Fields.insert(Init->getMember());
+ } else if (Init->isIndirectMemberInitializer()) {
+ for (const auto *I : Init->getIndirectMember()->chain())
+ Referenced.Fields.insert(cast<FieldDecl>(I));
+ }
+
+ Expr *InitExpr = Init->getInit();
+
+ // Also collect declarations referenced in `InitExpr`.
+ TraverseStmt(InitExpr);
+
+ // If this is a `CXXDefaultInitExpr`, also collect declarations referenced
+ // within the default expression.
+ if (auto *DefaultInit = dyn_cast<CXXDefaultInitExpr>(InitExpr))
+ TraverseStmt(DefaultInit->getExpr());
+ }
+ }
+
+ bool VisitDecl(Decl *D) {
+ insertIfGlobal(*D, Referenced.Globals);
+ insertIfFunction(*D, Referenced.Functions);
+ return true;
+ }
+
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ insertIfGlobal(*E->getDecl(), Referenced.Globals);
+ insertIfFunction(*E->getDecl(), Referenced.Functions);
+ return true;
+ }
+
+ bool VisitCXXMemberCallExpr(CXXMemberCallExpr *C) {
+ // If this is a method that returns a member variable but does nothing else,
+ // model the field of the return value.
+ if (MemberExpr *E = getMemberForAccessor(*C))
+ if (const auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl()))
+ Referenced.Fields.insert(FD);
+ return true;
+ }
+
+ bool VisitMemberExpr(MemberExpr *E) {
+ // FIXME: should we be using `E->getFoundDecl()`?
+ const ValueDecl *VD = E->getMemberDecl();
+ insertIfGlobal(*VD, Referenced.Globals);
+ insertIfFunction(*VD, Referenced.Functions);
+ if (const auto *FD = dyn_cast<FieldDecl>(VD))
+ Referenced.Fields.insert(FD);
+ return true;
+ }
+
+ bool VisitInitListExpr(InitListExpr *InitList) {
+ if (InitList->getType()->isRecordType())
+ for (const auto *FD : getFieldsForInitListExpr(InitList))
+ Referenced.Fields.insert(FD);
+ return true;
+ }
+
+ bool VisitCXXParenListInitExpr(CXXParenListInitExpr *ParenInitList) {
+ if (ParenInitList->getType()->isRecordType())
+ for (const auto *FD : getFieldsForInitListExpr(ParenInitList))
+ Referenced.Fields.insert(FD);
+ return true;
+ }
+
+private:
+ ReferencedDecls &Referenced;
+};
+
+ReferencedDecls getReferencedDecls(const FunctionDecl &FD) {
+ ReferencedDecls Result;
+ ReferencedDeclsVisitor Visitor(Result);
+ Visitor.TraverseStmt(FD.getBody());
+ if (const auto *CtorDecl = dyn_cast<CXXConstructorDecl>(&FD))
+ Visitor.TraverseConstructorInits(CtorDecl);
+
+ return Result;
+}
+
+ReferencedDecls getReferencedDecls(const Stmt &S) {
+ ReferencedDecls Result;
+ ReferencedDeclsVisitor Visitor(Result);
+ Visitor.TraverseStmt(const_cast<Stmt *>(&S));
+ return Result;
+}
+
+} // namespace clang::dataflow
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/AdornedCFG.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/AdornedCFG.cpp
new file mode 100644
index 000000000000..255543021a99
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/AdornedCFG.cpp
@@ -0,0 +1,183 @@
+//===- AdornedCFG.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an `AdornedCFG` class that is used by dataflow analyses
+// that run over Control-Flow Graphs (CFGs).
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/AdornedCFG.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Stmt.h"
+#include "clang/Analysis/CFG.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Error.h"
+#include <utility>
+
+namespace clang {
+namespace dataflow {
+
+/// Returns a map from statements to basic blocks that contain them.
+static llvm::DenseMap<const Stmt *, const CFGBlock *>
+buildStmtToBasicBlockMap(const CFG &Cfg) {
+ llvm::DenseMap<const Stmt *, const CFGBlock *> StmtToBlock;
+ for (const CFGBlock *Block : Cfg) {
+ if (Block == nullptr)
+ continue;
+
+ for (const CFGElement &Element : *Block) {
+ auto Stmt = Element.getAs<CFGStmt>();
+ if (!Stmt)
+ continue;
+
+ StmtToBlock[Stmt->getStmt()] = Block;
+ }
+ }
+ // Some terminator conditions don't appear as a `CFGElement` anywhere else -
+ // for example, this is true if the terminator condition is a `&&` or `||`
+ // operator.
+ // We associate these conditions with the block the terminator appears in,
+ // but only if the condition has not already appeared as a regular
+ // `CFGElement`. (The `insert()` below does nothing if the key already exists
+ // in the map.)
+ for (const CFGBlock *Block : Cfg) {
+ if (Block != nullptr)
+ if (const Stmt *TerminatorCond = Block->getTerminatorCondition())
+ StmtToBlock.insert({TerminatorCond, Block});
+ }
+ // Terminator statements typically don't appear as a `CFGElement` anywhere
+ // else, so we want to associate them with the block that they terminate.
+ // However, there are some important special cases:
+ // - The conditional operator is a type of terminator, but it also appears
+ // as a regular `CFGElement`, and we want to associate it with the block
+ // in which it appears as a `CFGElement`.
+ // - The `&&` and `||` operators are types of terminators, but like the
+ // conditional operator, they can appear as a regular `CFGElement` or
+ // as a terminator condition (see above).
+ // We process terminators last to make sure that we only associate them with
+ // the block they terminate if they haven't previously occurred as a regular
+ // `CFGElement` or as a terminator condition.
+ for (const CFGBlock *Block : Cfg) {
+ if (Block != nullptr)
+ if (const Stmt *TerminatorStmt = Block->getTerminatorStmt())
+ StmtToBlock.insert({TerminatorStmt, Block});
+ }
+ return StmtToBlock;
+}
+
+static llvm::BitVector findReachableBlocks(const CFG &Cfg) {
+ llvm::BitVector BlockReachable(Cfg.getNumBlockIDs(), false);
+
+ llvm::SmallVector<const CFGBlock *> BlocksToVisit;
+ BlocksToVisit.push_back(&Cfg.getEntry());
+ while (!BlocksToVisit.empty()) {
+ const CFGBlock *Block = BlocksToVisit.back();
+ BlocksToVisit.pop_back();
+
+ if (BlockReachable[Block->getBlockID()])
+ continue;
+
+ BlockReachable[Block->getBlockID()] = true;
+
+ for (const CFGBlock *Succ : Block->succs())
+ if (Succ)
+ BlocksToVisit.push_back(Succ);
+ }
+
+ return BlockReachable;
+}
+
+static llvm::DenseSet<const CFGBlock *>
+buildContainsExprConsumedInDifferentBlock(
+ const CFG &Cfg,
+ const llvm::DenseMap<const Stmt *, const CFGBlock *> &StmtToBlock) {
+ llvm::DenseSet<const CFGBlock *> Result;
+
+ auto CheckChildExprs = [&Result, &StmtToBlock](const Stmt *S,
+ const CFGBlock *Block) {
+ for (const Stmt *Child : S->children()) {
+ if (!isa_and_nonnull<Expr>(Child))
+ continue;
+ const CFGBlock *ChildBlock = StmtToBlock.lookup(Child);
+ if (ChildBlock != Block)
+ Result.insert(ChildBlock);
+ }
+ };
+
+ for (const CFGBlock *Block : Cfg) {
+ if (Block == nullptr)
+ continue;
+
+ for (const CFGElement &Element : *Block)
+ if (auto S = Element.getAs<CFGStmt>())
+ CheckChildExprs(S->getStmt(), Block);
+
+ if (const Stmt *TerminatorCond = Block->getTerminatorCondition())
+ CheckChildExprs(TerminatorCond, Block);
+ }
+
+ return Result;
+}
+
+llvm::Expected<AdornedCFG> AdornedCFG::build(const FunctionDecl &Func) {
+ if (!Func.doesThisDeclarationHaveABody())
+ return llvm::createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Cannot analyze function without a body");
+
+ return build(Func, *Func.getBody(), Func.getASTContext());
+}
+
+llvm::Expected<AdornedCFG> AdornedCFG::build(const Decl &D, Stmt &S,
+ ASTContext &C) {
+ if (D.isTemplated())
+ return llvm::createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Cannot analyze templated declarations");
+
+ // The shape of certain elements of the AST can vary depending on the
+ // language. We currently only support C++.
+ if (!C.getLangOpts().CPlusPlus || C.getLangOpts().ObjC)
+ return llvm::createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Can only analyze C++");
+
+ CFG::BuildOptions Options;
+ Options.PruneTriviallyFalseEdges = true;
+ Options.AddImplicitDtors = true;
+ Options.AddTemporaryDtors = true;
+ Options.AddInitializers = true;
+ Options.AddCXXDefaultInitExprInCtors = true;
+ Options.AddLifetime = true;
+
+ // Ensure that all sub-expressions in basic blocks are evaluated.
+ Options.setAllAlwaysAdd();
+
+ auto Cfg = CFG::buildCFG(&D, &S, &C, Options);
+ if (Cfg == nullptr)
+ return llvm::createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "CFG::buildCFG failed");
+
+ llvm::DenseMap<const Stmt *, const CFGBlock *> StmtToBlock =
+ buildStmtToBasicBlockMap(*Cfg);
+
+ llvm::BitVector BlockReachable = findReachableBlocks(*Cfg);
+
+ llvm::DenseSet<const CFGBlock *> ContainsExprConsumedInDifferentBlock =
+ buildContainsExprConsumedInDifferentBlock(*Cfg, StmtToBlock);
+
+ return AdornedCFG(D, std::move(Cfg), std::move(StmtToBlock),
+ std::move(BlockReachable),
+ std::move(ContainsExprConsumedInDifferentBlock));
+}
+
+} // namespace dataflow
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/CNFFormula.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/CNFFormula.cpp
new file mode 100644
index 000000000000..2410ce1e7bd6
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/CNFFormula.cpp
@@ -0,0 +1,303 @@
+//===- CNFFormula.cpp -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A representation of a boolean formula in 3-CNF.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/CNFFormula.h"
+#include "llvm/ADT/DenseSet.h"
+
+#include <queue>
+
+namespace clang {
+namespace dataflow {
+
+namespace {
+
+/// Applies simplifications while building up a BooleanFormula.
+/// We keep track of unit clauses, which tell us variables that must be
+/// true/false in any model that satisfies the overall formula.
+/// Such variables can be dropped from subsequently-added clauses, which
+/// may in turn yield more unit clauses or even a contradiction.
+/// The total added complexity of this preprocessing is O(N), where for
+/// every clause we do a lookup against each known unit clause.
+/// The lookup is O(1) on average. This method won't catch all
+/// contradictory formulas, more passes can in principle catch
+/// more cases but we leave all these and the general case to the
+/// proper SAT solver.
+struct CNFFormulaBuilder {
+ // Formula should outlive CNFFormulaBuilder.
+ explicit CNFFormulaBuilder(CNFFormula &CNF) : Formula(CNF) {}
+
+ /// Adds the `L1 v ... v Ln` clause to the formula. Applies
+ /// simplifications, based on single-literal clauses.
+ ///
+ /// Requirements:
+ ///
+ /// `Li` must not be `NullLit`.
+ ///
+ /// All literals must be distinct.
+ void addClause(ArrayRef<Literal> Literals) {
+ // We generate clauses with up to 3 literals in this file.
+ assert(!Literals.empty() && Literals.size() <= 3);
+ // Contains literals of the simplified clause.
+ llvm::SmallVector<Literal> Simplified;
+ for (auto L : Literals) {
+ assert(L != NullLit &&
+ llvm::all_of(Simplified, [L](Literal S) { return S != L; }));
+ auto X = var(L);
+ if (trueVars.contains(X)) { // X must be true
+ if (isPosLit(L))
+ return; // Omit clause `(... v X v ...)`, it is `true`.
+ else
+ continue; // Omit `!X` from `(... v !X v ...)`.
+ }
+ if (falseVars.contains(X)) { // X must be false
+ if (isNegLit(L))
+ return; // Omit clause `(... v !X v ...)`, it is `true`.
+ else
+ continue; // Omit `X` from `(... v X v ...)`.
+ }
+ Simplified.push_back(L);
+ }
+ if (Simplified.empty()) {
+ // Simplification made the clause empty, which is equivalent to `false`.
+ // We already know that this formula is unsatisfiable.
+ Formula.addClause(Simplified);
+ return;
+ }
+ if (Simplified.size() == 1) {
+ // We have new unit clause.
+ const Literal lit = Simplified.front();
+ const Variable v = var(lit);
+ if (isPosLit(lit))
+ trueVars.insert(v);
+ else
+ falseVars.insert(v);
+ }
+ Formula.addClause(Simplified);
+ }
+
+ /// Returns true if we observed a contradiction while adding clauses.
+ /// In this case then the formula is already known to be unsatisfiable.
+ bool isKnownContradictory() { return Formula.knownContradictory(); }
+
+private:
+ CNFFormula &Formula;
+ llvm::DenseSet<Variable> trueVars;
+ llvm::DenseSet<Variable> falseVars;
+};
+
+} // namespace
+
+CNFFormula::CNFFormula(Variable LargestVar)
+ : LargestVar(LargestVar), KnownContradictory(false) {
+ Clauses.push_back(0);
+ ClauseStarts.push_back(0);
+}
+
+void CNFFormula::addClause(ArrayRef<Literal> lits) {
+ assert(llvm::all_of(lits, [](Literal L) { return L != NullLit; }));
+
+ if (lits.empty())
+ KnownContradictory = true;
+
+ const size_t S = Clauses.size();
+ ClauseStarts.push_back(S);
+ Clauses.insert(Clauses.end(), lits.begin(), lits.end());
+}
+
+CNFFormula buildCNF(const llvm::ArrayRef<const Formula *> &Formulas,
+ llvm::DenseMap<Variable, Atom> &Atomics) {
+ // The general strategy of the algorithm implemented below is to map each
+ // of the sub-values in `Vals` to a unique variable and use these variables in
+ // the resulting CNF expression to avoid exponential blow up. The number of
+ // literals in the resulting formula is guaranteed to be linear in the number
+ // of sub-formulas in `Vals`.
+
+ // Map each sub-formula in `Vals` to a unique variable.
+ llvm::DenseMap<const Formula *, Variable> FormulaToVar;
+ // Store variable identifiers and Atom of atomic booleans.
+ Variable NextVar = 1;
+ {
+ std::queue<const Formula *> UnprocessedFormulas;
+ for (const Formula *F : Formulas)
+ UnprocessedFormulas.push(F);
+ while (!UnprocessedFormulas.empty()) {
+ Variable Var = NextVar;
+ const Formula *F = UnprocessedFormulas.front();
+ UnprocessedFormulas.pop();
+
+ if (!FormulaToVar.try_emplace(F, Var).second)
+ continue;
+ ++NextVar;
+
+ for (const Formula *Op : F->operands())
+ UnprocessedFormulas.push(Op);
+ if (F->kind() == Formula::AtomRef)
+ Atomics[Var] = F->getAtom();
+ }
+ }
+
+ auto GetVar = [&FormulaToVar](const Formula *F) {
+ auto ValIt = FormulaToVar.find(F);
+ assert(ValIt != FormulaToVar.end());
+ return ValIt->second;
+ };
+
+ CNFFormula CNF(NextVar - 1);
+ std::vector<bool> ProcessedSubVals(NextVar, false);
+ CNFFormulaBuilder builder(CNF);
+
+ // Add a conjunct for each variable that represents a top-level conjunction
+ // value in `Vals`.
+ for (const Formula *F : Formulas)
+ builder.addClause(posLit(GetVar(F)));
+
+ // Add conjuncts that represent the mapping between newly-created variables
+ // and their corresponding sub-formulas.
+ std::queue<const Formula *> UnprocessedFormulas;
+ for (const Formula *F : Formulas)
+ UnprocessedFormulas.push(F);
+ while (!UnprocessedFormulas.empty()) {
+ const Formula *F = UnprocessedFormulas.front();
+ UnprocessedFormulas.pop();
+ const Variable Var = GetVar(F);
+
+ if (ProcessedSubVals[Var])
+ continue;
+ ProcessedSubVals[Var] = true;
+
+ switch (F->kind()) {
+ case Formula::AtomRef:
+ break;
+ case Formula::Literal:
+ CNF.addClause(F->literal() ? posLit(Var) : negLit(Var));
+ break;
+ case Formula::And: {
+ const Variable LHS = GetVar(F->operands()[0]);
+ const Variable RHS = GetVar(F->operands()[1]);
+
+ if (LHS == RHS) {
+ // `X <=> (A ^ A)` is equivalent to `(!X v A) ^ (X v !A)` which is
+ // already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ builder.addClause({negLit(Var), posLit(LHS)});
+ builder.addClause({posLit(Var), negLit(LHS)});
+ } else {
+ // `X <=> (A ^ B)` is equivalent to `(!X v A) ^ (!X v B) ^ (X v !A v
+ // !B)` which is already in conjunctive normal form. Below we add each
+ // of the conjuncts of the latter expression to the result.
+ builder.addClause({negLit(Var), posLit(LHS)});
+ builder.addClause({negLit(Var), posLit(RHS)});
+ builder.addClause({posLit(Var), negLit(LHS), negLit(RHS)});
+ }
+ break;
+ }
+ case Formula::Or: {
+ const Variable LHS = GetVar(F->operands()[0]);
+ const Variable RHS = GetVar(F->operands()[1]);
+
+ if (LHS == RHS) {
+ // `X <=> (A v A)` is equivalent to `(!X v A) ^ (X v !A)` which is
+ // already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ builder.addClause({negLit(Var), posLit(LHS)});
+ builder.addClause({posLit(Var), negLit(LHS)});
+ } else {
+ // `X <=> (A v B)` is equivalent to `(!X v A v B) ^ (X v !A) ^ (X v
+ // !B)` which is already in conjunctive normal form. Below we add each
+ // of the conjuncts of the latter expression to the result.
+ builder.addClause({negLit(Var), posLit(LHS), posLit(RHS)});
+ builder.addClause({posLit(Var), negLit(LHS)});
+ builder.addClause({posLit(Var), negLit(RHS)});
+ }
+ break;
+ }
+ case Formula::Not: {
+ const Variable Operand = GetVar(F->operands()[0]);
+
+ // `X <=> !Y` is equivalent to `(!X v !Y) ^ (X v Y)` which is
+ // already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ builder.addClause({negLit(Var), negLit(Operand)});
+ builder.addClause({posLit(Var), posLit(Operand)});
+ break;
+ }
+ case Formula::Implies: {
+ const Variable LHS = GetVar(F->operands()[0]);
+ const Variable RHS = GetVar(F->operands()[1]);
+
+ // `X <=> (A => B)` is equivalent to
+ // `(X v A) ^ (X v !B) ^ (!X v !A v B)` which is already in
+ // conjunctive normal form. Below we add each of the conjuncts of
+ // the latter expression to the result.
+ builder.addClause({posLit(Var), posLit(LHS)});
+ builder.addClause({posLit(Var), negLit(RHS)});
+ builder.addClause({negLit(Var), negLit(LHS), posLit(RHS)});
+ break;
+ }
+ case Formula::Equal: {
+ const Variable LHS = GetVar(F->operands()[0]);
+ const Variable RHS = GetVar(F->operands()[1]);
+
+ if (LHS == RHS) {
+ // `X <=> (A <=> A)` is equivalent to `X` which is already in
+ // conjunctive normal form. Below we add each of the conjuncts of the
+ // latter expression to the result.
+ builder.addClause(posLit(Var));
+
+ // No need to visit the sub-values of `Val`.
+ continue;
+ }
+ // `X <=> (A <=> B)` is equivalent to
+ // `(X v A v B) ^ (X v !A v !B) ^ (!X v A v !B) ^ (!X v !A v B)` which
+ // is already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ builder.addClause({posLit(Var), posLit(LHS), posLit(RHS)});
+ builder.addClause({posLit(Var), negLit(LHS), negLit(RHS)});
+ builder.addClause({negLit(Var), posLit(LHS), negLit(RHS)});
+ builder.addClause({negLit(Var), negLit(LHS), posLit(RHS)});
+ break;
+ }
+ }
+ if (builder.isKnownContradictory()) {
+ return CNF;
+ }
+ for (const Formula *Child : F->operands())
+ UnprocessedFormulas.push(Child);
+ }
+
+ // Unit clauses that were added later were not
+ // considered for the simplification of earlier clauses. Do a final
+ // pass to find more opportunities for simplification.
+ CNFFormula FinalCNF(NextVar - 1);
+ CNFFormulaBuilder FinalBuilder(FinalCNF);
+
+ // Collect unit clauses.
+ for (ClauseID C = 1; C <= CNF.numClauses(); ++C) {
+ if (CNF.clauseSize(C) == 1) {
+ FinalBuilder.addClause(CNF.clauseLiterals(C)[0]);
+ }
+ }
+
+ // Add all clauses that were added previously, preserving the order.
+ for (ClauseID C = 1; C <= CNF.numClauses(); ++C) {
+ FinalBuilder.addClause(CNF.clauseLiterals(C));
+ if (FinalBuilder.isKnownContradictory()) {
+ break;
+ }
+ }
+ // It is possible there were new unit clauses again, but
+ // we stop here and leave the rest to the solver algorithm.
+ return FinalCNF;
+}
+
+} // namespace dataflow
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp
deleted file mode 100644
index c9ebffe6f378..000000000000
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp
+++ /dev/null
@@ -1,121 +0,0 @@
-//===- ControlFlowContext.cpp ---------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines a ControlFlowContext class that is used by dataflow
-// analyses that run over Control-Flow Graphs (CFGs).
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
-#include "clang/AST/ASTContext.h"
-#include "clang/AST/Decl.h"
-#include "clang/AST/Stmt.h"
-#include "clang/Analysis/CFG.h"
-#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/Support/Error.h"
-#include <utility>
-
-namespace clang {
-namespace dataflow {
-
-/// Returns a map from statements to basic blocks that contain them.
-static llvm::DenseMap<const Stmt *, const CFGBlock *>
-buildStmtToBasicBlockMap(const CFG &Cfg) {
- llvm::DenseMap<const Stmt *, const CFGBlock *> StmtToBlock;
- for (const CFGBlock *Block : Cfg) {
- if (Block == nullptr)
- continue;
-
- for (const CFGElement &Element : *Block) {
- auto Stmt = Element.getAs<CFGStmt>();
- if (!Stmt)
- continue;
-
- StmtToBlock[Stmt->getStmt()] = Block;
- }
- if (const Stmt *TerminatorStmt = Block->getTerminatorStmt())
- StmtToBlock[TerminatorStmt] = Block;
- }
- return StmtToBlock;
-}
-
-static llvm::BitVector findReachableBlocks(const CFG &Cfg) {
- llvm::BitVector BlockReachable(Cfg.getNumBlockIDs(), false);
-
- llvm::SmallVector<const CFGBlock *> BlocksToVisit;
- BlocksToVisit.push_back(&Cfg.getEntry());
- while (!BlocksToVisit.empty()) {
- const CFGBlock *Block = BlocksToVisit.back();
- BlocksToVisit.pop_back();
-
- if (BlockReachable[Block->getBlockID()])
- continue;
-
- BlockReachable[Block->getBlockID()] = true;
-
- for (const CFGBlock *Succ : Block->succs())
- if (Succ)
- BlocksToVisit.push_back(Succ);
- }
-
- return BlockReachable;
-}
-
-llvm::Expected<ControlFlowContext>
-ControlFlowContext::build(const FunctionDecl &Func) {
- if (!Func.doesThisDeclarationHaveABody())
- return llvm::createStringError(
- std::make_error_code(std::errc::invalid_argument),
- "Cannot analyze function without a body");
-
- return build(Func, *Func.getBody(), Func.getASTContext());
-}
-
-llvm::Expected<ControlFlowContext>
-ControlFlowContext::build(const Decl &D, Stmt &S, ASTContext &C) {
- if (D.isTemplated())
- return llvm::createStringError(
- std::make_error_code(std::errc::invalid_argument),
- "Cannot analyze templated declarations");
-
- // The shape of certain elements of the AST can vary depending on the
- // language. We currently only support C++.
- if (!C.getLangOpts().CPlusPlus)
- return llvm::createStringError(
- std::make_error_code(std::errc::invalid_argument),
- "Can only analyze C++");
-
- CFG::BuildOptions Options;
- Options.PruneTriviallyFalseEdges = true;
- Options.AddImplicitDtors = true;
- Options.AddTemporaryDtors = true;
- Options.AddInitializers = true;
- Options.AddCXXDefaultInitExprInCtors = true;
- Options.AddLifetime = true;
-
- // Ensure that all sub-expressions in basic blocks are evaluated.
- Options.setAllAlwaysAdd();
-
- auto Cfg = CFG::buildCFG(&D, &S, &C, Options);
- if (Cfg == nullptr)
- return llvm::createStringError(
- std::make_error_code(std::errc::invalid_argument),
- "CFG::buildCFG failed");
-
- llvm::DenseMap<const Stmt *, const CFGBlock *> StmtToBlock =
- buildStmtToBasicBlockMap(*Cfg);
-
- llvm::BitVector BlockReachable = findReachableBlocks(*Cfg);
-
- return ControlFlowContext(D, std::move(Cfg), std::move(StmtToBlock),
- std::move(BlockReachable));
-}
-
-} // namespace dataflow
-} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp
index f4c4af022f51..4b86daa56d7b 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp
@@ -14,6 +14,7 @@
#include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/Analysis/FlowSensitive/ASTOps.h"
#include "clang/Analysis/FlowSensitive/DebugSupport.h"
#include "clang/Analysis/FlowSensitive/Formula.h"
#include "clang/Analysis/FlowSensitive/Logger.h"
@@ -169,7 +170,7 @@ DataflowAnalysisContext::joinFlowConditions(Atom FirstToken,
Solver::Result DataflowAnalysisContext::querySolver(
llvm::SetVector<const Formula *> Constraints) {
- return S->solve(Constraints.getArrayRef());
+ return S.solve(Constraints.getArrayRef());
}
bool DataflowAnalysisContext::flowConditionImplies(Atom Token,
@@ -288,8 +289,8 @@ void DataflowAnalysisContext::dumpFlowCondition(Atom Token,
}
}
-const ControlFlowContext *
-DataflowAnalysisContext::getControlFlowContext(const FunctionDecl *F) {
+const AdornedCFG *
+DataflowAnalysisContext::getAdornedCFG(const FunctionDecl *F) {
// Canonicalize the key:
F = F->getDefinition();
if (F == nullptr)
@@ -299,10 +300,10 @@ DataflowAnalysisContext::getControlFlowContext(const FunctionDecl *F) {
return &It->second;
if (F->doesThisDeclarationHaveABody()) {
- auto CFCtx = ControlFlowContext::build(*F);
+ auto ACFG = AdornedCFG::build(*F);
// FIXME: Handle errors.
- assert(CFCtx);
- auto Result = FunctionContexts.insert({F, std::move(*CFCtx)});
+ assert(ACFG);
+ auto Result = FunctionContexts.insert({F, std::move(*ACFG)});
return &Result.first->second;
}
@@ -337,10 +338,10 @@ static std::unique_ptr<Logger> makeLoggerFromCommandLine() {
return Logger::html(std::move(StreamFactory));
}
-DataflowAnalysisContext::DataflowAnalysisContext(std::unique_ptr<Solver> S,
- Options Opts)
- : S(std::move(S)), A(std::make_unique<Arena>()), Opts(Opts) {
- assert(this->S != nullptr);
+DataflowAnalysisContext::DataflowAnalysisContext(
+ Solver &S, std::unique_ptr<Solver> &&OwnedSolver, Options Opts)
+ : S(S), OwnedSolver(std::move(OwnedSolver)), A(std::make_unique<Arena>()),
+ Opts(Opts) {
// If the -dataflow-log command-line flag was set, synthesize a logger.
// This is ugly but provides a uniform method for ad-hoc debugging dataflow-
// based tools.
@@ -359,55 +360,3 @@ DataflowAnalysisContext::~DataflowAnalysisContext() = default;
} // namespace dataflow
} // namespace clang
-
-using namespace clang;
-
-const Expr &clang::dataflow::ignoreCFGOmittedNodes(const Expr &E) {
- const Expr *Current = &E;
- if (auto *EWC = dyn_cast<ExprWithCleanups>(Current)) {
- Current = EWC->getSubExpr();
- assert(Current != nullptr);
- }
- Current = Current->IgnoreParens();
- assert(Current != nullptr);
- return *Current;
-}
-
-const Stmt &clang::dataflow::ignoreCFGOmittedNodes(const Stmt &S) {
- if (auto *E = dyn_cast<Expr>(&S))
- return ignoreCFGOmittedNodes(*E);
- return S;
-}
-
-// FIXME: Does not precisely handle non-virtual diamond inheritance. A single
-// field decl will be modeled for all instances of the inherited field.
-static void getFieldsFromClassHierarchy(QualType Type,
- clang::dataflow::FieldSet &Fields) {
- if (Type->isIncompleteType() || Type->isDependentType() ||
- !Type->isRecordType())
- return;
-
- for (const FieldDecl *Field : Type->getAsRecordDecl()->fields())
- Fields.insert(Field);
- if (auto *CXXRecord = Type->getAsCXXRecordDecl())
- for (const CXXBaseSpecifier &Base : CXXRecord->bases())
- getFieldsFromClassHierarchy(Base.getType(), Fields);
-}
-
-/// Gets the set of all fields in the type.
-clang::dataflow::FieldSet clang::dataflow::getObjectFields(QualType Type) {
- FieldSet Fields;
- getFieldsFromClassHierarchy(Type, Fields);
- return Fields;
-}
-
-bool clang::dataflow::containsSameFields(
- const clang::dataflow::FieldSet &Fields,
- const clang::dataflow::RecordStorageLocation::FieldToLoc &FieldLocs) {
- if (Fields.size() != FieldLocs.size())
- return false;
- for ([[maybe_unused]] auto [Field, Loc] : FieldLocs)
- if (!Fields.contains(cast_or_null<FieldDecl>(Field)))
- return false;
- return true;
-}
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
index 196a1360a775..8d7fe1848821 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
@@ -15,17 +15,28 @@
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/Stmt.h"
#include "clang/AST/Type.h"
+#include "clang/Analysis/FlowSensitive/ASTOps.h"
+#include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h"
#include "clang/Analysis/FlowSensitive/DataflowLattice.h"
#include "clang/Analysis/FlowSensitive/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
#include <cassert>
+#include <memory>
#include <utility>
+#define DEBUG_TYPE "dataflow"
+
namespace clang {
namespace dataflow {
@@ -48,6 +59,24 @@ static llvm::DenseMap<const ValueDecl *, StorageLocation *> intersectDeclToLoc(
return Result;
}
+// Performs a join on either `ExprToLoc` or `ExprToVal`.
+// The maps must be consistent in the sense that any entries for the same
+// expression must map to the same location / value. This is the case if we are
+// performing a join for control flow within a full-expression (which is the
+// only case when this function should be used).
+template <typename MapT> MapT joinExprMaps(const MapT &Map1, const MapT &Map2) {
+ MapT Result = Map1;
+
+ for (const auto &Entry : Map2) {
+ [[maybe_unused]] auto [It, Inserted] = Result.insert(Entry);
+ // If there was an existing entry, its value should be the same as for the
+ // entry we were trying to insert.
+ assert(It->second == Entry.second);
+ }
+
+ return Result;
+}
+
// Whether to consider equivalent two values with an unknown relation.
//
// FIXME: this function is a hack enabling unsoundness to support
@@ -58,7 +87,6 @@ static bool equateUnknownValues(Value::Kind K) {
switch (K) {
case Value::Kind::Integer:
case Value::Kind::Pointer:
- case Value::Kind::Record:
return true;
default:
return false;
@@ -86,15 +114,15 @@ static bool compareDistinctValues(QualType Type, Value &Val1,
llvm_unreachable("All cases covered in switch");
}
-/// Attempts to merge distinct values `Val1` and `Val2` in `Env1` and `Env2`,
-/// respectively, of the same type `Type`. Merging generally produces a single
+/// Attempts to join distinct values `Val1` and `Val2` in `Env1` and `Env2`,
+/// respectively, of the same type `Type`. Joining generally produces a single
/// value that (soundly) approximates the two inputs, although the actual
/// meaning depends on `Model`.
-static Value *mergeDistinctValues(QualType Type, Value &Val1,
- const Environment &Env1, Value &Val2,
- const Environment &Env2,
- Environment &MergedEnv,
- Environment::ValueModel &Model) {
+static Value *joinDistinctValues(QualType Type, Value &Val1,
+ const Environment &Env1, Value &Val2,
+ const Environment &Env2,
+ Environment &JoinedEnv,
+ Environment::ValueModel &Model) {
// Join distinct boolean values preserving information about the constraints
// in the respective path conditions.
if (isa<BoolValue>(&Val1) && isa<BoolValue>(&Val2)) {
@@ -113,79 +141,64 @@ static Value *mergeDistinctValues(QualType Type, Value &Val1,
// ```
auto &Expr1 = cast<BoolValue>(Val1).formula();
auto &Expr2 = cast<BoolValue>(Val2).formula();
- auto &A = MergedEnv.arena();
- auto &MergedVal = A.makeAtomRef(A.makeAtom());
- MergedEnv.assume(
+ auto &A = JoinedEnv.arena();
+ auto &JoinedVal = A.makeAtomRef(A.makeAtom());
+ JoinedEnv.assume(
A.makeOr(A.makeAnd(A.makeAtomRef(Env1.getFlowConditionToken()),
- A.makeEquals(MergedVal, Expr1)),
+ A.makeEquals(JoinedVal, Expr1)),
A.makeAnd(A.makeAtomRef(Env2.getFlowConditionToken()),
- A.makeEquals(MergedVal, Expr2))));
- return &A.makeBoolValue(MergedVal);
- }
-
- Value *MergedVal = nullptr;
- if (auto *RecordVal1 = dyn_cast<RecordValue>(&Val1)) {
- auto *RecordVal2 = cast<RecordValue>(&Val2);
-
- if (&RecordVal1->getLoc() == &RecordVal2->getLoc())
- // `RecordVal1` and `RecordVal2` may have different properties associated
- // with them. Create a new `RecordValue` with the same location but
- // without any properties so that we soundly approximate both values. If a
- // particular analysis needs to merge properties, it should do so in
- // `DataflowAnalysis::merge()`.
- MergedVal = &MergedEnv.create<RecordValue>(RecordVal1->getLoc());
- else
- // If the locations for the two records are different, need to create a
- // completely new value.
- MergedVal = MergedEnv.createValue(Type);
- } else {
- MergedVal = MergedEnv.createValue(Type);
+ A.makeEquals(JoinedVal, Expr2))));
+ return &A.makeBoolValue(JoinedVal);
}
- // FIXME: Consider destroying `MergedValue` immediately if `ValueModel::merge`
- // returns false to avoid storing unneeded values in `DACtx`.
- if (MergedVal)
- if (Model.merge(Type, Val1, Env1, Val2, Env2, *MergedVal, MergedEnv))
- return MergedVal;
+ Value *JoinedVal = JoinedEnv.createValue(Type);
+ if (JoinedVal)
+ Model.join(Type, Val1, Env1, Val2, Env2, *JoinedVal, JoinedEnv);
- return nullptr;
+ return JoinedVal;
}
-// When widening does not change `Current`, return value will equal `&Prev`.
-static Value &widenDistinctValues(QualType Type, Value &Prev,
- const Environment &PrevEnv, Value &Current,
- Environment &CurrentEnv,
- Environment::ValueModel &Model) {
+static WidenResult widenDistinctValues(QualType Type, Value &Prev,
+ const Environment &PrevEnv,
+ Value &Current, Environment &CurrentEnv,
+ Environment::ValueModel &Model) {
// Boolean-model widening.
- if (auto *PrevBool = dyn_cast<BoolValue>(&Prev)) {
- // If previous value was already Top, re-use that to (implicitly) indicate
- // that no change occurred.
+ if (isa<BoolValue>(Prev) && isa<BoolValue>(Current)) {
+ // FIXME: Checking both values should be unnecessary, but we can currently
+ // end up with `BoolValue`s in integer-typed variables. See comment in
+ // `joinDistinctValues()` for details.
+ auto &PrevBool = cast<BoolValue>(Prev);
+ auto &CurBool = cast<BoolValue>(Current);
+
if (isa<TopBoolValue>(Prev))
- return Prev;
+ // Safe to return `Prev` here, because Top is never dependent on the
+ // environment.
+ return {&Prev, LatticeEffect::Unchanged};
// We may need to widen to Top, but before we do so, check whether both
// values are implied to be either true or false in the current environment.
// In that case, we can simply return a literal instead.
- auto &CurBool = cast<BoolValue>(Current);
- bool TruePrev = PrevEnv.proves(PrevBool->formula());
+ bool TruePrev = PrevEnv.proves(PrevBool.formula());
bool TrueCur = CurrentEnv.proves(CurBool.formula());
if (TruePrev && TrueCur)
- return CurrentEnv.getBoolLiteralValue(true);
+ return {&CurrentEnv.getBoolLiteralValue(true), LatticeEffect::Unchanged};
if (!TruePrev && !TrueCur &&
- PrevEnv.proves(PrevEnv.arena().makeNot(PrevBool->formula())) &&
+ PrevEnv.proves(PrevEnv.arena().makeNot(PrevBool.formula())) &&
CurrentEnv.proves(CurrentEnv.arena().makeNot(CurBool.formula())))
- return CurrentEnv.getBoolLiteralValue(false);
+ return {&CurrentEnv.getBoolLiteralValue(false), LatticeEffect::Unchanged};
- return CurrentEnv.makeTopBoolValue();
+ return {&CurrentEnv.makeTopBoolValue(), LatticeEffect::Changed};
}
// FIXME: Add other built-in model widening.
// Custom-model widening.
- if (auto *W = Model.widen(Type, Prev, PrevEnv, Current, CurrentEnv))
- return *W;
+ if (auto Result = Model.widen(Type, Prev, PrevEnv, Current, CurrentEnv))
+ return *Result;
- return equateUnknownValues(Prev.getKind()) ? Prev : Current;
+ return {&Current, equateUnknownValues(Prev.getKind())
+ ? LatticeEffect::Unchanged
+ : LatticeEffect::Changed};
}
// Returns whether the values in `Map1` and `Map2` compare equal for those
@@ -235,14 +248,9 @@ joinLocToVal(const llvm::MapVector<const StorageLocation *, Value *> &LocToVal,
continue;
assert(It->second != nullptr);
- if (areEquivalentValues(*Val, *It->second)) {
- Result.insert({Loc, Val});
- continue;
- }
-
- if (Value *MergedVal = mergeDistinctValues(
- Loc->getType(), *Val, Env1, *It->second, Env2, JoinedEnv, Model)) {
- Result.insert({Loc, MergedVal});
+ if (Value *JoinedVal = Environment::joinValues(
+ Loc->getType(), Val, Env1, It->second, Env2, JoinedEnv, Model)) {
+ Result.insert({Loc, JoinedVal});
}
}
@@ -256,7 +264,7 @@ llvm::MapVector<Key, Value *>
widenKeyToValueMap(const llvm::MapVector<Key, Value *> &CurMap,
const llvm::MapVector<Key, Value *> &PrevMap,
Environment &CurEnv, const Environment &PrevEnv,
- Environment::ValueModel &Model, LatticeJoinEffect &Effect) {
+ Environment::ValueModel &Model, LatticeEffect &Effect) {
llvm::MapVector<Key, Value *> WidenedMap;
for (auto &Entry : CurMap) {
Key K = Entry.first;
@@ -275,199 +283,309 @@ widenKeyToValueMap(const llvm::MapVector<Key, Value *> &CurMap,
continue;
}
- Value &WidenedVal = widenDistinctValues(K->getType(), *PrevIt->second,
- PrevEnv, *Val, CurEnv, Model);
- WidenedMap.insert({K, &WidenedVal});
- if (&WidenedVal != PrevIt->second)
- Effect = LatticeJoinEffect::Changed;
+ auto [WidenedVal, ValEffect] = widenDistinctValues(
+ K->getType(), *PrevIt->second, PrevEnv, *Val, CurEnv, Model);
+ WidenedMap.insert({K, WidenedVal});
+ if (ValEffect == LatticeEffect::Changed)
+ Effect = LatticeEffect::Changed;
}
return WidenedMap;
}
-/// Initializes a global storage value.
-static void insertIfGlobal(const Decl &D,
- llvm::DenseSet<const VarDecl *> &Vars) {
- if (auto *V = dyn_cast<VarDecl>(&D))
- if (V->hasGlobalStorage())
- Vars.insert(V);
-}
+namespace {
+
+// Visitor that builds a map from record prvalues to result objects.
+// For each result object that it encounters, it propagates the storage location
+// of the result object to all record prvalues that can initialize it.
+class ResultObjectVisitor : public AnalysisASTVisitor<ResultObjectVisitor> {
+public:
+ // `ResultObjectMap` will be filled with a map from record prvalues to result
+ // object. If this visitor will traverse a function that returns a record by
+ // value, `LocForRecordReturnVal` is the location to which this record should
+ // be written; otherwise, it is null.
+ explicit ResultObjectVisitor(
+ llvm::DenseMap<const Expr *, RecordStorageLocation *> &ResultObjectMap,
+ RecordStorageLocation *LocForRecordReturnVal,
+ DataflowAnalysisContext &DACtx)
+ : ResultObjectMap(ResultObjectMap),
+ LocForRecordReturnVal(LocForRecordReturnVal), DACtx(DACtx) {}
+
+ // Traverse all member and base initializers of `Ctor`. This function is not
+ // called by `RecursiveASTVisitor`; it should be called manually if we are
+ // analyzing a constructor. `ThisPointeeLoc` is the storage location that
+ // `this` points to.
+ void TraverseConstructorInits(const CXXConstructorDecl *Ctor,
+ RecordStorageLocation *ThisPointeeLoc) {
+ assert(ThisPointeeLoc != nullptr);
+ for (const CXXCtorInitializer *Init : Ctor->inits()) {
+ Expr *InitExpr = Init->getInit();
+ if (FieldDecl *Field = Init->getMember();
+ Field != nullptr && Field->getType()->isRecordType()) {
+ PropagateResultObject(InitExpr, cast<RecordStorageLocation>(
+ ThisPointeeLoc->getChild(*Field)));
+ } else if (Init->getBaseClass()) {
+ PropagateResultObject(InitExpr, ThisPointeeLoc);
+ }
-static void insertIfFunction(const Decl &D,
- llvm::DenseSet<const FunctionDecl *> &Funcs) {
- if (auto *FD = dyn_cast<FunctionDecl>(&D))
- Funcs.insert(FD);
-}
+ // Ensure that any result objects within `InitExpr` (e.g. temporaries)
+ // are also propagated to the prvalues that initialize them.
+ TraverseStmt(InitExpr);
-static MemberExpr *getMemberForAccessor(const CXXMemberCallExpr &C) {
- // Use getCalleeDecl instead of getMethodDecl in order to handle
- // pointer-to-member calls.
- const auto *MethodDecl = dyn_cast_or_null<CXXMethodDecl>(C.getCalleeDecl());
- if (!MethodDecl)
- return nullptr;
- auto *Body = dyn_cast_or_null<CompoundStmt>(MethodDecl->getBody());
- if (!Body || Body->size() != 1)
- return nullptr;
- if (auto *RS = dyn_cast<ReturnStmt>(*Body->body_begin()))
- if (auto *Return = RS->getRetValue())
- return dyn_cast<MemberExpr>(Return->IgnoreParenImpCasts());
- return nullptr;
-}
+ // If this is a `CXXDefaultInitExpr`, also propagate any result objects
+ // within the default expression.
+ if (auto *DefaultInit = dyn_cast<CXXDefaultInitExpr>(InitExpr))
+ TraverseStmt(DefaultInit->getExpr());
+ }
+ }
-static void
-getFieldsGlobalsAndFuncs(const Decl &D, FieldSet &Fields,
- llvm::DenseSet<const VarDecl *> &Vars,
- llvm::DenseSet<const FunctionDecl *> &Funcs) {
- insertIfGlobal(D, Vars);
- insertIfFunction(D, Funcs);
- if (const auto *Decomp = dyn_cast<DecompositionDecl>(&D))
- for (const auto *B : Decomp->bindings())
- if (auto *ME = dyn_cast_or_null<MemberExpr>(B->getBinding()))
- // FIXME: should we be using `E->getFoundDecl()`?
- if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()))
- Fields.insert(FD);
-}
+ bool VisitVarDecl(VarDecl *VD) {
+ if (VD->getType()->isRecordType() && VD->hasInit())
+ PropagateResultObject(
+ VD->getInit(),
+ &cast<RecordStorageLocation>(DACtx.getStableStorageLocation(*VD)));
+ return true;
+ }
-/// Traverses `S` and inserts into `Fields`, `Vars` and `Funcs` any fields,
-/// global variables and functions that are declared in or referenced from
-/// sub-statements.
-static void
-getFieldsGlobalsAndFuncs(const Stmt &S, FieldSet &Fields,
- llvm::DenseSet<const VarDecl *> &Vars,
- llvm::DenseSet<const FunctionDecl *> &Funcs) {
- for (auto *Child : S.children())
- if (Child != nullptr)
- getFieldsGlobalsAndFuncs(*Child, Fields, Vars, Funcs);
- if (const auto *DefaultInit = dyn_cast<CXXDefaultInitExpr>(&S))
- getFieldsGlobalsAndFuncs(*DefaultInit->getExpr(), Fields, Vars, Funcs);
-
- if (auto *DS = dyn_cast<DeclStmt>(&S)) {
- if (DS->isSingleDecl())
- getFieldsGlobalsAndFuncs(*DS->getSingleDecl(), Fields, Vars, Funcs);
- else
- for (auto *D : DS->getDeclGroup())
- getFieldsGlobalsAndFuncs(*D, Fields, Vars, Funcs);
- } else if (auto *E = dyn_cast<DeclRefExpr>(&S)) {
- insertIfGlobal(*E->getDecl(), Vars);
- insertIfFunction(*E->getDecl(), Funcs);
- } else if (const auto *C = dyn_cast<CXXMemberCallExpr>(&S)) {
- // If this is a method that returns a member variable but does nothing else,
- // model the field of the return value.
- if (MemberExpr *E = getMemberForAccessor(*C))
- if (const auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl()))
- Fields.insert(FD);
- } else if (auto *E = dyn_cast<MemberExpr>(&S)) {
- // FIXME: should we be using `E->getFoundDecl()`?
- const ValueDecl *VD = E->getMemberDecl();
- insertIfGlobal(*VD, Vars);
- insertIfFunction(*VD, Funcs);
- if (const auto *FD = dyn_cast<FieldDecl>(VD))
- Fields.insert(FD);
- } else if (auto *InitList = dyn_cast<InitListExpr>(&S)) {
- if (RecordDecl *RD = InitList->getType()->getAsRecordDecl())
- for (const auto *FD : getFieldsForInitListExpr(RD))
- Fields.insert(FD);
+ bool VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *MTE) {
+ if (MTE->getType()->isRecordType())
+ PropagateResultObject(
+ MTE->getSubExpr(),
+ &cast<RecordStorageLocation>(DACtx.getStableStorageLocation(*MTE)));
+ return true;
}
-}
-Environment::Environment(DataflowAnalysisContext &DACtx)
- : DACtx(&DACtx),
- FlowConditionToken(DACtx.arena().makeFlowConditionToken()) {}
+ bool VisitReturnStmt(ReturnStmt *Return) {
+ Expr *RetValue = Return->getRetValue();
+ if (RetValue != nullptr && RetValue->getType()->isRecordType() &&
+ RetValue->isPRValue())
+ PropagateResultObject(RetValue, LocForRecordReturnVal);
+ return true;
+ }
-Environment::Environment(DataflowAnalysisContext &DACtx,
- const DeclContext &DeclCtx)
- : Environment(DACtx) {
- CallStack.push_back(&DeclCtx);
-}
+ bool VisitExpr(Expr *E) {
+ // Clang's AST can have record-type prvalues without a result object -- for
+ // example as full-expressions contained in a compound statement or as
+ // arguments of call expressions. We notice this if we get here and a
+ // storage location has not yet been associated with `E`. In this case,
+ // treat this as if it was a `MaterializeTemporaryExpr`.
+ if (E->isPRValue() && E->getType()->isRecordType() &&
+ !ResultObjectMap.contains(E))
+ PropagateResultObject(
+ E, &cast<RecordStorageLocation>(DACtx.getStableStorageLocation(*E)));
+ return true;
+ }
+
+ void
+ PropagateResultObjectToRecordInitList(const RecordInitListHelper &InitList,
+ RecordStorageLocation *Loc) {
+ for (auto [Base, Init] : InitList.base_inits()) {
+ assert(Base->getType().getCanonicalType() ==
+ Init->getType().getCanonicalType());
+
+ // Storage location for the base class is the same as that of the
+ // derived class because we "flatten" the object hierarchy and put all
+ // fields in `RecordStorageLocation` of the derived class.
+ PropagateResultObject(Init, Loc);
+ }
+
+ for (auto [Field, Init] : InitList.field_inits()) {
+ // Fields of non-record type are handled in
+ // `TransferVisitor::VisitInitListExpr()`.
+ if (Field->getType()->isRecordType())
+ PropagateResultObject(
+ Init, cast<RecordStorageLocation>(Loc->getChild(*Field)));
+ }
+ }
+
+ // Assigns `Loc` as the result object location of `E`, then propagates the
+ // location to all lower-level prvalues that initialize the same object as
+ // `E` (or one of its base classes or member variables).
+ void PropagateResultObject(Expr *E, RecordStorageLocation *Loc) {
+ if (!E->isPRValue() || !E->getType()->isRecordType()) {
+ assert(false);
+ // Ensure we don't propagate the result object if we hit this in a
+ // release build.
+ return;
+ }
+
+ ResultObjectMap[E] = Loc;
+
+ // The following AST node kinds are "original initializers": They are the
+ // lowest-level AST node that initializes a given object, and nothing
+ // below them can initialize the same object (or part of it).
+ if (isa<CXXConstructExpr>(E) || isa<CallExpr>(E) || isa<LambdaExpr>(E) ||
+ isa<CXXDefaultArgExpr>(E) || isa<CXXStdInitializerListExpr>(E) ||
+ isa<AtomicExpr>(E) ||
+ // We treat `BuiltinBitCastExpr` as an "original initializer" too as
+ // it may not even be casting from a record type -- and even if it is,
+ // the two objects are in general of unrelated type.
+ isa<BuiltinBitCastExpr>(E)) {
+ return;
+ }
+ if (auto *Op = dyn_cast<BinaryOperator>(E);
+ Op && Op->getOpcode() == BO_Cmp) {
+ // Builtin `<=>` returns a `std::strong_ordering` object.
+ return;
+ }
+
+ if (auto *InitList = dyn_cast<InitListExpr>(E)) {
+ if (!InitList->isSemanticForm())
+ return;
+ if (InitList->isTransparent()) {
+ PropagateResultObject(InitList->getInit(0), Loc);
+ return;
+ }
+
+ PropagateResultObjectToRecordInitList(RecordInitListHelper(InitList),
+ Loc);
+ return;
+ }
+
+ if (auto *ParenInitList = dyn_cast<CXXParenListInitExpr>(E)) {
+ PropagateResultObjectToRecordInitList(RecordInitListHelper(ParenInitList),
+ Loc);
+ return;
+ }
+
+ if (auto *Op = dyn_cast<BinaryOperator>(E); Op && Op->isCommaOp()) {
+ PropagateResultObject(Op->getRHS(), Loc);
+ return;
+ }
+
+ if (auto *Cond = dyn_cast<AbstractConditionalOperator>(E)) {
+ PropagateResultObject(Cond->getTrueExpr(), Loc);
+ PropagateResultObject(Cond->getFalseExpr(), Loc);
+ return;
+ }
+
+ if (auto *SE = dyn_cast<StmtExpr>(E)) {
+ PropagateResultObject(cast<Expr>(SE->getSubStmt()->body_back()), Loc);
+ return;
+ }
+
+ if (auto *DIE = dyn_cast<CXXDefaultInitExpr>(E)) {
+ PropagateResultObject(DIE->getExpr(), Loc);
+ return;
+ }
+
+ // All other expression nodes that propagate a record prvalue should have
+ // exactly one child.
+ SmallVector<Stmt *, 1> Children(E->child_begin(), E->child_end());
+ LLVM_DEBUG({
+ if (Children.size() != 1)
+ E->dump();
+ });
+ assert(Children.size() == 1);
+ for (Stmt *S : Children)
+ PropagateResultObject(cast<Expr>(S), Loc);
+ }
+
+private:
+ llvm::DenseMap<const Expr *, RecordStorageLocation *> &ResultObjectMap;
+ RecordStorageLocation *LocForRecordReturnVal;
+ DataflowAnalysisContext &DACtx;
+};
+
+} // namespace
void Environment::initialize() {
- const DeclContext *DeclCtx = getDeclCtx();
- if (DeclCtx == nullptr)
+ if (InitialTargetStmt == nullptr)
return;
- if (const auto *FuncDecl = dyn_cast<FunctionDecl>(DeclCtx)) {
- assert(FuncDecl->doesThisDeclarationHaveABody());
+ if (InitialTargetFunc == nullptr) {
+ initFieldsGlobalsAndFuncs(getReferencedDecls(*InitialTargetStmt));
+ ResultObjectMap =
+ std::make_shared<PrValueToResultObject>(buildResultObjectMap(
+ DACtx, InitialTargetStmt, getThisPointeeStorageLocation(),
+ /*LocForRecordReturnValue=*/nullptr));
+ return;
+ }
- initFieldsGlobalsAndFuncs(FuncDecl);
+ initFieldsGlobalsAndFuncs(getReferencedDecls(*InitialTargetFunc));
- for (const auto *ParamDecl : FuncDecl->parameters()) {
- assert(ParamDecl != nullptr);
- setStorageLocation(*ParamDecl, createObject(*ParamDecl, nullptr));
- }
+ for (const auto *ParamDecl : InitialTargetFunc->parameters()) {
+ assert(ParamDecl != nullptr);
+ setStorageLocation(*ParamDecl, createObject(*ParamDecl, nullptr));
}
- if (const auto *MethodDecl = dyn_cast<CXXMethodDecl>(DeclCtx)) {
+ if (InitialTargetFunc->getReturnType()->isRecordType())
+ LocForRecordReturnVal = &cast<RecordStorageLocation>(
+ createStorageLocation(InitialTargetFunc->getReturnType()));
+
+ if (const auto *MethodDecl = dyn_cast<CXXMethodDecl>(InitialTargetFunc)) {
auto *Parent = MethodDecl->getParent();
assert(Parent != nullptr);
if (Parent->isLambda()) {
- for (auto Capture : Parent->captures()) {
+ for (const auto &Capture : Parent->captures()) {
if (Capture.capturesVariable()) {
const auto *VarDecl = Capture.getCapturedVar();
assert(VarDecl != nullptr);
setStorageLocation(*VarDecl, createObject(*VarDecl, nullptr));
} else if (Capture.capturesThis()) {
- const auto *SurroundingMethodDecl =
- cast<CXXMethodDecl>(DeclCtx->getNonClosureAncestor());
- QualType ThisPointeeType =
- SurroundingMethodDecl->getFunctionObjectParameterType();
- setThisPointeeStorageLocation(
- cast<RecordValue>(createValue(ThisPointeeType))->getLoc());
+ if (auto *Ancestor = InitialTargetFunc->getNonClosureAncestor()) {
+ const auto *SurroundingMethodDecl = cast<CXXMethodDecl>(Ancestor);
+ QualType ThisPointeeType =
+ SurroundingMethodDecl->getFunctionObjectParameterType();
+ setThisPointeeStorageLocation(
+ cast<RecordStorageLocation>(createObject(ThisPointeeType)));
+ } else if (auto *FieldBeingInitialized =
+ dyn_cast<FieldDecl>(Parent->getLambdaContextDecl())) {
+ // This is in a field initializer, rather than a method.
+ setThisPointeeStorageLocation(
+ cast<RecordStorageLocation>(createObject(QualType(
+ FieldBeingInitialized->getParent()->getTypeForDecl(), 0))));
+ } else {
+ assert(false && "Unexpected this-capturing lambda context.");
+ }
}
}
} else if (MethodDecl->isImplicitObjectMemberFunction()) {
QualType ThisPointeeType = MethodDecl->getFunctionObjectParameterType();
- setThisPointeeStorageLocation(
- cast<RecordValue>(createValue(ThisPointeeType))->getLoc());
+ auto &ThisLoc =
+ cast<RecordStorageLocation>(createStorageLocation(ThisPointeeType));
+ setThisPointeeStorageLocation(ThisLoc);
+ // Initialize fields of `*this` with values, but only if we're not
+ // analyzing a constructor; after all, it's the constructor's job to do
+ // this (and we want to be able to test that).
+ if (!isa<CXXConstructorDecl>(MethodDecl))
+ initializeFieldsWithValues(ThisLoc);
}
}
+
+ // We do this below the handling of `CXXMethodDecl` above so that we can
+ // be sure that the storage location for `this` has been set.
+ ResultObjectMap =
+ std::make_shared<PrValueToResultObject>(buildResultObjectMap(
+ DACtx, InitialTargetFunc, getThisPointeeStorageLocation(),
+ LocForRecordReturnVal));
}
-// FIXME: Add support for resetting globals after function calls to enable
-// the implementation of sound analyses.
-void Environment::initFieldsGlobalsAndFuncs(const FunctionDecl *FuncDecl) {
- assert(FuncDecl->doesThisDeclarationHaveABody());
-
- FieldSet Fields;
- llvm::DenseSet<const VarDecl *> Vars;
- llvm::DenseSet<const FunctionDecl *> Funcs;
-
- // Look for global variable and field references in the
- // constructor-initializers.
- if (const auto *CtorDecl = dyn_cast<CXXConstructorDecl>(FuncDecl)) {
- for (const auto *Init : CtorDecl->inits()) {
- if (Init->isMemberInitializer()) {
- Fields.insert(Init->getMember());
- } else if (Init->isIndirectMemberInitializer()) {
- for (const auto *I : Init->getIndirectMember()->chain())
- Fields.insert(cast<FieldDecl>(I));
- }
- const Expr *E = Init->getInit();
- assert(E != nullptr);
- getFieldsGlobalsAndFuncs(*E, Fields, Vars, Funcs);
- }
- // Add all fields mentioned in default member initializers.
- for (const FieldDecl *F : CtorDecl->getParent()->fields())
- if (const auto *I = F->getInClassInitializer())
- getFieldsGlobalsAndFuncs(*I, Fields, Vars, Funcs);
- }
- getFieldsGlobalsAndFuncs(*FuncDecl->getBody(), Fields, Vars, Funcs);
+// FIXME: Add support for resetting globals after function calls to enable the
+// implementation of sound analyses.
+void Environment::initFieldsGlobalsAndFuncs(const ReferencedDecls &Referenced) {
// These have to be added before the lines that follow to ensure that
// `create*` work correctly for structs.
- DACtx->addModeledFields(Fields);
+ DACtx->addModeledFields(Referenced.Fields);
- for (const VarDecl *D : Vars) {
+ for (const VarDecl *D : Referenced.Globals) {
if (getStorageLocation(*D) != nullptr)
continue;
- setStorageLocation(*D, createObject(*D));
+ // We don't run transfer functions on the initializers of global variables,
+ // so they won't be associated with a value or storage location. We
+ // therefore intentionally don't pass an initializer to `createObject()`; in
+ // particular, this ensures that `createObject()` will initialize the fields
+ // of record-type variables with values.
+ setStorageLocation(*D, createObject(*D, nullptr));
}
- for (const FunctionDecl *FD : Funcs) {
+ for (const FunctionDecl *FD : Referenced.Functions) {
if (getStorageLocation(*FD) != nullptr)
continue;
- auto &Loc = createStorageLocation(FD->getType());
+ auto &Loc = createStorageLocation(*FD);
setStorageLocation(*FD, Loc);
}
}
@@ -479,8 +597,8 @@ Environment Environment::fork() const {
}
bool Environment::canDescend(unsigned MaxDepth,
- const DeclContext *Callee) const {
- return CallStack.size() <= MaxDepth && !llvm::is_contained(CallStack, Callee);
+ const FunctionDecl *Callee) const {
+ return CallStack.size() < MaxDepth && !llvm::is_contained(CallStack, Callee);
}
Environment Environment::pushCall(const CallExpr *Call) const {
@@ -489,13 +607,16 @@ Environment Environment::pushCall(const CallExpr *Call) const {
if (const auto *MethodCall = dyn_cast<CXXMemberCallExpr>(Call)) {
if (const Expr *Arg = MethodCall->getImplicitObjectArgument()) {
if (!isa<CXXThisExpr>(Arg))
- Env.ThisPointeeLoc =
- cast<RecordStorageLocation>(getStorageLocation(*Arg));
+ Env.ThisPointeeLoc =
+ cast<RecordStorageLocation>(getStorageLocation(*Arg));
// Otherwise (when the argument is `this`), retain the current
// environment's `ThisPointeeLoc`.
}
}
+ if (Call->getType()->isRecordType() && Call->isPRValue())
+ Env.LocForRecordReturnVal = &Env.getResultObjectLocation(*Call);
+
Env.pushCallInternal(Call->getDirectCallee(),
llvm::ArrayRef(Call->getArgs(), Call->getNumArgs()));
@@ -506,6 +627,7 @@ Environment Environment::pushCall(const CXXConstructExpr *Call) const {
Environment Env(*this);
Env.ThisPointeeLoc = &Env.getResultObjectLocation(*Call);
+ Env.LocForRecordReturnVal = &Env.getResultObjectLocation(*Call);
Env.pushCallInternal(Call->getConstructor(),
llvm::ArrayRef(Call->getArgs(), Call->getNumArgs()));
@@ -523,7 +645,7 @@ void Environment::pushCallInternal(const FunctionDecl *FuncDecl,
CallStack.push_back(FuncDecl);
- initFieldsGlobalsAndFuncs(FuncDecl);
+ initFieldsGlobalsAndFuncs(getReferencedDecls(*FuncDecl));
const auto *ParamIt = FuncDecl->param_begin();
@@ -534,6 +656,10 @@ void Environment::pushCallInternal(const FunctionDecl *FuncDecl,
const VarDecl *Param = *ParamIt;
setStorageLocation(*Param, createObject(*Param, Args[ArgIndex]));
}
+
+ ResultObjectMap = std::make_shared<PrValueToResultObject>(
+ buildResultObjectMap(DACtx, FuncDecl, getThisPointeeStorageLocation(),
+ LocForRecordReturnVal));
}
void Environment::popCall(const CallExpr *Call, const Environment &CalleeEnv) {
@@ -561,10 +687,6 @@ void Environment::popCall(const CXXConstructExpr *Call,
// See also comment in `popCall(const CallExpr *, const Environment &)` above.
this->LocToVal = std::move(CalleeEnv.LocToVal);
this->FlowConditionToken = std::move(CalleeEnv.FlowConditionToken);
-
- if (Value *Val = CalleeEnv.getValue(*CalleeEnv.ThisPointeeLoc)) {
- setValue(*Call, *Val);
- }
}
bool Environment::equivalentTo(const Environment &Other,
@@ -577,6 +699,9 @@ bool Environment::equivalentTo(const Environment &Other,
if (ReturnLoc != Other.ReturnLoc)
return false;
+ if (LocForRecordReturnVal != Other.LocForRecordReturnVal)
+ return false;
+
if (ThisPointeeLoc != Other.ThisPointeeLoc)
return false;
@@ -595,15 +720,19 @@ bool Environment::equivalentTo(const Environment &Other,
return true;
}
-LatticeJoinEffect Environment::widen(const Environment &PrevEnv,
- Environment::ValueModel &Model) {
+LatticeEffect Environment::widen(const Environment &PrevEnv,
+ Environment::ValueModel &Model) {
assert(DACtx == PrevEnv.DACtx);
assert(ReturnVal == PrevEnv.ReturnVal);
assert(ReturnLoc == PrevEnv.ReturnLoc);
+ assert(LocForRecordReturnVal == PrevEnv.LocForRecordReturnVal);
assert(ThisPointeeLoc == PrevEnv.ThisPointeeLoc);
assert(CallStack == PrevEnv.CallStack);
+ assert(ResultObjectMap == PrevEnv.ResultObjectMap);
+ assert(InitialTargetFunc == PrevEnv.InitialTargetFunc);
+ assert(InitialTargetStmt == PrevEnv.InitialTargetStmt);
- auto Effect = LatticeJoinEffect::Unchanged;
+ auto Effect = LatticeEffect::Unchanged;
// By the API, `PrevEnv` is a previous version of the environment for the same
// block, so we have some guarantees about its shape. In particular, it will
@@ -624,43 +753,38 @@ LatticeJoinEffect Environment::widen(const Environment &PrevEnv,
ExprToLoc.size() != PrevEnv.ExprToLoc.size() ||
ExprToVal.size() != PrevEnv.ExprToVal.size() ||
LocToVal.size() != PrevEnv.LocToVal.size())
- Effect = LatticeJoinEffect::Changed;
+ Effect = LatticeEffect::Changed;
return Effect;
}
Environment Environment::join(const Environment &EnvA, const Environment &EnvB,
- Environment::ValueModel &Model) {
+ Environment::ValueModel &Model,
+ ExprJoinBehavior ExprBehavior) {
assert(EnvA.DACtx == EnvB.DACtx);
+ assert(EnvA.LocForRecordReturnVal == EnvB.LocForRecordReturnVal);
assert(EnvA.ThisPointeeLoc == EnvB.ThisPointeeLoc);
assert(EnvA.CallStack == EnvB.CallStack);
+ assert(EnvA.ResultObjectMap == EnvB.ResultObjectMap);
+ assert(EnvA.InitialTargetFunc == EnvB.InitialTargetFunc);
+ assert(EnvA.InitialTargetStmt == EnvB.InitialTargetStmt);
Environment JoinedEnv(*EnvA.DACtx);
JoinedEnv.CallStack = EnvA.CallStack;
+ JoinedEnv.ResultObjectMap = EnvA.ResultObjectMap;
+ JoinedEnv.LocForRecordReturnVal = EnvA.LocForRecordReturnVal;
JoinedEnv.ThisPointeeLoc = EnvA.ThisPointeeLoc;
+ JoinedEnv.InitialTargetFunc = EnvA.InitialTargetFunc;
+ JoinedEnv.InitialTargetStmt = EnvA.InitialTargetStmt;
- if (EnvA.ReturnVal == nullptr || EnvB.ReturnVal == nullptr) {
- // `ReturnVal` might not always get set -- for example if we have a return
- // statement of the form `return some_other_func()` and we decide not to
- // analyze `some_other_func()`.
- // In this case, we can't say anything about the joined return value -- we
- // don't simply want to propagate the return value that we do have, because
- // it might not be the correct one.
- // This occurs for example in the test `ContextSensitiveMutualRecursion`.
+ const FunctionDecl *Func = EnvA.getCurrentFunc();
+ if (!Func) {
JoinedEnv.ReturnVal = nullptr;
- } else if (areEquivalentValues(*EnvA.ReturnVal, *EnvB.ReturnVal)) {
- JoinedEnv.ReturnVal = EnvA.ReturnVal;
} else {
- assert(!EnvA.CallStack.empty());
- // FIXME: Make `CallStack` a vector of `FunctionDecl` so we don't need this
- // cast.
- auto *Func = dyn_cast<FunctionDecl>(EnvA.CallStack.back());
- assert(Func != nullptr);
- if (Value *MergedVal =
- mergeDistinctValues(Func->getReturnType(), *EnvA.ReturnVal, EnvA,
- *EnvB.ReturnVal, EnvB, JoinedEnv, Model))
- JoinedEnv.ReturnVal = MergedVal;
+ JoinedEnv.ReturnVal =
+ joinValues(Func->getReturnType(), EnvA.ReturnVal, EnvA, EnvB.ReturnVal,
+ EnvB, JoinedEnv, Model);
}
if (EnvA.ReturnLoc == EnvB.ReturnLoc)
@@ -678,13 +802,32 @@ Environment Environment::join(const Environment &EnvA, const Environment &EnvB,
JoinedEnv.LocToVal =
joinLocToVal(EnvA.LocToVal, EnvB.LocToVal, EnvA, EnvB, JoinedEnv, Model);
- // We intentionally leave `JoinedEnv.ExprToLoc` and `JoinedEnv.ExprToVal`
- // empty, as we never need to access entries in these maps outside of the
- // basic block that sets them.
+ if (ExprBehavior == KeepExprState) {
+ JoinedEnv.ExprToVal = joinExprMaps(EnvA.ExprToVal, EnvB.ExprToVal);
+ JoinedEnv.ExprToLoc = joinExprMaps(EnvA.ExprToLoc, EnvB.ExprToLoc);
+ }
return JoinedEnv;
}
+Value *Environment::joinValues(QualType Ty, Value *Val1,
+ const Environment &Env1, Value *Val2,
+ const Environment &Env2, Environment &JoinedEnv,
+ Environment::ValueModel &Model) {
+ if (Val1 == nullptr || Val2 == nullptr)
+ // We can't say anything about the joined value -- even if one of the values
+ // is non-null, we don't want to simply propagate it, because it would be
+ // too specific: Because the other value is null, that means we have no
+ // information at all about the value (i.e. the value is unconstrained).
+ return nullptr;
+
+ if (areEquivalentValues(*Val1, *Val2))
+ // Arbitrarily return one of the two values.
+ return Val1;
+
+ return joinDistinctValues(Ty, *Val1, Env1, *Val2, Env2, JoinedEnv, Model);
+}
+
StorageLocation &Environment::createStorageLocation(QualType Type) {
return DACtx->createStorageLocation(Type);
}
@@ -705,6 +848,12 @@ StorageLocation &Environment::createStorageLocation(const Expr &E) {
void Environment::setStorageLocation(const ValueDecl &D, StorageLocation &Loc) {
assert(!DeclToLoc.contains(&D));
+ // The only kinds of declarations that may have a "variable" storage location
+ // are declarations of reference type and `BindingDecl`. For all other
+ // declaration, the storage location should be the stable storage location
+ // returned by `createStorageLocation()`.
+ assert(D.getType()->isReferenceType() || isa<BindingDecl>(D) ||
+ &Loc == &createStorageLocation(D));
DeclToLoc[&D] = &Loc;
}
@@ -739,88 +888,55 @@ StorageLocation *Environment::getStorageLocation(const Expr &E) const {
return It == ExprToLoc.end() ? nullptr : &*It->second;
}
-// Returns whether a prvalue of record type is the one that originally
-// constructs the object (i.e. it doesn't propagate it from one of its
-// children).
-static bool isOriginalRecordConstructor(const Expr &RecordPRValue) {
- if (auto *Init = dyn_cast<InitListExpr>(&RecordPRValue))
- return !Init->isSemanticForm() || !Init->isTransparent();
- return isa<CXXConstructExpr>(RecordPRValue) || isa<CallExpr>(RecordPRValue) ||
- isa<LambdaExpr>(RecordPRValue) ||
- isa<CXXDefaultInitExpr>(RecordPRValue) ||
- // The framework currently does not propagate the objects created in
- // the two branches of a `ConditionalOperator` because there is no way
- // to reconcile their storage locations, which are different. We
- // therefore claim that the `ConditionalOperator` is the expression
- // that originally constructs the object.
- // Ultimately, this will be fixed by propagating locations down from
- // the result object, rather than up from the original constructor as
- // we do now (see also the FIXME in the documentation for
- // `getResultObjectLocation()`).
- isa<ConditionalOperator>(RecordPRValue);
-}
-
RecordStorageLocation &
Environment::getResultObjectLocation(const Expr &RecordPRValue) const {
assert(RecordPRValue.getType()->isRecordType());
assert(RecordPRValue.isPRValue());
- // Returns a storage location that we can use if assertions fail.
- auto FallbackForAssertFailure =
- [this, &RecordPRValue]() -> RecordStorageLocation & {
+ assert(ResultObjectMap != nullptr);
+ RecordStorageLocation *Loc = ResultObjectMap->lookup(&RecordPRValue);
+ assert(Loc != nullptr);
+ // In release builds, use the "stable" storage location if the map lookup
+ // failed.
+ if (Loc == nullptr)
return cast<RecordStorageLocation>(
DACtx->getStableStorageLocation(RecordPRValue));
- };
-
- if (isOriginalRecordConstructor(RecordPRValue)) {
- auto *Val = cast_or_null<RecordValue>(getValue(RecordPRValue));
- // The builtin transfer function should have created a `RecordValue` for all
- // original record constructors.
- assert(Val);
- if (!Val)
- return FallbackForAssertFailure();
- return Val->getLoc();
- }
-
- if (auto *Op = dyn_cast<BinaryOperator>(&RecordPRValue);
- Op && Op->isCommaOp()) {
- return getResultObjectLocation(*Op->getRHS());
- }
-
- // All other expression nodes that propagate a record prvalue should have
- // exactly one child.
- llvm::SmallVector<const Stmt *> children(RecordPRValue.child_begin(),
- RecordPRValue.child_end());
- assert(children.size() == 1);
- if (children.empty())
- return FallbackForAssertFailure();
-
- return getResultObjectLocation(*cast<Expr>(children[0]));
+ return *Loc;
}
PointerValue &Environment::getOrCreateNullPointerValue(QualType PointeeType) {
return DACtx->getOrCreateNullPointerValue(PointeeType);
}
-void Environment::setValue(const StorageLocation &Loc, Value &Val) {
- assert(!isa<RecordValue>(&Val) || &cast<RecordValue>(&Val)->getLoc() == &Loc);
+void Environment::initializeFieldsWithValues(RecordStorageLocation &Loc,
+ QualType Type) {
+ llvm::DenseSet<QualType> Visited;
+ int CreatedValuesCount = 0;
+ initializeFieldsWithValues(Loc, Type, Visited, 0, CreatedValuesCount);
+ if (CreatedValuesCount > MaxCompositeValueSize) {
+ llvm::errs() << "Attempting to initialize a huge value of type: " << Type
+ << '\n';
+ }
+}
+void Environment::setValue(const StorageLocation &Loc, Value &Val) {
+ // Records should not be associated with values.
+ assert(!isa<RecordStorageLocation>(Loc));
LocToVal[&Loc] = &Val;
}
void Environment::setValue(const Expr &E, Value &Val) {
const Expr &CanonE = ignoreCFGOmittedNodes(E);
- if (auto *RecordVal = dyn_cast<RecordValue>(&Val)) {
- assert(isOriginalRecordConstructor(CanonE) ||
- &RecordVal->getLoc() == &getResultObjectLocation(CanonE));
- }
-
assert(CanonE.isPRValue());
+ // Records should not be associated with values.
+ assert(!CanonE.getType()->isRecordType());
ExprToVal[&CanonE] = &Val;
}
Value *Environment::getValue(const StorageLocation &Loc) const {
+ // Records should not be associated with values.
+ assert(!isa<RecordStorageLocation>(Loc));
return LocToVal.lookup(&Loc);
}
@@ -832,6 +948,9 @@ Value *Environment::getValue(const ValueDecl &D) const {
}
Value *Environment::getValue(const Expr &E) const {
+ // Records should not be associated with values.
+ assert(!E.getType()->isRecordType());
+
if (E.isPRValue()) {
auto It = ExprToVal.find(&ignoreCFGOmittedNodes(E));
return It == ExprToVal.end() ? nullptr : It->second;
@@ -860,6 +979,7 @@ Value *Environment::createValueUnlessSelfReferential(
int &CreatedValuesCount) {
assert(!Type.isNull());
assert(!Type->isReferenceType());
+ assert(!Type->isRecordType());
// Allow unlimited fields at depth 1; only cap at deeper nesting levels.
if ((Depth > 1 && CreatedValuesCount > MaxCompositeValueSize) ||
@@ -888,38 +1008,6 @@ Value *Environment::createValueUnlessSelfReferential(
return &arena().create<PointerValue>(PointeeLoc);
}
- if (Type->isRecordType()) {
- CreatedValuesCount++;
- llvm::DenseMap<const ValueDecl *, StorageLocation *> FieldLocs;
- for (const FieldDecl *Field : DACtx->getModeledFields(Type)) {
- assert(Field != nullptr);
-
- QualType FieldType = Field->getType();
-
- FieldLocs.insert(
- {Field, &createLocAndMaybeValue(FieldType, Visited, Depth + 1,
- CreatedValuesCount)});
- }
-
- RecordStorageLocation::SyntheticFieldMap SyntheticFieldLocs;
- for (const auto &Entry : DACtx->getSyntheticFields(Type)) {
- SyntheticFieldLocs.insert(
- {Entry.getKey(),
- &createLocAndMaybeValue(Entry.getValue(), Visited, Depth + 1,
- CreatedValuesCount)});
- }
-
- RecordStorageLocation &Loc = DACtx->createRecordStorageLocation(
- Type, std::move(FieldLocs), std::move(SyntheticFieldLocs));
- RecordValue &RecordVal = create<RecordValue>(Loc);
-
- // As we already have a storage location for the `RecordValue`, we can and
- // should associate them in the environment.
- setValue(Loc, RecordVal);
-
- return &RecordVal;
- }
-
return nullptr;
}
@@ -929,23 +1017,70 @@ Environment::createLocAndMaybeValue(QualType Ty,
int Depth, int &CreatedValuesCount) {
if (!Visited.insert(Ty.getCanonicalType()).second)
return createStorageLocation(Ty.getNonReferenceType());
- Value *Val = createValueUnlessSelfReferential(
- Ty.getNonReferenceType(), Visited, Depth, CreatedValuesCount);
- Visited.erase(Ty.getCanonicalType());
+ auto EraseVisited = llvm::make_scope_exit(
+ [&Visited, Ty] { Visited.erase(Ty.getCanonicalType()); });
Ty = Ty.getNonReferenceType();
- if (Val == nullptr)
- return createStorageLocation(Ty);
-
- if (Ty->isRecordType())
- return cast<RecordValue>(Val)->getLoc();
+ if (Ty->isRecordType()) {
+ auto &Loc = cast<RecordStorageLocation>(createStorageLocation(Ty));
+ initializeFieldsWithValues(Loc, Ty, Visited, Depth, CreatedValuesCount);
+ return Loc;
+ }
StorageLocation &Loc = createStorageLocation(Ty);
- setValue(Loc, *Val);
+
+ if (Value *Val = createValueUnlessSelfReferential(Ty, Visited, Depth,
+ CreatedValuesCount))
+ setValue(Loc, *Val);
+
return Loc;
}
+void Environment::initializeFieldsWithValues(RecordStorageLocation &Loc,
+ QualType Type,
+ llvm::DenseSet<QualType> &Visited,
+ int Depth,
+ int &CreatedValuesCount) {
+ auto initField = [&](QualType FieldType, StorageLocation &FieldLoc) {
+ if (FieldType->isRecordType()) {
+ auto &FieldRecordLoc = cast<RecordStorageLocation>(FieldLoc);
+ initializeFieldsWithValues(FieldRecordLoc, FieldRecordLoc.getType(),
+ Visited, Depth + 1, CreatedValuesCount);
+ } else {
+ if (getValue(FieldLoc) != nullptr)
+ return;
+ if (!Visited.insert(FieldType.getCanonicalType()).second)
+ return;
+ if (Value *Val = createValueUnlessSelfReferential(
+ FieldType, Visited, Depth + 1, CreatedValuesCount))
+ setValue(FieldLoc, *Val);
+ Visited.erase(FieldType.getCanonicalType());
+ }
+ };
+
+ for (const FieldDecl *Field : DACtx->getModeledFields(Type)) {
+ assert(Field != nullptr);
+ QualType FieldType = Field->getType();
+
+ if (FieldType->isReferenceType()) {
+ Loc.setChild(*Field,
+ &createLocAndMaybeValue(FieldType, Visited, Depth + 1,
+ CreatedValuesCount));
+ } else {
+ StorageLocation *FieldLoc = Loc.getChild(*Field);
+ assert(FieldLoc != nullptr);
+ initField(FieldType, *FieldLoc);
+ }
+ }
+ for (const auto &[FieldName, FieldType] : DACtx->getSyntheticFields(Type)) {
+ // Synthetic fields cannot have reference type, so we don't need to deal
+ // with this case.
+ assert(!FieldType->isReferenceType());
+ initField(FieldType, Loc.getSyntheticField(FieldName));
+ }
+}
+
StorageLocation &Environment::createObjectInternal(const ValueDecl *D,
QualType Ty,
const Expr *InitExpr) {
@@ -955,7 +1090,7 @@ StorageLocation &Environment::createObjectInternal(const ValueDecl *D,
// be null.
if (InitExpr) {
if (auto *InitExprLoc = getStorageLocation(*InitExpr))
- return *InitExprLoc;
+ return *InitExprLoc;
}
// Even though we have an initializer, we might not get an
@@ -965,33 +1100,35 @@ StorageLocation &Environment::createObjectInternal(const ValueDecl *D,
return createObjectInternal(D, Ty.getNonReferenceType(), nullptr);
}
- Value *Val = nullptr;
- if (InitExpr)
- // In the (few) cases where an expression is intentionally
- // "uninterpreted", `InitExpr` is not associated with a value. There are
- // two ways to handle this situation: propagate the status, so that
- // uninterpreted initializers result in uninterpreted variables, or
- // provide a default value. We choose the latter so that later refinements
- // of the variable can be used for reasoning about the surrounding code.
- // For this reason, we let this case be handled by the `createValue()`
- // call below.
- //
- // FIXME. If and when we interpret all language cases, change this to
- // assert that `InitExpr` is interpreted, rather than supplying a
- // default value (assuming we don't update the environment API to return
- // references).
- Val = getValue(*InitExpr);
- if (!Val)
- Val = createValue(Ty);
-
- if (Ty->isRecordType())
- return cast<RecordValue>(Val)->getLoc();
-
StorageLocation &Loc =
D ? createStorageLocation(*D) : createStorageLocation(Ty);
- if (Val)
- setValue(Loc, *Val);
+ if (Ty->isRecordType()) {
+ auto &RecordLoc = cast<RecordStorageLocation>(Loc);
+ if (!InitExpr)
+ initializeFieldsWithValues(RecordLoc);
+ } else {
+ Value *Val = nullptr;
+ if (InitExpr)
+ // In the (few) cases where an expression is intentionally
+ // "uninterpreted", `InitExpr` is not associated with a value. There are
+ // two ways to handle this situation: propagate the status, so that
+ // uninterpreted initializers result in uninterpreted variables, or
+ // provide a default value. We choose the latter so that later refinements
+ // of the variable can be used for reasoning about the surrounding code.
+ // For this reason, we let this case be handled by the `createValue()`
+ // call below.
+ //
+ // FIXME. If and when we interpret all language cases, change this to
+ // assert that `InitExpr` is interpreted, rather than supplying a
+ // default value (assuming we don't update the environment API to return
+ // references).
+ Val = getValue(*InitExpr);
+ if (!Val)
+ Val = createValue(Ty);
+ if (Val)
+ setValue(Loc, *Val);
+ }
return Loc;
}
@@ -1009,12 +1146,17 @@ bool Environment::allows(const Formula &F) const {
}
void Environment::dump(raw_ostream &OS) const {
- // FIXME: add printing for remaining fields and allow caller to decide what
- // fields are printed.
- OS << "DeclToLoc:\n";
- for (auto [D, L] : DeclToLoc)
- OS << " [" << D->getNameAsString() << ", " << L << "]\n";
+ llvm::DenseMap<const StorageLocation *, std::string> LocToName;
+ if (LocForRecordReturnVal != nullptr)
+ LocToName[LocForRecordReturnVal] = "(returned record)";
+ if (ThisPointeeLoc != nullptr)
+ LocToName[ThisPointeeLoc] = "this";
+ OS << "DeclToLoc:\n";
+ for (auto [D, L] : DeclToLoc) {
+ auto Iter = LocToName.insert({L, D->getNameAsString()}).first;
+ OS << " [" << Iter->second << ", " << L << "]\n";
+ }
OS << "ExprToLoc:\n";
for (auto [E, L] : ExprToLoc)
OS << " [" << E << ", " << L << "]\n";
@@ -1025,15 +1167,63 @@ void Environment::dump(raw_ostream &OS) const {
OS << "LocToVal:\n";
for (auto [L, V] : LocToVal) {
- OS << " [" << L << ", " << V << ": " << *V << "]\n";
+ OS << " [" << L;
+ if (auto Iter = LocToName.find(L); Iter != LocToName.end())
+ OS << " (" << Iter->second << ")";
+ OS << ", " << V << ": " << *V << "]\n";
+ }
+
+ if (const FunctionDecl *Func = getCurrentFunc()) {
+ if (Func->getReturnType()->isReferenceType()) {
+ OS << "ReturnLoc: " << ReturnLoc;
+ if (auto Iter = LocToName.find(ReturnLoc); Iter != LocToName.end())
+ OS << " (" << Iter->second << ")";
+ OS << "\n";
+ } else if (Func->getReturnType()->isRecordType() ||
+ isa<CXXConstructorDecl>(Func)) {
+ OS << "LocForRecordReturnVal: " << LocForRecordReturnVal << "\n";
+ } else if (!Func->getReturnType()->isVoidType()) {
+ if (ReturnVal == nullptr)
+ OS << "ReturnVal: nullptr\n";
+ else
+ OS << "ReturnVal: " << *ReturnVal << "\n";
+ }
+
+ if (isa<CXXMethodDecl>(Func)) {
+ OS << "ThisPointeeLoc: " << ThisPointeeLoc << "\n";
+ }
}
OS << "\n";
DACtx->dumpFlowCondition(FlowConditionToken, OS);
}
-void Environment::dump() const {
- dump(llvm::dbgs());
+void Environment::dump() const { dump(llvm::dbgs()); }
+
+Environment::PrValueToResultObject Environment::buildResultObjectMap(
+ DataflowAnalysisContext *DACtx, const FunctionDecl *FuncDecl,
+ RecordStorageLocation *ThisPointeeLoc,
+ RecordStorageLocation *LocForRecordReturnVal) {
+ assert(FuncDecl->doesThisDeclarationHaveABody());
+
+ PrValueToResultObject Map = buildResultObjectMap(
+ DACtx, FuncDecl->getBody(), ThisPointeeLoc, LocForRecordReturnVal);
+
+ ResultObjectVisitor Visitor(Map, LocForRecordReturnVal, *DACtx);
+ if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(FuncDecl))
+ Visitor.TraverseConstructorInits(Ctor, ThisPointeeLoc);
+
+ return Map;
+}
+
+Environment::PrValueToResultObject Environment::buildResultObjectMap(
+ DataflowAnalysisContext *DACtx, Stmt *S,
+ RecordStorageLocation *ThisPointeeLoc,
+ RecordStorageLocation *LocForRecordReturnVal) {
+ PrValueToResultObject Map;
+ ResultObjectVisitor Visitor(Map, LocForRecordReturnVal, *DACtx);
+ Visitor.TraverseStmt(S);
+ return Map;
}
RecordStorageLocation *getImplicitObjectLocation(const CXXMemberCallExpr &MCE,
@@ -1063,50 +1253,5 @@ RecordStorageLocation *getBaseObjectLocation(const MemberExpr &ME,
return Env.get<RecordStorageLocation>(*Base);
}
-std::vector<FieldDecl *> getFieldsForInitListExpr(const RecordDecl *RD) {
- // Unnamed bitfields are only used for padding and do not appear in
- // `InitListExpr`'s inits. However, those fields do appear in `RecordDecl`'s
- // field list, and we thus need to remove them before mapping inits to
- // fields to avoid mapping inits to the wrongs fields.
- std::vector<FieldDecl *> Fields;
- llvm::copy_if(
- RD->fields(), std::back_inserter(Fields),
- [](const FieldDecl *Field) { return !Field->isUnnamedBitfield(); });
- return Fields;
-}
-
-RecordValue &refreshRecordValue(RecordStorageLocation &Loc, Environment &Env) {
- auto &NewVal = Env.create<RecordValue>(Loc);
- Env.setValue(Loc, NewVal);
- return NewVal;
-}
-
-RecordValue &refreshRecordValue(const Expr &Expr, Environment &Env) {
- assert(Expr.getType()->isRecordType());
-
- if (Expr.isPRValue()) {
- if (auto *ExistingVal = Env.get<RecordValue>(Expr)) {
- auto &NewVal = Env.create<RecordValue>(ExistingVal->getLoc());
- Env.setValue(Expr, NewVal);
- Env.setValue(NewVal.getLoc(), NewVal);
- return NewVal;
- }
-
- auto &NewVal = *cast<RecordValue>(Env.createValue(Expr.getType()));
- Env.setValue(Expr, NewVal);
- return NewVal;
- }
-
- if (auto *Loc = Env.get<RecordStorageLocation>(Expr)) {
- auto &NewVal = Env.create<RecordValue>(*Loc);
- Env.setValue(*Loc, NewVal);
- return NewVal;
- }
-
- auto &NewVal = *cast<RecordValue>(Env.createValue(Expr.getType()));
- Env.setStorageLocation(Expr, NewVal.getLoc());
- return NewVal;
-}
-
} // namespace dataflow
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DebugSupport.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DebugSupport.cpp
index 573c4b1d474b..d40aab7a7f10 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DebugSupport.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DebugSupport.cpp
@@ -28,8 +28,6 @@ llvm::StringRef debugString(Value::Kind Kind) {
return "Integer";
case Value::Kind::Pointer:
return "Pointer";
- case Value::Kind::Record:
- return "Record";
case Value::Kind::AtomicBool:
return "AtomicBool";
case Value::Kind::TopBool:
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.cpp
index 2a7bfce53501..a36cb41a63df 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.cpp
@@ -54,7 +54,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
+#include "clang/Analysis/FlowSensitive/AdornedCFG.h"
#include "clang/Analysis/FlowSensitive/DebugSupport.h"
#include "clang/Analysis/FlowSensitive/Logger.h"
#include "clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h"
@@ -95,7 +95,6 @@ public:
switch (V.getKind()) {
case Value::Kind::Integer:
- case Value::Kind::Record:
case Value::Kind::TopBool:
case Value::Kind::AtomicBool:
case Value::Kind::FormulaBool:
@@ -126,8 +125,9 @@ public:
return;
JOS.attribute("type", L.getType().getAsString());
- if (auto *V = Env.getValue(L))
- dump(*V);
+ if (!L.getType()->isRecordType())
+ if (auto *V = Env.getValue(L))
+ dump(*V);
if (auto *RLoc = dyn_cast<RecordStorageLocation>(&L)) {
for (const auto &Child : RLoc->children())
@@ -158,13 +158,17 @@ class HTMLLogger : public Logger {
StreamFactory Streams;
std::unique_ptr<llvm::raw_ostream> OS;
- std::optional<llvm::json::OStream> JOS;
+ std::string JSON;
+ llvm::raw_string_ostream JStringStream{JSON};
+ llvm::json::OStream JOS{JStringStream, /*Indent=*/2};
- const ControlFlowContext *CFG;
+ const AdornedCFG *ACFG;
// Timeline of iterations of CFG block visitation.
std::vector<Iteration> Iters;
// Indexes in `Iters` of the iterations for each block.
llvm::DenseMap<const CFGBlock *, llvm::SmallVector<size_t>> BlockIters;
+ // For a given block ID, did the block converge (on the last iteration)?
+ llvm::BitVector BlockConverged;
// The messages logged in the current context but not yet written.
std::string ContextLogs;
// The number of elements we have visited within the current CFG block.
@@ -172,13 +176,15 @@ class HTMLLogger : public Logger {
public:
explicit HTMLLogger(StreamFactory Streams) : Streams(std::move(Streams)) {}
- void beginAnalysis(const ControlFlowContext &CFG,
+ void beginAnalysis(const AdornedCFG &ACFG,
TypeErasedDataflowAnalysis &A) override {
OS = Streams();
- this->CFG = &CFG;
+ this->ACFG = &ACFG;
*OS << llvm::StringRef(HTMLLogger_html).split("<?INJECT?>").first;
- const auto &D = CFG.getDecl();
+ BlockConverged.resize(ACFG.getCFG().getNumBlockIDs());
+
+ const auto &D = ACFG.getDecl();
const auto &SM = A.getASTContext().getSourceManager();
*OS << "<title>";
if (const auto *ND = dyn_cast<NamedDecl>(&D))
@@ -191,37 +197,37 @@ public:
*OS << "<script>" << HTMLLogger_js << "</script>\n";
writeCode();
- writeCFG();
-
- *OS << "<script>var HTMLLoggerData = \n";
- JOS.emplace(*OS, /*Indent=*/2);
- JOS->objectBegin();
- JOS->attributeBegin("states");
- JOS->objectBegin();
+ JOS.objectBegin();
+ JOS.attributeBegin("states");
+ JOS.objectBegin();
}
// Between beginAnalysis() and endAnalysis() we write all the states for
// particular analysis points into the `timeline` array.
void endAnalysis() override {
- JOS->objectEnd();
- JOS->attributeEnd();
+ JOS.objectEnd();
+ JOS.attributeEnd();
- JOS->attributeArray("timeline", [&] {
+ JOS.attributeArray("timeline", [&] {
for (const auto &E : Iters) {
- JOS->object([&] {
- JOS->attribute("block", blockID(E.Block->getBlockID()));
- JOS->attribute("iter", E.Iter);
- JOS->attribute("post_visit", E.PostVisit);
- JOS->attribute("converged", E.Converged);
+ JOS.object([&] {
+ JOS.attribute("block", blockID(E.Block->getBlockID()));
+ JOS.attribute("iter", E.Iter);
+ JOS.attribute("post_visit", E.PostVisit);
+ JOS.attribute("converged", E.Converged);
});
}
});
- JOS->attributeObject("cfg", [&] {
+ JOS.attributeObject("cfg", [&] {
for (const auto &E : BlockIters)
writeBlock(*E.first, E.second);
});
- JOS->objectEnd();
- JOS.reset();
+ JOS.objectEnd();
+
+ writeCFG();
+
+ *OS << "<script>var HTMLLoggerData = \n";
+ *OS << JSON;
*OS << ";\n</script>\n";
*OS << llvm::StringRef(HTMLLogger_html).split("<?INJECT?>").second;
}
@@ -231,6 +237,8 @@ public:
unsigned IterNum = BIter.size() + 1;
BIter.push_back(Iters.size());
Iters.push_back({&B, IterNum, PostVisit, /*Converged=*/false});
+ if (!PostVisit)
+ BlockConverged[B.getBlockID()] = false;
ElementIndex = 0;
}
void enterElement(const CFGElement &E) override {
@@ -261,11 +269,11 @@ public:
unsigned Block = Iters.back().Block->getBlockID();
unsigned Iter = Iters.back().Iter;
bool PostVisit = Iters.back().PostVisit;
- JOS->attributeObject(elementIterID(Block, Iter, ElementIndex), [&] {
- JOS->attribute("block", blockID(Block));
- JOS->attribute("iter", Iter);
- JOS->attribute("post_visit", PostVisit);
- JOS->attribute("element", ElementIndex);
+ JOS.attributeObject(elementIterID(Block, Iter, ElementIndex), [&] {
+ JOS.attribute("block", blockID(Block));
+ JOS.attribute("iter", Iter);
+ JOS.attribute("post_visit", PostVisit);
+ JOS.attribute("element", ElementIndex);
// If this state immediately follows an Expr, show its built-in model.
if (ElementIndex > 0) {
@@ -273,29 +281,33 @@ public:
Iters.back().Block->Elements[ElementIndex - 1].getAs<CFGStmt>();
if (const Expr *E = S ? llvm::dyn_cast<Expr>(S->getStmt()) : nullptr) {
if (E->isPRValue()) {
- if (auto *V = State.Env.getValue(*E))
- JOS->attributeObject(
- "value", [&] { ModelDumper(*JOS, State.Env).dump(*V); });
+ if (!E->getType()->isRecordType())
+ if (auto *V = State.Env.getValue(*E))
+ JOS.attributeObject(
+ "value", [&] { ModelDumper(JOS, State.Env).dump(*V); });
} else {
if (auto *Loc = State.Env.getStorageLocation(*E))
- JOS->attributeObject(
- "value", [&] { ModelDumper(*JOS, State.Env).dump(*Loc); });
+ JOS.attributeObject(
+ "value", [&] { ModelDumper(JOS, State.Env).dump(*Loc); });
}
}
}
if (!ContextLogs.empty()) {
- JOS->attribute("logs", ContextLogs);
+ JOS.attribute("logs", ContextLogs);
ContextLogs.clear();
}
{
std::string BuiltinLattice;
llvm::raw_string_ostream BuiltinLatticeS(BuiltinLattice);
State.Env.dump(BuiltinLatticeS);
- JOS->attribute("builtinLattice", BuiltinLattice);
+ JOS.attribute("builtinLattice", BuiltinLattice);
}
});
}
- void blockConverged() override { Iters.back().Converged = true; }
+ void blockConverged() override {
+ Iters.back().Converged = true;
+ BlockConverged[Iters.back().Block->getBlockID()] = true;
+ }
void logText(llvm::StringRef S) override {
ContextLogs.append(S.begin(), S.end());
@@ -307,23 +319,23 @@ private:
// Currently this is just the list of elements in execution order.
// FIXME: an AST dump would be a useful view, too.
void writeBlock(const CFGBlock &B, llvm::ArrayRef<size_t> ItersForB) {
- JOS->attributeObject(blockID(B.getBlockID()), [&] {
- JOS->attributeArray("iters", [&] {
+ JOS.attributeObject(blockID(B.getBlockID()), [&] {
+ JOS.attributeArray("iters", [&] {
for (size_t IterIdx : ItersForB) {
const Iteration &Iter = Iters[IterIdx];
- JOS->object([&] {
- JOS->attribute("iter", Iter.Iter);
- JOS->attribute("post_visit", Iter.PostVisit);
- JOS->attribute("converged", Iter.Converged);
+ JOS.object([&] {
+ JOS.attribute("iter", Iter.Iter);
+ JOS.attribute("post_visit", Iter.PostVisit);
+ JOS.attribute("converged", Iter.Converged);
});
}
});
- JOS->attributeArray("elements", [&] {
+ JOS.attributeArray("elements", [&] {
for (const auto &Elt : B.Elements) {
std::string Dump;
llvm::raw_string_ostream DumpS(Dump);
Elt.dumpToStream(DumpS);
- JOS->value(Dump);
+ JOS.value(Dump);
}
});
});
@@ -334,7 +346,7 @@ private:
// tokens are associated with, and even which BB element (so that clicking
// can select the right element).
void writeCode() {
- const auto &AST = CFG->getDecl().getASTContext();
+ const auto &AST = ACFG->getDecl().getASTContext();
bool Invalid = false;
// Extract the source code from the original file.
@@ -342,7 +354,7 @@ private:
// indentation to worry about), but we need the boundaries of particular
// AST nodes and the printer doesn't provide this.
auto Range = clang::Lexer::makeFileCharRange(
- CharSourceRange::getTokenRange(CFG->getDecl().getSourceRange()),
+ CharSourceRange::getTokenRange(ACFG->getDecl().getSourceRange()),
AST.getSourceManager(), AST.getLangOpts());
if (Range.isInvalid())
return;
@@ -408,7 +420,7 @@ private:
// Construct one TokenInfo per character in a flat array.
// This is inefficient (chars in a token all have the same info) but simple.
std::vector<TokenInfo> State(Code.size());
- for (const auto *Block : CFG->getCFG()) {
+ for (const auto *Block : ACFG->getCFG()) {
unsigned EltIndex = 0;
for (const auto& Elt : *Block) {
++EltIndex;
@@ -469,7 +481,7 @@ private:
// out to `dot` to turn it into an SVG.
void writeCFG() {
*OS << "<template data-copy='cfg'>\n";
- if (auto SVG = renderSVG(buildCFGDot(CFG->getCFG())))
+ if (auto SVG = renderSVG(buildCFGDot(ACFG->getCFG())))
*OS << *SVG;
else
*OS << "Can't draw CFG: " << toString(SVG.takeError());
@@ -477,7 +489,7 @@ private:
}
// Produce a graphviz description of a CFG.
- static std::string buildCFGDot(const clang::CFG &CFG) {
+ std::string buildCFGDot(const clang::CFG &CFG) {
std::string Graph;
llvm::raw_string_ostream GraphS(Graph);
// Graphviz likes to add unhelpful tooltips everywhere, " " suppresses.
@@ -486,8 +498,15 @@ private:
node[class=bb, shape=square, fontname="sans-serif", tooltip=" "]
edge[tooltip = " "]
)";
- for (unsigned I = 0; I < CFG.getNumBlockIDs(); ++I)
- GraphS << " " << blockID(I) << " [id=" << blockID(I) << "]\n";
+ for (unsigned I = 0; I < CFG.getNumBlockIDs(); ++I) {
+ std::string Name = blockID(I);
+ // Rightwards arrow, vertical line
+ const char *ConvergenceMarker = (const char *)u8"\\n\u2192\u007c";
+ if (BlockConverged[I])
+ Name += ConvergenceMarker;
+ GraphS << " " << blockID(I) << " [id=" << blockID(I) << " label=\""
+ << Name << "\"]\n";
+ }
for (const auto *Block : CFG) {
for (const auto &Succ : Block->succs()) {
if (Succ.getReachableBlock())
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.css b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.css
index 5da8db8fa87b..e25270430efc 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.css
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.css
@@ -29,6 +29,16 @@ section h2 {
}
#timeline {
min-width: max-content;
+ counter-reset: entry_counter;
+}
+#timeline .entry .counter::before {
+ counter-increment: entry_counter;
+ content: counter(entry_counter) ":";
+}
+#timeline .entry .counter {
+ display: inline-block;
+ min-width: 2em; /* Enough space for two digits and a colon */
+ text-align: right;
}
#timeline .entry.hover {
background-color: #aaa;
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.html b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.html
index b9f76c5074c7..be173e8b2854 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.html
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.html
@@ -42,6 +42,7 @@
<header>Timeline</header>
<template data-for="entry in timeline">
<div id="{{entry.block}}:{{entry.iter}}" data-bb="{{entry.block}}" class="entry">
+ <span class="counter"></span>
{{entry.block}}
<template data-if="entry.post_visit">(post-visit)</template>
<template data-if="!entry.post_visit">({{entry.iter}})</template>
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Logger.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Logger.cpp
index 8c401df62e44..8f40768171c9 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Logger.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Logger.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/FlowSensitive/Logger.h"
-#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
+#include "clang/Analysis/FlowSensitive/AdornedCFG.h"
#include "clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h"
#include "llvm/Support/WithColor.h"
@@ -33,17 +33,17 @@ struct TextualLogger final : Logger {
TextualLogger(llvm::raw_ostream &OS)
: OS(OS), ShowColors(llvm::WithColor::defaultAutoDetectFunction()(OS)) {}
- virtual void beginAnalysis(const ControlFlowContext &CFG,
+ virtual void beginAnalysis(const AdornedCFG &ACFG,
TypeErasedDataflowAnalysis &Analysis) override {
{
llvm::WithColor Header(OS, llvm::raw_ostream::Colors::RED, /*Bold=*/true);
OS << "=== Beginning data flow analysis ===\n";
}
- auto &D = CFG.getDecl();
+ auto &D = ACFG.getDecl();
D.print(OS);
OS << "\n";
D.dump(OS);
- CurrentCFG = &CFG.getCFG();
+ CurrentCFG = &ACFG.getCFG();
CurrentCFG->print(OS, Analysis.getASTContext().getLangOpts(), ShowColors);
CurrentAnalysis = &Analysis;
}
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
index 1d31b22b6d25..0707aa662e4c 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
@@ -64,39 +64,125 @@ static bool hasOptionalClassName(const CXXRecordDecl &RD) {
return false;
}
+static const CXXRecordDecl *getOptionalBaseClass(const CXXRecordDecl *RD) {
+ if (RD == nullptr)
+ return nullptr;
+ if (hasOptionalClassName(*RD))
+ return RD;
+
+ if (!RD->hasDefinition())
+ return nullptr;
+
+ for (const CXXBaseSpecifier &Base : RD->bases())
+ if (const CXXRecordDecl *BaseClass =
+ getOptionalBaseClass(Base.getType()->getAsCXXRecordDecl()))
+ return BaseClass;
+
+ return nullptr;
+}
+
namespace {
using namespace ::clang::ast_matchers;
using LatticeTransferState = TransferState<NoopLattice>;
-AST_MATCHER(CXXRecordDecl, hasOptionalClassNameMatcher) {
- return hasOptionalClassName(Node);
+AST_MATCHER(CXXRecordDecl, optionalClass) { return hasOptionalClassName(Node); }
+
+AST_MATCHER(CXXRecordDecl, optionalOrDerivedClass) {
+ return getOptionalBaseClass(&Node) != nullptr;
}
-DeclarationMatcher optionalClass() {
- return classTemplateSpecializationDecl(
- hasOptionalClassNameMatcher(),
- hasTemplateArgument(0, refersToType(type().bind("T"))));
+auto desugarsToOptionalType() {
+ return hasUnqualifiedDesugaredType(
+ recordType(hasDeclaration(cxxRecordDecl(optionalClass()))));
}
-auto optionalOrAliasType() {
+auto desugarsToOptionalOrDerivedType() {
return hasUnqualifiedDesugaredType(
- recordType(hasDeclaration(optionalClass())));
+ recordType(hasDeclaration(cxxRecordDecl(optionalOrDerivedClass()))));
+}
+
+auto hasOptionalType() { return hasType(desugarsToOptionalType()); }
+
+/// Matches any of the spellings of the optional types and sugar, aliases,
+/// derived classes, etc.
+auto hasOptionalOrDerivedType() {
+ return hasType(desugarsToOptionalOrDerivedType());
+}
+
+QualType getPublicType(const Expr *E) {
+ auto *Cast = dyn_cast<ImplicitCastExpr>(E->IgnoreParens());
+ if (Cast == nullptr || Cast->getCastKind() != CK_UncheckedDerivedToBase) {
+ QualType Ty = E->getType();
+ if (Ty->isPointerType())
+ return Ty->getPointeeType();
+ return Ty;
+ }
+
+ // Is the derived type that we're casting from the type of `*this`? In this
+ // special case, we can upcast to the base class even if the base is
+ // non-public.
+ bool CastingFromThis = isa<CXXThisExpr>(Cast->getSubExpr());
+
+ // Find the least-derived type in the path (i.e. the last entry in the list)
+ // that we can access.
+ const CXXBaseSpecifier *PublicBase = nullptr;
+ for (const CXXBaseSpecifier *Base : Cast->path()) {
+ if (Base->getAccessSpecifier() != AS_public && !CastingFromThis)
+ break;
+ PublicBase = Base;
+ CastingFromThis = false;
+ }
+
+ if (PublicBase != nullptr)
+ return PublicBase->getType();
+
+ // We didn't find any public type that we could cast to. There may be more
+ // casts in `getSubExpr()`, so recurse. (If there aren't any more casts, this
+ // will return the type of `getSubExpr()`.)
+ return getPublicType(Cast->getSubExpr());
+}
+
+// Returns the least-derived type for the receiver of `MCE` that
+// `MCE.getImplicitObjectArgument()->IgnoreParentImpCasts()` can be downcast to.
+// Effectively, we upcast until we reach a non-public base class, unless that
+// base is a base of `*this`.
+//
+// This is needed to correctly match methods called on types derived from
+// `std::optional`.
+//
+// Say we have a `struct Derived : public std::optional<int> {} d;` For a call
+// `d.has_value()`, the `getImplicitObjectArgument()` looks like this:
+//
+// ImplicitCastExpr 'const std::__optional_storage_base<int>' lvalue
+// | <UncheckedDerivedToBase (optional -> __optional_storage_base)>
+// `-DeclRefExpr 'Derived' lvalue Var 'd' 'Derived'
+//
+// The type of the implicit object argument is `__optional_storage_base`
+// (since this is the internal type that `has_value()` is declared on). If we
+// call `IgnoreParenImpCasts()` on the implicit object argument, we get the
+// `DeclRefExpr`, which has type `Derived`. Neither of these types is
+// `optional`, and hence neither is sufficient for querying whether we are
+// calling a method on `optional`.
+//
+// Instead, starting with the most derived type, we need to follow the chain of
+// casts
+QualType getPublicReceiverType(const CXXMemberCallExpr &MCE) {
+ return getPublicType(MCE.getImplicitObjectArgument());
}
-/// Matches any of the spellings of the optional types and sugar, aliases, etc.
-auto hasOptionalType() { return hasType(optionalOrAliasType()); }
+AST_MATCHER_P(CXXMemberCallExpr, publicReceiverType,
+ ast_matchers::internal::Matcher<QualType>, InnerMatcher) {
+ return InnerMatcher.matches(getPublicReceiverType(Node), Finder, Builder);
+}
auto isOptionalMemberCallWithNameMatcher(
ast_matchers::internal::Matcher<NamedDecl> matcher,
const std::optional<StatementMatcher> &Ignorable = std::nullopt) {
- auto Exception = unless(Ignorable ? expr(anyOf(*Ignorable, cxxThisExpr()))
- : cxxThisExpr());
- return cxxMemberCallExpr(
- on(expr(Exception,
- anyOf(hasOptionalType(),
- hasType(pointerType(pointee(optionalOrAliasType())))))),
- callee(cxxMethodDecl(matcher)));
+ return cxxMemberCallExpr(Ignorable ? on(expr(unless(*Ignorable)))
+ : anything(),
+ publicReceiverType(desugarsToOptionalType()),
+ callee(cxxMethodDecl(matcher)));
}
auto isOptionalOperatorCallWithName(
@@ -129,49 +215,51 @@ auto inPlaceClass() {
auto isOptionalNulloptConstructor() {
return cxxConstructExpr(
- hasOptionalType(),
hasDeclaration(cxxConstructorDecl(parameterCountIs(1),
- hasParameter(0, hasNulloptType()))));
+ hasParameter(0, hasNulloptType()))),
+ hasOptionalOrDerivedType());
}
auto isOptionalInPlaceConstructor() {
- return cxxConstructExpr(hasOptionalType(),
- hasArgument(0, hasType(inPlaceClass())));
+ return cxxConstructExpr(hasArgument(0, hasType(inPlaceClass())),
+ hasOptionalOrDerivedType());
}
auto isOptionalValueOrConversionConstructor() {
return cxxConstructExpr(
- hasOptionalType(),
unless(hasDeclaration(
cxxConstructorDecl(anyOf(isCopyConstructor(), isMoveConstructor())))),
- argumentCountIs(1), hasArgument(0, unless(hasNulloptType())));
+ argumentCountIs(1), hasArgument(0, unless(hasNulloptType())),
+ hasOptionalOrDerivedType());
}
auto isOptionalValueOrConversionAssignment() {
return cxxOperatorCallExpr(
hasOverloadedOperatorName("="),
- callee(cxxMethodDecl(ofClass(optionalClass()))),
+ callee(cxxMethodDecl(ofClass(optionalOrDerivedClass()))),
unless(hasDeclaration(cxxMethodDecl(
anyOf(isCopyAssignmentOperator(), isMoveAssignmentOperator())))),
argumentCountIs(2), hasArgument(1, unless(hasNulloptType())));
}
auto isOptionalNulloptAssignment() {
- return cxxOperatorCallExpr(hasOverloadedOperatorName("="),
- callee(cxxMethodDecl(ofClass(optionalClass()))),
- argumentCountIs(2),
- hasArgument(1, hasNulloptType()));
+ return cxxOperatorCallExpr(
+ hasOverloadedOperatorName("="),
+ callee(cxxMethodDecl(ofClass(optionalOrDerivedClass()))),
+ argumentCountIs(2), hasArgument(1, hasNulloptType()));
}
auto isStdSwapCall() {
return callExpr(callee(functionDecl(hasName("std::swap"))),
- argumentCountIs(2), hasArgument(0, hasOptionalType()),
- hasArgument(1, hasOptionalType()));
+ argumentCountIs(2),
+ hasArgument(0, hasOptionalOrDerivedType()),
+ hasArgument(1, hasOptionalOrDerivedType()));
}
auto isStdForwardCall() {
return callExpr(callee(functionDecl(hasName("std::forward"))),
- argumentCountIs(1), hasArgument(0, hasOptionalType()));
+ argumentCountIs(1),
+ hasArgument(0, hasOptionalOrDerivedType()));
}
constexpr llvm::StringLiteral ValueOrCallID = "ValueOrCall";
@@ -212,8 +300,9 @@ auto isValueOrNotEqX() {
}
auto isCallReturningOptional() {
- return callExpr(hasType(qualType(anyOf(
- optionalOrAliasType(), referenceType(pointee(optionalOrAliasType()))))));
+ return callExpr(hasType(qualType(
+ anyOf(desugarsToOptionalOrDerivedType(),
+ referenceType(pointee(desugarsToOptionalOrDerivedType()))))));
}
template <typename L, typename R>
@@ -250,17 +339,6 @@ void setHasValue(RecordStorageLocation &OptionalLoc, BoolValue &HasValueVal,
Env.setValue(locForHasValue(OptionalLoc), HasValueVal);
}
-/// Creates a symbolic value for an `optional` value at an existing storage
-/// location. Uses `HasValueVal` as the symbolic value of the "has_value"
-/// property.
-RecordValue &createOptionalValue(RecordStorageLocation &Loc,
- BoolValue &HasValueVal, Environment &Env) {
- auto &OptionalVal = Env.create<RecordValue>(Loc);
- Env.setValue(Loc, OptionalVal);
- setHasValue(Loc, HasValueVal, Env);
- return OptionalVal;
-}
-
/// Returns the symbolic value that represents the "has_value" property of the
/// optional at `OptionalLoc`. Returns null if `OptionalLoc` is null.
BoolValue *getHasValue(Environment &Env, RecordStorageLocation *OptionalLoc) {
@@ -275,12 +353,9 @@ BoolValue *getHasValue(Environment &Env, RecordStorageLocation *OptionalLoc) {
return HasValueVal;
}
-/// Returns true if and only if `Type` is an optional type.
-bool isOptionalType(QualType Type) {
- if (!Type->isRecordType())
- return false;
- const CXXRecordDecl *D = Type->getAsCXXRecordDecl();
- return D != nullptr && hasOptionalClassName(*D);
+QualType valueTypeFromOptionalDecl(const CXXRecordDecl &RD) {
+ auto &CTSD = cast<ClassTemplateSpecializationDecl>(RD);
+ return CTSD.getTemplateArgs()[0].getAsType();
}
/// Returns the number of optional wrappers in `Type`.
@@ -288,15 +363,13 @@ bool isOptionalType(QualType Type) {
/// For example, if `Type` is `optional<optional<int>>`, the result of this
/// function will be 2.
int countOptionalWrappers(const ASTContext &ASTCtx, QualType Type) {
- if (!isOptionalType(Type))
+ const CXXRecordDecl *Optional =
+ getOptionalBaseClass(Type->getAsCXXRecordDecl());
+ if (Optional == nullptr)
return 0;
return 1 + countOptionalWrappers(
ASTCtx,
- cast<ClassTemplateSpecializationDecl>(Type->getAsRecordDecl())
- ->getTemplateArgs()
- .get(0)
- .getAsType()
- .getDesugaredType(ASTCtx));
+ valueTypeFromOptionalDecl(*Optional).getDesugaredType(ASTCtx));
}
StorageLocation *getLocBehindPossiblePointer(const Expr &E,
@@ -329,9 +402,8 @@ void transferArrowOpCall(const Expr *UnwrapExpr, const Expr *ObjectExpr,
void transferMakeOptionalCall(const CallExpr *E,
const MatchFinder::MatchResult &,
LatticeTransferState &State) {
- State.Env.setValue(
- *E, createOptionalValue(State.Env.getResultObjectLocation(*E),
- State.Env.getBoolLiteralValue(true), State.Env));
+ setHasValue(State.Env.getResultObjectLocation(*E),
+ State.Env.getBoolLiteralValue(true), State.Env);
}
void transferOptionalHasValueCall(const CXXMemberCallExpr *CallExpr,
@@ -399,9 +471,6 @@ void transferValueOrNotEqX(const Expr *ComparisonExpr,
void transferCallReturningOptional(const CallExpr *E,
const MatchFinder::MatchResult &Result,
LatticeTransferState &State) {
- if (State.Env.getValue(*E) != nullptr)
- return;
-
RecordStorageLocation *Loc = nullptr;
if (E->isPRValue()) {
Loc = &State.Env.getResultObjectLocation(*E);
@@ -413,42 +482,41 @@ void transferCallReturningOptional(const CallExpr *E,
}
}
- RecordValue &Val =
- createOptionalValue(*Loc, State.Env.makeAtomicBoolValue(), State.Env);
- if (E->isPRValue())
- State.Env.setValue(*E, Val);
+ if (State.Env.getValue(locForHasValue(*Loc)) != nullptr)
+ return;
+
+ setHasValue(*Loc, State.Env.makeAtomicBoolValue(), State.Env);
}
void constructOptionalValue(const Expr &E, Environment &Env,
BoolValue &HasValueVal) {
RecordStorageLocation &Loc = Env.getResultObjectLocation(E);
- Env.setValue(E, createOptionalValue(Loc, HasValueVal, Env));
+ setHasValue(Loc, HasValueVal, Env);
}
/// Returns a symbolic value for the "has_value" property of an `optional<T>`
/// value that is constructed/assigned from a value of type `U` or `optional<U>`
/// where `T` is constructible from `U`.
-BoolValue &valueOrConversionHasValue(const FunctionDecl &F, const Expr &E,
+BoolValue &valueOrConversionHasValue(QualType DestType, const Expr &E,
const MatchFinder::MatchResult &MatchRes,
LatticeTransferState &State) {
- assert(F.getTemplateSpecializationArgs() != nullptr);
- assert(F.getTemplateSpecializationArgs()->size() > 0);
-
- const int TemplateParamOptionalWrappersCount =
- countOptionalWrappers(*MatchRes.Context, F.getTemplateSpecializationArgs()
- ->get(0)
- .getAsType()
- .getNonReferenceType());
+ const int DestTypeOptionalWrappersCount =
+ countOptionalWrappers(*MatchRes.Context, DestType);
const int ArgTypeOptionalWrappersCount = countOptionalWrappers(
*MatchRes.Context, E.getType().getNonReferenceType());
- // Check if this is a constructor/assignment call for `optional<T>` with
- // argument of type `U` such that `T` is constructible from `U`.
- if (TemplateParamOptionalWrappersCount == ArgTypeOptionalWrappersCount)
+ // Is this an constructor of the form `template<class U> optional(U &&)` /
+ // assignment of the form `template<class U> optional& operator=(U &&)`
+ // (where `T` is assignable / constructible from `U`)?
+ // We recognize this because the number of optionals in the optional being
+ // assigned to is different from the function argument type.
+ if (DestTypeOptionalWrappersCount != ArgTypeOptionalWrappersCount)
return State.Env.getBoolLiteralValue(true);
- // This is a constructor/assignment call for `optional<T>` with argument of
- // type `optional<U>` such that `T` is constructible from `U`.
+ // Otherwise, this must be a constructor of the form
+ // `template <class U> optional<optional<U> &&)` / assignment of the form
+ // `template <class U> optional& operator=(optional<U> &&)
+ // (where, again, `T` is assignable / constructible from `U`).
auto *Loc = State.Env.get<RecordStorageLocation>(E);
if (auto *HasValueVal = getHasValue(State.Env, Loc))
return *HasValueVal;
@@ -460,10 +528,11 @@ void transferValueOrConversionConstructor(
LatticeTransferState &State) {
assert(E->getNumArgs() > 0);
- constructOptionalValue(*E, State.Env,
- valueOrConversionHasValue(*E->getConstructor(),
- *E->getArg(0), MatchRes,
- State));
+ constructOptionalValue(
+ *E, State.Env,
+ valueOrConversionHasValue(
+ E->getConstructor()->getThisType()->getPointeeType(), *E->getArg(0),
+ MatchRes, State));
}
void transferAssignment(const CXXOperatorCallExpr *E, BoolValue &HasValueVal,
@@ -471,7 +540,7 @@ void transferAssignment(const CXXOperatorCallExpr *E, BoolValue &HasValueVal,
assert(E->getNumArgs() > 0);
if (auto *Loc = State.Env.get<RecordStorageLocation>(*E->getArg(0))) {
- createOptionalValue(*Loc, HasValueVal, State.Env);
+ setHasValue(*Loc, HasValueVal, State.Env);
// Assign a storage location for the whole expression.
State.Env.setStorageLocation(*E, *Loc);
@@ -482,10 +551,11 @@ void transferValueOrConversionAssignment(
const CXXOperatorCallExpr *E, const MatchFinder::MatchResult &MatchRes,
LatticeTransferState &State) {
assert(E->getNumArgs() > 1);
- transferAssignment(E,
- valueOrConversionHasValue(*E->getDirectCallee(),
- *E->getArg(1), MatchRes, State),
- State);
+ transferAssignment(
+ E,
+ valueOrConversionHasValue(E->getArg(0)->getType().getNonReferenceType(),
+ *E->getArg(1), MatchRes, State),
+ State);
}
void transferNulloptAssignment(const CXXOperatorCallExpr *E,
@@ -502,11 +572,11 @@ void transferSwap(RecordStorageLocation *Loc1, RecordStorageLocation *Loc2,
if (Loc1 == nullptr) {
if (Loc2 != nullptr)
- createOptionalValue(*Loc2, Env.makeAtomicBoolValue(), Env);
+ setHasValue(*Loc2, Env.makeAtomicBoolValue(), Env);
return;
}
if (Loc2 == nullptr) {
- createOptionalValue(*Loc1, Env.makeAtomicBoolValue(), Env);
+ setHasValue(*Loc1, Env.makeAtomicBoolValue(), Env);
return;
}
@@ -524,8 +594,8 @@ void transferSwap(RecordStorageLocation *Loc1, RecordStorageLocation *Loc2,
if (BoolVal2 == nullptr)
BoolVal2 = &Env.makeAtomicBoolValue();
- createOptionalValue(*Loc1, *BoolVal2, Env);
- createOptionalValue(*Loc2, *BoolVal1, Env);
+ setHasValue(*Loc1, *BoolVal2, Env);
+ setHasValue(*Loc2, *BoolVal1, Env);
}
void transferSwapCall(const CXXMemberCallExpr *E,
@@ -721,8 +791,7 @@ auto buildTransferMatchSwitch() {
LatticeTransferState &State) {
if (RecordStorageLocation *Loc =
getImplicitObjectLocation(*E, State.Env)) {
- createOptionalValue(*Loc, State.Env.getBoolLiteralValue(true),
- State.Env);
+ setHasValue(*Loc, State.Env.getBoolLiteralValue(true), State.Env);
}
})
@@ -733,8 +802,8 @@ auto buildTransferMatchSwitch() {
LatticeTransferState &State) {
if (RecordStorageLocation *Loc =
getImplicitObjectLocation(*E, State.Env)) {
- createOptionalValue(*Loc, State.Env.getBoolLiteralValue(false),
- State.Env);
+ setHasValue(*Loc, State.Env.getBoolLiteralValue(false),
+ State.Env);
}
})
@@ -843,13 +912,7 @@ auto buildDiagnoseMatchSwitch(
ast_matchers::DeclarationMatcher
UncheckedOptionalAccessModel::optionalClassDecl() {
- return optionalClass();
-}
-
-static QualType valueTypeFromOptionalType(QualType OptionalTy) {
- auto *CTSD =
- cast<ClassTemplateSpecializationDecl>(OptionalTy->getAsCXXRecordDecl());
- return CTSD->getTemplateArgs()[0].getAsType();
+ return cxxRecordDecl(optionalClass());
}
UncheckedOptionalAccessModel::UncheckedOptionalAccessModel(ASTContext &Ctx,
@@ -858,9 +921,11 @@ UncheckedOptionalAccessModel::UncheckedOptionalAccessModel(ASTContext &Ctx,
TransferMatchSwitch(buildTransferMatchSwitch()) {
Env.getDataflowAnalysisContext().setSyntheticFieldCallback(
[&Ctx](QualType Ty) -> llvm::StringMap<QualType> {
- if (!isOptionalType(Ty))
+ const CXXRecordDecl *Optional =
+ getOptionalBaseClass(Ty->getAsCXXRecordDecl());
+ if (Optional == nullptr)
return {};
- return {{"value", valueTypeFromOptionalType(Ty)},
+ return {{"value", valueTypeFromOptionalDecl(*Optional)},
{"has_value", Ctx.BoolTy}};
});
}
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/RecordOps.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/RecordOps.cpp
index da4dd6dc0785..b8401230a83d 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/RecordOps.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/RecordOps.cpp
@@ -14,18 +14,52 @@
#define DEBUG_TYPE "dataflow"
-void clang::dataflow::copyRecord(RecordStorageLocation &Src,
- RecordStorageLocation &Dst, Environment &Env) {
+namespace clang::dataflow {
+
+static void copyField(const ValueDecl &Field, StorageLocation *SrcFieldLoc,
+ StorageLocation *DstFieldLoc, RecordStorageLocation &Dst,
+ Environment &Env) {
+ assert(Field.getType()->isReferenceType() ||
+ (SrcFieldLoc != nullptr && DstFieldLoc != nullptr));
+
+ if (Field.getType()->isRecordType()) {
+ copyRecord(cast<RecordStorageLocation>(*SrcFieldLoc),
+ cast<RecordStorageLocation>(*DstFieldLoc), Env);
+ } else if (Field.getType()->isReferenceType()) {
+ Dst.setChild(Field, SrcFieldLoc);
+ } else {
+ if (Value *Val = Env.getValue(*SrcFieldLoc))
+ Env.setValue(*DstFieldLoc, *Val);
+ else
+ Env.clearValue(*DstFieldLoc);
+ }
+}
+
+static void copySyntheticField(QualType FieldType, StorageLocation &SrcFieldLoc,
+ StorageLocation &DstFieldLoc, Environment &Env) {
+ if (FieldType->isRecordType()) {
+ copyRecord(cast<RecordStorageLocation>(SrcFieldLoc),
+ cast<RecordStorageLocation>(DstFieldLoc), Env);
+ } else {
+ if (Value *Val = Env.getValue(SrcFieldLoc))
+ Env.setValue(DstFieldLoc, *Val);
+ else
+ Env.clearValue(DstFieldLoc);
+ }
+}
+
+void copyRecord(RecordStorageLocation &Src, RecordStorageLocation &Dst,
+ Environment &Env) {
auto SrcType = Src.getType().getCanonicalType().getUnqualifiedType();
auto DstType = Dst.getType().getCanonicalType().getUnqualifiedType();
auto SrcDecl = SrcType->getAsCXXRecordDecl();
auto DstDecl = DstType->getAsCXXRecordDecl();
- bool compatibleTypes =
+ [[maybe_unused]] bool compatibleTypes =
SrcType == DstType ||
- (SrcDecl && DstDecl && SrcDecl->isDerivedFrom(DstDecl));
- (void)compatibleTypes;
+ (SrcDecl != nullptr && DstDecl != nullptr &&
+ (SrcDecl->isDerivedFrom(DstDecl) || DstDecl->isDerivedFrom(SrcDecl)));
LLVM_DEBUG({
if (!compatibleTypes) {
@@ -35,45 +69,24 @@ void clang::dataflow::copyRecord(RecordStorageLocation &Src,
});
assert(compatibleTypes);
- for (auto [Field, DstFieldLoc] : Dst.children()) {
- StorageLocation *SrcFieldLoc = Src.getChild(*Field);
-
- assert(Field->getType()->isReferenceType() ||
- (SrcFieldLoc != nullptr && DstFieldLoc != nullptr));
-
- if (Field->getType()->isRecordType()) {
- copyRecord(cast<RecordStorageLocation>(*SrcFieldLoc),
- cast<RecordStorageLocation>(*DstFieldLoc), Env);
- } else if (Field->getType()->isReferenceType()) {
- Dst.setChild(*Field, SrcFieldLoc);
- } else {
- if (Value *Val = Env.getValue(*SrcFieldLoc))
- Env.setValue(*DstFieldLoc, *Val);
- else
- Env.clearValue(*DstFieldLoc);
- }
- }
-
- for (const auto &[Name, SynthFieldLoc] : Src.synthetic_fields()) {
- if (SynthFieldLoc->getType()->isRecordType()) {
- copyRecord(*cast<RecordStorageLocation>(SynthFieldLoc),
- cast<RecordStorageLocation>(Dst.getSyntheticField(Name)), Env);
- } else {
- if (Value *Val = Env.getValue(*SynthFieldLoc))
- Env.setValue(Dst.getSyntheticField(Name), *Val);
- else
- Env.clearValue(Dst.getSyntheticField(Name));
- }
+ if (SrcType == DstType || (SrcDecl != nullptr && DstDecl != nullptr &&
+ SrcDecl->isDerivedFrom(DstDecl))) {
+ for (auto [Field, DstFieldLoc] : Dst.children())
+ copyField(*Field, Src.getChild(*Field), DstFieldLoc, Dst, Env);
+ for (const auto &[Name, DstFieldLoc] : Dst.synthetic_fields())
+ copySyntheticField(DstFieldLoc->getType(), Src.getSyntheticField(Name),
+ *DstFieldLoc, Env);
+ } else {
+ for (auto [Field, SrcFieldLoc] : Src.children())
+ copyField(*Field, SrcFieldLoc, Dst.getChild(*Field), Dst, Env);
+ for (const auto &[Name, SrcFieldLoc] : Src.synthetic_fields())
+ copySyntheticField(SrcFieldLoc->getType(), *SrcFieldLoc,
+ Dst.getSyntheticField(Name), Env);
}
-
- RecordValue *DstVal = &Env.create<RecordValue>(Dst);
- Env.setValue(Dst, *DstVal);
}
-bool clang::dataflow::recordsEqual(const RecordStorageLocation &Loc1,
- const Environment &Env1,
- const RecordStorageLocation &Loc2,
- const Environment &Env2) {
+bool recordsEqual(const RecordStorageLocation &Loc1, const Environment &Env1,
+ const RecordStorageLocation &Loc2, const Environment &Env2) {
LLVM_DEBUG({
if (Loc2.getType().getCanonicalType().getUnqualifiedType() !=
Loc1.getType().getCanonicalType().getUnqualifiedType()) {
@@ -116,3 +129,5 @@ bool clang::dataflow::recordsEqual(const RecordStorageLocation &Loc1,
return true;
}
+
+} // namespace clang::dataflow
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp
index 2271a75fbcaf..3c896d373a21 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp
@@ -20,7 +20,9 @@
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
-#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
+#include "clang/Analysis/FlowSensitive/ASTOps.h"
+#include "clang/Analysis/FlowSensitive/AdornedCFG.h"
+#include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
#include "clang/Analysis/FlowSensitive/NoopAnalysis.h"
#include "clang/Analysis/FlowSensitive/RecordOps.h"
@@ -38,9 +40,13 @@ namespace clang {
namespace dataflow {
const Environment *StmtToEnvMap::getEnvironment(const Stmt &S) const {
- auto BlockIt = CFCtx.getStmtToBlock().find(&ignoreCFGOmittedNodes(S));
- assert(BlockIt != CFCtx.getStmtToBlock().end());
- if (!CFCtx.isBlockReachable(*BlockIt->getSecond()))
+ auto BlockIt = ACFG.getStmtToBlock().find(&ignoreCFGOmittedNodes(S));
+ if (BlockIt == ACFG.getStmtToBlock().end()) {
+ assert(false);
+ // Return null to avoid dereferencing the end iterator in non-assert builds.
+ return nullptr;
+ }
+ if (!ACFG.isBlockReachable(*BlockIt->getSecond()))
return nullptr;
if (BlockIt->getSecond()->getBlockID() == CurBlockID)
return &CurState.Env;
@@ -62,6 +68,14 @@ static BoolValue &evaluateBooleanEquality(const Expr &LHS, const Expr &RHS,
if (auto *RHSBool = dyn_cast_or_null<BoolValue>(RHSValue))
return Env.makeIff(*LHSBool, *RHSBool);
+ if (auto *LHSPtr = dyn_cast_or_null<PointerValue>(LHSValue))
+ if (auto *RHSPtr = dyn_cast_or_null<PointerValue>(RHSValue))
+ // If the storage locations are the same, the pointers definitely compare
+ // the same. If the storage locations are different, they may still alias,
+ // so we fall through to the case below that returns an atom.
+ if (&LHSPtr->getPointeeLoc() == &RHSPtr->getPointeeLoc())
+ return Env.getBoolLiteralValue(true);
+
return Env.makeAtomicBoolValue();
}
@@ -94,6 +108,8 @@ static Value *maybeUnpackLValueExpr(const Expr &E, Environment &Env) {
}
static void propagateValue(const Expr &From, const Expr &To, Environment &Env) {
+ if (From.getType()->isRecordType())
+ return;
if (auto *Val = Env.getValue(From))
Env.setValue(To, *Val);
}
@@ -120,8 +136,9 @@ namespace {
class TransferVisitor : public ConstStmtVisitor<TransferVisitor> {
public:
- TransferVisitor(const StmtToEnvMap &StmtToEnv, Environment &Env)
- : StmtToEnv(StmtToEnv), Env(Env) {}
+ TransferVisitor(const StmtToEnvMap &StmtToEnv, Environment &Env,
+ Environment::ValueModel &Model)
+ : StmtToEnv(StmtToEnv), Env(Env), Model(Model) {}
void VisitBinaryOperator(const BinaryOperator *S) {
const Expr *LHS = S->getLHS();
@@ -130,6 +147,13 @@ public:
const Expr *RHS = S->getRHS();
assert(RHS != nullptr);
+ // Do compound assignments up-front, as there are so many of them and we
+ // don't want to list all of them in the switch statement below.
+ // To avoid generating unnecessary values, we don't create a new value but
+ // instead leave it to the specific analysis to do this if desired.
+ if (S->isCompoundAssignmentOp())
+ propagateStorageLocation(*S->getLHS(), *S, Env);
+
switch (S->getOpcode()) {
case BO_Assign: {
auto *LHSLoc = Env.getStorageLocation(*LHS);
@@ -365,6 +389,25 @@ public:
Env.setValue(*S, Env.makeNot(*SubExprVal));
break;
}
+ case UO_PreInc:
+ case UO_PreDec:
+ // Propagate the storage location and clear out any value associated with
+ // it (to represent the fact that the value has definitely changed).
+ // To avoid generating unnecessary values, we leave it to the specific
+ // analysis to create a new value if desired.
+ propagateStorageLocation(*S->getSubExpr(), *S, Env);
+ if (StorageLocation *Loc = Env.getStorageLocation(*S->getSubExpr()))
+ Env.clearValue(*Loc);
+ break;
+ case UO_PostInc:
+ case UO_PostDec:
+ // Propagate the old value, then clear out any value associated with the
+ // storage location (to represent the fact that the value has definitely
+ // changed). See above for rationale.
+ propagateValue(*S->getSubExpr(), *S, Env);
+ if (StorageLocation *Loc = Env.getStorageLocation(*S->getSubExpr()))
+ Env.clearValue(*Loc);
+ break;
default:
break;
}
@@ -401,6 +444,9 @@ public:
return;
if (Ret->isPRValue()) {
+ if (Ret->getType()->isRecordType())
+ return;
+
auto *Val = Env.getValue(*Ret);
if (Val == nullptr)
return;
@@ -450,9 +496,27 @@ public:
Env.setStorageLocation(*S, *MemberLoc);
}
+ void VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *S) {
+ const Expr *ArgExpr = S->getExpr();
+ assert(ArgExpr != nullptr);
+ propagateValueOrStorageLocation(*ArgExpr, *S, Env);
+
+ if (S->isPRValue() && S->getType()->isRecordType()) {
+ auto &Loc = Env.getResultObjectLocation(*S);
+ Env.initializeFieldsWithValues(Loc);
+ }
+ }
+
void VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *S) {
const Expr *InitExpr = S->getExpr();
assert(InitExpr != nullptr);
+
+ // If this is a prvalue of record type, the handler for `*InitExpr` (if one
+ // exists) will initialize the result object; there is no value to propagate
+ // here.
+ if (S->getType()->isRecordType() && S->isPRValue())
+ return;
+
propagateValueOrStorageLocation(*InitExpr, *S, Env);
}
@@ -460,6 +524,16 @@ public:
const CXXConstructorDecl *ConstructorDecl = S->getConstructor();
assert(ConstructorDecl != nullptr);
+ // `CXXConstructExpr` can have array type if default-initializing an array
+ // of records. We don't handle this specifically beyond potentially inlining
+ // the call.
+ if (!S->getType()->isRecordType()) {
+ transferInlineCall(S, ConstructorDecl);
+ return;
+ }
+
+ RecordStorageLocation &Loc = Env.getResultObjectLocation(*S);
+
if (ConstructorDecl->isCopyOrMoveConstructor()) {
// It is permissible for a copy/move constructor to have additional
// parameters as long as they have default arguments defined for them.
@@ -472,24 +546,14 @@ public:
if (ArgLoc == nullptr)
return;
- if (S->isElidable()) {
- if (Value *Val = Env.getValue(*ArgLoc))
- Env.setValue(*S, *Val);
- } else {
- auto &Val = *cast<RecordValue>(Env.createValue(S->getType()));
- Env.setValue(*S, Val);
- copyRecord(*ArgLoc, Val.getLoc(), Env);
- }
+ // Even if the copy/move constructor call is elidable, we choose to copy
+ // the record in all cases (which isn't wrong, just potentially not
+ // optimal).
+ copyRecord(*ArgLoc, Loc, Env);
return;
}
- // `CXXConstructExpr` can have array type if default-initializing an array
- // of records, and we currently can't create values for arrays. So check if
- // we've got a record type.
- if (S->getType()->isRecordType()) {
- auto &InitialVal = *cast<RecordValue>(Env.createValue(S->getType()));
- Env.setValue(*S, InitialVal);
- }
+ Env.initializeFieldsWithValues(Loc, S->getType());
transferInlineCall(S, ConstructorDecl);
}
@@ -515,8 +579,7 @@ public:
RecordStorageLocation *LocSrc = nullptr;
if (Arg1->isPRValue()) {
- if (auto *Val = Env.get<RecordValue>(*Arg1))
- LocSrc = &Val->getLoc();
+ LocSrc = &Env.getResultObjectLocation(*Arg1);
} else {
LocSrc = Env.get<RecordStorageLocation>(*Arg1);
}
@@ -525,32 +588,36 @@ public:
if (LocSrc == nullptr || LocDst == nullptr)
return;
- // The assignment operators are different from the type of the destination
- // in this model (i.e. in one of their base classes). This must be very
- // rare and we just bail.
- if (Method->getFunctionObjectParameterType()
- .getCanonicalType()
- .getUnqualifiedType() !=
- LocDst->getType().getCanonicalType().getUnqualifiedType())
- return;
-
copyRecord(*LocSrc, *LocDst, Env);
- Env.setStorageLocation(*S, *LocDst);
- }
- }
- void VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *S) {
- if (S->getCastKind() == CK_ConstructorConversion) {
- const Expr *SubExpr = S->getSubExpr();
- assert(SubExpr != nullptr);
+ // The assignment operator can have an arbitrary return type. We model the
+ // return value only if the return type is the same as or a base class of
+ // the destination type.
+ if (S->getType().getCanonicalType().getUnqualifiedType() !=
+ LocDst->getType().getCanonicalType().getUnqualifiedType()) {
+ auto ReturnDecl = S->getType()->getAsCXXRecordDecl();
+ auto DstDecl = LocDst->getType()->getAsCXXRecordDecl();
+ if (ReturnDecl == nullptr || DstDecl == nullptr)
+ return;
+ if (!DstDecl->isDerivedFrom(ReturnDecl))
+ return;
+ }
+
+ if (S->isGLValue())
+ Env.setStorageLocation(*S, *LocDst);
+ else
+ copyRecord(*LocDst, Env.getResultObjectLocation(*S), Env);
- propagateValue(*SubExpr, *S, Env);
+ return;
}
+
+ // `CXXOperatorCallExpr` can be a prvalue. Call `VisitCallExpr`() to
+ // initialize the prvalue's fields with values.
+ VisitCallExpr(S);
}
- void VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *S) {
- if (Value *Val = Env.createValue(S->getType()))
- Env.setValue(*S, *Val);
+ void VisitCXXRewrittenBinaryOperator(const CXXRewrittenBinaryOperator *RBO) {
+ propagateValue(*RBO->getSemanticForm(), *RBO, Env);
}
void VisitCallExpr(const CallExpr *S) {
@@ -580,13 +647,12 @@ public:
} else if (const FunctionDecl *F = S->getDirectCallee()) {
transferInlineCall(S, F);
- // If this call produces a prvalue of record type, make sure that we have
- // a `RecordValue` for it. This is required so that
- // `Environment::getResultObjectLocation()` is able to return a location
- // for this `CallExpr`.
- if (S->getType()->isRecordType() && S->isPRValue())
- if (Env.getValue(*S) == nullptr)
- refreshRecordValue(*S, Env);
+ // If this call produces a prvalue of record type, initialize its fields
+ // with values.
+ if (S->getType()->isRecordType() && S->isPRValue()) {
+ RecordStorageLocation &Loc = Env.getResultObjectLocation(*S);
+ Env.initializeFieldsWithValues(Loc);
+ }
}
}
@@ -594,18 +660,16 @@ public:
const Expr *SubExpr = S->getSubExpr();
assert(SubExpr != nullptr);
- Value *SubExprVal = Env.getValue(*SubExpr);
- if (SubExprVal == nullptr)
- return;
+ StorageLocation &Loc = Env.createStorageLocation(*S);
+ Env.setStorageLocation(*S, Loc);
- if (RecordValue *RecordVal = dyn_cast<RecordValue>(SubExprVal)) {
- Env.setStorageLocation(*S, RecordVal->getLoc());
+ if (SubExpr->getType()->isRecordType())
+ // Nothing else left to do -- we initialized the record when transferring
+ // `SubExpr`.
return;
- }
- StorageLocation &Loc = Env.createStorageLocation(*S);
- Env.setValue(Loc, *SubExprVal);
- Env.setStorageLocation(*S, Loc);
+ if (Value *SubExprVal = Env.getValue(*SubExpr))
+ Env.setValue(Loc, *SubExprVal);
}
void VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *S) {
@@ -625,107 +689,105 @@ public:
}
void VisitConditionalOperator(const ConditionalOperator *S) {
- // FIXME: Revisit this once flow conditions are added to the framework. For
- // `a = b ? c : d` we can add `b => a == c && !b => a == d` to the flow
- // condition.
- // When we do this, we will need to retrieve the values of the operands from
- // the environments for the basic blocks they are computed in, in a similar
- // way to how this is done for short-circuited logical operators in
- // `getLogicOperatorSubExprValue()`.
- if (S->isGLValue())
- Env.setStorageLocation(*S, Env.createObject(S->getType()));
- else if (Value *Val = Env.createValue(S->getType()))
- Env.setValue(*S, *Val);
+ const Environment *TrueEnv = StmtToEnv.getEnvironment(*S->getTrueExpr());
+ const Environment *FalseEnv = StmtToEnv.getEnvironment(*S->getFalseExpr());
+
+ if (TrueEnv == nullptr || FalseEnv == nullptr) {
+ // If the true or false branch is dead, we may not have an environment for
+ // it. We could handle this specifically by forwarding the value or
+ // location of the live branch, but this case is rare enough that this
+ // probably isn't worth the additional complexity.
+ return;
+ }
+
+ if (S->isGLValue()) {
+ StorageLocation *TrueLoc = TrueEnv->getStorageLocation(*S->getTrueExpr());
+ StorageLocation *FalseLoc =
+ FalseEnv->getStorageLocation(*S->getFalseExpr());
+ if (TrueLoc == FalseLoc && TrueLoc != nullptr)
+ Env.setStorageLocation(*S, *TrueLoc);
+ } else if (!S->getType()->isRecordType()) {
+ // The conditional operator can evaluate to either of the values of the
+ // two branches. To model this, join these two values together to yield
+ // the result of the conditional operator.
+ // Note: Most joins happen in `computeBlockInputState()`, but this case is
+ // different:
+ // - `computeBlockInputState()` (which in turn calls `Environment::join()`
+ // joins values associated with the _same_ expression or storage
+ // location, then associates the joined value with that expression or
+ // storage location. This join has nothing to do with transfer --
+ // instead, it joins together the results of performing transfer on two
+ // different blocks.
+ // - Here, we join values associated with _different_ expressions (the
+ // true and false branch), then associate the joined value with a third
+ // expression (the conditional operator itself). This join is what it
+ // means to perform transfer on the conditional operator.
+ if (Value *Val = Environment::joinValues(
+ S->getType(), TrueEnv->getValue(*S->getTrueExpr()), *TrueEnv,
+ FalseEnv->getValue(*S->getFalseExpr()), *FalseEnv, Env, Model))
+ Env.setValue(*S, *Val);
+ }
}
void VisitInitListExpr(const InitListExpr *S) {
QualType Type = S->getType();
- if (!Type->isStructureOrClassType()) {
- if (auto *Val = Env.createValue(Type))
- Env.setValue(*S, *Val);
-
+ if (!Type->isRecordType()) {
+ // Until array initialization is implemented, we skip arrays and don't
+ // need to care about cases where `getNumInits() > 1`.
+ if (!Type->isArrayType() && S->getNumInits() == 1)
+ propagateValueOrStorageLocation(*S->getInit(0), *S, Env);
return;
}
- // In case the initializer list is transparent, we just need to propagate
- // the value that it contains.
- if (S->isSemanticForm() && S->isTransparent()) {
- propagateValue(*S->getInit(0), *S, Env);
+ // If the initializer list is transparent, there's nothing to do.
+ if (S->isSemanticForm() && S->isTransparent())
return;
- }
- llvm::DenseMap<const ValueDecl *, StorageLocation *> FieldLocs;
+ RecordStorageLocation &Loc = Env.getResultObjectLocation(*S);
- // This only contains the direct fields for the given type.
- std::vector<FieldDecl *> FieldsForInit =
- getFieldsForInitListExpr(Type->getAsRecordDecl());
+ // Initialization of base classes and fields of record type happens when we
+ // visit the nested `CXXConstructExpr` or `InitListExpr` for that base class
+ // or field. We therefore only need to deal with fields of non-record type
+ // here.
- // `S->inits()` contains all the initializer epressions, including the
- // ones for direct base classes.
- auto Inits = S->inits();
- size_t InitIdx = 0;
+ RecordInitListHelper InitListHelper(S);
- // Initialize base classes.
- if (auto* R = S->getType()->getAsCXXRecordDecl()) {
- assert(FieldsForInit.size() + R->getNumBases() == Inits.size());
- for ([[maybe_unused]] const CXXBaseSpecifier &Base : R->bases()) {
- assert(InitIdx < Inits.size());
- auto Init = Inits[InitIdx++];
- assert(Base.getType().getCanonicalType() ==
+ for (auto [Field, Init] : InitListHelper.field_inits()) {
+ if (Field->getType()->isRecordType())
+ continue;
+ if (Field->getType()->isReferenceType()) {
+ assert(Field->getType().getCanonicalType()->getPointeeType() ==
Init->getType().getCanonicalType());
- auto *BaseVal = Env.get<RecordValue>(*Init);
- if (!BaseVal)
- BaseVal = cast<RecordValue>(Env.createValue(Init->getType()));
- // Take ownership of the fields of the `RecordValue` for the base class
- // and incorporate them into the "flattened" set of fields for the
- // derived class.
- auto Children = BaseVal->getLoc().children();
- FieldLocs.insert(Children.begin(), Children.end());
+ Loc.setChild(*Field, &Env.createObject(Field->getType(), Init));
+ continue;
}
+ assert(Field->getType().getCanonicalType().getUnqualifiedType() ==
+ Init->getType().getCanonicalType().getUnqualifiedType());
+ StorageLocation *FieldLoc = Loc.getChild(*Field);
+ // Locations for non-reference fields must always be non-null.
+ assert(FieldLoc != nullptr);
+ Value *Val = Env.getValue(*Init);
+ if (Val == nullptr && isa<ImplicitValueInitExpr>(Init) &&
+ Init->getType()->isPointerType())
+ Val =
+ &Env.getOrCreateNullPointerValue(Init->getType()->getPointeeType());
+ if (Val == nullptr)
+ Val = Env.createValue(Field->getType());
+ if (Val != nullptr)
+ Env.setValue(*FieldLoc, *Val);
}
- assert(FieldsForInit.size() == Inits.size() - InitIdx);
- for (auto Field : FieldsForInit) {
- assert(InitIdx < Inits.size());
- auto Init = Inits[InitIdx++];
- assert(
- // The types are same, or
- Field->getType().getCanonicalType().getUnqualifiedType() ==
- Init->getType().getCanonicalType().getUnqualifiedType() ||
- // The field's type is T&, and initializer is T
- (Field->getType()->isReferenceType() &&
- Field->getType().getCanonicalType()->getPointeeType() ==
- Init->getType().getCanonicalType()));
- auto& Loc = Env.createObject(Field->getType(), Init);
- FieldLocs.insert({Field, &Loc});
- }
-
- // Check that we satisfy the invariant that a `RecordStorageLoation`
- // contains exactly the set of modeled fields for that type.
- // `ModeledFields` includes fields from all the bases, but only the
- // modeled ones. However, if a class type is initialized with an
- // `InitListExpr`, all fields in the class, including those from base
- // classes, are included in the set of modeled fields. The code above
- // should therefore populate exactly the modeled fields.
- assert(containsSameFields(
- Env.getDataflowAnalysisContext().getModeledFields(Type), FieldLocs));
-
- RecordStorageLocation::SyntheticFieldMap SyntheticFieldLocs;
- for (const auto &Entry :
- Env.getDataflowAnalysisContext().getSyntheticFields(Type)) {
- SyntheticFieldLocs.insert(
- {Entry.getKey(), &Env.createObject(Entry.getValue())});
+ for (const auto &[FieldName, FieldLoc] : Loc.synthetic_fields()) {
+ QualType FieldType = FieldLoc->getType();
+ if (FieldType->isRecordType()) {
+ Env.initializeFieldsWithValues(*cast<RecordStorageLocation>(FieldLoc));
+ } else {
+ if (Value *Val = Env.createValue(FieldType))
+ Env.setValue(*FieldLoc, *Val);
+ }
}
- auto &Loc = Env.getDataflowAnalysisContext().createRecordStorageLocation(
- Type, std::move(FieldLocs), std::move(SyntheticFieldLocs));
- RecordValue &RecordVal = Env.create<RecordValue>(Loc);
-
- Env.setValue(Loc, RecordVal);
-
- Env.setValue(*S, RecordVal);
-
// FIXME: Implement array initialization.
}
@@ -790,27 +852,26 @@ private:
Env.canDescend(Options.ContextSensitiveOpts->Depth, F)))
return;
- const ControlFlowContext *CFCtx =
- Env.getDataflowAnalysisContext().getControlFlowContext(F);
- if (!CFCtx)
+ const AdornedCFG *ACFG = Env.getDataflowAnalysisContext().getAdornedCFG(F);
+ if (!ACFG)
return;
// FIXME: We don't support context-sensitive analysis of recursion, so
// we should return early here if `F` is the same as the `FunctionDecl`
// holding `S` itself.
- auto ExitBlock = CFCtx->getCFG().getExit().getBlockID();
+ auto ExitBlock = ACFG->getCFG().getExit().getBlockID();
auto CalleeEnv = Env.pushCall(S);
// FIXME: Use the same analysis as the caller for the callee. Note,
// though, that doing so would require support for changing the analysis's
// ASTContext.
- auto Analysis = NoopAnalysis(CFCtx->getDecl().getASTContext(),
+ auto Analysis = NoopAnalysis(ACFG->getDecl().getASTContext(),
DataflowAnalysisOptions{Options});
auto BlockToOutputState =
- dataflow::runDataflowAnalysis(*CFCtx, Analysis, CalleeEnv);
+ dataflow::runDataflowAnalysis(*ACFG, Analysis, CalleeEnv);
assert(BlockToOutputState);
assert(ExitBlock < BlockToOutputState->size());
@@ -822,12 +883,14 @@ private:
const StmtToEnvMap &StmtToEnv;
Environment &Env;
+ Environment::ValueModel &Model;
};
} // namespace
-void transfer(const StmtToEnvMap &StmtToEnv, const Stmt &S, Environment &Env) {
- TransferVisitor(StmtToEnv, Env).Visit(&S);
+void transfer(const StmtToEnvMap &StmtToEnv, const Stmt &S, Environment &Env,
+ Environment::ValueModel &Model) {
+ TransferVisitor(StmtToEnv, Env, Model).Visit(&S);
}
} // namespace dataflow
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
index 49e425bde66a..200682faafd6 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#include <algorithm>
#include <optional>
#include <system_error>
#include <utility>
@@ -33,7 +32,6 @@
#include "clang/Analysis/FlowSensitive/Value.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallBitVector.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Error.h"
@@ -64,106 +62,44 @@ static bool isBackedgeNode(const CFGBlock &B) {
namespace {
-// The return type of the visit functions in TerminatorVisitor. The first
-// element represents the terminator expression (that is the conditional
-// expression in case of a path split in the CFG). The second element
-// represents whether the condition was true or false.
-using TerminatorVisitorRetTy = std::pair<const Expr *, bool>;
-
-/// Extends the flow condition of an environment based on a terminator
-/// statement.
+/// Extracts the terminator's condition expression.
class TerminatorVisitor
- : public ConstStmtVisitor<TerminatorVisitor, TerminatorVisitorRetTy> {
+ : public ConstStmtVisitor<TerminatorVisitor, const Expr *> {
public:
- TerminatorVisitor(Environment &Env, int BlockSuccIdx)
- : Env(Env), BlockSuccIdx(BlockSuccIdx) {}
-
- TerminatorVisitorRetTy VisitIfStmt(const IfStmt *S) {
- auto *Cond = S->getCond();
- assert(Cond != nullptr);
- return extendFlowCondition(*Cond);
- }
-
- TerminatorVisitorRetTy VisitWhileStmt(const WhileStmt *S) {
- auto *Cond = S->getCond();
- assert(Cond != nullptr);
- return extendFlowCondition(*Cond);
- }
-
- TerminatorVisitorRetTy VisitDoStmt(const DoStmt *S) {
- auto *Cond = S->getCond();
- assert(Cond != nullptr);
- return extendFlowCondition(*Cond);
- }
-
- TerminatorVisitorRetTy VisitForStmt(const ForStmt *S) {
- auto *Cond = S->getCond();
- if (Cond != nullptr)
- return extendFlowCondition(*Cond);
- return {nullptr, false};
- }
-
- TerminatorVisitorRetTy VisitCXXForRangeStmt(const CXXForRangeStmt *) {
+ TerminatorVisitor() = default;
+ const Expr *VisitIfStmt(const IfStmt *S) { return S->getCond(); }
+ const Expr *VisitWhileStmt(const WhileStmt *S) { return S->getCond(); }
+ const Expr *VisitDoStmt(const DoStmt *S) { return S->getCond(); }
+ const Expr *VisitForStmt(const ForStmt *S) { return S->getCond(); }
+ const Expr *VisitCXXForRangeStmt(const CXXForRangeStmt *) {
// Don't do anything special for CXXForRangeStmt, because the condition
// (being implicitly generated) isn't visible from the loop body.
- return {nullptr, false};
+ return nullptr;
}
-
- TerminatorVisitorRetTy VisitBinaryOperator(const BinaryOperator *S) {
+ const Expr *VisitBinaryOperator(const BinaryOperator *S) {
assert(S->getOpcode() == BO_LAnd || S->getOpcode() == BO_LOr);
- auto *LHS = S->getLHS();
- assert(LHS != nullptr);
- return extendFlowCondition(*LHS);
+ return S->getLHS();
}
-
- TerminatorVisitorRetTy
- VisitConditionalOperator(const ConditionalOperator *S) {
- auto *Cond = S->getCond();
- assert(Cond != nullptr);
- return extendFlowCondition(*Cond);
+ const Expr *VisitConditionalOperator(const ConditionalOperator *S) {
+ return S->getCond();
}
-
-private:
- TerminatorVisitorRetTy extendFlowCondition(const Expr &Cond) {
- auto *Val = Env.get<BoolValue>(Cond);
- // In transferCFGBlock(), we ensure that we always have a `Value` for the
- // terminator condition, so assert this.
- // We consciously assert ourselves instead of asserting via `cast()` so
- // that we get a more meaningful line number if the assertion fails.
- assert(Val != nullptr);
-
- bool ConditionValue = true;
- // The condition must be inverted for the successor that encompasses the
- // "else" branch, if such exists.
- if (BlockSuccIdx == 1) {
- Val = &Env.makeNot(*Val);
- ConditionValue = false;
- }
-
- Env.assume(Val->formula());
- return {&Cond, ConditionValue};
- }
-
- Environment &Env;
- int BlockSuccIdx;
};
/// Holds data structures required for running dataflow analysis.
struct AnalysisContext {
- AnalysisContext(const ControlFlowContext &CFCtx,
- TypeErasedDataflowAnalysis &Analysis,
+ AnalysisContext(const AdornedCFG &ACFG, TypeErasedDataflowAnalysis &Analysis,
const Environment &InitEnv,
llvm::ArrayRef<std::optional<TypeErasedDataflowAnalysisState>>
BlockStates)
- : CFCtx(CFCtx), Analysis(Analysis), InitEnv(InitEnv),
+ : ACFG(ACFG), Analysis(Analysis), InitEnv(InitEnv),
Log(*InitEnv.getDataflowAnalysisContext().getOptions().Log),
BlockStates(BlockStates) {
- Log.beginAnalysis(CFCtx, Analysis);
+ Log.beginAnalysis(ACFG, Analysis);
}
~AnalysisContext() { Log.endAnalysis(); }
/// Contains the CFG being analyzed.
- const ControlFlowContext &CFCtx;
+ const AdornedCFG &ACFG;
/// The analysis to be run.
TypeErasedDataflowAnalysis &Analysis;
/// Initial state to start the analysis.
@@ -176,19 +112,19 @@ struct AnalysisContext {
class PrettyStackTraceAnalysis : public llvm::PrettyStackTraceEntry {
public:
- PrettyStackTraceAnalysis(const ControlFlowContext &CFCtx, const char *Message)
- : CFCtx(CFCtx), Message(Message) {}
+ PrettyStackTraceAnalysis(const AdornedCFG &ACFG, const char *Message)
+ : ACFG(ACFG), Message(Message) {}
void print(raw_ostream &OS) const override {
OS << Message << "\n";
OS << "Decl:\n";
- CFCtx.getDecl().dump(OS);
+ ACFG.getDecl().dump(OS);
OS << "CFG:\n";
- CFCtx.getCFG().print(OS, LangOptions(), false);
+ ACFG.getCFG().print(OS, LangOptions(), false);
}
private:
- const ControlFlowContext &CFCtx;
+ const AdornedCFG &ACFG;
const char *Message;
};
@@ -221,6 +157,7 @@ private:
// Avoids unneccesary copies of the environment.
class JoinedStateBuilder {
AnalysisContext &AC;
+ Environment::ExprJoinBehavior JoinBehavior;
std::vector<const TypeErasedDataflowAnalysisState *> All;
std::deque<TypeErasedDataflowAnalysisState> Owned;
@@ -228,11 +165,13 @@ class JoinedStateBuilder {
join(const TypeErasedDataflowAnalysisState &L,
const TypeErasedDataflowAnalysisState &R) {
return {AC.Analysis.joinTypeErased(L.Lattice, R.Lattice),
- Environment::join(L.Env, R.Env, AC.Analysis)};
+ Environment::join(L.Env, R.Env, AC.Analysis, JoinBehavior)};
}
public:
- JoinedStateBuilder(AnalysisContext &AC) : AC(AC) {}
+ JoinedStateBuilder(AnalysisContext &AC,
+ Environment::ExprJoinBehavior JoinBehavior)
+ : AC(AC), JoinBehavior(JoinBehavior) {}
void addOwned(TypeErasedDataflowAnalysisState State) {
Owned.push_back(std::move(State));
@@ -248,12 +187,12 @@ public:
// initialize the state of each basic block differently.
return {AC.Analysis.typeErasedInitialElement(), AC.InitEnv.fork()};
if (All.size() == 1)
- // Join the environment with itself so that we discard the entries from
- // `ExprToLoc` and `ExprToVal`.
+ // Join the environment with itself so that we discard expression state if
+ // desired.
// FIXME: We could consider writing special-case code for this that only
// does the discarding, but it's not clear if this is worth it.
- return {All[0]->Lattice,
- Environment::join(All[0]->Env, All[0]->Env, AC.Analysis)};
+ return {All[0]->Lattice, Environment::join(All[0]->Env, All[0]->Env,
+ AC.Analysis, JoinBehavior)};
auto Result = join(*All[0], *All[1]);
for (unsigned I = 2; I < All.size(); ++I)
@@ -261,9 +200,13 @@ public:
return Result;
}
};
-
} // namespace
+static const Expr *getTerminatorCondition(const Stmt *TerminatorStmt) {
+ return TerminatorStmt == nullptr ? nullptr
+ : TerminatorVisitor().Visit(TerminatorStmt);
+}
+
/// Computes the input state for a given basic block by joining the output
/// states of its predecessors.
///
@@ -300,14 +243,29 @@ computeBlockInputState(const CFGBlock &Block, AnalysisContext &AC) {
// See `NoreturnDestructorTest` for concrete examples.
if (Block.succ_begin()->getReachableBlock() != nullptr &&
Block.succ_begin()->getReachableBlock()->hasNoReturnElement()) {
- auto &StmtToBlock = AC.CFCtx.getStmtToBlock();
+ auto &StmtToBlock = AC.ACFG.getStmtToBlock();
auto StmtBlock = StmtToBlock.find(Block.getTerminatorStmt());
assert(StmtBlock != StmtToBlock.end());
llvm::erase(Preds, StmtBlock->getSecond());
}
}
- JoinedStateBuilder Builder(AC);
+ // If any of the predecessor blocks contains an expression consumed in a
+ // different block, we need to keep expression state.
+ // Note that in this case, we keep expression state for all predecessors,
+ // rather than only those predecessors that actually contain an expression
+ // consumed in a different block. While this is potentially suboptimal, it's
+ // actually likely, if we have control flow within a full expression, that
+ // all predecessors have expression state consumed in a different block.
+ Environment::ExprJoinBehavior JoinBehavior = Environment::DiscardExprState;
+ for (const CFGBlock *Pred : Preds) {
+ if (Pred && AC.ACFG.containsExprConsumedInDifferentBlock(*Pred)) {
+ JoinBehavior = Environment::KeepExprState;
+ break;
+ }
+ }
+
+ JoinedStateBuilder Builder(AC, JoinBehavior);
for (const CFGBlock *Pred : Preds) {
// Skip if the `Block` is unreachable or control flow cannot get past it.
if (!Pred || Pred->hasNoReturnElement())
@@ -320,25 +278,32 @@ computeBlockInputState(const CFGBlock &Block, AnalysisContext &AC) {
if (!MaybePredState)
continue;
+ const TypeErasedDataflowAnalysisState &PredState = *MaybePredState;
+ const Expr *Cond = getTerminatorCondition(Pred->getTerminatorStmt());
+ if (Cond == nullptr) {
+ Builder.addUnowned(PredState);
+ continue;
+ }
+
+ bool BranchVal = blockIndexInPredecessor(*Pred, Block) == 0;
+
+ // `transferBranch` may need to mutate the environment to describe the
+ // dynamic effect of the terminator for a given branch. Copy now.
+ TypeErasedDataflowAnalysisState Copy = MaybePredState->fork();
if (AC.Analysis.builtinOptions()) {
- if (const Stmt *PredTerminatorStmt = Pred->getTerminatorStmt()) {
- // We have a terminator: we need to mutate an environment to describe
- // when the terminator is taken. Copy now.
- TypeErasedDataflowAnalysisState Copy = MaybePredState->fork();
-
- auto [Cond, CondValue] =
- TerminatorVisitor(Copy.Env, blockIndexInPredecessor(*Pred, Block))
- .Visit(PredTerminatorStmt);
- if (Cond != nullptr)
- // FIXME: Call transferBranchTypeErased even if BuiltinTransferOpts
- // are not set.
- AC.Analysis.transferBranchTypeErased(CondValue, Cond, Copy.Lattice,
- Copy.Env);
- Builder.addOwned(std::move(Copy));
- continue;
- }
+ auto *CondVal = Copy.Env.get<BoolValue>(*Cond);
+ // In transferCFGBlock(), we ensure that we always have a `Value`
+ // for the terminator condition, so assert this. We consciously
+ // assert ourselves instead of asserting via `cast()` so that we get
+ // a more meaningful line number if the assertion fails.
+ assert(CondVal != nullptr);
+ BoolValue *AssertedVal =
+ BranchVal ? CondVal : &Copy.Env.makeNot(*CondVal);
+ Copy.Env.assume(AssertedVal->formula());
}
- Builder.addUnowned(*MaybePredState);
+ AC.Analysis.transferBranchTypeErased(BranchVal, Cond, Copy.Lattice,
+ Copy.Env);
+ Builder.addOwned(std::move(Copy));
}
return std::move(Builder).take();
}
@@ -350,8 +315,8 @@ builtinTransferStatement(unsigned CurBlockID, const CFGStmt &Elt,
AnalysisContext &AC) {
const Stmt *S = Elt.getStmt();
assert(S != nullptr);
- transfer(StmtToEnvMap(AC.CFCtx, AC.BlockStates, CurBlockID, InputState), *S,
- InputState.Env);
+ transfer(StmtToEnvMap(AC.ACFG, AC.BlockStates, CurBlockID, InputState), *S,
+ InputState.Env, AC.Analysis);
}
/// Built-in transfer function for `CFGInitializer`.
@@ -388,7 +353,6 @@ builtinTransferInitializer(const CFGInitializer &Elt,
}
}
assert(Member != nullptr);
- assert(MemberLoc != nullptr);
// FIXME: Instead of these case distinctions, we would ideally want to be able
// to simply use `Environment::createObject()` here, the same way that we do
@@ -403,18 +367,12 @@ builtinTransferInitializer(const CFGInitializer &Elt,
return;
ParentLoc->setChild(*Member, InitExprLoc);
- } else if (auto *InitExprVal = Env.getValue(*InitExpr)) {
- if (Member->getType()->isRecordType()) {
- auto *InitValStruct = cast<RecordValue>(InitExprVal);
- // FIXME: Rather than performing a copy here, we should really be
- // initializing the field in place. This would require us to propagate the
- // storage location of the field to the AST node that creates the
- // `RecordValue`.
- copyRecord(InitValStruct->getLoc(),
- *cast<RecordStorageLocation>(MemberLoc), Env);
- } else {
+ // Record-type initializers construct themselves directly into the result
+ // object, so there is no need to handle them here.
+ } else if (!Member->getType()->isRecordType()) {
+ assert(MemberLoc != nullptr);
+ if (auto *InitExprVal = Env.getValue(*InitExpr))
Env.setValue(*MemberLoc, *InitExprVal);
- }
}
}
@@ -453,10 +411,9 @@ static void builtinTransfer(unsigned CurBlockID, const CFGElement &Elt,
/// by the user-specified analysis.
static TypeErasedDataflowAnalysisState
transferCFGBlock(const CFGBlock &Block, AnalysisContext &AC,
- std::function<void(const CFGElement &,
- const TypeErasedDataflowAnalysisState &)>
- PostVisitCFG = nullptr) {
- AC.Log.enterBlock(Block, PostVisitCFG != nullptr);
+ const CFGEltCallbacksTypeErased &PostAnalysisCallbacks = {}) {
+ AC.Log.enterBlock(Block, PostAnalysisCallbacks.Before != nullptr ||
+ PostAnalysisCallbacks.After != nullptr);
auto State = computeBlockInputState(Block, AC);
AC.Log.recordState(State);
int ElementIdx = 1;
@@ -465,6 +422,11 @@ transferCFGBlock(const CFGBlock &Block, AnalysisContext &AC,
ElementIdx++, "transferCFGBlock");
AC.Log.enterElement(Element);
+
+ if (PostAnalysisCallbacks.Before) {
+ PostAnalysisCallbacks.Before(Element, State);
+ }
+
// Built-in analysis
if (AC.Analysis.builtinOptions()) {
builtinTransfer(Block.getBlockID(), Element, State, AC);
@@ -473,10 +435,10 @@ transferCFGBlock(const CFGBlock &Block, AnalysisContext &AC,
// User-provided analysis
AC.Analysis.transferTypeErased(Element, State.Lattice, State.Env);
- // Post processing
- if (PostVisitCFG) {
- PostVisitCFG(Element, State);
+ if (PostAnalysisCallbacks.After) {
+ PostAnalysisCallbacks.After(Element, State);
}
+
AC.Log.recordState(State);
}
@@ -493,9 +455,8 @@ transferCFGBlock(const CFGBlock &Block, AnalysisContext &AC,
// takes a `CFGElement` as input, but some expressions only show up as a
// terminator condition, but not as a `CFGElement`. The condition of an if
// statement is one such example.
- transfer(
- StmtToEnvMap(AC.CFCtx, AC.BlockStates, Block.getBlockID(), State),
- *TerminatorCond, State.Env);
+ transfer(StmtToEnvMap(AC.ACFG, AC.BlockStates, Block.getBlockID(), State),
+ *TerminatorCond, State.Env, AC.Analysis);
// If the transfer function didn't produce a value, create an atom so that
// we have *some* value for the condition expression. This ensures that
@@ -510,23 +471,21 @@ transferCFGBlock(const CFGBlock &Block, AnalysisContext &AC,
llvm::Expected<std::vector<std::optional<TypeErasedDataflowAnalysisState>>>
runTypeErasedDataflowAnalysis(
- const ControlFlowContext &CFCtx, TypeErasedDataflowAnalysis &Analysis,
+ const AdornedCFG &ACFG, TypeErasedDataflowAnalysis &Analysis,
const Environment &InitEnv,
- std::function<void(const CFGElement &,
- const TypeErasedDataflowAnalysisState &)>
- PostVisitCFG,
+ const CFGEltCallbacksTypeErased &PostAnalysisCallbacks,
std::int32_t MaxBlockVisits) {
- PrettyStackTraceAnalysis CrashInfo(CFCtx, "runTypeErasedDataflowAnalysis");
+ PrettyStackTraceAnalysis CrashInfo(ACFG, "runTypeErasedDataflowAnalysis");
std::optional<Environment> MaybeStartingEnv;
- if (InitEnv.callStackSize() == 1) {
+ if (InitEnv.callStackSize() == 0) {
MaybeStartingEnv = InitEnv.fork();
MaybeStartingEnv->initialize();
}
const Environment &StartingEnv =
MaybeStartingEnv ? *MaybeStartingEnv : InitEnv;
- const clang::CFG &CFG = CFCtx.getCFG();
+ const clang::CFG &CFG = ACFG.getCFG();
PostOrderCFGView POV(&CFG);
ForwardDataflowWorklist Worklist(CFG, &POV);
@@ -539,15 +498,7 @@ runTypeErasedDataflowAnalysis(
StartingEnv.fork()};
Worklist.enqueueSuccessors(&Entry);
- AnalysisContext AC(CFCtx, Analysis, StartingEnv, BlockStates);
-
- // FIXME: remove relative cap. There isn't really any good setting for
- // `MaxAverageVisitsPerBlock`, so it has no clear value over using
- // `MaxBlockVisits` directly.
- static constexpr std::int32_t MaxAverageVisitsPerBlock = 4;
- const std::int32_t RelativeMaxBlockVisits =
- MaxAverageVisitsPerBlock * BlockStates.size();
- MaxBlockVisits = std::min(RelativeMaxBlockVisits, MaxBlockVisits);
+ AnalysisContext AC(ACFG, Analysis, StartingEnv, BlockStates);
std::int32_t BlockVisits = 0;
while (const CFGBlock *Block = Worklist.dequeue()) {
LLVM_DEBUG(llvm::dbgs()
@@ -604,12 +555,12 @@ runTypeErasedDataflowAnalysis(
// FIXME: Consider evaluating unreachable basic blocks (those that have a
// state set to `std::nullopt` at this point) to also analyze dead code.
- if (PostVisitCFG) {
- for (const CFGBlock *Block : CFCtx.getCFG()) {
+ if (PostAnalysisCallbacks.Before || PostAnalysisCallbacks.After) {
+ for (const CFGBlock *Block : ACFG.getCFG()) {
// Skip blocks that were not evaluated.
if (!BlockStates[Block->getBlockID()])
continue;
- transferCFGBlock(*Block, AC, PostVisitCFG);
+ transferCFGBlock(*Block, AC, PostAnalysisCallbacks);
}
}
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Value.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Value.cpp
index 7fad6deb0e91..d70e5a82ea23 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Value.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Value.cpp
@@ -46,8 +46,6 @@ raw_ostream &operator<<(raw_ostream &OS, const Value &Val) {
return OS << "Integer(@" << &Val << ")";
case Value::Kind::Pointer:
return OS << "Pointer(" << &cast<PointerValue>(Val).getPointeeLoc() << ")";
- case Value::Kind::Record:
- return OS << "Record(" << &cast<RecordValue>(Val).getLoc() << ")";
case Value::Kind::TopBool:
return OS << "TopBool(" << cast<TopBoolValue>(Val).getAtom() << ")";
case Value::Kind::AtomicBool:
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
index 3ef363753532..a39f0e0b29ad 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
@@ -12,105 +12,31 @@
//===----------------------------------------------------------------------===//
#include <cassert>
-#include <cstddef>
-#include <cstdint>
-#include <queue>
#include <vector>
+#include "clang/Analysis/FlowSensitive/CNFFormula.h"
#include "clang/Analysis/FlowSensitive/Formula.h"
#include "clang/Analysis/FlowSensitive/Solver.h"
#include "clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
namespace clang {
namespace dataflow {
-// `WatchedLiteralsSolver` is an implementation of Algorithm D from Knuth's
-// The Art of Computer Programming Volume 4: Satisfiability, Fascicle 6. It is
-// based on the backtracking DPLL algorithm [1], keeps references to a single
-// "watched" literal per clause, and uses a set of "active" variables to perform
-// unit propagation.
-//
-// The solver expects that its input is a boolean formula in conjunctive normal
-// form that consists of clauses of at least one literal. A literal is either a
-// boolean variable or its negation. Below we define types, data structures, and
-// utilities that are used to represent boolean formulas in conjunctive normal
-// form.
-//
-// [1] https://en.wikipedia.org/wiki/DPLL_algorithm
-
-/// Boolean variables are represented as positive integers.
-using Variable = uint32_t;
-
-/// A null boolean variable is used as a placeholder in various data structures
-/// and algorithms.
-static constexpr Variable NullVar = 0;
-
-/// Literals are represented as positive integers. Specifically, for a boolean
-/// variable `V` that is represented as the positive integer `I`, the positive
-/// literal `V` is represented as the integer `2*I` and the negative literal
-/// `!V` is represented as the integer `2*I+1`.
-using Literal = uint32_t;
-
-/// A null literal is used as a placeholder in various data structures and
-/// algorithms.
-[[maybe_unused]] static constexpr Literal NullLit = 0;
-
-/// Returns the positive literal `V`.
-static constexpr Literal posLit(Variable V) { return 2 * V; }
-
-static constexpr bool isPosLit(Literal L) { return 0 == (L & 1); }
-
-static constexpr bool isNegLit(Literal L) { return 1 == (L & 1); }
-
-/// Returns the negative literal `!V`.
-static constexpr Literal negLit(Variable V) { return 2 * V + 1; }
-
-/// Returns the negated literal `!L`.
-static constexpr Literal notLit(Literal L) { return L ^ 1; }
-
-/// Returns the variable of `L`.
-static constexpr Variable var(Literal L) { return L >> 1; }
-
-/// Clause identifiers are represented as positive integers.
-using ClauseID = uint32_t;
-
-/// A null clause identifier is used as a placeholder in various data structures
-/// and algorithms.
-static constexpr ClauseID NullClause = 0;
+namespace {
-/// A boolean formula in conjunctive normal form.
-struct CNFFormula {
- /// `LargestVar` is equal to the largest positive integer that represents a
- /// variable in the formula.
- const Variable LargestVar;
-
- /// Literals of all clauses in the formula.
- ///
- /// The element at index 0 stands for the literal in the null clause. It is
- /// set to 0 and isn't used. Literals of clauses in the formula start from the
- /// element at index 1.
- ///
- /// For example, for the formula `(L1 v L2) ^ (L2 v L3 v L4)` the elements of
- /// `Clauses` will be `[0, L1, L2, L2, L3, L4]`.
- std::vector<Literal> Clauses;
+class WatchedLiteralsSolverImpl {
+ /// Stores the variable identifier and Atom for atomic booleans in the
+ /// formula.
+ llvm::DenseMap<Variable, Atom> Atomics;
- /// Start indices of clauses of the formula in `Clauses`.
- ///
- /// The element at index 0 stands for the start index of the null clause. It
- /// is set to 0 and isn't used. Start indices of clauses in the formula start
- /// from the element at index 1.
- ///
- /// For example, for the formula `(L1 v L2) ^ (L2 v L3 v L4)` the elements of
- /// `ClauseStarts` will be `[0, 1, 3]`. Note that the literals of the first
- /// clause always start at index 1. The start index for the literals of the
- /// second clause depends on the size of the first clause and so on.
- std::vector<size_t> ClauseStarts;
+ /// A boolean formula in conjunctive normal form that the solver will attempt
+ /// to prove satisfiable. The formula will be modified in the process.
+ CNFFormula CNF;
/// Maps literals (indices of the vector) to clause identifiers (elements of
/// the vector) that watch the respective literals.
@@ -127,328 +53,6 @@ struct CNFFormula {
/// clauses in the formula start from the element at index 1.
std::vector<ClauseID> NextWatched;
- /// Stores the variable identifier and Atom for atomic booleans in the
- /// formula.
- llvm::DenseMap<Variable, Atom> Atomics;
-
- /// Indicates that we already know the formula is unsatisfiable.
- /// During construction, we catch simple cases of conflicting unit-clauses.
- bool KnownContradictory;
-
- explicit CNFFormula(Variable LargestVar,
- llvm::DenseMap<Variable, Atom> Atomics)
- : LargestVar(LargestVar), Atomics(std::move(Atomics)),
- KnownContradictory(false) {
- Clauses.push_back(0);
- ClauseStarts.push_back(0);
- NextWatched.push_back(0);
- const size_t NumLiterals = 2 * LargestVar + 1;
- WatchedHead.resize(NumLiterals + 1, 0);
- }
-
- /// Adds the `L1 v ... v Ln` clause to the formula.
- /// Requirements:
- ///
- /// `Li` must not be `NullLit`.
- ///
- /// All literals in the input that are not `NullLit` must be distinct.
- void addClause(ArrayRef<Literal> lits) {
- assert(!lits.empty());
- assert(llvm::all_of(lits, [](Literal L) { return L != NullLit; }));
-
- const ClauseID C = ClauseStarts.size();
- const size_t S = Clauses.size();
- ClauseStarts.push_back(S);
- Clauses.insert(Clauses.end(), lits.begin(), lits.end());
-
- // Designate the first literal as the "watched" literal of the clause.
- NextWatched.push_back(WatchedHead[lits.front()]);
- WatchedHead[lits.front()] = C;
- }
-
- /// Returns the number of literals in clause `C`.
- size_t clauseSize(ClauseID C) const {
- return C == ClauseStarts.size() - 1 ? Clauses.size() - ClauseStarts[C]
- : ClauseStarts[C + 1] - ClauseStarts[C];
- }
-
- /// Returns the literals of clause `C`.
- llvm::ArrayRef<Literal> clauseLiterals(ClauseID C) const {
- return llvm::ArrayRef<Literal>(&Clauses[ClauseStarts[C]], clauseSize(C));
- }
-};
-
-/// Applies simplifications while building up a BooleanFormula.
-/// We keep track of unit clauses, which tell us variables that must be
-/// true/false in any model that satisfies the overall formula.
-/// Such variables can be dropped from subsequently-added clauses, which
-/// may in turn yield more unit clauses or even a contradiction.
-/// The total added complexity of this preprocessing is O(N) where we
-/// for every clause, we do a lookup for each unit clauses.
-/// The lookup is O(1) on average. This method won't catch all
-/// contradictory formulas, more passes can in principle catch
-/// more cases but we leave all these and the general case to the
-/// proper SAT solver.
-struct CNFFormulaBuilder {
- // Formula should outlive CNFFormulaBuilder.
- explicit CNFFormulaBuilder(CNFFormula &CNF)
- : Formula(CNF) {}
-
- /// Adds the `L1 v ... v Ln` clause to the formula. Applies
- /// simplifications, based on single-literal clauses.
- ///
- /// Requirements:
- ///
- /// `Li` must not be `NullLit`.
- ///
- /// All literals must be distinct.
- void addClause(ArrayRef<Literal> Literals) {
- // We generate clauses with up to 3 literals in this file.
- assert(!Literals.empty() && Literals.size() <= 3);
- // Contains literals of the simplified clause.
- llvm::SmallVector<Literal> Simplified;
- for (auto L : Literals) {
- assert(L != NullLit &&
- llvm::all_of(Simplified,
- [L](Literal S) { return S != L; }));
- auto X = var(L);
- if (trueVars.contains(X)) { // X must be true
- if (isPosLit(L))
- return; // Omit clause `(... v X v ...)`, it is `true`.
- else
- continue; // Omit `!X` from `(... v !X v ...)`.
- }
- if (falseVars.contains(X)) { // X must be false
- if (isNegLit(L))
- return; // Omit clause `(... v !X v ...)`, it is `true`.
- else
- continue; // Omit `X` from `(... v X v ...)`.
- }
- Simplified.push_back(L);
- }
- if (Simplified.empty()) {
- // Simplification made the clause empty, which is equivalent to `false`.
- // We already know that this formula is unsatisfiable.
- Formula.KnownContradictory = true;
- // We can add any of the input literals to get an unsatisfiable formula.
- Formula.addClause(Literals[0]);
- return;
- }
- if (Simplified.size() == 1) {
- // We have new unit clause.
- const Literal lit = Simplified.front();
- const Variable v = var(lit);
- if (isPosLit(lit))
- trueVars.insert(v);
- else
- falseVars.insert(v);
- }
- Formula.addClause(Simplified);
- }
-
- /// Returns true if we observed a contradiction while adding clauses.
- /// In this case then the formula is already known to be unsatisfiable.
- bool isKnownContradictory() { return Formula.KnownContradictory; }
-
-private:
- CNFFormula &Formula;
- llvm::DenseSet<Variable> trueVars;
- llvm::DenseSet<Variable> falseVars;
-};
-
-/// Converts the conjunction of `Vals` into a formula in conjunctive normal
-/// form where each clause has at least one and at most three literals.
-CNFFormula buildCNF(const llvm::ArrayRef<const Formula *> &Vals) {
- // The general strategy of the algorithm implemented below is to map each
- // of the sub-values in `Vals` to a unique variable and use these variables in
- // the resulting CNF expression to avoid exponential blow up. The number of
- // literals in the resulting formula is guaranteed to be linear in the number
- // of sub-formulas in `Vals`.
-
- // Map each sub-formula in `Vals` to a unique variable.
- llvm::DenseMap<const Formula *, Variable> SubValsToVar;
- // Store variable identifiers and Atom of atomic booleans.
- llvm::DenseMap<Variable, Atom> Atomics;
- Variable NextVar = 1;
- {
- std::queue<const Formula *> UnprocessedSubVals;
- for (const Formula *Val : Vals)
- UnprocessedSubVals.push(Val);
- while (!UnprocessedSubVals.empty()) {
- Variable Var = NextVar;
- const Formula *Val = UnprocessedSubVals.front();
- UnprocessedSubVals.pop();
-
- if (!SubValsToVar.try_emplace(Val, Var).second)
- continue;
- ++NextVar;
-
- for (const Formula *F : Val->operands())
- UnprocessedSubVals.push(F);
- if (Val->kind() == Formula::AtomRef)
- Atomics[Var] = Val->getAtom();
- }
- }
-
- auto GetVar = [&SubValsToVar](const Formula *Val) {
- auto ValIt = SubValsToVar.find(Val);
- assert(ValIt != SubValsToVar.end());
- return ValIt->second;
- };
-
- CNFFormula CNF(NextVar - 1, std::move(Atomics));
- std::vector<bool> ProcessedSubVals(NextVar, false);
- CNFFormulaBuilder builder(CNF);
-
- // Add a conjunct for each variable that represents a top-level conjunction
- // value in `Vals`.
- for (const Formula *Val : Vals)
- builder.addClause(posLit(GetVar(Val)));
-
- // Add conjuncts that represent the mapping between newly-created variables
- // and their corresponding sub-formulas.
- std::queue<const Formula *> UnprocessedSubVals;
- for (const Formula *Val : Vals)
- UnprocessedSubVals.push(Val);
- while (!UnprocessedSubVals.empty()) {
- const Formula *Val = UnprocessedSubVals.front();
- UnprocessedSubVals.pop();
- const Variable Var = GetVar(Val);
-
- if (ProcessedSubVals[Var])
- continue;
- ProcessedSubVals[Var] = true;
-
- switch (Val->kind()) {
- case Formula::AtomRef:
- break;
- case Formula::Literal:
- CNF.addClause(Val->literal() ? posLit(Var) : negLit(Var));
- break;
- case Formula::And: {
- const Variable LHS = GetVar(Val->operands()[0]);
- const Variable RHS = GetVar(Val->operands()[1]);
-
- if (LHS == RHS) {
- // `X <=> (A ^ A)` is equivalent to `(!X v A) ^ (X v !A)` which is
- // already in conjunctive normal form. Below we add each of the
- // conjuncts of the latter expression to the result.
- builder.addClause({negLit(Var), posLit(LHS)});
- builder.addClause({posLit(Var), negLit(LHS)});
- } else {
- // `X <=> (A ^ B)` is equivalent to `(!X v A) ^ (!X v B) ^ (X v !A v
- // !B)` which is already in conjunctive normal form. Below we add each
- // of the conjuncts of the latter expression to the result.
- builder.addClause({negLit(Var), posLit(LHS)});
- builder.addClause({negLit(Var), posLit(RHS)});
- builder.addClause({posLit(Var), negLit(LHS), negLit(RHS)});
- }
- break;
- }
- case Formula::Or: {
- const Variable LHS = GetVar(Val->operands()[0]);
- const Variable RHS = GetVar(Val->operands()[1]);
-
- if (LHS == RHS) {
- // `X <=> (A v A)` is equivalent to `(!X v A) ^ (X v !A)` which is
- // already in conjunctive normal form. Below we add each of the
- // conjuncts of the latter expression to the result.
- builder.addClause({negLit(Var), posLit(LHS)});
- builder.addClause({posLit(Var), negLit(LHS)});
- } else {
- // `X <=> (A v B)` is equivalent to `(!X v A v B) ^ (X v !A) ^ (X v
- // !B)` which is already in conjunctive normal form. Below we add each
- // of the conjuncts of the latter expression to the result.
- builder.addClause({negLit(Var), posLit(LHS), posLit(RHS)});
- builder.addClause({posLit(Var), negLit(LHS)});
- builder.addClause({posLit(Var), negLit(RHS)});
- }
- break;
- }
- case Formula::Not: {
- const Variable Operand = GetVar(Val->operands()[0]);
-
- // `X <=> !Y` is equivalent to `(!X v !Y) ^ (X v Y)` which is
- // already in conjunctive normal form. Below we add each of the
- // conjuncts of the latter expression to the result.
- builder.addClause({negLit(Var), negLit(Operand)});
- builder.addClause({posLit(Var), posLit(Operand)});
- break;
- }
- case Formula::Implies: {
- const Variable LHS = GetVar(Val->operands()[0]);
- const Variable RHS = GetVar(Val->operands()[1]);
-
- // `X <=> (A => B)` is equivalent to
- // `(X v A) ^ (X v !B) ^ (!X v !A v B)` which is already in
- // conjunctive normal form. Below we add each of the conjuncts of
- // the latter expression to the result.
- builder.addClause({posLit(Var), posLit(LHS)});
- builder.addClause({posLit(Var), negLit(RHS)});
- builder.addClause({negLit(Var), negLit(LHS), posLit(RHS)});
- break;
- }
- case Formula::Equal: {
- const Variable LHS = GetVar(Val->operands()[0]);
- const Variable RHS = GetVar(Val->operands()[1]);
-
- if (LHS == RHS) {
- // `X <=> (A <=> A)` is equivalent to `X` which is already in
- // conjunctive normal form. Below we add each of the conjuncts of the
- // latter expression to the result.
- builder.addClause(posLit(Var));
-
- // No need to visit the sub-values of `Val`.
- continue;
- }
- // `X <=> (A <=> B)` is equivalent to
- // `(X v A v B) ^ (X v !A v !B) ^ (!X v A v !B) ^ (!X v !A v B)` which
- // is already in conjunctive normal form. Below we add each of the
- // conjuncts of the latter expression to the result.
- builder.addClause({posLit(Var), posLit(LHS), posLit(RHS)});
- builder.addClause({posLit(Var), negLit(LHS), negLit(RHS)});
- builder.addClause({negLit(Var), posLit(LHS), negLit(RHS)});
- builder.addClause({negLit(Var), negLit(LHS), posLit(RHS)});
- break;
- }
- }
- if (builder.isKnownContradictory()) {
- return CNF;
- }
- for (const Formula *Child : Val->operands())
- UnprocessedSubVals.push(Child);
- }
-
- // Unit clauses that were added later were not
- // considered for the simplification of earlier clauses. Do a final
- // pass to find more opportunities for simplification.
- CNFFormula FinalCNF(NextVar - 1, std::move(CNF.Atomics));
- CNFFormulaBuilder FinalBuilder(FinalCNF);
-
- // Collect unit clauses.
- for (ClauseID C = 1; C < CNF.ClauseStarts.size(); ++C) {
- if (CNF.clauseSize(C) == 1) {
- FinalBuilder.addClause(CNF.clauseLiterals(C)[0]);
- }
- }
-
- // Add all clauses that were added previously, preserving the order.
- for (ClauseID C = 1; C < CNF.ClauseStarts.size(); ++C) {
- FinalBuilder.addClause(CNF.clauseLiterals(C));
- if (FinalBuilder.isKnownContradictory()) {
- break;
- }
- }
- // It is possible there were new unit clauses again, but
- // we stop here and leave the rest to the solver algorithm.
- return FinalCNF;
-}
-
-class WatchedLiteralsSolverImpl {
- /// A boolean formula in conjunctive normal form that the solver will attempt
- /// to prove satisfiable. The formula will be modified in the process.
- CNFFormula CNF;
-
/// The search for a satisfying assignment of the variables in `Formula` will
/// proceed in levels, starting from 1 and going up to `Formula.LargestVar`
/// (inclusive). The current level is stored in `Level`. At each level the
@@ -501,20 +105,37 @@ class WatchedLiteralsSolverImpl {
public:
explicit WatchedLiteralsSolverImpl(
const llvm::ArrayRef<const Formula *> &Vals)
- : CNF(buildCNF(Vals)), LevelVars(CNF.LargestVar + 1),
- LevelStates(CNF.LargestVar + 1) {
+ // `Atomics` needs to be initialized first so that we can use it as an
+ // output argument of `buildCNF()`.
+ : Atomics(), CNF(buildCNF(Vals, Atomics)),
+ LevelVars(CNF.largestVar() + 1), LevelStates(CNF.largestVar() + 1) {
assert(!Vals.empty());
+ // Skip initialization if the formula is known to be contradictory.
+ if (CNF.knownContradictory())
+ return;
+
+ // Initialize `NextWatched` and `WatchedHead`.
+ NextWatched.push_back(0);
+ const size_t NumLiterals = 2 * CNF.largestVar() + 1;
+ WatchedHead.resize(NumLiterals + 1, 0);
+ for (ClauseID C = 1; C <= CNF.numClauses(); ++C) {
+ // Designate the first literal as the "watched" literal of the clause.
+ Literal FirstLit = CNF.clauseLiterals(C).front();
+ NextWatched.push_back(WatchedHead[FirstLit]);
+ WatchedHead[FirstLit] = C;
+ }
+
// Initialize the state at the root level to a decision so that in
// `reverseForcedMoves` we don't have to check that `Level >= 0` on each
// iteration.
LevelStates[0] = State::Decision;
// Initialize all variables as unassigned.
- VarAssignments.resize(CNF.LargestVar + 1, Assignment::Unassigned);
+ VarAssignments.resize(CNF.largestVar() + 1, Assignment::Unassigned);
// Initialize the active variables.
- for (Variable Var = CNF.LargestVar; Var != NullVar; --Var) {
+ for (Variable Var = CNF.largestVar(); Var != NullVar; --Var) {
if (isWatched(posLit(Var)) || isWatched(negLit(Var)))
ActiveVars.push_back(Var);
}
@@ -523,7 +144,7 @@ public:
// Returns the `Result` and the number of iterations "remaining" from
// `MaxIterations` (that is, `MaxIterations` - iterations in this call).
std::pair<Solver::Result, std::int64_t> solve(std::int64_t MaxIterations) && {
- if (CNF.KnownContradictory) {
+ if (CNF.knownContradictory()) {
// Short-cut the solving process. We already found out at CNF
// construction time that the formula is unsatisfiable.
return std::make_pair(Solver::Result::Unsatisfiable(), MaxIterations);
@@ -625,7 +246,7 @@ private:
/// Returns a satisfying truth assignment to the atoms in the boolean formula.
llvm::DenseMap<Atom, Solver::Result::Assignment> buildSolution() {
llvm::DenseMap<Atom, Solver::Result::Assignment> Solution;
- for (auto &Atomic : CNF.Atomics) {
+ for (auto &Atomic : Atomics) {
// A variable may have a definite true/false assignment, or it may be
// unassigned indicating its truth value does not affect the result of
// the formula. Unassigned variables are assigned to true as a default.
@@ -661,24 +282,25 @@ private:
const Literal FalseLit = VarAssignments[Var] == Assignment::AssignedTrue
? negLit(Var)
: posLit(Var);
- ClauseID FalseLitWatcher = CNF.WatchedHead[FalseLit];
- CNF.WatchedHead[FalseLit] = NullClause;
+ ClauseID FalseLitWatcher = WatchedHead[FalseLit];
+ WatchedHead[FalseLit] = NullClause;
while (FalseLitWatcher != NullClause) {
- const ClauseID NextFalseLitWatcher = CNF.NextWatched[FalseLitWatcher];
+ const ClauseID NextFalseLitWatcher = NextWatched[FalseLitWatcher];
// Pick the first non-false literal as the new watched literal.
- const size_t FalseLitWatcherStart = CNF.ClauseStarts[FalseLitWatcher];
- size_t NewWatchedLitIdx = FalseLitWatcherStart + 1;
- while (isCurrentlyFalse(CNF.Clauses[NewWatchedLitIdx]))
- ++NewWatchedLitIdx;
- const Literal NewWatchedLit = CNF.Clauses[NewWatchedLitIdx];
+ const CNFFormula::Iterator FalseLitWatcherStart =
+ CNF.startOfClause(FalseLitWatcher);
+ CNFFormula::Iterator NewWatchedLitIter = FalseLitWatcherStart.next();
+ while (isCurrentlyFalse(*NewWatchedLitIter))
+ ++NewWatchedLitIter;
+ const Literal NewWatchedLit = *NewWatchedLitIter;
const Variable NewWatchedLitVar = var(NewWatchedLit);
// Swap the old watched literal for the new one in `FalseLitWatcher` to
// maintain the invariant that the watched literal is at the beginning of
// the clause.
- CNF.Clauses[NewWatchedLitIdx] = FalseLit;
- CNF.Clauses[FalseLitWatcherStart] = NewWatchedLit;
+ *NewWatchedLitIter = FalseLit;
+ *FalseLitWatcherStart = NewWatchedLit;
// If the new watched literal isn't watched by any other clause and its
// variable isn't assigned we need to add it to the active variables.
@@ -686,8 +308,8 @@ private:
VarAssignments[NewWatchedLitVar] == Assignment::Unassigned)
ActiveVars.push_back(NewWatchedLitVar);
- CNF.NextWatched[FalseLitWatcher] = CNF.WatchedHead[NewWatchedLit];
- CNF.WatchedHead[NewWatchedLit] = FalseLitWatcher;
+ NextWatched[FalseLitWatcher] = WatchedHead[NewWatchedLit];
+ WatchedHead[NewWatchedLit] = FalseLitWatcher;
// Go to the next clause that watches `FalseLit`.
FalseLitWatcher = NextFalseLitWatcher;
@@ -697,8 +319,8 @@ private:
/// Returns true if and only if one of the clauses that watch `Lit` is a unit
/// clause.
bool watchedByUnitClause(Literal Lit) const {
- for (ClauseID LitWatcher = CNF.WatchedHead[Lit]; LitWatcher != NullClause;
- LitWatcher = CNF.NextWatched[LitWatcher]) {
+ for (ClauseID LitWatcher = WatchedHead[Lit]; LitWatcher != NullClause;
+ LitWatcher = NextWatched[LitWatcher]) {
llvm::ArrayRef<Literal> Clause = CNF.clauseLiterals(LitWatcher);
// Assert the invariant that the watched literal is always the first one
@@ -728,9 +350,7 @@ private:
}
/// Returns true if and only if `Lit` is watched by a clause in `Formula`.
- bool isWatched(Literal Lit) const {
- return CNF.WatchedHead[Lit] != NullClause;
- }
+ bool isWatched(Literal Lit) const { return WatchedHead[Lit] != NullClause; }
/// Returns an assignment for an unassigned variable.
Assignment decideAssignment(Variable Var) const {
@@ -742,8 +362,8 @@ private:
/// Returns a set of all watched literals.
llvm::DenseSet<Literal> watchedLiterals() const {
llvm::DenseSet<Literal> WatchedLiterals;
- for (Literal Lit = 2; Lit < CNF.WatchedHead.size(); Lit++) {
- if (CNF.WatchedHead[Lit] == NullClause)
+ for (Literal Lit = 2; Lit < WatchedHead.size(); Lit++) {
+ if (WatchedHead[Lit] == NullClause)
continue;
WatchedLiterals.insert(Lit);
}
@@ -783,6 +403,8 @@ private:
}
};
+} // namespace
+
Solver::Result
WatchedLiteralsSolver::solve(llvm::ArrayRef<const Formula *> Vals) {
if (Vals.empty())
diff --git a/contrib/llvm-project/clang/lib/Analysis/MacroExpansionContext.cpp b/contrib/llvm-project/clang/lib/Analysis/MacroExpansionContext.cpp
index 564e359668a5..b212b7f24579 100644
--- a/contrib/llvm-project/clang/lib/Analysis/MacroExpansionContext.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/MacroExpansionContext.cpp
@@ -12,7 +12,7 @@
#define DEBUG_TYPE "macro-expansion-context"
-static void dumpTokenInto(const clang::Preprocessor &PP, clang::raw_ostream &OS,
+static void dumpTokenInto(const clang::Preprocessor &PP, llvm::raw_ostream &OS,
clang::Token Tok);
namespace clang {
diff --git a/contrib/llvm-project/clang/lib/Analysis/ObjCNoReturn.cpp b/contrib/llvm-project/clang/lib/Analysis/ObjCNoReturn.cpp
index 9d7c365c3b99..9e651c29e085 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ObjCNoReturn.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ObjCNoReturn.cpp
@@ -17,7 +17,8 @@
using namespace clang;
-static bool isSubclass(const ObjCInterfaceDecl *Class, IdentifierInfo *II) {
+static bool isSubclass(const ObjCInterfaceDecl *Class,
+ const IdentifierInfo *II) {
if (!Class)
return false;
if (Class->getIdentifier() == II)
@@ -30,7 +31,7 @@ ObjCNoReturn::ObjCNoReturn(ASTContext &C)
NSExceptionII(&C.Idents.get("NSException"))
{
// Generate selectors.
- SmallVector<IdentifierInfo*, 3> II;
+ SmallVector<const IdentifierInfo *, 3> II;
// raise:format:
II.push_back(&C.Idents.get("raise"));
diff --git a/contrib/llvm-project/clang/lib/Analysis/PathDiagnostic.cpp b/contrib/llvm-project/clang/lib/Analysis/PathDiagnostic.cpp
index 79f337a91ec8..35472e705cfd 100644
--- a/contrib/llvm-project/clang/lib/Analysis/PathDiagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/PathDiagnostic.cpp
@@ -115,14 +115,17 @@ PathDiagnostic::PathDiagnostic(
StringRef CheckerName, const Decl *declWithIssue, StringRef bugtype,
StringRef verboseDesc, StringRef shortDesc, StringRef category,
PathDiagnosticLocation LocationToUnique, const Decl *DeclToUnique,
+ const Decl *AnalysisEntryPoint,
std::unique_ptr<FilesToLineNumsMap> ExecutedLines)
: CheckerName(CheckerName), DeclWithIssue(declWithIssue),
BugType(StripTrailingDots(bugtype)),
VerboseDesc(StripTrailingDots(verboseDesc)),
ShortDesc(StripTrailingDots(shortDesc)),
Category(StripTrailingDots(category)), UniqueingLoc(LocationToUnique),
- UniqueingDecl(DeclToUnique), ExecutedLines(std::move(ExecutedLines)),
- path(pathImpl) {}
+ UniqueingDecl(DeclToUnique), AnalysisEntryPoint(AnalysisEntryPoint),
+ ExecutedLines(std::move(ExecutedLines)), path(pathImpl) {
+ assert(AnalysisEntryPoint);
+}
void PathDiagnosticConsumer::anchor() {}
diff --git a/contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp b/contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp
index 1bf0d9aec862..acbe1470b389 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ParentMap.h"
+#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
@@ -453,26 +454,68 @@ bool DeadCodeScan::isDeadCodeRoot(const clang::CFGBlock *Block) {
return isDeadRoot;
}
-static bool isValidDeadStmt(const Stmt *S) {
+// Check if the given `DeadStmt` is a coroutine statement and is a substmt of
+// the coroutine statement. `Block` is the CFGBlock containing the `DeadStmt`.
+static bool isInCoroutineStmt(const Stmt *DeadStmt, const CFGBlock *Block) {
+ // The coroutine statement, co_return, co_await, or co_yield.
+ const Stmt *CoroStmt = nullptr;
+ // Find the first coroutine statement after the DeadStmt in the block.
+ bool AfterDeadStmt = false;
+ for (CFGBlock::const_iterator I = Block->begin(), E = Block->end(); I != E;
+ ++I)
+ if (std::optional<CFGStmt> CS = I->getAs<CFGStmt>()) {
+ const Stmt *S = CS->getStmt();
+ if (S == DeadStmt)
+ AfterDeadStmt = true;
+ if (AfterDeadStmt &&
+ // For simplicity, we only check simple coroutine statements.
+ (llvm::isa<CoreturnStmt>(S) || llvm::isa<CoroutineSuspendExpr>(S))) {
+ CoroStmt = S;
+ break;
+ }
+ }
+ if (!CoroStmt)
+ return false;
+ struct Checker : RecursiveASTVisitor<Checker> {
+ const Stmt *DeadStmt;
+ bool CoroutineSubStmt = false;
+ Checker(const Stmt *S) : DeadStmt(S) {}
+ bool VisitStmt(const Stmt *S) {
+ if (S == DeadStmt)
+ CoroutineSubStmt = true;
+ return true;
+ }
+ // Statements captured in the CFG can be implicit.
+ bool shouldVisitImplicitCode() const { return true; }
+ };
+ Checker checker(DeadStmt);
+ checker.TraverseStmt(const_cast<Stmt *>(CoroStmt));
+ return checker.CoroutineSubStmt;
+}
+
+static bool isValidDeadStmt(const Stmt *S, const clang::CFGBlock *Block) {
if (S->getBeginLoc().isInvalid())
return false;
if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(S))
return BO->getOpcode() != BO_Comma;
- return true;
+ // Coroutine statements are never considered dead statements, because removing
+ // them may change the function semantic if it is the only coroutine statement
+ // of the coroutine.
+ return !isInCoroutineStmt(S, Block);
}
const Stmt *DeadCodeScan::findDeadCode(const clang::CFGBlock *Block) {
for (CFGBlock::const_iterator I = Block->begin(), E = Block->end(); I!=E; ++I)
if (std::optional<CFGStmt> CS = I->getAs<CFGStmt>()) {
const Stmt *S = CS->getStmt();
- if (isValidDeadStmt(S))
+ if (isValidDeadStmt(S, Block))
return S;
}
CFGTerminator T = Block->getTerminator();
if (T.isStmtBranch()) {
const Stmt *S = T.getStmt();
- if (S && isValidDeadStmt(S))
+ if (S && isValidDeadStmt(S, Block))
return S;
}
diff --git a/contrib/llvm-project/clang/lib/Analysis/ThreadSafetyCommon.cpp b/contrib/llvm-project/clang/lib/Analysis/ThreadSafetyCommon.cpp
index 2fe0f85897c3..3e8c959ccee4 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ThreadSafetyCommon.cpp
@@ -177,7 +177,7 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
return CapabilityExpr();
if (const auto* SLit = dyn_cast<StringLiteral>(AttrExp)) {
- if (SLit->getString() == StringRef("*"))
+ if (SLit->getString() == "*")
// The "*" expr is a universal lock, which essentially turns off
// checks until it is removed from the lockset.
return CapabilityExpr(new (Arena) til::Wildcard(), StringRef("wildcard"),
@@ -197,7 +197,7 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
else if (const auto *UO = dyn_cast<UnaryOperator>(AttrExp)) {
if (UO->getOpcode() == UO_LNot) {
Neg = true;
- AttrExp = UO->getSubExpr();
+ AttrExp = UO->getSubExpr()->IgnoreImplicit();
}
}
@@ -995,7 +995,7 @@ void SExprBuilder::exitCFG(const CFGBlock *Last) {
IncompleteArgs.clear();
}
-/*
+#ifndef NDEBUG
namespace {
class TILPrinter :
@@ -1016,4 +1016,4 @@ void printSCFG(CFGWalker &Walker) {
} // namespace threadSafety
} // namespace clang
-*/
+#endif // NDEBUG
diff --git a/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp b/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
index e9111ded64eb..bf2f73061865 100644
--- a/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
@@ -44,7 +44,7 @@ static bool recordIsNotEmpty(const RecordDecl *RD) {
// We consider a record decl to be empty if it contains only unnamed bit-
// fields, zero-width fields, and fields of empty record type.
for (const auto *FD : RD->fields()) {
- if (FD->isUnnamedBitfield())
+ if (FD->isUnnamedBitField())
continue;
if (FD->isZeroSize(FD->getASTContext()))
continue;
diff --git a/contrib/llvm-project/clang/lib/Analysis/UnsafeBufferUsage.cpp b/contrib/llvm-project/clang/lib/Analysis/UnsafeBufferUsage.cpp
index 7df706beb226..866222380974 100644
--- a/contrib/llvm-project/clang/lib/Analysis/UnsafeBufferUsage.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/UnsafeBufferUsage.cpp
@@ -7,18 +7,26 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/UnsafeBufferUsage.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
#include <memory>
#include <optional>
-#include <sstream>
#include <queue>
+#include <sstream>
using namespace llvm;
using namespace clang;
@@ -122,42 +130,42 @@ public:
bool TraverseGenericSelectionExpr(GenericSelectionExpr *Node) {
// These are unevaluated, except the result expression.
- if(ignoreUnevaluatedContext)
+ if (ignoreUnevaluatedContext)
return TraverseStmt(Node->getResultExpr());
return VisitorBase::TraverseGenericSelectionExpr(Node);
}
bool TraverseUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node) {
// Unevaluated context.
- if(ignoreUnevaluatedContext)
+ if (ignoreUnevaluatedContext)
return true;
return VisitorBase::TraverseUnaryExprOrTypeTraitExpr(Node);
}
bool TraverseTypeOfExprTypeLoc(TypeOfExprTypeLoc Node) {
// Unevaluated context.
- if(ignoreUnevaluatedContext)
+ if (ignoreUnevaluatedContext)
return true;
return VisitorBase::TraverseTypeOfExprTypeLoc(Node);
}
bool TraverseDecltypeTypeLoc(DecltypeTypeLoc Node) {
// Unevaluated context.
- if(ignoreUnevaluatedContext)
+ if (ignoreUnevaluatedContext)
return true;
return VisitorBase::TraverseDecltypeTypeLoc(Node);
}
bool TraverseCXXNoexceptExpr(CXXNoexceptExpr *Node) {
// Unevaluated context.
- if(ignoreUnevaluatedContext)
+ if (ignoreUnevaluatedContext)
return true;
return VisitorBase::TraverseCXXNoexceptExpr(Node);
}
bool TraverseCXXTypeidExpr(CXXTypeidExpr *Node) {
// Unevaluated context.
- if(ignoreUnevaluatedContext)
+ if (ignoreUnevaluatedContext)
return true;
return VisitorBase::TraverseCXXTypeidExpr(Node);
}
@@ -205,24 +213,26 @@ private:
// Because we're dealing with raw pointers, let's define what we mean by that.
static auto hasPointerType() {
- return hasType(hasCanonicalType(pointerType()));
+ return hasType(hasCanonicalType(pointerType()));
}
-static auto hasArrayType() {
- return hasType(hasCanonicalType(arrayType()));
-}
+static auto hasArrayType() { return hasType(hasCanonicalType(arrayType())); }
-AST_MATCHER_P(Stmt, forEachDescendantEvaluatedStmt, internal::Matcher<Stmt>, innerMatcher) {
+AST_MATCHER_P(Stmt, forEachDescendantEvaluatedStmt, internal::Matcher<Stmt>,
+ innerMatcher) {
const DynTypedMatcher &DTM = static_cast<DynTypedMatcher>(innerMatcher);
- MatchDescendantVisitor Visitor(&DTM, Finder, Builder, ASTMatchFinder::BK_All, true);
+ MatchDescendantVisitor Visitor(&DTM, Finder, Builder, ASTMatchFinder::BK_All,
+ true);
return Visitor.findMatch(DynTypedNode::create(Node));
}
-AST_MATCHER_P(Stmt, forEachDescendantStmt, internal::Matcher<Stmt>, innerMatcher) {
+AST_MATCHER_P(Stmt, forEachDescendantStmt, internal::Matcher<Stmt>,
+ innerMatcher) {
const DynTypedMatcher &DTM = static_cast<DynTypedMatcher>(innerMatcher);
- MatchDescendantVisitor Visitor(&DTM, Finder, Builder, ASTMatchFinder::BK_All, false);
+ MatchDescendantVisitor Visitor(&DTM, Finder, Builder, ASTMatchFinder::BK_All,
+ false);
return Visitor.findMatch(DynTypedNode::create(Node));
}
@@ -232,6 +242,11 @@ AST_MATCHER_P(Stmt, notInSafeBufferOptOut, const UnsafeBufferUsageHandler *,
return !Handler->isSafeBufferOptOut(Node.getBeginLoc());
}
+AST_MATCHER_P(Stmt, ignoreUnsafeBufferInContainer,
+ const UnsafeBufferUsageHandler *, Handler) {
+ return Handler->ignoreUnsafeBufferInContainer(Node.getBeginLoc());
+}
+
AST_MATCHER_P(CastExpr, castSubExpr, internal::Matcher<Expr>, innerMatcher) {
return innerMatcher.matches(*Node.getSubExpr(), Finder, Builder);
}
@@ -255,10 +270,9 @@ static auto isInUnspecifiedLvalueContext(internal::Matcher<Expr> innerMatcher) {
hasLHS(innerMatcher)
)
));
-// clang-format on
+ // clang-format on
}
-
// Returns a matcher that matches any expression `e` such that `InnerMatcher`
// matches `e` and `e` is in an Unspecified Pointer Context (UPC).
static internal::Matcher<Stmt>
@@ -271,10 +285,13 @@ isInUnspecifiedPointerContext(internal::Matcher<Stmt> InnerMatcher) {
// 4. the operand of a pointer subtraction operation
// (i.e., computing the distance between two pointers); or ...
- auto CallArgMatcher =
- callExpr(forEachArgumentWithParam(InnerMatcher,
- hasPointerType() /* array also decays to pointer type*/),
- unless(callee(functionDecl(hasAttr(attr::UnsafeBufferUsage)))));
+ // clang-format off
+ auto CallArgMatcher = callExpr(
+ forEachArgumentWithParamType(
+ InnerMatcher,
+ isAnyPointer() /* array also decays to pointer type*/),
+ unless(callee(
+ functionDecl(hasAttr(attr::UnsafeBufferUsage)))));
auto CastOperandMatcher =
castExpr(anyOf(hasCastKind(CastKind::CK_PointerToIntegral),
@@ -296,9 +313,10 @@ isInUnspecifiedPointerContext(internal::Matcher<Stmt> InnerMatcher) {
hasRHS(hasPointerType())),
eachOf(hasLHS(InnerMatcher),
hasRHS(InnerMatcher)));
+ // clang-format on
return stmt(anyOf(CallArgMatcher, CastOperandMatcher, CompOperandMatcher,
- PtrSubtractionMatcher));
+ PtrSubtractionMatcher));
// FIXME: any more cases? (UPC excludes the RHS of an assignment. For now we
// don't have to check that.)
}
@@ -325,6 +343,106 @@ isInUnspecifiedUntypedContext(internal::Matcher<Stmt> InnerMatcher) {
// FIXME: Handle loop bodies.
return stmt(anyOf(CompStmt, IfStmtThen, IfStmtElse));
}
+
+// Given a two-param std::span construct call, matches iff the call has the
+// following forms:
+// 1. `std::span<T>{new T[n], n}`, where `n` is a literal or a DRE
+// 2. `std::span<T>{new T, 1}`
+// 3. `std::span<T>{&var, 1}`
+// 4. `std::span<T>{a, n}`, where `a` is of an array-of-T with constant size
+// `n`
+// 5. `std::span<T>{any, 0}`
+AST_MATCHER(CXXConstructExpr, isSafeSpanTwoParamConstruct) {
+ assert(Node.getNumArgs() == 2 &&
+ "expecting a two-parameter std::span constructor");
+ const Expr *Arg0 = Node.getArg(0)->IgnoreImplicit();
+ const Expr *Arg1 = Node.getArg(1)->IgnoreImplicit();
+ auto HaveEqualConstantValues = [&Finder](const Expr *E0, const Expr *E1) {
+ if (auto E0CV = E0->getIntegerConstantExpr(Finder->getASTContext()))
+ if (auto E1CV = E1->getIntegerConstantExpr(Finder->getASTContext())) {
+ return APSInt::compareValues(*E0CV, *E1CV) == 0;
+ }
+ return false;
+ };
+ auto AreSameDRE = [](const Expr *E0, const Expr *E1) {
+ if (auto *DRE0 = dyn_cast<DeclRefExpr>(E0))
+ if (auto *DRE1 = dyn_cast<DeclRefExpr>(E1)) {
+ return DRE0->getDecl() == DRE1->getDecl();
+ }
+ return false;
+ };
+ std::optional<APSInt> Arg1CV =
+ Arg1->getIntegerConstantExpr(Finder->getASTContext());
+
+ if (Arg1CV && Arg1CV->isZero())
+ // Check form 5:
+ return true;
+ switch (Arg0->IgnoreImplicit()->getStmtClass()) {
+ case Stmt::CXXNewExprClass:
+ if (auto Size = cast<CXXNewExpr>(Arg0)->getArraySize()) {
+ // Check form 1:
+ return AreSameDRE((*Size)->IgnoreImplicit(), Arg1) ||
+ HaveEqualConstantValues(*Size, Arg1);
+ }
+ // TODO: what's placeholder type? avoid it for now.
+ if (!cast<CXXNewExpr>(Arg0)->hasPlaceholderType()) {
+ // Check form 2:
+ return Arg1CV && Arg1CV->isOne();
+ }
+ break;
+ case Stmt::UnaryOperatorClass:
+ if (cast<UnaryOperator>(Arg0)->getOpcode() ==
+ UnaryOperator::Opcode::UO_AddrOf)
+ // Check form 3:
+ return Arg1CV && Arg1CV->isOne();
+ break;
+ default:
+ break;
+ }
+
+ QualType Arg0Ty = Arg0->IgnoreImplicit()->getType();
+
+ if (Arg0Ty->isConstantArrayType()) {
+ const APSInt ConstArrSize =
+ APSInt(cast<ConstantArrayType>(Arg0Ty)->getSize());
+
+ // Check form 4:
+ return Arg1CV && APSInt::compareValues(ConstArrSize, *Arg1CV) == 0;
+ }
+ return false;
+}
+
+AST_MATCHER(ArraySubscriptExpr, isSafeArraySubscript) {
+ // FIXME: Proper solution:
+ // - refactor Sema::CheckArrayAccess
+ // - split safe/OOB/unknown decision logic from diagnostics emitting code
+ // - e. g. "Try harder to find a NamedDecl to point at in the note."
+ // already duplicated
+ // - call both from Sema and from here
+
+ const auto *BaseDRE =
+ dyn_cast<DeclRefExpr>(Node.getBase()->IgnoreParenImpCasts());
+ if (!BaseDRE)
+ return false;
+ if (!BaseDRE->getDecl())
+ return false;
+ const auto *CATy = Finder->getASTContext().getAsConstantArrayType(
+ BaseDRE->getDecl()->getType());
+ if (!CATy)
+ return false;
+
+ if (const auto *IdxLit = dyn_cast<IntegerLiteral>(Node.getIdx())) {
+ const APInt ArrIdx = IdxLit->getValue();
+ // FIXME: ArrIdx.isNegative() we could immediately emit an error as that's a
+ // bug
+ if (ArrIdx.isNonNegative() &&
+ ArrIdx.getLimitedValue() < CATy->getLimitedSize())
+ return true;
+ }
+
+ return false;
+}
+
} // namespace clang::ast_matchers
namespace {
@@ -334,9 +452,6 @@ using DeclUseList = SmallVector<const DeclRefExpr *, 1>;
// Convenience typedef.
using FixItList = SmallVector<FixItHint, 4>;
-
-// Defined below.
-class Strategy;
} // namespace
namespace {
@@ -367,7 +482,9 @@ public:
#ifndef NDEBUG
StringRef getDebugName() const {
switch (K) {
-#define GADGET(x) case Kind::x: return #x;
+#define GADGET(x) \
+ case Kind::x: \
+ return #x;
#include "clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def"
}
llvm_unreachable("Unhandled Gadget::Kind enum");
@@ -375,7 +492,9 @@ public:
#endif
virtual bool isWarningGadget() const = 0;
- virtual const Stmt *getBaseStmt() const = 0;
+ // TODO remove this method from WarningGadget interface. It's only used for
+ // debug prints in FixableGadget.
+ virtual SourceLocation getSourceLoc() const = 0;
/// Returns the list of pointer-type variables on which this gadget performs
/// its operation. Typically, there's only one variable. This isn't a list
@@ -388,7 +507,6 @@ private:
Kind K;
};
-
/// Warning gadgets correspond to unsafe code patterns that warrants
/// an immediate warning.
class WarningGadget : public Gadget {
@@ -397,12 +515,16 @@ public:
static bool classof(const Gadget *G) { return G->isWarningGadget(); }
bool isWarningGadget() const final { return true; }
+
+ virtual void handleUnsafeOperation(UnsafeBufferUsageHandler &Handler,
+ bool IsRelatedToDecl,
+ ASTContext &Ctx) const = 0;
};
-/// Fixable gadgets correspond to code patterns that aren't always unsafe but need to be
-/// properly recognized in order to emit fixes. For example, if a raw pointer-type
-/// variable is replaced by a safe C++ container, every use of such variable must be
-/// carefully considered and possibly updated.
+/// Fixable gadgets correspond to code patterns that aren't always unsafe but
+/// need to be properly recognized in order to emit fixes. For example, if a raw
+/// pointer-type variable is replaced by a safe C++ container, every use of such
+/// variable must be carefully considered and possibly updated.
class FixableGadget : public Gadget {
public:
FixableGadget(Kind K) : Gadget(K) {}
@@ -413,24 +535,23 @@ public:
/// Returns a fixit that would fix the current gadget according to
/// the current strategy. Returns std::nullopt if the fix cannot be produced;
/// returns an empty list if no fixes are necessary.
- virtual std::optional<FixItList> getFixits(const Strategy &) const {
+ virtual std::optional<FixItList> getFixits(const FixitStrategy &) const {
return std::nullopt;
}
- /// Returns a list of two elements where the first element is the LHS of a pointer assignment
- /// statement and the second element is the RHS. This two-element list represents the fact that
- /// the LHS buffer gets its bounds information from the RHS buffer. This information will be used
- /// later to group all those variables whose types must be modified together to prevent type
- /// mismatches.
+ /// Returns a list of two elements where the first element is the LHS of a
+ /// pointer assignment statement and the second element is the RHS. This
+ /// two-element list represents the fact that the LHS buffer gets its bounds
+ /// information from the RHS buffer. This information will be used later to
+ /// group all those variables whose types must be modified together to prevent
+ /// type mismatches.
virtual std::optional<std::pair<const VarDecl *, const VarDecl *>>
getStrategyImplications() const {
return std::nullopt;
}
};
-static auto toSupportedVariable() {
- return to(varDecl());
-}
+static auto toSupportedVariable() { return to(varDecl()); }
using FixableGadgetList = std::vector<std::unique_ptr<FixableGadget>>;
using WarningGadgetList = std::vector<std::unique_ptr<WarningGadget>>;
@@ -451,13 +572,18 @@ public:
}
static Matcher matcher() {
- return stmt(unaryOperator(
- hasOperatorName("++"),
- hasUnaryOperand(ignoringParenImpCasts(hasPointerType()))
- ).bind(OpTag));
+ return stmt(
+ unaryOperator(hasOperatorName("++"),
+ hasUnaryOperand(ignoringParenImpCasts(hasPointerType())))
+ .bind(OpTag));
}
- const UnaryOperator *getBaseStmt() const override { return Op; }
+ void handleUnsafeOperation(UnsafeBufferUsageHandler &Handler,
+ bool IsRelatedToDecl,
+ ASTContext &Ctx) const override {
+ Handler.handleUnsafeOperation(Op, IsRelatedToDecl, Ctx);
+ }
+ SourceLocation getSourceLoc() const override { return Op->getBeginLoc(); }
DeclUseList getClaimedVarUseSites() const override {
SmallVector<const DeclRefExpr *, 2> Uses;
@@ -486,13 +612,18 @@ public:
}
static Matcher matcher() {
- return stmt(unaryOperator(
- hasOperatorName("--"),
- hasUnaryOperand(ignoringParenImpCasts(hasPointerType()))
- ).bind(OpTag));
+ return stmt(
+ unaryOperator(hasOperatorName("--"),
+ hasUnaryOperand(ignoringParenImpCasts(hasPointerType())))
+ .bind(OpTag));
}
- const UnaryOperator *getBaseStmt() const override { return Op; }
+ void handleUnsafeOperation(UnsafeBufferUsageHandler &Handler,
+ bool IsRelatedToDecl,
+ ASTContext &Ctx) const override {
+ Handler.handleUnsafeOperation(Op, IsRelatedToDecl, Ctx);
+ }
+ SourceLocation getSourceLoc() const override { return Op->getBeginLoc(); }
DeclUseList getClaimedVarUseSites() const override {
if (const auto *DRE =
@@ -520,20 +651,25 @@ public:
}
static Matcher matcher() {
- // FIXME: What if the index is integer literal 0? Should this be
- // a safe gadget in this case?
- // clang-format off
+ // clang-format off
return stmt(arraySubscriptExpr(
hasBase(ignoringParenImpCasts(
anyOf(hasPointerType(), hasArrayType()))),
- unless(hasIndex(
- anyOf(integerLiteral(equals(0)), arrayInitIndexExpr())
- )))
- .bind(ArraySubscrTag));
+ unless(anyOf(
+ isSafeArraySubscript(),
+ hasIndex(
+ anyOf(integerLiteral(equals(0)), arrayInitIndexExpr())
+ )
+ ))).bind(ArraySubscrTag));
// clang-format on
}
- const ArraySubscriptExpr *getBaseStmt() const override { return ASE; }
+ void handleUnsafeOperation(UnsafeBufferUsageHandler &Handler,
+ bool IsRelatedToDecl,
+ ASTContext &Ctx) const override {
+ Handler.handleUnsafeOperation(ASE, IsRelatedToDecl, Ctx);
+ }
+ SourceLocation getSourceLoc() const override { return ASE->getBeginLoc(); }
DeclUseList getClaimedVarUseSites() const override {
if (const auto *DRE =
@@ -581,7 +717,12 @@ public:
.bind(PointerArithmeticTag));
}
- const Stmt *getBaseStmt() const override { return PA; }
+ void handleUnsafeOperation(UnsafeBufferUsageHandler &Handler,
+ bool IsRelatedToDecl,
+ ASTContext &Ctx) const override {
+ Handler.handleUnsafeOperation(PA, IsRelatedToDecl, Ctx);
+ }
+ SourceLocation getSourceLoc() const override { return PA->getBeginLoc(); }
DeclUseList getClaimedVarUseSites() const override {
if (const auto *DRE = dyn_cast<DeclRefExpr>(Ptr->IgnoreParenImpCasts())) {
@@ -594,6 +735,49 @@ public:
// FIXME: this gadge will need a fix-it
};
+class SpanTwoParamConstructorGadget : public WarningGadget {
+ static constexpr const char *const SpanTwoParamConstructorTag =
+ "spanTwoParamConstructor";
+ const CXXConstructExpr *Ctor; // the span constructor expression
+
+public:
+ SpanTwoParamConstructorGadget(const MatchFinder::MatchResult &Result)
+ : WarningGadget(Kind::SpanTwoParamConstructor),
+ Ctor(Result.Nodes.getNodeAs<CXXConstructExpr>(
+ SpanTwoParamConstructorTag)) {}
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::SpanTwoParamConstructor;
+ }
+
+ static Matcher matcher() {
+ auto HasTwoParamSpanCtorDecl = hasDeclaration(
+ cxxConstructorDecl(hasDeclContext(isInStdNamespace()), hasName("span"),
+ parameterCountIs(2)));
+
+ return stmt(cxxConstructExpr(HasTwoParamSpanCtorDecl,
+ unless(isSafeSpanTwoParamConstruct()))
+ .bind(SpanTwoParamConstructorTag));
+ }
+
+ void handleUnsafeOperation(UnsafeBufferUsageHandler &Handler,
+ bool IsRelatedToDecl,
+ ASTContext &Ctx) const override {
+ Handler.handleUnsafeOperationInContainer(Ctor, IsRelatedToDecl, Ctx);
+ }
+ SourceLocation getSourceLoc() const override { return Ctor->getBeginLoc(); }
+
+ DeclUseList getClaimedVarUseSites() const override {
+ // If the constructor call is of the form `std::span{var, n}`, `var` is
+ // considered an unsafe variable.
+ if (auto *DRE = dyn_cast<DeclRefExpr>(Ctor->getArg(0))) {
+ if (isa<VarDecl>(DRE->getDecl()))
+ return {DRE};
+ }
+ return {};
+ }
+};
+
/// A pointer initialization expression of the form:
/// \code
/// int *p = q;
@@ -602,36 +786,33 @@ class PointerInitGadget : public FixableGadget {
private:
static constexpr const char *const PointerInitLHSTag = "ptrInitLHS";
static constexpr const char *const PointerInitRHSTag = "ptrInitRHS";
- const VarDecl * PtrInitLHS; // the LHS pointer expression in `PI`
- const DeclRefExpr * PtrInitRHS; // the RHS pointer expression in `PI`
+ const VarDecl *PtrInitLHS; // the LHS pointer expression in `PI`
+ const DeclRefExpr *PtrInitRHS; // the RHS pointer expression in `PI`
public:
PointerInitGadget(const MatchFinder::MatchResult &Result)
: FixableGadget(Kind::PointerInit),
- PtrInitLHS(Result.Nodes.getNodeAs<VarDecl>(PointerInitLHSTag)),
- PtrInitRHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerInitRHSTag)) {}
+ PtrInitLHS(Result.Nodes.getNodeAs<VarDecl>(PointerInitLHSTag)),
+ PtrInitRHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerInitRHSTag)) {}
static bool classof(const Gadget *G) {
return G->getKind() == Kind::PointerInit;
}
static Matcher matcher() {
- auto PtrInitStmt = declStmt(hasSingleDecl(varDecl(
- hasInitializer(ignoringImpCasts(declRefExpr(
- hasPointerType(),
- toSupportedVariable()).
- bind(PointerInitRHSTag)))).
- bind(PointerInitLHSTag)));
+ auto PtrInitStmt = declStmt(hasSingleDecl(
+ varDecl(hasInitializer(ignoringImpCasts(
+ declRefExpr(hasPointerType(), toSupportedVariable())
+ .bind(PointerInitRHSTag))))
+ .bind(PointerInitLHSTag)));
return stmt(PtrInitStmt);
}
- virtual std::optional<FixItList> getFixits(const Strategy &S) const override;
-
- virtual const Stmt *getBaseStmt() const override {
- // FIXME: This needs to be the entire DeclStmt, assuming that this method
- // makes sense at all on a FixableGadget.
- return PtrInitRHS;
+ virtual std::optional<FixItList>
+ getFixits(const FixitStrategy &S) const override;
+ SourceLocation getSourceLoc() const override {
+ return PtrInitRHS->getBeginLoc();
}
virtual DeclUseList getClaimedVarUseSites() const override {
@@ -640,8 +821,7 @@ public:
virtual std::optional<std::pair<const VarDecl *, const VarDecl *>>
getStrategyImplications() const override {
- return std::make_pair(PtrInitLHS,
- cast<VarDecl>(PtrInitRHS->getDecl()));
+ return std::make_pair(PtrInitLHS, cast<VarDecl>(PtrInitRHS->getDecl()));
}
};
@@ -649,42 +829,39 @@ public:
/// \code
/// p = q;
/// \endcode
-class PointerAssignmentGadget : public FixableGadget {
+/// where both `p` and `q` are pointers.
+class PtrToPtrAssignmentGadget : public FixableGadget {
private:
static constexpr const char *const PointerAssignLHSTag = "ptrLHS";
static constexpr const char *const PointerAssignRHSTag = "ptrRHS";
- const DeclRefExpr * PtrLHS; // the LHS pointer expression in `PA`
- const DeclRefExpr * PtrRHS; // the RHS pointer expression in `PA`
+ const DeclRefExpr *PtrLHS; // the LHS pointer expression in `PA`
+ const DeclRefExpr *PtrRHS; // the RHS pointer expression in `PA`
public:
- PointerAssignmentGadget(const MatchFinder::MatchResult &Result)
- : FixableGadget(Kind::PointerAssignment),
- PtrLHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerAssignLHSTag)),
- PtrRHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerAssignRHSTag)) {}
+ PtrToPtrAssignmentGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::PtrToPtrAssignment),
+ PtrLHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerAssignLHSTag)),
+ PtrRHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerAssignRHSTag)) {}
static bool classof(const Gadget *G) {
- return G->getKind() == Kind::PointerAssignment;
+ return G->getKind() == Kind::PtrToPtrAssignment;
}
static Matcher matcher() {
- auto PtrAssignExpr = binaryOperator(allOf(hasOperatorName("="),
- hasRHS(ignoringParenImpCasts(declRefExpr(hasPointerType(),
- toSupportedVariable()).
- bind(PointerAssignRHSTag))),
- hasLHS(declRefExpr(hasPointerType(),
- toSupportedVariable()).
- bind(PointerAssignLHSTag))));
+ auto PtrAssignExpr = binaryOperator(
+ allOf(hasOperatorName("="),
+ hasRHS(ignoringParenImpCasts(
+ declRefExpr(hasPointerType(), toSupportedVariable())
+ .bind(PointerAssignRHSTag))),
+ hasLHS(declRefExpr(hasPointerType(), toSupportedVariable())
+ .bind(PointerAssignLHSTag))));
return stmt(isInUnspecifiedUntypedContext(PtrAssignExpr));
}
- virtual std::optional<FixItList> getFixits(const Strategy &S) const override;
-
- virtual const Stmt *getBaseStmt() const override {
- // FIXME: This should be the binary operator, assuming that this method
- // makes sense at all on a FixableGadget.
- return PtrLHS;
- }
+ virtual std::optional<FixItList>
+ getFixits(const FixitStrategy &S) const override;
+ SourceLocation getSourceLoc() const override { return PtrLHS->getBeginLoc(); }
virtual DeclUseList getClaimedVarUseSites() const override {
return DeclUseList{PtrLHS, PtrRHS};
@@ -697,6 +874,55 @@ public:
}
};
+/// An assignment expression of the form:
+/// \code
+/// ptr = array;
+/// \endcode
+/// where `p` is a pointer and `array` is a constant size array.
+class CArrayToPtrAssignmentGadget : public FixableGadget {
+private:
+ static constexpr const char *const PointerAssignLHSTag = "ptrLHS";
+ static constexpr const char *const PointerAssignRHSTag = "ptrRHS";
+ const DeclRefExpr *PtrLHS; // the LHS pointer expression in `PA`
+ const DeclRefExpr *PtrRHS; // the RHS pointer expression in `PA`
+
+public:
+ CArrayToPtrAssignmentGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::CArrayToPtrAssignment),
+ PtrLHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerAssignLHSTag)),
+ PtrRHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerAssignRHSTag)) {}
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::CArrayToPtrAssignment;
+ }
+
+ static Matcher matcher() {
+ auto PtrAssignExpr = binaryOperator(
+ allOf(hasOperatorName("="),
+ hasRHS(ignoringParenImpCasts(
+ declRefExpr(hasType(hasCanonicalType(constantArrayType())),
+ toSupportedVariable())
+ .bind(PointerAssignRHSTag))),
+ hasLHS(declRefExpr(hasPointerType(), toSupportedVariable())
+ .bind(PointerAssignLHSTag))));
+
+ return stmt(isInUnspecifiedUntypedContext(PtrAssignExpr));
+ }
+
+ virtual std::optional<FixItList>
+ getFixits(const FixitStrategy &S) const override;
+ SourceLocation getSourceLoc() const override { return PtrLHS->getBeginLoc(); }
+
+ virtual DeclUseList getClaimedVarUseSites() const override {
+ return DeclUseList{PtrLHS, PtrRHS};
+ }
+
+ virtual std::optional<std::pair<const VarDecl *, const VarDecl *>>
+ getStrategyImplications() const override {
+ return {};
+ }
+};
+
/// A call of a function or method that performs unchecked buffer operations
/// over one of its pointer parameters.
class UnsafeBufferUsageAttrGadget : public WarningGadget {
@@ -713,10 +939,53 @@ public:
}
static Matcher matcher() {
- return stmt(callExpr(callee(functionDecl(hasAttr(attr::UnsafeBufferUsage))))
- .bind(OpTag));
+ auto HasUnsafeFnDecl =
+ callee(functionDecl(hasAttr(attr::UnsafeBufferUsage)));
+ return stmt(callExpr(HasUnsafeFnDecl).bind(OpTag));
}
- const Stmt *getBaseStmt() const override { return Op; }
+
+ void handleUnsafeOperation(UnsafeBufferUsageHandler &Handler,
+ bool IsRelatedToDecl,
+ ASTContext &Ctx) const override {
+ Handler.handleUnsafeOperation(Op, IsRelatedToDecl, Ctx);
+ }
+ SourceLocation getSourceLoc() const override { return Op->getBeginLoc(); }
+
+ DeclUseList getClaimedVarUseSites() const override { return {}; }
+};
+
+/// A call of a constructor that performs unchecked buffer operations
+/// over one of its pointer parameters, or constructs a class object that will
+/// perform buffer operations that depend on the correctness of the parameters.
+class UnsafeBufferUsageCtorAttrGadget : public WarningGadget {
+ constexpr static const char *const OpTag = "cxx_construct_expr";
+ const CXXConstructExpr *Op;
+
+public:
+ UnsafeBufferUsageCtorAttrGadget(const MatchFinder::MatchResult &Result)
+ : WarningGadget(Kind::UnsafeBufferUsageCtorAttr),
+ Op(Result.Nodes.getNodeAs<CXXConstructExpr>(OpTag)) {}
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::UnsafeBufferUsageCtorAttr;
+ }
+
+ static Matcher matcher() {
+ auto HasUnsafeCtorDecl =
+ hasDeclaration(cxxConstructorDecl(hasAttr(attr::UnsafeBufferUsage)));
+ // std::span(ptr, size) ctor is handled by SpanTwoParamConstructorGadget.
+ auto HasTwoParamSpanCtorDecl = SpanTwoParamConstructorGadget::matcher();
+ return stmt(
+ cxxConstructExpr(HasUnsafeCtorDecl, unless(HasTwoParamSpanCtorDecl))
+ .bind(OpTag));
+ }
+
+ void handleUnsafeOperation(UnsafeBufferUsageHandler &Handler,
+ bool IsRelatedToDecl,
+ ASTContext &Ctx) const override {
+ Handler.handleUnsafeOperation(Op, IsRelatedToDecl, Ctx);
+ }
+ SourceLocation getSourceLoc() const override { return Op->getBeginLoc(); }
DeclUseList getClaimedVarUseSites() const override { return {}; }
};
@@ -745,7 +1014,13 @@ public:
explicitCastExpr(anyOf(has(callExpr), has(parenExpr(has(callExpr)))))
.bind(OpTag));
}
- const Stmt *getBaseStmt() const override { return Op; }
+
+ void handleUnsafeOperation(UnsafeBufferUsageHandler &Handler,
+ bool IsRelatedToDecl,
+ ASTContext &Ctx) const override {
+ Handler.handleUnsafeOperation(Op, IsRelatedToDecl, Ctx);
+ }
+ SourceLocation getSourceLoc() const override { return Op->getBeginLoc(); }
DeclUseList getClaimedVarUseSites() const override { return {}; }
};
@@ -772,18 +1047,17 @@ public:
static Matcher matcher() {
auto ArrayOrPtr = anyOf(hasPointerType(), hasArrayType());
- auto BaseIsArrayOrPtrDRE =
- hasBase(ignoringParenImpCasts(declRefExpr(ArrayOrPtr,
- toSupportedVariable())));
+ auto BaseIsArrayOrPtrDRE = hasBase(
+ ignoringParenImpCasts(declRefExpr(ArrayOrPtr, toSupportedVariable())));
auto Target =
arraySubscriptExpr(BaseIsArrayOrPtrDRE).bind(ULCArraySubscriptTag);
return expr(isInUnspecifiedLvalueContext(Target));
}
- virtual std::optional<FixItList> getFixits(const Strategy &S) const override;
-
- virtual const Stmt *getBaseStmt() const override { return Node; }
+ virtual std::optional<FixItList>
+ getFixits(const FixitStrategy &S) const override;
+ SourceLocation getSourceLoc() const override { return Node->getBeginLoc(); }
virtual DeclUseList getClaimedVarUseSites() const override {
if (const auto *DRE =
@@ -815,19 +1089,17 @@ public:
static Matcher matcher() {
auto ArrayOrPtr = anyOf(hasPointerType(), hasArrayType());
- auto target = expr(
- ignoringParenImpCasts(declRefExpr(allOf(ArrayOrPtr,
- toSupportedVariable())).bind(DeclRefExprTag)));
+ auto target = expr(ignoringParenImpCasts(
+ declRefExpr(allOf(ArrayOrPtr, toSupportedVariable()))
+ .bind(DeclRefExprTag)));
return stmt(isInUnspecifiedPointerContext(target));
}
- virtual std::optional<FixItList> getFixits(const Strategy &S) const override;
+ virtual std::optional<FixItList>
+ getFixits(const FixitStrategy &S) const override;
+ SourceLocation getSourceLoc() const override { return Node->getBeginLoc(); }
- virtual const Stmt *getBaseStmt() const override { return Node; }
-
- virtual DeclUseList getClaimedVarUseSites() const override {
- return {Node};
- }
+ virtual DeclUseList getClaimedVarUseSites() const override { return {Node}; }
};
class PointerDereferenceGadget : public FixableGadget {
@@ -863,9 +1135,9 @@ public:
return {BaseDeclRefExpr};
}
- virtual const Stmt *getBaseStmt() const final { return Op; }
-
- virtual std::optional<FixItList> getFixits(const Strategy &S) const override;
+ virtual std::optional<FixItList>
+ getFixits(const FixitStrategy &S) const override;
+ SourceLocation getSourceLoc() const override { return Op->getBeginLoc(); }
};
// Represents expressions of the form `&DRE[any]` in the Unspecified Pointer
@@ -891,21 +1163,21 @@ public:
static Matcher matcher() {
return expr(isInUnspecifiedPointerContext(expr(ignoringImpCasts(
- unaryOperator(hasOperatorName("&"),
- hasUnaryOperand(arraySubscriptExpr(
- hasBase(ignoringParenImpCasts(declRefExpr(
- toSupportedVariable()))))))
+ unaryOperator(
+ hasOperatorName("&"),
+ hasUnaryOperand(arraySubscriptExpr(hasBase(
+ ignoringParenImpCasts(declRefExpr(toSupportedVariable()))))))
.bind(UPCAddressofArraySubscriptTag)))));
}
- virtual std::optional<FixItList> getFixits(const Strategy &) const override;
-
- virtual const Stmt *getBaseStmt() const override { return Node; }
+ virtual std::optional<FixItList>
+ getFixits(const FixitStrategy &) const override;
+ SourceLocation getSourceLoc() const override { return Node->getBeginLoc(); }
virtual DeclUseList getClaimedVarUseSites() const override {
const auto *ArraySubst = cast<ArraySubscriptExpr>(Node->getSubExpr());
const auto *DRE =
- cast<DeclRefExpr>(ArraySubst->getBase()->IgnoreImpCasts());
+ cast<DeclRefExpr>(ArraySubst->getBase()->IgnoreParenImpCasts());
return {DRE};
}
};
@@ -977,58 +1249,18 @@ public:
};
} // namespace
-namespace {
-// Strategy is a map from variables to the way we plan to emit fixes for
-// these variables. It is figured out gradually by trying different fixes
-// for different variables depending on gadgets in which these variables
-// participate.
-class Strategy {
-public:
- enum class Kind {
- Wontfix, // We don't plan to emit a fixit for this variable.
- Span, // We recommend replacing the variable with std::span.
- Iterator, // We recommend replacing the variable with std::span::iterator.
- Array, // We recommend replacing the variable with std::array.
- Vector // We recommend replacing the variable with std::vector.
- };
-
-private:
- using MapTy = llvm::DenseMap<const VarDecl *, Kind>;
-
- MapTy Map;
-
-public:
- Strategy() = default;
- Strategy(const Strategy &) = delete; // Let's avoid copies.
- Strategy &operator=(const Strategy &) = delete;
- Strategy(Strategy &&) = default;
- Strategy &operator=(Strategy &&) = default;
-
- void set(const VarDecl *VD, Kind K) { Map[VD] = K; }
-
- Kind lookup(const VarDecl *VD) const {
- auto I = Map.find(VD);
- if (I == Map.end())
- return Kind::Wontfix;
-
- return I->second;
- }
-};
-} // namespace
-
-
// Representing a pointer type expression of the form `++Ptr` in an Unspecified
// Pointer Context (UPC):
class UPCPreIncrementGadget : public FixableGadget {
private:
static constexpr const char *const UPCPreIncrementTag =
- "PointerPreIncrementUnderUPC";
+ "PointerPreIncrementUnderUPC";
const UnaryOperator *Node; // the `++Ptr` node
public:
UPCPreIncrementGadget(const MatchFinder::MatchResult &Result)
- : FixableGadget(Kind::UPCPreIncrement),
- Node(Result.Nodes.getNodeAs<UnaryOperator>(UPCPreIncrementTag)) {
+ : FixableGadget(Kind::UPCPreIncrement),
+ Node(Result.Nodes.getNodeAs<UnaryOperator>(UPCPreIncrementTag)) {
assert(Node != nullptr && "Expecting a non-null matching result");
}
@@ -1042,15 +1274,14 @@ public:
// can have the matcher be general, so long as `getClaimedVarUseSites` does
// things right.
return stmt(isInUnspecifiedPointerContext(expr(ignoringImpCasts(
- unaryOperator(isPreInc(),
- hasUnaryOperand(declRefExpr(
- toSupportedVariable()))
- ).bind(UPCPreIncrementTag)))));
+ unaryOperator(isPreInc(),
+ hasUnaryOperand(declRefExpr(toSupportedVariable())))
+ .bind(UPCPreIncrementTag)))));
}
- virtual std::optional<FixItList> getFixits(const Strategy &S) const override;
-
- virtual const Stmt *getBaseStmt() const override { return Node; }
+ virtual std::optional<FixItList>
+ getFixits(const FixitStrategy &S) const override;
+ SourceLocation getSourceLoc() const override { return Node->getBeginLoc(); }
virtual DeclUseList getClaimedVarUseSites() const override {
return {dyn_cast<DeclRefExpr>(Node->getSubExpr())};
@@ -1081,16 +1312,21 @@ public:
}
static Matcher matcher() {
+ // clang-format off
return stmt(isInUnspecifiedUntypedContext(expr(ignoringImpCasts(
binaryOperator(hasOperatorName("+="),
- hasLHS(declRefExpr(toSupportedVariable())),
+ hasLHS(
+ declRefExpr(
+ hasPointerType(),
+ toSupportedVariable())),
hasRHS(expr().bind(OffsetTag)))
.bind(UUCAddAssignTag)))));
+ // clang-format on
}
- virtual std::optional<FixItList> getFixits(const Strategy &S) const override;
-
- virtual const Stmt *getBaseStmt() const override { return Node; }
+ virtual std::optional<FixItList>
+ getFixits(const FixitStrategy &S) const override;
+ SourceLocation getSourceLoc() const override { return Node->getBeginLoc(); }
virtual DeclUseList getClaimedVarUseSites() const override {
return {dyn_cast<DeclRefExpr>(Node->getLHS())};
@@ -1138,10 +1374,11 @@ public:
// clang-format on
}
- virtual std::optional<FixItList> getFixits(const Strategy &s) const final;
-
- // TODO remove this method from FixableGadget interface
- virtual const Stmt *getBaseStmt() const final { return nullptr; }
+ virtual std::optional<FixItList>
+ getFixits(const FixitStrategy &s) const final;
+ SourceLocation getSourceLoc() const override {
+ return DerefOp->getBeginLoc();
+ }
virtual DeclUseList getClaimedVarUseSites() const final {
return {BaseDeclRefExpr};
@@ -1210,6 +1447,10 @@ findGadgets(const Decl *D, const UnsafeBufferUsageHandler &Handler,
#define WARNING_GADGET(x) \
allOf(x ## Gadget::matcher().bind(#x), \
notInSafeBufferOptOut(&Handler)),
+#define WARNING_CONTAINER_GADGET(x) \
+ allOf(x ## Gadget::matcher().bind(#x), \
+ notInSafeBufferOptOut(&Handler), \
+ unless(ignoreUnsafeBufferInContainer(&Handler))),
#include "clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def"
// Avoid a hanging comma.
unless(stmt())
@@ -1344,38 +1585,76 @@ bool clang::internal::anyConflict(const SmallVectorImpl<FixItHint> &FixIts,
}
std::optional<FixItList>
-PointerAssignmentGadget::getFixits(const Strategy &S) const {
+PtrToPtrAssignmentGadget::getFixits(const FixitStrategy &S) const {
const auto *LeftVD = cast<VarDecl>(PtrLHS->getDecl());
const auto *RightVD = cast<VarDecl>(PtrRHS->getDecl());
switch (S.lookup(LeftVD)) {
- case Strategy::Kind::Span:
- if (S.lookup(RightVD) == Strategy::Kind::Span)
- return FixItList{};
- return std::nullopt;
- case Strategy::Kind::Wontfix:
- return std::nullopt;
- case Strategy::Kind::Iterator:
- case Strategy::Kind::Array:
- case Strategy::Kind::Vector:
- llvm_unreachable("unsupported strategies for FixableGadgets");
+ case FixitStrategy::Kind::Span:
+ if (S.lookup(RightVD) == FixitStrategy::Kind::Span)
+ return FixItList{};
+ return std::nullopt;
+ case FixitStrategy::Kind::Wontfix:
+ return std::nullopt;
+ case FixitStrategy::Kind::Iterator:
+ case FixitStrategy::Kind::Array:
+ return std::nullopt;
+ case FixitStrategy::Kind::Vector:
+ llvm_unreachable("unsupported strategies for FixableGadgets");
+ }
+ return std::nullopt;
+}
+
+/// \returns fixit that adds .data() call after \DRE.
+static inline std::optional<FixItList> createDataFixit(const ASTContext &Ctx,
+ const DeclRefExpr *DRE);
+
+std::optional<FixItList>
+CArrayToPtrAssignmentGadget::getFixits(const FixitStrategy &S) const {
+ const auto *LeftVD = cast<VarDecl>(PtrLHS->getDecl());
+ const auto *RightVD = cast<VarDecl>(PtrRHS->getDecl());
+ // TLDR: Implementing fixits for non-Wontfix strategy on both LHS and RHS is
+ // non-trivial.
+ //
+ // CArrayToPtrAssignmentGadget doesn't have strategy implications because
+ // constant size array propagates its bounds. Because of that LHS and RHS are
+ // addressed by two different fixits.
+ //
+ // At the same time FixitStrategy S doesn't reflect what group a fixit belongs
+ // to and can't be generally relied on in multi-variable Fixables!
+ //
+ // E. g. If an instance of this gadget is fixing variable on LHS then the
+ // variable on RHS is fixed by a different fixit and its strategy for LHS
+ // fixit is as if Wontfix.
+ //
+ // The only exception is Wontfix strategy for a given variable as that is
+ // valid for any fixit produced for the given input source code.
+ if (S.lookup(LeftVD) == FixitStrategy::Kind::Span) {
+ if (S.lookup(RightVD) == FixitStrategy::Kind::Wontfix) {
+ return FixItList{};
+ }
+ } else if (S.lookup(LeftVD) == FixitStrategy::Kind::Wontfix) {
+ if (S.lookup(RightVD) == FixitStrategy::Kind::Array) {
+ return createDataFixit(RightVD->getASTContext(), PtrRHS);
+ }
}
return std::nullopt;
}
std::optional<FixItList>
-PointerInitGadget::getFixits(const Strategy &S) const {
+PointerInitGadget::getFixits(const FixitStrategy &S) const {
const auto *LeftVD = PtrInitLHS;
const auto *RightVD = cast<VarDecl>(PtrInitRHS->getDecl());
switch (S.lookup(LeftVD)) {
- case Strategy::Kind::Span:
- if (S.lookup(RightVD) == Strategy::Kind::Span)
- return FixItList{};
- return std::nullopt;
- case Strategy::Kind::Wontfix:
- return std::nullopt;
- case Strategy::Kind::Iterator:
- case Strategy::Kind::Array:
- case Strategy::Kind::Vector:
+ case FixitStrategy::Kind::Span:
+ if (S.lookup(RightVD) == FixitStrategy::Kind::Span)
+ return FixItList{};
+ return std::nullopt;
+ case FixitStrategy::Kind::Wontfix:
+ return std::nullopt;
+ case FixitStrategy::Kind::Iterator:
+ case FixitStrategy::Kind::Array:
+ return std::nullopt;
+ case FixitStrategy::Kind::Vector:
llvm_unreachable("unsupported strategies for FixableGadgets");
}
return std::nullopt;
@@ -1392,12 +1671,12 @@ static bool isNonNegativeIntegerExpr(const Expr *Expr, const VarDecl *VD,
}
std::optional<FixItList>
-ULCArraySubscriptGadget::getFixits(const Strategy &S) const {
+ULCArraySubscriptGadget::getFixits(const FixitStrategy &S) const {
if (const auto *DRE =
dyn_cast<DeclRefExpr>(Node->getBase()->IgnoreImpCasts()))
if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
switch (S.lookup(VD)) {
- case Strategy::Kind::Span: {
+ case FixitStrategy::Kind::Span: {
// If the index has a negative constant value, we give up as no valid
// fix-it can be generated:
@@ -1408,10 +1687,11 @@ ULCArraySubscriptGadget::getFixits(const Strategy &S) const {
// no-op is a good fix-it, otherwise
return FixItList{};
}
- case Strategy::Kind::Wontfix:
- case Strategy::Kind::Iterator:
- case Strategy::Kind::Array:
- case Strategy::Kind::Vector:
+ case FixitStrategy::Kind::Array:
+ return FixItList{};
+ case FixitStrategy::Kind::Wontfix:
+ case FixitStrategy::Kind::Iterator:
+ case FixitStrategy::Kind::Vector:
llvm_unreachable("unsupported strategies for FixableGadgets");
}
}
@@ -1422,17 +1702,18 @@ static std::optional<FixItList> // forward declaration
fixUPCAddressofArraySubscriptWithSpan(const UnaryOperator *Node);
std::optional<FixItList>
-UPCAddressofArraySubscriptGadget::getFixits(const Strategy &S) const {
+UPCAddressofArraySubscriptGadget::getFixits(const FixitStrategy &S) const {
auto DREs = getClaimedVarUseSites();
const auto *VD = cast<VarDecl>(DREs.front()->getDecl());
switch (S.lookup(VD)) {
- case Strategy::Kind::Span:
+ case FixitStrategy::Kind::Span:
return fixUPCAddressofArraySubscriptWithSpan(Node);
- case Strategy::Kind::Wontfix:
- case Strategy::Kind::Iterator:
- case Strategy::Kind::Array:
- case Strategy::Kind::Vector:
+ case FixitStrategy::Kind::Wontfix:
+ case FixitStrategy::Kind::Iterator:
+ case FixitStrategy::Kind::Array:
+ return std::nullopt;
+ case FixitStrategy::Kind::Vector:
llvm_unreachable("unsupported strategies for FixableGadgets");
}
return std::nullopt; // something went wrong, no fix-it
@@ -1452,15 +1733,6 @@ std::string getUserFillPlaceHolder(StringRef HintTextToUser = "placeholder") {
return s;
}
-// Return the text representation of the given `APInt Val`:
-static std::string getAPIntText(APInt Val) {
- SmallVector<char> Txt;
- Val.toString(Txt, 10, true);
- // APInt::toString does not add '\0' to the end of the string for us:
- Txt.push_back('\0');
- return Txt.data();
-}
-
// Return the source location of the last character of the AST `Node`.
template <typename NodeTy>
static std::optional<SourceLocation>
@@ -1566,9 +1838,9 @@ static SourceRange getSourceRangeToTokenEnd(const Decl *D,
const LangOptions &LangOpts) {
SourceLocation Begin = D->getBeginLoc();
SourceLocation
- End = // `D->getEndLoc` should always return the starting location of the
- // last token, so we should get the end of the token
- Lexer::getLocForEndOfToken(D->getEndLoc(), 0, SM, LangOpts);
+ End = // `D->getEndLoc` should always return the starting location of the
+ // last token, so we should get the end of the token
+ Lexer::getLocForEndOfToken(D->getEndLoc(), 0, SM, LangOpts);
return SourceRange(Begin, End);
}
@@ -1683,10 +1955,10 @@ getSpanTypeText(StringRef EltTyText,
}
std::optional<FixItList>
-DerefSimplePtrArithFixableGadget::getFixits(const Strategy &s) const {
+DerefSimplePtrArithFixableGadget::getFixits(const FixitStrategy &s) const {
const VarDecl *VD = dyn_cast<VarDecl>(BaseDeclRefExpr->getDecl());
- if (VD && s.lookup(VD) == Strategy::Kind::Span) {
+ if (VD && s.lookup(VD) == FixitStrategy::Kind::Span) {
ASTContext &Ctx = VD->getASTContext();
// std::span can't represent elements before its begin()
if (auto ConstVal = Offset->getIntegerConstantExpr(Ctx))
@@ -1746,10 +2018,10 @@ DerefSimplePtrArithFixableGadget::getFixits(const Strategy &s) const {
}
std::optional<FixItList>
-PointerDereferenceGadget::getFixits(const Strategy &S) const {
+PointerDereferenceGadget::getFixits(const FixitStrategy &S) const {
const VarDecl *VD = cast<VarDecl>(BaseDeclRefExpr->getDecl());
switch (S.lookup(VD)) {
- case Strategy::Kind::Span: {
+ case FixitStrategy::Kind::Span: {
ASTContext &Ctx = VD->getASTContext();
SourceManager &SM = Ctx.getSourceManager();
// Required changes: *(ptr); => (ptr[0]); and *ptr; => ptr[0]
@@ -1760,45 +2032,52 @@ PointerDereferenceGadget::getFixits(const Strategy &S) const {
if (auto LocPastOperand =
getPastLoc(BaseDeclRefExpr, SM, Ctx.getLangOpts())) {
return FixItList{{FixItHint::CreateRemoval(derefRange),
- FixItHint::CreateInsertion(*LocPastOperand, "[0]")}};
+ FixItHint::CreateInsertion(*LocPastOperand, "[0]")}};
}
break;
}
- case Strategy::Kind::Iterator:
- case Strategy::Kind::Array:
- case Strategy::Kind::Vector:
- llvm_unreachable("Strategy not implemented yet!");
- case Strategy::Kind::Wontfix:
+ case FixitStrategy::Kind::Iterator:
+ case FixitStrategy::Kind::Array:
+ return std::nullopt;
+ case FixitStrategy::Kind::Vector:
+ llvm_unreachable("FixitStrategy not implemented yet!");
+ case FixitStrategy::Kind::Wontfix:
llvm_unreachable("Invalid strategy!");
}
return std::nullopt;
}
+static inline std::optional<FixItList> createDataFixit(const ASTContext &Ctx,
+ const DeclRefExpr *DRE) {
+ const SourceManager &SM = Ctx.getSourceManager();
+ // Inserts the .data() after the DRE
+ std::optional<SourceLocation> EndOfOperand =
+ getPastLoc(DRE, SM, Ctx.getLangOpts());
+
+ if (EndOfOperand)
+ return FixItList{{FixItHint::CreateInsertion(*EndOfOperand, ".data()")}};
+
+ return std::nullopt;
+}
+
// Generates fix-its replacing an expression of the form UPC(DRE) with
// `DRE.data()`
-std::optional<FixItList> UPCStandalonePointerGadget::getFixits(const Strategy &S)
- const {
+std::optional<FixItList>
+UPCStandalonePointerGadget::getFixits(const FixitStrategy &S) const {
const auto VD = cast<VarDecl>(Node->getDecl());
switch (S.lookup(VD)) {
- case Strategy::Kind::Span: {
- ASTContext &Ctx = VD->getASTContext();
- SourceManager &SM = Ctx.getSourceManager();
- // Inserts the .data() after the DRE
- std::optional<SourceLocation> EndOfOperand =
- getPastLoc(Node, SM, Ctx.getLangOpts());
-
- if (EndOfOperand)
- return FixItList{{FixItHint::CreateInsertion(
- *EndOfOperand, ".data()")}};
- // FIXME: Points inside a macro expansion.
- break;
- }
- case Strategy::Kind::Wontfix:
- case Strategy::Kind::Iterator:
- case Strategy::Kind::Array:
- case Strategy::Kind::Vector:
- llvm_unreachable("unsupported strategies for FixableGadgets");
+ case FixitStrategy::Kind::Array:
+ case FixitStrategy::Kind::Span: {
+ return createDataFixit(VD->getASTContext(), Node);
+ // FIXME: Points inside a macro expansion.
+ break;
+ }
+ case FixitStrategy::Kind::Wontfix:
+ case FixitStrategy::Kind::Iterator:
+ return std::nullopt;
+ case FixitStrategy::Kind::Vector:
+ llvm_unreachable("unsupported strategies for FixableGadgets");
}
return std::nullopt;
@@ -1842,17 +2121,17 @@ fixUPCAddressofArraySubscriptWithSpan(const UnaryOperator *Node) {
}
std::optional<FixItList>
-UUCAddAssignGadget::getFixits(const Strategy &S) const {
+UUCAddAssignGadget::getFixits(const FixitStrategy &S) const {
DeclUseList DREs = getClaimedVarUseSites();
if (DREs.size() != 1)
return std::nullopt; // In cases of `Ptr += n` where `Ptr` is not a DRE, we
// give up
if (const VarDecl *VD = dyn_cast<VarDecl>(DREs.front()->getDecl())) {
- if (S.lookup(VD) == Strategy::Kind::Span) {
+ if (S.lookup(VD) == FixitStrategy::Kind::Span) {
FixItList Fixes;
- const Stmt *AddAssignNode = getBaseStmt();
+ const Stmt *AddAssignNode = Node;
StringRef varName = VD->getName();
const ASTContext &Ctx = VD->getASTContext();
@@ -1883,17 +2162,17 @@ UUCAddAssignGadget::getFixits(const Strategy &S) const {
return std::nullopt; // Not in the cases that we can handle for now, give up.
}
-std::optional<FixItList> UPCPreIncrementGadget::getFixits(const Strategy &S) const {
+std::optional<FixItList>
+UPCPreIncrementGadget::getFixits(const FixitStrategy &S) const {
DeclUseList DREs = getClaimedVarUseSites();
if (DREs.size() != 1)
return std::nullopt; // In cases of `++Ptr` where `Ptr` is not a DRE, we
// give up
if (const VarDecl *VD = dyn_cast<VarDecl>(DREs.front()->getDecl())) {
- if (S.lookup(VD) == Strategy::Kind::Span) {
+ if (S.lookup(VD) == FixitStrategy::Kind::Span) {
FixItList Fixes;
std::stringstream SS;
- const Stmt *PreIncNode = getBaseStmt();
StringRef varName = VD->getName();
const ASTContext &Ctx = VD->getASTContext();
@@ -1901,34 +2180,36 @@ std::optional<FixItList> UPCPreIncrementGadget::getFixits(const Strategy &S) con
SS << "(" << varName.data() << " = " << varName.data()
<< ".subspan(1)).data()";
std::optional<SourceLocation> PreIncLocation =
- getEndCharLoc(PreIncNode, Ctx.getSourceManager(), Ctx.getLangOpts());
+ getEndCharLoc(Node, Ctx.getSourceManager(), Ctx.getLangOpts());
if (!PreIncLocation)
return std::nullopt;
Fixes.push_back(FixItHint::CreateReplacement(
- SourceRange(PreIncNode->getBeginLoc(), *PreIncLocation), SS.str()));
+ SourceRange(Node->getBeginLoc(), *PreIncLocation), SS.str()));
return Fixes;
}
}
return std::nullopt; // Not in the cases that we can handle for now, give up.
}
-
// For a non-null initializer `Init` of `T *` type, this function returns
// `FixItHint`s producing a list initializer `{Init, S}` as a part of a fix-it
// to output stream.
// In many cases, this function cannot figure out the actual extent `S`. It
// then will use a place holder to replace `S` to ask users to fill `S` in. The
// initializer shall be used to initialize a variable of type `std::span<T>`.
+// In some cases (e. g. constant size array) the initializer should remain
+// unchanged and the function returns empty list. In case the function can't
+// provide the right fixit it will return nullopt.
//
// FIXME: Support multi-level pointers
//
// Parameters:
// `Init` a pointer to the initializer expression
// `Ctx` a reference to the ASTContext
-static FixItList
+static std::optional<FixItList>
FixVarInitializerWithSpan(const Expr *Init, ASTContext &Ctx,
- const StringRef UserFillPlaceHolder) {
+ const StringRef UserFillPlaceHolder) {
const SourceManager &SM = Ctx.getSourceManager();
const LangOptions &LangOpts = Ctx.getLangOpts();
@@ -1936,7 +2217,8 @@ FixVarInitializerWithSpan(const Expr *Init, ASTContext &Ctx,
// NULL pointer, we use the default constructor to initialize the span
// object, i.e., a `std:span` variable declaration with no initializer.
// So the fix-it is just to remove the initializer.
- if (Init->isNullPointerConstant(Ctx,
+ if (Init->isNullPointerConstant(
+ Ctx,
// FIXME: Why does this function not ask for `const ASTContext
// &`? It should. Maybe worth an NFC patch later.
Expr::NullPointerConstantValueDependence::
@@ -1944,11 +2226,11 @@ FixVarInitializerWithSpan(const Expr *Init, ASTContext &Ctx,
std::optional<SourceLocation> InitLocation =
getEndCharLoc(Init, SM, LangOpts);
if (!InitLocation)
- return {};
+ return std::nullopt;
SourceRange SR(Init->getBeginLoc(), *InitLocation);
- return {FixItHint::CreateRemoval(SR)};
+ return FixItList{FixItHint::CreateRemoval(SR)};
}
FixItList FixIts{};
@@ -1967,19 +2249,18 @@ FixVarInitializerWithSpan(const Expr *Init, ASTContext &Ctx,
if (!Ext->HasSideEffects(Ctx)) {
std::optional<StringRef> ExtentString = getExprText(Ext, SM, LangOpts);
if (!ExtentString)
- return {};
+ return std::nullopt;
ExtentText = *ExtentString;
}
} else if (!CxxNew->isArray())
// Although the initializer is not allocating a buffer, the pointer
// variable could still be used in buffer access operations.
ExtentText = One;
- } else if (const auto *CArrTy = Ctx.getAsConstantArrayType(
- Init->IgnoreImpCasts()->getType())) {
- // In cases `Init` is of an array type after stripping off implicit casts,
- // the extent is the array size. Note that if the array size is not a
- // constant, we cannot use it as the extent.
- ExtentText = getAPIntText(CArrTy->getSize());
+ } else if (Ctx.getAsConstantArrayType(Init->IgnoreImpCasts()->getType())) {
+ // std::span has a single parameter constructor for initialization with
+ // constant size array. The size is auto-deduced as the constructor is a
+ // function template. The correct fixit is empty - no changes should happen.
+ return FixItList{};
} else {
// In cases `Init` is of the form `&Var` after stripping of implicit
// casts, where `&` is the built-in operator, the extent is 1.
@@ -1995,7 +2276,7 @@ FixVarInitializerWithSpan(const Expr *Init, ASTContext &Ctx,
std::optional<SourceLocation> LocPassInit = getPastLoc(Init, SM, LangOpts);
if (!LocPassInit)
- return {};
+ return std::nullopt;
StrBuffer.append(", ");
StrBuffer.append(ExtentText);
@@ -2005,8 +2286,10 @@ FixVarInitializerWithSpan(const Expr *Init, ASTContext &Ctx,
}
#ifndef NDEBUG
-#define DEBUG_NOTE_DECL_FAIL(D, Msg) \
-Handler.addDebugNoteForVar((D), (D)->getBeginLoc(), "failed to produce fixit for declaration '" + (D)->getNameAsString() + "'" + (Msg))
+#define DEBUG_NOTE_DECL_FAIL(D, Msg) \
+ Handler.addDebugNoteForVar((D), (D)->getBeginLoc(), \
+ "failed to produce fixit for declaration '" + \
+ (D)->getNameAsString() + "'" + (Msg))
#else
#define DEBUG_NOTE_DECL_FAIL(D, Msg)
#endif
@@ -2014,8 +2297,8 @@ Handler.addDebugNoteForVar((D), (D)->getBeginLoc(), "failed to produce fixit for
// For the given variable declaration with a pointer-to-T type, returns the text
// `std::span<T>`. If it is unable to generate the text, returns
// `std::nullopt`.
-static std::optional<std::string> createSpanTypeForVarDecl(const VarDecl *VD,
- const ASTContext &Ctx) {
+static std::optional<std::string>
+createSpanTypeForVarDecl(const VarDecl *VD, const ASTContext &Ctx) {
assert(VD->getType()->isPointerType());
std::optional<Qualifiers> PteTyQualifiers = std::nullopt;
@@ -2052,8 +2335,8 @@ static std::optional<std::string> createSpanTypeForVarDecl(const VarDecl *VD,
// the non-empty fix-it list, if fix-its are successfuly generated; empty
// list otherwise.
static FixItList fixLocalVarDeclWithSpan(const VarDecl *D, ASTContext &Ctx,
- const StringRef UserFillPlaceHolder,
- UnsafeBufferUsageHandler &Handler) {
+ const StringRef UserFillPlaceHolder,
+ UnsafeBufferUsageHandler &Handler) {
if (hasUnsupportedSpecifiers(D, Ctx.getSourceManager()))
return {};
@@ -2069,37 +2352,30 @@ static FixItList fixLocalVarDeclWithSpan(const VarDecl *D, ASTContext &Ctx,
std::stringstream SS;
SS << *SpanTyText;
- // Append qualifiers to the type of `D`, if any:
- if (D->getType().hasQualifiers())
- SS << " " << D->getType().getQualifiers().getAsString();
-
- // The end of the range of the original source that will be replaced
- // by `std::span<T> ident`:
- SourceLocation EndLocForReplacement = D->getEndLoc();
- std::optional<StringRef> IdentText =
- getVarDeclIdentifierText(D, Ctx.getSourceManager(), Ctx.getLangOpts());
-
- if (!IdentText) {
- DEBUG_NOTE_DECL_FAIL(D, " : failed to locate the identifier");
- return {};
- }
// Fix the initializer if it exists:
if (const Expr *Init = D->getInit()) {
- FixItList InitFixIts =
+ std::optional<FixItList> InitFixIts =
FixVarInitializerWithSpan(Init, Ctx, UserFillPlaceHolder);
- if (InitFixIts.empty())
+ if (!InitFixIts)
return {};
- FixIts.insert(FixIts.end(), std::make_move_iterator(InitFixIts.begin()),
- std::make_move_iterator(InitFixIts.end()));
- // If the declaration has the form `T *ident = init`, we want to replace
- // `T *ident = ` with `std::span<T> ident`:
- EndLocForReplacement = Init->getBeginLoc().getLocWithOffset(-1);
- }
- SS << " " << IdentText->str();
+ FixIts.insert(FixIts.end(), std::make_move_iterator(InitFixIts->begin()),
+ std::make_move_iterator(InitFixIts->end()));
+ }
+ // For declaration of the form `T * ident = init;`, we want to replace
+ // `T * ` with `std::span<T>`.
+ // We ignore CV-qualifiers so for `T * const ident;` we also want to replace
+ // just `T *` with `std::span<T>`.
+ const SourceLocation EndLocForReplacement = D->getTypeSpecEndLoc();
if (!EndLocForReplacement.isValid()) {
DEBUG_NOTE_DECL_FAIL(D, " : failed to locate the end of the declaration");
return {};
}
+ // The only exception is that for `T *ident` we'll add a single space between
+ // "std::span<T>" and "ident".
+ // FIXME: The condition is false for identifiers expended from macros.
+ if (EndLocForReplacement.getLocWithOffset(1) == getVarDeclIdentifierLoc(D))
+ SS << " ";
+
FixIts.push_back(FixItHint::CreateReplacement(
SourceRange(D->getBeginLoc(), EndLocForReplacement), SS.str()));
return FixIts;
@@ -2141,7 +2417,7 @@ static bool hasConflictingOverload(const FunctionDecl *FD) {
// }
//
static std::optional<FixItList>
-createOverloadsForFixedParams(const Strategy &S, const FunctionDecl *FD,
+createOverloadsForFixedParams(const FixitStrategy &S, const FunctionDecl *FD,
const ASTContext &Ctx,
UnsafeBufferUsageHandler &Handler) {
// FIXME: need to make this conflict checking better:
@@ -2158,9 +2434,9 @@ createOverloadsForFixedParams(const Strategy &S, const FunctionDecl *FD,
for (unsigned i = 0; i < NumParms; i++) {
const ParmVarDecl *PVD = FD->getParamDecl(i);
- if (S.lookup(PVD) == Strategy::Kind::Wontfix)
+ if (S.lookup(PVD) == FixitStrategy::Kind::Wontfix)
continue;
- if (S.lookup(PVD) != Strategy::Kind::Span)
+ if (S.lookup(PVD) != FixitStrategy::Kind::Span)
// Not supported, not suppose to happen:
return std::nullopt;
@@ -2171,7 +2447,8 @@ createOverloadsForFixedParams(const Strategy &S, const FunctionDecl *FD,
if (!PteTyText)
// something wrong in obtaining the text of the pointee type, give up
return std::nullopt;
- // FIXME: whether we should create std::span type depends on the Strategy.
+ // FIXME: whether we should create std::span type depends on the
+ // FixitStrategy.
NewTysTexts[i] = getSpanTypeText(*PteTyText, PteTyQuals);
ParmsMask[i] = true;
AtLeastOneParmToFix = true;
@@ -2212,9 +2489,9 @@ createOverloadsForFixedParams(const Strategy &S, const FunctionDecl *FD,
// print parameter name if provided:
if (IdentifierInfo *II = Parm->getIdentifier())
SS << ' ' << II->getName().str();
- } else if (auto ParmTypeText = getRangeText(
- getSourceRangeToTokenEnd(Parm, SM, LangOpts),
- SM, LangOpts)) {
+ } else if (auto ParmTypeText =
+ getRangeText(getSourceRangeToTokenEnd(Parm, SM, LangOpts),
+ SM, LangOpts)) {
// print the whole `Parm` without modification:
SS << ParmTypeText->str();
} else
@@ -2358,7 +2635,8 @@ static FixItList fixVariableWithSpan(const VarDecl *VD,
UnsafeBufferUsageHandler &Handler) {
const DeclStmt *DS = Tracker.lookupDecl(VD);
if (!DS) {
- DEBUG_NOTE_DECL_FAIL(VD, " : variables declared this way not implemented yet");
+ DEBUG_NOTE_DECL_FAIL(VD,
+ " : variables declared this way not implemented yet");
return {};
}
if (!DS->isSingleDecl()) {
@@ -2375,10 +2653,103 @@ static FixItList fixVariableWithSpan(const VarDecl *VD,
return fixLocalVarDeclWithSpan(VD, Ctx, getUserFillPlaceHolder(), Handler);
}
+static FixItList fixVarDeclWithArray(const VarDecl *D, const ASTContext &Ctx,
+ UnsafeBufferUsageHandler &Handler) {
+ FixItList FixIts{};
+
+ // Note: the code below expects the declaration to not use any type sugar like
+ // typedef.
+ if (auto CAT = dyn_cast<clang::ConstantArrayType>(D->getType())) {
+ const QualType &ArrayEltT = CAT->getElementType();
+ assert(!ArrayEltT.isNull() && "Trying to fix a non-array type variable!");
+ // FIXME: support multi-dimensional arrays
+ if (isa<clang::ArrayType>(ArrayEltT.getCanonicalType()))
+ return {};
+
+ const SourceLocation IdentifierLoc = getVarDeclIdentifierLoc(D);
+
+ // Get the spelling of the element type as written in the source file
+ // (including macros, etc.).
+ auto MaybeElemTypeTxt =
+ getRangeText({D->getBeginLoc(), IdentifierLoc}, Ctx.getSourceManager(),
+ Ctx.getLangOpts());
+ if (!MaybeElemTypeTxt)
+ return {};
+ const llvm::StringRef ElemTypeTxt = MaybeElemTypeTxt->trim();
+
+ // Find the '[' token.
+ std::optional<Token> NextTok = Lexer::findNextToken(
+ IdentifierLoc, Ctx.getSourceManager(), Ctx.getLangOpts());
+ while (NextTok && !NextTok->is(tok::l_square) &&
+ NextTok->getLocation() <= D->getSourceRange().getEnd())
+ NextTok = Lexer::findNextToken(NextTok->getLocation(),
+ Ctx.getSourceManager(), Ctx.getLangOpts());
+ if (!NextTok)
+ return {};
+ const SourceLocation LSqBracketLoc = NextTok->getLocation();
+
+ // Get the spelling of the array size as written in the source file
+ // (including macros, etc.).
+ auto MaybeArraySizeTxt = getRangeText(
+ {LSqBracketLoc.getLocWithOffset(1), D->getTypeSpecEndLoc()},
+ Ctx.getSourceManager(), Ctx.getLangOpts());
+ if (!MaybeArraySizeTxt)
+ return {};
+ const llvm::StringRef ArraySizeTxt = MaybeArraySizeTxt->trim();
+ if (ArraySizeTxt.empty()) {
+ // FIXME: Support array size getting determined from the initializer.
+ // Examples:
+ // int arr1[] = {0, 1, 2};
+ // int arr2{3, 4, 5};
+ // We might be able to preserve the non-specified size with `auto` and
+ // `std::to_array`:
+ // auto arr1 = std::to_array<int>({0, 1, 2});
+ return {};
+ }
+
+ std::optional<StringRef> IdentText =
+ getVarDeclIdentifierText(D, Ctx.getSourceManager(), Ctx.getLangOpts());
+
+ if (!IdentText) {
+ DEBUG_NOTE_DECL_FAIL(D, " : failed to locate the identifier");
+ return {};
+ }
+
+ SmallString<32> Replacement;
+ raw_svector_ostream OS(Replacement);
+ OS << "std::array<" << ElemTypeTxt << ", " << ArraySizeTxt << "> "
+ << IdentText->str();
+
+ FixIts.push_back(FixItHint::CreateReplacement(
+ SourceRange{D->getBeginLoc(), D->getTypeSpecEndLoc()}, OS.str()));
+ }
+
+ return FixIts;
+}
+
+static FixItList fixVariableWithArray(const VarDecl *VD,
+ const DeclUseTracker &Tracker,
+ const ASTContext &Ctx,
+ UnsafeBufferUsageHandler &Handler) {
+ const DeclStmt *DS = Tracker.lookupDecl(VD);
+ assert(DS && "Fixing non-local variables not implemented yet!");
+ if (!DS->isSingleDecl()) {
+ // FIXME: to support handling multiple `VarDecl`s in a single `DeclStmt`
+ return {};
+ }
+ // Currently DS is an unused variable but we'll need it when
+ // non-single decls are implemented, where the pointee type name
+ // and the '*' are spread around the place.
+ (void)DS;
+
+ // FIXME: handle cases where DS has multiple declarations
+ return fixVarDeclWithArray(VD, Ctx, Handler);
+}
+
// TODO: we should be consistent to use `std::nullopt` to represent no-fix due
// to any unexpected problem.
static FixItList
-fixVariable(const VarDecl *VD, Strategy::Kind K,
+fixVariable(const VarDecl *VD, FixitStrategy::Kind K,
/* The function decl under analysis */ const Decl *D,
const DeclUseTracker &Tracker, ASTContext &Ctx,
UnsafeBufferUsageHandler &Handler) {
@@ -2409,7 +2780,7 @@ fixVariable(const VarDecl *VD, Strategy::Kind K,
}
switch (K) {
- case Strategy::Kind::Span: {
+ case FixitStrategy::Kind::Span: {
if (VD->getType()->isPointerType()) {
if (const auto *PVD = dyn_cast<ParmVarDecl>(VD))
return fixParamWithSpan(PVD, Ctx, Handler);
@@ -2420,11 +2791,18 @@ fixVariable(const VarDecl *VD, Strategy::Kind K,
DEBUG_NOTE_DECL_FAIL(VD, " : not a pointer");
return {};
}
- case Strategy::Kind::Iterator:
- case Strategy::Kind::Array:
- case Strategy::Kind::Vector:
- llvm_unreachable("Strategy not implemented yet!");
- case Strategy::Kind::Wontfix:
+ case FixitStrategy::Kind::Array: {
+ if (VD->isLocalVarDecl() &&
+ isa<clang::ConstantArrayType>(VD->getType().getCanonicalType()))
+ return fixVariableWithArray(VD, Tracker, Ctx, Handler);
+
+ DEBUG_NOTE_DECL_FAIL(VD, " : not a local const-size array");
+ return {};
+ }
+ case FixitStrategy::Kind::Iterator:
+ case FixitStrategy::Kind::Vector:
+ llvm_unreachable("FixitStrategy not implemented yet!");
+ case FixitStrategy::Kind::Wontfix:
llvm_unreachable("Invalid strategy!");
}
llvm_unreachable("Unknown strategy!");
@@ -2485,7 +2863,8 @@ static void eraseVarsForUnfixableGroupMates(
static FixItList createFunctionOverloadsForParms(
std::map<const VarDecl *, FixItList> &FixItsForVariable /* mutable */,
const VariableGroupsManager &VarGrpMgr, const FunctionDecl *FD,
- const Strategy &S, ASTContext &Ctx, UnsafeBufferUsageHandler &Handler) {
+ const FixitStrategy &S, ASTContext &Ctx,
+ UnsafeBufferUsageHandler &Handler) {
FixItList FixItsSharedByParms{};
std::optional<FixItList> OverloadFixes =
@@ -2505,8 +2884,8 @@ static FixItList createFunctionOverloadsForParms(
// Constructs self-contained fix-its for each variable in `FixablesForAllVars`.
static std::map<const VarDecl *, FixItList>
-getFixIts(FixableGadgetSets &FixablesForAllVars, const Strategy &S,
- ASTContext &Ctx,
+getFixIts(FixableGadgetSets &FixablesForAllVars, const FixitStrategy &S,
+ ASTContext &Ctx,
/* The function decl under analysis */ const Decl *D,
const DeclUseTracker &Tracker, UnsafeBufferUsageHandler &Handler,
const VariableGroupsManager &VarGrpMgr) {
@@ -2537,7 +2916,7 @@ getFixIts(FixableGadgetSets &FixablesForAllVars, const Strategy &S,
}
#ifndef NDEBUG
Handler.addDebugNoteForVar(
- VD, F->getBaseStmt()->getBeginLoc(),
+ VD, F->getSourceLoc(),
("gadget '" + F->getDebugName() + "' refused to produce a fix")
.str());
#endif
@@ -2604,11 +2983,14 @@ getFixIts(FixableGadgetSets &FixablesForAllVars, const Strategy &S,
}
template <typename VarDeclIterTy>
-static Strategy
+static FixitStrategy
getNaiveStrategy(llvm::iterator_range<VarDeclIterTy> UnsafeVars) {
- Strategy S;
+ FixitStrategy S;
for (const VarDecl *VD : UnsafeVars) {
- S.set(VD, Strategy::Kind::Span);
+ if (isa<ConstantArrayType>(VD->getType().getCanonicalType()))
+ S.set(VD, FixitStrategy::Kind::Array);
+ else
+ S.set(VD, FixitStrategy::Kind::Span);
}
return S;
}
@@ -2656,8 +3038,8 @@ void clang::checkUnsafeBufferUsage(const Decl *D,
#endif
assert(D && D->getBody());
- // We do not want to visit a Lambda expression defined inside a method independently.
- // Instead, it should be visited along with the outer method.
+ // We do not want to visit a Lambda expression defined inside a method
+ // independently. Instead, it should be visited along with the outer method.
// FIXME: do we want to do the same thing for `BlockDecl`s?
if (const auto *fd = dyn_cast<CXXMethodDecl>(D)) {
if (fd->getParent()->isLambda() && fd->getParent()->isLocalClass())
@@ -2667,7 +3049,7 @@ void clang::checkUnsafeBufferUsage(const Decl *D,
// Do not emit fixit suggestions for functions declared in an
// extern "C" block.
if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
- for (FunctionDecl *FReDecl : FD->redecls()) {
+ for (FunctionDecl *FReDecl : FD->redecls()) {
if (FReDecl->isExternC()) {
EmitSuggestions = false;
break;
@@ -2679,15 +3061,15 @@ void clang::checkUnsafeBufferUsage(const Decl *D,
FixableGadgetSets FixablesForAllVars;
auto [FixableGadgets, WarningGadgets, Tracker] =
- findGadgets(D, Handler, EmitSuggestions);
+ findGadgets(D, Handler, EmitSuggestions);
if (!EmitSuggestions) {
// Our job is very easy without suggestions. Just warn about
// every problematic operation and consider it done. No need to deal
// with fixable gadgets, no need to group operations by variable.
for (const auto &G : WarningGadgets) {
- Handler.handleUnsafeOperation(G->getBaseStmt(), /*IsRelatedToDecl=*/false,
- D->getASTContext());
+ G->handleUnsafeOperation(Handler, /*IsRelatedToDecl=*/false,
+ D->getASTContext());
}
// This return guarantees that most of the machine doesn't run when
@@ -2732,51 +3114,58 @@ void clang::checkUnsafeBufferUsage(const Decl *D,
// Filter out non-local vars and vars with unclaimed DeclRefExpr-s.
for (auto it = FixablesForAllVars.byVar.cbegin();
it != FixablesForAllVars.byVar.cend();) {
- // FIXME: need to deal with global variables later
- if ((!it->first->isLocalVarDecl() && !isa<ParmVarDecl>(it->first))) {
+ // FIXME: need to deal with global variables later
+ if ((!it->first->isLocalVarDecl() && !isa<ParmVarDecl>(it->first))) {
#ifndef NDEBUG
- Handler.addDebugNoteForVar(
- it->first, it->first->getBeginLoc(),
- ("failed to produce fixit for '" + it->first->getNameAsString() +
- "' : neither local nor a parameter"));
+ Handler.addDebugNoteForVar(it->first, it->first->getBeginLoc(),
+ ("failed to produce fixit for '" +
+ it->first->getNameAsString() +
+ "' : neither local nor a parameter"));
#endif
- it = FixablesForAllVars.byVar.erase(it);
- } else if (it->first->getType().getCanonicalType()->isReferenceType()) {
+ it = FixablesForAllVars.byVar.erase(it);
+ } else if (it->first->getType().getCanonicalType()->isReferenceType()) {
#ifndef NDEBUG
- Handler.addDebugNoteForVar(it->first, it->first->getBeginLoc(),
- ("failed to produce fixit for '" +
- it->first->getNameAsString() +
- "' : has a reference type"));
+ Handler.addDebugNoteForVar(it->first, it->first->getBeginLoc(),
+ ("failed to produce fixit for '" +
+ it->first->getNameAsString() +
+ "' : has a reference type"));
#endif
- it = FixablesForAllVars.byVar.erase(it);
- } else if (Tracker.hasUnclaimedUses(it->first)) {
+ it = FixablesForAllVars.byVar.erase(it);
+ } else if (Tracker.hasUnclaimedUses(it->first)) {
+ it = FixablesForAllVars.byVar.erase(it);
+ } else if (it->first->isInitCapture()) {
#ifndef NDEBUG
- auto AllUnclaimed = Tracker.getUnclaimedUses(it->first);
- for (auto UnclaimedDRE : AllUnclaimed) {
- std::string UnclaimedUseTrace =
- getDREAncestorString(UnclaimedDRE, D->getASTContext());
-
- Handler.addDebugNoteForVar(
- it->first, UnclaimedDRE->getBeginLoc(),
- ("failed to produce fixit for '" + it->first->getNameAsString() +
- "' : has an unclaimed use\nThe unclaimed DRE trace: " +
- UnclaimedUseTrace));
- }
-#endif
- it = FixablesForAllVars.byVar.erase(it);
- } else if (it->first->isInitCapture()) {
-#ifndef NDEBUG
- Handler.addDebugNoteForVar(
- it->first, it->first->getBeginLoc(),
- ("failed to produce fixit for '" + it->first->getNameAsString() +
- "' : init capture"));
+ Handler.addDebugNoteForVar(it->first, it->first->getBeginLoc(),
+ ("failed to produce fixit for '" +
+ it->first->getNameAsString() +
+ "' : init capture"));
#endif
- it = FixablesForAllVars.byVar.erase(it);
- }else {
+ it = FixablesForAllVars.byVar.erase(it);
+ } else {
++it;
}
}
+#ifndef NDEBUG
+ for (const auto &it : UnsafeOps.byVar) {
+ const VarDecl *const UnsafeVD = it.first;
+ auto UnclaimedDREs = Tracker.getUnclaimedUses(UnsafeVD);
+ if (UnclaimedDREs.empty())
+ continue;
+ const auto UnfixedVDName = UnsafeVD->getNameAsString();
+ for (const clang::DeclRefExpr *UnclaimedDRE : UnclaimedDREs) {
+ std::string UnclaimedUseTrace =
+ getDREAncestorString(UnclaimedDRE, D->getASTContext());
+
+ Handler.addDebugNoteForVar(
+ UnsafeVD, UnclaimedDRE->getBeginLoc(),
+ ("failed to produce fixit for '" + UnfixedVDName +
+ "' : has an unclaimed use\nThe unclaimed DRE trace: " +
+ UnclaimedUseTrace));
+ }
+ }
+#endif
+
// Fixpoint iteration for pointer assignments
using DepMapTy = DenseMap<const VarDecl *, llvm::SetVector<const VarDecl *>>;
DepMapTy DependenciesMap{};
@@ -2785,7 +3174,7 @@ void clang::checkUnsafeBufferUsage(const Decl *D,
for (auto it : FixablesForAllVars.byVar) {
for (const FixableGadget *fixable : it.second) {
std::optional<std::pair<const VarDecl *, const VarDecl *>> ImplPair =
- fixable->getStrategyImplications();
+ fixable->getStrategyImplications();
if (ImplPair) {
std::pair<const VarDecl *, const VarDecl *> Impl = std::move(*ImplPair);
PtrAssignmentGraph[Impl.first].insert(Impl.second);
@@ -2814,10 +3203,10 @@ void clang::checkUnsafeBufferUsage(const Decl *D,
for (const auto &[Var, ignore] : UnsafeOps.byVar) {
if (VisitedVarsDirected.find(Var) == VisitedVarsDirected.end()) {
- std::queue<const VarDecl*> QueueDirected{};
+ std::queue<const VarDecl *> QueueDirected{};
QueueDirected.push(Var);
- while(!QueueDirected.empty()) {
- const VarDecl* CurrentVar = QueueDirected.front();
+ while (!QueueDirected.empty()) {
+ const VarDecl *CurrentVar = QueueDirected.front();
QueueDirected.pop();
VisitedVarsDirected.insert(CurrentVar);
auto AdjacentNodes = PtrAssignmentGraph[CurrentVar];
@@ -2848,11 +3237,11 @@ void clang::checkUnsafeBufferUsage(const Decl *D,
for (const auto &[Var, ignore] : UnsafeOps.byVar) {
if (VisitedVars.find(Var) == VisitedVars.end()) {
VarGrpTy &VarGroup = Groups.emplace_back();
- std::queue<const VarDecl*> Queue{};
+ std::queue<const VarDecl *> Queue{};
Queue.push(Var);
- while(!Queue.empty()) {
- const VarDecl* CurrentVar = Queue.front();
+ while (!Queue.empty()) {
+ const VarDecl *CurrentVar = Queue.front();
Queue.pop();
VisitedVars.insert(CurrentVar);
VarGroup.push_back(CurrentVar);
@@ -2907,7 +3296,7 @@ void clang::checkUnsafeBufferUsage(const Decl *D,
// We assign strategies to variables that are 1) in the graph and 2) can be
// fixed. Other variables have the default "Won't fix" strategy.
- Strategy NaiveStrategy = getNaiveStrategy(llvm::make_filter_range(
+ FixitStrategy NaiveStrategy = getNaiveStrategy(llvm::make_filter_range(
VisitedVars, [&FixablesForAllVars](const VarDecl *V) {
// If a warned variable has no "Fixable", it is considered unfixable:
return FixablesForAllVars.byVar.count(V);
@@ -2922,20 +3311,20 @@ void clang::checkUnsafeBufferUsage(const Decl *D,
Tracker, Handler, VarGrpMgr);
for (const auto &G : UnsafeOps.noVar) {
- Handler.handleUnsafeOperation(G->getBaseStmt(), /*IsRelatedToDecl=*/false,
- D->getASTContext());
+ G->handleUnsafeOperation(Handler, /*IsRelatedToDecl=*/false,
+ D->getASTContext());
}
for (const auto &[VD, WarningGadgets] : UnsafeOps.byVar) {
auto FixItsIt = FixItsForVariableGroup.find(VD);
Handler.handleUnsafeVariableGroup(VD, VarGrpMgr,
FixItsIt != FixItsForVariableGroup.end()
- ? std::move(FixItsIt->second)
- : FixItList{},
- D);
+ ? std::move(FixItsIt->second)
+ : FixItList{},
+ D, NaiveStrategy);
for (const auto &G : WarningGadgets) {
- Handler.handleUnsafeOperation(G->getBaseStmt(), /*IsRelatedToDecl=*/true,
- D->getASTContext());
+ G->handleUnsafeOperation(Handler, /*IsRelatedToDecl=*/true,
+ D->getASTContext());
}
}
}
diff --git a/contrib/llvm-project/clang/lib/Basic/ASTSourceDescriptor.cpp b/contrib/llvm-project/clang/lib/Basic/ASTSourceDescriptor.cpp
new file mode 100644
index 000000000000..8072c08a51d3
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/ASTSourceDescriptor.cpp
@@ -0,0 +1,33 @@
+//===- ASTSourceDescriptor.cpp -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// Defines the clang::ASTSourceDescriptor class, which abstracts clang modules
+/// and precompiled header files
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/ASTSourceDescriptor.h"
+
+namespace clang {
+
+ASTSourceDescriptor::ASTSourceDescriptor(Module &M)
+ : Signature(M.Signature), ClangModule(&M) {
+ if (M.Directory)
+ Path = M.Directory->getName();
+ if (auto File = M.getASTFile())
+ ASTFile = File->getName();
+}
+
+std::string ASTSourceDescriptor::getModuleName() const {
+ if (ClangModule)
+ return ClangModule->Name;
+ else
+ return std::string(PCHModuleName);
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Attributes.cpp b/contrib/llvm-project/clang/lib/Basic/Attributes.cpp
index 44a4f1890d39..867d241a2cf8 100644
--- a/contrib/llvm-project/clang/lib/Basic/Attributes.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Attributes.cpp
@@ -47,8 +47,12 @@ int clang::hasAttribute(AttributeCommonInfo::Syntax Syntax,
// attributes. We support those, but not through the typical attribute
// machinery that goes through TableGen. We support this in all OpenMP modes
// so long as double square brackets are enabled.
- if (LangOpts.OpenMP && ScopeName == "omp")
- return (Name == "directive" || Name == "sequence") ? 1 : 0;
+ //
+ // Other OpenMP attributes (e.g. [[omp::assume]]) are handled via the
+ // regular attribute parsing machinery.
+ if (LangOpts.OpenMP && ScopeName == "omp" &&
+ (Name == "directive" || Name == "sequence"))
+ return 1;
int res = hasAttributeImpl(Syntax, Name, ScopeName, Target, LangOpts);
if (res)
diff --git a/contrib/llvm-project/clang/lib/Basic/Builtins.cpp b/contrib/llvm-project/clang/lib/Basic/Builtins.cpp
index d366989bafc5..7116e27cd954 100644
--- a/contrib/llvm-project/clang/lib/Basic/Builtins.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Builtins.cpp
@@ -38,7 +38,7 @@ static constexpr Builtin::Info BuiltinInfo[] = {
{#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANGS},
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER, LANGS) \
{#ID, TYPE, ATTRS, nullptr, HeaderDesc::HEADER, LANGS},
-#include "clang/Basic/Builtins.def"
+#include "clang/Basic/Builtins.inc"
};
const Builtin::Info &Builtin::Context::getRecord(unsigned ID) const {
@@ -64,7 +64,7 @@ bool Builtin::Context::isBuiltinFunc(llvm::StringRef FuncName) {
bool InStdNamespace = FuncName.consume_front("std-");
for (unsigned i = Builtin::NotBuiltin + 1; i != Builtin::FirstTSBuiltin;
++i) {
- if (FuncName.equals(BuiltinInfo[i].Name) &&
+ if (FuncName == BuiltinInfo[i].Name &&
(bool)strchr(BuiltinInfo[i].Attributes, 'z') == InStdNamespace)
return strchr(BuiltinInfo[i].Attributes, 'f') != nullptr;
}
@@ -119,6 +119,9 @@ static bool builtinIsSupported(const Builtin::Info &BuiltinInfo,
/* CPlusPlus Unsupported */
if (!LangOpts.CPlusPlus && BuiltinInfo.Langs == CXX_LANG)
return false;
+ /* consteval Unsupported */
+ if (!LangOpts.CPlusPlus20 && strchr(BuiltinInfo.Attributes, 'G') != nullptr)
+ return false;
return true;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/CharInfo.cpp b/contrib/llvm-project/clang/lib/Basic/CharInfo.cpp
index d02054c9718f..26d693b8e9b9 100644
--- a/contrib/llvm-project/clang/lib/Basic/CharInfo.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/CharInfo.cpp
@@ -31,20 +31,20 @@ const uint16_t clang::charinfo::InfoTable[256] = {
0 , 0 , 0 , 0 ,
//32 SP 33 ! 34 " 35 #
//36 $ 37 % 38 & 39 '
- CHAR_SPACE , CHAR_RAWDEL , CHAR_RAWDEL , CHAR_RAWDEL ,
- CHAR_PUNCT , CHAR_RAWDEL , CHAR_RAWDEL , CHAR_RAWDEL ,
+ CHAR_SPACE , CHAR_PUNCT , CHAR_PUNCT , CHAR_PUNCT ,
+ CHAR_PUNCT , CHAR_PUNCT , CHAR_PUNCT , CHAR_PUNCT ,
//40 ( 41 ) 42 * 43 +
//44 , 45 - 46 . 47 /
- CHAR_PUNCT , CHAR_PUNCT , CHAR_RAWDEL , CHAR_RAWDEL ,
- CHAR_RAWDEL , CHAR_RAWDEL , CHAR_PERIOD , CHAR_RAWDEL ,
+ CHAR_PUNCT , CHAR_PUNCT , CHAR_PUNCT , CHAR_PUNCT ,
+ CHAR_PUNCT , CHAR_PUNCT , CHAR_PERIOD , CHAR_PUNCT ,
//48 0 49 1 50 2 51 3
//52 4 53 5 54 6 55 7
CHAR_DIGIT , CHAR_DIGIT , CHAR_DIGIT , CHAR_DIGIT ,
CHAR_DIGIT , CHAR_DIGIT , CHAR_DIGIT , CHAR_DIGIT ,
//56 8 57 9 58 : 59 ;
//60 < 61 = 62 > 63 ?
- CHAR_DIGIT , CHAR_DIGIT , CHAR_RAWDEL , CHAR_RAWDEL ,
- CHAR_RAWDEL , CHAR_RAWDEL , CHAR_RAWDEL , CHAR_RAWDEL ,
+ CHAR_DIGIT , CHAR_DIGIT , CHAR_PUNCT , CHAR_PUNCT ,
+ CHAR_PUNCT , CHAR_PUNCT , CHAR_PUNCT , CHAR_PUNCT ,
//64 @ 65 A 66 B 67 C
//68 D 69 E 70 F 71 G
CHAR_PUNCT , CHAR_XUPPER , CHAR_XUPPER , CHAR_XUPPER ,
@@ -59,8 +59,8 @@ const uint16_t clang::charinfo::InfoTable[256] = {
CHAR_UPPER , CHAR_UPPER , CHAR_UPPER , CHAR_UPPER ,
//88 X 89 Y 90 Z 91 [
//92 \ 93 ] 94 ^ 95 _
- CHAR_UPPER , CHAR_UPPER , CHAR_UPPER , CHAR_RAWDEL ,
- CHAR_PUNCT , CHAR_RAWDEL , CHAR_RAWDEL , CHAR_UNDER ,
+ CHAR_UPPER , CHAR_UPPER , CHAR_UPPER , CHAR_PUNCT ,
+ CHAR_PUNCT , CHAR_PUNCT , CHAR_PUNCT , CHAR_UNDER ,
//96 ` 97 a 98 b 99 c
//100 d 101 e 102 f 103 g
CHAR_PUNCT , CHAR_XLOWER , CHAR_XLOWER , CHAR_XLOWER ,
@@ -75,6 +75,6 @@ const uint16_t clang::charinfo::InfoTable[256] = {
CHAR_LOWER , CHAR_LOWER , CHAR_LOWER , CHAR_LOWER ,
//120 x 121 y 122 z 123 {
//124 | 125 } 126 ~ 127 DEL
- CHAR_LOWER , CHAR_LOWER , CHAR_LOWER , CHAR_RAWDEL ,
- CHAR_RAWDEL , CHAR_RAWDEL , CHAR_RAWDEL , 0
+ CHAR_LOWER , CHAR_LOWER , CHAR_LOWER , CHAR_PUNCT ,
+ CHAR_PUNCT , CHAR_PUNCT , CHAR_PUNCT , 0
};
diff --git a/contrib/llvm-project/clang/lib/Basic/Cuda.cpp b/contrib/llvm-project/clang/lib/Basic/Cuda.cpp
index 1b1da6a1356f..faf3878f064d 100644
--- a/contrib/llvm-project/clang/lib/Basic/Cuda.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Cuda.cpp
@@ -14,7 +14,7 @@ struct CudaVersionMapEntry {
};
#define CUDA_ENTRY(major, minor) \
{ \
-#major "." #minor, CudaVersion::CUDA_##major##minor, \
+ #major "." #minor, CudaVersion::CUDA_##major##minor, \
llvm::VersionTuple(major, minor) \
}
@@ -41,6 +41,8 @@ static const CudaVersionMapEntry CudaNameVersionMap[] = {
CUDA_ENTRY(12, 1),
CUDA_ENTRY(12, 2),
CUDA_ENTRY(12, 3),
+ CUDA_ENTRY(12, 4),
+ CUDA_ENTRY(12, 5),
{"", CudaVersion::NEW, llvm::VersionTuple(std::numeric_limits<int>::max())},
{"unknown", CudaVersion::UNKNOWN, {}} // End of list tombstone.
};
@@ -70,23 +72,21 @@ CudaVersion ToCudaVersion(llvm::VersionTuple Version) {
}
namespace {
-struct CudaArchToStringMap {
- CudaArch arch;
+struct OffloadArchToStringMap {
+ OffloadArch arch;
const char *arch_name;
const char *virtual_arch_name;
};
} // namespace
-#define SM2(sm, ca) \
- { CudaArch::SM_##sm, "sm_" #sm, ca }
+#define SM2(sm, ca) {OffloadArch::SM_##sm, "sm_" #sm, ca}
#define SM(sm) SM2(sm, "compute_" #sm)
-#define GFX(gpu) \
- { CudaArch::GFX##gpu, "gfx" #gpu, "compute_amdgcn" }
-static const CudaArchToStringMap arch_names[] = {
+#define GFX(gpu) {OffloadArch::GFX##gpu, "gfx" #gpu, "compute_amdgcn"}
+static const OffloadArchToStringMap arch_names[] = {
// clang-format off
- {CudaArch::UNUSED, "", ""},
+ {OffloadArch::UNUSED, "", ""},
SM2(20, "compute_20"), SM2(21, "compute_20"), // Fermi
- SM(30), SM(32), SM(35), SM(37), // Kepler
+ SM(30), {OffloadArch::SM_32_, "sm_32", "compute_32"}, SM(35), SM(37), // Kepler
SM(50), SM(52), SM(53), // Maxwell
SM(60), SM(61), SM(62), // Pascal
SM(70), SM(72), // Volta
@@ -110,6 +110,7 @@ static const CudaArchToStringMap arch_names[] = {
GFX(803), // gfx803
GFX(805), // gfx805
GFX(810), // gfx810
+ {OffloadArch::GFX9_GENERIC, "gfx9-generic", "compute_amdgcn"},
GFX(900), // gfx900
GFX(902), // gfx902
GFX(904), // gfx903
@@ -121,10 +122,12 @@ static const CudaArchToStringMap arch_names[] = {
GFX(940), // gfx940
GFX(941), // gfx941
GFX(942), // gfx942
+ {OffloadArch::GFX10_1_GENERIC, "gfx10-1-generic", "compute_amdgcn"},
GFX(1010), // gfx1010
GFX(1011), // gfx1011
GFX(1012), // gfx1012
GFX(1013), // gfx1013
+ {OffloadArch::GFX10_3_GENERIC, "gfx10-3-generic", "compute_amdgcn"},
GFX(1030), // gfx1030
GFX(1031), // gfx1031
GFX(1032), // gfx1032
@@ -132,116 +135,120 @@ static const CudaArchToStringMap arch_names[] = {
GFX(1034), // gfx1034
GFX(1035), // gfx1035
GFX(1036), // gfx1036
+ {OffloadArch::GFX11_GENERIC, "gfx11-generic", "compute_amdgcn"},
GFX(1100), // gfx1100
GFX(1101), // gfx1101
GFX(1102), // gfx1102
GFX(1103), // gfx1103
GFX(1150), // gfx1150
GFX(1151), // gfx1151
+ GFX(1152), // gfx1152
+ {OffloadArch::GFX12_GENERIC, "gfx12-generic", "compute_amdgcn"},
GFX(1200), // gfx1200
GFX(1201), // gfx1201
- {CudaArch::Generic, "generic", ""},
+ {OffloadArch::AMDGCNSPIRV, "amdgcnspirv", "compute_amdgcn"},
+ {OffloadArch::Generic, "generic", ""},
// clang-format on
};
#undef SM
#undef SM2
#undef GFX
-const char *CudaArchToString(CudaArch A) {
+const char *OffloadArchToString(OffloadArch A) {
auto result = std::find_if(
std::begin(arch_names), std::end(arch_names),
- [A](const CudaArchToStringMap &map) { return A == map.arch; });
+ [A](const OffloadArchToStringMap &map) { return A == map.arch; });
if (result == std::end(arch_names))
return "unknown";
return result->arch_name;
}
-const char *CudaArchToVirtualArchString(CudaArch A) {
+const char *OffloadArchToVirtualArchString(OffloadArch A) {
auto result = std::find_if(
std::begin(arch_names), std::end(arch_names),
- [A](const CudaArchToStringMap &map) { return A == map.arch; });
+ [A](const OffloadArchToStringMap &map) { return A == map.arch; });
if (result == std::end(arch_names))
return "unknown";
return result->virtual_arch_name;
}
-CudaArch StringToCudaArch(llvm::StringRef S) {
+OffloadArch StringToOffloadArch(llvm::StringRef S) {
auto result = std::find_if(
std::begin(arch_names), std::end(arch_names),
- [S](const CudaArchToStringMap &map) { return S == map.arch_name; });
+ [S](const OffloadArchToStringMap &map) { return S == map.arch_name; });
if (result == std::end(arch_names))
- return CudaArch::UNKNOWN;
+ return OffloadArch::UNKNOWN;
return result->arch;
}
-CudaVersion MinVersionForCudaArch(CudaArch A) {
- if (A == CudaArch::UNKNOWN)
+CudaVersion MinVersionForOffloadArch(OffloadArch A) {
+ if (A == OffloadArch::UNKNOWN)
return CudaVersion::UNKNOWN;
// AMD GPUs do not depend on CUDA versions.
- if (IsAMDGpuArch(A))
+ if (IsAMDOffloadArch(A))
return CudaVersion::CUDA_70;
switch (A) {
- case CudaArch::SM_20:
- case CudaArch::SM_21:
- case CudaArch::SM_30:
- case CudaArch::SM_32:
- case CudaArch::SM_35:
- case CudaArch::SM_37:
- case CudaArch::SM_50:
- case CudaArch::SM_52:
- case CudaArch::SM_53:
+ case OffloadArch::SM_20:
+ case OffloadArch::SM_21:
+ case OffloadArch::SM_30:
+ case OffloadArch::SM_32_:
+ case OffloadArch::SM_35:
+ case OffloadArch::SM_37:
+ case OffloadArch::SM_50:
+ case OffloadArch::SM_52:
+ case OffloadArch::SM_53:
return CudaVersion::CUDA_70;
- case CudaArch::SM_60:
- case CudaArch::SM_61:
- case CudaArch::SM_62:
+ case OffloadArch::SM_60:
+ case OffloadArch::SM_61:
+ case OffloadArch::SM_62:
return CudaVersion::CUDA_80;
- case CudaArch::SM_70:
+ case OffloadArch::SM_70:
return CudaVersion::CUDA_90;
- case CudaArch::SM_72:
+ case OffloadArch::SM_72:
return CudaVersion::CUDA_91;
- case CudaArch::SM_75:
+ case OffloadArch::SM_75:
return CudaVersion::CUDA_100;
- case CudaArch::SM_80:
+ case OffloadArch::SM_80:
return CudaVersion::CUDA_110;
- case CudaArch::SM_86:
+ case OffloadArch::SM_86:
return CudaVersion::CUDA_111;
- case CudaArch::SM_87:
+ case OffloadArch::SM_87:
return CudaVersion::CUDA_114;
- case CudaArch::SM_89:
- case CudaArch::SM_90:
+ case OffloadArch::SM_89:
+ case OffloadArch::SM_90:
return CudaVersion::CUDA_118;
- case CudaArch::SM_90a:
+ case OffloadArch::SM_90a:
return CudaVersion::CUDA_120;
default:
llvm_unreachable("invalid enum");
}
}
-CudaVersion MaxVersionForCudaArch(CudaArch A) {
+CudaVersion MaxVersionForOffloadArch(OffloadArch A) {
// AMD GPUs do not depend on CUDA versions.
- if (IsAMDGpuArch(A))
+ if (IsAMDOffloadArch(A))
return CudaVersion::NEW;
switch (A) {
- case CudaArch::UNKNOWN:
+ case OffloadArch::UNKNOWN:
return CudaVersion::UNKNOWN;
- case CudaArch::SM_20:
- case CudaArch::SM_21:
+ case OffloadArch::SM_20:
+ case OffloadArch::SM_21:
return CudaVersion::CUDA_80;
- case CudaArch::SM_30:
- case CudaArch::SM_32:
+ case OffloadArch::SM_30:
+ case OffloadArch::SM_32_:
return CudaVersion::CUDA_102;
- case CudaArch::SM_35:
- case CudaArch::SM_37:
+ case OffloadArch::SM_35:
+ case OffloadArch::SM_37:
return CudaVersion::CUDA_118;
default:
return CudaVersion::NEW;
}
}
-bool CudaFeatureEnabled(llvm::VersionTuple Version, CudaFeature Feature) {
+bool CudaFeatureEnabled(llvm::VersionTuple Version, CudaFeature Feature) {
return CudaFeatureEnabled(ToCudaVersion(Version), Feature);
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp b/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp
index 0208ccc31bd7..66776daa5e14 100644
--- a/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp
@@ -360,9 +360,10 @@ void DiagnosticsEngine::setSeverity(diag::kind Diag, diag::Severity Map,
"Cannot map errors into warnings!");
assert((L.isInvalid() || SourceMgr) && "No SourceMgr for valid location");
- // Don't allow a mapping to a warning override an error/fatal mapping.
+ // A command line -Wfoo has an invalid L and cannot override error/fatal
+ // mapping, while a warning pragma can.
bool WasUpgradedFromWarning = false;
- if (Map == diag::Severity::Warning) {
+ if (Map == diag::Severity::Warning && L.isInvalid()) {
DiagnosticMapping &Info = GetCurDiagState()->getOrAddMapping(Diag);
if (Info.getSeverity() == diag::Severity::Error ||
Info.getSeverity() == diag::Severity::Fatal) {
@@ -851,8 +852,7 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
// When the diagnostic string is only "%0", the entire string is being given
// by an outside source. Remove unprintable characters from this string
// and skip all the other string processing.
- if (DiagEnd - DiagStr == 2 &&
- StringRef(DiagStr, DiagEnd - DiagStr).equals("%0") &&
+ if (DiagEnd - DiagStr == 2 && StringRef(DiagStr, DiagEnd - DiagStr) == "%0" &&
getArgKind(0) == DiagnosticsEngine::ak_std_string) {
const std::string &S = getArgStdStr(0);
EscapeStringForDiagnostic(S, OutStr);
diff --git a/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp b/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp
index 6c7bd50eefb7..cd42573968b2 100644
--- a/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp
@@ -49,6 +49,7 @@ struct StaticDiagInfoDescriptionStringTable {
#include "clang/Basic/DiagnosticSemaKinds.inc"
#include "clang/Basic/DiagnosticAnalysisKinds.inc"
#include "clang/Basic/DiagnosticRefactoringKinds.inc"
+#include "clang/Basic/DiagnosticInstallAPIKinds.inc"
// clang-format on
#undef DIAG
};
@@ -70,7 +71,8 @@ const StaticDiagInfoDescriptionStringTable StaticDiagInfoDescriptions = {
#include "clang/Basic/DiagnosticSemaKinds.inc"
#include "clang/Basic/DiagnosticAnalysisKinds.inc"
#include "clang/Basic/DiagnosticRefactoringKinds.inc"
- // clang-format on
+#include "clang/Basic/DiagnosticInstallAPIKinds.inc"
+// clang-format on
#undef DIAG
};
@@ -95,12 +97,13 @@ const uint32_t StaticDiagInfoDescriptionOffsets[] = {
#include "clang/Basic/DiagnosticSemaKinds.inc"
#include "clang/Basic/DiagnosticAnalysisKinds.inc"
#include "clang/Basic/DiagnosticRefactoringKinds.inc"
- // clang-format on
+#include "clang/Basic/DiagnosticInstallAPIKinds.inc"
+// clang-format on
#undef DIAG
};
// Diagnostic classes.
-enum {
+enum DiagnosticClass {
CLASS_NOTE = 0x01,
CLASS_REMARK = 0x02,
CLASS_WARNING = 0x03,
@@ -110,15 +113,22 @@ enum {
struct StaticDiagInfoRec {
uint16_t DiagID;
+ LLVM_PREFERRED_TYPE(diag::Severity)
uint8_t DefaultSeverity : 3;
+ LLVM_PREFERRED_TYPE(DiagnosticClass)
uint8_t Class : 3;
+ LLVM_PREFERRED_TYPE(DiagnosticIDs::SFINAEResponse)
uint8_t SFINAE : 2;
uint8_t Category : 6;
+ LLVM_PREFERRED_TYPE(bool)
uint8_t WarnNoWerror : 1;
+ LLVM_PREFERRED_TYPE(bool)
uint8_t WarnShowInSystemHeader : 1;
+ LLVM_PREFERRED_TYPE(bool)
uint8_t WarnShowInSystemMacro : 1;
uint16_t OptionGroupIndex : 15;
+ LLVM_PREFERRED_TYPE(bool)
uint16_t Deferrable : 1;
uint16_t DescriptionLen;
@@ -166,6 +176,7 @@ VALIDATE_DIAG_SIZE(CROSSTU)
VALIDATE_DIAG_SIZE(SEMA)
VALIDATE_DIAG_SIZE(ANALYSIS)
VALIDATE_DIAG_SIZE(REFACTORING)
+VALIDATE_DIAG_SIZE(INSTALLAPI)
#undef VALIDATE_DIAG_SIZE
#undef STRINGIFY_NAME
@@ -197,6 +208,7 @@ const StaticDiagInfoRec StaticDiagInfo[] = {
#include "clang/Basic/DiagnosticSemaKinds.inc"
#include "clang/Basic/DiagnosticAnalysisKinds.inc"
#include "clang/Basic/DiagnosticRefactoringKinds.inc"
+#include "clang/Basic/DiagnosticInstallAPIKinds.inc"
// clang-format on
#undef DIAG
};
@@ -239,6 +251,7 @@ CATEGORY(CROSSTU, COMMENT)
CATEGORY(SEMA, CROSSTU)
CATEGORY(ANALYSIS, SEMA)
CATEGORY(REFACTORING, ANALYSIS)
+CATEGORY(INSTALLAPI, REFACTORING)
#undef CATEGORY
// Avoid out of bounds reads.
@@ -848,6 +861,9 @@ bool DiagnosticIDs::isUnrecoverable(unsigned DiagID) const {
if (isARCDiagnostic(DiagID))
return false;
+ if (isCodegenABICheckDiagnostic(DiagID))
+ return false;
+
return true;
}
@@ -855,3 +871,8 @@ bool DiagnosticIDs::isARCDiagnostic(unsigned DiagID) {
unsigned cat = getCategoryNumberForDiag(DiagID);
return DiagnosticIDs::getCategoryNameFromID(cat).starts_with("ARC ");
}
+
+bool DiagnosticIDs::isCodegenABICheckDiagnostic(unsigned DiagID) {
+ unsigned cat = getCategoryNumberForDiag(DiagID);
+ return DiagnosticIDs::getCategoryNameFromID(cat) == "Codegen ABI Check";
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/FileManager.cpp b/contrib/llvm-project/clang/lib/Basic/FileManager.cpp
index 974c8c22598f..4509cee1ca0f 100644
--- a/contrib/llvm-project/clang/lib/Basic/FileManager.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/FileManager.cpp
@@ -39,12 +39,6 @@ using namespace clang;
#define DEBUG_TYPE "file-search"
-ALWAYS_ENABLED_STATISTIC(NumDirLookups, "Number of directory lookups.");
-ALWAYS_ENABLED_STATISTIC(NumFileLookups, "Number of file lookups.");
-ALWAYS_ENABLED_STATISTIC(NumDirCacheMisses,
- "Number of directory cache misses.");
-ALWAYS_ENABLED_STATISTIC(NumFileCacheMisses, "Number of file cache misses.");
-
//===----------------------------------------------------------------------===//
// Common logic.
//===----------------------------------------------------------------------===//
@@ -88,6 +82,22 @@ getDirectoryFromFile(FileManager &FileMgr, StringRef Filename,
return FileMgr.getDirectoryRef(DirName, CacheFailure);
}
+DirectoryEntry *&FileManager::getRealDirEntry(const llvm::vfs::Status &Status) {
+ assert(Status.isDirectory() && "The directory should exist!");
+ // See if we have already opened a directory with the
+ // same inode (this occurs on Unix-like systems when one dir is
+ // symlinked to another, for example) or the same path (on
+ // Windows).
+ DirectoryEntry *&UDE = UniqueRealDirs[Status.getUniqueID()];
+
+ if (!UDE) {
+ // We don't have this directory yet, add it. We use the string
+ // key from the SeenDirEntries map as the string.
+ UDE = new (DirsAlloc.Allocate()) DirectoryEntry();
+ }
+ return UDE;
+}
+
/// Add all ancestors of the given path (pointing to either a file or
/// a directory) as virtual directories.
void FileManager::addAncestorsAsVirtualDirs(StringRef Path) {
@@ -105,11 +115,21 @@ void FileManager::addAncestorsAsVirtualDirs(StringRef Path) {
if (NamedDirEnt.second)
return;
- // Add the virtual directory to the cache.
- auto *UDE = new (DirsAlloc.Allocate()) DirectoryEntry();
- UDE->Name = NamedDirEnt.first();
- NamedDirEnt.second = *UDE;
- VirtualDirectoryEntries.push_back(UDE);
+ // Check to see if the directory exists.
+ llvm::vfs::Status Status;
+ auto statError =
+ getStatValue(DirName, Status, false, nullptr /*directory lookup*/);
+ if (statError) {
+ // There's no real directory at the given path.
+ // Add the virtual directory to the cache.
+ auto *UDE = new (DirsAlloc.Allocate()) DirectoryEntry();
+ NamedDirEnt.second = *UDE;
+ VirtualDirectoryEntries.push_back(UDE);
+ } else {
+ // There is the real directory
+ DirectoryEntry *&UDE = getRealDirEntry(Status);
+ NamedDirEnt.second = *UDE;
+ }
// Recursively add the other ancestors.
addAncestorsAsVirtualDirs(DirName);
@@ -169,18 +189,8 @@ FileManager::getDirectoryRef(StringRef DirName, bool CacheFailure) {
return llvm::errorCodeToError(statError);
}
- // It exists. See if we have already opened a directory with the
- // same inode (this occurs on Unix-like systems when one dir is
- // symlinked to another, for example) or the same path (on
- // Windows).
- DirectoryEntry *&UDE = UniqueRealDirs[Status.getUniqueID()];
-
- if (!UDE) {
- // We don't have this directory yet, add it. We use the string
- // key from the SeenDirEntries map as the string.
- UDE = new (DirsAlloc.Allocate()) DirectoryEntry();
- UDE->Name = InterndDirName;
- }
+ // It exists.
+ DirectoryEntry *&UDE = getRealDirEntry(Status);
NamedDirEnt.second = *UDE;
return DirectoryEntryRef(NamedDirEnt);
@@ -324,32 +334,10 @@ FileManager::getFileRef(StringRef Filename, bool openFile, bool CacheFailure) {
FileEntryRef ReturnedRef(*NamedFileEnt);
if (ReusingEntry) { // Already have an entry with this inode, return it.
-
- // FIXME: This hack ensures that `getDir()` will use the path that was
- // used to lookup this file, even if we found a file by different path
- // first. This is required in order to find a module's structure when its
- // headers/module map are mapped in the VFS.
- //
- // See above for how this will eventually be removed. `IsVFSMapped`
- // *cannot* be narrowed to `ExposesExternalVFSPath` as crash reproducers
- // also depend on this logic and they have `use-external-paths: false`.
- if (&DirInfo.getDirEntry() != UFE->Dir && Status.IsVFSMapped)
- UFE->Dir = &DirInfo.getDirEntry();
-
- // Always update LastRef to the last name by which a file was accessed.
- // FIXME: Neither this nor always using the first reference is correct; we
- // want to switch towards a design where we return a FileName object that
- // encapsulates both the name by which the file was accessed and the
- // corresponding FileEntry.
- // FIXME: LastRef should be removed from FileEntry once all clients adopt
- // FileEntryRef.
- UFE->LastRef = ReturnedRef;
-
return ReturnedRef;
}
// Otherwise, we don't have this file yet, add it.
- UFE->LastRef = ReturnedRef;
UFE->Size = Status.getSize();
UFE->ModTime = llvm::sys::toTimeT(Status.getLastModificationTime());
UFE->Dir = &DirInfo.getDirEntry();
@@ -387,6 +375,13 @@ llvm::Expected<FileEntryRef> FileManager::getSTDIN() {
return *STDIN;
}
+void FileManager::trackVFSUsage(bool Active) {
+ FS->visit([Active](llvm::vfs::FileSystem &FileSys) {
+ if (auto *RFS = dyn_cast<llvm::vfs::RedirectingFileSystem>(&FileSys))
+ RFS->setUsageTrackingActive(Active);
+ });
+}
+
const FileEntry *FileManager::getVirtualFile(StringRef Filename, off_t Size,
time_t ModificationTime) {
return &getVirtualFileRef(Filename, Size, ModificationTime).getFileEntry();
@@ -461,7 +456,6 @@ FileEntryRef FileManager::getVirtualFileRef(StringRef Filename, off_t Size,
}
NamedFileEnt.second = FileEntryRef::MapValue(*UFE, *DirInfo);
- UFE->LastRef = FileEntryRef(NamedFileEnt);
UFE->Size = Size;
UFE->ModTime = ModificationTime;
UFE->Dir = &DirInfo->getDirEntry();
@@ -490,7 +484,6 @@ OptionalFileEntryRef FileManager::getBypassFile(FileEntryRef VF) {
FileEntry *BFE = new (FilesAlloc.Allocate()) FileEntry();
BypassFileEntries.push_back(BFE);
Insertion.first->second = FileEntryRef::MapValue(*BFE, VF.getDir());
- BFE->LastRef = FileEntryRef(*Insertion.first);
BFE->Size = Status.getSize();
BFE->Dir = VF.getFileEntry().Dir;
BFE->ModTime = llvm::sys::toTimeT(Status.getLastModificationTime());
@@ -537,13 +530,18 @@ void FileManager::fillRealPathName(FileEntry *UFE, llvm::StringRef FileName) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
FileManager::getBufferForFile(FileEntryRef FE, bool isVolatile,
- bool RequiresNullTerminator) {
+ bool RequiresNullTerminator,
+ std::optional<int64_t> MaybeLimit) {
const FileEntry *Entry = &FE.getFileEntry();
// If the content is living on the file entry, return a reference to it.
if (Entry->Content)
return llvm::MemoryBuffer::getMemBuffer(Entry->Content->getMemBufferRef());
uint64_t FileSize = Entry->getSize();
+
+ if (MaybeLimit)
+ FileSize = *MaybeLimit;
+
// If there's a high enough chance that the file have changed since we
// got its size, force a stat before opening it.
if (isVolatile || Entry->isNamedPipe())
@@ -566,7 +564,7 @@ FileManager::getBufferForFile(FileEntryRef FE, bool isVolatile,
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
FileManager::getBufferForFileImpl(StringRef Filename, int64_t FileSize,
bool isVolatile,
- bool RequiresNullTerminator) {
+ bool RequiresNullTerminator) const {
if (FileSystemOpts.WorkingDir.empty())
return FS->getBufferForFile(Filename, FileSize, RequiresNullTerminator,
isVolatile);
@@ -675,6 +673,14 @@ StringRef FileManager::getCanonicalName(const void *Entry, StringRef Name) {
return CanonicalName;
}
+void FileManager::AddStats(const FileManager &Other) {
+ assert(&Other != this && "Collecting stats into the same FileManager");
+ NumDirLookups += Other.NumDirLookups;
+ NumFileLookups += Other.NumFileLookups;
+ NumDirCacheMisses += Other.NumDirCacheMisses;
+ NumFileCacheMisses += Other.NumFileCacheMisses;
+}
+
void FileManager::PrintStats() const {
llvm::errs() << "\n*** File Manager Stats:\n";
llvm::errs() << UniqueRealFiles.size() << " real files found, "
diff --git a/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp b/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp
index d0d8316385b4..4f7ccaf4021d 100644
--- a/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp
@@ -36,7 +36,7 @@ using namespace clang;
// A check to make sure the ObjCOrBuiltinID has sufficient room to store the
// largest possible target/aux-target combination. If we exceed this, we likely
// need to just change the ObjCOrBuiltinIDBits value in IdentifierTable.h.
-static_assert(2 * LargestBuiltinID < (2 << (ObjCOrBuiltinIDBits - 1)),
+static_assert(2 * LargestBuiltinID < (2 << (InterestingIdentifierBits - 1)),
"Insufficient ObjCOrBuiltinID Bits");
//===----------------------------------------------------------------------===//
@@ -280,13 +280,13 @@ static void AddObjCKeyword(StringRef Name,
Table.get(Name).setObjCKeywordID(ObjCID);
}
-static void AddInterestingIdentifier(StringRef Name,
- tok::InterestingIdentifierKind BTID,
- IdentifierTable &Table) {
- // Don't add 'not_interesting' identifier.
- if (BTID != tok::not_interesting) {
+static void AddNotableIdentifier(StringRef Name,
+ tok::NotableIdentifierKind BTID,
+ IdentifierTable &Table) {
+ // Don't add 'not_notable' identifier.
+ if (BTID != tok::not_notable) {
IdentifierInfo &Info = Table.get(Name, tok::identifier);
- Info.setInterestingIdentifierID(BTID);
+ Info.setNotableIdentifierID(BTID);
}
}
@@ -306,8 +306,8 @@ void IdentifierTable::AddKeywords(const LangOptions &LangOpts) {
#define OBJC_AT_KEYWORD(NAME) \
if (LangOpts.ObjC) \
AddObjCKeyword(StringRef(#NAME), tok::objc_##NAME, *this);
-#define INTERESTING_IDENTIFIER(NAME) \
- AddInterestingIdentifier(StringRef(#NAME), tok::NAME, *this);
+#define NOTABLE_IDENTIFIER(NAME) \
+ AddNotableIdentifier(StringRef(#NAME), tok::NAME, *this);
#define TESTING_KEYWORD(NAME, FLAGS)
#include "clang/Basic/TokenKinds.def"
@@ -425,8 +425,8 @@ tok::PPKeywordKind IdentifierInfo::getPPKeywordID() const {
// collisions (if there were, the switch below would complain about duplicate
// case values). Note that this depends on 'if' being null terminated.
-#define HASH(LEN, FIRST, THIRD) \
- (LEN << 5) + (((FIRST-'a') + (THIRD-'a')) & 31)
+#define HASH(LEN, FIRST, THIRD) \
+ (LEN << 6) + (((FIRST - 'a') - (THIRD - 'a')) & 63)
#define CASE(LEN, FIRST, THIRD, NAME) \
case HASH(LEN, FIRST, THIRD): \
return memcmp(Name, #NAME, LEN) ? tok::pp_not_keyword : tok::pp_ ## NAME
@@ -441,6 +441,7 @@ tok::PPKeywordKind IdentifierInfo::getPPKeywordID() const {
CASE( 4, 'e', 's', else);
CASE( 4, 'l', 'n', line);
CASE( 4, 's', 'c', sccs);
+ CASE( 5, 'e', 'b', embed);
CASE( 5, 'e', 'd', endif);
CASE( 5, 'e', 'r', error);
CASE( 5, 'i', 'e', ident);
@@ -541,7 +542,8 @@ unsigned Selector::getNumArgs() const {
return SI->getNumArgs();
}
-IdentifierInfo *Selector::getIdentifierInfoForSlot(unsigned argIndex) const {
+const IdentifierInfo *
+Selector::getIdentifierInfoForSlot(unsigned argIndex) const {
if (getIdentifierInfoFlag() < MultiArg) {
assert(argIndex == 0 && "illegal keyword index");
return getAsIdentifierInfo();
@@ -553,7 +555,7 @@ IdentifierInfo *Selector::getIdentifierInfoForSlot(unsigned argIndex) const {
}
StringRef Selector::getNameForSlot(unsigned int argIndex) const {
- IdentifierInfo *II = getIdentifierInfoForSlot(argIndex);
+ const IdentifierInfo *II = getIdentifierInfoForSlot(argIndex);
return II ? II->getName() : StringRef();
}
@@ -574,7 +576,7 @@ std::string Selector::getAsString() const {
return "<null selector>";
if (getIdentifierInfoFlag() < MultiArg) {
- IdentifierInfo *II = getAsIdentifierInfo();
+ const IdentifierInfo *II = getAsIdentifierInfo();
if (getNumArgs() == 0) {
assert(II && "If the number of arguments is 0 then II is guaranteed to "
@@ -608,7 +610,7 @@ static bool startsWithWord(StringRef name, StringRef word) {
}
ObjCMethodFamily Selector::getMethodFamilyImpl(Selector sel) {
- IdentifierInfo *first = sel.getIdentifierInfoForSlot(0);
+ const IdentifierInfo *first = sel.getIdentifierInfoForSlot(0);
if (!first) return OMF_None;
StringRef name = first->getName();
@@ -655,7 +657,7 @@ ObjCMethodFamily Selector::getMethodFamilyImpl(Selector sel) {
}
ObjCInstanceTypeFamily Selector::getInstTypeMethodFamily(Selector sel) {
- IdentifierInfo *first = sel.getIdentifierInfoForSlot(0);
+ const IdentifierInfo *first = sel.getIdentifierInfoForSlot(0);
if (!first) return OIT_None;
StringRef name = first->getName();
@@ -683,7 +685,7 @@ ObjCInstanceTypeFamily Selector::getInstTypeMethodFamily(Selector sel) {
}
ObjCStringFormatFamily Selector::getStringFormatFamilyImpl(Selector sel) {
- IdentifierInfo *first = sel.getIdentifierInfoForSlot(0);
+ const IdentifierInfo *first = sel.getIdentifierInfoForSlot(0);
if (!first) return SFF_None;
StringRef name = first->getName();
@@ -750,7 +752,8 @@ size_t SelectorTable::getTotalMemory() const {
return SelTabImpl.Allocator.getTotalMemory();
}
-Selector SelectorTable::getSelector(unsigned nKeys, IdentifierInfo **IIV) {
+Selector SelectorTable::getSelector(unsigned nKeys,
+ const IdentifierInfo **IIV) {
if (nKeys < 2)
return Selector(IIV[0], nKeys);
diff --git a/contrib/llvm-project/clang/lib/Basic/LangOptions.cpp b/contrib/llvm-project/clang/lib/Basic/LangOptions.cpp
index a0adfbf61840..e5adc034f60c 100644
--- a/contrib/llvm-project/clang/lib/Basic/LangOptions.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/LangOptions.cpp
@@ -48,7 +48,7 @@ void LangOptions::resetNonModularOptions() {
bool LangOptions::isNoBuiltinFunc(StringRef FuncName) const {
for (unsigned i = 0, e = NoBuiltinFuncs.size(); i != e; ++i)
- if (FuncName.equals(NoBuiltinFuncs[i]))
+ if (FuncName == NoBuiltinFuncs[i])
return true;
return false;
}
@@ -112,6 +112,7 @@ void LangOptions::setLangDefaults(LangOptions &Opts, Language Lang,
Opts.C11 = Std.isC11();
Opts.C17 = Std.isC17();
Opts.C23 = Std.isC23();
+ Opts.C2y = Std.isC2y();
Opts.CPlusPlus = Std.isCPlusPlus();
Opts.CPlusPlus11 = Std.isCPlusPlus11();
Opts.CPlusPlus14 = Std.isCPlusPlus14();
@@ -124,6 +125,7 @@ void LangOptions::setLangDefaults(LangOptions &Opts, Language Lang,
Opts.HexFloats = Std.hasHexFloats();
Opts.WChar = Std.isCPlusPlus();
Opts.Digraphs = Std.hasDigraphs();
+ Opts.RawStringLiterals = Std.hasRawStringLiterals();
Opts.HLSL = Lang == Language::HLSL;
if (Opts.HLSL && Opts.IncludeDefaultHeader)
diff --git a/contrib/llvm-project/clang/lib/Basic/LangStandards.cpp b/contrib/llvm-project/clang/lib/Basic/LangStandards.cpp
index ab09c7221dda..c8c9292abcb2 100644
--- a/contrib/llvm-project/clang/lib/Basic/LangStandards.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/LangStandards.cpp
@@ -21,6 +21,8 @@ StringRef clang::languageToString(Language L) {
return "Asm";
case Language::LLVM_IR:
return "LLVM IR";
+ case Language::CIR:
+ return "ClangIR";
case Language::C:
return "C";
case Language::CXX:
@@ -69,6 +71,16 @@ LangStandard::Kind LangStandard::getLangKind(StringRef Name) {
.Default(lang_unspecified);
}
+LangStandard::Kind LangStandard::getHLSLLangKind(StringRef Name) {
+ return llvm::StringSwitch<LangStandard::Kind>(Name)
+ .Case("2016", LangStandard::lang_hlsl2016)
+ .Case("2017", LangStandard::lang_hlsl2017)
+ .Case("2018", LangStandard::lang_hlsl2018)
+ .Case("2021", LangStandard::lang_hlsl2021)
+ .Case("202x", LangStandard::lang_hlsl202x)
+ .Default(LangStandard::lang_unspecified);
+}
+
const LangStandard *LangStandard::getLangStandardForName(StringRef Name) {
Kind K = getLangKind(Name);
if (K == lang_unspecified)
@@ -82,6 +94,7 @@ LangStandard::Kind clang::getDefaultLanguageStandard(clang::Language Lang,
switch (Lang) {
case Language::Unknown:
case Language::LLVM_IR:
+ case Language::CIR:
llvm_unreachable("Invalid input kind!");
case Language::OpenCL:
return LangStandard::lang_opencl12;
diff --git a/contrib/llvm-project/clang/lib/Basic/Module.cpp b/contrib/llvm-project/clang/lib/Basic/Module.cpp
index 0dac8748a98a..90b7b0d24bb6 100644
--- a/contrib/llvm-project/clang/lib/Basic/Module.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Module.cpp
@@ -140,8 +140,8 @@ bool Module::isUnimportable(const LangOptions &LangOpts,
return true;
}
for (unsigned I = 0, N = Current->Requirements.size(); I != N; ++I) {
- if (hasFeature(Current->Requirements[I].first, LangOpts, Target) !=
- Current->Requirements[I].second) {
+ if (hasFeature(Current->Requirements[I].FeatureName, LangOpts, Target) !=
+ Current->Requirements[I].RequiredState) {
Req = Current->Requirements[I];
return true;
}
@@ -305,6 +305,10 @@ bool Module::directlyUses(const Module *Requested) {
if (Requested->fullModuleNameIs({"_Builtin_stddef", "max_align_t"}) ||
Requested->fullModuleNameIs({"_Builtin_stddef_wint_t"}))
return true;
+ // Darwin is allowed is to use our builtin 'ptrauth.h' and its accompanying
+ // module.
+ if (!Requested->Parent && Requested->Name == "ptrauth")
+ return true;
if (NoUndeclaredIncludes)
UndeclaredUses.insert(Requested);
@@ -315,7 +319,7 @@ bool Module::directlyUses(const Module *Requested) {
void Module::addRequirement(StringRef Feature, bool RequiredState,
const LangOptions &LangOpts,
const TargetInfo &Target) {
- Requirements.push_back(Requirement(std::string(Feature), RequiredState));
+ Requirements.push_back(Requirement{std::string(Feature), RequiredState});
// If this feature is currently available, we're done.
if (hasFeature(Feature, LangOpts, Target) == RequiredState)
@@ -375,7 +379,7 @@ Module *Module::findOrInferSubmodule(StringRef Name) {
Module *Module::getGlobalModuleFragment() const {
assert(isNamedModuleUnit() && "We should only query the global module "
- "fragment from the C++ 20 Named modules");
+ "fragment from the C++20 Named modules");
for (auto *SubModule : SubModules)
if (SubModule->isExplicitGlobalModule())
@@ -386,7 +390,7 @@ Module *Module::getGlobalModuleFragment() const {
Module *Module::getPrivateModuleFragment() const {
assert(isNamedModuleUnit() && "We should only query the private module "
- "fragment from the C++ 20 Named modules");
+ "fragment from the C++20 Named modules");
for (auto *SubModule : SubModules)
if (SubModule->isPrivateModule())
@@ -500,9 +504,9 @@ void Module::print(raw_ostream &OS, unsigned Indent, bool Dump) const {
for (unsigned I = 0, N = Requirements.size(); I != N; ++I) {
if (I)
OS << ", ";
- if (!Requirements[I].second)
+ if (!Requirements[I].RequiredState)
OS << "!";
- OS << Requirements[I].first;
+ OS << Requirements[I].FeatureName;
}
OS << "\n";
}
@@ -720,26 +724,3 @@ void VisibleModuleSet::setVisible(Module *M, SourceLocation Loc,
};
VisitModule({M, nullptr});
}
-
-void VisibleModuleSet::makeTransitiveImportsVisible(Module *M,
- SourceLocation Loc,
- VisibleCallback Vis,
- ConflictCallback Cb) {
- for (auto *I : M->Imports)
- setVisible(I, Loc, Vis, Cb);
-}
-
-ASTSourceDescriptor::ASTSourceDescriptor(Module &M)
- : Signature(M.Signature), ClangModule(&M) {
- if (M.Directory)
- Path = M.Directory->getName();
- if (auto File = M.getASTFile())
- ASTFile = File->getName();
-}
-
-std::string ASTSourceDescriptor::getModuleName() const {
- if (ClangModule)
- return ClangModule->Name;
- else
- return std::string(PCHModuleName);
-}
diff --git a/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp b/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp
index 6c31b0824eb8..b141e48e77e3 100644
--- a/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp
@@ -574,31 +574,7 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
}
bool clang::isOpenMPLoopDirective(OpenMPDirectiveKind DKind) {
- return DKind == OMPD_simd || DKind == OMPD_for || DKind == OMPD_for_simd ||
- DKind == OMPD_parallel_for || DKind == OMPD_parallel_for_simd ||
- DKind == OMPD_taskloop || DKind == OMPD_taskloop_simd ||
- DKind == OMPD_master_taskloop || DKind == OMPD_master_taskloop_simd ||
- DKind == OMPD_parallel_master_taskloop ||
- DKind == OMPD_parallel_master_taskloop_simd ||
- DKind == OMPD_masked_taskloop || DKind == OMPD_masked_taskloop_simd ||
- DKind == OMPD_parallel_masked_taskloop || DKind == OMPD_distribute ||
- DKind == OMPD_parallel_masked_taskloop_simd ||
- DKind == OMPD_target_parallel_for ||
- DKind == OMPD_distribute_parallel_for ||
- DKind == OMPD_distribute_parallel_for_simd ||
- DKind == OMPD_distribute_simd ||
- DKind == OMPD_target_parallel_for_simd || DKind == OMPD_target_simd ||
- DKind == OMPD_teams_distribute ||
- DKind == OMPD_teams_distribute_simd ||
- DKind == OMPD_teams_distribute_parallel_for_simd ||
- DKind == OMPD_teams_distribute_parallel_for ||
- DKind == OMPD_target_teams_distribute ||
- DKind == OMPD_target_teams_distribute_parallel_for ||
- DKind == OMPD_target_teams_distribute_parallel_for_simd ||
- DKind == OMPD_target_teams_distribute_simd || DKind == OMPD_tile ||
- DKind == OMPD_unroll || DKind == OMPD_loop ||
- DKind == OMPD_teams_loop || DKind == OMPD_target_teams_loop ||
- DKind == OMPD_parallel_loop || DKind == OMPD_target_parallel_loop;
+ return getDirectiveAssociation(DKind) == Association::Loop;
}
bool clang::isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind) {
@@ -619,44 +595,20 @@ bool clang::isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind) {
}
bool clang::isOpenMPTaskLoopDirective(OpenMPDirectiveKind DKind) {
- return DKind == OMPD_taskloop || DKind == OMPD_taskloop_simd ||
- DKind == OMPD_master_taskloop || DKind == OMPD_master_taskloop_simd ||
- DKind == OMPD_parallel_master_taskloop ||
- DKind == OMPD_masked_taskloop || DKind == OMPD_masked_taskloop_simd ||
- DKind == OMPD_parallel_masked_taskloop ||
- DKind == OMPD_parallel_masked_taskloop_simd ||
- DKind == OMPD_parallel_master_taskloop_simd;
+ return DKind == OMPD_taskloop ||
+ llvm::is_contained(getLeafConstructs(DKind), OMPD_taskloop);
}
bool clang::isOpenMPParallelDirective(OpenMPDirectiveKind DKind) {
- return DKind == OMPD_parallel || DKind == OMPD_parallel_for ||
- DKind == OMPD_parallel_for_simd || DKind == OMPD_parallel_sections ||
- DKind == OMPD_target_parallel || DKind == OMPD_target_parallel_for ||
- DKind == OMPD_distribute_parallel_for ||
- DKind == OMPD_distribute_parallel_for_simd ||
- DKind == OMPD_target_parallel_for_simd ||
- DKind == OMPD_teams_distribute_parallel_for ||
- DKind == OMPD_teams_distribute_parallel_for_simd ||
- DKind == OMPD_target_teams_distribute_parallel_for ||
- DKind == OMPD_target_teams_distribute_parallel_for_simd ||
- DKind == OMPD_parallel_master || DKind == OMPD_parallel_masked ||
- DKind == OMPD_parallel_master_taskloop ||
- DKind == OMPD_parallel_master_taskloop_simd ||
- DKind == OMPD_parallel_masked_taskloop ||
- DKind == OMPD_parallel_masked_taskloop_simd ||
- DKind == OMPD_parallel_loop || DKind == OMPD_target_parallel_loop ||
- DKind == OMPD_teams_loop;
+ if (DKind == OMPD_teams_loop)
+ return true;
+ return DKind == OMPD_parallel ||
+ llvm::is_contained(getLeafConstructs(DKind), OMPD_parallel);
}
bool clang::isOpenMPTargetExecutionDirective(OpenMPDirectiveKind DKind) {
- return DKind == OMPD_target || DKind == OMPD_target_parallel ||
- DKind == OMPD_target_parallel_for ||
- DKind == OMPD_target_parallel_for_simd || DKind == OMPD_target_simd ||
- DKind == OMPD_target_teams || DKind == OMPD_target_teams_distribute ||
- DKind == OMPD_target_teams_distribute_parallel_for ||
- DKind == OMPD_target_teams_distribute_parallel_for_simd ||
- DKind == OMPD_target_teams_distribute_simd ||
- DKind == OMPD_target_teams_loop || DKind == OMPD_target_parallel_loop;
+ return DKind == OMPD_target ||
+ llvm::is_contained(getLeafConstructs(DKind), OMPD_target);
}
bool clang::isOpenMPTargetDataManagementDirective(OpenMPDirectiveKind DKind) {
@@ -665,60 +617,45 @@ bool clang::isOpenMPTargetDataManagementDirective(OpenMPDirectiveKind DKind) {
}
bool clang::isOpenMPNestingTeamsDirective(OpenMPDirectiveKind DKind) {
- return DKind == OMPD_teams || DKind == OMPD_teams_distribute ||
- DKind == OMPD_teams_distribute_simd ||
- DKind == OMPD_teams_distribute_parallel_for_simd ||
- DKind == OMPD_teams_distribute_parallel_for ||
- DKind == OMPD_teams_loop;
+ if (DKind == OMPD_teams)
+ return true;
+ ArrayRef<Directive> Leaves = getLeafConstructs(DKind);
+ return !Leaves.empty() && Leaves.front() == OMPD_teams;
}
bool clang::isOpenMPTeamsDirective(OpenMPDirectiveKind DKind) {
- return isOpenMPNestingTeamsDirective(DKind) || DKind == OMPD_target_teams ||
- DKind == OMPD_target_teams_distribute ||
- DKind == OMPD_target_teams_distribute_parallel_for ||
- DKind == OMPD_target_teams_distribute_parallel_for_simd ||
- DKind == OMPD_target_teams_distribute_simd ||
- DKind == OMPD_target_teams_loop;
+ return DKind == OMPD_teams ||
+ llvm::is_contained(getLeafConstructs(DKind), OMPD_teams);
}
bool clang::isOpenMPSimdDirective(OpenMPDirectiveKind DKind) {
- return DKind == OMPD_simd || DKind == OMPD_for_simd ||
- DKind == OMPD_parallel_for_simd || DKind == OMPD_taskloop_simd ||
- DKind == OMPD_master_taskloop_simd ||
- DKind == OMPD_masked_taskloop_simd ||
- DKind == OMPD_parallel_master_taskloop_simd ||
- DKind == OMPD_parallel_masked_taskloop_simd ||
- DKind == OMPD_distribute_parallel_for_simd ||
- DKind == OMPD_distribute_simd || DKind == OMPD_target_simd ||
- DKind == OMPD_teams_distribute_simd ||
- DKind == OMPD_teams_distribute_parallel_for_simd ||
- DKind == OMPD_target_teams_distribute_parallel_for_simd ||
- DKind == OMPD_target_teams_distribute_simd ||
- DKind == OMPD_target_parallel_for_simd;
+ // Avoid OMPD_declare_simd
+ if (getDirectiveAssociation(DKind) != Association::Loop)
+ return false;
+ // Formally, OMPD_end_do_simd also has a loop association, but
+ // it's a Fortran-specific directive.
+
+ return DKind == OMPD_simd ||
+ llvm::is_contained(getLeafConstructs(DKind), OMPD_simd);
}
bool clang::isOpenMPNestingDistributeDirective(OpenMPDirectiveKind Kind) {
- return Kind == OMPD_distribute || Kind == OMPD_distribute_parallel_for ||
- Kind == OMPD_distribute_parallel_for_simd ||
- Kind == OMPD_distribute_simd;
- // TODO add next directives.
+ if (Kind == OMPD_distribute)
+ return true;
+ ArrayRef<Directive> Leaves = getLeafConstructs(Kind);
+ return !Leaves.empty() && Leaves.front() == OMPD_distribute;
}
bool clang::isOpenMPDistributeDirective(OpenMPDirectiveKind Kind) {
- return isOpenMPNestingDistributeDirective(Kind) ||
- Kind == OMPD_teams_distribute || Kind == OMPD_teams_distribute_simd ||
- Kind == OMPD_teams_distribute_parallel_for_simd ||
- Kind == OMPD_teams_distribute_parallel_for ||
- Kind == OMPD_target_teams_distribute ||
- Kind == OMPD_target_teams_distribute_parallel_for ||
- Kind == OMPD_target_teams_distribute_parallel_for_simd ||
- Kind == OMPD_target_teams_distribute_simd;
+ return Kind == OMPD_distribute ||
+ llvm::is_contained(getLeafConstructs(Kind), OMPD_distribute);
}
bool clang::isOpenMPGenericLoopDirective(OpenMPDirectiveKind Kind) {
- return Kind == OMPD_loop || Kind == OMPD_teams_loop ||
- Kind == OMPD_target_teams_loop || Kind == OMPD_parallel_loop ||
- Kind == OMPD_target_parallel_loop;
+ if (Kind == OMPD_loop)
+ return true;
+ ArrayRef<Directive> Leaves = getLeafConstructs(Kind);
+ return !Leaves.empty() && Leaves.back() == OMPD_loop;
}
bool clang::isOpenMPPrivate(OpenMPClauseKind Kind) {
@@ -747,7 +684,8 @@ bool clang::isOpenMPLoopBoundSharingDirective(OpenMPDirectiveKind Kind) {
}
bool clang::isOpenMPLoopTransformationDirective(OpenMPDirectiveKind DKind) {
- return DKind == OMPD_tile || DKind == OMPD_unroll;
+ return DKind == OMPD_tile || DKind == OMPD_unroll || DKind == OMPD_reverse ||
+ DKind == OMPD_interchange;
}
bool clang::isOpenMPCombinedParallelADirective(OpenMPDirectiveKind DKind) {
@@ -765,139 +703,130 @@ bool clang::needsTaskBasedThreadLimit(OpenMPDirectiveKind DKind) {
DKind == OMPD_target_parallel_loop;
}
-void clang::getOpenMPCaptureRegions(
- SmallVectorImpl<OpenMPDirectiveKind> &CaptureRegions,
- OpenMPDirectiveKind DKind) {
- assert(unsigned(DKind) < llvm::omp::Directive_enumSize);
+bool clang::isOpenMPExecutableDirective(OpenMPDirectiveKind DKind) {
+ if (DKind == OMPD_error)
+ return true;
+ Category Cat = getDirectiveCategory(DKind);
+ return Cat == Category::Executable || Cat == Category::Subsidiary;
+}
+
+bool clang::isOpenMPCapturingDirective(OpenMPDirectiveKind DKind) {
+ if (isOpenMPExecutableDirective(DKind)) {
+ switch (DKind) {
+ case OMPD_atomic:
+ case OMPD_barrier:
+ case OMPD_cancel:
+ case OMPD_cancellation_point:
+ case OMPD_critical:
+ case OMPD_depobj:
+ case OMPD_error:
+ case OMPD_flush:
+ case OMPD_masked:
+ case OMPD_master:
+ case OMPD_section:
+ case OMPD_taskwait:
+ case OMPD_taskyield:
+ return false;
+ default:
+ return !isOpenMPLoopTransformationDirective(DKind);
+ }
+ }
+ // Non-executable directives.
switch (DKind) {
case OMPD_metadirective:
- CaptureRegions.push_back(OMPD_metadirective);
- break;
- case OMPD_parallel:
- case OMPD_parallel_for:
- case OMPD_parallel_for_simd:
- case OMPD_parallel_master:
- case OMPD_parallel_masked:
- case OMPD_parallel_sections:
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_parallel_loop:
- CaptureRegions.push_back(OMPD_parallel);
- break;
- case OMPD_target_teams:
- case OMPD_target_teams_distribute:
- case OMPD_target_teams_distribute_simd:
- CaptureRegions.push_back(OMPD_task);
- CaptureRegions.push_back(OMPD_target);
- CaptureRegions.push_back(OMPD_teams);
- break;
- case OMPD_teams:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- CaptureRegions.push_back(OMPD_teams);
- break;
- case OMPD_target:
- case OMPD_target_simd:
- CaptureRegions.push_back(OMPD_task);
- CaptureRegions.push_back(OMPD_target);
- break;
- case OMPD_teams_loop:
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- CaptureRegions.push_back(OMPD_teams);
- CaptureRegions.push_back(OMPD_parallel);
- break;
- case OMPD_target_parallel:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_parallel_loop:
- CaptureRegions.push_back(OMPD_task);
- CaptureRegions.push_back(OMPD_target);
- CaptureRegions.push_back(OMPD_parallel);
- break;
- case OMPD_task:
- case OMPD_target_enter_data:
- case OMPD_target_exit_data:
- case OMPD_target_update:
- CaptureRegions.push_back(OMPD_task);
- break;
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_masked_taskloop:
- case OMPD_masked_taskloop_simd:
- CaptureRegions.push_back(OMPD_taskloop);
- break;
- case OMPD_parallel_masked_taskloop:
- case OMPD_parallel_masked_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- CaptureRegions.push_back(OMPD_parallel);
- CaptureRegions.push_back(OMPD_taskloop);
- break;
- case OMPD_target_teams_loop:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
- CaptureRegions.push_back(OMPD_task);
- CaptureRegions.push_back(OMPD_target);
- CaptureRegions.push_back(OMPD_teams);
- CaptureRegions.push_back(OMPD_parallel);
- break;
case OMPD_nothing:
- CaptureRegions.push_back(OMPD_nothing);
- break;
- case OMPD_loop:
- // TODO: 'loop' may require different capture regions depending on the bind
- // clause or the parent directive when there is no bind clause. Use
- // OMPD_unknown for now.
- case OMPD_simd:
- case OMPD_for:
- case OMPD_for_simd:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_critical:
- case OMPD_taskgroup:
- case OMPD_distribute:
- case OMPD_ordered:
- case OMPD_atomic:
- case OMPD_target_data:
- case OMPD_distribute_simd:
- case OMPD_scope:
- case OMPD_dispatch:
- CaptureRegions.push_back(OMPD_unknown);
- break;
- case OMPD_tile:
- case OMPD_unroll:
- // loop transformations do not introduce captures.
- break;
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_taskyield:
- case OMPD_barrier:
- case OMPD_error:
- case OMPD_taskwait:
- case OMPD_cancellation_point:
- case OMPD_cancel:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_declare_simd:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_requires:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- llvm_unreachable("OpenMP Directive is not allowed");
- case OMPD_unknown:
+ return true;
default:
- llvm_unreachable("Unknown OpenMP directive");
+ break;
}
+ return false;
+}
+
+void clang::getOpenMPCaptureRegions(
+ SmallVectorImpl<OpenMPDirectiveKind> &CaptureRegions,
+ OpenMPDirectiveKind DKind) {
+ assert(unsigned(DKind) < llvm::omp::Directive_enumSize);
+ assert(isOpenMPCapturingDirective(DKind) && "Expecting capturing directive");
+
+ auto GetRegionsForLeaf = [&](OpenMPDirectiveKind LKind) {
+ assert(isLeafConstruct(LKind) && "Epecting leaf directive");
+ // Whether a leaf would require OMPD_unknown if it occured on its own.
+ switch (LKind) {
+ case OMPD_metadirective:
+ CaptureRegions.push_back(OMPD_metadirective);
+ break;
+ case OMPD_nothing:
+ CaptureRegions.push_back(OMPD_nothing);
+ break;
+ case OMPD_parallel:
+ CaptureRegions.push_back(OMPD_parallel);
+ break;
+ case OMPD_target:
+ CaptureRegions.push_back(OMPD_task);
+ CaptureRegions.push_back(OMPD_target);
+ break;
+ case OMPD_task:
+ case OMPD_target_enter_data:
+ case OMPD_target_exit_data:
+ case OMPD_target_update:
+ CaptureRegions.push_back(OMPD_task);
+ break;
+ case OMPD_teams:
+ CaptureRegions.push_back(OMPD_teams);
+ break;
+ case OMPD_taskloop:
+ CaptureRegions.push_back(OMPD_taskloop);
+ break;
+ case OMPD_loop:
+ // TODO: 'loop' may require different capture regions depending on the
+ // bind clause or the parent directive when there is no bind clause.
+ // If any of the directives that push regions here are parents of 'loop',
+ // assume 'parallel'. Otherwise do nothing.
+ if (!CaptureRegions.empty() &&
+ !llvm::is_contained(CaptureRegions, OMPD_parallel))
+ CaptureRegions.push_back(OMPD_parallel);
+ else
+ return true;
+ break;
+ case OMPD_dispatch:
+ case OMPD_distribute:
+ case OMPD_for:
+ case OMPD_masked:
+ case OMPD_master:
+ case OMPD_ordered:
+ case OMPD_scope:
+ case OMPD_sections:
+ case OMPD_simd:
+ case OMPD_single:
+ case OMPD_target_data:
+ case OMPD_taskgroup:
+ // These directives (when standalone) use OMPD_unknown as the region,
+ // but when they're constituents of a compound directive, and other
+ // leafs from that directive have specific regions, then these directives
+ // add no additional regions.
+ return true;
+ default:
+ llvm::errs() << getOpenMPDirectiveName(LKind) << '\n';
+ llvm_unreachable("Unexpected directive");
+ }
+ return false;
+ };
+
+ bool MayNeedUnknownRegion = false;
+ for (OpenMPDirectiveKind L : getLeafConstructsOrSelf(DKind))
+ MayNeedUnknownRegion |= GetRegionsForLeaf(L);
+
+ // We need OMPD_unknown when no regions were added, and specific leaf
+ // constructs were present. Push a single OMPD_unknown as the capture
+ /// region.
+ if (CaptureRegions.empty() && MayNeedUnknownRegion)
+ CaptureRegions.push_back(OMPD_unknown);
+
+ // OMPD_unknown is only expected as the only region. If other regions
+ // are present OMPD_unknown should not be present.
+ assert((CaptureRegions[0] == OMPD_unknown ||
+ !llvm::is_contained(CaptureRegions, OMPD_unknown)) &&
+ "Misplaced OMPD_unknown");
}
bool clang::checkFailClauseParameter(OpenMPClauseKind FailClauseParameter) {
diff --git a/contrib/llvm-project/clang/lib/Basic/Sarif.cpp b/contrib/llvm-project/clang/lib/Basic/Sarif.cpp
index 1cae7b937bc6..8c144df34167 100644
--- a/contrib/llvm-project/clang/lib/Basic/Sarif.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Sarif.cpp
@@ -57,8 +57,7 @@ static std::string percentEncodeURICharacter(char C) {
// should be written out directly. Otherwise, percent
// encode the character and write that out instead of the
// reserved character.
- if (llvm::isAlnum(C) ||
- StringRef::npos != StringRef("-._~:@!$&'()*+,;=").find(C))
+ if (llvm::isAlnum(C) || StringRef("-._~:@!$&'()*+,;=").contains(C))
return std::string(&C, 1);
return "%" + llvm::toHex(StringRef(&C, 1));
}
diff --git a/contrib/llvm-project/clang/lib/Basic/SourceManager.cpp b/contrib/llvm-project/clang/lib/Basic/SourceManager.cpp
index 37734d3b10e7..533a9fe88a21 100644
--- a/contrib/llvm-project/clang/lib/Basic/SourceManager.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/SourceManager.cpp
@@ -20,6 +20,7 @@
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Allocator.h"
@@ -46,6 +47,13 @@ using namespace clang;
using namespace SrcMgr;
using llvm::MemoryBuffer;
+#define DEBUG_TYPE "source-manager"
+
+// Reaching a limit of 2^31 results in a hard error. This metric makes it
+// possible to track whether a particular compiler invocation is close to it.
+STATISTIC(MaxUsedSLocBytes, "Maximum number of bytes used by source locations "
+ "(both loaded and local).");
+
//===----------------------------------------------------------------------===//
// SourceManager Helper Classes
//===----------------------------------------------------------------------===//
@@ -276,14 +284,14 @@ void SourceManager::AddLineNote(SourceLocation Loc, unsigned LineNo,
std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
bool Invalid = false;
- const SLocEntry &Entry = getSLocEntry(LocInfo.first, &Invalid);
+ SLocEntry &Entry = getSLocEntry(LocInfo.first, &Invalid);
if (!Entry.isFile() || Invalid)
return;
- const SrcMgr::FileInfo &FileInfo = Entry.getFile();
+ SrcMgr::FileInfo &FileInfo = Entry.getFile();
// Remember that this file has #line directives now if it doesn't already.
- const_cast<SrcMgr::FileInfo&>(FileInfo).setHasLineDirectives();
+ FileInfo.setHasLineDirectives();
(void) getLineTable();
@@ -431,6 +439,10 @@ ContentCache &SourceManager::createMemBufferContentCache(
const SrcMgr::SLocEntry &SourceManager::loadSLocEntry(unsigned Index,
bool *Invalid) const {
+ return const_cast<SourceManager *>(this)->loadSLocEntry(Index, Invalid);
+}
+
+SrcMgr::SLocEntry &SourceManager::loadSLocEntry(unsigned Index, bool *Invalid) {
assert(!SLocEntryLoaded[Index]);
if (ExternalSLocEntries->ReadSLocEntry(-(static_cast<int>(Index) + 2))) {
if (Invalid)
@@ -462,6 +474,7 @@ SourceManager::AllocateLoadedSLocEntries(unsigned NumSLocEntries,
SLocEntryLoaded.resize(LoadedSLocEntryTable.size());
SLocEntryOffsetLoaded.resize(LoadedSLocEntryTable.size());
CurrentLoadedOffset -= TotalSize;
+ updateSlocUsageStats();
int BaseID = -int(LoadedSLocEntryTable.size()) - 1;
LoadedSLocEntryAllocBegin.push_back(FileID::get(BaseID));
return std::make_pair(BaseID, CurrentLoadedOffset);
@@ -615,6 +628,7 @@ FileID SourceManager::createFileIDImpl(ContentCache &File, StringRef Filename,
// We do a +1 here because we want a SourceLocation that means "the end of the
// file", e.g. for the "no newline at the end of the file" diagnostic.
NextLocalOffset += FileSize + 1;
+ updateSlocUsageStats();
// Set LastFileIDLookup to the newly created file. The next getFileID call is
// almost guaranteed to be from that file.
@@ -675,6 +689,7 @@ SourceManager::createExpansionLocImpl(const ExpansionInfo &Info,
}
// See createFileID for that +1.
NextLocalOffset += Length + 1;
+ updateSlocUsageStats();
return SourceLocation::getMacroLoc(NextLocalOffset - (Length + 1));
}
@@ -1839,6 +1854,12 @@ void SourceManager::associateFileChunkWithMacroArgExp(
MacroArgsCache[EndOffs] = EndOffsMappedLoc;
}
+void SourceManager::updateSlocUsageStats() const {
+ SourceLocation::UIntTy UsedBytes =
+ NextLocalOffset + (MaxLoadedOffset - CurrentLoadedOffset);
+ MaxUsedSLocBytes.updateMax(UsedBytes);
+}
+
/// If \arg Loc points inside a function macro argument, the returned
/// location will be the macro location in which the argument was expanded.
/// If a macro argument is used multiple times, the expanded location will
@@ -1911,6 +1932,24 @@ SourceManager::getDecomposedIncludedLoc(FileID FID) const {
return DecompLoc;
}
+FileID SourceManager::getUniqueLoadedASTFileID(SourceLocation Loc) const {
+ assert(isLoadedSourceLocation(Loc) &&
+ "Must be a source location in a loaded PCH/Module file");
+
+ auto [FID, Ignore] = getDecomposedLoc(Loc);
+ // `LoadedSLocEntryAllocBegin` stores the sorted lowest FID of each loaded
+ // allocation. Later allocations have lower FileIDs. The call below is to find
+ // the lowest FID of a loaded allocation from any FID in the same allocation.
+ // The lowest FID is used to identify a loaded allocation.
+ const FileID *FirstFID =
+ llvm::lower_bound(LoadedSLocEntryAllocBegin, FID, std::greater<FileID>{});
+
+ assert(FirstFID &&
+ "The failure to find the first FileID of a "
+ "loaded AST from a loaded source location was unexpected.");
+ return *FirstFID;
+}
+
bool SourceManager::isInTheSameTranslationUnitImpl(
const std::pair<FileID, unsigned> &LOffs,
const std::pair<FileID, unsigned> &ROffs) const {
diff --git a/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp b/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp
index 96b3ad9ba2f2..29f5cd14e46e 100644
--- a/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp
@@ -157,6 +157,7 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : Triple(T) {
HasAArch64SVETypes = false;
HasRISCVVTypes = false;
AllowAMDGPUUnsafeFPAtomics = false;
+ HasUnalignedAccess = false;
ARMCDECoprocMask = 0;
// Default to no types using fpret.
@@ -405,6 +406,16 @@ void TargetInfo::adjust(DiagnosticsEngine &Diags, LangOptions &Opts) {
LongDoubleAlign = 64;
}
+ // HLSL explicitly defines the sizes and formats of some data types, and we
+ // need to conform to those regardless of what architecture you are targeting.
+ if (Opts.HLSL) {
+ LongWidth = LongAlign = 64;
+ if (!Opts.NativeHalfType) {
+ HalfFormat = &llvm::APFloat::IEEEsingle();
+ HalfWidth = HalfAlign = 32;
+ }
+ }
+
if (Opts.OpenCL) {
// OpenCL C requires specific widths for types, irrespective of
// what these normally are for the target.
@@ -925,6 +936,10 @@ bool TargetInfo::validateInputConstraint(
return true;
}
+bool TargetInfo::validatePointerAuthKey(const llvm::APSInt &value) const {
+ return false;
+}
+
void TargetInfo::CheckFixedPointBits() const {
// Check that the number of fractional and integral bits (and maybe sign) can
// fit into the bits given for a fixed point type.
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets.cpp b/contrib/llvm-project/clang/lib/Basic/Targets.cpp
index e3283510c6aa..29133f9ee8fc 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets.cpp
@@ -673,8 +673,11 @@ std::unique_ptr<TargetInfo> AllocateTarget(const llvm::Triple &Triple,
}
case llvm::Triple::spirv64: {
if (os != llvm::Triple::UnknownOS ||
- Triple.getEnvironment() != llvm::Triple::UnknownEnvironment)
+ Triple.getEnvironment() != llvm::Triple::UnknownEnvironment) {
+ if (os == llvm::Triple::OSType::AMDHSA)
+ return std::make_unique<SPIRV64AMDGCNTargetInfo>(Triple, Opts);
return nullptr;
+ }
return std::make_unique<SPIRV64TargetInfo>(Triple, Opts);
}
case llvm::Triple::wasm32:
@@ -760,7 +763,7 @@ using namespace clang::targets;
TargetInfo *
TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
const std::shared_ptr<TargetOptions> &Opts) {
- llvm::Triple Triple(Opts->Triple);
+ llvm::Triple Triple(llvm::Triple::normalize(Opts->Triple));
// Construct the target
std::unique_ptr<TargetInfo> Target = AllocateTarget(Triple, *Opts);
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
index f5a5d689fa09..63fc15f916c5 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
@@ -11,9 +11,11 @@
//===----------------------------------------------------------------------===//
#include "AArch64.h"
+#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
@@ -152,6 +154,7 @@ AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
else
LongWidth = LongAlign = PointerWidth = PointerAlign = 32;
+ BitIntMaxAlign = 128;
MaxVectorAlign = 128;
MaxAtomicInlineWidth = 128;
MaxAtomicPromoteWidth = 128;
@@ -186,6 +189,8 @@ AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
UseZeroLengthBitfieldAlignment = true;
+ HasUnalignedAccess = true;
+
// AArch64 targets default to using the ARM C++ ABI.
TheCXXABI.set(TargetCXXABI::GenericAArch64);
@@ -199,18 +204,47 @@ AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
StringRef AArch64TargetInfo::getABI() const { return ABI; }
bool AArch64TargetInfo::setABI(const std::string &Name) {
- if (Name != "aapcs" && Name != "darwinpcs")
+ if (Name != "aapcs" && Name != "aapcs-soft" && Name != "darwinpcs" &&
+ Name != "pauthtest")
return false;
ABI = Name;
return true;
}
+bool AArch64TargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
+ if (hasFeature("fp") && ABI == "aapcs-soft") {
+ // aapcs-soft is not allowed for targets with an FPU, to avoid there being
+    // two incompatible ABIs.
+ Diags.Report(diag::err_target_unsupported_abi_with_fpu) << ABI;
+ return false;
+ }
+ if (getTriple().getEnvironment() == llvm::Triple::PAuthTest &&
+ getTriple().getOS() != llvm::Triple::Linux) {
+ Diags.Report(diag::err_target_unsupported_abi_for_triple)
+ << getTriple().getEnvironmentName() << getTriple().getTriple();
+ return false;
+ }
+ return true;
+}
+
+bool AArch64TargetInfo::validateGlobalRegisterVariable(
+ StringRef RegName, unsigned RegSize, bool &HasSizeMismatch) const {
+ if ((RegName == "sp") || RegName.starts_with("x")) {
+ HasSizeMismatch = RegSize != 64;
+ return true;
+ } else if (RegName.starts_with("w")) {
+ HasSizeMismatch = RegSize != 32;
+ return true;
+ }
+ return false;
+}
+
bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
BranchProtectionInfo &BPI,
StringRef &Err) const {
llvm::ARM::ParsedBranchProtection PBP;
- if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
+ if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err, HasPAuthLR))
return false;
BPI.SignReturnAddr =
@@ -231,7 +265,7 @@ bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
}
bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
- return Name == "generic" || llvm::AArch64::parseCpu(Name);
+ return llvm::AArch64::parseCpu(Name).has_value();
}
bool AArch64TargetInfo::setCPU(const std::string &Name) {
@@ -271,7 +305,6 @@ void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
- Builder.defineMacro("__ARM_FEATURE_BTI", "1");
// Also include the Armv8.4 defines
getTargetDefinesARMV84A(Opts, Builder);
}
@@ -423,6 +456,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasSVE2)
Builder.defineMacro("__ARM_FEATURE_SVE2", "1");
+ if (HasSVE2p1)
+ Builder.defineMacro("__ARM_FEATURE_SVE2p1", "1");
+
if (HasSVE2 && HasSVE2AES)
Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");
@@ -435,16 +471,25 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasSVE2 && HasSVE2SM4)
Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");
+ if (HasSVEB16B16)
+ Builder.defineMacro("__ARM_FEATURE_SVE_B16B16", "1");
+
if (HasSME) {
Builder.defineMacro("__ARM_FEATURE_SME");
Builder.defineMacro("__ARM_FEATURE_LOCALLY_STREAMING", "1");
}
- if (HasSME2) {
- Builder.defineMacro("__ARM_FEATURE_SME");
- Builder.defineMacro("__ARM_FEATURE_SME2");
- Builder.defineMacro("__ARM_FEATURE_LOCALLY_STREAMING", "1");
- }
+ if (HasSME2)
+ Builder.defineMacro("__ARM_FEATURE_SME2", "1");
+
+ if (HasSME2p1)
+ Builder.defineMacro("__ARM_FEATURE_SME2p1", "1");
+
+ if (HasSMEF16F16)
+ Builder.defineMacro("__ARM_FEATURE_SME_F16F16", "1");
+
+ if (HasSMEB16B16)
+ Builder.defineMacro("__ARM_FEATURE_SME_B16B16", "1");
if (HasCRC)
Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
@@ -481,7 +526,13 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasPAuth)
Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
- if (HasUnaligned)
+ if (HasPAuthLR)
+ Builder.defineMacro("__ARM_FEATURE_PAUTH_LR", "1");
+
+ if (HasBTI)
+ Builder.defineMacro("__ARM_FEATURE_BTI", "1");
+
+ if (HasUnalignedAccess)
Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
if ((FPU & NeonMode) && HasFullFP16)
@@ -532,6 +583,7 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
// 0: Protection using the A key
// 1: Protection using the B key
// 2: Protection including leaf functions
+ // 3: Protection using PC as a diversifier
unsigned Value = 0;
if (Opts.isSignReturnAddressWithAKey())
@@ -542,6 +594,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (Opts.isSignReturnAddressScopeAll())
Value |= (1 << 2);
+ if (Opts.BranchProtectionPAuthLR)
+ Value |= (1 << 3);
+
Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
}
@@ -637,37 +692,40 @@ AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
unsigned AArch64TargetInfo::multiVersionSortPriority(StringRef Name) const {
if (Name == "default")
return 0;
- if (auto Ext = llvm::AArch64::parseArchExtension(Name))
- return Ext->FmvPriority;
+ if (auto Ext = llvm::AArch64::parseFMVExtension(Name))
+ return Ext->Priority;
return 0;
}
unsigned AArch64TargetInfo::multiVersionFeatureCost() const {
// Take the maximum priority as per feature cost, so more features win.
- return llvm::AArch64::ExtensionInfo::MaxFMVPriority;
+ constexpr unsigned MaxFMVPriority = 1000;
+ return MaxFMVPriority;
}
bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name) const {
- if (auto Ext = llvm::AArch64::parseArchExtension(Name))
- return !Ext->DependentFeatures.empty();
+ // FMV extensions which imply no backend features do not affect codegen.
+ if (auto Ext = llvm::AArch64::parseFMVExtension(Name))
+ return !Ext->Features.empty();
return false;
}
-StringRef AArch64TargetInfo::getFeatureDependencies(StringRef Name) const {
- if (auto Ext = llvm::AArch64::parseArchExtension(Name))
- return Ext->DependentFeatures;
- return StringRef();
-}
-
bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
- return llvm::AArch64::parseArchExtension(FeatureStr).has_value();
+ // CPU features might be separated by '+', extract them and check
+ llvm::SmallVector<StringRef, 8> Features;
+ FeatureStr.split(Features, "+");
+ for (auto &Feature : Features)
+ if (!llvm::AArch64::parseFMVExtension(Feature.trim()).has_value())
+ return false;
+ return true;
}
bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
.Cases("aarch64", "arm64", "arm", true)
.Case("fmv", HasFMV)
- .Cases("neon", "fp", "simd", FPU & NeonMode)
+ .Case("fp", FPU & FPUMode)
+ .Cases("neon", "simd", FPU & NeonMode)
.Case("jscvt", HasJSCVT)
.Case("fcma", HasFCMA)
.Case("rng", HasRandGen)
@@ -693,6 +751,7 @@ bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
.Case("sve", FPU & SveMode)
.Case("sve-bf16", FPU & SveMode && HasBFloat16)
.Case("sve-i8mm", FPU & SveMode && HasMatMul)
+ .Case("sve-b16b16", HasSVEB16B16)
.Case("f32mm", FPU & SveMode && HasMatmulFP32)
.Case("f64mm", FPU & SveMode && HasMatmulFP64)
.Case("sve2", FPU & SveMode && HasSVE2)
@@ -700,11 +759,15 @@ bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
.Case("sve2-bitperm", FPU & SveMode && HasSVE2BitPerm)
.Case("sve2-sha3", FPU & SveMode && HasSVE2SHA3)
.Case("sve2-sm4", FPU & SveMode && HasSVE2SM4)
+ .Case("sve2p1", FPU & SveMode && HasSVE2p1)
.Case("sme", HasSME)
.Case("sme2", HasSME2)
+ .Case("sme2p1", HasSME2p1)
.Case("sme-f64f64", HasSMEF64F64)
.Case("sme-i16i64", HasSMEI16I64)
.Case("sme-fa64", HasSMEFA64)
+ .Case("sme-f16f16", HasSMEF16F16)
+ .Case("sme-b16b16", HasSMEB16B16)
.Cases("memtag", "memtag2", HasMTE)
.Case("sb", HasSB)
.Case("predres", HasPredRes)
@@ -777,6 +840,13 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasFullFP16 = true;
HasSVE2 = true;
}
+ if (Feature == "+sve2p1") {
+ FPU |= NeonMode;
+ FPU |= SveMode;
+ HasFullFP16 = true;
+ HasSVE2 = true;
+ HasSVE2p1 = true;
+ }
if (Feature == "+sve2-aes") {
FPU |= NeonMode;
FPU |= SveMode;
@@ -798,6 +868,8 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasSVE2 = true;
HasSVE2SM4 = true;
}
+ if (Feature == "+sve-b16b16")
+ HasSVEB16B16 = true;
if (Feature == "+sve2-bitperm") {
FPU |= NeonMode;
FPU |= SveMode;
@@ -828,6 +900,13 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasBFloat16 = true;
HasFullFP16 = true;
}
+ if (Feature == "+sme2p1") {
+ HasSME = true;
+ HasSME2 = true;
+ HasSME2p1 = true;
+ HasBFloat16 = true;
+ HasFullFP16 = true;
+ }
if (Feature == "+sme-f64f64") {
HasSME = true;
HasSMEF64F64 = true;
@@ -847,6 +926,21 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasSVE2 = true;
HasSMEFA64 = true;
}
+ if (Feature == "+sme-f16f16") {
+ HasSME = true;
+ HasSME2 = true;
+ HasBFloat16 = true;
+ HasFullFP16 = true;
+ HasSMEF16F16 = true;
+ }
+ if (Feature == "+sme-b16b16") {
+ HasSME = true;
+ HasSME2 = true;
+ HasBFloat16 = true;
+ HasFullFP16 = true;
+ HasSVEB16B16 = true;
+ HasSMEB16B16 = true;
+ }
if (Feature == "+sb")
HasSB = true;
if (Feature == "+predres")
@@ -895,7 +989,8 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasSM4 = true;
}
if (Feature == "+strict-align")
- HasUnaligned = false;
+ HasUnalignedAccess = false;
+
// All predecessor archs are added but select the latest one for ArchKind.
if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
ArchInfo = &llvm::AArch64::ARMV8A;
@@ -988,6 +1083,10 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasGCS = true;
if (Feature == "+rcpc3")
HasRCPC3 = true;
+ if (Feature == "+pauth-lr") {
+ HasPAuthLR = true;
+ HasPAuth = true;
+ }
}
// Check features that are manually disabled by command line options.
@@ -1016,57 +1115,17 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
return true;
}
-bool AArch64TargetInfo::initFeatureMap(
- llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
- const std::vector<std::string> &FeaturesVec) const {
- std::vector<std::string> UpdatedFeaturesVec;
- // Parse the CPU and add any implied features.
- std::optional<llvm::AArch64::CpuInfo> CpuInfo = llvm::AArch64::parseCpu(CPU);
- if (CpuInfo) {
- auto Exts = CpuInfo->getImpliedExtensions();
- std::vector<StringRef> CPUFeats;
- llvm::AArch64::getExtensionFeatures(Exts, CPUFeats);
- for (auto F : CPUFeats) {
- assert((F[0] == '+' || F[0] == '-') && "Expected +/- in target feature!");
- UpdatedFeaturesVec.push_back(F.str());
- }
- }
-
- // Process target and dependent features. This is done in two loops collecting
- // them into UpdatedFeaturesVec: first to add dependent '+'features, second to
- // add target '+/-'features that can later disable some of features added on
- // the first loop. Function Multi Versioning features begin with '?'.
- for (const auto &Feature : FeaturesVec)
- if (((Feature[0] == '?' || Feature[0] == '+')) &&
- AArch64TargetInfo::doesFeatureAffectCodeGen(Feature.substr(1))) {
- StringRef DepFeatures =
- AArch64TargetInfo::getFeatureDependencies(Feature.substr(1));
- SmallVector<StringRef, 1> AttrFeatures;
- DepFeatures.split(AttrFeatures, ",");
- for (auto F : AttrFeatures)
- UpdatedFeaturesVec.push_back(F.str());
- }
- for (const auto &Feature : FeaturesVec)
- if (Feature[0] != '?') {
- std::string UpdatedFeature = Feature;
- if (Feature[0] == '+') {
- std::optional<llvm::AArch64::ExtensionInfo> Extension =
- llvm::AArch64::parseArchExtension(Feature.substr(1));
- if (Extension)
- UpdatedFeature = Extension->Feature.str();
- }
- UpdatedFeaturesVec.push_back(UpdatedFeature);
- }
-
- return TargetInfo::initFeatureMap(Features, Diags, CPU, UpdatedFeaturesVec);
-}
-
// Parse AArch64 Target attributes, which are a comma separated list of:
// "arch=<arch>" - parsed to features as per -march=..
// "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
// "tune=<cpu>" - TuneCPU set to <cpu>
// "feature", "no-feature" - Add (or remove) feature.
// "+feature", "+nofeature" - Add (or remove) feature.
+//
+// A feature may correspond to an Extension (anything with a corresponding
+// AEK_), in which case an ExtensionSet is used to parse it and expand its
+// dependencies. If the feature does not yield a successful parse then it
+// is passed through.
ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
ParsedTargetAttr Ret;
if (Features == "default")
@@ -1076,23 +1135,31 @@ ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
bool FoundArch = false;
auto SplitAndAddFeatures = [](StringRef FeatString,
- std::vector<std::string> &Features) {
+ std::vector<std::string> &Features,
+ llvm::AArch64::ExtensionSet &FeatureBits) {
SmallVector<StringRef, 8> SplitFeatures;
FeatString.split(SplitFeatures, StringRef("+"), -1, false);
for (StringRef Feature : SplitFeatures) {
- StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
- if (!FeatureName.empty())
- Features.push_back(FeatureName.str());
+ if (FeatureBits.parseModifier(Feature))
+ continue;
+ // Pass through anything that failed to parse so that we can emit
+ // diagnostics, as well as valid internal feature names.
+ //
+ // FIXME: We should consider rejecting internal feature names like
+ // neon, v8a, etc.
+ // FIXME: We should consider emitting diagnostics here.
+ if (Feature.starts_with("no"))
+ Features.push_back("-" + Feature.drop_front(2).str());
else
- // Pushing the original feature string to give a sema error later on
- // when they get checked.
- if (Feature.starts_with("no"))
- Features.push_back("-" + Feature.drop_front(2).str());
- else
- Features.push_back("+" + Feature.str());
+ Features.push_back("+" + Feature.str());
}
};
+ llvm::AArch64::ExtensionSet FeatureBits;
+ // Reconstruct the bitset from the command line option features.
+ FeatureBits.reconstructFromParsedFeatures(getTargetOpts().FeaturesAsWritten,
+ Ret.Features);
+
for (auto &Feature : AttrFeatures) {
Feature = Feature.trim();
if (Feature.starts_with("fpmath="))
@@ -1115,9 +1182,9 @@ ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
// Ret.Features.
if (!AI)
continue;
- Ret.Features.push_back(AI->ArchFeature.str());
+ FeatureBits.addArchDefaults(*AI);
// Add any extra features, after the +
- SplitAndAddFeatures(Split.second, Ret.Features);
+ SplitAndAddFeatures(Split.second, Ret.Features, FeatureBits);
} else if (Feature.starts_with("cpu=")) {
if (!Ret.CPU.empty())
Ret.Duplicate = "cpu=";
@@ -1127,7 +1194,10 @@ ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
std::pair<StringRef, StringRef> Split =
Feature.split("=").second.trim().split("+");
Ret.CPU = Split.first;
- SplitAndAddFeatures(Split.second, Ret.Features);
+ if (auto CpuInfo = llvm::AArch64::parseCpu(Ret.CPU)) {
+ FeatureBits.addCPUDefaults(*CpuInfo);
+ SplitAndAddFeatures(Split.second, Ret.Features, FeatureBits);
+ }
}
} else if (Feature.starts_with("tune=")) {
if (!Ret.Tune.empty())
@@ -1135,25 +1205,23 @@ ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
else
Ret.Tune = Feature.split("=").second.trim();
} else if (Feature.starts_with("+")) {
- SplitAndAddFeatures(Feature, Ret.Features);
- } else if (Feature.starts_with("no-")) {
- StringRef FeatureName =
- llvm::AArch64::getArchExtFeature(Feature.split("-").second);
- if (!FeatureName.empty())
- Ret.Features.push_back("-" + FeatureName.drop_front(1).str());
- else
- Ret.Features.push_back("-" + Feature.split("-").second.str());
+ SplitAndAddFeatures(Feature, Ret.Features, FeatureBits);
} else {
- // Try parsing the string to the internal target feature name. If it is
- // invalid, add the original string (which could already be an internal
- // name). These should be checked later by isValidFeatureName.
- StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
- if (!FeatureName.empty())
- Ret.Features.push_back(FeatureName.str());
+ if (FeatureBits.parseModifier(Feature, /* AllowNoDashForm = */ true))
+ continue;
+ // Pass through anything that failed to parse so that we can emit
+ // diagnostics, as well as valid internal feature names.
+ //
+ // FIXME: We should consider rejecting internal feature names like
+ // neon, v8a, etc.
+ // FIXME: We should consider emitting diagnostics here.
+ if (Feature.starts_with("no-"))
+ Ret.Features.push_back("-" + Feature.drop_front(3).str());
else
Ret.Features.push_back("+" + Feature.str());
}
}
+ FeatureBits.toLLVMFeatureList(Ret.Features);
return Ret;
}
@@ -1169,6 +1237,7 @@ AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
case CC_SwiftAsync:
case CC_PreserveMost:
case CC_PreserveAll:
+ case CC_PreserveNone:
case CC_OpenCLKernel:
case CC_AArch64VectorCall:
case CC_AArch64SVEPCS:
@@ -1433,6 +1502,11 @@ int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
return -1;
}
+bool AArch64TargetInfo::validatePointerAuthKey(
+ const llvm::APSInt &value) const {
+ return 0 <= value && value <= 3;
+}
+
bool AArch64TargetInfo::hasInt128Type() const { return true; }
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
@@ -1442,11 +1516,11 @@ AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
void AArch64leTargetInfo::setDataLayout() {
if (getTriple().isOSBinFormatMachO()) {
if(getTriple().isArch32Bit())
- resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
+ resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128-Fn32", "_");
else
- resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
+ resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128-Fn32", "_");
} else
- resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
+ resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32");
}
void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
@@ -1469,7 +1543,7 @@ void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
void AArch64beTargetInfo::setDataLayout() {
assert(!getTriple().isOSBinFormatMachO());
- resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
+ resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32");
}
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
@@ -1492,8 +1566,8 @@ WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
void WindowsARM64TargetInfo::setDataLayout() {
resetDataLayout(Triple.isOSBinFormatMachO()
- ? "e-m:o-i64:64-i128:128-n32:64-S128"
- : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
+ ? "e-m:o-i64:64-i128:128-n32:64-S128-Fn32"
+ : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128-Fn32",
Triple.isOSBinFormatMachO() ? "_" : "");
}
@@ -1505,15 +1579,19 @@ WindowsARM64TargetInfo::getBuiltinVaListKind() const {
TargetInfo::CallingConvCheckResult
WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
switch (CC) {
+ case CC_X86VectorCall:
+ if (getTriple().isWindowsArm64EC())
+ return CCCR_OK;
+ return CCCR_Ignore;
case CC_X86StdCall:
case CC_X86ThisCall:
case CC_X86FastCall:
- case CC_X86VectorCall:
return CCCR_Ignore;
case CC_C:
case CC_OpenCLKernel:
case CC_PreserveMost:
case CC_PreserveAll:
+ case CC_PreserveNone:
case CC_Swift:
case CC_SwiftAsync:
case CC_Win64:
@@ -1546,8 +1624,10 @@ MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
return CCK_MicrosoftWin64;
}
-unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
- unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);
+unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize,
+ bool HasNonWeakDef) const {
+ unsigned Align =
+ WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize, HasNonWeakDef);
// MSVC does size based alignment for arm64 based on alignment section in
// below document, replicate that to keep alignment consistent with object
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
index 9699222b0bf7..526f7f30a386 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
@@ -38,7 +38,6 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
bool HasSHA2 = false;
bool HasSHA3 = false;
bool HasSM4 = false;
- bool HasUnaligned = true;
bool HasFullFP16 = false;
bool HasDotProd = false;
bool HasFP16FML = false;
@@ -50,9 +49,11 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
bool HasMatMul = false;
bool HasBFloat16 = false;
bool HasSVE2 = false;
+ bool HasSVE2p1 = false;
bool HasSVE2AES = false;
bool HasSVE2SHA3 = false;
bool HasSVE2SM4 = false;
+ bool HasSVEB16B16 = false;
bool HasSVE2BitPerm = false;
bool HasMatmulFP64 = false;
bool HasMatmulFP32 = false;
@@ -71,6 +72,9 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
bool HasSME2 = false;
bool HasSMEF64F64 = false;
bool HasSMEI16I64 = false;
+ bool HasSMEF16F16 = false;
+ bool HasSMEB16B16 = false;
+ bool HasSME2p1 = false;
bool HasSB = false;
bool HasPredRes = false;
bool HasSSBS = false;
@@ -85,6 +89,7 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
bool HasGCS = false;
bool HasRCPC3 = false;
bool HasSMEFA64 = false;
+ bool HasPAuthLR = false;
const llvm::AArch64::ArchInfo *ArchInfo = &llvm::AArch64::ARMV8A;
@@ -107,10 +112,6 @@ public:
unsigned multiVersionSortPriority(StringRef Name) const override;
unsigned multiVersionFeatureCost() const override;
- bool
- initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
- StringRef CPU,
- const std::vector<std::string> &FeaturesVec) const override;
bool useFP16ConversionIntrinsics() const override {
return false;
}
@@ -155,7 +156,6 @@ public:
std::optional<std::pair<unsigned, unsigned>>
getVScaleRange(const LangOptions &LangOpts) const override;
bool doesFeatureAffectCodeGen(StringRef Name) const override;
- StringRef getFeatureDependencies(StringRef Name) const override;
bool validateCpuSupports(StringRef FeatureStr) const override;
bool hasFeature(StringRef Feature) const override;
void setFeatureEnabled(llvm::StringMap<bool> &Features, StringRef Name,
@@ -164,7 +164,7 @@ public:
DiagnosticsEngine &Diags) override;
ParsedTargetAttr parseTargetAttr(StringRef Str) const override;
bool supportsTargetAttributeTune() const override { return true; }
-
+ bool supportsCpuSupports() const override { return true; }
bool checkArithmeticFenceSupported() const override { return true; }
bool hasBFloat16Type() const override;
@@ -194,10 +194,17 @@ public:
int getEHDataRegisterNumber(unsigned RegNo) const override;
+ bool validatePointerAuthKey(const llvm::APSInt &value) const override;
+
const char *getBFloat16Mangling() const override { return "u6__bf16"; };
bool hasInt128Type() const override;
bool hasBitIntType() const override { return true; }
+
+ bool validateTarget(DiagnosticsEngine &Diags) const override;
+
+ bool validateGlobalRegisterVariable(StringRef RegName, unsigned RegSize,
+ bool &HasSizeMismatch) const override;
};
class LLVM_LIBRARY_VISIBILITY AArch64leTargetInfo : public AArch64TargetInfo {
@@ -237,7 +244,8 @@ public:
TargetInfo::CallingConvKind
getCallingConvKind(bool ClangABICompat4) const override;
- unsigned getMinGlobalAlign(uint64_t TypeSize) const override;
+ unsigned getMinGlobalAlign(uint64_t TypeSize,
+ bool HasNonWeakDef) const override;
};
// ARM64 MinGW target
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp
index 6f3a4908623d..3b748d0249d5 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp
@@ -17,6 +17,7 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/TargetBuiltins.h"
+#include "llvm/ADT/SmallString.h"
using namespace clang;
using namespace clang::targets;
@@ -186,9 +187,15 @@ bool AMDGPUTargetInfo::initFeatureMap(
return false;
// TODO: Should move this logic into TargetParser
- std::string ErrorMsg;
- if (!insertWaveSizeFeature(CPU, getTriple(), Features, ErrorMsg)) {
- Diags.Report(diag::err_invalid_feature_combination) << ErrorMsg;
+ auto HasError = insertWaveSizeFeature(CPU, getTriple(), Features);
+ switch (HasError.first) {
+ default:
+ break;
+ case llvm::AMDGPU::INVALID_FEATURE_COMBINATION:
+ Diags.Report(diag::err_invalid_feature_combination) << HasError.second;
+ return false;
+ case llvm::AMDGPU::UNSUPPORTED_TARGET_FEATURE:
+ Diags.Report(diag::err_opt_not_valid_on_target) << HasError.second;
return false;
}
@@ -231,7 +238,7 @@ AMDGPUTargetInfo::AMDGPUTargetInfo(const llvm::Triple &Triple,
HasLegalHalfType = true;
HasFloat16 = true;
- WavefrontSize = GPUFeatures & llvm::AMDGPU::FEATURE_WAVE32 ? 32 : 64;
+ WavefrontSize = (GPUFeatures & llvm::AMDGPU::FEATURE_WAVE32) ? 32 : 64;
AllowAMDGPUUnsafeFPAtomics = Opts.AllowAMDGPUUnsafeFPAtomics;
// Set pointer width and alignment for the generic address space.
@@ -274,30 +281,42 @@ void AMDGPUTargetInfo::getTargetDefines(const LangOptions &Opts,
else
Builder.defineMacro("__R600__");
- if (GPUKind != llvm::AMDGPU::GK_NONE) {
- StringRef CanonName = isAMDGCN(getTriple()) ?
- getArchNameAMDGCN(GPUKind) : getArchNameR600(GPUKind);
- Builder.defineMacro(Twine("__") + Twine(CanonName) + Twine("__"));
- // Emit macros for gfx family e.g. gfx906 -> __GFX9__, gfx1030 -> __GFX10___
- if (isAMDGCN(getTriple())) {
- assert(CanonName.starts_with("gfx") && "Invalid amdgcn canonical name");
- Builder.defineMacro(Twine("__") + Twine(CanonName.drop_back(2).upper()) +
- Twine("__"));
- }
- if (isAMDGCN(getTriple())) {
- Builder.defineMacro("__amdgcn_processor__",
- Twine("\"") + Twine(CanonName) + Twine("\""));
- Builder.defineMacro("__amdgcn_target_id__",
- Twine("\"") + Twine(*getTargetID()) + Twine("\""));
- for (auto F : getAllPossibleTargetIDFeatures(getTriple(), CanonName)) {
- auto Loc = OffloadArchFeatures.find(F);
- if (Loc != OffloadArchFeatures.end()) {
- std::string NewF = F.str();
- std::replace(NewF.begin(), NewF.end(), '-', '_');
- Builder.defineMacro(Twine("__amdgcn_feature_") + Twine(NewF) +
- Twine("__"),
- Loc->second ? "1" : "0");
- }
+ // Legacy HIP host code relies on these default attributes to be defined.
+ bool IsHIPHost = Opts.HIP && !Opts.CUDAIsDevice;
+ if (GPUKind == llvm::AMDGPU::GK_NONE && !IsHIPHost)
+ return;
+
+ llvm::SmallString<16> CanonName =
+ (isAMDGCN(getTriple()) ? getArchNameAMDGCN(GPUKind)
+ : getArchNameR600(GPUKind));
+
+ // Sanitize the name of generic targets.
+ // e.g. gfx10-1-generic -> gfx10_1_generic
+ if (GPUKind >= llvm::AMDGPU::GK_AMDGCN_GENERIC_FIRST &&
+ GPUKind <= llvm::AMDGPU::GK_AMDGCN_GENERIC_LAST) {
+ std::replace(CanonName.begin(), CanonName.end(), '-', '_');
+ }
+
+ Builder.defineMacro(Twine("__") + Twine(CanonName) + Twine("__"));
+ // Emit macros for gfx family e.g. gfx906 -> __GFX9__, gfx1030 -> __GFX10___
+ if (isAMDGCN(getTriple()) && !IsHIPHost) {
+ assert(StringRef(CanonName).starts_with("gfx") &&
+ "Invalid amdgcn canonical name");
+ StringRef CanonFamilyName = getArchFamilyNameAMDGCN(GPUKind);
+ Builder.defineMacro(Twine("__") + Twine(CanonFamilyName.upper()) +
+ Twine("__"));
+ Builder.defineMacro("__amdgcn_processor__",
+ Twine("\"") + Twine(CanonName) + Twine("\""));
+ Builder.defineMacro("__amdgcn_target_id__",
+ Twine("\"") + Twine(*getTargetID()) + Twine("\""));
+ for (auto F : getAllPossibleTargetIDFeatures(getTriple(), CanonName)) {
+ auto Loc = OffloadArchFeatures.find(F);
+ if (Loc != OffloadArchFeatures.end()) {
+ std::string NewF = F.str();
+ std::replace(NewF.begin(), NewF.end(), '-', '_');
+ Builder.defineMacro(Twine("__amdgcn_feature_") + Twine(NewF) +
+ Twine("__"),
+ Loc->second ? "1" : "0");
}
}
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h
index 90a1516ecdd2..94d9ba93ed22 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h
@@ -168,9 +168,7 @@ public:
return true;
}
- bool HasLeftParen = false;
- if (S.consume_front("{"))
- HasLeftParen = true;
+ bool HasLeftParen = S.consume_front("{");
if (S.empty())
return false;
if (S.front() != 'v' && S.front() != 's' && S.front() != 'a') {
@@ -196,9 +194,7 @@ public:
Name = S.data() - 1;
return true;
}
- bool HasLeftBracket = false;
- if (S.consume_front("["))
- HasLeftBracket = true;
+ bool HasLeftBracket = S.consume_front("[");
unsigned long long N;
if (S.empty() || consumeUnsignedInteger(S, 10, N))
return false;
@@ -418,8 +414,10 @@ public:
// value ~0.
uint64_t getNullPointerValue(LangAS AS) const override {
// FIXME: Also should handle region.
- return (AS == LangAS::opencl_local || AS == LangAS::opencl_private)
- ? ~0 : 0;
+ return (AS == LangAS::opencl_local || AS == LangAS::opencl_private ||
+ AS == LangAS::sycl_local || AS == LangAS::sycl_private)
+ ? ~0
+ : 0;
}
void setAuxTarget(const TargetInfo *Aux) override;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
index 55b71557452f..e55feedbd5c6 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
@@ -173,8 +173,7 @@ bool ARMTargetInfo::supportsThumb() const {
}
bool ARMTargetInfo::supportsThumb2() const {
- return CPUAttr.equals("6T2") ||
- (ArchVersion >= 7 && !CPUAttr.equals("8M_BASE"));
+ return CPUAttr == "6T2" || (ArchVersion >= 7 && CPUAttr != "8M_BASE");
}
StringRef ARMTargetInfo::getCPUAttr() const {
@@ -312,7 +311,9 @@ ARMTargetInfo::ARMTargetInfo(const llvm::Triple &Triple,
switch (Triple.getEnvironment()) {
case llvm::Triple::Android:
case llvm::Triple::GNUEABI:
+ case llvm::Triple::GNUEABIT64:
case llvm::Triple::GNUEABIHF:
+ case llvm::Triple::GNUEABIHFT64:
case llvm::Triple::MuslEABI:
case llvm::Triple::MuslEABIHF:
case llvm::Triple::OpenHOS:
@@ -509,7 +510,7 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
SHA2 = 0;
AES = 0;
DSP = 0;
- Unaligned = 1;
+ HasUnalignedAccess = true;
SoftFloat = false;
// Note that SoftFloatABI is initialized in our constructor.
HWDiv = 0;
@@ -576,7 +577,7 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
return false;
}
} else if (Feature == "+strict-align") {
- Unaligned = 0;
+ HasUnalignedAccess = false;
} else if (Feature == "+fp16") {
HW_FP |= HW_FP_HP;
} else if (Feature == "+fullfp16") {
@@ -785,7 +786,7 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__ARM_ARCH_PROFILE", "'" + CPUProfile + "'");
// ACLE 6.4.3 Unaligned access supported in hardware
- if (Unaligned)
+ if (HasUnalignedAccess)
Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
// ACLE 6.4.4 LDREX/STREX
@@ -1162,7 +1163,7 @@ bool ARMTargetInfo::validateAsmConstraint(
return true;
case 'j': // An immediate integer between 0 and 65535 (valid for MOVW)
// only available in ARMv6T2 and above
- if (CPUAttr.equals("6T2") || ArchVersion >= 7) {
+ if (CPUAttr == "6T2" || ArchVersion >= 7) {
Info.setRequiresImmediate(0, 65535);
return true;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h
index 9802eb01abf3..df9855a52e61 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h
@@ -61,26 +61,41 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetInfo : public TargetInfo {
llvm::ARM::ProfileKind ArchProfile;
unsigned ArchVersion;
+ LLVM_PREFERRED_TYPE(FPUMode)
unsigned FPU : 5;
+ LLVM_PREFERRED_TYPE(MVEMode)
unsigned MVE : 2;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsAAPCS : 1;
+ LLVM_PREFERRED_TYPE(HWDivMode)
unsigned HWDiv : 2;
// Initialized via features.
+ LLVM_PREFERRED_TYPE(bool)
unsigned SoftFloat : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned SoftFloatABI : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned CRC : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned Crypto : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned SHA2 : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned AES : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned DSP : 1;
- unsigned Unaligned : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned DotProd : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasMatMul : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned FPRegsDisabled : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasPAC : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasBTI : 1;
enum {
@@ -210,6 +225,10 @@ public:
bool hasBitIntType() const override { return true; }
const char *getBFloat16Mangling() const override { return "u6__bf16"; };
+
+ std::pair<unsigned, unsigned> hardwareInterferenceSizes() const override {
+ return std::make_pair(getTriple().isArch64Bit() ? 256 : 64, 64);
+ }
};
class LLVM_LIBRARY_VISIBILITY ARMleTargetInfo : public ARMTargetInfo {
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AVR.h b/contrib/llvm-project/clang/lib/Basic/Targets/AVR.h
index 9376c46cd98c..feeb04f37eeb 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AVR.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AVR.h
@@ -175,6 +175,10 @@ public:
std::optional<std::string> handleAsmEscapedChar(char EscChar) const override;
StringRef getABI() const override { return ABI; }
+ std::pair<unsigned, unsigned> hardwareInterferenceSizes() const override {
+ return std::make_pair(32, 32);
+ }
+
protected:
std::string CPU;
StringRef ABI;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/BPF.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/BPF.cpp
index e713e0847922..b5ba11a3bdca 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/BPF.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/BPF.cpp
@@ -22,7 +22,7 @@ using namespace clang::targets;
static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
{#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
-#include "clang/Basic/BuiltinsBPF.def"
+#include "clang/Basic/BuiltinsBPF.inc"
};
void BPFTargetInfo::getTargetDefines(const LangOptions &Opts,
@@ -35,6 +35,9 @@ void BPFTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__BPF_CPU_VERSION__", "0");
return;
}
+
+ Builder.defineMacro("__BPF_FEATURE_ADDR_SPACE_CAST");
+
if (CPU.empty() || CPU == "generic" || CPU == "v1") {
Builder.defineMacro("__BPF_CPU_VERSION__", "1");
return;
@@ -42,6 +45,7 @@ void BPFTargetInfo::getTargetDefines(const LangOptions &Opts,
std::string CpuVerNumStr = CPU.substr(1);
Builder.defineMacro("__BPF_CPU_VERSION__", CpuVerNumStr);
+ Builder.defineMacro("__BPF_FEATURE_MAY_GOTO");
int CpuVerNum = std::stoi(CpuVerNumStr);
if (CpuVerNum >= 2)
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/BPF.h b/contrib/llvm-project/clang/lib/Basic/Targets/BPF.h
index 489f29fc4fea..d19b37dd4df7 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/BPF.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/BPF.h
@@ -113,6 +113,10 @@ public:
StringRef CPUName(Name);
return isValidCPUName(CPUName);
}
+
+ std::pair<unsigned, unsigned> hardwareInterferenceSizes() const override {
+ return std::make_pair(32, 32);
+ }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.cpp
index 851f27dbb1e5..c8bf8b9234d2 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.cpp
@@ -308,7 +308,8 @@ bool CSKYTargetInfo::validateAsmConstraint(
}
}
-unsigned CSKYTargetInfo::getMinGlobalAlign(uint64_t Size) const {
+unsigned CSKYTargetInfo::getMinGlobalAlign(uint64_t Size,
+ bool HasNonWeakDef) const {
if (Size >= 32)
return 32;
return 0;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h b/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h
index 11404e37db36..94d4eeb9a1ff 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h
@@ -71,7 +71,7 @@ public:
bool isValidCPUName(StringRef Name) const override;
- unsigned getMinGlobalAlign(uint64_t) const override;
+ unsigned getMinGlobalAlign(uint64_t, bool HasNonWeakDef) const override;
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/DirectX.h b/contrib/llvm-project/clang/lib/Basic/Targets/DirectX.h
index acfcc8c47ba9..a084e2823453 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/DirectX.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/DirectX.h
@@ -53,7 +53,6 @@ public:
: TargetInfo(Triple) {
TLSSupported = false;
VLASupported = false;
- LongWidth = LongAlign = 64;
AddrSpaceMap = &DirectXAddrSpaceMap;
UseAddrSpaceMapMangling = true;
HasLegalHalfType = true;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp
index ac747e371fb4..0282ac812c30 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp
@@ -238,6 +238,18 @@ static constexpr CPUSuffix Suffixes[] = {
{{"hexagonv73"}, {"73"}},
};
+std::optional<unsigned> HexagonTargetInfo::getHexagonCPURev(StringRef Name) {
+ StringRef Arch = Name;
+ Arch.consume_front("hexagonv");
+ Arch.consume_back("t");
+
+ unsigned Val;
+ if (!Arch.getAsInteger(0, Val))
+ return Val;
+
+ return std::nullopt;
+}
+
const char *HexagonTargetInfo::getHexagonCPUSuffix(StringRef Name) {
const CPUSuffix *Item = llvm::find_if(
Suffixes, [Name](const CPUSuffix &S) { return S.Name == Name; });
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h b/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h
index cdb47dbae799..7f053ab7e488 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h
@@ -17,6 +17,7 @@
#include "clang/Basic/TargetOptions.h"
#include "llvm/Support/Compiler.h"
#include "llvm/TargetParser/Triple.h"
+#include <optional>
namespace clang {
namespace targets {
@@ -115,6 +116,7 @@ public:
std::string_view getClobbers() const override { return ""; }
static const char *getHexagonCPUSuffix(StringRef Name);
+ static std::optional<unsigned> getHexagonCPURev(StringRef Name);
bool isValidCPUName(StringRef Name) const override {
return getHexagonCPUSuffix(Name);
@@ -139,6 +141,14 @@ public:
}
bool hasBitIntType() const override { return true; }
+
+ std::pair<unsigned, unsigned> hardwareInterferenceSizes() const override {
+ std::optional<unsigned> Rev = getHexagonCPURev(CPU);
+
+ // V73 and later have 64-byte cache lines.
+ unsigned CacheLineSizeBytes = Rev >= 73U ? 64 : 32;
+ return std::make_pair(CacheLineSizeBytes, CacheLineSizeBytes);
+ }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.cpp
index 88537989a051..cb3fd12c48dd 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.cpp
@@ -200,7 +200,24 @@ void LoongArchTargetInfo::getTargetDefines(const LangOptions &Opts,
// Define __loongarch_arch.
StringRef ArchName = getCPU();
- Builder.defineMacro("__loongarch_arch", Twine('"') + ArchName + Twine('"'));
+ if (ArchName == "loongarch64") {
+ if (HasFeatureLSX) {
+ // TODO: As more features of the V1.1 ISA are supported, a unified "v1.1"
+ // arch feature set will be used to include all sub-features belonging to
+ // the V1.1 ISA version.
+ if (HasFeatureFrecipe)
+ Builder.defineMacro("__loongarch_arch",
+ Twine('"') + "la64v1.1" + Twine('"'));
+ else
+ Builder.defineMacro("__loongarch_arch",
+ Twine('"') + "la64v1.0" + Twine('"'));
+ } else {
+ Builder.defineMacro("__loongarch_arch",
+ Twine('"') + ArchName + Twine('"'));
+ }
+ } else {
+ Builder.defineMacro("__loongarch_arch", Twine('"') + ArchName + Twine('"'));
+ }
// Define __loongarch_tune.
StringRef TuneCPU = getTargetOpts().TuneCPU;
@@ -208,10 +225,16 @@ void LoongArchTargetInfo::getTargetDefines(const LangOptions &Opts,
TuneCPU = ArchName;
Builder.defineMacro("__loongarch_tune", Twine('"') + TuneCPU + Twine('"'));
- if (HasFeatureLSX)
+ if (HasFeatureLASX) {
+ Builder.defineMacro("__loongarch_simd_width", "256");
Builder.defineMacro("__loongarch_sx", Twine(1));
- if (HasFeatureLASX)
Builder.defineMacro("__loongarch_asx", Twine(1));
+ } else if (HasFeatureLSX) {
+ Builder.defineMacro("__loongarch_simd_width", "128");
+ Builder.defineMacro("__loongarch_sx", Twine(1));
+ }
+ if (HasFeatureFrecipe)
+ Builder.defineMacro("__loongarch_frecipe", Twine(1));
StringRef ABI = getABI();
if (ABI == "lp64d" || ABI == "lp64f" || ABI == "lp64s")
@@ -285,6 +308,10 @@ bool LoongArchTargetInfo::handleTargetFeatures(
HasFeatureLSX = true;
else if (Feature == "+lasx")
HasFeatureLASX = true;
+ else if (Feature == "-ual")
+ HasUnalignedAccess = false;
+ else if (Feature == "+frecipe")
+ HasFeatureFrecipe = true;
}
return true;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.h b/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.h
index 3313102492cb..c668ca7eca04 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.h
@@ -29,6 +29,7 @@ protected:
bool HasFeatureF;
bool HasFeatureLSX;
bool HasFeatureLASX;
+ bool HasFeatureFrecipe;
public:
LoongArchTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
@@ -37,6 +38,7 @@ public:
HasFeatureF = false;
HasFeatureLSX = false;
HasFeatureLASX = false;
+ HasFeatureFrecipe = false;
LongDoubleWidth = 128;
LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::IEEEquad();
@@ -132,7 +134,8 @@ public:
: LoongArchTargetInfo(Triple, Opts) {
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
IntMaxType = Int64Type = SignedLong;
- resetDataLayout("e-m:e-p:64:64-i64:64-i128:128-n64-S128");
+ HasUnalignedAccess = true;
+ resetDataLayout("e-m:e-p:64:64-i64:64-i128:128-n32:64-S128");
// TODO: select appropriate ABI.
setABI("lp64d");
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/M68k.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/M68k.cpp
index 1b7e0a7f32c9..8b8bf97d6f99 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/M68k.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/M68k.cpp
@@ -127,16 +127,21 @@ bool M68kTargetInfo::hasFeature(StringRef Feature) const {
const char *const M68kTargetInfo::GCCRegNames[] = {
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
- "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
+ "a0", "a1", "a2", "a3", "a4", "a5", "a6", "sp",
"pc"};
ArrayRef<const char *> M68kTargetInfo::getGCCRegNames() const {
return llvm::ArrayRef(GCCRegNames);
}
+const TargetInfo::GCCRegAlias M68kTargetInfo::GCCRegAliases[] = {
+ {{"bp"}, "a5"},
+ {{"fp"}, "a6"},
+ {{"usp", "ssp", "isp", "a7"}, "sp"},
+};
+
ArrayRef<TargetInfo::GCCRegAlias> M68kTargetInfo::getGCCRegAliases() const {
- // No aliases.
- return std::nullopt;
+ return llvm::ArrayRef(GCCRegAliases);
}
bool M68kTargetInfo::validateAsmConstraint(
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/M68k.h b/contrib/llvm-project/clang/lib/Basic/Targets/M68k.h
index a9c262e62fba..b732add77e03 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/M68k.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/M68k.h
@@ -25,6 +25,7 @@ namespace targets {
class LLVM_LIBRARY_VISIBILITY M68kTargetInfo : public TargetInfo {
static const char *const GCCRegNames[];
+ static const TargetInfo::GCCRegAlias GCCRegAliases[];
enum CPUKind {
CK_Unknown,
@@ -55,6 +56,10 @@ public:
BuiltinVaListKind getBuiltinVaListKind() const override;
bool setCPU(const std::string &Name) override;
CallingConvCheckResult checkCallingConvention(CallingConv CC) const override;
+
+ std::pair<unsigned, unsigned> hardwareInterferenceSizes() const override {
+ return std::make_pair(32, 32);
+ }
};
} // namespace targets
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Mips.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/Mips.cpp
index 3a65f53c5248..174bc9d2ab99 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Mips.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Mips.cpp
@@ -273,6 +273,34 @@ bool MipsTargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
Diags.Report(diag::err_mips_fp64_req) << "-mfp64";
return false;
}
+ // FPXX requires mips2+
+ if (FPMode == FPXX && CPU == "mips1") {
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-mfpxx" << CPU;
+ return false;
+ }
+ // -mmsa with -msoft-float makes nonsense
+ if (FloatABI == SoftFloat && HasMSA) {
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-msoft-float"
+ << "-mmsa";
+ return false;
+ }
+ // Option -mmsa permitted on Mips32 iff revision 2 or higher is present
+ if (HasMSA && (CPU == "mips1" || CPU == "mips2" || getISARev() < 2) &&
+ ABI == "o32") {
+ Diags.Report(diag::err_mips_fp64_req) << "-mmsa";
+ return false;
+ }
+ // MSA requires FP64
+ if (FPMode == FPXX && HasMSA) {
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-mfpxx"
+ << "-mmsa";
+ return false;
+ }
+ if (FPMode == FP32 && HasMSA) {
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-mfp32"
+ << "-mmsa";
+ return false;
+ }
return true;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Mips.h b/contrib/llvm-project/clang/lib/Basic/Targets/Mips.h
index 23d4e1b598fa..b6f110249fa7 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Mips.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Mips.h
@@ -85,8 +85,13 @@ public:
return CPU == "mips32r6" || CPU == "mips64r6";
}
- bool isFP64Default() const {
- return CPU == "mips32r6" || ABI == "n32" || ABI == "n64" || ABI == "64";
+ enum FPModeEnum getDefaultFPMode() const {
+ if (CPU == "mips32r6" || ABI == "n32" || ABI == "n64" || ABI == "64")
+ return FP64;
+ else if (CPU == "mips1")
+ return FP32;
+ else
+ return FPXX;
}
bool isNan2008() const override { return IsNan2008; }
@@ -315,9 +320,11 @@ public:
IsSingleFloat = false;
FloatABI = HardFloat;
DspRev = NoDSP;
- FPMode = isFP64Default() ? FP64 : FPXX;
NoOddSpreg = false;
+ FPMode = getDefaultFPMode();
bool OddSpregGiven = false;
+ bool StrictAlign = false;
+ bool FpGiven = false;
for (const auto &Feature : Features) {
if (Feature == "+single-float")
@@ -328,6 +335,12 @@ public:
IsMips16 = true;
else if (Feature == "+micromips")
IsMicromips = true;
+ else if (Feature == "+mips32r6" || Feature == "+mips64r6")
+ HasUnalignedAccess = true;
+ // We cannot be sure that the order of strict-align vs mips32r6.
+ // Thus we need an extra variable here.
+ else if (Feature == "+strict-align")
+ StrictAlign = true;
else if (Feature == "+dsp")
DspRev = std::max(DspRev, DSP1);
else if (Feature == "+dspr2")
@@ -336,13 +349,16 @@ public:
HasMSA = true;
else if (Feature == "+nomadd4")
DisableMadd4 = true;
- else if (Feature == "+fp64")
+ else if (Feature == "+fp64") {
FPMode = FP64;
- else if (Feature == "-fp64")
+ FpGiven = true;
+ } else if (Feature == "-fp64") {
FPMode = FP32;
- else if (Feature == "+fpxx")
+ FpGiven = true;
+ } else if (Feature == "+fpxx") {
FPMode = FPXX;
- else if (Feature == "+nan2008")
+ FpGiven = true;
+ } else if (Feature == "+nan2008")
IsNan2008 = true;
else if (Feature == "-nan2008")
IsNan2008 = false;
@@ -366,6 +382,14 @@ public:
if (FPMode == FPXX && !OddSpregGiven)
NoOddSpreg = true;
+ if (StrictAlign)
+ HasUnalignedAccess = false;
+
+ if (HasMSA && !FpGiven) {
+ FPMode = FP64;
+ Features.push_back("+fp64");
+ }
+
setDataLayout();
return true;
@@ -421,6 +445,10 @@ public:
bool validateTarget(DiagnosticsEngine &Diags) const override;
bool hasBitIntType() const override { return true; }
+
+ std::pair<unsigned, unsigned> hardwareInterferenceSizes() const override {
+ return std::make_pair(32, 32);
+ }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp
index c0b5db795e27..43b653dc52ce 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp
@@ -59,7 +59,11 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
// Define available target features
// These must be defined in sorted order!
NoAsmVariants = true;
- GPU = CudaArch::SM_20;
+ GPU = OffloadArch::UNUSED;
+
+ // PTX supports f16 as a fundamental type.
+ HasLegalHalfType = true;
+ HasFloat16 = true;
if (TargetPointerWidth == 32)
resetDataLayout("e-p:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64");
@@ -115,7 +119,8 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
LongAlign = HostTarget->getLongAlign();
LongLongWidth = HostTarget->getLongLongWidth();
LongLongAlign = HostTarget->getLongLongAlign();
- MinGlobalAlign = HostTarget->getMinGlobalAlign(/* TypeSize = */ 0);
+ MinGlobalAlign = HostTarget->getMinGlobalAlign(/* TypeSize = */ 0,
+ /* HasNonWeakDef = */ true);
NewAlign = HostTarget->getNewAlign();
DefaultAlignForAttributeAligned =
HostTarget->getDefaultAlignForAttributeAligned();
@@ -168,107 +173,119 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__PTX__");
Builder.defineMacro("__NVPTX__");
+
+ // Skip setting architecture dependent macros if undefined.
+ if (GPU == OffloadArch::UNUSED && !HostTarget)
+ return;
+
if (Opts.CUDAIsDevice || Opts.OpenMPIsTargetDevice || !HostTarget) {
// Set __CUDA_ARCH__ for the GPU specified.
std::string CUDAArchCode = [this] {
switch (GPU) {
- case CudaArch::GFX600:
- case CudaArch::GFX601:
- case CudaArch::GFX602:
- case CudaArch::GFX700:
- case CudaArch::GFX701:
- case CudaArch::GFX702:
- case CudaArch::GFX703:
- case CudaArch::GFX704:
- case CudaArch::GFX705:
- case CudaArch::GFX801:
- case CudaArch::GFX802:
- case CudaArch::GFX803:
- case CudaArch::GFX805:
- case CudaArch::GFX810:
- case CudaArch::GFX900:
- case CudaArch::GFX902:
- case CudaArch::GFX904:
- case CudaArch::GFX906:
- case CudaArch::GFX908:
- case CudaArch::GFX909:
- case CudaArch::GFX90a:
- case CudaArch::GFX90c:
- case CudaArch::GFX940:
- case CudaArch::GFX941:
- case CudaArch::GFX942:
- case CudaArch::GFX1010:
- case CudaArch::GFX1011:
- case CudaArch::GFX1012:
- case CudaArch::GFX1013:
- case CudaArch::GFX1030:
- case CudaArch::GFX1031:
- case CudaArch::GFX1032:
- case CudaArch::GFX1033:
- case CudaArch::GFX1034:
- case CudaArch::GFX1035:
- case CudaArch::GFX1036:
- case CudaArch::GFX1100:
- case CudaArch::GFX1101:
- case CudaArch::GFX1102:
- case CudaArch::GFX1103:
- case CudaArch::GFX1150:
- case CudaArch::GFX1151:
- case CudaArch::GFX1200:
- case CudaArch::GFX1201:
- case CudaArch::Generic:
- case CudaArch::LAST:
+ case OffloadArch::GFX600:
+ case OffloadArch::GFX601:
+ case OffloadArch::GFX602:
+ case OffloadArch::GFX700:
+ case OffloadArch::GFX701:
+ case OffloadArch::GFX702:
+ case OffloadArch::GFX703:
+ case OffloadArch::GFX704:
+ case OffloadArch::GFX705:
+ case OffloadArch::GFX801:
+ case OffloadArch::GFX802:
+ case OffloadArch::GFX803:
+ case OffloadArch::GFX805:
+ case OffloadArch::GFX810:
+ case OffloadArch::GFX9_GENERIC:
+ case OffloadArch::GFX900:
+ case OffloadArch::GFX902:
+ case OffloadArch::GFX904:
+ case OffloadArch::GFX906:
+ case OffloadArch::GFX908:
+ case OffloadArch::GFX909:
+ case OffloadArch::GFX90a:
+ case OffloadArch::GFX90c:
+ case OffloadArch::GFX940:
+ case OffloadArch::GFX941:
+ case OffloadArch::GFX942:
+ case OffloadArch::GFX10_1_GENERIC:
+ case OffloadArch::GFX1010:
+ case OffloadArch::GFX1011:
+ case OffloadArch::GFX1012:
+ case OffloadArch::GFX1013:
+ case OffloadArch::GFX10_3_GENERIC:
+ case OffloadArch::GFX1030:
+ case OffloadArch::GFX1031:
+ case OffloadArch::GFX1032:
+ case OffloadArch::GFX1033:
+ case OffloadArch::GFX1034:
+ case OffloadArch::GFX1035:
+ case OffloadArch::GFX1036:
+ case OffloadArch::GFX11_GENERIC:
+ case OffloadArch::GFX1100:
+ case OffloadArch::GFX1101:
+ case OffloadArch::GFX1102:
+ case OffloadArch::GFX1103:
+ case OffloadArch::GFX1150:
+ case OffloadArch::GFX1151:
+ case OffloadArch::GFX1152:
+ case OffloadArch::GFX12_GENERIC:
+ case OffloadArch::GFX1200:
+ case OffloadArch::GFX1201:
+ case OffloadArch::AMDGCNSPIRV:
+ case OffloadArch::Generic:
+ case OffloadArch::LAST:
break;
- case CudaArch::UNUSED:
- case CudaArch::UNKNOWN:
+ case OffloadArch::UNKNOWN:
assert(false && "No GPU arch when compiling CUDA device code.");
return "";
- case CudaArch::SM_20:
+ case OffloadArch::UNUSED:
+ case OffloadArch::SM_20:
return "200";
- case CudaArch::SM_21:
+ case OffloadArch::SM_21:
return "210";
- case CudaArch::SM_30:
+ case OffloadArch::SM_30:
return "300";
- case CudaArch::SM_32:
+ case OffloadArch::SM_32_:
return "320";
- case CudaArch::SM_35:
+ case OffloadArch::SM_35:
return "350";
- case CudaArch::SM_37:
+ case OffloadArch::SM_37:
return "370";
- case CudaArch::SM_50:
+ case OffloadArch::SM_50:
return "500";
- case CudaArch::SM_52:
+ case OffloadArch::SM_52:
return "520";
- case CudaArch::SM_53:
+ case OffloadArch::SM_53:
return "530";
- case CudaArch::SM_60:
+ case OffloadArch::SM_60:
return "600";
- case CudaArch::SM_61:
+ case OffloadArch::SM_61:
return "610";
- case CudaArch::SM_62:
+ case OffloadArch::SM_62:
return "620";
- case CudaArch::SM_70:
+ case OffloadArch::SM_70:
return "700";
- case CudaArch::SM_72:
+ case OffloadArch::SM_72:
return "720";
- case CudaArch::SM_75:
+ case OffloadArch::SM_75:
return "750";
- case CudaArch::SM_80:
+ case OffloadArch::SM_80:
return "800";
- case CudaArch::SM_86:
+ case OffloadArch::SM_86:
return "860";
- case CudaArch::SM_87:
+ case OffloadArch::SM_87:
return "870";
- case CudaArch::SM_89:
+ case OffloadArch::SM_89:
return "890";
- case CudaArch::SM_90:
- case CudaArch::SM_90a:
+ case OffloadArch::SM_90:
+ case OffloadArch::SM_90a:
return "900";
}
- llvm_unreachable("unhandled CudaArch");
+ llvm_unreachable("unhandled OffloadArch");
}();
Builder.defineMacro("__CUDA_ARCH__", CUDAArchCode);
- if (GPU == CudaArch::SM_90a)
+ if (GPU == OffloadArch::SM_90a)
Builder.defineMacro("__CUDA_ARCH_FEAT_SM90_ALL", "1");
}
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h
index 20d76b702a94..25dc979d882f 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h
@@ -62,7 +62,7 @@ static const int NVPTXDWARFAddrSpaceMap[] = {
class LLVM_LIBRARY_VISIBILITY NVPTXTargetInfo : public TargetInfo {
static const char *const GCCRegNames[];
- CudaArch GPU;
+ OffloadArch GPU;
uint32_t PTXVersion;
std::unique_ptr<TargetInfo> HostTarget;
@@ -75,11 +75,14 @@ public:
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
+ bool useFP16ConversionIntrinsics() const override { return false; }
+
bool
initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
StringRef CPU,
const std::vector<std::string> &FeaturesVec) const override {
- Features[CudaArchToString(GPU)] = true;
+ if (GPU != OffloadArch::UNUSED)
+ Features[OffloadArchToString(GPU)] = true;
Features["ptx" + std::to_string(PTXVersion)] = true;
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
@@ -104,6 +107,7 @@ public:
case 'l':
case 'f':
case 'd':
+ case 'q':
Info.setAllowsRegister();
return true;
}
@@ -115,23 +119,22 @@ public:
}
BuiltinVaListKind getBuiltinVaListKind() const override {
- // FIXME: implement
- return TargetInfo::CharPtrBuiltinVaList;
+ return TargetInfo::VoidPtrBuiltinVaList;
}
bool isValidCPUName(StringRef Name) const override {
- return StringToCudaArch(Name) != CudaArch::UNKNOWN;
+ return StringToOffloadArch(Name) != OffloadArch::UNKNOWN;
}
void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override {
- for (int i = static_cast<int>(CudaArch::SM_20);
- i < static_cast<int>(CudaArch::Generic); ++i)
- Values.emplace_back(CudaArchToString(static_cast<CudaArch>(i)));
+ for (int i = static_cast<int>(OffloadArch::SM_20);
+ i < static_cast<int>(OffloadArch::Generic); ++i)
+ Values.emplace_back(OffloadArchToString(static_cast<OffloadArch>(i)));
}
bool setCPU(const std::string &Name) override {
- GPU = StringToCudaArch(Name);
- return GPU != CudaArch::UNKNOWN;
+ GPU = StringToOffloadArch(Name);
+ return GPU != OffloadArch::UNKNOWN;
}
void setSupportedOpenCLOpts() override {
@@ -182,7 +185,7 @@ public:
bool hasBitIntType() const override { return true; }
bool hasBFloat16Type() const override { return true; }
- CudaArch getGPU() const { return GPU; }
+ OffloadArch getGPU() const { return GPU; }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h
index 4366c1149e40..357c1965057c 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h
@@ -337,6 +337,10 @@ protected:
Builder.defineMacro("_GNU_SOURCE");
if (this->HasFloat128)
Builder.defineMacro("__FLOAT128__");
+ if (Triple.isTime64ABI()) {
+ Builder.defineMacro("_FILE_OFFSET_BITS", "64");
+ Builder.defineMacro("_TIME_BITS", "64");
+ }
}
public:
@@ -868,6 +872,7 @@ protected:
public:
FuchsiaTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: OSTargetInfo<Target>(Triple, Opts) {
+ this->WIntType = TargetInfo::UnsignedInt;
this->MCountName = "__mcount";
this->TheCXXABI.set(TargetCXXABI::Fuchsia);
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
index 41935abfb65d..9ff54083c923 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
@@ -79,6 +79,8 @@ bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasPrivileged = true;
} else if (Feature == "+aix-small-local-exec-tls") {
HasAIXSmallLocalExecTLS = true;
+ } else if (Feature == "+aix-small-local-dynamic-tls") {
+ HasAIXSmallLocalDynamicTLS = true;
} else if (Feature == "+isa-v206-instructions") {
IsISA2_06 = true;
} else if (Feature == "+isa-v207-instructions") {
@@ -89,6 +91,10 @@ bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
IsISA3_1 = true;
} else if (Feature == "+quadword-atomics") {
HasQuadwordAtomics = true;
+ } else if (Feature == "+aix-shared-lib-tls-model-opt") {
+ HasAIXShLibTLSModelOpt = true;
+ } else if (Feature == "+longcall") {
+ UseLongCalls = true;
}
// TODO: Finish this list and add an assert that we've handled them
// all.
@@ -379,6 +385,8 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("_ARCH_PWR9");
if (ArchDefs & ArchDefinePwr10)
Builder.defineMacro("_ARCH_PWR10");
+ if (ArchDefs & ArchDefinePwr11)
+ Builder.defineMacro("_ARCH_PWR11");
if (ArchDefs & ArchDefineA2)
Builder.defineMacro("_ARCH_A2");
if (ArchDefs & ArchDefineE500)
@@ -442,19 +450,44 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
// _CALL_DARWIN
}
-// Handle explicit options being passed to the compiler here: if we've
-// explicitly turned off vsx and turned on any of:
-// - power8-vector
-// - direct-move
-// - float128
-// - power9-vector
-// - paired-vector-memops
-// - mma
-// - power10-vector
+// Handle explicit options being passed to the compiler here:
+// - if we've explicitly turned off vsx and turned on any of:
+// - power8-vector
+// - direct-move
+// - float128
+// - power9-vector
+// - paired-vector-memops
+// - mma
+// - power10-vector
+// - if we've explicitly turned on vsx and turned off altivec.
+// - if we've explicitly turned off hard-float and turned on altivec.
// then go ahead and error since the customer has expressed an incompatible
// set of options.
static bool ppcUserFeaturesCheck(DiagnosticsEngine &Diags,
const std::vector<std::string> &FeaturesVec) {
+ // Cannot allow soft-float with Altivec.
+ if (llvm::is_contained(FeaturesVec, "-hard-float") &&
+ llvm::is_contained(FeaturesVec, "+altivec")) {
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-msoft-float"
+ << "-maltivec";
+ return false;
+ }
+
+ // Cannot allow soft-float with VSX.
+ if (llvm::is_contained(FeaturesVec, "-hard-float") &&
+ llvm::is_contained(FeaturesVec, "+vsx")) {
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-msoft-float"
+ << "-mvsx";
+ return false;
+ }
+
+ // Cannot allow VSX with no Altivec.
+ if (llvm::is_contained(FeaturesVec, "+vsx") &&
+ llvm::is_contained(FeaturesVec, "-altivec")) {
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-mvsx"
+ << "-mno-altivec";
+ return false;
+ }
// vsx was not explicitly turned off.
if (!llvm::is_contained(FeaturesVec, "-vsx"))
@@ -548,9 +581,13 @@ bool PPCTargetInfo::initFeatureMap(
// Privileged instructions are off by default.
Features["privileged"] = false;
- // The code generated by the -maix-small-local-exec-tls option is turned
- // off by default.
+ // The code generated by the -maix-small-local-[exec|dynamic]-tls option is
+ // turned off by default.
Features["aix-small-local-exec-tls"] = false;
+ Features["aix-small-local-dynamic-tls"] = false;
+
+ // Turn off TLS model opt by default.
+ Features["aix-shared-lib-tls-model-opt"] = false;
Features["spe"] = llvm::StringSwitch<bool>(CPU)
.Case("8548", true)
@@ -587,10 +624,17 @@ bool PPCTargetInfo::initFeatureMap(
addP10SpecificFeatures(Features);
}
- // Future CPU should include all of the features of Power 10 as well as any
+ // Power11 includes all the same features as Power10 plus any features
+ // specific to the Power11 core.
+ if (CPU == "pwr11" || CPU == "power11") {
+ initFeatureMap(Features, Diags, "pwr10", FeaturesVec);
+ addP11SpecificFeatures(Features);
+ }
+
+ // Future CPU should include all of the features of Power 11 as well as any
// additional features (yet to be determined) specific to it.
if (CPU == "future") {
- initFeatureMap(Features, Diags, "pwr10", FeaturesVec);
+ initFeatureMap(Features, Diags, "pwr11", FeaturesVec);
addFutureSpecificFeatures(Features);
}
@@ -646,14 +690,6 @@ bool PPCTargetInfo::initFeatureMap(
return false;
}
- if (llvm::is_contained(FeaturesVec, "+aix-small-local-exec-tls")) {
- if (!getTriple().isOSAIX() || !getTriple().isArch64Bit()) {
- Diags.Report(diag::err_opt_not_valid_on_target)
- << "-maix-small-local-exec-tls";
- return false;
- }
- }
-
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
@@ -669,6 +705,10 @@ void PPCTargetInfo::addP10SpecificFeatures(
Features["isa-v31-instructions"] = true;
}
+// Add any Power11 specific features.
+void PPCTargetInfo::addP11SpecificFeatures(
+ llvm::StringMap<bool> &Features) const {}
+
// Add features specific to the "Future" CPU.
void PPCTargetInfo::addFutureSpecificFeatures(
llvm::StringMap<bool> &Features) const {}
@@ -696,11 +736,14 @@ bool PPCTargetInfo::hasFeature(StringRef Feature) const {
.Case("rop-protect", HasROPProtect)
.Case("privileged", HasPrivileged)
.Case("aix-small-local-exec-tls", HasAIXSmallLocalExecTLS)
+ .Case("aix-small-local-dynamic-tls", HasAIXSmallLocalDynamicTLS)
.Case("isa-v206-instructions", IsISA2_06)
.Case("isa-v207-instructions", IsISA2_07)
.Case("isa-v30-instructions", IsISA3_0)
.Case("isa-v31-instructions", IsISA3_1)
.Case("quadword-atomics", HasQuadwordAtomics)
+ .Case("aix-shared-lib-tls-model-opt", HasAIXShLibTLSModelOpt)
+ .Case("longcall", UseLongCalls)
.Default(false);
}
@@ -840,17 +883,17 @@ ArrayRef<TargetInfo::AddlRegName> PPCTargetInfo::getGCCAddlRegNames() const {
}
static constexpr llvm::StringLiteral ValidCPUNames[] = {
- {"generic"}, {"440"}, {"450"}, {"601"}, {"602"},
- {"603"}, {"603e"}, {"603ev"}, {"604"}, {"604e"},
- {"620"}, {"630"}, {"g3"}, {"7400"}, {"g4"},
- {"7450"}, {"g4+"}, {"750"}, {"8548"}, {"970"},
- {"g5"}, {"a2"}, {"e500"}, {"e500mc"}, {"e5500"},
- {"power3"}, {"pwr3"}, {"power4"}, {"pwr4"}, {"power5"},
- {"pwr5"}, {"power5x"}, {"pwr5x"}, {"power6"}, {"pwr6"},
- {"power6x"}, {"pwr6x"}, {"power7"}, {"pwr7"}, {"power8"},
- {"pwr8"}, {"power9"}, {"pwr9"}, {"power10"}, {"pwr10"},
- {"powerpc"}, {"ppc"}, {"ppc32"}, {"powerpc64"}, {"ppc64"},
- {"powerpc64le"}, {"ppc64le"}, {"future"}};
+ {"generic"}, {"440"}, {"450"}, {"601"}, {"602"},
+ {"603"}, {"603e"}, {"603ev"}, {"604"}, {"604e"},
+ {"620"}, {"630"}, {"g3"}, {"7400"}, {"g4"},
+ {"7450"}, {"g4+"}, {"750"}, {"8548"}, {"970"},
+ {"g5"}, {"a2"}, {"e500"}, {"e500mc"}, {"e5500"},
+ {"power3"}, {"pwr3"}, {"power4"}, {"pwr4"}, {"power5"},
+ {"pwr5"}, {"power5x"}, {"pwr5x"}, {"power6"}, {"pwr6"},
+ {"power6x"}, {"pwr6x"}, {"power7"}, {"pwr7"}, {"power8"},
+ {"pwr8"}, {"power9"}, {"pwr9"}, {"power10"}, {"pwr10"},
+ {"power11"}, {"pwr11"}, {"powerpc"}, {"ppc"}, {"ppc32"},
+ {"powerpc64"}, {"ppc64"}, {"powerpc64le"}, {"ppc64le"}, {"future"}};
bool PPCTargetInfo::isValidCPUName(StringRef Name) const {
return llvm::is_contained(ValidCPUNames, Name);
@@ -878,3 +921,41 @@ ArrayRef<Builtin::Info> PPCTargetInfo::getTargetBuiltins() const {
return llvm::ArrayRef(BuiltinInfo,
clang::PPC::LastTSBuiltin - Builtin::FirstTSBuiltin);
}
+
+bool PPCTargetInfo::validateCpuSupports(StringRef FeatureStr) const {
+ llvm::Triple Triple = getTriple();
+ if (Triple.isOSAIX()) {
+#define PPC_AIX_FEATURE(NAME, DESC, SUPPORT_METHOD, INDEX, MASK, COMP_OP, \
+ VALUE) \
+ .Case(NAME, true)
+ return llvm::StringSwitch<bool>(FeatureStr)
+#include "llvm/TargetParser/PPCTargetParser.def"
+ .Default(false);
+ }
+
+ assert(Triple.isOSLinux() &&
+ "__builtin_cpu_supports() is only supported for AIX and Linux.");
+
+#define PPC_LNX_FEATURE(NAME, DESC, ENUMNAME, ENUMVAL, HWCAPN) .Case(NAME, true)
+ return llvm::StringSwitch<bool>(FeatureStr)
+#include "llvm/TargetParser/PPCTargetParser.def"
+ .Default(false);
+}
+
+bool PPCTargetInfo::validateCpuIs(StringRef CPUName) const {
+ llvm::Triple Triple = getTriple();
+ assert((Triple.isOSAIX() || Triple.isOSLinux()) &&
+ "__builtin_cpu_is() is only supported for AIX and Linux.");
+
+#define PPC_CPU(NAME, Linux_SUPPORT_METHOD, LinuxID, AIX_SUPPORT_METHOD, \
+ AIXID) \
+ .Case(NAME, {Linux_SUPPORT_METHOD, AIX_SUPPORT_METHOD})
+
+  std::pair<unsigned, unsigned> SupportMethod =
+      llvm::StringSwitch<std::pair<unsigned, unsigned>>(CPUName)
+#include "llvm/TargetParser/PPCTargetParser.def"
+          .Default({BUILTIN_PPC_UNSUPPORTED, BUILTIN_PPC_UNSUPPORTED});
+  return Triple.isOSLinux()
+             ? (SupportMethod.first != BUILTIN_PPC_UNSUPPORTED)
+             : (SupportMethod.second != BUILTIN_PPC_UNSUPPORTED);
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
index 4d62673ba7fb..6d5d8dd54d01 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
@@ -44,8 +44,9 @@ class LLVM_LIBRARY_VISIBILITY PPCTargetInfo : public TargetInfo {
ArchDefinePwr8 = 1 << 12,
ArchDefinePwr9 = 1 << 13,
ArchDefinePwr10 = 1 << 14,
- ArchDefineFuture = 1 << 15,
- ArchDefineA2 = 1 << 16,
+ ArchDefinePwr11 = 1 << 15,
+ ArchDefineFuture = 1 << 16,
+ ArchDefineA2 = 1 << 17,
ArchDefineE500 = 1 << 18
} ArchDefineTypes;
@@ -61,6 +62,7 @@ class LLVM_LIBRARY_VISIBILITY PPCTargetInfo : public TargetInfo {
bool HasROPProtect = false;
bool HasPrivileged = false;
bool HasAIXSmallLocalExecTLS = false;
+ bool HasAIXSmallLocalDynamicTLS = false;
bool HasVSX = false;
bool UseCRBits = false;
bool HasP8Vector = false;
@@ -80,6 +82,8 @@ class LLVM_LIBRARY_VISIBILITY PPCTargetInfo : public TargetInfo {
bool IsISA3_0 = false;
bool IsISA3_1 = false;
bool HasQuadwordAtomics = false;
+ bool HasAIXShLibTLSModelOpt = false;
+ bool UseLongCalls = false;
protected:
std::string ABI;
@@ -92,6 +96,7 @@ public:
LongDoubleFormat = &llvm::APFloat::PPCDoubleDouble();
HasStrictFP = true;
HasIbm128 = true;
+ HasUnalignedAccess = true;
}
// Set the language option for altivec based on our value.
@@ -162,11 +167,16 @@ public:
ArchDefinePwr7 | ArchDefinePwr6 | ArchDefinePwr5x |
ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
ArchDefinePpcsq)
+ .Cases("power11", "pwr11",
+ ArchDefinePwr11 | ArchDefinePwr10 | ArchDefinePwr9 |
+ ArchDefinePwr8 | ArchDefinePwr7 | ArchDefinePwr6 |
+ ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
.Case("future",
- ArchDefineFuture | ArchDefinePwr10 | ArchDefinePwr9 |
- ArchDefinePwr8 | ArchDefinePwr7 | ArchDefinePwr6 |
- ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
- ArchDefinePpcgr | ArchDefinePpcsq)
+ ArchDefineFuture | ArchDefinePwr11 | ArchDefinePwr10 |
+ ArchDefinePwr9 | ArchDefinePwr8 | ArchDefinePwr7 |
+ ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 |
+ ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
.Cases("8548", "e500", ArchDefineE500)
.Default(ArchDefineNone);
}
@@ -188,6 +198,7 @@ public:
const std::vector<std::string> &FeaturesVec) const override;
void addP10SpecificFeatures(llvm::StringMap<bool> &Features) const;
+ void addP11SpecificFeatures(llvm::StringMap<bool> &Features) const;
void addFutureSpecificFeatures(llvm::StringMap<bool> &Features) const;
bool handleTargetFeatures(std::vector<std::string> &Features,
@@ -302,9 +313,11 @@ public:
// asm statements)
Info.setAllowsMemory();
break;
- case 'R': // AIX TOC entry
case 'a': // Address operand that is an indexed or indirect from a
// register (`p' is preferable for asm statements)
+ // TODO: Add full support for this constraint
+ return false;
+ case 'R': // AIX TOC entry
case 'S': // Constant suitable as a 64-bit mask operand
case 'T': // Constant suitable as a 32-bit mask operand
case 'U': // System V Release 4 small data area reference
@@ -357,8 +370,30 @@ public:
bool hasBitIntType() const override { return true; }
bool isSPRegName(StringRef RegName) const override {
- return RegName.equals("r1") || RegName.equals("x1");
+ return RegName == "r1" || RegName == "x1";
+ }
+
+ // We support __builtin_cpu_supports/__builtin_cpu_is on targets that
+ // have Glibc since it is Glibc that provides the HWCAP[2] in the auxv.
+ static constexpr int MINIMUM_AIX_OS_MAJOR = 7;
+ static constexpr int MINIMUM_AIX_OS_MINOR = 2;
+ bool supportsCpuSupports() const override {
+ llvm::Triple Triple = getTriple();
+ // AIX 7.2 is the minimum requirement to support __builtin_cpu_supports().
+ return Triple.isOSGlibc() ||
+ (Triple.isOSAIX() &&
+ !Triple.isOSVersionLT(MINIMUM_AIX_OS_MAJOR, MINIMUM_AIX_OS_MINOR));
}
+
+ bool supportsCpuIs() const override {
+ llvm::Triple Triple = getTriple();
+ // AIX 7.2 is the minimum requirement to support __builtin_cpu_is().
+ return Triple.isOSGlibc() ||
+ (Triple.isOSAIX() &&
+ !Triple.isOSVersionLT(MINIMUM_AIX_OS_MAJOR, MINIMUM_AIX_OS_MINOR));
+ }
+ bool validateCpuSupports(StringRef Feature) const override;
+ bool validateCpuIs(StringRef Name) const override;
};
class LLVM_LIBRARY_VISIBILITY PPC32TargetInfo : public PPCTargetInfo {
@@ -406,6 +441,10 @@ public:
// This is the ELF definition
return TargetInfo::PowerABIBuiltinVaList;
}
+
+ std::pair<unsigned, unsigned> hardwareInterferenceSizes() const override {
+ return std::make_pair(32, 32);
+ }
};
// Note: ABI differences may eventually require us to have a separate
@@ -486,6 +525,10 @@ public:
return CCCR_Warning;
}
}
+
+ std::pair<unsigned, unsigned> hardwareInterferenceSizes() const override {
+ return std::make_pair(128, 128);
+ }
};
class LLVM_LIBRARY_VISIBILITY AIXPPC32TargetInfo :
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
index c71b2e9eeb6c..9159162f01d1 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
@@ -96,7 +96,8 @@ bool RISCVTargetInfo::validateAsmConstraint(
// An address that is held in a general-purpose register.
Info.setAllowsMemory();
return true;
- case 'S': // A symbolic address
+ case 's':
+ case 'S': // A symbol or label reference with a constant offset
Info.setAllowsRegister();
return true;
case 'v':
@@ -167,7 +168,7 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
Twine(getVersionValue(ExtInfo.Major, ExtInfo.Minor)));
}
- if (ISAInfo->hasExtension("m") || ISAInfo->hasExtension("zmmul"))
+ if (ISAInfo->hasExtension("zmmul"))
Builder.defineMacro("__riscv_mul");
if (ISAInfo->hasExtension("m")) {
@@ -210,7 +211,7 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__riscv_v_fixed_vlen",
Twine(VScale->first * llvm::RISCV::RVVBitsPerBlock));
- if (FastUnalignedAccess)
+ if (FastScalarUnalignedAccess)
Builder.defineMacro("__riscv_misaligned_fast");
else
Builder.defineMacro("__riscv_misaligned_avoid");
@@ -233,7 +234,7 @@ static constexpr Builtin::Info BuiltinInfo[] = {
{#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
{#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
-#include "clang/Basic/BuiltinsRISCV.def"
+#include "clang/Basic/BuiltinsRISCV.inc"
};
ArrayRef<Builtin::Info> RISCVTargetInfo::getTargetBuiltins() const {
@@ -352,7 +353,8 @@ bool RISCVTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
if (ISAInfo->hasExtension("zfh") || ISAInfo->hasExtension("zhinx"))
HasLegalHalfType = true;
- FastUnalignedAccess = llvm::is_contained(Features, "+fast-unaligned-access");
+ FastScalarUnalignedAccess =
+ llvm::is_contained(Features, "+unaligned-scalar-mem");
if (llvm::is_contained(Features, "+experimental"))
HasExperimental = true;
@@ -466,3 +468,14 @@ ParsedTargetAttr RISCVTargetInfo::parseTargetAttr(StringRef Features) const {
}
return Ret;
}
+
+TargetInfo::CallingConvCheckResult
+RISCVTargetInfo::checkCallingConvention(CallingConv CC) const {
+ switch (CC) {
+ default:
+ return CCCR_Warning;
+ case CC_C:
+ case CC_RISCVVectorCall:
+ return CCCR_OK;
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h
index bfbdafb682c8..d5df6344bedc 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h
@@ -16,7 +16,7 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "llvm/Support/Compiler.h"
-#include "llvm/Support/RISCVISAInfo.h"
+#include "llvm/TargetParser/RISCVISAInfo.h"
#include "llvm/TargetParser/Triple.h"
#include <optional>
@@ -30,7 +30,7 @@ protected:
std::unique_ptr<llvm::RISCVISAInfo> ISAInfo;
private:
- bool FastUnalignedAccess;
+ bool FastScalarUnalignedAccess;
bool HasExperimental = false;
public:
@@ -110,6 +110,8 @@ public:
bool hasBFloat16Type() const override { return true; }
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override;
+
bool useFP16ConversionIntrinsics() const override {
return false;
}
@@ -120,6 +122,10 @@ public:
void fillValidTuneCPUList(SmallVectorImpl<StringRef> &Values) const override;
bool supportsTargetAttributeTune() const override { return true; }
ParsedTargetAttr parseTargetAttr(StringRef Str) const override;
+
+ std::pair<unsigned, unsigned> hardwareInterferenceSizes() const override {
+ return std::make_pair(32, 32);
+ }
};
class LLVM_LIBRARY_VISIBILITY RISCV32TargetInfo : public RISCVTargetInfo {
public:
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.cpp
index dc920177d3a9..040303983594 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.cpp
@@ -11,7 +11,9 @@
//===----------------------------------------------------------------------===//
#include "SPIR.h"
+#include "AMDGPU.h"
#include "Targets.h"
+#include "llvm/TargetParser/TargetParser.h"
using namespace clang;
using namespace clang::targets;
@@ -54,3 +56,76 @@ void SPIRV64TargetInfo::getTargetDefines(const LangOptions &Opts,
BaseSPIRVTargetInfo::getTargetDefines(Opts, Builder);
DefineStd(Builder, "SPIRV64", Opts);
}
+
+static const AMDGPUTargetInfo AMDGPUTI(llvm::Triple("amdgcn-amd-amdhsa"), {});
+
+ArrayRef<const char *> SPIRV64AMDGCNTargetInfo::getGCCRegNames() const {
+ return AMDGPUTI.getGCCRegNames();
+}
+
+bool SPIRV64AMDGCNTargetInfo::initFeatureMap(
+ llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef,
+ const std::vector<std::string> &FeatureVec) const {
+ llvm::AMDGPU::fillAMDGPUFeatureMap({}, getTriple(), Features);
+
+ return TargetInfo::initFeatureMap(Features, Diags, {}, FeatureVec);
+}
+
+bool SPIRV64AMDGCNTargetInfo::validateAsmConstraint(
+ const char *&Name, TargetInfo::ConstraintInfo &Info) const {
+ return AMDGPUTI.validateAsmConstraint(Name, Info);
+}
+
+std::string
+SPIRV64AMDGCNTargetInfo::convertConstraint(const char *&Constraint) const {
+ return AMDGPUTI.convertConstraint(Constraint);
+}
+
+ArrayRef<Builtin::Info> SPIRV64AMDGCNTargetInfo::getTargetBuiltins() const {
+ return AMDGPUTI.getTargetBuiltins();
+}
+
+void SPIRV64AMDGCNTargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ BaseSPIRVTargetInfo::getTargetDefines(Opts, Builder);
+ DefineStd(Builder, "SPIRV64", Opts);
+
+ Builder.defineMacro("__AMD__");
+ Builder.defineMacro("__AMDGPU__");
+ Builder.defineMacro("__AMDGCN__");
+}
+
+void SPIRV64AMDGCNTargetInfo::setAuxTarget(const TargetInfo *Aux) {
+ assert(Aux && "Cannot invoke setAuxTarget without a valid auxiliary target!");
+
+ // This is a 1:1 copy of AMDGPUTargetInfo::setAuxTarget()
+ assert(HalfFormat == Aux->HalfFormat);
+ assert(FloatFormat == Aux->FloatFormat);
+ assert(DoubleFormat == Aux->DoubleFormat);
+
+ // On x86_64 long double is 80-bit extended precision format, which is
+ // not supported by AMDGPU. 128-bit floating point format is also not
+ // supported by AMDGPU. Therefore keep its own format for these two types.
+ auto SaveLongDoubleFormat = LongDoubleFormat;
+ auto SaveFloat128Format = Float128Format;
+ auto SaveLongDoubleWidth = LongDoubleWidth;
+ auto SaveLongDoubleAlign = LongDoubleAlign;
+ copyAuxTarget(Aux);
+ LongDoubleFormat = SaveLongDoubleFormat;
+ Float128Format = SaveFloat128Format;
+ LongDoubleWidth = SaveLongDoubleWidth;
+ LongDoubleAlign = SaveLongDoubleAlign;
+ // For certain builtin types support on the host target, claim they are
+ // supported to pass the compilation of the host code during the device-side
+ // compilation.
+ // FIXME: As the side effect, we also accept `__float128` uses in the device
+ // code. To reject these builtin types supported in the host target but not in
+ // the device target, one approach would support `device_builtin` attribute
+ // so that we could tell the device builtin types from the host ones. This
+ // also solves the different representations of the same builtin type, such
+ // as `size_t` in the MSVC environment.
+ if (Aux->hasFloat128Type()) {
+ HasFloat128 = true;
+ Float128Format = DoubleFormat;
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h b/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h
index fa4a3bb1c82e..37cf9d7921ba 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h
@@ -125,7 +125,9 @@ protected:
LongAlign = HostTarget->getLongAlign();
LongLongWidth = HostTarget->getLongLongWidth();
LongLongAlign = HostTarget->getLongLongAlign();
- MinGlobalAlign = HostTarget->getMinGlobalAlign(/* TypeSize = */ 0);
+ MinGlobalAlign =
+ HostTarget->getMinGlobalAlign(/* TypeSize = */ 0,
+ /* HasNonWeakDef = */ true);
NewAlign = HostTarget->getNewAlign();
DefaultAlignForAttributeAligned =
HostTarget->getDefaultAlignForAttributeAligned();
@@ -257,7 +259,7 @@ public:
SizeType = TargetInfo::UnsignedInt;
PtrDiffType = IntPtrType = TargetInfo::SignedInt;
resetDataLayout("e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-"
- "v96:128-v192:256-v256:256-v512:512-v1024:1024");
+ "v96:128-v192:256-v256:256-v512:512-v1024:1024-G1");
}
void getTargetDefines(const LangOptions &Opts,
@@ -274,7 +276,7 @@ public:
SizeType = TargetInfo::UnsignedLong;
PtrDiffType = IntPtrType = TargetInfo::SignedLong;
resetDataLayout("e-i64:64-v16:16-v24:32-v32:32-v48:64-"
- "v96:128-v192:256-v256:256-v512:512-v1024:1024");
+ "v96:128-v192:256-v256:256-v512:512-v1024:1024-G1");
}
void getTargetDefines(const LangOptions &Opts,
@@ -308,11 +310,12 @@ public:
assert(Triple.getEnvironment() >= llvm::Triple::Pixel &&
Triple.getEnvironment() <= llvm::Triple::Amplification &&
"Logical SPIR-V environment must be a valid shader stage.");
+ PointerWidth = PointerAlign = 64;
// SPIR-V IDs are represented with a single 32-bit word.
SizeType = TargetInfo::UnsignedInt;
resetDataLayout("e-i64:64-v16:16-v24:32-v32:32-v48:64-"
- "v96:128-v192:256-v256:256-v512:512-v1024:1024");
+ "v96:128-v192:256-v256:256-v512:512-v1024:1024-G1");
}
void getTargetDefines(const LangOptions &Opts,
@@ -333,7 +336,7 @@ public:
SizeType = TargetInfo::UnsignedInt;
PtrDiffType = IntPtrType = TargetInfo::SignedInt;
resetDataLayout("e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-"
- "v96:128-v192:256-v256:256-v512:512-v1024:1024");
+ "v96:128-v192:256-v256:256-v512:512-v1024:1024-G1");
}
void getTargetDefines(const LangOptions &Opts,
@@ -354,11 +357,62 @@ public:
SizeType = TargetInfo::UnsignedLong;
PtrDiffType = IntPtrType = TargetInfo::SignedLong;
resetDataLayout("e-i64:64-v16:16-v24:32-v32:32-v48:64-"
- "v96:128-v192:256-v256:256-v512:512-v1024:1024");
+ "v96:128-v192:256-v256:256-v512:512-v1024:1024-G1");
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY SPIRV64AMDGCNTargetInfo final
+ : public BaseSPIRVTargetInfo {
+public:
+ SPIRV64AMDGCNTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : BaseSPIRVTargetInfo(Triple, Opts) {
+ assert(Triple.getArch() == llvm::Triple::spirv64 &&
+ "Invalid architecture for 64-bit AMDGCN SPIR-V.");
+ assert(Triple.getVendor() == llvm::Triple::VendorType::AMD &&
+ "64-bit AMDGCN SPIR-V target must use AMD vendor");
+ assert(getTriple().getOS() == llvm::Triple::OSType::AMDHSA &&
+ "64-bit AMDGCN SPIR-V target must use AMDHSA OS");
+ assert(getTriple().getEnvironment() == llvm::Triple::UnknownEnvironment &&
+ "64-bit SPIR-V target must use unknown environment type");
+ PointerWidth = PointerAlign = 64;
+ SizeType = TargetInfo::UnsignedLong;
+ PtrDiffType = IntPtrType = TargetInfo::SignedLong;
+
+ resetDataLayout("e-i64:64-v16:16-v24:32-v32:32-v48:64-"
+ "v96:128-v192:256-v256:256-v512:512-v1024:1024-G1-P4-A0");
+
+ BFloat16Width = BFloat16Align = 16;
+ BFloat16Format = &llvm::APFloat::BFloat();
+
+ HasLegalHalfType = true;
+ HasFloat16 = true;
+ HalfArgsAndReturns = true;
}
+ bool hasBFloat16Type() const override { return true; }
+
+ ArrayRef<const char *> getGCCRegNames() const override;
+
+ bool initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
+ StringRef,
+ const std::vector<std::string> &) const override;
+
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override;
+
+ std::string convertConstraint(const char *&Constraint) const override;
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override;
+
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
+
+ void setAuxTarget(const TargetInfo *Aux) override;
+
+ bool hasInt128Type() const override { return TargetInfo::hasInt128Type(); }
};
} // namespace targets
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h b/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h
index 214fef88e1dc..3357bee33e1a 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h
@@ -140,6 +140,10 @@ public:
CPU = getCPUKind(Name);
return CPU != CK_GENERIC;
}
+
+ std::pair<unsigned, unsigned> hardwareInterferenceSizes() const override {
+ return std::make_pair(32, 32);
+ }
};
// SPARC v8 is the 32-bit mode selected by Triple::sparc.
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.cpp
index a9b5ca483861..06f08db2eadd 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.cpp
@@ -138,6 +138,16 @@ bool SystemZTargetInfo::hasFeature(StringRef Feature) const {
.Default(false);
}
+unsigned SystemZTargetInfo::getMinGlobalAlign(uint64_t Size,
+ bool HasNonWeakDef) const {
+ // Don't enforce the minimum alignment on an external or weak symbol if
+ // -munaligned-symbols is passed.
+ if (UnalignedSymbols && !HasNonWeakDef)
+ return 0;
+
+ return MinGlobalAlign;
+}
+
void SystemZTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__s390__");
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h b/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h
index e4ec338880f2..3bc6f2c1d308 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h
@@ -29,11 +29,13 @@ class LLVM_LIBRARY_VISIBILITY SystemZTargetInfo : public TargetInfo {
bool HasTransactionalExecution;
bool HasVector;
bool SoftFloat;
+ bool UnalignedSymbols;
public:
SystemZTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple), CPU("z10"), ISARevision(8),
- HasTransactionalExecution(false), HasVector(false), SoftFloat(false) {
+ HasTransactionalExecution(false), HasVector(false), SoftFloat(false),
+ UnalignedSymbols(false) {
IntMaxType = SignedLong;
Int64Type = SignedLong;
IntWidth = IntAlign = 32;
@@ -45,6 +47,7 @@ public:
LongDoubleFormat = &llvm::APFloat::IEEEquad();
DefaultAlignForAttributeAligned = 64;
MinGlobalAlign = 16;
+ HasUnalignedAccess = true;
if (Triple.isOSzOS()) {
TLSSupported = false;
// All vector types are default aligned on an 8-byte boundary, even if the
@@ -64,6 +67,8 @@ public:
HasStrictFP = true;
}
+ unsigned getMinGlobalAlign(uint64_t Size, bool HasNonWeakDef) const override;
+
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
@@ -79,7 +84,7 @@ public:
ArrayRef<TargetInfo::AddlRegName> getGCCAddlRegNames() const override;
bool isSPRegName(StringRef RegName) const override {
- return RegName.equals("r15");
+ return RegName == "r15";
}
bool validateAsmConstraint(const char *&Name,
@@ -163,6 +168,7 @@ public:
HasTransactionalExecution = false;
HasVector = false;
SoftFloat = false;
+ UnalignedSymbols = false;
for (const auto &Feature : Features) {
if (Feature == "+transactional-execution")
HasTransactionalExecution = true;
@@ -170,6 +176,8 @@ public:
HasVector = true;
else if (Feature == "+soft-float")
SoftFloat = true;
+ else if (Feature == "+unaligned-symbols")
+ UnalignedSymbols = true;
}
HasVector &= !SoftFloat;
@@ -212,6 +220,10 @@ public:
int getEHDataRegisterNumber(unsigned RegNo) const override {
return RegNo < 4 ? 6 + RegNo : -1;
}
+
+ std::pair<unsigned, unsigned> hardwareInterferenceSizes() const override {
+ return std::make_pair(256, 256);
+ }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/VE.h b/contrib/llvm-project/clang/lib/Basic/Targets/VE.h
index ea9a092cad80..7e8fdf6096ef 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/VE.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/VE.h
@@ -40,6 +40,7 @@ public:
Int64Type = SignedLong;
RegParmMax = 8;
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ HasUnalignedAccess = true;
WCharType = UnsignedInt;
WIntType = UnsignedInt;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp
index f1c925d90cb6..1e565f0a5319 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp
@@ -45,19 +45,20 @@ bool WebAssemblyTargetInfo::setABI(const std::string &Name) {
bool WebAssemblyTargetInfo::hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
- .Case("simd128", SIMDLevel >= SIMD128)
- .Case("relaxed-simd", SIMDLevel >= RelaxedSIMD)
- .Case("nontrapping-fptoint", HasNontrappingFPToInt)
- .Case("sign-ext", HasSignExt)
- .Case("exception-handling", HasExceptionHandling)
- .Case("bulk-memory", HasBulkMemory)
.Case("atomics", HasAtomics)
- .Case("mutable-globals", HasMutableGlobals)
- .Case("multivalue", HasMultivalue)
- .Case("tail-call", HasTailCall)
- .Case("reference-types", HasReferenceTypes)
+ .Case("bulk-memory", HasBulkMemory)
+ .Case("exception-handling", HasExceptionHandling)
.Case("extended-const", HasExtendedConst)
+ .Case("half-precision", HasHalfPrecision)
.Case("multimemory", HasMultiMemory)
+ .Case("multivalue", HasMultivalue)
+ .Case("mutable-globals", HasMutableGlobals)
+ .Case("nontrapping-fptoint", HasNontrappingFPToInt)
+ .Case("reference-types", HasReferenceTypes)
+ .Case("relaxed-simd", SIMDLevel >= RelaxedSIMD)
+ .Case("sign-ext", HasSignExt)
+ .Case("simd128", SIMDLevel >= SIMD128)
+ .Case("tail-call", HasTailCall)
.Default(false);
}
@@ -73,32 +74,34 @@ void WebAssemblyTargetInfo::fillValidCPUList(
void WebAssemblyTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
defineCPUMacros(Builder, "wasm", /*Tuning=*/false);
- if (SIMDLevel >= SIMD128)
- Builder.defineMacro("__wasm_simd128__");
- if (SIMDLevel >= RelaxedSIMD)
- Builder.defineMacro("__wasm_relaxed_simd__");
- if (HasNontrappingFPToInt)
- Builder.defineMacro("__wasm_nontrapping_fptoint__");
- if (HasSignExt)
- Builder.defineMacro("__wasm_sign_ext__");
- if (HasExceptionHandling)
- Builder.defineMacro("__wasm_exception_handling__");
- if (HasBulkMemory)
- Builder.defineMacro("__wasm_bulk_memory__");
if (HasAtomics)
Builder.defineMacro("__wasm_atomics__");
- if (HasMutableGlobals)
- Builder.defineMacro("__wasm_mutable_globals__");
- if (HasMultivalue)
- Builder.defineMacro("__wasm_multivalue__");
- if (HasTailCall)
- Builder.defineMacro("__wasm_tail_call__");
- if (HasReferenceTypes)
- Builder.defineMacro("__wasm_reference_types__");
+ if (HasBulkMemory)
+ Builder.defineMacro("__wasm_bulk_memory__");
+ if (HasExceptionHandling)
+ Builder.defineMacro("__wasm_exception_handling__");
if (HasExtendedConst)
Builder.defineMacro("__wasm_extended_const__");
if (HasMultiMemory)
Builder.defineMacro("__wasm_multimemory__");
+ if (HasHalfPrecision)
+ Builder.defineMacro("__wasm_half_precision__");
+ if (HasMultivalue)
+ Builder.defineMacro("__wasm_multivalue__");
+ if (HasMutableGlobals)
+ Builder.defineMacro("__wasm_mutable_globals__");
+ if (HasNontrappingFPToInt)
+ Builder.defineMacro("__wasm_nontrapping_fptoint__");
+ if (HasReferenceTypes)
+ Builder.defineMacro("__wasm_reference_types__");
+ if (SIMDLevel >= RelaxedSIMD)
+ Builder.defineMacro("__wasm_relaxed_simd__");
+ if (HasSignExt)
+ Builder.defineMacro("__wasm_sign_ext__");
+ if (SIMDLevel >= SIMD128)
+ Builder.defineMacro("__wasm_simd128__");
+ if (HasTailCall)
+ Builder.defineMacro("__wasm_tail_call__");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
@@ -147,19 +150,28 @@ void WebAssemblyTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
bool WebAssemblyTargetInfo::initFeatureMap(
llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
const std::vector<std::string> &FeaturesVec) const {
- if (CPU == "bleeding-edge") {
- Features["nontrapping-fptoint"] = true;
- Features["sign-ext"] = true;
- Features["bulk-memory"] = true;
- Features["atomics"] = true;
+ auto addGenericFeatures = [&]() {
+ Features["multivalue"] = true;
Features["mutable-globals"] = true;
- Features["tail-call"] = true;
Features["reference-types"] = true;
- Features["multimemory"] = true;
- setSIMDLevel(Features, SIMD128, true);
- } else if (CPU == "generic") {
Features["sign-ext"] = true;
- Features["mutable-globals"] = true;
+ };
+ auto addBleedingEdgeFeatures = [&]() {
+ addGenericFeatures();
+ Features["atomics"] = true;
+ Features["bulk-memory"] = true;
+ Features["exception-handling"] = true;
+ Features["extended-const"] = true;
+ Features["half-precision"] = true;
+ Features["multimemory"] = true;
+ Features["nontrapping-fptoint"] = true;
+ Features["tail-call"] = true;
+ setSIMDLevel(Features, RelaxedSIMD, true);
+ };
+ if (CPU == "generic") {
+ addGenericFeatures();
+ } else if (CPU == "bleeding-edge") {
+ addBleedingEdgeFeatures();
}
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
@@ -168,36 +180,20 @@ bool WebAssemblyTargetInfo::initFeatureMap(
bool WebAssemblyTargetInfo::handleTargetFeatures(
std::vector<std::string> &Features, DiagnosticsEngine &Diags) {
for (const auto &Feature : Features) {
- if (Feature == "+simd128") {
- SIMDLevel = std::max(SIMDLevel, SIMD128);
- continue;
- }
- if (Feature == "-simd128") {
- SIMDLevel = std::min(SIMDLevel, SIMDEnum(SIMD128 - 1));
- continue;
- }
- if (Feature == "+relaxed-simd") {
- SIMDLevel = std::max(SIMDLevel, RelaxedSIMD);
- continue;
- }
- if (Feature == "-relaxed-simd") {
- SIMDLevel = std::min(SIMDLevel, SIMDEnum(RelaxedSIMD - 1));
- continue;
- }
- if (Feature == "+nontrapping-fptoint") {
- HasNontrappingFPToInt = true;
+ if (Feature == "+atomics") {
+ HasAtomics = true;
continue;
}
- if (Feature == "-nontrapping-fptoint") {
- HasNontrappingFPToInt = false;
+ if (Feature == "-atomics") {
+ HasAtomics = false;
continue;
}
- if (Feature == "+sign-ext") {
- HasSignExt = true;
+ if (Feature == "+bulk-memory") {
+ HasBulkMemory = true;
continue;
}
- if (Feature == "-sign-ext") {
- HasSignExt = false;
+ if (Feature == "-bulk-memory") {
+ HasBulkMemory = false;
continue;
}
if (Feature == "+exception-handling") {
@@ -208,28 +204,29 @@ bool WebAssemblyTargetInfo::handleTargetFeatures(
HasExceptionHandling = false;
continue;
}
- if (Feature == "+bulk-memory") {
- HasBulkMemory = true;
+ if (Feature == "+extended-const") {
+ HasExtendedConst = true;
continue;
}
- if (Feature == "-bulk-memory") {
- HasBulkMemory = false;
+ if (Feature == "-extended-const") {
+ HasExtendedConst = false;
continue;
}
- if (Feature == "+atomics") {
- HasAtomics = true;
+ if (Feature == "+half-precision") {
+ SIMDLevel = std::max(SIMDLevel, SIMD128);
+ HasHalfPrecision = true;
continue;
}
- if (Feature == "-atomics") {
- HasAtomics = false;
+ if (Feature == "-half-precision") {
+ HasHalfPrecision = false;
continue;
}
- if (Feature == "+mutable-globals") {
- HasMutableGlobals = true;
+ if (Feature == "+multimemory") {
+ HasMultiMemory = true;
continue;
}
- if (Feature == "-mutable-globals") {
- HasMutableGlobals = false;
+ if (Feature == "-multimemory") {
+ HasMultiMemory = false;
continue;
}
if (Feature == "+multivalue") {
@@ -240,12 +237,20 @@ bool WebAssemblyTargetInfo::handleTargetFeatures(
HasMultivalue = false;
continue;
}
- if (Feature == "+tail-call") {
- HasTailCall = true;
+ if (Feature == "+mutable-globals") {
+ HasMutableGlobals = true;
continue;
}
- if (Feature == "-tail-call") {
- HasTailCall = false;
+ if (Feature == "-mutable-globals") {
+ HasMutableGlobals = false;
+ continue;
+ }
+ if (Feature == "+nontrapping-fptoint") {
+ HasNontrappingFPToInt = true;
+ continue;
+ }
+ if (Feature == "-nontrapping-fptoint") {
+ HasNontrappingFPToInt = false;
continue;
}
if (Feature == "+reference-types") {
@@ -256,20 +261,36 @@ bool WebAssemblyTargetInfo::handleTargetFeatures(
HasReferenceTypes = false;
continue;
}
- if (Feature == "+extended-const") {
- HasExtendedConst = true;
+ if (Feature == "+relaxed-simd") {
+ SIMDLevel = std::max(SIMDLevel, RelaxedSIMD);
continue;
}
- if (Feature == "-extended-const") {
- HasExtendedConst = false;
+ if (Feature == "-relaxed-simd") {
+ SIMDLevel = std::min(SIMDLevel, SIMDEnum(RelaxedSIMD - 1));
continue;
}
- if (Feature == "+multimemory") {
- HasMultiMemory = true;
+ if (Feature == "+sign-ext") {
+ HasSignExt = true;
continue;
}
- if (Feature == "-multimemory") {
- HasMultiMemory = false;
+ if (Feature == "-sign-ext") {
+ HasSignExt = false;
+ continue;
+ }
+ if (Feature == "+simd128") {
+ SIMDLevel = std::max(SIMDLevel, SIMD128);
+ continue;
+ }
+ if (Feature == "-simd128") {
+ SIMDLevel = std::min(SIMDLevel, SIMDEnum(SIMD128 - 1));
+ continue;
+ }
+ if (Feature == "+tail-call") {
+ HasTailCall = true;
+ continue;
+ }
+ if (Feature == "-tail-call") {
+ HasTailCall = false;
continue;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h
index 83b1711f9fdf..e4a449d1ff30 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h
@@ -53,17 +53,18 @@ class LLVM_LIBRARY_VISIBILITY WebAssemblyTargetInfo : public TargetInfo {
RelaxedSIMD,
} SIMDLevel = NoSIMD;
- bool HasNontrappingFPToInt = false;
- bool HasSignExt = false;
- bool HasExceptionHandling = false;
- bool HasBulkMemory = false;
bool HasAtomics = false;
- bool HasMutableGlobals = false;
- bool HasMultivalue = false;
- bool HasTailCall = false;
- bool HasReferenceTypes = false;
+ bool HasBulkMemory = false;
+ bool HasExceptionHandling = false;
bool HasExtendedConst = false;
+ bool HasHalfPrecision = false;
bool HasMultiMemory = false;
+ bool HasMultivalue = false;
+ bool HasMutableGlobals = false;
+ bool HasNontrappingFPToInt = false;
+ bool HasReferenceTypes = false;
+ bool HasSignExt = false;
+ bool HasTailCall = false;
std::string ABI;
@@ -84,10 +85,14 @@ public:
SizeType = UnsignedLong;
PtrDiffType = SignedLong;
IntPtrType = SignedLong;
+ HasUnalignedAccess = true;
}
StringRef getABI() const override;
bool setABI(const std::string &Name) override;
+ bool useFP16ConversionIntrinsics() const override {
+ return !HasHalfPrecision;
+ }
protected:
void getTargetDefines(const LangOptions &Opts,
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp
index a68b662d9401..072c97e6c8c6 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp
@@ -64,6 +64,8 @@ static const char *const GCCRegNames[] = {
"dr0", "dr1", "dr2", "dr3", "dr6", "dr7",
"bnd0", "bnd1", "bnd2", "bnd3",
"tmm0", "tmm1", "tmm2", "tmm3", "tmm4", "tmm5", "tmm6", "tmm7",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};
const TargetInfo::AddlRegName AddlRegNames[] = {
@@ -83,8 +85,23 @@ const TargetInfo::AddlRegName AddlRegNames[] = {
{{"r13d", "r13w", "r13b"}, 43},
{{"r14d", "r14w", "r14b"}, 44},
{{"r15d", "r15w", "r15b"}, 45},
+ {{"r16d", "r16w", "r16b"}, 165},
+ {{"r17d", "r17w", "r17b"}, 166},
+ {{"r18d", "r18w", "r18b"}, 167},
+ {{"r19d", "r19w", "r19b"}, 168},
+ {{"r20d", "r20w", "r20b"}, 169},
+ {{"r21d", "r21w", "r21b"}, 170},
+ {{"r22d", "r22w", "r22b"}, 171},
+ {{"r23d", "r23w", "r23b"}, 172},
+ {{"r24d", "r24w", "r24b"}, 173},
+ {{"r25d", "r25w", "r25b"}, 174},
+ {{"r26d", "r26w", "r26b"}, 175},
+ {{"r27d", "r27w", "r27b"}, 176},
+ {{"r28d", "r28w", "r28b"}, 177},
+ {{"r29d", "r29w", "r29b"}, 178},
+ {{"r30d", "r30w", "r30b"}, 179},
+ {{"r31d", "r31w", "r31b"}, 180},
};
-
} // namespace targets
} // namespace clang
@@ -139,7 +156,7 @@ bool X86TargetInfo::initFeatureMap(
if (Feature.substr(1, 6) == "avx10.") {
if (Feature[0] == '+') {
HasAVX10 = true;
- if (Feature.substr(Feature.size() - 3, 3) == "512")
+ if (StringRef(Feature).ends_with("512"))
HasAVX10_512 = true;
LastAVX10 = Feature;
} else if (HasAVX10 && Feature == "-avx10.1-256") {
@@ -151,7 +168,7 @@ bool X86TargetInfo::initFeatureMap(
// Postpone AVX10 features handling after AVX512 settled.
UpdatedAVX10FeaturesVec.push_back(Feature);
continue;
- } else if (!HasAVX512F && Feature.substr(0, 7) == "+avx512") {
+ } else if (!HasAVX512F && StringRef(Feature).starts_with("+avx512")) {
HasAVX512F = true;
LastAVX512 = Feature;
} else if (HasAVX512F && Feature == "-avx512f") {
@@ -241,7 +258,9 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
if (Feature[0] != '+')
continue;
- if (Feature == "+aes") {
+ if (Feature == "+mmx") {
+ HasMMX = true;
+ } else if (Feature == "+aes") {
HasAES = true;
} else if (Feature == "+vaes") {
HasVAES = true;
@@ -293,15 +312,9 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasAVX512VNNI = true;
} else if (Feature == "+avx512bf16") {
HasAVX512BF16 = true;
- } else if (Feature == "+avx512er") {
- HasAVX512ER = true;
- Diags.Report(diag::warn_knl_knm_isa_support_removed);
} else if (Feature == "+avx512fp16") {
HasAVX512FP16 = true;
HasLegalHalfType = true;
- } else if (Feature == "+avx512pf") {
- HasAVX512PF = true;
- Diags.Report(diag::warn_knl_knm_isa_support_removed);
} else if (Feature == "+avx512dq") {
HasAVX512DQ = true;
} else if (Feature == "+avx512bitalg") {
@@ -358,9 +371,6 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasWBNOINVD = true;
} else if (Feature == "+prefetchi") {
HasPREFETCHI = true;
- } else if (Feature == "+prefetchwt1") {
- HasPREFETCHWT1 = true;
- Diags.Report(diag::warn_knl_knm_isa_support_removed);
} else if (Feature == "+clzero") {
HasCLZERO = true;
} else if (Feature == "+cldemote") {
@@ -433,6 +443,8 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasFullBFloat16 = true;
} else if (Feature == "+egpr") {
HasEGPR = true;
+ } else if (Feature == "+inline-asm-use-gpr32") {
+ HasInlineAsmUseGPR32 = true;
} else if (Feature == "+push2pop2") {
HasPush2Pop2 = true;
} else if (Feature == "+ppx") {
@@ -441,8 +453,14 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasNDD = true;
} else if (Feature == "+ccmp") {
HasCCMP = true;
+ } else if (Feature == "+nf") {
+ HasNF = true;
} else if (Feature == "+cf") {
HasCF = true;
+ } else if (Feature == "+zu") {
+ HasZU = true;
+ } else if (Feature == "+branch-hint") {
+ HasBranchHint = true;
}
X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Feature)
@@ -471,13 +489,6 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
// for bfloat16 arithmetic operations in the front-end.
HasBFloat16 = SSELevel >= SSE2;
- MMX3DNowEnum ThreeDNowLevel = llvm::StringSwitch<MMX3DNowEnum>(Feature)
- .Case("+3dnowa", AMD3DNowAthlon)
- .Case("+3dnow", AMD3DNow)
- .Case("+mmx", MMX)
- .Default(NoMMX3DNow);
- MMX3DNowLevel = std::max(MMX3DNowLevel, ThreeDNowLevel);
-
XOPEnum XLevel = llvm::StringSwitch<XOPEnum>(Feature)
.Case("+xop", XOP)
.Case("+fma4", FMA4)
@@ -712,6 +723,9 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_ZNVER4:
defineCPUMacros(Builder, "znver4");
break;
+ case CK_ZNVER5:
+ defineCPUMacros(Builder, "znver5");
+ break;
case CK_Geode:
defineCPUMacros(Builder, "geode");
break;
@@ -821,12 +835,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__AVX512VNNI__");
if (HasAVX512BF16)
Builder.defineMacro("__AVX512BF16__");
- if (HasAVX512ER)
- Builder.defineMacro("__AVX512ER__");
if (HasAVX512FP16)
Builder.defineMacro("__AVX512FP16__");
- if (HasAVX512PF)
- Builder.defineMacro("__AVX512PF__");
if (HasAVX512DQ)
Builder.defineMacro("__AVX512DQ__");
if (HasAVX512BITALG)
@@ -878,8 +888,6 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__SM4__");
if (HasPREFETCHI)
Builder.defineMacro("__PREFETCHI__");
- if (HasPREFETCHWT1)
- Builder.defineMacro("__PREFETCHWT1__");
if (HasCLZERO)
Builder.defineMacro("__CLZERO__");
if (HasKL)
@@ -952,8 +960,17 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__NDD__");
if (HasCCMP)
Builder.defineMacro("__CCMP__");
+ if (HasNF)
+ Builder.defineMacro("__NF__");
if (HasCF)
Builder.defineMacro("__CF__");
+ if (HasZU)
+ Builder.defineMacro("__ZU__");
+ if (HasEGPR && HasPush2Pop2 && HasPPX && HasNDD && HasCCMP && HasNF &&
+ HasCF && HasZU)
+ Builder.defineMacro("__APX_F__");
+ if (HasEGPR && HasInlineAsmUseGPR32)
+ Builder.defineMacro("__APX_INLINE_ASM_USE_GPR32__");
// Each case falls through to the previous one here.
switch (SSELevel) {
@@ -1012,18 +1029,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
}
// Each case falls through to the previous one here.
- switch (MMX3DNowLevel) {
- case AMD3DNowAthlon:
- Builder.defineMacro("__3dNOW_A__");
- [[fallthrough]];
- case AMD3DNow:
- Builder.defineMacro("__3dNOW__");
- [[fallthrough]];
- case MMX:
+ if (HasMMX) {
Builder.defineMacro("__MMX__");
- [[fallthrough]];
- case NoMMX3DNow:
- break;
}
if (CPU >= CK_i486 || CPU == CK_None) {
@@ -1042,8 +1049,6 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
return llvm::StringSwitch<bool>(Name)
- .Case("3dnow", true)
- .Case("3dnowa", true)
.Case("adx", true)
.Case("aes", true)
.Case("amx-bf16", true)
@@ -1060,9 +1065,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("avx512vpopcntdq", true)
.Case("avx512vnni", true)
.Case("avx512bf16", true)
- .Case("avx512er", true)
.Case("avx512fp16", true)
- .Case("avx512pf", true)
.Case("avx512dq", true)
.Case("avx512bitalg", true)
.Case("avx512bw", true)
@@ -1110,7 +1113,6 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("pku", true)
.Case("popcnt", true)
.Case("prefetchi", true)
- .Case("prefetchwt1", true)
.Case("prfchw", true)
.Case("ptwrite", true)
.Case("raoint", true)
@@ -1154,7 +1156,9 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("ppx", true)
.Case("ndd", true)
.Case("ccmp", true)
+ .Case("nf", true)
.Case("cf", true)
+ .Case("zu", true)
.Default(false);
}
@@ -1176,9 +1180,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("avx512vpopcntdq", HasAVX512VPOPCNTDQ)
.Case("avx512vnni", HasAVX512VNNI)
.Case("avx512bf16", HasAVX512BF16)
- .Case("avx512er", HasAVX512ER)
.Case("avx512fp16", HasAVX512FP16)
- .Case("avx512pf", HasAVX512PF)
.Case("avx512dq", HasAVX512DQ)
.Case("avx512bitalg", HasAVX512BITALG)
.Case("avx512bw", HasAVX512BW)
@@ -1216,9 +1218,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("widekl", HasWIDEKL)
.Case("lwp", HasLWP)
.Case("lzcnt", HasLZCNT)
- .Case("mm3dnow", MMX3DNowLevel >= AMD3DNow)
- .Case("mm3dnowa", MMX3DNowLevel >= AMD3DNowAthlon)
- .Case("mmx", MMX3DNowLevel >= MMX)
+ .Case("mmx", HasMMX)
.Case("movbe", HasMOVBE)
.Case("movdiri", HasMOVDIRI)
.Case("movdir64b", HasMOVDIR64B)
@@ -1228,7 +1228,6 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("pku", HasPKU)
.Case("popcnt", HasPOPCNT)
.Case("prefetchi", HasPREFETCHI)
- .Case("prefetchwt1", HasPREFETCHWT1)
.Case("prfchw", HasPRFCHW)
.Case("ptwrite", HasPTWRITE)
.Case("raoint", HasRAOINT)
@@ -1276,7 +1275,10 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("ppx", HasPPX)
.Case("ndd", HasNDD)
.Case("ccmp", HasCCMP)
+ .Case("nf", HasNF)
.Case("cf", HasCF)
+ .Case("zu", HasZU)
+ .Case("branch-hint", HasBranchHint)
.Default(false);
}
@@ -1473,6 +1475,18 @@ bool X86TargetInfo::validateAsmConstraint(
case 'C': // SSE floating point constant.
case 'G': // x87 floating point constant.
return true;
+ case 'j':
+ Name++;
+ switch (*Name) {
+ default:
+ return false;
+ case 'r':
+ Info.setAllowsRegister();
+ return true;
+ case 'R':
+ Info.setAllowsRegister();
+ return true;
+ }
case '@':
// CC condition changes.
if (auto Len = matchAsmCCConstraint(Name)) {
@@ -1602,6 +1616,7 @@ std::optional<unsigned> X86TargetInfo::getCPUCacheLineSize() const {
case CK_ZNVER2:
case CK_ZNVER3:
case CK_ZNVER4:
+ case CK_ZNVER5:
// Deprecated
case CK_x86_64:
case CK_x86_64_v2:
@@ -1745,6 +1760,21 @@ std::string X86TargetInfo::convertConstraint(const char *&Constraint) const {
return std::string("^") + std::string(Constraint++, 2);
}
[[fallthrough]];
+ case 'j':
+ switch (Constraint[1]) {
+ default:
+ // Break from inner switch and fall through (copy single char),
+ // continue parsing after copying the current constraint into
+ // the return string.
+ break;
+ case 'r':
+ case 'R':
+ // "^" hints llvm that this is a 2 letter constraint.
+ // "Constraint++" is used to promote the string iterator
+ // to the next constraint.
+ return std::string("^") + std::string(Constraint++, 2);
+ }
+ [[fallthrough]];
default:
return std::string(1, *Constraint);
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/X86.h b/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
index 0ab1c10833db..ba34ab2c7f33 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
@@ -67,12 +67,7 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
AVX2,
AVX512F
} SSELevel = NoSSE;
- enum MMX3DNowEnum {
- NoMMX3DNow,
- MMX,
- AMD3DNow,
- AMD3DNowAthlon
- } MMX3DNowLevel = NoMMX3DNow;
+ bool HasMMX = false;
enum XOPEnum { NoXOP, SSE4A, FMA4, XOP } XOPLevel = NoXOP;
enum AddrSpace { ptr32_sptr = 270, ptr32_uptr = 271, ptr64 = 272 };
@@ -103,8 +98,6 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasAVX512VNNI = false;
bool HasAVX512FP16 = false;
bool HasAVX512BF16 = false;
- bool HasAVX512ER = false;
- bool HasAVX512PF = false;
bool HasAVX512DQ = false;
bool HasAVX512BITALG = false;
bool HasAVX512BW = false;
@@ -136,7 +129,6 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasCLWB = false;
bool HasMOVBE = false;
bool HasPREFETCHI = false;
- bool HasPREFETCHWT1 = false;
bool HasRDPID = false;
bool HasRDPRU = false;
bool HasRetpolineExternalThunk = false;
@@ -173,7 +165,11 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasPPX = false;
bool HasNDD = false;
bool HasCCMP = false;
+ bool HasNF = false;
bool HasCF = false;
+ bool HasZU = false;
+ bool HasInlineAsmUseGPR32 = false;
+ bool HasBranchHint = false;
protected:
llvm::X86::CPUKind CPU = llvm::X86::CK_None;
@@ -188,6 +184,7 @@ public:
LongDoubleFormat = &llvm::APFloat::x87DoubleExtended();
AddrSpaceMap = &X86AddrSpaceMap;
HasStrictFP = true;
+ HasUnalignedAccess = true;
bool IsWinCOFF =
getTriple().isOSWindows() && getTriple().isOSBinFormatCOFF();
@@ -217,9 +214,13 @@ public:
ArrayRef<TargetInfo::AddlRegName> getGCCAddlRegNames() const override;
bool isSPRegName(StringRef RegName) const override {
- return RegName.equals("esp") || RegName.equals("rsp");
+ return RegName == "esp" || RegName == "rsp";
}
+ bool supportsCpuSupports() const override { return true; }
+ bool supportsCpuIs() const override { return true; }
+ bool supportsCpuInit() const override { return true; }
+
bool validateCpuSupports(StringRef FeatureStr) const override;
bool validateCpuIs(StringRef FeatureStr) const override;
@@ -241,7 +242,7 @@ public:
bool &HasSizeMismatch) const override {
// esp and ebp are the only 32-bit registers the x86 backend can currently
// handle.
- if (RegName.equals("esp") || RegName.equals("ebp")) {
+ if (RegName == "esp" || RegName == "ebp") {
// Check that the register size is 32-bit.
HasSizeMismatch = RegSize != 32;
return true;
@@ -342,8 +343,7 @@ public:
return "avx512";
if (getTriple().getArch() == llvm::Triple::x86_64 && SSELevel >= AVX)
return "avx";
- if (getTriple().getArch() == llvm::Triple::x86 &&
- MMX3DNowLevel == NoMMX3DNow)
+ if (getTriple().getArch() == llvm::Triple::x86 && !HasMMX)
return "no-mmx";
return "";
}
@@ -513,15 +513,6 @@ class LLVM_LIBRARY_VISIBILITY NetBSDI386TargetInfo
public:
NetBSDI386TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: NetBSDTargetInfo<X86_32TargetInfo>(Triple, Opts) {}
-
- LangOptions::FPEvalMethodKind getFPEvalMethod() const override {
- VersionTuple OsVersion = getTriple().getOSVersion();
- // New NetBSD uses the default rounding mode.
- if (OsVersion >= VersionTuple(6, 99, 26) || OsVersion.getMajor() == 0)
- return X86_32TargetInfo::getFPEvalMethod();
- // NetBSD before 6.99.26 defaults to "double" rounding.
- return LangOptions::FPEvalMethodKind::FEM_Double;
- }
};
class LLVM_LIBRARY_VISIBILITY OpenBSDI386TargetInfo
@@ -668,6 +659,7 @@ public:
MCUX86_32TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: X86_32TargetInfo(Triple, Opts) {
LongDoubleWidth = 64;
+ DefaultAlignForAttributeAligned = 32;
LongDoubleFormat = &llvm::APFloat::IEEEdouble();
resetDataLayout("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:32-"
"f64:32-f128:32-n8:16:32-a:0:32-S32");
@@ -772,6 +764,7 @@ public:
case CC_Win64:
case CC_PreserveMost:
case CC_PreserveAll:
+ case CC_PreserveNone:
case CC_X86RegCall:
case CC_OpenCLKernel:
return CCCR_OK;
@@ -795,7 +788,7 @@ public:
bool &HasSizeMismatch) const override {
// rsp and rbp are the only 64-bit registers the x86 backend can currently
// handle.
- if (RegName.equals("rsp") || RegName.equals("rbp")) {
+ if (RegName == "rsp" || RegName == "rbp") {
// Check that the register size is 64-bit.
HasSizeMismatch = RegSize != 64;
return true;
@@ -849,6 +842,7 @@ public:
case CC_IntelOclBicc:
case CC_PreserveMost:
case CC_PreserveAll:
+ case CC_PreserveNone:
case CC_X86_64SysV:
case CC_Swift:
case CC_SwiftAsync:
diff --git a/contrib/llvm-project/clang/lib/Basic/TypeTraits.cpp b/contrib/llvm-project/clang/lib/Basic/TypeTraits.cpp
index 4dbf678dc395..8d6794223cca 100644
--- a/contrib/llvm-project/clang/lib/Basic/TypeTraits.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/TypeTraits.cpp
@@ -13,6 +13,7 @@
#include "clang/Basic/TypeTraits.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
+#include <cstring>
using namespace clang;
static constexpr const char *TypeTraitNames[] = {
@@ -81,6 +82,15 @@ const char *clang::getTraitName(UnaryExprOrTypeTrait T) {
const char *clang::getTraitSpelling(TypeTrait T) {
assert(T <= TT_Last && "invalid enum value!");
+ if (T == BTT_IsDeducible) {
+ // The __is_deducible is an internal-only type trait. To hide it from
+ // external users, we define it with an empty spelling name, preventing the
+ // clang parser from recognizing its token kind.
+ // However, other components such as the AST dump still require the real
+ // type trait name. Therefore, we return the real name when needed.
+ assert(std::strlen(TypeTraitSpellings[T]) == 0);
+ return "__is_deducible";
+ }
return TypeTraitSpellings[T];
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeGenError.cpp b/contrib/llvm-project/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index 5fd3d77c3842..c2829c3ff2af 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeGenError.cpp
+++ b/contrib/llvm-project/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -1,14 +1,13 @@
-//===--- ByteCodeGenError.h - Byte code generation error --------*- C++ -*-===//
+//===- CIRDialect.cpp - MLIR CIR ops implementation -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
+//
+// This file implements the CIR dialect and its operations.
+//
+//===----------------------------------------------------------------------===//
-#include "ByteCodeGenError.h"
-
-using namespace clang;
-using namespace clang::interp;
-
-char ByteCodeGenError::ID;
+#include <clang/CIR/Dialect/IR/CIRDialect.h>
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.cpp
index 1b56cf7c596d..edd7146dc1ac 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.cpp
@@ -39,9 +39,9 @@ bool ABIInfo::isOHOSFamily() const {
return getTarget().getTriple().isOHOSFamily();
}
-Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- return Address::invalid();
+RValue ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
+ return RValue::getIgnored();
}
bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
@@ -61,7 +61,7 @@ bool ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
uint64_t &Members) const {
if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
- uint64_t NElements = AT->getSize().getZExtValue();
+ uint64_t NElements = AT->getZExtSize();
if (NElements == 0)
return false;
if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
@@ -98,7 +98,7 @@ bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
QualType FT = FD->getType();
while (const ConstantArrayType *AT =
getContext().getAsConstantArrayType(FT)) {
- if (AT->getSize().getZExtValue() == 0)
+ if (AT->isZeroSize())
return false;
FT = AT->getElementType();
}
@@ -184,6 +184,58 @@ ABIArgInfo ABIInfo::getNaturalAlignIndirectInReg(QualType Ty,
/*ByVal*/ false, Realign);
}
+void ABIInfo::appendAttributeMangling(TargetAttr *Attr,
+ raw_ostream &Out) const {
+ if (Attr->isDefaultVersion())
+ return;
+ appendAttributeMangling(Attr->getFeaturesStr(), Out);
+}
+
+void ABIInfo::appendAttributeMangling(TargetVersionAttr *Attr,
+ raw_ostream &Out) const {
+ appendAttributeMangling(Attr->getNamesStr(), Out);
+}
+
+void ABIInfo::appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
+ raw_ostream &Out) const {
+ appendAttributeMangling(Attr->getFeatureStr(Index), Out);
+ Out << '.' << Attr->getMangledIndex(Index);
+}
+
+void ABIInfo::appendAttributeMangling(StringRef AttrStr,
+ raw_ostream &Out) const {
+ if (AttrStr == "default") {
+ Out << ".default";
+ return;
+ }
+
+ Out << '.';
+ const TargetInfo &TI = CGT.getTarget();
+ ParsedTargetAttr Info = TI.parseTargetAttr(AttrStr);
+
+ llvm::sort(Info.Features, [&TI](StringRef LHS, StringRef RHS) {
+ // Multiversioning doesn't allow "no-${feature}", so we can
+ // only have "+" prefixes here.
+ assert(LHS.starts_with("+") && RHS.starts_with("+") &&
+ "Features should always have a prefix.");
+ return TI.multiVersionSortPriority(LHS.substr(1)) >
+ TI.multiVersionSortPriority(RHS.substr(1));
+ });
+
+ bool IsFirst = true;
+ if (!Info.CPU.empty()) {
+ IsFirst = false;
+ Out << "arch_" << Info.CPU;
+ }
+
+ for (StringRef Feat : Info.Features) {
+ if (!IsFirst)
+ Out << '_';
+ IsFirst = false;
+ Out << Feat.substr(1);
+ }
+}
+
// Pin the vtable to this file.
SwiftABIInfo::~SwiftABIInfo() = default;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h b/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
index b9a5ef6e4366..b8a8de57e5b9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
@@ -9,6 +9,7 @@
#ifndef LLVM_CLANG_LIB_CODEGEN_ABIINFO_H
#define LLVM_CLANG_LIB_CODEGEN_ABIINFO_H
+#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Type.h"
#include "llvm/IR/CallingConv.h"
@@ -33,6 +34,8 @@ class CGCXXABI;
class CGFunctionInfo;
class CodeGenFunction;
class CodeGenTypes;
+class RValue;
+class AggValueSlot;
// FIXME: All of this stuff should be part of the target interface
// somehow. It is currently here because it is not clear how to factor
@@ -74,18 +77,18 @@ public:
// the ABI information any lower than CodeGen. Of course, for
// VAArg handling it has to be at this level; there is no way to
// abstract this out.
- virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF,
- CodeGen::Address VAListAddr,
- QualType Ty) const = 0;
+ virtual RValue EmitVAArg(CodeGen::CodeGenFunction &CGF,
+ CodeGen::Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const = 0;
bool isAndroid() const;
bool isOHOSFamily() const;
/// Emit the target dependent code to load a value of
/// \arg Ty from the \c __builtin_ms_va_list pointed to by \arg VAListAddr.
- virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF,
- CodeGen::Address VAListAddr,
- QualType Ty) const;
+ virtual RValue EmitMSVAArg(CodeGen::CodeGenFunction &CGF,
+ CodeGen::Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const;
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const;
@@ -111,6 +114,15 @@ public:
CodeGen::ABIArgInfo getNaturalAlignIndirectInReg(QualType Ty,
bool Realign = false) const;
+
+ virtual void appendAttributeMangling(TargetAttr *Attr,
+ raw_ostream &Out) const;
+ virtual void appendAttributeMangling(TargetVersionAttr *Attr,
+ raw_ostream &Out) const;
+ virtual void appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
+ raw_ostream &Out) const;
+ virtual void appendAttributeMangling(StringRef AttrStr,
+ raw_ostream &Out) const;
};
/// Target specific hooks for defining how a type should be passed or returned
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.cpp b/contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.cpp
index 2b20d5a13346..35e8f79ba1ba 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.cpp
@@ -71,9 +71,12 @@ void DefaultABIInfo::computeInfo(CGFunctionInfo &FI) const {
I.info = classifyArgumentType(I.type);
}
-Address DefaultABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
+RValue DefaultABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
+ return CGF.EmitLoadOfAnyValue(
+ CGF.MakeAddrLValue(
+ EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty)), Ty),
+ Slot);
}
ABIArgInfo CodeGen::coerceToIntArray(QualType Ty, ASTContext &Context,
@@ -157,7 +160,7 @@ llvm::Value *CodeGen::emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
llvm::Value *RoundUp = CGF.Builder.CreateConstInBoundsGEP1_32(
CGF.Builder.getInt8Ty(), Ptr, Align.getQuantity() - 1);
return CGF.Builder.CreateIntrinsic(
- llvm::Intrinsic::ptrmask, {CGF.AllocaInt8PtrTy, CGF.IntPtrTy},
+ llvm::Intrinsic::ptrmask, {Ptr->getType(), CGF.IntPtrTy},
{RoundUp, llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity())},
nullptr, Ptr->getName() + ".aligned");
}
@@ -187,7 +190,7 @@ CodeGen::emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr,
CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
Address NextPtr =
CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
- CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
+ CGF.Builder.CreateStore(NextPtr.emitRawPointer(CGF), VAListAddr);
// If the argument is smaller than a slot, and this is a big-endian
// target, the argument will be right-adjusted in its slot.
@@ -199,12 +202,12 @@ CodeGen::emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr,
return Addr.withElementType(DirectTy);
}
-Address CodeGen::emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType ValueTy, bool IsIndirect,
- TypeInfoChars ValueInfo,
- CharUnits SlotSizeAndAlign,
- bool AllowHigherAlign,
- bool ForceRightAdjust) {
+RValue CodeGen::emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType ValueTy, bool IsIndirect,
+ TypeInfoChars ValueInfo,
+ CharUnits SlotSizeAndAlign,
+ bool AllowHigherAlign, AggValueSlot Slot,
+ bool ForceRightAdjust) {
// The size and alignment of the value that was passed directly.
CharUnits DirectSize, DirectAlign;
if (IsIndirect) {
@@ -230,7 +233,7 @@ Address CodeGen::emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
Addr = Address(CGF.Builder.CreateLoad(Addr), ElementTy, ValueInfo.Align);
}
- return Addr;
+ return CGF.EmitLoadOfAnyValue(CGF.MakeAddrLValue(Addr, ValueTy), Slot);
}
Address CodeGen::emitMergePHI(CodeGenFunction &CGF, Address Addr1,
@@ -239,15 +242,15 @@ Address CodeGen::emitMergePHI(CodeGenFunction &CGF, Address Addr1,
const llvm::Twine &Name) {
assert(Addr1.getType() == Addr2.getType());
llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
- PHI->addIncoming(Addr1.getPointer(), Block1);
- PHI->addIncoming(Addr2.getPointer(), Block2);
+ PHI->addIncoming(Addr1.emitRawPointer(CGF), Block1);
+ PHI->addIncoming(Addr2.emitRawPointer(CGF), Block2);
CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
return Address(PHI, Addr1.getElementType(), Align);
}
bool CodeGen::isEmptyField(ASTContext &Context, const FieldDecl *FD,
bool AllowArrays, bool AsIfNoUniqueAddr) {
- if (FD->isUnnamedBitfield())
+ if (FD->isUnnamedBitField())
return true;
QualType FT = FD->getType();
@@ -257,7 +260,7 @@ bool CodeGen::isEmptyField(ASTContext &Context, const FieldDecl *FD,
bool WasArray = false;
if (AllowArrays)
while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
- if (AT->getSize() == 0)
+ if (AT->isZeroSize())
return true;
FT = AT->getElementType();
// The [[no_unique_address]] special case below does not apply to
@@ -307,6 +310,41 @@ bool CodeGen::isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays,
return true;
}
+bool CodeGen::isEmptyFieldForLayout(const ASTContext &Context,
+ const FieldDecl *FD) {
+ if (FD->isZeroLengthBitField(Context))
+ return true;
+
+ if (FD->isUnnamedBitField())
+ return false;
+
+ return isEmptyRecordForLayout(Context, FD->getType());
+}
+
+bool CodeGen::isEmptyRecordForLayout(const ASTContext &Context, QualType T) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ const RecordDecl *RD = RT->getDecl();
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ if (CXXRD->isDynamicClass())
+ return false;
+
+ for (const auto &I : CXXRD->bases())
+ if (!isEmptyRecordForLayout(Context, I.getType()))
+ return false;
+ }
+
+ for (const auto *I : RD->fields())
+ if (!isEmptyFieldForLayout(Context, I))
+ return false;
+
+ return true;
+}
+
const Type *CodeGen::isSingleElementStruct(QualType T, ASTContext &Context) {
const RecordType *RT = T->getAs<RecordType>();
if (!RT)
@@ -352,7 +390,7 @@ const Type *CodeGen::isSingleElementStruct(QualType T, ASTContext &Context) {
// Treat single element arrays as the element.
while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
- if (AT->getSize().getZExtValue() != 1)
+ if (AT->getZExtSize() != 1)
break;
FT = AT->getElementType();
}
@@ -400,7 +438,7 @@ Address CodeGen::EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr,
llvm::Type *ElementTy = CGF.ConvertTypeForMem(Ty);
llvm::Type *BaseTy = llvm::PointerType::getUnqual(ElementTy);
llvm::Value *Addr =
- CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
+ CGF.Builder.CreateVAArg(VAListAddr.emitRawPointer(CGF), BaseTy);
return Address(Addr, ElementTy, TyAlignForABI);
} else {
assert((AI.isDirect() || AI.isExtend()) &&
@@ -416,7 +454,7 @@ Address CodeGen::EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr,
"Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
Address Temp = CGF.CreateMemTemp(Ty, "varet");
- Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(),
+ Val = CGF.Builder.CreateVAArg(VAListAddr.emitRawPointer(CGF),
CGF.ConvertTypeForMem(Ty));
CGF.Builder.CreateStore(Val, Temp);
return Temp;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.h b/contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.h
index afde08ba100c..2a3ef6b8a6c9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.h
@@ -29,8 +29,8 @@ public:
void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
};
// Helper for coercing an aggregate argument or return value into an integer
@@ -112,10 +112,11 @@ Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr,
/// \param ForceRightAdjust - Default is false. On big-endian platform and
/// if the argument is smaller than a slot, set this flag will force
/// right-adjust the argument in its slot irrespective of the type.
-Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType ValueTy, bool IsIndirect,
- TypeInfoChars ValueInfo, CharUnits SlotSizeAndAlign,
- bool AllowHigherAlign, bool ForceRightAdjust = false);
+RValue emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType ValueTy, bool IsIndirect,
+ TypeInfoChars ValueInfo, CharUnits SlotSizeAndAlign,
+ bool AllowHigherAlign, AggValueSlot Slot,
+ bool ForceRightAdjust = false);
Address emitMergePHI(CodeGenFunction &CGF, Address Addr1,
llvm::BasicBlock *Block1, Address Addr2,
@@ -136,6 +137,16 @@ bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays,
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays,
bool AsIfNoUniqueAddr = false);
+/// isEmptyFieldForLayout - Return true iff the field is "empty", that is,
+/// either a zero-width bit-field or an \ref isEmptyRecordForLayout.
+bool isEmptyFieldForLayout(const ASTContext &Context, const FieldDecl *FD);
+
+/// isEmptyRecordForLayout - Return true iff a structure contains only empty
+/// base classes (per \ref isEmptyRecordForLayout) and fields (per
+/// \ref isEmptyFieldForLayout). Note, C++ record fields are considered empty
+/// if the [[no_unique_address]] attribute would have made them empty.
+bool isEmptyRecordForLayout(const ASTContext &Context, QualType T);
+
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Address.h b/contrib/llvm-project/clang/lib/CodeGen/Address.h
index cf48df8f5e73..a18c7169af1e 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Address.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/Address.h
@@ -14,7 +14,9 @@
#ifndef LLVM_CLANG_LIB_CODEGEN_ADDRESS_H
#define LLVM_CLANG_LIB_CODEGEN_ADDRESS_H
+#include "CGPointerAuthInfo.h"
#include "clang/AST/CharUnits.h"
+#include "clang/AST/Type.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/IR/Constants.h"
#include "llvm/Support/MathExtras.h"
@@ -22,28 +24,41 @@
namespace clang {
namespace CodeGen {
+class Address;
+class CGBuilderTy;
+class CodeGenFunction;
+class CodeGenModule;
+
// Indicates whether a pointer is known not to be null.
enum KnownNonNull_t { NotKnownNonNull, KnownNonNull };
-/// An aligned address.
-class Address {
+/// An abstract representation of an aligned address. This is designed to be an
+/// IR-level abstraction, carrying just the information necessary to perform IR
+/// operations on an address like loads and stores. In particular, it doesn't
+/// carry C type information or allow the representation of things like
+/// bit-fields; clients working at that level should generally be using
+/// `LValue`.
+/// The pointer contained in this class is known to be unsigned.
+class RawAddress {
llvm::PointerIntPair<llvm::Value *, 1, bool> PointerAndKnownNonNull;
llvm::Type *ElementType;
CharUnits Alignment;
protected:
- Address(std::nullptr_t) : ElementType(nullptr) {}
+ RawAddress(std::nullptr_t) : ElementType(nullptr) {}
public:
- Address(llvm::Value *Pointer, llvm::Type *ElementType, CharUnits Alignment,
- KnownNonNull_t IsKnownNonNull = NotKnownNonNull)
+ RawAddress(llvm::Value *Pointer, llvm::Type *ElementType, CharUnits Alignment,
+ KnownNonNull_t IsKnownNonNull = NotKnownNonNull)
: PointerAndKnownNonNull(Pointer, IsKnownNonNull),
ElementType(ElementType), Alignment(Alignment) {
assert(Pointer != nullptr && "Pointer cannot be null");
assert(ElementType != nullptr && "Element type cannot be null");
}
- static Address invalid() { return Address(nullptr); }
+ inline RawAddress(Address Addr);
+
+ static RawAddress invalid() { return RawAddress(nullptr); }
bool isValid() const {
return PointerAndKnownNonNull.getPointer() != nullptr;
}
@@ -80,6 +95,165 @@ public:
return Alignment;
}
+ /// Return address with different element type, but same pointer and
+ /// alignment.
+ RawAddress withElementType(llvm::Type *ElemTy) const {
+ return RawAddress(getPointer(), ElemTy, getAlignment(), isKnownNonNull());
+ }
+
+ KnownNonNull_t isKnownNonNull() const {
+ assert(isValid());
+ return (KnownNonNull_t)PointerAndKnownNonNull.getInt();
+ }
+};
+
+/// Like RawAddress, an abstract representation of an aligned address, but the
+/// pointer contained in this class is possibly signed.
+///
+/// This is designed to be an IR-level abstraction, carrying just the
+/// information necessary to perform IR operations on an address like loads and
+/// stores. In particular, it doesn't carry C type information or allow the
+/// representation of things like bit-fields; clients working at that level
+/// should generally be using `LValue`.
+///
+/// An address may be either *raw*, meaning that it's an ordinary machine
+/// pointer, or *signed*, meaning that the pointer carries an embedded
+/// pointer-authentication signature. Representing signed pointers directly in
+/// this abstraction allows the authentication to be delayed as long as possible
+/// without forcing IRGen to use totally different code paths for signed and
+/// unsigned values or to separately propagate signature information through
+/// every API that manipulates addresses. Pointer arithmetic on signed addresses
+/// (e.g. drilling down to a struct field) is accumulated into a separate offset
+/// which is applied when the address is finally accessed.
+class Address {
+ friend class CGBuilderTy;
+
+ // The boolean flag indicates whether the pointer is known to be non-null.
+ llvm::PointerIntPair<llvm::Value *, 1, bool> Pointer;
+
+ /// The expected IR type of the pointer. Carrying accurate element type
+ /// information in Address makes it more convenient to work with Address
+ /// values and allows frontend assertions to catch simple mistakes.
+ llvm::Type *ElementType = nullptr;
+
+ CharUnits Alignment;
+
+ /// The ptrauth information needed to authenticate the base pointer.
+ CGPointerAuthInfo PtrAuthInfo;
+
+ /// Offset from the base pointer. This is non-null only when the base
+ /// pointer is signed.
+ llvm::Value *Offset = nullptr;
+
+ llvm::Value *emitRawPointerSlow(CodeGenFunction &CGF) const;
+
+protected:
+ Address(std::nullptr_t) : ElementType(nullptr) {}
+
+public:
+ Address(llvm::Value *pointer, llvm::Type *elementType, CharUnits alignment,
+ KnownNonNull_t IsKnownNonNull = NotKnownNonNull)
+ : Pointer(pointer, IsKnownNonNull), ElementType(elementType),
+ Alignment(alignment) {
+ assert(pointer != nullptr && "Pointer cannot be null");
+ assert(elementType != nullptr && "Element type cannot be null");
+ assert(!alignment.isZero() && "Alignment cannot be zero");
+ }
+
+ Address(llvm::Value *BasePtr, llvm::Type *ElementType, CharUnits Alignment,
+ CGPointerAuthInfo PtrAuthInfo, llvm::Value *Offset,
+ KnownNonNull_t IsKnownNonNull = NotKnownNonNull)
+ : Pointer(BasePtr, IsKnownNonNull), ElementType(ElementType),
+ Alignment(Alignment), PtrAuthInfo(PtrAuthInfo), Offset(Offset) {}
+
+ Address(RawAddress RawAddr)
+ : Pointer(RawAddr.isValid() ? RawAddr.getPointer() : nullptr,
+ RawAddr.isValid() ? RawAddr.isKnownNonNull() : NotKnownNonNull),
+ ElementType(RawAddr.isValid() ? RawAddr.getElementType() : nullptr),
+ Alignment(RawAddr.isValid() ? RawAddr.getAlignment()
+ : CharUnits::Zero()) {}
+
+ static Address invalid() { return Address(nullptr); }
+ bool isValid() const { return Pointer.getPointer() != nullptr; }
+
+ /// This function is used in situations where the caller is doing some sort of
+ /// opaque "laundering" of the pointer.
+ void replaceBasePointer(llvm::Value *P) {
+ assert(isValid() && "pointer isn't valid");
+ assert(P->getType() == Pointer.getPointer()->getType() &&
+ "Pointer's type changed");
+ Pointer.setPointer(P);
+ assert(isValid() && "pointer is invalid after replacement");
+ }
+
+ CharUnits getAlignment() const { return Alignment; }
+
+ void setAlignment(CharUnits Value) { Alignment = Value; }
+
+ llvm::Value *getBasePointer() const {
+ assert(isValid() && "pointer isn't valid");
+ return Pointer.getPointer();
+ }
+
+ /// Return the type of the pointer value.
+ llvm::PointerType *getType() const {
+ return llvm::PointerType::get(
+ ElementType,
+ llvm::cast<llvm::PointerType>(Pointer.getPointer()->getType())
+ ->getAddressSpace());
+ }
+
+ /// Return the type of the values stored in this address.
+ llvm::Type *getElementType() const {
+ assert(isValid());
+ return ElementType;
+ }
+
+ /// Return the address space that this address resides in.
+ unsigned getAddressSpace() const { return getType()->getAddressSpace(); }
+
+ /// Return the IR name of the pointer value.
+ llvm::StringRef getName() const { return Pointer.getPointer()->getName(); }
+
+ const CGPointerAuthInfo &getPointerAuthInfo() const { return PtrAuthInfo; }
+ void setPointerAuthInfo(const CGPointerAuthInfo &Info) { PtrAuthInfo = Info; }
+
+ // This function is called only in CGBuilderBaseTy::CreateElementBitCast.
+ void setElementType(llvm::Type *Ty) {
+ assert(hasOffset() &&
+ "this funcion shouldn't be called when there is no offset");
+ ElementType = Ty;
+ }
+
+ bool isSigned() const { return PtrAuthInfo.isSigned(); }
+
+ /// Whether the pointer is known not to be null.
+ KnownNonNull_t isKnownNonNull() const {
+ assert(isValid());
+ return (KnownNonNull_t)Pointer.getInt();
+ }
+
+ Address setKnownNonNull() {
+ assert(isValid());
+ Pointer.setInt(KnownNonNull);
+ return *this;
+ }
+
+ bool hasOffset() const { return Offset; }
+
+ llvm::Value *getOffset() const { return Offset; }
+
+ Address getResignedAddress(const CGPointerAuthInfo &NewInfo,
+ CodeGenFunction &CGF) const;
+
+ /// Return the pointer contained in this class after authenticating it and
+ /// adding offset to it if necessary.
+ llvm::Value *emitRawPointer(CodeGenFunction &CGF) const {
+ if (!isSigned())
+ return getBasePointer();
+ return emitRawPointerSlow(CGF);
+ }
+
/// Return address with different pointer, but same element type and
/// alignment.
Address withPointer(llvm::Value *NewPointer,
@@ -91,61 +265,60 @@ public:
/// Return address with different alignment, but same pointer and element
/// type.
Address withAlignment(CharUnits NewAlignment) const {
- return Address(getPointer(), getElementType(), NewAlignment,
+ return Address(Pointer.getPointer(), getElementType(), NewAlignment,
isKnownNonNull());
}
/// Return address with different element type, but same pointer and
/// alignment.
Address withElementType(llvm::Type *ElemTy) const {
- return Address(getPointer(), ElemTy, getAlignment(), isKnownNonNull());
- }
-
- /// Whether the pointer is known not to be null.
- KnownNonNull_t isKnownNonNull() const {
- assert(isValid());
- return (KnownNonNull_t)PointerAndKnownNonNull.getInt();
- }
-
- /// Set the non-null bit.
- Address setKnownNonNull() {
- assert(isValid());
- PointerAndKnownNonNull.setInt(true);
- return *this;
+ if (!hasOffset())
+ return Address(getBasePointer(), ElemTy, getAlignment(),
+ getPointerAuthInfo(), /*Offset=*/nullptr,
+ isKnownNonNull());
+ Address A(*this);
+ A.ElementType = ElemTy;
+ return A;
}
};
+inline RawAddress::RawAddress(Address Addr)
+ : PointerAndKnownNonNull(Addr.isValid() ? Addr.getBasePointer() : nullptr,
+ Addr.isValid() ? Addr.isKnownNonNull()
+ : NotKnownNonNull),
+ ElementType(Addr.isValid() ? Addr.getElementType() : nullptr),
+ Alignment(Addr.isValid() ? Addr.getAlignment() : CharUnits::Zero()) {}
+
/// A specialization of Address that requires the address to be an
/// LLVM Constant.
-class ConstantAddress : public Address {
- ConstantAddress(std::nullptr_t) : Address(nullptr) {}
+class ConstantAddress : public RawAddress {
+ ConstantAddress(std::nullptr_t) : RawAddress(nullptr) {}
public:
ConstantAddress(llvm::Constant *pointer, llvm::Type *elementType,
CharUnits alignment)
- : Address(pointer, elementType, alignment) {}
+ : RawAddress(pointer, elementType, alignment) {}
static ConstantAddress invalid() {
return ConstantAddress(nullptr);
}
llvm::Constant *getPointer() const {
- return llvm::cast<llvm::Constant>(Address::getPointer());
+ return llvm::cast<llvm::Constant>(RawAddress::getPointer());
}
ConstantAddress withElementType(llvm::Type *ElemTy) const {
return ConstantAddress(getPointer(), ElemTy, getAlignment());
}
- static bool isaImpl(Address addr) {
+ static bool isaImpl(RawAddress addr) {
return llvm::isa<llvm::Constant>(addr.getPointer());
}
- static ConstantAddress castImpl(Address addr) {
+ static ConstantAddress castImpl(RawAddress addr) {
return ConstantAddress(llvm::cast<llvm::Constant>(addr.getPointer()),
addr.getElementType(), addr.getAlignment());
}
};
-
}
// Present a minimal LLVM-like casting interface.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/BackendConsumer.h b/contrib/llvm-project/clang/lib/CodeGen/BackendConsumer.h
index 72a814cd43d7..a023d29cbd1d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/BackendConsumer.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/BackendConsumer.h
@@ -74,8 +74,8 @@ public:
const HeaderSearchOptions &HeaderSearchOpts,
const PreprocessorOptions &PPOpts,
const CodeGenOptions &CodeGenOpts,
- const TargetOptions &TargetOpts,
- const LangOptions &LangOpts, const std::string &InFile,
+ const TargetOptions &TargetOpts, const LangOptions &LangOpts,
+ const std::string &InFile,
SmallVector<LinkModule, 4> LinkModules,
std::unique_ptr<raw_pwrite_stream> OS, llvm::LLVMContext &C,
CoverageSourceInfo *CoverageInfo = nullptr);
@@ -88,9 +88,9 @@ public:
const HeaderSearchOptions &HeaderSearchOpts,
const PreprocessorOptions &PPOpts,
const CodeGenOptions &CodeGenOpts,
- const TargetOptions &TargetOpts,
- const LangOptions &LangOpts, llvm::Module *Module,
- SmallVector<LinkModule, 4> LinkModules, llvm::LLVMContext &C,
+ const TargetOptions &TargetOpts, const LangOptions &LangOpts,
+ llvm::Module *Module, SmallVector<LinkModule, 4> LinkModules,
+ llvm::LLVMContext &C,
CoverageSourceInfo *CoverageInfo = nullptr);
llvm::Module *getModule() const;
@@ -107,13 +107,12 @@ public:
void HandleTagDeclDefinition(TagDecl *D) override;
void HandleTagDeclRequiredDefinition(const TagDecl *D) override;
void CompleteTentativeDefinition(VarDecl *D) override;
- void CompleteExternalDeclaration(VarDecl *D) override;
+ void CompleteExternalDeclaration(DeclaratorDecl *D) override;
void AssignInheritanceModel(CXXRecordDecl *RD) override;
void HandleVTable(CXXRecordDecl *RD) override;
-
- // Links each entry in LinkModules into our module. Returns true on error.
- bool LinkInModules(llvm::Module *M, bool ShouldLinkFiles = true);
+ // Links each entry in LinkModules into our module. Returns true on error.
+ bool LinkInModules(llvm::Module *M);
/// Get the best possible source location to represent a diagnostic that
/// may have associated debug info.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
index 4f22d35f9d3a..e765bbf637a6 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
@@ -73,8 +73,10 @@
#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
#include "llvm/Transforms/Instrumentation/InstrProfiling.h"
#include "llvm/Transforms/Instrumentation/KCFI.h"
+#include "llvm/Transforms/Instrumentation/LowerAllowCheckPass.h"
#include "llvm/Transforms/Instrumentation/MemProfiler.h"
#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
+#include "llvm/Transforms/Instrumentation/NumericalStabilitySanitizer.h"
#include "llvm/Transforms/Instrumentation/PGOInstrumentation.h"
#include "llvm/Transforms/Instrumentation/SanitizerBinaryMetadata.h"
#include "llvm/Transforms/Instrumentation/SanitizerCoverage.h"
@@ -84,7 +86,6 @@
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Scalar/JumpThreading.h"
#include "llvm/Transforms/Utils/Debugify.h"
-#include "llvm/Transforms/Utils/EntryExitInstrumenter.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <memory>
#include <optional>
@@ -101,14 +102,24 @@ extern cl::opt<bool> PrintPipelinePasses;
// Experiment to move sanitizers earlier.
static cl::opt<bool> ClSanitizeOnOptimizerEarlyEP(
"sanitizer-early-opt-ep", cl::Optional,
- cl::desc("Insert sanitizers on OptimizerEarlyEP."), cl::init(false));
+ cl::desc("Insert sanitizers on OptimizerEarlyEP."));
+
+// Experiment to mark cold functions as optsize/minsize/optnone.
+// TODO: remove once this is exposed as a proper driver flag.
+static cl::opt<PGOOptions::ColdFuncOpt> ClPGOColdFuncAttr(
+ "pgo-cold-func-opt", cl::init(PGOOptions::ColdFuncOpt::Default), cl::Hidden,
+ cl::desc(
+ "Function attribute to apply to cold functions as determined by PGO"),
+ cl::values(clEnumValN(PGOOptions::ColdFuncOpt::Default, "default",
+ "Default (no attribute)"),
+ clEnumValN(PGOOptions::ColdFuncOpt::OptSize, "optsize",
+ "Mark cold functions with optsize."),
+ clEnumValN(PGOOptions::ColdFuncOpt::MinSize, "minsize",
+ "Mark cold functions with minsize."),
+ clEnumValN(PGOOptions::ColdFuncOpt::OptNone, "optnone",
+ "Mark cold functions with optnone.")));
extern cl::opt<InstrProfCorrelator::ProfCorrelatorKind> ProfileCorrelate;
-
-// Re-link builtin bitcodes after optimization
-cl::opt<bool> ClRelinkBuiltinBitcodePostop(
- "relink-builtin-bitcode-postop", cl::Optional,
- cl::desc("Re-link builtin bitcodes after optimization."), cl::init(false));
} // namespace llvm
namespace {
@@ -356,8 +367,6 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
llvm::TargetMachine::parseBinutilsVersion(CodeGenOpts.BinutilsVersion);
Options.UseInitArray = CodeGenOpts.UseInitArray;
Options.DisableIntegratedAS = CodeGenOpts.DisableIntegratedAS;
- Options.CompressDebugSections = CodeGenOpts.getCompressDebugSections();
- Options.RelaxELFRelocations = CodeGenOpts.RelaxELFRelocations;
// Set EABI version.
Options.EABIVersion = TargetOpts.EABIVersion;
@@ -382,6 +391,7 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
LangOptions::FPModeKind::FPM_FastHonorPragmas);
Options.ApproxFuncFPMath = LangOpts.ApproxFunc;
+ Options.BBAddrMap = CodeGenOpts.BBAddrMap;
Options.BBSections =
llvm::StringSwitch<llvm::BasicBlockSection>(CodeGenOpts.BBSections)
.Case("all", llvm::BasicBlockSection::All)
@@ -408,6 +418,7 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Options.UniqueSectionNames = CodeGenOpts.UniqueSectionNames;
Options.UniqueBasicBlockSectionNames =
CodeGenOpts.UniqueBasicBlockSectionNames;
+ Options.SeparateNamedSections = CodeGenOpts.SeparateNamedSections;
Options.TLSSize = CodeGenOpts.TLSSize;
Options.EnableTLSDESC = CodeGenOpts.EnableTLSDESC;
Options.EmulatedTLS = CodeGenOpts.EmulatedTLS;
@@ -459,6 +470,10 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Options.MCOptions.AsmVerbose = CodeGenOpts.AsmVerbose;
Options.MCOptions.Dwarf64 = CodeGenOpts.Dwarf64;
Options.MCOptions.PreserveAsmComments = CodeGenOpts.PreserveAsmComments;
+ Options.MCOptions.Crel = CodeGenOpts.Crel;
+ Options.MCOptions.X86RelaxRelocations = CodeGenOpts.RelaxELFRelocations;
+ Options.MCOptions.CompressDebugSections =
+ CodeGenOpts.getCompressDebugSections();
Options.MCOptions.ABIName = TargetOpts.ABI;
for (const auto &Entry : HSOpts.UserEntries)
if (!Entry.IsFramework &&
@@ -694,6 +709,9 @@ static void addSanitizers(const Triple &TargetTriple,
MPM.addPass(createModuleToFunctionPassAdaptor(ThreadSanitizerPass()));
}
+ if (LangOpts.Sanitize.has(SanitizerKind::NumericalStability))
+ MPM.addPass(NumericalStabilitySanitizerPass());
+
auto ASanPass = [&](SanitizerMask Mask, bool CompileKernel) {
if (LangOpts.Sanitize.has(Mask)) {
bool UseGlobalGC = asanUseGlobalsGC(TargetTriple, CodeGenOpts);
@@ -742,6 +760,16 @@ static void addSanitizers(const Triple &TargetTriple,
// LastEP does not need GlobalsAA.
PB.registerOptimizerLastEPCallback(SanitizersCallback);
}
+
+ if (LowerAllowCheckPass::IsRequested()) {
+ // We can optimize after inliner, and PGO profile matching. The hook below
+ // is called at the end `buildFunctionSimplificationPipeline`, which called
+ // from `buildInlinerPipeline`, which called after profile matching.
+ PB.registerScalarOptimizerLateEPCallback(
+ [](FunctionPassManager &FPM, OptimizationLevel Level) {
+ FPM.addPass(LowerAllowCheckPass());
+ });
+ }
}
void EmitAssemblyHelper::RunOptimizationPipeline(
@@ -755,37 +783,41 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
CodeGenOpts.InstrProfileOutput.empty() ? getDefaultProfileGenName()
: CodeGenOpts.InstrProfileOutput,
"", "", CodeGenOpts.MemoryProfileUsePath, nullptr, PGOOptions::IRInstr,
- PGOOptions::NoCSAction, CodeGenOpts.DebugInfoForProfiling,
+ PGOOptions::NoCSAction, ClPGOColdFuncAttr,
+ CodeGenOpts.DebugInfoForProfiling,
/*PseudoProbeForProfiling=*/false, CodeGenOpts.AtomicProfileUpdate);
else if (CodeGenOpts.hasProfileIRUse()) {
// -fprofile-use.
auto CSAction = CodeGenOpts.hasProfileCSIRUse() ? PGOOptions::CSIRUse
: PGOOptions::NoCSAction;
- PGOOpt = PGOOptions(
- CodeGenOpts.ProfileInstrumentUsePath, "",
- CodeGenOpts.ProfileRemappingFile, CodeGenOpts.MemoryProfileUsePath, VFS,
- PGOOptions::IRUse, CSAction, CodeGenOpts.DebugInfoForProfiling);
+ PGOOpt = PGOOptions(CodeGenOpts.ProfileInstrumentUsePath, "",
+ CodeGenOpts.ProfileRemappingFile,
+ CodeGenOpts.MemoryProfileUsePath, VFS,
+ PGOOptions::IRUse, CSAction, ClPGOColdFuncAttr,
+ CodeGenOpts.DebugInfoForProfiling);
} else if (!CodeGenOpts.SampleProfileFile.empty())
// -fprofile-sample-use
PGOOpt = PGOOptions(
CodeGenOpts.SampleProfileFile, "", CodeGenOpts.ProfileRemappingFile,
CodeGenOpts.MemoryProfileUsePath, VFS, PGOOptions::SampleUse,
- PGOOptions::NoCSAction, CodeGenOpts.DebugInfoForProfiling,
- CodeGenOpts.PseudoProbeForProfiling);
+ PGOOptions::NoCSAction, ClPGOColdFuncAttr,
+ CodeGenOpts.DebugInfoForProfiling, CodeGenOpts.PseudoProbeForProfiling);
else if (!CodeGenOpts.MemoryProfileUsePath.empty())
// -fmemory-profile-use (without any of the above options)
PGOOpt = PGOOptions("", "", "", CodeGenOpts.MemoryProfileUsePath, VFS,
PGOOptions::NoAction, PGOOptions::NoCSAction,
- CodeGenOpts.DebugInfoForProfiling);
+ ClPGOColdFuncAttr, CodeGenOpts.DebugInfoForProfiling);
else if (CodeGenOpts.PseudoProbeForProfiling)
// -fpseudo-probe-for-profiling
- PGOOpt = PGOOptions("", "", "", /*MemoryProfile=*/"", nullptr,
- PGOOptions::NoAction, PGOOptions::NoCSAction,
- CodeGenOpts.DebugInfoForProfiling, true);
+ PGOOpt =
+ PGOOptions("", "", "", /*MemoryProfile=*/"", nullptr,
+ PGOOptions::NoAction, PGOOptions::NoCSAction,
+ ClPGOColdFuncAttr, CodeGenOpts.DebugInfoForProfiling, true);
else if (CodeGenOpts.DebugInfoForProfiling)
// -fdebug-info-for-profiling
PGOOpt = PGOOptions("", "", "", /*MemoryProfile=*/"", nullptr,
- PGOOptions::NoAction, PGOOptions::NoCSAction, true);
+ PGOOptions::NoAction, PGOOptions::NoCSAction,
+ ClPGOColdFuncAttr, true);
// Check to see if we want to generate a CS profile.
if (CodeGenOpts.hasProfileCSIRInstr()) {
@@ -802,13 +834,13 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
: CodeGenOpts.InstrProfileOutput;
PGOOpt->CSAction = PGOOptions::CSIRInstr;
} else
- PGOOpt =
- PGOOptions("",
- CodeGenOpts.InstrProfileOutput.empty()
- ? getDefaultProfileGenName()
- : CodeGenOpts.InstrProfileOutput,
- "", /*MemoryProfile=*/"", nullptr, PGOOptions::NoAction,
- PGOOptions::CSIRInstr, CodeGenOpts.DebugInfoForProfiling);
+ PGOOpt = PGOOptions("",
+ CodeGenOpts.InstrProfileOutput.empty()
+ ? getDefaultProfileGenName()
+ : CodeGenOpts.InstrProfileOutput,
+ "", /*MemoryProfile=*/"", nullptr,
+ PGOOptions::NoAction, PGOOptions::CSIRInstr,
+ ClPGOColdFuncAttr, CodeGenOpts.DebugInfoForProfiling);
}
if (TM)
TM->setPGOOption(PGOOpt);
@@ -955,22 +987,6 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
/*DropTypeTests=*/true));
});
- if (CodeGenOpts.InstrumentFunctions ||
- CodeGenOpts.InstrumentFunctionEntryBare ||
- CodeGenOpts.InstrumentFunctionsAfterInlining ||
- CodeGenOpts.InstrumentForProfiling) {
- PB.registerPipelineStartEPCallback(
- [](ModulePassManager &MPM, OptimizationLevel Level) {
- MPM.addPass(createModuleToFunctionPassAdaptor(
- EntryExitInstrumenterPass(/*PostInlining=*/false)));
- });
- PB.registerOptimizerLastEPCallback(
- [](ModulePassManager &MPM, OptimizationLevel Level) {
- MPM.addPass(createModuleToFunctionPassAdaptor(
- EntryExitInstrumenterPass(/*PostInlining=*/true)));
- });
- }
-
// Register callbacks to schedule sanitizer passes at the appropriate part
// of the pipeline.
if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds))
@@ -1022,12 +1038,9 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
}
}
- // Re-link against any bitcodes supplied via the -mlink-builtin-bitcode option
- // Some optimizations may generate new function calls that would not have
- // been linked pre-optimization (i.e. fused sincos calls generated by
- // AMDGPULibCalls::fold_sincos.)
- if (ClRelinkBuiltinBitcodePostop)
- MPM.addPass(LinkInModulesPass(BC, false));
+ // Link against bitcodes supplied via the -mlink-builtin-bitcode option
+ if (CodeGenOpts.LinkBitcodePostopt)
+ MPM.addPass(LinkInModulesPass(BC));
// Add a verifier pass if requested. We don't have to do this if the action
// requires code generation because there will already be a verifier pass in
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
index 52e6ddb7d6af..fbf942d06ca6 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
@@ -80,7 +80,7 @@ namespace {
AtomicSizeInBits = C.toBits(
C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
.alignTo(lvalue.getAlignment()));
- llvm::Value *BitFieldPtr = lvalue.getBitFieldPointer();
+ llvm::Value *BitFieldPtr = lvalue.getRawBitFieldPointer(CGF);
auto OffsetInChars =
(C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
lvalue.getAlignment();
@@ -139,18 +139,18 @@ namespace {
const LValue &getAtomicLValue() const { return LVal; }
llvm::Value *getAtomicPointer() const {
if (LVal.isSimple())
- return LVal.getPointer(CGF);
+ return LVal.emitRawPointer(CGF);
else if (LVal.isBitField())
- return LVal.getBitFieldPointer();
+ return LVal.getRawBitFieldPointer(CGF);
else if (LVal.isVectorElt())
- return LVal.getVectorPointer();
+ return LVal.getRawVectorPointer(CGF);
assert(LVal.isExtVectorElt());
- return LVal.getExtVectorPointer();
+ return LVal.getRawExtVectorPointer(CGF);
}
Address getAtomicAddress() const {
llvm::Type *ElTy;
if (LVal.isSimple())
- ElTy = LVal.getAddress(CGF).getElementType();
+ ElTy = LVal.getAddress().getElementType();
else if (LVal.isBitField())
ElTy = LVal.getBitFieldAddress().getElementType();
else if (LVal.isVectorElt())
@@ -194,12 +194,14 @@ namespace {
RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
SourceLocation loc, bool AsValue) const;
- /// Converts a rvalue to integer value.
- llvm::Value *convertRValueToInt(RValue RVal) const;
+ llvm::Value *getScalarRValValueOrNull(RValue RVal) const;
- RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
- AggValueSlot ResultSlot,
- SourceLocation Loc, bool AsValue) const;
+ /// Converts an rvalue to integer value if needed.
+ llvm::Value *convertRValueToInt(RValue RVal, bool CmpXchg = false) const;
+
+ RValue ConvertToValueOrAtomic(llvm::Value *IntVal, AggValueSlot ResultSlot,
+ SourceLocation Loc, bool AsValue,
+ bool CmpXchg = false) const;
/// Copy an atomic r-value into atomic-layout memory.
void emitCopyIntoMemory(RValue rvalue) const;
@@ -261,7 +263,8 @@ namespace {
void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
llvm::AtomicOrdering AO, bool IsVolatile);
/// Emits atomic load as LLVM instruction.
- llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
+ llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile,
+ bool CmpXchg = false);
/// Emits atomic compare-and-exchange op as a libcall.
llvm::Value *EmitAtomicCompareExchangeLibcall(
llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
@@ -360,12 +363,12 @@ bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
assert(LVal.isSimple());
- Address addr = LVal.getAddress(CGF);
+ Address addr = LVal.getAddress();
if (!requiresMemSetZero(addr.getElementType()))
return false;
CGF.Builder.CreateMemSet(
- addr.getPointer(), llvm::ConstantInt::get(CGF.Int8Ty, 0),
+ addr.emitRawPointer(CGF), llvm::ConstantInt::get(CGF.Int8Ty, 0),
CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
LVal.getAlignment().getAsAlign());
return true;
@@ -811,29 +814,6 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
Builder.SetInsertPoint(ContBB);
}
-static void
-AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
- bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
- SourceLocation Loc, CharUnits SizeInChars) {
- if (UseOptimizedLibcall) {
- // Load value and pass it to the function directly.
- CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
- int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
- ValTy =
- CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
- llvm::Type *ITy = llvm::IntegerType::get(CGF.getLLVMContext(), SizeInBits);
- Address Ptr = Address(Val, ITy, Align);
- Val = CGF.EmitLoadOfScalar(Ptr, false,
- CGF.getContext().getPointerType(ValTy),
- Loc);
- // Coerce the value into an appropriately sized integer type.
- Args.add(RValue::get(Val), ValTy);
- } else {
- // Non-optimized functions always take a reference.
- Args.add(RValue::get(Val), CGF.getContext().VoidPtrTy);
- }
-}
-
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
QualType MemTy = AtomicTy;
@@ -857,22 +837,16 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
uint64_t Size = TInfo.Width.getQuantity();
unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
- bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
- bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
- bool UseLibcall = Misaligned | Oversized;
- bool ShouldCastToIntPtrTy = true;
-
CharUnits MaxInlineWidth =
getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
-
DiagnosticsEngine &Diags = CGM.getDiags();
-
+ bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
+ bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
if (Misaligned) {
Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
<< (int)TInfo.Width.getQuantity()
<< (int)Ptr.getAlignment().getQuantity();
}
-
if (Oversized) {
Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
<< (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
@@ -881,6 +855,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
llvm::Value *Order = EmitScalarExpr(E->getOrder());
llvm::Value *Scope =
E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
+ bool ShouldCastToIntPtrTy = true;
switch (E->getOp()) {
case AtomicExpr::AO__c11_atomic_init:
@@ -1047,122 +1022,25 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
Dest = Atomics.castToAtomicIntPointer(Dest);
}
- // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
+ bool PowerOf2Size = (Size & (Size - 1)) == 0;
+ bool UseLibcall = !PowerOf2Size || (Size > 16);
+
+ // For atomics larger than 16 bytes, emit a libcall from the frontend. This
+ // avoids the overhead of dealing with excessively-large value types in IR.
+ // Non-power-of-2 values also lower to libcall here, as they are not currently
+ // permitted in IR instructions (although that constraint could be relaxed in
+ // the future). For other cases where a libcall is required on a given
+ // platform, we let the backend handle it (this includes handling for all of
+ // the size-optimized libcall variants, which are only valid up to 16 bytes.)
+ //
+ // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
if (UseLibcall) {
- bool UseOptimizedLibcall = false;
- switch (E->getOp()) {
- case AtomicExpr::AO__c11_atomic_init:
- case AtomicExpr::AO__opencl_atomic_init:
- llvm_unreachable("Already handled above with EmitAtomicInit!");
-
- case AtomicExpr::AO__atomic_fetch_add:
- case AtomicExpr::AO__atomic_fetch_and:
- case AtomicExpr::AO__atomic_fetch_max:
- case AtomicExpr::AO__atomic_fetch_min:
- case AtomicExpr::AO__atomic_fetch_nand:
- case AtomicExpr::AO__atomic_fetch_or:
- case AtomicExpr::AO__atomic_fetch_sub:
- case AtomicExpr::AO__atomic_fetch_xor:
- case AtomicExpr::AO__atomic_add_fetch:
- case AtomicExpr::AO__atomic_and_fetch:
- case AtomicExpr::AO__atomic_max_fetch:
- case AtomicExpr::AO__atomic_min_fetch:
- case AtomicExpr::AO__atomic_nand_fetch:
- case AtomicExpr::AO__atomic_or_fetch:
- case AtomicExpr::AO__atomic_sub_fetch:
- case AtomicExpr::AO__atomic_xor_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_add:
- case AtomicExpr::AO__c11_atomic_fetch_and:
- case AtomicExpr::AO__c11_atomic_fetch_max:
- case AtomicExpr::AO__c11_atomic_fetch_min:
- case AtomicExpr::AO__c11_atomic_fetch_nand:
- case AtomicExpr::AO__c11_atomic_fetch_or:
- case AtomicExpr::AO__c11_atomic_fetch_sub:
- case AtomicExpr::AO__c11_atomic_fetch_xor:
- case AtomicExpr::AO__hip_atomic_fetch_add:
- case AtomicExpr::AO__hip_atomic_fetch_and:
- case AtomicExpr::AO__hip_atomic_fetch_max:
- case AtomicExpr::AO__hip_atomic_fetch_min:
- case AtomicExpr::AO__hip_atomic_fetch_or:
- case AtomicExpr::AO__hip_atomic_fetch_sub:
- case AtomicExpr::AO__hip_atomic_fetch_xor:
- case AtomicExpr::AO__opencl_atomic_fetch_add:
- case AtomicExpr::AO__opencl_atomic_fetch_and:
- case AtomicExpr::AO__opencl_atomic_fetch_max:
- case AtomicExpr::AO__opencl_atomic_fetch_min:
- case AtomicExpr::AO__opencl_atomic_fetch_or:
- case AtomicExpr::AO__opencl_atomic_fetch_sub:
- case AtomicExpr::AO__opencl_atomic_fetch_xor:
- case AtomicExpr::AO__scoped_atomic_fetch_add:
- case AtomicExpr::AO__scoped_atomic_fetch_and:
- case AtomicExpr::AO__scoped_atomic_fetch_max:
- case AtomicExpr::AO__scoped_atomic_fetch_min:
- case AtomicExpr::AO__scoped_atomic_fetch_nand:
- case AtomicExpr::AO__scoped_atomic_fetch_or:
- case AtomicExpr::AO__scoped_atomic_fetch_sub:
- case AtomicExpr::AO__scoped_atomic_fetch_xor:
- case AtomicExpr::AO__scoped_atomic_add_fetch:
- case AtomicExpr::AO__scoped_atomic_and_fetch:
- case AtomicExpr::AO__scoped_atomic_max_fetch:
- case AtomicExpr::AO__scoped_atomic_min_fetch:
- case AtomicExpr::AO__scoped_atomic_nand_fetch:
- case AtomicExpr::AO__scoped_atomic_or_fetch:
- case AtomicExpr::AO__scoped_atomic_sub_fetch:
- case AtomicExpr::AO__scoped_atomic_xor_fetch:
- // For these, only library calls for certain sizes exist.
- UseOptimizedLibcall = true;
- break;
-
- case AtomicExpr::AO__atomic_load:
- case AtomicExpr::AO__atomic_store:
- case AtomicExpr::AO__atomic_exchange:
- case AtomicExpr::AO__atomic_compare_exchange:
- case AtomicExpr::AO__scoped_atomic_load:
- case AtomicExpr::AO__scoped_atomic_store:
- case AtomicExpr::AO__scoped_atomic_exchange:
- case AtomicExpr::AO__scoped_atomic_compare_exchange:
- // Use the generic version if we don't know that the operand will be
- // suitably aligned for the optimized version.
- if (Misaligned)
- break;
- [[fallthrough]];
- case AtomicExpr::AO__atomic_load_n:
- case AtomicExpr::AO__atomic_store_n:
- case AtomicExpr::AO__atomic_exchange_n:
- case AtomicExpr::AO__atomic_compare_exchange_n:
- case AtomicExpr::AO__c11_atomic_load:
- case AtomicExpr::AO__c11_atomic_store:
- case AtomicExpr::AO__c11_atomic_exchange:
- case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
- case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
- case AtomicExpr::AO__hip_atomic_load:
- case AtomicExpr::AO__hip_atomic_store:
- case AtomicExpr::AO__hip_atomic_exchange:
- case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
- case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
- case AtomicExpr::AO__opencl_atomic_load:
- case AtomicExpr::AO__opencl_atomic_store:
- case AtomicExpr::AO__opencl_atomic_exchange:
- case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
- case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
- case AtomicExpr::AO__scoped_atomic_load_n:
- case AtomicExpr::AO__scoped_atomic_store_n:
- case AtomicExpr::AO__scoped_atomic_exchange_n:
- case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
- // Only use optimized library calls for sizes for which they exist.
- // FIXME: Size == 16 optimized library functions exist too.
- if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
- UseOptimizedLibcall = true;
- break;
- }
-
CallArgList Args;
- if (!UseOptimizedLibcall) {
- // For non-optimized library calls, the size is the first parameter
- Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
- getContext().getSizeType());
- }
- // Atomic address is the first or second parameter
+ // For non-optimized library calls, the size is the first parameter.
+ Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
+ getContext().getSizeType());
+
+ // The atomic address is the second parameter.
// The OpenCL atomic library functions only accept pointer arguments to
// generic address space.
auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
@@ -1178,17 +1056,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
*this, V, AS, LangAS::opencl_generic, DestType, false);
};
- Args.add(RValue::get(CastToGenericAddrSpace(Ptr.getPointer(),
+ Args.add(RValue::get(CastToGenericAddrSpace(Ptr.emitRawPointer(*this),
E->getPtr()->getType())),
getContext().VoidPtrTy);
+ // The next 1-3 parameters are op-dependent.
std::string LibCallName;
- QualType LoweredMemTy =
- MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
QualType RetTy;
bool HaveRetTy = false;
- llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
- bool PostOpMinMax = false;
switch (E->getOp()) {
case AtomicExpr::AO__c11_atomic_init:
case AtomicExpr::AO__opencl_atomic_init:
@@ -1199,8 +1074,6 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// and exchange.
// bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
// void *desired, int success, int failure)
- // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
- // int success, int failure)
case AtomicExpr::AO__atomic_compare_exchange:
case AtomicExpr::AO__atomic_compare_exchange_n:
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
@@ -1214,17 +1087,17 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
LibCallName = "__atomic_compare_exchange";
RetTy = getContext().BoolTy;
HaveRetTy = true;
- Args.add(RValue::get(CastToGenericAddrSpace(Val1.getPointer(),
+ Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
E->getVal1()->getType())),
getContext().VoidPtrTy);
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
- MemTy, E->getExprLoc(), TInfo.Width);
+ Args.add(RValue::get(CastToGenericAddrSpace(Val2.emitRawPointer(*this),
+ E->getVal2()->getType())),
+ getContext().VoidPtrTy);
Args.add(RValue::get(Order), getContext().IntTy);
Order = OrderFail;
break;
// void __atomic_exchange(size_t size, void *mem, void *val, void *return,
// int order)
- // T __atomic_exchange_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_exchange:
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__c11_atomic_exchange:
@@ -1233,11 +1106,11 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__scoped_atomic_exchange:
case AtomicExpr::AO__scoped_atomic_exchange_n:
LibCallName = "__atomic_exchange";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- MemTy, E->getExprLoc(), TInfo.Width);
+ Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
+ E->getVal1()->getType())),
+ getContext().VoidPtrTy);
break;
// void __atomic_store(size_t size, void *mem, void *val, int order)
- // void __atomic_store_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_store_n:
case AtomicExpr::AO__c11_atomic_store:
@@ -1248,11 +1121,11 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
LibCallName = "__atomic_store";
RetTy = getContext().VoidTy;
HaveRetTy = true;
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- MemTy, E->getExprLoc(), TInfo.Width);
+ Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
+ E->getVal1()->getType())),
+ getContext().VoidPtrTy);
break;
// void __atomic_load(size_t size, void *mem, void *return, int order)
- // T __atomic_load_N(T *mem, int order)
case AtomicExpr::AO__atomic_load:
case AtomicExpr::AO__atomic_load_n:
case AtomicExpr::AO__c11_atomic_load:
@@ -1262,183 +1135,86 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__scoped_atomic_load_n:
LibCallName = "__atomic_load";
break;
- // T __atomic_add_fetch_N(T *mem, T val, int order)
- // T __atomic_fetch_add_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_add_fetch:
case AtomicExpr::AO__scoped_atomic_add_fetch:
- PostOp = llvm::Instruction::Add;
- [[fallthrough]];
case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__hip_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__scoped_atomic_fetch_add:
- LibCallName = "__atomic_fetch_add";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- LoweredMemTy, E->getExprLoc(), TInfo.Width);
- break;
- // T __atomic_and_fetch_N(T *mem, T val, int order)
- // T __atomic_fetch_and_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_and_fetch:
case AtomicExpr::AO__scoped_atomic_and_fetch:
- PostOp = llvm::Instruction::And;
- [[fallthrough]];
case AtomicExpr::AO__atomic_fetch_and:
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__hip_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__scoped_atomic_fetch_and:
- LibCallName = "__atomic_fetch_and";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- MemTy, E->getExprLoc(), TInfo.Width);
- break;
- // T __atomic_or_fetch_N(T *mem, T val, int order)
- // T __atomic_fetch_or_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_or_fetch:
case AtomicExpr::AO__scoped_atomic_or_fetch:
- PostOp = llvm::Instruction::Or;
- [[fallthrough]];
case AtomicExpr::AO__atomic_fetch_or:
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__hip_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__scoped_atomic_fetch_or:
- LibCallName = "__atomic_fetch_or";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- MemTy, E->getExprLoc(), TInfo.Width);
- break;
- // T __atomic_sub_fetch_N(T *mem, T val, int order)
- // T __atomic_fetch_sub_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_sub_fetch:
case AtomicExpr::AO__scoped_atomic_sub_fetch:
- PostOp = llvm::Instruction::Sub;
- [[fallthrough]];
case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__hip_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
case AtomicExpr::AO__scoped_atomic_fetch_sub:
- LibCallName = "__atomic_fetch_sub";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- LoweredMemTy, E->getExprLoc(), TInfo.Width);
- break;
- // T __atomic_xor_fetch_N(T *mem, T val, int order)
- // T __atomic_fetch_xor_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_xor_fetch:
case AtomicExpr::AO__scoped_atomic_xor_fetch:
- PostOp = llvm::Instruction::Xor;
- [[fallthrough]];
case AtomicExpr::AO__atomic_fetch_xor:
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__hip_atomic_fetch_xor:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
case AtomicExpr::AO__scoped_atomic_fetch_xor:
- LibCallName = "__atomic_fetch_xor";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- MemTy, E->getExprLoc(), TInfo.Width);
- break;
+ case AtomicExpr::AO__atomic_nand_fetch:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__c11_atomic_fetch_nand:
+ case AtomicExpr::AO__scoped_atomic_fetch_nand:
+ case AtomicExpr::AO__scoped_atomic_nand_fetch:
case AtomicExpr::AO__atomic_min_fetch:
- case AtomicExpr::AO__scoped_atomic_min_fetch:
- PostOpMinMax = true;
- [[fallthrough]];
case AtomicExpr::AO__atomic_fetch_min:
case AtomicExpr::AO__c11_atomic_fetch_min:
- case AtomicExpr::AO__scoped_atomic_fetch_min:
case AtomicExpr::AO__hip_atomic_fetch_min:
case AtomicExpr::AO__opencl_atomic_fetch_min:
- LibCallName = E->getValueType()->isSignedIntegerType()
- ? "__atomic_fetch_min"
- : "__atomic_fetch_umin";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- LoweredMemTy, E->getExprLoc(), TInfo.Width);
- break;
+ case AtomicExpr::AO__scoped_atomic_fetch_min:
+ case AtomicExpr::AO__scoped_atomic_min_fetch:
case AtomicExpr::AO__atomic_max_fetch:
- case AtomicExpr::AO__scoped_atomic_max_fetch:
- PostOpMinMax = true;
- [[fallthrough]];
case AtomicExpr::AO__atomic_fetch_max:
case AtomicExpr::AO__c11_atomic_fetch_max:
case AtomicExpr::AO__hip_atomic_fetch_max:
case AtomicExpr::AO__opencl_atomic_fetch_max:
case AtomicExpr::AO__scoped_atomic_fetch_max:
- LibCallName = E->getValueType()->isSignedIntegerType()
- ? "__atomic_fetch_max"
- : "__atomic_fetch_umax";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- LoweredMemTy, E->getExprLoc(), TInfo.Width);
- break;
- // T __atomic_nand_fetch_N(T *mem, T val, int order)
- // T __atomic_fetch_nand_N(T *mem, T val, int order)
- case AtomicExpr::AO__atomic_nand_fetch:
- case AtomicExpr::AO__scoped_atomic_nand_fetch:
- PostOp = llvm::Instruction::And; // the NOT is special cased below
- [[fallthrough]];
- case AtomicExpr::AO__atomic_fetch_nand:
- case AtomicExpr::AO__c11_atomic_fetch_nand:
- case AtomicExpr::AO__scoped_atomic_fetch_nand:
- LibCallName = "__atomic_fetch_nand";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- MemTy, E->getExprLoc(), TInfo.Width);
- break;
+ case AtomicExpr::AO__scoped_atomic_max_fetch:
+ llvm_unreachable("Integral atomic operations always become atomicrmw!");
}
if (E->isOpenCL()) {
- LibCallName = std::string("__opencl") +
- StringRef(LibCallName).drop_front(1).str();
-
+ LibCallName =
+ std::string("__opencl") + StringRef(LibCallName).drop_front(1).str();
}
- // Optimized functions have the size in their name.
- if (UseOptimizedLibcall)
- LibCallName += "_" + llvm::utostr(Size);
// By default, assume we return a value of the atomic type.
if (!HaveRetTy) {
- if (UseOptimizedLibcall) {
- // Value is returned directly.
- // The function returns an appropriately sized integer type.
- RetTy = getContext().getIntTypeForBitwidth(
- getContext().toBits(TInfo.Width), /*Signed=*/false);
- } else {
- // Value is returned through parameter before the order.
- RetTy = getContext().VoidTy;
- Args.add(RValue::get(Dest.getPointer()), getContext().VoidPtrTy);
- }
+ // Value is returned through parameter before the order.
+ RetTy = getContext().VoidTy;
+ Args.add(RValue::get(
+ CastToGenericAddrSpace(Dest.emitRawPointer(*this), RetTy)),
+ getContext().VoidPtrTy);
}
- // order is always the last parameter
+ // Order is always the last parameter.
Args.add(RValue::get(Order),
getContext().IntTy);
if (E->isOpenCL())
Args.add(RValue::get(Scope), getContext().IntTy);
- // PostOp is only needed for the atomic_*_fetch operations, and
- // thus is only needed for and implemented in the
- // UseOptimizedLibcall codepath.
- assert(UseOptimizedLibcall || (!PostOp && !PostOpMinMax));
-
RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
// The value is returned directly from the libcall.
if (E->isCmpXChg())
return Res;
- // The value is returned directly for optimized libcalls but the expr
- // provided an out-param.
- if (UseOptimizedLibcall && Res.getScalarVal()) {
- llvm::Value *ResVal = Res.getScalarVal();
- if (PostOpMinMax) {
- llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
- ResVal = EmitPostAtomicMinMax(Builder, E->getOp(),
- E->getValueType()->isSignedIntegerType(),
- ResVal, LoadVal1);
- } else if (PostOp) {
- llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
- ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
- }
- if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch ||
- E->getOp() == AtomicExpr::AO__scoped_atomic_nand_fetch)
- ResVal = Builder.CreateNot(ResVal);
-
- Builder.CreateStore(ResVal, Dest.withElementType(ResVal->getType()));
- }
-
if (RValTy->isVoidType())
return RValue::get(nullptr);
@@ -1625,12 +1401,26 @@ RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
LVal.getBaseInfo(), TBAAAccessInfo()));
}
-RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
- AggValueSlot ResultSlot,
- SourceLocation Loc,
- bool AsValue) const {
+/// Return true if \param ValTy is a type that should be casted to integer
+/// around the atomic memory operation. If \param CmpXchg is true, then the
+/// cast of a floating point type is made as that instruction can not have
+/// floating point operands. TODO: Allow compare-and-exchange and FP - see
+/// comment in AtomicExpandPass.cpp.
+static bool shouldCastToInt(llvm::Type *ValTy, bool CmpXchg) {
+ if (ValTy->isFloatingPointTy())
+ return ValTy->isX86_FP80Ty() || CmpXchg;
+ return !ValTy->isIntegerTy() && !ValTy->isPointerTy();
+}
+
+RValue AtomicInfo::ConvertToValueOrAtomic(llvm::Value *Val,
+ AggValueSlot ResultSlot,
+ SourceLocation Loc, bool AsValue,
+ bool CmpXchg) const {
// Try not to in some easy cases.
- assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
+ assert((Val->getType()->isIntegerTy() || Val->getType()->isPointerTy() ||
+ Val->getType()->isIEEELikeFPTy()) &&
+ "Expected integer, pointer or floating point value when converting "
+ "result.");
if (getEvaluationKind() == TEK_Scalar &&
(((!LVal.isBitField() ||
LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
@@ -1639,13 +1429,13 @@ RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
auto *ValTy = AsValue
? CGF.ConvertTypeForMem(ValueTy)
: getAtomicAddress().getElementType();
- if (ValTy->isIntegerTy()) {
- assert(IntVal->getType() == ValTy && "Different integer types.");
- return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
- } else if (ValTy->isPointerTy())
- return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
- else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
- return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
+ if (!shouldCastToInt(ValTy, CmpXchg)) {
+ assert((!ValTy->isIntegerTy() || Val->getType() == ValTy) &&
+ "Different integer types.");
+ return RValue::get(CGF.EmitFromMemory(Val, ValueTy));
+ }
+ if (llvm::CastInst::isBitCastable(Val->getType(), ValTy))
+ return RValue::get(CGF.Builder.CreateBitCast(Val, ValTy));
}
// Create a temporary. This needs to be big enough to hold the
@@ -1662,8 +1452,7 @@ RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
// Slam the integer into the temporary.
Address CastTemp = castToAtomicIntPointer(Temp);
- CGF.Builder.CreateStore(IntVal, CastTemp)
- ->setVolatile(TempIsVolatile);
+ CGF.Builder.CreateStore(Val, CastTemp)->setVolatile(TempIsVolatile);
return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}
@@ -1682,9 +1471,11 @@ void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
}
llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
- bool IsVolatile) {
+ bool IsVolatile, bool CmpXchg) {
// Okay, we're doing this natively.
- Address Addr = getAtomicAddressAsAtomicIntPointer();
+ Address Addr = getAtomicAddress();
+ if (shouldCastToInt(Addr.getElementType(), CmpXchg))
+ Addr = castToAtomicIntPointer(Addr);
llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
Load->setAtomic(AO);
@@ -1736,7 +1527,7 @@ RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
} else
TempAddr = CreateTempAlloca();
- EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
+ EmitAtomicLoadLibcall(TempAddr.emitRawPointer(CGF), AO, IsVolatile);
// Okay, turn that back into the original value or whole atomic (for
// non-simple lvalues) type.
@@ -1752,7 +1543,7 @@ RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
// Okay, turn that back into the original value or atomic (for non-simple
// lvalues) type.
- return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
+ return ConvertToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}
/// Emit a load from an l-value of atomic type. Note that the r-value
@@ -1812,23 +1603,26 @@ Address AtomicInfo::materializeRValue(RValue rvalue) const {
LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
AtomicInfo Atomics(CGF, TempLV);
Atomics.emitCopyIntoMemory(rvalue);
- return TempLV.getAddress(CGF);
+ return TempLV.getAddress();
}
-llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
+llvm::Value *AtomicInfo::getScalarRValValueOrNull(RValue RVal) const {
+ if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple()))
+ return RVal.getScalarVal();
+ return nullptr;
+}
+
+llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal, bool CmpXchg) const {
// If we've got a scalar value of the right size, try to avoid going
- // through memory.
- if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
- llvm::Value *Value = RVal.getScalarVal();
- if (isa<llvm::IntegerType>(Value->getType()))
+ // through memory. Floats get casted if needed by AtomicExpandPass.
+ if (llvm::Value *Value = getScalarRValValueOrNull(RVal)) {
+ if (!shouldCastToInt(Value->getType(), CmpXchg))
return CGF.EmitToMemory(Value, ValueTy);
else {
llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
CGF.getLLVMContext(),
LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
- if (isa<llvm::PointerType>(Value->getType()))
- return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
- else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
+ if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
return CGF.Builder.CreateBitCast(Value, InputIntTy);
}
}
@@ -1889,9 +1683,9 @@ std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
if (shouldUseLibcall()) {
// Produce a source address.
Address ExpectedAddr = materializeRValue(Expected);
- Address DesiredAddr = materializeRValue(Desired);
- auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
- DesiredAddr.getPointer(),
+ llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
+ llvm::Value *DesiredPtr = materializeRValue(Desired).emitRawPointer(CGF);
+ auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr,
Success, Failure);
return std::make_pair(
convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
@@ -1901,13 +1695,14 @@ std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
// If we've got a scalar value of the right size, try to avoid going
// through memory.
- auto *ExpectedVal = convertRValueToInt(Expected);
- auto *DesiredVal = convertRValueToInt(Desired);
+ auto *ExpectedVal = convertRValueToInt(Expected, /*CmpXchg=*/true);
+ auto *DesiredVal = convertRValueToInt(Desired, /*CmpXchg=*/true);
auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
Failure, IsWeak);
return std::make_pair(
- ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
- SourceLocation(), /*AsValue=*/false),
+ ConvertToValueOrAtomic(Res.first, AggValueSlot::ignored(),
+ SourceLocation(), /*AsValue=*/false,
+ /*CmpXchg=*/true),
Res.second);
}
@@ -1973,7 +1768,7 @@ void AtomicInfo::EmitAtomicUpdateLibcall(
Address ExpectedAddr = CreateTempAlloca();
- EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
+ EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
auto *ContBB = CGF.createBasicBlock("atomic_cont");
auto *ExitBB = CGF.createBasicBlock("atomic_exit");
CGF.EmitBlock(ContBB);
@@ -1987,10 +1782,10 @@ void AtomicInfo::EmitAtomicUpdateLibcall(
AggValueSlot::ignored(),
SourceLocation(), /*AsValue=*/false);
EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
+ llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
+ llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
auto *Res =
- EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
- DesiredAddr.getPointer(),
- AO, Failure);
+ EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
@@ -2001,7 +1796,7 @@ void AtomicInfo::EmitAtomicUpdateOp(
auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
// Do the atomic load.
- auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
+ auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
// For non-simple lvalues perform compare-and-swap procedure.
auto *ContBB = CGF.createBasicBlock("atomic_cont");
auto *ExitBB = CGF.createBasicBlock("atomic_exit");
@@ -2011,13 +1806,18 @@ void AtomicInfo::EmitAtomicUpdateOp(
/*NumReservedValues=*/2);
PHI->addIncoming(OldVal, CurBB);
Address NewAtomicAddr = CreateTempAlloca();
- Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);
+ Address NewAtomicIntAddr =
+ shouldCastToInt(NewAtomicAddr.getElementType(), /*CmpXchg=*/true)
+ ? castToAtomicIntPointer(NewAtomicAddr)
+ : NewAtomicAddr;
+
if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
requiresMemSetZero(getAtomicAddress().getElementType())) {
CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
}
- auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
- SourceLocation(), /*AsValue=*/false);
+ auto OldRVal = ConvertToValueOrAtomic(PHI, AggValueSlot::ignored(),
+ SourceLocation(), /*AsValue=*/false,
+ /*CmpXchg=*/true);
EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
// Try to write new value using cmpxchg operation.
@@ -2059,7 +1859,7 @@ void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
Address ExpectedAddr = CreateTempAlloca();
- EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
+ EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
auto *ContBB = CGF.createBasicBlock("atomic_cont");
auto *ExitBB = CGF.createBasicBlock("atomic_exit");
CGF.EmitBlock(ContBB);
@@ -2070,10 +1870,10 @@ void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
CGF.Builder.CreateStore(OldVal, DesiredAddr);
}
EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
+ llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
+ llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
auto *Res =
- EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
- DesiredAddr.getPointer(),
- AO, Failure);
+ EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
@@ -2083,7 +1883,7 @@ void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
// Do the atomic load.
- auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
+ auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
// For non-simple lvalues perform compare-and-swap procedure.
auto *ContBB = CGF.createBasicBlock("atomic_cont");
auto *ExitBB = CGF.createBasicBlock("atomic_exit");
@@ -2151,7 +1951,7 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
// maybe for address-space qualification.
assert(!rvalue.isAggregate() ||
rvalue.getAggregateAddress().getElementType() ==
- dest.getAddress(*this).getElementType());
+ dest.getAddress().getElementType());
AtomicInfo atomics(*this, dest);
LValue LVal = atomics.getAtomicLValue();
@@ -2173,7 +1973,8 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
args.add(RValue::get(atomics.getAtomicSizeValue()),
getContext().getSizeType());
args.add(RValue::get(atomics.getAtomicPointer()), getContext().VoidPtrTy);
- args.add(RValue::get(srcAddr.getPointer()), getContext().VoidPtrTy);
+ args.add(RValue::get(srcAddr.emitRawPointer(*this)),
+ getContext().VoidPtrTy);
args.add(
RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
getContext().IntTy);
@@ -2182,13 +1983,17 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
}
// Okay, we're doing this natively.
- llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
+ llvm::Value *ValToStore = atomics.convertRValueToInt(rvalue);
// Do the atomic store.
- Address addr = atomics.castToAtomicIntPointer(atomics.getAtomicAddress());
- intValue = Builder.CreateIntCast(
- intValue, addr.getElementType(), /*isSigned=*/false);
- llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
+ Address Addr = atomics.getAtomicAddress();
+ if (llvm::Value *Value = atomics.getScalarRValValueOrNull(rvalue))
+ if (shouldCastToInt(Value->getType(), /*CmpXchg=*/false)) {
+ Addr = atomics.castToAtomicIntPointer(Addr);
+ ValToStore = Builder.CreateIntCast(ValToStore, Addr.getElementType(),
+ /*isSigned=*/false);
+ }
+ llvm::StoreInst *store = Builder.CreateStore(ValToStore, Addr);
if (AO == llvm::AtomicOrdering::Acquire)
AO = llvm::AtomicOrdering::Monotonic;
@@ -2219,10 +2024,10 @@ std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
// maybe for address-space qualification.
assert(!Expected.isAggregate() ||
Expected.getAggregateAddress().getElementType() ==
- Obj.getAddress(*this).getElementType());
+ Obj.getAddress().getElementType());
assert(!Desired.isAggregate() ||
Desired.getAggregateAddress().getElementType() ==
- Obj.getAddress(*this).getElementType());
+ Obj.getAddress().getElementType());
AtomicInfo Atomics(*this, Obj);
return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
@@ -2263,7 +2068,7 @@ void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
// Evaluate the expression directly into the destination.
AggValueSlot slot = AggValueSlot::forLValue(
- dest, *this, AggValueSlot::IsNotDestructed,
+ dest, AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
AggValueSlot::DoesNotOverlap,
Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
index 0cbace7b7f7b..066139b1c78c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
@@ -36,7 +36,8 @@ CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
: Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
NoEscape(false), HasCXXObject(false), UsesStret(false),
HasCapturedVariableLayout(false), CapturesNonExternalType(false),
- LocalAddress(Address::invalid()), StructureType(nullptr), Block(block) {
+ LocalAddress(RawAddress::invalid()), StructureType(nullptr),
+ Block(block) {
// Skip asm prefix, if any. 'name' is usually taken directly from
// the mangled name of the enclosing function.
@@ -120,11 +121,15 @@ static std::string getBlockDescriptorName(const CGBlockInfo &BlockInfo,
Name += "_";
}
- std::string TypeAtEncoding =
- CGM.getContext().getObjCEncodingForBlock(BlockInfo.getBlockExpr());
- /// Replace occurrences of '@' with '\1'. '@' is reserved on ELF platforms as
- /// a separator between symbol name and symbol version.
- std::replace(TypeAtEncoding.begin(), TypeAtEncoding.end(), '@', '\1');
+ std::string TypeAtEncoding;
+
+ if (!CGM.getCodeGenOpts().DisableBlockSignatureString) {
+ TypeAtEncoding =
+ CGM.getContext().getObjCEncodingForBlock(BlockInfo.getBlockExpr());
+ /// Replace occurrences of '@' with '\1'. '@' is reserved on ELF platforms
+ /// as a separator between symbol name and symbol version.
+ std::replace(TypeAtEncoding.begin(), TypeAtEncoding.end(), '@', '\1');
+ }
Name += "e" + llvm::to_string(TypeAtEncoding.size()) + "_" + TypeAtEncoding;
Name += "l" + CGM.getObjCRuntime().getRCBlockLayoutStr(CGM, BlockInfo);
return Name;
@@ -200,9 +205,13 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
}
// Signature. Mandatory ObjC-style method descriptor @encode sequence.
- std::string typeAtEncoding =
- CGM.getContext().getObjCEncodingForBlock(blockInfo.getBlockExpr());
- elements.add(CGM.GetAddrOfConstantCString(typeAtEncoding).getPointer());
+ if (CGM.getCodeGenOpts().DisableBlockSignatureString) {
+ elements.addNullPointer(i8p);
+ } else {
+ std::string typeAtEncoding =
+ CGM.getContext().getObjCEncodingForBlock(blockInfo.getBlockExpr());
+ elements.add(CGM.GetAddrOfConstantCString(typeAtEncoding).getPointer());
+ }
// GC layout.
if (C.getLangOpts().ObjC) {
@@ -576,7 +585,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
// First, 'this'.
if (block->capturesCXXThis()) {
- assert(CGF && CGF->CurFuncDecl && isa<CXXMethodDecl>(CGF->CurFuncDecl) &&
+ assert(CGF && isa_and_nonnull<CXXMethodDecl>(CGF->CurFuncDecl) &&
"Can't capture 'this' outside a method");
QualType thisType = cast<CXXMethodDecl>(CGF->CurFuncDecl)->getThisType();
@@ -794,7 +803,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// Otherwise, we have to emit this as a local block.
- Address blockAddr = blockInfo.LocalAddress;
+ RawAddress blockAddr = blockInfo.LocalAddress;
assert(blockAddr.isValid() && "block has no address!");
llvm::Constant *isa;
@@ -813,7 +822,8 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
descriptor = buildBlockDescriptor(CGM, blockInfo);
// Compute the initial on-stack block flags.
- flags = BLOCK_HAS_SIGNATURE;
+ if (!CGM.getCodeGenOpts().DisableBlockSignatureString)
+ flags = BLOCK_HAS_SIGNATURE;
if (blockInfo.HasCapturedVariableLayout)
flags |= BLOCK_HAS_EXTENDED_LAYOUT;
if (blockInfo.NeedsCopyDispose)
@@ -926,7 +936,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
/*RefersToEnclosingVariableOrCapture*/ CI.isNested(),
type.getNonReferenceType(), VK_LValue,
SourceLocation());
- src = EmitDeclRefLValue(&declRef).getAddress(*this);
+ src = EmitDeclRefLValue(&declRef).getAddress();
};
// For byrefs, we just write the pointer to the byref struct into
@@ -939,7 +949,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
if (CI.isNested())
byrefPointer = Builder.CreateLoad(src, "byref.capture");
else
- byrefPointer = src.getPointer();
+ byrefPointer = src.emitRawPointer(*this);
// Write that void* into the capture field.
Builder.CreateStore(byrefPointer, blockField);
@@ -961,10 +971,10 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
}
// If it's a reference variable, copy the reference into the block field.
- } else if (type->isReferenceType()) {
- Builder.CreateStore(src.getPointer(), blockField);
+ } else if (type->getAs<ReferenceType>()) {
+ Builder.CreateStore(src.emitRawPointer(*this), blockField);
- // If type is const-qualified, copy the value into the block field.
+ // If type is const-qualified, copy the value into the block field.
} else if (type.isConstQualified() &&
type.getObjCLifetime() == Qualifiers::OCL_Strong &&
CGM.getCodeGenOpts().OptimizationLevel != 0) {
@@ -1299,7 +1309,9 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
fields.add(CGM.getNSConcreteGlobalBlock());
// __flags
- BlockFlags flags = BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE;
+ BlockFlags flags = BLOCK_IS_GLOBAL;
+ if (!CGM.getCodeGenOpts().DisableBlockSignatureString)
+ flags |= BLOCK_HAS_SIGNATURE;
if (blockInfo.UsesStret)
flags |= BLOCK_USE_STRET;
@@ -1377,7 +1389,7 @@ void CodeGenFunction::setBlockContextParameter(const ImplicitParamDecl *D,
// Allocate a stack slot like for any local variable to guarantee optimal
// debug info at -O0. The mem2reg pass will eliminate it when optimizing.
- Address alloc = CreateMemTemp(D->getType(), D->getName() + ".addr");
+ RawAddress alloc = CreateMemTemp(D->getType(), D->getName() + ".addr");
Builder.CreateStore(arg, alloc);
if (CGDebugInfo *DI = getDebugInfo()) {
if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
@@ -1446,7 +1458,7 @@ llvm::Function *CodeGenFunction::GenerateBlockFunction(
selfTy = getContext().getPointerType(getContext().getAddrSpaceQualType(
getContext().VoidTy, LangAS::opencl_generic));
- IdentifierInfo *II = &CGM.getContext().Idents.get(".block_descriptor");
+ const IdentifierInfo *II = &CGM.getContext().Idents.get(".block_descriptor");
ImplicitParamDecl SelfDecl(getContext(), const_cast<BlockDecl *>(blockDecl),
SourceLocation(), II, selfTy,
@@ -1497,7 +1509,7 @@ llvm::Function *CodeGenFunction::GenerateBlockFunction(
// frame setup instruction by llvm::DwarfDebug::beginFunction().
auto NL = ApplyDebugLocation::CreateEmpty(*this);
Builder.CreateStore(BlockPointer, Alloca);
- BlockPointerDbgLoc = Alloca.getPointer();
+ BlockPointerDbgLoc = Alloca.emitRawPointer(*this);
}
// If we have a C++ 'this' reference, go ahead and force it into
@@ -1540,7 +1552,10 @@ llvm::Function *CodeGenFunction::GenerateBlockFunction(
llvm::BasicBlock *resume = Builder.GetInsertBlock();
// Go back to the entry.
- ++entry_ptr;
+ if (entry_ptr->getNextNonDebugInstruction())
+ entry_ptr = entry_ptr->getNextNonDebugInstruction()->getIterator();
+ else
+ entry_ptr = entry->end();
Builder.SetInsertPoint(entry, entry_ptr);
// Emit debug information for all the DeclRefExprs.
@@ -1554,8 +1569,8 @@ llvm::Function *CodeGenFunction::GenerateBlockFunction(
const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
if (capture.isConstant()) {
auto addr = LocalDeclMap.find(variable)->second;
- (void)DI->EmitDeclareOfAutoVariable(variable, addr.getPointer(),
- Builder);
+ (void)DI->EmitDeclareOfAutoVariable(
+ variable, addr.emitRawPointer(*this), Builder);
continue;
}
@@ -1659,7 +1674,7 @@ struct CallBlockRelease final : EHScopeStack::Cleanup {
if (LoadBlockVarAddr) {
BlockVarAddr = CGF.Builder.CreateLoad(Addr);
} else {
- BlockVarAddr = Addr.getPointer();
+ BlockVarAddr = Addr.emitRawPointer(CGF);
}
CGF.BuildBlockRelease(BlockVarAddr, FieldFlags, CanThrow);
@@ -1959,13 +1974,15 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
// it. It's not quite worth the annoyance to avoid creating it in the
// first place.
if (!needsEHCleanup(captureType.isDestructedType()))
- cast<llvm::Instruction>(dstField.getPointer())->eraseFromParent();
+ if (auto *I =
+ cast_or_null<llvm::Instruction>(dstField.getBasePointer()))
+ I->eraseFromParent();
}
break;
}
case BlockCaptureEntityKind::BlockObject: {
llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src");
- llvm::Value *dstAddr = dstField.getPointer();
+ llvm::Value *dstAddr = dstField.emitRawPointer(*this);
llvm::Value *args[] = {
dstAddr, srcValue, llvm::ConstantInt::get(Int32Ty, flags.getBitMask())
};
@@ -2136,7 +2153,7 @@ public:
llvm::Value *flagsVal = llvm::ConstantInt::get(CGF.Int32Ty, flags);
llvm::FunctionCallee fn = CGF.CGM.getBlockObjectAssign();
- llvm::Value *args[] = { destField.getPointer(), srcValue, flagsVal };
+ llvm::Value *args[] = {destField.emitRawPointer(CGF), srcValue, flagsVal};
CGF.EmitNounwindRuntimeCall(fn, args);
}
@@ -2693,7 +2710,8 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
storeHeaderField(V, getPointerSize(), "byref.isa");
// Store the address of the variable into its own forwarding pointer.
- storeHeaderField(addr.getPointer(), getPointerSize(), "byref.forwarding");
+ storeHeaderField(addr.emitRawPointer(*this), getPointerSize(),
+ "byref.forwarding");
// Blocks ABI:
// c) the flags field is set to either 0 if no helper functions are
@@ -2784,7 +2802,7 @@ static void configureBlocksRuntimeObject(CodeGenModule &CGM,
auto *GV = cast<llvm::GlobalValue>(C->stripPointerCasts());
if (CGM.getTarget().getTriple().isOSBinFormatCOFF()) {
- IdentifierInfo &II = CGM.getContext().Idents.get(C->getName());
+ const IdentifierInfo &II = CGM.getContext().Idents.get(C->getName());
TranslationUnitDecl *TUDecl = CGM.getContext().getTranslationUnitDecl();
DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h
index 4ef1ae9f3365..8d10c4f69b20 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h
@@ -271,7 +271,8 @@ public:
/// The block's captures. Non-constant captures are sorted by their offsets.
llvm::SmallVector<Capture, 4> SortedCaptures;
- Address LocalAddress;
+ // Currently we assume that block-pointer types are never signed.
+ RawAddress LocalAddress;
llvm::StructType *StructureType;
const BlockDecl *Block;
const BlockExpr *BlockExpression;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h b/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h
index bf5ab171d720..5d59d5a4ae2c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h
@@ -10,7 +10,9 @@
#define LLVM_CLANG_LIB_CODEGEN_CGBUILDER_H
#include "Address.h"
+#include "CGValue.h"
#include "CodeGenTypeCache.h"
+#include "llvm/Analysis/Utils/Local.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Type.h"
@@ -18,19 +20,21 @@
namespace clang {
namespace CodeGen {
+class CGBuilderTy;
class CodeGenFunction;
/// This is an IRBuilder insertion helper that forwards to
/// CodeGenFunction::InsertHelper, which adds necessary metadata to
/// instructions.
class CGBuilderInserter final : public llvm::IRBuilderDefaultInserter {
+ friend CGBuilderTy;
+
public:
CGBuilderInserter() = default;
explicit CGBuilderInserter(CodeGenFunction *CGF) : CGF(CGF) {}
/// This forwards to CodeGenFunction::InsertHelper.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
- llvm::BasicBlock *BB,
llvm::BasicBlock::iterator InsertPt) const override;
private:
@@ -43,10 +47,42 @@ typedef llvm::IRBuilder<llvm::ConstantFolder, CGBuilderInserterTy>
CGBuilderBaseTy;
class CGBuilderTy : public CGBuilderBaseTy {
+ friend class Address;
+
/// Storing a reference to the type cache here makes it a lot easier
/// to build natural-feeling, target-specific IR.
const CodeGenTypeCache &TypeCache;
+ CodeGenFunction *getCGF() const { return getInserter().CGF; }
+
+ llvm::Value *emitRawPointerFromAddress(Address Addr) const {
+ return Addr.getBasePointer();
+ }
+
+ template <bool IsInBounds>
+ Address createConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1,
+ const llvm::Twine &Name) {
+ const llvm::DataLayout &DL = BB->getDataLayout();
+ llvm::GetElementPtrInst *GEP;
+ if (IsInBounds)
+ GEP = cast<llvm::GetElementPtrInst>(CreateConstInBoundsGEP2_32(
+ Addr.getElementType(), emitRawPointerFromAddress(Addr), Idx0, Idx1,
+ Name));
+ else
+ GEP = cast<llvm::GetElementPtrInst>(CreateConstGEP2_32(
+ Addr.getElementType(), emitRawPointerFromAddress(Addr), Idx0, Idx1,
+ Name));
+ llvm::APInt Offset(
+ DL.getIndexSizeInBits(Addr.getType()->getPointerAddressSpace()), 0,
+ /*isSigned=*/true);
+ if (!GEP->accumulateConstantOffset(DL, Offset))
+ llvm_unreachable("offset of GEP with constants is always computable");
+ return Address(GEP, GEP->getResultElementType(),
+ Addr.getAlignment().alignmentAtOffset(
+ CharUnits::fromQuantity(Offset.getSExtValue())),
+ IsInBounds ? Addr.isKnownNonNull() : NotKnownNonNull);
+ }
+
public:
CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::LLVMContext &C)
: CGBuilderBaseTy(C), TypeCache(TypeCache) {}
@@ -69,20 +105,22 @@ public:
// Note that we intentionally hide the CreateLoad APIs that don't
// take an alignment.
llvm::LoadInst *CreateLoad(Address Addr, const llvm::Twine &Name = "") {
- return CreateAlignedLoad(Addr.getElementType(), Addr.getPointer(),
+ return CreateAlignedLoad(Addr.getElementType(),
+ emitRawPointerFromAddress(Addr),
Addr.getAlignment().getAsAlign(), Name);
}
llvm::LoadInst *CreateLoad(Address Addr, const char *Name) {
// This overload is required to prevent string literals from
// ending up in the IsVolatile overload.
- return CreateAlignedLoad(Addr.getElementType(), Addr.getPointer(),
+ return CreateAlignedLoad(Addr.getElementType(),
+ emitRawPointerFromAddress(Addr),
Addr.getAlignment().getAsAlign(), Name);
}
llvm::LoadInst *CreateLoad(Address Addr, bool IsVolatile,
const llvm::Twine &Name = "") {
- return CreateAlignedLoad(Addr.getElementType(), Addr.getPointer(),
- Addr.getAlignment().getAsAlign(), IsVolatile,
- Name);
+ return CreateAlignedLoad(
+ Addr.getElementType(), emitRawPointerFromAddress(Addr),
+ Addr.getAlignment().getAsAlign(), IsVolatile, Name);
}
using CGBuilderBaseTy::CreateAlignedLoad;
@@ -96,7 +134,7 @@ public:
// take an alignment.
llvm::StoreInst *CreateStore(llvm::Value *Val, Address Addr,
bool IsVolatile = false) {
- return CreateAlignedStore(Val, Addr.getPointer(),
+ return CreateAlignedStore(Val, emitRawPointerFromAddress(Addr),
Addr.getAlignment().getAsAlign(), IsVolatile);
}
@@ -132,33 +170,41 @@ public:
llvm::AtomicOrdering FailureOrdering,
llvm::SyncScope::ID SSID = llvm::SyncScope::System) {
return CGBuilderBaseTy::CreateAtomicCmpXchg(
- Addr.getPointer(), Cmp, New, Addr.getAlignment().getAsAlign(),
- SuccessOrdering, FailureOrdering, SSID);
+ Addr.emitRawPointer(*getCGF()), Cmp, New,
+ Addr.getAlignment().getAsAlign(), SuccessOrdering, FailureOrdering,
+ SSID);
}
llvm::AtomicRMWInst *
CreateAtomicRMW(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val,
llvm::AtomicOrdering Ordering,
llvm::SyncScope::ID SSID = llvm::SyncScope::System) {
- return CGBuilderBaseTy::CreateAtomicRMW(Op, Addr.getPointer(), Val,
- Addr.getAlignment().getAsAlign(),
- Ordering, SSID);
+ return CGBuilderBaseTy::CreateAtomicRMW(
+ Op, Addr.emitRawPointer(*getCGF()), Val,
+ Addr.getAlignment().getAsAlign(), Ordering, SSID);
}
using CGBuilderBaseTy::CreateAddrSpaceCast;
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty,
+ llvm::Type *ElementTy,
const llvm::Twine &Name = "") {
- return Addr.withPointer(CreateAddrSpaceCast(Addr.getPointer(), Ty, Name),
- Addr.isKnownNonNull());
+ if (!Addr.hasOffset())
+ return Address(CreateAddrSpaceCast(Addr.getBasePointer(), Ty, Name),
+ ElementTy, Addr.getAlignment(), Addr.getPointerAuthInfo(),
+ /*Offset=*/nullptr, Addr.isKnownNonNull());
+ // Eagerly force a raw address if these is an offset.
+ return RawAddress(
+ CreateAddrSpaceCast(Addr.emitRawPointer(*getCGF()), Ty, Name),
+ ElementTy, Addr.getAlignment(), Addr.isKnownNonNull());
}
using CGBuilderBaseTy::CreatePointerBitCastOrAddrSpaceCast;
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty,
llvm::Type *ElementTy,
const llvm::Twine &Name = "") {
- llvm::Value *Ptr =
- CreatePointerBitCastOrAddrSpaceCast(Addr.getPointer(), Ty, Name);
- return Address(Ptr, ElementTy, Addr.getAlignment(), Addr.isKnownNonNull());
+ if (Addr.getType()->getAddressSpace() == Ty->getPointerAddressSpace())
+ return Addr.withElementType(ElementTy);
+ return CreateAddrSpaceCast(Addr, Ty, ElementTy, Name);
}
/// Given
@@ -172,14 +218,15 @@ public:
Address CreateStructGEP(Address Addr, unsigned Index,
const llvm::Twine &Name = "") {
llvm::StructType *ElTy = cast<llvm::StructType>(Addr.getElementType());
- const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+ const llvm::DataLayout &DL = BB->getDataLayout();
const llvm::StructLayout *Layout = DL.getStructLayout(ElTy);
auto Offset = CharUnits::fromQuantity(Layout->getElementOffset(Index));
- return Address(
- CreateStructGEP(Addr.getElementType(), Addr.getPointer(), Index, Name),
- ElTy->getElementType(Index),
- Addr.getAlignment().alignmentAtOffset(Offset), Addr.isKnownNonNull());
+ return Address(CreateStructGEP(Addr.getElementType(), Addr.getBasePointer(),
+ Index, Name),
+ ElTy->getElementType(Index),
+ Addr.getAlignment().alignmentAtOffset(Offset),
+ Addr.isKnownNonNull());
}
/// Given
@@ -193,12 +240,12 @@ public:
Address CreateConstArrayGEP(Address Addr, uint64_t Index,
const llvm::Twine &Name = "") {
llvm::ArrayType *ElTy = cast<llvm::ArrayType>(Addr.getElementType());
- const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+ const llvm::DataLayout &DL = BB->getDataLayout();
CharUnits EltSize =
CharUnits::fromQuantity(DL.getTypeAllocSize(ElTy->getElementType()));
return Address(
- CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
+ CreateInBoundsGEP(Addr.getElementType(), Addr.getBasePointer(),
{getSize(CharUnits::Zero()), getSize(Index)}, Name),
ElTy->getElementType(),
Addr.getAlignment().alignmentAtOffset(Index * EltSize),
@@ -213,13 +260,13 @@ public:
Address CreateConstInBoundsGEP(Address Addr, uint64_t Index,
const llvm::Twine &Name = "") {
llvm::Type *ElTy = Addr.getElementType();
- const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+ const llvm::DataLayout &DL = BB->getDataLayout();
CharUnits EltSize = CharUnits::fromQuantity(DL.getTypeAllocSize(ElTy));
- return Address(CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
- getSize(Index), Name),
- ElTy, Addr.getAlignment().alignmentAtOffset(Index * EltSize),
- Addr.isKnownNonNull());
+ return Address(
+ CreateInBoundsGEP(ElTy, Addr.getBasePointer(), getSize(Index), Name),
+ ElTy, Addr.getAlignment().alignmentAtOffset(Index * EltSize),
+ Addr.isKnownNonNull());
}
/// Given
@@ -229,110 +276,133 @@ public:
/// where i64 is actually the target word size.
Address CreateConstGEP(Address Addr, uint64_t Index,
const llvm::Twine &Name = "") {
- const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
- CharUnits EltSize =
- CharUnits::fromQuantity(DL.getTypeAllocSize(Addr.getElementType()));
+ llvm::Type *ElTy = Addr.getElementType();
+ const llvm::DataLayout &DL = BB->getDataLayout();
+ CharUnits EltSize = CharUnits::fromQuantity(DL.getTypeAllocSize(ElTy));
- return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(),
- getSize(Index), Name),
+ return Address(CreateGEP(ElTy, Addr.getBasePointer(), getSize(Index), Name),
Addr.getElementType(),
- Addr.getAlignment().alignmentAtOffset(Index * EltSize),
- NotKnownNonNull);
+ Addr.getAlignment().alignmentAtOffset(Index * EltSize));
}
/// Create GEP with single dynamic index. The address alignment is reduced
/// according to the element size.
using CGBuilderBaseTy::CreateGEP;
- Address CreateGEP(Address Addr, llvm::Value *Index,
+ Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index,
const llvm::Twine &Name = "") {
- const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+ const llvm::DataLayout &DL = BB->getDataLayout();
CharUnits EltSize =
CharUnits::fromQuantity(DL.getTypeAllocSize(Addr.getElementType()));
return Address(
- CreateGEP(Addr.getElementType(), Addr.getPointer(), Index, Name),
+ CreateGEP(Addr.getElementType(), Addr.emitRawPointer(CGF), Index, Name),
Addr.getElementType(),
- Addr.getAlignment().alignmentOfArrayElement(EltSize), NotKnownNonNull);
+ Addr.getAlignment().alignmentOfArrayElement(EltSize));
}
/// Given a pointer to i8, adjust it by a given constant offset.
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset,
const llvm::Twine &Name = "") {
assert(Addr.getElementType() == TypeCache.Int8Ty);
- return Address(CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
- getSize(Offset), Name),
- Addr.getElementType(),
- Addr.getAlignment().alignmentAtOffset(Offset),
- Addr.isKnownNonNull());
+ return Address(
+ CreateInBoundsGEP(Addr.getElementType(), Addr.getBasePointer(),
+ getSize(Offset), Name),
+ Addr.getElementType(), Addr.getAlignment().alignmentAtOffset(Offset),
+ Addr.isKnownNonNull());
}
+
Address CreateConstByteGEP(Address Addr, CharUnits Offset,
const llvm::Twine &Name = "") {
assert(Addr.getElementType() == TypeCache.Int8Ty);
- return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(),
+ return Address(CreateGEP(Addr.getElementType(), Addr.getBasePointer(),
getSize(Offset), Name),
Addr.getElementType(),
- Addr.getAlignment().alignmentAtOffset(Offset),
- NotKnownNonNull);
+ Addr.getAlignment().alignmentAtOffset(Offset));
}
using CGBuilderBaseTy::CreateConstInBoundsGEP2_32;
Address CreateConstInBoundsGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1,
const llvm::Twine &Name = "") {
- const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+ return createConstGEP2_32<true>(Addr, Idx0, Idx1, Name);
+ }
- auto *GEP = cast<llvm::GetElementPtrInst>(CreateConstInBoundsGEP2_32(
- Addr.getElementType(), Addr.getPointer(), Idx0, Idx1, Name));
- llvm::APInt Offset(
- DL.getIndexSizeInBits(Addr.getType()->getPointerAddressSpace()), 0,
- /*isSigned=*/true);
- if (!GEP->accumulateConstantOffset(DL, Offset))
- llvm_unreachable("offset of GEP with constants is always computable");
- return Address(GEP, GEP->getResultElementType(),
- Addr.getAlignment().alignmentAtOffset(
- CharUnits::fromQuantity(Offset.getSExtValue())),
- Addr.isKnownNonNull());
+ using CGBuilderBaseTy::CreateConstGEP2_32;
+ Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1,
+ const llvm::Twine &Name = "") {
+ return createConstGEP2_32<false>(Addr, Idx0, Idx1, Name);
+ }
+
+ Address CreateGEP(Address Addr, ArrayRef<llvm::Value *> IdxList,
+ llvm::Type *ElementType, CharUnits Align,
+ const Twine &Name = "") {
+ llvm::Value *Ptr = emitRawPointerFromAddress(Addr);
+ return RawAddress(CreateGEP(Addr.getElementType(), Ptr, IdxList, Name),
+ ElementType, Align);
+ }
+
+ using CGBuilderBaseTy::CreateInBoundsGEP;
+ Address CreateInBoundsGEP(Address Addr, ArrayRef<llvm::Value *> IdxList,
+ llvm::Type *ElementType, CharUnits Align,
+ const Twine &Name = "") {
+ return RawAddress(CreateInBoundsGEP(Addr.getElementType(),
+ emitRawPointerFromAddress(Addr),
+ IdxList, Name),
+ ElementType, Align, Addr.isKnownNonNull());
+ }
+
+ using CGBuilderBaseTy::CreateIsNull;
+ llvm::Value *CreateIsNull(Address Addr, const Twine &Name = "") {
+ if (!Addr.hasOffset())
+ return CreateIsNull(Addr.getBasePointer(), Name);
+ // The pointer isn't null if Addr has an offset since offsets can always
+ // be applied inbound.
+ return llvm::ConstantInt::getFalse(Context);
}
using CGBuilderBaseTy::CreateMemCpy;
llvm::CallInst *CreateMemCpy(Address Dest, Address Src, llvm::Value *Size,
bool IsVolatile = false) {
- return CreateMemCpy(Dest.getPointer(), Dest.getAlignment().getAsAlign(),
- Src.getPointer(), Src.getAlignment().getAsAlign(), Size,
- IsVolatile);
+ llvm::Value *DestPtr = emitRawPointerFromAddress(Dest);
+ llvm::Value *SrcPtr = emitRawPointerFromAddress(Src);
+ return CreateMemCpy(DestPtr, Dest.getAlignment().getAsAlign(), SrcPtr,
+ Src.getAlignment().getAsAlign(), Size, IsVolatile);
}
llvm::CallInst *CreateMemCpy(Address Dest, Address Src, uint64_t Size,
bool IsVolatile = false) {
- return CreateMemCpy(Dest.getPointer(), Dest.getAlignment().getAsAlign(),
- Src.getPointer(), Src.getAlignment().getAsAlign(), Size,
- IsVolatile);
+ llvm::Value *DestPtr = emitRawPointerFromAddress(Dest);
+ llvm::Value *SrcPtr = emitRawPointerFromAddress(Src);
+ return CreateMemCpy(DestPtr, Dest.getAlignment().getAsAlign(), SrcPtr,
+ Src.getAlignment().getAsAlign(), Size, IsVolatile);
}
using CGBuilderBaseTy::CreateMemCpyInline;
llvm::CallInst *CreateMemCpyInline(Address Dest, Address Src, uint64_t Size) {
- return CreateMemCpyInline(
- Dest.getPointer(), Dest.getAlignment().getAsAlign(), Src.getPointer(),
- Src.getAlignment().getAsAlign(), getInt64(Size));
+ llvm::Value *DestPtr = emitRawPointerFromAddress(Dest);
+ llvm::Value *SrcPtr = emitRawPointerFromAddress(Src);
+ return CreateMemCpyInline(DestPtr, Dest.getAlignment().getAsAlign(), SrcPtr,
+ Src.getAlignment().getAsAlign(), getInt64(Size));
}
using CGBuilderBaseTy::CreateMemMove;
llvm::CallInst *CreateMemMove(Address Dest, Address Src, llvm::Value *Size,
bool IsVolatile = false) {
- return CreateMemMove(Dest.getPointer(), Dest.getAlignment().getAsAlign(),
- Src.getPointer(), Src.getAlignment().getAsAlign(),
- Size, IsVolatile);
+ llvm::Value *DestPtr = emitRawPointerFromAddress(Dest);
+ llvm::Value *SrcPtr = emitRawPointerFromAddress(Src);
+ return CreateMemMove(DestPtr, Dest.getAlignment().getAsAlign(), SrcPtr,
+ Src.getAlignment().getAsAlign(), Size, IsVolatile);
}
using CGBuilderBaseTy::CreateMemSet;
llvm::CallInst *CreateMemSet(Address Dest, llvm::Value *Value,
llvm::Value *Size, bool IsVolatile = false) {
- return CreateMemSet(Dest.getPointer(), Value, Size,
+ return CreateMemSet(emitRawPointerFromAddress(Dest), Value, Size,
Dest.getAlignment().getAsAlign(), IsVolatile);
}
using CGBuilderBaseTy::CreateMemSetInline;
llvm::CallInst *CreateMemSetInline(Address Dest, llvm::Value *Value,
uint64_t Size) {
- return CreateMemSetInline(Dest.getPointer(),
+ return CreateMemSetInline(emitRawPointerFromAddress(Dest),
Dest.getAlignment().getAsAlign(), Value,
getInt64(Size));
}
@@ -342,20 +412,35 @@ public:
unsigned FieldIndex,
llvm::MDNode *DbgInfo) {
llvm::StructType *ElTy = cast<llvm::StructType>(Addr.getElementType());
- const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+ const llvm::DataLayout &DL = BB->getDataLayout();
const llvm::StructLayout *Layout = DL.getStructLayout(ElTy);
auto Offset = CharUnits::fromQuantity(Layout->getElementOffset(Index));
- return Address(CreatePreserveStructAccessIndex(ElTy, Addr.getPointer(),
- Index, FieldIndex, DbgInfo),
- ElTy->getElementType(Index),
- Addr.getAlignment().alignmentAtOffset(Offset));
+ return Address(
+ CreatePreserveStructAccessIndex(ElTy, emitRawPointerFromAddress(Addr),
+ Index, FieldIndex, DbgInfo),
+ ElTy->getElementType(Index),
+ Addr.getAlignment().alignmentAtOffset(Offset));
+ }
+
+ using CGBuilderBaseTy::CreatePreserveUnionAccessIndex;
+ Address CreatePreserveUnionAccessIndex(Address Addr, unsigned FieldIndex,
+ llvm::MDNode *DbgInfo) {
+ Addr.replaceBasePointer(CreatePreserveUnionAccessIndex(
+ Addr.getBasePointer(), FieldIndex, DbgInfo));
+ return Addr;
}
using CGBuilderBaseTy::CreateLaunderInvariantGroup;
Address CreateLaunderInvariantGroup(Address Addr) {
- return Addr.withPointer(CreateLaunderInvariantGroup(Addr.getPointer()),
- Addr.isKnownNonNull());
+ Addr.replaceBasePointer(CreateLaunderInvariantGroup(Addr.getBasePointer()));
+ return Addr;
+ }
+
+ using CGBuilderBaseTy::CreateStripInvariantGroup;
+ Address CreateStripInvariantGroup(Address Addr) {
+ Addr.replaceBasePointer(CreateStripInvariantGroup(Addr.getBasePointer()));
+ return Addr;
}
};
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
index a4f26a6f0eb1..86d47054615e 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
@@ -13,6 +13,7 @@
#include "ABIInfo.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
+#include "CGHLSLRuntime.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
@@ -44,6 +45,7 @@
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsBPF.h"
+#include "llvm/IR/IntrinsicsDirectX.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
@@ -55,6 +57,7 @@
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
+#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ScopedPrinter.h"
@@ -511,8 +514,8 @@ static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
if (CGF.Builder.getIsFPConstrained()) {
- CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
} else {
@@ -528,8 +531,8 @@ static Value *emitBinaryExpMaybeConstrainedFPBuiltin(
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
if (CGF.Builder.getIsFPConstrained()) {
- CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
{Src0->getType(), Src1->getType()});
return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
@@ -549,8 +552,8 @@ static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
if (CGF.Builder.getIsFPConstrained()) {
- CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
} else {
@@ -578,38 +581,19 @@ static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
return CGF.Builder.CreateCall(F, Args);
}
-// Emit a simple mangled intrinsic that has 1 argument and a return type
-// matching the argument type.
-static Value *emitUnaryBuiltin(CodeGenFunction &CGF, const CallExpr *E,
- unsigned IntrinsicID,
- llvm::StringRef Name = "") {
- llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
-
- Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
- return CGF.Builder.CreateCall(F, Src0, Name);
-}
-
-// Emit an intrinsic that has 2 operands of the same type as its result.
-static Value *emitBinaryBuiltin(CodeGenFunction &CGF,
- const CallExpr *E,
- unsigned IntrinsicID) {
- llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
- llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
-
- Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
- return CGF.Builder.CreateCall(F, { Src0, Src1 });
-}
-
-// Emit an intrinsic that has 3 operands of the same type as its result.
-static Value *emitTernaryBuiltin(CodeGenFunction &CGF,
- const CallExpr *E,
- unsigned IntrinsicID) {
- llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
- llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
- llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
-
- Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
- return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
+// Emit a simple intrinsic that has N scalar arguments and a return type
+// matching the argument type. It is assumed that only the first argument is
+// overloaded.
+template <unsigned N>
+Value *emitBuiltinWithOneOverloadedType(CodeGenFunction &CGF, const CallExpr *E,
+ unsigned IntrinsicID,
+ llvm::StringRef Name = "") {
+ static_assert(N, "expect non-empty argument");
+ SmallVector<Value *, N> Args;
+ for (unsigned I = 0; I < N; ++I)
+ Args.push_back(CGF.EmitScalarExpr(E->getArg(I)));
+ Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Args[0]->getType());
+ return CGF.Builder.CreateCall(F, Args, Name);
}
// Emit an intrinsic that has 1 float or double operand, and 1 integer.
@@ -702,8 +686,36 @@ static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
const CallExpr *E, llvm::Constant *calleeValue) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
- return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
+ RValue Call =
+ CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
+
+ // Check the supported intrinsic.
+ if (unsigned BuiltinID = FD->getBuiltinID()) {
+ auto IsErrnoIntrinsic = [&]() -> unsigned {
+ switch (BuiltinID) {
+ case Builtin::BIexpf:
+ case Builtin::BI__builtin_expf:
+ case Builtin::BI__builtin_expf128:
+ return true;
+ }
+ // TODO: support more FP math libcalls
+ return false;
+ }();
+
+  // Restrict this to targets with errno; for example, macOS doesn't set errno.
+ if (IsErrnoIntrinsic && CGF.CGM.getLangOpts().MathErrno &&
+ !CGF.Builder.getIsFPConstrained()) {
+ ASTContext &Context = CGF.getContext();
+ // Emit "int" TBAA metadata on FP math libcalls.
+ clang::QualType IntTy = Context.IntTy;
+ TBAAAccessInfo TBAAInfo = CGF.CGM.getTBAAAccessInfo(IntTy);
+ Instruction *Inst = cast<llvm::Instruction>(Call.getScalarVal());
+ CGF.CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
+ }
+ }
+ return Call;
}
/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
@@ -730,17 +742,14 @@ static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
return CGF.Builder.CreateExtractValue(Tmp, 0);
}
-static Value *emitRangedBuiltin(CodeGenFunction &CGF,
- unsigned IntrinsicID,
+static Value *emitRangedBuiltin(CodeGenFunction &CGF, unsigned IntrinsicID,
int low, int high) {
- llvm::MDBuilder MDHelper(CGF.getLLVMContext());
- llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high));
- Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
- llvm::Instruction *Call = CGF.Builder.CreateCall(F);
- Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
- Call->setMetadata(llvm::LLVMContext::MD_noundef,
- llvm::MDNode::get(CGF.getLLVMContext(), std::nullopt));
- return Call;
+ Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
+ llvm::CallInst *Call = CGF.Builder.CreateCall(F);
+ llvm::ConstantRange CR(APInt(32, low), APInt(32, high));
+ Call->addRangeRetAttr(CR);
+ Call->addRetAttr(llvm::Attribute::AttrKind::NoUndef);
+ return Call;
}
namespace {
@@ -791,7 +800,8 @@ EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
- return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
+ return Builder.CreateCall(CGM.getIntrinsic(inst, {ArgValue->getType()}),
+ ArgValue);
}
/// Checks if using the result of __builtin_object_size(p, @p From) in place of
@@ -819,33 +829,37 @@ CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
}
-const FieldDecl *CodeGenFunction::FindFlexibleArrayMemberField(
- ASTContext &Ctx, const RecordDecl *RD, StringRef Name, uint64_t &Offset) {
+const FieldDecl *CodeGenFunction::FindFlexibleArrayMemberFieldAndOffset(
+ ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FAMDecl,
+ uint64_t &Offset) {
const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
getLangOpts().getStrictFlexArraysLevel();
- unsigned FieldNo = 0;
- bool IsUnion = RD->isUnion();
+ uint32_t FieldNo = 0;
+
+ if (RD->isImplicit())
+ return nullptr;
- for (const Decl *D : RD->decls()) {
- if (const auto *Field = dyn_cast<FieldDecl>(D);
- Field && (Name.empty() || Field->getNameAsString() == Name) &&
+ for (const FieldDecl *FD : RD->fields()) {
+ if ((!FAMDecl || FD == FAMDecl) &&
Decl::isFlexibleArrayMemberLike(
- Ctx, Field, Field->getType(), StrictFlexArraysLevel,
+ Ctx, FD, FD->getType(), StrictFlexArraysLevel,
/*IgnoreTemplateOrMacroSubstitution=*/true)) {
const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
Offset += Layout.getFieldOffset(FieldNo);
- return Field;
+ return FD;
}
- if (const auto *Record = dyn_cast<RecordDecl>(D))
- if (const FieldDecl *Field =
- FindFlexibleArrayMemberField(Ctx, Record, Name, Offset)) {
+ QualType Ty = FD->getType();
+ if (Ty->isRecordType()) {
+ if (const FieldDecl *Field = FindFlexibleArrayMemberFieldAndOffset(
+ Ctx, Ty->getAsRecordDecl(), FAMDecl, Offset)) {
const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
Offset += Layout.getFieldOffset(FieldNo);
return Field;
}
+ }
- if (!IsUnion && isa<FieldDecl>(D))
+ if (!RD->isUnion())
++FieldNo;
}
@@ -855,14 +869,13 @@ const FieldDecl *CodeGenFunction::FindFlexibleArrayMemberField(
static unsigned CountCountedByAttrs(const RecordDecl *RD) {
unsigned Num = 0;
- for (const Decl *D : RD->decls()) {
- if (const auto *FD = dyn_cast<FieldDecl>(D);
- FD && FD->hasAttr<CountedByAttr>()) {
+ for (const FieldDecl *FD : RD->fields()) {
+ if (FD->getType()->isCountAttributedType())
return ++Num;
- }
- if (const auto *Rec = dyn_cast<RecordDecl>(D))
- Num += CountCountedByAttrs(Rec);
+ QualType Ty = FD->getType();
+ if (Ty->isRecordType())
+ Num += CountCountedByAttrs(Ty->getAsRecordDecl());
}
return Num;
@@ -925,12 +938,14 @@ CodeGenFunction::emitFlexibleArrayMemberSize(const Expr *E, unsigned Type,
// Get the flexible array member Decl.
const RecordDecl *OuterRD = nullptr;
- std::string FAMName;
+ const FieldDecl *FAMDecl = nullptr;
if (const auto *ME = dyn_cast<MemberExpr>(Base)) {
// Check if \p Base is referencing the FAM itself.
const ValueDecl *VD = ME->getMemberDecl();
OuterRD = VD->getDeclContext()->getOuterLexicalRecordContext();
- FAMName = VD->getNameAsString();
+ FAMDecl = dyn_cast<FieldDecl>(VD);
+ if (!FAMDecl)
+ return nullptr;
} else if (const auto *DRE = dyn_cast<DeclRefExpr>(Base)) {
// Check if we're pointing to the whole struct.
QualType Ty = DRE->getDecl()->getType();
@@ -955,7 +970,7 @@ CodeGenFunction::emitFlexibleArrayMemberSize(const Expr *E, unsigned Type,
// };
// };
//
- // We don't konw which 'count' to use in this scenario:
+ // We don't know which 'count' to use in this scenario:
//
// size_t get_size(struct union_of_fams *p) {
// return __builtin_dynamic_object_size(p, 1);
@@ -969,12 +984,14 @@ CodeGenFunction::emitFlexibleArrayMemberSize(const Expr *E, unsigned Type,
if (!OuterRD)
return nullptr;
+  // We call FindFlexibleArrayMemberFieldAndOffset even if FAMDecl is non-null
+ // get its offset.
uint64_t Offset = 0;
- const FieldDecl *FAMDecl =
- FindFlexibleArrayMemberField(Ctx, OuterRD, FAMName, Offset);
+ FAMDecl =
+ FindFlexibleArrayMemberFieldAndOffset(Ctx, OuterRD, FAMDecl, Offset);
Offset = Ctx.toCharUnitsFromBits(Offset).getQuantity();
- if (!FAMDecl || !FAMDecl->hasAttr<CountedByAttr>())
+ if (!FAMDecl || !FAMDecl->getType()->isCountAttributedType())
// No flexible array member found or it doesn't have the "counted_by"
// attribute.
return nullptr;
@@ -984,6 +1001,24 @@ CodeGenFunction::emitFlexibleArrayMemberSize(const Expr *E, unsigned Type,
// Can't find the field referenced by the "counted_by" attribute.
return nullptr;
+ if (isa<DeclRefExpr>(Base))
+    // The whole struct is specified in the __bdos. The calculation of the
+ // whole size of the structure can be done in two ways:
+ //
+ // 1) sizeof(struct S) + count * sizeof(typeof(fam))
+ // 2) offsetof(struct S, fam) + count * sizeof(typeof(fam))
+ //
+    // The first will add additional padding after the end of the array
+    // allocation, while the second method is more precise, but not quite
+ // expected from programmers. See
+ // https://lore.kernel.org/lkml/ZvV6X5FPBBW7CO1f@archlinux/ for a
+ // discussion of the topic.
+ //
+ // GCC isn't (currently) able to calculate __bdos on a pointer to the whole
+ // structure. Therefore, because of the above issue, we'll choose to match
+ // what GCC does for consistency's sake.
+ return nullptr;
+
// Build a load of the counted_by field.
bool IsSigned = CountedByFD->getType()->isSignedIntegerType();
Value *CountedByInst = EmitCountedByFieldExpr(Base, FAMDecl, CountedByFD);
@@ -1014,32 +1049,9 @@ CodeGenFunction::emitFlexibleArrayMemberSize(const Expr *E, unsigned Type,
CharUnits Size = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
llvm::Constant *ElemSize =
llvm::ConstantInt::get(ResType, Size.getQuantity(), IsSigned);
- Value *FAMSize =
+ Value *Res =
Builder.CreateMul(CountedByInst, ElemSize, "", !IsSigned, IsSigned);
- FAMSize = Builder.CreateIntCast(FAMSize, ResType, IsSigned);
- Value *Res = FAMSize;
-
- if (isa<DeclRefExpr>(Base)) {
- // The whole struct is specificed in the __bdos.
- const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(OuterRD);
-
- // Get the offset of the FAM.
- llvm::Constant *FAMOffset = ConstantInt::get(ResType, Offset, IsSigned);
- Value *OffsetAndFAMSize =
- Builder.CreateAdd(FAMOffset, Res, "", !IsSigned, IsSigned);
-
- // Get the full size of the struct.
- llvm::Constant *SizeofStruct =
- ConstantInt::get(ResType, Layout.getSize().getQuantity(), IsSigned);
-
- // max(sizeof(struct s),
- // offsetof(struct s, array) + p->count * sizeof(*p->array))
- Res = IsSigned
- ? Builder.CreateBinaryIntrinsic(llvm::Intrinsic::smax,
- OffsetAndFAMSize, SizeofStruct)
- : Builder.CreateBinaryIntrinsic(llvm::Intrinsic::umax,
- OffsetAndFAMSize, SizeofStruct);
- }
+ Res = Builder.CreateIntCast(Res, ResType, IsSigned);
// A negative \p IdxInst or \p CountedByInst means that the index lands
// outside of the flexible array member. If that's the case, we want to
@@ -1129,6 +1141,7 @@ struct BitTest {
static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
};
+
} // namespace
BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
@@ -2115,9 +2128,9 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
auto AL = ApplyDebugLocation::CreateArtificial(*this);
CharUnits Offset;
- Address BufAddr =
- Address(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Int8Ty,
- BufferAlignment);
+ Address BufAddr = makeNaturalAddressForPointer(
+ Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Ctx.VoidTy,
+ BufferAlignment);
Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
@@ -2160,7 +2173,7 @@ RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
// Ignore argument 1, the format string. It is not currently used.
CallArgList Args;
- Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy);
+ Args.add(RValue::get(BufAddr.emitRawPointer(*this)), Ctx.VoidPtrTy);
for (const auto &Item : Layout.Items) {
int Size = Item.getSizeByte();
@@ -2200,8 +2213,8 @@ RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
if (!isa<Constant>(ArgVal)) {
CleanupKind Cleanup = getARCCleanupKind();
QualType Ty = TheExpr->getType();
- Address Alloca = Address::invalid();
- Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
+ RawAddress Alloca = RawAddress::invalid();
+ RawAddress Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
ArgVal = EmitARCRetain(Ty, ArgVal);
Builder.CreateStore(ArgVal, Addr);
pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
@@ -2234,7 +2247,7 @@ RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
Layout, BufAddr.getAlignment());
EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
- return RValue::get(BufAddr.getPointer());
+ return RValue::get(BufAddr, *this);
}
static bool isSpecialUnsignedMultiplySignedResult(
@@ -2566,7 +2579,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
if (OP.hasMathErrnoOverride())
ErrnoOverriden = OP.getMathErrnoOverride();
}
- // True if 'atttibute__((optnone)) is used. This attibute overrides
+  // True if '__attribute__((optnone))' is used. This attribute overrides
// fast-math which implies math-errno.
bool OptNone = CurFuncDecl && CurFuncDecl->hasAttr<OptimizeNoneAttr>();
@@ -2596,6 +2609,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_fma:
case Builtin::BI__builtin_fmaf:
case Builtin::BI__builtin_fmal:
+ case Builtin::BI__builtin_fmaf16:
case Builtin::BIfma:
case Builtin::BIfmaf:
case Builtin::BIfmal: {
@@ -2648,6 +2662,39 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
if (GenerateIntrinsics) {
switch (BuiltinIDIfNoAsmLabel) {
+ case Builtin::BIacos:
+ case Builtin::BIacosf:
+ case Builtin::BIacosl:
+ case Builtin::BI__builtin_acos:
+ case Builtin::BI__builtin_acosf:
+ case Builtin::BI__builtin_acosf16:
+ case Builtin::BI__builtin_acosl:
+ case Builtin::BI__builtin_acosf128:
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::acos, Intrinsic::experimental_constrained_acos));
+
+ case Builtin::BIasin:
+ case Builtin::BIasinf:
+ case Builtin::BIasinl:
+ case Builtin::BI__builtin_asin:
+ case Builtin::BI__builtin_asinf:
+ case Builtin::BI__builtin_asinf16:
+ case Builtin::BI__builtin_asinl:
+ case Builtin::BI__builtin_asinf128:
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::asin, Intrinsic::experimental_constrained_asin));
+
+ case Builtin::BIatan:
+ case Builtin::BIatanf:
+ case Builtin::BIatanl:
+ case Builtin::BI__builtin_atan:
+ case Builtin::BI__builtin_atanf:
+ case Builtin::BI__builtin_atanf16:
+ case Builtin::BI__builtin_atanl:
+ case Builtin::BI__builtin_atanf128:
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::atan, Intrinsic::experimental_constrained_atan));
+
case Builtin::BIceil:
case Builtin::BIceilf:
case Builtin::BIceill:
@@ -2668,7 +2715,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_copysignf16:
case Builtin::BI__builtin_copysignl:
case Builtin::BI__builtin_copysignf128:
- return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign));
+ return RValue::get(
+ emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
case Builtin::BIcos:
case Builtin::BIcosf:
@@ -2682,6 +2730,17 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Intrinsic::cos,
Intrinsic::experimental_constrained_cos));
+ case Builtin::BIcosh:
+ case Builtin::BIcoshf:
+ case Builtin::BIcoshl:
+ case Builtin::BI__builtin_cosh:
+ case Builtin::BI__builtin_coshf:
+ case Builtin::BI__builtin_coshf16:
+ case Builtin::BI__builtin_coshl:
+ case Builtin::BI__builtin_coshf128:
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::cosh, Intrinsic::experimental_constrained_cosh));
+
case Builtin::BIexp:
case Builtin::BIexpf:
case Builtin::BIexpl:
@@ -2713,7 +2772,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// TODO: strictfp support
if (Builder.getIsFPConstrained())
break;
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::exp10));
+ return RValue::get(
+ emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::exp10));
}
case Builtin::BIfabs:
case Builtin::BIfabsf:
@@ -2723,7 +2783,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_fabsf16:
case Builtin::BI__builtin_fabsl:
case Builtin::BI__builtin_fabsf128:
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs));
+ return RValue::get(
+ emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs));
case Builtin::BIfloor:
case Builtin::BIfloorf:
@@ -2896,6 +2957,17 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Intrinsic::sin,
Intrinsic::experimental_constrained_sin));
+ case Builtin::BIsinh:
+ case Builtin::BIsinhf:
+ case Builtin::BIsinhl:
+ case Builtin::BI__builtin_sinh:
+ case Builtin::BI__builtin_sinhf:
+ case Builtin::BI__builtin_sinhf16:
+ case Builtin::BI__builtin_sinhl:
+ case Builtin::BI__builtin_sinhf128:
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::sinh, Intrinsic::experimental_constrained_sinh));
+
case Builtin::BIsqrt:
case Builtin::BIsqrtf:
case Builtin::BIsqrtl:
@@ -2910,6 +2982,29 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
SetSqrtFPAccuracy(Call);
return RValue::get(Call);
}
+
+ case Builtin::BItan:
+ case Builtin::BItanf:
+ case Builtin::BItanl:
+ case Builtin::BI__builtin_tan:
+ case Builtin::BI__builtin_tanf:
+ case Builtin::BI__builtin_tanf16:
+ case Builtin::BI__builtin_tanl:
+ case Builtin::BI__builtin_tanf128:
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::tan, Intrinsic::experimental_constrained_tan));
+
+ case Builtin::BItanh:
+ case Builtin::BItanhf:
+ case Builtin::BItanhl:
+ case Builtin::BI__builtin_tanh:
+ case Builtin::BI__builtin_tanhf:
+ case Builtin::BI__builtin_tanhf16:
+ case Builtin::BI__builtin_tanhl:
+ case Builtin::BI__builtin_tanhf128:
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::tanh, Intrinsic::experimental_constrained_tanh));
+
case Builtin::BItrunc:
case Builtin::BItruncf:
case Builtin::BItruncl:
@@ -2982,7 +3077,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Check NonnullAttribute/NullabilityArg and Alignment.
auto EmitArgCheck = [&](TypeCheckKind Kind, Address A, const Expr *Arg,
unsigned ParmNum) {
- Value *Val = A.getPointer();
+ Value *Val = A.emitRawPointer(*this);
EmitNonNullArgCheck(RValue::get(Val), Arg->getType(), Arg->getExprLoc(), FD,
ParmNum);
@@ -3011,13 +3106,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_va_end:
EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
? EmitScalarExpr(E->getArg(0))
- : EmitVAListRef(E->getArg(0)).getPointer(),
+ : EmitVAListRef(E->getArg(0)).emitRawPointer(*this),
BuiltinID != Builtin::BI__builtin_va_end);
return RValue::get(nullptr);
case Builtin::BI__builtin_va_copy: {
- Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
- Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();
- Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy), {DstPtr, SrcPtr});
+ Value *DstPtr = EmitVAListRef(E->getArg(0)).emitRawPointer(*this);
+ Value *SrcPtr = EmitVAListRef(E->getArg(1)).emitRawPointer(*this);
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy, {DstPtr->getType()}),
+ {DstPtr, SrcPtr});
return RValue::get(nullptr);
}
case Builtin::BIabs:
@@ -3127,36 +3223,66 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_ctzs:
case Builtin::BI__builtin_ctz:
case Builtin::BI__builtin_ctzl:
- case Builtin::BI__builtin_ctzll: {
- Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);
+ case Builtin::BI__builtin_ctzll:
+ case Builtin::BI__builtin_ctzg: {
+ bool HasFallback = BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_ctzg &&
+ E->getNumArgs() > 1;
+
+ Value *ArgValue =
+ HasFallback ? EmitScalarExpr(E->getArg(0))
+ : EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);
llvm::Type *ArgType = ArgValue->getType();
Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
- Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
+ Value *ZeroUndef =
+ Builder.getInt1(HasFallback || getTarget().isCLZForZeroUndef());
Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
if (Result->getType() != ResultType)
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
- "cast");
- return RValue::get(Result);
+ Result =
+ Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
+ if (!HasFallback)
+ return RValue::get(Result);
+
+ Value *Zero = Constant::getNullValue(ArgType);
+ Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
+ Value *FallbackValue = EmitScalarExpr(E->getArg(1));
+ Value *ResultOrFallback =
+ Builder.CreateSelect(IsZero, FallbackValue, Result, "ctzg");
+ return RValue::get(ResultOrFallback);
}
case Builtin::BI__builtin_clzs:
case Builtin::BI__builtin_clz:
case Builtin::BI__builtin_clzl:
- case Builtin::BI__builtin_clzll: {
- Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);
+ case Builtin::BI__builtin_clzll:
+ case Builtin::BI__builtin_clzg: {
+ bool HasFallback = BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_clzg &&
+ E->getNumArgs() > 1;
+
+ Value *ArgValue =
+ HasFallback ? EmitScalarExpr(E->getArg(0))
+ : EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);
llvm::Type *ArgType = ArgValue->getType();
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
- Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
+ Value *ZeroUndef =
+ Builder.getInt1(HasFallback || getTarget().isCLZForZeroUndef());
Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
if (Result->getType() != ResultType)
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
- "cast");
- return RValue::get(Result);
+ Result =
+ Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
+ if (!HasFallback)
+ return RValue::get(Result);
+
+ Value *Zero = Constant::getNullValue(ArgType);
+ Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
+ Value *FallbackValue = EmitScalarExpr(E->getArg(1));
+ Value *ResultOrFallback =
+ Builder.CreateSelect(IsZero, FallbackValue, Result, "clzg");
+ return RValue::get(ResultOrFallback);
}
case Builtin::BI__builtin_ffs:
case Builtin::BI__builtin_ffsl:
@@ -3216,7 +3342,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__popcnt64:
case Builtin::BI__builtin_popcount:
case Builtin::BI__builtin_popcountl:
- case Builtin::BI__builtin_popcountll: {
+ case Builtin::BI__builtin_popcountll:
+ case Builtin::BI__builtin_popcountg: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
@@ -3225,8 +3352,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, ArgValue);
if (Result->getType() != ResultType)
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
- "cast");
+ Result =
+ Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
return RValue::get(Result);
}
case Builtin::BI__builtin_unpredictable: {
@@ -3317,6 +3444,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.CreateAssumption(ConstantInt::getTrue(getLLVMContext()), {OBD});
return RValue::get(nullptr);
}
+ case Builtin::BI__builtin_allow_runtime_check: {
+ StringRef Kind =
+ cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())->getString();
+ LLVMContext &Ctx = CGM.getLLVMContext();
+ llvm::Value *Allow = Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::allow_runtime_check),
+ llvm::MetadataAsValue::get(Ctx, llvm::MDString::get(Ctx, Kind)));
+ return RValue::get(Allow);
+ }
case Builtin::BI__arithmetic_fence: {
// Create the builtin call if FastMath is selected, and the target
// supports the builtin, otherwise just return the argument.
@@ -3353,13 +3489,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI_byteswap_ushort:
case Builtin::BI_byteswap_ulong:
case Builtin::BI_byteswap_uint64: {
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap));
+ return RValue::get(
+ emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bswap));
}
case Builtin::BI__builtin_bitreverse8:
case Builtin::BI__builtin_bitreverse16:
case Builtin::BI__builtin_bitreverse32:
case Builtin::BI__builtin_bitreverse64: {
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse));
+ return RValue::get(
+ emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bitreverse));
}
case Builtin::BI__builtin_rotateleft8:
case Builtin::BI__builtin_rotateleft16:
@@ -3443,6 +3581,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
return RValue::get(Builder.CreateCall(F));
}
+ case Builtin::BI__builtin_readsteadycounter: {
+ Function *F = CGM.getIntrinsic(Intrinsic::readsteadycounter);
+ return RValue::get(Builder.CreateCall(F));
+ }
case Builtin::BI__builtin___clear_cache: {
Value *Begin = EmitScalarExpr(E->getArg(0));
Value *End = EmitScalarExpr(E->getArg(1));
@@ -3452,6 +3594,18 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_trap:
EmitTrapCall(Intrinsic::trap);
return RValue::get(nullptr);
+ case Builtin::BI__builtin_verbose_trap: {
+ llvm::DILocation *TrapLocation = Builder.getCurrentDebugLocation();
+ if (getDebugInfo()) {
+ TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor(
+ TrapLocation, *E->getArg(0)->tryEvaluateString(getContext()),
+ *E->getArg(1)->tryEvaluateString(getContext()));
+ }
+ ApplyDebugLocation ApplyTrapDI(*this, TrapLocation);
+ // Currently no attempt is made to prevent traps from being merged.
+ EmitTrapCall(Intrinsic::trap);
+ return RValue::get(nullptr);
+ }
case Builtin::BI__debugbreak:
EmitTrapCall(Intrinsic::debugtrap);
return RValue::get(nullptr);
@@ -3489,7 +3643,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// frexpl instead of legalizing this type in the BE.
if (&getTarget().getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble())
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Builtin::BI__builtin_frexp:
case Builtin::BI__builtin_frexpf:
@@ -3632,67 +3786,90 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Intrinsic::abs, EmitScalarExpr(E->getArg(0)),
Builder.getFalse(), nullptr, "elt.abs");
else
- Result = emitUnaryBuiltin(*this, E, llvm::Intrinsic::fabs, "elt.abs");
+ Result = emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::fabs, "elt.abs");
return RValue::get(Result);
}
-
+ case Builtin::BI__builtin_elementwise_acos:
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::acos, "elt.acos"));
+ case Builtin::BI__builtin_elementwise_asin:
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::asin, "elt.asin"));
+ case Builtin::BI__builtin_elementwise_atan:
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::atan, "elt.atan"));
case Builtin::BI__builtin_elementwise_ceil:
- return RValue::get(
- emitUnaryBuiltin(*this, E, llvm::Intrinsic::ceil, "elt.ceil"));
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::ceil, "elt.ceil"));
case Builtin::BI__builtin_elementwise_exp:
- return RValue::get(
- emitUnaryBuiltin(*this, E, llvm::Intrinsic::exp, "elt.exp"));
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::exp, "elt.exp"));
case Builtin::BI__builtin_elementwise_exp2:
- return RValue::get(
- emitUnaryBuiltin(*this, E, llvm::Intrinsic::exp2, "elt.exp2"));
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::exp2, "elt.exp2"));
case Builtin::BI__builtin_elementwise_log:
- return RValue::get(
- emitUnaryBuiltin(*this, E, llvm::Intrinsic::log, "elt.log"));
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::log, "elt.log"));
case Builtin::BI__builtin_elementwise_log2:
- return RValue::get(
- emitUnaryBuiltin(*this, E, llvm::Intrinsic::log2, "elt.log2"));
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::log2, "elt.log2"));
case Builtin::BI__builtin_elementwise_log10:
- return RValue::get(
- emitUnaryBuiltin(*this, E, llvm::Intrinsic::log10, "elt.log10"));
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::log10, "elt.log10"));
case Builtin::BI__builtin_elementwise_pow: {
- return RValue::get(emitBinaryBuiltin(*this, E, llvm::Intrinsic::pow));
+ return RValue::get(
+ emitBuiltinWithOneOverloadedType<2>(*this, E, llvm::Intrinsic::pow));
}
case Builtin::BI__builtin_elementwise_bitreverse:
- return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::bitreverse,
- "elt.bitreverse"));
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::bitreverse, "elt.bitreverse"));
case Builtin::BI__builtin_elementwise_cos:
- return RValue::get(
- emitUnaryBuiltin(*this, E, llvm::Intrinsic::cos, "elt.cos"));
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::cos, "elt.cos"));
+ case Builtin::BI__builtin_elementwise_cosh:
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::cosh, "elt.cosh"));
case Builtin::BI__builtin_elementwise_floor:
- return RValue::get(
- emitUnaryBuiltin(*this, E, llvm::Intrinsic::floor, "elt.floor"));
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::floor, "elt.floor"));
case Builtin::BI__builtin_elementwise_roundeven:
- return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::roundeven,
- "elt.roundeven"));
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::roundeven, "elt.roundeven"));
case Builtin::BI__builtin_elementwise_round:
- return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::round,
- "elt.round"));
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::round, "elt.round"));
case Builtin::BI__builtin_elementwise_rint:
- return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::rint,
- "elt.rint"));
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::rint, "elt.rint"));
case Builtin::BI__builtin_elementwise_nearbyint:
- return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::nearbyint,
- "elt.nearbyint"));
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::nearbyint, "elt.nearbyint"));
case Builtin::BI__builtin_elementwise_sin:
- return RValue::get(
- emitUnaryBuiltin(*this, E, llvm::Intrinsic::sin, "elt.sin"));
-
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::sin, "elt.sin"));
+ case Builtin::BI__builtin_elementwise_sinh:
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::sinh, "elt.sinh"));
+ case Builtin::BI__builtin_elementwise_tan:
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::tan, "elt.tan"));
+ case Builtin::BI__builtin_elementwise_tanh:
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::tanh, "elt.tanh"));
case Builtin::BI__builtin_elementwise_trunc:
- return RValue::get(
- emitUnaryBuiltin(*this, E, llvm::Intrinsic::trunc, "elt.trunc"));
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::trunc, "elt.trunc"));
case Builtin::BI__builtin_elementwise_canonicalize:
- return RValue::get(
- emitUnaryBuiltin(*this, E, llvm::Intrinsic::canonicalize, "elt.canonicalize"));
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
+ *this, E, llvm::Intrinsic::canonicalize, "elt.canonicalize"));
case Builtin::BI__builtin_elementwise_copysign:
- return RValue::get(emitBinaryBuiltin(*this, E, llvm::Intrinsic::copysign));
+ return RValue::get(emitBuiltinWithOneOverloadedType<2>(
+ *this, E, llvm::Intrinsic::copysign));
case Builtin::BI__builtin_elementwise_fma:
- return RValue::get(emitTernaryBuiltin(*this, E, llvm::Intrinsic::fma));
+ return RValue::get(
+ emitBuiltinWithOneOverloadedType<3>(*this, E, llvm::Intrinsic::fma));
case Builtin::BI__builtin_elementwise_add_sat:
case Builtin::BI__builtin_elementwise_sub_sat: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
@@ -3746,9 +3923,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
case Builtin::BI__builtin_reduce_max: {
- auto GetIntrinsicID = [](QualType QT) {
+ auto GetIntrinsicID = [this](QualType QT) {
if (auto *VecTy = QT->getAs<VectorType>())
QT = VecTy->getElementType();
+ else if (QT->isSizelessVectorType())
+ QT = QT->getSizelessVectorEltType(CGM.getContext());
+
if (QT->isSignedIntegerType())
return llvm::Intrinsic::vector_reduce_smax;
if (QT->isUnsignedIntegerType())
@@ -3756,14 +3936,17 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
assert(QT->isFloatingType() && "must have a float here");
return llvm::Intrinsic::vector_reduce_fmax;
};
- return RValue::get(emitUnaryBuiltin(
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
*this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
}
case Builtin::BI__builtin_reduce_min: {
- auto GetIntrinsicID = [](QualType QT) {
+ auto GetIntrinsicID = [this](QualType QT) {
if (auto *VecTy = QT->getAs<VectorType>())
QT = VecTy->getElementType();
+ else if (QT->isSizelessVectorType())
+ QT = QT->getSizelessVectorEltType(CGM.getContext());
+
if (QT->isSignedIntegerType())
return llvm::Intrinsic::vector_reduce_smin;
if (QT->isUnsignedIntegerType())
@@ -3772,24 +3955,24 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return llvm::Intrinsic::vector_reduce_fmin;
};
- return RValue::get(emitUnaryBuiltin(
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
*this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
}
case Builtin::BI__builtin_reduce_add:
- return RValue::get(emitUnaryBuiltin(
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
*this, E, llvm::Intrinsic::vector_reduce_add, "rdx.add"));
case Builtin::BI__builtin_reduce_mul:
- return RValue::get(emitUnaryBuiltin(
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
*this, E, llvm::Intrinsic::vector_reduce_mul, "rdx.mul"));
case Builtin::BI__builtin_reduce_xor:
- return RValue::get(emitUnaryBuiltin(
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
*this, E, llvm::Intrinsic::vector_reduce_xor, "rdx.xor"));
case Builtin::BI__builtin_reduce_or:
- return RValue::get(emitUnaryBuiltin(
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
*this, E, llvm::Intrinsic::vector_reduce_or, "rdx.or"));
case Builtin::BI__builtin_reduce_and:
- return RValue::get(emitUnaryBuiltin(
+ return RValue::get(emitBuiltinWithOneOverloadedType<1>(
*this, E, llvm::Intrinsic::vector_reduce_and, "rdx.and"));
case Builtin::BI__builtin_matrix_transpose: {
@@ -3811,13 +3994,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
Address Src = EmitPointerWithAlignment(E->getArg(0));
- EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
- E->getArg(0)->getExprLoc(), FD, 0);
+ EmitNonNullArgCheck(RValue::get(Src.emitRawPointer(*this)),
+ E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
+ 0);
Value *Result = MB.CreateColumnMajorLoad(
- Src.getElementType(), Src.getPointer(),
+ Src.getElementType(), Src.emitRawPointer(*this),
Align(Src.getAlignment().getQuantity()), Stride, IsVolatile,
- ResultTy->getNumRows(), ResultTy->getNumColumns(),
- "matrix");
+ ResultTy->getNumRows(), ResultTy->getNumColumns(), "matrix");
return RValue::get(Result);
}
@@ -3832,11 +4015,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
assert(PtrTy && "arg1 must be of pointer type");
bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
- EmitNonNullArgCheck(RValue::get(Dst.getPointer()), E->getArg(1)->getType(),
- E->getArg(1)->getExprLoc(), FD, 0);
+ EmitNonNullArgCheck(RValue::get(Dst.emitRawPointer(*this)),
+ E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
+ 0);
Value *Result = MB.CreateColumnMajorStore(
- Matrix, Dst.getPointer(), Align(Dst.getAlignment().getQuantity()),
- Stride, IsVolatile, MatrixTy->getNumRows(), MatrixTy->getNumColumns());
+ Matrix, Dst.emitRawPointer(*this),
+ Align(Dst.getAlignment().getQuantity()), Stride, IsVolatile,
+ MatrixTy->getNumRows(), MatrixTy->getNumColumns());
return RValue::get(Result);
}
@@ -3995,7 +4180,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_bzero: {
Address Dest = EmitPointerWithAlignment(E->getArg(0));
Value *SizeVal = EmitScalarExpr(E->getArg(1));
- EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
+ EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
return RValue::get(nullptr);
@@ -4006,12 +4191,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Address Src = EmitPointerWithAlignment(E->getArg(0));
Address Dest = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
- E->getArg(0)->getExprLoc(), FD, 0);
- EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(1)->getType(),
- E->getArg(1)->getExprLoc(), FD, 0);
+ EmitNonNullArgCheck(RValue::get(Src.emitRawPointer(*this)),
+ E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
+ 0);
+ EmitNonNullArgCheck(RValue::get(Dest.emitRawPointer(*this)),
+ E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
+ 0);
Builder.CreateMemMove(Dest, Src, SizeVal, false);
- return RValue::get(Dest.getPointer());
+ return RValue::get(nullptr);
}
case Builtin::BImemcpy:
@@ -4026,10 +4213,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.CreateMemCpy(Dest, Src, SizeVal, false);
if (BuiltinID == Builtin::BImempcpy ||
BuiltinID == Builtin::BI__builtin_mempcpy)
- return RValue::get(Builder.CreateInBoundsGEP(Dest.getElementType(),
- Dest.getPointer(), SizeVal));
+ return RValue::get(Builder.CreateInBoundsGEP(
+ Dest.getElementType(), Dest.emitRawPointer(*this), SizeVal));
else
- return RValue::get(Dest.getPointer());
+ return RValue::get(Dest, *this);
}
case Builtin::BI__builtin_memcpy_inline: {
@@ -4061,7 +4248,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
Builder.CreateMemCpy(Dest, Src, SizeVal, false);
- return RValue::get(Dest.getPointer());
+ return RValue::get(Dest, *this);
}
case Builtin::BI__builtin_objc_memmove_collectable: {
@@ -4070,7 +4257,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *SizeVal = EmitScalarExpr(E->getArg(2));
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
DestAddr, SrcAddr, SizeVal);
- return RValue::get(DestAddr.getPointer());
+ return RValue::get(DestAddr, *this);
}
case Builtin::BI__builtin___memmove_chk: {
@@ -4087,7 +4274,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
Builder.CreateMemMove(Dest, Src, SizeVal, false);
- return RValue::get(Dest.getPointer());
+ return RValue::get(Dest, *this);
}
case Builtin::BImemmove:
@@ -4098,7 +4285,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
Builder.CreateMemMove(Dest, Src, SizeVal, false);
- return RValue::get(Dest.getPointer());
+ return RValue::get(Dest, *this);
}
case Builtin::BImemset:
case Builtin::BI__builtin_memset: {
@@ -4106,10 +4293,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
Builder.getInt8Ty());
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
+ EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
- return RValue::get(Dest.getPointer());
+ return RValue::get(Dest, *this);
}
case Builtin::BI__builtin_memset_inline: {
Address Dest = EmitPointerWithAlignment(E->getArg(0));
@@ -4117,8 +4304,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
uint64_t Size =
E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
- EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
- E->getArg(0)->getExprLoc(), FD, 0);
+ EmitNonNullArgCheck(RValue::get(Dest.emitRawPointer(*this)),
+ E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
+ 0);
Builder.CreateMemSetInline(Dest, ByteVal, Size);
return RValue::get(nullptr);
}
@@ -4137,7 +4325,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.getInt8Ty());
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
- return RValue::get(Dest.getPointer());
+ return RValue::get(Dest, *this);
}
case Builtin::BI__builtin_wmemchr: {
// The MSVC runtime library does not provide a definition of wmemchr, so we
@@ -4359,14 +4547,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Store the stack pointer to the setjmp buffer.
Value *StackAddr = Builder.CreateStackSave();
- assert(Buf.getPointer()->getType() == StackAddr->getType());
+ assert(Buf.emitRawPointer(*this)->getType() == StackAddr->getType());
Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
Builder.CreateStore(StackAddr, StackSaveSlot);
// Call LLVM's EH setjmp, which is lightweight.
Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
- return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
+ return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
}
case Builtin::BI__builtin_longjmp: {
Value *Buf = EmitScalarExpr(E->getArg(0));
@@ -5202,6 +5390,76 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__iso_volatile_store64:
return RValue::get(EmitISOVolatileStore(*this, E));
+ case Builtin::BI__builtin_ptrauth_sign_constant:
+ return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
+
+ case Builtin::BI__builtin_ptrauth_auth:
+ case Builtin::BI__builtin_ptrauth_auth_and_resign:
+ case Builtin::BI__builtin_ptrauth_blend_discriminator:
+ case Builtin::BI__builtin_ptrauth_sign_generic_data:
+ case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
+ case Builtin::BI__builtin_ptrauth_strip: {
+ // Emit the arguments.
+ SmallVector<llvm::Value *, 5> Args;
+ for (auto argExpr : E->arguments())
+ Args.push_back(EmitScalarExpr(argExpr));
+
+ // Cast the value to intptr_t, saving its original type.
+ llvm::Type *OrigValueType = Args[0]->getType();
+ if (OrigValueType->isPointerTy())
+ Args[0] = Builder.CreatePtrToInt(Args[0], IntPtrTy);
+
+ switch (BuiltinID) {
+ case Builtin::BI__builtin_ptrauth_auth_and_resign:
+ if (Args[4]->getType()->isPointerTy())
+ Args[4] = Builder.CreatePtrToInt(Args[4], IntPtrTy);
+ [[fallthrough]];
+
+ case Builtin::BI__builtin_ptrauth_auth:
+ case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
+ if (Args[2]->getType()->isPointerTy())
+ Args[2] = Builder.CreatePtrToInt(Args[2], IntPtrTy);
+ break;
+
+ case Builtin::BI__builtin_ptrauth_sign_generic_data:
+ if (Args[1]->getType()->isPointerTy())
+ Args[1] = Builder.CreatePtrToInt(Args[1], IntPtrTy);
+ break;
+
+ case Builtin::BI__builtin_ptrauth_blend_discriminator:
+ case Builtin::BI__builtin_ptrauth_strip:
+ break;
+ }
+
+ // Call the intrinsic.
+ auto IntrinsicID = [&]() -> unsigned {
+ switch (BuiltinID) {
+ case Builtin::BI__builtin_ptrauth_auth:
+ return llvm::Intrinsic::ptrauth_auth;
+ case Builtin::BI__builtin_ptrauth_auth_and_resign:
+ return llvm::Intrinsic::ptrauth_resign;
+ case Builtin::BI__builtin_ptrauth_blend_discriminator:
+ return llvm::Intrinsic::ptrauth_blend;
+ case Builtin::BI__builtin_ptrauth_sign_generic_data:
+ return llvm::Intrinsic::ptrauth_sign_generic;
+ case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
+ return llvm::Intrinsic::ptrauth_sign;
+ case Builtin::BI__builtin_ptrauth_strip:
+ return llvm::Intrinsic::ptrauth_strip;
+ }
+ llvm_unreachable("bad ptrauth intrinsic");
+ }();
+ auto Intrinsic = CGM.getIntrinsic(IntrinsicID);
+ llvm::Value *Result = EmitRuntimeCall(Intrinsic, Args);
+
+ if (BuiltinID != Builtin::BI__builtin_ptrauth_sign_generic_data &&
+ BuiltinID != Builtin::BI__builtin_ptrauth_blend_discriminator &&
+ OrigValueType->isPointerTy()) {
+ Result = Builder.CreateIntToPtr(Result, OrigValueType);
+ }
+ return RValue::get(Result);
+ }
+
case Builtin::BI__exception_code:
case Builtin::BI_exception_code:
return RValue::get(EmitSEHExceptionCode());
@@ -5454,7 +5712,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
// OpenCL v2.0, s6.13.17 - Enqueue kernel function.
- // It contains four different overload formats specified in Table 6.13.17.1.
+ // Table 6.13.17.1 specifies four overload forms of enqueue_kernel.
+ // The code below expands the builtin call to a call to one of the following
+ // functions that an OpenCL runtime library will have to provide:
+ // __enqueue_kernel_basic
+ // __enqueue_kernel_varargs
+ // __enqueue_kernel_basic_events
+ // __enqueue_kernel_events_varargs
case Builtin::BIenqueue_kernel: {
StringRef Name; // Generated function call name
unsigned NumArgs = E->getNumArgs();
@@ -5466,8 +5730,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
- llvm::Value *Range = NDRangeL.getAddress(*this).getPointer();
- llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType();
+ llvm::Value *Range = NDRangeL.getAddress().emitRawPointer(*this);
+ llvm::Type *RangeTy = NDRangeL.getAddress().getType();
if (NumArgs == 4) {
// The most basic form of the call with parameters:
@@ -5486,7 +5750,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
AttrBuilder B(Builder.getContext());
- B.addByValAttr(NDRangeL.getAddress(*this).getElementType());
+ B.addByValAttr(NDRangeL.getAddress().getElementType());
llvm::AttributeList ByValAttrSet =
llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
@@ -5575,9 +5839,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
getContext(), Expr::NPC_ValueDependentIsNotNull)) {
EventWaitList = llvm::ConstantPointerNull::get(PtrTy);
} else {
- EventWaitList = E->getArg(4)->getType()->isArrayType()
- ? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
- : EmitScalarExpr(E->getArg(4));
+ EventWaitList =
+ E->getArg(4)->getType()->isArrayType()
+ ? EmitArrayToPointerDecay(E->getArg(4)).emitRawPointer(*this)
+ : EmitScalarExpr(E->getArg(4));
// Convert to generic address space.
EventWaitList = Builder.CreatePointerCast(EventWaitList, PtrTy);
}
@@ -5634,7 +5899,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
EmitLifetimeEnd(TmpSize, TmpPtr);
return Call;
}
- [[fallthrough]];
+ llvm_unreachable("Unexpected enqueue_kernel signature");
}
// OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
// parameter.
@@ -5673,7 +5938,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
getContext().getTargetAddressSpace(LangAS::opencl_generic));
LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
- llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer();
+ llvm::Value *NDRange = NDRangeL.getAddress().emitRawPointer(*this);
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
Value *Kernel =
@@ -5691,7 +5956,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Name),
{NDRange, Kernel, Block}));
}
-
case Builtin::BI__builtin_store_half:
case Builtin::BI__builtin_store_halff: {
Value *Val = EmitScalarExpr(E->getArg(0));
@@ -5710,14 +5974,19 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *HalfVal = Builder.CreateLoad(Address);
return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
}
+ case Builtin::BI__builtin_printf:
case Builtin::BIprintf:
if (getTarget().getTriple().isNVPTX() ||
- getTarget().getTriple().isAMDGCN()) {
+ getTarget().getTriple().isAMDGCN() ||
+ (getTarget().getTriple().isSPIRV() &&
+ getTarget().getTriple().getVendor() == Triple::VendorType::AMD)) {
if (getLangOpts().OpenMPIsTargetDevice)
return EmitOpenMPDevicePrintfCallExpr(E);
if (getTarget().getTriple().isNVPTX())
return EmitNVPTXDevicePrintfCallExpr(E);
- if (getTarget().getTriple().isAMDGCN() && getLangOpts().HIP)
+ if ((getTarget().getTriple().isAMDGCN() ||
+ getTarget().getTriple().isSPIRV()) &&
+ getLangOpts().HIP)
return EmitAMDGPUDevicePrintfCallExpr(E);
}
@@ -5726,7 +5995,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_canonicalizef:
case Builtin::BI__builtin_canonicalizef16:
case Builtin::BI__builtin_canonicalizel:
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize));
+ return RValue::get(
+ emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::canonicalize));
case Builtin::BI__builtin_thread_pointer: {
if (!getContext().getTargetInfo().isTLSSupported())
@@ -5757,7 +6027,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
auto PTy0 = FTy->getParamType(0);
if (PTy0 != Arg0Val->getType()) {
if (Arg0Ty->isArrayType())
- Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer();
+ Arg0Val = EmitArrayToPointerDecay(Arg0).emitRawPointer(*this);
else
Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
}
@@ -5795,7 +6065,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
auto PTy1 = FTy->getParamType(1);
if (PTy1 != Arg1Val->getType()) {
if (Arg1Ty->isArrayType())
- Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer();
+ Arg1Val = EmitArrayToPointerDecay(Arg1).emitRawPointer(*this);
else
Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
}
@@ -5809,7 +6079,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_ms_va_start:
case Builtin::BI__builtin_ms_va_end:
return RValue::get(
- EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
+ EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).emitRawPointer(*this),
BuiltinID == Builtin::BI__builtin_ms_va_start));
case Builtin::BI__builtin_ms_va_copy: {
@@ -5833,11 +6103,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
auto Name = CGM.getCUDARuntime().getDeviceSideName(
cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl());
auto Str = CGM.GetAddrOfConstantCString(Name, "");
- llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0),
- llvm::ConstantInt::get(SizeTy, 0)};
- auto *Ptr = llvm::ConstantExpr::getGetElementPtr(Str.getElementType(),
- Str.getPointer(), Zeros);
- return RValue::get(Ptr);
+ return RValue::get(Str.getPointer());
}
}
@@ -5851,8 +6117,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// If this is a predefined lib function (e.g. malloc), emit the call
// using exactly the normal call path.
if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
- return emitLibraryCall(*this, FD, E,
- cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));
+ return emitLibraryCall(*this, FD, E, CGM.getRawFunctionPointer(FD));
// Check that a call to a target specific builtin has the correct target
// features.
@@ -5871,6 +6136,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
if (!Prefix.empty()) {
IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin(Prefix.data(), Name);
+ if (IntrinsicID == Intrinsic::not_intrinsic && Prefix == "spv" &&
+ getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
+ IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin("amdgcn", Name);
// NOTE we don't need to perform a compatibility flag check here since the
// intrinsics are declared in Builtins*.def via LANGBUILTIN which filter the
// MS builtins via ALL_MS_LANGUAGES and are filtered earlier.
@@ -5907,8 +6175,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
}
- assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
- "Must be able to losslessly bit cast to param");
// Cast vector type (e.g., v256i32) to x86_amx, this only happen
// in amx intrinsics.
if (PTy->isX86_AMXTy())
@@ -5938,8 +6204,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
}
- assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
- "Must be able to losslessly bit cast result type");
// Cast x86_amx to vector type (e.g., v256i32), this only happen
// in amx intrinsics.
if (V->getType()->isX86_AMXTy())
@@ -5973,7 +6237,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(nullptr);
return RValue::get(V);
case TEK_Aggregate:
- return RValue::getAggregate(ReturnValue.getValue(),
+ return RValue::getAggregate(ReturnValue.getAddress(),
ReturnValue.isVolatile());
case TEK_Complex:
llvm_unreachable("No current target builtin returns complex");
@@ -5981,6 +6245,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
}
+ // EmitHLSLBuiltinExpr will check getLangOpts().HLSL
+ if (Value *V = EmitHLSLBuiltinExpr(BuiltinID, E))
+ return RValue::get(V);
+
if (getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice)
return EmitHipStdParUnsupportedBuiltin(this, FD);
@@ -6041,6 +6309,10 @@ static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue);
+ case llvm::Triple::spirv64:
+ if (CGF->getTarget().getTriple().getOS() != llvm::Triple::OSType::AMDHSA)
+ return nullptr;
+ return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
default:
return nullptr;
}
@@ -7080,8 +7352,6 @@ static const std::pair<unsigned, unsigned> NEONEquivalentIntrinsicMap[] = {
{ NEON::BI__builtin_neon_vabdq_f16, NEON::BI__builtin_neon_vabdq_v, },
{ NEON::BI__builtin_neon_vabs_f16, NEON::BI__builtin_neon_vabs_v, },
{ NEON::BI__builtin_neon_vabsq_f16, NEON::BI__builtin_neon_vabsq_v, },
- { NEON::BI__builtin_neon_vbsl_f16, NEON::BI__builtin_neon_vbsl_v, },
- { NEON::BI__builtin_neon_vbslq_f16, NEON::BI__builtin_neon_vbslq_v, },
{ NEON::BI__builtin_neon_vcage_f16, NEON::BI__builtin_neon_vcage_v, },
{ NEON::BI__builtin_neon_vcageq_f16, NEON::BI__builtin_neon_vcageq_v, },
{ NEON::BI__builtin_neon_vcagt_f16, NEON::BI__builtin_neon_vcagt_v, },
@@ -7100,8 +7370,6 @@ static const std::pair<unsigned, unsigned> NEONEquivalentIntrinsicMap[] = {
{ NEON::BI__builtin_neon_vclezq_f16, NEON::BI__builtin_neon_vclezq_v, },
{ NEON::BI__builtin_neon_vcltz_f16, NEON::BI__builtin_neon_vcltz_v, },
{ NEON::BI__builtin_neon_vcltzq_f16, NEON::BI__builtin_neon_vcltzq_v, },
- { NEON::BI__builtin_neon_vext_f16, NEON::BI__builtin_neon_vext_v, },
- { NEON::BI__builtin_neon_vextq_f16, NEON::BI__builtin_neon_vextq_v, },
{ NEON::BI__builtin_neon_vfma_f16, NEON::BI__builtin_neon_vfma_v, },
{ NEON::BI__builtin_neon_vfma_lane_f16, NEON::BI__builtin_neon_vfma_lane_v, },
{ NEON::BI__builtin_neon_vfma_laneq_f16, NEON::BI__builtin_neon_vfma_laneq_v, },
@@ -7204,12 +7472,6 @@ static const std::pair<unsigned, unsigned> NEONEquivalentIntrinsicMap[] = {
{ NEON::BI__builtin_neon_vst4_lane_bf16, NEON::BI__builtin_neon_vst4_lane_v },
{ NEON::BI__builtin_neon_vst4q_bf16, NEON::BI__builtin_neon_vst4q_v },
{ NEON::BI__builtin_neon_vst4q_lane_bf16, NEON::BI__builtin_neon_vst4q_lane_v },
- { NEON::BI__builtin_neon_vtrn_f16, NEON::BI__builtin_neon_vtrn_v, },
- { NEON::BI__builtin_neon_vtrnq_f16, NEON::BI__builtin_neon_vtrnq_v, },
- { NEON::BI__builtin_neon_vuzp_f16, NEON::BI__builtin_neon_vuzp_v, },
- { NEON::BI__builtin_neon_vuzpq_f16, NEON::BI__builtin_neon_vuzpq_v, },
- { NEON::BI__builtin_neon_vzip_f16, NEON::BI__builtin_neon_vzip_v, },
- { NEON::BI__builtin_neon_vzipq_f16, NEON::BI__builtin_neon_vzipq_v, },
// The mangling rules cause us to have one ID for each type for vldap1(q)_lane
// and vstl1(q)_lane, but codegen is equivalent for all of them. Choose an
// arbitrary one to be handled as tha canonical variation.
@@ -8739,7 +9001,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Get the alignment for the argument in addition to the value;
// we'll use it later.
PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
- Ops.push_back(PtrOp0.getPointer());
+ Ops.push_back(PtrOp0.emitRawPointer(*this));
continue;
}
}
@@ -8766,7 +9028,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Get the alignment for the argument in addition to the value;
// we'll use it later.
PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
- Ops.push_back(PtrOp1.getPointer());
+ Ops.push_back(PtrOp1.emitRawPointer(*this));
continue;
}
}
@@ -9187,7 +9449,7 @@ Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
if (ReturnValue.isNull())
return MvecOut;
else
- return Builder.CreateStore(MvecOut, ReturnValue.getValue());
+ return Builder.CreateStore(MvecOut, ReturnValue.getAddress());
}
case CustomCodeGen::VST24: {
@@ -10077,11 +10339,15 @@ Value *CodeGenFunction::EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags,
llvm::Type *Ty,
ArrayRef<Value *> Ops) {
assert((TypeFlags.isTupleSet() || TypeFlags.isTupleGet()) &&
- "Expects TypleFlag isTupleSet or TypeFlags.isTupleSet()");
+ "Expects TypleFlags.isTupleSet() or TypeFlags.isTupleGet()");
unsigned I = cast<ConstantInt>(Ops[1])->getSExtValue();
auto *SingleVecTy = dyn_cast<llvm::ScalableVectorType>(
- TypeFlags.isTupleSet() ? Ops[2]->getType() : Ty);
+ TypeFlags.isTupleSet() ? Ops[2]->getType() : Ty);
+
+ if (!SingleVecTy)
+ return nullptr;
+
Value *Idx = ConstantInt::get(CGM.Int64Ty,
I * SingleVecTy->getMinNumElements());
@@ -10096,6 +10362,10 @@ Value *CodeGenFunction::EmitSVETupleCreate(const SVETypeFlags &TypeFlags,
assert(TypeFlags.isTupleCreate() && "Expects TypleFlag isTupleCreate");
auto *SrcTy = dyn_cast<llvm::ScalableVectorType>(Ops[0]->getType());
+
+ if (!SrcTy)
+ return nullptr;
+
unsigned MinElts = SrcTy->getMinNumElements();
Value *Call = llvm::PoisonValue::get(Ty);
for (unsigned I = 0; I < Ops.size(); I++) {
@@ -10637,6 +10907,9 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
BuiltinID <= clang::AArch64::LastSMEBuiltin)
return EmitAArch64SMEBuiltinExpr(BuiltinID, E);
+ if (BuiltinID == Builtin::BI__builtin_cpu_supports)
+ return EmitAArch64CpuSupports(E);
+
unsigned HintID = static_cast<unsigned>(-1);
switch (BuiltinID) {
default: break;
@@ -10670,16 +10943,20 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
}
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_trap) {
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_break);
+ llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
+ return Builder.CreateCall(F, Builder.CreateZExt(Arg, CGM.Int32Ty));
+ }
+
if (BuiltinID == clang::AArch64::BI__builtin_arm_get_sme_state) {
// Create call to __arm_sme_state and store the results to the two pointers.
CallInst *CI = EmitRuntimeCall(CGM.CreateRuntimeFunction(
llvm::FunctionType::get(StructType::get(CGM.Int64Ty, CGM.Int64Ty), {},
false),
"__arm_sme_state"));
- auto Attrs =
- AttributeList()
- .addFnAttribute(getLLVMContext(), "aarch64_pstate_sm_compatible")
- .addFnAttribute(getLLVMContext(), "aarch64_pstate_za_preserved");
+ auto Attrs = AttributeList().addFnAttribute(getLLVMContext(),
+ "aarch64_pstate_sm_compatible");
CI->setAttributes(Attrs);
CI->setCallingConv(
llvm::CallingConv::
@@ -11318,6 +11595,15 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {Address, RW, Locality, Data});
}
+ if (BuiltinID == AArch64::BI__hlt) {
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hlt);
+ Builder.CreateCall(F, {EmitScalarExpr(E->getArg(0))});
+
+ // Return 0 for convenience, even though MSVC returns some other undefined
+ // value.
+ return ConstantInt::get(Builder.getInt32Ty(), 0);
+ }
+
// Handle MSVC intrinsics before argument evaluation to prevent double
// evaluation.
if (std::optional<MSVCIntrin> MsvcIntId =
@@ -11360,7 +11646,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
// Get the alignment for the argument in addition to the value;
// we'll use it later.
PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
- Ops.push_back(PtrOp0.getPointer());
+ Ops.push_back(PtrOp0.emitRawPointer(*this));
continue;
}
}
@@ -12044,7 +12330,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
"vgetq_lane");
}
- case clang::AArch64::BI_InterlockedAdd: {
+ case clang::AArch64::BI_InterlockedAdd:
+ case clang::AArch64::BI_InterlockedAdd64: {
Address DestAddr = CheckAtomicAlignment(*this, E);
Value *Val = EmitScalarExpr(E->getArg(1));
AtomicRMWInst *RMWI =
@@ -13225,15 +13512,15 @@ Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
if (!getDebugInfo()) {
CGM.Error(E->getExprLoc(),
"using __builtin_preserve_field_info() without -g");
- return IsBitField ? EmitLValue(Arg).getBitFieldPointer()
- : EmitLValue(Arg).getPointer(*this);
+ return IsBitField ? EmitLValue(Arg).getRawBitFieldPointer(*this)
+ : EmitLValue(Arg).emitRawPointer(*this);
}
// Enable underlying preserve_*_access_index() generation.
bool OldIsInPreservedAIRegion = IsInPreservedAIRegion;
IsInPreservedAIRegion = true;
- Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer()
- : EmitLValue(Arg).getPointer(*this);
+ Value *FieldAddr = IsBitField ? EmitLValue(Arg).getRawBitFieldPointer(*this)
+ : EmitLValue(Arg).emitRawPointer(*this);
IsInPreservedAIRegion = OldIsInPreservedAIRegion;
ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
@@ -13931,7 +14218,7 @@ Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
// Grab the appropriate field from __cpu_model.
llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
ConstantInt::get(Int32Ty, Index)};
- llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs);
+ llvm::Value *CpuValue = Builder.CreateInBoundsGEP(STy, CpuModel, Idxs);
CpuValue = Builder.CreateAlignedLoad(Int32Ty, CpuValue,
CharUnits::fromQuantity(4));
@@ -13943,6 +14230,8 @@ Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
+ if (!getContext().getTargetInfo().validateCpuSupports(FeatureStr))
+ return Builder.getFalse();
return EmitX86CpuSupports(FeatureStr);
}
@@ -13971,7 +14260,7 @@ CodeGenFunction::EmitX86CpuSupports(std::array<uint32_t, 4> FeatureMask) {
// global in the struct STy.
Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3),
Builder.getInt32(0)};
- Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
+ Value *CpuFeatures = Builder.CreateInBoundsGEP(STy, CpuModel, Idxs);
Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures,
CharUnits::fromQuantity(4));
@@ -13992,7 +14281,7 @@ CodeGenFunction::EmitX86CpuSupports(std::array<uint32_t, 4> FeatureMask) {
continue;
Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(i - 1)};
Value *Features = Builder.CreateAlignedLoad(
- Int32Ty, Builder.CreateGEP(ATy, CpuFeatures2, Idxs),
+ Int32Ty, Builder.CreateInBoundsGEP(ATy, CpuFeatures2, Idxs),
CharUnits::fromQuantity(4));
// Check the value of the bit corresponding to the feature requested.
Value *Mask = Builder.getInt32(M);
@@ -14025,6 +14314,21 @@ Value *CodeGenFunction::EmitX86CpuInit() {
return Builder.CreateCall(Func);
}
+Value *CodeGenFunction::EmitAArch64CpuSupports(const CallExpr *E) {
+ const Expr *ArgExpr = E->getArg(0)->IgnoreParenCasts();
+ StringRef ArgStr = cast<StringLiteral>(ArgExpr)->getString();
+ llvm::SmallVector<StringRef, 8> Features;
+ ArgStr.split(Features, "+");
+ for (auto &Feature : Features) {
+ Feature = Feature.trim();
+ if (!llvm::AArch64::parseFMVExtension(Feature))
+ return Builder.getFalse();
+ if (Feature != "default")
+ Features.push_back(Feature);
+ }
+ return EmitAArch64CpuSupports(Features);
+}
+
llvm::Value *
CodeGenFunction::EmitAArch64CpuSupports(ArrayRef<StringRef> FeaturesStrs) {
uint64_t FeaturesMask = llvm::AArch64::getCpuSupportsMask(FeaturesStrs);
@@ -14053,11 +14357,11 @@ CodeGenFunction::EmitAArch64CpuSupports(ArrayRef<StringRef> FeaturesStrs) {
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
- if (BuiltinID == X86::BI__builtin_cpu_is)
+ if (BuiltinID == Builtin::BI__builtin_cpu_is)
return EmitX86CpuIs(E);
- if (BuiltinID == X86::BI__builtin_cpu_supports)
+ if (BuiltinID == Builtin::BI__builtin_cpu_supports)
return EmitX86CpuSupports(E);
- if (BuiltinID == X86::BI__builtin_cpu_init)
+ if (BuiltinID == Builtin::BI__builtin_cpu_init)
return EmitX86CpuInit();
// Handle MSVC intrinsics before argument evaluation to prevent double
@@ -14208,14 +14512,14 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
case X86::BI_mm_setcsr:
case X86::BI__builtin_ia32_ldmxcsr: {
- Address Tmp = CreateMemTemp(E->getArg(0)->getType());
+ RawAddress Tmp = CreateMemTemp(E->getArg(0)->getType());
Builder.CreateStore(Ops[0], Tmp);
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
Tmp.getPointer());
}
case X86::BI_mm_getcsr:
case X86::BI__builtin_ia32_stmxcsr: {
- Address Tmp = CreateMemTemp(E->getType());
+ RawAddress Tmp = CreateMemTemp(E->getType());
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
Tmp.getPointer());
return Builder.CreateLoad(Tmp, "stmxcsr");
@@ -15753,14 +16057,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {Ops[0]});
}
- // 3DNow!
- case X86::BI__builtin_ia32_pswapdsf:
- case X86::BI__builtin_ia32_pswapdsi: {
- llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
- Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
- return Builder.CreateCall(F, Ops, "pswapd");
- }
case X86::BI__builtin_ia32_rdrand16_step:
case X86::BI__builtin_ia32_rdrand32_step:
case X86::BI__builtin_ia32_rdrand64_step:
@@ -16542,9 +16838,144 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Intrinsic::ID ID = Intrinsic::not_intrinsic;
+#include "llvm/TargetParser/PPCTargetParser.def"
+ auto GenAIXPPCBuiltinCpuExpr = [&](unsigned SupportMethod, unsigned FieldIdx,
+ unsigned Mask, CmpInst::Predicate CompOp,
+ unsigned OpValue) -> Value * {
+ if (SupportMethod == BUILTIN_PPC_FALSE)
+ return llvm::ConstantInt::getFalse(ConvertType(E->getType()));
+
+ if (SupportMethod == BUILTIN_PPC_TRUE)
+ return llvm::ConstantInt::getTrue(ConvertType(E->getType()));
+
+ assert(SupportMethod <= SYS_CALL && "Invalid value for SupportMethod.");
+
+ llvm::Value *FieldValue = nullptr;
+ if (SupportMethod == USE_SYS_CONF) {
+ llvm::Type *STy = llvm::StructType::get(PPC_SYSTEMCONFIG_TYPE);
+ llvm::Constant *SysConf =
+ CGM.CreateRuntimeVariable(STy, "_system_configuration");
+
+ // Grab the appropriate field from _system_configuration.
+ llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
+ ConstantInt::get(Int32Ty, FieldIdx)};
+
+ FieldValue = Builder.CreateInBoundsGEP(STy, SysConf, Idxs);
+ FieldValue = Builder.CreateAlignedLoad(Int32Ty, FieldValue,
+ CharUnits::fromQuantity(4));
+ } else if (SupportMethod == SYS_CALL) {
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(Int64Ty, Int32Ty, false);
+ llvm::FunctionCallee Func =
+ CGM.CreateRuntimeFunction(FTy, "getsystemcfg");
+
+ FieldValue =
+ Builder.CreateCall(Func, {ConstantInt::get(Int32Ty, FieldIdx)});
+ }
+ assert(FieldValue &&
+ "SupportMethod value is not defined in PPCTargetParser.def.");
+
+ if (Mask)
+ FieldValue = Builder.CreateAnd(FieldValue, Mask);
+
+ llvm::Type *ValueType = FieldValue->getType();
+ bool IsValueType64Bit = ValueType->isIntegerTy(64);
+ assert(
+ (IsValueType64Bit || ValueType->isIntegerTy(32)) &&
+ "Only 32/64-bit integers are supported in GenAIXPPCBuiltinCpuExpr().");
+
+ return Builder.CreateICmp(
+ CompOp, FieldValue,
+ ConstantInt::get(IsValueType64Bit ? Int64Ty : Int32Ty, OpValue));
+ };
+
switch (BuiltinID) {
default: return nullptr;
+ case Builtin::BI__builtin_cpu_is: {
+ const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
+ StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
+ llvm::Triple Triple = getTarget().getTriple();
+
+ unsigned LinuxSupportMethod, LinuxIDValue, AIXSupportMethod, AIXIDValue;
+ typedef std::tuple<unsigned, unsigned, unsigned, unsigned> CPUInfo;
+
+ std::tie(LinuxSupportMethod, LinuxIDValue, AIXSupportMethod, AIXIDValue) =
+ static_cast<CPUInfo>(StringSwitch<CPUInfo>(CPUStr)
+#define PPC_CPU(NAME, Linux_SUPPORT_METHOD, LinuxID, AIX_SUPPORT_METHOD, \
+ AIXID) \
+ .Case(NAME, {Linux_SUPPORT_METHOD, LinuxID, AIX_SUPPORT_METHOD, AIXID})
+#include "llvm/TargetParser/PPCTargetParser.def"
+ .Default({BUILTIN_PPC_UNSUPPORTED, 0,
+ BUILTIN_PPC_UNSUPPORTED, 0}));
+
+ if (Triple.isOSAIX()) {
+ assert((AIXSupportMethod != BUILTIN_PPC_UNSUPPORTED) &&
+ "Invalid CPU name. Missed by SemaChecking?");
+ return GenAIXPPCBuiltinCpuExpr(AIXSupportMethod, AIX_SYSCON_IMPL_IDX, 0,
+ ICmpInst::ICMP_EQ, AIXIDValue);
+ }
+
+ assert(Triple.isOSLinux() &&
+ "__builtin_cpu_is() is only supported for AIX and Linux.");
+
+ assert((LinuxSupportMethod != BUILTIN_PPC_UNSUPPORTED) &&
+ "Invalid CPU name. Missed by SemaChecking?");
+
+ if (LinuxSupportMethod == BUILTIN_PPC_FALSE)
+ return llvm::ConstantInt::getFalse(ConvertType(E->getType()));
+
+ Value *Op0 = llvm::ConstantInt::get(Int32Ty, PPC_FAWORD_CPUID);
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_fixed_addr_ld);
+ Value *TheCall = Builder.CreateCall(F, {Op0}, "cpu_is");
+ return Builder.CreateICmpEQ(TheCall,
+ llvm::ConstantInt::get(Int32Ty, LinuxIDValue));
+ }
+ case Builtin::BI__builtin_cpu_supports: {
+ llvm::Triple Triple = getTarget().getTriple();
+ const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
+ StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
+ if (Triple.isOSAIX()) {
+ unsigned SupportMethod, FieldIdx, Mask, Value;
+ CmpInst::Predicate CompOp;
+ typedef std::tuple<unsigned, unsigned, unsigned, CmpInst::Predicate,
+ unsigned>
+ CPUSupportType;
+ std::tie(SupportMethod, FieldIdx, Mask, CompOp, Value) =
+ static_cast<CPUSupportType>(StringSwitch<CPUSupportType>(CPUStr)
+#define PPC_AIX_FEATURE(NAME, DESC, SUPPORT_METHOD, INDEX, MASK, COMP_OP, \
+ VALUE) \
+ .Case(NAME, {SUPPORT_METHOD, INDEX, MASK, COMP_OP, VALUE})
+#include "llvm/TargetParser/PPCTargetParser.def"
+ .Default({BUILTIN_PPC_FALSE, 0, 0,
+ CmpInst::Predicate(), 0}));
+ return GenAIXPPCBuiltinCpuExpr(SupportMethod, FieldIdx, Mask, CompOp,
+ Value);
+ }
+
+ assert(Triple.isOSLinux() &&
+ "__builtin_cpu_supports() is only supported for AIX and Linux.");
+ unsigned FeatureWord;
+ unsigned BitMask;
+ std::tie(FeatureWord, BitMask) =
+ StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
+#define PPC_LNX_FEATURE(Name, Description, EnumName, Bitmask, FA_WORD) \
+ .Case(Name, {FA_WORD, Bitmask})
+#include "llvm/TargetParser/PPCTargetParser.def"
+ .Default({0, 0});
+ if (!BitMask)
+ return Builder.getFalse();
+ Value *Op0 = llvm::ConstantInt::get(Int32Ty, FeatureWord);
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_fixed_addr_ld);
+ Value *TheCall = Builder.CreateCall(F, {Op0}, "cpu_supports");
+ Value *Mask =
+ Builder.CreateAnd(TheCall, llvm::ConstantInt::get(Int32Ty, BitMask));
+ return Builder.CreateICmpNE(Mask, llvm::Constant::getNullValue(Int32Ty));
+#undef PPC_FAWORD_HWCAP
+#undef PPC_FAWORD_HWCAP2
+#undef PPC_FAWORD_CPUID
+ }
+
// __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we
// call __builtin_readcyclecounter.
case PPC::BI__builtin_ppc_get_timebase:
@@ -16980,37 +17411,34 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
}
return Builder.CreateCall(CGM.getIntrinsic(ID), Ops, "");
}
- // Rotate and insert under mask operation.
- // __rldimi(rs, is, shift, mask)
- // (rotl64(rs, shift) & mask) | (is & ~mask)
- // __rlwimi(rs, is, shift, mask)
- // (rotl(rs, shift) & mask) | (is & ~mask)
case PPC::BI__builtin_ppc_rldimi:
case PPC::BI__builtin_ppc_rlwimi: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
Value *Op1 = EmitScalarExpr(E->getArg(1));
Value *Op2 = EmitScalarExpr(E->getArg(2));
Value *Op3 = EmitScalarExpr(E->getArg(3));
- llvm::Type *Ty = Op0->getType();
- Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
- if (BuiltinID == PPC::BI__builtin_ppc_rldimi)
+ // rldimi is 64-bit instruction, expand the intrinsic before isel to
+ // leverage peephole and avoid legalization efforts.
+ if (BuiltinID == PPC::BI__builtin_ppc_rldimi &&
+ !getTarget().getTriple().isPPC64()) {
+ Function *F = CGM.getIntrinsic(Intrinsic::fshl, Op0->getType());
Op2 = Builder.CreateZExt(Op2, Int64Ty);
- Value *Shift = Builder.CreateCall(F, {Op0, Op0, Op2});
- Value *X = Builder.CreateAnd(Shift, Op3);
- Value *Y = Builder.CreateAnd(Op1, Builder.CreateNot(Op3));
- return Builder.CreateOr(X, Y);
- }
- // Rotate and insert under mask operation.
- // __rlwnm(rs, shift, mask)
- // rotl(rs, shift) & mask
+ Value *Shift = Builder.CreateCall(F, {Op0, Op0, Op2});
+ return Builder.CreateOr(Builder.CreateAnd(Shift, Op3),
+ Builder.CreateAnd(Op1, Builder.CreateNot(Op3)));
+ }
+ return Builder.CreateCall(
+ CGM.getIntrinsic(BuiltinID == PPC::BI__builtin_ppc_rldimi
+ ? Intrinsic::ppc_rldimi
+ : Intrinsic::ppc_rlwimi),
+ {Op0, Op1, Op2, Op3});
+ }
case PPC::BI__builtin_ppc_rlwnm: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
Value *Op1 = EmitScalarExpr(E->getArg(1));
Value *Op2 = EmitScalarExpr(E->getArg(2));
- llvm::Type *Ty = Op0->getType();
- Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
- Value *Shift = Builder.CreateCall(F, {Op0, Op0, Op1});
- return Builder.CreateAnd(Shift, Op2);
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_rlwnm),
+ {Op0, Op1, Op2});
}
case PPC::BI__builtin_ppc_poppar4:
case PPC::BI__builtin_ppc_poppar8: {
@@ -17418,7 +17846,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
SmallVector<Value *, 4> Ops;
for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
if (E->getArg(i)->getType()->isArrayType())
- Ops.push_back(EmitArrayToPointerDecay(E->getArg(i)).getPointer());
+ Ops.push_back(
+ EmitArrayToPointerDecay(E->getArg(i)).emitRawPointer(*this));
else
Ops.push_back(EmitScalarExpr(E->getArg(i)));
// The first argument of these two builtins is a pointer used to store their
@@ -17721,9 +18150,9 @@ Value *EmitAMDGPUImplicitArgPtr(CodeGenFunction &CGF) {
// \p Index is 0, 1, and 2 for x, y, and z dimension, respectively.
/// Emit code based on Code Object ABI version.
/// COV_4 : Emit code to use dispatch ptr
-/// COV_5 : Emit code to use implicitarg ptr
+/// COV_5+ : Emit code to use implicitarg ptr
/// COV_NONE : Emit code to load a global variable "__oclc_ABI_version"
-/// and use its value for COV_4 or COV_5 approach. It is used for
+/// and use its value for COV_4 or COV_5+ approach. It is used for
/// compiling device libraries in an ABI-agnostic way.
///
/// Note: "__oclc_ABI_version" is supposed to be emitted and intialized by
@@ -17766,7 +18195,7 @@ Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
Address(Result, CGF.Int16Ty, CharUnits::fromQuantity(2)));
} else {
Value *GEP = nullptr;
- if (Cov == CodeObjectVersionKind::COV_5) {
+ if (Cov >= CodeObjectVersionKind::COV_5) {
// Indexing the implicit kernarg segment.
GEP = CGF.Builder.CreateConstGEP1_32(
CGF.Int8Ty, EmitAMDGPUImplicitArgPtr(CGF), 12 + Index * 2);
@@ -17837,9 +18266,35 @@ void CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
break;
}
+ // Some of the atomic builtins take the scope as a string name.
StringRef scp;
- llvm::getConstantStringInfo(Scope, scp);
- SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
+ if (llvm::getConstantStringInfo(Scope, scp)) {
+ SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
+ return;
+ }
+
+ // Older builtins had an enum argument for the memory scope.
+ int scope = cast<llvm::ConstantInt>(Scope)->getZExtValue();
+ switch (scope) {
+ case 0: // __MEMORY_SCOPE_SYSTEM
+ SSID = llvm::SyncScope::System;
+ break;
+ case 1: // __MEMORY_SCOPE_DEVICE
+ SSID = getLLVMContext().getOrInsertSyncScopeID("agent");
+ break;
+ case 2: // __MEMORY_SCOPE_WRKGRP
+ SSID = getLLVMContext().getOrInsertSyncScopeID("workgroup");
+ break;
+ case 3: // __MEMORY_SCOPE_WVFRNT
+ SSID = getLLVMContext().getOrInsertSyncScopeID("wavefront");
+ break;
+ case 4: // __MEMORY_SCOPE_SINGLE
+ SSID = llvm::SyncScope::SingleThread;
+ break;
+ default:
+ SSID = llvm::SyncScope::System;
+ break;
+ }
}
llvm::Value *CodeGenFunction::EmitScalarOrConstFoldImmArg(unsigned ICEArguments,
@@ -17859,6 +18314,209 @@ llvm::Value *CodeGenFunction::EmitScalarOrConstFoldImmArg(unsigned ICEArguments,
return Arg;
}
+Intrinsic::ID getDotProductIntrinsic(QualType QT, int elementCount) {
+ if (QT->hasFloatingRepresentation()) {
+ switch (elementCount) {
+ case 2:
+ return Intrinsic::dx_dot2;
+ case 3:
+ return Intrinsic::dx_dot3;
+ case 4:
+ return Intrinsic::dx_dot4;
+ }
+ }
+ if (QT->hasSignedIntegerRepresentation())
+ return Intrinsic::dx_sdot;
+
+ assert(QT->hasUnsignedIntegerRepresentation());
+ return Intrinsic::dx_udot;
+}
+
+Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ if (!getLangOpts().HLSL)
+ return nullptr;
+
+ switch (BuiltinID) {
+ case Builtin::BI__builtin_hlsl_elementwise_all: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ return Builder.CreateIntrinsic(
+ /*ReturnType=*/llvm::Type::getInt1Ty(getLLVMContext()),
+ CGM.getHLSLRuntime().getAllIntrinsic(), ArrayRef<Value *>{Op0}, nullptr,
+ "hlsl.all");
+ }
+ case Builtin::BI__builtin_hlsl_elementwise_any: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ return Builder.CreateIntrinsic(
+ /*ReturnType=*/llvm::Type::getInt1Ty(getLLVMContext()),
+ CGM.getHLSLRuntime().getAnyIntrinsic(), ArrayRef<Value *>{Op0}, nullptr,
+ "hlsl.any");
+ }
+ case Builtin::BI__builtin_hlsl_elementwise_clamp: {
+ Value *OpX = EmitScalarExpr(E->getArg(0));
+ Value *OpMin = EmitScalarExpr(E->getArg(1));
+ Value *OpMax = EmitScalarExpr(E->getArg(2));
+
+ QualType Ty = E->getArg(0)->getType();
+ bool IsUnsigned = false;
+ if (auto *VecTy = Ty->getAs<VectorType>())
+ Ty = VecTy->getElementType();
+ IsUnsigned = Ty->isUnsignedIntegerType();
+ return Builder.CreateIntrinsic(
+ /*ReturnType=*/OpX->getType(),
+ IsUnsigned ? Intrinsic::dx_uclamp : Intrinsic::dx_clamp,
+ ArrayRef<Value *>{OpX, OpMin, OpMax}, nullptr, "dx.clamp");
+ }
+ case Builtin::BI__builtin_hlsl_dot: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ llvm::Type *T0 = Op0->getType();
+ llvm::Type *T1 = Op1->getType();
+ if (!T0->isVectorTy() && !T1->isVectorTy()) {
+ if (T0->isFloatingPointTy())
+ return Builder.CreateFMul(Op0, Op1, "dx.dot");
+
+ if (T0->isIntegerTy())
+ return Builder.CreateMul(Op0, Op1, "dx.dot");
+
+ // Bools should have been promoted
+ llvm_unreachable(
+ "Scalar dot product is only supported on ints and floats.");
+ }
+ // A VectorSplat should have happened
+ assert(T0->isVectorTy() && T1->isVectorTy() &&
+ "Dot product of vector and scalar is not supported.");
+
+ // A vector sext or sitofp should have happened
+ assert(T0->getScalarType() == T1->getScalarType() &&
+ "Dot product of vectors need the same element types.");
+
+ auto *VecTy0 = E->getArg(0)->getType()->getAs<VectorType>();
+ [[maybe_unused]] auto *VecTy1 =
+ E->getArg(1)->getType()->getAs<VectorType>();
+ // A HLSLVectorTruncation should have happend
+ assert(VecTy0->getNumElements() == VecTy1->getNumElements() &&
+ "Dot product requires vectors to be of the same size.");
+
+ return Builder.CreateIntrinsic(
+ /*ReturnType=*/T0->getScalarType(),
+ getDotProductIntrinsic(E->getArg(0)->getType(),
+ VecTy0->getNumElements()),
+ ArrayRef<Value *>{Op0, Op1}, nullptr, "dx.dot");
+ } break;
+ case Builtin::BI__builtin_hlsl_lerp: {
+ Value *X = EmitScalarExpr(E->getArg(0));
+ Value *Y = EmitScalarExpr(E->getArg(1));
+ Value *S = EmitScalarExpr(E->getArg(2));
+ if (!E->getArg(0)->getType()->hasFloatingRepresentation())
+ llvm_unreachable("lerp operand must have a float representation");
+ return Builder.CreateIntrinsic(
+ /*ReturnType=*/X->getType(), CGM.getHLSLRuntime().getLerpIntrinsic(),
+ ArrayRef<Value *>{X, Y, S}, nullptr, "hlsl.lerp");
+ }
+ case Builtin::BI__builtin_hlsl_elementwise_frac: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ if (!E->getArg(0)->getType()->hasFloatingRepresentation())
+ llvm_unreachable("frac operand must have a float representation");
+ return Builder.CreateIntrinsic(
+ /*ReturnType=*/Op0->getType(), Intrinsic::dx_frac,
+ ArrayRef<Value *>{Op0}, nullptr, "dx.frac");
+ }
+ case Builtin::BI__builtin_hlsl_elementwise_isinf: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ llvm::Type *Xty = Op0->getType();
+ llvm::Type *retType = llvm::Type::getInt1Ty(this->getLLVMContext());
+ if (Xty->isVectorTy()) {
+ auto *XVecTy = E->getArg(0)->getType()->getAs<VectorType>();
+ retType = llvm::VectorType::get(
+ retType, ElementCount::getFixed(XVecTy->getNumElements()));
+ }
+ if (!E->getArg(0)->getType()->hasFloatingRepresentation())
+ llvm_unreachable("isinf operand must have a float representation");
+ return Builder.CreateIntrinsic(retType, Intrinsic::dx_isinf,
+ ArrayRef<Value *>{Op0}, nullptr, "dx.isinf");
+ }
+ case Builtin::BI__builtin_hlsl_mad: {
+ Value *M = EmitScalarExpr(E->getArg(0));
+ Value *A = EmitScalarExpr(E->getArg(1));
+ Value *B = EmitScalarExpr(E->getArg(2));
+ if (E->getArg(0)->getType()->hasFloatingRepresentation())
+ return Builder.CreateIntrinsic(
+ /*ReturnType*/ M->getType(), Intrinsic::fmuladd,
+ ArrayRef<Value *>{M, A, B}, nullptr, "hlsl.fmad");
+
+ if (E->getArg(0)->getType()->hasSignedIntegerRepresentation()) {
+ if (CGM.getTarget().getTriple().getArch() == llvm::Triple::dxil)
+ return Builder.CreateIntrinsic(
+ /*ReturnType*/ M->getType(), Intrinsic::dx_imad,
+ ArrayRef<Value *>{M, A, B}, nullptr, "dx.imad");
+
+ Value *Mul = Builder.CreateNSWMul(M, A);
+ return Builder.CreateNSWAdd(Mul, B);
+ }
+ assert(E->getArg(0)->getType()->hasUnsignedIntegerRepresentation());
+ if (CGM.getTarget().getTriple().getArch() == llvm::Triple::dxil)
+ return Builder.CreateIntrinsic(
+ /*ReturnType=*/M->getType(), Intrinsic::dx_umad,
+ ArrayRef<Value *>{M, A, B}, nullptr, "dx.umad");
+
+ Value *Mul = Builder.CreateNUWMul(M, A);
+ return Builder.CreateNUWAdd(Mul, B);
+ }
+ case Builtin::BI__builtin_hlsl_elementwise_rcp: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ if (!E->getArg(0)->getType()->hasFloatingRepresentation())
+ llvm_unreachable("rcp operand must have a float representation");
+ llvm::Type *Ty = Op0->getType();
+ llvm::Type *EltTy = Ty->getScalarType();
+ Constant *One = Ty->isVectorTy()
+ ? ConstantVector::getSplat(
+ ElementCount::getFixed(
+ cast<FixedVectorType>(Ty)->getNumElements()),
+ ConstantFP::get(EltTy, 1.0))
+ : ConstantFP::get(EltTy, 1.0);
+ return Builder.CreateFDiv(One, Op0, "hlsl.rcp");
+ }
+ case Builtin::BI__builtin_hlsl_elementwise_rsqrt: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ if (!E->getArg(0)->getType()->hasFloatingRepresentation())
+ llvm_unreachable("rsqrt operand must have a float representation");
+ return Builder.CreateIntrinsic(
+ /*ReturnType=*/Op0->getType(), CGM.getHLSLRuntime().getRsqrtIntrinsic(),
+ ArrayRef<Value *>{Op0}, nullptr, "hlsl.rsqrt");
+ }
+ case Builtin::BI__builtin_hlsl_wave_get_lane_index: {
+ return EmitRuntimeCall(CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(IntTy, {}, false), "__hlsl_wave_get_lane_index",
+ {}, false, true));
+ }
+ }
+ return nullptr;
+}
+
+void CodeGenFunction::AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
+ const CallExpr *E) {
+ constexpr const char *Tag = "amdgpu-as";
+
+ LLVMContext &Ctx = Inst->getContext();
+ SmallVector<MMRAMetadata::TagT, 3> MMRAs;
+ for (unsigned K = 2; K < E->getNumArgs(); ++K) {
+ llvm::Value *V = EmitScalarExpr(E->getArg(K));
+ StringRef AS;
+ if (llvm::getConstantStringInfo(V, AS)) {
+ MMRAs.push_back({Tag, AS});
+ // TODO: Delete the resulting unused constant?
+ continue;
+ }
+ CGM.Error(E->getExprLoc(),
+ "expected an address space name as a string literal");
+ }
+
+ llvm::sort(MMRAs);
+ MMRAs.erase(llvm::unique(MMRAs), MMRAs.end());
+ Inst->setMetadata(LLVMContext::MD_mmra, MMRAMetadata::getMD(Ctx, MMRAs));
+}
+
Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent;
@@ -17903,9 +18561,11 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
}
case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
- return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
+ return emitBuiltinWithOneOverloadedType<2>(*this, E,
+ Intrinsic::amdgcn_ds_swizzle);
case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
- return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_mov_dpp8);
+ return emitBuiltinWithOneOverloadedType<2>(*this, E,
+ Intrinsic::amdgcn_mov_dpp8);
case AMDGPU::BI__builtin_amdgcn_mov_dpp:
case AMDGPU::BI__builtin_amdgcn_update_dpp: {
llvm::SmallVector<llvm::Value *, 6> Args;
@@ -17925,42 +18585,63 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
return Builder.CreateCall(F, Args);
}
+ case AMDGPU::BI__builtin_amdgcn_permlane16:
+ case AMDGPU::BI__builtin_amdgcn_permlanex16:
+ return emitBuiltinWithOneOverloadedType<6>(
+ *this, E,
+ BuiltinID == AMDGPU::BI__builtin_amdgcn_permlane16
+ ? Intrinsic::amdgcn_permlane16
+ : Intrinsic::amdgcn_permlanex16);
+ case AMDGPU::BI__builtin_amdgcn_permlane64:
+ return emitBuiltinWithOneOverloadedType<1>(*this, E,
+ Intrinsic::amdgcn_permlane64);
+ case AMDGPU::BI__builtin_amdgcn_readlane:
+ return emitBuiltinWithOneOverloadedType<2>(*this, E,
+ Intrinsic::amdgcn_readlane);
+ case AMDGPU::BI__builtin_amdgcn_readfirstlane:
+ return emitBuiltinWithOneOverloadedType<1>(*this, E,
+ Intrinsic::amdgcn_readfirstlane);
case AMDGPU::BI__builtin_amdgcn_div_fixup:
case AMDGPU::BI__builtin_amdgcn_div_fixupf:
case AMDGPU::BI__builtin_amdgcn_div_fixuph:
- return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup);
+ return emitBuiltinWithOneOverloadedType<3>(*this, E,
+ Intrinsic::amdgcn_div_fixup);
case AMDGPU::BI__builtin_amdgcn_trig_preop:
case AMDGPU::BI__builtin_amdgcn_trig_preopf:
return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
case AMDGPU::BI__builtin_amdgcn_rcp:
case AMDGPU::BI__builtin_amdgcn_rcpf:
case AMDGPU::BI__builtin_amdgcn_rcph:
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
+ return emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::amdgcn_rcp);
case AMDGPU::BI__builtin_amdgcn_sqrt:
case AMDGPU::BI__builtin_amdgcn_sqrtf:
case AMDGPU::BI__builtin_amdgcn_sqrth:
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sqrt);
+ return emitBuiltinWithOneOverloadedType<1>(*this, E,
+ Intrinsic::amdgcn_sqrt);
case AMDGPU::BI__builtin_amdgcn_rsq:
case AMDGPU::BI__builtin_amdgcn_rsqf:
case AMDGPU::BI__builtin_amdgcn_rsqh:
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq);
+ return emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::amdgcn_rsq);
case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp);
+ return emitBuiltinWithOneOverloadedType<1>(*this, E,
+ Intrinsic::amdgcn_rsq_clamp);
case AMDGPU::BI__builtin_amdgcn_sinf:
case AMDGPU::BI__builtin_amdgcn_sinh:
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin);
+ return emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::amdgcn_sin);
case AMDGPU::BI__builtin_amdgcn_cosf:
case AMDGPU::BI__builtin_amdgcn_cosh:
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
+ return emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::amdgcn_cos);
case AMDGPU::BI__builtin_amdgcn_dispatch_ptr:
return EmitAMDGPUDispatchPtr(*this, E);
case AMDGPU::BI__builtin_amdgcn_logf:
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log);
+ return emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::amdgcn_log);
case AMDGPU::BI__builtin_amdgcn_exp2f:
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_exp2);
+ return emitBuiltinWithOneOverloadedType<1>(*this, E,
+ Intrinsic::amdgcn_exp2);
case AMDGPU::BI__builtin_amdgcn_log_clampf:
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
+ return emitBuiltinWithOneOverloadedType<1>(*this, E,
+ Intrinsic::amdgcn_log_clamp);
case AMDGPU::BI__builtin_amdgcn_ldexp:
case AMDGPU::BI__builtin_amdgcn_ldexpf: {
llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
@@ -17981,7 +18662,8 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_frexp_mant:
case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
case AMDGPU::BI__builtin_amdgcn_frexp_manth:
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant);
+ return emitBuiltinWithOneOverloadedType<1>(*this, E,
+ Intrinsic::amdgcn_frexp_mant);
case AMDGPU::BI__builtin_amdgcn_frexp_exp:
case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
Value *Src0 = EmitScalarExpr(E->getArg(0));
@@ -17998,13 +18680,17 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_fract:
case AMDGPU::BI__builtin_amdgcn_fractf:
case AMDGPU::BI__builtin_amdgcn_fracth:
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
+ return emitBuiltinWithOneOverloadedType<1>(*this, E,
+ Intrinsic::amdgcn_fract);
case AMDGPU::BI__builtin_amdgcn_lerp:
- return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
+ return emitBuiltinWithOneOverloadedType<3>(*this, E,
+ Intrinsic::amdgcn_lerp);
case AMDGPU::BI__builtin_amdgcn_ubfe:
- return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_ubfe);
+ return emitBuiltinWithOneOverloadedType<3>(*this, E,
+ Intrinsic::amdgcn_ubfe);
case AMDGPU::BI__builtin_amdgcn_sbfe:
- return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_sbfe);
+ return emitBuiltinWithOneOverloadedType<3>(*this, E,
+ Intrinsic::amdgcn_sbfe);
case AMDGPU::BI__builtin_amdgcn_ballot_w32:
case AMDGPU::BI__builtin_amdgcn_ballot_w64: {
llvm::Type *ResultType = ConvertType(E->getType());
@@ -18042,7 +18728,8 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
case AMDGPU::BI__builtin_amdgcn_fmed3f:
case AMDGPU::BI__builtin_amdgcn_fmed3h:
- return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3);
+ return emitBuiltinWithOneOverloadedType<3>(*this, E,
+ Intrinsic::amdgcn_fmed3);
case AMDGPU::BI__builtin_amdgcn_ds_append:
case AMDGPU::BI__builtin_amdgcn_ds_consume: {
Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ?
@@ -18051,32 +18738,6 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() });
return Builder.CreateCall(F, { Src0, Builder.getFalse() });
}
- case AMDGPU::BI__builtin_amdgcn_ds_faddf:
- case AMDGPU::BI__builtin_amdgcn_ds_fminf:
- case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: {
- Intrinsic::ID Intrin;
- switch (BuiltinID) {
- case AMDGPU::BI__builtin_amdgcn_ds_faddf:
- Intrin = Intrinsic::amdgcn_ds_fadd;
- break;
- case AMDGPU::BI__builtin_amdgcn_ds_fminf:
- Intrin = Intrinsic::amdgcn_ds_fmin;
- break;
- case AMDGPU::BI__builtin_amdgcn_ds_fmaxf:
- Intrin = Intrinsic::amdgcn_ds_fmax;
- break;
- }
- llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
- llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
- llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
- llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
- llvm::Value *Src4 = EmitScalarExpr(E->getArg(4));
- llvm::Function *F = CGM.getIntrinsic(Intrin, { Src1->getType() });
- llvm::FunctionType *FTy = F->getFunctionType();
- llvm::Type *PTy = FTy->getParamType(0);
- Src0 = Builder.CreatePointerBitCastOrAddrSpaceCast(Src0, PTy);
- return Builder.CreateCall(F, { Src0, Src1, Src2, Src3, Src4 });
- }
case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64:
case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32:
case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16:
@@ -18149,74 +18810,46 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Function *F = CGM.getIntrinsic(IID, {Addr->getType()});
return Builder.CreateCall(F, {Addr, Val});
}
- case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
- case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32:
- case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2f16: {
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b64_i32:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b64_v2i32:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4i16:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4f16:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4bf16:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v8i16:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v8f16:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v8bf16: {
+
Intrinsic::ID IID;
- llvm::Type *ArgTy;
switch (BuiltinID) {
- case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32:
- ArgTy = llvm::Type::getFloatTy(getLLVMContext());
- IID = Intrinsic::amdgcn_ds_fadd;
- break;
- case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
- ArgTy = llvm::Type::getDoubleTy(getLLVMContext());
- IID = Intrinsic::amdgcn_ds_fadd;
- break;
- case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2f16:
- ArgTy = llvm::FixedVectorType::get(
- llvm::Type::getHalfTy(getLLVMContext()), 2);
- IID = Intrinsic::amdgcn_ds_fadd;
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b64_i32:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b64_v2i32:
+ IID = Intrinsic::amdgcn_global_load_tr_b64;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4i16:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4f16:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4bf16:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v8i16:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v8f16:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v8bf16:
+ IID = Intrinsic::amdgcn_global_load_tr_b128;
break;
}
+ llvm::Type *LoadTy = ConvertType(E->getType());
llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
- llvm::Value *Val = EmitScalarExpr(E->getArg(1));
- llvm::Constant *ZeroI32 = llvm::ConstantInt::getIntegerValue(
- llvm::Type::getInt32Ty(getLLVMContext()), APInt(32, 0, true));
- llvm::Constant *ZeroI1 = llvm::ConstantInt::getIntegerValue(
- llvm::Type::getInt1Ty(getLLVMContext()), APInt(1, 0));
- llvm::Function *F = CGM.getIntrinsic(IID, {ArgTy});
- return Builder.CreateCall(F, {Addr, Val, ZeroI32, ZeroI32, ZeroI1});
- }
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_i32:
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_v2i32:
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_v4f16:
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_v4i16:
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_v8f16:
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_v8i16: {
-
- llvm::Type *ArgTy;
- switch (BuiltinID) {
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_i32:
- ArgTy = llvm::Type::getInt32Ty(getLLVMContext());
- break;
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_v2i32:
- ArgTy = llvm::FixedVectorType::get(
- llvm::Type::getInt32Ty(getLLVMContext()), 2);
- break;
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_v4f16:
- ArgTy = llvm::FixedVectorType::get(
- llvm::Type::getHalfTy(getLLVMContext()), 4);
- break;
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_v4i16:
- ArgTy = llvm::FixedVectorType::get(
- llvm::Type::getInt16Ty(getLLVMContext()), 4);
- break;
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_v8f16:
- ArgTy = llvm::FixedVectorType::get(
- llvm::Type::getHalfTy(getLLVMContext()), 8);
- break;
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_v8i16:
- ArgTy = llvm::FixedVectorType::get(
- llvm::Type::getInt16Ty(getLLVMContext()), 8);
- break;
- }
-
- llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
- llvm::Function *F =
- CGM.getIntrinsic(Intrinsic::amdgcn_global_load_tr, {ArgTy});
+ llvm::Function *F = CGM.getIntrinsic(IID, {LoadTy});
return Builder.CreateCall(F, {Addr});
}
+ case AMDGPU::BI__builtin_amdgcn_get_fpenv: {
+ Function *F = CGM.getIntrinsic(Intrinsic::get_fpenv,
+ {llvm::Type::getInt64Ty(getLLVMContext())});
+ return Builder.CreateCall(F);
+ }
+ case AMDGPU::BI__builtin_amdgcn_set_fpenv: {
+ Function *F = CGM.getIntrinsic(Intrinsic::set_fpenv,
+ {llvm::Type::getInt64Ty(getLLVMContext())});
+ llvm::Value *Env = EmitScalarExpr(E->getArg(0));
+ return Builder.CreateCall(F, {Env});
+ }
case AMDGPU::BI__builtin_amdgcn_read_exec:
return EmitAMDGCNBallotForExec(*this, E, Int64Ty, Int64Ty, false);
case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
@@ -18357,7 +18990,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32_gfx12:
case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64_gfx12:
AppendFalseForOpselArg = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32:
case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64:
ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
@@ -18366,7 +18999,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32_gfx12:
case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64_gfx12:
AppendFalseForOpselArg = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32:
case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64:
ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
@@ -18519,7 +19152,8 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
// r600 intrinsics
case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
- return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee);
+ return emitBuiltinWithOneOverloadedType<1>(*this, E,
+ Intrinsic::r600_recipsqrt_ieee);
case AMDGPU::BI__builtin_r600_read_tidig_x:
return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024);
case AMDGPU::BI__builtin_r600_read_tidig_y:
@@ -18536,12 +19170,22 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_fence: {
ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
EmitScalarExpr(E->getArg(1)), AO, SSID);
- return Builder.CreateFence(AO, SSID);
+ FenceInst *Fence = Builder.CreateFence(AO, SSID);
+ if (E->getNumArgs() > 2)
+ AddAMDGPUFenceAddressSpaceMMRA(Fence, E);
+ return Fence;
}
case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
- case AMDGPU::BI__builtin_amdgcn_atomic_dec64: {
+ case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
+ case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
+ case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32:
+ case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2f16:
+ case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2bf16:
+ case AMDGPU::BI__builtin_amdgcn_ds_faddf:
+ case AMDGPU::BI__builtin_amdgcn_ds_fminf:
+ case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: {
llvm::AtomicRMWInst::BinOp BinOp;
switch (BuiltinID) {
case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
@@ -18552,23 +19196,62 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
BinOp = llvm::AtomicRMWInst::UDecWrap;
break;
+ case AMDGPU::BI__builtin_amdgcn_ds_faddf:
+ case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
+ case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32:
+ case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2f16:
+ case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2bf16:
+ BinOp = llvm::AtomicRMWInst::FAdd;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_ds_fminf:
+ BinOp = llvm::AtomicRMWInst::FMin;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_ds_fmaxf:
+ BinOp = llvm::AtomicRMWInst::FMax;
+ break;
}
Address Ptr = CheckAtomicAlignment(*this, E);
Value *Val = EmitScalarExpr(E->getArg(1));
+ llvm::Type *OrigTy = Val->getType();
+ QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
- ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
- EmitScalarExpr(E->getArg(3)), AO, SSID);
+ bool Volatile;
- QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
- bool Volatile =
- PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+ if (BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_faddf ||
+ BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_fminf ||
+ BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_fmaxf) {
+ // __builtin_amdgcn_ds_faddf/fminf/fmaxf has an explicit volatile argument
+ Volatile =
+ cast<ConstantInt>(EmitScalarExpr(E->getArg(4)))->getZExtValue();
+ } else {
+ // Infer volatile from the passed type.
+ Volatile =
+ PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+ }
+
+ if (E->getNumArgs() >= 4) {
+ // Some of the builtins have explicit ordering and scope arguments.
+ ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
+ EmitScalarExpr(E->getArg(3)), AO, SSID);
+ } else {
+ // The ds_atomic_fadd_* builtins do not have syncscope/order arguments.
+ SSID = llvm::SyncScope::System;
+ AO = AtomicOrdering::SequentiallyConsistent;
+
+ // The v2bf16 builtin uses i16 instead of a natural bfloat type.
+ if (BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2bf16) {
+ llvm::Type *V2BF16Ty = FixedVectorType::get(
+ llvm::Type::getBFloatTy(Builder.getContext()), 2);
+ Val = Builder.CreateBitCast(Val, V2BF16Ty);
+ }
+ }
llvm::AtomicRMWInst *RMW =
Builder.CreateAtomicRMW(BinOp, Ptr, Val, AO, SSID);
if (Volatile)
RMW->setVolatile(true);
- return RMW;
+ return Builder.CreateBitCast(RMW, OrigTy);
}
case AMDGPU::BI__builtin_amdgcn_s_sendmsg_rtn:
case AMDGPU::BI__builtin_amdgcn_s_sendmsg_rtnl: {
@@ -18579,6 +19262,50 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::amdgcn_s_sendmsg_rtn, {ResultType});
return Builder.CreateCall(F, {Arg});
}
+ case AMDGPU::BI__builtin_amdgcn_make_buffer_rsrc:
+ return emitBuiltinWithOneOverloadedType<4>(
+ *this, E, Intrinsic::amdgcn_make_buffer_rsrc);
+ case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b8:
+ case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b16:
+ case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b32:
+ case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b64:
+ case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b96:
+ case AMDGPU::BI__builtin_amdgcn_raw_buffer_store_b128:
+ return emitBuiltinWithOneOverloadedType<5>(
+ *this, E, Intrinsic::amdgcn_raw_ptr_buffer_store);
+ case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b8:
+ case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b16:
+ case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b32:
+ case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b64:
+ case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b96:
+ case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b128: {
+ llvm::Type *RetTy = nullptr;
+ switch (BuiltinID) {
+ case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b8:
+ RetTy = Int8Ty;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b16:
+ RetTy = Int16Ty;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b32:
+ RetTy = Int32Ty;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b64:
+ RetTy = llvm::FixedVectorType::get(Int32Ty, /*NumElements=*/2);
+ break;
+ case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b96:
+ RetTy = llvm::FixedVectorType::get(Int32Ty, /*NumElements=*/3);
+ break;
+ case AMDGPU::BI__builtin_amdgcn_raw_buffer_load_b128:
+ RetTy = llvm::FixedVectorType::get(Int32Ty, /*NumElements=*/4);
+ break;
+ }
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::amdgcn_raw_ptr_buffer_load, RetTy);
+ return Builder.CreateCall(
+ F, {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)),
+ EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3))});
+ }
default:
return nullptr;
}
@@ -19734,14 +20461,14 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
// Save returned values.
assert(II.NumResults);
if (II.NumResults == 1) {
- Builder.CreateAlignedStore(Result, Dst.getPointer(),
+ Builder.CreateAlignedStore(Result, Dst.emitRawPointer(*this),
CharUnits::fromQuantity(4));
} else {
for (unsigned i = 0; i < II.NumResults; ++i) {
Builder.CreateAlignedStore(
Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
Dst.getElementType()),
- Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(),
+ Builder.CreateGEP(Dst.getElementType(), Dst.emitRawPointer(*this),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
}
@@ -19781,7 +20508,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0; i < II.NumResults; ++i) {
Value *V = Builder.CreateAlignedLoad(
Src.getElementType(),
- Builder.CreateGEP(Src.getElementType(), Src.getPointer(),
+ Builder.CreateGEP(Src.getElementType(), Src.emitRawPointer(*this),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
Values.push_back(Builder.CreateBitCast(V, ParamType));
@@ -19853,7 +20580,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0; i < MI.NumEltsA; ++i) {
Value *V = Builder.CreateAlignedLoad(
SrcA.getElementType(),
- Builder.CreateGEP(SrcA.getElementType(), SrcA.getPointer(),
+ Builder.CreateGEP(SrcA.getElementType(), SrcA.emitRawPointer(*this),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
Values.push_back(Builder.CreateBitCast(V, AType));
@@ -19863,7 +20590,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0; i < MI.NumEltsB; ++i) {
Value *V = Builder.CreateAlignedLoad(
SrcB.getElementType(),
- Builder.CreateGEP(SrcB.getElementType(), SrcB.getPointer(),
+ Builder.CreateGEP(SrcB.getElementType(), SrcB.emitRawPointer(*this),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
Values.push_back(Builder.CreateBitCast(V, BType));
@@ -19874,7 +20601,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0; i < MI.NumEltsC; ++i) {
Value *V = Builder.CreateAlignedLoad(
SrcC.getElementType(),
- Builder.CreateGEP(SrcC.getElementType(), SrcC.getPointer(),
+ Builder.CreateGEP(SrcC.getElementType(), SrcC.emitRawPointer(*this),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
Values.push_back(Builder.CreateBitCast(V, CType));
@@ -19884,7 +20611,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0; i < MI.NumEltsD; ++i)
Builder.CreateAlignedStore(
Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
- Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(),
+ Builder.CreateGEP(Dst.getElementType(), Dst.emitRawPointer(*this),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
return Result;
@@ -20142,7 +20869,7 @@ struct BuiltinAlignArgs {
BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
QualType AstType = E->getArg(0)->getType();
if (AstType->isArrayType())
- Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(CGF);
else
Src = CGF.EmitScalarExpr(E->getArg(0));
SrcType = Src->getType();
@@ -20318,6 +21045,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
}
case WebAssembly::BI__builtin_wasm_min_f32:
case WebAssembly::BI__builtin_wasm_min_f64:
+ case WebAssembly::BI__builtin_wasm_min_f16x8:
case WebAssembly::BI__builtin_wasm_min_f32x4:
case WebAssembly::BI__builtin_wasm_min_f64x2: {
Value *LHS = EmitScalarExpr(E->getArg(0));
@@ -20328,6 +21056,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
}
case WebAssembly::BI__builtin_wasm_max_f32:
case WebAssembly::BI__builtin_wasm_max_f64:
+ case WebAssembly::BI__builtin_wasm_max_f16x8:
case WebAssembly::BI__builtin_wasm_max_f32x4:
case WebAssembly::BI__builtin_wasm_max_f64x2: {
Value *LHS = EmitScalarExpr(E->getArg(0));
@@ -20336,6 +21065,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::maximum, ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
+ case WebAssembly::BI__builtin_wasm_pmin_f16x8:
case WebAssembly::BI__builtin_wasm_pmin_f32x4:
case WebAssembly::BI__builtin_wasm_pmin_f64x2: {
Value *LHS = EmitScalarExpr(E->getArg(0));
@@ -20344,6 +21074,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::wasm_pmin, ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
+ case WebAssembly::BI__builtin_wasm_pmax_f16x8:
case WebAssembly::BI__builtin_wasm_pmax_f32x4:
case WebAssembly::BI__builtin_wasm_pmax_f64x2: {
Value *LHS = EmitScalarExpr(E->getArg(0));
@@ -20642,6 +21373,8 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle);
return Builder.CreateCall(Callee, Ops);
}
+ case WebAssembly::BI__builtin_wasm_relaxed_madd_f16x8:
+ case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f16x8:
case WebAssembly::BI__builtin_wasm_relaxed_madd_f32x4:
case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f32x4:
case WebAssembly::BI__builtin_wasm_relaxed_madd_f64x2:
@@ -20651,10 +21384,12 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Value *C = EmitScalarExpr(E->getArg(2));
unsigned IntNo;
switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_relaxed_madd_f16x8:
case WebAssembly::BI__builtin_wasm_relaxed_madd_f32x4:
case WebAssembly::BI__builtin_wasm_relaxed_madd_f64x2:
IntNo = Intrinsic::wasm_relaxed_madd;
break;
+ case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f16x8:
case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f32x4:
case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f64x2:
IntNo = Intrinsic::wasm_relaxed_nmadd;
@@ -20758,9 +21493,31 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::wasm_relaxed_dot_bf16x8_add_f32);
return Builder.CreateCall(Callee, {LHS, RHS, Acc});
}
+ case WebAssembly::BI__builtin_wasm_loadf16_f32: {
+ Value *Addr = EmitScalarExpr(E->getArg(0));
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_loadf16_f32);
+ return Builder.CreateCall(Callee, {Addr});
+ }
+ case WebAssembly::BI__builtin_wasm_storef16_f32: {
+ Value *Val = EmitScalarExpr(E->getArg(0));
+ Value *Addr = EmitScalarExpr(E->getArg(1));
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_storef16_f32);
+ return Builder.CreateCall(Callee, {Val, Addr});
+ }
+ case WebAssembly::BI__builtin_wasm_splat_f16x8: {
+ Value *Val = EmitScalarExpr(E->getArg(0));
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_splat_f16x8);
+ return Builder.CreateCall(Callee, {Val});
+ }
+ case WebAssembly::BI__builtin_wasm_extract_lane_f16x8: {
+ Value *Vector = EmitScalarExpr(E->getArg(0));
+ Value *Index = EmitScalarExpr(E->getArg(1));
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_extract_lane_f16x8);
+ return Builder.CreateCall(Callee, {Vector, Index});
+ }
case WebAssembly::BI__builtin_wasm_table_get: {
assert(E->getArg(0)->getType()->isArrayType());
- Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Value *Table = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this);
Value *Index = EmitScalarExpr(E->getArg(1));
Function *Callee;
if (E->getType().isWebAssemblyExternrefType())
@@ -20774,7 +21531,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
}
case WebAssembly::BI__builtin_wasm_table_set: {
assert(E->getArg(0)->getType()->isArrayType());
- Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Value *Table = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this);
Value *Index = EmitScalarExpr(E->getArg(1));
Value *Val = EmitScalarExpr(E->getArg(2));
Function *Callee;
@@ -20789,13 +21546,13 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
}
case WebAssembly::BI__builtin_wasm_table_size: {
assert(E->getArg(0)->getType()->isArrayType());
- Value *Value = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Value *Value = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this);
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_table_size);
return Builder.CreateCall(Callee, Value);
}
case WebAssembly::BI__builtin_wasm_table_grow: {
assert(E->getArg(0)->getType()->isArrayType());
- Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Value *Table = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this);
Value *Val = EmitScalarExpr(E->getArg(1));
Value *NElems = EmitScalarExpr(E->getArg(2));
@@ -20812,7 +21569,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
}
case WebAssembly::BI__builtin_wasm_table_fill: {
assert(E->getArg(0)->getType()->isArrayType());
- Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Value *Table = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this);
Value *Index = EmitScalarExpr(E->getArg(1));
Value *Val = EmitScalarExpr(E->getArg(2));
Value *NElems = EmitScalarExpr(E->getArg(3));
@@ -20830,8 +21587,8 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
}
case WebAssembly::BI__builtin_wasm_table_copy: {
assert(E->getArg(0)->getType()->isArrayType());
- Value *TableX = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
- Value *TableY = EmitArrayToPointerDecay(E->getArg(1)).getPointer();
+ Value *TableX = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this);
+ Value *TableY = EmitArrayToPointerDecay(E->getArg(1)).emitRawPointer(*this);
Value *DstIdx = EmitScalarExpr(E->getArg(2));
Value *SrcIdx = EmitScalarExpr(E->getArg(3));
Value *NElems = EmitScalarExpr(E->getArg(4));
@@ -20910,7 +21667,7 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) {
// The base pointer is passed by address, so it needs to be loaded.
Address A = EmitPointerWithAlignment(E->getArg(0));
- Address BP = Address(A.getPointer(), Int8PtrTy, A.getAlignment());
+ Address BP = Address(A.emitRawPointer(*this), Int8PtrTy, A.getAlignment());
llvm::Value *Base = Builder.CreateLoad(BP);
// The treatment of both loads and stores is the same: the arguments for
// the builtin are the same as the arguments for the intrinsic.
@@ -20951,8 +21708,8 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
// EmitPointerWithAlignment and EmitScalarExpr evaluates the expression
// per call.
Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
- DestAddr = Address(DestAddr.getPointer(), Int8Ty, DestAddr.getAlignment());
- llvm::Value *DestAddress = DestAddr.getPointer();
+ DestAddr = DestAddr.withElementType(Int8Ty);
+ llvm::Value *DestAddress = DestAddr.emitRawPointer(*this);
// Operands are Base, Dest, Modifier.
// The intrinsic format in LLVM IR is defined as
@@ -21003,8 +21760,8 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
{EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});
llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
- Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
- PredAddr.getAlignment());
+ Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.emitRawPointer(*this),
+ PredAddr.getAlignment());
return Builder.CreateExtractValue(Result, 0);
}
// These are identical to the builtins above, except they don't consume
@@ -21022,8 +21779,8 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
{EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
- Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
- PredAddr.getAlignment());
+ Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.emitRawPointer(*this),
+ PredAddr.getAlignment());
return Builder.CreateExtractValue(Result, 0);
}
@@ -21120,7 +21877,7 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
// Handle aggregate argument, namely RVV tuple types in segment load/store
if (hasAggregateEvaluationKind(E->getArg(i)->getType())) {
LValue L = EmitAggExprToLValue(E->getArg(i));
- llvm::Value *AggValue = Builder.CreateLoad(L.getAddress(*this));
+ llvm::Value *AggValue = Builder.CreateLoad(L.getAddress());
Ops.push_back(AggValue);
continue;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp
index 5b43272bfa62..43dfbbb90dd5 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp
@@ -71,8 +71,6 @@ private:
bool RelocatableDeviceCode;
/// Mangle context for device.
std::unique_ptr<MangleContext> DeviceMC;
- /// Some zeros used for GEPs.
- llvm::Constant *Zeros[2];
llvm::FunctionCallee getSetupArgumentFn() const;
llvm::FunctionCallee getLaunchFn() const;
@@ -91,9 +89,7 @@ private:
/// where the C code specifies const char*.
llvm::Constant *makeConstantString(const std::string &Str,
const std::string &Name = "") {
- auto ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
- return llvm::ConstantExpr::getGetElementPtr(ConstStr.getElementType(),
- ConstStr.getPointer(), Zeros);
+ return CGM.GetAddrOfConstantCString(Str, Name.c_str()).getPointer();
}
/// Helper function which generates an initialized constant array from Str,
@@ -117,7 +113,7 @@ private:
}
if (Alignment)
GV->setAlignment(llvm::Align(Alignment));
- return llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);
+ return GV;
}
/// Helper function that generates an empty dummy function returning void.
@@ -230,8 +226,6 @@ CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
IntTy = CGM.IntTy;
SizeTy = CGM.SizeTy;
VoidTy = CGM.VoidTy;
- Zeros[0] = llvm::ConstantInt::get(SizeTy, 0);
- Zeros[1] = Zeros[0];
PtrTy = CGM.UnqualPtrTy;
}
@@ -331,11 +325,11 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
llvm::ConstantInt::get(SizeTy, std::max<size_t>(1, Args.size())));
// Store pointers to the arguments in a locally allocated launch_args.
for (unsigned i = 0; i < Args.size(); ++i) {
- llvm::Value* VarPtr = CGF.GetAddrOfLocalVar(Args[i]).getPointer();
+ llvm::Value *VarPtr = CGF.GetAddrOfLocalVar(Args[i]).emitRawPointer(CGF);
llvm::Value *VoidVarPtr = CGF.Builder.CreatePointerCast(VarPtr, PtrTy);
CGF.Builder.CreateDefaultAlignedStore(
- VoidVarPtr,
- CGF.Builder.CreateConstGEP1_32(PtrTy, KernelArgs.getPointer(), i));
+ VoidVarPtr, CGF.Builder.CreateConstGEP1_32(
+ PtrTy, KernelArgs.emitRawPointer(CGF), i));
}
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
@@ -361,7 +355,7 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
KernelLaunchAPI = KernelLaunchAPI + "_ptsz";
}
auto LaunchKernelName = addPrefixToName(KernelLaunchAPI);
- IdentifierInfo &cudaLaunchKernelII =
+ const IdentifierInfo &cudaLaunchKernelII =
CGM.getContext().Idents.get(LaunchKernelName);
FunctionDecl *cudaLaunchKernelFD = nullptr;
for (auto *Result : DC->lookup(&cudaLaunchKernelII)) {
@@ -393,9 +387,10 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
/*isVarArg=*/false),
addUnderscoredPrefixToName("PopCallConfiguration"));
- CGF.EmitRuntimeCallOrInvoke(cudaPopConfigFn,
- {GridDim.getPointer(), BlockDim.getPointer(),
- ShmemSize.getPointer(), Stream.getPointer()});
+ CGF.EmitRuntimeCallOrInvoke(cudaPopConfigFn, {GridDim.emitRawPointer(CGF),
+ BlockDim.emitRawPointer(CGF),
+ ShmemSize.emitRawPointer(CGF),
+ Stream.emitRawPointer(CGF)});
// Emit the call to cudaLaunch
llvm::Value *Kernel =
@@ -405,7 +400,7 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
cudaLaunchKernelFD->getParamDecl(0)->getType());
LaunchKernelArgs.add(RValue::getAggregate(GridDim), Dim3Ty);
LaunchKernelArgs.add(RValue::getAggregate(BlockDim), Dim3Ty);
- LaunchKernelArgs.add(RValue::get(KernelArgs.getPointer()),
+ LaunchKernelArgs.add(RValue::get(KernelArgs, CGF),
cudaLaunchKernelFD->getParamDecl(3)->getType());
LaunchKernelArgs.add(RValue::get(CGF.Builder.CreateLoad(ShmemSize)),
cudaLaunchKernelFD->getParamDecl(4)->getType());
@@ -423,6 +418,33 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
CGM.CreateRuntimeFunction(FTy, LaunchKernelName);
CGF.EmitCall(FI, CGCallee::forDirect(cudaLaunchKernelFn), ReturnValueSlot(),
LaunchKernelArgs);
+
+ // To prevent CUDA device stub functions from being merged by ICF in MSVC
+ // environment, create an unique global variable for each kernel and write to
+ // the variable in the device stub.
+ if (CGM.getContext().getTargetInfo().getCXXABI().isMicrosoft() &&
+ !CGF.getLangOpts().HIP) {
+ llvm::Function *KernelFunction = llvm::cast<llvm::Function>(Kernel);
+ std::string GlobalVarName = (KernelFunction->getName() + ".id").str();
+
+ llvm::GlobalVariable *HandleVar =
+ CGM.getModule().getNamedGlobal(GlobalVarName);
+ if (!HandleVar) {
+ HandleVar = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.Int8Ty,
+ /*Constant=*/false, KernelFunction->getLinkage(),
+ llvm::ConstantInt::get(CGM.Int8Ty, 0), GlobalVarName);
+ HandleVar->setDSOLocal(KernelFunction->isDSOLocal());
+ HandleVar->setVisibility(KernelFunction->getVisibility());
+ if (KernelFunction->hasComdat())
+ HandleVar->setComdat(CGM.getModule().getOrInsertComdat(GlobalVarName));
+ }
+
+ CGF.Builder.CreateAlignedStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
+ HandleVar, CharUnits::One(),
+ /*IsVolatile=*/true);
+ }
+
CGF.EmitBranch(EndBlock);
CGF.EmitBlock(EndBlock);
@@ -438,8 +460,8 @@ void CGNVCUDARuntime::emitDeviceStubBodyLegacy(CodeGenFunction &CGF,
auto TInfo = CGM.getContext().getTypeInfoInChars(A->getType());
Offset = Offset.alignTo(TInfo.Align);
llvm::Value *Args[] = {
- CGF.Builder.CreatePointerCast(CGF.GetAddrOfLocalVar(A).getPointer(),
- PtrTy),
+ CGF.Builder.CreatePointerCast(
+ CGF.GetAddrOfLocalVar(A).emitRawPointer(CGF), PtrTy),
llvm::ConstantInt::get(SizeTy, TInfo.Width.getQuantity()),
llvm::ConstantInt::get(SizeTy, Offset.getQuantity()),
};
@@ -491,7 +513,8 @@ static void replaceManagedVar(llvm::GlobalVariable *Var,
// variable with instructions.
for (auto &&Op : WorkItem) {
auto *CE = cast<llvm::ConstantExpr>(Op);
- auto *NewInst = CE->getAsInstruction(I);
+ auto *NewInst = CE->getAsInstruction();
+ NewInst->insertBefore(*I->getParent(), I->getIterator());
NewInst->replaceUsesOfWith(OldV, NewV);
OldV = CE;
NewV = NewInst;
@@ -604,20 +627,10 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
uint64_t VarSize =
CGM.getDataLayout().getTypeAllocSize(Var->getValueType());
if (Info.Flags.isManaged()) {
- auto *ManagedVar = new llvm::GlobalVariable(
- CGM.getModule(), Var->getType(),
- /*isConstant=*/false, Var->getLinkage(),
- /*Init=*/Var->isDeclaration()
- ? nullptr
- : llvm::ConstantPointerNull::get(Var->getType()),
- /*Name=*/"", /*InsertBefore=*/nullptr,
- llvm::GlobalVariable::NotThreadLocal);
- ManagedVar->setDSOLocal(Var->isDSOLocal());
- ManagedVar->setVisibility(Var->getVisibility());
- ManagedVar->setExternallyInitialized(true);
- ManagedVar->takeName(Var);
- Var->setName(Twine(ManagedVar->getName() + ".managed"));
- replaceManagedVar(Var, ManagedVar);
+ assert(Var->getName().ends_with(".managed") &&
+ "HIP managed variables not transformed");
+ auto *ManagedVar = CGM.getModule().getNamedGlobal(
+ Var->getName().drop_back(StringRef(".managed").size()));
llvm::Value *Args[] = {
&GpuBinaryHandlePtr,
ManagedVar,
@@ -760,10 +773,10 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// to contain the fat binary but will be populated somewhere else,
// e.g. by lld through link script.
FatBinStr = new llvm::GlobalVariable(
- CGM.getModule(), CGM.Int8Ty,
- /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr,
- "__hip_fatbin", nullptr,
- llvm::GlobalVariable::NotThreadLocal);
+ CGM.getModule(), CGM.Int8Ty,
+ /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr,
+ "__hip_fatbin_" + CGM.getContext().getCUIDHash(), nullptr,
+ llvm::GlobalVariable::NotThreadLocal);
cast<llvm::GlobalVariable>(FatBinStr)->setSection(FatbinConstantName);
}
@@ -816,8 +829,8 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// thread safety of the loaded program. Therefore we can assume sequential
// execution of constructor functions here.
if (IsHIP) {
- auto Linkage = CudaGpuBinary ? llvm::GlobalValue::InternalLinkage :
- llvm::GlobalValue::LinkOnceAnyLinkage;
+ auto Linkage = CudaGpuBinary ? llvm::GlobalValue::InternalLinkage
+ : llvm::GlobalValue::ExternalLinkage;
llvm::BasicBlock *IfBlock =
llvm::BasicBlock::Create(Context, "if", ModuleCtorFunc);
llvm::BasicBlock *ExitBlock =
@@ -826,11 +839,11 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// of HIP ABI.
GpuBinaryHandle = new llvm::GlobalVariable(
TheModule, PtrTy, /*isConstant=*/false, Linkage,
- /*Initializer=*/llvm::ConstantPointerNull::get(PtrTy),
- "__hip_gpubin_handle");
- if (Linkage == llvm::GlobalValue::LinkOnceAnyLinkage)
- GpuBinaryHandle->setComdat(
- CGM.getModule().getOrInsertComdat(GpuBinaryHandle->getName()));
+ /*Initializer=*/
+ CudaGpuBinary ? llvm::ConstantPointerNull::get(PtrTy) : nullptr,
+ CudaGpuBinary
+ ? "__hip_gpubin_handle"
+ : "__hip_gpubin_handle_" + CGM.getContext().getCUIDHash());
GpuBinaryHandle->setAlignment(CGM.getPointerAlign().getAsAlign());
// Prevent the weak symbol in different shared libraries being merged.
if (Linkage != llvm::GlobalValue::InternalLinkage)
@@ -1092,7 +1105,9 @@ void CGNVCUDARuntime::transformManagedVars() {
: llvm::ConstantPointerNull::get(Var->getType()),
/*Name=*/"", /*InsertBefore=*/nullptr,
llvm::GlobalVariable::NotThreadLocal,
- CGM.getContext().getTargetAddressSpace(LangAS::cuda_device));
+ CGM.getContext().getTargetAddressSpace(CGM.getLangOpts().CUDAIsDevice
+ ? LangAS::cuda_device
+ : LangAS::Default));
ManagedVar->setDSOLocal(Var->isDSOLocal());
ManagedVar->setVisibility(Var->getVisibility());
ManagedVar->setExternallyInitialized(true);
@@ -1101,7 +1116,7 @@ void CGNVCUDARuntime::transformManagedVars() {
Var->setName(Twine(ManagedVar->getName()) + ".managed");
// Keep managed variables even if they are not used in device code since
// they need to be allocated by the runtime.
- if (!Var->isDeclaration()) {
+ if (CGM.getLangOpts().CUDAIsDevice && !Var->isDeclaration()) {
assert(!ManagedVar->isDeclaration());
CGM.addCompilerUsedGlobal(Var);
CGM.addCompilerUsedGlobal(ManagedVar);
@@ -1159,9 +1174,8 @@ void CGNVCUDARuntime::createOffloadingEntries() {
// Returns module constructor to be added.
llvm::Function *CGNVCUDARuntime::finalizeModule() {
+ transformManagedVars();
if (CGM.getLangOpts().CUDAIsDevice) {
- transformManagedVars();
-
// Mark ODR-used device variables as compiler used to prevent it from being
// eliminated by optimization. This is necessary for device variables
// ODR-used by host functions. Sema correctly marks them as ODR-used no
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCUDARuntime.h b/contrib/llvm-project/clang/lib/CodeGen/CGCUDARuntime.h
index c7af8f1cf0fe..8030d632cc3d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCUDARuntime.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCUDARuntime.h
@@ -54,10 +54,15 @@ public:
};
private:
+ LLVM_PREFERRED_TYPE(DeviceVarKind)
unsigned Kind : 2;
+ LLVM_PREFERRED_TYPE(bool)
unsigned Extern : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned Constant : 1; // Constant variable.
+ LLVM_PREFERRED_TYPE(bool)
unsigned Managed : 1; // Managed variable.
+ LLVM_PREFERRED_TYPE(bool)
unsigned Normalized : 1; // Normalized texture.
int SurfTexType; // Type of surface/texutre.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCXX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCXX.cpp
index e95a735f92f7..23ebbee19bf7 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCXX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCXX.cpp
@@ -263,7 +263,16 @@ static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF,
CGF.Builder.CreateConstInBoundsGEP1_64(Ty, VTable, VTableIndex, "vfnkxt");
llvm::Value *VFunc = CGF.Builder.CreateAlignedLoad(
Ty, VFuncPtr, llvm::Align(CGF.PointerAlignInBytes));
- CGCallee Callee(GD, VFunc);
+
+ CGPointerAuthInfo PointerAuth;
+ if (auto &Schema =
+ CGM.getCodeGenOpts().PointerAuth.CXXVirtualFunctionPointers) {
+ GlobalDecl OrigMD =
+ CGM.getItaniumVTableContext().findOriginalMethod(GD.getCanonicalDecl());
+ PointerAuth = CGF.EmitPointerAuthInfo(Schema, VFuncPtr, OrigMD, QualType());
+ }
+
+ CGCallee Callee(GD, VFunc, PointerAuth);
return Callee;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
index a8bf57a277e9..7c6dfc3e59d8 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
@@ -20,6 +20,12 @@ using namespace CodeGen;
CGCXXABI::~CGCXXABI() { }
+Address CGCXXABI::getThisAddress(CodeGenFunction &CGF) {
+ return CGF.makeNaturalAddressForPointer(
+ CGF.CXXABIThisValue, CGF.CXXABIThisDecl->getType()->getPointeeType(),
+ CGF.CXXABIThisAlignment);
+}
+
void CGCXXABI::ErrorUnsupportedABI(CodeGenFunction &CGF, StringRef S) {
DiagnosticsEngine &Diags = CGF.CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
@@ -44,8 +50,12 @@ CGCallee CGCXXABI::EmitLoadOfMemberFunctionPointer(
llvm::Value *MemPtr, const MemberPointerType *MPT) {
ErrorUnsupportedABI(CGF, "calls through member pointers");
- ThisPtrForCall = This.getPointer();
- const auto *FPT = MPT->getPointeeType()->castAs<FunctionProtoType>();
+ const auto *RD =
+ cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
+ ThisPtrForCall =
+ CGF.getAsNaturalPointerTo(This, CGF.getContext().getRecordType(RD));
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->getAs<FunctionProtoType>();
llvm::Constant *FnPtr = llvm::Constant::getNullValue(
llvm::PointerType::getUnqual(CGM.getLLVMContext()));
return CGCallee::forDirect(FnPtr, FPT);
@@ -251,16 +261,15 @@ void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, Address ptr,
// If we don't need an array cookie, bail out early.
if (!requiresArrayCookie(expr, eltTy)) {
- allocPtr = ptr.getPointer();
+ allocPtr = ptr.emitRawPointer(CGF);
numElements = nullptr;
cookieSize = CharUnits::Zero();
return;
}
cookieSize = getArrayCookieSizeImpl(eltTy);
- Address allocAddr =
- CGF.Builder.CreateConstInBoundsByteGEP(ptr, -cookieSize);
- allocPtr = allocAddr.getPointer();
+ Address allocAddr = CGF.Builder.CreateConstInBoundsByteGEP(ptr, -cookieSize);
+ allocPtr = allocAddr.emitRawPointer(CGF);
numElements = readArrayCookieImpl(CGF, allocAddr, cookieSize);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
index ad1ad08d0856..7dcc53911199 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
@@ -57,12 +57,8 @@ protected:
llvm::Value *getThisValue(CodeGenFunction &CGF) {
return CGF.CXXABIThisValue;
}
- Address getThisAddress(CodeGenFunction &CGF) {
- return Address(
- CGF.CXXABIThisValue,
- CGF.ConvertTypeForMem(CGF.CXXABIThisDecl->getType()->getPointeeType()),
- CGF.CXXABIThisAlignment);
- }
+
+ Address getThisAddress(CodeGenFunction &CGF);
/// Issue a diagnostic about unsupported features in the ABI.
void ErrorUnsupportedABI(CodeGenFunction &CGF, StringRef S);
@@ -278,8 +274,7 @@ public:
getAddrOfCXXCatchHandlerType(QualType Ty, QualType CatchHandlerType) = 0;
virtual CatchTypeInfo getCatchAllTypeInfo();
- virtual bool shouldTypeidBeNullChecked(bool IsDeref,
- QualType SrcRecordTy) = 0;
+ virtual bool shouldTypeidBeNullChecked(QualType SrcRecordTy) = 0;
virtual void EmitBadTypeidCall(CodeGenFunction &CGF) = 0;
virtual llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
Address ThisPtr,
@@ -475,12 +470,6 @@ public:
BaseSubobject Base,
const CXXRecordDecl *NearestVBase) = 0;
- /// Get the address point of the vtable for the given base subobject while
- /// building a constexpr.
- virtual llvm::Constant *
- getVTableAddressPointForConstExpr(BaseSubobject Base,
- const CXXRecordDecl *VTableClass) = 0;
-
/// Get the address of the vtable for the given record decl which should be
/// used for the vptr at the given offset in RD.
virtual llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
@@ -515,13 +504,15 @@ public:
virtual void setThunkLinkage(llvm::Function *Thunk, bool ForVTable,
GlobalDecl GD, bool ReturnAdjustment) = 0;
- virtual llvm::Value *performThisAdjustment(CodeGenFunction &CGF,
- Address This,
- const ThisAdjustment &TA) = 0;
+ virtual llvm::Value *
+ performThisAdjustment(CodeGenFunction &CGF, Address This,
+ const CXXRecordDecl *UnadjustedClass,
+ const ThunkInfo &TI) = 0;
- virtual llvm::Value *performReturnAdjustment(CodeGenFunction &CGF,
- Address Ret,
- const ReturnAdjustment &RA) = 0;
+ virtual llvm::Value *
+ performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
+ const CXXRecordDecl *UnadjustedClass,
+ const ReturnAdjustment &RA) = 0;
virtual void EmitReturnFromThunk(CodeGenFunction &CGF,
RValue RV, QualType ResultType);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
index 28c211aa631e..0416fa03d749 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
@@ -73,6 +73,10 @@ unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
case CC_Swift: return llvm::CallingConv::Swift;
case CC_SwiftAsync: return llvm::CallingConv::SwiftTail;
case CC_M68kRTD: return llvm::CallingConv::M68k_RTD;
+ case CC_PreserveNone: return llvm::CallingConv::PreserveNone;
+ // clang-format off
+ case CC_RISCVVectorCall: return llvm::CallingConv::RISCV_VectorCall;
+ // clang-format on
}
}
@@ -256,6 +260,12 @@ static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
if (D->hasAttr<M68kRTDAttr>())
return CC_M68kRTD;
+ if (D->hasAttr<PreserveNoneAttr>())
+ return CC_PreserveNone;
+
+ if (D->hasAttr<RISCVVectorCCAttr>())
+ return CC_RISCVVectorCall;
+
return CC_C;
}
@@ -304,7 +314,8 @@ CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
if (MD->isImplicitObjectMemberFunction()) {
// The abstract case is perfectly fine.
- const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
+ const CXXRecordDecl *ThisType =
+ getCXXABI().getThisArgumentTypeForMethod(MD);
return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
}
@@ -327,7 +338,7 @@ CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
SmallVector<CanQualType, 16> argTypes;
SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
- const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(GD);
+ const CXXRecordDecl *ThisType = getCXXABI().getThisArgumentTypeForMethod(GD);
argTypes.push_back(DeriveThisType(ThisType, MD));
bool PassParams = true;
@@ -346,7 +357,7 @@ CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
appendParameterTypes(*this, argTypes, paramInfos, FTP);
CGCXXABI::AddedStructorArgCounts AddedArgs =
- TheCXXABI.buildStructorSignature(GD, argTypes);
+ getCXXABI().buildStructorSignature(GD, argTypes);
if (!paramInfos.empty()) {
// Note: prefix implies after the first param.
if (AddedArgs.Prefix)
@@ -362,11 +373,10 @@ CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
: RequiredArgs::All);
FunctionType::ExtInfo extInfo = FTP->getExtInfo();
- CanQualType resultType = TheCXXABI.HasThisReturn(GD)
- ? argTypes.front()
- : TheCXXABI.hasMostDerivedReturn(GD)
- ? CGM.getContext().VoidPtrTy
- : Context.VoidTy;
+ CanQualType resultType = getCXXABI().HasThisReturn(GD) ? argTypes.front()
+ : getCXXABI().hasMostDerivedReturn(GD)
+ ? CGM.getContext().VoidPtrTy
+ : Context.VoidTy;
return arrangeLLVMFunctionInfo(resultType, FnInfoOpts::IsInstanceMethod,
argTypes, extInfo, paramInfos, required);
}
@@ -427,11 +437,10 @@ CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
: RequiredArgs::All;
GlobalDecl GD(D, CtorKind);
- CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
- ? ArgTypes.front()
- : TheCXXABI.hasMostDerivedReturn(GD)
- ? CGM.getContext().VoidPtrTy
- : Context.VoidTy;
+ CanQualType ResultType = getCXXABI().HasThisReturn(GD) ? ArgTypes.front()
+ : getCXXABI().hasMostDerivedReturn(GD)
+ ? CGM.getContext().VoidPtrTy
+ : Context.VoidTy;
FunctionType::ExtInfo Info = FPT->getExtInfo();
llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
@@ -796,7 +805,7 @@ const CGFunctionInfo &CodeGenTypes::arrangeLLVMFunctionInfo(
} else if (info.getCC() == CC_Swift || info.getCC() == CC_SwiftAsync) {
swiftcall::computeABIInfo(CGM, *FI);
} else {
- getABIInfo().computeInfo(*FI);
+ CGM.getABIInfo().computeInfo(*FI);
}
// Loop over all of the computed argument and return value info. If any of
@@ -929,8 +938,8 @@ struct NoExpansion : TypeExpansion {
static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
- return std::make_unique<ConstantArrayExpansion>(
- AT->getElementType(), AT->getSize().getZExtValue());
+ return std::make_unique<ConstantArrayExpansion>(AT->getElementType(),
+ AT->getZExtSize());
}
if (const RecordType *RT = Ty->getAs<RecordType>()) {
SmallVector<const CXXBaseSpecifier *, 1> Bases;
@@ -1027,15 +1036,9 @@ static void forConstantArrayExpansion(CodeGenFunction &CGF,
ConstantArrayExpansion *CAE,
Address BaseAddr,
llvm::function_ref<void(Address)> Fn) {
- CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
- CharUnits EltAlign =
- BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
- llvm::Type *EltTy = CGF.ConvertTypeForMem(CAE->EltTy);
-
for (int i = 0, n = CAE->NumElts; i < n; i++) {
- llvm::Value *EltAddr = CGF.Builder.CreateConstGEP2_32(
- BaseAddr.getElementType(), BaseAddr.getPointer(), 0, i);
- Fn(Address(EltAddr, EltTy, EltAlign));
+ Address EltAddr = CGF.Builder.CreateConstGEP2_32(BaseAddr, 0, i);
+ Fn(EltAddr);
}
}
@@ -1047,12 +1050,12 @@ void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
auto Exp = getTypeExpansion(Ty, getContext());
if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
forConstantArrayExpansion(
- *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
+ *this, CAExp, LV.getAddress(), [&](Address EltAddr) {
LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
});
} else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
- Address This = LV.getAddress(*this);
+ Address This = LV.getAddress();
for (const CXXBaseSpecifier *BS : RExp->Bases) {
// Perform a single step derived-to-base conversion.
Address Base =
@@ -1084,7 +1087,7 @@ void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
// pointer type they use (see D118744). Once clang uses opaque pointers
// all LLVM pointer types will be the same and we can remove this check.
if (Arg->getType()->isPointerTy()) {
- Address Addr = LV.getAddress(*this);
+ Address Addr = LV.getAddress();
Arg = Builder.CreateBitCast(Arg, Addr.getElementType());
}
EmitStoreOfScalar(Arg, LV);
@@ -1097,7 +1100,7 @@ void CodeGenFunction::ExpandTypeToArgs(
SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
auto Exp = getTypeExpansion(Ty, getContext());
if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
- Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
+ Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
: Arg.getKnownRValue().getAggregateAddress();
forConstantArrayExpansion(
*this, CAExp, Addr, [&](Address EltAddr) {
@@ -1108,7 +1111,7 @@ void CodeGenFunction::ExpandTypeToArgs(
IRCallArgPos);
});
} else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
- Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
+ Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
: Arg.getKnownRValue().getAggregateAddress();
for (const CXXBaseSpecifier *BS : RExp->Bases) {
// Perform a single step derived-to-base conversion.
@@ -1150,9 +1153,10 @@ void CodeGenFunction::ExpandTypeToArgs(
}
/// Create a temporary allocation for the purposes of coercion.
-static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
- CharUnits MinAlign,
- const Twine &Name = "tmp") {
+static RawAddress CreateTempAllocaForCoercion(CodeGenFunction &CGF,
+ llvm::Type *Ty,
+ CharUnits MinAlign,
+ const Twine &Name = "tmp") {
// Don't use an alignment that's worse than what LLVM would prefer.
auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(Ty);
CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
@@ -1297,111 +1301,84 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
// If coercing a fixed vector to a scalable vector for ABI compatibility, and
// the types match, use the llvm.vector.insert intrinsic to perform the
// conversion.
- if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
- if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
- // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
+ if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Ty)) {
+ if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
+ // If we are casting a fixed i8 vector to a scalable i1 predicate
// vector, use a vector insert and bitcast the result.
- bool NeedsBitcast = false;
- auto PredType =
- llvm::ScalableVectorType::get(CGF.Builder.getInt1Ty(), 16);
- llvm::Type *OrigType = Ty;
- if (ScalableDst == PredType &&
- FixedSrc->getElementType() == CGF.Builder.getInt8Ty()) {
- ScalableDst = llvm::ScalableVectorType::get(CGF.Builder.getInt8Ty(), 2);
- NeedsBitcast = true;
+ if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
+ ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
+ FixedSrcTy->getElementType()->isIntegerTy(8)) {
+ ScalableDstTy = llvm::ScalableVectorType::get(
+ FixedSrcTy->getElementType(),
+ ScalableDstTy->getElementCount().getKnownMinValue() / 8);
}
- if (ScalableDst->getElementType() == FixedSrc->getElementType()) {
+ if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) {
auto *Load = CGF.Builder.CreateLoad(Src);
- auto *UndefVec = llvm::UndefValue::get(ScalableDst);
+ auto *UndefVec = llvm::UndefValue::get(ScalableDstTy);
auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
llvm::Value *Result = CGF.Builder.CreateInsertVector(
- ScalableDst, UndefVec, Load, Zero, "cast.scalable");
- if (NeedsBitcast)
- Result = CGF.Builder.CreateBitCast(Result, OrigType);
+ ScalableDstTy, UndefVec, Load, Zero, "cast.scalable");
+ if (ScalableDstTy != Ty)
+ Result = CGF.Builder.CreateBitCast(Result, Ty);
return Result;
}
}
}
// Otherwise do coercion through memory. This is stupid, but simple.
- Address Tmp =
+ RawAddress Tmp =
CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
CGF.Builder.CreateMemCpy(
- Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
- Src.getAlignment().getAsAlign(),
+ Tmp.getPointer(), Tmp.getAlignment().getAsAlign(),
+ Src.emitRawPointer(CGF), Src.getAlignment().getAsAlign(),
llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinValue()));
return CGF.Builder.CreateLoad(Tmp);
}
-// Function to store a first-class aggregate into memory. We prefer to
-// store the elements rather than the aggregate to be more friendly to
-// fast-isel.
-// FIXME: Do we need to recurse here?
-void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
- bool DestIsVolatile) {
- // Prefer scalar stores to first-class aggregate stores.
- if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- Address EltPtr = Builder.CreateStructGEP(Dest, i);
- llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
- Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
- }
- } else {
- Builder.CreateStore(Val, Dest, DestIsVolatile);
- }
-}
-
-/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
-/// where the source and destination may have different types. The
-/// destination is known to be aligned to \arg DstAlign bytes.
-///
-/// This safely handles the case when the src type is larger than the
-/// destination type; the upper bits of the src will be lost.
-static void CreateCoercedStore(llvm::Value *Src,
- Address Dst,
- bool DstIsVolatile,
- CodeGenFunction &CGF) {
- llvm::Type *SrcTy = Src->getType();
- llvm::Type *DstTy = Dst.getElementType();
- if (SrcTy == DstTy) {
- CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
- return;
- }
-
- llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
-
- if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
- Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
- SrcSize.getFixedValue(), CGF);
- DstTy = Dst.getElementType();
- }
-
- llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
- llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
- if (SrcPtrTy && DstPtrTy &&
- SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
- Src = CGF.Builder.CreateAddrSpaceCast(Src, DstTy);
- CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
+void CodeGenFunction::CreateCoercedStore(llvm::Value *Src, Address Dst,
+ llvm::TypeSize DstSize,
+ bool DstIsVolatile) {
+ if (!DstSize)
return;
- }
- // If the source and destination are integer or pointer types, just do an
- // extension or truncation to the desired type.
- if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
- (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
- Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
- CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
- return;
+ llvm::Type *SrcTy = Src->getType();
+ llvm::TypeSize SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
+
+ // GEP into structs to try to make types match.
+ // FIXME: This isn't really that useful with opaque types, but it impacts a
+ // lot of regression tests.
+ if (SrcTy != Dst.getElementType()) {
+ if (llvm::StructType *DstSTy =
+ dyn_cast<llvm::StructType>(Dst.getElementType())) {
+ assert(!SrcSize.isScalable());
+ Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
+ SrcSize.getFixedValue(), *this);
+ }
}
- llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
-
- // If store is legal, just bitcast the src pointer.
- if (isa<llvm::ScalableVectorType>(SrcTy) ||
- isa<llvm::ScalableVectorType>(DstTy) ||
- SrcSize.getFixedValue() <= DstSize.getFixedValue()) {
- Dst = Dst.withElementType(SrcTy);
- CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
+ if (SrcSize.isScalable() || SrcSize <= DstSize) {
+ if (SrcTy->isIntegerTy() && Dst.getElementType()->isPointerTy() &&
+ SrcSize == CGM.getDataLayout().getTypeAllocSize(Dst.getElementType())) {
+ // If the value is supposed to be a pointer, convert it before storing it.
+ Src = CoerceIntOrPtrToIntOrPtr(Src, Dst.getElementType(), *this);
+ Builder.CreateStore(Src, Dst, DstIsVolatile);
+ } else if (llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(Src->getType())) {
+ // Prefer scalar stores to first-class aggregate stores.
+ Dst = Dst.withElementType(SrcTy);
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ Address EltPtr = Builder.CreateStructGEP(Dst, i);
+ llvm::Value *Elt = Builder.CreateExtractValue(Src, i);
+ Builder.CreateStore(Elt, EltPtr, DstIsVolatile);
+ }
+ } else {
+ Builder.CreateStore(Src, Dst.withElementType(SrcTy), DstIsVolatile);
+ }
+ } else if (SrcTy->isIntegerTy()) {
+ // If the source is a simple integer, coerce it directly.
+ llvm::Type *DstIntTy = Builder.getIntNTy(DstSize.getFixedValue() * 8);
+ Src = CoerceIntOrPtrToIntOrPtr(Src, DstIntTy, *this);
+ Builder.CreateStore(Src, Dst.withElementType(DstIntTy), DstIsVolatile);
} else {
// Otherwise do coercion through memory. This is stupid, but
// simple.
@@ -1412,12 +1389,13 @@ static void CreateCoercedStore(llvm::Value *Src,
//
// FIXME: Assert that we aren't truncating non-padding bits when have access
// to that information.
- Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
- CGF.Builder.CreateStore(Src, Tmp);
- CGF.Builder.CreateMemCpy(
- Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
- Tmp.getAlignment().getAsAlign(),
- llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedValue()));
+ RawAddress Tmp =
+ CreateTempAllocaForCoercion(*this, SrcTy, Dst.getAlignment());
+ Builder.CreateStore(Src, Tmp);
+ Builder.CreateMemCpy(Dst.emitRawPointer(*this),
+ Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
+ Tmp.getAlignment().getAsAlign(),
+ Builder.CreateTypeSize(IntPtrTy, DstSize));
}
}
@@ -1581,6 +1559,11 @@ bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
}
+bool CodeGenModule::ReturnTypeHasInReg(const CGFunctionInfo &FI) {
+ const auto &RI = FI.getReturnInfo();
+ return RI.getInReg();
+}
+
bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
return ReturnTypeUsesSRet(FI) &&
getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
@@ -1774,14 +1757,14 @@ static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
FuncAttrs.addAttribute("aarch64_pstate_sm_compatible");
// ZA
- if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_Out ||
- FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_InOut)
- FuncAttrs.addAttribute("aarch64_pstate_za_shared");
- if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_Preserves ||
- FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_In) {
- FuncAttrs.addAttribute("aarch64_pstate_za_shared");
- FuncAttrs.addAttribute("aarch64_pstate_za_preserved");
- }
+ if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_Preserves)
+ FuncAttrs.addAttribute("aarch64_preserves_za");
+ if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_In)
+ FuncAttrs.addAttribute("aarch64_in_za");
+ if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_Out)
+ FuncAttrs.addAttribute("aarch64_out_za");
+ if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_InOut)
+ FuncAttrs.addAttribute("aarch64_inout_za");
// ZT0
if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_Preserves)
@@ -1794,14 +1777,14 @@ static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
FuncAttrs.addAttribute("aarch64_inout_zt0");
}
-static void AddAttributesFromAssumes(llvm::AttrBuilder &FuncAttrs,
- const Decl *Callee) {
+static void AddAttributesFromOMPAssumes(llvm::AttrBuilder &FuncAttrs,
+ const Decl *Callee) {
if (!Callee)
return;
SmallVector<StringRef, 4> Attrs;
- for (const AssumptionAttr *AA : Callee->specific_attrs<AssumptionAttr>())
+ for (const OMPAssumeAttr *AA : Callee->specific_attrs<OMPAssumeAttr>())
AA->getAssumption().split(Attrs, ",");
if (!Attrs.empty())
@@ -1908,6 +1891,7 @@ static void getTrivialDefaultFunctionAttributes(
case CodeGenOptions::FramePointerKind::None:
// This is the default behavior.
break;
+ case CodeGenOptions::FramePointerKind::Reserved:
case CodeGenOptions::FramePointerKind::NonLeaf:
case CodeGenOptions::FramePointerKind::All:
FuncAttrs.addAttribute("frame-pointer",
@@ -2020,6 +2004,9 @@ static void getTrivialDefaultFunctionAttributes(
std::tie(Var, Value) = Attr.split('=');
FuncAttrs.addAttribute(Var, Value);
}
+
+ TargetInfo::BranchProtectionInfo BPI(LangOpts);
+ TargetCodeGenInfo::initBranchProtectionFnAttributes(BPI, FuncAttrs);
}
/// Merges `target-features` from \TargetOpts and \F, and sets the result in
@@ -2342,7 +2329,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
// Attach assumption attributes to the declaration. If this is a call
// site, attach assumptions from the caller to the call as well.
- AddAttributesFromAssumes(FuncAttrs, TargetDecl);
+ AddAttributesFromOMPAssumes(FuncAttrs, TargetDecl);
bool HasOptnone = false;
// The NoBuiltinAttr attached to the target FunctionDecl.
@@ -3016,17 +3003,17 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::Indirect:
case ABIArgInfo::IndirectAliased: {
assert(NumIRArgs == 1);
- Address ParamAddr = Address(Fn->getArg(FirstIRArg), ConvertTypeForMem(Ty),
- ArgI.getIndirectAlign(), KnownNonNull);
+ Address ParamAddr = makeNaturalAddressForPointer(
+ Fn->getArg(FirstIRArg), Ty, ArgI.getIndirectAlign(), false, nullptr,
+ nullptr, KnownNonNull);
if (!hasScalarEvaluationKind(Ty)) {
// Aggregates and complex variables are accessed by reference. All we
// need to do is realign the value, if requested. Also, if the address
// may be aliased, copy it to ensure that the parameter variable is
// mutable and has a unique adress, as C requires.
- Address V = ParamAddr;
if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
- Address AlignedTemp = CreateMemTemp(Ty, "coerce");
+ RawAddress AlignedTemp = CreateMemTemp(Ty, "coerce");
// Copy from the incoming argument pointer to the temporary with the
// appropriate alignment.
@@ -3036,11 +3023,12 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
CharUnits Size = getContext().getTypeSizeInChars(Ty);
Builder.CreateMemCpy(
AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
- ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
+ ParamAddr.emitRawPointer(*this),
+ ParamAddr.getAlignment().getAsAlign(),
llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
- V = AlignedTemp;
+ ParamAddr = AlignedTemp;
}
- ArgVals.push_back(ParamValue::forIndirect(V));
+ ArgVals.push_back(ParamValue::forIndirect(ParamAddr));
} else {
// Load scalar value from indirect argument.
llvm::Value *V =
@@ -3084,7 +3072,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Align Alignment =
CGM.getNaturalTypeAlignment(ETy).getAsAlign();
AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
- uint64_t ArrSize = ArrTy->getSize().getZExtValue();
+ uint64_t ArrSize = ArrTy->getZExtSize();
if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
ArrSize) {
llvm::AttrBuilder Attrs(getLLVMContext());
@@ -3154,10 +3142,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
== ParameterABI::SwiftErrorResult) {
QualType pointeeTy = Ty->getPointeeType();
assert(pointeeTy->isPointerType());
- Address temp =
- CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
- Address arg(V, ConvertTypeForMem(pointeeTy),
- getContext().getTypeAlignInChars(pointeeTy));
+ RawAddress temp =
+ CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
+ Address arg = makeNaturalAddressForPointer(
+ V, pointeeTy, getContext().getTypeAlignInChars(pointeeTy));
llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
Builder.CreateStore(incomingErrorValue, temp);
V = temp.getPointer();
@@ -3195,13 +3183,14 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Value *Coerced = Fn->getArg(FirstIRArg);
if (auto *VecTyFrom =
dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
- // If we are casting a scalable 16 x i1 predicate vector to a fixed i8
+ // If we are casting a scalable i1 predicate vector to a fixed i8
// vector, bitcast the source and use a vector extract.
- auto PredType =
- llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
- if (VecTyFrom == PredType &&
+ if (VecTyFrom->getElementType()->isIntegerTy(1) &&
+ VecTyFrom->getElementCount().isKnownMultipleOf(8) &&
VecTyTo->getElementType() == Builder.getInt8Ty()) {
- VecTyFrom = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
+ VecTyFrom = llvm::ScalableVectorType::get(
+ VecTyTo->getElementType(),
+ VecTyFrom->getElementCount().getKnownMinValue() / 8);
Coerced = Builder.CreateBitCast(Coerced, VecTyFrom);
}
if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
@@ -3216,6 +3205,24 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
}
}
+ llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
+ if (ArgI.isDirect() && !ArgI.getCanBeFlattened() && STy &&
+ STy->getNumElements() > 1) {
+ [[maybe_unused]] llvm::TypeSize StructSize =
+ CGM.getDataLayout().getTypeAllocSize(STy);
+ [[maybe_unused]] llvm::TypeSize PtrElementSize =
+ CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(Ty));
+ if (STy->containsHomogeneousScalableVectorTypes()) {
+ assert(StructSize == PtrElementSize &&
+ "Only allow non-fractional movement of structure with"
+ "homogeneous scalable vector type");
+
+ ArgVals.push_back(ParamValue::forDirect(AI));
+ break;
+ }
+ }
+
Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
Arg->getName());
@@ -3224,7 +3231,6 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// Fast-isel and the optimizer generally like scalar values better than
// FCAs, so we flatten them if this is safe to do for this argument.
- llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
STy->getNumElements() > 1) {
llvm::TypeSize StructSize = CGM.getDataLayout().getTypeAllocSize(STy);
@@ -3277,7 +3283,12 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
assert(NumIRArgs == 1);
auto AI = Fn->getArg(FirstIRArg);
AI->setName(Arg->getName() + ".coerce");
- CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
+ CreateCoercedStore(
+ AI, Ptr,
+ llvm::TypeSize::getFixed(
+ getContext().getTypeSizeInChars(Ty).getQuantity() -
+ ArgI.getDirectOffset()),
+ /*DstIsVolatile=*/false);
}
// Match to what EmitParmDecl is expecting for this type.
@@ -3476,7 +3487,7 @@ static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
llvm::LoadInst *load =
dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
if (!load || load->isAtomic() || load->isVolatile() ||
- load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
+ load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getBasePointer())
return nullptr;
// Okay! Burn it all down. This relies for correctness on the
@@ -3513,12 +3524,15 @@ static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
/// Heuristically search for a dominating store to the return-value slot.
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
+ llvm::Value *ReturnValuePtr = CGF.ReturnValue.getBasePointer();
+
// Check if a User is a store which pointerOperand is the ReturnValue.
// We are looking for stores to the ReturnValue, not for stores of the
// ReturnValue to some other location.
- auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
+ auto GetStoreIfValid = [&CGF,
+ ReturnValuePtr](llvm::User *U) -> llvm::StoreInst * {
auto *SI = dyn_cast<llvm::StoreInst>(U);
- if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer() ||
+ if (!SI || SI->getPointerOperand() != ReturnValuePtr ||
SI->getValueOperand()->getType() != CGF.ReturnValue.getElementType())
return nullptr;
// These aren't actually possible for non-coerced returns, and we
@@ -3532,7 +3546,7 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
// for something immediately preceding the IP. Sometimes this can
// happen with how we generate implicit-returns; it can also happen
// with noreturn cleanups.
- if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
+ if (!ReturnValuePtr->hasOneUse()) {
llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
if (IP->empty()) return nullptr;
@@ -3550,8 +3564,7 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
return nullptr;
}
- llvm::StoreInst *store =
- GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
+ llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back());
if (!store) return nullptr;
// Now do a first-and-dirty dominance check: just walk up the
@@ -3640,7 +3653,7 @@ static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) {
const FieldDecl *F = *I;
- if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) ||
+ if (F->isUnnamedBitField() || F->isZeroLengthBitField(Context) ||
F->getType()->isIncompleteArrayType())
continue;
@@ -3829,7 +3842,8 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
LValue ArgVal =
LValue::MakeAddr(ArgAddr, RetTy, getContext(), BaseInfo, TBAAInfo);
EmitStoreOfScalar(
- Builder.CreateLoad(ReturnValue), ArgVal, /*isInit*/ true);
+ EmitLoadOfScalar(MakeAddrLValue(ReturnValue, RetTy), EndLoc), ArgVal,
+ /*isInit*/ true);
break;
}
}
@@ -4095,29 +4109,32 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
}
static bool isProvablyNull(llvm::Value *addr) {
- return isa<llvm::ConstantPointerNull>(addr);
+ return llvm::isa_and_nonnull<llvm::ConstantPointerNull>(addr);
+}
+
+static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF) {
+ return llvm::isKnownNonZero(Addr.getBasePointer(), CGF.CGM.getDataLayout());
}
/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
const CallArgList::Writeback &writeback) {
const LValue &srcLV = writeback.Source;
- Address srcAddr = srcLV.getAddress(CGF);
- assert(!isProvablyNull(srcAddr.getPointer()) &&
+ Address srcAddr = srcLV.getAddress();
+ assert(!isProvablyNull(srcAddr.getBasePointer()) &&
"shouldn't have writeback for provably null argument");
llvm::BasicBlock *contBB = nullptr;
// If the argument wasn't provably non-null, we need to null check
// before doing the store.
- bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
- CGF.CGM.getDataLayout());
+ bool provablyNonNull = isProvablyNonNull(srcAddr, CGF);
+
if (!provablyNonNull) {
llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
contBB = CGF.createBasicBlock("icr.done");
- llvm::Value *isNull =
- CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
+ llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
CGF.EmitBlock(writebackBB);
}
@@ -4210,7 +4227,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
}
- Address srcAddr = srcLV.getAddress(CGF);
+ Address srcAddr = srcLV.getAddress();
// The dest and src types don't necessarily match in LLVM terms
// because of the crazy ObjC compatibility rules.
@@ -4221,7 +4238,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
CGF.ConvertTypeForMem(CRE->getType()->getPointeeType());
// If the address is a constant null, just pass the appropriate null.
- if (isProvablyNull(srcAddr.getPointer())) {
+ if (isProvablyNull(srcAddr.getBasePointer())) {
args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
CRE->getType());
return;
@@ -4250,17 +4267,16 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
// If the address is *not* known to be non-null, we need to switch.
llvm::Value *finalArgument;
- bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
- CGF.CGM.getDataLayout());
+ bool provablyNonNull = isProvablyNonNull(srcAddr, CGF);
+
if (provablyNonNull) {
- finalArgument = temp.getPointer();
+ finalArgument = temp.emitRawPointer(CGF);
} else {
- llvm::Value *isNull =
- CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
+ llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
- finalArgument = CGF.Builder.CreateSelect(isNull,
- llvm::ConstantPointerNull::get(destType),
- temp.getPointer(), "icr.argument");
+ finalArgument = CGF.Builder.CreateSelect(
+ isNull, llvm::ConstantPointerNull::get(destType),
+ temp.emitRawPointer(CGF), "icr.argument");
// If we need to copy, then the load has to be conditional, which
// means we need control flow.
@@ -4352,7 +4368,8 @@ void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
bool CanCheckNullability = false;
- if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
+ if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD &&
+ !PVD->getType()->isRecordType()) {
auto Nullability = PVD->getType()->getNullability();
CanCheckNullability = Nullability &&
*Nullability == NullabilityKind::NonNull &&
@@ -4384,6 +4401,16 @@ void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, std::nullopt);
}
+void CodeGenFunction::EmitNonNullArgCheck(Address Addr, QualType ArgType,
+ SourceLocation ArgLoc,
+ AbstractCallee AC, unsigned ParmNum) {
+ if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
+ SanOpts.has(SanitizerKind::NullabilityArg)))
+ return;
+
+ EmitNonNullArgCheck(RValue::get(Addr, *this), ArgType, ArgLoc, AC, ParmNum);
+}
+
// Check if the call is going to use the inalloca convention. This needs to
// agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged
// later, so we can't check it directly.
@@ -4606,7 +4633,7 @@ RValue CallArg::getRValue(CodeGenFunction &CGF) const {
CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
LV.isVolatile());
IsUsed = true;
- return RValue::getAggregate(Copy.getAddress(CGF));
+ return RValue::getAggregate(Copy.getAddress());
}
void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
@@ -4616,7 +4643,7 @@ void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
else if (!HasLV && RV.isComplex())
CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
else {
- auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
+ auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
// We assume that call args are never copied into subobjects.
CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
@@ -4655,11 +4682,11 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
AggValueSlot Slot = args.isUsingInAlloca()
? createPlaceholderSlot(*this, type) : CreateAggTemp(type, "agg.tmp");
- bool DestroyedInCallee = true, NeedsEHCleanup = true;
+ bool DestroyedInCallee = true, NeedsCleanup = true;
if (const auto *RD = type->getAsCXXRecordDecl())
DestroyedInCallee = RD->hasNonTrivialDestructor();
else
- NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
+ NeedsCleanup = type.isDestructedType();
if (DestroyedInCallee)
Slot.setExternallyDestructed();
@@ -4668,21 +4695,23 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
RValue RV = Slot.asRValue();
args.add(RV, type);
- if (DestroyedInCallee && NeedsEHCleanup) {
+ if (DestroyedInCallee && NeedsCleanup) {
// Create a no-op GEP between the placeholder and the cleanup so we can
// RAUW it successfully. It also serves as a marker of the first
// instruction where the cleanup is active.
- pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
- type);
+ pushFullExprCleanup<DestroyUnpassedArg>(NormalAndEHCleanup,
+ Slot.getAddress(), type);
// This unreachable is a temporary marker which will be removed later.
- llvm::Instruction *IsActive = Builder.CreateUnreachable();
+ llvm::Instruction *IsActive =
+ Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy));
args.addArgCleanupDeactivation(EHStack.stable_begin(), IsActive);
}
return;
}
if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
- cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
+ cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue &&
+ !type->isArrayParameterType()) {
LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
assert(L.isSimple());
args.addUncopiedAggregate(L, type);
@@ -4724,12 +4753,22 @@ CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
const llvm::Twine &name) {
- return EmitNounwindRuntimeCall(callee, std::nullopt, name);
+ return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value *>(), name);
}
/// Emits a call to the given nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
+ ArrayRef<Address> args,
+ const llvm::Twine &name) {
+ SmallVector<llvm::Value *, 3> values;
+ for (auto arg : args)
+ values.push_back(arg.emitRawPointer(*this));
+ return EmitNounwindRuntimeCall(callee, values, name);
+}
+
+llvm::CallInst *
+CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
ArrayRef<llvm::Value *> args,
const llvm::Twine &name) {
llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
@@ -4775,6 +4814,9 @@ llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
llvm::CallInst *call = Builder.CreateCall(
callee, args, getBundlesForFunclet(callee.getCallee()), name);
call->setCallingConv(getRuntimeCC());
+
+ if (CGM.shouldEmitConvergenceTokens() && call->isConvergent())
+ return addControlledConvergenceToken(call);
return call;
}
@@ -4971,7 +5013,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
ReturnValueSlot ReturnValue,
const CallArgList &CallArgs,
llvm::CallBase **callOrInvoke, bool IsMustTail,
- SourceLocation Loc) {
+ SourceLocation Loc,
+ bool IsVirtualFunctionPointerThunk) {
// FIXME: We no longer need the types from CallArgs; lift up and simplify.
assert(Callee.isOrdinary() || Callee.isVirtual());
@@ -4995,18 +5038,19 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
(TargetDecl->hasAttr<TargetAttr>() ||
(CurFuncDecl && CurFuncDecl->hasAttr<TargetAttr>())))
checkTargetFeatures(Loc, FD);
-
- // Some architectures (such as x86-64) have the ABI changed based on
- // attribute-target/features. Give them a chance to diagnose.
- CGM.getTargetCodeGenInfo().checkFunctionCallABI(
- CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
}
+ // Some architectures (such as x86-64) have the ABI changed based on
+ // attribute-target/features. Give them a chance to diagnose.
+ CGM.getTargetCodeGenInfo().checkFunctionCallABI(
+ CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl),
+ dyn_cast_or_null<FunctionDecl>(TargetDecl), CallArgs, RetTy);
+
// 1. Set up the arguments.
// If we're using inalloca, insert the allocation after the stack save.
// FIXME: Do this earlier rather than hacking it in here!
- Address ArgMemory = Address::invalid();
+ RawAddress ArgMemory = RawAddress::invalid();
if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
const llvm::DataLayout &DL = CGM.getDataLayout();
llvm::Instruction *IP = CallArgs.getStackBase();
@@ -5022,7 +5066,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
AI->setAlignment(Align.getAsAlign());
AI->setUsedWithInAlloca(true);
assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
- ArgMemory = Address(AI, ArgStruct, Align);
+ ArgMemory = RawAddress(AI, ArgStruct, Align);
}
ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
@@ -5031,11 +5075,15 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// If the call returns a temporary with struct return, create a temporary
// alloca to hold the result, unless one is given to us.
Address SRetPtr = Address::invalid();
- Address SRetAlloca = Address::invalid();
+ RawAddress SRetAlloca = RawAddress::invalid();
llvm::Value *UnusedReturnSizePtr = nullptr;
if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
- if (!ReturnValue.isNull()) {
- SRetPtr = ReturnValue.getValue();
+ if (IsVirtualFunctionPointerThunk && RetAI.isIndirect()) {
+ SRetPtr = makeNaturalAddressForPointer(CurFn->arg_begin() +
+ IRFunctionArgs.getSRetArgNo(),
+ RetTy, CharUnits::fromQuantity(1));
+ } else if (!ReturnValue.isNull()) {
+ SRetPtr = ReturnValue.getAddress();
} else {
SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
if (HaveInsertPoint() && ReturnValue.isUnused()) {
@@ -5045,15 +5093,16 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
}
if (IRFunctionArgs.hasSRetArg()) {
- IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
+ IRCallArgs[IRFunctionArgs.getSRetArgNo()] =
+ getAsNaturalPointerTo(SRetPtr, RetTy);
} else if (RetAI.isInAlloca()) {
Address Addr =
Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
- Builder.CreateStore(SRetPtr.getPointer(), Addr);
+ Builder.CreateStore(getAsNaturalPointerTo(SRetPtr, RetTy), Addr);
}
}
- Address swiftErrorTemp = Address::invalid();
+ RawAddress swiftErrorTemp = RawAddress::invalid();
Address swiftErrorArg = Address::invalid();
// When passing arguments using temporary allocas, we need to add the
@@ -5086,9 +5135,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(NumIRArgs == 0);
assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
if (I->isAggregate()) {
- Address Addr = I->hasLValue()
- ? I->getKnownLValue().getAddress(*this)
- : I->getKnownRValue().getAggregateAddress();
+ RawAddress Addr = I->hasLValue()
+ ? I->getKnownLValue().getAddress()
+ : I->getKnownRValue().getAggregateAddress();
llvm::Instruction *Placeholder =
cast<llvm::Instruction>(Addr.getPointer());
@@ -5112,7 +5161,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
} else if (ArgInfo.getInAllocaIndirect()) {
// Make a temporary alloca and store the address of it into the argument
// struct.
- Address Addr = CreateMemTempWithoutCast(
+ RawAddress Addr = CreateMemTempWithoutCast(
I->Ty, getContext().getTypeAlignInChars(I->Ty),
"indirect-arg-temp");
I->copyInto(*this, Addr);
@@ -5132,18 +5181,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::Indirect:
case ABIArgInfo::IndirectAliased: {
assert(NumIRArgs == 1);
- if (!I->isAggregate()) {
- // Make a temporary alloca to pass the argument.
- Address Addr = CreateMemTempWithoutCast(
- I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
-
- llvm::Value *Val = Addr.getPointer();
- if (ArgHasMaybeUndefAttr)
- Val = Builder.CreateFreeze(Addr.getPointer());
- IRCallArgs[FirstIRArg] = Val;
-
- I->copyInto(*this, Addr);
- } else {
+ if (I->isAggregate()) {
// We want to avoid creating an unnecessary temporary+copy here;
// however, we need one in three cases:
// 1. If the argument is not byval, and we are required to copy the
@@ -5153,9 +5191,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// 3. If the argument is byval, but RV is not located in default
// or alloca address space.
Address Addr = I->hasLValue()
- ? I->getKnownLValue().getAddress(*this)
+ ? I->getKnownLValue().getAddress()
: I->getKnownRValue().getAggregateAddress();
- llvm::Value *V = Addr.getPointer();
CharUnits Align = ArgInfo.getIndirectAlign();
const llvm::DataLayout *TD = &CGM.getDataLayout();
@@ -5166,8 +5203,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
bool NeedCopy = false;
if (Addr.getAlignment() < Align &&
- llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
- Align.getAsAlign()) {
+ llvm::getOrEnforceKnownAlignment(Addr.emitRawPointer(*this),
+ Align.getAsAlign(),
+ *TD) < Align.getAsAlign()) {
NeedCopy = true;
} else if (I->hasLValue()) {
auto LV = I->getKnownLValue();
@@ -5196,29 +5234,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
}
- if (NeedCopy) {
- // Create an aligned temporary, and copy to it.
- Address AI = CreateMemTempWithoutCast(
- I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
- llvm::Value *Val = AI.getPointer();
- if (ArgHasMaybeUndefAttr)
- Val = Builder.CreateFreeze(AI.getPointer());
- IRCallArgs[FirstIRArg] = Val;
-
- // Emit lifetime markers for the temporary alloca.
- llvm::TypeSize ByvalTempElementSize =
- CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
- llvm::Value *LifetimeSize =
- EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
-
- // Add cleanup code to emit the end lifetime marker after the call.
- if (LifetimeSize) // In case we disabled lifetime markers.
- CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
-
- // Generate the copy.
- I->copyInto(*this, AI);
- } else {
+ if (!NeedCopy) {
// Skip the extra memcpy call.
+ llvm::Value *V = getAsNaturalPointerTo(Addr, I->Ty);
auto *T = llvm::PointerType::get(
CGM.getLLVMContext(), CGM.getDataLayout().getAllocaAddrSpace());
@@ -5228,8 +5246,31 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (ArgHasMaybeUndefAttr)
Val = Builder.CreateFreeze(Val);
IRCallArgs[FirstIRArg] = Val;
+ break;
}
}
+
+ // For non-aggregate args and aggregate args meeting conditions above
+ // we need to create an aligned temporary, and copy to it.
+ RawAddress AI = CreateMemTempWithoutCast(
+ I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
+ llvm::Value *Val = getAsNaturalPointerTo(AI, I->Ty);
+ if (ArgHasMaybeUndefAttr)
+ Val = Builder.CreateFreeze(Val);
+ IRCallArgs[FirstIRArg] = Val;
+
+ // Emit lifetime markers for the temporary alloca.
+ llvm::TypeSize ByvalTempElementSize =
+ CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
+ llvm::Value *LifetimeSize =
+ EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
+
+ // Add cleanup code to emit the end lifetime marker after the call.
+ if (LifetimeSize) // In case we disabled lifetime markers.
+ CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
+
+ // Generate the copy.
+ I->copyInto(*this, AI);
break;
}
@@ -5248,7 +5289,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
V = I->getKnownRValue().getScalarVal();
else
V = Builder.CreateLoad(
- I->hasLValue() ? I->getKnownLValue().getAddress(*this)
+ I->hasLValue() ? I->getKnownLValue().getAddress()
: I->getKnownRValue().getAggregateAddress());
// Implement swifterror by copying into a new swifterror argument.
@@ -5258,8 +5299,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
QualType pointeeTy = I->Ty->getPointeeType();
- swiftErrorArg = Address(V, ConvertTypeForMem(pointeeTy),
- getContext().getTypeAlignInChars(pointeeTy));
+ swiftErrorArg = makeNaturalAddressForPointer(
+ V, pointeeTy, getContext().getTypeAlignInChars(pointeeTy));
swiftErrorTemp =
CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
@@ -5287,13 +5328,31 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
break;
}
+ llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
+ if (STy && ArgInfo.isDirect() && !ArgInfo.getCanBeFlattened()) {
+ llvm::Type *SrcTy = ConvertTypeForMem(I->Ty);
+ [[maybe_unused]] llvm::TypeSize SrcTypeSize =
+ CGM.getDataLayout().getTypeAllocSize(SrcTy);
+ [[maybe_unused]] llvm::TypeSize DstTypeSize =
+ CGM.getDataLayout().getTypeAllocSize(STy);
+ if (STy->containsHomogeneousScalableVectorTypes()) {
+ assert(SrcTypeSize == DstTypeSize &&
+ "Only allow non-fractional movement of structure with "
+ "homogeneous scalable vector type");
+
+ IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal();
+ break;
+ }
+ }
+
// FIXME: Avoid the conversion through memory if possible.
Address Src = Address::invalid();
if (!I->isAggregate()) {
Src = CreateMemTemp(I->Ty, "coerce");
I->copyInto(*this, Src);
} else {
- Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
+ Src = I->hasLValue() ? I->getKnownLValue().getAddress()
: I->getKnownRValue().getAggregateAddress();
}
@@ -5302,8 +5361,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Fast-isel and the optimizer generally like scalar values better than
// FCAs, so we flatten them if this is safe to do for this argument.
- llvm::StructType *STy =
- dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
llvm::Type *SrcTy = Src.getElementType();
llvm::TypeSize SrcTypeSize =
@@ -5380,9 +5437,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::Value *tempSize = nullptr;
Address addr = Address::invalid();
- Address AllocaAddr = Address::invalid();
+ RawAddress AllocaAddr = RawAddress::invalid();
if (I->isAggregate()) {
- addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
+ addr = I->hasLValue() ? I->getKnownLValue().getAddress()
: I->getKnownRValue().getAggregateAddress();
} else {
@@ -5524,6 +5581,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
/*AttrOnCallSite=*/true,
/*IsThunk=*/false);
+ if (CallingConv == llvm::CallingConv::X86_VectorCall &&
+ getTarget().getTriple().isWindowsArm64EC()) {
+ CGM.Error(Loc, "__vectorcall calling convention is not currently "
+ "supported");
+ }
+
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
if (FD->hasAttr<StrictFPAttr>())
// All calls within a strictfp function are marked strictfp
@@ -5603,6 +5666,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
!isa_and_nonnull<FunctionDecl>(TargetDecl))
EmitKCFIOperandBundle(ConcreteCallee, BundleList);
+ // Add the pointer-authentication bundle.
+ EmitPointerAuthOperandBundle(ConcreteCallee.getPointerAuthInfo(), BundleList);
+
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
if (FD->hasAttr<StrictFPAttr>())
// All calls within a strictfp function are marked strictfp
@@ -5650,6 +5716,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (!CI->getType()->isVoidTy())
CI->setName("call");
+ if (CGM.shouldEmitConvergenceTokens() && CI->isConvergent())
+ CI = addControlledConvergenceToken(CI);
+
// Update largest vector width from the return type.
LargestVectorWidth =
std::max(LargestVectorWidth, getMaxVectorWidth(CI->getType()));
@@ -5670,8 +5739,35 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
- else if (IsMustTail)
+ else if (IsMustTail) {
+ if (getTarget().getTriple().isPPC()) {
+ if (getTarget().getTriple().isOSAIX())
+ CGM.getDiags().Report(Loc, diag::err_aix_musttail_unsupported);
+ else if (!getTarget().hasFeature("pcrelative-memops")) {
+ if (getTarget().hasFeature("longcall"))
+ CGM.getDiags().Report(Loc, diag::err_ppc_impossible_musttail) << 0;
+ else if (Call->isIndirectCall())
+ CGM.getDiags().Report(Loc, diag::err_ppc_impossible_musttail) << 1;
+ else if (isa_and_nonnull<FunctionDecl>(TargetDecl)) {
+ if (!cast<FunctionDecl>(TargetDecl)->isDefined())
+            // The undefined callee may be a forward declaration. Without
+            // knowing all symbols in the module, we won't know whether the
+            // symbol is defined or not. Collect them for later diagnosing.
+ CGM.addUndefinedGlobalForTailCall(
+ {cast<FunctionDecl>(TargetDecl), Loc});
+ else {
+ llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(
+ GlobalDecl(cast<FunctionDecl>(TargetDecl)));
+ if (llvm::GlobalValue::isWeakForLinker(Linkage) ||
+ llvm::GlobalValue::isDiscardableIfUnused(Linkage))
+ CGM.getDiags().Report(Loc, diag::err_ppc_impossible_musttail)
+ << 2;
+ }
+ }
+ }
+ }
Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
+ }
}
// Add metadata for calls to MSAllocator functions
@@ -5682,7 +5778,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Add metadata if calling an __attribute__((error(""))) or warning fn.
if (TargetDecl && TargetDecl->hasAttr<ErrorAttr>()) {
llvm::ConstantInt *Line =
- llvm::ConstantInt::get(Int32Ty, Loc.getRawEncoding());
+ llvm::ConstantInt::get(Int64Ty, Loc.getRawEncoding());
llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(Line);
llvm::MDTuple *MDT = llvm::MDNode::get(getLLVMContext(), {MD});
CI->setMetadata("srcloc", MDT);
@@ -5765,118 +5861,127 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
CallArgs.freeArgumentMemory(*this);
// Extract the return value.
- RValue Ret = [&] {
- switch (RetAI.getKind()) {
- case ABIArgInfo::CoerceAndExpand: {
- auto coercionType = RetAI.getCoerceAndExpandType();
+ RValue Ret;
- Address addr = SRetPtr.withElementType(coercionType);
-
- assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
- bool requiresExtract = isa<llvm::StructType>(CI->getType());
-
- unsigned unpaddedIndex = 0;
- for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
- llvm::Type *eltType = coercionType->getElementType(i);
- if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
- Address eltAddr = Builder.CreateStructGEP(addr, i);
- llvm::Value *elt = CI;
- if (requiresExtract)
- elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
- else
- assert(unpaddedIndex == 0);
- Builder.CreateStore(elt, eltAddr);
+ // If the current function is a virtual function pointer thunk, avoid copying
+ // the return value of the musttail call to a temporary.
+ if (IsVirtualFunctionPointerThunk) {
+ Ret = RValue::get(CI);
+ } else {
+ Ret = [&] {
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::CoerceAndExpand: {
+ auto coercionType = RetAI.getCoerceAndExpandType();
+
+ Address addr = SRetPtr.withElementType(coercionType);
+
+ assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
+ bool requiresExtract = isa<llvm::StructType>(CI->getType());
+
+ unsigned unpaddedIndex = 0;
+ for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
+ llvm::Type *eltType = coercionType->getElementType(i);
+ if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
+ continue;
+ Address eltAddr = Builder.CreateStructGEP(addr, i);
+ llvm::Value *elt = CI;
+ if (requiresExtract)
+ elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
+ else
+ assert(unpaddedIndex == 0);
+ Builder.CreateStore(elt, eltAddr);
+ }
+ [[fallthrough]];
}
- [[fallthrough]];
- }
-
- case ABIArgInfo::InAlloca:
- case ABIArgInfo::Indirect: {
- RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
- if (UnusedReturnSizePtr)
- PopCleanupBlock();
- return ret;
- }
-
- case ABIArgInfo::Ignore:
- // If we are ignoring an argument that had a result, make sure to
- // construct the appropriate return value for our caller.
- return GetUndefRValue(RetTy);
- case ABIArgInfo::Extend:
- case ABIArgInfo::Direct: {
- llvm::Type *RetIRTy = ConvertType(RetTy);
- if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
- switch (getEvaluationKind(RetTy)) {
- case TEK_Complex: {
- llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
- llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
- return RValue::getComplex(std::make_pair(Real, Imag));
- }
- case TEK_Aggregate: {
- Address DestPtr = ReturnValue.getValue();
- bool DestIsVolatile = ReturnValue.isVolatile();
+ case ABIArgInfo::InAlloca:
+ case ABIArgInfo::Indirect: {
+ RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
+ if (UnusedReturnSizePtr)
+ PopCleanupBlock();
+ return ret;
+ }
- if (!DestPtr.isValid()) {
- DestPtr = CreateMemTemp(RetTy, "agg.tmp");
- DestIsVolatile = false;
+ case ABIArgInfo::Ignore:
+ // If we are ignoring an argument that had a result, make sure to
+ // construct the appropriate return value for our caller.
+ return GetUndefRValue(RetTy);
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ llvm::Type *RetIRTy = ConvertType(RetTy);
+ if (RetAI.getCoerceToType() == RetIRTy &&
+ RetAI.getDirectOffset() == 0) {
+ switch (getEvaluationKind(RetTy)) {
+ case TEK_Complex: {
+ llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
+ llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
+ return RValue::getComplex(std::make_pair(Real, Imag));
+ }
+ case TEK_Aggregate:
+ break;
+ case TEK_Scalar: {
+ // If the argument doesn't match, perform a bitcast to coerce it.
+ // This can happen due to trivial type mismatches.
+ llvm::Value *V = CI;
+ if (V->getType() != RetIRTy)
+ V = Builder.CreateBitCast(V, RetIRTy);
+ return RValue::get(V);
+ }
}
- EmitAggregateStore(CI, DestPtr, DestIsVolatile);
- return RValue::getAggregate(DestPtr);
}
- case TEK_Scalar: {
- // If the argument doesn't match, perform a bitcast to coerce it. This
- // can happen due to trivial type mismatches.
+
+ // If coercing a fixed vector from a scalable vector for ABI
+ // compatibility, and the types match, use the llvm.vector.extract
+ // intrinsic to perform the conversion.
+ if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
llvm::Value *V = CI;
- if (V->getType() != RetIRTy)
- V = Builder.CreateBitCast(V, RetIRTy);
- return RValue::get(V);
- }
+ if (auto *ScalableSrcTy =
+ dyn_cast<llvm::ScalableVectorType>(V->getType())) {
+ if (FixedDstTy->getElementType() ==
+ ScalableSrcTy->getElementType()) {
+ llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
+ V = Builder.CreateExtractVector(FixedDstTy, V, Zero,
+ "cast.fixed");
+ return RValue::get(V);
+ }
+ }
}
- llvm_unreachable("bad evaluation kind");
- }
- // If coercing a fixed vector from a scalable vector for ABI
- // compatibility, and the types match, use the llvm.vector.extract
- // intrinsic to perform the conversion.
- if (auto *FixedDst = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
- llvm::Value *V = CI;
- if (auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(V->getType())) {
- if (FixedDst->getElementType() == ScalableSrc->getElementType()) {
- llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
- V = Builder.CreateExtractVector(FixedDst, V, Zero, "cast.fixed");
- return RValue::get(V);
- }
+ Address DestPtr = ReturnValue.getValue();
+ bool DestIsVolatile = ReturnValue.isVolatile();
+ uint64_t DestSize =
+ getContext().getTypeInfoDataSizeInChars(RetTy).Width.getQuantity();
+
+ if (!DestPtr.isValid()) {
+ DestPtr = CreateMemTemp(RetTy, "coerce");
+ DestIsVolatile = false;
+ DestSize = getContext().getTypeSizeInChars(RetTy).getQuantity();
}
- }
- Address DestPtr = ReturnValue.getValue();
- bool DestIsVolatile = ReturnValue.isVolatile();
+ // An empty record can overlap other data (if declared with
+ // no_unique_address); omit the store for such types - as there is no
+ // actual data to store.
+ if (!isEmptyRecord(getContext(), RetTy, true)) {
+ // If the value is offset in memory, apply the offset now.
+ Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
+ CreateCoercedStore(
+ CI, StorePtr,
+ llvm::TypeSize::getFixed(DestSize - RetAI.getDirectOffset()),
+ DestIsVolatile);
+ }
- if (!DestPtr.isValid()) {
- DestPtr = CreateMemTemp(RetTy, "coerce");
- DestIsVolatile = false;
+ return convertTempToRValue(DestPtr, RetTy, SourceLocation());
}
- // An empty record can overlap other data (if declared with
- // no_unique_address); omit the store for such types - as there is no
- // actual data to store.
- if (!isEmptyRecord(getContext(), RetTy, true)) {
- // If the value is offset in memory, apply the offset now.
- Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
- CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
+ case ABIArgInfo::Expand:
+ case ABIArgInfo::IndirectAliased:
+ llvm_unreachable("Invalid ABI kind for return argument");
}
- return convertTempToRValue(DestPtr, RetTy, SourceLocation());
- }
-
- case ABIArgInfo::Expand:
- case ABIArgInfo::IndirectAliased:
- llvm_unreachable("Invalid ABI kind for return argument");
- }
-
- llvm_unreachable("Unhandled ABIArgInfo::Kind");
- } ();
+ llvm_unreachable("Unhandled ABIArgInfo::Kind");
+ }();
+ }
// Emit the assume_aligned check on the return value.
if (Ret.isScalar() && TargetDecl) {
@@ -5910,12 +6015,12 @@ CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
/* VarArg handling */
-Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
- VAListAddr = VE->isMicrosoftABI()
- ? EmitMSVAListRef(VE->getSubExpr())
- : EmitVAListRef(VE->getSubExpr());
+RValue CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr,
+ AggValueSlot Slot) {
+ VAListAddr = VE->isMicrosoftABI() ? EmitMSVAListRef(VE->getSubExpr())
+ : EmitVAListRef(VE->getSubExpr());
QualType Ty = VE->getType();
if (VE->isMicrosoftABI())
- return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
- return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
+ return CGM.getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty, Slot);
+ return CGM.getABIInfo().EmitVAArg(*this, VAListAddr, Ty, Slot);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCall.h b/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
index 1c0d15dc932a..412b44a8c753 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_LIB_CODEGEN_CGCALL_H
#define LLVM_CLANG_LIB_CODEGEN_CGCALL_H
+#include "CGPointerAuthInfo.h"
#include "CGValue.h"
#include "EHScopeStack.h"
#include "clang/AST/ASTFwd.h"
@@ -69,6 +70,10 @@ class CGCallee {
Last = Virtual
};
+ struct OrdinaryInfoStorage {
+ CGCalleeInfo AbstractInfo;
+ CGPointerAuthInfo PointerAuthInfo;
+ };
struct BuiltinInfoStorage {
const FunctionDecl *Decl;
unsigned ID;
@@ -85,7 +90,7 @@ class CGCallee {
SpecialKind KindOrFunctionPointer;
union {
- CGCalleeInfo AbstractInfo;
+ OrdinaryInfoStorage OrdinaryInfo;
BuiltinInfoStorage BuiltinInfo;
PseudoDestructorInfoStorage PseudoDestructorInfo;
VirtualInfoStorage VirtualInfo;
@@ -104,10 +109,13 @@ public:
/// Construct a callee. Call this constructor directly when this
/// isn't a direct call.
- CGCallee(const CGCalleeInfo &abstractInfo, llvm::Value *functionPtr)
+ CGCallee(const CGCalleeInfo &abstractInfo, llvm::Value *functionPtr,
+ /* FIXME: make parameter pointerAuthInfo mandatory */
+ const CGPointerAuthInfo &pointerAuthInfo = CGPointerAuthInfo())
: KindOrFunctionPointer(
SpecialKind(reinterpret_cast<uintptr_t>(functionPtr))) {
- AbstractInfo = abstractInfo;
+ OrdinaryInfo.AbstractInfo = abstractInfo;
+ OrdinaryInfo.PointerAuthInfo = pointerAuthInfo;
assert(functionPtr && "configuring callee without function pointer");
assert(functionPtr->getType()->isPointerTy());
}
@@ -173,7 +181,11 @@ public:
if (isVirtual())
return VirtualInfo.MD;
assert(isOrdinary());
- return AbstractInfo;
+ return OrdinaryInfo.AbstractInfo;
+ }
+ const CGPointerAuthInfo &getPointerAuthInfo() const {
+ assert(isOrdinary());
+ return OrdinaryInfo.PointerAuthInfo;
}
llvm::Value *getFunctionPointer() const {
assert(isOrdinary());
@@ -184,6 +196,10 @@ public:
KindOrFunctionPointer =
SpecialKind(reinterpret_cast<uintptr_t>(functionPtr));
}
+ void setPointerAuthInfo(CGPointerAuthInfo PointerAuth) {
+ assert(isOrdinary());
+ OrdinaryInfo.PointerAuthInfo = PointerAuth;
+ }
bool isVirtual() const {
return KindOrFunctionPointer == SpecialKind::Virtual;
@@ -357,8 +373,11 @@ class ReturnValueSlot {
Address Addr = Address::invalid();
// Return value slot flags
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsVolatile : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsUnused : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsExternallyDestructed : 1;
public:
@@ -374,6 +393,7 @@ public:
Address getValue() const { return Addr; }
bool isUnused() const { return IsUnused; }
bool isExternallyDestructed() const { return IsExternallyDestructed; }
+ Address getAddress() const { return Addr; }
};
/// Adds attributes to \p F according to our \p CodeGenOpts and \p LangOpts, as
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
index 34319381901a..e5ba50de3462 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "ABIInfoImpl.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
@@ -139,8 +140,9 @@ Address CodeGenFunction::LoadCXXThisAddress() {
CXXThisAlignment = CGM.getClassPointerAlignment(MD->getParent());
}
- llvm::Type *Ty = ConvertType(MD->getFunctionObjectParameterType());
- return Address(LoadCXXThis(), Ty, CXXThisAlignment, KnownNonNull);
+ return makeNaturalAddressForPointer(
+ LoadCXXThis(), MD->getFunctionObjectParameterType(), CXXThisAlignment,
+ false, nullptr, nullptr, KnownNonNull);
}
/// Emit the address of a field using a member data pointer.
@@ -207,7 +209,7 @@ CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
return nullptr;
llvm::Type *PtrDiffTy =
- Types.ConvertType(getContext().getPointerDiffType());
+ getTypes().ConvertType(getContext().getPointerDiffType());
return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
}
@@ -270,7 +272,7 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, Address addr,
}
// Apply the base offset.
- llvm::Value *ptr = addr.getPointer();
+ llvm::Value *ptr = addr.emitRawPointer(CGF);
ptr = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ptr, baseOffset, "add.ptr");
// If we have a virtual component, the alignment of the result will
@@ -338,8 +340,8 @@ Address CodeGenFunction::GetAddressOfBaseClass(
if (sanitizePerformTypeCheck()) {
SanitizerSet SkippedChecks;
SkippedChecks.set(SanitizerKind::Null, !NullCheckValue);
- EmitTypeCheck(TCK_Upcast, Loc, Value.getPointer(),
- DerivedTy, DerivedAlign, SkippedChecks);
+ EmitTypeCheck(TCK_Upcast, Loc, Value.emitRawPointer(*this), DerivedTy,
+ DerivedAlign, SkippedChecks);
}
return Value.withElementType(BaseValueTy);
}
@@ -354,7 +356,7 @@ Address CodeGenFunction::GetAddressOfBaseClass(
llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
endBB = createBasicBlock("cast.end");
- llvm::Value *isNull = Builder.CreateIsNull(Value.getPointer());
+ llvm::Value *isNull = Builder.CreateIsNull(Value);
Builder.CreateCondBr(isNull, endBB, notNullBB);
EmitBlock(notNullBB);
}
@@ -363,14 +365,15 @@ Address CodeGenFunction::GetAddressOfBaseClass(
SanitizerSet SkippedChecks;
SkippedChecks.set(SanitizerKind::Null, true);
EmitTypeCheck(VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc,
- Value.getPointer(), DerivedTy, DerivedAlign, SkippedChecks);
+ Value.emitRawPointer(*this), DerivedTy, DerivedAlign,
+ SkippedChecks);
}
// Compute the virtual offset.
llvm::Value *VirtualOffset = nullptr;
if (VBase) {
VirtualOffset =
- CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase);
+ CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase);
}
// Apply both offsets.
@@ -387,7 +390,7 @@ Address CodeGenFunction::GetAddressOfBaseClass(
EmitBlock(endBB);
llvm::PHINode *PHI = Builder.CreatePHI(PtrTy, 2, "cast.result");
- PHI->addIncoming(Value.getPointer(), notNullBB);
+ PHI->addIncoming(Value.emitRawPointer(*this), notNullBB);
PHI->addIncoming(llvm::Constant::getNullValue(PtrTy), origBB);
Value = Value.withPointer(PHI, NotKnownNonNull);
}
@@ -424,15 +427,19 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
CastNotNull = createBasicBlock("cast.notnull");
CastEnd = createBasicBlock("cast.end");
- llvm::Value *IsNull = Builder.CreateIsNull(BaseAddr.getPointer());
+ llvm::Value *IsNull = Builder.CreateIsNull(BaseAddr);
Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
EmitBlock(CastNotNull);
}
// Apply the offset.
- llvm::Value *Value = BaseAddr.getPointer();
- Value = Builder.CreateInBoundsGEP(
- Int8Ty, Value, Builder.CreateNeg(NonVirtualOffset), "sub.ptr");
+ Address Addr = BaseAddr.withElementType(Int8Ty);
+ Addr = Builder.CreateInBoundsGEP(
+ Addr, Builder.CreateNeg(NonVirtualOffset), Int8Ty,
+ CGM.getClassPointerAlignment(Derived), "sub.ptr");
+
+ // Just cast.
+ Addr = Addr.withElementType(DerivedValueTy);
// Produce a PHI if we had a null-check.
if (NullCheckValue) {
@@ -441,13 +448,15 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
Builder.CreateBr(CastEnd);
EmitBlock(CastEnd);
+ llvm::Value *Value = Addr.emitRawPointer(*this);
llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
PHI->addIncoming(Value, CastNotNull);
PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
- Value = PHI;
+ return Address(PHI, Addr.getElementType(),
+ CGM.getClassPointerAlignment(Derived));
}
- return Address(Value, DerivedValueTy, CGM.getClassPointerAlignment(Derived));
+ return Addr;
}
llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
@@ -672,7 +681,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
// the constructor.
QualType::DestructionKind dtorKind = FieldType.isDestructedType();
if (CGF.needsEHCleanup(dtorKind))
- CGF.pushEHDestroy(dtorKind, LHS.getAddress(CGF), FieldType);
+ CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
return;
}
}
@@ -697,9 +706,9 @@ void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS,
break;
case TEK_Aggregate: {
AggValueSlot Slot = AggValueSlot::forLValue(
- LHS, *this, AggValueSlot::IsDestructed,
- AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
- getOverlapForFieldInit(Field), AggValueSlot::IsNotZeroed,
+ LHS, AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased, getOverlapForFieldInit(Field),
+ AggValueSlot::IsNotZeroed,
// Checks are made by the code that calls constructor.
AggValueSlot::IsSanitizerChecked);
EmitAggExpr(Init, Slot);
@@ -711,7 +720,7 @@ void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS,
// later in the constructor.
QualType::DestructionKind dtorKind = FieldType.isDestructedType();
if (needsEHCleanup(dtorKind))
- pushEHDestroy(dtorKind, LHS.getAddress(*this), FieldType);
+ pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
}
/// Checks whether the given constructor is a valid subject for the
@@ -851,7 +860,7 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
// Enter the function-try-block before the constructor prologue if
// applicable.
- bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
+ bool IsTryBody = isa_and_nonnull<CXXTryStmt>(Body);
if (IsTryBody)
EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
@@ -925,7 +934,7 @@ namespace {
}
void addMemcpyableField(FieldDecl *F) {
- if (F->isZeroSize(CGF.getContext()))
+ if (isEmptyFieldForLayout(CGF.getContext(), F))
return;
if (!FirstField)
addInitialField(F);
@@ -975,8 +984,8 @@ namespace {
LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);
emitMemcpyIR(
- Dest.isBitField() ? Dest.getBitFieldAddress() : Dest.getAddress(CGF),
- Src.isBitField() ? Src.getBitFieldAddress() : Src.getAddress(CGF),
+ Dest.isBitField() ? Dest.getBitFieldAddress() : Dest.getAddress(),
+ Src.isBitField() ? Src.getBitFieldAddress() : Src.getAddress(),
MemcpySize);
reset();
}
@@ -1123,7 +1132,7 @@ namespace {
continue;
LValue FieldLHS = LHS;
EmitLValueForAnyFieldInitialization(CGF, MemberInit, FieldLHS);
- CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(CGF), FieldType);
+ CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(), FieldType);
}
}
@@ -1396,7 +1405,7 @@ FieldHasTrivialDestructorBody(ASTContext &Context,
// The destructor for an implicit anonymous union member is never invoked.
if (FieldClassDecl->isUnion() && FieldClassDecl->isAnonymousStructOrUnion())
- return false;
+ return true;
return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
}
@@ -1467,7 +1476,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
// If the body is a function-try-block, enter the try before
// anything else.
- bool isTryBody = (Body && isa<CXXTryStmt>(Body));
+ bool isTryBody = isa_and_nonnull<CXXTryStmt>(Body);
if (isTryBody)
EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
EmitAsanPrologueOrEpilogue(false);
@@ -1639,7 +1648,7 @@ namespace {
LValue LV = CGF.EmitLValueForField(ThisLV, field);
assert(LV.isSimple());
- CGF.emitDestroy(LV.getAddress(CGF), field->getType(), destroyer,
+ CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
flags.isForNormalCleanup() && useEHCleanupForArray);
}
};
@@ -1719,7 +1728,7 @@ namespace {
// Use the base class declaration location as inline DebugLocation. All
// fields of the class are destroyed.
DeclAsInlineDebugLocation InlineHere(CGF, *BaseClass);
- EmitSanitizerDtorFieldsCallback(CGF, Addr.getPointer(),
+ EmitSanitizerDtorFieldsCallback(CGF, Addr.emitRawPointer(CGF),
BaseSize.getQuantity());
// Prevent the current stack frame from disappearing from the stack trace.
@@ -1807,7 +1816,7 @@ namespace {
const CXXDestructorDecl *DD)
: Context(Context), EHStack(EHStack), DD(DD), StartIndex(std::nullopt) {}
void PushCleanupForField(const FieldDecl *Field) {
- if (Field->isZeroSize(Context))
+ if (isEmptyFieldForLayout(Context, Field))
return;
unsigned FieldIndex = Field->getFieldIndex();
if (FieldHasTrivialDestructorBody(Context, Field)) {
@@ -2022,7 +2031,7 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
// Find the end of the array.
llvm::Type *elementType = arrayBase.getElementType();
- llvm::Value *arrayBegin = arrayBase.getPointer();
+ llvm::Value *arrayBegin = arrayBase.emitRawPointer(*this);
llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(
elementType, arrayBegin, numElements, "arrayctor.end");
@@ -2118,14 +2127,15 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
Address This = ThisAVS.getAddress();
LangAS SlotAS = ThisAVS.getQualifiers().getAddressSpace();
LangAS ThisAS = D->getFunctionObjectParameterType().getAddressSpace();
- llvm::Value *ThisPtr = This.getPointer();
+ llvm::Value *ThisPtr =
+ getAsNaturalPointerTo(This, D->getThisType()->getPointeeType());
if (SlotAS != ThisAS) {
unsigned TargetThisAS = getContext().getTargetAddressSpace(ThisAS);
llvm::Type *NewType =
llvm::PointerType::get(getLLVMContext(), TargetThisAS);
- ThisPtr = getTargetHooks().performAddrSpaceCast(*this, This.getPointer(),
- ThisAS, SlotAS, NewType);
+ ThisPtr = getTargetHooks().performAddrSpaceCast(*this, ThisPtr, ThisAS,
+ SlotAS, NewType);
}
// Push the this ptr.
@@ -2194,7 +2204,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
const CXXRecordDecl *ClassDecl = D->getParent();
if (!NewPointerIsChecked)
- EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, Loc, This.getPointer(),
+ EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, Loc, This,
getContext().getRecordType(ClassDecl), CharUnits::Zero());
if (D->isTrivial() && D->isDefaultConstructor()) {
@@ -2207,10 +2217,9 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
// model that copy.
if (isMemcpyEquivalentSpecialMember(D)) {
assert(Args.size() == 2 && "unexpected argcount for trivial ctor");
-
QualType SrcTy = D->getParamDecl(0)->getType().getNonReferenceType();
- Address Src = Address(Args[1].getRValue(*this).getScalarVal(), ConvertTypeForMem(SrcTy),
- CGM.getNaturalTypeAlignment(SrcTy));
+ Address Src = makeNaturalAddressForPointer(
+ Args[1].getRValue(*this).getScalarVal(), SrcTy);
LValue SrcLVal = MakeAddrLValue(Src, SrcTy);
QualType DestTy = getContext().getTypeDeclType(ClassDecl);
LValue DestLVal = MakeAddrLValue(This, DestTy);
@@ -2263,7 +2272,9 @@ void CodeGenFunction::EmitInheritedCXXConstructorCall(
const CXXConstructorDecl *D, bool ForVirtualBase, Address This,
bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E) {
CallArgList Args;
- CallArg ThisArg(RValue::get(This.getPointer()), D->getThisType());
+ CallArg ThisArg(RValue::get(getAsNaturalPointerTo(
+ This, D->getThisType()->getPointeeType())),
+ D->getThisType());
// Forward the parameters.
if (InheritedFromVBase &&
@@ -2388,12 +2399,14 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
CallArgList Args;
// Push the this ptr.
- Args.add(RValue::get(This.getPointer()), D->getThisType());
+ Args.add(RValue::get(getAsNaturalPointerTo(This, D->getThisType())),
+ D->getThisType());
// Push the src ptr.
QualType QT = *(FPT->param_type_begin());
llvm::Type *t = CGM.getTypes().ConvertType(QT);
- llvm::Value *SrcVal = Builder.CreateBitCast(Src.getPointer(), t);
+ llvm::Value *Val = getAsNaturalPointerTo(Src, D->getThisType());
+ llvm::Value *SrcVal = Builder.CreateBitCast(Val, t);
Args.add(RValue::get(SrcVal), QT);
// Skip over first argument (Src).
@@ -2418,7 +2431,9 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
// this
Address This = LoadCXXThisAddress();
- DelegateArgs.add(RValue::get(This.getPointer()), (*I)->getType());
+ DelegateArgs.add(RValue::get(getAsNaturalPointerTo(
+ This, (*I)->getType()->getPointeeType())),
+ (*I)->getType());
++I;
// FIXME: The location of the VTT parameter in the parameter list is
@@ -2574,6 +2589,11 @@ void CodeGenFunction::InitializeVTablePointer(const VPtr &Vptr) {
// the same addr space. Note that this might not be LLVM address space 0.
VTableField = VTableField.withElementType(PtrTy);
+ if (auto AuthenticationInfo = CGM.getVTablePointerAuthInfo(
+ this, Vptr.Base.getBase(), VTableField.emitRawPointer(*this)))
+ VTableAddressPoint =
+ EmitPointerAuthSign(*AuthenticationInfo, VTableAddressPoint);
+
llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
TBAAAccessInfo TBAAInfo = CGM.getTBAAVTablePtrAccessInfo(PtrTy);
CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
@@ -2667,12 +2687,35 @@ void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
llvm::Value *CodeGenFunction::GetVTablePtr(Address This,
llvm::Type *VTableTy,
- const CXXRecordDecl *RD) {
+ const CXXRecordDecl *RD,
+ VTableAuthMode AuthMode) {
Address VTablePtrSrc = This.withElementType(VTableTy);
llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
TBAAAccessInfo TBAAInfo = CGM.getTBAAVTablePtrAccessInfo(VTableTy);
CGM.DecorateInstructionWithTBAA(VTable, TBAAInfo);
+ if (auto AuthenticationInfo =
+ CGM.getVTablePointerAuthInfo(this, RD, This.emitRawPointer(*this))) {
+ if (AuthMode != VTableAuthMode::UnsafeUbsanStrip) {
+ VTable = cast<llvm::Instruction>(
+ EmitPointerAuthAuth(*AuthenticationInfo, VTable));
+ if (AuthMode == VTableAuthMode::MustTrap) {
+ // This is clearly suboptimal but until we have an ability
+ // to rely on the authentication intrinsic trapping and force
+ // an authentication to occur we don't really have a choice.
+ VTable =
+ cast<llvm::Instruction>(Builder.CreateBitCast(VTable, Int8PtrTy));
+ Builder.CreateLoad(RawAddress(VTable, Int8Ty, CGM.getPointerAlign()),
+ /* IsVolatile */ true);
+ }
+ } else {
+ VTable = cast<llvm::Instruction>(EmitPointerAuthAuth(
+ CGPointerAuthInfo(0, PointerAuthenticationMode::Strip, false, false,
+ nullptr),
+ VTable));
+ }
+ }
+
if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
CGM.getCodeGenOpts().StrictVTablePointers)
CGM.DecorateInstructionWithInvariantGroup(VTable, RD);
@@ -2775,7 +2818,7 @@ void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T, Address Derived,
if (MayBeNull) {
llvm::Value *DerivedNotNull =
- Builder.CreateIsNotNull(Derived.getPointer(), "cast.nonnull");
+ Builder.CreateIsNotNull(Derived.emitRawPointer(*this), "cast.nonnull");
llvm::BasicBlock *CheckBlock = createBasicBlock("cast.check");
ContBlock = createBasicBlock("cast.cont");
@@ -2976,7 +3019,7 @@ void CodeGenFunction::EmitLambdaBlockInvokeBody() {
QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
Address ThisPtr = GetAddrOfBlockDecl(variable);
- CallArgs.add(RValue::get(ThisPtr.getPointer()), ThisType);
+ CallArgs.add(RValue::get(getAsNaturalPointerTo(ThisPtr, ThisType)), ThisType);
// Add the rest of the parameters.
for (auto *param : BD->parameters())
@@ -3004,7 +3047,7 @@ void CodeGenFunction::EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD) {
QualType LambdaType = getContext().getRecordType(Lambda);
QualType ThisType = getContext().getPointerType(LambdaType);
Address ThisPtr = CreateMemTemp(LambdaType, "unused.capture");
- CallArgs.add(RValue::get(ThisPtr.getPointer()), ThisType);
+ CallArgs.add(RValue::get(ThisPtr.emitRawPointer(*this)), ThisType);
EmitLambdaDelegatingInvokeBody(MD, CallArgs);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp
index f87caf050eea..4e210a9e3c95 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp
@@ -27,7 +27,7 @@ bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) {
if (rv.isScalar())
return DominatingLLVMValue::needsSaving(rv.getScalarVal());
if (rv.isAggregate())
- return DominatingLLVMValue::needsSaving(rv.getAggregatePointer());
+ return DominatingValue<Address>::needsSaving(rv.getAggregateAddress());
return true;
}
@@ -35,69 +35,40 @@ DominatingValue<RValue>::saved_type
DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
if (rv.isScalar()) {
llvm::Value *V = rv.getScalarVal();
-
- // These automatically dominate and don't need to be saved.
- if (!DominatingLLVMValue::needsSaving(V))
- return saved_type(V, nullptr, ScalarLiteral);
-
- // Everything else needs an alloca.
- Address addr =
- CGF.CreateDefaultAlignTempAlloca(V->getType(), "saved-rvalue");
- CGF.Builder.CreateStore(V, addr);
- return saved_type(addr.getPointer(), nullptr, ScalarAddress);
+ return saved_type(DominatingLLVMValue::save(CGF, V),
+ DominatingLLVMValue::needsSaving(V) ? ScalarAddress
+ : ScalarLiteral);
}
if (rv.isComplex()) {
CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
- llvm::Type *ComplexTy =
- llvm::StructType::get(V.first->getType(), V.second->getType());
- Address addr = CGF.CreateDefaultAlignTempAlloca(ComplexTy, "saved-complex");
- CGF.Builder.CreateStore(V.first, CGF.Builder.CreateStructGEP(addr, 0));
- CGF.Builder.CreateStore(V.second, CGF.Builder.CreateStructGEP(addr, 1));
- return saved_type(addr.getPointer(), nullptr, ComplexAddress);
+ return saved_type(DominatingLLVMValue::save(CGF, V.first),
+ DominatingLLVMValue::save(CGF, V.second));
}
assert(rv.isAggregate());
- Address V = rv.getAggregateAddress(); // TODO: volatile?
- if (!DominatingLLVMValue::needsSaving(V.getPointer()))
- return saved_type(V.getPointer(), V.getElementType(), AggregateLiteral,
- V.getAlignment().getQuantity());
-
- Address addr =
- CGF.CreateTempAlloca(V.getType(), CGF.getPointerAlign(), "saved-rvalue");
- CGF.Builder.CreateStore(V.getPointer(), addr);
- return saved_type(addr.getPointer(), V.getElementType(), AggregateAddress,
- V.getAlignment().getQuantity());
+ Address V = rv.getAggregateAddress();
+ return saved_type(DominatingValue<Address>::save(CGF, V),
+ DominatingValue<Address>::needsSaving(V)
+ ? AggregateAddress
+ : AggregateLiteral);
}
/// Given a saved r-value produced by SaveRValue, perform the code
/// necessary to restore it to usability at the current insertion
/// point.
RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
- auto getSavingAddress = [&](llvm::Value *value) {
- auto *AI = cast<llvm::AllocaInst>(value);
- return Address(value, AI->getAllocatedType(),
- CharUnits::fromQuantity(AI->getAlign().value()));
- };
switch (K) {
case ScalarLiteral:
- return RValue::get(Value);
case ScalarAddress:
- return RValue::get(CGF.Builder.CreateLoad(getSavingAddress(Value)));
+ return RValue::get(DominatingLLVMValue::restore(CGF, Vals.first));
case AggregateLiteral:
+ case AggregateAddress:
return RValue::getAggregate(
- Address(Value, ElementType, CharUnits::fromQuantity(Align)));
- case AggregateAddress: {
- auto addr = CGF.Builder.CreateLoad(getSavingAddress(Value));
- return RValue::getAggregate(
- Address(addr, ElementType, CharUnits::fromQuantity(Align)));
- }
+ DominatingValue<Address>::restore(CGF, AggregateAddr));
case ComplexAddress: {
- Address address = getSavingAddress(Value);
- llvm::Value *real =
- CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(address, 0));
- llvm::Value *imag =
- CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(address, 1));
+ llvm::Value *real = DominatingLLVMValue::restore(CGF, Vals.first);
+ llvm::Value *imag = DominatingLLVMValue::restore(CGF, Vals.second);
return RValue::getComplex(real, imag);
}
}
@@ -294,14 +265,14 @@ void EHScopeStack::popNullFixups() {
BranchFixups.pop_back();
}
-Address CodeGenFunction::createCleanupActiveFlag() {
+RawAddress CodeGenFunction::createCleanupActiveFlag() {
// Create a variable to decide whether the cleanup needs to be run.
- Address active = CreateTempAllocaWithoutCast(
+ RawAddress active = CreateTempAllocaWithoutCast(
Builder.getInt1Ty(), CharUnits::One(), "cleanup.cond");
// Initialize it to false at a site that's guaranteed to be run
// before each evaluation.
- setBeforeOutermostConditional(Builder.getFalse(), active);
+ setBeforeOutermostConditional(Builder.getFalse(), active, *this);
// Initialize it to true at the current location.
Builder.CreateStore(Builder.getTrue(), active);
@@ -309,7 +280,7 @@ Address CodeGenFunction::createCleanupActiveFlag() {
return active;
}
-void CodeGenFunction::initFullExprCleanupWithFlag(Address ActiveFlag) {
+void CodeGenFunction::initFullExprCleanupWithFlag(RawAddress ActiveFlag) {
// Set that as the active flag in the cleanup.
EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
assert(!cleanup.hasActiveFlag() && "cleanup already has active flag?");
@@ -322,15 +293,17 @@ void CodeGenFunction::initFullExprCleanupWithFlag(Address ActiveFlag) {
void EHScopeStack::Cleanup::anchor() {}
static void createStoreInstBefore(llvm::Value *value, Address addr,
- llvm::Instruction *beforeInst) {
- auto store = new llvm::StoreInst(value, addr.getPointer(), beforeInst);
+ llvm::Instruction *beforeInst,
+ CodeGenFunction &CGF) {
+ auto store = new llvm::StoreInst(value, addr.emitRawPointer(CGF), beforeInst);
store->setAlignment(addr.getAlignment().getAsAlign());
}
static llvm::LoadInst *createLoadInstBefore(Address addr, const Twine &name,
- llvm::Instruction *beforeInst) {
- return new llvm::LoadInst(addr.getElementType(), addr.getPointer(), name,
- false, addr.getAlignment().getAsAlign(),
+ llvm::Instruction *beforeInst,
+ CodeGenFunction &CGF) {
+ return new llvm::LoadInst(addr.getElementType(), addr.emitRawPointer(CGF),
+ name, false, addr.getAlignment().getAsAlign(),
beforeInst);
}
@@ -357,8 +330,8 @@ static void ResolveAllBranchFixups(CodeGenFunction &CGF,
// entry which we're currently popping.
if (Fixup.OptimisticBranchBlock == nullptr) {
createStoreInstBefore(CGF.Builder.getInt32(Fixup.DestinationIndex),
- CGF.getNormalCleanupDestSlot(),
- Fixup.InitialBranch);
+ CGF.getNormalCleanupDestSlot(), Fixup.InitialBranch,
+ CGF);
Fixup.InitialBranch->setSuccessor(0, CleanupEntry);
}
@@ -385,7 +358,7 @@ static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
assert(Br->isUnconditional());
auto Load = createLoadInstBefore(CGF.getNormalCleanupDestSlot(),
- "cleanup.dest", Term);
+ "cleanup.dest", Term, CGF);
llvm::SwitchInst *Switch =
llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
Br->eraseFromParent();
@@ -513,8 +486,8 @@ void CodeGenFunction::PopCleanupBlocks(
I += Header.getSize();
if (Header.isConditional()) {
- Address ActiveFlag =
- reinterpret_cast<Address &>(LifetimeExtendedCleanupStack[I]);
+ RawAddress ActiveFlag =
+ reinterpret_cast<RawAddress &>(LifetimeExtendedCleanupStack[I]);
initFullExprCleanupWithFlag(ActiveFlag);
I += sizeof(ActiveFlag);
}
@@ -661,12 +634,19 @@ static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
/// Pops a cleanup block. If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
-void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
+void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough,
+ bool ForDeactivation) {
assert(!EHStack.empty() && "cleanup stack is empty!");
assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
+ // If we are deactivating a normal cleanup, we need to pretend that the
+ // fallthrough is unreachable. We restore this IP before returning.
+ CGBuilderTy::InsertPoint NormalDeactivateOrigIP;
+ if (ForDeactivation && (Scope.isNormalCleanup() || !getLangOpts().EHAsynch)) {
+ NormalDeactivateOrigIP = Builder.saveAndClearIP();
+ }
// Remember activation information.
bool IsActive = Scope.isActive();
Address NormalActiveFlag =
@@ -694,7 +674,8 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// - whether there's a fallthrough
llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
- bool HasFallthrough = (FallthroughSource != nullptr && IsActive);
+ bool HasFallthrough =
+ FallthroughSource != nullptr && (IsActive || HasExistingBranches);
// Branch-through fall-throughs leave the insertion point set to the
// end of the last cleanup, which points to the current scope. The
@@ -719,7 +700,11 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// If we have a prebranched fallthrough into an inactive normal
// cleanup, rewrite it so that it leads to the appropriate place.
- if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) {
+ if (Scope.isNormalCleanup() && HasPrebranchedFallthrough &&
+ !RequiresNormalCleanup) {
+ // FIXME: Come up with a program which would need forwarding prebranched
+ // fallthrough and add tests. Otherwise delete this and assert against it.
+ assert(!IsActive);
llvm::BasicBlock *prebranchDest;
// If the prebranch is semantically branching through the next
@@ -751,6 +736,8 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
EHStack.popCleanup(); // safe because there are no fixups
assert(EHStack.getNumBranchFixups() == 0 ||
EHStack.hasNormalCleanups());
+ if (NormalDeactivateOrigIP.isSet())
+ Builder.restoreIP(NormalDeactivateOrigIP);
return;
}
@@ -787,11 +774,19 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
if (!RequiresNormalCleanup) {
// Mark CPP scope end for passed-by-value Arg temp
// per Windows ABI which is "normally" Cleanup in callee
- if (IsEHa && getInvokeDest() && Builder.GetInsertBlock()) {
- if (Personality.isMSVCXXPersonality())
+ if (IsEHa && getInvokeDest()) {
+ // If we are deactivating a normal cleanup then we don't have a
+ // fallthrough. Restore original IP to emit CPP scope ends in the correct
+ // block.
+ if (NormalDeactivateOrigIP.isSet())
+ Builder.restoreIP(NormalDeactivateOrigIP);
+ if (Personality.isMSVCXXPersonality() && Builder.GetInsertBlock())
EmitSehCppScopeEnd();
+ if (NormalDeactivateOrigIP.isSet())
+ NormalDeactivateOrigIP = Builder.saveAndClearIP();
}
destroyOptimisticNormalEntry(*this, Scope);
+ Scope.MarkEmitted();
EHStack.popCleanup();
} else {
// If we have a fallthrough and no other need for the cleanup,
@@ -808,6 +803,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
}
destroyOptimisticNormalEntry(*this, Scope);
+ Scope.MarkEmitted();
EHStack.popCleanup();
EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);
@@ -888,7 +884,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
if (NormalCleanupDestSlot->hasOneUse()) {
NormalCleanupDestSlot->user_back()->eraseFromParent();
NormalCleanupDestSlot->eraseFromParent();
- NormalCleanupDest = Address::invalid();
+ NormalCleanupDest = RawAddress::invalid();
}
llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
@@ -912,9 +908,8 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// pass the abnormal exit flag to Fn (SEH cleanup)
cleanupFlags.setHasExitSwitch();
- llvm::LoadInst *Load =
- createLoadInstBefore(getNormalCleanupDestSlot(), "cleanup.dest",
- nullptr);
+ llvm::LoadInst *Load = createLoadInstBefore(
+ getNormalCleanupDestSlot(), "cleanup.dest", nullptr, *this);
llvm::SwitchInst *Switch =
llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
@@ -944,6 +939,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
}
// IV. Pop the cleanup and emit it.
+ Scope.MarkEmitted();
EHStack.popCleanup();
assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);
@@ -961,8 +957,8 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
if (!Fixup.Destination) continue;
if (!Fixup.OptimisticBranchBlock) {
createStoreInstBefore(Builder.getInt32(Fixup.DestinationIndex),
- getNormalCleanupDestSlot(),
- Fixup.InitialBranch);
+ getNormalCleanupDestSlot(), Fixup.InitialBranch,
+ *this);
Fixup.InitialBranch->setSuccessor(0, NormalEntry);
}
Fixup.OptimisticBranchBlock = NormalExit;
@@ -1012,6 +1008,8 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
}
}
+ if (NormalDeactivateOrigIP.isSet())
+ Builder.restoreIP(NormalDeactivateOrigIP);
assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);
// Emit the EH cleanup if required.
@@ -1135,7 +1133,7 @@ void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
// Store the index at the start.
llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
- createStoreInstBefore(Index, getNormalCleanupDestSlot(), BI);
+ createStoreInstBefore(Index, getNormalCleanupDestSlot(), BI, *this);
// Adjust BI to point to the first cleanup block.
{
@@ -1171,25 +1169,6 @@ void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
Builder.ClearInsertionPoint();
}
-static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack,
- EHScopeStack::stable_iterator C) {
- // If we needed a normal block for any reason, that counts.
- if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock())
- return true;
-
- // Check whether any enclosed cleanups were needed.
- for (EHScopeStack::stable_iterator
- I = EHStack.getInnermostNormalCleanup();
- I != C; ) {
- assert(C.strictlyEncloses(I));
- EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
- if (S.getNormalBlock()) return true;
- I = S.getEnclosingNormalCleanup();
- }
-
- return false;
-}
-
static bool IsUsedAsEHCleanup(EHScopeStack &EHStack,
EHScopeStack::stable_iterator cleanup) {
// If we needed an EH block for any reason, that counts.
@@ -1238,8 +1217,7 @@ static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
// Calculate whether the cleanup was used:
// - as a normal cleanup
- if (Scope.isNormalCleanup() &&
- (isActivatedInConditional || IsUsedAsNormalCleanup(CGF.EHStack, C))) {
+ if (Scope.isNormalCleanup()) {
Scope.setTestFlagInNormalCleanup();
needFlag = true;
}
@@ -1252,13 +1230,16 @@ static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
}
// If it hasn't yet been used as either, we're done.
- if (!needFlag) return;
+ if (!needFlag)
+ return;
Address var = Scope.getActiveFlag();
if (!var.isValid()) {
+ CodeGenFunction::AllocaTrackerRAII AllocaTracker(CGF);
var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), CharUnits::One(),
"cleanup.isactive");
Scope.setActiveFlag(var);
+ Scope.AddAuxAllocas(AllocaTracker.Take());
assert(dominatingIP && "no existing variable and no dominating IP!");
@@ -1269,9 +1250,9 @@ static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
// If we're in a conditional block, ignore the dominating IP and
// use the outermost conditional branch.
if (CGF.isInConditionalBranch()) {
- CGF.setBeforeOutermostConditional(value, var);
+ CGF.setBeforeOutermostConditional(value, var, CGF);
} else {
- createStoreInstBefore(value, var, dominatingIP);
+ createStoreInstBefore(value, var, dominatingIP, CGF);
}
}
@@ -1301,17 +1282,8 @@ void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
// to the current RunCleanupsScope.
if (C == EHStack.stable_begin() &&
CurrentCleanupScopeDepth.strictlyEncloses(C)) {
- // Per comment below, checking EHAsynch is not really necessary
- // it's there to assure zero-impact w/o EHAsynch option
- if (!Scope.isNormalCleanup() && getLangOpts().EHAsynch) {
- PopCleanupBlock();
- } else {
- // If it's a normal cleanup, we need to pretend that the
- // fallthrough is unreachable.
- CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
- PopCleanupBlock();
- Builder.restoreIP(SavedIP);
- }
+ PopCleanupBlock(/*FallthroughIsBranchThrough=*/false,
+ /*ForDeactivation=*/true);
return;
}
@@ -1321,7 +1293,7 @@ void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
Scope.setActive(false);
}
-Address CodeGenFunction::getNormalCleanupDestSlot() {
+RawAddress CodeGenFunction::getNormalCleanupDestSlot() {
if (!NormalCleanupDest.isValid())
NormalCleanupDest =
CreateDefaultAlignTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h
index fcfbf41b0eaf..c73c97146abc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h
@@ -16,8 +16,11 @@
#include "EHScopeStack.h"
#include "Address.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Instruction.h"
namespace llvm {
class BasicBlock;
@@ -40,6 +43,10 @@ struct CatchTypeInfo {
/// A protected scope for zero-cost EH handling.
class EHScope {
+public:
+ enum Kind { Cleanup, Catch, Terminate, Filter };
+
+private:
llvm::BasicBlock *CachedLandingPad;
llvm::BasicBlock *CachedEHDispatchBlock;
@@ -47,6 +54,7 @@ class EHScope {
class CommonBitFields {
friend class EHScope;
+ LLVM_PREFERRED_TYPE(Kind)
unsigned Kind : 3;
};
enum { NumCommonBits = 3 };
@@ -64,21 +72,27 @@ protected:
unsigned : NumCommonBits;
/// Whether this cleanup needs to be run along normal edges.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsNormalCleanup : 1;
/// Whether this cleanup needs to be run along exception edges.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsEHCleanup : 1;
/// Whether this cleanup is currently active.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsActive : 1;
/// Whether this cleanup is a lifetime marker
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsLifetimeMarker : 1;
/// Whether the normal cleanup should test the activation flag.
+ LLVM_PREFERRED_TYPE(bool)
unsigned TestFlagInNormalCleanup : 1;
/// Whether the EH cleanup should test the activation flag.
+ LLVM_PREFERRED_TYPE(bool)
unsigned TestFlagInEHCleanup : 1;
/// The amount of extra storage needed by the Cleanup.
@@ -101,8 +115,6 @@ protected:
};
public:
- enum Kind { Cleanup, Catch, Terminate, Filter };
-
EHScope(Kind kind, EHScopeStack::stable_iterator enclosingEHScope)
: CachedLandingPad(nullptr), CachedEHDispatchBlock(nullptr),
EnclosingEHScope(enclosingEHScope) {
@@ -257,6 +269,51 @@ class alignas(8) EHCleanupScope : public EHScope {
};
mutable struct ExtInfo *ExtInfo;
+ /// Erases auxillary allocas and their usages for an unused cleanup.
+ /// Cleanups should mark these allocas as 'used' if the cleanup is
+ /// emitted, otherwise these instructions would be erased.
+ struct AuxillaryAllocas {
+ SmallVector<llvm::Instruction *, 1> AuxAllocas;
+ bool used = false;
+
+ // Records a potentially unused instruction to be erased later.
+ void Add(llvm::AllocaInst *Alloca) { AuxAllocas.push_back(Alloca); }
+
+ // Mark all recorded instructions as used. These will not be erased later.
+ void MarkUsed() {
+ used = true;
+ AuxAllocas.clear();
+ }
+
+ ~AuxillaryAllocas() {
+ if (used)
+ return;
+ llvm::SetVector<llvm::Instruction *> Uses;
+ for (auto *Inst : llvm::reverse(AuxAllocas))
+ CollectUses(Inst, Uses);
+ // Delete uses in the reverse order of insertion.
+ for (auto *I : llvm::reverse(Uses))
+ I->eraseFromParent();
+ }
+
+ private:
+ void CollectUses(llvm::Instruction *I,
+ llvm::SetVector<llvm::Instruction *> &Uses) {
+ if (!I || !Uses.insert(I))
+ return;
+ for (auto *User : I->users())
+ CollectUses(cast<llvm::Instruction>(User), Uses);
+ }
+ };
+ mutable struct AuxillaryAllocas *AuxAllocas;
+
+ AuxillaryAllocas &getAuxillaryAllocas() {
+ if (!AuxAllocas) {
+ AuxAllocas = new struct AuxillaryAllocas();
+ }
+ return *AuxAllocas;
+ }
+
/// The number of fixups required by enclosing scopes (not including
/// this one). If this is the top cleanup scope, all the fixups
/// from this index onwards belong to this scope.
@@ -289,7 +346,7 @@ public:
EHScopeStack::stable_iterator enclosingEH)
: EHScope(EHScope::Cleanup, enclosingEH),
EnclosingNormal(enclosingNormal), NormalBlock(nullptr),
- ActiveFlag(Address::invalid()), ExtInfo(nullptr),
+ ActiveFlag(Address::invalid()), ExtInfo(nullptr), AuxAllocas(nullptr),
FixupDepth(fixupDepth) {
CleanupBits.IsNormalCleanup = isNormal;
CleanupBits.IsEHCleanup = isEH;
@@ -303,8 +360,15 @@ public:
}
void Destroy() {
+ if (AuxAllocas)
+ delete AuxAllocas;
delete ExtInfo;
}
+ void AddAuxAllocas(llvm::SmallVector<llvm::AllocaInst *> Allocas) {
+ for (auto *Alloca : Allocas)
+ getAuxillaryAllocas().Add(Alloca);
+ }
+ void MarkEmitted() { getAuxillaryAllocas().MarkUsed(); }
// Objects of EHCleanupScope are not destructed. Use Destroy().
~EHCleanupScope() = delete;
@@ -324,7 +388,7 @@ public:
Address getActiveFlag() const {
return ActiveFlag;
}
- void setActiveFlag(Address Var) {
+ void setActiveFlag(RawAddress Var) {
assert(Var.getAlignment().isOne());
ActiveFlag = Var;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
index 888d30bfb3e1..a8a70186c2c5 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
@@ -103,7 +103,7 @@ static void createCoroData(CodeGenFunction &CGF,
return;
}
- CurCoro.Data = std::unique_ptr<CGCoroData>(new CGCoroData);
+ CurCoro.Data = std::make_unique<CGCoroData>();
CurCoro.Data->CoroId = CoroId;
CurCoro.Data->CoroIdExpr = CoroIdExpr;
}
@@ -141,7 +141,7 @@ static bool FunctionCanThrow(const FunctionDecl *D) {
Proto->canThrow() != CT_Cannot;
}
-static bool ResumeStmtCanThrow(const Stmt *S) {
+static bool StmtCanThrow(const Stmt *S) {
if (const auto *CE = dyn_cast<CallExpr>(S)) {
const auto *Callee = CE->getDirectCallee();
if (!Callee)
@@ -167,7 +167,7 @@ static bool ResumeStmtCanThrow(const Stmt *S) {
}
for (const auto *child : S->children())
- if (ResumeStmtCanThrow(child))
+ if (StmtCanThrow(child))
return true;
return false;
@@ -178,18 +178,31 @@ static bool ResumeStmtCanThrow(const Stmt *S) {
// auto && x = CommonExpr();
// if (!x.await_ready()) {
// llvm_coro_save();
-// x.await_suspend(...); (*)
-// llvm_coro_suspend(); (**)
+// llvm_coro_await_suspend(&x, frame, wrapper) (*) (**)
+// llvm_coro_suspend(); (***)
// }
// x.await_resume();
//
// where the result of the entire expression is the result of x.await_resume()
//
-// (*) If x.await_suspend return type is bool, it allows to veto a suspend:
+// (*) llvm_coro_await_suspend_{void, bool, handle} is lowered to
+// wrapper(&x, frame) when it's certain not to interfere with
+// coroutine transform. await_suspend expression is
+// asynchronous to the coroutine body and not all analyses
+// and transformations can handle it correctly at the moment.
+//
+// Wrapper function encapsulates x.await_suspend(...) call and looks like:
+//
+// auto __await_suspend_wrapper(auto& awaiter, void* frame) {
+// std::coroutine_handle<> handle(frame);
+// return awaiter.await_suspend(handle);
+// }
+//
+// (**) If x.await_suspend return type is bool, it allows to veto a suspend:
// if (x.await_suspend(...))
// llvm_coro_suspend();
//
-// (**) llvm_coro_suspend() encodes three possible continuations as
+// (***) llvm_coro_suspend() encodes three possible continuations as
// a switch instruction:
//
// %where-to = call i8 @llvm.coro.suspend(...)
@@ -212,9 +225,10 @@ static LValueOrRValue emitSuspendExpression(CodeGenFunction &CGF, CGCoroData &Co
bool ignoreResult, bool forLValue) {
auto *E = S.getCommonExpr();
- auto Binder =
+ auto CommonBinder =
CodeGenFunction::OpaqueValueMappingData::bind(CGF, S.getOpaqueValue(), E);
- auto UnbindOnExit = llvm::make_scope_exit([&] { Binder.unbind(CGF); });
+ auto UnbindCommonOnExit =
+ llvm::make_scope_exit([&] { CommonBinder.unbind(CGF); });
auto Prefix = buildSuspendPrefixStr(Coro, Kind);
BasicBlock *ReadyBlock = CGF.createBasicBlock(Prefix + Twine(".ready"));
@@ -232,16 +246,74 @@ static LValueOrRValue emitSuspendExpression(CodeGenFunction &CGF, CGCoroData &Co
auto *NullPtr = llvm::ConstantPointerNull::get(CGF.CGM.Int8PtrTy);
auto *SaveCall = Builder.CreateCall(CoroSave, {NullPtr});
+ auto SuspendWrapper = CodeGenFunction(CGF.CGM).generateAwaitSuspendWrapper(
+ CGF.CurFn->getName(), Prefix, S);
+
CGF.CurCoro.InSuspendBlock = true;
- auto *SuspendRet = CGF.EmitScalarExpr(S.getSuspendExpr());
+
+ assert(CGF.CurCoro.Data && CGF.CurCoro.Data->CoroBegin &&
+ "expected to be called in coroutine context");
+
+ SmallVector<llvm::Value *, 3> SuspendIntrinsicCallArgs;
+ SuspendIntrinsicCallArgs.push_back(
+ CGF.getOrCreateOpaqueLValueMapping(S.getOpaqueValue()).getPointer(CGF));
+
+ SuspendIntrinsicCallArgs.push_back(CGF.CurCoro.Data->CoroBegin);
+ SuspendIntrinsicCallArgs.push_back(SuspendWrapper);
+
+ const auto SuspendReturnType = S.getSuspendReturnType();
+ llvm::Intrinsic::ID AwaitSuspendIID;
+
+ switch (SuspendReturnType) {
+ case CoroutineSuspendExpr::SuspendReturnType::SuspendVoid:
+ AwaitSuspendIID = llvm::Intrinsic::coro_await_suspend_void;
+ break;
+ case CoroutineSuspendExpr::SuspendReturnType::SuspendBool:
+ AwaitSuspendIID = llvm::Intrinsic::coro_await_suspend_bool;
+ break;
+ case CoroutineSuspendExpr::SuspendReturnType::SuspendHandle:
+ AwaitSuspendIID = llvm::Intrinsic::coro_await_suspend_handle;
+ break;
+ }
+
+ llvm::Function *AwaitSuspendIntrinsic = CGF.CGM.getIntrinsic(AwaitSuspendIID);
+
+ // SuspendHandle might throw since it also resumes the returned handle.
+ const bool AwaitSuspendCanThrow =
+ SuspendReturnType ==
+ CoroutineSuspendExpr::SuspendReturnType::SuspendHandle ||
+ StmtCanThrow(S.getSuspendExpr());
+
+ llvm::CallBase *SuspendRet = nullptr;
+ // FIXME: add call attributes?
+ if (AwaitSuspendCanThrow)
+ SuspendRet =
+ CGF.EmitCallOrInvoke(AwaitSuspendIntrinsic, SuspendIntrinsicCallArgs);
+ else
+ SuspendRet = CGF.EmitNounwindRuntimeCall(AwaitSuspendIntrinsic,
+ SuspendIntrinsicCallArgs);
+
+ assert(SuspendRet);
CGF.CurCoro.InSuspendBlock = false;
- if (SuspendRet != nullptr && SuspendRet->getType()->isIntegerTy(1)) {
+ switch (SuspendReturnType) {
+ case CoroutineSuspendExpr::SuspendReturnType::SuspendVoid:
+ assert(SuspendRet->getType()->isVoidTy());
+ break;
+ case CoroutineSuspendExpr::SuspendReturnType::SuspendBool: {
+ assert(SuspendRet->getType()->isIntegerTy());
+
// Veto suspension if requested by bool returning await_suspend.
BasicBlock *RealSuspendBlock =
CGF.createBasicBlock(Prefix + Twine(".suspend.bool"));
CGF.Builder.CreateCondBr(SuspendRet, RealSuspendBlock, ReadyBlock);
CGF.EmitBlock(RealSuspendBlock);
+ break;
+ }
+ case CoroutineSuspendExpr::SuspendReturnType::SuspendHandle: {
+ assert(SuspendRet->getType()->isVoidTy());
+ break;
+ }
}
// Emit the suspend point.
@@ -267,7 +339,7 @@ static LValueOrRValue emitSuspendExpression(CodeGenFunction &CGF, CGCoroData &Co
// is marked as 'noexcept', we avoid generating this additional IR.
CXXTryStmt *TryStmt = nullptr;
if (Coro.ExceptionHandler && Kind == AwaitKind::Init &&
- ResumeStmtCanThrow(S.getResumeExpr())) {
+ StmtCanThrow(S.getResumeExpr())) {
Coro.ResumeEHVar =
CGF.CreateTempAlloca(Builder.getInt1Ty(), Prefix + Twine("resume.eh"));
Builder.CreateFlagStore(true, Coro.ResumeEHVar);
@@ -338,6 +410,67 @@ static QualType getCoroutineSuspendExprReturnType(const ASTContext &Ctx,
}
#endif
+llvm::Function *
+CodeGenFunction::generateAwaitSuspendWrapper(Twine const &CoroName,
+ Twine const &SuspendPointName,
+ CoroutineSuspendExpr const &S) {
+ std::string FuncName =
+ (CoroName + ".__await_suspend_wrapper__" + SuspendPointName).str();
+
+ ASTContext &C = getContext();
+
+ FunctionArgList args;
+
+ ImplicitParamDecl AwaiterDecl(C, C.VoidPtrTy, ImplicitParamKind::Other);
+ ImplicitParamDecl FrameDecl(C, C.VoidPtrTy, ImplicitParamKind::Other);
+ QualType ReturnTy = S.getSuspendExpr()->getType();
+
+ args.push_back(&AwaiterDecl);
+ args.push_back(&FrameDecl);
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);
+
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
+
+ llvm::Function *Fn = llvm::Function::Create(
+ LTy, llvm::GlobalValue::PrivateLinkage, FuncName, &CGM.getModule());
+
+ Fn->addParamAttr(0, llvm::Attribute::AttrKind::NonNull);
+ Fn->addParamAttr(0, llvm::Attribute::AttrKind::NoUndef);
+
+ Fn->addParamAttr(1, llvm::Attribute::AttrKind::NoUndef);
+
+ Fn->setMustProgress();
+ Fn->addFnAttr(llvm::Attribute::AttrKind::AlwaysInline);
+
+ StartFunction(GlobalDecl(), ReturnTy, Fn, FI, args);
+
+ // FIXME: add TBAA metadata to the loads
+ llvm::Value *AwaiterPtr = Builder.CreateLoad(GetAddrOfLocalVar(&AwaiterDecl));
+ auto AwaiterLValue =
+ MakeNaturalAlignAddrLValue(AwaiterPtr, AwaiterDecl.getType());
+
+ CurAwaitSuspendWrapper.FramePtr =
+ Builder.CreateLoad(GetAddrOfLocalVar(&FrameDecl));
+
+ auto AwaiterBinder = CodeGenFunction::OpaqueValueMappingData::bind(
+ *this, S.getOpaqueValue(), AwaiterLValue);
+
+ auto *SuspendRet = EmitScalarExpr(S.getSuspendExpr());
+
+ auto UnbindCommonOnExit =
+ llvm::make_scope_exit([&] { AwaiterBinder.unbind(*this); });
+ if (SuspendRet != nullptr) {
+ Fn->addRetAttr(llvm::Attribute::AttrKind::NoUndef);
+ Builder.CreateStore(SuspendRet, ReturnValue);
+ }
+
+ CurAwaitSuspendWrapper.FramePtr = nullptr;
+ FinishFunction();
+ return Fn;
+}
+
LValue
CodeGenFunction::EmitCoawaitLValue(const CoawaitExpr *E) {
assert(getCoroutineSuspendExprReturnType(getContext(), E)->isReferenceType() &&
@@ -733,8 +866,8 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
EmitStmt(S.getPromiseDeclStmt());
Address PromiseAddr = GetAddrOfLocalVar(S.getPromiseDecl());
- auto *PromiseAddrVoidPtr =
- new llvm::BitCastInst(PromiseAddr.getPointer(), VoidPtrTy, "", CoroId);
+ auto *PromiseAddrVoidPtr = new llvm::BitCastInst(
+ PromiseAddr.emitRawPointer(*this), VoidPtrTy, "", CoroId);
// Update CoroId to refer to the promise. We could not do it earlier because
// promise local variable was not emitted yet.
CoroId->setArgOperand(1, PromiseAddrVoidPtr);
@@ -834,6 +967,11 @@ RValue CodeGenFunction::EmitCoroutineIntrinsic(const CallExpr *E,
if (CurCoro.Data && CurCoro.Data->CoroBegin) {
return RValue::get(CurCoro.Data->CoroBegin);
}
+
+ if (CurAwaitSuspendWrapper.FramePtr) {
+ return RValue::get(CurAwaitSuspendWrapper.FramePtr);
+ }
+
CGM.Error(E->getBeginLoc(), "this builtin expect that __builtin_coro_begin "
"has been used earlier in this function");
auto *NullPtr = llvm::ConstantPointerNull::get(Builder.getPtrTy());
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
index 0f3f684d61dc..3d8a715b692d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -32,6 +32,7 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Version.h"
+#include "clang/CodeGen/ModuleBuilder.h"
#include "clang/Frontend/FrontendOptions.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/ModuleMap.h"
@@ -58,7 +59,16 @@ using namespace clang::CodeGen;
static uint32_t getTypeAlignIfRequired(const Type *Ty, const ASTContext &Ctx) {
auto TI = Ctx.getTypeInfo(Ty);
- return TI.isAlignRequired() ? TI.Align : 0;
+ if (TI.isAlignRequired())
+ return TI.Align;
+
+ // MaxFieldAlignmentAttr is the attribute added to types
+ // declared after #pragma pack(n).
+ if (auto *Decl = Ty->getAsRecordDecl())
+ if (Decl->hasAttr<MaxFieldAlignmentAttr>())
+ return TI.Align;
+
+ return 0;
}
static uint32_t getTypeAlignIfRequired(QualType Ty, const ASTContext &Ctx) {
@@ -856,7 +866,16 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
return SingletonId; \
}
#include "clang/Basic/WebAssemblyReferenceTypes.def"
-
+#define AMDGPU_OPAQUE_PTR_TYPE(Name, MangledName, AS, Width, Align, Id, \
+ SingletonId) \
+ case BuiltinType::Id: { \
+ if (!SingletonId) \
+ SingletonId = \
+ DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type, \
+ MangledName, TheCU, TheCU->getFile(), 0); \
+ return SingletonId; \
+ }
+#include "clang/Basic/AMDGPUTypes.def"
case BuiltinType::UChar:
case BuiltinType::Char_U:
Encoding = llvm::dwarf::DW_ATE_unsigned_char;
@@ -1313,6 +1332,44 @@ llvm::DIType *CGDebugInfo::CreateType(const BlockPointerType *Ty,
return DBuilder.createPointerType(EltTy, Size);
}
+static llvm::SmallVector<TemplateArgument>
+GetTemplateArgs(const TemplateDecl *TD, const TemplateSpecializationType *Ty) {
+ assert(Ty->isTypeAlias());
+ // TemplateSpecializationType doesn't know if its template args are
+ // being substituted into a parameter pack. We can find out if that's
+ // the case now by inspecting the TypeAliasTemplateDecl template
+ // parameters. Insert Ty's template args into SpecArgs, bundling args
+ // passed to a parameter pack into a TemplateArgument::Pack. It also
+ // doesn't know the value of any defaulted args, so collect those now
+ // too.
+ SmallVector<TemplateArgument> SpecArgs;
+ ArrayRef SubstArgs = Ty->template_arguments();
+ for (const NamedDecl *Param : TD->getTemplateParameters()->asArray()) {
+ // If Param is a parameter pack, pack the remaining arguments.
+ if (Param->isParameterPack()) {
+ SpecArgs.push_back(TemplateArgument(SubstArgs));
+ break;
+ }
+
+ // Skip defaulted args.
+ // FIXME: Ideally, we wouldn't do this. We can read the default values
+ // for each parameter. However, defaulted arguments which are dependent
+ // values or dependent types can't (easily?) be resolved here.
+ if (SubstArgs.empty()) {
+ // If SubstArgs is now empty (we're taking from it each iteration) and
+ // this template parameter isn't a pack, then that should mean we're
+ // using default values for the remaining template parameters (after
+ // which there may be an empty pack too which we will ignore).
+ break;
+ }
+
+ // Take the next argument.
+ SpecArgs.push_back(SubstArgs.front());
+ SubstArgs = SubstArgs.drop_front();
+ }
+ return SpecArgs;
+}
+
llvm::DIType *CGDebugInfo::CreateType(const TemplateSpecializationType *Ty,
llvm::DIFile *Unit) {
assert(Ty->isTypeAlias());
@@ -1332,6 +1389,52 @@ llvm::DIType *CGDebugInfo::CreateType(const TemplateSpecializationType *Ty,
auto PP = getPrintingPolicy();
Ty->getTemplateName().print(OS, PP, TemplateName::Qualified::None);
+ SourceLocation Loc = AliasDecl->getLocation();
+
+ if (CGM.getCodeGenOpts().DebugTemplateAlias &&
+ // FIXME: This is a workaround for the issue
+ // https://github.com/llvm/llvm-project/issues/89774
+ // The TemplateSpecializationType doesn't contain any instantiation
+ // information; dependent template arguments can't be resolved. For now,
+ // fall back to DW_TAG_typedefs for template aliases that are
+ // instantiation dependent, e.g.:
+ // ```
+ // template <int>
+ // using A = int;
+ //
+ // template<int I>
+ // struct S {
+ // using AA = A<I>; // Instantiation dependent.
+ // AA aa;
+ // };
+ //
+ // S<0> s;
+ // ```
+ // S::AA's underlying type A<I> is dependent on I so will be emitted as a
+ // DW_TAG_typedef.
+ !Ty->isInstantiationDependentType()) {
+ auto ArgVector = ::GetTemplateArgs(TD, Ty);
+ TemplateArgs Args = {TD->getTemplateParameters(), ArgVector};
+
+ // FIXME: Respect DebugTemplateNameKind::Mangled, e.g. by using GetName.
+ // Note we can't use GetName without additional work: TypeAliasTemplateDecl
+ // doesn't have instantiation information, so
+ // TypeAliasTemplateDecl::getNameForDiagnostic wouldn't have access to the
+ // template args.
+ std::string Name;
+ llvm::raw_string_ostream OS(Name);
+ TD->getNameForDiagnostic(OS, PP, /*Qualified=*/false);
+ if (CGM.getCodeGenOpts().getDebugSimpleTemplateNames() !=
+ llvm::codegenoptions::DebugTemplateNamesKind::Simple ||
+ !HasReconstitutableArgs(Args.Args))
+ printTemplateArgumentList(OS, Args.Args, PP);
+
+ llvm::DIDerivedType *AliasTy = DBuilder.createTemplateAlias(
+ Src, Name, getOrCreateFile(Loc), getLineNumber(Loc),
+ getDeclContextDescriptor(AliasDecl), CollectTemplateParams(Args, Unit));
+ return AliasTy;
+ }
+
// Disable PrintCanonicalTypes here because we want
// the DW_AT_name to benefit from the TypePrinter's ability
// to skip defaulted template arguments.
@@ -1343,8 +1446,6 @@ llvm::DIType *CGDebugInfo::CreateType(const TemplateSpecializationType *Ty,
PP.PrintCanonicalTypes = false;
printTemplateArgumentList(OS, Ty->template_arguments(), PP,
TD->getTemplateParameters());
-
- SourceLocation Loc = AliasDecl->getLocation();
return DBuilder.createTypedef(Src, OS.str(), getOrCreateFile(Loc),
getLineNumber(Loc),
getDeclContextDescriptor(AliasDecl));
@@ -1440,8 +1541,7 @@ static unsigned getDwarfCC(CallingConv CC) {
case CC_Swift:
return llvm::dwarf::DW_CC_LLVM_Swift;
case CC_SwiftAsync:
- // [FIXME: swiftasynccc] Update to SwiftAsync once LLVM support lands.
- return llvm::dwarf::DW_CC_LLVM_Swift;
+ return llvm::dwarf::DW_CC_LLVM_SwiftTail;
case CC_PreserveMost:
return llvm::dwarf::DW_CC_LLVM_PreserveMost;
case CC_PreserveAll:
@@ -1450,6 +1550,10 @@ static unsigned getDwarfCC(CallingConv CC) {
return llvm::dwarf::DW_CC_LLVM_X86RegCall;
case CC_M68kRTD:
return llvm::dwarf::DW_CC_LLVM_M68kRTD;
+ case CC_PreserveNone:
+ return llvm::dwarf::DW_CC_LLVM_PreserveNone;
+ case CC_RISCVVectorCall:
+ return llvm::dwarf::DW_CC_LLVM_RISCVVectorCall;
}
return 0;
}
@@ -1628,6 +1732,28 @@ llvm::DIType *CGDebugInfo::createFieldType(
offsetInBits, flags, debugType, Annotations);
}
+llvm::DISubprogram *
+CGDebugInfo::createInlinedTrapSubprogram(StringRef FuncName,
+ llvm::DIFile *FileScope) {
+ // We are caching the subprogram because we don't want to duplicate
+ // subprograms with the same message. Note that `SPFlagDefinition` prevents
+ // subprograms from being uniqued.
+ llvm::DISubprogram *&SP = InlinedTrapFuncMap[FuncName];
+
+ if (!SP) {
+ llvm::DISubroutineType *DIFnTy = DBuilder.createSubroutineType(nullptr);
+ SP = DBuilder.createFunction(
+ /*Scope=*/FileScope, /*Name=*/FuncName, /*LinkageName=*/StringRef(),
+ /*File=*/FileScope, /*LineNo=*/0, /*Ty=*/DIFnTy,
+ /*ScopeLine=*/0,
+ /*Flags=*/llvm::DINode::FlagArtificial,
+ /*SPFlags=*/llvm::DISubprogram::SPFlagDefinition,
+ /*TParams=*/nullptr, /*ThrownTypes=*/nullptr, /*Annotations=*/nullptr);
+ }
+
+ return SP;
+}
+
void CGDebugInfo::CollectRecordLambdaFields(
const CXXRecordDecl *CXXDecl, SmallVectorImpl<llvm::Metadata *> &elements,
llvm::DIType *RecordTy) {
@@ -2751,7 +2877,7 @@ CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
// Collect data fields (including static variables and any initializers).
CollectRecordFields(RD, DefUnit, EltTys, FwdDecl);
- if (CXXDecl)
+ if (CXXDecl && !CGM.getCodeGenOpts().DebugOmitUnreferencedMethods)
CollectCXXMemberFunctions(CXXDecl, DefUnit, EltTys, FwdDecl);
LexicalBlockStack.pop_back();
@@ -3237,7 +3363,7 @@ llvm::DIType *CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile *Unit) {
// };
int64_t Count = -1; // Count == -1 is an unbounded array.
if (const auto *CAT = dyn_cast<ConstantArrayType>(Ty))
- Count = CAT->getSize().getZExtValue();
+ Count = CAT->getZExtSize();
else if (const auto *VAT = dyn_cast<VariableArrayType>(Ty)) {
if (Expr *Size = VAT->getSizeExpr()) {
Expr::EvalResult Result;
@@ -3424,6 +3550,23 @@ llvm::DIMacroFile *CGDebugInfo::CreateTempMacroFile(llvm::DIMacroFile *Parent,
return DBuilder.createTempMacroFile(Parent, Line, FName);
}
+llvm::DILocation *CGDebugInfo::CreateTrapFailureMessageFor(
+ llvm::DebugLoc TrapLocation, StringRef Category, StringRef FailureMsg) {
+ // Create a debug location from `TrapLocation` that adds an artificial inline
+ // frame.
+ SmallString<64> FuncName(ClangTrapPrefix);
+
+ FuncName += "$";
+ FuncName += Category;
+ FuncName += "$";
+ FuncName += FailureMsg;
+
+ llvm::DISubprogram *TrapSP =
+ createInlinedTrapSubprogram(FuncName, TrapLocation->getFile());
+ return llvm::DILocation::get(CGM.getLLVMContext(), /*Line=*/0, /*Column=*/0,
+ /*Scope=*/TrapSP, /*InlinedAt=*/TrapLocation);
+}
+
static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
Qualifiers Quals;
do {
@@ -3461,6 +3604,9 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
case Type::BTFTagAttributed:
T = cast<BTFTagAttributedType>(T)->getWrappedType();
break;
+ case Type::CountAttributed:
+ T = cast<CountAttributedType>(T)->desugar();
+ break;
case Type::Elaborated:
T = cast<ElaboratedType>(T)->getNamedType();
break;
@@ -3483,6 +3629,10 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
T = DT;
break;
}
+ case Type::PackIndexing: {
+ T = cast<PackIndexingType>(T)->getSelectedType();
+ break;
+ }
case Type::Adjusted:
case Type::Decayed:
// Decayed and adjusted types use the adjusted type in LLVM and DWARF.
@@ -3631,6 +3781,7 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
case Type::ConstantArray:
case Type::VariableArray:
case Type::IncompleteArray:
+ case Type::ArrayParameter:
return CreateType(cast<ArrayType>(Ty), Unit);
case Type::LValueReference:
@@ -3652,6 +3803,7 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
case Type::TemplateSpecialization:
return CreateType(cast<TemplateSpecializationType>(Ty), Unit);
+ case Type::CountAttributed:
case Type::Auto:
case Type::Attributed:
case Type::BTFTagAttributed:
@@ -3666,6 +3818,7 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
case Type::TypeOfExpr:
case Type::TypeOf:
case Type::Decltype:
+ case Type::PackIndexing:
case Type::UnaryTransform:
break;
}
@@ -4770,40 +4923,6 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
return D;
}
-llvm::DIType *CGDebugInfo::CreateBindingDeclType(const BindingDecl *BD) {
- llvm::DIFile *Unit = getOrCreateFile(BD->getLocation());
-
- // If the declaration is bound to a bitfield struct field, its type may have a
- // size that is different from its deduced declaration type's.
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(BD->getBinding())) {
- if (const FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
- if (FD->isBitField()) {
- ASTContext &Context = CGM.getContext();
- const CGRecordLayout &RL =
- CGM.getTypes().getCGRecordLayout(FD->getParent());
- const CGBitFieldInfo &Info = RL.getBitFieldInfo(FD);
-
- // Find an integer type with the same bitwidth as the bitfield size. If
- // no suitable type is present in the target, give up on producing debug
- // information as it would be wrong. It is certainly possible to produce
- // correct debug info, but the logic isn't currently implemented.
- uint64_t BitfieldSizeInBits = Info.Size;
- QualType IntTy =
- Context.getIntTypeForBitwidth(BitfieldSizeInBits, Info.IsSigned);
- if (IntTy.isNull())
- return nullptr;
- Qualifiers Quals = BD->getType().getQualifiers();
- QualType FinalTy = Context.getQualifiedType(IntTy, Quals);
- llvm::DIType *Ty = getOrCreateType(FinalTy, Unit);
- assert(Ty);
- return Ty;
- }
- }
- }
-
- return getOrCreateType(BD->getType(), Unit);
-}
-
llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const BindingDecl *BD,
llvm::Value *Storage,
std::optional<unsigned> ArgNo,
@@ -4818,7 +4937,8 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const BindingDecl *BD,
if (isa<DeclRefExpr>(BD->getBinding()))
return nullptr;
- llvm::DIType *Ty = CreateBindingDeclType(BD);
+ llvm::DIFile *Unit = getOrCreateFile(BD->getLocation());
+ llvm::DIType *Ty = getOrCreateType(BD->getType(), Unit);
// If there is no debug info for this type then do not emit debug info
// for this variable.
@@ -4844,7 +4964,6 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const BindingDecl *BD,
unsigned Column = getColumnNumber(BD->getLocation());
StringRef Name = BD->getName();
auto *Scope = cast<llvm::DIScope>(LexicalBlockStack.back());
- llvm::DIFile *Unit = getOrCreateFile(BD->getLocation());
// Create the descriptor for the variable.
llvm::DILocalVariable *D = DBuilder.createAutoVariable(
Scope, Name, Unit, Line, Ty, CGM.getLangOpts().Optimize,
@@ -4858,13 +4977,29 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const BindingDecl *BD,
const ASTRecordLayout &layout =
CGM.getContext().getASTRecordLayout(parent);
const uint64_t fieldOffset = layout.getFieldOffset(fieldIndex);
-
- if (fieldOffset != 0) {
- // Currently if the field offset is not a multiple of byte, the produced
- // location would not be accurate. Therefore give up.
- if (fieldOffset % CGM.getContext().getCharWidth() != 0)
- return nullptr;
-
+ if (FD->isBitField()) {
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(FD->getParent());
+ const CGBitFieldInfo &Info = RL.getBitFieldInfo(FD);
+ // Use DW_OP_plus_uconst to adjust to the start of the bitfield
+ // storage.
+ if (!Info.StorageOffset.isZero()) {
+ Expr.push_back(llvm::dwarf::DW_OP_plus_uconst);
+ Expr.push_back(Info.StorageOffset.getQuantity());
+ }
+ // Use LLVM_extract_bits to extract the appropriate bits from this
+ // bitfield.
+ Expr.push_back(Info.IsSigned
+ ? llvm::dwarf::DW_OP_LLVM_extract_bits_sext
+ : llvm::dwarf::DW_OP_LLVM_extract_bits_zext);
+ Expr.push_back(Info.Offset);
+ // If we have an oversized bitfield then the value won't be more than
+ // the size of the type.
+ const uint64_t TypeSize = CGM.getContext().getTypeSize(BD->getType());
+ Expr.push_back(std::min((uint64_t)Info.Size, TypeSize));
+ } else if (fieldOffset != 0) {
+ assert(fieldOffset % CGM.getContext().getCharWidth() == 0 &&
+ "Unexpected non-bitfield with non-byte-aligned offset");
Expr.push_back(llvm::dwarf::DW_OP_plus_uconst);
Expr.push_back(
CGM.getContext().toCharUnitsFromBits(fieldOffset).getQuantity());
@@ -5350,6 +5485,54 @@ static bool IsReconstitutableType(QualType QT) {
return T.Reconstitutable;
}
+bool CGDebugInfo::HasReconstitutableArgs(
+ ArrayRef<TemplateArgument> Args) const {
+ return llvm::all_of(Args, [&](const TemplateArgument &TA) {
+ switch (TA.getKind()) {
+ case TemplateArgument::Template:
+ // Easy to reconstitute - the value of the parameter in the debug
+ // info is the string name of the template. The template name
+ // itself won't benefit from any name rebuilding, but that's a
+ // representational limitation - maybe DWARF could be
+ // changed/improved to use some more structural representation.
+ return true;
+ case TemplateArgument::Declaration:
+ // Reference and pointer non-type template parameters point to
+ // variables, functions, etc and their value is, at best (for
+ // variables) represented as an address - not a reference to the
+ // DWARF describing the variable/function/etc. This makes it hard,
+ // possibly impossible to rebuild the original name - looking up
+ // the address in the executable file's symbol table would be
+ // needed.
+ return false;
+ case TemplateArgument::NullPtr:
+ // These could be rebuilt, but figured they're close enough to the
+ // declaration case, and not worth rebuilding.
+ return false;
+ case TemplateArgument::Pack:
+ // A pack is invalid if any of the elements of the pack are
+ // invalid.
+ return HasReconstitutableArgs(TA.getPackAsArray());
+ case TemplateArgument::Integral:
+ // Larger integers get encoded as DWARF blocks which are a bit
+ // harder to parse back into a large integer, etc - so punting on
+ // this for now. Re-parsing the integers back into APInt is
+ // probably feasible some day.
+ return TA.getAsIntegral().getBitWidth() <= 64 &&
+ IsReconstitutableType(TA.getIntegralType());
+ case TemplateArgument::StructuralValue:
+ return false;
+ case TemplateArgument::Type:
+ return IsReconstitutableType(TA.getAsType());
+ case TemplateArgument::Expression:
+ return IsReconstitutableType(TA.getAsExpr()->getType());
+ default:
+ llvm_unreachable("Other, unresolved, template arguments should "
+ "not be seen here");
+ }
+ });
+}
+
std::string CGDebugInfo::GetName(const Decl *D, bool Qualified) const {
std::string Name;
llvm::raw_string_ostream OS(Name);
@@ -5376,49 +5559,7 @@ std::string CGDebugInfo::GetName(const Decl *D, bool Qualified) const {
} else if (auto *VD = dyn_cast<VarDecl>(ND)) {
Args = GetTemplateArgs(VD);
}
- std::function<bool(ArrayRef<TemplateArgument>)> HasReconstitutableArgs =
- [&](ArrayRef<TemplateArgument> Args) {
- return llvm::all_of(Args, [&](const TemplateArgument &TA) {
- switch (TA.getKind()) {
- case TemplateArgument::Template:
- // Easy to reconstitute - the value of the parameter in the debug
- // info is the string name of the template. (so the template name
- // itself won't benefit from any name rebuilding, but that's a
- // representational limitation - maybe DWARF could be
- // changed/improved to use some more structural representation)
- return true;
- case TemplateArgument::Declaration:
- // Reference and pointer non-type template parameters point to
- // variables, functions, etc and their value is, at best (for
- // variables) represented as an address - not a reference to the
- // DWARF describing the variable/function/etc. This makes it hard,
- // possibly impossible to rebuild the original name - looking up the
- // address in the executable file's symbol table would be needed.
- return false;
- case TemplateArgument::NullPtr:
- // These could be rebuilt, but figured they're close enough to the
- // declaration case, and not worth rebuilding.
- return false;
- case TemplateArgument::Pack:
- // A pack is invalid if any of the elements of the pack are invalid.
- return HasReconstitutableArgs(TA.getPackAsArray());
- case TemplateArgument::Integral:
- // Larger integers get encoded as DWARF blocks which are a bit
- // harder to parse back into a large integer, etc - so punting on
- // this for now. Re-parsing the integers back into APInt is probably
- // feasible some day.
- return TA.getAsIntegral().getBitWidth() <= 64 &&
- IsReconstitutableType(TA.getIntegralType());
- case TemplateArgument::StructuralValue:
- return false;
- case TemplateArgument::Type:
- return IsReconstitutableType(TA.getAsType());
- default:
- llvm_unreachable("Other, unresolved, template arguments should "
- "not be seen here");
- }
- });
- };
+
// A conversion operator presents complications/ambiguity if there's a
// conversion to class template that is itself a template, eg:
// template<typename T>
@@ -5636,6 +5777,48 @@ void CGDebugInfo::EmitExternalVariable(llvm::GlobalVariable *Var,
Var->addDebugInfo(GVE);
}
+void CGDebugInfo::EmitPseudoVariable(CGBuilderTy &Builder,
+ llvm::Instruction *Value, QualType Ty) {
+ // Only when -g2 or above is specified, debug info for variables will be
+ // generated.
+ if (CGM.getCodeGenOpts().getDebugInfo() <=
+ llvm::codegenoptions::DebugLineTablesOnly)
+ return;
+
+ llvm::DILocation *DIL = Value->getDebugLoc().get();
+ if (!DIL)
+ return;
+
+ llvm::DIFile *Unit = DIL->getFile();
+ llvm::DIType *Type = getOrCreateType(Ty, Unit);
+
+ // Check if Value is already a declared variable and has debug info, in this
+ // case we have nothing to do. Clang emits a declared variable as alloca, and
+ // it is loaded upon use, so we identify such pattern here.
+ if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Value)) {
+ llvm::Value *Var = Load->getPointerOperand();
+ // There can be implicit type cast applied on a variable if it is an opaque
+ // ptr, in this case its debug info may not match the actual type of object
+ // being used as in the next instruction, so we will need to emit a pseudo
+ // variable for type-casted value.
+ auto DeclareTypeMatches = [&](auto *DbgDeclare) {
+ return DbgDeclare->getVariable()->getType() == Type;
+ };
+ if (any_of(llvm::findDbgDeclares(Var), DeclareTypeMatches) ||
+ any_of(llvm::findDVRDeclares(Var), DeclareTypeMatches))
+ return;
+ }
+
+ llvm::DILocalVariable *D =
+ DBuilder.createAutoVariable(LexicalBlockStack.back(), "", nullptr, 0,
+ Type, false, llvm::DINode::FlagArtificial);
+
+ if (auto InsertPoint = Value->getInsertionPointAfterDef()) {
+ DBuilder.insertDbgValueIntrinsic(Value, D, DBuilder.createExpression(), DIL,
+ &**InsertPoint);
+ }
+}
+
void CGDebugInfo::EmitGlobalAlias(const llvm::GlobalValue *GV,
const GlobalDecl GD) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
index 7b60e94555d0..a0c419cf1e20 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
@@ -20,8 +20,8 @@
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeOrdering.h"
+#include "clang/Basic/ASTSourceDescriptor.h"
#include "clang/Basic/CodeGenOptions.h"
-#include "clang/Basic/Module.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
@@ -29,7 +29,9 @@
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Allocator.h"
+#include <map>
#include <optional>
+#include <string>
namespace llvm {
class MDNode;
@@ -38,6 +40,7 @@ class MDNode;
namespace clang {
class ClassTemplateSpecializationDecl;
class GlobalDecl;
+class Module;
class ModuleMap;
class ObjCInterfaceDecl;
class UsingDecl;
@@ -82,6 +85,8 @@ class CGDebugInfo {
#include "clang/Basic/OpenCLExtensionTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) llvm::DIType *SingletonId = nullptr;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
+#define AMDGPU_TYPE(Name, Id, SingletonId) llvm::DIType *SingletonId = nullptr;
+#include "clang/Basic/AMDGPUTypes.def"
/// Cache of previously constructed Types.
llvm::DenseMap<const void *, llvm::TrackingMDRef> TypeCache;
@@ -337,15 +342,20 @@ class CGDebugInfo {
llvm::DIScope *RecordTy,
const RecordDecl *RD);
- /// Create type for binding declarations.
- llvm::DIType *CreateBindingDeclType(const BindingDecl *BD);
-
/// Create an anonnymous zero-size separator for bit-field-decl if needed on
/// the target.
llvm::DIDerivedType *createBitFieldSeparatorIfNeeded(
const FieldDecl *BitFieldDecl, const llvm::DIDerivedType *BitFieldDI,
llvm::ArrayRef<llvm::Metadata *> PreviousFieldsDI, const RecordDecl *RD);
+ /// A cache that maps names of artificial inlined functions to subprograms.
+ llvm::StringMap<llvm::DISubprogram *> InlinedTrapFuncMap;
+
+ /// A function that returns the subprogram corresponding to the artificial
+ /// inlined function for traps.
+ llvm::DISubprogram *createInlinedTrapSubprogram(StringRef FuncName,
+ llvm::DIFile *FileScope);
+
/// Helpers for collecting fields of a record.
/// @{
void CollectRecordLambdaFields(const CXXRecordDecl *CXXDecl,
@@ -529,6 +539,12 @@ public:
/// Emit information about an external variable.
void EmitExternalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl);
+ /// Emit a pseudo variable and debug info for an intermediate value if it does
+ /// not correspond to a variable in the source code, so that a profiler can
+ /// track more accurate usage of certain instructions of interest.
+ void EmitPseudoVariable(CGBuilderTy &Builder, llvm::Instruction *Value,
+ QualType Ty);
+
/// Emit information about global variable alias.
void EmitGlobalAlias(const llvm::GlobalValue *GV, const GlobalDecl Decl);
@@ -602,6 +618,18 @@ public:
return CoroutineParameterMappings;
}
+ /// Create a debug location from `TrapLocation` that adds an artificial inline
+ /// frame where the frame name is
+ ///
+ /// * `<Prefix>:<Category>:<FailureMsg>`
+ ///
+ /// `<Prefix>` is "__clang_trap_msg".
+ ///
+ /// This is used to store failure reasons for traps.
+ llvm::DILocation *CreateTrapFailureMessageFor(llvm::DebugLoc TrapLocation,
+ StringRef Category,
+ StringRef FailureMsg);
+
private:
/// Emit call to llvm.dbg.declare for a variable declaration.
/// Returns a pointer to the DILocalVariable associated with the
@@ -626,7 +654,8 @@ private:
llvm::DIType *WrappedType;
};
- std::string GetName(const Decl*, bool Qualified = false) const;
+ bool HasReconstitutableArgs(ArrayRef<TemplateArgument> Args) const;
+ std::string GetName(const Decl *, bool Qualified = false) const;
/// Build up structure info for the byref. See \a BuildByRefType.
BlockByRefType EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
index aa9997b87ecf..c3251bb5ab56 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
@@ -19,6 +19,7 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
+#include "EHScopeStack.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
@@ -32,9 +33,11 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Sema/Sema.h"
+#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include <optional>
@@ -284,6 +287,7 @@ llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
setTLSMode(GV, D);
setGVProperties(GV, &D);
+ getTargetCodeGenInfo().setTargetAttributes(cast<Decl>(&D), GV, *this);
// Make sure the result is of the correct type.
LangAS ExpectedAS = Ty.getAddressSpace();
@@ -735,18 +739,17 @@ static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
LValue srcLV = CGF.EmitLValue(srcExpr);
// Handle a formal type change to avoid asserting.
- auto srcAddr = srcLV.getAddress(CGF);
+ auto srcAddr = srcLV.getAddress();
if (needsCast) {
- srcAddr =
- srcAddr.withElementType(destLV.getAddress(CGF).getElementType());
+ srcAddr = srcAddr.withElementType(destLV.getAddress().getElementType());
}
// If it was an l-value, use objc_copyWeak.
if (srcExpr->isLValue()) {
- CGF.EmitARCCopyWeak(destLV.getAddress(CGF), srcAddr);
+ CGF.EmitARCCopyWeak(destLV.getAddress(), srcAddr);
} else {
assert(srcExpr->isXValue());
- CGF.EmitARCMoveWeak(destLV.getAddress(CGF), srcAddr);
+ CGF.EmitARCMoveWeak(destLV.getAddress(), srcAddr);
}
return true;
}
@@ -764,7 +767,7 @@ static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
static void drillIntoBlockVariable(CodeGenFunction &CGF,
LValue &lvalue,
const VarDecl *var) {
- lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(CGF), var));
+ lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(), var));
}
void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
@@ -823,18 +826,17 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
if (capturedByInit) {
// We can use a simple GEP for this because it can't have been
// moved yet.
- tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(*this),
+ tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(),
cast<VarDecl>(D),
/*follow*/ false));
}
- auto ty =
- cast<llvm::PointerType>(tempLV.getAddress(*this).getElementType());
+ auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType());
llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());
// If __weak, we want to use a barrier under certain conditions.
if (lifetime == Qualifiers::OCL_Weak)
- EmitARCInitWeak(tempLV.getAddress(*this), zero);
+ EmitARCInitWeak(tempLV.getAddress(), zero);
// Otherwise just do a simple store.
else
@@ -877,9 +879,9 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
if (accessedByInit)
- EmitARCStoreWeak(lvalue.getAddress(*this), value, /*ignored*/ true);
+ EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
else
- EmitARCInitWeak(lvalue.getAddress(*this), value);
+ EmitARCInitWeak(lvalue.getAddress(), value);
return;
}
@@ -1382,7 +1384,7 @@ void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
// For each dimension stores its QualType and corresponding
// size-expression Value.
SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
- SmallVector<IdentifierInfo *, 4> VLAExprNames;
+ SmallVector<const IdentifierInfo *, 4> VLAExprNames;
// Break down the array into individual dimensions.
QualType Type1D = D.getType();
@@ -1419,7 +1421,7 @@ void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
MD = llvm::ConstantAsMetadata::get(C);
else {
// Create an artificial VarDecl to generate debug info for.
- IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
+ const IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
auto QT = getContext().getIntTypeForBitwidth(
SizeTy->getScalarSizeInBits(), false);
auto *ArtificialDecl = VarDecl::Create(
@@ -1460,7 +1462,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
bool EmitDebugInfo = DI && CGM.getCodeGenOpts().hasReducedDebugInfo();
Address address = Address::invalid();
- Address AllocaAddr = Address::invalid();
+ RawAddress AllocaAddr = RawAddress::invalid();
Address OpenMPLocalAddr = Address::invalid();
if (CGM.getLangOpts().OpenMPIRBuilder)
OpenMPLocalAddr = OMPBuilderCBHelpers::getAddressOfLocalVariable(*this, &D);
@@ -1523,7 +1525,10 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// return slot, so that we can elide the copy when returning this
// variable (C++0x [class.copy]p34).
address = ReturnValue;
- AllocaAddr = ReturnValue;
+ AllocaAddr =
+ RawAddress(ReturnValue.emitRawPointer(*this),
+ ReturnValue.getElementType(), ReturnValue.getAlignment());
+ ;
if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
const auto *RD = RecordTy->getDecl();
@@ -1534,7 +1539,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// to this variable. Set it to zero to indicate that NRVO was not
// applied.
llvm::Value *Zero = Builder.getFalse();
- Address NRVOFlag =
+ RawAddress NRVOFlag =
CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
EnsureInsertPoint();
Builder.CreateStore(Zero, NRVOFlag);
@@ -1614,7 +1619,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
LValue Base = MakeAddrLValue(AddrSizePair.first, D.getType(),
CGM.getContext().getDeclAlign(&D),
AlignmentSource::Decl);
- address = Base.getAddress(*this);
+ address = Base.getAddress();
// Push a cleanup block to emit the call to __kmpc_free_shared in the
// appropriate location at the end of the scope of the
@@ -1677,7 +1682,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
}
if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
- EmitVarAnnotations(&D, address.getPointer());
+ EmitVarAnnotations(&D, address.emitRawPointer(*this));
// Make sure we call @llvm.lifetime.end.
if (emission.useLifetimeMarkers())
@@ -1850,12 +1855,13 @@ void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
llvm::Value *BaseSizeInChars =
llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
Address Begin = Loc.withElementType(Int8Ty);
- llvm::Value *End = Builder.CreateInBoundsGEP(
- Begin.getElementType(), Begin.getPointer(), SizeVal, "vla.end");
+ llvm::Value *End = Builder.CreateInBoundsGEP(Begin.getElementType(),
+ Begin.emitRawPointer(*this),
+ SizeVal, "vla.end");
llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
EmitBlock(LoopBB);
llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
- Cur->addIncoming(Begin.getPointer(), OriginBB);
+ Cur->addIncoming(Begin.emitRawPointer(*this), OriginBB);
CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
auto *I =
Builder.CreateMemCpy(Address(Cur, Int8Ty, CurAlign),
@@ -1964,10 +1970,35 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
constant = constWithPadding(CGM, IsPattern::No,
replaceUndef(CGM, isPattern, constant));
}
+
+ if (D.getType()->isBitIntType() &&
+ CGM.getTypes().typeRequiresSplitIntoByteArray(D.getType())) {
+ // Constants for long _BitInt types are split into individual bytes.
+ // Try to fold these back into an integer constant so it can be stored
+ // properly.
+ llvm::Type *LoadType = CGM.getTypes().convertTypeForLoadStore(
+ D.getType(), constant->getType());
+ constant = llvm::ConstantFoldLoadFromConst(
+ constant, LoadType, llvm::APInt::getZero(32), CGM.getDataLayout());
+ }
}
if (!constant) {
- initializeWhatIsTechnicallyUninitialized(Loc);
+ if (trivialAutoVarInit !=
+ LangOptions::TrivialAutoVarInitKind::Uninitialized) {
+ // At this point, we know D has an Init expression, but isn't a constant.
+ // - If D is not a scalar, auto-var-init conservatively (members may be
+ // left uninitialized by constructor Init expressions for example).
+ // - If D is a scalar, we only need to auto-var-init if there is a
+ // self-reference. Otherwise, the Init expression should be sufficient.
+ // It may be that the Init expression uses other uninitialized memory,
+ // but auto-var-init here would not help, as auto-init would get
+ // overwritten by Init.
+ if (!D.getType()->isScalarType() || capturedByInit ||
+ isAccessedBy(D, Init)) {
+ initializeWhatIsTechnicallyUninitialized(Loc);
+ }
+ }
LValue lv = MakeAddrLValue(Loc, type);
lv.setNonGC(true);
return EmitExprAsInit(Init, &D, lv, capturedByInit);
@@ -2027,10 +2058,10 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
else if (auto *FD = dyn_cast<FieldDecl>(D))
Overlap = getOverlapForFieldInit(FD);
// TODO: how can we delay here if D is captured by its initializer?
- EmitAggExpr(init, AggValueSlot::forLValue(
- lvalue, *this, AggValueSlot::IsDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased, Overlap));
+ EmitAggExpr(init,
+ AggValueSlot::forLValue(lvalue, AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased, Overlap));
}
return;
}
@@ -2196,6 +2227,27 @@ void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
destroyer, useEHCleanupForArray);
}
+// Pushes a destroy and defers its deactivation until its
+// CleanupDeactivationScope is exited.
+void CodeGenFunction::pushDestroyAndDeferDeactivation(
+ QualType::DestructionKind dtorKind, Address addr, QualType type) {
+ assert(dtorKind && "cannot push destructor for trivial type");
+
+ CleanupKind cleanupKind = getCleanupKind(dtorKind);
+ pushDestroyAndDeferDeactivation(
+ cleanupKind, addr, type, getDestroyer(dtorKind), cleanupKind & EHCleanup);
+}
+
+void CodeGenFunction::pushDestroyAndDeferDeactivation(
+ CleanupKind cleanupKind, Address addr, QualType type, Destroyer *destroyer,
+ bool useEHCleanupForArray) {
+ llvm::Instruction *DominatingIP =
+ Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy));
+ pushDestroy(cleanupKind, addr, type, destroyer, useEHCleanupForArray);
+ DeferredDeactivationCleanupStack.push_back(
+ {EHStack.stable_begin(), DominatingIP});
+}
+
void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
}
@@ -2212,39 +2264,48 @@ void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
// If we're not in a conditional branch, we don't need to bother generating a
// conditional cleanup.
if (!isInConditionalBranch()) {
- // Push an EH-only cleanup for the object now.
// FIXME: When popping normal cleanups, we need to keep this EH cleanup
// around in case a temporary's destructor throws an exception.
- if (cleanupKind & EHCleanup)
- EHStack.pushCleanup<DestroyObject>(
- static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
- destroyer, useEHCleanupForArray);
+ // Add the cleanup to the EHStack. After the full-expr, this would be
+ // deactivated before being popped from the stack.
+ pushDestroyAndDeferDeactivation(cleanupKind, addr, type, destroyer,
+ useEHCleanupForArray);
+
+ // Since this is lifetime-extended, push it once again to the EHStack after
+ // the full expression.
return pushCleanupAfterFullExprWithActiveFlag<DestroyObject>(
- cleanupKind, Address::invalid(), addr, type, destroyer, useEHCleanupForArray);
+ cleanupKind, Address::invalid(), addr, type, destroyer,
+ useEHCleanupForArray);
}
// Otherwise, we should only destroy the object if it's been initialized.
- // Re-use the active flag and saved address across both the EH and end of
- // scope cleanups.
- using SavedType = typename DominatingValue<Address>::saved_type;
using ConditionalCleanupType =
EHScopeStack::ConditionalCleanup<DestroyObject, Address, QualType,
Destroyer *, bool>;
-
- Address ActiveFlag = createCleanupActiveFlag();
- SavedType SavedAddr = saveValueInCond(addr);
-
- if (cleanupKind & EHCleanup) {
- EHStack.pushCleanup<ConditionalCleanupType>(
- static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), SavedAddr, type,
- destroyer, useEHCleanupForArray);
- initFullExprCleanupWithFlag(ActiveFlag);
- }
-
+ DominatingValue<Address>::saved_type SavedAddr = saveValueInCond(addr);
+
+ // Remember to emit cleanup if we branch-out before end of full-expression
+ // (eg: through stmt-expr or coro suspensions).
+ AllocaTrackerRAII DeactivationAllocas(*this);
+ Address ActiveFlagForDeactivation = createCleanupActiveFlag();
+
+ pushCleanupAndDeferDeactivation<ConditionalCleanupType>(
+ cleanupKind, SavedAddr, type, destroyer, useEHCleanupForArray);
+ initFullExprCleanupWithFlag(ActiveFlagForDeactivation);
+ EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
+ // Erase the active flag if the cleanup was not emitted.
+ cleanup.AddAuxAllocas(std::move(DeactivationAllocas).Take());
+
+ // Since this is lifetime-extended, push it once again to the EHStack after
+ // the full expression.
+ // The previous active flag would always be 'false' due to forced deferred
+ // deactivation. Use a separate flag for lifetime-extension to correctly
+ // remember if this branch was taken and the object was initialized.
+ Address ActiveFlagForLifetimeExt = createCleanupActiveFlag();
pushCleanupAfterFullExprWithActiveFlag<ConditionalCleanupType>(
- cleanupKind, ActiveFlag, SavedAddr, type, destroyer,
+ cleanupKind, ActiveFlagForLifetimeExt, SavedAddr, type, destroyer,
useEHCleanupForArray);
}
@@ -2282,7 +2343,7 @@ void CodeGenFunction::emitDestroy(Address addr, QualType type,
checkZeroLength = false;
}
- llvm::Value *begin = addr.getPointer();
+ llvm::Value *begin = addr.emitRawPointer(*this);
llvm::Value *end =
Builder.CreateInBoundsGEP(addr.getElementType(), begin, length);
emitArrayDestroy(begin, end, type, elementAlign, destroyer,
@@ -2437,9 +2498,9 @@ namespace {
};
} // end anonymous namespace
-/// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
-/// already-constructed elements of the given array. The cleanup
-/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
+/// pushIrregularPartialArrayCleanup - Push a NormalAndEHCleanup to
+/// destroy already-constructed elements of the given array. The cleanup may be
+/// popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
/// possibly still an array type
@@ -2448,10 +2509,9 @@ void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
QualType elementType,
CharUnits elementAlign,
Destroyer *destroyer) {
- pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
- arrayBegin, arrayEndPointer,
- elementType, elementAlign,
- destroyer);
+ pushFullExprCleanup<IrregularPartialArrayDestroy>(
+ NormalAndEHCleanup, arrayBegin, arrayEndPointer, elementType,
+ elementAlign, destroyer);
}
/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
@@ -2542,7 +2602,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
}
Address DeclPtr = Address::invalid();
- Address AllocaPtr = Address::invalid();
+ RawAddress AllocaPtr = Address::invalid();
bool DoStore = false;
bool IsScalar = hasScalarEvaluationKind(Ty);
bool UseIndirectDebugAddress = false;
@@ -2554,8 +2614,8 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
// Indirect argument is in alloca address space, which may be different
// from the default address space.
auto AllocaAS = CGM.getASTAllocaAddressSpace();
- auto *V = DeclPtr.getPointer();
- AllocaPtr = DeclPtr;
+ auto *V = DeclPtr.emitRawPointer(*this);
+ AllocaPtr = RawAddress(V, DeclPtr.getElementType(), DeclPtr.getAlignment());
// For truly ABI indirect arguments -- those that are not `byval` -- store
// the address of the argument on the stack to preserve debug information.
@@ -2647,7 +2707,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
// objc_storeStrong attempts to release its old value.
llvm::Value *Null = CGM.EmitNullConstant(D.getType());
EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
- EmitARCStoreStrongCall(lv.getAddress(*this), ArgVal, true);
+ EmitARCStoreStrongCall(lv.getAddress(), ArgVal, true);
DoStore = false;
}
else
@@ -2694,7 +2754,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
}
if (D.hasAttr<AnnotateAttr>())
- EmitVarAnnotations(&D, DeclPtr.getPointer());
+ EmitVarAnnotations(&D, DeclPtr.emitRawPointer(*this));
// We can only check return value nullability if all arguments to the
// function satisfy their nullability preconditions. This makes it necessary
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
index e08a1e5f42df..2f56355cff90 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -57,7 +57,7 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
return;
case TEK_Aggregate:
CGF.EmitAggExpr(Init,
- AggValueSlot::forLValue(lv, CGF, AggValueSlot::IsDestructed,
+ AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased,
AggValueSlot::DoesNotOverlap));
@@ -162,7 +162,8 @@ void CodeGenFunction::EmitInvariantStart(llvm::Constant *Addr, CharUnits Size) {
// Grab the llvm.invariant.start intrinsic.
llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
// Overloaded address space type.
- llvm::Type *ObjectPtr[1] = {Int8PtrTy};
+ assert(Addr->getType()->isPointerTy() && "Address must be a pointer");
+ llvm::Type *ObjectPtr[1] = {Addr->getType()};
llvm::Function *InvariantStart = CGM.getIntrinsic(InvStartID, ObjectPtr);
// Emit a call with the size in bytes of the object.
@@ -231,7 +232,7 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
/// Create a stub function, suitable for being passed to atexit,
/// which passes the given address to the given destructor function.
-llvm::Function *CodeGenFunction::createAtExitStub(const VarDecl &VD,
+llvm::Constant *CodeGenFunction::createAtExitStub(const VarDecl &VD,
llvm::FunctionCallee dtor,
llvm::Constant *addr) {
// Get the destructor function type, void(*)(void).
@@ -263,7 +264,12 @@ llvm::Function *CodeGenFunction::createAtExitStub(const VarDecl &VD,
CGF.FinishFunction();
- return fn;
+ // Get a proper function pointer.
+ FunctionProtoType::ExtProtoInfo EPI(getContext().getDefaultCallingConvention(
+ /*IsVariadic=*/false, /*IsCXXMethod=*/false));
+ QualType fnType = getContext().getFunctionType(getContext().VoidTy,
+ {getContext().VoidPtrTy}, EPI);
+ return CGM.getFunctionPointer(fn, fnType);
}
/// Create a stub function, suitable for being passed to __pt_atexit_np,
@@ -332,7 +338,8 @@ void CodeGenFunction::registerGlobalDtorWithLLVM(const VarDecl &VD,
llvm::FunctionCallee Dtor,
llvm::Constant *Addr) {
// Create a function which calls the destructor.
- llvm::Function *dtorStub = createAtExitStub(VD, Dtor, Addr);
+ llvm::Function *dtorStub =
+ cast<llvm::Function>(createAtExitStub(VD, Dtor, Addr));
CGM.AddGlobalDtor(dtorStub);
}
@@ -476,6 +483,10 @@ llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
!isInNoSanitizeList(SanitizerKind::Thread, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SanitizeThread);
+ if (getLangOpts().Sanitize.has(SanitizerKind::NumericalStability) &&
+ !isInNoSanitizeList(SanitizerKind::NumericalStability, Fn, Loc))
+ Fn->addFnAttr(llvm::Attribute::SanitizeNumericalStability);
+
if (getLangOpts().Sanitize.has(SanitizerKind::Memory) &&
!isInNoSanitizeList(SanitizerKind::Memory, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
@@ -835,6 +846,10 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
// No Itanium initializer in header like modules.
if (M->isHeaderLikeModule())
continue;
+ // We're allowed to skip the initialization if we are sure it doesn't
+ // do any thing.
+ if (!M->isNamedModuleInterfaceHasInit())
+ continue;
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
SmallString<256> FnName;
{
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp
index 5a9d06da12de..bb2ed237ee9f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp
@@ -397,7 +397,7 @@ namespace {
void CodeGenFunction::EmitAnyExprToExn(const Expr *e, Address addr) {
// Make sure the exception object is cleaned up if there's an
// exception during initialization.
- pushFullExprCleanup<FreeException>(EHCleanup, addr.getPointer());
+ pushFullExprCleanup<FreeException>(EHCleanup, addr.emitRawPointer(*this));
EHScopeStack::stable_iterator cleanup = EHStack.stable_begin();
// __cxa_allocate_exception returns a void*; we need to cast this
@@ -416,8 +416,8 @@ void CodeGenFunction::EmitAnyExprToExn(const Expr *e, Address addr) {
/*IsInit*/ true);
// Deactivate the cleanup block.
- DeactivateCleanupBlock(cleanup,
- cast<llvm::Instruction>(typedAddr.getPointer()));
+ DeactivateCleanupBlock(
+ cleanup, cast<llvm::Instruction>(typedAddr.emitRawPointer(*this)));
}
Address CodeGenFunction::getExceptionSlot() {
@@ -1052,7 +1052,8 @@ static void emitWasmCatchPadBlock(CodeGenFunction &CGF,
CGF.Builder.CreateStore(Exn, CGF.getExceptionSlot());
llvm::CallInst *Selector = CGF.Builder.CreateCall(GetSelectorFn, CPI);
- llvm::Function *TypeIDFn = CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
+ llvm::Function *TypeIDFn =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for, {CGF.VoidPtrTy});
// If there's only a single catch-all, branch directly to its handler.
if (CatchScope.getNumHandlers() == 1 &&
@@ -1137,7 +1138,7 @@ static void emitCatchDispatchBlock(CodeGenFunction &CGF,
// Select the right handler.
llvm::Function *llvm_eh_typeid_for =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for, {CGF.VoidPtrTy});
llvm::Type *argTy = llvm_eh_typeid_for->getArg(0)->getType();
LangAS globAS = CGF.CGM.GetGlobalVarAddressSpace(nullptr);
@@ -1834,7 +1835,8 @@ Address CodeGenFunction::recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
llvm::Value *ParentFP) {
llvm::CallInst *RecoverCall = nullptr;
CGBuilderTy Builder(*this, AllocaInsertPt);
- if (auto *ParentAlloca = dyn_cast<llvm::AllocaInst>(ParentVar.getPointer())) {
+ if (auto *ParentAlloca =
+ dyn_cast_or_null<llvm::AllocaInst>(ParentVar.getBasePointer())) {
// Mark the variable escaped if nobody else referenced it and compute the
// localescape index.
auto InsertPair = ParentCGF.EscapedLocals.insert(
@@ -1851,8 +1853,8 @@ Address CodeGenFunction::recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
// If the parent didn't have an alloca, we're doing some nested outlining.
// Just clone the existing localrecover call, but tweak the FP argument to
// use our FP value. All other arguments are constants.
- auto *ParentRecover =
- cast<llvm::IntrinsicInst>(ParentVar.getPointer()->stripPointerCasts());
+ auto *ParentRecover = cast<llvm::IntrinsicInst>(
+ ParentVar.emitRawPointer(*this)->stripPointerCasts());
assert(ParentRecover->getIntrinsicID() == llvm::Intrinsic::localrecover &&
"expected alloca or localrecover in parent LocalDeclMap");
RecoverCall = cast<llvm::CallInst>(ParentRecover->clone());
@@ -1925,7 +1927,8 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
if (isa<ImplicitParamDecl>(D) &&
D->getType() == getContext().VoidPtrTy) {
assert(D->getName().starts_with("frame_pointer"));
- FramePtrAddrAlloca = cast<llvm::AllocaInst>(I.second.getPointer());
+ FramePtrAddrAlloca =
+ cast<llvm::AllocaInst>(I.second.getBasePointer());
break;
}
}
@@ -1986,7 +1989,7 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
LValue ThisFieldLValue =
EmitLValueForLambdaField(LambdaThisCaptureField);
if (!LambdaThisCaptureField->getType()->isPointerType()) {
- CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
+ CXXThisValue = ThisFieldLValue.getAddress().emitRawPointer(*this);
} else {
CXXThisValue = EmitLoadOfLValue(ThisFieldLValue, SourceLocation())
.getScalarVal();
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
index f8f997909977..3ef22b17f769 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "ABIInfoImpl.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCall.h"
@@ -56,8 +57,13 @@ using namespace CodeGen;
// Experiment to make sanitizers easier to debug
static llvm::cl::opt<bool> ClSanitizeDebugDeoptimization(
"ubsan-unique-traps", llvm::cl::Optional,
- llvm::cl::desc("Deoptimize traps for UBSAN so there is 1 trap per check"),
- llvm::cl::init(false));
+ llvm::cl::desc("Deoptimize traps for UBSAN so there is 1 trap per check."));
+
+// TODO: Introduce frontend options to enabled per sanitizers, similar to
+// `fsanitize-trap`.
+static llvm::cl::opt<bool> ClSanitizeGuardChecks(
+ "ubsan-guard-checks", llvm::cl::Optional,
+ llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));
//===--------------------------------------------------------------------===//
// Miscellaneous Helper Methods
@@ -65,21 +71,21 @@ static llvm::cl::opt<bool> ClSanitizeDebugDeoptimization(
/// CreateTempAlloca - This creates a alloca and inserts it into the entry
/// block.
-Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
- CharUnits Align,
- const Twine &Name,
- llvm::Value *ArraySize) {
+RawAddress
+CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
+ const Twine &Name,
+ llvm::Value *ArraySize) {
auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
Alloca->setAlignment(Align.getAsAlign());
- return Address(Alloca, Ty, Align, KnownNonNull);
+ return RawAddress(Alloca, Ty, Align, KnownNonNull);
}
/// CreateTempAlloca - This creates a alloca and inserts it into the entry
/// block. The alloca is casted to default address space if necessary.
-Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
- const Twine &Name,
- llvm::Value *ArraySize,
- Address *AllocaAddr) {
+RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
+ const Twine &Name,
+ llvm::Value *ArraySize,
+ RawAddress *AllocaAddr) {
auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
if (AllocaAddr)
*AllocaAddr = Alloca;
@@ -101,7 +107,7 @@ Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
}
- return Address(V, Ty, Align, KnownNonNull);
+ return RawAddress(V, Ty, Align, KnownNonNull);
}
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
@@ -110,38 +116,45 @@ Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
const Twine &Name,
llvm::Value *ArraySize) {
+ llvm::AllocaInst *Alloca;
if (ArraySize)
- return Builder.CreateAlloca(Ty, ArraySize, Name);
- return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
- ArraySize, Name, AllocaInsertPt);
+ Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
+ else
+ Alloca = new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
+ ArraySize, Name, &*AllocaInsertPt);
+ if (Allocas) {
+ Allocas->Add(Alloca);
+ }
+ return Alloca;
}
/// CreateDefaultAlignTempAlloca - This creates an alloca with the
/// default alignment of the corresponding LLVM type, which is *not*
/// guaranteed to be related in any way to the expected alignment of
/// an AST type that might have been lowered to Ty.
-Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
- const Twine &Name) {
+RawAddress CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
+ const Twine &Name) {
CharUnits Align =
CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
return CreateTempAlloca(Ty, Align, Name);
}
-Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
+RawAddress CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
CharUnits Align = getContext().getTypeAlignInChars(Ty);
return CreateTempAlloca(ConvertType(Ty), Align, Name);
}
-Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
- Address *Alloca) {
+RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
+ RawAddress *Alloca) {
// FIXME: Should we prefer the preferred type alignment here?
return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
}
-Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
- const Twine &Name, Address *Alloca) {
- Address Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
- /*ArraySize=*/nullptr, Alloca);
+RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
+ const Twine &Name,
+ RawAddress *Alloca) {
+ RawAddress Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
+ /*ArraySize=*/nullptr, Alloca);
if (Ty->isConstantMatrixType()) {
auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
@@ -154,13 +167,14 @@ Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
return Result;
}
-Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, CharUnits Align,
- const Twine &Name) {
+RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
+ CharUnits Align,
+ const Twine &Name) {
return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
}
-Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
- const Twine &Name) {
+RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
+ const Twine &Name) {
return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
Name);
}
@@ -304,8 +318,8 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
CleanupKind CleanupKind;
if (Lifetime == Qualifiers::OCL_Strong) {
const ValueDecl *VD = M->getExtendingDecl();
- bool Precise =
- VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
+ bool Precise = isa_and_nonnull<VarDecl>(VD) &&
+ VD->hasAttr<ObjCPreciseLifetimeAttr>();
CleanupKind = CGF.getARCCleanupKind();
Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
: &CodeGenFunction::destroyARCStrongImprecise;
@@ -359,7 +373,7 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
} else {
CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
- CleanupArg = cast<llvm::Constant>(ReferenceTemporary.getPointer());
+ CleanupArg = cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
}
CGF.CGM.getCXXABI().registerGlobalDtor(
CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
@@ -384,10 +398,10 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
}
}
-static Address createReferenceTemporary(CodeGenFunction &CGF,
- const MaterializeTemporaryExpr *M,
- const Expr *Inner,
- Address *Alloca = nullptr) {
+static RawAddress createReferenceTemporary(CodeGenFunction &CGF,
+ const MaterializeTemporaryExpr *M,
+ const Expr *Inner,
+ RawAddress *Alloca = nullptr) {
auto &TCG = CGF.getTargetHooks();
switch (M->getStorageDuration()) {
case SD_FullExpression:
@@ -416,7 +430,7 @@ static Address createReferenceTemporary(CodeGenFunction &CGF,
GV->getValueType()->getPointerTo(
CGF.getContext().getTargetAddressSpace(LangAS::Default)));
// FIXME: Should we put the new global into a COMDAT?
- return Address(C, GV->getValueType(), alignment);
+ return RawAddress(C, GV->getValueType(), alignment);
}
return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
}
@@ -448,7 +462,7 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
auto ownership = M->getType().getObjCLifetime();
if (ownership != Qualifiers::OCL_None &&
ownership != Qualifiers::OCL_ExplicitNone) {
- Address Object = createReferenceTemporary(*this, M, E);
+ RawAddress Object = createReferenceTemporary(*this, M, E);
if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
llvm::Type *Ty = ConvertTypeForMem(E->getType());
Object = Object.withElementType(Ty);
@@ -502,8 +516,8 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
}
// Create and initialize the reference temporary.
- Address Alloca = Address::invalid();
- Address Object = createReferenceTemporary(*this, M, E, &Alloca);
+ RawAddress Alloca = Address::invalid();
+ RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
if (auto *Var = dyn_cast<llvm::GlobalVariable>(
Object.getPointer()->stripPointerCasts())) {
llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
@@ -592,7 +606,7 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
LV = EmitLValueForField(LV, Adjustment.Field);
assert(LV.isSimple() &&
"materialized temporary field is not a simple lvalue");
- Object = LV.getAddress(*this);
+ Object = LV.getAddress();
break;
}
@@ -637,16 +651,13 @@ unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
->getZExtValue();
}
-/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
-static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
- llvm::Value *High) {
- llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
- llvm::Value *K47 = Builder.getInt64(47);
- llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
- llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
- llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
- llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
- return Builder.CreateMul(B1, KMul);
+static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc,
+ llvm::Value *Ptr) {
+ llvm::Value *A0 =
+ Builder.CreateMul(Ptr, Builder.getInt64(0xbf58476d1ce4e5b9u));
+ llvm::Value *A1 =
+ Builder.CreateXor(A0, Builder.CreateLShr(A0, Builder.getInt64(31)));
+ return Builder.CreateXor(Acc, A1);
}
bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
@@ -808,11 +819,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
EmitBlock(VptrNotNull);
}
- // Compute a hash of the mangled name of the type.
- //
- // FIXME: This is not guaranteed to be deterministic! Move to a
- // fingerprinting mechanism once LLVM provides one. For the time
- // being the implementation happens to be deterministic.
+ // Compute a deterministic hash of the mangled name of the type.
SmallString<64> MangledName;
llvm::raw_svector_ostream Out(MangledName);
CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
@@ -821,15 +828,19 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
// Contained in NoSanitizeList based on the mangled type.
if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
Out.str())) {
- llvm::hash_code TypeHash = hash_value(Out.str());
+ // Load the vptr, and mix it with TypeHash.
+ llvm::Value *TypeHash =
+ llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str()));
- // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
- llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
+ llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
- llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
- llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);
+ llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy,
+ Ty->getAsCXXRecordDecl(),
+ VTableAuthMode::UnsafeUbsanStrip);
+ VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy);
- llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
+ llvm::Value *Hash =
+ emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty));
Hash = Builder.CreateTrunc(Hash, IntPtrTy);
// Look the hash up in our cache.
@@ -1041,6 +1052,8 @@ public:
return Visit(E->getBase());
}
const Expr *VisitCastExpr(const CastExpr *E) {
+ if (E->getCastKind() == CK_LValueToRValue)
+ return IsExpectedRecordDecl(E) ? E : nullptr;
return Visit(E->getSubExpr());
}
const Expr *VisitParenExpr(const ParenExpr *E) {
@@ -1060,21 +1073,25 @@ using RecIndicesTy =
SmallVector<std::pair<const RecordDecl *, llvm::Value *>, 8>;
static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD,
- const FieldDecl *FD, RecIndicesTy &Indices) {
+ const FieldDecl *Field,
+ RecIndicesTy &Indices) {
const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
int64_t FieldNo = -1;
- for (const Decl *D : RD->decls()) {
- if (const auto *Field = dyn_cast<FieldDecl>(D)) {
- FieldNo = Layout.getLLVMFieldNo(Field);
- if (FD == Field) {
- Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
- return true;
- }
+ for (const FieldDecl *FD : RD->fields()) {
+ if (!Layout.containsFieldDecl(FD))
+ // This could happen if the field has a struct type that's empty. I don't
+ // know why either.
+ continue;
+
+ FieldNo = Layout.getLLVMFieldNo(FD);
+ if (FD == Field) {
+ Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
+ return true;
}
- if (const auto *Record = dyn_cast<RecordDecl>(D)) {
- ++FieldNo;
- if (getGEPIndicesToField(CGF, Record, FD, Indices)) {
+ QualType Ty = FD->getType();
+ if (Ty->isRecordType()) {
+ if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
if (RD->isUnion())
FieldNo = 0;
Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
@@ -1104,19 +1121,15 @@ llvm::Value *CodeGenFunction::EmitCountedByFieldExpr(
return nullptr;
llvm::Value *Res = nullptr;
- if (const auto *DRE = dyn_cast<DeclRefExpr>(StructBase)) {
- Res = EmitDeclRefLValue(DRE).getPointer(*this);
- Res = Builder.CreateAlignedLoad(ConvertType(DRE->getType()), Res,
- getPointerAlign(), "dre.load");
- } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(StructBase)) {
- LValue LV = EmitMemberExpr(ME);
- Address Addr = LV.getAddress(*this);
- Res = Addr.getPointer();
- } else if (StructBase->getType()->isPointerType()) {
+ if (StructBase->getType()->isPointerType()) {
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
- Res = Addr.getPointer();
+ Res = Addr.emitRawPointer(*this);
+ } else if (StructBase->isLValue()) {
+ LValue LV = EmitLValue(StructBase);
+ Address Addr = LV.getAddress();
+ Res = Addr.emitRawPointer(*this);
} else {
return nullptr;
}
@@ -1136,38 +1149,19 @@ llvm::Value *CodeGenFunction::EmitCountedByFieldExpr(
}
const FieldDecl *CodeGenFunction::FindCountedByField(const FieldDecl *FD) {
- if (!FD || !FD->hasAttr<CountedByAttr>())
- return nullptr;
-
- const auto *CBA = FD->getAttr<CountedByAttr>();
- if (!CBA)
- return nullptr;
-
- auto GetNonAnonStructOrUnion =
- [](const RecordDecl *RD) -> const RecordDecl * {
- while (RD && RD->isAnonymousStructOrUnion()) {
- const auto *R = dyn_cast<RecordDecl>(RD->getDeclContext());
- if (!R)
- return nullptr;
- RD = R;
- }
- return RD;
- };
- const RecordDecl *EnclosingRD = GetNonAnonStructOrUnion(FD->getParent());
- if (!EnclosingRD)
+ if (!FD)
return nullptr;
- DeclarationName DName(CBA->getCountedByField());
- DeclContext::lookup_result Lookup = EnclosingRD->lookup(DName);
-
- if (Lookup.empty())
+ const auto *CAT = FD->getType()->getAs<CountAttributedType>();
+ if (!CAT)
return nullptr;
- const NamedDecl *ND = Lookup.front();
- if (const auto *IFD = dyn_cast<IndirectFieldDecl>(ND))
- ND = IFD->getAnonField();
+ const auto *CountDRE = cast<DeclRefExpr>(CAT->getCountExpr());
+ const auto *CountDecl = CountDRE->getDecl();
+ if (const auto *IFD = dyn_cast<IndirectFieldDecl>(CountDecl))
+ CountDecl = IFD->getAnonField();
- return dyn_cast<FieldDecl>(ND);
+ return dyn_cast<FieldDecl>(CountDecl);
}
void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
@@ -1301,8 +1295,7 @@ static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
if (BaseInfo)
BaseInfo->mergeForCast(TargetTypeBaseInfo);
- Addr = Address(Addr.getPointer(), Addr.getElementType(), Align,
- IsKnownNonNull);
+ Addr.setAlignment(Align);
}
}
@@ -1319,9 +1312,10 @@ static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
CGF.ConvertTypeForMem(E->getType()->getPointeeType());
Addr = Addr.withElementType(ElemTy);
if (CE->getCastKind() == CK_AddressSpaceConversion)
- Addr = CGF.Builder.CreateAddrSpaceCast(Addr,
- CGF.ConvertType(E->getType()));
- return Addr;
+ Addr = CGF.Builder.CreateAddrSpaceCast(
+ Addr, CGF.ConvertType(E->getType()), ElemTy);
+ return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),
+ CE->getType());
}
break;
@@ -1360,7 +1354,7 @@ static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
if (BaseInfo) *BaseInfo = LV.getBaseInfo();
if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
- return LV.getAddress(CGF);
+ return LV.getAddress();
}
}
@@ -1375,7 +1369,7 @@ static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
if (BaseInfo) *BaseInfo = LV.getBaseInfo();
if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
- return LV.getAddress(CGF);
+ return LV.getAddress();
}
}
}
@@ -1383,10 +1377,9 @@ static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
// TODO: conditional operators, comma.
// Otherwise, use the alignment of the type.
- CharUnits Align =
- CGF.CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo);
- llvm::Type *ElemTy = CGF.ConvertTypeForMem(E->getType()->getPointeeType());
- return Address(CGF.EmitScalarExpr(E), ElemTy, Align, IsKnownNonNull);
+ return CGF.makeNaturalAddressForPointer(
+ CGF.EmitScalarExpr(E), E->getType()->getPointeeType(), CharUnits(),
+ /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
}
/// EmitPointerWithAlignment - Given an expression of pointer type, try to
@@ -1487,8 +1480,7 @@ LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
SkippedChecks.set(SanitizerKind::Null, true);
}
- EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(*this), E->getType(),
- LV.getAlignment(), SkippedChecks);
+ EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
}
return LV;
}
@@ -1599,12 +1591,12 @@ LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
if (LV.isSimple()) {
// Defend against branches out of gnu statement expressions surrounded by
// cleanups.
- Address Addr = LV.getAddress(*this);
- llvm::Value *V = Addr.getPointer();
+ Address Addr = LV.getAddress();
+ llvm::Value *V = Addr.getBasePointer();
Scope.ForceCleanup({&V});
- return LValue::MakeAddr(Addr.withPointer(V, Addr.isKnownNonNull()),
- LV.getType(), getContext(), LV.getBaseInfo(),
- LV.getTBAAInfo());
+ Addr.replaceBasePointer(V);
+ return LValue::MakeAddr(Addr, LV.getType(), getContext(),
+ LV.getBaseInfo(), LV.getTBAAInfo());
}
// FIXME: Is it possible to create an ExprWithCleanups that produces a
// bitfield lvalue or some other non-simple lvalue?
@@ -1636,8 +1628,8 @@ LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
case Expr::MatrixSubscriptExprClass:
return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
- case Expr::OMPArraySectionExprClass:
- return EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E));
+ case Expr::ArraySectionExprClass:
+ return EmitArraySectionExpr(cast<ArraySectionExpr>(E));
case Expr::ExtVectorElementExprClass:
return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
case Expr::CXXThisExprClass:
@@ -1675,6 +1667,8 @@ LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
return EmitCoawaitLValue(cast<CoawaitExpr>(E));
case Expr::CoyieldExprClass:
return EmitCoyieldLValue(cast<CoyieldExpr>(E));
+ case Expr::PackIndexingExprClass:
+ return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
}
}
@@ -1846,7 +1840,7 @@ llvm::Value *CodeGenFunction::emitScalarConstant(
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
SourceLocation Loc) {
- return EmitLoadOfScalar(lvalue.getAddress(*this), lvalue.isVolatile(),
+ return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
lvalue.getType(), Loc, lvalue.getBaseInfo(),
lvalue.getTBAAInfo(), lvalue.isNontemporal());
}
@@ -1946,7 +1940,7 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
LValueBaseInfo BaseInfo,
TBAAAccessInfo TBAAInfo,
bool isNontemporal) {
- if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getPointer()))
+ if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
if (GV->isThreadLocal())
Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
NotKnownNonNull);
@@ -1996,6 +1990,9 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
}
+ Addr =
+ Addr.withElementType(convertTypeForLoadStore(Ty, Addr.getElementType()));
+
llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
if (isNontemporal) {
llvm::MDNode *Node = llvm::MDNode::get(
@@ -2018,27 +2015,33 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
return EmitFromMemory(Load, Ty);
}
+/// Converts a scalar value from its primary IR type (as returned
+/// by ConvertType) to its load/store type (as returned by
+/// convertTypeForLoadStore).
llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
- // Bool has a different representation in memory than in registers.
- if (hasBooleanRepresentation(Ty)) {
- // This should really always be an i1, but sometimes it's already
- // an i8, and it's awkward to track those cases down.
- if (Value->getType()->isIntegerTy(1))
- return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
- assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
- "wrong value rep of bool");
+ if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
+ llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
+ bool Signed = Ty->isSignedIntegerOrEnumerationType();
+ return Builder.CreateIntCast(Value, StoreTy, Signed, "storedv");
+ }
+
+ if (Ty->isExtVectorBoolType()) {
+ llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
+ // Expand to the memory bit width.
+ unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits();
+ // <N x i1> --> <P x i1>.
+ Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
+ // <P x i1> --> iP.
+ Value = Builder.CreateBitCast(Value, StoreTy);
}
return Value;
}
+/// Converts a scalar value from its load/store type (as returned
+/// by convertTypeForLoadStore) to its primary IR type (as returned
+/// by ConvertType).
llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
- // Bool has a different representation in memory than in registers.
- if (hasBooleanRepresentation(Ty)) {
- assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
- "wrong value rep of bool");
- return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
- }
if (Ty->isExtVectorBoolType()) {
const auto *RawIntTy = Value->getType();
// Bitcast iP --> <P x i1>.
@@ -2051,13 +2054,19 @@ llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
return emitBoolVecConversion(V, ValNumElems, "extractvec");
}
+ if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
+ llvm::Type *ResTy = ConvertType(Ty);
+ return Builder.CreateTrunc(Value, ResTy, "loadedv");
+ }
+
return Value;
}
// Convert the pointer of \p Addr to a pointer to a vector (the value type of
// MatrixType), if it points to a array (the memory type of MatrixType).
-static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF,
- bool IsVector = true) {
+static RawAddress MaybeConvertMatrixAddress(RawAddress Addr,
+ CodeGenFunction &CGF,
+ bool IsVector = true) {
auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
if (ArrayTy && IsVector) {
auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
@@ -2082,7 +2091,7 @@ static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF,
// (VectorType).
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
bool isInit, CodeGenFunction &CGF) {
- Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(CGF), CGF,
+ Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
value->getType()->isVectorTy());
CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
@@ -2094,7 +2103,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
LValueBaseInfo BaseInfo,
TBAAAccessInfo TBAAInfo,
bool isInit, bool isNontemporal) {
- if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getPointer()))
+ if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
if (GV->isThreadLocal())
Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
NotKnownNonNull);
@@ -2102,17 +2111,10 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
llvm::Type *SrcTy = Value->getType();
if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy);
- if (VecTy && ClangVecTy->isExtVectorBoolType()) {
- auto *MemIntTy = cast<llvm::IntegerType>(Addr.getElementType());
- // Expand to the memory bit width.
- unsigned MemNumElems = MemIntTy->getPrimitiveSizeInBits();
- // <N x i1> --> <P x i1>.
- Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
- // <P x i1> --> iP.
- Value = Builder.CreateBitCast(Value, MemIntTy);
- } else if (!CGM.getCodeGenOpts().PreserveVec3Type) {
+ if (!CGM.getCodeGenOpts().PreserveVec3Type) {
// Handle vec3 special.
- if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
+ if (VecTy && !ClangVecTy->isExtVectorBoolType() &&
+ cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
// Our source is a vec3, do a shuffle vector to make it a vec4.
Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1},
"extractVec");
@@ -2152,7 +2154,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
return;
}
- EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(),
+ EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
lvalue.getType(), lvalue.getBaseInfo(),
lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
}
@@ -2162,29 +2164,44 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
CodeGenFunction &CGF) {
assert(LV.getType()->isConstantMatrixType());
- Address Addr = MaybeConvertMatrixAddress(LV.getAddress(CGF), CGF);
+ Address Addr = MaybeConvertMatrixAddress(LV.getAddress(), CGF);
LV.setAddress(Addr);
return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
}
+RValue CodeGenFunction::EmitLoadOfAnyValue(LValue LV, AggValueSlot Slot,
+ SourceLocation Loc) {
+ QualType Ty = LV.getType();
+ switch (getEvaluationKind(Ty)) {
+ case TEK_Scalar:
+ return EmitLoadOfLValue(LV, Loc);
+ case TEK_Complex:
+ return RValue::getComplex(EmitLoadOfComplex(LV, Loc));
+ case TEK_Aggregate:
+ EmitAggFinalDestCopy(Ty, Slot, LV, EVK_NonRValue);
+ return Slot.asRValue();
+ }
+ llvm_unreachable("bad evaluation kind");
+}
+
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
if (LV.isObjCWeak()) {
// load of a __weak object.
- Address AddrWeakObj = LV.getAddress(*this);
+ Address AddrWeakObj = LV.getAddress();
return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
AddrWeakObj));
}
if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
// In MRC mode, we do a load+autorelease.
if (!getLangOpts().ObjCAutoRefCount) {
- return RValue::get(EmitARCLoadWeak(LV.getAddress(*this)));
+ return RValue::get(EmitARCLoadWeak(LV.getAddress()));
}
// In ARC mode, we load retained and then consume the value.
- llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress(*this));
+ llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
Object = EmitObjCConsumeObject(LV.getType(), Object);
return RValue::get(Object);
}
@@ -2419,9 +2436,9 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
case Qualifiers::OCL_Weak:
if (isInit)
// Initialize and then skip the primitive store.
- EmitARCInitWeak(Dst.getAddress(*this), Src.getScalarVal());
+ EmitARCInitWeak(Dst.getAddress(), Src.getScalarVal());
else
- EmitARCStoreWeak(Dst.getAddress(*this), Src.getScalarVal(),
+ EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(),
/*ignore*/ true);
return;
@@ -2435,7 +2452,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
if (Dst.isObjCWeak() && !Dst.isNonGC()) {
// load of a __weak object.
- Address LvalueDst = Dst.getAddress(*this);
+ Address LvalueDst = Dst.getAddress();
llvm::Value *src = Src.getScalarVal();
CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
return;
@@ -2443,20 +2460,18 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
if (Dst.isObjCStrong() && !Dst.isNonGC()) {
// load of a __strong object.
- Address LvalueDst = Dst.getAddress(*this);
+ Address LvalueDst = Dst.getAddress();
llvm::Value *src = Src.getScalarVal();
if (Dst.isObjCIvar()) {
assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
llvm::Type *ResultType = IntPtrTy;
Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
- llvm::Value *RHS = dst.getPointer();
+ llvm::Value *RHS = dst.emitRawPointer(*this);
RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
- llvm::Value *LHS =
- Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType,
- "sub.ptr.lhs.cast");
+ llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
+ ResultType, "sub.ptr.lhs.cast");
llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
- CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
- BytesBetween);
+ CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
} else if (Dst.isGlobalObjCRef()) {
CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
Dst.isThreadLocalRef());
@@ -2473,7 +2488,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
llvm::Value **Result) {
const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
- llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
+ llvm::Type *ResLTy = convertTypeForLoadStore(Dst.getType());
Address Ptr = Dst.getBitFieldAddress();
// Get the source value, truncated to the width of the bit-field.
@@ -2785,14 +2800,11 @@ CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
LValueBaseInfo *PointeeBaseInfo,
TBAAAccessInfo *PointeeTBAAInfo) {
llvm::LoadInst *Load =
- Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile());
+ Builder.CreateLoad(RefLVal.getAddress(), RefLVal.isVolatile());
CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
-
- QualType PointeeType = RefLVal.getType()->getPointeeType();
- CharUnits Align = CGM.getNaturalTypeAlignment(
- PointeeType, PointeeBaseInfo, PointeeTBAAInfo,
- /* forPointeeType= */ true);
- return Address(Load, ConvertTypeForMem(PointeeType), Align);
+ return makeNaturalAddressForPointer(Load, RefLVal.getType()->getPointeeType(),
+ CharUnits(), /*ForPointeeType=*/true,
+ PointeeBaseInfo, PointeeTBAAInfo);
}
LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
@@ -2809,10 +2821,9 @@ Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
LValueBaseInfo *BaseInfo,
TBAAAccessInfo *TBAAInfo) {
llvm::Value *Addr = Builder.CreateLoad(Ptr);
- return Address(Addr, ConvertTypeForMem(PtrTy->getPointeeType()),
- CGM.getNaturalTypeAlignment(PtrTy->getPointeeType(), BaseInfo,
- TBAAInfo,
- /*forPointeeType=*/true));
+ return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
+ CharUnits(), /*ForPointeeType=*/true,
+ BaseInfo, TBAAInfo);
}
LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
@@ -2862,22 +2873,22 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
return LV;
}
-static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM,
- GlobalDecl GD) {
+llvm::Constant *CodeGenModule::getRawFunctionPointer(GlobalDecl GD,
+ llvm::Type *Ty) {
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
if (FD->hasAttr<WeakRefAttr>()) {
- ConstantAddress aliasee = CGM.GetWeakRefReference(FD);
+ ConstantAddress aliasee = GetWeakRefReference(FD);
return aliasee.getPointer();
}
- llvm::Constant *V = CGM.GetAddrOfFunction(GD);
+ llvm::Constant *V = GetAddrOfFunction(GD, Ty);
return V;
}
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
GlobalDecl GD) {
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
- llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, GD);
+ llvm::Constant *V = CGF.CGM.getFunctionPointer(GD);
CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
return CGF.MakeAddrLValue(V, E->getType(), Alignment,
AlignmentSource::Decl);
@@ -3008,7 +3019,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
/* BaseInfo= */ nullptr,
/* TBAAInfo= */ nullptr,
/* forPointeeType= */ true);
- Addr = Address(Val, ConvertTypeForMem(E->getType()), Alignment);
+ Addr = makeNaturalAddressForPointer(Val, T, Alignment);
}
return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
@@ -3039,12 +3050,13 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
LValue CapLVal =
EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
CapturedStmtInfo->getContextValue());
- Address LValueAddress = CapLVal.getAddress(*this);
- CapLVal = MakeAddrLValue(
- Address(LValueAddress.getPointer(), LValueAddress.getElementType(),
- getContext().getDeclAlign(VD)),
- CapLVal.getType(), LValueBaseInfo(AlignmentSource::Decl),
- CapLVal.getTBAAInfo());
+ Address LValueAddress = CapLVal.getAddress();
+ CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
+ LValueAddress.getElementType(),
+ getContext().getDeclAlign(VD)),
+ CapLVal.getType(),
+ LValueBaseInfo(AlignmentSource::Decl),
+ CapLVal.getTBAAInfo());
// Mark lvalue as nontemporal if the variable is marked as nontemporal
// in simd context.
if (getLangOpts().OpenMP &&
@@ -3100,7 +3112,8 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
// Handle threadlocal function locals.
if (VD->getTLSKind() != VarDecl::TLS_None)
addr = addr.withPointer(
- Builder.CreateThreadLocalAddress(addr.getPointer()), NotKnownNonNull);
+ Builder.CreateThreadLocalAddress(addr.getBasePointer()),
+ NotKnownNonNull);
// Check for OpenMP threadprivate variables.
if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
@@ -3139,21 +3152,8 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
return LV;
}
- if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
- LValue LV = EmitFunctionDeclLValue(*this, E, FD);
-
- // Emit debuginfo for the function declaration if the target wants to.
- if (getContext().getTargetInfo().allowDebugInfoForExternalRef()) {
- if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) {
- auto *Fn =
- cast<llvm::Function>(LV.getPointer(*this)->stripPointerCasts());
- if (!Fn->getSubprogram())
- DI->EmitFunctionDecl(FD, FD->getLocation(), T, Fn);
- }
- }
-
- return LV;
- }
+ if (const auto *FD = dyn_cast<FunctionDecl>(ND))
+ return EmitFunctionDeclLValue(*this, E, FD);
// FIXME: While we're emitting a binding from an enclosing scope, all other
// DeclRefExprs we see should be implicitly treated as if they also refer to
@@ -3227,7 +3227,7 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
// __real is valid on scalars. This is a faster way of testing that.
// __imag can only produce an rvalue on scalars.
if (E->getOpcode() == UO_Real &&
- !LV.getAddress(*this).getElementType()->isStructTy()) {
+ !LV.getAddress().getElementType()->isStructTy()) {
assert(E->getSubExpr()->getType()->isArithmeticType());
return LV;
}
@@ -3236,8 +3236,8 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
Address Component =
(E->getOpcode() == UO_Real
- ? emitAddrOfRealComponent(LV.getAddress(*this), LV.getType())
- : emitAddrOfImagComponent(LV.getAddress(*this), LV.getType()));
+ ? emitAddrOfRealComponent(LV.getAddress(), LV.getType())
+ : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
CGM.getTBAAInfoForSubobject(LV, T));
ElemLV.getQuals().addQualifiers(LV.getQuals());
@@ -3368,7 +3368,7 @@ llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
// Pointers are passed directly, everything else is passed by address.
if (!V->getType()->isPointerTy()) {
- Address Ptr = CreateDefaultAlignTempAlloca(V->getType());
+ RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
Builder.CreateStore(V, Ptr);
V = Ptr.getPointer();
}
@@ -3544,6 +3544,17 @@ void CodeGenFunction::EmitCheck(
Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
}
+ if (ClSanitizeGuardChecks) {
+ llvm::Value *Allow =
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
+ llvm::ConstantInt::get(CGM.Int8Ty, CheckHandler));
+
+ for (llvm::Value **Cond : {&FatalCond, &RecoverableCond, &TrapCond}) {
+ if (*Cond)
+ *Cond = Builder.CreateOr(*Cond, Builder.CreateNot(Allow));
+ }
+ }
+
if (TrapCond)
EmitTrapCheck(TrapCond, CheckHandler);
if (!FatalCond && !RecoverableCond)
@@ -3570,9 +3581,8 @@ void CodeGenFunction::EmitCheck(
llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
// Give hint that we very much don't expect to execute the handler
- // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp
llvm::MDBuilder MDHelper(getLLVMContext());
- llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
+ llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
EmitBlock(Handlers);
@@ -3640,7 +3650,7 @@ void CodeGenFunction::EmitCfiSlowPathCheck(
llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
llvm::MDBuilder MDHelper(getLLVMContext());
- llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
+ llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
EmitBlock(CheckBB);
@@ -3680,12 +3690,29 @@ void CodeGenFunction::EmitCfiSlowPathCheck(
// symbol in LTO mode.
void CodeGenFunction::EmitCfiCheckStub() {
llvm::Module *M = &CGM.getModule();
- auto &Ctx = M->getContext();
+ ASTContext &C = getContext();
+ QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);
+
+ FunctionArgList FnArgs;
+ ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
+ ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
+ ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
+ ImplicitParamKind::Other);
+ FnArgs.push_back(&ArgCallsiteTypeId);
+ FnArgs.push_back(&ArgAddr);
+ FnArgs.push_back(&ArgCFICheckFailData);
+ const CGFunctionInfo &FI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, FnArgs);
+
llvm::Function *F = llvm::Function::Create(
- llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, false),
+ llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
+ CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
F->setAlignment(llvm::Align(4096));
CGM.setDSOLocal(F);
+
+ llvm::LLVMContext &Ctx = M->getContext();
llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
// CrossDSOCFI pass is not executed if there is no executable code.
SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
@@ -3805,7 +3832,7 @@ void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
// If we're optimizing, collapse all calls to trap down to just one per
// check-type per function to save on code size.
- if (TrapBBs.size() <= CheckHandlerID)
+ if ((int)TrapBBs.size() <= CheckHandlerID)
TrapBBs.resize(CheckHandlerID + 1);
llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
@@ -3826,9 +3853,10 @@ void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
llvm::CallInst *TrapCall = Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
- llvm::ConstantInt::get(CGM.Int8Ty, ClSanitizeDebugDeoptimization
- ? TrapBB->getParent()->size()
- : CheckHandlerID));
+ llvm::ConstantInt::get(CGM.Int8Ty,
+ ClSanitizeDebugDeoptimization
+ ? TrapBB->getParent()->size()
+ : static_cast<uint64_t>(CheckHandlerID)));
if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
@@ -3864,7 +3892,7 @@ Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
// Expressions of array type can't be bitfields or vector elements.
LValue LV = EmitLValue(E);
- Address Addr = LV.getAddress(*this);
+ Address Addr = LV.getAddress();
// If the array type was an incomplete type, we need to make sure
// the decay ends up being the right type.
@@ -3924,6 +3952,21 @@ static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
}
}
+static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
+ ArrayRef<llvm::Value *> indices,
+ llvm::Type *elementType, bool inbounds,
+ bool signedIndices, SourceLocation loc,
+ CharUnits align,
+ const llvm::Twine &name = "arrayidx") {
+ if (inbounds) {
+ return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
+ CodeGenFunction::NotSubtraction, loc,
+ align, name);
+ } else {
+ return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
+ }
+}
+
static CharUnits getArrayElementAlign(CharUnits arrayAlign,
llvm::Value *idx,
CharUnits eltSize) {
@@ -3971,7 +4014,7 @@ static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF,
llvm::Function *Fn =
CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
- llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.getPointer()});
+ llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
return Address(Call, Addr.getElementType(), Addr.getAlignment());
}
@@ -4034,7 +4077,7 @@ static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
// We can use that to compute the best alignment of the element.
CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
CharUnits eltAlign =
- getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
+ getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
if (hasBPFPreserveStaticOffset(Base))
addr = wrapWithBPFPreserveStaticOffset(CGF, addr);
@@ -4043,19 +4086,19 @@ static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
if (!LastIndex ||
(!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) {
- eltPtr = emitArraySubscriptGEP(
- CGF, addr.getElementType(), addr.getPointer(), indices, inbounds,
- signedIndices, loc, name);
+ addr = emitArraySubscriptGEP(CGF, addr, indices,
+ CGF.ConvertTypeForMem(eltType), inbounds,
+ signedIndices, loc, eltAlign, name);
+ return addr;
} else {
// Remember the original array subscript for bpf target
unsigned idx = LastIndex->getZExtValue();
llvm::DIType *DbgInfo = nullptr;
if (arrayType)
DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
- eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(addr.getElementType(),
- addr.getPointer(),
- indices.size() - 1,
- idx, DbgInfo);
+ eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
+ addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
+ idx, DbgInfo);
}
return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
@@ -4147,15 +4190,14 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// If the base is a vector type, then we are forming a vector element lvalue
// with this subscript.
- if (E->getBase()->getType()->isVectorType() &&
+ if (E->getBase()->getType()->isSubscriptableVectorType() &&
!isa<ExtVectorElementExpr>(E->getBase())) {
// Emit the vector as an lvalue to get its address.
LValue LHS = EmitLValue(E->getBase());
auto *Idx = EmitIdxAfterBase(/*Promote*/false);
assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
- return LValue::MakeVectorElt(LHS.getAddress(*this), Idx,
- E->getBase()->getType(), LHS.getBaseInfo(),
- TBAAAccessInfo());
+ return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),
+ LHS.getBaseInfo(), TBAAAccessInfo());
}
// All the other cases basically behave like simple offsetting.
@@ -4224,8 +4266,8 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
CharUnits EltAlign =
getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
llvm::Value *EltPtr =
- emitArraySubscriptGEP(*this, Int8Ty, Addr.getPointer(), ScaledIdx,
- false, SignedIndices, E->getExprLoc());
+ emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
+ ScaledIdx, false, SignedIndices, E->getExprLoc());
Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
} else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
// If this is A[i] where A is an array, the frontend will have decayed the
@@ -4257,7 +4299,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
if (const auto *ME = dyn_cast<MemberExpr>(Array);
ME &&
ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel) &&
- ME->getMemberDecl()->hasAttr<CountedByAttr>()) {
+ ME->getMemberDecl()->getType()->isCountAttributedType()) {
const FieldDecl *FAMDecl = dyn_cast<FieldDecl>(ME->getMemberDecl());
if (const FieldDecl *CountFD = FindCountedByField(FAMDecl)) {
if (std::optional<int64_t> Diff =
@@ -4267,11 +4309,11 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// Create a GEP with a byte offset between the FAM and count and
// use that to load the count value.
Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(
- ArrayLV.getAddress(*this), Int8PtrTy, Int8Ty);
+ ArrayLV.getAddress(), Int8PtrTy, Int8Ty);
llvm::Type *CountTy = ConvertType(CountFD->getType());
llvm::Value *Res = Builder.CreateInBoundsGEP(
- Int8Ty, Addr.getPointer(),
+ Int8Ty, Addr.emitRawPointer(*this),
Builder.getInt32(OffsetDiff.getQuantity()), ".counted_by.gep");
Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
".counted_by.load");
@@ -4287,7 +4329,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// Propagate the alignment from the array itself to the result.
QualType arrayType = Array->getType();
Addr = emitArraySubscriptGEP(
- *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
+ *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
E->getExprLoc(), &arrayType, E->getBase());
EltBaseInfo = ArrayLV.getBaseInfo();
@@ -4326,7 +4368,7 @@ LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
llvm::Value *FinalIdx =
Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
return LValue::MakeMatrixElt(
- MaybeConvertMatrixAddress(Base.getAddress(*this), *this), FinalIdx,
+ MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
}
@@ -4336,10 +4378,10 @@ static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
QualType BaseTy, QualType ElTy,
bool IsLowerBound) {
LValue BaseLVal;
- if (auto *ASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParenImpCasts())) {
- BaseLVal = CGF.EmitOMPArraySectionExpr(ASE, IsLowerBound);
+ if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
+ BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
if (BaseTy->isArrayType()) {
- Address Addr = BaseLVal.getAddress(CGF);
+ Address Addr = BaseLVal.getAddress();
BaseInfo = BaseLVal.getBaseInfo();
// If the array type was an incomplete type, we need to make sure
@@ -4363,15 +4405,19 @@ static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
BaseInfo.mergeForCast(TypeBaseInfo);
TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
- return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)),
+ return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
CGF.ConvertTypeForMem(ElTy), Align);
}
return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
}
-LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
- bool IsLowerBound) {
- QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(E->getBase());
+LValue CodeGenFunction::EmitArraySectionExpr(const ArraySectionExpr *E,
+ bool IsLowerBound) {
+
+ assert(!E->isOpenACCArraySection() &&
+ "OpenACC Array section codegen not implemented");
+
+ QualType BaseTy = ArraySectionExpr::getBaseOriginalType(E->getBase());
QualType ResultExprTy;
if (auto *AT = getContext().getAsArrayType(BaseTy))
ResultExprTy = AT->getElementType();
@@ -4511,15 +4557,15 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
// Propagate the alignment from the array itself to the result.
EltPtr = emitArraySubscriptGEP(
- *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
+ *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
/*signedIndices=*/false, E->getExprLoc());
BaseInfo = ArrayLV.getBaseInfo();
TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
} else {
- Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo,
- TBAAInfo, BaseTy, ResultExprTy,
- IsLowerBound);
+ Address Base =
+ emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
+ ResultExprTy, IsLowerBound);
EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
!getLangOpts().isSignedOverflowDefined(),
/*signedIndices=*/false, E->getExprLoc());
@@ -4571,7 +4617,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
if (Base.isSimple()) {
llvm::Constant *CV =
llvm::ConstantDataVector::get(getLLVMContext(), Indices);
- return LValue::MakeExtVectorElt(Base.getAddress(*this), CV, type,
+ return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
Base.getBaseInfo(), TBAAAccessInfo());
}
assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
@@ -4606,7 +4652,7 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
SkippedChecks.set(SanitizerKind::Alignment, true);
if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
SkippedChecks.set(SanitizerKind::Null, true);
- EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy,
+ EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr, PtrTy,
/*Alignment=*/CharUnits::Zero(), SkippedChecks);
BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
} else
@@ -4640,7 +4686,8 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
llvm::Value *ThisValue) {
bool HasExplicitObjectParameter = false;
- if (const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl)) {
+ const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl);
+ if (MD) {
HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
assert(MD->getParent()->isLambda());
assert(MD->getParent() == Field->getParent());
@@ -4655,8 +4702,19 @@ LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
AlignmentSource::Decl);
else
- LambdaLV = MakeNaturalAlignAddrLValue(AddrOfExplicitObject.getPointer(),
- D->getType().getNonReferenceType());
+ LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
+ D->getType().getNonReferenceType());
+
+ // Make sure we have an lvalue to the lambda itself and not a derived class.
+ auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl();
+ auto *LambdaTy = cast<CXXRecordDecl>(Field->getParent());
+ if (ThisTy != LambdaTy) {
+ const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(MD);
+ Address Base = GetAddressOfBaseClass(
+ LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
+ BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
+ LambdaLV = MakeAddrLValue(Base, QualType{LambdaTy->getTypeForDecl(), 0});
+ }
} else {
QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
@@ -4677,7 +4735,7 @@ unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
for (auto *F : Rec->getDefinition()->fields()) {
if (I == FieldIndex)
break;
- if (F->isUnnamedBitfield())
+ if (F->isUnnamedBitField())
Skipped++;
I++;
}
@@ -4703,7 +4761,7 @@ static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
/// The resulting address doesn't necessarily have the right type.
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
const FieldDecl *field) {
- if (field->isZeroSize(CGF.getContext()))
+ if (isEmptyFieldForLayout(CGF.getContext(), field))
return emitAddrOfZeroSizeField(CGF, base, field);
const RecordDecl *rec = field->getParent();
@@ -4760,7 +4818,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
field->getType()
.withCVRQualifiers(base.getVRQualifiers())
.isVolatileQualified();
- Address Addr = base.getAddress(*this);
+ Address Addr = base.getAddress();
unsigned Idx = RL.getLLVMFieldNo(field);
const RecordDecl *rec = field->getParent();
if (hasBPFPreserveStaticOffset(rec))
@@ -4836,7 +4894,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
getContext().getTypeSizeInChars(FieldType).getQuantity();
}
- Address addr = base.getAddress(*this);
+ Address addr = base.getAddress();
if (hasBPFPreserveStaticOffset(rec))
addr = wrapWithBPFPreserveStaticOffset(*this, addr);
if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
@@ -4846,7 +4904,8 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
// information provided by invariant.group. This is because accessing
// fields may leak the real address of dynamic object, which could result
// in miscompilation when leaked pointer would be compared.
- auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer());
+ auto *stripped =
+ Builder.CreateStripInvariantGroup(addr.emitRawPointer(*this));
addr = Address(stripped, addr.getElementType(), addr.getAlignment());
}
}
@@ -4865,10 +4924,11 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
// Remember the original union field index
llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
rec->getLocation());
- addr = Address(
- Builder.CreatePreserveUnionAccessIndex(
- addr.getPointer(), getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
- addr.getElementType(), addr.getAlignment());
+ addr =
+ Address(Builder.CreatePreserveUnionAccessIndex(
+ addr.emitRawPointer(*this),
+ getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
+ addr.getElementType(), addr.getAlignment());
}
if (FieldType->isReferenceType())
@@ -4921,7 +4981,7 @@ CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
if (!FieldType->isReferenceType())
return EmitLValueForField(Base, Field);
- Address V = emitAddrOfFieldStorage(*this, Base.getAddress(*this), Field);
+ Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field);
// Make sure that the address is pointing to the right type.
llvm::Type *llvmType = ConvertTypeForMem(FieldType);
@@ -5103,13 +5163,11 @@ LValue CodeGenFunction::EmitConditionalOperatorLValue(
return EmitUnsupportedLValue(expr, "conditional operator");
if (Info.LHS && Info.RHS) {
- Address lhsAddr = Info.LHS->getAddress(*this);
- Address rhsAddr = Info.RHS->getAddress(*this);
- llvm::PHINode *phi = Builder.CreatePHI(lhsAddr.getType(), 2, "cond-lvalue");
- phi->addIncoming(lhsAddr.getPointer(), Info.lhsBlock);
- phi->addIncoming(rhsAddr.getPointer(), Info.rhsBlock);
- Address result(phi, lhsAddr.getElementType(),
- std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
+ Address lhsAddr = Info.LHS->getAddress();
+ Address rhsAddr = Info.RHS->getAddress();
+ Address result = mergeAddressesInConditionalExpr(
+ lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
+ Builder.GetInsertBlock(), expr->getType());
AlignmentSource alignSource =
std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
Info.RHS->getBaseInfo().getAlignmentSource());
@@ -5178,6 +5236,8 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_FixedPointToIntegral:
case CK_IntegralToFixedPoint:
case CK_MatrixCast:
+ case CK_HLSLVectorTruncation:
+ case CK_HLSLArrayRValue:
return EmitUnsupportedLValue(E, "unexpected cast lvalue");
case CK_Dependent:
@@ -5193,9 +5253,9 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_Dynamic: {
LValue LV = EmitLValue(E->getSubExpr());
- Address V = LV.getAddress(*this);
+ Address V = LV.getAddress();
const auto *DCE = cast<CXXDynamicCastExpr>(E);
- return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType());
+ return MakeNaturalAlignRawAddrLValue(EmitDynamicCast(V, DCE), E->getType());
}
case CK_ConstructorConversion:
@@ -5214,7 +5274,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
if (E->changesVolatileQualification())
LV.getQuals() = E->getType().getQualifiers();
if (LV.isSimple()) {
- Address V = LV.getAddress(*this);
+ Address V = LV.getAddress();
if (V.isValid()) {
llvm::Type *T = ConvertTypeForMem(E->getType());
if (V.getElementType() != T)
@@ -5231,7 +5291,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
LValue LV = EmitLValue(E->getSubExpr());
- Address This = LV.getAddress(*this);
+ Address This = LV.getAddress();
// Perform the derived-to-base conversion
Address Base = GetAddressOfBaseClass(
@@ -5254,14 +5314,14 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
// Perform the base-to-derived conversion
Address Derived = GetAddressOfDerivedClass(
- LV.getAddress(*this), DerivedClassDecl, E->path_begin(), E->path_end(),
+ LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(),
/*NullCheckValue=*/false);
// C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
// performed and the object is not of the derived type.
if (sanitizePerformTypeCheck())
- EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(),
- Derived.getPointer(), E->getType());
+ EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), Derived,
+ E->getType());
if (SanOpts.has(SanitizerKind::CFIDerivedCast))
EmitVTablePtrCheckForCast(E->getType(), Derived,
@@ -5277,7 +5337,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
CGM.EmitExplicitCastExprType(CE, this);
LValue LV = EmitLValue(E->getSubExpr());
- Address V = LV.getAddress(*this).withElementType(
+ Address V = LV.getAddress().withElementType(
ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
@@ -5296,12 +5356,12 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
E->getSubExpr()->getType().getAddressSpace(),
E->getType().getAddressSpace(), ConvertType(DestTy));
return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()),
- LV.getAddress(*this).getAlignment()),
+ LV.getAddress().getAlignment()),
E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
}
case CK_ObjCObjectLValueCast: {
LValue LV = EmitLValue(E->getSubExpr());
- Address V = LV.getAddress(*this).withElementType(ConvertType(E->getType()));
+ Address V = LV.getAddress().withElementType(ConvertType(E->getType()));
return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
CGM.getTBAAInfoForSubobject(LV, E->getType()));
}
@@ -5361,7 +5421,7 @@ RValue CodeGenFunction::EmitRValueForField(LValue LV,
case TEK_Complex:
return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
case TEK_Aggregate:
- return FieldLV.asAggregateRValue(*this);
+ return FieldLV.asAggregateRValue();
case TEK_Scalar:
// This routine is used to load fields one-by-one to perform a copy, so
// don't load reference fields.
@@ -5451,7 +5511,7 @@ static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
// name to make it clear it's not the actual builtin.
if (CGF.CurFn->getName() != FDInlineName &&
OnlyHasInlineBuiltinDeclaration(FD)) {
- llvm::Constant *CalleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
+ llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
llvm::Module *M = Fn->getParent();
llvm::Function *Clone = M->getFunction(FDInlineName);
@@ -5474,7 +5534,7 @@ static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
return CGCallee::forBuiltin(builtinID, FD);
}
- llvm::Constant *CalleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
+ llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
FD->hasAttr<CUDAGlobalAttr>())
CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
@@ -5531,7 +5591,8 @@ CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
GD = GlobalDecl(VD);
CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
- CGCallee callee(calleeInfo, calleePtr);
+ CGPointerAuthInfo pointerAuth = CGM.getFunctionPointerAuthInfo(functionType);
+ CGCallee callee(calleeInfo, calleePtr, pointerAuth);
return callee;
}
@@ -5568,11 +5629,44 @@ LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
break;
}
- RValue RV = EmitAnyExpr(E->getRHS());
+ // TODO: Can we de-duplicate this code with the corresponding code in
+ // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
+ RValue RV;
+ llvm::Value *Previous = nullptr;
+ QualType SrcType = E->getRHS()->getType();
+ // Check if LHS is a bitfield, if RHS contains an implicit cast expression
+ // we want to extract that value and potentially (if the bitfield sanitizer
+ // is enabled) use it to check for an implicit conversion.
+ if (E->getLHS()->refersToBitField()) {
+ llvm::Value *RHS =
+ EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
+ RV = RValue::get(RHS);
+ } else
+ RV = EmitAnyExpr(E->getRHS());
+
LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
+
if (RV.isScalar())
EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc());
- EmitStoreThroughLValue(RV, LV);
+
+ if (LV.isBitField()) {
+ llvm::Value *Result = nullptr;
+ // If bitfield sanitizers are enabled we want to use the result
+ // to check whether a truncation or sign change has occurred.
+ if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
+ EmitStoreThroughBitfieldLValue(RV, LV, &Result);
+ else
+ EmitStoreThroughBitfieldLValue(RV, LV);
+
+ // If the expression contained an implicit conversion, make sure
+ // to use the value before the scalar conversion.
+ llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
+ QualType DstType = E->getLHS()->getType();
+ EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
+ LV.getBitFieldInfo(), E->getExprLoc());
+ } else
+ EmitStoreThroughLValue(RV, LV);
+
if (getLangOpts().OpenMP)
CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
E->getLHS());
@@ -5617,7 +5711,7 @@ LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
- return MakeNaturalAlignAddrLValue(EmitCXXTypeidExpr(E), E->getType());
+ return MakeNaturalAlignRawAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}
Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
@@ -5746,6 +5840,15 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);
llvm::Value *CalleePtr = Callee.getFunctionPointer();
+ if (CGM.getCodeGenOpts().PointerAuth.FunctionPointers) {
+ // Use raw pointer since we are using the callee pointer as data here.
+ Address Addr =
+ Address(CalleePtr, CalleePtr->getType(),
+ CharUnits::fromQuantity(
+ CalleePtr->getPointerAlignment(CGM.getDataLayout())),
+ Callee.getPointerAuthInfo(), nullptr);
+ CalleePtr = Addr.emitRawPointer(*this);
+ }
// On 32-bit Arm, the low bit of a function pointer indicates whether
// it's using the Arm or Thumb instruction set. The actual first
@@ -5950,7 +6053,7 @@ EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
if (E->getOpcode() == BO_PtrMemI) {
BaseAddr = EmitPointerWithAlignment(E->getLHS());
} else {
- BaseAddr = EmitLValue(E->getLHS()).getAddress(*this);
+ BaseAddr = EmitLValue(E->getLHS()).getAddress();
}
llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
@@ -5975,7 +6078,7 @@ RValue CodeGenFunction::convertTempToRValue(Address addr,
case TEK_Complex:
return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
case TEK_Aggregate:
- return lvalue.asAggregateRValue(*this);
+ return lvalue.asAggregateRValue();
case TEK_Scalar:
return RValue::get(EmitLoadOfScalar(lvalue, loc));
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
index 810b28f25fa1..d9f44f4be617 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
@@ -15,6 +15,7 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
+#include "EHScopeStack.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
@@ -24,6 +25,7 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
@@ -33,6 +35,10 @@ using namespace CodeGen;
// Aggregate Expression Emitter
//===----------------------------------------------------------------------===//
+namespace llvm {
+extern cl::opt<bool> EnableSingleByteCoverage;
+} // namespace llvm
+
namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
CodeGenFunction &CGF;
@@ -72,15 +78,11 @@ public:
/// then loads the result into DestPtr.
void EmitAggLoadOfLValue(const Expr *E);
- enum ExprValueKind {
- EVK_RValue,
- EVK_NonRValue
- };
-
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
/// SrcIsRValue is true if source comes from an RValue.
void EmitFinalDestCopy(QualType type, const LValue &src,
- ExprValueKind SrcValueKind = EVK_NonRValue);
+ CodeGenFunction::ExprValueKind SrcValueKind =
+ CodeGenFunction::EVK_NonRValue);
void EmitFinalDestCopy(QualType type, RValue src);
void EmitCopy(QualType type, const AggValueSlot &dest,
const AggValueSlot &src);
@@ -129,15 +131,12 @@ public:
EnsureDest(E->getType());
if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
- Address StoreDest = Dest.getAddress();
- // The emitted value is guaranteed to have the same size as the
- // destination but can have a different type. Just do a bitcast in this
- // case to avoid incorrect GEPs.
- if (Result->getType() != StoreDest.getType())
- StoreDest = StoreDest.withElementType(Result->getType());
-
- CGF.EmitAggregateStore(Result, StoreDest,
- E->getType().isVolatileQualified());
+ CGF.CreateCoercedStore(
+ Result, Dest.getAddress(),
+ llvm::TypeSize::getFixed(
+ Dest.getPreferredSize(CGF.getContext(), E->getType())
+ .getQuantity()),
+ E->getType().isVolatileQualified());
return;
}
return Visit(E->getSubExpr());
@@ -235,6 +234,9 @@ public:
RValue Res = CGF.EmitAtomicExpr(E);
EmitFinalDestCopy(E->getType(), Res);
}
+ void VisitPackIndexingExpr(PackIndexingExpr *E) {
+ Visit(E->getSelectedExpr());
+ }
};
} // end anonymous namespace.
@@ -287,10 +289,10 @@ void AggExprEmitter::withReturnValueSlot(
// Otherwise, EmitCall will emit its own, notice that it's "unused", and end
// its lifetime before we have the chance to emit a proper destructor call.
bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
- (RequiresDestruction && !Dest.getAddress().isValid());
+ (RequiresDestruction && Dest.isIgnored());
Address RetAddr = Address::invalid();
- Address RetAllocaAddr = Address::invalid();
+ RawAddress RetAllocaAddr = RawAddress::invalid();
EHScopeStack::stable_iterator LifetimeEndBlock;
llvm::Value *LifetimeSizePtr = nullptr;
@@ -322,7 +324,8 @@ void AggExprEmitter::withReturnValueSlot(
if (!UseTemp)
return;
- assert(Dest.isIgnored() || Dest.getPointer() != Src.getAggregatePointer());
+ assert(Dest.isIgnored() || Dest.emitRawPointer(CGF) !=
+ Src.getAggregatePointer(E->getType(), CGF));
EmitFinalDestCopy(E->getType(), Src);
if (!RequiresDestruction && LifetimeStartInst) {
@@ -338,12 +341,13 @@ void AggExprEmitter::withReturnValueSlot(
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
assert(src.isAggregate() && "value must be aggregate value!");
LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
- EmitFinalDestCopy(type, srcLV, EVK_RValue);
+ EmitFinalDestCopy(type, srcLV, CodeGenFunction::EVK_RValue);
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
-void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
- ExprValueKind SrcValueKind) {
+void AggExprEmitter::EmitFinalDestCopy(
+ QualType type, const LValue &src,
+ CodeGenFunction::ExprValueKind SrcValueKind) {
// If Dest is ignored, then we're evaluating an aggregate expression
// in a context that doesn't care about the result. Note that loads
// from volatile l-values force the existence of a non-ignored
@@ -355,7 +359,7 @@ void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
LValue DstLV = CGF.MakeAddrLValue(
Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);
- if (SrcValueKind == EVK_RValue) {
+ if (SrcValueKind == CodeGenFunction::EVK_RValue) {
if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
if (Dest.isPotentiallyAliased())
CGF.callCStructMoveAssignmentOperator(DstLV, src);
@@ -374,8 +378,8 @@ void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
}
AggValueSlot srcAgg = AggValueSlot::forLValue(
- src, CGF, AggValueSlot::IsDestructed, needsGC(type),
- AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
+ src, AggValueSlot::IsDestructed, needsGC(type), AggValueSlot::IsAliased,
+ AggValueSlot::MayOverlap);
EmitCopy(type, Dest, srcAgg);
}
@@ -413,60 +417,51 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
ASTContext &Ctx = CGF.getContext();
LValue Array = CGF.EmitLValue(E->getSubExpr());
assert(Array.isSimple() && "initializer_list array not a simple lvalue");
- Address ArrayPtr = Array.getAddress(CGF);
+ Address ArrayPtr = Array.getAddress();
const ConstantArrayType *ArrayType =
Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
assert(ArrayType && "std::initializer_list constructed from non-array");
- // FIXME: Perform the checks on the field types in SemaInit.
RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
RecordDecl::field_iterator Field = Record->field_begin();
- if (Field == Record->field_end()) {
- CGF.ErrorUnsupported(E, "weird std::initializer_list");
- return;
- }
+ assert(Field != Record->field_end() &&
+ Ctx.hasSameType(Field->getType()->getPointeeType(),
+ ArrayType->getElementType()) &&
+ "Expected std::initializer_list first field to be const E *");
// Start pointer.
- if (!Field->getType()->isPointerType() ||
- !Ctx.hasSameType(Field->getType()->getPointeeType(),
- ArrayType->getElementType())) {
- CGF.ErrorUnsupported(E, "weird std::initializer_list");
- return;
- }
-
AggValueSlot Dest = EnsureSlot(E->getType());
LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
- llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
- llvm::Value *IdxStart[] = { Zero, Zero };
- llvm::Value *ArrayStart = Builder.CreateInBoundsGEP(
- ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxStart, "arraystart");
+ llvm::Value *ArrayStart = ArrayPtr.emitRawPointer(CGF);
CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
++Field;
-
- if (Field == Record->field_end()) {
- CGF.ErrorUnsupported(E, "weird std::initializer_list");
- return;
- }
+ assert(Field != Record->field_end() &&
+ "Expected std::initializer_list to have two fields");
llvm::Value *Size = Builder.getInt(ArrayType->getSize());
LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
- if (Field->getType()->isPointerType() &&
- Ctx.hasSameType(Field->getType()->getPointeeType(),
- ArrayType->getElementType())) {
+ if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
+ // Length.
+ CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
+
+ } else {
// End pointer.
+ assert(Field->getType()->isPointerType() &&
+ Ctx.hasSameType(Field->getType()->getPointeeType(),
+ ArrayType->getElementType()) &&
+ "Expected std::initializer_list second field to be const E *");
+ llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
llvm::Value *IdxEnd[] = { Zero, Size };
llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
- ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxEnd, "arrayend");
+ ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxEnd,
+ "arrayend");
CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
- } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
- // Length.
- CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
- } else {
- CGF.ErrorUnsupported(E, "weird std::initializer_list");
- return;
}
+
+ assert(++Field == Record->field_end() &&
+ "Expected std::initializer_list to only have two fields");
}
/// Determine if E is a trivial array filler, that is, one that is
@@ -500,19 +495,20 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
uint64_t NumInitElements = Args.size();
uint64_t NumArrayElements = AType->getNumElements();
+ for (const auto *Init : Args) {
+ if (const auto *Embed = dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
+ NumInitElements += Embed->getDataElementCount() - 1;
+ if (NumInitElements > NumArrayElements) {
+ NumInitElements = NumArrayElements;
+ break;
+ }
+ }
+ }
+
assert(NumInitElements <= NumArrayElements);
QualType elementType =
CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();
-
- // DestPtr is an array*. Construct an elementType* by drilling
- // down a level.
- llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
- llvm::Value *indices[] = { zero, zero };
- llvm::Value *begin = Builder.CreateInBoundsGEP(
- DestPtr.getElementType(), DestPtr.getPointer(), indices,
- "arrayinit.begin");
-
CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
CharUnits elementAlign =
DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
@@ -525,9 +521,12 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
elementType.isTriviallyCopyableType(CGF.getContext())) {
CodeGen::CodeGenModule &CGM = CGF.CGM;
ConstantEmitter Emitter(CGF);
- LangAS AS = ArrayQTy.getAddressSpace();
+ QualType GVArrayQTy = CGM.getContext().getAddrSpaceQualType(
+ CGM.getContext().removeAddrSpaceQualType(ArrayQTy),
+ CGM.GetGlobalConstantAddressSpace());
+ LangAS AS = GVArrayQTy.getAddressSpace();
if (llvm::Constant *C =
- Emitter.tryEmitForInitializer(ExprToVisit, AS, ArrayQTy)) {
+ Emitter.tryEmitForInitializer(ExprToVisit, AS, GVArrayQTy)) {
auto GV = new llvm::GlobalVariable(
CGM.getModule(), C->getType(),
/* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C,
@@ -535,10 +534,10 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
/* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
CGM.getContext().getTargetAddressSpace(AS));
Emitter.finalize(GV);
- CharUnits Align = CGM.getContext().getTypeAlignInChars(ArrayQTy);
+ CharUnits Align = CGM.getContext().getTypeAlignInChars(GVArrayQTy);
GV->setAlignment(Align.getAsAlign());
Address GVAddr(GV, GV->getValueType(), Align);
- EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, ArrayQTy));
+ EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, GVArrayQTy));
return;
}
}
@@ -548,51 +547,63 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
// For that, we'll need an EH cleanup.
QualType::DestructionKind dtorKind = elementType.isDestructedType();
Address endOfInit = Address::invalid();
- EHScopeStack::stable_iterator cleanup;
- llvm::Instruction *cleanupDominator = nullptr;
- if (CGF.needsEHCleanup(dtorKind)) {
+ CodeGenFunction::CleanupDeactivationScope deactivation(CGF);
+
+ llvm::Value *begin = DestPtr.emitRawPointer(CGF);
+ if (dtorKind) {
+ CodeGenFunction::AllocaTrackerRAII allocaTracker(CGF);
// In principle we could tell the cleanup where we are more
// directly, but the control flow can get so varied here that it
// would actually be quite complex. Therefore we go through an
// alloca.
+ llvm::Instruction *dominatingIP =
+ Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(CGF.Int8PtrTy));
endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
"arrayinit.endOfInit");
- cleanupDominator = Builder.CreateStore(begin, endOfInit);
+ Builder.CreateStore(begin, endOfInit);
CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
elementAlign,
CGF.getDestroyer(dtorKind));
- cleanup = CGF.EHStack.stable_begin();
+ cast<EHCleanupScope>(*CGF.EHStack.find(CGF.EHStack.stable_begin()))
+ .AddAuxAllocas(allocaTracker.Take());
- // Otherwise, remember that we didn't need a cleanup.
- } else {
- dtorKind = QualType::DK_none;
+ CGF.DeferredDeactivationCleanupStack.push_back(
+ {CGF.EHStack.stable_begin(), dominatingIP});
}
llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
- // The 'current element to initialize'. The invariants on this
- // variable are complicated. Essentially, after each iteration of
- // the loop, it points to the last initialized element, except
- // that it points to the beginning of the array before any
- // elements have been initialized.
- llvm::Value *element = begin;
-
- // Emit the explicit initializers.
- for (uint64_t i = 0; i != NumInitElements; ++i) {
- // Advance to the next element.
- if (i > 0) {
+ auto Emit = [&](Expr *Init, uint64_t ArrayIndex) {
+ llvm::Value *element = begin;
+ if (ArrayIndex > 0) {
element = Builder.CreateInBoundsGEP(
- llvmElementType, element, one, "arrayinit.element");
+ llvmElementType, begin,
+ llvm::ConstantInt::get(CGF.SizeTy, ArrayIndex), "arrayinit.element");
// Tell the cleanup that it needs to destroy up to this
// element. TODO: some of these stores can be trivially
// observed to be unnecessary.
- if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
+ if (endOfInit.isValid())
+ Builder.CreateStore(element, endOfInit);
}
LValue elementLV = CGF.MakeAddrLValue(
Address(element, llvmElementType, elementAlign), elementType);
- EmitInitializationToLValue(Args[i], elementLV);
+ EmitInitializationToLValue(Init, elementLV);
+ return true;
+ };
+
+ unsigned ArrayIndex = 0;
+ // Emit the explicit initializers.
+ for (uint64_t i = 0; i != NumInitElements; ++i) {
+ if (ArrayIndex >= NumInitElements)
+ break;
+ if (auto *EmbedS = dyn_cast<EmbedExpr>(Args[i]->IgnoreParenImpCasts())) {
+ EmbedS->doForEachDataElement(Emit, ArrayIndex);
+ } else {
+ Emit(Args[i], ArrayIndex);
+ ArrayIndex++;
+ }
}
// Check whether there's a non-trivial array-fill expression.
@@ -609,9 +620,12 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
// do { *array++ = filler; } while (array != end);
// Advance to the start of the rest of the array.
+ llvm::Value *element = begin;
if (NumInitElements) {
element = Builder.CreateInBoundsGEP(
- llvmElementType, element, one, "arrayinit.start");
+ llvmElementType, element,
+ llvm::ConstantInt::get(CGF.SizeTy, NumInitElements),
+ "arrayinit.start");
if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
}
@@ -661,9 +675,6 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
CGF.EmitBlock(endBB);
}
-
- // Leave the partial-array cleanup if we entered one.
- if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}
//===----------------------------------------------------------------------===//
@@ -732,7 +743,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
CodeGenFunction::TCK_Load);
// FIXME: Do we also need to handle property references here?
if (LV.isSimple())
- CGF.EmitDynamicCast(LV.getAddress(CGF), cast<CXXDynamicCastExpr>(E));
+ CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
else
CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
@@ -765,8 +776,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
}
LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
- Address SourceAddress =
- SourceLV.getAddress(CGF).withElementType(CGF.Int8Ty);
+ Address SourceAddress = SourceLV.getAddress().withElementType(CGF.Int8Ty);
Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty);
llvm::Value *SizeVal = llvm::ConstantInt::get(
CGF.SizeTy,
@@ -873,6 +883,9 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
[[fallthrough]];
+ case CK_HLSLArrayRValue:
+ Visit(E->getSubExpr());
+ break;
case CK_NoOp:
case CK_UserDefinedConversion:
@@ -930,6 +943,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_BuiltinFnToFnPtr:
case CK_ZeroToOCLOpaqueType:
case CK_MatrixCast:
+ case CK_HLSLVectorTruncation:
case CK_IntToOCLSampler:
case CK_FloatingToFixedPoint:
@@ -1051,7 +1065,7 @@ void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
if (RV.isScalar())
return {RV.getScalarVal(), nullptr};
if (RV.isAggregate())
- return {RV.getAggregatePointer(), nullptr};
+ return {RV.getAggregatePointer(E->getType(), CGF), nullptr};
assert(RV.isComplex());
return RV.getComplexVal();
};
@@ -1212,7 +1226,7 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
}
EmitCopy(E->getLHS()->getType(),
- AggValueSlot::forLValue(LHS, CGF, AggValueSlot::IsDestructed,
+ AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
needsGC(E->getLHS()->getType()),
AggValueSlot::IsAliased,
AggValueSlot::MayOverlap),
@@ -1234,7 +1248,7 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
// Codegen the RHS so that it stores directly into the LHS.
AggValueSlot LHSSlot = AggValueSlot::forLValue(
- LHS, CGF, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
+ LHS, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
// A non-volatile aggregate destination might have volatile member.
if (!LHSSlot.isVolatile() &&
@@ -1275,7 +1289,10 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
eval.begin(CGF);
CGF.EmitBlock(LHSBlock);
- CGF.incrementProfileCounter(E);
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(E->getTrueExpr());
+ else
+ CGF.incrementProfileCounter(E);
Visit(E->getTrueExpr());
eval.end(CGF);
@@ -1290,6 +1307,8 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
eval.begin(CGF);
CGF.EmitBlock(RHSBlock);
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(E->getFalseExpr());
Visit(E->getFalseExpr());
eval.end(CGF);
@@ -1298,6 +1317,8 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
E->getType());
CGF.EmitBlock(ContBlock);
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(E);
}
void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
@@ -1306,15 +1327,13 @@ void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
Address ArgValue = Address::invalid();
- Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
+ CGF.EmitVAArg(VE, ArgValue, Dest);
// If EmitVAArg fails, emit an error.
- if (!ArgPtr.isValid()) {
+ if (!ArgValue.isValid()) {
CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
return;
}
-
- EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
@@ -1353,9 +1372,8 @@ AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());
// We'll need to enter cleanup scopes in case any of the element
- // initializers throws an exception.
- SmallVector<EHScopeStack::stable_iterator, 16> Cleanups;
- llvm::Instruction *CleanupDominator = nullptr;
+ // initializers throws an exception or contains branch out of the expressions.
+ CodeGenFunction::CleanupDeactivationScope scope(CGF);
CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
@@ -1374,28 +1392,12 @@ AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
if (QualType::DestructionKind DtorKind =
CurField->getType().isDestructedType()) {
assert(LV.isSimple());
- if (CGF.needsEHCleanup(DtorKind)) {
- if (!CleanupDominator)
- CleanupDominator = CGF.Builder.CreateAlignedLoad(
- CGF.Int8Ty,
- llvm::Constant::getNullValue(CGF.Int8PtrTy),
- CharUnits::One()); // placeholder
-
- CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), CurField->getType(),
- CGF.getDestroyer(DtorKind), false);
- Cleanups.push_back(CGF.EHStack.stable_begin());
- }
+ if (DtorKind)
+ CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
+ CurField->getType(),
+ CGF.getDestroyer(DtorKind), false);
}
}
-
- // Deactivate all the partial cleanups in reverse order, which
- // generally means popping them.
- for (unsigned i = Cleanups.size(); i != 0; --i)
- CGF.DeactivateCleanupBlock(Cleanups[i-1], CleanupDominator);
-
- // Destroy the placeholder if we made one.
- if (CleanupDominator)
- CleanupDominator->eraseFromParent();
}
void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
@@ -1454,6 +1456,7 @@ static bool castPreservesZero(const CastExpr *CE) {
case CK_MatrixCast:
case CK_NonAtomicToAtomic:
case CK_AtomicToNonAtomic:
+ case CK_HLSLVectorTruncation:
return true;
case CK_BaseToDerivedMemberPointer:
@@ -1505,6 +1508,7 @@ static bool castPreservesZero(const CastExpr *CE) {
case CK_LValueToRValue:
case CK_LValueToRValueBitCast:
case CK_UncheckedDerivedToBase:
+ case CK_HLSLArrayRValue:
return false;
}
llvm_unreachable("Unhandled clang::CastKind enum");
@@ -1569,7 +1573,7 @@ AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
return;
case TEK_Aggregate:
CGF.EmitAggExpr(
- E, AggValueSlot::forLValue(LV, CGF, AggValueSlot::IsDestructed,
+ E, AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased,
AggValueSlot::MayOverlap, Dest.isZeroed()));
@@ -1608,7 +1612,7 @@ void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
// There's a potential optimization opportunity in combining
// memsets; that would be easy for arrays, but relatively
// difficult for structures with the current code.
- CGF.EmitNullInitialization(lv.getAddress(CGF), lv.getType());
+ CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
}
}
@@ -1682,14 +1686,7 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
// We'll need to enter cleanup scopes in case any of the element
// initializers throws an exception.
SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
- llvm::Instruction *cleanupDominator = nullptr;
- auto addCleanup = [&](const EHScopeStack::stable_iterator &cleanup) {
- cleanups.push_back(cleanup);
- if (!cleanupDominator) // create placeholder once needed
- cleanupDominator = CGF.Builder.CreateAlignedLoad(
- CGF.Int8Ty, llvm::Constant::getNullValue(CGF.Int8PtrTy),
- CharUnits::One());
- };
+ CodeGenFunction::CleanupDeactivationScope DeactivateCleanups(CGF);
unsigned curInitIndex = 0;
@@ -1712,10 +1709,8 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot);
if (QualType::DestructionKind dtorKind =
- Base.getType().isDestructedType()) {
- CGF.pushDestroy(dtorKind, V, Base.getType());
- addCleanup(CGF.EHStack.stable_begin());
- }
+ Base.getType().isDestructedType())
+ CGF.pushDestroyAndDeferDeactivation(dtorKind, V, Base.getType());
}
}
@@ -1732,7 +1727,9 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
// Make sure that it's really an empty and not a failure of
// semantic analysis.
for (const auto *Field : record->fields())
- assert((Field->isUnnamedBitfield() || Field->isAnonymousStructOrUnion()) && "Only unnamed bitfields or ananymous class allowed");
+ assert(
+ (Field->isUnnamedBitField() || Field->isAnonymousStructOrUnion()) &&
+ "Only unnamed bitfields or anonymous class allowed");
#endif
return;
}
@@ -1760,7 +1757,7 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
break;
// Always skip anonymous bitfields.
- if (field->isUnnamedBitfield())
+ if (field->isUnnamedBitField())
continue;
// We're done if we reach the end of the explicit initializers, we
@@ -1786,37 +1783,16 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
// Push a destructor if necessary.
// FIXME: if we have an array of structures, all explicitly
// initialized, we can end up pushing a linear number of cleanups.
- bool pushedCleanup = false;
if (QualType::DestructionKind dtorKind
= field->getType().isDestructedType()) {
assert(LV.isSimple());
- if (CGF.needsEHCleanup(dtorKind)) {
- CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), field->getType(),
- CGF.getDestroyer(dtorKind), false);
- addCleanup(CGF.EHStack.stable_begin());
- pushedCleanup = true;
+ if (dtorKind) {
+ CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
+ field->getType(),
+ CGF.getDestroyer(dtorKind), false);
}
}
-
- // If the GEP didn't get used because of a dead zero init or something
- // else, clean it up for -O0 builds and general tidiness.
- if (!pushedCleanup && LV.isSimple())
- if (llvm::GetElementPtrInst *GEP =
- dyn_cast<llvm::GetElementPtrInst>(LV.getPointer(CGF)))
- if (GEP->use_empty())
- GEP->eraseFromParent();
- }
-
- // Deactivate all the partial cleanups in reverse order, which
- // generally means popping them.
- assert((cleanupDominator || cleanups.empty()) &&
- "Missing cleanupDominator before deactivating cleanup blocks");
- for (unsigned i = cleanups.size(); i != 0; --i)
- CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);
-
- // Destroy the placeholder if we made one.
- if (cleanupDominator)
- cleanupDominator->eraseFromParent();
+ }
}
void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
@@ -1833,9 +1809,9 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
// destPtr is an array*. Construct an elementType* by drilling down a level.
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
llvm::Value *indices[] = {zero, zero};
- llvm::Value *begin = Builder.CreateInBoundsGEP(
- destPtr.getElementType(), destPtr.getPointer(), indices,
- "arrayinit.begin");
+ llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getElementType(),
+ destPtr.emitRawPointer(CGF),
+ indices, "arrayinit.begin");
// Prepare to special-case multidimensional array initialization: we avoid
// emitting multiple destructor loops in that case.
@@ -1887,7 +1863,7 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
if (InnerLoop) {
// If the subexpression is an ArrayInitLoopExpr, share its cleanup.
auto elementSlot = AggValueSlot::forLValue(
- elementLV, CGF, AggValueSlot::IsDestructed,
+ elementLV, AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
AggValueSlot::DoesNotOverlap);
AggExprEmitter(CGF, elementSlot, false)
@@ -1965,7 +1941,7 @@ static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
if (Field->getType()->isIncompleteArrayType() ||
ILEElement == ILE->getNumInits())
break;
- if (Field->isUnnamedBitfield())
+ if (Field->isUnnamedBitField())
continue;
const Expr *E = ILE->getInit(ILEElement++);
@@ -2052,18 +2028,29 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
Address Temp = CreateMemTemp(E->getType());
LValue LV = MakeAddrLValue(Temp, E->getType());
- EmitAggExpr(E, AggValueSlot::forLValue(
- LV, *this, AggValueSlot::IsNotDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap));
+ EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased,
+ AggValueSlot::DoesNotOverlap));
return LV;
}
+void CodeGenFunction::EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest,
+ const LValue &Src,
+ ExprValueKind SrcKind) {
+ return AggExprEmitter(*this, Dest, Dest.isIgnored())
+ .EmitFinalDestCopy(Type, Src, SrcKind);
+}
+
AggValueSlot::Overlap_t
CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {
if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
return AggValueSlot::DoesNotOverlap;
+ // Empty fields can overlap earlier fields.
+ if (FD->getType()->getAsCXXRecordDecl()->isEmpty())
+ return AggValueSlot::MayOverlap;
+
// If the field lies entirely within the enclosing class's nvsize, its tail
// padding cannot overlap any already-initialized object. (The only subobjects
// with greater addresses that might already be initialized are vbases.)
@@ -2086,6 +2073,10 @@ AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit(
if (IsVirtual)
return AggValueSlot::MayOverlap;
+ // Empty bases can overlap earlier bases.
+ if (BaseRD->isEmpty())
+ return AggValueSlot::MayOverlap;
+
// If the base class is laid out entirely within the nvsize of the derived
// class, its tail padding cannot yet be initialized, so we can issue
// stores at the full width of the base class.
@@ -2104,8 +2095,8 @@ void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
bool isVolatile) {
assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
- Address DestPtr = Dest.getAddress(*this);
- Address SrcPtr = Src.getAddress(*this);
+ Address DestPtr = Dest.getAddress();
+ Address SrcPtr = Src.getAddress();
if (getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
index d136bfc37278..8eb6ab7381ac 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
@@ -142,7 +142,7 @@ RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
BaseQuals = PTy->getPointeeType().getQualifiers();
} else {
LValue BaseLV = EmitLValue(BaseExpr);
- BaseValue = BaseLV.getAddress(*this);
+ BaseValue = BaseLV.getAddress();
QualType BaseTy = BaseExpr->getType();
BaseQuals = BaseTy.getQualifiers();
}
@@ -280,7 +280,8 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
Address ThisValue = EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
- This = MakeAddrLValue(ThisValue, Base->getType(), BaseInfo, TBAAInfo);
+ This = MakeAddrLValue(ThisValue, Base->getType()->getPointeeType(),
+ BaseInfo, TBAAInfo);
} else {
This = EmitLValue(Base);
}
@@ -297,7 +298,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
/*ImplicitParamTy=*/QualType(), CE, Args, nullptr);
EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
- /*Delegating=*/false, This.getAddress(*this), Args,
+ /*Delegating=*/false, This.getAddress(), Args,
AggValueSlot::DoesNotOverlap, CE->getExprLoc(),
/*NewPointerIsChecked=*/false);
return RValue::get(nullptr);
@@ -353,10 +354,12 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(IOA))
SkippedChecks.set(SanitizerKind::Null, true);
}
- EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc,
- This.getPointer(*this),
- C.getRecordType(CalleeDecl->getParent()),
- /*Alignment=*/CharUnits::Zero(), SkippedChecks);
+
+ if (sanitizePerformTypeCheck())
+ EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc,
+ This.emitRawPointer(*this),
+ C.getRecordType(CalleeDecl->getParent()),
+ /*Alignment=*/CharUnits::Zero(), SkippedChecks);
// C++ [class.virtual]p12:
// Explicit qualification with the scope operator (5.1) suppresses the
@@ -372,7 +375,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
if (UseVirtualCall) {
CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
- This.getAddress(*this),
+ This.getAddress(),
cast<CXXMemberCallExpr>(CE));
} else {
GlobalDecl GD(Dtor, Dtor_Complete);
@@ -400,14 +403,14 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
CGCallee Callee;
if (UseVirtualCall) {
- Callee = CGCallee::forVirtual(CE, MD, This.getAddress(*this), Ty);
+ Callee = CGCallee::forVirtual(CE, MD, This.getAddress(), Ty);
} else {
if (SanOpts.has(SanitizerKind::CFINVCall) &&
MD->getParent()->isDynamicClass()) {
llvm::Value *VTable;
const CXXRecordDecl *RD;
std::tie(VTable, RD) = CGM.getCXXABI().LoadVTablePtr(
- *this, This.getAddress(*this), CalleeDecl->getParent());
+ *this, This.getAddress(), CalleeDecl->getParent());
EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getBeginLoc());
}
@@ -426,7 +429,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
if (MD->isVirtual()) {
Address NewThisAddr =
CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
- *this, CalleeDecl, This.getAddress(*this), UseVirtualCall);
+ *this, CalleeDecl, This.getAddress(), UseVirtualCall);
This.setAddress(NewThisAddr);
}
@@ -453,9 +456,9 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
if (BO->getOpcode() == BO_PtrMemI)
This = EmitPointerWithAlignment(BaseExpr, nullptr, nullptr, KnownNonNull);
else
- This = EmitLValue(BaseExpr, KnownNonNull).getAddress(*this);
+ This = EmitLValue(BaseExpr, KnownNonNull).getAddress();
- EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
+ EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(*this),
QualType(MPT->getClass(), 0));
// Get the member function pointer.
@@ -1005,8 +1008,8 @@ void CodeGenFunction::EmitNewArrayInitializer(
const Expr *Init = E->getInitializer();
Address EndOfInit = Address::invalid();
QualType::DestructionKind DtorKind = ElementType.isDestructedType();
- EHScopeStack::stable_iterator Cleanup;
- llvm::Instruction *CleanupDominator = nullptr;
+ CleanupDeactivationScope deactivation(*this);
+ bool pushedCleanup = false;
CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
CharUnits ElementAlign =
@@ -1073,8 +1076,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
// Move past these elements.
InitListElements =
cast<ConstantArrayType>(Init->getType()->getAsArrayTypeUnsafe())
- ->getSize()
- .getZExtValue();
+ ->getZExtSize();
CurPtr = Builder.CreateConstInBoundsGEP(
CurPtr, InitListElements, "string.init.end");
@@ -1103,18 +1105,24 @@ void CodeGenFunction::EmitNewArrayInitializer(
}
// Enter a partial-destruction Cleanup if necessary.
- if (needsEHCleanup(DtorKind)) {
+ if (DtorKind) {
+ AllocaTrackerRAII AllocaTracker(*this);
// In principle we could tell the Cleanup where we are more
// directly, but the control flow can get so varied here that it
// would actually be quite complex. Therefore we go through an
// alloca.
+ llvm::Instruction *DominatingIP =
+ Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(Int8PtrTy));
EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
"array.init.end");
- CleanupDominator = Builder.CreateStore(BeginPtr.getPointer(), EndOfInit);
- pushIrregularPartialArrayCleanup(BeginPtr.getPointer(), EndOfInit,
- ElementType, ElementAlign,
+ pushIrregularPartialArrayCleanup(BeginPtr.emitRawPointer(*this),
+ EndOfInit, ElementType, ElementAlign,
getDestroyer(DtorKind));
- Cleanup = EHStack.stable_begin();
+ cast<EHCleanupScope>(*EHStack.find(EHStack.stable_begin()))
+ .AddAuxAllocas(AllocaTracker.Take());
+ DeferredDeactivationCleanupStack.push_back(
+ {EHStack.stable_begin(), DominatingIP});
+ pushedCleanup = true;
}
CharUnits StartAlign = CurPtr.getAlignment();
@@ -1124,16 +1132,17 @@ void CodeGenFunction::EmitNewArrayInitializer(
// element. TODO: some of these stores can be trivially
// observed to be unnecessary.
if (EndOfInit.isValid()) {
- Builder.CreateStore(CurPtr.getPointer(), EndOfInit);
+ Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit);
}
// FIXME: If the last initializer is an incomplete initializer list for
// an array, and we have an array filler, we can fold together the two
// initialization loops.
StoreAnyExprIntoOneUnit(*this, IE, IE->getType(), CurPtr,
AggValueSlot::DoesNotOverlap);
- CurPtr = Address(Builder.CreateInBoundsGEP(
- CurPtr.getElementType(), CurPtr.getPointer(),
- Builder.getSize(1), "array.exp.next"),
+ CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getElementType(),
+ CurPtr.emitRawPointer(*this),
+ Builder.getSize(1),
+ "array.exp.next"),
CurPtr.getElementType(),
StartAlign.alignmentAtOffset((++i) * ElementSize));
}
@@ -1160,9 +1169,6 @@ void CodeGenFunction::EmitNewArrayInitializer(
// initialization.
llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
- // If there was a Cleanup, deactivate it.
- if (CleanupDominator)
- DeactivateCleanupBlock(Cleanup, CleanupDominator);
return;
}
@@ -1187,7 +1193,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
// FIXME: Share this cleanup with the constructor call emission rather than
// having it create a cleanup of its own.
if (EndOfInit.isValid())
- Builder.CreateStore(CurPtr.getPointer(), EndOfInit);
+ Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit);
// Emit a constructor call loop to initialize the remaining elements.
if (InitListElements)
@@ -1231,7 +1237,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl()))
NumElements = CXXRD->getNumBases();
for (auto *Field : RType->getDecl()->fields())
- if (!Field->isUnnamedBitfield())
+ if (!Field->isUnnamedBitField())
++NumElements;
// FIXME: Recurse into nested InitListExprs.
if (ILE->getNumInits() == NumElements)
@@ -1250,15 +1256,15 @@ void CodeGenFunction::EmitNewArrayInitializer(
llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");
// Find the end of the array, hoisted out of the loop.
- llvm::Value *EndPtr =
- Builder.CreateInBoundsGEP(BeginPtr.getElementType(), BeginPtr.getPointer(),
- NumElements, "array.end");
+ llvm::Value *EndPtr = Builder.CreateInBoundsGEP(
+ BeginPtr.getElementType(), BeginPtr.emitRawPointer(*this), NumElements,
+ "array.end");
// If the number of elements isn't constant, we have to now check if there is
// anything left to initialize.
if (!ConstNum) {
- llvm::Value *IsEmpty =
- Builder.CreateICmpEQ(CurPtr.getPointer(), EndPtr, "array.isempty");
+ llvm::Value *IsEmpty = Builder.CreateICmpEQ(CurPtr.emitRawPointer(*this),
+ EndPtr, "array.isempty");
Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
}
@@ -1268,21 +1274,23 @@ void CodeGenFunction::EmitNewArrayInitializer(
// Set up the current-element phi.
llvm::PHINode *CurPtrPhi =
Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
- CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB);
+ CurPtrPhi->addIncoming(CurPtr.emitRawPointer(*this), EntryBB);
CurPtr = Address(CurPtrPhi, CurPtr.getElementType(), ElementAlign);
// Store the new Cleanup position for irregular Cleanups.
if (EndOfInit.isValid())
- Builder.CreateStore(CurPtr.getPointer(), EndOfInit);
+ Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit);
// Enter a partial-destruction Cleanup if necessary.
- if (!CleanupDominator && needsEHCleanup(DtorKind)) {
- pushRegularPartialArrayCleanup(BeginPtr.getPointer(), CurPtr.getPointer(),
- ElementType, ElementAlign,
- getDestroyer(DtorKind));
- Cleanup = EHStack.stable_begin();
- CleanupDominator = Builder.CreateUnreachable();
+ if (!pushedCleanup && needsEHCleanup(DtorKind)) {
+ llvm::Instruction *DominatingIP =
+ Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(Int8PtrTy));
+ pushRegularPartialArrayCleanup(BeginPtr.emitRawPointer(*this),
+ CurPtr.emitRawPointer(*this), ElementType,
+ ElementAlign, getDestroyer(DtorKind));
+ DeferredDeactivationCleanupStack.push_back(
+ {EHStack.stable_begin(), DominatingIP});
}
// Emit the initializer into this element.
@@ -1290,15 +1298,11 @@ void CodeGenFunction::EmitNewArrayInitializer(
AggValueSlot::DoesNotOverlap);
// Leave the Cleanup if we entered one.
- if (CleanupDominator) {
- DeactivateCleanupBlock(Cleanup, CleanupDominator);
- CleanupDominator->eraseFromParent();
- }
+ deactivation.ForceDeactivate();
// Advance to the next element by adjusting the pointer type as necessary.
- llvm::Value *NextPtr =
- Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr.getPointer(), 1,
- "array.next");
+ llvm::Value *NextPtr = Builder.CreateConstInBoundsGEP1_32(
+ ElementTy, CurPtr.emitRawPointer(*this), 1, "array.next");
// Check whether we've gotten to the end of the array and, if so,
// exit the loop.
@@ -1423,6 +1427,7 @@ namespace {
};
unsigned NumPlacementArgs : 31;
+ LLVM_PREFERRED_TYPE(bool)
unsigned PassAlignmentToPlacementDelete : 1;
const FunctionDecl *OperatorDelete;
ValueTy Ptr;
@@ -1523,14 +1528,9 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;
- DirectCleanup *Cleanup = CGF.EHStack
- .pushCleanupWithExtra<DirectCleanup>(EHCleanup,
- E->getNumPlacementArgs(),
- E->getOperatorDelete(),
- NewPtr.getPointer(),
- AllocSize,
- E->passAlignment(),
- AllocAlign);
+ DirectCleanup *Cleanup = CGF.EHStack.pushCleanupWithExtra<DirectCleanup>(
+ EHCleanup, E->getNumPlacementArgs(), E->getOperatorDelete(),
+ NewPtr.emitRawPointer(CGF), AllocSize, E->passAlignment(), AllocAlign);
for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
auto &Arg = NewArgs[I + NumNonPlacementArgs];
Cleanup->setPlacementArg(I, Arg.getRValue(CGF), Arg.Ty);
@@ -1541,7 +1541,7 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
// Otherwise, we need to save all this stuff.
DominatingValue<RValue>::saved_type SavedNewPtr =
- DominatingValue<RValue>::save(CGF, RValue::get(NewPtr.getPointer()));
+ DominatingValue<RValue>::save(CGF, RValue::get(NewPtr, CGF));
DominatingValue<RValue>::saved_type SavedAllocSize =
DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
@@ -1590,8 +1590,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
isa<StringLiteral>(IgnoreParen) || isa<ObjCEncodeExpr>(IgnoreParen)) {
minElements =
cast<ConstantArrayType>(Init->getType()->getAsArrayTypeUnsafe())
- ->getSize()
- .getZExtValue();
+ ->getZExtSize();
} else if (ILE || CPLIE) {
minElements = ILE ? ILE->getNumInits() : CPLIE->getInitExprs().size();
}
@@ -1619,14 +1618,14 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// In these cases, discard the computed alignment and use the
// formal alignment of the allocated type.
if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
- allocation = allocation.withAlignment(allocAlign);
+ allocation.setAlignment(allocAlign);
// Set up allocatorArgs for the call to operator delete if it's not
// the reserved global operator.
if (E->getOperatorDelete() &&
!E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType());
- allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType());
+ allocatorArgs.add(RValue::get(allocation, *this), arg->getType());
}
} else {
@@ -1714,8 +1713,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
contBB = createBasicBlock("new.cont");
- llvm::Value *isNull =
- Builder.CreateIsNull(allocation.getPointer(), "new.isnull");
+ llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
Builder.CreateCondBr(isNull, contBB, notNullBB);
EmitBlock(notNullBB);
}
@@ -1761,12 +1759,12 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
SkippedChecks.set(SanitizerKind::Null, nullCheck);
EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall,
E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
- result.getPointer(), allocType, result.getAlignment(),
- SkippedChecks, numElements);
+ result, allocType, result.getAlignment(), SkippedChecks,
+ numElements);
EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
allocSizeWithoutCookie);
- llvm::Value *resultPtr = result.getPointer();
+ llvm::Value *resultPtr = result.emitRawPointer(*this);
if (E->isArray()) {
// NewPtr is a pointer to the base element type. If we're
// allocating an array of arrays, we'll need to cast back to the
@@ -1910,7 +1908,8 @@ static void EmitDestroyingObjectDelete(CodeGenFunction &CGF,
CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
Dtor);
else
- CGF.EmitDeleteCall(DE->getOperatorDelete(), Ptr.getPointer(), ElementType);
+ CGF.EmitDeleteCall(DE->getOperatorDelete(), Ptr.emitRawPointer(CGF),
+ ElementType);
}
/// Emit the code for deleting a single object.
@@ -1926,8 +1925,7 @@ static bool EmitObjectDelete(CodeGenFunction &CGF,
// dynamic type, the static type shall be a base class of the dynamic type
// of the object to be deleted and the static type shall have a virtual
// destructor or the behavior is undefined.
- CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall,
- DE->getExprLoc(), Ptr.getPointer(),
+ CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall, DE->getExprLoc(), Ptr,
ElementType);
const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
@@ -1976,9 +1974,8 @@ static bool EmitObjectDelete(CodeGenFunction &CGF,
// Make sure that we call delete even if the dtor throws.
// This doesn't have to a conditional cleanup because we're going
// to pop it off in a second.
- CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
- Ptr.getPointer(),
- OperatorDelete, ElementType);
+ CGF.EHStack.pushCleanup<CallObjectDelete>(
+ NormalAndEHCleanup, Ptr.emitRawPointer(CGF), OperatorDelete, ElementType);
if (Dtor)
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
@@ -2065,7 +2062,7 @@ static void EmitArrayDelete(CodeGenFunction &CGF,
CharUnits elementAlign =
deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);
- llvm::Value *arrayBegin = deletedPtr.getPointer();
+ llvm::Value *arrayBegin = deletedPtr.emitRawPointer(CGF);
llvm::Value *arrayEnd = CGF.Builder.CreateInBoundsGEP(
deletedPtr.getElementType(), arrayBegin, numElements, "delete.end");
@@ -2096,7 +2093,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
- llvm::Value *IsNull = Builder.CreateIsNull(Ptr.getPointer(), "isnull");
+ llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");
Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
EmitBlock(DeleteNotNull);
@@ -2131,10 +2128,8 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
GEP.push_back(Zero);
}
- Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getElementType(),
- Ptr.getPointer(), GEP, "del.first"),
- ConvertTypeForMem(DeleteTy), Ptr.getAlignment(),
- Ptr.isKnownNonNull());
+ Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, ConvertTypeForMem(DeleteTy),
+ Ptr.getAlignment(), "del.first");
}
assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());
@@ -2148,42 +2143,11 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
}
}
-static bool isGLValueFromPointerDeref(const Expr *E) {
- E = E->IgnoreParens();
-
- if (const auto *CE = dyn_cast<CastExpr>(E)) {
- if (!CE->getSubExpr()->isGLValue())
- return false;
- return isGLValueFromPointerDeref(CE->getSubExpr());
- }
-
- if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
- return isGLValueFromPointerDeref(OVE->getSourceExpr());
-
- if (const auto *BO = dyn_cast<BinaryOperator>(E))
- if (BO->getOpcode() == BO_Comma)
- return isGLValueFromPointerDeref(BO->getRHS());
-
- if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(E))
- return isGLValueFromPointerDeref(ACO->getTrueExpr()) ||
- isGLValueFromPointerDeref(ACO->getFalseExpr());
-
- // C++11 [expr.sub]p1:
- // The expression E1[E2] is identical (by definition) to *((E1)+(E2))
- if (isa<ArraySubscriptExpr>(E))
- return true;
-
- if (const auto *UO = dyn_cast<UnaryOperator>(E))
- if (UO->getOpcode() == UO_Deref)
- return true;
-
- return false;
-}
-
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
- llvm::Type *StdTypeInfoPtrTy) {
+ llvm::Type *StdTypeInfoPtrTy,
+ bool HasNullCheck) {
// Get the vtable pointer.
- Address ThisPtr = CGF.EmitLValue(E).getAddress(CGF);
+ Address ThisPtr = CGF.EmitLValue(E).getAddress();
QualType SrcRecordTy = E->getType();
@@ -2192,23 +2156,18 @@ static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
// destruction and the static type of the operand is neither the constructor
// or destructor’s class nor one of its bases, the behavior is undefined.
CGF.EmitTypeCheck(CodeGenFunction::TCK_DynamicOperation, E->getExprLoc(),
- ThisPtr.getPointer(), SrcRecordTy);
+ ThisPtr, SrcRecordTy);
- // C++ [expr.typeid]p2:
- // If the glvalue expression is obtained by applying the unary * operator to
- // a pointer and the pointer is a null pointer value, the typeid expression
- // throws the std::bad_typeid exception.
- //
- // However, this paragraph's intent is not clear. We choose a very generous
- // interpretation which implores us to consider comma operators, conditional
- // operators, parentheses and other such constructs.
- if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(
- isGLValueFromPointerDeref(E), SrcRecordTy)) {
+ // Whether we need an explicit null pointer check. For example, with the
+ // Microsoft ABI, if this is a call to __RTtypeid, the null pointer check and
+ // exception throw is inside the __RTtypeid(nullptr) call
+ if (HasNullCheck &&
+ CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(SrcRecordTy)) {
llvm::BasicBlock *BadTypeidBlock =
CGF.createBasicBlock("typeid.bad_typeid");
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");
- llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr.getPointer());
+ llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
CGF.EmitBlock(BadTypeidBlock);
@@ -2221,7 +2180,12 @@ static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
}
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
- llvm::Type *PtrTy = llvm::PointerType::getUnqual(getLLVMContext());
+ // Ideally, we would like to use GlobalsInt8PtrTy here, however, we cannot,
+ // primarily because the result of applying typeid is a value of type
+ // type_info, which is declared & defined by the standard library
+ // implementation and expects to operate on the generic (default) AS.
+ // https://reviews.llvm.org/D157452 has more context, and a possible solution.
+ llvm::Type *PtrTy = Int8PtrTy;
LangAS GlobAS = CGM.GetGlobalVarAddressSpace(nullptr);
auto MaybeASCast = [=](auto &&TypeInfo) {
@@ -2244,7 +2208,8 @@ llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
// type) to which the glvalue refers.
// If the operand is already most derived object, no need to look up vtable.
if (E->isPotentiallyEvaluated() && !E->isMostDerived(getContext()))
- return EmitTypeidFromVTable(*this, E->getExprOperand(), PtrTy);
+ return EmitTypeidFromVTable(*this, E->getExprOperand(), PtrTy,
+ E->hasNullCheck());
QualType OperandTy = E->getExprOperand()->getType();
return MaybeASCast(CGM.GetAddrOfRTTIDescriptor(OperandTy));
@@ -2294,8 +2259,7 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
// construction or destruction and the static type of the operand is not a
// pointer to or object of the constructor or destructor’s own class or one
// of its bases, the dynamic_cast results in undefined behavior.
- EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(),
- SrcRecordTy);
+ EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr, SrcRecordTy);
if (DCE->isAlwaysNull()) {
if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy)) {
@@ -2330,7 +2294,7 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
CastNull = createBasicBlock("dynamic_cast.null");
CastNotNull = createBasicBlock("dynamic_cast.notnull");
- llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr.getPointer());
+ llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr);
Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
EmitBlock(CastNotNull);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprComplex.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprComplex.cpp
index 839fe16cd772..4d45f6d64c1c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprComplex.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprComplex.cpp
@@ -28,6 +28,10 @@ using namespace CodeGen;
// Complex Expression Emitter
//===----------------------------------------------------------------------===//
+namespace llvm {
+extern cl::opt<bool> EnableSingleByteCoverage;
+} // namespace llvm
+
typedef CodeGenFunction::ComplexPairTy ComplexPairTy;
/// Return the complex type that we are meant to emit.
@@ -47,11 +51,12 @@ class ComplexExprEmitter
CGBuilderTy &Builder;
bool IgnoreReal;
bool IgnoreImag;
-public:
- ComplexExprEmitter(CodeGenFunction &cgf, bool ir=false, bool ii=false)
- : CGF(cgf), Builder(CGF.Builder), IgnoreReal(ir), IgnoreImag(ii) {
- }
+ bool FPHasBeenPromoted;
+public:
+ ComplexExprEmitter(CodeGenFunction &cgf, bool ir = false, bool ii = false)
+ : CGF(cgf), Builder(CGF.Builder), IgnoreReal(ir), IgnoreImag(ii),
+ FPHasBeenPromoted(false) {}
//===--------------------------------------------------------------------===//
// Utilities
@@ -283,9 +288,62 @@ public:
ComplexPairTy EmitComplexBinOpLibCall(StringRef LibCallName,
const BinOpInfo &Op);
- QualType getPromotionType(QualType Ty) {
+ QualType GetHigherPrecisionFPType(QualType ElementType) {
+ const auto *CurrentBT = cast<BuiltinType>(ElementType);
+ switch (CurrentBT->getKind()) {
+ case BuiltinType::Kind::Float16:
+ return CGF.getContext().FloatTy;
+ case BuiltinType::Kind::Float:
+ case BuiltinType::Kind::BFloat16:
+ return CGF.getContext().DoubleTy;
+ case BuiltinType::Kind::Double:
+ return CGF.getContext().LongDoubleTy;
+ default:
+ return ElementType;
+ }
+ }
+
+ QualType HigherPrecisionTypeForComplexArithmetic(QualType ElementType,
+ bool IsDivOpCode) {
+ QualType HigherElementType = GetHigherPrecisionFPType(ElementType);
+ const llvm::fltSemantics &ElementTypeSemantics =
+ CGF.getContext().getFloatTypeSemantics(ElementType);
+ const llvm::fltSemantics &HigherElementTypeSemantics =
+ CGF.getContext().getFloatTypeSemantics(HigherElementType);
+ // Check that the promoted type can handle the intermediate values without
+ // overflowing. This can be interpreted as:
+ // (SmallerType.LargestFiniteVal * SmallerType.LargestFiniteVal) * 2 <=
+ // LargerType.LargestFiniteVal.
+ // In terms of exponent it gives this formula:
+ // (SmallerType.LargestFiniteVal * SmallerType.LargestFiniteVal
+ // doubles the exponent of SmallerType.LargestFiniteVal)
+ if (llvm::APFloat::semanticsMaxExponent(ElementTypeSemantics) * 2 + 1 <=
+ llvm::APFloat::semanticsMaxExponent(HigherElementTypeSemantics)) {
+ FPHasBeenPromoted = true;
+ return CGF.getContext().getComplexType(HigherElementType);
+ } else {
+ DiagnosticsEngine &Diags = CGF.CGM.getDiags();
+ Diags.Report(diag::warn_next_larger_fp_type_same_size_than_fp);
+ return QualType();
+ }
+ }
+
+ QualType getPromotionType(FPOptionsOverride Features, QualType Ty,
+ bool IsDivOpCode = false) {
if (auto *CT = Ty->getAs<ComplexType>()) {
QualType ElementType = CT->getElementType();
+ bool IsFloatingType = ElementType->isFloatingType();
+ bool IsComplexRangePromoted = CGF.getLangOpts().getComplexRange() ==
+ LangOptions::ComplexRangeKind::CX_Promoted;
+ bool HasNoComplexRangeOverride = !Features.hasComplexRangeOverride();
+ bool HasMatchingComplexRange = Features.hasComplexRangeOverride() &&
+ Features.getComplexRangeOverride() ==
+ CGF.getLangOpts().getComplexRange();
+
+ if (IsDivOpCode && IsFloatingType && IsComplexRangePromoted &&
+ (HasNoComplexRangeOverride || HasMatchingComplexRange))
+ return HigherPrecisionTypeForComplexArithmetic(ElementType,
+ IsDivOpCode);
if (ElementType.UseExcessPrecision(CGF.getContext()))
return CGF.getContext().getComplexType(CGF.getContext().FloatTy);
}
@@ -296,11 +354,12 @@ public:
#define HANDLEBINOP(OP) \
ComplexPairTy VisitBin##OP(const BinaryOperator *E) { \
- QualType promotionTy = getPromotionType(E->getType()); \
+ QualType promotionTy = getPromotionType( \
+ E->getStoredFPFeaturesOrDefault(), E->getType(), \
+ (E->getOpcode() == BinaryOperatorKind::BO_Div) ? true : false); \
ComplexPairTy result = EmitBin##OP(EmitBinOps(E, promotionTy)); \
if (!promotionTy.isNull()) \
- result = \
- CGF.EmitUnPromotedValue(result, E->getType()); \
+ result = CGF.EmitUnPromotedValue(result, E->getType()); \
return result; \
}
@@ -354,6 +413,10 @@ public:
ComplexPairTy VisitAtomicExpr(AtomicExpr *E) {
return CGF.EmitAtomicExpr(E).getComplexVal();
}
+
+ ComplexPairTy VisitPackIndexingExpr(PackIndexingExpr *E) {
+ return Visit(E->getSelectedExpr());
+ }
};
} // end anonymous namespace.
@@ -379,7 +442,7 @@ ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue,
if (lvalue.getType()->isAtomicType())
return CGF.EmitAtomicLoad(lvalue, loc).getComplexVal();
- Address SrcPtr = lvalue.getAddress(CGF);
+ Address SrcPtr = lvalue.getAddress();
bool isVolatile = lvalue.isVolatileQualified();
llvm::Value *Real = nullptr, *Imag = nullptr;
@@ -405,7 +468,7 @@ void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue,
(!isInit && CGF.LValueIsSuitableForInlineAtomic(lvalue)))
return CGF.EmitAtomicStore(RValue::getComplex(Val), lvalue, isInit);
- Address Ptr = lvalue.getAddress(CGF);
+ Address Ptr = lvalue.getAddress();
Address RealPtr = CGF.emitAddrOfRealComponent(Ptr, lvalue.getType());
Address ImagPtr = CGF.emitAddrOfImagComponent(Ptr, lvalue.getType());
@@ -496,14 +559,14 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
case CK_LValueBitCast: {
LValue origLV = CGF.EmitLValue(Op);
- Address V = origLV.getAddress(CGF).withElementType(CGF.ConvertType(DestTy));
+ Address V = origLV.getAddress().withElementType(CGF.ConvertType(DestTy));
return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy), Op->getExprLoc());
}
case CK_LValueToRValueBitCast: {
LValue SourceLVal = CGF.EmitLValue(Op);
- Address Addr = SourceLVal.getAddress(CGF).withElementType(
- CGF.ConvertTypeForMem(DestTy));
+ Address Addr =
+ SourceLVal.getAddress().withElementType(CGF.ConvertTypeForMem(DestTy));
LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
return EmitLoadOfLValue(DestLV, Op->getExprLoc());
@@ -560,6 +623,8 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
case CK_FixedPointToIntegral:
case CK_IntegralToFixedPoint:
case CK_MatrixCast:
+ case CK_HLSLVectorTruncation:
+ case CK_HLSLArrayRValue:
llvm_unreachable("invalid cast kind for complex value");
case CK_FloatingRealToComplex:
@@ -584,9 +649,12 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
ComplexPairTy ComplexExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
QualType PromotionType) {
- QualType promotionTy = PromotionType.isNull()
- ? getPromotionType(E->getSubExpr()->getType())
- : PromotionType;
+ E->hasStoredFPFeatures();
+ QualType promotionTy =
+ PromotionType.isNull()
+ ? getPromotionType(E->getStoredFPFeaturesOrDefault(),
+ E->getSubExpr()->getType())
+ : PromotionType;
ComplexPairTy result = VisitPlus(E, promotionTy);
if (!promotionTy.isNull())
return CGF.EmitUnPromotedValue(result, E->getSubExpr()->getType());
@@ -604,9 +672,11 @@ ComplexPairTy ComplexExprEmitter::VisitPlus(const UnaryOperator *E,
ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
QualType PromotionType) {
- QualType promotionTy = PromotionType.isNull()
- ? getPromotionType(E->getSubExpr()->getType())
- : PromotionType;
+ QualType promotionTy =
+ PromotionType.isNull()
+ ? getPromotionType(E->getStoredFPFeaturesOrDefault(),
+ E->getSubExpr()->getType())
+ : PromotionType;
ComplexPairTy result = VisitMinus(E, promotionTy);
if (!promotionTy.isNull())
return CGF.EmitUnPromotedValue(result, E->getSubExpr()->getType());
@@ -760,8 +830,6 @@ ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
//
// But we can fold away components which would be zero due to a real
// operand according to C11 Annex G.5.1p2.
- // FIXME: C11 also provides for imaginary types which would allow folding
- // still more of this within the type system.
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Op.FPFeatures);
if (Op.LHS.second && Op.RHS.second) {
@@ -785,8 +853,9 @@ ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
ResR = Builder.CreateFSub(AC, BD, "mul_r");
ResI = Builder.CreateFAdd(AD, BC, "mul_i");
- if (Op.FPFeatures.getComplexRange() == LangOptions::CX_Limited ||
- Op.FPFeatures.getComplexRange() == LangOptions::CX_Fortran)
+ if (Op.FPFeatures.getComplexRange() == LangOptions::CX_Basic ||
+ Op.FPFeatures.getComplexRange() == LangOptions::CX_Improved ||
+ Op.FPFeatures.getComplexRange() == LangOptions::CX_Promoted)
return ComplexPairTy(ResR, ResI);
// Emit the test for the real part becoming NaN and create a branch to
@@ -798,8 +867,7 @@ ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
llvm::BasicBlock *OrigBB = Branch->getParent();
// Give hint that we very much don't expect to see NaNs.
- // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp
- llvm::MDNode *BrWeight = MDHelper.createBranchWeights(1, (1U << 20) - 1);
+ llvm::MDNode *BrWeight = MDHelper.createUnlikelyBranchWeights();
Branch->setMetadata(llvm::LLVMContext::MD_prof, BrWeight);
// Now test the imaginary part and create its branch.
@@ -977,22 +1045,21 @@ ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
llvm::Value *OrigLHSi = LHSi;
if (!LHSi)
LHSi = llvm::Constant::getNullValue(RHSi->getType());
- if (Op.FPFeatures.getComplexRange() == LangOptions::CX_Fortran)
+ if (Op.FPFeatures.getComplexRange() == LangOptions::CX_Improved ||
+ (Op.FPFeatures.getComplexRange() == LangOptions::CX_Promoted &&
+ !FPHasBeenPromoted))
return EmitRangeReductionDiv(LHSr, LHSi, RHSr, RHSi);
- else if (Op.FPFeatures.getComplexRange() == LangOptions::CX_Limited)
+ else if (Op.FPFeatures.getComplexRange() == LangOptions::CX_Basic ||
+ Op.FPFeatures.getComplexRange() == LangOptions::CX_Promoted)
return EmitAlgebraicDiv(LHSr, LHSi, RHSr, RHSi);
- else if (!CGF.getLangOpts().FastMath ||
- // '-ffast-math' is used in the command line but followed by an
- // '-fno-cx-limited-range'.
- Op.FPFeatures.getComplexRange() == LangOptions::CX_Full) {
+ // '-ffast-math' is used in the command line but followed by an
+ // '-fno-cx-limited-range' or '-fcomplex-arithmetic=full'.
+ else if (Op.FPFeatures.getComplexRange() == LangOptions::CX_Full) {
LHSi = OrigLHSi;
// If we have a complex operand on the RHS and FastMath is not allowed, we
// delegate to a libcall to handle all of the complexities and minimize
// underflow/overflow cases. When FastMath is allowed we construct the
// divide inline using the same algorithm as for integer operands.
- //
- // FIXME: We would be able to avoid the libcall in many places if we
- // supported imaginary types in addition to complex types.
BinOpInfo LibCallOp = Op;
// If LHS was a real, supply a null imaginary part.
if (!LHSi)
@@ -1164,13 +1231,15 @@ EmitCompoundAssignLValue(const CompoundAssignOperator *E,
// __block variables need to have the rhs evaluated first, plus this should
// improve codegen a little.
QualType PromotionTypeCR;
- PromotionTypeCR = getPromotionType(E->getComputationResultType());
+ PromotionTypeCR = getPromotionType(E->getStoredFPFeaturesOrDefault(),
+ E->getComputationResultType());
if (PromotionTypeCR.isNull())
PromotionTypeCR = E->getComputationResultType();
OpInfo.Ty = PromotionTypeCR;
QualType ComplexElementTy =
OpInfo.Ty->castAs<ComplexType>()->getElementType();
- QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());
+ QualType PromotionTypeRHS = getPromotionType(
+ E->getStoredFPFeaturesOrDefault(), E->getRHS()->getType());
// The RHS should have been converted to the computation type.
if (E->getRHS()->getType()->isRealFloatingType()) {
@@ -1198,7 +1267,8 @@ EmitCompoundAssignLValue(const CompoundAssignOperator *E,
// Load from the l-value and convert it.
SourceLocation Loc = E->getExprLoc();
- QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());
+ QualType PromotionTypeLHS = getPromotionType(
+ E->getStoredFPFeaturesOrDefault(), E->getComputationLHSType());
if (LHSTy->isAnyComplexType()) {
ComplexPairTy LHSVal = EmitLoadOfLValue(LHS, Loc);
if (!PromotionTypeLHS.isNull())
@@ -1325,7 +1395,11 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
eval.begin(CGF);
CGF.EmitBlock(LHSBlock);
- CGF.incrementProfileCounter(E);
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(E->getTrueExpr());
+ else
+ CGF.incrementProfileCounter(E);
+
ComplexPairTy LHS = Visit(E->getTrueExpr());
LHSBlock = Builder.GetInsertBlock();
CGF.EmitBranch(ContBlock);
@@ -1333,9 +1407,13 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
eval.begin(CGF);
CGF.EmitBlock(RHSBlock);
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(E->getFalseExpr());
ComplexPairTy RHS = Visit(E->getFalseExpr());
RHSBlock = Builder.GetInsertBlock();
CGF.EmitBlock(ContBlock);
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(E);
eval.end(CGF);
// Create a PHI node for the real part.
@@ -1381,9 +1459,9 @@ ComplexPairTy ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) {
ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
Address ArgValue = Address::invalid();
- Address ArgPtr = CGF.EmitVAArg(E, ArgValue);
+ RValue RV = CGF.EmitVAArg(E, ArgValue);
- if (!ArgPtr.isValid()) {
+ if (!ArgValue.isValid()) {
CGF.ErrorUnsupported(E, "complex va_arg expression");
llvm::Type *EltTy =
CGF.ConvertType(E->getType()->castAs<ComplexType>()->getElementType());
@@ -1391,8 +1469,7 @@ ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
return ComplexPairTy(U, U);
}
- return EmitLoadOfLValue(CGF.MakeAddrLValue(ArgPtr, E->getType()),
- E->getExprLoc());
+ return RV.getComplexVal();
}
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp
index 604e3958161d..f22321f0e738 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "ABIInfoImpl.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGRecordLayout.h"
@@ -393,7 +394,7 @@ bool ConstantAggregateBuilder::split(size_t Index, CharUnits Hint) {
static llvm::Constant *
EmitArrayConstant(CodeGenModule &CGM, llvm::ArrayType *DesiredType,
- llvm::Type *CommonElementType, unsigned ArrayBound,
+ llvm::Type *CommonElementType, uint64_t ArrayBound,
SmallVectorImpl<llvm::Constant *> &Elements,
llvm::Constant *Filler);
@@ -564,12 +565,13 @@ class ConstStructBuilder {
public:
static llvm::Constant *BuildStruct(ConstantEmitter &Emitter,
- InitListExpr *ILE, QualType StructTy);
+ const InitListExpr *ILE,
+ QualType StructTy);
static llvm::Constant *BuildStruct(ConstantEmitter &Emitter,
const APValue &Value, QualType ValTy);
static bool UpdateStruct(ConstantEmitter &Emitter,
ConstantAggregateBuilder &Const, CharUnits Offset,
- InitListExpr *Updater);
+ const InitListExpr *Updater);
private:
ConstStructBuilder(ConstantEmitter &Emitter,
@@ -584,9 +586,9 @@ private:
bool AllowOverwrite = false);
bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
- llvm::ConstantInt *InitExpr, bool AllowOverwrite = false);
+ llvm::Constant *InitExpr, bool AllowOverwrite = false);
- bool Build(InitListExpr *ILE, bool AllowOverwrite);
+ bool Build(const InitListExpr *ILE, bool AllowOverwrite);
bool Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase,
const CXXRecordDecl *VTableClass, CharUnits BaseOffset);
llvm::Constant *Finalize(QualType Ty);
@@ -608,9 +610,25 @@ bool ConstStructBuilder::AppendBytes(CharUnits FieldOffsetInChars,
return Builder.add(InitCst, StartOffset + FieldOffsetInChars, AllowOverwrite);
}
-bool ConstStructBuilder::AppendBitField(
- const FieldDecl *Field, uint64_t FieldOffset, llvm::ConstantInt *CI,
- bool AllowOverwrite) {
+bool ConstStructBuilder::AppendBitField(const FieldDecl *Field,
+ uint64_t FieldOffset, llvm::Constant *C,
+ bool AllowOverwrite) {
+
+ llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C);
+ if (!CI) {
+ // Constants for long _BitInt types are sometimes split into individual
+ // bytes. Try to fold these back into an integer constant. If that doesn't
+ // work out, then we are trying to initialize a bitfield with a non-trivial
+ // constant, this must require run-time code.
+ llvm::Type *LoadType =
+ CGM.getTypes().convertTypeForLoadStore(Field->getType(), C->getType());
+ llvm::Constant *FoldedConstant = llvm::ConstantFoldLoadFromConst(
+ C, LoadType, llvm::APInt::getZero(32), CGM.getDataLayout());
+ CI = dyn_cast_if_present<llvm::ConstantInt>(FoldedConstant);
+ if (!CI)
+ return false;
+ }
+
const CGRecordLayout &RL =
CGM.getTypes().getCGRecordLayout(Field->getParent());
const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
@@ -635,7 +653,7 @@ bool ConstStructBuilder::AppendBitField(
static bool EmitDesignatedInitUpdater(ConstantEmitter &Emitter,
ConstantAggregateBuilder &Const,
CharUnits Offset, QualType Type,
- InitListExpr *Updater) {
+ const InitListExpr *Updater) {
if (Type->isRecordType())
return ConstStructBuilder::UpdateStruct(Emitter, Const, Offset, Updater);
@@ -647,7 +665,7 @@ static bool EmitDesignatedInitUpdater(ConstantEmitter &Emitter,
llvm::Type *ElemTy = Emitter.CGM.getTypes().ConvertTypeForMem(ElemType);
llvm::Constant *FillC = nullptr;
- if (Expr *Filler = Updater->getArrayFiller()) {
+ if (const Expr *Filler = Updater->getArrayFiller()) {
if (!isa<NoInitExpr>(Filler)) {
FillC = Emitter.tryEmitAbstractForMemory(Filler, ElemType);
if (!FillC)
@@ -656,9 +674,9 @@ static bool EmitDesignatedInitUpdater(ConstantEmitter &Emitter,
}
unsigned NumElementsToUpdate =
- FillC ? CAT->getSize().getZExtValue() : Updater->getNumInits();
+ FillC ? CAT->getZExtSize() : Updater->getNumInits();
for (unsigned I = 0; I != NumElementsToUpdate; ++I, Offset += ElemSize) {
- Expr *Init = nullptr;
+ const Expr *Init = nullptr;
if (I < Updater->getNumInits())
Init = Updater->getInit(I);
@@ -667,7 +685,7 @@ static bool EmitDesignatedInitUpdater(ConstantEmitter &Emitter,
return false;
} else if (!Init || isa<NoInitExpr>(Init)) {
continue;
- } else if (InitListExpr *ChildILE = dyn_cast<InitListExpr>(Init)) {
+ } else if (const auto *ChildILE = dyn_cast<InitListExpr>(Init)) {
if (!EmitDesignatedInitUpdater(Emitter, Const, Offset, ElemType,
ChildILE))
return false;
@@ -683,7 +701,7 @@ static bool EmitDesignatedInitUpdater(ConstantEmitter &Emitter,
return true;
}
-bool ConstStructBuilder::Build(InitListExpr *ILE, bool AllowOverwrite) {
+bool ConstStructBuilder::Build(const InitListExpr *ILE, bool AllowOverwrite) {
RecordDecl *RD = ILE->getType()->castAs<RecordType>()->getDecl();
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
@@ -706,20 +724,20 @@ bool ConstStructBuilder::Build(InitListExpr *ILE, bool AllowOverwrite) {
continue;
// Don't emit anonymous bitfields.
- if (Field->isUnnamedBitfield())
+ if (Field->isUnnamedBitField())
continue;
// Get the initializer. A struct can include fields without initializers,
// we just use explicit null values for them.
- Expr *Init = nullptr;
+ const Expr *Init = nullptr;
if (ElementNo < ILE->getNumInits())
Init = ILE->getInit(ElementNo++);
- if (Init && isa<NoInitExpr>(Init))
+ if (isa_and_nonnull<NoInitExpr>(Init))
continue;
// Zero-sized fields are not emitted, but their initializers may still
// prevent emission of this struct as a constant.
- if (Field->isZeroSize(CGM.getContext())) {
+ if (isEmptyFieldForLayout(CGM.getContext(), Field)) {
if (Init->HasSideEffects(CGM.getContext()))
return false;
continue;
@@ -761,15 +779,9 @@ bool ConstStructBuilder::Build(InitListExpr *ILE, bool AllowOverwrite) {
AllowOverwrite = true;
} else {
// Otherwise we have a bitfield.
- if (auto *CI = dyn_cast<llvm::ConstantInt>(EltInit)) {
- if (!AppendBitField(Field, Layout.getFieldOffset(FieldNo), CI,
- AllowOverwrite))
- return false;
- } else {
- // We are trying to initialize a bitfield with a non-trivial constant,
- // this must require run-time code.
+ if (!AppendBitField(Field, Layout.getFieldOffset(FieldNo), EltInit,
+ AllowOverwrite))
return false;
- }
}
}
@@ -800,8 +812,14 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
// Add a vtable pointer, if we need one and it hasn't already been added.
if (Layout.hasOwnVFPtr()) {
llvm::Constant *VTableAddressPoint =
- CGM.getCXXABI().getVTableAddressPointForConstExpr(
- BaseSubobject(CD, Offset), VTableClass);
+ CGM.getCXXABI().getVTableAddressPoint(BaseSubobject(CD, Offset),
+ VTableClass);
+ if (auto Authentication = CGM.getVTablePointerAuthentication(CD)) {
+ VTableAddressPoint = Emitter.tryEmitConstantSignedPointer(
+ VTableAddressPoint, *Authentication);
+ if (!VTableAddressPoint)
+ return false;
+ }
if (!AppendBytes(Offset, VTableAddressPoint))
return false;
}
@@ -840,7 +858,8 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
continue;
// Don't emit anonymous bitfields or zero-sized fields.
- if (Field->isUnnamedBitfield() || Field->isZeroSize(CGM.getContext()))
+ if (Field->isUnnamedBitField() ||
+ isEmptyFieldForLayout(CGM.getContext(), *Field))
continue;
// Emit the value of the initializer.
@@ -863,7 +882,7 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
} else {
// Otherwise we have a bitfield.
if (!AppendBitField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
- cast<llvm::ConstantInt>(EltInit), AllowOverwrite))
+ EltInit, AllowOverwrite))
return false;
}
}
@@ -879,7 +898,7 @@ llvm::Constant *ConstStructBuilder::Finalize(QualType Type) {
}
llvm::Constant *ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter,
- InitListExpr *ILE,
+ const InitListExpr *ILE,
QualType ValTy) {
ConstantAggregateBuilder Const(Emitter.CGM);
ConstStructBuilder Builder(Emitter, Const, CharUnits::Zero());
@@ -906,7 +925,8 @@ llvm::Constant *ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter,
bool ConstStructBuilder::UpdateStruct(ConstantEmitter &Emitter,
ConstantAggregateBuilder &Const,
- CharUnits Offset, InitListExpr *Updater) {
+ CharUnits Offset,
+ const InitListExpr *Updater) {
return ConstStructBuilder(Emitter, Const, Offset)
.Build(Updater, /*AllowOverwrite*/ true);
}
@@ -947,11 +967,11 @@ tryEmitGlobalCompoundLiteral(ConstantEmitter &emitter,
static llvm::Constant *
EmitArrayConstant(CodeGenModule &CGM, llvm::ArrayType *DesiredType,
- llvm::Type *CommonElementType, unsigned ArrayBound,
+ llvm::Type *CommonElementType, uint64_t ArrayBound,
SmallVectorImpl<llvm::Constant *> &Elements,
llvm::Constant *Filler) {
// Figure out how long the initial prefix of non-zero elements is.
- unsigned NonzeroLength = ArrayBound;
+ uint64_t NonzeroLength = ArrayBound;
if (Elements.size() < NonzeroLength && Filler->isNullValue())
NonzeroLength = Elements.size();
if (NonzeroLength == Elements.size()) {
@@ -963,7 +983,7 @@ EmitArrayConstant(CodeGenModule &CGM, llvm::ArrayType *DesiredType,
return llvm::ConstantAggregateZero::get(DesiredType);
// Add a zeroinitializer array filler if we have lots of trailing zeroes.
- unsigned TrailingZeroes = ArrayBound - NonzeroLength;
+ uint64_t TrailingZeroes = ArrayBound - NonzeroLength;
if (TrailingZeroes >= 8) {
assert(Elements.size() >= NonzeroLength &&
"missing initializer for non-zero element");
@@ -1013,8 +1033,8 @@ EmitArrayConstant(CodeGenModule &CGM, llvm::ArrayType *DesiredType,
//
// Constant folding is currently missing support for a few features supported
// here: CK_ToUnion, CK_ReinterpretMemberPointer, and DesignatedInitUpdateExpr.
-class ConstExprEmitter :
- public StmtVisitor<ConstExprEmitter, llvm::Constant*, QualType> {
+class ConstExprEmitter
+ : public ConstStmtVisitor<ConstExprEmitter, llvm::Constant *, QualType> {
CodeGenModule &CGM;
ConstantEmitter &Emitter;
llvm::LLVMContext &VMContext;
@@ -1027,43 +1047,60 @@ public:
// Visitor Methods
//===--------------------------------------------------------------------===//
- llvm::Constant *VisitStmt(Stmt *S, QualType T) {
- return nullptr;
- }
+ llvm::Constant *VisitStmt(const Stmt *S, QualType T) { return nullptr; }
- llvm::Constant *VisitConstantExpr(ConstantExpr *CE, QualType T) {
+ llvm::Constant *VisitConstantExpr(const ConstantExpr *CE, QualType T) {
if (llvm::Constant *Result = Emitter.tryEmitConstantExpr(CE))
return Result;
return Visit(CE->getSubExpr(), T);
}
- llvm::Constant *VisitParenExpr(ParenExpr *PE, QualType T) {
+ llvm::Constant *VisitParenExpr(const ParenExpr *PE, QualType T) {
return Visit(PE->getSubExpr(), T);
}
llvm::Constant *
- VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE,
+ VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *PE,
QualType T) {
return Visit(PE->getReplacement(), T);
}
- llvm::Constant *VisitGenericSelectionExpr(GenericSelectionExpr *GE,
+ llvm::Constant *VisitGenericSelectionExpr(const GenericSelectionExpr *GE,
QualType T) {
return Visit(GE->getResultExpr(), T);
}
- llvm::Constant *VisitChooseExpr(ChooseExpr *CE, QualType T) {
+ llvm::Constant *VisitChooseExpr(const ChooseExpr *CE, QualType T) {
return Visit(CE->getChosenSubExpr(), T);
}
- llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E, QualType T) {
+ llvm::Constant *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E,
+ QualType T) {
return Visit(E->getInitializer(), T);
}
- llvm::Constant *VisitCastExpr(CastExpr *E, QualType destType) {
+ llvm::Constant *ProduceIntToIntCast(const Expr *E, QualType DestType) {
+ QualType FromType = E->getType();
+ // See also HandleIntToIntCast in ExprConstant.cpp
+ if (FromType->isIntegerType())
+ if (llvm::Constant *C = Visit(E, FromType))
+ if (auto *CI = dyn_cast<llvm::ConstantInt>(C)) {
+ unsigned SrcWidth = CGM.getContext().getIntWidth(FromType);
+ unsigned DstWidth = CGM.getContext().getIntWidth(DestType);
+ if (DstWidth == SrcWidth)
+ return CI;
+ llvm::APInt A = FromType->isSignedIntegerType()
+ ? CI->getValue().sextOrTrunc(DstWidth)
+ : CI->getValue().zextOrTrunc(DstWidth);
+ return llvm::ConstantInt::get(CGM.getLLVMContext(), A);
+ }
+ return nullptr;
+ }
+
+ llvm::Constant *VisitCastExpr(const CastExpr *E, QualType destType) {
if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
CGM.EmitExplicitCastExprType(ECE, Emitter.CGF);
- Expr *subExpr = E->getSubExpr();
+ const Expr *subExpr = E->getSubExpr();
switch (E->getCastKind()) {
case CK_ToUnion: {
@@ -1117,7 +1154,8 @@ public:
// interesting conversions should be done in Evaluate(). But as a
// special case, allow compound literals to support the gcc extension
// allowing "struct x {int x;} x = (struct x) {};".
- if (auto *E = dyn_cast<CompoundLiteralExpr>(subExpr->IgnoreParens()))
+ if (const auto *E =
+ dyn_cast<CompoundLiteralExpr>(subExpr->IgnoreParens()))
return Visit(E->getInitializer(), destType);
return nullptr;
}
@@ -1140,23 +1178,8 @@ public:
case CK_IntToOCLSampler:
llvm_unreachable("global sampler variables are not generated");
- case CK_IntegralCast: {
- QualType FromType = subExpr->getType();
- // See also HandleIntToIntCast in ExprConstant.cpp
- if (FromType->isIntegerType())
- if (llvm::Constant *C = Visit(subExpr, FromType))
- if (auto *CI = dyn_cast<llvm::ConstantInt>(C)) {
- unsigned SrcWidth = CGM.getContext().getIntWidth(FromType);
- unsigned DstWidth = CGM.getContext().getIntWidth(destType);
- if (DstWidth == SrcWidth)
- return CI;
- llvm::APInt A = FromType->isSignedIntegerType()
- ? CI->getValue().sextOrTrunc(DstWidth)
- : CI->getValue().zextOrTrunc(DstWidth);
- return llvm::ConstantInt::get(CGM.getLLVMContext(), A);
- }
- return nullptr;
- }
+ case CK_IntegralCast:
+ return ProduceIntToIntCast(subExpr, destType);
case CK_Dependent: llvm_unreachable("saw dependent cast!");
@@ -1225,63 +1248,131 @@ public:
case CK_IntegralToFixedPoint:
case CK_ZeroToOCLOpaqueType:
case CK_MatrixCast:
+ case CK_HLSLVectorTruncation:
+ case CK_HLSLArrayRValue:
return nullptr;
}
llvm_unreachable("Invalid CastKind");
}
- llvm::Constant *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE, QualType T) {
+ llvm::Constant *VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *DIE,
+ QualType T) {
// No need for a DefaultInitExprScope: we don't handle 'this' in a
// constant expression.
return Visit(DIE->getExpr(), T);
}
- llvm::Constant *VisitExprWithCleanups(ExprWithCleanups *E, QualType T) {
+ llvm::Constant *VisitExprWithCleanups(const ExprWithCleanups *E, QualType T) {
return Visit(E->getSubExpr(), T);
}
- llvm::Constant *VisitIntegerLiteral(IntegerLiteral *I, QualType T) {
+ llvm::Constant *VisitIntegerLiteral(const IntegerLiteral *I, QualType T) {
return llvm::ConstantInt::get(CGM.getLLVMContext(), I->getValue());
}
- llvm::Constant *EmitArrayInitialization(InitListExpr *ILE, QualType T) {
+ static APValue withDestType(ASTContext &Ctx, const Expr *E, QualType SrcType,
+ QualType DestType, const llvm::APSInt &Value) {
+ if (!Ctx.hasSameType(SrcType, DestType)) {
+ if (DestType->isFloatingType()) {
+ llvm::APFloat Result =
+ llvm::APFloat(Ctx.getFloatTypeSemantics(DestType), 1);
+ llvm::RoundingMode RM =
+ E->getFPFeaturesInEffect(Ctx.getLangOpts()).getRoundingMode();
+ if (RM == llvm::RoundingMode::Dynamic)
+ RM = llvm::RoundingMode::NearestTiesToEven;
+ Result.convertFromAPInt(Value, Value.isSigned(), RM);
+ return APValue(Result);
+ }
+ }
+ return APValue(Value);
+ }
+
+ llvm::Constant *EmitArrayInitialization(const InitListExpr *ILE, QualType T) {
auto *CAT = CGM.getContext().getAsConstantArrayType(ILE->getType());
assert(CAT && "can't emit array init for non-constant-bound array");
- unsigned NumInitElements = ILE->getNumInits();
- unsigned NumElements = CAT->getSize().getZExtValue();
+ uint64_t NumInitElements = ILE->getNumInits();
+ const uint64_t NumElements = CAT->getZExtSize();
+ for (const auto *Init : ILE->inits()) {
+ if (const auto *Embed =
+ dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
+ NumInitElements += Embed->getDataElementCount() - 1;
+ if (NumInitElements > NumElements) {
+ NumInitElements = NumElements;
+ break;
+ }
+ }
+ }
// Initialising an array requires us to automatically
// initialise any elements that have not been initialised explicitly
- unsigned NumInitableElts = std::min(NumInitElements, NumElements);
+ uint64_t NumInitableElts = std::min<uint64_t>(NumInitElements, NumElements);
QualType EltType = CAT->getElementType();
// Initialize remaining array elements.
llvm::Constant *fillC = nullptr;
- if (Expr *filler = ILE->getArrayFiller()) {
+ if (const Expr *filler = ILE->getArrayFiller()) {
fillC = Emitter.tryEmitAbstractForMemory(filler, EltType);
if (!fillC)
return nullptr;
}
// Copy initializer elements.
- SmallVector<llvm::Constant*, 16> Elts;
+ SmallVector<llvm::Constant *, 16> Elts;
if (fillC && fillC->isNullValue())
Elts.reserve(NumInitableElts + 1);
else
Elts.reserve(NumElements);
llvm::Type *CommonElementType = nullptr;
- for (unsigned i = 0; i < NumInitableElts; ++i) {
- Expr *Init = ILE->getInit(i);
- llvm::Constant *C = Emitter.tryEmitPrivateForMemory(Init, EltType);
+ auto Emit = [&](const Expr *Init, unsigned ArrayIndex) {
+ llvm::Constant *C = nullptr;
+ C = Emitter.tryEmitPrivateForMemory(Init, EltType);
if (!C)
- return nullptr;
- if (i == 0)
+ return false;
+ if (ArrayIndex == 0)
CommonElementType = C->getType();
else if (C->getType() != CommonElementType)
CommonElementType = nullptr;
Elts.push_back(C);
+ return true;
+ };
+
+ unsigned ArrayIndex = 0;
+ QualType DestTy = CAT->getElementType();
+ for (unsigned i = 0; i < ILE->getNumInits(); ++i) {
+ const Expr *Init = ILE->getInit(i);
+ if (auto *EmbedS = dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
+ StringLiteral *SL = EmbedS->getDataStringLiteral();
+ llvm::APSInt Value(CGM.getContext().getTypeSize(DestTy),
+ DestTy->isUnsignedIntegerType());
+ llvm::Constant *C;
+ for (unsigned I = EmbedS->getStartingElementPos(),
+ N = EmbedS->getDataElementCount();
+ I != EmbedS->getStartingElementPos() + N; ++I) {
+ Value = SL->getCodeUnit(I);
+ if (DestTy->isIntegerType()) {
+ C = llvm::ConstantInt::get(CGM.getLLVMContext(), Value);
+ } else {
+ C = Emitter.tryEmitPrivateForMemory(
+ withDestType(CGM.getContext(), Init, EmbedS->getType(), DestTy,
+ Value),
+ EltType);
+ }
+ if (!C)
+ return nullptr;
+ Elts.push_back(C);
+ ArrayIndex++;
+ }
+ if ((ArrayIndex - EmbedS->getDataElementCount()) == 0)
+ CommonElementType = C->getType();
+ else if (C->getType() != CommonElementType)
+ CommonElementType = nullptr;
+ } else {
+ if (!Emit(Init, ArrayIndex))
+ return nullptr;
+ ArrayIndex++;
+ }
}
llvm::ArrayType *Desired =
@@ -1290,16 +1381,17 @@ public:
fillC);
}
- llvm::Constant *EmitRecordInitialization(InitListExpr *ILE, QualType T) {
+ llvm::Constant *EmitRecordInitialization(const InitListExpr *ILE,
+ QualType T) {
return ConstStructBuilder::BuildStruct(Emitter, ILE, T);
}
- llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E,
+ llvm::Constant *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E,
QualType T) {
return CGM.EmitNullConstant(T);
}
- llvm::Constant *VisitInitListExpr(InitListExpr *ILE, QualType T) {
+ llvm::Constant *VisitInitListExpr(const InitListExpr *ILE, QualType T) {
if (ILE->isTransparent())
return Visit(ILE->getInit(0), T);
@@ -1312,8 +1404,9 @@ public:
return nullptr;
}
- llvm::Constant *VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E,
- QualType destType) {
+ llvm::Constant *
+ VisitDesignatedInitUpdateExpr(const DesignatedInitUpdateExpr *E,
+ QualType destType) {
auto C = Visit(E->getBase(), destType);
if (!C)
return nullptr;
@@ -1327,12 +1420,13 @@ public:
llvm::Type *ValTy = CGM.getTypes().ConvertType(destType);
bool HasFlexibleArray = false;
- if (auto *RT = destType->getAs<RecordType>())
+ if (const auto *RT = destType->getAs<RecordType>())
HasFlexibleArray = RT->getDecl()->hasFlexibleArrayMember();
return Const.build(ValTy, HasFlexibleArray);
}
- llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E, QualType Ty) {
+ llvm::Constant *VisitCXXConstructExpr(const CXXConstructExpr *E,
+ QualType Ty) {
if (!E->getConstructor()->isTrivial())
return nullptr;
@@ -1342,13 +1436,13 @@ public:
assert(E->getConstructor()->isCopyOrMoveConstructor() &&
"trivial ctor has argument but isn't a copy/move ctor");
- Expr *Arg = E->getArg(0);
+ const Expr *Arg = E->getArg(0);
assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
"argument to copy ctor is of wrong type");
// Look through the temporary; it's just converting the value to an
// lvalue to pass it to the constructor.
- if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(Arg))
+ if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(Arg))
return Visit(MTE->getSubExpr(), Ty);
// Don't try to support arbitrary lvalue-to-rvalue conversions for now.
return nullptr;
@@ -1357,12 +1451,12 @@ public:
return CGM.EmitNullConstant(Ty);
}
- llvm::Constant *VisitStringLiteral(StringLiteral *E, QualType T) {
+ llvm::Constant *VisitStringLiteral(const StringLiteral *E, QualType T) {
// This is a string literal initializing an array in an initializer.
return CGM.GetConstantArrayFromStringLiteral(E);
}
- llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E, QualType T) {
+ llvm::Constant *VisitObjCEncodeExpr(const ObjCEncodeExpr *E, QualType T) {
// This must be an @encode initializing an array in a static initializer.
// Don't emit it as the address of the string, emit the string data itself
// as an inline array.
@@ -1373,7 +1467,7 @@ public:
// Resize the string to the right size, adding zeros at the end, or
// truncating as needed.
- Str.resize(CAT->getSize().getZExtValue(), '\0');
+ Str.resize(CAT->getZExtSize(), '\0');
return llvm::ConstantDataArray::getString(VMContext, Str, false);
}
@@ -1381,13 +1475,17 @@ public:
return Visit(E->getSubExpr(), T);
}
- llvm::Constant *VisitUnaryMinus(UnaryOperator *U, QualType T) {
+ llvm::Constant *VisitUnaryMinus(const UnaryOperator *U, QualType T) {
if (llvm::Constant *C = Visit(U->getSubExpr(), T))
if (auto *CI = dyn_cast<llvm::ConstantInt>(C))
return llvm::ConstantInt::get(CGM.getLLVMContext(), -CI->getValue());
return nullptr;
}
+ llvm::Constant *VisitPackIndexingExpr(const PackIndexingExpr *E, QualType T) {
+ return Visit(E->getSelectedExpr(), T);
+ }
+
// Utility methods
llvm::Type *ConvertType(QualType T) {
return CGM.getTypes().ConvertType(T);
@@ -1455,9 +1553,11 @@ ConstantEmitter::emitAbstract(const Expr *E, QualType destType) {
llvm::Constant *
ConstantEmitter::emitAbstract(SourceLocation loc, const APValue &value,
- QualType destType) {
+ QualType destType,
+ bool EnablePtrAuthFunctionTypeDiscrimination) {
auto state = pushAbstract();
- auto C = tryEmitPrivate(value, destType);
+ auto C =
+ tryEmitPrivate(value, destType, EnablePtrAuthFunctionTypeDiscrimination);
C = validateAndPopAbstract(C, state);
if (!C) {
CGM.Error(loc,
@@ -1567,7 +1667,7 @@ namespace {
// messing around with llvm::Constant structures, which never itself
// does anything that should be visible in compiler output.
for (auto &entry : Locations) {
- assert(entry.first->getParent() == nullptr && "not a placeholder!");
+ assert(entry.first->getName() == "" && "not a placeholder!");
entry.first->replaceAllUsesWith(entry.second);
entry.first->eraseFromParent();
}
@@ -1690,8 +1790,7 @@ llvm::Constant *ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) {
if (!destType->isReferenceType()) {
QualType nonMemoryDestType = getNonMemoryType(CGM, destType);
- if (llvm::Constant *C = ConstExprEmitter(*this).Visit(const_cast<Expr *>(E),
- nonMemoryDestType))
+ if (llvm::Constant *C = ConstExprEmitter(*this).Visit(E, nonMemoryDestType))
return emitForMemory(C, destType);
}
@@ -1732,6 +1831,43 @@ llvm::Constant *ConstantEmitter::tryEmitPrivateForMemory(const APValue &value,
return (C ? emitForMemory(C, destType) : nullptr);
}
+/// Try to emit a constant signed pointer, given a raw pointer and the
+/// destination ptrauth qualifier.
+///
+/// This can fail if the qualifier needs address discrimination and the
+/// emitter is in an abstract mode.
+llvm::Constant *
+ConstantEmitter::tryEmitConstantSignedPointer(llvm::Constant *UnsignedPointer,
+ PointerAuthQualifier Schema) {
+ assert(Schema && "applying trivial ptrauth schema");
+
+ if (Schema.hasKeyNone())
+ return UnsignedPointer;
+
+ unsigned Key = Schema.getKey();
+
+ // Create an address placeholder if we're using address discrimination.
+ llvm::GlobalValue *StorageAddress = nullptr;
+ if (Schema.isAddressDiscriminated()) {
+ // We can't do this if the emitter is in an abstract state.
+ if (isAbstract())
+ return nullptr;
+
+ StorageAddress = getCurrentAddrPrivate();
+ }
+
+ llvm::ConstantInt *Discriminator =
+ llvm::ConstantInt::get(CGM.IntPtrTy, Schema.getExtraDiscriminator());
+
+ llvm::Constant *SignedPointer = CGM.getConstantSignedPointer(
+ UnsignedPointer, Key, StorageAddress, Discriminator);
+
+ if (Schema.isAddressDiscriminated())
+ registerCurrentAddrPrivate(SignedPointer, StorageAddress);
+
+ return SignedPointer;
+}
+
llvm::Constant *ConstantEmitter::emitForMemory(CodeGenModule &CGM,
llvm::Constant *C,
QualType destType) {
@@ -1763,6 +1899,27 @@ llvm::Constant *ConstantEmitter::emitForMemory(CodeGenModule &CGM,
return Res;
}
+ if (destType->isBitIntType()) {
+ ConstantAggregateBuilder Builder(CGM);
+ llvm::Type *LoadStoreTy = CGM.getTypes().convertTypeForLoadStore(destType);
+ // ptrtoint/inttoptr should not involve _BitInt in constant expressions, so
+ // casting to ConstantInt is safe here.
+ auto *CI = cast<llvm::ConstantInt>(C);
+ llvm::Constant *Res = llvm::ConstantFoldCastOperand(
+ destType->isSignedIntegerOrEnumerationType() ? llvm::Instruction::SExt
+ : llvm::Instruction::ZExt,
+ CI, LoadStoreTy, CGM.getDataLayout());
+ if (CGM.getTypes().typeRequiresSplitIntoByteArray(destType, C->getType())) {
+ // Long _BitInt has array of bytes as in-memory type.
+ // So, split constant into individual bytes.
+ llvm::Type *DesiredTy = CGM.getTypes().ConvertTypeForMem(destType);
+ llvm::APInt Value = cast<llvm::ConstantInt>(Res)->getValue();
+ Builder.addBits(Value, /*OffsetInBits=*/0, /*AllowOverwrite=*/false);
+ return Builder.build(DesiredTy, /*AllowOversized*/ false);
+ }
+ return Res;
+ }
+
return C;
}
@@ -1771,8 +1928,7 @@ llvm::Constant *ConstantEmitter::tryEmitPrivate(const Expr *E,
assert(!destType->isVoidType() && "can't emit a void constant");
if (!destType->isReferenceType())
- if (llvm::Constant *C =
- ConstExprEmitter(*this).Visit(const_cast<Expr *>(E), destType))
+ if (llvm::Constant *C = ConstExprEmitter(*this).Visit(E, destType))
return C;
Expr::EvalResult Result;
@@ -1816,14 +1972,18 @@ class ConstantLValueEmitter : public ConstStmtVisitor<ConstantLValueEmitter,
ConstantEmitter &Emitter;
const APValue &Value;
QualType DestType;
+ bool EnablePtrAuthFunctionTypeDiscrimination;
// Befriend StmtVisitorBase so that we don't have to expose Visit*.
friend StmtVisitorBase;
public:
ConstantLValueEmitter(ConstantEmitter &emitter, const APValue &value,
- QualType destType)
- : CGM(emitter.CGM), Emitter(emitter), Value(value), DestType(destType) {}
+ QualType destType,
+ bool EnablePtrAuthFunctionTypeDiscrimination = true)
+ : CGM(emitter.CGM), Emitter(emitter), Value(value), DestType(destType),
+ EnablePtrAuthFunctionTypeDiscrimination(
+ EnablePtrAuthFunctionTypeDiscrimination) {}
llvm::Constant *tryEmit();
@@ -1846,6 +2006,12 @@ private:
ConstantLValue VisitMaterializeTemporaryExpr(
const MaterializeTemporaryExpr *E);
+ ConstantLValue emitPointerAuthSignConstant(const CallExpr *E);
+ llvm::Constant *emitPointerAuthPointer(const Expr *E);
+ unsigned emitPointerAuthKey(const Expr *E);
+ std::pair<llvm::Constant *, llvm::ConstantInt *>
+ emitPointerAuthDiscriminator(const Expr *E);
+
bool hasNonZeroOffset() const {
return !Value.getLValueOffset().isZero();
}
@@ -1940,10 +2106,30 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
if (D->hasAttr<WeakRefAttr>())
return CGM.GetWeakRefReference(D).getPointer();
- if (auto FD = dyn_cast<FunctionDecl>(D))
- return CGM.GetAddrOfFunction(FD);
+ auto PtrAuthSign = [&](llvm::Constant *C) {
+ CGPointerAuthInfo AuthInfo;
+
+ if (EnablePtrAuthFunctionTypeDiscrimination)
+ AuthInfo = CGM.getFunctionPointerAuthInfo(DestType);
- if (auto VD = dyn_cast<VarDecl>(D)) {
+ if (AuthInfo) {
+ if (hasNonZeroOffset())
+ return ConstantLValue(nullptr);
+
+ C = applyOffset(C);
+ C = CGM.getConstantSignedPointer(
+ C, AuthInfo.getKey(), nullptr,
+ cast_or_null<llvm::ConstantInt>(AuthInfo.getDiscriminator()));
+ return ConstantLValue(C, /*applied offset*/ true);
+ }
+
+ return ConstantLValue(C);
+ };
+
+ if (const auto *FD = dyn_cast<FunctionDecl>(D))
+ return PtrAuthSign(CGM.getRawFunctionPointer(FD));
+
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
// We can never refer to a variable with local storage.
if (!VD->hasLocalStorage()) {
if (VD->isFileVarDecl() || VD->hasExternalStorage())
@@ -1956,13 +2142,13 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
}
}
- if (auto *GD = dyn_cast<MSGuidDecl>(D))
+ if (const auto *GD = dyn_cast<MSGuidDecl>(D))
return CGM.GetAddrOfMSGuidDecl(GD);
- if (auto *GCD = dyn_cast<UnnamedGlobalConstantDecl>(D))
+ if (const auto *GCD = dyn_cast<UnnamedGlobalConstantDecl>(D))
return CGM.GetAddrOfUnnamedGlobalConstantDecl(GCD);
- if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(D))
+ if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(D))
return CGM.GetAddrOfTemplateParamObject(TPO);
return nullptr;
@@ -2016,7 +2202,7 @@ ConstantLValue
ConstantLValueEmitter::VisitObjCBoxedExpr(const ObjCBoxedExpr *E) {
assert(E->isExpressibleAsConstantInitializer() &&
"this boxed expression can't be emitted as a compile-time constant");
- auto *SL = cast<StringLiteral>(E->getSubExpr()->IgnoreParenCasts());
+ const auto *SL = cast<StringLiteral>(E->getSubExpr()->IgnoreParenCasts());
return emitConstantObjCStringLiteral(SL, E->getType(), CGM);
}
@@ -2038,20 +2224,75 @@ ConstantLValueEmitter::VisitCallExpr(const CallExpr *E) {
if (builtin == Builtin::BI__builtin_function_start)
return CGM.GetFunctionStart(
E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext()));
+
+ if (builtin == Builtin::BI__builtin_ptrauth_sign_constant)
+ return emitPointerAuthSignConstant(E);
+
if (builtin != Builtin::BI__builtin___CFStringMakeConstantString &&
builtin != Builtin::BI__builtin___NSStringMakeConstantString)
return nullptr;
- auto literal = cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts());
+ const auto *Literal = cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts());
if (builtin == Builtin::BI__builtin___NSStringMakeConstantString) {
- return CGM.getObjCRuntime().GenerateConstantString(literal);
+ return CGM.getObjCRuntime().GenerateConstantString(Literal);
} else {
// FIXME: need to deal with UCN conversion issues.
- return CGM.GetAddrOfConstantCFString(literal);
+ return CGM.GetAddrOfConstantCFString(Literal);
}
}
ConstantLValue
+ConstantLValueEmitter::emitPointerAuthSignConstant(const CallExpr *E) {
+ llvm::Constant *UnsignedPointer = emitPointerAuthPointer(E->getArg(0));
+ unsigned Key = emitPointerAuthKey(E->getArg(1));
+ auto [StorageAddress, OtherDiscriminator] =
+ emitPointerAuthDiscriminator(E->getArg(2));
+
+ llvm::Constant *SignedPointer = CGM.getConstantSignedPointer(
+ UnsignedPointer, Key, StorageAddress, OtherDiscriminator);
+ return SignedPointer;
+}
+
+llvm::Constant *ConstantLValueEmitter::emitPointerAuthPointer(const Expr *E) {
+ Expr::EvalResult Result;
+ bool Succeeded = E->EvaluateAsRValue(Result, CGM.getContext());
+ assert(Succeeded);
+ (void)Succeeded;
+
+ // The assertions here are all checked by Sema.
+ assert(Result.Val.isLValue());
+ if (isa<FunctionDecl>(Result.Val.getLValueBase().get<const ValueDecl *>()))
+ assert(Result.Val.getLValueOffset().isZero());
+ return ConstantEmitter(CGM, Emitter.CGF)
+ .emitAbstract(E->getExprLoc(), Result.Val, E->getType(), false);
+}
+
+unsigned ConstantLValueEmitter::emitPointerAuthKey(const Expr *E) {
+ return E->EvaluateKnownConstInt(CGM.getContext()).getZExtValue();
+}
+
+std::pair<llvm::Constant *, llvm::ConstantInt *>
+ConstantLValueEmitter::emitPointerAuthDiscriminator(const Expr *E) {
+ E = E->IgnoreParens();
+
+ if (const auto *Call = dyn_cast<CallExpr>(E)) {
+ if (Call->getBuiltinCallee() ==
+ Builtin::BI__builtin_ptrauth_blend_discriminator) {
+ llvm::Constant *Pointer = ConstantEmitter(CGM).emitAbstract(
+ Call->getArg(0), Call->getArg(0)->getType());
+ auto *Extra = cast<llvm::ConstantInt>(ConstantEmitter(CGM).emitAbstract(
+ Call->getArg(1), Call->getArg(1)->getType()));
+ return {Pointer, Extra};
+ }
+ }
+
+ llvm::Constant *Result = ConstantEmitter(CGM).emitAbstract(E, E->getType());
+ if (Result->getType()->isPointerTy())
+ return {Result, nullptr};
+ return {nullptr, cast<llvm::ConstantInt>(Result)};
+}
+
+ConstantLValue
ConstantLValueEmitter::VisitBlockExpr(const BlockExpr *E) {
StringRef functionName;
if (auto CGF = Emitter.CGF)
@@ -2076,22 +2317,22 @@ ConstantLValue
ConstantLValueEmitter::VisitMaterializeTemporaryExpr(
const MaterializeTemporaryExpr *E) {
assert(E->getStorageDuration() == SD_Static);
- SmallVector<const Expr *, 2> CommaLHSs;
- SmallVector<SubobjectAdjustment, 2> Adjustments;
- const Expr *Inner =
- E->getSubExpr()->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
+ const Expr *Inner = E->getSubExpr()->skipRValueSubobjectAdjustments();
return CGM.GetAddrOfGlobalTemporary(E, Inner);
}
-llvm::Constant *ConstantEmitter::tryEmitPrivate(const APValue &Value,
- QualType DestType) {
+llvm::Constant *
+ConstantEmitter::tryEmitPrivate(const APValue &Value, QualType DestType,
+ bool EnablePtrAuthFunctionTypeDiscrimination) {
switch (Value.getKind()) {
case APValue::None:
case APValue::Indeterminate:
// Out-of-lifetime and indeterminate values can be modeled as 'undef'.
return llvm::UndefValue::get(CGM.getTypes().ConvertType(DestType));
case APValue::LValue:
- return ConstantLValueEmitter(*this, Value, DestType).tryEmit();
+ return ConstantLValueEmitter(*this, Value, DestType,
+ EnablePtrAuthFunctionTypeDiscrimination)
+ .tryEmit();
case APValue::Int:
return llvm::ConstantInt::get(CGM.getLLVMContext(), Value.getInt());
case APValue::FixedPoint:
@@ -2286,8 +2527,10 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
// Ignore empty bases.
- if (base->isEmpty() ||
- CGM.getContext().getASTRecordLayout(base).getNonVirtualSize()
+ if (isEmptyRecordForLayout(CGM.getContext(), I.getType()) ||
+ CGM.getContext()
+ .getASTRecordLayout(base)
+ .getNonVirtualSize()
.isZero())
continue;
@@ -2301,7 +2544,8 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
for (const auto *Field : record->fields()) {
// Fill in non-bitfields. (Bitfields always use a zero pattern, which we
// will fill in later.)
- if (!Field->isBitField() && !Field->isZeroSize(CGM.getContext())) {
+ if (!Field->isBitField() &&
+ !isEmptyFieldForLayout(CGM.getContext(), Field)) {
unsigned fieldIndex = layout.getLLVMFieldNo(Field);
elements[fieldIndex] = CGM.EmitNullConstant(Field->getType());
}
@@ -2323,7 +2567,7 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
// Ignore empty bases.
- if (base->isEmpty())
+ if (isEmptyRecordForLayout(CGM.getContext(), I.getType()))
continue;
unsigned fieldIndex = layout.getVirtualBaseIndex(base);
@@ -2380,7 +2624,7 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
llvm::Constant *Element =
ConstantEmitter::emitNullForMemory(*this, ElementTy);
- unsigned NumElements = CAT->getSize().getZExtValue();
+ unsigned NumElements = CAT->getZExtSize();
SmallVector<llvm::Constant *, 8> Array(NumElements, Element);
return llvm::ConstantArray::get(ATy, Array);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
index 181b15e9c7d0..6e212e74676e 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
@@ -15,6 +15,7 @@
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
+#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
@@ -52,6 +53,10 @@ using llvm::Value;
// Scalar Expression Emitter
//===----------------------------------------------------------------------===//
+namespace llvm {
+extern cl::opt<bool> EnableSingleByteCoverage;
+} // namespace llvm
+
namespace {
/// Determine whether the given binary operation may overflow.
@@ -142,6 +147,15 @@ struct BinOpInfo {
return UnOp->getSubExpr()->getType()->isFixedPointType();
return false;
}
+
+ /// Check if the RHS has a signed integer representation.
+ bool rhsHasSignedIntegerRepresentation() const {
+ if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
+ QualType RHSType = BinOp->getRHS()->getType();
+ return RHSType->hasSignedIntegerRepresentation();
+ }
+ return false;
+ }
};
static bool MustVisitNullValue(const Expr *E) {
@@ -304,6 +318,7 @@ public:
llvm::Type *DstTy, SourceLocation Loc);
/// Known implicit conversion check kinds.
+ /// This is used for bitfield conversion checks as well.
/// Keep in sync with the enum of the same name in ubsan_handlers.h
enum ImplicitConversionCheckKind : unsigned char {
ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
@@ -421,9 +436,10 @@ public:
if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
if (E->isGLValue())
- return CGF.Builder.CreateLoad(Address(
- Result, CGF.ConvertTypeForMem(E->getType()),
- CGF.getContext().getTypeAlignInChars(E->getType())));
+ return CGF.EmitLoadOfScalar(
+ Address(Result, CGF.convertTypeForLoadStore(E->getType()),
+ CGF.getContext().getTypeAlignInChars(E->getType())),
+ /*Volatile*/ false, E->getType(), E->getExprLoc());
return Result;
}
return Visit(E->getSubExpr());
@@ -491,6 +507,7 @@ public:
}
Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
+ Value *VisitEmbedExpr(EmbedExpr *E);
Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
if (E->isGLValue())
@@ -723,7 +740,9 @@ public:
if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Defined:
- return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
+ if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
+ return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
+ [[fallthrough]];
case LangOptions::SOB_Undefined:
if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
@@ -774,7 +793,7 @@ public:
void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
llvm::Value *Zero,bool isDiv);
// Common helper for getting how wide LHS of shift is.
- static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS);
+ static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);
// Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
// non powers of two.
@@ -904,6 +923,9 @@ public:
}
Value *VisitAsTypeExpr(AsTypeExpr *CE);
Value *VisitAtomicExpr(AtomicExpr *AE);
+ Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
+ return Visit(E->getSelectedExpr());
+ }
};
} // end anonymous namespace.
@@ -1089,11 +1111,28 @@ void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
llvm::Constant *StaticArgs[] = {
CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
CGF.EmitCheckTypeDescriptor(DstType),
- llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
+ llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
+ llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
+
CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
{Src, Dst});
}
+static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
+ const char *Name,
+ CGBuilderTy &Builder) {
+ bool VSigned = VType->isSignedIntegerOrEnumerationType();
+ llvm::Type *VTy = V->getType();
+ if (!VSigned) {
+ // If the value is unsigned, then it is never negative.
+ return llvm::ConstantInt::getFalse(VTy->getContext());
+ }
+ llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
+ return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
+ llvm::Twine(Name) + "." + V->getName() +
+ ".negativitycheck");
+}
+
// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
@@ -1118,30 +1157,12 @@ EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
"either the widths should be different, or the signednesses.");
- // NOTE: zero value is considered to be non-negative.
- auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
- const char *Name) -> Value * {
- // Is this value a signed type?
- bool VSigned = VType->isSignedIntegerOrEnumerationType();
- llvm::Type *VTy = V->getType();
- if (!VSigned) {
- // If the value is unsigned, then it is never negative.
- // FIXME: can we encounter non-scalar VTy here?
- return llvm::ConstantInt::getFalse(VTy->getContext());
- }
- // Get the zero of the same type with which we will be comparing.
- llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
- // %V.isnegative = icmp slt %V, 0
- // I.e is %V *strictly* less than zero, does it have negative value?
- return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
- llvm::Twine(Name) + "." + V->getName() +
- ".negativitycheck");
- };
-
// 1. Was the old Value negative?
- llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
+ llvm::Value *SrcIsNegative =
+ EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
// 2. Is the new Value negative?
- llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
+ llvm::Value *DstIsNegative =
+ EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
// 3. Now, was the 'negativity status' preserved during the conversion?
// NOTE: conversion from negative to zero is considered to change the sign.
// (We want to get 'false' when the conversion changed the sign)
@@ -1230,12 +1251,143 @@ void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
llvm::Constant *StaticArgs[] = {
CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
CGF.EmitCheckTypeDescriptor(DstType),
- llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
+ llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
+ llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
// EmitCheck() will 'and' all the checks together.
CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
{Src, Dst});
}
+// Should be called within CodeGenFunction::SanitizerScope RAII scope.
+// Returns 'i1 false' when the truncation Src -> Dst was lossy.
+static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
+ std::pair<llvm::Value *, SanitizerMask>>
+EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
+ QualType DstType, CGBuilderTy &Builder) {
+ bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
+ bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
+
+ ScalarExprEmitter::ImplicitConversionCheckKind Kind;
+ if (!SrcSigned && !DstSigned)
+ Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
+ else
+ Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
+
+ llvm::Value *Check = nullptr;
+ // 1. Extend the truncated value back to the same width as the Src.
+ Check = Builder.CreateIntCast(Dst, Src->getType(), DstSigned, "bf.anyext");
+ // 2. Equality-compare with the original source value
+ Check = Builder.CreateICmpEQ(Check, Src, "bf.truncheck");
+ // If the comparison result is 'i1 false', then the truncation was lossy.
+
+ return std::make_pair(
+ Kind, std::make_pair(Check, SanitizerKind::ImplicitBitfieldConversion));
+}
+
+// Should be called within CodeGenFunction::SanitizerScope RAII scope.
+// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
+static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
+ std::pair<llvm::Value *, SanitizerMask>>
+EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
+ QualType DstType, CGBuilderTy &Builder) {
+ // 1. Was the old Value negative?
+ llvm::Value *SrcIsNegative =
+ EmitIsNegativeTestHelper(Src, SrcType, "bf.src", Builder);
+ // 2. Is the new Value negative?
+ llvm::Value *DstIsNegative =
+ EmitIsNegativeTestHelper(Dst, DstType, "bf.dst", Builder);
+ // 3. Now, was the 'negativity status' preserved during the conversion?
+ // NOTE: conversion from negative to zero is considered to change the sign.
+ // (We want to get 'false' when the conversion changed the sign)
+ // So we should just equality-compare the negativity statuses.
+ llvm::Value *Check = nullptr;
+ Check =
+ Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "bf.signchangecheck");
+ // If the comparison result is 'false', then the conversion changed the sign.
+ return std::make_pair(
+ ScalarExprEmitter::ICCK_IntegerSignChange,
+ std::make_pair(Check, SanitizerKind::ImplicitBitfieldConversion));
+}
+
+void CodeGenFunction::EmitBitfieldConversionCheck(Value *Src, QualType SrcType,
+ Value *Dst, QualType DstType,
+ const CGBitFieldInfo &Info,
+ SourceLocation Loc) {
+
+ if (!SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
+ return;
+
+ // We only care about int->int conversions here.
+ // We ignore conversions to/from pointer and/or bool.
+ if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
+ DstType))
+ return;
+
+ if (DstType->isBooleanType() || SrcType->isBooleanType())
+ return;
+
+ // This should be truncation of integral types.
+ assert(isa<llvm::IntegerType>(Src->getType()) &&
+ isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");
+
+ // TODO: Calculate src width to avoid emitting code
+ // for unecessary cases.
+ unsigned SrcBits = ConvertType(SrcType)->getScalarSizeInBits();
+ unsigned DstBits = Info.Size;
+
+ bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
+ bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
+
+ CodeGenFunction::SanitizerScope SanScope(this);
+
+ std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
+ std::pair<llvm::Value *, SanitizerMask>>
+ Check;
+
+ // Truncation
+ bool EmitTruncation = DstBits < SrcBits;
+ // If Dst is signed and Src unsigned, we want to be more specific
+ // about the CheckKind we emit, in this case we want to emit
+ // ICCK_SignedIntegerTruncationOrSignChange.
+ bool EmitTruncationFromUnsignedToSigned =
+ EmitTruncation && DstSigned && !SrcSigned;
+ // Sign change
+ bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
+ bool BothUnsigned = !SrcSigned && !DstSigned;
+ bool LargerSigned = (DstBits > SrcBits) && DstSigned;
+ // We can avoid emitting sign change checks in some obvious cases
+ // 1. If Src and Dst have the same signedness and size
+ // 2. If both are unsigned sign check is unecessary!
+ // 3. If Dst is signed and bigger than Src, either
+ // sign-extension or zero-extension will make sure
+ // the sign remains.
+ bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;
+
+ if (EmitTruncation)
+ Check =
+ EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
+ else if (EmitSignChange) {
+ assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
+ "either the widths should be different, or the signednesses.");
+ Check =
+ EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
+ } else
+ return;
+
+ ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
+ if (EmitTruncationFromUnsignedToSigned)
+ CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;
+
+ llvm::Constant *StaticArgs[] = {
+ EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(SrcType),
+ EmitCheckTypeDescriptor(DstType),
+ llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
+ llvm::ConstantInt::get(Builder.getInt32Ty(), Info.Size)};
+
+ EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
+ {Src, Dst});
+}
+
Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
QualType DstType, llvm::Type *SrcTy,
llvm::Type *DstTy,
@@ -1390,7 +1542,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
// The source value may be an integer, or a pointer.
if (isa<llvm::PointerType>(SrcTy))
- return Builder.CreateBitCast(Src, DstTy, "conv");
+ return Src;
assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
// First, convert to the correct width so that we control the kind of
@@ -1646,6 +1798,12 @@ ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
"usn_addr_cast");
}
+// Emit an EmbedExpr appearing in scalar context. In this position the
+// embedded data must consist of exactly one element (asserted below), so we
+// simply materialize that single value as an integer constant.
+Value *ScalarExprEmitter::VisitEmbedExpr(EmbedExpr *E) {
+  assert(E->getDataElementCount() == 1);
+  auto It = E->begin();
+  return Builder.getInt((*It)->getValue());
+}
+
Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
// Vector Mask Case
if (E->getNumSubExprs() == 2) {
@@ -1787,7 +1945,26 @@ Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
}
}
- return EmitLoadOfLValue(E);
+ llvm::Value *Result = EmitLoadOfLValue(E);
+
+ // If -fdebug-info-for-profiling is specified, emit a pseudo variable and its
+ // debug info for the pointer, even if there is no variable associated with
+ // the pointer's expression.
+ if (CGF.CGM.getCodeGenOpts().DebugInfoForProfiling && CGF.getDebugInfo()) {
+ if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Result)) {
+ if (llvm::GetElementPtrInst *GEP =
+ dyn_cast<llvm::GetElementPtrInst>(Load->getPointerOperand())) {
+ if (llvm::Instruction *Pointer =
+ dyn_cast<llvm::Instruction>(GEP->getPointerOperand())) {
+ QualType Ty = E->getBase()->getType();
+ if (!E->isArrow())
+ Ty = CGF.getContext().getPointerType(Ty);
+ CGF.getDebugInfo()->EmitPseudoVariable(Builder, Pointer, Ty);
+ }
+ }
+ }
+ }
+ return Result;
}
Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
@@ -2062,7 +2239,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_LValueBitCast:
case CK_ObjCObjectLValueCast: {
- Address Addr = EmitLValue(E).getAddress(CGF);
+ Address Addr = EmitLValue(E).getAddress();
Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
return EmitLoadOfLValue(LV, CE->getExprLoc());
@@ -2070,8 +2247,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_LValueToRValueBitCast: {
LValue SourceLVal = CGF.EmitLValue(E);
- Address Addr = SourceLVal.getAddress(CGF).withElementType(
- CGF.ConvertTypeForMem(DestTy));
+ Address Addr =
+ SourceLVal.getAddress().withElementType(CGF.ConvertTypeForMem(DestTy));
LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
return EmitLoadOfLValue(DestLV, CE->getExprLoc());
@@ -2134,26 +2311,24 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// If Src is a fixed vector and Dst is a scalable vector, and both have the
// same element type, use the llvm.vector.insert intrinsic to perform the
// bitcast.
- if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
- if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
- // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
+ if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
+ if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
+ // If we are casting a fixed i8 vector to a scalable i1 predicate
// vector, use a vector insert and bitcast the result.
- bool NeedsBitCast = false;
- auto PredType = llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
- llvm::Type *OrigType = DstTy;
- if (ScalableDst == PredType &&
- FixedSrc->getElementType() == Builder.getInt8Ty()) {
- DstTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
- ScalableDst = cast<llvm::ScalableVectorType>(DstTy);
- NeedsBitCast = true;
+ if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
+ ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
+ FixedSrcTy->getElementType()->isIntegerTy(8)) {
+ ScalableDstTy = llvm::ScalableVectorType::get(
+ FixedSrcTy->getElementType(),
+ ScalableDstTy->getElementCount().getKnownMinValue() / 8);
}
- if (FixedSrc->getElementType() == ScalableDst->getElementType()) {
- llvm::Value *UndefVec = llvm::UndefValue::get(DstTy);
+ if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
+ llvm::Value *UndefVec = llvm::UndefValue::get(ScalableDstTy);
llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
llvm::Value *Result = Builder.CreateInsertVector(
- DstTy, UndefVec, Src, Zero, "cast.scalable");
- if (NeedsBitCast)
- Result = Builder.CreateBitCast(Result, OrigType);
+ ScalableDstTy, UndefVec, Src, Zero, "cast.scalable");
+ if (Result->getType() != DstTy)
+ Result = Builder.CreateBitCast(Result, DstTy);
return Result;
}
}
@@ -2162,18 +2337,19 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// If Src is a scalable vector and Dst is a fixed vector, and both have the
// same element type, use the llvm.vector.extract intrinsic to perform the
// bitcast.
- if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
- if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) {
- // If we are casting a scalable 16 x i1 predicate vector to a fixed i8
+ if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
+ if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {
+ // If we are casting a scalable i1 predicate vector to a fixed i8
// vector, bitcast the source and use a vector extract.
- auto PredType = llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
- if (ScalableSrc == PredType &&
- FixedDst->getElementType() == Builder.getInt8Ty()) {
- SrcTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
- ScalableSrc = cast<llvm::ScalableVectorType>(SrcTy);
- Src = Builder.CreateBitCast(Src, SrcTy);
+ if (ScalableSrcTy->getElementType()->isIntegerTy(1) &&
+ ScalableSrcTy->getElementCount().isKnownMultipleOf(8) &&
+ FixedDstTy->getElementType()->isIntegerTy(8)) {
+ ScalableSrcTy = llvm::ScalableVectorType::get(
+ FixedDstTy->getElementType(),
+ ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
+ Src = Builder.CreateBitCast(Src, ScalableSrcTy);
}
- if (ScalableSrc->getElementType() == FixedDst->getElementType()) {
+ if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType()) {
llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
return Builder.CreateExtractVector(DstTy, Src, Zero, "cast.fixed");
}
@@ -2181,7 +2357,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
}
// Perform VLAT <-> VLST bitcast through memory.
- // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
+ // TODO: since the llvm.vector.{insert,extract} intrinsics
// require the element types of the vectors to be the same, we
// need to keep this around for bitcasts between VLAT <-> VLST where
// the element types of the vectors are not the same, until we figure
@@ -2198,7 +2374,9 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
return EmitLoadOfLValue(DestLV, CE->getExprLoc());
}
- return Builder.CreateBitCast(Src, DstTy);
+
+ llvm::Value *Result = Builder.CreateBitCast(Src, DstTy);
+ return CGF.authPointerToPointerCast(Result, E->getType(), DestTy);
}
case CK_AddressSpaceConversion: {
Expr::EvalResult Result;
@@ -2242,7 +2420,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// performed and the object is not of the derived type.
if (CGF.sanitizePerformTypeCheck())
CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
- Derived.getPointer(), DestTy->getPointeeType());
+ Derived, DestTy->getPointeeType());
if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
@@ -2250,13 +2428,14 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
CodeGenFunction::CFITCK_DerivedCast,
CE->getBeginLoc());
- return Derived.getPointer();
+ return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType());
}
case CK_UncheckedDerivedToBase:
case CK_DerivedToBase: {
// The EmitPointerWithAlignment path does this fine; just discard
// the alignment.
- return CGF.EmitPointerWithAlignment(CE).getPointer();
+ return CGF.getAsNaturalPointerTo(CGF.EmitPointerWithAlignment(CE),
+ CE->getType()->getPointeeType());
}
case CK_Dynamic: {
@@ -2266,7 +2445,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
}
case CK_ArrayToPointerDecay:
- return CGF.EmitArrayToPointerDecay(E).getPointer();
+ return CGF.getAsNaturalPointerTo(CGF.EmitArrayToPointerDecay(E),
+ CE->getType()->getPointeeType());
case CK_FunctionToPointerDecay:
return EmitLValue(E).getPointer(CGF);
@@ -2319,6 +2499,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_FloatingComplexToIntegralComplex:
case CK_ConstructorConversion:
case CK_ToUnion:
+ case CK_HLSLArrayRValue:
llvm_unreachable("scalar cast to non-scalar value");
case CK_LValueToRValue:
@@ -2345,6 +2526,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
if (DestTy.mayBeDynamicClass())
IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
}
+
+ IntToPtr = CGF.authPointerToPointerCast(IntToPtr, E->getType(), DestTy);
return IntToPtr;
}
case CK_PointerToIntegral: {
@@ -2360,6 +2543,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
}
+ PtrExpr = CGF.authPointerToPointerCast(PtrExpr, E->getType(), DestTy);
return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
}
case CK_ToVoid: {
@@ -2406,6 +2590,12 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
CE->getExprLoc());
case CK_IntegralCast: {
+ if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
+ QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
+ return Builder.CreateIntCast(Visit(E), ConvertType(DestTy),
+ SrcElTy->isSignedIntegerOrEnumerationType(),
+ "conv");
+ }
ScalarConversionOpts Opts;
if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
if (!ICE->isPartOfExplicitCast())
@@ -2414,9 +2604,44 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return EmitScalarConversion(Visit(E), E->getType(), DestTy,
CE->getExprLoc(), Opts);
}
- case CK_IntegralToFloating:
- case CK_FloatingToIntegral:
- case CK_FloatingCast:
+ case CK_IntegralToFloating: {
+ if (E->getType()->isVectorType() && DestTy->isVectorType()) {
+ // TODO: Support constrained FP intrinsics.
+ QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
+ if (SrcElTy->isSignedIntegerOrEnumerationType())
+ return Builder.CreateSIToFP(Visit(E), ConvertType(DestTy), "conv");
+ return Builder.CreateUIToFP(Visit(E), ConvertType(DestTy), "conv");
+ }
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
+ return EmitScalarConversion(Visit(E), E->getType(), DestTy,
+ CE->getExprLoc());
+ }
+ case CK_FloatingToIntegral: {
+ if (E->getType()->isVectorType() && DestTy->isVectorType()) {
+ // TODO: Support constrained FP intrinsics.
+ QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
+ if (DstElTy->isSignedIntegerOrEnumerationType())
+ return Builder.CreateFPToSI(Visit(E), ConvertType(DestTy), "conv");
+ return Builder.CreateFPToUI(Visit(E), ConvertType(DestTy), "conv");
+ }
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
+ return EmitScalarConversion(Visit(E), E->getType(), DestTy,
+ CE->getExprLoc());
+ }
+ case CK_FloatingCast: {
+ if (E->getType()->isVectorType() && DestTy->isVectorType()) {
+ // TODO: Support constrained FP intrinsics.
+ QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
+ QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
+ if (DstElTy->castAs<BuiltinType>()->getKind() <
+ SrcElTy->castAs<BuiltinType>()->getKind())
+ return Builder.CreateFPTrunc(Visit(E), ConvertType(DestTy), "conv");
+ return Builder.CreateFPExt(Visit(E), ConvertType(DestTy), "conv");
+ }
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
+ return EmitScalarConversion(Visit(E), E->getType(), DestTy,
+ CE->getExprLoc());
+ }
case CK_FixedPointToFloating:
case CK_FloatingToFixedPoint: {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
@@ -2466,6 +2691,17 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_IntToOCLSampler:
return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
+ case CK_HLSLVectorTruncation: {
+ assert(DestTy->isVectorType() && "Expected dest type to be vector type");
+ Value *Vec = Visit(const_cast<Expr *>(E));
+ SmallVector<int, 16> Mask;
+ unsigned NumElts = DestTy->castAs<VectorType>()->getNumElements();
+ for (unsigned I = 0; I != NumElts; ++I)
+ Mask.push_back(I);
+
+ return Builder.CreateShuffleVector(Vec, Mask, "trunc");
+ }
+
} // end of switch
llvm_unreachable("unknown scalar cast");
@@ -2514,7 +2750,9 @@ llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
StringRef Name = IsInc ? "inc" : "dec";
switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Defined:
- return Builder.CreateAdd(InVal, Amount, Name);
+ if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
+ return Builder.CreateAdd(InVal, Amount, Name);
+ [[fallthrough]];
case LangOptions::SOB_Undefined:
if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWAdd(InVal, Amount, Name);
@@ -2555,6 +2793,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::PHINode *atomicPHI = nullptr;
llvm::Value *value;
llvm::Value *input;
+ llvm::Value *Previous = nullptr;
+ QualType SrcType = E->getType();
int amount = (isInc ? 1 : -1);
bool isSubtraction = !isInc;
@@ -2564,14 +2804,14 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (isInc && type->isBooleanType()) {
llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
if (isPre) {
- Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified())
+ Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
return Builder.getTrue();
}
// For atomic bool increment, we just store true and return it for
// preincrement, do an atomic swap with true for postincrement
return Builder.CreateAtomicRMW(
- llvm::AtomicRMWInst::Xchg, LV.getAddress(CGF), True,
+ llvm::AtomicRMWInst::Xchg, LV.getAddress(), True,
llvm::AtomicOrdering::SequentiallyConsistent);
}
// Special case for atomic increment / decrement on integers, emit
@@ -2589,10 +2829,27 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *amt = CGF.EmitToMemory(
llvm::ConstantInt::get(ConvertType(type), 1, true), type);
llvm::Value *old =
- Builder.CreateAtomicRMW(aop, LV.getAddress(CGF), amt,
+ Builder.CreateAtomicRMW(aop, LV.getAddress(), amt,
llvm::AtomicOrdering::SequentiallyConsistent);
return isPre ? Builder.CreateBinOp(op, old, amt) : old;
}
+ // Special case for atomic increment/decrement on floats.
+ // Bail out non-power-of-2-sized floating point types (e.g., x86_fp80).
+ if (type->isFloatingType()) {
+ llvm::Type *Ty = ConvertType(type);
+ if (llvm::has_single_bit(Ty->getScalarSizeInBits())) {
+ llvm::AtomicRMWInst::BinOp aop =
+ isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub;
+ llvm::Instruction::BinaryOps op =
+ isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub;
+ llvm::Value *amt = llvm::ConstantFP::get(Ty, 1.0);
+ llvm::AtomicRMWInst *old = Builder.CreateAtomicRMW(
+ aop, LV.getAddress(), amt,
+ llvm::AtomicOrdering::SequentiallyConsistent);
+
+ return isPre ? Builder.CreateBinOp(op, old, amt) : old;
+ }
+ }
value = EmitLoadOfLValue(LV, E->getExprLoc());
input = value;
// For every other atomic operation, we need to emit a load-op-cmpxchg loop
@@ -2643,7 +2900,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
"base or promoted) will be signed, or the bitwidths will match.");
}
if (CGF.SanOpts.hasOneOf(
- SanitizerKind::ImplicitIntegerArithmeticValueChange) &&
+ SanitizerKind::ImplicitIntegerArithmeticValueChange |
+ SanitizerKind::ImplicitBitfieldConversion) &&
canPerformLossyDemotionCheck) {
// While `x += 1` (for `x` with width less than int) is modeled as
// promotion+arithmetics+demotion, and we can catch lossy demotion with
@@ -2654,13 +2912,26 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// the increment/decrement in the wider type, and finally
// perform the demotion. This will catch lossy demotions.
+ // We have a special case for bitfields defined using all the bits of the
+ // type. In this case we need to do the same trick as for the integer
+ // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.
+
value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
// Do pass non-default ScalarConversionOpts so that sanitizer check is
- // emitted.
+ // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
+ // checks will take care of the conversion.
+ ScalarConversionOpts Opts;
+ if (!LV.isBitField())
+ Opts = ScalarConversionOpts(CGF.SanOpts);
+ else if (CGF.SanOpts.has(SanitizerKind::ImplicitBitfieldConversion)) {
+ Previous = value;
+ SrcType = promotedType;
+ }
+
value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
- ScalarConversionOpts(CGF.SanOpts));
+ Opts);
// Note that signed integer inc/dec with width less than int can't
// overflow because of promotion rules; we're just eliding a few steps
@@ -2845,9 +3116,12 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
}
// Store the updated result through the lvalue.
- if (LV.isBitField())
+ if (LV.isBitField()) {
+ Value *Src = Previous ? Previous : value;
CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
- else
+ CGF.EmitBitfieldConversionCheck(Src, SrcType, value, E->getType(),
+ LV.getBitFieldInfo(), E->getExprLoc());
+ } else
CGF.EmitStoreThroughLValue(RValue::get(value), LV);
// If this is a postinc, return the value read from memory, otherwise use the
@@ -3314,7 +3588,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
E->getExprLoc()),
LHSTy);
Value *OldVal = Builder.CreateAtomicRMW(
- AtomicOp, LHSLV.getAddress(CGF), Amt,
+ AtomicOp, LHSLV.getAddress(), Amt,
llvm::AtomicOrdering::SequentiallyConsistent);
// Since operation is atomic, the result type is guaranteed to be the
@@ -3352,8 +3626,15 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
// Convert the result back to the LHS type,
// potentially with Implicit Conversion sanitizer check.
- Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
- ScalarConversionOpts(CGF.SanOpts));
+ // If LHSLV is a bitfield, use default ScalarConversionOpts
+ // to avoid emit any implicit integer checks.
+ Value *Previous = nullptr;
+ if (LHSLV.isBitField()) {
+ Previous = Result;
+ Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc);
+ } else
+ Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
+ ScalarConversionOpts(CGF.SanOpts));
if (atomicPHI) {
llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
@@ -3372,9 +3653,14 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
// specially because the result is altered by the store, i.e., [C99 6.5.16p1]
// 'An assignment expression has the value of the left operand after the
// assignment...'.
- if (LHSLV.isBitField())
+ if (LHSLV.isBitField()) {
+ Value *Src = Previous ? Previous : Result;
+ QualType SrcType = E->getRHS()->getType();
+ QualType DstType = E->getLHS()->getType();
CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
- else
+ CGF.EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
+ LHSLV.getBitFieldInfo(), E->getExprLoc());
+ } else
CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
if (CGF.getLangOpts().OpenMP)
@@ -3859,7 +4145,9 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
if (op.Ty->isSignedIntegerOrEnumerationType()) {
switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Defined:
- return Builder.CreateAdd(op.LHS, op.RHS, "add");
+ if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
+ return Builder.CreateAdd(op.LHS, op.RHS, "add");
+ [[fallthrough]];
case LangOptions::SOB_Undefined:
if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
@@ -4013,7 +4301,9 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
if (op.Ty->isSignedIntegerOrEnumerationType()) {
switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Defined:
- return Builder.CreateSub(op.LHS, op.RHS, "sub");
+ if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
+ return Builder.CreateSub(op.LHS, op.RHS, "sub");
+ [[fallthrough]];
case LangOptions::SOB_Undefined:
if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
@@ -4112,13 +4402,24 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
}
-Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) {
+Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
+ bool RHSIsSigned) {
llvm::IntegerType *Ty;
if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
Ty = cast<llvm::IntegerType>(VT->getElementType());
else
Ty = cast<llvm::IntegerType>(LHS->getType());
- return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
+ // For a given type of LHS the maximum shift amount is width(LHS)-1, however
+ // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
+ // this in ConstantInt::get, this results in the value getting truncated.
+ // Constrain the return value to be max(RHS) in this case.
+ llvm::Type *RHSTy = RHS->getType();
+ llvm::APInt RHSMax =
+ RHSIsSigned ? llvm::APInt::getSignedMaxValue(RHSTy->getScalarSizeInBits())
+ : llvm::APInt::getMaxValue(RHSTy->getScalarSizeInBits());
+ if (RHSMax.ult(Ty->getBitWidth()))
+ return llvm::ConstantInt::get(RHSTy, RHSMax);
+ return llvm::ConstantInt::get(RHSTy, Ty->getBitWidth() - 1);
}
Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
@@ -4130,7 +4431,7 @@ Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
Ty = cast<llvm::IntegerType>(LHS->getType());
if (llvm::isPowerOf2_64(Ty->getBitWidth()))
- return Builder.CreateAnd(RHS, GetWidthMinusOneValue(LHS, RHS), Name);
+ return Builder.CreateAnd(RHS, GetMaximumShiftAmount(LHS, RHS, false), Name);
return Builder.CreateURem(
RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
@@ -4157,13 +4458,15 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
// OpenCL 6.3j: shift values are effectively % word size of LHS.
- if (CGF.getLangOpts().OpenCL)
+ if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
else if ((SanitizeBase || SanitizeExponent) &&
isa<llvm::IntegerType>(Ops.LHS->getType())) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
- llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS);
+ bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
+ llvm::Value *WidthMinusOne =
+ GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned);
llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
if (SanitizeExponent) {
@@ -4181,7 +4484,7 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
llvm::Value *PromotedWidthMinusOne =
(RHS == Ops.RHS) ? WidthMinusOne
- : GetWidthMinusOneValue(Ops.LHS, RHS);
+ : GetMaximumShiftAmount(Ops.LHS, RHS, RHSIsSigned);
CGF.EmitBlock(CheckShiftBase);
llvm::Value *BitsShiftedOff = Builder.CreateLShr(
Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
@@ -4226,13 +4529,14 @@ Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
// OpenCL 6.3j: shift values are effectively % word size of LHS.
- if (CGF.getLangOpts().OpenCL)
+ if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
isa<llvm::IntegerType>(Ops.LHS->getType())) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
- llvm::Value *Valid =
- Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
+ bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
+ llvm::Value *Valid = Builder.CreateICmpULE(
+ Ops.RHS, GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned));
EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
}
@@ -4474,6 +4778,24 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
E->getExprLoc());
}
+// Emit the RHS of a bitfield assignment, additionally returning (through the
+// out-parameters) the value *before* any implicit scalar conversion so the
+// bitfield conversion sanitizer can compare against the original value.
+//   \param E        the assignment whose RHS is being emitted.
+//   \param Previous set to the pre-conversion RHS value when the RHS is an
+//                   implicit integral-cast / lvalue-to-rvalue cast; otherwise
+//                   left untouched.
+//   \param SrcType  set to the pre-conversion source type in the same case.
+//   \returns the (possibly converted) RHS value to store into the bitfield.
+llvm::Value *CodeGenFunction::EmitWithOriginalRHSBitfieldAssignment(
+    const BinaryOperator *E, Value **Previous, QualType *SrcType) {
+  // In case we have the integer or bitfield sanitizer checks enabled
+  // we want to get the expression before scalar conversion.
+  if (auto *ICE = dyn_cast<ImplicitCastExpr>(E->getRHS())) {
+    CastKind Kind = ICE->getCastKind();
+    if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
+      *SrcType = ICE->getSubExpr()->getType();
+      *Previous = EmitScalarExpr(ICE->getSubExpr());
+      // Pass default ScalarConversionOpts to avoid emitting
+      // integer sanitizer checks as E refers to bitfield.
+      return EmitScalarConversion(*Previous, *SrcType, ICE->getType(),
+                                  ICE->getExprLoc());
+    }
+  }
+  // Not a recognized implicit cast: emit the RHS normally and leave the
+  // out-parameters unset.
+  return EmitScalarExpr(E->getRHS());
+}
+
Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
bool Ignore = TestAndClearIgnoreResultAssign();
@@ -4496,13 +4818,22 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
case Qualifiers::OCL_Weak:
RHS = Visit(E->getRHS());
LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
- RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore);
+ RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
break;
case Qualifiers::OCL_None:
// __block variables need to have the rhs evaluated first, plus
// this should improve codegen just a little.
- RHS = Visit(E->getRHS());
+ Value *Previous = nullptr;
+ QualType SrcType = E->getRHS()->getType();
+ // Check if LHS is a bitfield, if RHS contains an implicit cast expression
+ // we want to extract that value and potentially (if the bitfield sanitizer
+ // is enabled) use it to check for an implicit conversion.
+ if (E->getLHS()->refersToBitField())
+ RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
+ else
+ RHS = Visit(E->getRHS());
+
LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
// Store the value into the LHS. Bit-fields are handled specially
@@ -4511,6 +4842,12 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
// the assignment...'.
if (LHS.isBitField()) {
CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
+ // If the expression contained an implicit conversion, make sure
+ // to use the value before the scalar conversion.
+ Value *Src = Previous ? Previous : RHS;
+ QualType DstType = E->getLHS()->getType();
+ CGF.EmitBitfieldConversionCheck(Src, SrcType, RHS, DstType,
+ LHS.getBitFieldInfo(), E->getExprLoc());
} else {
CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
@@ -4855,8 +5192,13 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
// If the dead side doesn't have labels we need, just emit the Live part.
if (!CGF.ContainsLabel(dead)) {
- if (CondExprBool)
+ if (CondExprBool) {
+ if (llvm::EnableSingleByteCoverage) {
+ CGF.incrementProfileCounter(lhsExpr);
+ CGF.incrementProfileCounter(rhsExpr);
+ }
CGF.incrementProfileCounter(E);
+ }
Value *Result = Visit(live);
// If the live part is a throw expression, it acts like it has a void
@@ -4935,7 +5277,12 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
- CGF.incrementProfileCounter(E, StepV);
+ if (llvm::EnableSingleByteCoverage) {
+ CGF.incrementProfileCounter(lhsExpr);
+ CGF.incrementProfileCounter(rhsExpr);
+ CGF.incrementProfileCounter(E);
+ } else
+ CGF.incrementProfileCounter(E, StepV);
llvm::Value *LHS = Visit(lhsExpr);
llvm::Value *RHS = Visit(rhsExpr);
@@ -4967,7 +5314,11 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
if (CGF.MCDCLogOpStack.empty())
CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);
- CGF.incrementProfileCounter(E);
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(lhsExpr);
+ else
+ CGF.incrementProfileCounter(E);
+
eval.begin(CGF);
Value *LHS = Visit(lhsExpr);
eval.end(CGF);
@@ -4983,6 +5334,9 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
if (CGF.MCDCLogOpStack.empty())
CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(rhsExpr);
+
eval.begin(CGF);
Value *RHS = Visit(rhsExpr);
eval.end(CGF);
@@ -5001,6 +5355,11 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
PN->addIncoming(LHS, LHSBlock);
PN->addIncoming(RHS, RHSBlock);
+ // When single byte coverage mode is enabled, add a counter to continuation
+ // block.
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(E);
+
return PN;
}
@@ -5015,28 +5374,9 @@ Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
CGF.EmitVariablyModifiedType(Ty);
Address ArgValue = Address::invalid();
- Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
+ RValue ArgPtr = CGF.EmitVAArg(VE, ArgValue);
- llvm::Type *ArgTy = ConvertType(VE->getType());
-
- // If EmitVAArg fails, emit an error.
- if (!ArgPtr.isValid()) {
- CGF.ErrorUnsupported(VE, "va_arg expression");
- return llvm::UndefValue::get(ArgTy);
- }
-
- // FIXME Volatility.
- llvm::Value *Val = Builder.CreateLoad(ArgPtr);
-
- // If EmitVAArg promoted the type, we must truncate it.
- if (ArgTy != Val->getType()) {
- if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
- Val = Builder.CreateIntToPtr(Val, ArgTy);
- else
- Val = Builder.CreateTrunc(Val, ArgTy);
- }
-
- return Val;
+ return ArgPtr.getScalarVal();
}
Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
@@ -5211,7 +5551,7 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
ConvertTypeForMem(BaseExpr->getType()->getPointeeType());
Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
} else {
- Addr = EmitLValue(BaseExpr).getAddress(*this);
+ Addr = EmitLValue(BaseExpr).getAddress();
}
// Cast the address to Class*.
@@ -5492,3 +5832,16 @@ CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
return GEPVal;
}
+
+// Address-based overload of EmitCheckedInBoundsGEP. When the pointer-overflow
+// sanitizer is disabled this is a plain inbounds GEP on the Address (which
+// preserves its bookkeeping); otherwise it lowers to the raw-pointer overload
+// so the overflow check is emitted, then rewraps the result as a RawAddress
+// with the given element type and alignment.
+Address CodeGenFunction::EmitCheckedInBoundsGEP(
+    Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
+    bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
+    const Twine &Name) {
+  if (!SanOpts.has(SanitizerKind::PointerOverflow))
+    return Builder.CreateInBoundsGEP(Addr, IdxList, elementType, Align, Name);
+
+  return RawAddress(
+      EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this),
+                             IdxList, SignedIndices, IsSubtraction, Loc, Name),
+      elementType, Align);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGGPUBuiltin.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGGPUBuiltin.cpp
index e465789a003e..b2340732afeb 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGGPUBuiltin.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGGPUBuiltin.cpp
@@ -136,7 +136,8 @@ RValue EmitDevicePrintfCallExpr(const CallExpr *E, CodeGenFunction *CGF,
llvm::Function *Decl, bool WithSizeArg) {
CodeGenModule &CGM = CGF->CGM;
CGBuilderTy &Builder = CGF->Builder;
- assert(E->getBuiltinCallee() == Builtin::BIprintf);
+ assert(E->getBuiltinCallee() == Builtin::BIprintf ||
+ E->getBuiltinCallee() == Builtin::BI__builtin_printf);
assert(E->getNumArgs() >= 1); // printf always has at least one arg.
// Uses the same format as nvptx for the argument packing, but also passes
@@ -178,7 +179,9 @@ RValue CodeGenFunction::EmitNVPTXDevicePrintfCallExpr(const CallExpr *E) {
}
RValue CodeGenFunction::EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E) {
- assert(getTarget().getTriple().getArch() == llvm::Triple::amdgcn);
+ assert(getTarget().getTriple().isAMDGCN() ||
+ (getTarget().getTriple().isSPIRV() &&
+ getTarget().getTriple().getVendor() == llvm::Triple::AMD));
assert(E->getBuiltinCallee() == Builtin::BIprintf ||
E->getBuiltinCallee() == Builtin::BI__builtin_printf);
assert(E->getNumArgs() >= 1); // printf always has at least one arg.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.cpp
index e887d35198b3..6a6aff594fb0 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.cpp
@@ -17,7 +17,6 @@
#include "CodeGenModule.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/IR/IntrinsicsDirectX.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/FormatVariadic.h"
@@ -116,6 +115,10 @@ GlobalVariable *replaceBuffer(CGHLSLRuntime::Buffer &Buf) {
} // namespace
+llvm::Triple::ArchType CGHLSLRuntime::getArch() {
+ return CGM.getTarget().getTriple().getArch();
+}
+
void CGHLSLRuntime::addConstant(VarDecl *D, Buffer &CB) {
if (D->getStorageClass() == SC_Static) {
// For static inside cbuffer, take as global static.
@@ -277,13 +280,14 @@ void CGHLSLRuntime::annotateHLSLResource(const VarDecl *D, GlobalVariable *GV) {
const auto *RD = Ty->getAsCXXRecordDecl();
if (!RD)
return;
- const auto *Attr = RD->getAttr<HLSLResourceAttr>();
- if (!Attr)
+ const auto *HLSLResAttr = RD->getAttr<HLSLResourceAttr>();
+ const auto *HLSLResClassAttr = RD->getAttr<HLSLResourceClassAttr>();
+ if (!HLSLResAttr || !HLSLResClassAttr)
return;
- llvm::hlsl::ResourceClass RC = Attr->getResourceClass();
- llvm::hlsl::ResourceKind RK = Attr->getResourceKind();
- bool IsROV = Attr->getIsROV();
+ llvm::hlsl::ResourceClass RC = HLSLResClassAttr->getResourceClass();
+ llvm::hlsl::ResourceKind RK = HLSLResAttr->getResourceKind();
+ bool IsROV = HLSLResAttr->getIsROV();
llvm::hlsl::ElementType ET = calculateElementType(CGM.getContext(), Ty);
BufferResBinding Binding(D->getAttr<HLSLResourceBindingAttr>());
@@ -310,7 +314,7 @@ void clang::CodeGen::CGHLSLRuntime::setHLSLEntryAttributes(
assert(ShaderAttr && "All entry functions must have a HLSLShaderAttr");
const StringRef ShaderAttrKindStr = "hlsl.shader";
Fn->addFnAttr(ShaderAttrKindStr,
- ShaderAttr->ConvertShaderTypeToStr(ShaderAttr->getType()));
+ llvm::Triple::getEnvironmentTypeName(ShaderAttr->getType()));
if (HLSLNumThreadsAttr *NumThreadsAttr = FD->getAttr<HLSLNumThreadsAttr>()) {
const StringRef NumThreadsKindStr = "hlsl.numthreads";
std::string NumThreadsStr =
@@ -342,8 +346,9 @@ llvm::Value *CGHLSLRuntime::emitInputSemantic(IRBuilder<> &B,
return B.CreateCall(FunctionCallee(DxGroupIndex));
}
if (D.hasAttr<HLSLSV_DispatchThreadIDAttr>()) {
- llvm::Function *DxThreadID = CGM.getIntrinsic(Intrinsic::dx_thread_id);
- return buildVectorInput(B, DxThreadID, Ty);
+ llvm::Function *ThreadIDIntrinsic =
+ CGM.getIntrinsic(getThreadIdIntrinsic());
+ return buildVectorInput(B, ThreadIDIntrinsic, Ty);
}
assert(false && "Unhandled parameter attribute");
return nullptr;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.h b/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.h
index bffefb66740a..4036ce711bea 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.h
@@ -16,7 +16,11 @@
#define LLVM_CLANG_LIB_CODEGEN_CGHLSLRUNTIME_H
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicsDirectX.h"
+#include "llvm/IR/IntrinsicsSPIRV.h"
+#include "clang/Basic/Builtins.h"
#include "clang/Basic/HLSLRuntime.h"
#include "llvm/ADT/SmallVector.h"
@@ -26,6 +30,22 @@
#include <optional>
#include <vector>
+// A function generator macro for picking the right intrinsic
+// for the target backend
+#define GENERATE_HLSL_INTRINSIC_FUNCTION(FunctionName, IntrinsicPostfix) \
+ llvm::Intrinsic::ID get##FunctionName##Intrinsic() { \
+ llvm::Triple::ArchType Arch = getArch(); \
+ switch (Arch) { \
+ case llvm::Triple::dxil: \
+ return llvm::Intrinsic::dx_##IntrinsicPostfix; \
+ case llvm::Triple::spirv: \
+ return llvm::Intrinsic::spv_##IntrinsicPostfix; \
+ default: \
+ llvm_unreachable("Intrinsic " #IntrinsicPostfix \
+ " not supported by target architecture"); \
+ } \
+ }
+
namespace llvm {
class GlobalVariable;
class Function;
@@ -48,6 +68,20 @@ class CodeGenModule;
class CGHLSLRuntime {
public:
+ //===----------------------------------------------------------------------===//
+ // Start of reserved area for HLSL intrinsic getters.
+ //===----------------------------------------------------------------------===//
+
+ GENERATE_HLSL_INTRINSIC_FUNCTION(All, all)
+ GENERATE_HLSL_INTRINSIC_FUNCTION(Any, any)
+ GENERATE_HLSL_INTRINSIC_FUNCTION(Lerp, lerp)
+ GENERATE_HLSL_INTRINSIC_FUNCTION(Rsqrt, rsqrt)
+ GENERATE_HLSL_INTRINSIC_FUNCTION(ThreadId, thread_id)
+
+ //===----------------------------------------------------------------------===//
+ // End of reserved area for HLSL intrinsic getters.
+ //===----------------------------------------------------------------------===//
+
struct BufferResBinding {
// The ID like 2 in register(b2, space1).
std::optional<unsigned> Reg;
@@ -96,6 +130,7 @@ private:
BufferResBinding &Binding);
void addConstant(VarDecl *D, Buffer &CB);
void addBufferDecls(const DeclContext *DC, Buffer &CB);
+ llvm::Triple::ArchType getArch();
llvm::SmallVector<Buffer> Buffers;
};
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.cpp
index 0d4800b90a2f..6b886bd6b6d2 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.cpp
@@ -612,9 +612,9 @@ void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
const LoopHintAttr *LH = dyn_cast<LoopHintAttr>(Attr);
const OpenCLUnrollHintAttr *OpenCLHint =
dyn_cast<OpenCLUnrollHintAttr>(Attr);
-
+ const HLSLLoopHintAttr *HLSLLoopHint = dyn_cast<HLSLLoopHintAttr>(Attr);
// Skip non loop hint attributes
- if (!LH && !OpenCLHint) {
+ if (!LH && !OpenCLHint && !HLSLLoopHint) {
continue;
}
@@ -635,6 +635,17 @@ void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
Option = LoopHintAttr::UnrollCount;
State = LoopHintAttr::Numeric;
}
+ } else if (HLSLLoopHint) {
+ ValueInt = HLSLLoopHint->getDirective();
+ if (HLSLLoopHint->getSemanticSpelling() ==
+ HLSLLoopHintAttr::Spelling::Microsoft_unroll) {
+ if (ValueInt == 0)
+ State = LoopHintAttr::Enable;
+ if (ValueInt > 0) {
+ Option = LoopHintAttr::UnrollCount;
+ State = LoopHintAttr::Numeric;
+ }
+ }
} else if (LH) {
auto *ValueExpr = LH->getValue();
if (ValueExpr) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.h b/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.h
index a1c8c7e5307f..0fe33b289130 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.h
@@ -110,6 +110,10 @@ public:
/// been processed.
void finish();
+ /// Returns the first outer loop containing this loop if any, nullptr
+ /// otherwise.
+ const LoopInfo *getParent() const { return Parent; }
+
private:
/// Loop ID metadata.
llvm::TempMDTuple TempLoopID;
@@ -291,12 +295,13 @@ public:
/// Set no progress for the next loop pushed.
void setMustProgress(bool P) { StagedAttrs.MustProgress = P; }
-private:
/// Returns true if there is LoopInfo on the stack.
bool hasInfo() const { return !Active.empty(); }
/// Return the LoopInfo for the current loop. HasInfo should be called
/// first to ensure LoopInfo is present.
const LoopInfo &getInfo() const { return *Active.back(); }
+
+private:
/// The set of attributes that will be applied to the next pushed loop.
LoopAttributes StagedAttrs;
/// Stack of active loops.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp
index 75c1d7fbea84..6a02e4dbf84d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp
@@ -366,7 +366,7 @@ template <class Derived> struct GenFuncBase {
llvm::Value *SizeInBytes =
CGF.Builder.CreateNUWMul(BaseEltSizeVal, NumElts);
llvm::Value *DstArrayEnd = CGF.Builder.CreateInBoundsGEP(
- CGF.Int8Ty, DstAddr.getPointer(), SizeInBytes);
+ CGF.Int8Ty, DstAddr.emitRawPointer(CGF), SizeInBytes);
llvm::BasicBlock *PreheaderBB = CGF.Builder.GetInsertBlock();
// Create the header block and insert the phi instructions.
@@ -376,7 +376,7 @@ template <class Derived> struct GenFuncBase {
for (unsigned I = 0; I < N; ++I) {
PHIs[I] = CGF.Builder.CreatePHI(CGF.CGM.Int8PtrPtrTy, 2, "addr.cur");
- PHIs[I]->addIncoming(StartAddrs[I].getPointer(), PreheaderBB);
+ PHIs[I]->addIncoming(StartAddrs[I].emitRawPointer(CGF), PreheaderBB);
}
// Create the exit and loop body blocks.
@@ -410,7 +410,7 @@ template <class Derived> struct GenFuncBase {
// Instrs to update the destination and source addresses.
// Update phi instructions.
NewAddrs[I] = getAddrWithOffset(NewAddrs[I], EltSize);
- PHIs[I]->addIncoming(NewAddrs[I].getPointer(), LoopBB);
+ PHIs[I]->addIncoming(NewAddrs[I].emitRawPointer(CGF), LoopBB);
}
// Insert an unconditional branch to the header block.
@@ -488,7 +488,7 @@ template <class Derived> struct GenFuncBase {
for (unsigned I = 0; I < N; ++I) {
Alignments[I] = Addrs[I].getAlignment();
- Ptrs[I] = Addrs[I].getPointer();
+ Ptrs[I] = Addrs[I].emitRawPointer(CallerCGF);
}
if (llvm::Function *F =
@@ -711,7 +711,7 @@ struct GenMoveConstructor : GenBinaryFunc<GenMoveConstructor, true> {
LValue SrcLV = CGF->MakeAddrLValue(Addrs[SrcIdx], QT);
llvm::Value *SrcVal =
CGF->EmitLoadOfLValue(SrcLV, SourceLocation()).getScalarVal();
- CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress(*CGF)), SrcLV);
+ CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress()), SrcLV);
CGF->EmitStoreOfScalar(SrcVal, CGF->MakeAddrLValue(Addrs[DstIdx], QT),
/* isInitialization */ true);
}
@@ -774,7 +774,7 @@ struct GenMoveAssignment : GenBinaryFunc<GenMoveAssignment, true> {
LValue SrcLV = CGF->MakeAddrLValue(Addrs[SrcIdx], QT);
llvm::Value *SrcVal =
CGF->EmitLoadOfLValue(SrcLV, SourceLocation()).getScalarVal();
- CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress(*CGF)), SrcLV);
+ CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress()), SrcLV);
LValue DstLV = CGF->MakeAddrLValue(Addrs[DstIdx], QT);
llvm::Value *DstVal =
CGF->EmitLoadOfLValue(DstLV, SourceLocation()).getScalarVal();
@@ -810,7 +810,7 @@ void CodeGenFunction::destroyNonTrivialCStruct(CodeGenFunction &CGF,
// such structure.
void CodeGenFunction::defaultInitNonTrivialCStructVar(LValue Dst) {
GenDefaultInitialize Gen(getContext());
- Address DstPtr = Dst.getAddress(*this).withElementType(CGM.Int8PtrTy);
+ Address DstPtr = Dst.getAddress().withElementType(CGM.Int8PtrTy);
Gen.setCGF(this);
QualType QT = Dst.getType();
QT = Dst.isVolatile() ? QT.withVolatile() : QT;
@@ -842,7 +842,7 @@ getSpecialFunction(G &&Gen, StringRef FuncName, QualType QT, bool IsVolatile,
// Functions to emit calls to the special functions of a non-trivial C struct.
void CodeGenFunction::callCStructDefaultConstructor(LValue Dst) {
bool IsVolatile = Dst.isVolatile();
- Address DstPtr = Dst.getAddress(*this);
+ Address DstPtr = Dst.getAddress();
QualType QT = Dst.getType();
GenDefaultInitializeFuncName GenName(DstPtr.getAlignment(), getContext());
std::string FuncName = GenName.getName(QT, IsVolatile);
@@ -866,7 +866,7 @@ std::string CodeGenFunction::getNonTrivialDestructorStr(QualType QT,
void CodeGenFunction::callCStructDestructor(LValue Dst) {
bool IsVolatile = Dst.isVolatile();
- Address DstPtr = Dst.getAddress(*this);
+ Address DstPtr = Dst.getAddress();
QualType QT = Dst.getType();
GenDestructorFuncName GenName("__destructor_", DstPtr.getAlignment(),
getContext());
@@ -877,7 +877,7 @@ void CodeGenFunction::callCStructDestructor(LValue Dst) {
void CodeGenFunction::callCStructCopyConstructor(LValue Dst, LValue Src) {
bool IsVolatile = Dst.isVolatile() || Src.isVolatile();
- Address DstPtr = Dst.getAddress(*this), SrcPtr = Src.getAddress(*this);
+ Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress();
QualType QT = Dst.getType();
GenBinaryFuncName<false> GenName("__copy_constructor_", DstPtr.getAlignment(),
SrcPtr.getAlignment(), getContext());
@@ -891,7 +891,7 @@ void CodeGenFunction::callCStructCopyAssignmentOperator(LValue Dst, LValue Src
) {
bool IsVolatile = Dst.isVolatile() || Src.isVolatile();
- Address DstPtr = Dst.getAddress(*this), SrcPtr = Src.getAddress(*this);
+ Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress();
QualType QT = Dst.getType();
GenBinaryFuncName<false> GenName("__copy_assignment_", DstPtr.getAlignment(),
SrcPtr.getAlignment(), getContext());
@@ -902,7 +902,7 @@ void CodeGenFunction::callCStructCopyAssignmentOperator(LValue Dst, LValue Src
void CodeGenFunction::callCStructMoveConstructor(LValue Dst, LValue Src) {
bool IsVolatile = Dst.isVolatile() || Src.isVolatile();
- Address DstPtr = Dst.getAddress(*this), SrcPtr = Src.getAddress(*this);
+ Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress();
QualType QT = Dst.getType();
GenBinaryFuncName<true> GenName("__move_constructor_", DstPtr.getAlignment(),
SrcPtr.getAlignment(), getContext());
@@ -916,7 +916,7 @@ void CodeGenFunction::callCStructMoveAssignmentOperator(LValue Dst, LValue Src
) {
bool IsVolatile = Dst.isVolatile() || Src.isVolatile();
- Address DstPtr = Dst.getAddress(*this), SrcPtr = Src.getAddress(*this);
+ Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress();
QualType QT = Dst.getType();
GenBinaryFuncName<true> GenName("__move_assignment_", DstPtr.getAlignment(),
SrcPtr.getAlignment(), getContext());
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
index 03fc0ec7ff54..80a64d8e4cdd 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
@@ -94,8 +94,8 @@ CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
// and cast value to correct type
Address Temporary = CreateMemTemp(SubExpr->getType());
EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
- llvm::Value *BitCast =
- Builder.CreateBitCast(Temporary.getPointer(), ConvertType(ArgQT));
+ llvm::Value *BitCast = Builder.CreateBitCast(
+ Temporary.emitRawPointer(*this), ConvertType(ArgQT));
Args.add(RValue::get(BitCast), ArgQT);
// Create char array to store type encoding
@@ -204,11 +204,11 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
const ParmVarDecl *argDecl = *PI++;
QualType ArgQT = argDecl->getType().getUnqualifiedType();
- Args.add(RValue::get(Objects.getPointer()), ArgQT);
+ Args.add(RValue::get(Objects, *this), ArgQT);
if (DLE) {
argDecl = *PI++;
ArgQT = argDecl->getType().getUnqualifiedType();
- Args.add(RValue::get(Keys.getPointer()), ArgQT);
+ Args.add(RValue::get(Keys, *this), ArgQT);
}
argDecl = *PI;
ArgQT = argDecl->getType().getUnqualifiedType();
@@ -586,7 +586,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
method->getMethodFamily() == OMF_retain) {
if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) {
LValue lvalue = EmitLValue(lvalueExpr);
- llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress(*this));
+ llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress());
return AdjustObjCObjectType(*this, E->getType(), RValue::get(result));
}
}
@@ -827,7 +827,7 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
// sizeof (Type of Ivar), isAtomic, false);
CallArgList args;
- llvm::Value *dest = CGF.ReturnValue.getPointer();
+ llvm::Value *dest = CGF.ReturnValue.emitRawPointer(CGF);
args.add(RValue::get(dest), Context.VoidPtrTy);
args.add(RValue::get(src), Context.VoidPtrTy);
@@ -899,9 +899,13 @@ namespace {
const ObjCPropertyImplDecl *propImpl);
private:
+ LLVM_PREFERRED_TYPE(StrategyKind)
unsigned Kind : 8;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsAtomic : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsCopy : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasStrong : 1;
CharUnits IvarSize;
@@ -1143,8 +1147,8 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
callCStructCopyConstructor(Dst, Src);
} else {
ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
- emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(), ivar,
- AtomicHelperFn);
+ emitCPPObjectAtomicGetterCall(*this, ReturnValue.emitRawPointer(*this),
+ ivar, AtomicHelperFn);
}
return;
}
@@ -1159,7 +1163,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
}
else {
ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
- emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(),
+ emitCPPObjectAtomicGetterCall(*this, ReturnValue.emitRawPointer(*this),
ivar, AtomicHelperFn);
}
return;
@@ -1185,7 +1189,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
llvm::Type *bitcastType = llvm::Type::getIntNTy(getLLVMContext(), ivarSize);
// Perform an atomic load. This does not impose ordering constraints.
- Address ivarAddr = LV.getAddress(*this);
+ Address ivarAddr = LV.getAddress();
ivarAddr = ivarAddr.withElementType(bitcastType);
llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
load->setAtomic(llvm::AtomicOrdering::Unordered);
@@ -1283,14 +1287,14 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
case TEK_Scalar: {
llvm::Value *value;
if (propType->isReferenceType()) {
- value = LV.getAddress(*this).getPointer();
+ value = LV.getAddress().emitRawPointer(*this);
} else {
// We want to load and autoreleaseReturnValue ARC __weak ivars.
if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
if (getLangOpts().ObjCAutoRefCount) {
value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
} else {
- value = EmitARCLoadWeak(LV.getAddress(*this));
+ value = EmitARCLoadWeak(LV.getAddress());
}
// Otherwise we want to do a simple load, suppressing the
@@ -1473,7 +1477,7 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
LValue ivarLValue =
EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
- Address ivarAddr = ivarLValue.getAddress(*this);
+ Address ivarAddr = ivarLValue.getAddress();
// Currently, all atomic accesses have to be through integer
// types, so there's no point in trying to pick a prettier type.
@@ -1651,7 +1655,7 @@ namespace {
void Emit(CodeGenFunction &CGF, Flags flags) override {
LValue lvalue
= CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
- CGF.emitDestroy(lvalue.getAddress(CGF), ivar->getType(), destroyer,
+ CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
flags.isForNormalCleanup() && useEHCleanupForArray);
}
};
@@ -1718,7 +1722,7 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
LoadObjCSelf(), Ivar, 0);
EmitAggExpr(IvarInit->getInit(),
- AggValueSlot::forLValue(LV, *this, AggValueSlot::IsDestructed,
+ AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased,
AggValueSlot::DoesNotOverlap));
@@ -1785,11 +1789,10 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
static const unsigned NumItems = 16;
// Fetch the countByEnumeratingWithState:objects:count: selector.
- IdentifierInfo *II[] = {
- &CGM.getContext().Idents.get("countByEnumeratingWithState"),
- &CGM.getContext().Idents.get("objects"),
- &CGM.getContext().Idents.get("count")
- };
+ const IdentifierInfo *II[] = {
+ &CGM.getContext().Idents.get("countByEnumeratingWithState"),
+ &CGM.getContext().Idents.get("objects"),
+ &CGM.getContext().Idents.get("count")};
Selector FastEnumSel =
CGM.getContext().Selectors.getSelector(std::size(II), &II[0]);
@@ -1817,16 +1820,14 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
CallArgList Args;
// The first argument is a temporary of the enumeration-state type.
- Args.add(RValue::get(StatePtr.getPointer()),
- getContext().getPointerType(StateTy));
+ Args.add(RValue::get(StatePtr, *this), getContext().getPointerType(StateTy));
// The second argument is a temporary array with space for NumItems
// pointers. We'll actually be loading elements from the array
// pointer written into the control state; this buffer is so that
// collections that *aren't* backed by arrays can still queue up
// batches of elements.
- Args.add(RValue::get(ItemsPtr.getPointer()),
- getContext().getPointerType(ItemsTy));
+ Args.add(RValue::get(ItemsPtr, *this), getContext().getPointerType(ItemsTy));
// The third argument is the capacity of that temporary array.
llvm::Type *NSUIntegerTy = ConvertType(getContext().getNSUIntegerType());
@@ -1951,7 +1952,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
Builder.CreateLoad(StateItemsPtr, "stateitems");
// Fetch the value at the current index from the buffer.
- llvm::Value *CurrentItemPtr = Builder.CreateGEP(
+ llvm::Value *CurrentItemPtr = Builder.CreateInBoundsGEP(
ObjCIdType, EnumStateItems, index, "currentitem.ptr");
llvm::Value *CurrentItem =
Builder.CreateAlignedLoad(ObjCIdType, CurrentItemPtr, getPointerAlign());
@@ -2027,7 +2028,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// First we check in the local buffer.
llvm::Value *indexPlusOne =
- Builder.CreateAdd(index, llvm::ConstantInt::get(NSUIntegerTy, 1));
+ Builder.CreateNUWAdd(index, llvm::ConstantInt::get(NSUIntegerTy, 1));
// If we haven't overrun the buffer yet, we can continue.
// Set the branch weights based on the simplifying assumption that this is
@@ -2194,7 +2195,7 @@ static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, Address addr,
if (!fn)
fn = getARCIntrinsic(IntID, CGF.CGM);
- return CGF.EmitNounwindRuntimeCall(fn, addr.getPointer());
+ return CGF.EmitNounwindRuntimeCall(fn, addr.emitRawPointer(CGF));
}
/// Perform an operation having the following signature:
@@ -2212,9 +2213,8 @@ static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF, Address addr,
llvm::Type *origType = value->getType();
llvm::Value *args[] = {
- CGF.Builder.CreateBitCast(addr.getPointer(), CGF.Int8PtrPtrTy),
- CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy)
- };
+ CGF.Builder.CreateBitCast(addr.emitRawPointer(CGF), CGF.Int8PtrPtrTy),
+ CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy)};
llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args);
if (ignored) return nullptr;
@@ -2233,9 +2233,8 @@ static void emitARCCopyOperation(CodeGenFunction &CGF, Address dst, Address src,
fn = getARCIntrinsic(IntID, CGF.CGM);
llvm::Value *args[] = {
- CGF.Builder.CreateBitCast(dst.getPointer(), CGF.Int8PtrPtrTy),
- CGF.Builder.CreateBitCast(src.getPointer(), CGF.Int8PtrPtrTy)
- };
+ CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF), CGF.Int8PtrPtrTy),
+ CGF.Builder.CreateBitCast(src.emitRawPointer(CGF), CGF.Int8PtrPtrTy)};
CGF.EmitNounwindRuntimeCall(fn, args);
}
@@ -2486,9 +2485,8 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr,
fn = getARCIntrinsic(llvm::Intrinsic::objc_storeStrong, CGM);
llvm::Value *args[] = {
- Builder.CreateBitCast(addr.getPointer(), Int8PtrPtrTy),
- Builder.CreateBitCast(value, Int8PtrTy)
- };
+ Builder.CreateBitCast(addr.emitRawPointer(*this), Int8PtrPtrTy),
+ Builder.CreateBitCast(value, Int8PtrTy)};
EmitNounwindRuntimeCall(fn, args);
if (ignored) return nullptr;
@@ -2510,7 +2508,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
!isBlock &&
(dst.getAlignment().isZero() ||
dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) {
- return EmitARCStoreStrongCall(dst.getAddress(*this), newValue, ignored);
+ return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored);
}
// Otherwise, split it out.
@@ -2639,7 +2637,7 @@ void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
if (!fn)
fn = getARCIntrinsic(llvm::Intrinsic::objc_destroyWeak, CGM);
- EmitNounwindRuntimeCall(fn, addr.getPointer());
+ EmitNounwindRuntimeCall(fn, addr.emitRawPointer(*this));
}
/// void \@objc_moveWeak(i8** %dest, i8** %src)
@@ -2721,7 +2719,7 @@ llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(*this);
// [NSAutoreleasePool alloc]
- IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
+ const IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
Selector AllocSel = getContext().Selectors.getSelector(0, &II);
CallArgList Args;
RValue AllocRV =
@@ -2768,7 +2766,7 @@ llvm::Value *CodeGenFunction::EmitObjCAllocInit(llvm::Value *value,
/// Produce the code to do a primitive release.
/// [tmp drain];
void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
- IdentifierInfo *II = &CGM.getContext().Idents.get("drain");
+ const IdentifierInfo *II = &CGM.getContext().Idents.get("drain");
Selector DrainSel = getContext().Selectors.getSelector(0, &II);
CallArgList Args;
CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
@@ -2900,7 +2898,7 @@ static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
result = CGF.EmitLoadOfLValue(lvalue, SourceLocation()).getScalarVal();
} else {
assert(type.getObjCLifetime() == Qualifiers::OCL_Weak);
- result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress(CGF));
+ result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress());
}
return TryEmitResult(result, !shouldRetain);
}
@@ -2924,7 +2922,7 @@ static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
SourceLocation()).getScalarVal();
// Set the source pointer to NULL.
- CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress(CGF)), lv);
+ CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv);
return TryEmitResult(result, true);
}
@@ -3716,8 +3714,8 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
return HelperFn;
- IdentifierInfo *II
- = &CGM.getContext().Idents.get("__assign_helper_atomic_property_");
+ const IdentifierInfo *II =
+ &CGM.getContext().Idents.get("__assign_helper_atomic_property_");
QualType ReturnTy = C.VoidTy;
QualType DestTy = C.getPointerType(Ty);
@@ -3814,7 +3812,7 @@ llvm::Constant *CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
return HelperFn;
- IdentifierInfo *II =
+ const IdentifierInfo *II =
&CGM.getContext().Idents.get("__copy_helper_atomic_property_");
QualType ReturnTy = C.VoidTy;
@@ -3908,10 +3906,10 @@ llvm::Constant *CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
llvm::Value *
CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) {
// Get selectors for retain/autorelease.
- IdentifierInfo *CopyID = &getContext().Idents.get("copy");
+ const IdentifierInfo *CopyID = &getContext().Idents.get("copy");
Selector CopySelector =
getContext().Selectors.getNullarySelector(CopyID);
- IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease");
+ const IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease");
Selector AutoreleaseSelector =
getContext().Selectors.getNullarySelector(AutoreleaseID);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
index a36b0cdddaf0..948b10954ebb 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -199,8 +199,7 @@ protected:
llvm::Constant *MakeConstantString(StringRef Str, const char *Name = "") {
ConstantAddress Array =
CGM.GetAddrOfConstantCString(std::string(Str), Name);
- return llvm::ConstantExpr::getGetElementPtr(Array.getElementType(),
- Array.getPointer(), Zeros);
+ return Array.getPointer();
}
/// Emits a linkonce_odr string, whose name is the prefix followed by the
@@ -221,8 +220,7 @@ protected:
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
ConstStr = GV;
}
- return llvm::ConstantExpr::getGetElementPtr(ConstStr->getValueType(),
- ConstStr, Zeros);
+ return ConstStr;
}
/// Returns a property name and encoding string.
@@ -706,7 +704,8 @@ protected:
llvm::Value *cmd, MessageSendInfo &MSI) override {
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *lookupArgs[] = {
- EnforceType(Builder, ObjCSuper.getPointer(), PtrToObjCSuperTy), cmd};
+ EnforceType(Builder, ObjCSuper.emitRawPointer(CGF), PtrToObjCSuperTy),
+ cmd};
return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFn, lookupArgs);
}
@@ -761,8 +760,8 @@ class CGObjCGNUstep : public CGObjCGNU {
llvm::FunctionCallee LookupFn = SlotLookupFn;
// Store the receiver on the stack so that we can reload it later
- Address ReceiverPtr =
- CGF.CreateTempAlloca(Receiver->getType(), CGF.getPointerAlign());
+ RawAddress ReceiverPtr =
+ CGF.CreateTempAlloca(Receiver->getType(), CGF.getPointerAlign());
Builder.CreateStore(Receiver, ReceiverPtr);
llvm::Value *self;
@@ -778,9 +777,9 @@ class CGObjCGNUstep : public CGObjCGNU {
LookupFn2->addParamAttr(0, llvm::Attribute::NoCapture);
llvm::Value *args[] = {
- EnforceType(Builder, ReceiverPtr.getPointer(), PtrToIdTy),
- EnforceType(Builder, cmd, SelectorTy),
- EnforceType(Builder, self, IdTy) };
+ EnforceType(Builder, ReceiverPtr.getPointer(), PtrToIdTy),
+ EnforceType(Builder, cmd, SelectorTy),
+ EnforceType(Builder, self, IdTy)};
llvm::CallBase *slot = CGF.EmitRuntimeCallOrInvoke(LookupFn, args);
slot->setOnlyReadsMemory();
slot->setMetadata(msgSendMDKind, node);
@@ -800,7 +799,7 @@ class CGObjCGNUstep : public CGObjCGNU {
llvm::Value *cmd,
MessageSendInfo &MSI) override {
CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *lookupArgs[] = {ObjCSuper.getPointer(), cmd};
+ llvm::Value *lookupArgs[] = {ObjCSuper.emitRawPointer(CGF), cmd};
llvm::CallInst *slot =
CGF.EmitNounwindRuntimeCall(SlotLookupSuperFn, lookupArgs);
@@ -1221,10 +1220,10 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
llvm::Value *cmd, MessageSendInfo &MSI) override {
// Don't access the slot unless we're trying to cache the result.
CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *lookupArgs[] = {CGObjCGNU::EnforceType(Builder,
- ObjCSuper.getPointer(),
- PtrToObjCSuperTy),
- cmd};
+ llvm::Value *lookupArgs[] = {
+ CGObjCGNU::EnforceType(Builder, ObjCSuper.emitRawPointer(CGF),
+ PtrToObjCSuperTy),
+ cmd};
return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFn, lookupArgs);
}
@@ -1476,8 +1475,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
TypesGlobal = GV;
}
- return llvm::ConstantExpr::getGetElementPtr(TypesGlobal->getValueType(),
- TypesGlobal, Zeros);
+ return TypesGlobal;
}
llvm::Constant *GetConstantSelector(Selector Sel,
const std::string &TypeEncoding) override {
@@ -2071,7 +2069,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
Builder.CreateCondBr(Builder.CreateICmpEQ(selfValue, Zero),
SelfIsNilBlock, ContBlock,
- MDHelper.createBranchWeights(1, 1 << 20));
+ MDHelper.createUnlikelyBranchWeights());
CGF.EmitBlock(SelfIsNilBlock);
@@ -2106,7 +2104,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
CGF.createBasicBlock("objc_direct_method.class_initialized");
Builder.CreateCondBr(Builder.CreateICmpEQ(isInitialized, Zeros[0]),
notInitializedBlock, initializedBlock,
- MDHelper.createBranchWeights(1, 1 << 20));
+ MDHelper.createUnlikelyBranchWeights());
CGF.EmitBlock(notInitializedBlock);
Builder.SetInsertPoint(notInitializedBlock);
CGF.EmitRuntimeCall(SentInitializeFn, selfValue);
@@ -2186,7 +2184,8 @@ protected:
llvm::Value *cmd, MessageSendInfo &MSI) override {
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *lookupArgs[] = {
- EnforceType(Builder, ObjCSuper.getPointer(), PtrToObjCSuperTy), cmd,
+ EnforceType(Builder, ObjCSuper.emitRawPointer(CGF), PtrToObjCSuperTy),
+ cmd,
};
if (CGM.ReturnTypeUsesSRet(MSI.CallInfo))
@@ -2903,23 +2902,29 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
break;
case CodeGenOptions::Mixed:
case CodeGenOptions::NonLegacy:
+ StringRef name = "objc_msgSend";
if (CGM.ReturnTypeUsesFPRet(ResultType)) {
- imp =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
- "objc_msgSend_fpret")
- .getCallee();
+ name = "objc_msgSend_fpret";
} else if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) {
- // The actual types here don't matter - we're going to bitcast the
- // function anyway
- imp =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
- "objc_msgSend_stret")
- .getCallee();
- } else {
- imp = CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(IdTy, IdTy, true), "objc_msgSend")
- .getCallee();
+ name = "objc_msgSend_stret";
+
+ // The address of the memory block is be passed in x8 for POD type,
+ // or in x0 for non-POD type (marked as inreg).
+ bool shouldCheckForInReg =
+ CGM.getContext()
+ .getTargetInfo()
+ .getTriple()
+ .isWindowsMSVCEnvironment() &&
+ CGM.getContext().getTargetInfo().getTriple().isAArch64();
+ if (shouldCheckForInReg && CGM.ReturnTypeHasInReg(MSI.CallInfo)) {
+ name = "objc_msgSend_stret2";
+ }
}
+ // The actual types here don't matter - we're going to bitcast the
+ // function anyway
+ imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
+ name)
+ .getCallee();
}
// Reset the receiver in case the lookup modified it
@@ -4201,15 +4206,15 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGenFunction &CGF,
Address AddrWeakObj) {
CGBuilderTy &B = CGF.Builder;
- return B.CreateCall(WeakReadFn,
- EnforceType(B, AddrWeakObj.getPointer(), PtrToIdTy));
+ return B.CreateCall(
+ WeakReadFn, EnforceType(B, AddrWeakObj.emitRawPointer(CGF), PtrToIdTy));
}
void CGObjCGNU::EmitObjCWeakAssign(CodeGenFunction &CGF,
llvm::Value *src, Address dst) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
- llvm::Value *dstVal = EnforceType(B, dst.getPointer(), PtrToIdTy);
+ llvm::Value *dstVal = EnforceType(B, dst.emitRawPointer(CGF), PtrToIdTy);
B.CreateCall(WeakAssignFn, {src, dstVal});
}
@@ -4218,7 +4223,7 @@ void CGObjCGNU::EmitObjCGlobalAssign(CodeGenFunction &CGF,
bool threadlocal) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
- llvm::Value *dstVal = EnforceType(B, dst.getPointer(), PtrToIdTy);
+ llvm::Value *dstVal = EnforceType(B, dst.emitRawPointer(CGF), PtrToIdTy);
// FIXME. Add threadloca assign API
assert(!threadlocal && "EmitObjCGlobalAssign - Threal Local API NYI");
B.CreateCall(GlobalAssignFn, {src, dstVal});
@@ -4229,7 +4234,7 @@ void CGObjCGNU::EmitObjCIvarAssign(CodeGenFunction &CGF,
llvm::Value *ivarOffset) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
- llvm::Value *dstVal = EnforceType(B, dst.getPointer(), IdTy);
+ llvm::Value *dstVal = EnforceType(B, dst.emitRawPointer(CGF), IdTy);
B.CreateCall(IvarAssignFn, {src, dstVal, ivarOffset});
}
@@ -4237,7 +4242,7 @@ void CGObjCGNU::EmitObjCStrongCastAssign(CodeGenFunction &CGF,
llvm::Value *src, Address dst) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
- llvm::Value *dstVal = EnforceType(B, dst.getPointer(), PtrToIdTy);
+ llvm::Value *dstVal = EnforceType(B, dst.emitRawPointer(CGF), PtrToIdTy);
B.CreateCall(StrongCastAssignFn, {src, dstVal});
}
@@ -4246,8 +4251,8 @@ void CGObjCGNU::EmitGCMemmoveCollectable(CodeGenFunction &CGF,
Address SrcPtr,
llvm::Value *Size) {
CGBuilderTy &B = CGF.Builder;
- llvm::Value *DestPtrVal = EnforceType(B, DestPtr.getPointer(), PtrTy);
- llvm::Value *SrcPtrVal = EnforceType(B, SrcPtr.getPointer(), PtrTy);
+ llvm::Value *DestPtrVal = EnforceType(B, DestPtr.emitRawPointer(CGF), PtrTy);
+ llvm::Value *SrcPtrVal = EnforceType(B, SrcPtr.emitRawPointer(CGF), PtrTy);
B.CreateCall(MemMoveFn, {DestPtrVal, SrcPtrVal, Size});
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
index 517f7cddebc1..30f3911a8b03 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
@@ -1310,7 +1310,7 @@ private:
/// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
/// for the given selector.
llvm::Value *EmitSelector(CodeGenFunction &CGF, Selector Sel);
- Address EmitSelectorAddr(Selector Sel);
+ ConstantAddress EmitSelectorAddr(Selector Sel);
public:
CGObjCMac(CodeGen::CodeGenModule &cgm);
@@ -1538,7 +1538,7 @@ private:
/// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
/// for the given selector.
llvm::Value *EmitSelector(CodeGenFunction &CGF, Selector Sel);
- Address EmitSelectorAddr(Selector Sel);
+ ConstantAddress EmitSelectorAddr(Selector Sel);
/// GetInterfaceEHType - Get the cached ehtype for the given Objective-C
/// interface. The return value has type EHTypePtrTy.
@@ -1555,12 +1555,12 @@ private:
// Shamelessly stolen from Analysis/CFRefCount.cpp
Selector GetNullarySelector(const char* name) const {
- IdentifierInfo* II = &CGM.getContext().Idents.get(name);
+ const IdentifierInfo *II = &CGM.getContext().Idents.get(name);
return CGM.getContext().Selectors.getSelector(0, &II);
}
Selector GetUnarySelector(const char* name) const {
- IdentifierInfo* II = &CGM.getContext().Idents.get(name);
+ const IdentifierInfo *II = &CGM.getContext().Idents.get(name);
return CGM.getContext().Selectors.getSelector(1, &II);
}
@@ -1593,12 +1593,20 @@ private:
}
bool isClassLayoutKnownStatically(const ObjCInterfaceDecl *ID) {
- // NSObject is a fixed size. If we can see the @implementation of a class
- // which inherits from NSObject then we know that all it's offsets also must
- // be fixed. FIXME: Can we do this if see a chain of super classes with
- // implementations leading to NSObject?
- return ID->getImplementation() && ID->getSuperClass() &&
- ID->getSuperClass()->getName() == "NSObject";
+ // Test a class by checking its superclasses up to
+ // its base class if it has one.
+ for (; ID; ID = ID->getSuperClass()) {
+ // The layout of base class NSObject
+ // is guaranteed to be statically known
+ if (ID->getIdentifier()->getName() == "NSObject")
+ return true;
+
+ // If we cannot see the @implementation of a class,
+ // we cannot statically know the class layout.
+ if (!ID->getImplementation())
+ return false;
+ }
+ return false;
}
public:
@@ -2056,9 +2064,8 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
const ObjCMethodDecl *Method) {
// Create and init a super structure; this is a (receiver, class)
// pair we will pass to objc_msgSendSuper.
- Address ObjCSuper =
- CGF.CreateTempAlloca(ObjCTypes.SuperTy, CGF.getPointerAlign(),
- "objc_super");
+ RawAddress ObjCSuper = CGF.CreateTempAlloca(
+ ObjCTypes.SuperTy, CGF.getPointerAlign(), "objc_super");
llvm::Value *ReceiverAsObject =
CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
CGF.Builder.CreateStore(ReceiverAsObject,
@@ -2493,12 +2500,12 @@ void CGObjCCommonMac::BuildRCRecordLayout(const llvm::StructLayout *RecLayout,
if (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
auto *CArray = cast<ConstantArrayType>(Array);
- uint64_t ElCount = CArray->getSize().getZExtValue();
+ uint64_t ElCount = CArray->getZExtSize();
assert(CArray && "only array with known element size is supported");
FQT = CArray->getElementType();
while (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
auto *CArray = cast<ConstantArrayType>(Array);
- ElCount *= CArray->getSize().getZExtValue();
+ ElCount *= CArray->getZExtSize();
FQT = CArray->getElementType();
}
if (FQT->isRecordType() && ElCount) {
@@ -4065,7 +4072,7 @@ void CGObjCCommonMac::GenerateDirectMethodPrologue(
llvm::MDBuilder MDHelper(CGM.getLLVMContext());
Builder.CreateCondBr(Builder.CreateICmpEQ(selfValue, Zero), SelfIsNilBlock,
- ContBlock, MDHelper.createBranchWeights(1, 1 << 20));
+ ContBlock, MDHelper.createUnlikelyBranchWeights());
CGF.EmitBlock(SelfIsNilBlock);
@@ -4251,7 +4258,7 @@ namespace {
CGF.EmitBlock(FinallyCallExit);
CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryExitFn(),
- ExceptionData.getPointer());
+ ExceptionData.emitRawPointer(CGF));
CGF.EmitBlock(FinallyNoCallExit);
@@ -4417,7 +4424,9 @@ void FragileHazards::emitHazardsInNewBlocks() {
}
static void addIfPresent(llvm::DenseSet<llvm::Value*> &S, Address V) {
- if (V.isValid()) S.insert(V.getPointer());
+ if (V.isValid())
+ if (llvm::Value *Ptr = V.getBasePointer())
+ S.insert(Ptr);
}
void FragileHazards::collectLocals() {
@@ -4620,13 +4629,13 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// - Call objc_exception_try_enter to push ExceptionData on top of
// the EH stack.
CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryEnterFn(),
- ExceptionData.getPointer());
+ ExceptionData.emitRawPointer(CGF));
// - Call setjmp on the exception data buffer.
llvm::Constant *Zero = llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0);
llvm::Value *GEPIndexes[] = { Zero, Zero, Zero };
llvm::Value *SetJmpBuffer = CGF.Builder.CreateGEP(
- ObjCTypes.ExceptionDataTy, ExceptionData.getPointer(), GEPIndexes,
+ ObjCTypes.ExceptionDataTy, ExceptionData.emitRawPointer(CGF), GEPIndexes,
"setjmp_buffer");
llvm::CallInst *SetJmpResult = CGF.EmitNounwindRuntimeCall(
ObjCTypes.getSetJmpFn(), SetJmpBuffer, "setjmp_result");
@@ -4665,9 +4674,9 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
} else {
// Retrieve the exception object. We may emit multiple blocks but
// nothing can cross this so the value is already in SSA form.
- llvm::CallInst *Caught =
- CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(),
- ExceptionData.getPointer(), "caught");
+ llvm::CallInst *Caught = CGF.EmitNounwindRuntimeCall(
+ ObjCTypes.getExceptionExtractFn(), ExceptionData.emitRawPointer(CGF),
+ "caught");
// Push the exception to rethrow onto the EH value stack for the
// benefit of any @throws in the handlers.
@@ -4690,7 +4699,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// Enter a new exception try block (in case a @catch block
// throws an exception).
CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryEnterFn(),
- ExceptionData.getPointer());
+ ExceptionData.emitRawPointer(CGF));
llvm::CallInst *SetJmpResult =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getSetJmpFn(),
@@ -4821,9 +4830,9 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// Extract the new exception and save it to the
// propagating-exception slot.
assert(PropagatingExnVar.isValid());
- llvm::CallInst *NewCaught =
- CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(),
- ExceptionData.getPointer(), "caught");
+ llvm::CallInst *NewCaught = CGF.EmitNounwindRuntimeCall(
+ ObjCTypes.getExceptionExtractFn(), ExceptionData.emitRawPointer(CGF),
+ "caught");
CGF.Builder.CreateStore(NewCaught, PropagatingExnVar);
// Don't pop the catch handler; the throw already did.
@@ -4853,9 +4862,8 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// Otherwise, just look in the buffer for the exception to throw.
} else {
- llvm::CallInst *Caught =
- CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(),
- ExceptionData.getPointer());
+ llvm::CallInst *Caught = CGF.EmitNounwindRuntimeCall(
+ ObjCTypes.getExceptionExtractFn(), ExceptionData.emitRawPointer(CGF));
PropagatingExn = Caught;
}
@@ -4898,7 +4906,7 @@ llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
Address AddrWeakObj) {
llvm::Type* DestTy = AddrWeakObj.getElementType();
llvm::Value *AddrWeakObjVal = CGF.Builder.CreateBitCast(
- AddrWeakObj.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ AddrWeakObj.emitRawPointer(CGF), ObjCTypes.PtrObjectPtrTy);
llvm::Value *read_weak =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcReadWeakFn(),
AddrWeakObjVal, "weakread");
@@ -4920,8 +4928,8 @@ void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- llvm::Value *dstVal =
- CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF),
+ ObjCTypes.PtrObjectPtrTy);
llvm::Value *args[] = { src, dstVal };
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignWeakFn(),
args, "weakassign");
@@ -4942,8 +4950,8 @@ void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- llvm::Value *dstVal =
- CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF),
+ ObjCTypes.PtrObjectPtrTy);
llvm::Value *args[] = {src, dstVal};
if (!threadlocal)
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignGlobalFn(),
@@ -4969,8 +4977,8 @@ void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- llvm::Value *dstVal =
- CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF),
+ ObjCTypes.PtrObjectPtrTy);
llvm::Value *args[] = {src, dstVal, ivarOffset};
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignIvarFn(), args);
}
@@ -4989,8 +4997,8 @@ void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- llvm::Value *dstVal =
- CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF),
+ ObjCTypes.PtrObjectPtrTy);
llvm::Value *args[] = {src, dstVal};
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignStrongCastFn(),
args, "strongassign");
@@ -4999,7 +5007,8 @@ void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
void CGObjCMac::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
Address DestPtr, Address SrcPtr,
llvm::Value *size) {
- llvm::Value *args[] = { DestPtr.getPointer(), SrcPtr.getPointer(), size };
+ llvm::Value *args[] = {DestPtr.emitRawPointer(CGF),
+ SrcPtr.emitRawPointer(CGF), size};
CGF.EmitNounwindRuntimeCall(ObjCTypes.GcMemmoveCollectableFn(), args);
}
@@ -5038,12 +5047,10 @@ std::string CGObjCCommonMac::GetSectionName(StringRef Section,
return ("__DATA," + Section + "," + MachOAttributes).str();
}
case llvm::Triple::ELF:
- assert(Section.substr(0, 2) == "__" &&
- "expected the name to begin with __");
+ assert(Section.starts_with("__") && "expected the name to begin with __");
return Section.substr(2).str();
case llvm::Triple::COFF:
- assert(Section.substr(0, 2) == "__" &&
- "expected the name to begin with __");
+ assert(Section.starts_with("__") && "expected the name to begin with __");
return ("." + Section.substr(2) + "$B").str();
case llvm::Triple::Wasm:
case llvm::Triple::GOFF:
@@ -5237,7 +5244,7 @@ llvm::Value *CGObjCMac::EmitSelector(CodeGenFunction &CGF, Selector Sel) {
return CGF.Builder.CreateLoad(EmitSelectorAddr(Sel));
}
-Address CGObjCMac::EmitSelectorAddr(Selector Sel) {
+ConstantAddress CGObjCMac::EmitSelectorAddr(Selector Sel) {
CharUnits Align = CGM.getPointerAlign();
llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
@@ -5248,7 +5255,7 @@ Address CGObjCMac::EmitSelectorAddr(Selector Sel) {
Entry->setExternallyInitialized(true);
}
- return Address(Entry, ObjCTypes.SelectorPtrTy, Align);
+ return ConstantAddress(Entry, ObjCTypes.SelectorPtrTy, Align);
}
llvm::Constant *CGObjCCommonMac::GetClassName(StringRef RuntimeName) {
@@ -5320,7 +5327,7 @@ void IvarLayoutBuilder::visitField(const FieldDecl *field,
}
// Unlike incomplete arrays, constant arrays can be nested.
while (auto arrayType = CGM.getContext().getAsConstantArrayType(fieldType)) {
- numElts *= arrayType->getSize().getZExtValue();
+ numElts *= arrayType->getZExtSize();
fieldType = arrayType->getElementType();
}
@@ -6261,11 +6268,10 @@ bool CGObjCNonFragileABIMac::isVTableDispatchedSelector(Selector Sel) {
VTableDispatchMethods.insert(GetUnarySelector("addObject"));
// "countByEnumeratingWithState:objects:count"
- IdentifierInfo *KeyIdents[] = {
- &CGM.getContext().Idents.get("countByEnumeratingWithState"),
- &CGM.getContext().Idents.get("objects"),
- &CGM.getContext().Idents.get("count")
- };
+ const IdentifierInfo *KeyIdents[] = {
+ &CGM.getContext().Idents.get("countByEnumeratingWithState"),
+ &CGM.getContext().Idents.get("objects"),
+ &CGM.getContext().Idents.get("count")};
VTableDispatchMethods.insert(
CGM.getContext().Selectors.getSelector(3, KeyIdents));
}
@@ -7317,7 +7323,7 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
ObjCTypes.MessageRefTy, CGF.getPointerAlign());
// Update the message ref argument.
- args[1].setRValue(RValue::get(mref.getPointer()));
+ args[1].setRValue(RValue::get(mref, CGF));
// Load the function to call from the message ref table.
Address calleeAddr = CGF.Builder.CreateStructGEP(mref, 0);
@@ -7546,9 +7552,8 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
// ...
// Create and init a super structure; this is a (receiver, class)
// pair we will pass to objc_msgSendSuper.
- Address ObjCSuper =
- CGF.CreateTempAlloca(ObjCTypes.SuperTy, CGF.getPointerAlign(),
- "objc_super");
+ RawAddress ObjCSuper = CGF.CreateTempAlloca(
+ ObjCTypes.SuperTy, CGF.getPointerAlign(), "objc_super");
llvm::Value *ReceiverAsObject =
CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
@@ -7588,7 +7593,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CodeGenFunction &CGF,
return LI;
}
-Address CGObjCNonFragileABIMac::EmitSelectorAddr(Selector Sel) {
+ConstantAddress CGObjCNonFragileABIMac::EmitSelectorAddr(Selector Sel) {
llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
CharUnits Align = CGM.getPointerAlign();
if (!Entry) {
@@ -7604,7 +7609,7 @@ Address CGObjCNonFragileABIMac::EmitSelectorAddr(Selector Sel) {
CGM.addCompilerUsedGlobal(Entry);
}
- return Address(Entry, ObjCTypes.SelectorPtrTy, Align);
+ return ConstantAddress(Entry, ObjCTypes.SelectorPtrTy, Align);
}
/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
@@ -7623,8 +7628,8 @@ void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- llvm::Value *dstVal =
- CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF),
+ ObjCTypes.PtrObjectPtrTy);
llvm::Value *args[] = {src, dstVal, ivarOffset};
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignIvarFn(), args);
}
@@ -7644,8 +7649,8 @@ void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- llvm::Value *dstVal =
- CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF),
+ ObjCTypes.PtrObjectPtrTy);
llvm::Value *args[] = {src, dstVal};
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignStrongCastFn(),
args, "weakassign");
@@ -7654,7 +7659,8 @@ void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable(
CodeGen::CodeGenFunction &CGF, Address DestPtr, Address SrcPtr,
llvm::Value *Size) {
- llvm::Value *args[] = { DestPtr.getPointer(), SrcPtr.getPointer(), Size };
+ llvm::Value *args[] = {DestPtr.emitRawPointer(CGF),
+ SrcPtr.emitRawPointer(CGF), Size};
CGF.EmitNounwindRuntimeCall(ObjCTypes.GcMemmoveCollectableFn(), args);
}
@@ -7666,7 +7672,7 @@ llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
Address AddrWeakObj) {
llvm::Type *DestTy = AddrWeakObj.getElementType();
llvm::Value *AddrWeakObjVal = CGF.Builder.CreateBitCast(
- AddrWeakObj.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ AddrWeakObj.emitRawPointer(CGF), ObjCTypes.PtrObjectPtrTy);
llvm::Value *read_weak =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcReadWeakFn(),
AddrWeakObjVal, "weakread");
@@ -7688,8 +7694,8 @@ void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- llvm::Value *dstVal =
- CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF),
+ ObjCTypes.PtrObjectPtrTy);
llvm::Value *args[] = {src, dstVal};
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignWeakFn(),
args, "weakassign");
@@ -7710,8 +7716,8 @@ void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- llvm::Value *dstVal =
- CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF),
+ ObjCTypes.PtrObjectPtrTy);
llvm::Value *args[] = {src, dstVal};
if (!threadlocal)
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignGlobalFn(),
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp
index 424564f97599..01d0f35da196 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp
@@ -67,7 +67,7 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
V = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, V, Offset, "add.ptr");
if (!Ivar->isBitField()) {
- LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
+ LValue LV = CGF.MakeNaturalAlignRawAddrLValue(V, IvarTy);
return LV;
}
@@ -233,7 +233,7 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
llvm::Instruction *CPICandidate = Handler.Block->getFirstNonPHI();
if (auto *CPI = dyn_cast_or_null<llvm::CatchPadInst>(CPICandidate)) {
CGF.CurrentFuncletPad = CPI;
- CPI->setOperand(2, CGF.getExceptionSlot().getPointer());
+ CPI->setOperand(2, CGF.getExceptionSlot().emitRawPointer(CGF));
CGF.EHStack.pushCleanup<CatchRetScope>(NormalCleanup, CPI);
}
}
@@ -405,7 +405,7 @@ bool CGObjCRuntime::canMessageReceiverBeNull(CodeGenFunction &CGF,
auto self = curMethod->getSelfDecl();
if (self->getType().isConstQualified()) {
if (auto LI = dyn_cast<llvm::LoadInst>(receiver->stripPointerCasts())) {
- llvm::Value *selfAddr = CGF.GetAddrOfLocalVar(self).getPointer();
+ llvm::Value *selfAddr = CGF.GetAddrOfLocalVar(self).emitRawPointer(CGF);
if (selfAddr == LI->getPointerOperand()) {
return false;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 4855e7410a01..a6a87ec88ee8 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "CGOpenMPRuntime.h"
+#include "ABIInfoImpl.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
@@ -30,6 +31,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/IR/Constants.h"
@@ -373,7 +375,7 @@ public:
/*RefersToEnclosingVariableOrCapture=*/false,
VD->getType().getNonReferenceType(), VK_LValue,
C.getLocation());
- PrivScope.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
+ PrivScope.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress());
}
(void)PrivScope.Privatize();
}
@@ -622,7 +624,7 @@ static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
auto *GV = new llvm::GlobalVariable(
CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
llvm::GlobalValue::PrivateLinkage, Init, Name);
- LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
+ LValue LV = CGF.MakeNaturalAlignRawAddrLValue(GV, Ty);
RValue InitRVal;
switch (CGF.getEvaluationKind(Ty)) {
case TEK_Scalar:
@@ -668,8 +670,8 @@ static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
llvm::Value *SrcBegin = nullptr;
if (DRD)
- SrcBegin = SrcAddr.getPointer();
- llvm::Value *DestBegin = DestAddr.getPointer();
+ SrcBegin = SrcAddr.emitRawPointer(CGF);
+ llvm::Value *DestBegin = DestAddr.emitRawPointer(CGF);
// Cast from pointer to array type to pointer to single element.
llvm::Value *DestEnd =
CGF.Builder.CreateGEP(DestAddr.getElementType(), DestBegin, NumElements);
@@ -742,8 +744,8 @@ LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
const Expr *E) {
- if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E))
- return CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
+ if (const auto *OASE = dyn_cast<ArraySectionExpr>(E))
+ return CGF.EmitArraySectionExpr(OASE, /*IsLowerBound=*/false);
return LValue();
}
@@ -800,7 +802,7 @@ void ReductionCodeGen::emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N) {
void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
QualType PrivateType = getPrivateType(N);
- bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
+ bool AsArraySection = isa<ArraySectionExpr>(ClausesData[N].Ref);
if (!PrivateType->isVariablyModifiedType()) {
Sizes.emplace_back(
CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType()),
@@ -809,7 +811,7 @@ void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
}
llvm::Value *Size;
llvm::Value *SizeInChars;
- auto *ElemType = OrigAddresses[N].first.getAddress(CGF).getElementType();
+ auto *ElemType = OrigAddresses[N].first.getAddress().getElementType();
auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
if (AsArraySection) {
Size = CGF.Builder.CreatePtrDiff(ElemType,
@@ -897,22 +899,22 @@ static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
!CGF.getContext().hasSameType(BaseTy, ElTy)) {
if (const auto *PtrTy = BaseTy->getAs<PointerType>()) {
- BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(CGF), PtrTy);
+ BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
} else {
- LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(CGF), BaseTy);
+ LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(), BaseTy);
BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
}
BaseTy = BaseTy->getPointeeType();
}
return CGF.MakeAddrLValue(
- BaseLV.getAddress(CGF).withElementType(CGF.ConvertTypeForMem(ElTy)),
+ BaseLV.getAddress().withElementType(CGF.ConvertTypeForMem(ElTy)),
BaseLV.getType(), BaseLV.getBaseInfo(),
CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
}
static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
Address OriginalBaseAddress, llvm::Value *Addr) {
- Address Tmp = Address::invalid();
+ RawAddress Tmp = RawAddress::invalid();
Address TopTmp = Address::invalid();
Address MostTopTmp = Address::invalid();
BaseTy = BaseTy.getNonReferenceType();
@@ -941,9 +943,9 @@ static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) {
const VarDecl *OrigVD = nullptr;
- if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Ref)) {
+ if (const auto *OASE = dyn_cast<ArraySectionExpr>(Ref)) {
const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
- while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
+ while (const auto *TempOASE = dyn_cast<ArraySectionExpr>(Base))
Base = TempOASE->getBase()->IgnoreParenImpCasts();
while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
Base = TempASE->getBase()->IgnoreParenImpCasts();
@@ -968,18 +970,18 @@ Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
LValue BaseLValue =
loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
OriginalBaseLValue);
- Address SharedAddr = SharedAddresses[N].first.getAddress(CGF);
+ Address SharedAddr = SharedAddresses[N].first.getAddress();
llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
SharedAddr.getElementType(), BaseLValue.getPointer(CGF),
- SharedAddr.getPointer());
+ SharedAddr.emitRawPointer(CGF));
llvm::Value *PrivatePointer =
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- PrivateAddr.getPointer(), SharedAddr.getType());
+ PrivateAddr.emitRawPointer(CGF), SharedAddr.getType());
llvm::Value *Ptr = CGF.Builder.CreateGEP(
SharedAddr.getElementType(), PrivatePointer, Adjustment);
return castToBase(CGF, OrigVD->getType(),
SharedAddresses[N].first.getType(),
- OriginalBaseLValue.getAddress(CGF), Ptr);
+ OriginalBaseLValue.getAddress(), Ptr);
}
BaseDecls.emplace_back(
cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
@@ -1108,11 +1110,11 @@ emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
Scope.addPrivate(
In, CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
- .getAddress(CGF));
+ .getAddress());
Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
Scope.addPrivate(
Out, CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
- .getAddress(CGF));
+ .getAddress());
(void)Scope.Privatize();
if (!IsCombiner && Out->hasInit() &&
!CGF.isTrivialInitializer(Out->getInit())) {
@@ -1557,7 +1559,7 @@ static llvm::TargetRegionEntryInfo getEntryInfoFromPresumedLoc(
return OMPBuilder.getTargetEntryUniqueInfo(FileInfoCallBack, ParentName);
}
-Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
+ConstantAddress CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
auto AddrOfGlobal = [&VD, this]() { return CGM.GetAddrOfGlobal(VD); };
auto LinkageForVariable = [&VD, this]() {
@@ -1579,8 +1581,8 @@ Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
LinkageForVariable);
if (!addr)
- return Address::invalid();
- return Address(addr, LlvmPtrTy, CGM.getContext().getDeclAlign(VD));
+ return ConstantAddress::invalid();
+ return ConstantAddress(addr, LlvmPtrTy, CGM.getContext().getDeclAlign(VD));
}
llvm::Constant *
@@ -1604,7 +1606,7 @@ Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
llvm::Type *VarTy = VDAddr.getElementType();
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
- CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy),
+ CGF.Builder.CreatePointerCast(VDAddr.emitRawPointer(CGF), CGM.Int8PtrTy),
CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
getOrCreateThreadPrivateCache(VD)};
return Address(
@@ -1627,7 +1629,8 @@ void CGOpenMPRuntime::emitThreadPrivateVarInit(
// Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
// to register constructor/destructor for variable.
llvm::Value *Args[] = {
- OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy),
+ OMPLoc,
+ CGF.Builder.CreatePointerCast(VDAddr.emitRawPointer(CGF), CGM.VoidPtrTy),
Ctor, CopyCtor, Dtor};
CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
@@ -1900,13 +1903,13 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
// OutlinedFn(&GTid, &zero_bound, CapturedStruct);
Address ThreadIDAddr = RT.emitThreadIDAddress(CGF, Loc);
- Address ZeroAddrBound =
+ RawAddress ZeroAddrBound =
CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
/*Name=*/".bound.zero.addr");
CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddrBound);
llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
// ThreadId for serialized parallels is 0.
- OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
+ OutlinedFnArgs.push_back(ThreadIDAddr.emitRawPointer(CGF));
OutlinedFnArgs.push_back(ZeroAddrBound.getPointer());
OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
@@ -1945,7 +1948,7 @@ Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
if (OMPRegionInfo->getThreadIDVariable())
- return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress(CGF);
+ return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();
llvm::Value *ThreadID = getThreadID(CGF, Loc);
QualType Int32Ty =
@@ -2272,7 +2275,7 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
emitUpdateLocation(CGF, Loc), // ident_t *<loc>
getThreadID(CGF, Loc), // i32 <gtid>
BufSize, // size_t <buf_size>
- CL.getPointer(), // void *<copyprivate list>
+ CL.emitRawPointer(CGF), // void *<copyprivate list>
CpyFn, // void (*) (void *, void *) <copy_func>
DidItVal // i32 did_it
};
@@ -2552,6 +2555,15 @@ void CGOpenMPRuntime::emitForDispatchInit(
Args);
}
+void CGOpenMPRuntime::emitForDispatchDeinit(CodeGenFunction &CGF,
+ SourceLocation Loc) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Call __kmpc_dispatch_deinit(ident_t *loc, kmp_int32 tid);
+ llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
+ CGF.EmitRuntimeCall(OMPBuilder.createDispatchDeinitFunction(), Args);
+}
+
static void emitForStaticInitCall(
CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
llvm::FunctionCallee ForStaticInitFunction, OpenMPSchedType Schedule,
@@ -2591,10 +2603,10 @@ static void emitForStaticInitCall(
ThreadId,
CGF.Builder.getInt32(addMonoNonMonoModifier(CGF.CGM, Schedule, M1,
M2)), // Schedule type
- Values.IL.getPointer(), // &isLastIter
- Values.LB.getPointer(), // &LB
- Values.UB.getPointer(), // &UB
- Values.ST.getPointer(), // &Stride
+ Values.IL.emitRawPointer(CGF), // &isLastIter
+ Values.LB.emitRawPointer(CGF), // &LB
+ Values.UB.emitRawPointer(CGF), // &UB
+ Values.ST.emitRawPointer(CGF), // &Stride
CGF.Builder.getIntN(Values.IVSize, 1), // Incr
Chunk // Chunk
};
@@ -2647,16 +2659,20 @@ void CGOpenMPRuntime::emitDistributeStaticInit(
void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDirectiveKind DKind) {
+ assert((DKind == OMPD_distribute || DKind == OMPD_for ||
+ DKind == OMPD_sections) &&
+ "Expected distribute, for, or sections directive kind");
if (!CGF.HaveInsertPoint())
return;
// Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc,
- isOpenMPDistributeDirective(DKind)
+ isOpenMPDistributeDirective(DKind) ||
+ (DKind == OMPD_target_teams_loop)
? OMP_IDENT_WORK_DISTRIBUTE
- : isOpenMPLoopDirective(DKind)
- ? OMP_IDENT_WORK_LOOP
- : OMP_IDENT_WORK_SECTIONS),
+ : isOpenMPLoopDirective(DKind)
+ ? OMP_IDENT_WORK_LOOP
+ : OMP_IDENT_WORK_SECTIONS),
getThreadID(CGF, Loc)};
auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
if (isOpenMPDistributeDirective(DKind) &&
@@ -2694,12 +2710,11 @@ llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
// kmp_int[32|64] *p_stride);
llvm::Value *Args[] = {
- emitUpdateLocation(CGF, Loc),
- getThreadID(CGF, Loc),
- IL.getPointer(), // &isLastIter
- LB.getPointer(), // &Lower
- UB.getPointer(), // &Upper
- ST.getPointer() // &Stride
+ emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
+ IL.emitRawPointer(CGF), // &isLastIter
+ LB.emitRawPointer(CGF), // &Lower
+ UB.emitRawPointer(CGF), // &Upper
+ ST.emitRawPointer(CGF) // &Stride
};
llvm::Value *Call = CGF.EmitRuntimeCall(
OMPBuilder.createDispatchNextFunction(IVSize, IVSigned), Args);
@@ -3042,9 +3057,9 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
llvm::Value *CommonArgs[] = {
GtidParam, PartidParam, PrivatesParam, TaskPrivatesMap,
CGF.Builder
- .CreatePointerBitCastOrAddrSpaceCast(TDBase.getAddress(CGF),
+ .CreatePointerBitCastOrAddrSpaceCast(TDBase.getAddress(),
CGF.VoidPtrTy, CGF.Int8Ty)
- .getPointer()};
+ .emitRawPointer(CGF)};
SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
std::end(CommonArgs));
if (isOpenMPTaskLoopDirective(Kind)) {
@@ -3121,7 +3136,7 @@ static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
if (QualType::DestructionKind DtorKind =
Field->getType().isDestructedType()) {
LValue FieldLValue = CGF.EmitLValueForField(Base, Field);
- CGF.pushDestroy(DtorKind, FieldLValue.getAddress(CGF), Field->getType());
+ CGF.pushDestroy(DtorKind, FieldLValue.getAddress(), Field->getType());
}
}
CGF.FinishFunction();
@@ -3229,7 +3244,7 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
LValue RefLVal =
CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
LValue RefLoadLVal = CGF.EmitLoadOfPointerLValue(
- RefLVal.getAddress(CGF), RefLVal.getType()->castAs<PointerType>());
+ RefLVal.getAddress(), RefLVal.getType()->castAs<PointerType>());
CGF.EmitStoreOfScalar(FieldLVal.getPointer(CGF), RefLoadLVal);
++Counter;
}
@@ -3301,7 +3316,7 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
} else if (ForDup) {
SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
SharedRefLValue = CGF.MakeAddrLValue(
- SharedRefLValue.getAddress(CGF).withAlignment(
+ SharedRefLValue.getAddress().withAlignment(
C.getDeclAlign(OriginalVD)),
SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
SharedRefLValue.getTBAAInfo());
@@ -3325,8 +3340,7 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
// Initialize firstprivate array using element-by-element
// initialization.
CGF.EmitOMPAggregateAssign(
- PrivateLValue.getAddress(CGF), SharedRefLValue.getAddress(CGF),
- Type,
+ PrivateLValue.getAddress(), SharedRefLValue.getAddress(), Type,
[&CGF, Elem, Init, &CapturesInfo](Address DestElement,
Address SrcElement) {
// Clean up any temporaries needed by the initialization.
@@ -3343,7 +3357,7 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
}
} else {
CodeGenFunction::OMPPrivateScope InitScope(CGF);
- InitScope.addPrivate(Elem, SharedRefLValue.getAddress(CGF));
+ InitScope.addPrivate(Elem, SharedRefLValue.getAddress());
(void)InitScope.Privatize();
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
CGF.EmitExprAsInit(Init, VD, PrivateLValue,
@@ -3504,7 +3518,7 @@ public:
HelperData.CounterVD->getType());
// Counter = 0;
CGF.EmitStoreOfScalar(
- llvm::ConstantInt::get(CLVal.getAddress(CGF).getElementType(), 0),
+ llvm::ConstantInt::get(CLVal.getAddress().getElementType(), 0),
CLVal);
CodeGenFunction::JumpDest &ContDest =
ContDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.cont"));
@@ -3566,12 +3580,12 @@ getPointerAndSize(CodeGenFunction &CGF, const Expr *E) {
SizeVal = CGF.Builder.CreateNUWMul(SizeVal, Sz);
}
} else if (const auto *ASE =
- dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
- LValue UpAddrLVal =
- CGF.EmitOMPArraySectionExpr(ASE, /*IsLowerBound=*/false);
- Address UpAddrAddress = UpAddrLVal.getAddress(CGF);
+ dyn_cast<ArraySectionExpr>(E->IgnoreParenImpCasts())) {
+ LValue UpAddrLVal = CGF.EmitArraySectionExpr(ASE, /*IsLowerBound=*/false);
+ Address UpAddrAddress = UpAddrLVal.getAddress();
llvm::Value *UpAddr = CGF.Builder.CreateConstGEP1_32(
- UpAddrAddress.getElementType(), UpAddrAddress.getPointer(), /*Idx0=*/1);
+ UpAddrAddress.getElementType(), UpAddrAddress.emitRawPointer(CGF),
+ /*Idx0=*/1);
llvm::Value *LowIntPtr = CGF.Builder.CreatePtrToInt(Addr, CGF.SizeTy);
llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGF.SizeTy);
SizeVal = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
@@ -3885,8 +3899,9 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *Size;
std::tie(Addr, Size) = getPointerAndSize(CGF, E);
llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
- LValue Base = CGF.MakeAddrLValue(
- CGF.Builder.CreateGEP(AffinitiesArray, Idx), KmpTaskAffinityInfoTy);
+ LValue Base =
+ CGF.MakeAddrLValue(CGF.Builder.CreateGEP(CGF, AffinitiesArray, Idx),
+ KmpTaskAffinityInfoTy);
// affs[i].base_addr = &<Affinities[i].second>;
LValue BaseAddrLVal = CGF.EmitLValueForField(
Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
@@ -3907,7 +3922,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *LocRef = emitUpdateLocation(CGF, Loc);
llvm::Value *GTid = getThreadID(CGF, Loc);
llvm::Value *AffinListPtr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- AffinitiesArray.getPointer(), CGM.VoidPtrTy);
+ AffinitiesArray.emitRawPointer(CGF), CGM.VoidPtrTy);
// FIXME: Emit the function and ignore its result for now unless the
// runtime function is properly implemented.
(void)CGF.EmitRuntimeCall(
@@ -3918,8 +3933,8 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *NewTaskNewTaskTTy =
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
NewTask, KmpTaskTWithPrivatesPtrTy);
- LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
- KmpTaskTWithPrivatesQTy);
+ LValue Base = CGF.MakeNaturalAlignRawAddrLValue(NewTaskNewTaskTTy,
+ KmpTaskTWithPrivatesQTy);
LValue TDBase =
CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
// Fill the data in the resulting kmp_task_t record.
@@ -4040,11 +4055,11 @@ CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal,
cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
LValue Base = CGF.EmitLoadOfPointerLValue(
- DepobjLVal.getAddress(CGF).withElementType(
+ DepobjLVal.getAddress().withElementType(
CGF.ConvertTypeForMem(KmpDependInfoPtrTy)),
KmpDependInfoPtrTy->castAs<PointerType>());
Address DepObjAddr = CGF.Builder.CreateGEP(
- Base.getAddress(CGF),
+ CGF, Base.getAddress(),
llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
LValue NumDepsBase = CGF.MakeAddrLValue(
DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo());
@@ -4094,7 +4109,7 @@ static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
LValue &PosLVal = *Pos.get<LValue *>();
llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
Base = CGF.MakeAddrLValue(
- CGF.Builder.CreateGEP(DependenciesArray, Idx), KmpDependInfoTy);
+ CGF.Builder.CreateGEP(CGF, DependenciesArray, Idx), KmpDependInfoTy);
}
// deps[i].base_addr = &<Dependencies[i].second>;
LValue BaseAddrLVal = CGF.EmitLValueForField(
@@ -4151,7 +4166,7 @@ SmallVector<llvm::Value *, 4> CGOpenMPRuntime::emitDepobjElementsSizes(
CGF.CreateMemTemp(C.getUIntPtrType(), "depobj.size.addr"),
C.getUIntPtrType());
CGF.Builder.CreateStore(llvm::ConstantInt::get(CGF.IntPtrTy, 0),
- NumLVal.getAddress(CGF));
+ NumLVal.getAddress());
llvm::Value *PrevVal = CGF.EmitLoadOfScalar(NumLVal, E->getExprLoc());
llvm::Value *Add = CGF.Builder.CreateNUWAdd(PrevVal, NumDeps);
CGF.EmitStoreOfScalar(Add, NumLVal);
@@ -4192,8 +4207,8 @@ void CGOpenMPRuntime::emitDepobjElements(CodeGenFunction &CGF,
ElSize,
CGF.Builder.CreateIntCast(NumDeps, CGF.SizeTy, /*isSigned=*/false));
llvm::Value *Pos = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
- Address DepAddr = CGF.Builder.CreateGEP(DependenciesArray, Pos);
- CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(CGF), Size);
+ Address DepAddr = CGF.Builder.CreateGEP(CGF, DependenciesArray, Pos);
+ CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(), Size);
// Increase pos.
// pos += size;
@@ -4244,14 +4259,18 @@ std::pair<llvm::Value *, Address> CGOpenMPRuntime::emitDependClause(
// Include number of iterations, if any.
if (const auto *IE = cast_or_null<OMPIteratorExpr>(D.IteratorExpr)) {
+ llvm::Value *ClauseIteratorSpace =
+ llvm::ConstantInt::get(CGF.IntPtrTy, 1);
for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
Sz = CGF.Builder.CreateIntCast(Sz, CGF.IntPtrTy, /*isSigned=*/false);
- llvm::Value *NumClauseDeps = CGF.Builder.CreateNUWMul(
- Sz, llvm::ConstantInt::get(CGF.IntPtrTy, D.DepExprs.size()));
- NumOfRegularWithIterators =
- CGF.Builder.CreateNUWAdd(NumOfRegularWithIterators, NumClauseDeps);
+ ClauseIteratorSpace = CGF.Builder.CreateNUWMul(Sz, ClauseIteratorSpace);
}
+ llvm::Value *NumClauseDeps = CGF.Builder.CreateNUWMul(
+ ClauseIteratorSpace,
+ llvm::ConstantInt::get(CGF.IntPtrTy, D.DepExprs.size()));
+ NumOfRegularWithIterators =
+ CGF.Builder.CreateNUWAdd(NumOfRegularWithIterators, NumClauseDeps);
HasRegularWithIterators = true;
continue;
}
@@ -4420,14 +4439,14 @@ void CGOpenMPRuntime::emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
ASTContext &C = CGM.getContext();
QualType FlagsTy;
getDependTypes(C, KmpDependInfoTy, FlagsTy);
- LValue Base = CGF.EmitLoadOfPointerLValue(
- DepobjLVal.getAddress(CGF), C.VoidPtrTy.castAs<PointerType>());
+ LValue Base = CGF.EmitLoadOfPointerLValue(DepobjLVal.getAddress(),
+ C.VoidPtrTy.castAs<PointerType>());
QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy),
+ Base.getAddress(), CGF.ConvertTypeForMem(KmpDependInfoPtrTy),
CGF.ConvertTypeForMem(KmpDependInfoTy));
llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
- Addr.getElementType(), Addr.getPointer(),
+ Addr.getElementType(), Addr.emitRawPointer(CGF),
llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
DepObjAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(DepObjAddr,
CGF.VoidPtrTy);
@@ -4455,10 +4474,10 @@ void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
LValue Base;
std::tie(NumDeps, Base) = getDepobjElements(CGF, DepobjLVal, Loc);
- Address Begin = Base.getAddress(CGF);
+ Address Begin = Base.getAddress();
// Cast from pointer to array type to pointer to single element.
- llvm::Value *End = CGF.Builder.CreateGEP(
- Begin.getElementType(), Begin.getPointer(), NumDeps);
+ llvm::Value *End = CGF.Builder.CreateGEP(Begin.getElementType(),
+ Begin.emitRawPointer(CGF), NumDeps);
// The basic structure here is a while-do loop.
llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.body");
llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.done");
@@ -4466,7 +4485,7 @@ void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
CGF.EmitBlock(BodyBB);
llvm::PHINode *ElementPHI =
CGF.Builder.CreatePHI(Begin.getType(), 2, "omp.elementPast");
- ElementPHI->addIncoming(Begin.getPointer(), EntryBB);
+ ElementPHI->addIncoming(Begin.emitRawPointer(CGF), EntryBB);
Begin = Begin.withPointer(ElementPHI, KnownNonNull);
Base = CGF.MakeAddrLValue(Begin, KmpDependInfoTy, Base.getBaseInfo(),
Base.getTBAAInfo());
@@ -4480,12 +4499,12 @@ void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
FlagsLVal);
// Shift the address forward by one element.
- Address ElementNext =
- CGF.Builder.CreateConstGEP(Begin, /*Index=*/1, "omp.elementNext");
- ElementPHI->addIncoming(ElementNext.getPointer(),
- CGF.Builder.GetInsertBlock());
+ llvm::Value *ElementNext =
+ CGF.Builder.CreateConstGEP(Begin, /*Index=*/1, "omp.elementNext")
+ .emitRawPointer(CGF);
+ ElementPHI->addIncoming(ElementNext, CGF.Builder.GetInsertBlock());
llvm::Value *IsEmpty =
- CGF.Builder.CreateICmpEQ(ElementNext.getPointer(), End, "omp.isempty");
+ CGF.Builder.CreateICmpEQ(ElementNext, End, "omp.isempty");
CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
// Done.
CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
@@ -4528,7 +4547,7 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
DepTaskArgs[1] = ThreadID;
DepTaskArgs[2] = NewTask;
DepTaskArgs[3] = NumOfElements;
- DepTaskArgs[4] = DependenciesArray.getPointer();
+ DepTaskArgs[4] = DependenciesArray.emitRawPointer(CGF);
DepTaskArgs[5] = CGF.Builder.getInt32(0);
DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
}
@@ -4560,7 +4579,7 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
DepWaitTaskArgs[0] = UpLoc;
DepWaitTaskArgs[1] = ThreadID;
DepWaitTaskArgs[2] = NumOfElements;
- DepWaitTaskArgs[3] = DependenciesArray.getPointer();
+ DepWaitTaskArgs[3] = DependenciesArray.emitRawPointer(CGF);
DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
DepWaitTaskArgs[6] =
@@ -4641,24 +4660,21 @@ void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
*std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound));
const auto *LBVar =
cast<VarDecl>(cast<DeclRefExpr>(D.getLowerBoundVariable())->getDecl());
- CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(CGF),
- LBLVal.getQuals(),
+ CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(), LBLVal.getQuals(),
/*IsInitializer=*/true);
LValue UBLVal = CGF.EmitLValueForField(
Result.TDBase,
*std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound));
const auto *UBVar =
cast<VarDecl>(cast<DeclRefExpr>(D.getUpperBoundVariable())->getDecl());
- CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(CGF),
- UBLVal.getQuals(),
+ CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(), UBLVal.getQuals(),
/*IsInitializer=*/true);
LValue StLVal = CGF.EmitLValueForField(
Result.TDBase,
*std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride));
const auto *StVar =
cast<VarDecl>(cast<DeclRefExpr>(D.getStrideVariable())->getDecl());
- CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(CGF),
- StLVal.getQuals(),
+ CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(), StLVal.getQuals(),
/*IsInitializer=*/true);
// Store reductions address.
LValue RedLVal = CGF.EmitLValueForField(
@@ -4667,7 +4683,7 @@ void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
if (Data.Reductions) {
CGF.EmitStoreOfScalar(Data.Reductions, RedLVal);
} else {
- CGF.EmitNullInitialization(RedLVal.getAddress(CGF),
+ CGF.EmitNullInitialization(RedLVal.getAddress(),
CGF.getContext().VoidPtrTy);
}
enum { NoSchedule = 0, Grainsize = 1, NumTasks = 2 };
@@ -4722,8 +4738,8 @@ static void EmitOMPAggregateReduction(
const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr);
- llvm::Value *RHSBegin = RHSAddr.getPointer();
- llvm::Value *LHSBegin = LHSAddr.getPointer();
+ llvm::Value *RHSBegin = RHSAddr.emitRawPointer(CGF);
+ llvm::Value *LHSBegin = LHSAddr.emitRawPointer(CGF);
// Cast from pointer to array type to pointer to single element.
llvm::Value *LHSEnd =
CGF.Builder.CreateGEP(LHSAddr.getElementType(), LHSBegin, NumElements);
@@ -4987,7 +5003,7 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
QualType ReductionArrayTy = C.getConstantArrayType(
C.VoidPtrTy, ArraySize, nullptr, ArraySizeModifier::Normal,
/*IndexTypeQuals=*/0);
- Address ReductionList =
+ RawAddress ReductionList =
CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
const auto *IPriv = Privates.begin();
unsigned Idx = 0;
@@ -5459,7 +5475,7 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
C.getConstantArrayType(RDType, ArraySize, nullptr,
ArraySizeModifier::Normal, /*IndexTypeQuals=*/0);
// kmp_task_red_input_t .rd_input.[Size];
- Address TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input.");
+ RawAddress TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input.");
ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionOrigs,
Data.ReductionCopies, Data.ReductionOps);
for (unsigned Cnt = 0; Cnt < Size; ++Cnt) {
@@ -5470,7 +5486,7 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
TaskRedInput.getElementType(), TaskRedInput.getPointer(), Idxs,
/*SignedIndices=*/false, /*IsSubtraction=*/false, Loc,
".rd_input.gep.");
- LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType);
+ LValue ElemLVal = CGF.MakeNaturalAlignRawAddrLValue(GEP, RDType);
// ElemLVal.reduce_shar = &Shareds[Cnt];
LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD);
RCG.emitSharedOrigLValue(CGF, Cnt);
@@ -5517,8 +5533,7 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1, /*isSigned=*/true),
FlagsLVal);
} else
- CGF.EmitNullInitialization(FlagsLVal.getAddress(CGF),
- FlagsLVal.getType());
+ CGF.EmitNullInitialization(FlagsLVal.getAddress(), FlagsLVal.getType());
}
if (Data.IsReductionWithTaskMod) {
// Build call void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int
@@ -5626,7 +5641,7 @@ void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc,
DepWaitTaskArgs[0] = UpLoc;
DepWaitTaskArgs[1] = ThreadID;
DepWaitTaskArgs[2] = NumOfElements;
- DepWaitTaskArgs[3] = DependenciesArray.getPointer();
+ DepWaitTaskArgs[3] = DependenciesArray.emitRawPointer(CGF);
DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
DepWaitTaskArgs[6] =
@@ -5845,11 +5860,11 @@ void CGOpenMPRuntime::emitUsesAllocatorsInit(CodeGenFunction &CGF,
.getLimitedValue());
LValue AllocatorTraitsLVal = CGF.EmitLValue(AllocatorTraits);
Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- AllocatorTraitsLVal.getAddress(CGF), CGF.VoidPtrPtrTy, CGF.VoidPtrTy);
+ AllocatorTraitsLVal.getAddress(), CGF.VoidPtrPtrTy, CGF.VoidPtrTy);
AllocatorTraitsLVal = CGF.MakeAddrLValue(Addr, CGF.getContext().VoidPtrTy,
AllocatorTraitsLVal.getBaseInfo(),
AllocatorTraitsLVal.getTBAAInfo());
- llvm::Value *Traits = Addr.getPointer();
+ llvm::Value *Traits = Addr.emitRawPointer(CGF);
llvm::Value *AllocatorVal =
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
@@ -6666,8 +6681,8 @@ private:
// Given that an array section is considered a built-in type, we need to
// do the calculation based on the length of the section instead of relying
// on CGF.getTypeSize(E->getType()).
- if (const auto *OAE = dyn_cast<OMPArraySectionExpr>(E)) {
- QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(
+ if (const auto *OAE = dyn_cast<ArraySectionExpr>(E)) {
+ QualType BaseTy = ArraySectionExpr::getBaseOriginalType(
OAE->getBase()->IgnoreParenImpCasts())
.getCanonicalType();
@@ -6773,7 +6788,7 @@ private:
/// Return true if the provided expression is a final array section. A
/// final array section, is one whose length can't be proved to be one.
bool isFinalArraySectionExpression(const Expr *E) const {
- const auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
+ const auto *OASE = dyn_cast<ArraySectionExpr>(E);
// It is not an array section and therefore not a unity-size one.
if (!OASE)
@@ -6789,11 +6804,11 @@ private:
// for this dimension. Also, we should always expect a length if the
// base type is pointer.
if (!Length) {
- QualType BaseQTy = OMPArraySectionExpr::getBaseOriginalType(
+ QualType BaseQTy = ArraySectionExpr::getBaseOriginalType(
OASE->getBase()->IgnoreParenImpCasts())
.getCanonicalType();
if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
- return ATy->getSize().getSExtValue() != 1;
+ return ATy->getSExtSize() != 1;
// If we don't have a constant dimension length, we have to consider
// the current section as having any size, so it is not necessarily
// unitary. If it happen to be unity size, that's user fault.
@@ -6825,7 +6840,8 @@ private:
const ValueDecl *Mapper = nullptr, bool ForDeviceAddr = false,
const ValueDecl *BaseDecl = nullptr, const Expr *MapExpr = nullptr,
ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
- OverlappedElements = std::nullopt) const {
+ OverlappedElements = std::nullopt,
+ bool AreBothBasePtrAndPteeMapped = false) const {
// The following summarizes what has to be generated for each map and the
// types below. The generated information is expressed in this order:
// base pointer, section pointer, size, flags
@@ -7001,6 +7017,10 @@ private:
// &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(4) | PTR_AND_OBJ | TO
// (*) the struct this entry pertains to is the 4th element in the list
// of arguments, hence MEMBER_OF(4)
+ //
+ // map(p, p[:100])
+ // ===> map(p[:100])
+ // &p, &p[0], 100*sizeof(float), TARGET_PARAM | PTR_AND_OBJ | TO | FROM
// Track if the map information being generated is the first for a capture.
bool IsCaptureFirstInfo = IsFirstComponentList;
@@ -7021,9 +7041,11 @@ private:
Address BP = Address::invalid();
const Expr *AssocExpr = I->getAssociatedExpression();
const auto *AE = dyn_cast<ArraySubscriptExpr>(AssocExpr);
- const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
+ const auto *OASE = dyn_cast<ArraySectionExpr>(AssocExpr);
const auto *OAShE = dyn_cast<OMPArrayShapingExpr>(AssocExpr);
+ if (AreBothBasePtrAndPteeMapped && std::next(I) == CE)
+ return;
if (isa<MemberExpr>(AssocExpr)) {
// The base is the 'this' pointer. The content of the pointer is going
// to be the base of the field being mapped.
@@ -7031,7 +7053,7 @@ private:
} else if ((AE && isa<CXXThisExpr>(AE->getBase()->IgnoreParenImpCasts())) ||
(OASE &&
isa<CXXThisExpr>(OASE->getBase()->IgnoreParenImpCasts()))) {
- BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
+ BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress();
} else if (OAShE &&
isa<CXXThisExpr>(OAShE->getBase()->IgnoreParenCasts())) {
BP = Address(
@@ -7041,7 +7063,7 @@ private:
} else {
// The base is the reference to the variable.
// BP = &Var.
- BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
+ BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress();
if (const auto *VD =
dyn_cast_or_null<VarDecl>(I->getAssociatedDeclaration())) {
if (std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
@@ -7066,8 +7088,9 @@ private:
// can be associated with the combined storage if shared memory mode is
// active or the base declaration is not global variable.
const auto *VD = dyn_cast<VarDecl>(I->getAssociatedDeclaration());
- if (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() ||
- !VD || VD->hasLocalStorage())
+ if (!AreBothBasePtrAndPteeMapped &&
+ (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() ||
+ !VD || VD->hasLocalStorage()))
BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());
else
FirstPointerInComplexData = true;
@@ -7173,14 +7196,14 @@ private:
// special treatment for array sections given that they are built-in
// types.
const auto *OASE =
- dyn_cast<OMPArraySectionExpr>(I->getAssociatedExpression());
+ dyn_cast<ArraySectionExpr>(I->getAssociatedExpression());
const auto *OAShE =
dyn_cast<OMPArrayShapingExpr>(I->getAssociatedExpression());
const auto *UO = dyn_cast<UnaryOperator>(I->getAssociatedExpression());
const auto *BO = dyn_cast<BinaryOperator>(I->getAssociatedExpression());
bool IsPointer =
OAShE ||
- (OASE && OMPArraySectionExpr::getBaseOriginalType(OASE)
+ (OASE && ArraySectionExpr::getBaseOriginalType(OASE)
.getCanonicalType()
->isAnyPointerType()) ||
I->getAssociatedExpression()->getType()->isAnyPointerType();
@@ -7201,7 +7224,7 @@ private:
assert((Next == CE ||
isa<MemberExpr>(Next->getAssociatedExpression()) ||
isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) ||
- isa<OMPArraySectionExpr>(Next->getAssociatedExpression()) ||
+ isa<ArraySectionExpr>(Next->getAssociatedExpression()) ||
isa<OMPArrayShapingExpr>(Next->getAssociatedExpression()) ||
isa<UnaryOperator>(Next->getAssociatedExpression()) ||
isa<BinaryOperator>(Next->getAssociatedExpression())) &&
@@ -7239,13 +7262,13 @@ private:
LValue BaseLVal = EmitMemberExprBase(CGF, ME);
LowestElem = CGF.EmitLValueForFieldInitialization(
BaseLVal, cast<FieldDecl>(MapDecl))
- .getAddress(CGF);
+ .getAddress();
LB = CGF.EmitLoadOfReferenceLValue(LowestElem, MapDecl->getType())
- .getAddress(CGF);
+ .getAddress();
} else {
LowestElem = LB =
CGF.EmitOMPSharedLValue(I->getAssociatedExpression())
- .getAddress(CGF);
+ .getAddress();
}
// If this component is a pointer inside the base struct then we don't
@@ -7303,23 +7326,25 @@ private:
LValue BaseLVal = EmitMemberExprBase(CGF, ME);
ComponentLB =
CGF.EmitLValueForFieldInitialization(BaseLVal, FD)
- .getAddress(CGF);
+ .getAddress();
} else {
ComponentLB =
CGF.EmitOMPSharedLValue(MC.getAssociatedExpression())
- .getAddress(CGF);
+ .getAddress();
}
- Size = CGF.Builder.CreatePtrDiff(
- CGF.Int8Ty, ComponentLB.getPointer(), LB.getPointer());
+ llvm::Value *ComponentLBPtr = ComponentLB.emitRawPointer(CGF);
+ llvm::Value *LBPtr = LB.emitRawPointer(CGF);
+ Size = CGF.Builder.CreatePtrDiff(CGF.Int8Ty, ComponentLBPtr,
+ LBPtr);
break;
}
}
assert(Size && "Failed to determine structure size");
CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
- CombinedInfo.BasePointers.push_back(BP.getPointer());
+ CombinedInfo.BasePointers.push_back(BP.emitRawPointer(CGF));
CombinedInfo.DevicePtrDecls.push_back(nullptr);
CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
- CombinedInfo.Pointers.push_back(LB.getPointer());
+ CombinedInfo.Pointers.push_back(LB.emitRawPointer(CGF));
CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
Size, CGF.Int64Ty, /*isSigned=*/true));
CombinedInfo.Types.push_back(Flags);
@@ -7329,13 +7354,14 @@ private:
LB = CGF.Builder.CreateConstGEP(ComponentLB, 1);
}
CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
- CombinedInfo.BasePointers.push_back(BP.getPointer());
+ CombinedInfo.BasePointers.push_back(BP.emitRawPointer(CGF));
CombinedInfo.DevicePtrDecls.push_back(nullptr);
CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
- CombinedInfo.Pointers.push_back(LB.getPointer());
+ CombinedInfo.Pointers.push_back(LB.emitRawPointer(CGF));
+ llvm::Value *LBPtr = LB.emitRawPointer(CGF);
Size = CGF.Builder.CreatePtrDiff(
- CGF.Int8Ty, CGF.Builder.CreateConstGEP(HB, 1).getPointer(),
- LB.getPointer());
+ CGF.Int8Ty, CGF.Builder.CreateConstGEP(HB, 1).emitRawPointer(CGF),
+ LBPtr);
CombinedInfo.Sizes.push_back(
CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
CombinedInfo.Types.push_back(Flags);
@@ -7353,20 +7379,21 @@ private:
(Next == CE && MapType != OMPC_MAP_unknown)) {
if (!IsMappingWholeStruct) {
CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
- CombinedInfo.BasePointers.push_back(BP.getPointer());
+ CombinedInfo.BasePointers.push_back(BP.emitRawPointer(CGF));
CombinedInfo.DevicePtrDecls.push_back(nullptr);
CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
- CombinedInfo.Pointers.push_back(LB.getPointer());
+ CombinedInfo.Pointers.push_back(LB.emitRawPointer(CGF));
CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
Size, CGF.Int64Ty, /*isSigned=*/true));
CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
: 1);
} else {
StructBaseCombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
- StructBaseCombinedInfo.BasePointers.push_back(BP.getPointer());
+ StructBaseCombinedInfo.BasePointers.push_back(
+ BP.emitRawPointer(CGF));
StructBaseCombinedInfo.DevicePtrDecls.push_back(nullptr);
StructBaseCombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
- StructBaseCombinedInfo.Pointers.push_back(LB.getPointer());
+ StructBaseCombinedInfo.Pointers.push_back(LB.emitRawPointer(CGF));
StructBaseCombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
Size, CGF.Int64Ty, /*isSigned=*/true));
StructBaseCombinedInfo.NonContigInfo.Dims.push_back(
@@ -7385,11 +7412,13 @@ private:
// same expression except for the first one. We also need to signal
// this map is the first one that relates with the current capture
// (there is a set of entries for each capture).
- OpenMPOffloadMappingFlags Flags = getMapTypeBits(
- MapType, MapModifiers, MotionModifiers, IsImplicit,
- !IsExpressionFirstInfo || RequiresReference ||
- FirstPointerInComplexData || IsMemberReference,
- IsCaptureFirstInfo && !RequiresReference, IsNonContiguous);
+ OpenMPOffloadMappingFlags Flags =
+ getMapTypeBits(MapType, MapModifiers, MotionModifiers, IsImplicit,
+ !IsExpressionFirstInfo || RequiresReference ||
+ FirstPointerInComplexData || IsMemberReference,
+ AreBothBasePtrAndPteeMapped ||
+ (IsCaptureFirstInfo && !RequiresReference),
+ IsNonContiguous);
if (!IsExpressionFirstInfo || IsMemberReference) {
// If we have a PTR_AND_OBJ pair where the OBJ is a pointer as well,
@@ -7429,8 +7458,8 @@ private:
PartialStruct.LowestElem = {FieldIndex, LowestElem};
if (IsFinalArraySection) {
Address HB =
- CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false)
- .getAddress(CGF);
+ CGF.EmitArraySectionExpr(OASE, /*IsLowerBound=*/false)
+ .getAddress();
PartialStruct.HighestElem = {FieldIndex, HB};
} else {
PartialStruct.HighestElem = {FieldIndex, LowestElem};
@@ -7442,8 +7471,8 @@ private:
} else if (FieldIndex > PartialStruct.HighestElem.first) {
if (IsFinalArraySection) {
Address HB =
- CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false)
- .getAddress(CGF);
+ CGF.EmitArraySectionExpr(OASE, /*IsLowerBound=*/false)
+ .getAddress();
PartialStruct.HighestElem = {FieldIndex, HB};
} else {
PartialStruct.HighestElem = {FieldIndex, LowestElem};
@@ -7500,12 +7529,12 @@ private:
for (const OMPClauseMappableExprCommon::MappableComponent &Component :
Components) {
const Expr *AssocExpr = Component.getAssociatedExpression();
- const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
+ const auto *OASE = dyn_cast<ArraySectionExpr>(AssocExpr);
if (!OASE)
continue;
- QualType Ty = OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
+ QualType Ty = ArraySectionExpr::getBaseOriginalType(OASE->getBase());
auto *CAT = Context.getAsConstantArrayType(Ty);
auto *VAT = Context.getAsVariableArrayType(Ty);
@@ -7543,8 +7572,8 @@ private:
// it.
if (DimSizes.size() < Components.size() - 1) {
if (CAT)
- DimSizes.push_back(llvm::ConstantInt::get(
- CGF.Int64Ty, CAT->getSize().getZExtValue()));
+ DimSizes.push_back(
+ llvm::ConstantInt::get(CGF.Int64Ty, CAT->getZExtSize()));
else if (VAT)
DimSizes.push_back(CGF.Builder.CreateIntCast(
CGF.EmitScalarExpr(VAT->getSizeExpr()), CGF.Int64Ty,
@@ -7579,7 +7608,7 @@ private:
continue;
}
- const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
+ const auto *OASE = dyn_cast<ArraySectionExpr>(AssocExpr);
if (!OASE)
continue;
@@ -7705,12 +7734,15 @@ private:
for (const auto &I : RD->bases()) {
if (I.isVirtual())
continue;
- const auto *Base = I.getType()->getAsCXXRecordDecl();
+
+ QualType BaseTy = I.getType();
+ const auto *Base = BaseTy->getAsCXXRecordDecl();
// Ignore empty bases.
- if (Base->isEmpty() || CGF.getContext()
- .getASTRecordLayout(Base)
- .getNonVirtualSize()
- .isZero())
+ if (isEmptyRecordForLayout(CGF.getContext(), BaseTy) ||
+ CGF.getContext()
+ .getASTRecordLayout(Base)
+ .getNonVirtualSize()
+ .isZero())
continue;
unsigned FieldIndex = RL.getNonVirtualBaseLLVMFieldNo(Base);
@@ -7718,10 +7750,12 @@ private:
}
// Fill in virtual bases.
for (const auto &I : RD->vbases()) {
- const auto *Base = I.getType()->getAsCXXRecordDecl();
+ QualType BaseTy = I.getType();
// Ignore empty bases.
- if (Base->isEmpty())
+ if (isEmptyRecordForLayout(CGF.getContext(), BaseTy))
continue;
+
+ const auto *Base = BaseTy->getAsCXXRecordDecl();
unsigned FieldIndex = RL.getVirtualBaseIndex(Base);
if (RecordLayout[FieldIndex])
continue;
@@ -7732,7 +7766,8 @@ private:
for (const auto *Field : RD->fields()) {
// Fill in non-bitfields. (Bitfields always use a zero pattern, which we
// will fill in later.)
- if (!Field->isBitField() && !Field->isZeroSize(CGF.getContext())) {
+ if (!Field->isBitField() &&
+ !isEmptyFieldForLayout(CGF.getContext(), Field)) {
unsigned FieldIndex = RL.getLLVMFieldNo(Field);
RecordLayout[FieldIndex] = Field;
}
@@ -8011,6 +8046,21 @@ private:
MapCombinedInfoTy StructBaseCurInfo;
const Decl *D = Data.first;
const ValueDecl *VD = cast_or_null<ValueDecl>(D);
+ bool HasMapBasePtr = false;
+ bool HasMapArraySec = false;
+ if (VD && VD->getType()->isAnyPointerType()) {
+ for (const auto &M : Data.second) {
+ HasMapBasePtr = any_of(M, [](const MapInfo &L) {
+ return isa_and_present<DeclRefExpr>(L.VarRef);
+ });
+ HasMapArraySec = any_of(M, [](const MapInfo &L) {
+ return isa_and_present<ArraySectionExpr, ArraySubscriptExpr>(
+ L.VarRef);
+ });
+ if (HasMapBasePtr && HasMapArraySec)
+ break;
+ }
+ }
for (const auto &M : Data.second) {
for (const MapInfo &L : M) {
assert(!L.Components.empty() &&
@@ -8027,7 +8077,8 @@ private:
CurInfo, StructBaseCurInfo, PartialStruct,
/*IsFirstComponentList=*/false, L.IsImplicit,
/*GenerateAllInfoForClauses*/ true, L.Mapper, L.ForDeviceAddr, VD,
- L.VarRef);
+ L.VarRef, /*OverlappedElements*/ std::nullopt,
+ HasMapBasePtr && HasMapArraySec);
// If this entry relates to a device pointer, set the relevant
// declaration and add the 'return pointer' flag.
@@ -8208,11 +8259,11 @@ public:
}
CombinedInfo.Exprs.push_back(VD);
// Base is the base of the struct
- CombinedInfo.BasePointers.push_back(PartialStruct.Base.getPointer());
+ CombinedInfo.BasePointers.push_back(PartialStruct.Base.emitRawPointer(CGF));
CombinedInfo.DevicePtrDecls.push_back(nullptr);
CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
// Pointer is the address of the lowest element
- llvm::Value *LB = LBAddr.getPointer();
+ llvm::Value *LB = LBAddr.emitRawPointer(CGF);
const CXXMethodDecl *MD =
CGF.CurFuncDecl ? dyn_cast<CXXMethodDecl>(CGF.CurFuncDecl) : nullptr;
const CXXRecordDecl *RD = MD ? MD->getParent() : nullptr;
@@ -8226,7 +8277,7 @@ public:
// if the this[:1] expression had appeared in a map clause with a map-type
// of tofrom.
// Emit this[:1]
- CombinedInfo.Pointers.push_back(PartialStruct.Base.getPointer());
+ CombinedInfo.Pointers.push_back(PartialStruct.Base.emitRawPointer(CGF));
QualType Ty = MD->getFunctionObjectParameterType();
llvm::Value *Size =
CGF.Builder.CreateIntCast(CGF.getTypeSize(Ty), CGF.Int64Ty,
@@ -8235,7 +8286,7 @@ public:
} else {
CombinedInfo.Pointers.push_back(LB);
// Size is (addr of {highest+1} element) - (addr of lowest element)
- llvm::Value *HB = HBAddr.getPointer();
+ llvm::Value *HB = HBAddr.emitRawPointer(CGF);
llvm::Value *HAddr = CGF.Builder.CreateConstGEP1_32(
HBAddr.getElementType(), HB, /*Idx0=*/1);
llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy);
@@ -8483,6 +8534,8 @@ public:
assert(CurDir.is<const OMPExecutableDirective *>() &&
"Expect a executable directive");
const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
+ bool HasMapBasePtr = false;
+ bool HasMapArraySec = false;
for (const auto *C : CurExecDir->getClausesOfKind<OMPMapClause>()) {
const auto *EI = C->getVarRefs().begin();
for (const auto L : C->decl_component_lists(VD)) {
@@ -8494,6 +8547,11 @@ public:
assert(VDecl == VD && "We got information for the wrong declaration??");
assert(!Components.empty() &&
"Not expecting declaration with no component lists.");
+ if (VD && E && VD->getType()->isAnyPointerType() && isa<DeclRefExpr>(E))
+ HasMapBasePtr = true;
+ if (VD && E && VD->getType()->isAnyPointerType() &&
+ (isa<ArraySectionExpr>(E) || isa<ArraySubscriptExpr>(E)))
+ HasMapArraySec = true;
DeclComponentLists.emplace_back(Components, C->getMapType(),
C->getMapTypeModifiers(),
C->isImplicit(), Mapper, E);
@@ -8676,7 +8734,9 @@ public:
MapType, MapModifiers, std::nullopt, Components, CombinedInfo,
StructBaseCombinedInfo, PartialStruct, IsFirstComponentList,
IsImplicit, /*GenerateAllInfoForClauses*/ false, Mapper,
- /*ForDeviceAddr=*/false, VD, VarRef);
+ /*ForDeviceAddr=*/false, VD, VarRef,
+ /*OverlappedElements*/ std::nullopt,
+ HasMapBasePtr && HasMapArraySec);
IsFirstComponentList = false;
}
}
@@ -8744,7 +8804,7 @@ public:
Address PtrAddr = CGF.EmitLoadOfReference(CGF.MakeAddrLValue(
CV, ElementType, CGF.getContext().getDeclAlign(VD),
AlignmentSource::Decl));
- CombinedInfo.Pointers.push_back(PtrAddr.getPointer());
+ CombinedInfo.Pointers.push_back(PtrAddr.emitRawPointer(CGF));
} else {
CombinedInfo.Pointers.push_back(CV);
}
@@ -8770,7 +8830,7 @@ static ValueDecl *getDeclFromThisExpr(const Expr *E) {
if (!E)
return nullptr;
- if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E->IgnoreParenCasts()))
+ if (const auto *OASE = dyn_cast<ArraySectionExpr>(E->IgnoreParenCasts()))
if (const MemberExpr *ME =
dyn_cast<MemberExpr>(OASE->getBase()->IgnoreParenImpCasts()))
return ME->getMemberDecl();
@@ -8876,7 +8936,8 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
switch (D.getDirectiveKind()) {
case OMPD_target:
- // For now, just treat 'target teams loop' as if it's distributed.
+ // For now, treat 'target' with nested 'teams loop' as if it's
+ // distributed (target teams distribute).
if (isOpenMPDistributeDirective(DKind) || DKind == OMPD_teams_loop)
return NestedDir;
if (DKind == OMPD_teams) {
@@ -9360,7 +9421,8 @@ llvm::Value *CGOpenMPRuntime::emitTargetNumIterationsCall(
SizeEmitter) {
OpenMPDirectiveKind Kind = D.getDirectiveKind();
const OMPExecutableDirective *TD = &D;
- // Get nested teams distribute kind directive, if any.
+ // Get nested teams distribute kind directive, if any. For now, treat
+ // 'target_teams_loop' as if it's really a target_teams_distribute.
if ((!isOpenMPDistributeDirective(Kind) || !isOpenMPTeamsDirective(Kind)) &&
Kind != OMPD_target_teams_loop)
TD = getNestedDistributeDirective(CGM.getContext(), D);
@@ -9555,10 +9617,11 @@ static void emitTargetCallKernelLaunch(
bool HasNoWait = D.hasClausesOfKind<OMPNowaitClause>();
unsigned NumTargetItems = InputInfo.NumberOfTargetItems;
- llvm::Value *BasePointersArray = InputInfo.BasePointersArray.getPointer();
- llvm::Value *PointersArray = InputInfo.PointersArray.getPointer();
- llvm::Value *SizesArray = InputInfo.SizesArray.getPointer();
- llvm::Value *MappersArray = InputInfo.MappersArray.getPointer();
+ llvm::Value *BasePointersArray =
+ InputInfo.BasePointersArray.emitRawPointer(CGF);
+ llvm::Value *PointersArray = InputInfo.PointersArray.emitRawPointer(CGF);
+ llvm::Value *SizesArray = InputInfo.SizesArray.emitRawPointer(CGF);
+ llvm::Value *MappersArray = InputInfo.MappersArray.emitRawPointer(CGF);
auto &&EmitTargetCallFallbackCB =
[&OMPRuntime, OutlinedFn, &D, &CapturedVars, RequiresOuterTask, &CS,
@@ -10100,44 +10163,6 @@ bool CGOpenMPRuntime::markAsGlobalTarget(GlobalDecl GD) {
return !AlreadyEmittedTargetDecls.insert(D).second;
}
-llvm::Function *CGOpenMPRuntime::emitRequiresDirectiveRegFun() {
- // If we don't have entries or if we are emitting code for the device, we
- // don't need to do anything.
- if (CGM.getLangOpts().OMPTargetTriples.empty() ||
- CGM.getLangOpts().OpenMPSimd || CGM.getLangOpts().OpenMPIsTargetDevice ||
- (OMPBuilder.OffloadInfoManager.empty() &&
- !HasEmittedDeclareTargetRegion && !HasEmittedTargetRegion))
- return nullptr;
-
- // Create and register the function that handles the requires directives.
- ASTContext &C = CGM.getContext();
-
- llvm::Function *RequiresRegFn;
- {
- CodeGenFunction CGF(CGM);
- const auto &FI = CGM.getTypes().arrangeNullaryFunction();
- llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
- std::string ReqName = getName({"omp_offloading", "requires_reg"});
- RequiresRegFn = CGM.CreateGlobalInitOrCleanUpFunction(FTy, ReqName, FI);
- CGF.StartFunction(GlobalDecl(), C.VoidTy, RequiresRegFn, FI, {});
- // TODO: check for other requires clauses.
- // The requires directive takes effect only when a target region is
- // present in the compilation unit. Otherwise it is ignored and not
- // passed to the runtime. This avoids the runtime from throwing an error
- // for mismatching requires clauses across compilation units that don't
- // contain at least 1 target region.
- assert((HasEmittedTargetRegion || HasEmittedDeclareTargetRegion ||
- !OMPBuilder.OffloadInfoManager.empty()) &&
- "Target or declare target region expected.");
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___tgt_register_requires),
- llvm::ConstantInt::get(
- CGM.Int64Ty, OMPBuilder.Config.getRequiresFlags()));
- CGF.FinishFunction();
- }
- return RequiresRegFn;
-}
-
void CGOpenMPRuntime::emitTeamsCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
SourceLocation Loc,
@@ -10344,15 +10369,12 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
// Source location for the ident struct
llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
- llvm::Value *OffloadingArgs[] = {RTLoc,
- DeviceID,
- PointerNum,
- InputInfo.BasePointersArray.getPointer(),
- InputInfo.PointersArray.getPointer(),
- InputInfo.SizesArray.getPointer(),
- MapTypesArray,
- MapNamesArray,
- InputInfo.MappersArray.getPointer()};
+ SmallVector<llvm::Value *, 13> OffloadingArgs(
+ {RTLoc, DeviceID, PointerNum,
+ InputInfo.BasePointersArray.emitRawPointer(CGF),
+ InputInfo.PointersArray.emitRawPointer(CGF),
+ InputInfo.SizesArray.emitRawPointer(CGF), MapTypesArray, MapNamesArray,
+ InputInfo.MappersArray.emitRawPointer(CGF)});
// Select the right runtime function call for each standalone
// directive.
@@ -10441,6 +10463,12 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
llvm_unreachable("Unexpected standalone target data directive.");
break;
}
+ if (HasNowait) {
+ OffloadingArgs.push_back(llvm::Constant::getNullValue(CGF.Int32Ty));
+ OffloadingArgs.push_back(llvm::Constant::getNullValue(CGF.VoidPtrTy));
+ OffloadingArgs.push_back(llvm::Constant::getNullValue(CGF.Int32Ty));
+ OffloadingArgs.push_back(llvm::Constant::getNullValue(CGF.VoidPtrTy));
+ }
CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), RTLFn),
OffloadingArgs);
@@ -11163,7 +11191,7 @@ void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
getThreadID(CGF, D.getBeginLoc()),
llvm::ConstantInt::getSigned(CGM.Int32Ty, NumIterations.size()),
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateConstArrayGEP(DimsAddr, 0).getPointer(),
+ CGF.Builder.CreateConstArrayGEP(DimsAddr, 0).emitRawPointer(CGF),
CGM.VoidPtrTy)};
llvm::FunctionCallee RTLFn = OMPBuilder.getOrCreateRuntimeFunction(
@@ -11197,7 +11225,8 @@ static void EmitDoacrossOrdered(CodeGenFunction &CGF, CodeGenModule &CGM,
/*Volatile=*/false, Int64Ty);
}
llvm::Value *Args[] = {
- ULoc, ThreadID, CGF.Builder.CreateConstArrayGEP(CntAddr, 0).getPointer()};
+ ULoc, ThreadID,
+ CGF.Builder.CreateConstArrayGEP(CntAddr, 0).emitRawPointer(CGF)};
llvm::FunctionCallee RTLFn;
llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
OMPDoacrossKind<T> ODK;
@@ -11367,7 +11396,7 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
Args[0] = CGF.CGM.getOpenMPRuntime().getThreadID(
CGF, SourceLocation::getFromRawEncoding(LocEncoding));
Args[1] = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- Addr.getPointer(), CGF.VoidPtrTy);
+ Addr.emitRawPointer(CGF), CGF.VoidPtrTy);
llvm::Value *AllocVal = getAllocatorVal(CGF, AllocExpr);
Args[2] = AllocVal;
CGF.EmitRuntimeCall(RTLFn, Args);
@@ -11639,7 +11668,7 @@ Address CGOpenMPRuntime::emitLastprivateConditionalInit(CodeGenFunction &CGF,
CGF.EmitStoreOfScalar(
llvm::ConstantInt::getNullValue(CGF.ConvertTypeForMem(C.CharTy)),
FiredLVal);
- return CGF.EmitLValueForField(BaseLVal, VDField).getAddress(CGF);
+ return CGF.EmitLValueForField(BaseLVal, VDField).getAddress();
}
namespace {
@@ -11725,15 +11754,17 @@ void CGOpenMPRuntime::emitLastprivateConditionalUpdate(CodeGenFunction &CGF,
LLIVTy, getName({UniqueDeclName, "iv"}));
cast<llvm::GlobalVariable>(LastIV)->setAlignment(
IVLVal.getAlignment().getAsAlign());
- LValue LastIVLVal = CGF.MakeNaturalAlignAddrLValue(LastIV, IVLVal.getType());
+ LValue LastIVLVal =
+ CGF.MakeNaturalAlignRawAddrLValue(LastIV, IVLVal.getType());
// Last value of the lastprivate conditional.
// decltype(priv_a) last_a;
llvm::GlobalVariable *Last = OMPBuilder.getOrCreateInternalVariable(
CGF.ConvertTypeForMem(LVal.getType()), UniqueDeclName);
- Last->setAlignment(LVal.getAlignment().getAsAlign());
- LValue LastLVal = CGF.MakeAddrLValue(
- Address(Last, Last->getValueType(), LVal.getAlignment()), LVal.getType());
+ cast<llvm::GlobalVariable>(Last)->setAlignment(
+ LVal.getAlignment().getAsAlign());
+ LValue LastLVal =
+ CGF.MakeRawAddrLValue(Last, LVal.getType(), LVal.getAlignment());
// Global loop counter. Required to handle inner parallel-for regions.
// iv
@@ -11823,7 +11854,7 @@ void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
const FieldDecl* FiredDecl = std::get<2>(It->getSecond());
LValue PrivLVal = CGF.EmitLValue(FoundE);
Address StructAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- PrivLVal.getAddress(CGF),
+ PrivLVal.getAddress(),
CGF.ConvertTypeForMem(CGF.getContext().getPointerType(StructTy)),
CGF.ConvertTypeForMem(StructTy));
LValue BaseLVal =
@@ -11906,9 +11937,8 @@ void CGOpenMPRuntime::emitLastprivateConditionalFinalUpdate(
// The variable was not updated in the region - exit.
if (!GV)
return;
- LValue LPLVal = CGF.MakeAddrLValue(
- Address(GV, GV->getValueType(), PrivLVal.getAlignment()),
- PrivLVal.getType().getNonReferenceType());
+ LValue LPLVal = CGF.MakeRawAddrLValue(
+ GV, PrivLVal.getType().getNonReferenceType(), PrivLVal.getAlignment());
llvm::Value *Res = CGF.EmitLoadOfScalar(LPLVal, Loc);
CGF.EmitStoreOfScalar(Res, PrivLVal);
}
@@ -12005,6 +12035,11 @@ void CGOpenMPSIMDRuntime::emitForDispatchInit(
llvm_unreachable("Not supported in SIMD-only mode");
}
+void CGOpenMPSIMDRuntime::emitForDispatchDeinit(CodeGenFunction &CGF,
+ SourceLocation Loc) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
void CGOpenMPSIMDRuntime::emitForStaticInit(
CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
index b01b39abd160..f65314d014c0 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
@@ -946,6 +946,14 @@ public:
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues);
+ /// This is used for non static scheduled types and when the ordered
+ /// clause is present on the loop construct.
+ ///
+ /// \param CGF Reference to current CodeGenFunction.
+ /// \param Loc Clang source location.
+ ///
+ virtual void emitForDispatchDeinit(CodeGenFunction &CGF, SourceLocation Loc);
+
/// Struct with the values to be passed to the static runtime function
struct StaticRTInput {
/// Size of the iteration variable in bits.
@@ -1068,13 +1076,12 @@ public:
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
- const VarDecl *VD,
- Address VDAddr,
+ const VarDecl *VD, Address VDAddr,
SourceLocation Loc);
/// Returns the address of the variable marked as declare target with link
/// clause OR as declare target with to clause and unified memory.
- virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD);
+ virtual ConstantAddress getAddrOfDeclareTargetVar(const VarDecl *VD);
/// Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
@@ -1407,10 +1414,6 @@ public:
/// \param GD Global to scan.
virtual bool emitTargetGlobal(GlobalDecl GD);
- /// Creates and returns a registration function for when at least one
- /// requires directives was used in the current module.
- llvm::Function *emitRequiresDirectiveRegFun();
-
/// Creates all the offload entries in the current compilation unit
/// along with the associated metadata.
void createOffloadEntriesAndInfoMetadata();
@@ -1834,6 +1837,14 @@ public:
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues) override;
+ /// This is used for non static scheduled types and when the ordered
+ /// clause is present on the loop construct.
+ ///
+ /// \param CGF Reference to current CodeGenFunction.
+ /// \param Loc Clang source location.
+ ///
+ void emitForDispatchDeinit(CodeGenFunction &CGF, SourceLocation Loc) override;
+
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index 299ee1460b3d..8965a14d88a6 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -92,9 +92,9 @@ static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
Base = TempASE->getBase()->IgnoreParenImpCasts();
RefExpr = Base;
- } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
+ } else if (auto *OASE = dyn_cast<ArraySectionExpr>(RefExpr)) {
const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
- while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
+ while (const auto *TempOASE = dyn_cast<ArraySectionExpr>(Base))
Base = TempOASE->getBase()->IgnoreParenImpCasts();
while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
Base = TempASE->getBase()->IgnoreParenImpCasts();
@@ -501,31 +501,6 @@ public:
};
} // anonymous namespace
-/// Get the id of the warp in the block.
-/// We assume that the warp size is 32, which is always the case
-/// on the NVPTX device, to generate more efficient code.
-static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
- CGBuilderTy &Bld = CGF.Builder;
- unsigned LaneIDBits =
- llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size);
- auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
- return Bld.CreateAShr(RT.getGPUThreadID(CGF), LaneIDBits, "nvptx_warp_id");
-}
-
-/// Get the id of the current lane in the Warp.
-/// We assume that the warp size is 32, which is always the case
-/// on the NVPTX device, to generate more efficient code.
-static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
- CGBuilderTy &Bld = CGF.Builder;
- unsigned LaneIDBits =
- llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size);
- assert(LaneIDBits < 32 && "Invalid LaneIDBits size in NVPTX device.");
- unsigned LaneIDMask = ~0u >> (32u - LaneIDBits);
- auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
- return Bld.CreateAnd(RT.getGPUThreadID(CGF), Bld.getInt32(LaneIDMask),
- "nvptx_lane_id");
-}
-
CGOpenMPRuntimeGPU::ExecutionMode
CGOpenMPRuntimeGPU::getExecutionMode() const {
return CurrentExecutionMode;
@@ -646,7 +621,6 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx,
case OMPD_target:
case OMPD_target_teams:
return hasNestedSPMDDirective(Ctx, D);
- case OMPD_target_teams_loop:
case OMPD_target_parallel_loop:
case OMPD_target_parallel:
case OMPD_target_parallel_for:
@@ -658,6 +632,12 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx,
return true;
case OMPD_target_teams_distribute:
return false;
+ case OMPD_target_teams_loop:
+ // Whether this is true or not depends on how the directive will
+ // eventually be emitted.
+ if (auto *TTLD = dyn_cast<OMPTargetTeamsGenericLoopDirective>(&D))
+ return TTLD->canBeParallelFor();
+ return false;
case OMPD_parallel:
case OMPD_for:
case OMPD_parallel_for:
@@ -1096,14 +1076,15 @@ void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
llvm::PointerType *VarPtrTy = CGF.ConvertTypeForMem(VarTy)->getPointerTo();
llvm::Value *CastedVoidPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
VoidPtr, VarPtrTy, VD->getName() + "_on_stack");
- LValue VarAddr = CGF.MakeNaturalAlignAddrLValue(CastedVoidPtr, VarTy);
- Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
+ LValue VarAddr =
+ CGF.MakeNaturalAlignPointeeRawAddrLValue(CastedVoidPtr, VarTy);
+ Rec.second.PrivateAddr = VarAddr.getAddress();
Rec.second.GlobalizedVal = VoidPtr;
// Assign the local allocation to the newly globalized location.
if (EscapedParam) {
CGF.EmitStoreOfScalar(ParValue, VarAddr);
- I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress(CGF));
+ I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress());
}
if (auto *DI = CGF.getDebugInfo())
VoidPtr->setDebugLoc(DI->SourceLocToDebugLoc(VD->getLocation()));
@@ -1117,7 +1098,7 @@ void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
LValue Base = CGF.MakeAddrLValue(AddrSizePair.first, VD->getType(),
CGM.getContext().getDeclAlign(VD),
AlignmentSource::Decl);
- I->getSecond().MappedParams->setVarAddr(CGF, VD, Base.getAddress(CGF));
+ I->getSecond().MappedParams->setVarAddr(CGF, VD, Base.getAddress());
}
I->getSecond().MappedParams->apply(CGF);
}
@@ -1206,8 +1187,8 @@ void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
bool IsBareKernel = D.getSingleClause<OMPXBareClause>();
- Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
- /*Name=*/".zero.addr");
+ RawAddress ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
+ /*Name=*/".zero.addr");
CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddr);
llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
// We don't emit any thread id function call in bare kernel, but because the
@@ -1215,7 +1196,7 @@ void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
if (IsBareKernel)
OutlinedFnArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
else
- OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
+ OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).emitRawPointer(CGF));
OutlinedFnArgs.push_back(ZeroAddr.getPointer());
OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
@@ -1289,7 +1270,7 @@ void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF,
llvm::ConstantInt::get(CGF.Int32Ty, -1),
FnPtr,
ID,
- Bld.CreateBitOrPointerCast(CapturedVarsAddrs.getPointer(),
+ Bld.CreateBitOrPointerCast(CapturedVarsAddrs.emitRawPointer(CGF),
CGF.VoidPtrPtrTy),
llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
@@ -1429,1132 +1410,6 @@ static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
TBAAAccessInfo());
}
-/// This function creates calls to one of two shuffle functions to copy
-/// variables between lanes in a warp.
-static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
- llvm::Value *Elem,
- QualType ElemType,
- llvm::Value *Offset,
- SourceLocation Loc) {
- CodeGenModule &CGM = CGF.CGM;
- CGBuilderTy &Bld = CGF.Builder;
- CGOpenMPRuntimeGPU &RT =
- *(static_cast<CGOpenMPRuntimeGPU *>(&CGM.getOpenMPRuntime()));
- llvm::OpenMPIRBuilder &OMPBuilder = RT.getOMPBuilder();
-
- CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
- assert(Size.getQuantity() <= 8 &&
- "Unsupported bitwidth in shuffle instruction.");
-
- RuntimeFunction ShuffleFn = Size.getQuantity() <= 4
- ? OMPRTL___kmpc_shuffle_int32
- : OMPRTL___kmpc_shuffle_int64;
-
- // Cast all types to 32- or 64-bit values before calling shuffle routines.
- QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
- Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
- llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
- llvm::Value *WarpSize =
- Bld.CreateIntCast(RT.getGPUWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);
-
- llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), ShuffleFn),
- {ElemCast, Offset, WarpSize});
-
- return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
-}
-
-static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
- Address DestAddr, QualType ElemType,
- llvm::Value *Offset, SourceLocation Loc) {
- CGBuilderTy &Bld = CGF.Builder;
-
- CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
- // Create the loop over the big sized data.
- // ptr = (void*)Elem;
- // ptrEnd = (void*) Elem + 1;
- // Step = 8;
- // while (ptr + Step < ptrEnd)
- // shuffle((int64_t)*ptr);
- // Step = 4;
- // while (ptr + Step < ptrEnd)
- // shuffle((int32_t)*ptr);
- // ...
- Address ElemPtr = DestAddr;
- Address Ptr = SrcAddr;
- Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
- Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy, CGF.Int8Ty);
- for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
- if (Size < CharUnits::fromQuantity(IntSize))
- continue;
- QualType IntType = CGF.getContext().getIntTypeForBitwidth(
- CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
- /*Signed=*/1);
- llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
- Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo(),
- IntTy);
- ElemPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- ElemPtr, IntTy->getPointerTo(), IntTy);
- if (Size.getQuantity() / IntSize > 1) {
- llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
- llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
- llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit");
- llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock();
- CGF.EmitBlock(PreCondBB);
- llvm::PHINode *PhiSrc =
- Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
- PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
- llvm::PHINode *PhiDest =
- Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
- PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
- Ptr = Address(PhiSrc, Ptr.getElementType(), Ptr.getAlignment());
- ElemPtr =
- Address(PhiDest, ElemPtr.getElementType(), ElemPtr.getAlignment());
- llvm::Value *PtrDiff = Bld.CreatePtrDiff(
- CGF.Int8Ty, PtrEnd.getPointer(),
- Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr.getPointer(),
- CGF.VoidPtrTy));
- Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
- ThenBB, ExitBB);
- CGF.EmitBlock(ThenBB);
- llvm::Value *Res = createRuntimeShuffleFunction(
- CGF,
- CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
- LValueBaseInfo(AlignmentSource::Type),
- TBAAAccessInfo()),
- IntType, Offset, Loc);
- CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
- LValueBaseInfo(AlignmentSource::Type),
- TBAAAccessInfo());
- Address LocalPtr = Bld.CreateConstGEP(Ptr, 1);
- Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
- PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
- PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
- CGF.EmitBranch(PreCondBB);
- CGF.EmitBlock(ExitBB);
- } else {
- llvm::Value *Res = createRuntimeShuffleFunction(
- CGF,
- CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
- LValueBaseInfo(AlignmentSource::Type),
- TBAAAccessInfo()),
- IntType, Offset, Loc);
- CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
- LValueBaseInfo(AlignmentSource::Type),
- TBAAAccessInfo());
- Ptr = Bld.CreateConstGEP(Ptr, 1);
- ElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
- }
- Size = Size % IntSize;
- }
-}
-
-namespace {
-enum CopyAction : unsigned {
- // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
- // the warp using shuffle instructions.
- RemoteLaneToThread,
- // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
- ThreadCopy,
-};
-} // namespace
-
-struct CopyOptionsTy {
- llvm::Value *RemoteLaneOffset;
- llvm::Value *ScratchpadIndex;
- llvm::Value *ScratchpadWidth;
-};
-
-/// Emit instructions to copy a Reduce list, which contains partially
-/// aggregated values, in the specified direction.
-static void emitReductionListCopy(
- CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
- ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
- CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
-
- CodeGenModule &CGM = CGF.CGM;
- ASTContext &C = CGM.getContext();
- CGBuilderTy &Bld = CGF.Builder;
-
- llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
-
- // Iterates, element-by-element, through the source Reduce list and
- // make a copy.
- unsigned Idx = 0;
- for (const Expr *Private : Privates) {
- Address SrcElementAddr = Address::invalid();
- Address DestElementAddr = Address::invalid();
- Address DestElementPtrAddr = Address::invalid();
- // Should we shuffle in an element from a remote lane?
- bool ShuffleInElement = false;
- // Set to true to update the pointer in the dest Reduce list to a
- // newly created element.
- bool UpdateDestListPtr = false;
- QualType PrivatePtrType = C.getPointerType(Private->getType());
- llvm::Type *PrivateLlvmPtrType = CGF.ConvertType(PrivatePtrType);
-
- switch (Action) {
- case RemoteLaneToThread: {
- // Step 1.1: Get the address for the src element in the Reduce list.
- Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
- SrcElementAddr = CGF.EmitLoadOfPointer(
- SrcElementPtrAddr.withElementType(PrivateLlvmPtrType),
- PrivatePtrType->castAs<PointerType>());
-
- // Step 1.2: Create a temporary to store the element in the destination
- // Reduce list.
- DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
- DestElementAddr =
- CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
- ShuffleInElement = true;
- UpdateDestListPtr = true;
- break;
- }
- case ThreadCopy: {
- // Step 1.1: Get the address for the src element in the Reduce list.
- Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
- SrcElementAddr = CGF.EmitLoadOfPointer(
- SrcElementPtrAddr.withElementType(PrivateLlvmPtrType),
- PrivatePtrType->castAs<PointerType>());
-
- // Step 1.2: Get the address for dest element. The destination
- // element has already been created on the thread's stack.
- DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
- DestElementAddr = CGF.EmitLoadOfPointer(
- DestElementPtrAddr.withElementType(PrivateLlvmPtrType),
- PrivatePtrType->castAs<PointerType>());
- break;
- }
- }
-
- // Regardless of src and dest of copy, we emit the load of src
- // element as this is required in all directions
- SrcElementAddr = SrcElementAddr.withElementType(
- CGF.ConvertTypeForMem(Private->getType()));
- DestElementAddr =
- DestElementAddr.withElementType(SrcElementAddr.getElementType());
-
- // Now that all active lanes have read the element in the
- // Reduce list, shuffle over the value from the remote lane.
- if (ShuffleInElement) {
- shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
- RemoteLaneOffset, Private->getExprLoc());
- } else {
- switch (CGF.getEvaluationKind(Private->getType())) {
- case TEK_Scalar: {
- llvm::Value *Elem = CGF.EmitLoadOfScalar(
- SrcElementAddr, /*Volatile=*/false, Private->getType(),
- Private->getExprLoc(), LValueBaseInfo(AlignmentSource::Type),
- TBAAAccessInfo());
- // Store the source element value to the dest element address.
- CGF.EmitStoreOfScalar(
- Elem, DestElementAddr, /*Volatile=*/false, Private->getType(),
- LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
- break;
- }
- case TEK_Complex: {
- CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex(
- CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
- Private->getExprLoc());
- CGF.EmitStoreOfComplex(
- Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
- /*isInit=*/false);
- break;
- }
- case TEK_Aggregate:
- CGF.EmitAggregateCopy(
- CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
- CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
- Private->getType(), AggValueSlot::DoesNotOverlap);
- break;
- }
- }
-
- // Step 3.1: Modify reference in dest Reduce list as needed.
- // Modifying the reference in Reduce list to point to the newly
- // created element. The element is live in the current function
- // scope and that of functions it invokes (i.e., reduce_function).
- // RemoteReduceData[i] = (void*)&RemoteElem
- if (UpdateDestListPtr) {
- CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
- DestElementAddr.getPointer(), CGF.VoidPtrTy),
- DestElementPtrAddr, /*Volatile=*/false,
- C.VoidPtrTy);
- }
-
- ++Idx;
- }
-}
-
-/// This function emits a helper that gathers Reduce lists from the first
-/// lane of every active warp to lanes in the first warp.
-///
-/// void inter_warp_copy_func(void* reduce_data, num_warps)
-/// shared smem[warp_size];
-/// For all data entries D in reduce_data:
-/// sync
-/// If (I am the first lane in each warp)
-/// Copy my local D to smem[warp_id]
-/// sync
-/// if (I am the first warp)
-/// Copy smem[thread_id] to my local D
-static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
- ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy,
- SourceLocation Loc) {
- ASTContext &C = CGM.getContext();
- llvm::Module &M = CGM.getModule();
-
- // ReduceList: thread local Reduce list.
- // At the stage of the computation when this function is called, partially
- // aggregated values reside in the first lane of every active warp.
- ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamKind::Other);
- // NumWarps: number of warps active in the parallel region. This could
- // be smaller than 32 (max warps in a CTA) for partial block reduction.
- ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.getIntTypeForBitwidth(32, /* Signed */ true),
- ImplicitParamKind::Other);
- FunctionArgList Args;
- Args.push_back(&ReduceListArg);
- Args.push_back(&NumWarpsArg);
-
- const CGFunctionInfo &CGFI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
- llvm::GlobalValue::InternalLinkage,
- "_omp_reduction_inter_warp_copy_func", &M);
- CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
- Fn->setDoesNotRecurse();
- CodeGenFunction CGF(CGM);
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
-
- CGBuilderTy &Bld = CGF.Builder;
-
- // This array is used as a medium to transfer, one reduce element at a time,
- // the data from the first lane of every warp to lanes in the first warp
- // in order to perform the final step of a reduction in a parallel region
- // (reduction across warps). The array is placed in NVPTX __shared__ memory
- // for reduced latency, as well as to have a distinct copy for concurrently
- // executing target regions. The array is declared with common linkage so
- // as to be shared across compilation units.
- StringRef TransferMediumName =
- "__openmp_nvptx_data_transfer_temporary_storage";
- llvm::GlobalVariable *TransferMedium =
- M.getGlobalVariable(TransferMediumName);
- unsigned WarpSize = CGF.getTarget().getGridValue().GV_Warp_Size;
- if (!TransferMedium) {
- auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize);
- unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
- TransferMedium = new llvm::GlobalVariable(
- M, Ty, /*isConstant=*/false, llvm::GlobalVariable::WeakAnyLinkage,
- llvm::UndefValue::get(Ty), TransferMediumName,
- /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
- SharedAddressSpace);
- CGM.addCompilerUsedGlobal(TransferMedium);
- }
-
- auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
- // Get the CUDA thread id of the current OpenMP thread on the GPU.
- llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
- // nvptx_lane_id = nvptx_id % warpsize
- llvm::Value *LaneID = getNVPTXLaneID(CGF);
- // nvptx_warp_id = nvptx_id / warpsize
- llvm::Value *WarpID = getNVPTXWarpID(CGF);
-
- Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
- llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
- Address LocalReduceList(
- Bld.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfScalar(
- AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc,
- LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()),
- ElemTy->getPointerTo()),
- ElemTy, CGF.getPointerAlign());
-
- unsigned Idx = 0;
- for (const Expr *Private : Privates) {
- //
- // Warp master copies reduce element to transfer medium in __shared__
- // memory.
- //
- unsigned RealTySize =
- C.getTypeSizeInChars(Private->getType())
- .alignTo(C.getTypeAlignInChars(Private->getType()))
- .getQuantity();
- for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /=2) {
- unsigned NumIters = RealTySize / TySize;
- if (NumIters == 0)
- continue;
- QualType CType = C.getIntTypeForBitwidth(
- C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1);
- llvm::Type *CopyType = CGF.ConvertTypeForMem(CType);
- CharUnits Align = CharUnits::fromQuantity(TySize);
- llvm::Value *Cnt = nullptr;
- Address CntAddr = Address::invalid();
- llvm::BasicBlock *PrecondBB = nullptr;
- llvm::BasicBlock *ExitBB = nullptr;
- if (NumIters > 1) {
- CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr");
- CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr,
- /*Volatile=*/false, C.IntTy);
- PrecondBB = CGF.createBasicBlock("precond");
- ExitBB = CGF.createBasicBlock("exit");
- llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body");
- // There is no need to emit line number for unconditional branch.
- (void)ApplyDebugLocation::CreateEmpty(CGF);
- CGF.EmitBlock(PrecondBB);
- Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc);
- llvm::Value *Cmp =
- Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters));
- Bld.CreateCondBr(Cmp, BodyBB, ExitBB);
- CGF.EmitBlock(BodyBB);
- }
- // kmpc_barrier.
- CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
- /*EmitChecks=*/false,
- /*ForceSimpleCall=*/true);
- llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
- llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
- llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
-
- // if (lane_id == 0)
- llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
- Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
- CGF.EmitBlock(ThenBB);
-
- // Reduce element = LocalReduceList[i]
- Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
- llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
- ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
- // elemptr = ((CopyType*)(elemptrptr)) + I
- Address ElemPtr(ElemPtrPtr, CopyType, Align);
- if (NumIters > 1)
- ElemPtr = Bld.CreateGEP(ElemPtr, Cnt);
-
- // Get pointer to location in transfer medium.
- // MediumPtr = &medium[warp_id]
- llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
- TransferMedium->getValueType(), TransferMedium,
- {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
- // Casting to actual data type.
- // MediumPtr = (CopyType*)MediumPtrAddr;
- Address MediumPtr(MediumPtrVal, CopyType, Align);
-
- // elem = *elemptr
- //*MediumPtr = elem
- llvm::Value *Elem = CGF.EmitLoadOfScalar(
- ElemPtr, /*Volatile=*/false, CType, Loc,
- LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
- // Store the source element value to the dest element address.
- CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType,
- LValueBaseInfo(AlignmentSource::Type),
- TBAAAccessInfo());
-
- Bld.CreateBr(MergeBB);
-
- CGF.EmitBlock(ElseBB);
- Bld.CreateBr(MergeBB);
-
- CGF.EmitBlock(MergeBB);
-
- // kmpc_barrier.
- CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
- /*EmitChecks=*/false,
- /*ForceSimpleCall=*/true);
-
- //
- // Warp 0 copies reduce element from transfer medium.
- //
- llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
- llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
- llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
-
- Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
- llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
- AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc);
-
- // Up to 32 threads in warp 0 are active.
- llvm::Value *IsActiveThread =
- Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
- Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
-
- CGF.EmitBlock(W0ThenBB);
-
- // SrcMediumPtr = &medium[tid]
- llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
- TransferMedium->getValueType(), TransferMedium,
- {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
- // SrcMediumVal = *SrcMediumPtr;
- Address SrcMediumPtr(SrcMediumPtrVal, CopyType, Align);
-
- // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
- Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
- llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
- TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
- Address TargetElemPtr(TargetElemPtrVal, CopyType, Align);
- if (NumIters > 1)
- TargetElemPtr = Bld.CreateGEP(TargetElemPtr, Cnt);
-
- // *TargetElemPtr = SrcMediumVal;
- llvm::Value *SrcMediumValue =
- CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc);
- CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
- CType);
- Bld.CreateBr(W0MergeBB);
-
- CGF.EmitBlock(W0ElseBB);
- Bld.CreateBr(W0MergeBB);
-
- CGF.EmitBlock(W0MergeBB);
-
- if (NumIters > 1) {
- Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1));
- CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy);
- CGF.EmitBranch(PrecondBB);
- (void)ApplyDebugLocation::CreateEmpty(CGF);
- CGF.EmitBlock(ExitBB);
- }
- RealTySize %= TySize;
- }
- ++Idx;
- }
-
- CGF.FinishFunction();
- return Fn;
-}
-
-/// Emit a helper that reduces data across two OpenMP threads (lanes)
-/// in the same warp. It uses shuffle instructions to copy over data from
-/// a remote lane's stack. The reduction algorithm performed is specified
-/// by the fourth parameter.
-///
-/// Algorithm Versions.
-/// Full Warp Reduce (argument value 0):
-/// This algorithm assumes that all 32 lanes are active and gathers
-/// data from these 32 lanes, producing a single resultant value.
-/// Contiguous Partial Warp Reduce (argument value 1):
-/// This algorithm assumes that only a *contiguous* subset of lanes
-/// are active. This happens for the last warp in a parallel region
-/// when the user specified num_threads is not an integer multiple of
-/// 32. This contiguous subset always starts with the zeroth lane.
-/// Partial Warp Reduce (argument value 2):
-/// This algorithm gathers data from any number of lanes at any position.
-/// All reduced values are stored in the lowest possible lane. The set
-/// of problems every algorithm addresses is a super set of those
-/// addressable by algorithms with a lower version number. Overhead
-/// increases as algorithm version increases.
-///
-/// Terminology
-/// Reduce element:
-/// Reduce element refers to the individual data field with primitive
-/// data types to be combined and reduced across threads.
-/// Reduce list:
-/// Reduce list refers to a collection of local, thread-private
-/// reduce elements.
-/// Remote Reduce list:
-/// Remote Reduce list refers to a collection of remote (relative to
-/// the current thread) reduce elements.
-///
-/// We distinguish between three states of threads that are important to
-/// the implementation of this function.
-/// Alive threads:
-/// Threads in a warp executing the SIMT instruction, as distinguished from
-/// threads that are inactive due to divergent control flow.
-/// Active threads:
-/// The minimal set of threads that has to be alive upon entry to this
-/// function. The computation is correct iff active threads are alive.
-/// Some threads are alive but they are not active because they do not
-/// contribute to the computation in any useful manner. Turning them off
-/// may introduce control flow overheads without any tangible benefits.
-/// Effective threads:
-/// In order to comply with the argument requirements of the shuffle
-/// function, we must keep all lanes holding data alive. But at most
-/// half of them perform value aggregation; we refer to this half of
-/// threads as effective. The other half is simply handing off their
-/// data.
-///
-/// Procedure
-/// Value shuffle:
-/// In this step active threads transfer data from higher lane positions
-/// in the warp to lower lane positions, creating Remote Reduce list.
-/// Value aggregation:
-/// In this step, effective threads combine their thread local Reduce list
-/// with Remote Reduce list and store the result in the thread local
-/// Reduce list.
-/// Value copy:
-/// In this step, we deal with the assumption made by algorithm 2
-/// (i.e. contiguity assumption). When we have an odd number of lanes
-/// active, say 2k+1, only k threads will be effective and therefore k
-/// new values will be produced. However, the Reduce list owned by the
-/// (2k+1)th thread is ignored in the value aggregation. Therefore
-/// we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
-/// that the contiguity assumption still holds.
-static llvm::Function *emitShuffleAndReduceFunction(
- CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) {
- ASTContext &C = CGM.getContext();
-
- // Thread local Reduce list used to host the values of data to be reduced.
- ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamKind::Other);
- // Current lane id; could be logical.
- ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
- ImplicitParamKind::Other);
- // Offset of the remote source lane relative to the current lane.
- ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.ShortTy, ImplicitParamKind::Other);
- // Algorithm version. This is expected to be known at compile time.
- ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.ShortTy, ImplicitParamKind::Other);
- FunctionArgList Args;
- Args.push_back(&ReduceListArg);
- Args.push_back(&LaneIDArg);
- Args.push_back(&RemoteLaneOffsetArg);
- Args.push_back(&AlgoVerArg);
-
- const CGFunctionInfo &CGFI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *Fn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
- Fn->setDoesNotRecurse();
-
- CodeGenFunction CGF(CGM);
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
-
- CGBuilderTy &Bld = CGF.Builder;
-
- Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
- llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
- Address LocalReduceList(
- Bld.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
- C.VoidPtrTy, SourceLocation()),
- ElemTy->getPointerTo()),
- ElemTy, CGF.getPointerAlign());
-
- Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
- llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
- AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
-
- Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
- llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
- AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
-
- Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
- llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
- AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
-
- // Create a local thread-private variable to host the Reduce list
- // from a remote lane.
- Address RemoteReduceList =
- CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");
-
- // This loop iterates through the list of reduce elements and copies,
- // element by element, from a remote lane in the warp to RemoteReduceList,
- // hosted on the thread's stack.
- emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
- LocalReduceList, RemoteReduceList,
- {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
- /*ScratchpadIndex=*/nullptr,
- /*ScratchpadWidth=*/nullptr});
-
- // The actions to be performed on the Remote Reduce list is dependent
- // on the algorithm version.
- //
- // if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
- // LaneId % 2 == 0 && Offset > 0):
- // do the reduction value aggregation
- //
- // The thread local variable Reduce list is mutated in place to host the
- // reduced data, which is the aggregated value produced from local and
- // remote lanes.
- //
- // Note that AlgoVer is expected to be a constant integer known at compile
- // time.
- // When AlgoVer==0, the first conjunction evaluates to true, making
- // the entire predicate true during compile time.
- // When AlgoVer==1, the second conjunction has only the second part to be
- // evaluated during runtime. Other conjunctions evaluates to false
- // during compile time.
- // When AlgoVer==2, the third conjunction has only the second part to be
- // evaluated during runtime. Other conjunctions evaluates to false
- // during compile time.
- llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);
-
- llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
- llvm::Value *CondAlgo1 = Bld.CreateAnd(
- Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
-
- llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
- llvm::Value *CondAlgo2 = Bld.CreateAnd(
- Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
- CondAlgo2 = Bld.CreateAnd(
- CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
-
- llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
- CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
-
- llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
- llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
- llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
- Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
-
- CGF.EmitBlock(ThenBB);
- // reduce_function(LocalReduceList, RemoteReduceList)
- llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- LocalReduceList.getPointer(), CGF.VoidPtrTy);
- llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- RemoteReduceList.getPointer(), CGF.VoidPtrTy);
- CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
- CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
- Bld.CreateBr(MergeBB);
-
- CGF.EmitBlock(ElseBB);
- Bld.CreateBr(MergeBB);
-
- CGF.EmitBlock(MergeBB);
-
- // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
- // Reduce list.
- Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
- llvm::Value *CondCopy = Bld.CreateAnd(
- Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
-
- llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
- llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
- llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
- Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);
-
- CGF.EmitBlock(CpyThenBB);
- emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
- RemoteReduceList, LocalReduceList);
- Bld.CreateBr(CpyMergeBB);
-
- CGF.EmitBlock(CpyElseBB);
- Bld.CreateBr(CpyMergeBB);
-
- CGF.EmitBlock(CpyMergeBB);
-
- CGF.FinishFunction();
- return Fn;
-}
-
-/// This function emits a helper that copies all the reduction variables from
-/// the team into the provided global buffer for the reduction variables.
-///
-/// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
-/// For all data entries D in reduce_data:
-/// Copy local D to buffer.D[Idx]
-static llvm::Value *emitListToGlobalCopyFunction(
- CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy, SourceLocation Loc,
- const RecordDecl *TeamReductionRec,
- const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
- &VarFieldMap) {
- ASTContext &C = CGM.getContext();
-
- // Buffer: global reduction buffer.
- ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamKind::Other);
- // Idx: index of the buffer.
- ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
- ImplicitParamKind::Other);
- // ReduceList: thread local Reduce list.
- ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamKind::Other);
- FunctionArgList Args;
- Args.push_back(&BufferArg);
- Args.push_back(&IdxArg);
- Args.push_back(&ReduceListArg);
-
- const CGFunctionInfo &CGFI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *Fn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- "_omp_reduction_list_to_global_copy_func", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
- Fn->setDoesNotRecurse();
- CodeGenFunction CGF(CGM);
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
-
- CGBuilderTy &Bld = CGF.Builder;
-
- Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
- Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
- llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
- Address LocalReduceList(
- Bld.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
- C.VoidPtrTy, Loc),
- ElemTy->getPointerTo()),
- ElemTy, CGF.getPointerAlign());
- QualType StaticTy = C.getRecordType(TeamReductionRec);
- llvm::Type *LLVMReductionsBufferTy =
- CGM.getTypes().ConvertTypeForMem(StaticTy);
- llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
- LLVMReductionsBufferTy->getPointerTo());
- llvm::Value *Idxs[] = {CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
- /*Volatile=*/false, C.IntTy,
- Loc)};
- unsigned Idx = 0;
- for (const Expr *Private : Privates) {
- // Reduce element = LocalReduceList[i]
- Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
- llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
- ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
- // elemptr = ((CopyType*)(elemptrptr)) + I
- ElemTy = CGF.ConvertTypeForMem(Private->getType());
- ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- ElemPtrPtr, ElemTy->getPointerTo());
- Address ElemPtr =
- Address(ElemPtrPtr, ElemTy, C.getTypeAlignInChars(Private->getType()));
- const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
- // Global = Buffer.VD[Idx];
- const FieldDecl *FD = VarFieldMap.lookup(VD);
- llvm::Value *BufferPtr =
- Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs);
- LValue GlobLVal = CGF.EmitLValueForField(
- CGF.MakeNaturalAlignAddrLValue(BufferPtr, StaticTy), FD);
- Address GlobAddr = GlobLVal.getAddress(CGF);
- GlobLVal.setAddress(Address(GlobAddr.getPointer(),
- CGF.ConvertTypeForMem(Private->getType()),
- GlobAddr.getAlignment()));
- switch (CGF.getEvaluationKind(Private->getType())) {
- case TEK_Scalar: {
- llvm::Value *V = CGF.EmitLoadOfScalar(
- ElemPtr, /*Volatile=*/false, Private->getType(), Loc,
- LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
- CGF.EmitStoreOfScalar(V, GlobLVal);
- break;
- }
- case TEK_Complex: {
- CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(
- CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc);
- CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false);
- break;
- }
- case TEK_Aggregate:
- CGF.EmitAggregateCopy(GlobLVal,
- CGF.MakeAddrLValue(ElemPtr, Private->getType()),
- Private->getType(), AggValueSlot::DoesNotOverlap);
- break;
- }
- ++Idx;
- }
-
- CGF.FinishFunction();
- return Fn;
-}
-
-/// This function emits a helper that reduces all the reduction variables from
-/// the team into the provided global buffer for the reduction variables.
-///
-/// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
-/// void *GlobPtrs[];
-/// GlobPtrs[0] = (void*)&buffer.D0[Idx];
-/// ...
-/// GlobPtrs[N] = (void*)&buffer.DN[Idx];
-/// reduce_function(GlobPtrs, reduce_data);
-static llvm::Value *emitListToGlobalReduceFunction(
- CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy, SourceLocation Loc,
- const RecordDecl *TeamReductionRec,
- const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
- &VarFieldMap,
- llvm::Function *ReduceFn) {
- ASTContext &C = CGM.getContext();
-
- // Buffer: global reduction buffer.
- ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamKind::Other);
- // Idx: index of the buffer.
- ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
- ImplicitParamKind::Other);
- // ReduceList: thread local Reduce list.
- ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamKind::Other);
- FunctionArgList Args;
- Args.push_back(&BufferArg);
- Args.push_back(&IdxArg);
- Args.push_back(&ReduceListArg);
-
- const CGFunctionInfo &CGFI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *Fn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- "_omp_reduction_list_to_global_reduce_func", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
- Fn->setDoesNotRecurse();
- CodeGenFunction CGF(CGM);
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
-
- CGBuilderTy &Bld = CGF.Builder;
-
- Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
- QualType StaticTy = C.getRecordType(TeamReductionRec);
- llvm::Type *LLVMReductionsBufferTy =
- CGM.getTypes().ConvertTypeForMem(StaticTy);
- llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
- LLVMReductionsBufferTy->getPointerTo());
-
- // 1. Build a list of reduction variables.
- // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
- Address ReductionList =
- CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
- auto IPriv = Privates.begin();
- llvm::Value *Idxs[] = {CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
- /*Volatile=*/false, C.IntTy,
- Loc)};
- unsigned Idx = 0;
- for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
- Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
- // Global = Buffer.VD[Idx];
- const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
- const FieldDecl *FD = VarFieldMap.lookup(VD);
- llvm::Value *BufferPtr =
- Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs);
- LValue GlobLVal = CGF.EmitLValueForField(
- CGF.MakeNaturalAlignAddrLValue(BufferPtr, StaticTy), FD);
- Address GlobAddr = GlobLVal.getAddress(CGF);
- CGF.EmitStoreOfScalar(GlobAddr.getPointer(), Elem, /*Volatile=*/false,
- C.VoidPtrTy);
- if ((*IPriv)->getType()->isVariablyModifiedType()) {
- // Store array size.
- ++Idx;
- Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
- llvm::Value *Size = CGF.Builder.CreateIntCast(
- CGF.getVLASize(
- CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
- .NumElts,
- CGF.SizeTy, /*isSigned=*/false);
- CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
- Elem);
- }
- }
-
- // Call reduce_function(GlobalReduceList, ReduceList)
- llvm::Value *GlobalReduceList = ReductionList.getPointer();
- Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
- llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
- AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
- CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
- CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr});
- CGF.FinishFunction();
- return Fn;
-}
-
-/// This function emits a helper that copies all the reduction variables from
-/// the team into the provided global buffer for the reduction variables.
-///
-/// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
-/// For all data entries D in reduce_data:
-/// Copy buffer.D[Idx] to local D;
-static llvm::Value *emitGlobalToListCopyFunction(
- CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy, SourceLocation Loc,
- const RecordDecl *TeamReductionRec,
- const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
- &VarFieldMap) {
- ASTContext &C = CGM.getContext();
-
- // Buffer: global reduction buffer.
- ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamKind::Other);
- // Idx: index of the buffer.
- ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
- ImplicitParamKind::Other);
- // ReduceList: thread local Reduce list.
- ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamKind::Other);
- FunctionArgList Args;
- Args.push_back(&BufferArg);
- Args.push_back(&IdxArg);
- Args.push_back(&ReduceListArg);
-
- const CGFunctionInfo &CGFI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *Fn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- "_omp_reduction_global_to_list_copy_func", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
- Fn->setDoesNotRecurse();
- CodeGenFunction CGF(CGM);
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
-
- CGBuilderTy &Bld = CGF.Builder;
-
- Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
- Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
- llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
- Address LocalReduceList(
- Bld.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
- C.VoidPtrTy, Loc),
- ElemTy->getPointerTo()),
- ElemTy, CGF.getPointerAlign());
- QualType StaticTy = C.getRecordType(TeamReductionRec);
- llvm::Type *LLVMReductionsBufferTy =
- CGM.getTypes().ConvertTypeForMem(StaticTy);
- llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
- LLVMReductionsBufferTy->getPointerTo());
-
- llvm::Value *Idxs[] = {CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
- /*Volatile=*/false, C.IntTy,
- Loc)};
- unsigned Idx = 0;
- for (const Expr *Private : Privates) {
- // Reduce element = LocalReduceList[i]
- Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
- llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
- ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
- // elemptr = ((CopyType*)(elemptrptr)) + I
- ElemTy = CGF.ConvertTypeForMem(Private->getType());
- ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- ElemPtrPtr, ElemTy->getPointerTo());
- Address ElemPtr =
- Address(ElemPtrPtr, ElemTy, C.getTypeAlignInChars(Private->getType()));
- const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
- // Global = Buffer.VD[Idx];
- const FieldDecl *FD = VarFieldMap.lookup(VD);
- llvm::Value *BufferPtr =
- Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs);
- LValue GlobLVal = CGF.EmitLValueForField(
- CGF.MakeNaturalAlignAddrLValue(BufferPtr, StaticTy), FD);
- Address GlobAddr = GlobLVal.getAddress(CGF);
- GlobLVal.setAddress(Address(GlobAddr.getPointer(),
- CGF.ConvertTypeForMem(Private->getType()),
- GlobAddr.getAlignment()));
- switch (CGF.getEvaluationKind(Private->getType())) {
- case TEK_Scalar: {
- llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc);
- CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType(),
- LValueBaseInfo(AlignmentSource::Type),
- TBAAAccessInfo());
- break;
- }
- case TEK_Complex: {
- CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc);
- CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()),
- /*isInit=*/false);
- break;
- }
- case TEK_Aggregate:
- CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
- GlobLVal, Private->getType(),
- AggValueSlot::DoesNotOverlap);
- break;
- }
- ++Idx;
- }
-
- CGF.FinishFunction();
- return Fn;
-}
-
-/// This function emits a helper that reduces all the reduction variables from
-/// the team into the provided global buffer for the reduction variables.
-///
-/// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
-/// void *GlobPtrs[];
-/// GlobPtrs[0] = (void*)&buffer.D0[Idx];
-/// ...
-/// GlobPtrs[N] = (void*)&buffer.DN[Idx];
-/// reduce_function(reduce_data, GlobPtrs);
-static llvm::Value *emitGlobalToListReduceFunction(
- CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy, SourceLocation Loc,
- const RecordDecl *TeamReductionRec,
- const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
- &VarFieldMap,
- llvm::Function *ReduceFn) {
- ASTContext &C = CGM.getContext();
-
- // Buffer: global reduction buffer.
- ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamKind::Other);
- // Idx: index of the buffer.
- ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
- ImplicitParamKind::Other);
- // ReduceList: thread local Reduce list.
- ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamKind::Other);
- FunctionArgList Args;
- Args.push_back(&BufferArg);
- Args.push_back(&IdxArg);
- Args.push_back(&ReduceListArg);
-
- const CGFunctionInfo &CGFI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *Fn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- "_omp_reduction_global_to_list_reduce_func", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
- Fn->setDoesNotRecurse();
- CodeGenFunction CGF(CGM);
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
-
- CGBuilderTy &Bld = CGF.Builder;
-
- Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
- QualType StaticTy = C.getRecordType(TeamReductionRec);
- llvm::Type *LLVMReductionsBufferTy =
- CGM.getTypes().ConvertTypeForMem(StaticTy);
- llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
- LLVMReductionsBufferTy->getPointerTo());
-
- // 1. Build a list of reduction variables.
- // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
- Address ReductionList =
- CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
- auto IPriv = Privates.begin();
- llvm::Value *Idxs[] = {CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
- /*Volatile=*/false, C.IntTy,
- Loc)};
- unsigned Idx = 0;
- for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
- Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
- // Global = Buffer.VD[Idx];
- const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
- const FieldDecl *FD = VarFieldMap.lookup(VD);
- llvm::Value *BufferPtr =
- Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs);
- LValue GlobLVal = CGF.EmitLValueForField(
- CGF.MakeNaturalAlignAddrLValue(BufferPtr, StaticTy), FD);
- Address GlobAddr = GlobLVal.getAddress(CGF);
- CGF.EmitStoreOfScalar(GlobAddr.getPointer(), Elem, /*Volatile=*/false,
- C.VoidPtrTy);
- if ((*IPriv)->getType()->isVariablyModifiedType()) {
- // Store array size.
- ++Idx;
- Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
- llvm::Value *Size = CGF.Builder.CreateIntCast(
- CGF.getVLASize(
- CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
- .NumElts,
- CGF.SizeTy, /*isSigned=*/false);
- CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
- Elem);
- }
- }
-
- // Call reduce_function(ReduceList, GlobalReduceList)
- llvm::Value *GlobalReduceList = ReductionList.getPointer();
- Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
- llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
- AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
- CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
- CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList});
- CGF.FinishFunction();
- return Fn;
-}
-
///
/// Design of OpenMP reductions on the GPU
///
@@ -2805,21 +1660,20 @@ void CGOpenMPRuntimeGPU::emitReduction(
return;
bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
-#ifndef NDEBUG
+ bool DistributeReduction = isOpenMPDistributeDirective(Options.ReductionKind);
bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
-#endif
+
+ ASTContext &C = CGM.getContext();
if (Options.SimpleReduction) {
assert(!TeamsReduction && !ParallelReduction &&
"Invalid reduction selection in emitReduction.");
+ (void)ParallelReduction;
CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
ReductionOps, Options);
return;
}
- assert((TeamsReduction || ParallelReduction) &&
- "Invalid reduction selection in emitReduction.");
-
llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size());
int Cnt = 0;
@@ -2827,145 +1681,85 @@ void CGOpenMPRuntimeGPU::emitReduction(
PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
++Cnt;
}
-
- ASTContext &C = CGM.getContext();
const RecordDecl *ReductionRec = ::buildRecordForGlobalizedVars(
CGM.getContext(), PrivatesReductions, std::nullopt, VarFieldMap, 1);
- // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
- // RedList, shuffle_reduce_func, interwarp_copy_func);
- // or
- // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>);
- llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
-
- llvm::Value *Res;
- // 1. Build a list of reduction variables.
- // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
- auto Size = RHSExprs.size();
- for (const Expr *E : Privates) {
- if (E->getType()->isVariablyModifiedType())
- // Reserve place for array size.
- ++Size;
- }
- llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
- QualType ReductionArrayTy = C.getConstantArrayType(
- C.VoidPtrTy, ArraySize, nullptr, ArraySizeModifier::Normal,
- /*IndexTypeQuals=*/0);
- Address ReductionList =
- CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
- auto IPriv = Privates.begin();
- unsigned Idx = 0;
- for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
- Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
- CGF.Builder.CreateStore(
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
- Elem);
- if ((*IPriv)->getType()->isVariablyModifiedType()) {
- // Store array size.
- ++Idx;
- Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
- llvm::Value *Size = CGF.Builder.CreateIntCast(
- CGF.getVLASize(
- CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
- .NumElts,
- CGF.SizeTy, /*isSigned=*/false);
- CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
- Elem);
- }
- }
-
- llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- ReductionList.getPointer(), CGF.VoidPtrTy);
- llvm::Function *ReductionFn = emitReductionFunction(
- CGF.CurFn->getName(), Loc, CGF.ConvertTypeForMem(ReductionArrayTy),
- Privates, LHSExprs, RHSExprs, ReductionOps);
- llvm::Value *ReductionDataSize =
- CGF.getTypeSize(C.getRecordType(ReductionRec));
- ReductionDataSize =
- CGF.Builder.CreateSExtOrTrunc(ReductionDataSize, CGF.Int64Ty);
- llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
- CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
- llvm::Value *InterWarpCopyFn =
- emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
-
- if (ParallelReduction) {
- llvm::Value *Args[] = {RTLoc, ReductionDataSize, RL, ShuffleAndReduceFn,
- InterWarpCopyFn};
-
- Res = CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2),
- Args);
- } else {
- assert(TeamsReduction && "expected teams reduction.");
+ if (TeamsReduction)
TeamsReductions.push_back(ReductionRec);
- auto *KernelTeamsReductionPtr = CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_reduction_get_fixed_buffer),
- {}, "_openmp_teams_reductions_buffer_$_$ptr");
- llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
- CGM, Privates, ReductionArrayTy, Loc, ReductionRec, VarFieldMap);
- llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction(
- CGM, Privates, ReductionArrayTy, Loc, ReductionRec, VarFieldMap,
- ReductionFn);
- llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction(
- CGM, Privates, ReductionArrayTy, Loc, ReductionRec, VarFieldMap);
- llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction(
- CGM, Privates, ReductionArrayTy, Loc, ReductionRec, VarFieldMap,
- ReductionFn);
- llvm::Value *Args[] = {
- RTLoc,
- KernelTeamsReductionPtr,
- CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum),
- ReductionDataSize,
- RL,
- ShuffleAndReduceFn,
- InterWarpCopyFn,
- GlobalToBufferCpyFn,
- GlobalToBufferRedFn,
- BufferToGlobalCpyFn,
- BufferToGlobalRedFn};
-
- Res = CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2),
- Args);
- }
+ // Source location for the ident struct
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
- // 5. Build if (res == 1)
- llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
- llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
- llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
- Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
- CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);
-
- // 6. Build then branch: where we have reduced values in the master
- // thread in each team.
- // __kmpc_end_reduce{_nowait}(<gtid>);
- // break;
- CGF.EmitBlock(ThenBB);
-
- // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
- auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
- this](CodeGenFunction &CGF, PrePostActionTy &Action) {
- auto IPriv = Privates.begin();
- auto ILHS = LHSExprs.begin();
- auto IRHS = RHSExprs.begin();
- for (const Expr *E : ReductionOps) {
- emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
- cast<DeclRefExpr>(*IRHS));
- ++IPriv;
- ++ILHS;
- ++IRHS;
+ using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
+ InsertPointTy AllocaIP(CGF.AllocaInsertPt->getParent(),
+ CGF.AllocaInsertPt->getIterator());
+ InsertPointTy CodeGenIP(CGF.Builder.GetInsertBlock(),
+ CGF.Builder.GetInsertPoint());
+ llvm::OpenMPIRBuilder::LocationDescription OmpLoc(
+ CodeGenIP, CGF.SourceLocToDebugLoc(Loc));
+ llvm::SmallVector<llvm::OpenMPIRBuilder::ReductionInfo> ReductionInfos;
+
+ CodeGenFunction::OMPPrivateScope Scope(CGF);
+ unsigned Idx = 0;
+ for (const Expr *Private : Privates) {
+ llvm::Type *ElementType;
+ llvm::Value *Variable;
+ llvm::Value *PrivateVariable;
+ llvm::OpenMPIRBuilder::ReductionGenAtomicCBTy AtomicReductionGen = nullptr;
+ ElementType = CGF.ConvertTypeForMem(Private->getType());
+ const auto *RHSVar =
+ cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[Idx])->getDecl());
+ PrivateVariable = CGF.GetAddrOfLocalVar(RHSVar).emitRawPointer(CGF);
+ const auto *LHSVar =
+ cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[Idx])->getDecl());
+ Variable = CGF.GetAddrOfLocalVar(LHSVar).emitRawPointer(CGF);
+ llvm::OpenMPIRBuilder::EvalKind EvalKind;
+ switch (CGF.getEvaluationKind(Private->getType())) {
+ case TEK_Scalar:
+ EvalKind = llvm::OpenMPIRBuilder::EvalKind::Scalar;
+ break;
+ case TEK_Complex:
+ EvalKind = llvm::OpenMPIRBuilder::EvalKind::Complex;
+ break;
+ case TEK_Aggregate:
+ EvalKind = llvm::OpenMPIRBuilder::EvalKind::Aggregate;
+ break;
}
- };
- RegionCodeGenTy RCG(CodeGen);
- RCG(CGF);
- // There is no need to emit line number for unconditional branch.
- (void)ApplyDebugLocation::CreateEmpty(CGF);
- CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
+ auto ReductionGen = [&](InsertPointTy CodeGenIP, unsigned I,
+ llvm::Value **LHSPtr, llvm::Value **RHSPtr,
+ llvm::Function *NewFunc) {
+ CGF.Builder.restoreIP(CodeGenIP);
+ auto *CurFn = CGF.CurFn;
+ CGF.CurFn = NewFunc;
+
+ *LHSPtr = CGF.GetAddrOfLocalVar(
+ cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl()))
+ .emitRawPointer(CGF);
+ *RHSPtr = CGF.GetAddrOfLocalVar(
+ cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl()))
+ .emitRawPointer(CGF);
+
+ emitSingleReductionCombiner(CGF, ReductionOps[I], Privates[I],
+ cast<DeclRefExpr>(LHSExprs[I]),
+ cast<DeclRefExpr>(RHSExprs[I]));
+
+ CGF.CurFn = CurFn;
+
+ return InsertPointTy(CGF.Builder.GetInsertBlock(),
+ CGF.Builder.GetInsertPoint());
+ };
+ ReductionInfos.emplace_back(llvm::OpenMPIRBuilder::ReductionInfo(
+ ElementType, Variable, PrivateVariable, EvalKind,
+ /*ReductionGen=*/nullptr, ReductionGen, AtomicReductionGen));
+ Idx++;
+ }
+
+ CGF.Builder.restoreIP(OMPBuilder.createReductionsGPU(
+ OmpLoc, AllocaIP, CodeGenIP, ReductionInfos, false, TeamsReduction,
+ DistributeReduction, llvm::OpenMPIRBuilder::ReductionGenCBKind::Clang,
+ CGF.getTarget().getGridValue(), C.getLangOpts().OpenMPCUDAReductionBufNum,
+ RTLoc));
+ return;
}
const VarDecl *
@@ -3106,15 +1900,15 @@ llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
// Get the array of arguments.
SmallVector<llvm::Value *, 8> Args;
- Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
- Args.emplace_back(ZeroAddr.getPointer());
+ Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).emitRawPointer(CGF));
+ Args.emplace_back(ZeroAddr.emitRawPointer(CGF));
CGBuilderTy &Bld = CGF.Builder;
auto CI = CS.capture_begin();
// Use global memory for data sharing.
// Handle passing of global args to workers.
- Address GlobalArgs =
+ RawAddress GlobalArgs =
CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
@@ -3399,8 +2193,8 @@ void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas(
if (VD->getType().getCanonicalType()->isReferenceType())
VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
VD->getType().getCanonicalType())
- .getAddress(CGF);
- CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
+ .getAddress();
+ CGF.EmitStoreOfScalar(VDAddr.emitRawPointer(CGF), VarLVal);
}
}
}
@@ -3434,106 +2228,112 @@ bool CGOpenMPRuntimeGPU::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
return false;
}
-// Get current CudaArch and ignore any unknown values
-static CudaArch getCudaArch(CodeGenModule &CGM) {
+// Get current OffloadArch and ignore any unknown values
+static OffloadArch getOffloadArch(CodeGenModule &CGM) {
if (!CGM.getTarget().hasFeature("ptx"))
- return CudaArch::UNKNOWN;
+ return OffloadArch::UNKNOWN;
for (const auto &Feature : CGM.getTarget().getTargetOpts().FeatureMap) {
if (Feature.getValue()) {
- CudaArch Arch = StringToCudaArch(Feature.getKey());
- if (Arch != CudaArch::UNKNOWN)
+ OffloadArch Arch = StringToOffloadArch(Feature.getKey());
+ if (Arch != OffloadArch::UNKNOWN)
return Arch;
}
}
- return CudaArch::UNKNOWN;
+ return OffloadArch::UNKNOWN;
}
/// Check to see if target architecture supports unified addressing which is
/// a restriction for OpenMP requires clause "unified_shared_memory".
-void CGOpenMPRuntimeGPU::processRequiresDirective(
- const OMPRequiresDecl *D) {
+void CGOpenMPRuntimeGPU::processRequiresDirective(const OMPRequiresDecl *D) {
for (const OMPClause *Clause : D->clauselists()) {
if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
- CudaArch Arch = getCudaArch(CGM);
+ OffloadArch Arch = getOffloadArch(CGM);
switch (Arch) {
- case CudaArch::SM_20:
- case CudaArch::SM_21:
- case CudaArch::SM_30:
- case CudaArch::SM_32:
- case CudaArch::SM_35:
- case CudaArch::SM_37:
- case CudaArch::SM_50:
- case CudaArch::SM_52:
- case CudaArch::SM_53: {
+ case OffloadArch::SM_20:
+ case OffloadArch::SM_21:
+ case OffloadArch::SM_30:
+ case OffloadArch::SM_32_:
+ case OffloadArch::SM_35:
+ case OffloadArch::SM_37:
+ case OffloadArch::SM_50:
+ case OffloadArch::SM_52:
+ case OffloadArch::SM_53: {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
- Out << "Target architecture " << CudaArchToString(Arch)
+ Out << "Target architecture " << OffloadArchToString(Arch)
<< " does not support unified addressing";
CGM.Error(Clause->getBeginLoc(), Out.str());
return;
}
- case CudaArch::SM_60:
- case CudaArch::SM_61:
- case CudaArch::SM_62:
- case CudaArch::SM_70:
- case CudaArch::SM_72:
- case CudaArch::SM_75:
- case CudaArch::SM_80:
- case CudaArch::SM_86:
- case CudaArch::SM_87:
- case CudaArch::SM_89:
- case CudaArch::SM_90:
- case CudaArch::SM_90a:
- case CudaArch::GFX600:
- case CudaArch::GFX601:
- case CudaArch::GFX602:
- case CudaArch::GFX700:
- case CudaArch::GFX701:
- case CudaArch::GFX702:
- case CudaArch::GFX703:
- case CudaArch::GFX704:
- case CudaArch::GFX705:
- case CudaArch::GFX801:
- case CudaArch::GFX802:
- case CudaArch::GFX803:
- case CudaArch::GFX805:
- case CudaArch::GFX810:
- case CudaArch::GFX900:
- case CudaArch::GFX902:
- case CudaArch::GFX904:
- case CudaArch::GFX906:
- case CudaArch::GFX908:
- case CudaArch::GFX909:
- case CudaArch::GFX90a:
- case CudaArch::GFX90c:
- case CudaArch::GFX940:
- case CudaArch::GFX941:
- case CudaArch::GFX942:
- case CudaArch::GFX1010:
- case CudaArch::GFX1011:
- case CudaArch::GFX1012:
- case CudaArch::GFX1013:
- case CudaArch::GFX1030:
- case CudaArch::GFX1031:
- case CudaArch::GFX1032:
- case CudaArch::GFX1033:
- case CudaArch::GFX1034:
- case CudaArch::GFX1035:
- case CudaArch::GFX1036:
- case CudaArch::GFX1100:
- case CudaArch::GFX1101:
- case CudaArch::GFX1102:
- case CudaArch::GFX1103:
- case CudaArch::GFX1150:
- case CudaArch::GFX1151:
- case CudaArch::GFX1200:
- case CudaArch::GFX1201:
- case CudaArch::Generic:
- case CudaArch::UNUSED:
- case CudaArch::UNKNOWN:
+ case OffloadArch::SM_60:
+ case OffloadArch::SM_61:
+ case OffloadArch::SM_62:
+ case OffloadArch::SM_70:
+ case OffloadArch::SM_72:
+ case OffloadArch::SM_75:
+ case OffloadArch::SM_80:
+ case OffloadArch::SM_86:
+ case OffloadArch::SM_87:
+ case OffloadArch::SM_89:
+ case OffloadArch::SM_90:
+ case OffloadArch::SM_90a:
+ case OffloadArch::GFX600:
+ case OffloadArch::GFX601:
+ case OffloadArch::GFX602:
+ case OffloadArch::GFX700:
+ case OffloadArch::GFX701:
+ case OffloadArch::GFX702:
+ case OffloadArch::GFX703:
+ case OffloadArch::GFX704:
+ case OffloadArch::GFX705:
+ case OffloadArch::GFX801:
+ case OffloadArch::GFX802:
+ case OffloadArch::GFX803:
+ case OffloadArch::GFX805:
+ case OffloadArch::GFX810:
+ case OffloadArch::GFX9_GENERIC:
+ case OffloadArch::GFX900:
+ case OffloadArch::GFX902:
+ case OffloadArch::GFX904:
+ case OffloadArch::GFX906:
+ case OffloadArch::GFX908:
+ case OffloadArch::GFX909:
+ case OffloadArch::GFX90a:
+ case OffloadArch::GFX90c:
+ case OffloadArch::GFX940:
+ case OffloadArch::GFX941:
+ case OffloadArch::GFX942:
+ case OffloadArch::GFX10_1_GENERIC:
+ case OffloadArch::GFX1010:
+ case OffloadArch::GFX1011:
+ case OffloadArch::GFX1012:
+ case OffloadArch::GFX1013:
+ case OffloadArch::GFX10_3_GENERIC:
+ case OffloadArch::GFX1030:
+ case OffloadArch::GFX1031:
+ case OffloadArch::GFX1032:
+ case OffloadArch::GFX1033:
+ case OffloadArch::GFX1034:
+ case OffloadArch::GFX1035:
+ case OffloadArch::GFX1036:
+ case OffloadArch::GFX11_GENERIC:
+ case OffloadArch::GFX1100:
+ case OffloadArch::GFX1101:
+ case OffloadArch::GFX1102:
+ case OffloadArch::GFX1103:
+ case OffloadArch::GFX1150:
+ case OffloadArch::GFX1151:
+ case OffloadArch::GFX1152:
+ case OffloadArch::GFX12_GENERIC:
+ case OffloadArch::GFX1200:
+ case OffloadArch::GFX1201:
+ case OffloadArch::AMDGCNSPIRV:
+ case OffloadArch::Generic:
+ case OffloadArch::UNUSED:
+ case OffloadArch::UNKNOWN:
break;
- case CudaArch::LAST:
- llvm_unreachable("Unexpected Cuda arch.");
+ case OffloadArch::LAST:
+ llvm_unreachable("Unexpected GPU arch.");
}
}
}
@@ -3560,10 +2360,3 @@ llvm::Value *CGOpenMPRuntimeGPU::getGPUThreadID(CodeGenFunction &CGF) {
CGM.getModule(), OMPRTL___kmpc_get_hardware_thread_id_in_block),
Args);
}
-
-llvm::Value *CGOpenMPRuntimeGPU::getGPUWarpSize(CodeGenFunction &CGF) {
- ArrayRef<llvm::Value *> Args{};
- return CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_get_warp_size),
- Args);
-}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
index 141436f26230..4d586ec972f8 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
@@ -150,9 +150,6 @@ public:
CodeGenFunction &CGF,
const std::pair<llvm::Value *, llvm::Value *> &AddrSizePair) override;
- /// Get the GPU warp size.
- llvm::Value *getGPUWarpSize(CodeGenFunction &CGF);
-
/// Get the id of the current thread on the GPU.
llvm::Value *getGPUThreadID(CodeGenFunction &CGF);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGPointerAuth.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGPointerAuth.cpp
new file mode 100644
index 000000000000..0c63b9d6bb7e
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGPointerAuth.cpp
@@ -0,0 +1,621 @@
+//===--- CGPointerAuth.cpp - IR generation for pointer authentication -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains common routines relating to the emission of
+// pointer authentication operations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/CodeGen/CodeGenABITypes.h"
+#include "clang/CodeGen/ConstantInitBuilder.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Support/SipHash.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+/// Given a pointer-authentication schema, return a concrete "other"
+/// discriminator for it.
+llvm::ConstantInt *CodeGenModule::getPointerAuthOtherDiscriminator(
+ const PointerAuthSchema &Schema, GlobalDecl Decl, QualType Type) {
+ switch (Schema.getOtherDiscrimination()) {
+ case PointerAuthSchema::Discrimination::None:
+ return nullptr;
+
+ case PointerAuthSchema::Discrimination::Type:
+ assert(!Type.isNull() && "type not provided for type-discriminated schema");
+ return llvm::ConstantInt::get(
+ IntPtrTy, getContext().getPointerAuthTypeDiscriminator(Type));
+
+ case PointerAuthSchema::Discrimination::Decl:
+ assert(Decl.getDecl() &&
+ "declaration not provided for decl-discriminated schema");
+ return llvm::ConstantInt::get(IntPtrTy,
+ getPointerAuthDeclDiscriminator(Decl));
+
+ case PointerAuthSchema::Discrimination::Constant:
+ return llvm::ConstantInt::get(IntPtrTy, Schema.getConstantDiscrimination());
+ }
+ llvm_unreachable("bad discrimination kind");
+}
+
+uint16_t CodeGen::getPointerAuthTypeDiscriminator(CodeGenModule &CGM,
+ QualType FunctionType) {
+ return CGM.getContext().getPointerAuthTypeDiscriminator(FunctionType);
+}
+
+uint16_t CodeGen::getPointerAuthDeclDiscriminator(CodeGenModule &CGM,
+ GlobalDecl Declaration) {
+ return CGM.getPointerAuthDeclDiscriminator(Declaration);
+}
+
+/// Return the "other" decl-specific discriminator for the given decl.
+uint16_t
+CodeGenModule::getPointerAuthDeclDiscriminator(GlobalDecl Declaration) {
+ uint16_t &EntityHash = PtrAuthDiscriminatorHashes[Declaration];
+
+ if (EntityHash == 0) {
+ StringRef Name = getMangledName(Declaration);
+ EntityHash = llvm::getPointerAuthStableSipHash(Name);
+ }
+
+ return EntityHash;
+}
+
+/// Return the abstract pointer authentication schema for a pointer to the given
+/// function type.
+CGPointerAuthInfo CodeGenModule::getFunctionPointerAuthInfo(QualType T) {
+ const auto &Schema = getCodeGenOpts().PointerAuth.FunctionPointers;
+ if (!Schema)
+ return CGPointerAuthInfo();
+
+ assert(!Schema.isAddressDiscriminated() &&
+ "function pointers cannot use address-specific discrimination");
+
+ llvm::Constant *Discriminator = nullptr;
+ if (T->isFunctionPointerType() || T->isFunctionReferenceType())
+ T = T->getPointeeType();
+ if (T->isFunctionType())
+ Discriminator = getPointerAuthOtherDiscriminator(Schema, GlobalDecl(), T);
+
+ return CGPointerAuthInfo(Schema.getKey(), Schema.getAuthenticationMode(),
+ /*IsaPointer=*/false, /*AuthenticatesNull=*/false,
+ Discriminator);
+}
+
+llvm::Value *
+CodeGenFunction::EmitPointerAuthBlendDiscriminator(llvm::Value *StorageAddress,
+ llvm::Value *Discriminator) {
+ StorageAddress = Builder.CreatePtrToInt(StorageAddress, IntPtrTy);
+ auto Intrinsic = CGM.getIntrinsic(llvm::Intrinsic::ptrauth_blend);
+ return Builder.CreateCall(Intrinsic, {StorageAddress, Discriminator});
+}
+
+/// Emit the concrete pointer authentication informaton for the
+/// given authentication schema.
+CGPointerAuthInfo CodeGenFunction::EmitPointerAuthInfo(
+ const PointerAuthSchema &Schema, llvm::Value *StorageAddress,
+ GlobalDecl SchemaDecl, QualType SchemaType) {
+ if (!Schema)
+ return CGPointerAuthInfo();
+
+ llvm::Value *Discriminator =
+ CGM.getPointerAuthOtherDiscriminator(Schema, SchemaDecl, SchemaType);
+
+ if (Schema.isAddressDiscriminated()) {
+ assert(StorageAddress &&
+ "address not provided for address-discriminated schema");
+
+ if (Discriminator)
+ Discriminator =
+ EmitPointerAuthBlendDiscriminator(StorageAddress, Discriminator);
+ else
+ Discriminator = Builder.CreatePtrToInt(StorageAddress, IntPtrTy);
+ }
+
+ return CGPointerAuthInfo(Schema.getKey(), Schema.getAuthenticationMode(),
+ Schema.isIsaPointer(),
+ Schema.authenticatesNullValues(), Discriminator);
+}
+
+/// Return the natural pointer authentication for values of the given
+/// pointee type.
+static CGPointerAuthInfo
+getPointerAuthInfoForPointeeType(CodeGenModule &CGM, QualType PointeeType) {
+ if (PointeeType.isNull())
+ return CGPointerAuthInfo();
+
+ // Function pointers use the function-pointer schema by default.
+ if (PointeeType->isFunctionType())
+ return CGM.getFunctionPointerAuthInfo(PointeeType);
+
+ // Normal data pointers never use direct pointer authentication by default.
+ return CGPointerAuthInfo();
+}
+
+CGPointerAuthInfo CodeGenModule::getPointerAuthInfoForPointeeType(QualType T) {
+ return ::getPointerAuthInfoForPointeeType(*this, T);
+}
+
+/// Return the natural pointer authentication for values of the given
+/// pointer type.
+static CGPointerAuthInfo getPointerAuthInfoForType(CodeGenModule &CGM,
+ QualType PointerType) {
+ assert(PointerType->isSignableType());
+
+ // Block pointers are currently not signed.
+ if (PointerType->isBlockPointerType())
+ return CGPointerAuthInfo();
+
+ auto PointeeType = PointerType->getPointeeType();
+
+ if (PointeeType.isNull())
+ return CGPointerAuthInfo();
+
+ return ::getPointerAuthInfoForPointeeType(CGM, PointeeType);
+}
+
+CGPointerAuthInfo CodeGenModule::getPointerAuthInfoForType(QualType T) {
+ return ::getPointerAuthInfoForType(*this, T);
+}
+
+static bool isZeroConstant(const llvm::Value *Value) {
+ if (const auto *CI = dyn_cast<llvm::ConstantInt>(Value))
+ return CI->isZero();
+ return false;
+}
+
+static bool equalAuthPolicies(const CGPointerAuthInfo &Left,
+ const CGPointerAuthInfo &Right) {
+ assert((Left.isSigned() || Right.isSigned()) &&
+ "shouldn't be called if neither is signed");
+ if (Left.isSigned() != Right.isSigned())
+ return false;
+ return Left.getKey() == Right.getKey() &&
+ Left.getAuthenticationMode() == Right.getAuthenticationMode();
+}
+
+// Return the discriminator or return zero if the discriminator is null.
+static llvm::Value *getDiscriminatorOrZero(const CGPointerAuthInfo &Info,
+ CGBuilderTy &Builder) {
+ llvm::Value *Discriminator = Info.getDiscriminator();
+ return Discriminator ? Discriminator : Builder.getSize(0);
+}
+
+llvm::Value *
+CodeGenFunction::emitPointerAuthResignCall(llvm::Value *Value,
+ const CGPointerAuthInfo &CurAuth,
+ const CGPointerAuthInfo &NewAuth) {
+ assert(CurAuth && NewAuth);
+
+ if (CurAuth.getAuthenticationMode() !=
+ PointerAuthenticationMode::SignAndAuth ||
+ NewAuth.getAuthenticationMode() !=
+ PointerAuthenticationMode::SignAndAuth) {
+ llvm::Value *AuthedValue = EmitPointerAuthAuth(CurAuth, Value);
+ return EmitPointerAuthSign(NewAuth, AuthedValue);
+ }
+ // Convert the pointer to intptr_t before signing it.
+ auto *OrigType = Value->getType();
+ Value = Builder.CreatePtrToInt(Value, IntPtrTy);
+
+ auto *CurKey = Builder.getInt32(CurAuth.getKey());
+ auto *NewKey = Builder.getInt32(NewAuth.getKey());
+
+ llvm::Value *CurDiscriminator = getDiscriminatorOrZero(CurAuth, Builder);
+ llvm::Value *NewDiscriminator = getDiscriminatorOrZero(NewAuth, Builder);
+
+ // call i64 @llvm.ptrauth.resign(i64 %pointer,
+ // i32 %curKey, i64 %curDiscriminator,
+ // i32 %newKey, i64 %newDiscriminator)
+ auto *Intrinsic = CGM.getIntrinsic(llvm::Intrinsic::ptrauth_resign);
+ Value = EmitRuntimeCall(
+ Intrinsic, {Value, CurKey, CurDiscriminator, NewKey, NewDiscriminator});
+
+ // Convert back to the original type.
+ Value = Builder.CreateIntToPtr(Value, OrigType);
+ return Value;
+}
+
+llvm::Value *CodeGenFunction::emitPointerAuthResign(
+ llvm::Value *Value, QualType Type, const CGPointerAuthInfo &CurAuthInfo,
+ const CGPointerAuthInfo &NewAuthInfo, bool IsKnownNonNull) {
+ // Fast path: if neither schema wants a signature, we're done.
+ if (!CurAuthInfo && !NewAuthInfo)
+ return Value;
+
+ llvm::Value *Null = nullptr;
+ // If the value is obviously null, we're done.
+ if (auto *PointerValue = dyn_cast<llvm::PointerType>(Value->getType())) {
+ Null = CGM.getNullPointer(PointerValue, Type);
+ } else {
+ assert(Value->getType()->isIntegerTy());
+ Null = llvm::ConstantInt::get(IntPtrTy, 0);
+ }
+ if (Value == Null)
+ return Value;
+
+ // If both schemas sign the same way, we're done.
+ if (equalAuthPolicies(CurAuthInfo, NewAuthInfo)) {
+ const llvm::Value *CurD = CurAuthInfo.getDiscriminator();
+ const llvm::Value *NewD = NewAuthInfo.getDiscriminator();
+ if (CurD == NewD)
+ return Value;
+
+ if ((CurD == nullptr && isZeroConstant(NewD)) ||
+ (NewD == nullptr && isZeroConstant(CurD)))
+ return Value;
+ }
+
+ llvm::BasicBlock *InitBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *ResignBB = nullptr, *ContBB = nullptr;
+
+ // Null pointers have to be mapped to null, and the ptrauth_resign
+ // intrinsic doesn't do that.
+ if (!IsKnownNonNull && !llvm::isKnownNonZero(Value, CGM.getDataLayout())) {
+ ContBB = createBasicBlock("resign.cont");
+ ResignBB = createBasicBlock("resign.nonnull");
+
+ auto *IsNonNull = Builder.CreateICmpNE(Value, Null);
+ Builder.CreateCondBr(IsNonNull, ResignBB, ContBB);
+ EmitBlock(ResignBB);
+ }
+
+ // Perform the auth/sign/resign operation.
+ if (!NewAuthInfo)
+ Value = EmitPointerAuthAuth(CurAuthInfo, Value);
+ else if (!CurAuthInfo)
+ Value = EmitPointerAuthSign(NewAuthInfo, Value);
+ else
+ Value = emitPointerAuthResignCall(Value, CurAuthInfo, NewAuthInfo);
+
+ // Clean up with a phi if we branched before.
+ if (ContBB) {
+ EmitBlock(ContBB);
+ auto *Phi = Builder.CreatePHI(Value->getType(), 2);
+ Phi->addIncoming(Null, InitBB);
+ Phi->addIncoming(Value, ResignBB);
+ Value = Phi;
+ }
+
+ return Value;
+}
+
+llvm::Constant *
+CodeGenModule::getConstantSignedPointer(llvm::Constant *Pointer, unsigned Key,
+ llvm::Constant *StorageAddress,
+ llvm::ConstantInt *OtherDiscriminator) {
+ llvm::Constant *AddressDiscriminator;
+ if (StorageAddress) {
+ assert(StorageAddress->getType() == UnqualPtrTy);
+ AddressDiscriminator = StorageAddress;
+ } else {
+ AddressDiscriminator = llvm::Constant::getNullValue(UnqualPtrTy);
+ }
+
+ llvm::ConstantInt *IntegerDiscriminator;
+ if (OtherDiscriminator) {
+ assert(OtherDiscriminator->getType() == Int64Ty);
+ IntegerDiscriminator = OtherDiscriminator;
+ } else {
+ IntegerDiscriminator = llvm::ConstantInt::get(Int64Ty, 0);
+ }
+
+ return llvm::ConstantPtrAuth::get(Pointer,
+ llvm::ConstantInt::get(Int32Ty, Key),
+ IntegerDiscriminator, AddressDiscriminator);
+}
+
+/// Does a given PointerAuthScheme require us to sign a value
+bool CodeGenModule::shouldSignPointer(const PointerAuthSchema &Schema) {
+ auto AuthenticationMode = Schema.getAuthenticationMode();
+ return AuthenticationMode == PointerAuthenticationMode::SignAndStrip ||
+ AuthenticationMode == PointerAuthenticationMode::SignAndAuth;
+}
+
+/// Sign a constant pointer using the given scheme, producing a constant
+/// with the same IR type.
+llvm::Constant *CodeGenModule::getConstantSignedPointer(
+ llvm::Constant *Pointer, const PointerAuthSchema &Schema,
+ llvm::Constant *StorageAddress, GlobalDecl SchemaDecl,
+ QualType SchemaType) {
+ assert(shouldSignPointer(Schema));
+ llvm::ConstantInt *OtherDiscriminator =
+ getPointerAuthOtherDiscriminator(Schema, SchemaDecl, SchemaType);
+
+ return getConstantSignedPointer(Pointer, Schema.getKey(), StorageAddress,
+ OtherDiscriminator);
+}
+
+/// If applicable, sign a given constant function pointer with the ABI rules for
+/// functionType.
+llvm::Constant *CodeGenModule::getFunctionPointer(llvm::Constant *Pointer,
+ QualType FunctionType) {
+ assert(FunctionType->isFunctionType() ||
+ FunctionType->isFunctionReferenceType() ||
+ FunctionType->isFunctionPointerType());
+
+ if (auto PointerAuth = getFunctionPointerAuthInfo(FunctionType))
+ return getConstantSignedPointer(
+ Pointer, PointerAuth.getKey(), /*StorageAddress=*/nullptr,
+ cast_or_null<llvm::ConstantInt>(PointerAuth.getDiscriminator()));
+
+ return Pointer;
+}
+
+llvm::Constant *CodeGenModule::getFunctionPointer(GlobalDecl GD,
+ llvm::Type *Ty) {
+ const auto *FD = cast<FunctionDecl>(GD.getDecl());
+ QualType FuncType = FD->getType();
+
+ // Annoyingly, K&R functions have prototypes in the clang AST, but
+ // expressions referring to them are unprototyped.
+ if (!FD->hasPrototype())
+ if (const auto *Proto = FuncType->getAs<FunctionProtoType>())
+ FuncType = Context.getFunctionNoProtoType(Proto->getReturnType(),
+ Proto->getExtInfo());
+
+ return getFunctionPointer(getRawFunctionPointer(GD, Ty), FuncType);
+}
+
+CGPointerAuthInfo CodeGenModule::getMemberFunctionPointerAuthInfo(QualType FT) {
+ assert(FT->getAs<MemberPointerType>() && "MemberPointerType expected");
+ const auto &Schema = getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers;
+ if (!Schema)
+ return CGPointerAuthInfo();
+
+ assert(!Schema.isAddressDiscriminated() &&
+ "function pointers cannot use address-specific discrimination");
+
+ llvm::ConstantInt *Discriminator =
+ getPointerAuthOtherDiscriminator(Schema, GlobalDecl(), FT);
+ return CGPointerAuthInfo(Schema.getKey(), Schema.getAuthenticationMode(),
+ /* IsIsaPointer */ false,
+ /* AuthenticatesNullValues */ false, Discriminator);
+}
+
+llvm::Constant *CodeGenModule::getMemberFunctionPointer(llvm::Constant *Pointer,
+ QualType FT) {
+ if (CGPointerAuthInfo PointerAuth = getMemberFunctionPointerAuthInfo(FT))
+ return getConstantSignedPointer(
+ Pointer, PointerAuth.getKey(), nullptr,
+ cast_or_null<llvm::ConstantInt>(PointerAuth.getDiscriminator()));
+
+ return Pointer;
+}
+
+llvm::Constant *CodeGenModule::getMemberFunctionPointer(const FunctionDecl *FD,
+ llvm::Type *Ty) {
+ QualType FT = FD->getType();
+ FT = getContext().getMemberPointerType(
+ FT, cast<CXXMethodDecl>(FD)->getParent()->getTypeForDecl());
+ return getMemberFunctionPointer(getRawFunctionPointer(FD, Ty), FT);
+}
+
+std::optional<PointerAuthQualifier>
+CodeGenModule::computeVTPointerAuthentication(const CXXRecordDecl *ThisClass) {
+ auto DefaultAuthentication = getCodeGenOpts().PointerAuth.CXXVTablePointers;
+ if (!DefaultAuthentication)
+ return std::nullopt;
+ const CXXRecordDecl *PrimaryBase =
+ Context.baseForVTableAuthentication(ThisClass);
+
+ unsigned Key = DefaultAuthentication.getKey();
+ bool AddressDiscriminated = DefaultAuthentication.isAddressDiscriminated();
+ auto DefaultDiscrimination = DefaultAuthentication.getOtherDiscrimination();
+ unsigned TypeBasedDiscriminator =
+ Context.getPointerAuthVTablePointerDiscriminator(PrimaryBase);
+ unsigned Discriminator;
+ if (DefaultDiscrimination == PointerAuthSchema::Discrimination::Type) {
+ Discriminator = TypeBasedDiscriminator;
+ } else if (DefaultDiscrimination ==
+ PointerAuthSchema::Discrimination::Constant) {
+ Discriminator = DefaultAuthentication.getConstantDiscrimination();
+ } else {
+ assert(DefaultDiscrimination == PointerAuthSchema::Discrimination::None);
+ Discriminator = 0;
+ }
+ if (auto ExplicitAuthentication =
+ PrimaryBase->getAttr<VTablePointerAuthenticationAttr>()) {
+ auto ExplicitAddressDiscrimination =
+ ExplicitAuthentication->getAddressDiscrimination();
+ auto ExplicitDiscriminator =
+ ExplicitAuthentication->getExtraDiscrimination();
+
+ unsigned ExplicitKey = ExplicitAuthentication->getKey();
+ if (ExplicitKey == VTablePointerAuthenticationAttr::NoKey)
+ return std::nullopt;
+
+ if (ExplicitKey != VTablePointerAuthenticationAttr::DefaultKey) {
+ if (ExplicitKey == VTablePointerAuthenticationAttr::ProcessIndependent)
+ Key = (unsigned)PointerAuthSchema::ARM8_3Key::ASDA;
+ else {
+ assert(ExplicitKey ==
+ VTablePointerAuthenticationAttr::ProcessDependent);
+ Key = (unsigned)PointerAuthSchema::ARM8_3Key::ASDB;
+ }
+ }
+
+ if (ExplicitAddressDiscrimination !=
+ VTablePointerAuthenticationAttr::DefaultAddressDiscrimination)
+ AddressDiscriminated =
+ ExplicitAddressDiscrimination ==
+ VTablePointerAuthenticationAttr::AddressDiscrimination;
+
+ if (ExplicitDiscriminator ==
+ VTablePointerAuthenticationAttr::TypeDiscrimination)
+ Discriminator = TypeBasedDiscriminator;
+ else if (ExplicitDiscriminator ==
+ VTablePointerAuthenticationAttr::CustomDiscrimination)
+ Discriminator = ExplicitAuthentication->getCustomDiscriminationValue();
+ else if (ExplicitDiscriminator ==
+ VTablePointerAuthenticationAttr::NoExtraDiscrimination)
+ Discriminator = 0;
+ }
+ return PointerAuthQualifier::Create(Key, AddressDiscriminated, Discriminator,
+ PointerAuthenticationMode::SignAndAuth,
+ /* IsIsaPointer */ false,
+ /* AuthenticatesNullValues */ false);
+}
+
+std::optional<PointerAuthQualifier>
+CodeGenModule::getVTablePointerAuthentication(const CXXRecordDecl *Record) {
+ if (!Record->getDefinition() || !Record->isPolymorphic())
+ return std::nullopt;
+
+ auto Existing = VTablePtrAuthInfos.find(Record);
+ std::optional<PointerAuthQualifier> Authentication;
+ if (Existing != VTablePtrAuthInfos.end()) {
+ Authentication = Existing->getSecond();
+ } else {
+ Authentication = computeVTPointerAuthentication(Record);
+ VTablePtrAuthInfos.insert(std::make_pair(Record, Authentication));
+ }
+ return Authentication;
+}
+
+std::optional<CGPointerAuthInfo>
+CodeGenModule::getVTablePointerAuthInfo(CodeGenFunction *CGF,
+ const CXXRecordDecl *Record,
+ llvm::Value *StorageAddress) {
+ auto Authentication = getVTablePointerAuthentication(Record);
+ if (!Authentication)
+ return std::nullopt;
+
+ llvm::Value *Discriminator = nullptr;
+ if (auto ExtraDiscriminator = Authentication->getExtraDiscriminator())
+ Discriminator = llvm::ConstantInt::get(IntPtrTy, ExtraDiscriminator);
+
+ if (Authentication->isAddressDiscriminated()) {
+ assert(StorageAddress &&
+ "address not provided for address-discriminated schema");
+ if (Discriminator)
+ Discriminator =
+ CGF->EmitPointerAuthBlendDiscriminator(StorageAddress, Discriminator);
+ else
+ Discriminator = CGF->Builder.CreatePtrToInt(StorageAddress, IntPtrTy);
+ }
+
+ return CGPointerAuthInfo(Authentication->getKey(),
+ PointerAuthenticationMode::SignAndAuth,
+ /* IsIsaPointer */ false,
+ /* AuthenticatesNullValues */ false, Discriminator);
+}
+
+llvm::Value *CodeGenFunction::authPointerToPointerCast(llvm::Value *ResultPtr,
+ QualType SourceType,
+ QualType DestType) {
+ CGPointerAuthInfo CurAuthInfo, NewAuthInfo;
+ if (SourceType->isSignableType())
+ CurAuthInfo = getPointerAuthInfoForType(CGM, SourceType);
+
+ if (DestType->isSignableType())
+ NewAuthInfo = getPointerAuthInfoForType(CGM, DestType);
+
+ if (!CurAuthInfo && !NewAuthInfo)
+ return ResultPtr;
+
+ // If only one side of the cast is a function pointer, then we still need to
+ // resign to handle casts to/from opaque pointers.
+ if (!CurAuthInfo && DestType->isFunctionPointerType())
+ CurAuthInfo = CGM.getFunctionPointerAuthInfo(SourceType);
+
+ if (!NewAuthInfo && SourceType->isFunctionPointerType())
+ NewAuthInfo = CGM.getFunctionPointerAuthInfo(DestType);
+
+ return emitPointerAuthResign(ResultPtr, DestType, CurAuthInfo, NewAuthInfo,
+ /*IsKnownNonNull=*/false);
+}
+
+Address CodeGenFunction::authPointerToPointerCast(Address Ptr,
+ QualType SourceType,
+ QualType DestType) {
+ CGPointerAuthInfo CurAuthInfo, NewAuthInfo;
+ if (SourceType->isSignableType())
+ CurAuthInfo = getPointerAuthInfoForType(CGM, SourceType);
+
+ if (DestType->isSignableType())
+ NewAuthInfo = getPointerAuthInfoForType(CGM, DestType);
+
+ if (!CurAuthInfo && !NewAuthInfo)
+ return Ptr;
+
+ if (!CurAuthInfo && DestType->isFunctionPointerType()) {
+ // When casting a non-signed pointer to a function pointer, just set the
+ // auth info on Ptr to the assumed schema. The pointer will be resigned to
+ // the effective type when used.
+ Ptr.setPointerAuthInfo(CGM.getFunctionPointerAuthInfo(SourceType));
+ return Ptr;
+ }
+
+ if (!NewAuthInfo && SourceType->isFunctionPointerType()) {
+ NewAuthInfo = CGM.getFunctionPointerAuthInfo(DestType);
+ Ptr = Ptr.getResignedAddress(NewAuthInfo, *this);
+ Ptr.setPointerAuthInfo(CGPointerAuthInfo());
+ return Ptr;
+ }
+
+ return Ptr;
+}
+
+Address CodeGenFunction::getAsNaturalAddressOf(Address Addr,
+ QualType PointeeTy) {
+ CGPointerAuthInfo Info =
+ PointeeTy.isNull() ? CGPointerAuthInfo()
+ : CGM.getPointerAuthInfoForPointeeType(PointeeTy);
+ return Addr.getResignedAddress(Info, *this);
+}
+
+Address Address::getResignedAddress(const CGPointerAuthInfo &NewInfo,
+ CodeGenFunction &CGF) const {
+ assert(isValid() && "pointer isn't valid");
+ CGPointerAuthInfo CurInfo = getPointerAuthInfo();
+ llvm::Value *Val;
+
+ // Nothing to do if neither the current or the new ptrauth info needs signing.
+ if (!CurInfo.isSigned() && !NewInfo.isSigned())
+ return Address(getBasePointer(), getElementType(), getAlignment(),
+ isKnownNonNull());
+
+ assert(ElementType && "Effective type has to be set");
+ assert(!Offset && "unexpected non-null offset");
+
+ // If the current and the new ptrauth infos are the same and the offset is
+ // null, just cast the base pointer to the effective type.
+ if (CurInfo == NewInfo && !hasOffset())
+ Val = getBasePointer();
+ else
+ Val = CGF.emitPointerAuthResign(getBasePointer(), QualType(), CurInfo,
+ NewInfo, isKnownNonNull());
+
+ Val = CGF.Builder.CreateBitCast(Val, getType());
+ return Address(Val, getElementType(), getAlignment(), NewInfo,
+ /*Offset=*/nullptr, isKnownNonNull());
+}
+
+llvm::Value *Address::emitRawPointerSlow(CodeGenFunction &CGF) const {
+ return CGF.getAsNaturalPointerTo(*this, QualType());
+}
+
+llvm::Value *LValue::getPointer(CodeGenFunction &CGF) const {
+ assert(isSimple());
+ return emitResignedPointer(getType(), CGF);
+}
+
+llvm::Value *LValue::emitResignedPointer(QualType PointeeTy,
+ CodeGenFunction &CGF) const {
+ assert(isSimple());
+ return CGF.getAsNaturalAddressOf(Addr, PointeeTy).getBasePointer();
+}
+
+llvm::Value *LValue::emitRawPointer(CodeGenFunction &CGF) const {
+ assert(isSimple());
+ return Addr.isValid() ? Addr.emitRawPointer(CGF) : nullptr;
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGPointerAuthInfo.h b/contrib/llvm-project/clang/lib/CodeGen/CGPointerAuthInfo.h
new file mode 100644
index 000000000000..0a0c11fb423f
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGPointerAuthInfo.h
@@ -0,0 +1,99 @@
+//===----- CGPointerAuthInfo.h - -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Pointer auth info class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGPOINTERAUTHINFO_H
+#define LLVM_CLANG_LIB_CODEGEN_CGPOINTERAUTHINFO_H
+
+#include "clang/AST/Type.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+
+namespace clang {
+namespace CodeGen {
+
+class CGPointerAuthInfo {
+private:
+ PointerAuthenticationMode AuthenticationMode : 2;
+ unsigned IsIsaPointer : 1;
+ unsigned AuthenticatesNullValues : 1;
+ unsigned Key : 2;
+ llvm::Value *Discriminator;
+
+public:
+ CGPointerAuthInfo()
+ : AuthenticationMode(PointerAuthenticationMode::None),
+ IsIsaPointer(false), AuthenticatesNullValues(false), Key(0),
+ Discriminator(nullptr) {}
+ CGPointerAuthInfo(unsigned Key, PointerAuthenticationMode AuthenticationMode,
+ bool IsIsaPointer, bool AuthenticatesNullValues,
+ llvm::Value *Discriminator)
+ : AuthenticationMode(AuthenticationMode), IsIsaPointer(IsIsaPointer),
+ AuthenticatesNullValues(AuthenticatesNullValues), Key(Key),
+ Discriminator(Discriminator) {
+ assert(!Discriminator || Discriminator->getType()->isIntegerTy() ||
+ Discriminator->getType()->isPointerTy());
+ }
+
+ explicit operator bool() const { return isSigned(); }
+
+ bool isSigned() const {
+ return AuthenticationMode != PointerAuthenticationMode::None;
+ }
+
+ unsigned getKey() const {
+ assert(isSigned());
+ return Key;
+ }
+ llvm::Value *getDiscriminator() const {
+ assert(isSigned());
+ return Discriminator;
+ }
+
+ PointerAuthenticationMode getAuthenticationMode() const {
+ return AuthenticationMode;
+ }
+
+ bool isIsaPointer() const { return IsIsaPointer; }
+
+ bool authenticatesNullValues() const { return AuthenticatesNullValues; }
+
+ bool shouldStrip() const {
+ return AuthenticationMode == PointerAuthenticationMode::Strip ||
+ AuthenticationMode == PointerAuthenticationMode::SignAndStrip;
+ }
+
+ bool shouldSign() const {
+ return AuthenticationMode == PointerAuthenticationMode::SignAndStrip ||
+ AuthenticationMode == PointerAuthenticationMode::SignAndAuth;
+ }
+
+ bool shouldAuth() const {
+ return AuthenticationMode == PointerAuthenticationMode::SignAndAuth;
+ }
+
+ friend bool operator!=(const CGPointerAuthInfo &LHS,
+ const CGPointerAuthInfo &RHS) {
+ return LHS.Key != RHS.Key || LHS.Discriminator != RHS.Discriminator ||
+ LHS.AuthenticationMode != RHS.AuthenticationMode;
+ }
+
+ friend bool operator==(const CGPointerAuthInfo &LHS,
+ const CGPointerAuthInfo &RHS) {
+ return !(LHS != RHS);
+ }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h
index d5ea74922603..44e888c93108 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h
@@ -71,6 +71,7 @@ struct CGBitFieldInfo {
unsigned Size : 15;
/// Whether the bit-field is signed.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsSigned : 1;
/// The storage size in bits which should be used when accessing this
@@ -192,6 +193,10 @@ public:
return IsZeroInitializableAsBase;
}
+ bool containsFieldDecl(const FieldDecl *FD) const {
+ return FieldInfo.count(FD) != 0;
+ }
+
/// Return llvm::StructType element number that corresponds to the
/// field FD.
unsigned getLLVMFieldNo(const FieldDecl *FD) const {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 868ef810f3c4..ea44e6f21f3c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -10,8 +10,9 @@
//
//===----------------------------------------------------------------------===//
-#include "CGRecordLayout.h"
+#include "ABIInfoImpl.h"
#include "CGCXXABI.h"
+#include "CGRecordLayout.h"
#include "CodeGenTypes.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
@@ -41,14 +42,17 @@ namespace {
/// contains enough information to determine where the runs break. Microsoft
/// and Itanium follow different rules and use different codepaths.
/// * It is desired that, when possible, bitfields use the appropriate iN type
-/// when lowered to llvm types. For example unsigned x : 24 gets lowered to
+/// when lowered to llvm types. For example unsigned x : 24 gets lowered to
/// i24. This isn't always possible because i24 has storage size of 32 bit
-/// and if it is possible to use that extra byte of padding we must use
-/// [i8 x 3] instead of i24. The function clipTailPadding does this.
+/// and if it is possible to use that extra byte of padding we must use [i8 x
+/// 3] instead of i24. This is computed when accumulating bitfields in
+/// accumulateBitfields.
/// C++ examples that require clipping:
/// struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
-/// struct A { int a : 24; }; // a must be clipped because a struct like B
-// could exist: struct B : A { char b; }; // b goes at offset 3
+/// struct A { int a : 24; ~A(); }; // a must be clipped because:
+/// struct B : A { char b; }; // b goes at offset 3
+/// * The allocation of bitfield access units is described in more detail in
+/// CGRecordLowering::accumulateBitFields.
/// * Clang ignores 0 sized bitfields and 0 sized bases but *not* zero sized
/// fields. The existing asserts suggest that LLVM assumes that *every* field
/// has an underlying storage type. Therefore empty structures containing
@@ -60,11 +64,7 @@ namespace {
/// that the tail padding is not used in the complete class.) However,
/// because LLVM reads from the complete type it can generate incorrect code
/// if we do not clip the tail padding off of the bitfield in the complete
-/// layout. This introduces a somewhat awkward extra unnecessary clip stage.
-/// The location of the clip is stored internally as a sentinel of type
-/// SCISSOR. If LLVM were updated to read base types (which it probably
-/// should because locations of things such as VBases are bogus in the llvm
-/// type anyway) then we could eliminate the SCISSOR.
+/// layout.
/// * Itanium allows nearly empty primary virtual bases. These bases don't get
/// get their own storage because they're laid out as part of another base
/// or at the beginning of the structure. Determining if a VBase actually
@@ -76,7 +76,7 @@ struct CGRecordLowering {
// sentinel member type that ensures correct rounding.
struct MemberInfo {
CharUnits Offset;
- enum InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } Kind;
+ enum InfoKind { VFPtr, VBPtr, Field, Base, VBase } Kind;
llvm::Type *Data;
union {
const FieldDecl *FD;
@@ -95,7 +95,7 @@ struct CGRecordLowering {
CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed);
// Short helper routines.
/// Constructs a MemberInfo instance from an offset and llvm::Type *.
- MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
+ static MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
return MemberInfo(Offset, MemberInfo::Field, Data);
}
@@ -104,7 +104,7 @@ struct CGRecordLowering {
/// fields of the same formal type. We want to emit a layout with
/// these discrete storage units instead of combining them into a
/// continuous run.
- bool isDiscreteBitFieldABI() {
+ bool isDiscreteBitFieldABI() const {
return Context.getTargetInfo().getCXXABI().isMicrosoft() ||
D->isMsStruct(Context);
}
@@ -121,22 +121,22 @@ struct CGRecordLowering {
/// other bases, which complicates layout in specific ways.
///
/// Note specifically that the ms_struct attribute doesn't change this.
- bool isOverlappingVBaseABI() {
+ bool isOverlappingVBaseABI() const {
return !Context.getTargetInfo().getCXXABI().isMicrosoft();
}
/// Wraps llvm::Type::getIntNTy with some implicit arguments.
- llvm::Type *getIntNType(uint64_t NumBits) {
+ llvm::Type *getIntNType(uint64_t NumBits) const {
unsigned AlignedBits = llvm::alignTo(NumBits, Context.getCharWidth());
return llvm::Type::getIntNTy(Types.getLLVMContext(), AlignedBits);
}
/// Get the LLVM type sized as one character unit.
- llvm::Type *getCharType() {
+ llvm::Type *getCharType() const {
return llvm::Type::getIntNTy(Types.getLLVMContext(),
Context.getCharWidth());
}
/// Gets an llvm type of size NumChars and alignment 1.
- llvm::Type *getByteArrayType(CharUnits NumChars) {
+ llvm::Type *getByteArrayType(CharUnits NumChars) const {
assert(!NumChars.isZero() && "Empty byte arrays aren't allowed.");
llvm::Type *Type = getCharType();
return NumChars == CharUnits::One() ? Type :
@@ -144,7 +144,7 @@ struct CGRecordLowering {
}
/// Gets the storage type for a field decl and handles storage
/// for itanium bitfields that are smaller than their declared type.
- llvm::Type *getStorageType(const FieldDecl *FD) {
+ llvm::Type *getStorageType(const FieldDecl *FD) const {
llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
if (!FD->isBitField()) return Type;
if (isDiscreteBitFieldABI()) return Type;
@@ -152,29 +152,29 @@ struct CGRecordLowering {
(unsigned)Context.toBits(getSize(Type))));
}
/// Gets the llvm Basesubobject type from a CXXRecordDecl.
- llvm::Type *getStorageType(const CXXRecordDecl *RD) {
+ llvm::Type *getStorageType(const CXXRecordDecl *RD) const {
return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType();
}
- CharUnits bitsToCharUnits(uint64_t BitOffset) {
+ CharUnits bitsToCharUnits(uint64_t BitOffset) const {
return Context.toCharUnitsFromBits(BitOffset);
}
- CharUnits getSize(llvm::Type *Type) {
+ CharUnits getSize(llvm::Type *Type) const {
return CharUnits::fromQuantity(DataLayout.getTypeAllocSize(Type));
}
- CharUnits getAlignment(llvm::Type *Type) {
+ CharUnits getAlignment(llvm::Type *Type) const {
return CharUnits::fromQuantity(DataLayout.getABITypeAlign(Type));
}
- bool isZeroInitializable(const FieldDecl *FD) {
+ bool isZeroInitializable(const FieldDecl *FD) const {
return Types.isZeroInitializable(FD->getType());
}
- bool isZeroInitializable(const RecordDecl *RD) {
+ bool isZeroInitializable(const RecordDecl *RD) const {
return Types.isZeroInitializable(RD);
}
void appendPaddingBytes(CharUnits Size) {
if (!Size.isZero())
FieldTypes.push_back(getByteArrayType(Size));
}
- uint64_t getFieldBitOffset(const FieldDecl *FD) {
+ uint64_t getFieldBitOffset(const FieldDecl *FD) const {
return Layout.getFieldOffset(FD->getFieldIndex());
}
// Layout routines.
@@ -183,20 +183,22 @@ struct CGRecordLowering {
/// Lowers an ASTRecordLayout to a llvm type.
void lower(bool NonVirtualBaseType);
void lowerUnion(bool isNoUniqueAddress);
- void accumulateFields();
- void accumulateBitFields(RecordDecl::field_iterator Field,
- RecordDecl::field_iterator FieldEnd);
+ void accumulateFields(bool isNonVirtualBaseType);
+ RecordDecl::field_iterator
+ accumulateBitFields(bool isNonVirtualBaseType,
+ RecordDecl::field_iterator Field,
+ RecordDecl::field_iterator FieldEnd);
void computeVolatileBitfields();
void accumulateBases();
void accumulateVPtrs();
void accumulateVBases();
/// Recursively searches all of the bases to find out if a vbase is
/// not the primary vbase of some base class.
- bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query);
+ bool hasOwnStorage(const CXXRecordDecl *Decl,
+ const CXXRecordDecl *Query) const;
void calculateZeroInit();
- /// Lowers bitfield storage types to I8 arrays for bitfields with tail
- /// padding that is or can potentially be used.
- void clipTailPadding();
+ CharUnits calculateTailClippingOffset(bool isNonVirtualBaseType) const;
+ void checkBitfieldClipping(bool isNonVirtualBaseType) const;
/// Determines if we need a packed llvm struct.
void determinePacked(bool NVBaseType);
/// Inserts padding everywhere it's needed.
@@ -284,7 +286,7 @@ void CGRecordLowering::lower(bool NVBaseType) {
computeVolatileBitfields();
return;
}
- accumulateFields();
+ accumulateFields(NVBaseType);
// RD implies C++.
if (RD) {
accumulateVPtrs();
@@ -298,8 +300,8 @@ void CGRecordLowering::lower(bool NVBaseType) {
accumulateVBases();
}
llvm::stable_sort(Members);
+ checkBitfieldClipping(NVBaseType);
Members.push_back(StorageInfo(Size, getIntNType(8)));
- clipTailPadding();
determinePacked(NVBaseType);
insertPadding();
Members.pop_back();
@@ -375,16 +377,18 @@ void CGRecordLowering::lowerUnion(bool isNoUniqueAddress) {
Packed = true;
}
-void CGRecordLowering::accumulateFields() {
+void CGRecordLowering::accumulateFields(bool isNonVirtualBaseType) {
for (RecordDecl::field_iterator Field = D->field_begin(),
FieldEnd = D->field_end();
- Field != FieldEnd;) {
+ Field != FieldEnd;) {
if (Field->isBitField()) {
- RecordDecl::field_iterator Start = Field;
- // Iterate to gather the list of bitfields.
- for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
- accumulateBitFields(Start, Field);
- } else if (!Field->isZeroSize(Context)) {
+ Field = accumulateBitFields(isNonVirtualBaseType, Field, FieldEnd);
+ assert((Field == FieldEnd || !Field->isBitField()) &&
+ "Failed to accumulate all the bitfields");
+ } else if (isEmptyFieldForLayout(Context, *Field)) {
+ // Empty fields have no storage.
+ ++Field;
+ } else {
// Use base subobject layout for the potentially-overlapping field,
// as it is done in RecordLayoutBuilder
Members.push_back(MemberInfo(
@@ -394,35 +398,37 @@ void CGRecordLowering::accumulateFields() {
: getStorageType(*Field),
*Field));
++Field;
- } else {
- ++Field;
}
}
}
-void
-CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
+// Create members for bitfields. Field is a bitfield, and FieldEnd is the end
+// iterator of the record. Return the first non-bitfield encountered. We need
+// to know whether this is the base or complete layout, as virtual bases could
+// affect the upper bound of bitfield access unit allocation.
+RecordDecl::field_iterator
+CGRecordLowering::accumulateBitFields(bool isNonVirtualBaseType,
+ RecordDecl::field_iterator Field,
RecordDecl::field_iterator FieldEnd) {
- // Run stores the first element of the current run of bitfields. FieldEnd is
- // used as a special value to note that we don't have a current run. A
- // bitfield run is a contiguous collection of bitfields that can be stored in
- // the same storage block. Zero-sized bitfields and bitfields that would
- // cross an alignment boundary break a run and start a new one.
- RecordDecl::field_iterator Run = FieldEnd;
- // Tail is the offset of the first bit off the end of the current run. It's
- // used to determine if the ASTRecordLayout is treating these two bitfields as
- // contiguous. StartBitOffset is offset of the beginning of the Run.
- uint64_t StartBitOffset, Tail = 0;
if (isDiscreteBitFieldABI()) {
- for (; Field != FieldEnd; ++Field) {
- uint64_t BitOffset = getFieldBitOffset(*Field);
+ // Run stores the first element of the current run of bitfields. FieldEnd is
+ // used as a special value to note that we don't have a current run. A
+ // bitfield run is a contiguous collection of bitfields that can be stored
+ // in the same storage block. Zero-sized bitfields and bitfields that would
+ // cross an alignment boundary break a run and start a new one.
+ RecordDecl::field_iterator Run = FieldEnd;
+ // Tail is the offset of the first bit off the end of the current run. It's
+ // used to determine if the ASTRecordLayout is treating these two bitfields
+ // as contiguous. StartBitOffset is offset of the beginning of the Run.
+ uint64_t StartBitOffset, Tail = 0;
+ for (; Field != FieldEnd && Field->isBitField(); ++Field) {
// Zero-width bitfields end runs.
if (Field->isZeroLengthBitField(Context)) {
Run = FieldEnd;
continue;
}
- llvm::Type *Type =
- Types.ConvertTypeForMem(Field->getType(), /*ForBitField=*/true);
+ uint64_t BitOffset = getFieldBitOffset(*Field);
+ llvm::Type *Type = Types.ConvertTypeForMem(Field->getType());
// If we don't have a run yet, or don't live within the previous run's
// allocated storage then we allocate some storage and start a new run.
if (Run == FieldEnd || BitOffset >= Tail) {
@@ -439,82 +445,276 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
MemberInfo::Field, nullptr, *Field));
}
- return;
+ return Field;
}
- // Check if OffsetInRecord (the size in bits of the current run) is better
- // as a single field run. When OffsetInRecord has legal integer width, and
- // its bitfield offset is naturally aligned, it is better to make the
- // bitfield a separate storage component so as it can be accessed directly
- // with lower cost.
- auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
- uint64_t StartBitOffset) {
- if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
- return false;
- if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) ||
- !DataLayout.fitsInLegalInteger(OffsetInRecord))
- return false;
- // Make sure StartBitOffset is naturally aligned if it is treated as an
- // IType integer.
- if (StartBitOffset %
- Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
- 0)
- return false;
- return true;
- };
+ // The SysV ABI can overlap bitfield storage units with both other bitfield
+ // storage units /and/ other non-bitfield data members. Accessing a sequence
+ // of bitfields mustn't interfere with adjacent non-bitfields -- they're
+ // permitted to be accessed in separate threads for instance.
+
+ // We split runs of bit-fields into a sequence of "access units". When we emit
+ // a load or store of a bit-field, we'll load/store the entire containing
+ // access unit. As mentioned, the standard requires that these loads and
+ // stores must not interfere with accesses to other memory locations, and it
+ // defines the bit-field's memory location as the current run of
+ // non-zero-width bit-fields. So an access unit must never overlap with
+ // non-bit-field storage or cross a zero-width bit-field. Otherwise, we're
+ // free to draw the lines as we see fit.
+
+ // Drawing these lines well can be complicated. LLVM generally can't modify a
+ // program to access memory that it didn't before, so using very narrow access
+ // units can prevent the compiler from using optimal access patterns. For
+ // example, suppose a run of bit-fields occupies four bytes in a struct. If we
+ // split that into four 1-byte access units, then a sequence of assignments
+ // that doesn't touch all four bytes may have to be emitted with multiple
+ // 8-bit stores instead of a single 32-bit store. On the other hand, if we use
+ // very wide access units, we may find ourselves emitting accesses to
+ // bit-fields we didn't really need to touch, just because LLVM was unable to
+ // clean up after us.
+
+ // It is desirable to have access units be aligned powers of 2 no larger than
+ // a register. (On non-strict alignment ISAs, the alignment requirement can be
+ // dropped.) A three byte access unit will be accessed using 2-byte and 1-byte
+ // accesses and bit manipulation. If no bitfield straddles across the two
+ // separate accesses, it is better to have separate 2-byte and 1-byte access
+ // units, as then LLVM will not generate unnecessary memory accesses, or bit
+ // manipulation. Similarly, on a strict-alignment architecture, it is better
+ // to keep access-units naturally aligned, to avoid similar bit
+ // manipulation synthesizing larger unaligned accesses.
+
+ // Bitfields that share parts of a single byte are, of necessity, placed in
+ // the same access unit. That unit will encompass a consecutive run where
+ // adjacent bitfields share parts of a byte. (The first bitfield of such an
+ // access unit will start at the beginning of a byte.)
+
+ // We then try and accumulate adjacent access units when the combined unit is
+ // naturally sized, no larger than a register, and (on a strict alignment
+ // ISA), naturally aligned. Note that this requires lookahead to one or more
+ // subsequent access units. For instance, consider a 2-byte access-unit
+ // followed by 2 1-byte units. We can merge that into a 4-byte access-unit,
+ // but we would not want to merge a 2-byte followed by a single 1-byte (and no
+ // available tail padding). We keep track of the best access unit seen so far,
+ // and use that when we determine we cannot accumulate any more. Then we start
+ // again at the bitfield following that best one.
+
+ // The accumulation is also prevented when:
+ // *) it would cross a character-aigned zero-width bitfield, or
+ // *) fine-grained bitfield access option is in effect.
+
+ CharUnits RegSize =
+ bitsToCharUnits(Context.getTargetInfo().getRegisterWidth());
+ unsigned CharBits = Context.getCharWidth();
+
+ // Limit of useable tail padding at end of the record. Computed lazily and
+ // cached here.
+ CharUnits ScissorOffset = CharUnits::Zero();
+
+ // Data about the start of the span we're accumulating to create an access
+ // unit from. Begin is the first bitfield of the span. If Begin is FieldEnd,
+ // we've not got a current span. The span starts at the BeginOffset character
+ // boundary. BitSizeSinceBegin is the size (in bits) of the span -- this might
+ // include padding when we've advanced to a subsequent bitfield run.
+ RecordDecl::field_iterator Begin = FieldEnd;
+ CharUnits BeginOffset;
+ uint64_t BitSizeSinceBegin;
+
+ // The (non-inclusive) end of the largest acceptable access unit we've found
+ // since Begin. If this is Begin, we're gathering the initial set of bitfields
+ // of a new span. BestEndOffset is the end of that acceptable access unit --
+ // it might extend beyond the last character of the bitfield run, using
+ // available padding characters.
+ RecordDecl::field_iterator BestEnd = Begin;
+ CharUnits BestEndOffset;
+ bool BestClipped; // Whether the representation must be in a byte array.
- // The start field is better as a single field run.
- bool StartFieldAsSingleRun = false;
for (;;) {
- // Check to see if we need to start a new run.
- if (Run == FieldEnd) {
- // If we're out of fields, return.
- if (Field == FieldEnd)
+ // AtAlignedBoundary is true iff Field is the (potential) start of a new
+ // span (or the end of the bitfields). When true, LimitOffset is the
+ // character offset of that span and Barrier indicates whether the new
+ // span cannot be merged into the current one.
+ bool AtAlignedBoundary = false;
+ bool Barrier = false;
+
+ if (Field != FieldEnd && Field->isBitField()) {
+ uint64_t BitOffset = getFieldBitOffset(*Field);
+ if (Begin == FieldEnd) {
+ // Beginning a new span.
+ Begin = Field;
+ BestEnd = Begin;
+
+ assert((BitOffset % CharBits) == 0 && "Not at start of char");
+ BeginOffset = bitsToCharUnits(BitOffset);
+ BitSizeSinceBegin = 0;
+ } else if ((BitOffset % CharBits) != 0) {
+ // Bitfield occupies the same character as previous bitfield, it must be
+ // part of the same span. This can include zero-length bitfields, should
+ // the target not align them to character boundaries. Such non-alignment
+ // is at variance with the standards, which require zero-length
+ // bitfields be a barrier between access units. But of course we can't
+ // achieve that in the middle of a character.
+ assert(BitOffset == Context.toBits(BeginOffset) + BitSizeSinceBegin &&
+ "Concatenating non-contiguous bitfields");
+ } else {
+ // Bitfield potentially begins a new span. This includes zero-length
+ // bitfields on non-aligning targets that lie at character boundaries
+ // (those are barriers to merging).
+ if (Field->isZeroLengthBitField(Context))
+ Barrier = true;
+ AtAlignedBoundary = true;
+ }
+ } else {
+ // We've reached the end of the bitfield run. Either we're done, or this
+ // is a barrier for the current span.
+ if (Begin == FieldEnd)
break;
- // Any non-zero-length bitfield can start a new run.
- if (!Field->isZeroLengthBitField(Context)) {
- Run = Field;
- StartBitOffset = getFieldBitOffset(*Field);
- Tail = StartBitOffset + Field->getBitWidthValue(Context);
- StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset,
- StartBitOffset);
+
+ Barrier = true;
+ AtAlignedBoundary = true;
+ }
+
+ // InstallBest indicates whether we should create an access unit for the
+ // current best span: fields [Begin, BestEnd) occupying characters
+ // [BeginOffset, BestEndOffset).
+ bool InstallBest = false;
+ if (AtAlignedBoundary) {
+ // Field is the start of a new span or the end of the bitfields. The
+ // just-seen span now extends to BitSizeSinceBegin.
+
+ // Determine if we can accumulate that just-seen span into the current
+ // accumulation.
+ CharUnits AccessSize = bitsToCharUnits(BitSizeSinceBegin + CharBits - 1);
+ if (BestEnd == Begin) {
+ // This is the initial run at the start of a new span. By definition,
+ // this is the best seen so far.
+ BestEnd = Field;
+ BestEndOffset = BeginOffset + AccessSize;
+ // Assume clipped until proven not below.
+ BestClipped = true;
+ if (!BitSizeSinceBegin)
+ // A zero-sized initial span -- this will install nothing and reset
+ // for another.
+ InstallBest = true;
+ } else if (AccessSize > RegSize)
+ // Accumulating the just-seen span would create a multi-register access
+ // unit, which would increase register pressure.
+ InstallBest = true;
+
+ if (!InstallBest) {
+ // Determine if accumulating the just-seen span will create an expensive
+ // access unit or not.
+ llvm::Type *Type = getIntNType(Context.toBits(AccessSize));
+ if (!Context.getTargetInfo().hasCheapUnalignedBitFieldAccess()) {
+ // Unaligned accesses are expensive. Only accumulate if the new unit
+ // is naturally aligned. Otherwise install the best we have, which is
+ // either the initial access unit (can't do better), or a naturally
+ // aligned accumulation (since we would have already installed it if
+ // it wasn't naturally aligned).
+ CharUnits Align = getAlignment(Type);
+ if (Align > Layout.getAlignment())
+ // The alignment required is greater than the containing structure
+ // itself.
+ InstallBest = true;
+ else if (!BeginOffset.isMultipleOf(Align))
+ // The access unit is not at a naturally aligned offset within the
+ // structure.
+ InstallBest = true;
+
+ if (InstallBest && BestEnd == Field)
+ // We're installing the first span, whose clipping was presumed
+ // above. Compute it correctly.
+ if (getSize(Type) == AccessSize)
+ BestClipped = false;
+ }
+
+ if (!InstallBest) {
+ // Find the next used storage offset to determine what the limit of
+ // the current span is. That's either the offset of the next field
+ // with storage (which might be Field itself) or the end of the
+ // non-reusable tail padding.
+ CharUnits LimitOffset;
+ for (auto Probe = Field; Probe != FieldEnd; ++Probe)
+ if (!isEmptyFieldForLayout(Context, *Probe)) {
+ // A member with storage sets the limit.
+ assert((getFieldBitOffset(*Probe) % CharBits) == 0 &&
+ "Next storage is not byte-aligned");
+ LimitOffset = bitsToCharUnits(getFieldBitOffset(*Probe));
+ goto FoundLimit;
+ }
+ // We reached the end of the fields, determine the bounds of useable
+ // tail padding. As this can be complex for C++, we cache the result.
+ if (ScissorOffset.isZero()) {
+ ScissorOffset = calculateTailClippingOffset(isNonVirtualBaseType);
+ assert(!ScissorOffset.isZero() && "Tail clipping at zero");
+ }
+
+ LimitOffset = ScissorOffset;
+ FoundLimit:;
+
+ CharUnits TypeSize = getSize(Type);
+ if (BeginOffset + TypeSize <= LimitOffset) {
+ // There is space before LimitOffset to create a naturally-sized
+ // access unit.
+ BestEndOffset = BeginOffset + TypeSize;
+ BestEnd = Field;
+ BestClipped = false;
+ }
+
+ if (Barrier)
+ // The next field is a barrier that we cannot merge across.
+ InstallBest = true;
+ else if (Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
+ // Fine-grained access, so no merging of spans.
+ InstallBest = true;
+ else
+ // Otherwise, we're not installing. Update the bit size
+ // of the current span to go all the way to LimitOffset, which is
+ // the (aligned) offset of next bitfield to consider.
+ BitSizeSinceBegin = Context.toBits(LimitOffset - BeginOffset);
+ }
}
- ++Field;
- continue;
}
- // If the start field of a new run is better as a single run, or
- // if current field (or consecutive fields) is better as a single run, or
- // if current field has zero width bitfield and either
- // UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set to
- // true, or
- // if the offset of current field is inconsistent with the offset of
- // previous field plus its offset,
- // skip the block below and go ahead to emit the storage.
- // Otherwise, try to add bitfields to the run.
- if (!StartFieldAsSingleRun && Field != FieldEnd &&
- !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) &&
- (!Field->isZeroLengthBitField(Context) ||
- (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
- !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
- Tail == getFieldBitOffset(*Field)) {
- Tail += Field->getBitWidthValue(Context);
+ if (InstallBest) {
+ assert((Field == FieldEnd || !Field->isBitField() ||
+ (getFieldBitOffset(*Field) % CharBits) == 0) &&
+ "Installing but not at an aligned bitfield or limit");
+ CharUnits AccessSize = BestEndOffset - BeginOffset;
+ if (!AccessSize.isZero()) {
+ // Add the storage member for the access unit to the record. The
+ // bitfields get the offset of their storage but come afterward and
+ // remain there after a stable sort.
+ llvm::Type *Type;
+ if (BestClipped) {
+ assert(getSize(getIntNType(Context.toBits(AccessSize))) >
+ AccessSize &&
+ "Clipped access need not be clipped");
+ Type = getByteArrayType(AccessSize);
+ } else {
+ Type = getIntNType(Context.toBits(AccessSize));
+ assert(getSize(Type) == AccessSize &&
+ "Unclipped access must be clipped");
+ }
+ Members.push_back(StorageInfo(BeginOffset, Type));
+ for (; Begin != BestEnd; ++Begin)
+ if (!Begin->isZeroLengthBitField(Context))
+ Members.push_back(
+ MemberInfo(BeginOffset, MemberInfo::Field, nullptr, *Begin));
+ }
+ // Reset to start a new span.
+ Field = BestEnd;
+ Begin = FieldEnd;
+ } else {
+ assert(Field != FieldEnd && Field->isBitField() &&
+ "Accumulating past end of bitfields");
+ assert(!Barrier && "Accumulating across barrier");
+ // Accumulate this bitfield into the current (potential) span.
+ BitSizeSinceBegin += Field->getBitWidthValue(Context);
++Field;
- continue;
}
-
- // We've hit a break-point in the run and need to emit a storage field.
- llvm::Type *Type = getIntNType(Tail - StartBitOffset);
- // Add the storage member to the record and set the bitfield info for all of
- // the bitfields in the run. Bitfields get the offset of their storage but
- // come afterward and remain there after a stable sort.
- Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
- for (; Run != Field; ++Run)
- Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
- MemberInfo::Field, nullptr, *Run));
- Run = FieldEnd;
- StartFieldAsSingleRun = false;
}
+
+ return Field;
}
void CGRecordLowering::accumulateBases() {
@@ -532,7 +732,7 @@ void CGRecordLowering::accumulateBases() {
// Bases can be zero-sized even if not technically empty if they
// contain only a trailing array member.
const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
- if (!BaseDecl->isEmpty() &&
+ if (!isEmptyRecordForLayout(Context, Base.getType()) &&
!Context.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
MemberInfo::Base, getStorageType(BaseDecl), BaseDecl));
@@ -667,16 +867,20 @@ void CGRecordLowering::accumulateVPtrs() {
llvm::PointerType::getUnqual(Types.getLLVMContext())));
}
-void CGRecordLowering::accumulateVBases() {
+CharUnits
+CGRecordLowering::calculateTailClippingOffset(bool isNonVirtualBaseType) const {
+ if (!RD)
+ return Layout.getDataSize();
+
CharUnits ScissorOffset = Layout.getNonVirtualSize();
// In the itanium ABI, it's possible to place a vbase at a dsize that is
// smaller than the nvsize. Here we check to see if such a base is placed
// before the nvsize and set the scissor offset to that, instead of the
// nvsize.
- if (isOverlappingVBaseABI())
+ if (!isNonVirtualBaseType && isOverlappingVBaseABI())
for (const auto &Base : RD->vbases()) {
const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
- if (BaseDecl->isEmpty())
+ if (isEmptyRecordForLayout(Context, Base.getType()))
continue;
// If the vbase is a primary virtual base of some base, then it doesn't
// get its own storage location but instead lives inside of that base.
@@ -685,11 +889,14 @@ void CGRecordLowering::accumulateVBases() {
ScissorOffset = std::min(ScissorOffset,
Layout.getVBaseClassOffset(BaseDecl));
}
- Members.push_back(MemberInfo(ScissorOffset, MemberInfo::Scissor, nullptr,
- RD));
+
+ return ScissorOffset;
+}
+
+void CGRecordLowering::accumulateVBases() {
for (const auto &Base : RD->vbases()) {
const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
- if (BaseDecl->isEmpty())
+ if (isEmptyRecordForLayout(Context, Base.getType()))
continue;
CharUnits Offset = Layout.getVBaseClassOffset(BaseDecl);
// If the vbase is a primary virtual base of some base, then it doesn't
@@ -711,7 +918,7 @@ void CGRecordLowering::accumulateVBases() {
}
bool CGRecordLowering::hasOwnStorage(const CXXRecordDecl *Decl,
- const CXXRecordDecl *Query) {
+ const CXXRecordDecl *Query) const {
const ASTRecordLayout &DeclLayout = Context.getASTRecordLayout(Decl);
if (DeclLayout.isPrimaryBaseVirtual() && DeclLayout.getPrimaryBase() == Query)
return false;
@@ -740,32 +947,22 @@ void CGRecordLowering::calculateZeroInit() {
}
}
-void CGRecordLowering::clipTailPadding() {
- std::vector<MemberInfo>::iterator Prior = Members.begin();
- CharUnits Tail = getSize(Prior->Data);
- for (std::vector<MemberInfo>::iterator Member = Prior + 1,
- MemberEnd = Members.end();
- Member != MemberEnd; ++Member) {
- // Only members with data and the scissor can cut into tail padding.
- if (!Member->Data && Member->Kind != MemberInfo::Scissor)
+// Verify accumulateBitfields computed the correct storage representations.
+void CGRecordLowering::checkBitfieldClipping(bool IsNonVirtualBaseType) const {
+#ifndef NDEBUG
+ auto ScissorOffset = calculateTailClippingOffset(IsNonVirtualBaseType);
+ auto Tail = CharUnits::Zero();
+ for (const auto &M : Members) {
+ // Only members with data could possibly overlap.
+ if (!M.Data)
continue;
- if (Member->Offset < Tail) {
- assert(Prior->Kind == MemberInfo::Field &&
- "Only storage fields have tail padding!");
- if (!Prior->FD || Prior->FD->isBitField())
- Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
- cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
- else {
- assert(Prior->FD->hasAttr<NoUniqueAddressAttr>() &&
- "should not have reused this field's tail padding");
- Prior->Data = getByteArrayType(
- Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).Width);
- }
- }
- if (Member->Data)
- Prior = Member;
- Tail = Prior->Offset + getSize(Prior->Data);
+
+ assert(M.Offset >= Tail && "Bitfield access unit is not clipped");
+ Tail = M.Offset + getSize(M.Data);
+ assert((Tail <= ScissorOffset || M.Offset >= ScissorOffset) &&
+ "Bitfield straddles scissor offset");
}
+#endif
}
void CGRecordLowering::determinePacked(bool NVBaseType) {
@@ -965,7 +1162,7 @@ CodeGenTypes::ComputeRecordLayout(const RecordDecl *D, llvm::StructType *Ty) {
const FieldDecl *FD = *it;
// Ignore zero-sized fields.
- if (FD->isZeroSize(getContext()))
+ if (isEmptyFieldForLayout(getContext(), FD))
continue;
// For non-bit-fields, just check that the LLVM struct offset matches the
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
index beff0ad9da27..2f466602d2f6 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
@@ -43,6 +43,10 @@ using namespace CodeGen;
// Statement Emission
//===----------------------------------------------------------------------===//
+namespace llvm {
+extern cl::opt<bool> EnableSingleByteCoverage;
+} // namespace llvm
+
void CodeGenFunction::EmitStopPoint(const Stmt *S) {
if (CGDebugInfo *DI = getDebugInfo()) {
SourceLocation Loc;
@@ -218,6 +222,12 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
case Stmt::OMPUnrollDirectiveClass:
EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
break;
+ case Stmt::OMPReverseDirectiveClass:
+ EmitOMPReverseDirective(cast<OMPReverseDirective>(*S));
+ break;
+ case Stmt::OMPInterchangeDirectiveClass:
+ EmitOMPInterchangeDirective(cast<OMPInterchangeDirective>(*S));
+ break;
case Stmt::OMPForDirectiveClass:
EmitOMPForDirective(cast<OMPForDirective>(*S));
break;
@@ -410,7 +420,8 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
break;
case Stmt::OMPScopeDirectiveClass:
- llvm_unreachable("scope not supported with FE outlining");
+ CGM.ErrorUnsupported(S, "scope with FE outlining");
+ break;
case Stmt::OMPMaskedDirectiveClass:
EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
break;
@@ -435,6 +446,12 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
case Stmt::OMPParallelMaskedDirectiveClass:
EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
break;
+ case Stmt::OpenACCComputeConstructClass:
+ EmitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*S));
+ break;
+ case Stmt::OpenACCLoopConstructClass:
+ EmitOpenACCLoopConstruct(cast<OpenACCLoopConstruct>(*S));
+ break;
}
}
@@ -721,11 +738,19 @@ void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
case attr::AlwaysInline:
alwaysinline = true;
break;
- case attr::MustTail:
+ case attr::MustTail: {
const Stmt *Sub = S.getSubStmt();
const ReturnStmt *R = cast<ReturnStmt>(Sub);
musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
- break;
+ } break;
+ case attr::CXXAssume: {
+ const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
+ if (getLangOpts().CXXAssumptions && Builder.GetInsertBlock() &&
+ !Assumption->HasSideEffects(getContext())) {
+ llvm::Value *AssumptionVal = EvaluateExprAsBool(Assumption);
+ Builder.CreateAssumption(AssumptionVal);
+ }
+ } break;
}
}
SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
@@ -853,7 +878,10 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
// Emit the 'then' code.
EmitBlock(ThenBlock);
- incrementProfileCounter(&S);
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(S.getThen());
+ else
+ incrementProfileCounter(&S);
{
RunCleanupsScope ThenScope(*this);
EmitStmt(S.getThen());
@@ -867,6 +895,9 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
auto NL = ApplyDebugLocation::CreateEmpty(*this);
EmitBlock(ElseBlock);
}
+ // When single byte coverage mode is enabled, add a counter to else block.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(Else);
{
RunCleanupsScope ElseScope(*this);
EmitStmt(Else);
@@ -880,6 +911,74 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
// Emit the continuation block for code after the if.
EmitBlock(ContBlock, true);
+
+ // When single byte coverage mode is enabled, add a counter to continuation
+ // block.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(&S);
+}
+
+bool CodeGenFunction::checkIfLoopMustProgress(const Expr *ControllingExpression,
+ bool HasEmptyBody) {
+ if (CGM.getCodeGenOpts().getFiniteLoops() ==
+ CodeGenOptions::FiniteLoopsKind::Never)
+ return false;
+
+ // Now apply rules for plain C (see 6.8.5.6 in C11).
+ // Loops with constant conditions do not have to make progress in any C
+ // version.
+ // As an extension, we consisider loops whose constant expression
+ // can be constant-folded.
+ Expr::EvalResult Result;
+ bool CondIsConstInt =
+ !ControllingExpression ||
+ (ControllingExpression->EvaluateAsInt(Result, getContext()) &&
+ Result.Val.isInt());
+
+ bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
+ Result.Val.getInt().getBoolValue());
+
+ // Loops with non-constant conditions must make progress in C11 and later.
+ if (getLangOpts().C11 && !CondIsConstInt)
+ return true;
+
+ // [C++26][intro.progress] (DR)
+ // The implementation may assume that any thread will eventually do one of the
+ // following:
+ // [...]
+ // - continue execution of a trivial infinite loop ([stmt.iter.general]).
+ if (CGM.getCodeGenOpts().getFiniteLoops() ==
+ CodeGenOptions::FiniteLoopsKind::Always ||
+ getLangOpts().CPlusPlus11) {
+ if (HasEmptyBody && CondIsTrue) {
+ CurFn->removeFnAttr(llvm::Attribute::MustProgress);
+ return false;
+ }
+ return true;
+ }
+ return false;
+}
+
+// [C++26][stmt.iter.general] (DR)
+// A trivially empty iteration statement is an iteration statement matching one
+// of the following forms:
+// - while ( expression ) ;
+// - while ( expression ) { }
+// - do ; while ( expression ) ;
+// - do { } while ( expression ) ;
+// - for ( init-statement expression(opt); ) ;
+// - for ( init-statement expression(opt); ) { }
+template <typename LoopStmt> static bool hasEmptyLoopBody(const LoopStmt &S) {
+ if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
+ if (S.getInc())
+ return false;
+ }
+ const Stmt *Body = S.getBody();
+ if (!Body || isa<NullStmt>(Body))
+ return true;
+ if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
+ return Compound->body_empty();
+ return false;
}
void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
@@ -889,6 +988,10 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
EmitBlock(LoopHeader.getBlock());
+ if (CGM.shouldEmitConvergenceTokens())
+ ConvergenceTokenStack.push_back(emitConvergenceLoopToken(
+ LoopHeader.getBlock(), ConvergenceTokenStack.back()));
+
// Create an exit block for when the condition fails, which will
// also become the break target.
JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
@@ -916,13 +1019,16 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
// while(1) is common, avoid extra exit blocks. Be sure
// to correctly handle break/continue though.
llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
- bool CondIsConstInt = C != nullptr;
- bool EmitBoolCondBranch = !CondIsConstInt || !C->isOne();
+ bool EmitBoolCondBranch = !C || !C->isOne();
const SourceRange &R = S.getSourceRange();
LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
SourceLocToDebugLoc(R.getEnd()),
- checkIfLoopMustProgress(CondIsConstInt));
+ checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));
+
+ // When single byte coverage mode is enabled, add a counter to loop condition.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(S.getCond());
// As long as the condition is true, go to the loop body.
llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
@@ -956,7 +1062,11 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
{
RunCleanupsScope BodyScope(*this);
EmitBlock(LoopBody);
- incrementProfileCounter(&S);
+ // When single byte coverage mode is enabled, add a counter to the body.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(S.getBody());
+ else
+ incrementProfileCounter(&S);
EmitStmt(S.getBody());
}
@@ -978,6 +1088,14 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
// a branch, try to erase it.
if (!EmitBoolCondBranch)
SimplifyForwardingBlocks(LoopHeader.getBlock());
+
+ // When single byte coverage mode is enabled, add a counter to continuation
+ // block.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(&S);
+
+ if (CGM.shouldEmitConvergenceTokens())
+ ConvergenceTokenStack.pop_back();
}
void CodeGenFunction::EmitDoStmt(const DoStmt &S,
@@ -993,13 +1111,24 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S,
// Emit the body of the loop.
llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
- EmitBlockWithFallThrough(LoopBody, &S);
+ if (llvm::EnableSingleByteCoverage)
+ EmitBlockWithFallThrough(LoopBody, S.getBody());
+ else
+ EmitBlockWithFallThrough(LoopBody, &S);
+
+ if (CGM.shouldEmitConvergenceTokens())
+ ConvergenceTokenStack.push_back(
+ emitConvergenceLoopToken(LoopBody, ConvergenceTokenStack.back()));
+
{
RunCleanupsScope BodyScope(*this);
EmitStmt(S.getBody());
}
EmitBlock(LoopCond.getBlock());
+ // When single byte coverage mode is enabled, add a counter to loop condition.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(S.getCond());
// C99 6.8.5.2: "The evaluation of the controlling expression takes place
// after each execution of the loop body."
@@ -1014,14 +1143,13 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S,
// "do {} while (0)" is common in macros, avoid extra blocks. Be sure
// to correctly handle break/continue though.
llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
- bool CondIsConstInt = C;
bool EmitBoolCondBranch = !C || !C->isZero();
const SourceRange &R = S.getSourceRange();
LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
SourceLocToDebugLoc(R.getBegin()),
SourceLocToDebugLoc(R.getEnd()),
- checkIfLoopMustProgress(CondIsConstInt));
+ checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));
// As long as the condition is true, iterate the loop.
if (EmitBoolCondBranch) {
@@ -1040,6 +1168,14 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S,
// emitting a branch, try to erase it.
if (!EmitBoolCondBranch)
SimplifyForwardingBlocks(LoopCond.getBlock());
+
+ // When single byte coverage mode is enabled, add a counter to continuation
+ // block.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(&S);
+
+ if (CGM.shouldEmitConvergenceTokens())
+ ConvergenceTokenStack.pop_back();
}
void CodeGenFunction::EmitForStmt(const ForStmt &S,
@@ -1059,15 +1195,15 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
llvm::BasicBlock *CondBlock = CondDest.getBlock();
EmitBlock(CondBlock);
- Expr::EvalResult Result;
- bool CondIsConstInt =
- !S.getCond() || S.getCond()->EvaluateAsInt(Result, getContext());
+ if (CGM.shouldEmitConvergenceTokens())
+ ConvergenceTokenStack.push_back(
+ emitConvergenceLoopToken(CondBlock, ConvergenceTokenStack.back()));
const SourceRange &R = S.getSourceRange();
LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
SourceLocToDebugLoc(R.getBegin()),
SourceLocToDebugLoc(R.getEnd()),
- checkIfLoopMustProgress(CondIsConstInt));
+ checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));
// Create a cleanup scope for the condition variable cleanups.
LexicalScope ConditionScope(*this, S.getSourceRange());
@@ -1098,6 +1234,11 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
BreakContinueStack.back().ContinueBlock = Continue;
}
+ // When single byte coverage mode is enabled, add a counter to loop
+ // condition.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(S.getCond());
+
llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
// If there are any cleanups between here and the loop-exit scope,
// create a block to stage a loop exit along.
@@ -1128,8 +1269,12 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
// Treat it as a non-zero constant. Don't even create a new block for the
// body, just fall into it.
}
- incrementProfileCounter(&S);
+ // When single byte coverage mode is enabled, add a counter to the body.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(S.getBody());
+ else
+ incrementProfileCounter(&S);
{
// Create a separate cleanup scope for the body, in case it is not
// a compound statement.
@@ -1141,6 +1286,8 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
if (S.getInc()) {
EmitBlock(Continue.getBlock());
EmitStmt(S.getInc());
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(S.getInc());
}
BreakContinueStack.pop_back();
@@ -1156,6 +1303,14 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
// Emit the fall-through block.
EmitBlock(LoopExit.getBlock(), true);
+
+ // When single byte coverage mode is enabled, add a counter to continuation
+ // block.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(&S);
+
+ if (CGM.shouldEmitConvergenceTokens())
+ ConvergenceTokenStack.pop_back();
}
void
@@ -1178,6 +1333,10 @@ CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
EmitBlock(CondBlock);
+ if (CGM.shouldEmitConvergenceTokens())
+ ConvergenceTokenStack.push_back(
+ emitConvergenceLoopToken(CondBlock, ConvergenceTokenStack.back()));
+
const SourceRange &R = S.getSourceRange();
LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
SourceLocToDebugLoc(R.getBegin()),
@@ -1208,7 +1367,10 @@ CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
}
EmitBlock(ForBody);
- incrementProfileCounter(&S);
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(S.getBody());
+ else
+ incrementProfileCounter(&S);
// Create a block for the increment. In case of a 'continue', we jump there.
JumpDest Continue = getJumpDestInCurrentScope("for.inc");
@@ -1238,6 +1400,14 @@ CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
// Emit the fall-through block.
EmitBlock(LoopExit.getBlock(), true);
+
+ // When single byte coverage mode is enabled, add a counter to continuation
+ // block.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(&S);
+
+ if (CGM.shouldEmitConvergenceTokens())
+ ConvergenceTokenStack.pop_back();
}
void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
@@ -1267,10 +1437,8 @@ struct SaveRetExprRAII {
};
} // namespace
-/// If we have 'return f(...);', where both caller and callee are SwiftAsync,
-/// codegen it as 'tail call ...; ret void;'.
-static void makeTailCallIfSwiftAsync(const CallExpr *CE, CGBuilderTy &Builder,
- const CGFunctionInfo *CurFnInfo) {
+/// Determine if the given call uses the swiftasync calling convention.
+static bool isSwiftAsyncCallee(const CallExpr *CE) {
auto calleeQualType = CE->getCallee()->getType();
const FunctionType *calleeType = nullptr;
if (calleeQualType->isFunctionPointerType() ||
@@ -1285,18 +1453,12 @@ static void makeTailCallIfSwiftAsync(const CallExpr *CE, CGBuilderTy &Builder,
// getMethodDecl() doesn't handle member pointers at the moment.
calleeType = methodDecl->getType()->castAs<FunctionType>();
} else {
- return;
+ return false;
}
} else {
- return;
- }
- if (calleeType->getCallConv() == CallingConv::CC_SwiftAsync &&
- (CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync)) {
- auto CI = cast<llvm::CallInst>(&Builder.GetInsertBlock()->back());
- CI->setTailCallKind(llvm::CallInst::TCK_MustTail);
- Builder.CreateRetVoid();
- Builder.ClearInsertionPoint();
+ return false;
}
+ return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
}
/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
@@ -1336,6 +1498,19 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
RunCleanupsScope cleanupScope(*this);
if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
RV = EWC->getSubExpr();
+
+ // If we're in a swiftasynccall function, and the return expression is a
+ // call to a swiftasynccall function, mark the call as the musttail call.
+ std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
+ if (RV && CurFnInfo &&
+ CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
+ if (auto CE = dyn_cast<CallExpr>(RV)) {
+ if (isSwiftAsyncCallee(CE)) {
+ SaveMustTail.emplace(MustTailCall, CE);
+ }
+ }
+ }
+
// FIXME: Clean this up by using an LValue for ReturnTemp,
// EmitStoreThroughLValue, and EmitAnyExpr.
// Check if the NRVO candidate was not globalized in OpenMP mode.
@@ -1358,8 +1533,6 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
// for side effects.
if (RV) {
EmitAnyExpr(RV);
- if (auto *CE = dyn_cast<CallExpr>(RV))
- makeTailCallIfSwiftAsync(CE, Builder, CurFnInfo);
}
} else if (!RV) {
// Do nothing (return value is left uninitialized)
@@ -1370,9 +1543,15 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
Builder.CreateStore(Result.getScalarVal(), ReturnValue);
} else {
switch (getEvaluationKind(RV->getType())) {
- case TEK_Scalar:
- Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
+ case TEK_Scalar: {
+ llvm::Value *Ret = EmitScalarExpr(RV);
+ if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect)
+ EmitStoreOfScalar(Ret, MakeAddrLValue(ReturnValue, RV->getType()),
+ /*isInit*/ true);
+ else
+ Builder.CreateStore(Ret, ReturnValue);
break;
+ }
case TEK_Complex:
EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
/*isInit*/ true);
@@ -2209,15 +2388,14 @@ std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
Ty = llvm::IntegerType::get(getLLVMContext(), Size);
- return {
- Builder.CreateLoad(InputValue.getAddress(*this).withElementType(Ty)),
- nullptr};
+ return {Builder.CreateLoad(InputValue.getAddress().withElementType(Ty)),
+ nullptr};
}
}
- Address Addr = InputValue.getAddress(*this);
+ Address Addr = InputValue.getAddress();
ConstraintStr += '*';
- return {Addr.getPointer(), Addr.getElementType()};
+ return {InputValue.getPointer(*this), Addr.getElementType()};
}
std::pair<llvm::Value *, llvm::Type *>
@@ -2411,7 +2589,7 @@ EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
// ResultTypeRequiresCast.size() elements of RegResults.
if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
- Address A = Dest.getAddress(CGF).withElementType(ResultRegTypes[i]);
+ Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
Builder.CreateStore(Tmp, A);
continue;
@@ -2613,7 +2791,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
std::max((uint64_t)LargestVectorWidth,
VT->getPrimitiveSizeInBits().getKnownMinValue());
} else {
- Address DestAddr = Dest.getAddress(*this);
+ Address DestAddr = Dest.getAddress();
// Matrix types in memory are represented by arrays, but accessed through
// vector pointers, with the alignment specified on the access operation.
// For inline assembly, update pointer arguments to use vector pointers.
@@ -2624,7 +2802,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
ArgTypes.push_back(DestAddr.getType());
ArgElemTypes.push_back(DestAddr.getElementType());
- Args.push_back(DestAddr.getPointer());
+ Args.push_back(DestAddr.emitRawPointer(*this));
Constraints += "=*";
Constraints += OutputConstraint;
ReadOnly = ReadNone = false;
@@ -2961,7 +3139,7 @@ CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
LValue CapStruct = InitCapturedStruct(S);
- return CapStruct.getAddress(*this);
+ return CapStruct.getAddress();
}
/// Creates the outlined function for a CapturedStmt.
@@ -2999,8 +3177,8 @@ CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
// Initialize variable-length arrays.
- LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
- Ctx.getTagDeclType(RD));
+ LValue Base = MakeNaturalAlignRawAddrLValue(
+ CapturedStmtInfo->getContextValue(), Ctx.getTagDeclType(RD));
for (auto *FD : RD->fields()) {
if (FD->hasCapturedVLAType()) {
auto *ExprArg =
@@ -3024,3 +3202,67 @@ CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
return F;
}
+
+namespace {
+// Returns the first convergence entry/loop/anchor instruction found in |BB|.
+// nullptr otherwise.
+llvm::IntrinsicInst *getConvergenceToken(llvm::BasicBlock *BB) {
+ for (auto &I : *BB) {
+ auto *II = dyn_cast<llvm::IntrinsicInst>(&I);
+ if (II && llvm::isConvergenceControlIntrinsic(II->getIntrinsicID()))
+ return II;
+ }
+ return nullptr;
+}
+
+} // namespace
+
+llvm::CallBase *
+CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input,
+ llvm::Value *ParentToken) {
+ llvm::Value *bundleArgs[] = {ParentToken};
+ llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
+ auto Output = llvm::CallBase::addOperandBundle(
+ Input, llvm::LLVMContext::OB_convergencectrl, OB, Input);
+ Input->replaceAllUsesWith(Output);
+ Input->eraseFromParent();
+ return Output;
+}
+
+llvm::IntrinsicInst *
+CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB,
+ llvm::Value *ParentToken) {
+ CGBuilderTy::InsertPoint IP = Builder.saveIP();
+ if (BB->empty())
+ Builder.SetInsertPoint(BB);
+ else
+ Builder.SetInsertPoint(BB->getFirstInsertionPt());
+
+ llvm::CallBase *CB = Builder.CreateIntrinsic(
+ llvm::Intrinsic::experimental_convergence_loop, {}, {});
+ Builder.restoreIP(IP);
+
+ llvm::CallBase *I = addConvergenceControlToken(CB, ParentToken);
+ return cast<llvm::IntrinsicInst>(I);
+}
+
+llvm::IntrinsicInst *
+CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
+ llvm::BasicBlock *BB = &F->getEntryBlock();
+ llvm::IntrinsicInst *Token = getConvergenceToken(BB);
+ if (Token)
+ return Token;
+
+ // Adding a convergence token requires the function to be marked as
+ // convergent.
+ F->setConvergent();
+
+ CGBuilderTy::InsertPoint IP = Builder.saveIP();
+ Builder.SetInsertPoint(&BB->front());
+ llvm::CallBase *I = Builder.CreateIntrinsic(
+ llvm::Intrinsic::experimental_convergence_entry, {}, {});
+ assert(isa<llvm::IntrinsicInst>(I));
+ Builder.restoreIP(IP);
+
+ return cast<llvm::IntrinsicInst>(I);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
index e362c9da51fe..adf74ea16c89 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -24,6 +24,7 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
@@ -34,11 +35,14 @@
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Debug.h"
#include <optional>
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;
+#define TTL_CODEGEN_TYPE "target-teams-loop-codegen"
+
static const VarDecl *getBaseDecl(const Expr *Ref);
namespace {
@@ -68,7 +72,7 @@ class OMPLexicalScope : public CodeGenFunction::LexicalScope {
static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
return CGF.LambdaCaptureFields.lookup(VD) ||
(CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
- (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
+ (isa_and_nonnull<BlockDecl>(CGF.CurCodeDecl) &&
cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
}
@@ -96,7 +100,7 @@ public:
isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
InlinedShareds.isGlobalVarCaptured(VD)),
VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
- InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
+ InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress());
}
}
(void)InlinedShareds.Privatize();
@@ -138,7 +142,7 @@ public:
/// of used expression from loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopBasedDirective &S) {
- const DeclStmt *PreInits;
+ const Stmt *PreInits;
CodeGenFunction::OMPMapVars PreCondVars;
if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) {
llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
@@ -178,17 +182,39 @@ class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
}
return false;
});
- PreInits = cast_or_null<DeclStmt>(LD->getPreInits());
+ PreInits = LD->getPreInits();
} else if (const auto *Tile = dyn_cast<OMPTileDirective>(&S)) {
- PreInits = cast_or_null<DeclStmt>(Tile->getPreInits());
+ PreInits = Tile->getPreInits();
} else if (const auto *Unroll = dyn_cast<OMPUnrollDirective>(&S)) {
- PreInits = cast_or_null<DeclStmt>(Unroll->getPreInits());
+ PreInits = Unroll->getPreInits();
+ } else if (const auto *Reverse = dyn_cast<OMPReverseDirective>(&S)) {
+ PreInits = Reverse->getPreInits();
+ } else if (const auto *Interchange =
+ dyn_cast<OMPInterchangeDirective>(&S)) {
+ PreInits = Interchange->getPreInits();
} else {
llvm_unreachable("Unknown loop-based directive kind.");
}
if (PreInits) {
- for (const auto *I : PreInits->decls())
- CGF.EmitVarDecl(cast<VarDecl>(*I));
+ // CompoundStmts and DeclStmts are used as lists of PreInit statements and
+ // declarations. Since declarations must be visible to the statements that
+ // follow them, unpack the CompoundStmt they are nested in.
+ SmallVector<const Stmt *> PreInitStmts;
+ if (auto *PreInitCompound = dyn_cast<CompoundStmt>(PreInits))
+ llvm::append_range(PreInitStmts, PreInitCompound->body());
+ else
+ PreInitStmts.push_back(PreInits);
+
+ for (const Stmt *S : PreInitStmts) {
+ // EmitStmt skips any OMPCapturedExprDecls, but they need to be emitted
+ // here.
+ if (auto *PreInitDecl = dyn_cast<DeclStmt>(S)) {
+ for (Decl *I : PreInitDecl->decls())
+ CGF.EmitVarDecl(cast<VarDecl>(*I));
+ continue;
+ }
+ CGF.EmitStmt(S);
+ }
}
PreCondVars.restore(CGF);
}
@@ -206,7 +232,7 @@ class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
return CGF.LambdaCaptureFields.lookup(VD) ||
(CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
- (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
+ (isa_and_nonnull<BlockDecl>(CGF.CurCodeDecl) &&
cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
}
@@ -272,7 +298,7 @@ public:
InlinedShareds.isGlobalVarCaptured(VD)),
VD->getType().getNonReferenceType(), VK_LValue,
C.getLocation());
- InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
+ InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress());
}
}
CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
@@ -294,7 +320,7 @@ LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
bool IsCaptured =
LambdaCaptureFields.lookup(OrigVD) ||
(CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) ||
- (CurCodeDecl && isa<BlockDecl>(CurCodeDecl));
+ (isa_and_nonnull<BlockDecl>(CurCodeDecl));
DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured,
OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
return EmitLValue(&DRE);
@@ -350,7 +376,8 @@ void CodeGenFunction::GenerateOpenMPCapturedVars(
LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());
llvm::Value *SrcAddrVal = EmitScalarConversion(
- DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
+ DstAddr.emitRawPointer(*this),
+ Ctx.getPointerType(Ctx.getUIntPtrType()),
Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
LValue SrcLV =
MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());
@@ -364,7 +391,7 @@ void CodeGenFunction::GenerateOpenMPCapturedVars(
CapturedVars.push_back(CV);
} else {
assert(CurCap->capturesVariable() && "Expected capture by reference.");
- CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer());
+ CapturedVars.push_back(EmitLValue(*I).getAddress().emitRawPointer(*this));
}
}
}
@@ -375,10 +402,11 @@ static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
ASTContext &Ctx = CGF.getContext();
llvm::Value *CastedPtr = CGF.EmitScalarConversion(
- AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
+ AddrLV.getAddress().emitRawPointer(CGF), Ctx.getUIntPtrType(),
Ctx.getPointerType(DstType), Loc);
+ // FIXME: should the pointee type (DstType) be passed?
Address TmpAddr =
- CGF.MakeNaturalAlignAddrLValue(CastedPtr, DstType).getAddress(CGF);
+ CGF.MakeNaturalAlignAddrLValue(CastedPtr, DstType).getAddress();
return TmpAddr;
}
@@ -571,7 +599,7 @@ static llvm::Function *emitOutlinedFunctionPrologue(
} else if (I->capturesVariable()) {
const VarDecl *Var = I->getCapturedVar();
QualType VarTy = Var->getType();
- Address ArgAddr = ArgLVal.getAddress(CGF);
+ Address ArgAddr = ArgLVal.getAddress();
if (ArgLVal.getType()->isLValueReferenceType()) {
ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
} else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
@@ -592,12 +620,12 @@ static llvm::Function *emitOutlinedFunctionPrologue(
? castValueFromUintptr(
CGF, I->getLocation(), FD->getType(),
Args[Cnt]->getName(), ArgLVal)
- : ArgLVal.getAddress(CGF)}});
+ : ArgLVal.getAddress()}});
} else {
// If 'this' is captured, load it into CXXThisValue.
assert(I->capturesThis());
CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
- LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}});
+ LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress()}});
}
++Cnt;
++I;
@@ -667,7 +695,7 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
I->second.first ? I->second.first->getType() : Arg->getType(),
AlignmentSource::Decl);
if (LV.getType()->isAnyComplexType())
- LV.setAddress(LV.getAddress(WrapperCGF).withElementType(PI->getType()));
+ LV.setAddress(LV.getAddress().withElementType(PI->getType()));
CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
} else {
auto EI = VLASizes.find(Arg);
@@ -702,8 +730,8 @@ void CodeGenFunction::EmitOMPAggregateAssign(
llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
SrcAddr = SrcAddr.withElementType(DestAddr.getElementType());
- llvm::Value *SrcBegin = SrcAddr.getPointer();
- llvm::Value *DestBegin = DestAddr.getPointer();
+ llvm::Value *SrcBegin = SrcAddr.emitRawPointer(*this);
+ llvm::Value *DestBegin = DestAddr.emitRawPointer(*this);
// Cast from pointer to array type to pointer to single element.
llvm::Value *DestEnd = Builder.CreateInBoundsGEP(DestAddr.getElementType(),
DestBegin, NumElements);
@@ -883,8 +911,7 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
EmitAggregateAssign(Dest, OriginalLVal, Type);
} else {
EmitOMPAggregateAssign(
- Emission.getAllocatedAddress(), OriginalLVal.getAddress(*this),
- Type,
+ Emission.getAllocatedAddress(), OriginalLVal.getAddress(), Type,
[this, VDInit, Init](Address DestElement, Address SrcElement) {
// Clean up any temporaries needed by the
// initialization.
@@ -901,7 +928,7 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
IsRegistered =
PrivateScope.addPrivate(OrigVD, Emission.getAllocatedAddress());
} else {
- Address OriginalAddr = OriginalLVal.getAddress(*this);
+ Address OriginalAddr = OriginalLVal.getAddress();
// Emit private VarDecl with copy init.
// Remap temp VDInit variable to the address of the original
// variable (for proper handling of captured global variables).
@@ -990,7 +1017,7 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
"Copyin threadprivates should have been captured!");
DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
(*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
- MasterAddr = EmitLValue(&DRE).getAddress(*this);
+ MasterAddr = EmitLValue(&DRE).getAddress();
LocalDeclMap.erase(VD);
} else {
MasterAddr =
@@ -1000,17 +1027,17 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
getContext().getDeclAlign(VD));
}
// Get the address of the threadprivate variable.
- Address PrivateAddr = EmitLValue(*IRef).getAddress(*this);
+ Address PrivateAddr = EmitLValue(*IRef).getAddress();
if (CopiedVars.size() == 1) {
// At first check if current thread is a master thread. If it is, no
// need to copy data.
CopyBegin = createBasicBlock("copyin.not.master");
CopyEnd = createBasicBlock("copyin.not.master.end");
// TODO: Avoid ptrtoint conversion.
- auto *MasterAddrInt =
- Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy);
- auto *PrivateAddrInt =
- Builder.CreatePtrToInt(PrivateAddr.getPointer(), CGM.IntPtrTy);
+ auto *MasterAddrInt = Builder.CreatePtrToInt(
+ MasterAddr.emitRawPointer(*this), CGM.IntPtrTy);
+ auto *PrivateAddrInt = Builder.CreatePtrToInt(
+ PrivateAddr.emitRawPointer(*this), CGM.IntPtrTy);
Builder.CreateCondBr(
Builder.CreateICmpNE(MasterAddrInt, PrivateAddrInt), CopyBegin,
CopyEnd);
@@ -1069,7 +1096,7 @@ bool CodeGenFunction::EmitOMPLastprivateClauseInit(
/*RefersToEnclosingVariableOrCapture=*/
CapturedStmtInfo->lookup(OrigVD) != nullptr,
(*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
- PrivateScope.addPrivate(DestVD, EmitLValue(&DRE).getAddress(*this));
+ PrivateScope.addPrivate(DestVD, EmitLValue(&DRE).getAddress());
// Check if the variable is also a firstprivate: in this case IInit is
// not generated. Initialization of this variable will happen in codegen
// for 'firstprivate' clause.
@@ -1232,7 +1259,7 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
RedCG.emitAggregateType(*this, Count);
AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
- RedCG.getSharedLValue(Count).getAddress(*this),
+ RedCG.getSharedLValue(Count).getAddress(),
[&Emission](CodeGenFunction &CGF) {
CGF.EmitAutoVarInit(Emission);
return true;
@@ -1249,26 +1276,24 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
QualType Type = PrivateVD->getType();
- bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef);
+ bool isaOMPArraySectionExpr = isa<ArraySectionExpr>(IRef);
if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
// Store the address of the original variable associated with the LHS
// implicit variable.
- PrivateScope.addPrivate(LHSVD,
- RedCG.getSharedLValue(Count).getAddress(*this));
+ PrivateScope.addPrivate(LHSVD, RedCG.getSharedLValue(Count).getAddress());
PrivateScope.addPrivate(RHSVD, GetAddrOfLocalVar(PrivateVD));
} else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
isa<ArraySubscriptExpr>(IRef)) {
// Store the address of the original variable associated with the LHS
// implicit variable.
- PrivateScope.addPrivate(LHSVD,
- RedCG.getSharedLValue(Count).getAddress(*this));
+ PrivateScope.addPrivate(LHSVD, RedCG.getSharedLValue(Count).getAddress());
PrivateScope.addPrivate(RHSVD,
GetAddrOfLocalVar(PrivateVD).withElementType(
ConvertTypeForMem(RHSVD->getType())));
} else {
QualType Type = PrivateVD->getType();
bool IsArray = getContext().getAsArrayType(Type) != nullptr;
- Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this);
+ Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress();
// Store the address of the original variable associated with the LHS
// implicit variable.
if (IsArray) {
@@ -1391,7 +1416,7 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
case OMPD_end_declare_variant:
case OMPD_unknown:
default:
- llvm_unreachable("Enexpected directive with task reductions.");
+ llvm_unreachable("Unexpected directive with task reductions.");
}
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(TaskRedRef)->getDecl());
@@ -1429,9 +1454,12 @@ void CodeGenFunction::EmitOMPReductionClauseFinal(
*this, D.getBeginLoc(),
isOpenMPWorksharingDirective(D.getDirectiveKind()));
}
+ bool TeamsLoopCanBeParallel = false;
+ if (auto *TTLD = dyn_cast<OMPTargetTeamsGenericLoopDirective>(&D))
+ TeamsLoopCanBeParallel = TTLD->canBeParallelFor();
bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
isOpenMPParallelDirective(D.getDirectiveKind()) ||
- ReductionKind == OMPD_simd;
+ TeamsLoopCanBeParallel || ReductionKind == OMPD_simd;
bool SimpleReduction = ReductionKind == OMPD_simd;
// Emit nowait reduction if nowait clause is present or directive is a
// parallel directive (it always has implicit barrier).
@@ -1666,7 +1694,7 @@ Address CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
llvm::Type *VarTy = VDAddr.getElementType();
llvm::Value *Data =
- CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy);
+ CGF.Builder.CreatePointerCast(VDAddr.emitRawPointer(CGF), CGM.Int8PtrTy);
llvm::ConstantInt *Size = CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy));
std::string Suffix = getNameWithSeparators({"cache", ""});
llvm::Twine CacheName = Twine(CGM.getMangledName(VD)).concat(Suffix);
@@ -1743,7 +1771,7 @@ void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
- // The cleanup callback that finalizes all variabels at the given location,
+ // The cleanup callback that finalizes all variables at the given location,
// thus calls destructors etc.
auto FiniCB = [this](InsertPointTy IP) {
OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
@@ -2045,7 +2073,7 @@ void CodeGenFunction::EmitOMPCanonicalLoop(const OMPCanonicalLoop *S) {
->getParam(0)
->getType()
.getNonReferenceType();
- Address CountAddr = CreateMemTemp(LogicalTy, ".count.addr");
+ RawAddress CountAddr = CreateMemTemp(LogicalTy, ".count.addr");
emitCapturedStmtCall(*this, DistanceClosure, {CountAddr.getPointer()});
llvm::Value *DistVal = Builder.CreateLoad(CountAddr, ".count");
@@ -2059,9 +2087,9 @@ void CodeGenFunction::EmitOMPCanonicalLoop(const OMPCanonicalLoop *S) {
// variable and emit the body.
const DeclRefExpr *LoopVarRef = S->getLoopVarRef();
LValue LCVal = EmitLValue(LoopVarRef);
- Address LoopVarAddress = LCVal.getAddress(*this);
+ Address LoopVarAddress = LCVal.getAddress();
emitCapturedStmtCall(*this, LoopVarClosure,
- {LoopVarAddress.getPointer(), IndVar});
+ {LoopVarAddress.emitRawPointer(*this), IndVar});
RunCleanupsScope BodyScope(*this);
EmitStmt(BodyStmt);
@@ -2200,7 +2228,7 @@ void CodeGenFunction::EmitOMPLinearClauseFinal(
DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
CapturedStmtInfo->lookup(OrigVD) != nullptr,
(*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
- Address OrigAddr = EmitLValue(&DRE).getAddress(*this);
+ Address OrigAddr = EmitLValue(&DRE).getAddress();
CodeGenFunction::OMPPrivateScope VarScope(*this);
VarScope.addPrivate(OrigVD, OrigAddr);
(void)VarScope.Privatize();
@@ -2267,7 +2295,7 @@ void CodeGenFunction::EmitOMPPrivateLoopCounters(
DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD),
LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
E->getType(), VK_LValue, E->getExprLoc());
- (void)LoopScope.addPrivate(PrivateVD, EmitLValue(&DRE).getAddress(*this));
+ (void)LoopScope.addPrivate(PrivateVD, EmitLValue(&DRE).getAddress());
} else {
(void)LoopScope.addPrivate(PrivateVD, VarEmission.getAllocatedAddress());
}
@@ -2433,13 +2461,12 @@ void CodeGenFunction::EmitOMPSimdFinal(
}
Address OrigAddr = Address::invalid();
if (CED) {
- OrigAddr =
- EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this);
+ OrigAddr = EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress();
} else {
DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD),
/*RefersToEnclosingVariableOrCapture=*/false,
(*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
- OrigAddr = EmitLValue(&DRE).getAddress(*this);
+ OrigAddr = EmitLValue(&DRE).getAddress();
}
OMPPrivateScope VarScope(*this);
VarScope.addPrivate(OrigVD, OrigAddr);
@@ -2740,6 +2767,19 @@ void CodeGenFunction::EmitOMPTileDirective(const OMPTileDirective &S) {
EmitStmt(S.getTransformedStmt());
}
+void CodeGenFunction::EmitOMPReverseDirective(const OMPReverseDirective &S) {
+ // Emit the de-sugared statement.
+ OMPTransformDirectiveScopeRAII ReverseScope(*this, &S);
+ EmitStmt(S.getTransformedStmt());
+}
+
+void CodeGenFunction::EmitOMPInterchangeDirective(
+ const OMPInterchangeDirective &S) {
+ // Emit the de-sugared statement.
+ OMPTransformDirectiveScopeRAII InterchangeScope(*this, &S);
+ EmitStmt(S.getTransformedStmt());
+}
+
void CodeGenFunction::EmitOMPUnrollDirective(const OMPUnrollDirective &S) {
bool UseOMPIRBuilder = CGM.getLangOpts().OpenMPIRBuilder;
@@ -2910,10 +2950,10 @@ void CodeGenFunction::EmitOMPOuterLoop(
EmitBlock(LoopExit.getBlock());
// Tell the runtime we are done.
- auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) {
+ auto &&CodeGen = [DynamicOrOrdered, &S, &LoopArgs](CodeGenFunction &CGF) {
if (!DynamicOrOrdered)
CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
- S.getDirectiveKind());
+ LoopArgs.DKind);
};
OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
}
@@ -2963,12 +3003,14 @@ void CodeGenFunction::EmitOMPForOuterLoop(
// run-sched-var ICV. If the ICV is set to auto, the schedule is
// implementation defined
//
+ // __kmpc_dispatch_init();
// while(__kmpc_dispatch_next(&LB, &UB)) {
// idx = LB;
// while (idx <= UB) { BODY; ++idx;
// __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
// } // inner loop
// }
+ // __kmpc_dispatch_deinit();
//
// OpenMP [2.7.1, Loop Construct, Description, table 2-1]
// When schedule(static, chunk_size) is specified, iterations are divided into
@@ -3019,8 +3061,12 @@ void CodeGenFunction::EmitOMPForOuterLoop(
OuterLoopArgs.Cond = S.getCond();
OuterLoopArgs.NextLB = S.getNextLowerBound();
OuterLoopArgs.NextUB = S.getNextUpperBound();
+ OuterLoopArgs.DKind = LoopArgs.DKind;
EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs,
emitOMPLoopBodyWithStopPoint, CodeGenOrdered);
+ if (DynamicOrOrdered) {
+ RT.emitForDispatchDeinit(*this, S.getBeginLoc());
+ }
}
static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc,
@@ -3080,6 +3126,7 @@ void CodeGenFunction::EmitOMPDistributeOuterLoop(
OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
? S.getCombinedNextUpperBound()
: S.getNextUpperBound();
+ OuterLoopArgs.DKind = OMPD_distribute;
EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S,
LoopScope, OuterLoopArgs, CodeGenLoopContent,
@@ -3153,16 +3200,14 @@ static void emitDistributeParallelForDistributeInnerBoundParams(
const auto &Dir = cast<OMPLoopDirective>(S);
LValue LB =
CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
- llvm::Value *LBCast =
- CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)),
- CGF.SizeTy, /*isSigned=*/false);
+ llvm::Value *LBCast = CGF.Builder.CreateIntCast(
+ CGF.Builder.CreateLoad(LB.getAddress()), CGF.SizeTy, /*isSigned=*/false);
CapturedVars.push_back(LBCast);
LValue UB =
CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
- llvm::Value *UBCast =
- CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)),
- CGF.SizeTy, /*isSigned=*/false);
+ llvm::Value *UBCast = CGF.Builder.CreateIntCast(
+ CGF.Builder.CreateLoad(UB.getAddress()), CGF.SizeTy, /*isSigned=*/false);
CapturedVars.push_back(UBCast);
}
@@ -3414,8 +3459,8 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
// one chunk is distributed to each thread. Note that the size of
// the chunks is unspecified in this case.
CGOpenMPRuntime::StaticRTInput StaticInit(
- IVSize, IVSigned, Ordered, IL.getAddress(CGF),
- LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF),
+ IVSize, IVSigned, Ordered, IL.getAddress(), LB.getAddress(),
+ UB.getAddress(), ST.getAddress(),
StaticChunkedOne ? Chunk : nullptr);
CGF.CGM.getOpenMPRuntime().emitForStaticInit(
CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind,
@@ -3452,15 +3497,16 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
// Tell the runtime we are done.
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
- S.getDirectiveKind());
+ OMPD_for);
};
OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
} else {
// Emit the outer loop, which requests its work chunk [LB..UB] from
// runtime and runs the inner loop to process it.
- const OMPLoopArguments LoopArguments(
- LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
- IL.getAddress(*this), Chunk, EUB);
+ OMPLoopArguments LoopArguments(LB.getAddress(), UB.getAddress(),
+ ST.getAddress(), IL.getAddress(), Chunk,
+ EUB);
+ LoopArguments.DKind = OMPD_for;
EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
LoopArguments, CGDispatchBounds);
}
@@ -3626,11 +3672,10 @@ static void emitScanBasedDirectiveFinals(
RValue::get(OMPLast));
LValue DestLVal = CGF.EmitLValue(OrigExpr);
LValue SrcLVal = CGF.EmitLValue(CopyArrayElem);
- CGF.EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(CGF),
- SrcLVal.getAddress(CGF),
- cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
- cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
- CopyOps[I]);
+ CGF.EmitOMPCopy(
+ PrivateExpr->getType(), DestLVal.getAddress(), SrcLVal.getAddress(),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), CopyOps[I]);
}
}
@@ -3740,7 +3785,7 @@ static void emitScanBasedDirective(
cast<OpaqueValueExpr>(
cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
RValue::get(IVal));
- LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
+ LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress();
}
PrivScope.addPrivate(LHSVD, LHSAddr);
Address RHSAddr = Address::invalid();
@@ -3751,7 +3796,7 @@ static void emitScanBasedDirective(
cast<OpaqueValueExpr>(
cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
RValue::get(OffsetIVal));
- RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
+ RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress();
}
PrivScope.addPrivate(RHSVD, RHSAddr);
++ILHS;
@@ -4065,8 +4110,8 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
OpenMPScheduleTy ScheduleKind;
ScheduleKind.Schedule = OMPC_SCHEDULE_static;
CGOpenMPRuntime::StaticRTInput StaticInit(
- /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(CGF),
- LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF));
+ /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(),
+ LB.getAddress(), UB.getAddress(), ST.getAddress());
CGF.CGM.getOpenMPRuntime().emitForStaticInit(
CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit);
// UB = min(UB, GlobalUB);
@@ -4082,7 +4127,7 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
// Tell the runtime we are done.
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
- S.getDirectiveKind());
+ OMPD_sections);
};
CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen);
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
@@ -4746,10 +4791,10 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
(void)DI->EmitDeclareOfAutoVariable(SharedVar, ContextValue,
CGF.Builder, false);
- llvm::Instruction &Last = CGF.Builder.GetInsertBlock()->back();
// Get the call dbg.declare instruction we just created and update
// its DIExpression to add offset to base address.
- if (auto DDI = dyn_cast<llvm::DbgVariableIntrinsic>(&Last)) {
+ auto UpdateExpr = [](llvm::LLVMContext &Ctx, auto *Declare,
+ unsigned Offset) {
SmallVector<uint64_t, 8> Ops;
// Add offset to the base address if non zero.
if (Offset) {
@@ -4757,9 +4802,21 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
Ops.push_back(Offset);
}
Ops.push_back(llvm::dwarf::DW_OP_deref);
- auto &Ctx = DDI->getContext();
- llvm::DIExpression *DIExpr = llvm::DIExpression::get(Ctx, Ops);
- Last.setOperand(2, llvm::MetadataAsValue::get(Ctx, DIExpr));
+ Declare->setExpression(llvm::DIExpression::get(Ctx, Ops));
+ };
+ llvm::Instruction &Last = CGF.Builder.GetInsertBlock()->back();
+ if (auto DDI = dyn_cast<llvm::DbgVariableIntrinsic>(&Last))
+ UpdateExpr(DDI->getContext(), DDI, Offset);
+ // If we're emitting using the new debug info format into a block
+ // without a terminator, the record will be "trailing".
+ assert(!Last.isTerminator() && "unexpected terminator");
+ if (auto *Marker =
+ CGF.Builder.GetInsertBlock()->getTrailingDbgRecords()) {
+ for (llvm::DbgVariableRecord &DVR : llvm::reverse(
+ llvm::filterDbgVars(Marker->getDbgRecordRange()))) {
+ UpdateExpr(Last.getContext(), &DVR, Offset);
+ break;
+ }
}
}
}
@@ -4780,7 +4837,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
ParamTypes.push_back(PrivatesPtr->getType());
for (const Expr *E : Data.PrivateVars) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- Address PrivatePtr = CGF.CreateMemTemp(
+ RawAddress PrivatePtr = CGF.CreateMemTemp(
CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
PrivatePtrs.emplace_back(VD, PrivatePtr);
CallArgs.push_back(PrivatePtr.getPointer());
@@ -4788,7 +4845,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
}
for (const Expr *E : Data.FirstprivateVars) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- Address PrivatePtr =
+ RawAddress PrivatePtr =
CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
".firstpriv.ptr.addr");
PrivatePtrs.emplace_back(VD, PrivatePtr);
@@ -4798,7 +4855,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
}
for (const Expr *E : Data.LastprivateVars) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- Address PrivatePtr =
+ RawAddress PrivatePtr =
CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
".lastpriv.ptr.addr");
PrivatePtrs.emplace_back(VD, PrivatePtr);
@@ -4811,7 +4868,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
Ty = CGF.getContext().getPointerType(Ty);
if (isAllocatableDecl(VD))
Ty = CGF.getContext().getPointerType(Ty);
- Address PrivatePtr = CGF.CreateMemTemp(
+ RawAddress PrivatePtr = CGF.CreateMemTemp(
CGF.getContext().getPointerType(Ty), ".local.ptr.addr");
auto Result = UntiedLocalVars.insert(
std::make_pair(VD, std::make_pair(PrivatePtr, Address::invalid())));
@@ -4833,7 +4890,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
Pair.second->getType(), VK_LValue,
Pair.second->getExprLoc());
- Scope.addPrivate(Pair.first, CGF.EmitLValue(&DRE).getAddress(CGF));
+ Scope.addPrivate(Pair.first, CGF.EmitLValue(&DRE).getAddress());
}
for (const auto &Pair : PrivatePtrs) {
Address Replacement = Address(
@@ -4844,7 +4901,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
if (auto *DI = CGF.getDebugInfo())
if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
(void)DI->EmitDeclareOfAutoVariable(
- Pair.first, Pair.second.getPointer(), CGF.Builder,
+ Pair.first, Pair.second.getBasePointer(), CGF.Builder,
/*UsePointerValue*/ true);
}
// Adjust mapping for internal locals by mapping actual memory instead of
@@ -4897,14 +4954,14 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
RedCG, Cnt);
Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
- Replacement =
- Address(CGF.EmitScalarConversion(
- Replacement.getPointer(), CGF.getContext().VoidPtrTy,
- CGF.getContext().getPointerType(
- Data.ReductionCopies[Cnt]->getType()),
- Data.ReductionCopies[Cnt]->getExprLoc()),
- CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
- Replacement.getAlignment());
+ Replacement = Address(
+ CGF.EmitScalarConversion(Replacement.emitRawPointer(CGF),
+ CGF.getContext().VoidPtrTy,
+ CGF.getContext().getPointerType(
+ Data.ReductionCopies[Cnt]->getType()),
+ Data.ReductionCopies[Cnt]->getExprLoc()),
+ CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
+ Replacement.getAlignment());
Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
Scope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
}
@@ -4955,7 +5012,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
Replacement = Address(
CGF.EmitScalarConversion(
- Replacement.getPointer(), CGF.getContext().VoidPtrTy,
+ Replacement.emitRawPointer(CGF), CGF.getContext().VoidPtrTy,
CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
InRedPrivs[Cnt]->getExprLoc()),
CGF.ConvertTypeForMem(InRedPrivs[Cnt]->getType()),
@@ -5074,7 +5131,7 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
// If there is no user-defined mapper, the mapper array will be nullptr. In
// this case, we don't need to privatize it.
if (!isa_and_nonnull<llvm::ConstantPointerNull>(
- InputInfo.MappersArray.getPointer())) {
+ InputInfo.MappersArray.emitRawPointer(*this))) {
MVD = createImplicitFirstprivateForType(
getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
TargetScope.addPrivate(MVD, InputInfo.MappersArray);
@@ -5100,7 +5157,7 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
ParamTypes.push_back(PrivatesPtr->getType());
for (const Expr *E : Data.FirstprivateVars) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- Address PrivatePtr =
+ RawAddress PrivatePtr =
CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
".firstpriv.ptr.addr");
PrivatePtrs.emplace_back(VD, PrivatePtr);
@@ -5179,14 +5236,14 @@ void CodeGenFunction::processInReduction(const OMPExecutableDirective &S,
RedCG, Cnt);
Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
- Replacement =
- Address(CGF.EmitScalarConversion(
- Replacement.getPointer(), CGF.getContext().VoidPtrTy,
- CGF.getContext().getPointerType(
- Data.ReductionCopies[Cnt]->getType()),
- Data.ReductionCopies[Cnt]->getExprLoc()),
- CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
- Replacement.getAlignment());
+ Replacement = Address(
+ CGF.EmitScalarConversion(Replacement.emitRawPointer(CGF),
+ CGF.getContext().VoidPtrTy,
+ CGF.getContext().getPointerType(
+ Data.ReductionCopies[Cnt]->getType()),
+ Data.ReductionCopies[Cnt]->getExprLoc()),
+ CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
+ Replacement.getAlignment());
Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
Scope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
}
@@ -5232,7 +5289,7 @@ void CodeGenFunction::processInReduction(const OMPExecutableDirective &S,
CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
Replacement = Address(
CGF.EmitScalarConversion(
- Replacement.getPointer(), CGF.getContext().VoidPtrTy,
+ Replacement.emitRawPointer(CGF), CGF.getContext().VoidPtrTy,
CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
InRedPrivs[Cnt]->getExprLoc()),
CGF.ConvertTypeForMem(InRedPrivs[Cnt]->getType()),
@@ -5379,7 +5436,7 @@ void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) {
Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end());
Address DepAddr = CGM.getOpenMPRuntime().emitDepobjDependClause(
*this, Dependencies, DC->getBeginLoc());
- EmitStoreOfScalar(DepAddr.getPointer(), DOLVal);
+ EmitStoreOfScalar(DepAddr.emitRawPointer(*this), DOLVal);
return;
}
if (const auto *DC = S.getSingleClause<OMPDestroyClause>()) {
@@ -5480,8 +5537,8 @@ void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
*cast<VarDecl>(cast<DeclRefExpr>(TempExpr)->getDecl()));
LValue DestLVal = EmitLValue(TempExpr);
LValue SrcLVal = EmitLValue(LHSs[I]);
- EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
- SrcLVal.getAddress(*this),
+ EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(),
+ SrcLVal.getAddress(),
cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
CopyOps[I]);
@@ -5502,11 +5559,10 @@ void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
DestLVal = EmitLValue(RHSs[I]);
SrcLVal = EmitLValue(TempExpr);
}
- EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
- SrcLVal.getAddress(*this),
- cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
- cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
- CopyOps[I]);
+ EmitOMPCopy(
+ PrivateExpr->getType(), DestLVal.getAddress(), SrcLVal.getAddress(),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), CopyOps[I]);
}
}
EmitBranch(IsInclusive ? OMPAfterScanBlock : OMPBeforeScanBlock);
@@ -5539,11 +5595,10 @@ void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
RValue::get(IdxVal));
LValue DestLVal = EmitLValue(CopyArrayElem);
LValue SrcLVal = EmitLValue(OrigExpr);
- EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
- SrcLVal.getAddress(*this),
- cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
- cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
- CopyOps[I]);
+ EmitOMPCopy(
+ PrivateExpr->getType(), DestLVal.getAddress(), SrcLVal.getAddress(),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), CopyOps[I]);
}
}
EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
@@ -5581,11 +5636,10 @@ void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
RValue::get(IdxVal));
LValue SrcLVal = EmitLValue(CopyArrayElem);
LValue DestLVal = EmitLValue(OrigExpr);
- EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
- SrcLVal.getAddress(*this),
- cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
- cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
- CopyOps[I]);
+ EmitOMPCopy(
+ PrivateExpr->getType(), DestLVal.getAddress(), SrcLVal.getAddress(),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), CopyOps[I]);
}
if (!IsInclusive) {
EmitBlock(ExclusiveExitBB);
@@ -5710,8 +5764,8 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
/* Chunked */ Chunk != nullptr) ||
StaticChunked) {
CGOpenMPRuntime::StaticRTInput StaticInit(
- IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(*this),
- LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
+ IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(),
+ LB.getAddress(), UB.getAddress(), ST.getAddress(),
StaticChunked ? Chunk : nullptr);
RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind,
StaticInit);
@@ -5782,13 +5836,13 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
});
EmitBlock(LoopExit.getBlock());
// Tell the runtime we are done.
- RT.emitForStaticFinish(*this, S.getEndLoc(), S.getDirectiveKind());
+ RT.emitForStaticFinish(*this, S.getEndLoc(), OMPD_distribute);
} else {
// Emit the outer loop, which requests its work chunk [LB..UB] from
// runtime and runs the inner loop to process it.
const OMPLoopArguments LoopArguments = {
- LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
- IL.getAddress(*this), Chunk};
+ LB.getAddress(), UB.getAddress(), ST.getAddress(), IL.getAddress(),
+ Chunk};
EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments,
CodeGenLoop);
}
@@ -6102,8 +6156,7 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
// target platform.
if (BO == BO_Comma || !Update.isScalar() || !X.isSimple() ||
(!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
- (Update.getScalarVal()->getType() !=
- X.getAddress(CGF).getElementType())) ||
+ (Update.getScalarVal()->getType() != X.getAddress().getElementType())) ||
!Context.getTargetInfo().hasBuiltinAtomic(
Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
return std::make_pair(false, RValue::get(nullptr));
@@ -6119,10 +6172,10 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
};
if (!CheckAtomicSupport(Update.getScalarVal()->getType(), BO) ||
- !CheckAtomicSupport(X.getAddress(CGF).getElementType(), BO))
+ !CheckAtomicSupport(X.getAddress().getElementType(), BO))
return std::make_pair(false, RValue::get(nullptr));
- bool IsInteger = X.getAddress(CGF).getElementType()->isIntegerTy();
+ bool IsInteger = X.getAddress().getElementType()->isIntegerTy();
llvm::AtomicRMWInst::BinOp RMWOp;
switch (BO) {
case BO_Add:
@@ -6199,14 +6252,14 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
if (IsInteger)
UpdateVal = CGF.Builder.CreateIntCast(
- IC, X.getAddress(CGF).getElementType(),
+ IC, X.getAddress().getElementType(),
X.getType()->hasSignedIntegerRepresentation());
else
UpdateVal = CGF.Builder.CreateCast(llvm::Instruction::CastOps::UIToFP, IC,
- X.getAddress(CGF).getElementType());
+ X.getAddress().getElementType());
}
llvm::Value *Res =
- CGF.Builder.CreateAtomicRMW(RMWOp, X.getAddress(CGF), UpdateVal, AO);
+ CGF.Builder.CreateAtomicRMW(RMWOp, X.getAddress(), UpdateVal, AO);
return std::make_pair(true, RValue::get(Res));
}
@@ -6431,7 +6484,7 @@ static void emitOMPAtomicCompareExpr(
}
LValue XLVal = CGF.EmitLValue(X);
- Address XAddr = XLVal.getAddress(CGF);
+ Address XAddr = XLVal.getAddress();
auto EmitRValueWithCastIfNeeded = [&CGF, Loc](const Expr *X, const Expr *E) {
if (X->getType() == E->getType())
@@ -6447,36 +6500,36 @@ static void emitOMPAtomicCompareExpr(
llvm::Value *DVal = D ? EmitRValueWithCastIfNeeded(X, D) : nullptr;
if (auto *CI = dyn_cast<llvm::ConstantInt>(EVal))
EVal = CGF.Builder.CreateIntCast(
- CI, XLVal.getAddress(CGF).getElementType(),
+ CI, XLVal.getAddress().getElementType(),
E->getType()->hasSignedIntegerRepresentation());
if (DVal)
if (auto *CI = dyn_cast<llvm::ConstantInt>(DVal))
DVal = CGF.Builder.CreateIntCast(
- CI, XLVal.getAddress(CGF).getElementType(),
+ CI, XLVal.getAddress().getElementType(),
D->getType()->hasSignedIntegerRepresentation());
llvm::OpenMPIRBuilder::AtomicOpValue XOpVal{
- XAddr.getPointer(), XAddr.getElementType(),
+ XAddr.emitRawPointer(CGF), XAddr.getElementType(),
X->getType()->hasSignedIntegerRepresentation(),
X->getType().isVolatileQualified()};
llvm::OpenMPIRBuilder::AtomicOpValue VOpVal, ROpVal;
if (V) {
LValue LV = CGF.EmitLValue(V);
- Address Addr = LV.getAddress(CGF);
- VOpVal = {Addr.getPointer(), Addr.getElementType(),
+ Address Addr = LV.getAddress();
+ VOpVal = {Addr.emitRawPointer(CGF), Addr.getElementType(),
V->getType()->hasSignedIntegerRepresentation(),
V->getType().isVolatileQualified()};
}
if (R) {
LValue LV = CGF.EmitLValue(R);
- Address Addr = LV.getAddress(CGF);
- ROpVal = {Addr.getPointer(), Addr.getElementType(),
+ Address Addr = LV.getAddress();
+ ROpVal = {Addr.emitRawPointer(CGF), Addr.getElementType(),
R->getType()->hasSignedIntegerRepresentation(),
R->getType().isVolatileQualified()};
}
if (FailAO == llvm::AtomicOrdering::NotAtomic) {
- // fail clause was not mentionend on the
+ // fail clause was not mentioned on the
// "#pragma omp atomic compare" construct.
CGF.Builder.restoreIP(OMPBuilder.createAtomicCompare(
CGF.Builder, XOpVal, VOpVal, ROpVal, EVal, DVal, AO, Op, IsXBinopExpr,
@@ -6520,7 +6573,7 @@ static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
}
void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
- llvm::AtomicOrdering AO = llvm::AtomicOrdering::Monotonic;
+ llvm::AtomicOrdering AO = CGM.getOpenMPRuntime().getDefaultMemoryOrdering();
// Fail Memory Clause Ordering.
llvm::AtomicOrdering FailAO = llvm::AtomicOrdering::NotAtomic;
bool MemOrderingSpecified = false;
@@ -6546,6 +6599,9 @@ void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
// Find first clause (skip seq_cst|acq_rel|aqcuire|release|relaxed clause,
// if it is first).
OpenMPClauseKind K = C->getClauseKind();
+ // TBD
+ if (K == OMPC_weak)
+ return;
if (K == OMPC_seq_cst || K == OMPC_acq_rel || K == OMPC_acquire ||
K == OMPC_release || K == OMPC_relaxed || K == OMPC_hint)
continue;
@@ -7011,7 +7067,7 @@ void CodeGenFunction::EmitOMPInteropDirective(const OMPInteropDirective &S) {
std::tie(NumDependences, DependenciesArray) =
CGM.getOpenMPRuntime().emitDependClause(*this, Data.Dependences,
S.getBeginLoc());
- DependenceList = DependenciesArray.getPointer();
+ DependenceList = DependenciesArray.emitRawPointer(*this);
}
Data.HasNowaitClause = S.hasClausesOfKind<OMPNowaitClause>();
@@ -7020,31 +7076,47 @@ void CodeGenFunction::EmitOMPInteropDirective(const OMPInteropDirective &S) {
S.getSingleClause<OMPUseClause>())) &&
"OMPNowaitClause clause is used separately in OMPInteropDirective.");
- if (const auto *C = S.getSingleClause<OMPInitClause>()) {
- llvm::Value *InteropvarPtr =
- EmitLValue(C->getInteropVar()).getPointer(*this);
- llvm::omp::OMPInteropType InteropType = llvm::omp::OMPInteropType::Unknown;
- if (C->getIsTarget()) {
- InteropType = llvm::omp::OMPInteropType::Target;
- } else {
- assert(C->getIsTargetSync() && "Expected interop-type target/targetsync");
- InteropType = llvm::omp::OMPInteropType::TargetSync;
+ auto ItOMPInitClause = S.getClausesOfKind<OMPInitClause>();
+ if (!ItOMPInitClause.empty()) {
+ // Look at the multiple init clauses
+ for (const OMPInitClause *C : ItOMPInitClause) {
+ llvm::Value *InteropvarPtr =
+ EmitLValue(C->getInteropVar()).getPointer(*this);
+ llvm::omp::OMPInteropType InteropType =
+ llvm::omp::OMPInteropType::Unknown;
+ if (C->getIsTarget()) {
+ InteropType = llvm::omp::OMPInteropType::Target;
+ } else {
+ assert(C->getIsTargetSync() &&
+ "Expected interop-type target/targetsync");
+ InteropType = llvm::omp::OMPInteropType::TargetSync;
+ }
+ OMPBuilder.createOMPInteropInit(Builder, InteropvarPtr, InteropType,
+ Device, NumDependences, DependenceList,
+ Data.HasNowaitClause);
+ }
+ }
+ auto ItOMPDestroyClause = S.getClausesOfKind<OMPDestroyClause>();
+ if (!ItOMPDestroyClause.empty()) {
+ // Look at the multiple destroy clauses
+ for (const OMPDestroyClause *C : ItOMPDestroyClause) {
+ llvm::Value *InteropvarPtr =
+ EmitLValue(C->getInteropVar()).getPointer(*this);
+ OMPBuilder.createOMPInteropDestroy(Builder, InteropvarPtr, Device,
+ NumDependences, DependenceList,
+ Data.HasNowaitClause);
+ }
+ }
+ auto ItOMPUseClause = S.getClausesOfKind<OMPUseClause>();
+ if (!ItOMPUseClause.empty()) {
+ // Look at the multiple use clauses
+ for (const OMPUseClause *C : ItOMPUseClause) {
+ llvm::Value *InteropvarPtr =
+ EmitLValue(C->getInteropVar()).getPointer(*this);
+ OMPBuilder.createOMPInteropUse(Builder, InteropvarPtr, Device,
+ NumDependences, DependenceList,
+ Data.HasNowaitClause);
}
- OMPBuilder.createOMPInteropInit(Builder, InteropvarPtr, InteropType, Device,
- NumDependences, DependenceList,
- Data.HasNowaitClause);
- } else if (const auto *C = S.getSingleClause<OMPDestroyClause>()) {
- llvm::Value *InteropvarPtr =
- EmitLValue(C->getInteropVar()).getPointer(*this);
- OMPBuilder.createOMPInteropDestroy(Builder, InteropvarPtr, Device,
- NumDependences, DependenceList,
- Data.HasNowaitClause);
- } else if (const auto *C = S.getSingleClause<OMPUseClause>()) {
- llvm::Value *InteropvarPtr =
- EmitLValue(C->getInteropVar()).getPointer(*this);
- OMPBuilder.createOMPInteropUse(Builder, InteropvarPtr, Device,
- NumDependences, DependenceList,
- Data.HasNowaitClause);
}
}
@@ -7245,7 +7317,7 @@ void CodeGenFunction::EmitOMPUseDevicePtrClause(
static const VarDecl *getBaseDecl(const Expr *Ref) {
const Expr *Base = Ref->IgnoreParenImpCasts();
- while (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Base))
+ while (const auto *OASE = dyn_cast<ArraySectionExpr>(Base))
Base = OASE->getBase()->IgnoreParenImpCasts();
while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Base))
Base = ASE->getBase()->IgnoreParenImpCasts();
@@ -7871,7 +7943,7 @@ void CodeGenFunction::EmitOMPGenericLoopDirective(
void CodeGenFunction::EmitOMPParallelGenericLoopDirective(
const OMPLoopDirective &S) {
- // Emit combined directive as if its consituent constructs are 'parallel'
+ // Emit combined directive as if its constituent constructs are 'parallel'
// and 'for'.
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
@@ -7891,11 +7963,9 @@ void CodeGenFunction::EmitOMPParallelGenericLoopDirective(
void CodeGenFunction::EmitOMPTeamsGenericLoopDirective(
const OMPTeamsGenericLoopDirective &S) {
// To be consistent with current behavior of 'target teams loop', emit
- // 'teams loop' as if its constituent constructs are 'distribute,
- // 'parallel, and 'for'.
+ // 'teams loop' as if its constituent constructs are 'teams' and 'distribute'.
auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
- CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
- S.getDistInc());
+ CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
};
// Emit teams region as a standalone region.
@@ -7909,15 +7979,33 @@ void CodeGenFunction::EmitOMPTeamsGenericLoopDirective(
CodeGenDistribute);
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
};
- emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen);
+ emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen);
emitPostUpdateForReductionClause(*this, S,
[](CodeGenFunction &) { return nullptr; });
}
-static void
-emitTargetTeamsGenericLoopRegion(CodeGenFunction &CGF,
- const OMPTargetTeamsGenericLoopDirective &S,
- PrePostActionTy &Action) {
+#ifndef NDEBUG
+static void emitTargetTeamsLoopCodegenStatus(CodeGenFunction &CGF,
+ std::string StatusMsg,
+ const OMPExecutableDirective &D) {
+ bool IsDevice = CGF.CGM.getLangOpts().OpenMPIsTargetDevice;
+ if (IsDevice)
+ StatusMsg += ": DEVICE";
+ else
+ StatusMsg += ": HOST";
+ SourceLocation L = D.getBeginLoc();
+ auto &SM = CGF.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(L);
+ const char *FileName = PLoc.isValid() ? PLoc.getFilename() : nullptr;
+ unsigned LineNo =
+ PLoc.isValid() ? PLoc.getLine() : SM.getExpansionLineNumber(L);
+ llvm::dbgs() << StatusMsg << ": " << FileName << ": " << LineNo << "\n";
+}
+#endif
+
+static void emitTargetTeamsGenericLoopRegionAsParallel(
+ CodeGenFunction &CGF, PrePostActionTy &Action,
+ const OMPTargetTeamsGenericLoopDirective &S) {
Action.Enter(CGF);
// Emit 'teams loop' as if its constituent constructs are 'distribute,
// 'parallel, and 'for'.
@@ -7937,19 +8025,50 @@ emitTargetTeamsGenericLoopRegion(CodeGenFunction &CGF,
CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
};
-
+ DEBUG_WITH_TYPE(TTL_CODEGEN_TYPE,
+ emitTargetTeamsLoopCodegenStatus(
+ CGF, TTL_CODEGEN_TYPE " as parallel for", S));
emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for,
CodeGenTeams);
emitPostUpdateForReductionClause(CGF, S,
[](CodeGenFunction &) { return nullptr; });
}
-/// Emit combined directive 'target teams loop' as if its constituent
-/// constructs are 'target', 'teams', 'distribute', 'parallel', and 'for'.
+static void emitTargetTeamsGenericLoopRegionAsDistribute(
+ CodeGenFunction &CGF, PrePostActionTy &Action,
+ const OMPTargetTeamsGenericLoopDirective &S) {
+ Action.Enter(CGF);
+ // Emit 'teams loop' as if its constituent construct is 'distribute'.
+ auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
+ };
+
+ // Emit teams region as a standalone region.
+ auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
+ CGF.EmitOMPReductionClauseInit(S, PrivateScope);
+ (void)PrivateScope.Privatize();
+ CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
+ CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
+ CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
+ };
+ DEBUG_WITH_TYPE(TTL_CODEGEN_TYPE,
+ emitTargetTeamsLoopCodegenStatus(
+ CGF, TTL_CODEGEN_TYPE " as distribute", S));
+ emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen);
+ emitPostUpdateForReductionClause(CGF, S,
+ [](CodeGenFunction &) { return nullptr; });
+}
+
void CodeGenFunction::EmitOMPTargetTeamsGenericLoopDirective(
const OMPTargetTeamsGenericLoopDirective &S) {
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
- emitTargetTeamsGenericLoopRegion(CGF, S, Action);
+ if (S.canBeParallelFor())
+ emitTargetTeamsGenericLoopRegionAsParallel(CGF, Action, S);
+ else
+ emitTargetTeamsGenericLoopRegionAsDistribute(CGF, Action, S);
};
emitCommonOMPTargetDirective(*this, S, CodeGen);
}
@@ -7959,7 +8078,10 @@ void CodeGenFunction::EmitOMPTargetTeamsGenericLoopDeviceFunction(
const OMPTargetTeamsGenericLoopDirective &S) {
// Emit SPMD target parallel loop region as a standalone region.
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
- emitTargetTeamsGenericLoopRegion(CGF, S, Action);
+ if (S.canBeParallelFor())
+ emitTargetTeamsGenericLoopRegionAsParallel(CGF, Action, S);
+ else
+ emitTargetTeamsGenericLoopRegionAsDistribute(CGF, Action, S);
};
llvm::Function *Fn;
llvm::Constant *Addr;
@@ -8033,7 +8155,7 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
continue;
if (!CGF.LocalDeclMap.count(VD)) {
LValue GlobLVal = CGF.EmitLValue(Ref);
- GlobalsScope.addPrivate(VD, GlobLVal.getAddress(CGF));
+ GlobalsScope.addPrivate(VD, GlobLVal.getAddress());
}
}
}
@@ -8048,7 +8170,7 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
LValue GlobLVal = CGF.EmitLValue(E);
- GlobalsScope.addPrivate(VD, GlobLVal.getAddress(CGF));
+ GlobalsScope.addPrivate(VD, GlobLVal.getAddress());
}
if (isa<OMPCapturedExprDecl>(VD)) {
// Emit only those that were not explicitly referenced in clauses.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGVTT.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGVTT.cpp
index 1d3f14f1c534..20bd2c2fc2c6 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGVTT.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGVTT.cpp
@@ -77,9 +77,23 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
};
+ // Add inrange attribute to indicate that only the VTableIndex can be
+ // accessed.
+ unsigned ComponentSize =
+ CGM.getDataLayout().getTypeAllocSize(getVTableComponentType());
+ unsigned VTableSize = CGM.getDataLayout().getTypeAllocSize(
+ cast<llvm::StructType>(VTable->getValueType())
+ ->getElementType(AddressPoint.VTableIndex));
+ unsigned Offset = ComponentSize * AddressPoint.AddressPointIndex;
+ llvm::ConstantRange InRange(llvm::APInt(32, -Offset, true),
+ llvm::APInt(32, VTableSize - Offset, true));
llvm::Constant *Init = llvm::ConstantExpr::getGetElementPtr(
- VTable->getValueType(), VTable, Idxs, /*InBounds=*/true,
- /*InRangeIndex=*/1);
+ VTable->getValueType(), VTable, Idxs, /*InBounds=*/true, InRange);
+
+ if (const auto &Schema =
+ CGM.getCodeGenOpts().PointerAuth.CXXVTTVTablePointers)
+ Init = CGM.getConstantSignedPointer(Init, Schema, nullptr, GlobalDecl(),
+ QualType());
VTTComponents.push_back(Init);
}
@@ -129,23 +143,24 @@ uint64_t CodeGenVTables::getSubVTTIndex(const CXXRecordDecl *RD,
BaseSubobject Base) {
BaseSubobjectPairTy ClassSubobjectPair(RD, Base);
- SubVTTIndiciesMapTy::iterator I = SubVTTIndicies.find(ClassSubobjectPair);
- if (I != SubVTTIndicies.end())
+ SubVTTIndicesMapTy::iterator I = SubVTTIndices.find(ClassSubobjectPair);
+ if (I != SubVTTIndices.end())
return I->second;
VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
- for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator I =
- Builder.getSubVTTIndicies().begin(),
- E = Builder.getSubVTTIndicies().end(); I != E; ++I) {
+ for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator
+ I = Builder.getSubVTTIndices().begin(),
+ E = Builder.getSubVTTIndices().end();
+ I != E; ++I) {
// Insert all indices.
BaseSubobjectPairTy ClassSubobjectPair(RD, I->first);
- SubVTTIndicies.insert(std::make_pair(ClassSubobjectPair, I->second));
+ SubVTTIndices.insert(std::make_pair(ClassSubobjectPair, I->second));
}
- I = SubVTTIndicies.find(ClassSubobjectPair);
- assert(I != SubVTTIndicies.end() && "Did not find index!");
+ I = SubVTTIndices.find(ClassSubobjectPair);
+ assert(I != SubVTTIndices.end() && "Did not find index!");
return I->second;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
index 8dee3f74b44b..267bdf098297 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
@@ -95,7 +95,7 @@ static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
CGF,
Address(ReturnValue, CGF.ConvertTypeForMem(ResultType->getPointeeType()),
ClassAlign),
- Thunk.Return);
+ ClassDecl, Thunk.Return);
if (NullCheckValue) {
CGF.Builder.CreateBr(AdjustEnd);
@@ -131,6 +131,12 @@ static void resolveTopLevelMetadata(llvm::Function *Fn,
// they are referencing.
for (auto &BB : *Fn) {
for (auto &I : BB) {
+ for (llvm::DbgVariableRecord &DVR :
+ llvm::filterDbgVars(I.getDbgRecordRange())) {
+ auto *DILocal = DVR.getVariable();
+ if (!DILocal->isResolved())
+ DILocal->resolve();
+ }
if (auto *DII = dyn_cast<llvm::DbgVariableIntrinsic>(&I)) {
auto *DILocal = DII->getVariable();
if (!DILocal->isResolved())
@@ -201,21 +207,22 @@ CodeGenFunction::GenerateVarArgsThunk(llvm::Function *Fn,
// Find the first store of "this", which will be to the alloca associated
// with "this".
- Address ThisPtr =
- Address(&*AI, ConvertTypeForMem(MD->getFunctionObjectParameterType()),
- CGM.getClassPointerAlignment(MD->getParent()));
+ Address ThisPtr = makeNaturalAddressForPointer(
+ &*AI, MD->getFunctionObjectParameterType(),
+ CGM.getClassPointerAlignment(MD->getParent()));
llvm::BasicBlock *EntryBB = &Fn->front();
llvm::BasicBlock::iterator ThisStore =
llvm::find_if(*EntryBB, [&](llvm::Instruction &I) {
- return isa<llvm::StoreInst>(I) &&
- I.getOperand(0) == ThisPtr.getPointer();
+ return isa<llvm::StoreInst>(I) && I.getOperand(0) == &*AI;
});
assert(ThisStore != EntryBB->end() &&
"Store of this should be in entry block?");
// Adjust "this", if necessary.
Builder.SetInsertPoint(&*ThisStore);
- llvm::Value *AdjustedThisPtr =
- CGM.getCXXABI().performThisAdjustment(*this, ThisPtr, Thunk.This);
+
+ const CXXRecordDecl *ThisValueClass = Thunk.ThisType->getPointeeCXXRecordDecl();
+ llvm::Value *AdjustedThisPtr = CGM.getCXXABI().performThisAdjustment(
+ *this, ThisPtr, ThisValueClass, Thunk);
AdjustedThisPtr = Builder.CreateBitCast(AdjustedThisPtr,
ThisStore->getOperand(0)->getType());
ThisStore->setOperand(0, AdjustedThisPtr);
@@ -302,10 +309,15 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::FunctionCallee Callee,
const CXXMethodDecl *MD = cast<CXXMethodDecl>(CurGD.getDecl());
// Adjust the 'this' pointer if necessary
+ const CXXRecordDecl *ThisValueClass =
+ MD->getThisType()->getPointeeCXXRecordDecl();
+ if (Thunk)
+ ThisValueClass = Thunk->ThisType->getPointeeCXXRecordDecl();
+
llvm::Value *AdjustedThisPtr =
- Thunk ? CGM.getCXXABI().performThisAdjustment(
- *this, LoadCXXThisAddress(), Thunk->This)
- : LoadCXXThis();
+ Thunk ? CGM.getCXXABI().performThisAdjustment(*this, LoadCXXThisAddress(),
+ ThisValueClass, *Thunk)
+ : LoadCXXThis();
// If perfect forwarding is required a variadic method, a method using
// inalloca, or an unprototyped thunk, use musttail. Emit an error if this
@@ -499,10 +511,22 @@ llvm::Constant *CodeGenVTables::maybeEmitThunk(GlobalDecl GD,
SmallString<256> Name;
MangleContext &MCtx = CGM.getCXXABI().getMangleContext();
llvm::raw_svector_ostream Out(Name);
- if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD))
- MCtx.mangleCXXDtorThunk(DD, GD.getDtorType(), TI.This, Out);
- else
- MCtx.mangleThunk(MD, TI, Out);
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ MCtx.mangleCXXDtorThunk(DD, GD.getDtorType(), TI,
+ /* elideOverrideInfo */ false, Out);
+ } else
+ MCtx.mangleThunk(MD, TI, /* elideOverrideInfo */ false, Out);
+
+ if (CGM.getContext().useAbbreviatedThunkName(GD, Name.str())) {
+ Name = "";
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD))
+ MCtx.mangleCXXDtorThunk(DD, GD.getDtorType(), TI,
+ /* elideOverrideInfo */ true, Out);
+ else
+ MCtx.mangleThunk(MD, TI, /* elideOverrideInfo */ true, Out);
+ }
+
llvm::Type *ThunkVTableTy = CGM.getTypes().GetFunctionTypeForVTable(GD);
llvm::Constant *Thunk = CGM.GetAddrOfThunk(Name, ThunkVTableTy, GD);
@@ -814,11 +838,17 @@ void CodeGenVTables::addVTableComponent(ConstantArrayBuilder &builder,
nextVTableThunkIndex++;
fnPtr = maybeEmitThunk(GD, thunkInfo, /*ForVTable=*/true);
+ if (CGM.getCodeGenOpts().PointerAuth.CXXVirtualFunctionPointers) {
+ assert(thunkInfo.Method && "Method not set");
+ GD = GD.getWithDecl(thunkInfo.Method);
+ }
// Otherwise we can use the method definition directly.
} else {
llvm::Type *fnTy = CGM.getTypes().GetFunctionTypeForVTable(GD);
fnPtr = CGM.GetAddrOfFunction(GD, fnTy, /*ForVTable=*/true);
+ if (CGM.getCodeGenOpts().PointerAuth.CXXVirtualFunctionPointers)
+ GD = getItaniumVTableContext().findOriginalMethod(GD);
}
if (useRelativeLayout()) {
@@ -836,6 +866,9 @@ void CodeGenVTables::addVTableComponent(ConstantArrayBuilder &builder,
if (FnAS != GVAS)
fnPtr =
llvm::ConstantExpr::getAddrSpaceCast(fnPtr, CGM.GlobalsInt8PtrTy);
+ if (const auto &Schema =
+ CGM.getCodeGenOpts().PointerAuth.CXXVirtualFunctionPointers)
+ return builder.addSignedPointer(fnPtr, Schema, GD, QualType());
return builder.add(fnPtr);
}
}
@@ -1045,29 +1078,41 @@ llvm::GlobalVariable::LinkageTypes
CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
if (!RD->isExternallyVisible())
return llvm::GlobalVariable::InternalLinkage;
-
- // We're at the end of the translation unit, so the current key
- // function is fully correct.
- const CXXMethodDecl *keyFunction = Context.getCurrentKeyFunction(RD);
- if (keyFunction && !RD->hasAttr<DLLImportAttr>()) {
+
+ // In windows, the linkage of vtable is not related to modules.
+ bool IsInNamedModule = !getTarget().getCXXABI().isMicrosoft() &&
+ RD->isInNamedModule();
+ // If the CXXRecordDecl is not in a module unit, we need to get
+ // its key function. We're at the end of the translation unit, so the current
+ // key function is fully correct.
+ const CXXMethodDecl *keyFunction =
+ IsInNamedModule ? nullptr : Context.getCurrentKeyFunction(RD);
+ if (IsInNamedModule || (keyFunction && !RD->hasAttr<DLLImportAttr>())) {
// If this class has a key function, use that to determine the
// linkage of the vtable.
const FunctionDecl *def = nullptr;
- if (keyFunction->hasBody(def))
+ if (keyFunction && keyFunction->hasBody(def))
keyFunction = cast<CXXMethodDecl>(def);
- switch (keyFunction->getTemplateSpecializationKind()) {
- case TSK_Undeclared:
- case TSK_ExplicitSpecialization:
+ bool IsExternalDefinition =
+ IsInNamedModule ? RD->shouldEmitInExternalSource() : !def;
+
+ TemplateSpecializationKind Kind =
+ IsInNamedModule ? RD->getTemplateSpecializationKind()
+ : keyFunction->getTemplateSpecializationKind();
+
+ switch (Kind) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
assert(
- (def || CodeGenOpts.OptimizationLevel > 0 ||
+ (IsInNamedModule || def || CodeGenOpts.OptimizationLevel > 0 ||
CodeGenOpts.getDebugInfo() != llvm::codegenoptions::NoDebugInfo) &&
- "Shouldn't query vtable linkage without key function, "
- "optimizations, or debug info");
- if (!def && CodeGenOpts.OptimizationLevel > 0)
+ "Shouldn't query vtable linkage without the class in module units, "
+ "key function, optimizations, or debug info");
+ if (IsExternalDefinition && CodeGenOpts.OptimizationLevel > 0)
return llvm::GlobalVariable::AvailableExternallyLinkage;
- if (keyFunction->isInlined())
+ if (keyFunction && keyFunction->isInlined())
return !Context.getLangOpts().AppleKext
? llvm::GlobalVariable::LinkOnceODRLinkage
: llvm::Function::InternalLinkage;
@@ -1086,7 +1131,7 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
case TSK_ExplicitInstantiationDeclaration:
llvm_unreachable("Should not have been asked to emit this");
- }
+ }
}
// -fapple-kext mode does not support weak linkage, so we must use
@@ -1180,22 +1225,20 @@ bool CodeGenVTables::isVTableExternal(const CXXRecordDecl *RD) {
TSK == TSK_ExplicitInstantiationDefinition)
return false;
+ // Otherwise, if the class is attached to a module, the tables are uniquely
+ // emitted in the object for the module unit in which it is defined.
+ if (RD->isInNamedModule())
+ return RD->shouldEmitInExternalSource();
+
// Otherwise, if the class doesn't have a key function (possibly
// anymore), the vtable must be defined here.
const CXXMethodDecl *keyFunction = CGM.getContext().getCurrentKeyFunction(RD);
if (!keyFunction)
return false;
- const FunctionDecl *Def;
// Otherwise, if we don't have a definition of the key function, the
// vtable must be defined somewhere else.
- if (!keyFunction->hasBody(Def))
- return true;
-
- assert(Def && "The body of the key function is not assigned to Def?");
- // If the non-inline key function comes from another module unit, the vtable
- // must be defined there.
- return Def->isInAnotherModuleUnit() && !Def->isInlineSpecified();
+ return !keyFunction->hasBody();
}
/// Given that we're currently at the end of the translation unit, and
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.h b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.h
index 9d4223547050..c06bf7a525d9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.h
@@ -38,10 +38,10 @@ class CodeGenVTables {
typedef VTableLayout::AddressPointsMapTy VTableAddressPointsMapTy;
typedef std::pair<const CXXRecordDecl *, BaseSubobject> BaseSubobjectPairTy;
- typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t> SubVTTIndiciesMapTy;
+ typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t> SubVTTIndicesMapTy;
- /// SubVTTIndicies - Contains indices into the various sub-VTTs.
- SubVTTIndiciesMapTy SubVTTIndicies;
+ /// SubVTTIndices - Contains indices into the various sub-VTTs.
+ SubVTTIndicesMapTy SubVTTIndices;
typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t>
SecondaryVirtualPointerIndicesMapTy;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGValue.h b/contrib/llvm-project/clang/lib/CodeGen/CGValue.h
index 1e6f67250583..c4ec8d207d2e 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGValue.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGValue.h
@@ -14,12 +14,14 @@
#ifndef LLVM_CLANG_LIB_CODEGEN_CGVALUE_H
#define LLVM_CLANG_LIB_CODEGEN_CGVALUE_H
+#include "Address.h"
+#include "CGPointerAuthInfo.h"
+#include "CodeGenTBAA.h"
+#include "EHScopeStack.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Type.h"
-#include "llvm/IR/Value.h"
#include "llvm/IR/Type.h"
-#include "Address.h"
-#include "CodeGenTBAA.h"
+#include "llvm/IR/Value.h"
namespace llvm {
class Constant;
@@ -28,57 +30,64 @@ namespace llvm {
namespace clang {
namespace CodeGen {
- class AggValueSlot;
- class CodeGenFunction;
- struct CGBitFieldInfo;
+class AggValueSlot;
+class CGBuilderTy;
+class CodeGenFunction;
+struct CGBitFieldInfo;
/// RValue - This trivial value class is used to represent the result of an
/// expression that is evaluated. It can be one of three things: either a
/// simple LLVM SSA value, a pair of SSA values for complex numbers, or the
/// address of an aggregate value in memory.
class RValue {
- enum Flavor { Scalar, Complex, Aggregate };
+ friend struct DominatingValue<RValue>;
- // The shift to make to an aggregate's alignment to make it look
- // like a pointer.
- enum { AggAlignShift = 4 };
+ enum FlavorEnum { Scalar, Complex, Aggregate };
- // Stores first value and flavor.
- llvm::PointerIntPair<llvm::Value *, 2, Flavor> V1;
- // Stores second value and volatility.
- llvm::PointerIntPair<llvm::Value *, 1, bool> V2;
- // Stores element type for aggregate values.
- llvm::Type *ElementType;
+ union {
+ // Stores first and second value.
+ struct {
+ llvm::Value *first;
+ llvm::Value *second;
+ } Vals;
+
+ // Stores aggregate address.
+ Address AggregateAddr;
+ };
+
+ unsigned IsVolatile : 1;
+ unsigned Flavor : 2;
public:
- bool isScalar() const { return V1.getInt() == Scalar; }
- bool isComplex() const { return V1.getInt() == Complex; }
- bool isAggregate() const { return V1.getInt() == Aggregate; }
+ RValue() : Vals{nullptr, nullptr}, Flavor(Scalar) {}
+
+ bool isScalar() const { return Flavor == Scalar; }
+ bool isComplex() const { return Flavor == Complex; }
+ bool isAggregate() const { return Flavor == Aggregate; }
- bool isVolatileQualified() const { return V2.getInt(); }
+ bool isVolatileQualified() const { return IsVolatile; }
/// getScalarVal() - Return the Value* of this scalar value.
llvm::Value *getScalarVal() const {
assert(isScalar() && "Not a scalar!");
- return V1.getPointer();
+ return Vals.first;
}
/// getComplexVal - Return the real/imag components of this complex value.
///
std::pair<llvm::Value *, llvm::Value *> getComplexVal() const {
- return std::make_pair(V1.getPointer(), V2.getPointer());
+ return std::make_pair(Vals.first, Vals.second);
}
/// getAggregateAddr() - Return the Value* of the address of the aggregate.
Address getAggregateAddress() const {
assert(isAggregate() && "Not an aggregate!");
- auto align = reinterpret_cast<uintptr_t>(V2.getPointer()) >> AggAlignShift;
- return Address(
- V1.getPointer(), ElementType, CharUnits::fromQuantity(align));
+ return AggregateAddr;
}
- llvm::Value *getAggregatePointer() const {
- assert(isAggregate() && "Not an aggregate!");
- return V1.getPointer();
+
+ llvm::Value *getAggregatePointer(QualType PointeeType,
+ CodeGenFunction &CGF) const {
+ return getAggregateAddress().getBasePointer();
}
static RValue getIgnored() {
@@ -88,17 +97,19 @@ public:
static RValue get(llvm::Value *V) {
RValue ER;
- ER.V1.setPointer(V);
- ER.V1.setInt(Scalar);
- ER.V2.setInt(false);
+ ER.Vals.first = V;
+ ER.Flavor = Scalar;
+ ER.IsVolatile = false;
return ER;
}
+ static RValue get(Address Addr, CodeGenFunction &CGF) {
+ return RValue::get(Addr.emitRawPointer(CGF));
+ }
static RValue getComplex(llvm::Value *V1, llvm::Value *V2) {
RValue ER;
- ER.V1.setPointer(V1);
- ER.V2.setPointer(V2);
- ER.V1.setInt(Complex);
- ER.V2.setInt(false);
+ ER.Vals = {V1, V2};
+ ER.Flavor = Complex;
+ ER.IsVolatile = false;
return ER;
}
static RValue getComplex(const std::pair<llvm::Value *, llvm::Value *> &C) {
@@ -107,15 +118,15 @@ public:
// FIXME: Aggregate rvalues need to retain information about whether they are
// volatile or not. Remove default to find all places that probably get this
// wrong.
+
+ /// Convert an Address to an RValue. If the Address is not
+ /// signed, create an RValue using the unsigned address. Otherwise, resign the
+ /// address using the provided type.
static RValue getAggregate(Address addr, bool isVolatile = false) {
RValue ER;
- ER.V1.setPointer(addr.getPointer());
- ER.V1.setInt(Aggregate);
- ER.ElementType = addr.getElementType();
-
- auto align = static_cast<uintptr_t>(addr.getAlignment().getQuantity());
- ER.V2.setPointer(reinterpret_cast<llvm::Value*>(align << AggAlignShift));
- ER.V2.setInt(isVolatile);
+ ER.AggregateAddr = addr;
+ ER.Flavor = Aggregate;
+ ER.IsVolatile = isVolatile;
return ER;
}
};
@@ -178,8 +189,10 @@ class LValue {
MatrixElt // This is a matrix element, use getVector*
} LVType;
- llvm::Value *V;
- llvm::Type *ElementType;
+ union {
+ Address Addr = Address::invalid();
+ llvm::Value *V;
+ };
union {
// Index into a vector subscript: V[i]
@@ -197,10 +210,6 @@ class LValue {
// 'const' is unused here
Qualifiers Quals;
- // The alignment to use when accessing this lvalue. (For vector elements,
- // this is the alignment of the whole vector.)
- unsigned Alignment;
-
// objective-c's ivar
bool Ivar:1;
@@ -225,32 +234,25 @@ class LValue {
// this lvalue.
bool Nontemporal : 1;
- // The pointer is known not to be null.
- bool IsKnownNonNull : 1;
-
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
Expr *BaseIvarExp;
private:
- void Initialize(QualType Type, Qualifiers Quals, CharUnits Alignment,
+ void Initialize(QualType Type, Qualifiers Quals, Address Addr,
LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) {
- assert((!Alignment.isZero() || Type->isIncompleteType()) &&
- "initializing l-value with zero alignment!");
- if (isGlobalReg())
- assert(ElementType == nullptr && "Global reg does not store elem type");
- else
- assert(ElementType != nullptr && "Must have elem type");
-
this->Type = Type;
this->Quals = Quals;
const unsigned MaxAlign = 1U << 31;
- this->Alignment = Alignment.getQuantity() <= MaxAlign
- ? Alignment.getQuantity()
- : MaxAlign;
- assert(this->Alignment == Alignment.getQuantity() &&
- "Alignment exceeds allowed max!");
+ CharUnits Alignment = Addr.getAlignment();
+ assert((isGlobalReg() || !Alignment.isZero() || Type->isIncompleteType()) &&
+ "initializing l-value with zero alignment!");
+ if (Alignment.getQuantity() > MaxAlign) {
+ assert(false && "Alignment exceeds allowed max!");
+ Alignment = CharUnits::fromQuantity(MaxAlign);
+ }
+ this->Addr = Addr;
this->BaseInfo = BaseInfo;
this->TBAAInfo = TBAAInfo;
@@ -262,6 +264,16 @@ private:
this->BaseIvarExp = nullptr;
}
+ void initializeSimpleLValue(Address Addr, QualType Type,
+ LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo,
+ ASTContext &Context) {
+ Qualifiers QS = Type.getQualifiers();
+ QS.setObjCGCAttr(Context.getObjCGCAttrKind(Type));
+ LVType = Simple;
+ Initialize(Type, QS, Addr, BaseInfo, TBAAInfo);
+ assert(Addr.getBasePointer()->getType()->isPointerTy());
+ }
+
public:
bool isSimple() const { return LVType == Simple; }
bool isVectorElt() const { return LVType == VectorElt; }
@@ -328,45 +340,44 @@ public:
LangAS getAddressSpace() const { return Quals.getAddressSpace(); }
- CharUnits getAlignment() const { return CharUnits::fromQuantity(Alignment); }
- void setAlignment(CharUnits A) { Alignment = A.getQuantity(); }
+ CharUnits getAlignment() const { return Addr.getAlignment(); }
+ void setAlignment(CharUnits A) { Addr.setAlignment(A); }
LValueBaseInfo getBaseInfo() const { return BaseInfo; }
void setBaseInfo(LValueBaseInfo Info) { BaseInfo = Info; }
- KnownNonNull_t isKnownNonNull() const {
- return (KnownNonNull_t)IsKnownNonNull;
- }
+ KnownNonNull_t isKnownNonNull() const { return Addr.isKnownNonNull(); }
LValue setKnownNonNull() {
- IsKnownNonNull = true;
+ Addr.setKnownNonNull();
return *this;
}
// simple lvalue
- llvm::Value *getPointer(CodeGenFunction &CGF) const {
- assert(isSimple());
- return V;
- }
- Address getAddress(CodeGenFunction &CGF) const {
- return Address(getPointer(CGF), ElementType, getAlignment(),
- isKnownNonNull());
- }
- void setAddress(Address address) {
- assert(isSimple());
- V = address.getPointer();
- ElementType = address.getElementType();
- Alignment = address.getAlignment().getQuantity();
- IsKnownNonNull = address.isKnownNonNull();
+ llvm::Value *getPointer(CodeGenFunction &CGF) const;
+ llvm::Value *emitResignedPointer(QualType PointeeTy,
+ CodeGenFunction &CGF) const;
+ llvm::Value *emitRawPointer(CodeGenFunction &CGF) const;
+
+ Address getAddress() const { return Addr; }
+
+ void setAddress(Address address) { Addr = address; }
+
+ CGPointerAuthInfo getPointerAuthInfo() const {
+ return Addr.getPointerAuthInfo();
}
// vector elt lvalue
Address getVectorAddress() const {
- return Address(getVectorPointer(), ElementType, getAlignment(),
- (KnownNonNull_t)isKnownNonNull());
+ assert(isVectorElt());
+ return Addr;
+ }
+ llvm::Value *getRawVectorPointer(CodeGenFunction &CGF) const {
+ assert(isVectorElt());
+ return Addr.emitRawPointer(CGF);
}
llvm::Value *getVectorPointer() const {
assert(isVectorElt());
- return V;
+ return Addr.getBasePointer();
}
llvm::Value *getVectorIdx() const {
assert(isVectorElt());
@@ -374,12 +385,12 @@ public:
}
Address getMatrixAddress() const {
- return Address(getMatrixPointer(), ElementType, getAlignment(),
- (KnownNonNull_t)isKnownNonNull());
+ assert(isMatrixElt());
+ return Addr;
}
llvm::Value *getMatrixPointer() const {
assert(isMatrixElt());
- return V;
+ return Addr.getBasePointer();
}
llvm::Value *getMatrixIdx() const {
assert(isMatrixElt());
@@ -388,12 +399,12 @@ public:
// extended vector elements.
Address getExtVectorAddress() const {
- return Address(getExtVectorPointer(), ElementType, getAlignment(),
- (KnownNonNull_t)isKnownNonNull());
+ assert(isExtVectorElt());
+ return Addr;
}
- llvm::Value *getExtVectorPointer() const {
+ llvm::Value *getRawExtVectorPointer(CodeGenFunction &CGF) const {
assert(isExtVectorElt());
- return V;
+ return Addr.emitRawPointer(CGF);
}
llvm::Constant *getExtVectorElts() const {
assert(isExtVectorElt());
@@ -402,10 +413,14 @@ public:
// bitfield lvalue
Address getBitFieldAddress() const {
- return Address(getBitFieldPointer(), ElementType, getAlignment(),
- (KnownNonNull_t)isKnownNonNull());
+ assert(isBitField());
+ return Addr;
+ }
+ llvm::Value *getRawBitFieldPointer(CodeGenFunction &CGF) const {
+ assert(isBitField());
+ return Addr.emitRawPointer(CGF);
}
- llvm::Value *getBitFieldPointer() const { assert(isBitField()); return V; }
+
const CGBitFieldInfo &getBitFieldInfo() const {
assert(isBitField());
return *BitFieldInfo;
@@ -414,18 +429,13 @@ public:
// global register lvalue
llvm::Value *getGlobalReg() const { assert(isGlobalReg()); return V; }
- static LValue MakeAddr(Address address, QualType type, ASTContext &Context,
+ static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context,
LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) {
- Qualifiers qs = type.getQualifiers();
- qs.setObjCGCAttr(Context.getObjCGCAttrKind(type));
-
LValue R;
R.LVType = Simple;
- assert(address.getPointer()->getType()->isPointerTy());
- R.V = address.getPointer();
- R.ElementType = address.getElementType();
- R.IsKnownNonNull = address.isKnownNonNull();
- R.Initialize(type, qs, address.getAlignment(), BaseInfo, TBAAInfo);
+ R.initializeSimpleLValue(Addr, type, BaseInfo, TBAAInfo, Context);
+ R.Addr = Addr;
+ assert(Addr.getType()->isPointerTy());
return R;
}
@@ -434,26 +444,18 @@ public:
TBAAAccessInfo TBAAInfo) {
LValue R;
R.LVType = VectorElt;
- R.V = vecAddress.getPointer();
- R.ElementType = vecAddress.getElementType();
R.VectorIdx = Idx;
- R.IsKnownNonNull = vecAddress.isKnownNonNull();
- R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(),
- BaseInfo, TBAAInfo);
+ R.Initialize(type, type.getQualifiers(), vecAddress, BaseInfo, TBAAInfo);
return R;
}
- static LValue MakeExtVectorElt(Address vecAddress, llvm::Constant *Elts,
+ static LValue MakeExtVectorElt(Address Addr, llvm::Constant *Elts,
QualType type, LValueBaseInfo BaseInfo,
TBAAAccessInfo TBAAInfo) {
LValue R;
R.LVType = ExtVectorElt;
- R.V = vecAddress.getPointer();
- R.ElementType = vecAddress.getElementType();
R.VectorElts = Elts;
- R.IsKnownNonNull = vecAddress.isKnownNonNull();
- R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(),
- BaseInfo, TBAAInfo);
+ R.Initialize(type, type.getQualifiers(), Addr, BaseInfo, TBAAInfo);
return R;
}
@@ -468,12 +470,8 @@ public:
TBAAAccessInfo TBAAInfo) {
LValue R;
R.LVType = BitField;
- R.V = Addr.getPointer();
- R.ElementType = Addr.getElementType();
R.BitFieldInfo = &Info;
- R.IsKnownNonNull = Addr.isKnownNonNull();
- R.Initialize(type, type.getQualifiers(), Addr.getAlignment(), BaseInfo,
- TBAAInfo);
+ R.Initialize(type, type.getQualifiers(), Addr, BaseInfo, TBAAInfo);
return R;
}
@@ -481,11 +479,9 @@ public:
QualType type) {
LValue R;
R.LVType = GlobalReg;
- R.V = V;
- R.ElementType = nullptr;
- R.IsKnownNonNull = true;
- R.Initialize(type, type.getQualifiers(), alignment,
+ R.Initialize(type, type.getQualifiers(), Address::invalid(),
LValueBaseInfo(AlignmentSource::Decl), TBAAAccessInfo());
+ R.V = V;
return R;
}
@@ -494,17 +490,13 @@ public:
TBAAAccessInfo TBAAInfo) {
LValue R;
R.LVType = MatrixElt;
- R.V = matAddress.getPointer();
- R.ElementType = matAddress.getElementType();
R.VectorIdx = Idx;
- R.IsKnownNonNull = matAddress.isKnownNonNull();
- R.Initialize(type, type.getQualifiers(), matAddress.getAlignment(),
- BaseInfo, TBAAInfo);
+ R.Initialize(type, type.getQualifiers(), matAddress, BaseInfo, TBAAInfo);
return R;
}
- RValue asAggregateRValue(CodeGenFunction &CGF) const {
- return RValue::getAggregate(getAddress(CGF), isVolatileQualified());
+ RValue asAggregateRValue() const {
+ return RValue::getAggregate(getAddress(), isVolatileQualified());
}
};
@@ -607,11 +599,11 @@ public:
}
static AggValueSlot
- forLValue(const LValue &LV, CodeGenFunction &CGF, IsDestructed_t isDestructed,
+ forLValue(const LValue &LV, IsDestructed_t isDestructed,
NeedsGCBarriers_t needsGC, IsAliased_t isAliased,
Overlap_t mayOverlap, IsZeroed_t isZeroed = IsNotZeroed,
IsSanitizerChecked_t isChecked = IsNotSanitizerChecked) {
- return forAddr(LV.getAddress(CGF), LV.getQuals(), isDestructed, needsGC,
+ return forAddr(LV.getAddress(), LV.getQuals(), isDestructed, needsGC,
isAliased, mayOverlap, isZeroed, isChecked);
}
@@ -643,17 +635,17 @@ public:
return NeedsGCBarriers_t(ObjCGCFlag);
}
- llvm::Value *getPointer() const {
- return Addr.getPointer();
+ llvm::Value *getPointer(QualType PointeeTy, CodeGenFunction &CGF) const;
+
+ llvm::Value *emitRawPointer(CodeGenFunction &CGF) const {
+ return Addr.isValid() ? Addr.emitRawPointer(CGF) : nullptr;
}
Address getAddress() const {
return Addr;
}
- bool isIgnored() const {
- return !Addr.isValid();
- }
+ bool isIgnored() const { return !Addr.isValid(); }
CharUnits getAlignment() const {
return Addr.getAlignment();
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
index f8038497d90a..e87226e60297 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
@@ -25,8 +25,11 @@
#include "clang/CodeGen/ModuleBuilder.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendActions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/MultiplexConsumer.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Serialization/ASTWriter.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
@@ -57,10 +60,6 @@ using namespace llvm;
#define DEBUG_TYPE "codegenaction"
-namespace llvm {
-extern cl::opt<bool> ClRelinkBuiltinBitcodePostop;
-}
-
namespace clang {
class BackendConsumer;
class ClangDiagnosticHandler final : public DiagnosticHandler {
@@ -109,56 +108,50 @@ static void reportOptRecordError(Error E, DiagnosticsEngine &Diags,
});
}
-BackendConsumer::BackendConsumer(BackendAction Action, DiagnosticsEngine &Diags,
- IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
- const HeaderSearchOptions &HeaderSearchOpts,
- const PreprocessorOptions &PPOpts,
- const CodeGenOptions &CodeGenOpts,
- const TargetOptions &TargetOpts,
- const LangOptions &LangOpts,
- const std::string &InFile,
- SmallVector<LinkModule, 4> LinkModules,
- std::unique_ptr<raw_pwrite_stream> OS,
- LLVMContext &C,
- CoverageSourceInfo *CoverageInfo)
- : Diags(Diags), Action(Action), HeaderSearchOpts(HeaderSearchOpts),
- CodeGenOpts(CodeGenOpts), TargetOpts(TargetOpts), LangOpts(LangOpts),
- AsmOutStream(std::move(OS)), Context(nullptr), FS(VFS),
- LLVMIRGeneration("irgen", "LLVM IR Generation Time"),
- LLVMIRGenerationRefCount(0),
- Gen(CreateLLVMCodeGen(Diags, InFile, std::move(VFS), HeaderSearchOpts,
- PPOpts, CodeGenOpts, C, CoverageInfo)),
- LinkModules(std::move(LinkModules)) {
- TimerIsEnabled = CodeGenOpts.TimePasses;
- llvm::TimePassesIsEnabled = CodeGenOpts.TimePasses;
- llvm::TimePassesPerRun = CodeGenOpts.TimePassesPerRun;
+BackendConsumer::BackendConsumer(
+ BackendAction Action, DiagnosticsEngine &Diags,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
+ const HeaderSearchOptions &HeaderSearchOpts,
+ const PreprocessorOptions &PPOpts, const CodeGenOptions &CodeGenOpts,
+ const TargetOptions &TargetOpts, const LangOptions &LangOpts,
+ const std::string &InFile, SmallVector<LinkModule, 4> LinkModules,
+ std::unique_ptr<raw_pwrite_stream> OS, LLVMContext &C,
+ CoverageSourceInfo *CoverageInfo)
+ : Diags(Diags), Action(Action), HeaderSearchOpts(HeaderSearchOpts),
+ CodeGenOpts(CodeGenOpts), TargetOpts(TargetOpts), LangOpts(LangOpts),
+ AsmOutStream(std::move(OS)), Context(nullptr), FS(VFS),
+ LLVMIRGeneration("irgen", "LLVM IR Generation Time"),
+ LLVMIRGenerationRefCount(0),
+ Gen(CreateLLVMCodeGen(Diags, InFile, std::move(VFS), HeaderSearchOpts,
+ PPOpts, CodeGenOpts, C, CoverageInfo)),
+ LinkModules(std::move(LinkModules)) {
+ TimerIsEnabled = CodeGenOpts.TimePasses;
+ llvm::TimePassesIsEnabled = CodeGenOpts.TimePasses;
+ llvm::TimePassesPerRun = CodeGenOpts.TimePassesPerRun;
}
// This constructor is used in installing an empty BackendConsumer
// to use the clang diagnostic handler for IR input files. It avoids
// initializing the OS field.
-BackendConsumer::BackendConsumer(BackendAction Action, DiagnosticsEngine &Diags,
- IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
- const HeaderSearchOptions &HeaderSearchOpts,
- const PreprocessorOptions &PPOpts,
- const CodeGenOptions &CodeGenOpts,
- const TargetOptions &TargetOpts,
- const LangOptions &LangOpts,
- llvm::Module *Module,
- SmallVector<LinkModule, 4> LinkModules,
- LLVMContext &C,
- CoverageSourceInfo *CoverageInfo)
- : Diags(Diags), Action(Action), HeaderSearchOpts(HeaderSearchOpts),
- CodeGenOpts(CodeGenOpts), TargetOpts(TargetOpts), LangOpts(LangOpts),
- Context(nullptr), FS(VFS),
- LLVMIRGeneration("irgen", "LLVM IR Generation Time"),
- LLVMIRGenerationRefCount(0),
- Gen(CreateLLVMCodeGen(Diags, "", std::move(VFS), HeaderSearchOpts,
- PPOpts, CodeGenOpts, C, CoverageInfo)),
- LinkModules(std::move(LinkModules)), CurLinkModule(Module) {
- TimerIsEnabled = CodeGenOpts.TimePasses;
- llvm::TimePassesIsEnabled = CodeGenOpts.TimePasses;
- llvm::TimePassesPerRun = CodeGenOpts.TimePassesPerRun;
+BackendConsumer::BackendConsumer(
+ BackendAction Action, DiagnosticsEngine &Diags,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
+ const HeaderSearchOptions &HeaderSearchOpts,
+ const PreprocessorOptions &PPOpts, const CodeGenOptions &CodeGenOpts,
+ const TargetOptions &TargetOpts, const LangOptions &LangOpts,
+ llvm::Module *Module, SmallVector<LinkModule, 4> LinkModules,
+ LLVMContext &C, CoverageSourceInfo *CoverageInfo)
+ : Diags(Diags), Action(Action), HeaderSearchOpts(HeaderSearchOpts),
+ CodeGenOpts(CodeGenOpts), TargetOpts(TargetOpts), LangOpts(LangOpts),
+ Context(nullptr), FS(VFS),
+ LLVMIRGeneration("irgen", "LLVM IR Generation Time"),
+ LLVMIRGenerationRefCount(0),
+ Gen(CreateLLVMCodeGen(Diags, "", std::move(VFS), HeaderSearchOpts, PPOpts,
+ CodeGenOpts, C, CoverageInfo)),
+ LinkModules(std::move(LinkModules)), CurLinkModule(Module) {
+ TimerIsEnabled = CodeGenOpts.TimePasses;
+ llvm::TimePassesIsEnabled = CodeGenOpts.TimePasses;
+ llvm::TimePassesPerRun = CodeGenOpts.TimePassesPerRun;
}
llvm::Module* BackendConsumer::getModule() const {
@@ -233,17 +226,11 @@ void BackendConsumer::HandleInterestingDecl(DeclGroupRef D) {
HandleTopLevelDecl(D);
}
-// Links each entry in LinkModules into our module. Returns true on error.
-bool BackendConsumer::LinkInModules(llvm::Module *M, bool ShouldLinkFiles) {
-
+// Links each entry in LinkModules into our module. Returns true on error.
+bool BackendConsumer::LinkInModules(llvm::Module *M) {
for (auto &LM : LinkModules) {
assert(LM.Module && "LinkModule does not actually have a module");
- // If ShouldLinkFiles is not set, skip files added via the
- // -mlink-bitcode-files, only linking -mlink-builtin-bitcode
- if (!LM.Internalize && !ShouldLinkFiles)
- continue;
-
if (LM.PropagateAttrs)
for (Function &F : *LM.Module) {
// Skip intrinsics. Keep consistent with how intrinsics are created
@@ -257,37 +244,22 @@ bool BackendConsumer::LinkInModules(llvm::Module *M, bool ShouldLinkFiles) {
CurLinkModule = LM.Module.get();
bool Err;
- auto DoLink = [&](auto &Mod) {
- if (LM.Internalize) {
- Err = Linker::linkModules(
- *M, std::move(Mod), LM.LinkFlags,
- [](llvm::Module &M, const llvm::StringSet<> &GVS) {
- internalizeModule(M, [&GVS](const llvm::GlobalValue &GV) {
- return !GV.hasName() || (GVS.count(GV.getName()) == 0);
- });
+ if (LM.Internalize) {
+ Err = Linker::linkModules(
+ *M, std::move(LM.Module), LM.LinkFlags,
+ [](llvm::Module &M, const llvm::StringSet<> &GVS) {
+ internalizeModule(M, [&GVS](const llvm::GlobalValue &GV) {
+ return !GV.hasName() || (GVS.count(GV.getName()) == 0);
});
- } else
- Err = Linker::linkModules(*M, std::move(Mod), LM.LinkFlags);
- };
+ });
+ } else
+ Err = Linker::linkModules(*M, std::move(LM.Module), LM.LinkFlags);
- // Create a Clone to move to the linker, which preserves the original
- // linking modules, allowing them to be linked again in the future
- if (ClRelinkBuiltinBitcodePostop) {
- // TODO: If CloneModule() is updated to support cloning of unmaterialized
- // modules, we can remove this
- if (Error E = CurLinkModule->materializeAll())
- return false;
-
- std::unique_ptr<llvm::Module> Clone = llvm::CloneModule(*LM.Module);
-
- DoLink(Clone);
- }
- // Otherwise we can link (and clean up) the original modules
- else {
- DoLink(LM.Module);
- }
+ if (Err)
+ return true;
}
+ LinkModules.clear();
return false; // success
}
@@ -322,6 +294,9 @@ void BackendConsumer::HandleTranslationUnit(ASTContext &C) {
Ctx.setDiagnosticHandler(std::make_unique<ClangDiagnosticHandler>(
CodeGenOpts, this));
+ Ctx.setDefaultTargetCPU(TargetOpts.CPU);
+ Ctx.setDefaultTargetFeatures(llvm::join(TargetOpts.Features, ","));
+
Expected<std::unique_ptr<llvm::ToolOutputFile>> OptRecordFileOrErr =
setupLLVMOptimizationRemarks(
Ctx, CodeGenOpts.OptRecordFile, CodeGenOpts.OptRecordPasses,
@@ -350,7 +325,7 @@ void BackendConsumer::HandleTranslationUnit(ASTContext &C) {
}
// Link each LinkModule into our module.
- if (LinkInModules(getModule()))
+ if (!CodeGenOpts.LinkBitcodePostopt && LinkInModules(getModule()))
return;
for (auto &F : getModule()->functions()) {
@@ -401,7 +376,7 @@ void BackendConsumer::CompleteTentativeDefinition(VarDecl *D) {
Gen->CompleteTentativeDefinition(D);
}
-void BackendConsumer::CompleteExternalDeclaration(VarDecl *D) {
+void BackendConsumer::CompleteExternalDeclaration(DeclaratorDecl *D) {
Gen->CompleteExternalDeclaration(D);
}
@@ -994,6 +969,12 @@ CodeGenerator *CodeGenAction::getCodeGenerator() const {
return BEConsumer->getCodeGenerator();
}
+bool CodeGenAction::BeginSourceFileAction(CompilerInstance &CI) {
+ if (CI.getFrontendOpts().GenReducedBMI)
+ CI.getLangOpts().setCompilingModule(LangOptions::CMK_ModuleInterface);
+ return true;
+}
+
static std::unique_ptr<raw_pwrite_stream>
GetOutputStream(CompilerInstance &CI, StringRef InFile, BackendAction Action) {
switch (Action) {
@@ -1051,6 +1032,16 @@ CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
CI.getPreprocessor().addPPCallbacks(std::move(Callbacks));
}
+ if (CI.getFrontendOpts().GenReducedBMI &&
+ !CI.getFrontendOpts().ModuleOutputPath.empty()) {
+ std::vector<std::unique_ptr<ASTConsumer>> Consumers(2);
+ Consumers[0] = std::make_unique<ReducedBMIGenerator>(
+ CI.getPreprocessor(), CI.getModuleCache(),
+ CI.getFrontendOpts().ModuleOutputPath);
+ Consumers[1] = std::move(Result);
+ return std::make_unique<MultiplexConsumer>(std::move(Consumers));
+ }
+
return std::move(Result);
}
@@ -1203,7 +1194,7 @@ void CodeGenAction::ExecuteAction() {
std::move(LinkModules), *VMContext, nullptr);
// Link in each pending link module.
- if (Result.LinkInModules(&*TheModule))
+ if (!CodeGenOpts.LinkBitcodePostopt && Result.LinkInModules(&*TheModule))
return;
// PR44896: Force DiscardValueNames as false. DiscardValueNames cannot be
@@ -1212,6 +1203,9 @@ void CodeGenAction::ExecuteAction() {
Ctx.setDiagnosticHandler(
std::make_unique<ClangDiagnosticHandler>(CodeGenOpts, &Result));
+ Ctx.setDefaultTargetCPU(TargetOpts.CPU);
+ Ctx.setDefaultTargetFeatures(llvm::join(TargetOpts.Features, ","));
+
Expected<std::unique_ptr<llvm::ToolOutputFile>> OptRecordFileOrErr =
setupLLVMOptimizationRemarks(
Ctx, CodeGenOpts.OptRecordFile, CodeGenOpts.OptRecordPasses,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
index 2673e4a5cee7..2b2e23f1e5d7 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -31,6 +31,7 @@
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
+#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
@@ -52,6 +53,10 @@
using namespace clang;
using namespace CodeGen;
+namespace llvm {
+extern cl::opt<bool> EnableSingleByteCoverage;
+} // namespace llvm
+
/// shouldEmitLifetimeMarkers - Decide whether we need emit the life-time
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
@@ -86,6 +91,8 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
CodeGenFunction::~CodeGenFunction() {
assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
+ assert(DeferredDeactivationCleanupStack.empty() &&
+ "missed to deactivate a cleanup");
if (getLangOpts().OpenMP && CurFn)
CGM.getOpenMPRuntime().functionFinished(*this);
@@ -188,26 +195,47 @@ CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
CGF.Builder.setDefaultConstrainedRounding(OldRounding);
}
-LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
+static LValue
+makeNaturalAlignAddrLValue(llvm::Value *V, QualType T, bool ForPointeeType,
+ bool MightBeSigned, CodeGenFunction &CGF,
+ KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
- CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
- Address Addr(V, ConvertTypeForMem(T), Alignment);
- return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
+ CharUnits Alignment =
+ CGF.CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo, ForPointeeType);
+ Address Addr =
+ MightBeSigned
+ ? CGF.makeNaturalAddressForPointer(V, T, Alignment, false, nullptr,
+ nullptr, IsKnownNonNull)
+ : Address(V, CGF.ConvertTypeForMem(T), Alignment, IsKnownNonNull);
+ return CGF.MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
+}
+
+LValue
+CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T,
+ KnownNonNull_t IsKnownNonNull) {
+ return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false,
+ /*MightBeSigned*/ true, *this,
+ IsKnownNonNull);
}
-/// Given a value of type T* that may not be to a complete object,
-/// construct an l-value with the natural pointee alignment of T.
LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
- LValueBaseInfo BaseInfo;
- TBAAAccessInfo TBAAInfo;
- CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
- /* forPointeeType= */ true);
- Address Addr(V, ConvertTypeForMem(T), Align);
- return MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
+ return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true,
+ /*MightBeSigned*/ true, *this);
}
+LValue CodeGenFunction::MakeNaturalAlignRawAddrLValue(llvm::Value *V,
+ QualType T) {
+ return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false,
+ /*MightBeSigned*/ false, *this);
+}
+
+LValue CodeGenFunction::MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V,
+ QualType T) {
+ return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true,
+ /*MightBeSigned*/ false, *this);
+}
llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
return CGM.getTypes().ConvertTypeForMem(T);
@@ -217,6 +245,11 @@ llvm::Type *CodeGenFunction::ConvertType(QualType T) {
return CGM.getTypes().ConvertType(T);
}
+llvm::Type *CodeGenFunction::convertTypeForLoadStore(QualType ASTTy,
+ llvm::Type *LLVMTy) {
+ return CGM.getTypes().convertTypeForLoadStore(ASTTy, LLVMTy);
+}
+
TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
type = type.getCanonicalType();
while (true) {
@@ -262,6 +295,7 @@ TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
case Type::Record:
case Type::ObjCObject:
case Type::ObjCInterface:
+ case Type::ArrayParameter:
return TEK_Aggregate;
// We operate on atomic values according to their underlying type.
@@ -331,6 +365,16 @@ static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
assert(BreakContinueStack.empty() &&
"mismatched push/pop in break/continue stack!");
+ assert(LifetimeExtendedCleanupStack.empty() &&
+ "mismatched push/pop of cleanups in EHStack!");
+ assert(DeferredDeactivationCleanupStack.empty() &&
+ "mismatched activate/deactivate of cleanups!");
+
+ if (CGM.shouldEmitConvergenceTokens()) {
+ ConvergenceTokenStack.pop_back();
+ assert(ConvergenceTokenStack.empty() &&
+ "mismatched push/pop in convergence stack!");
+ }
bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
&& NumSimpleReturnExprs == NumReturnExprs
@@ -520,7 +564,8 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
ReturnBlock.getBlock()->eraseFromParent();
}
if (ReturnValue.isValid()) {
- auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
+ auto *RetAlloca =
+ dyn_cast<llvm::AllocaInst>(ReturnValue.emitRawPointer(*this));
if (RetAlloca && RetAlloca->use_empty()) {
RetAlloca->eraseFromParent();
ReturnValue = Address::invalid();
@@ -790,6 +835,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
if (SanOpts.has(SanitizerKind::Thread))
Fn->addFnAttr(llvm::Attribute::SanitizeThread);
+ if (SanOpts.has(SanitizerKind::NumericalStability))
+ Fn->addFnAttr(llvm::Attribute::SanitizeNumericalStability);
if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
}
@@ -806,7 +853,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// .cxx_destruct, __destroy_helper_block_ and all of their calees at run time.
if (SanOpts.has(SanitizerKind::Thread)) {
if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
- IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
+ const IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
if (OMD->getMethodFamily() == OMF_dealloc ||
OMD->getMethodFamily() == OMF_initialize ||
(OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
@@ -831,6 +878,17 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
SanOpts.Mask &= ~SanitizerKind::Null;
+ // Add pointer authentication attributes.
+ const CodeGenOptions &CodeGenOpts = CGM.getCodeGenOpts();
+ if (CodeGenOpts.PointerAuth.ReturnAddresses)
+ Fn->addFnAttr("ptrauth-returns");
+ if (CodeGenOpts.PointerAuth.FunctionPointers)
+ Fn->addFnAttr("ptrauth-calls");
+ if (CodeGenOpts.PointerAuth.AuthTraps)
+ Fn->addFnAttr("ptrauth-auth-traps");
+ if (CodeGenOpts.PointerAuth.IndirectGotos)
+ Fn->addFnAttr("ptrauth-indirect-gotos");
+
// Apply xray attributes to the function (as a string, for now)
bool AlwaysXRayAttr = false;
if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
@@ -937,6 +995,9 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
if (D && D->hasAttr<NoProfileFunctionAttr>())
Fn->addFnAttr(llvm::Attribute::NoProfile);
+ if (D && D->hasAttr<HybridPatchableAttr>())
+ Fn->addFnAttr(llvm::Attribute::HybridPatchable);
+
if (D) {
// Function attributes take precedence over command line flags.
if (auto *A = D->getAttr<FunctionReturnThunksAttr>()) {
@@ -957,6 +1018,11 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
EmitKernelMetadata(FD, Fn);
}
+ if (FD && FD->hasAttr<ClspvLibclcBuiltinAttr>()) {
+ Fn->setMetadata("clspv_libclc_builtin",
+ llvm::MDNode::get(getLLVMContext(), {}));
+ }
+
// If we are checking function types, emit a function type signature as
// prologue data.
if (FD && SanOpts.has(SanitizerKind::Function)) {
@@ -974,7 +1040,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
auto Nullability = FnRetTy->getNullability();
- if (Nullability && *Nullability == NullabilityKind::NonNull) {
+ if (Nullability && *Nullability == NullabilityKind::NonNull &&
+ !FnRetTy->isRecordType()) {
if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
RetValNullabilityPrecondition =
@@ -1117,13 +1184,14 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
auto AI = CurFn->arg_begin();
if (CurFnInfo->getReturnInfo().isSRetAfterThis())
++AI;
- ReturnValue =
- Address(&*AI, ConvertType(RetTy),
- CurFnInfo->getReturnInfo().getIndirectAlign(), KnownNonNull);
+ ReturnValue = makeNaturalAddressForPointer(
+ &*AI, RetTy, CurFnInfo->getReturnInfo().getIndirectAlign(), false,
+ nullptr, nullptr, KnownNonNull);
if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
- ReturnValuePointer = CreateDefaultAlignTempAlloca(
- ReturnValue.getPointer()->getType(), "result.ptr");
- Builder.CreateStore(ReturnValue.getPointer(), ReturnValuePointer);
+ ReturnValuePointer =
+ CreateDefaultAlignTempAlloca(ReturnValue.getType(), "result.ptr");
+ Builder.CreateStore(ReturnValue.emitRawPointer(*this),
+ ReturnValuePointer);
}
} else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
!hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
@@ -1184,8 +1252,9 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// or contains the address of the enclosing object).
LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
if (!LambdaThisCaptureField->getType()->isPointerType()) {
- // If the enclosing object was captured by value, just use its address.
- CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
+ // If the enclosing object was captured by value, just use its
+ // address. Sign this pointer.
+ CXXThisValue = ThisFieldLValue.getPointer(*this);
} else {
// Load the lvalue pointed to by the field, since '*this' was captured
// by reference.
@@ -1252,6 +1321,9 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
if (CurFuncDecl)
if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
LargestVectorWidth = VecWidth->getVectorWidth();
+
+ if (CGM.shouldEmitConvergenceTokens())
+ ConvergenceTokenStack.push_back(getOrEmitConvergenceEntryToken(CurFn));
}
void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
@@ -1270,7 +1342,10 @@ void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
const Stmt *S) {
llvm::BasicBlock *SkipCountBB = nullptr;
- if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
+ // Do not skip over the instrumentation when single byte coverage mode is
+ // enabled.
+ if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr() &&
+ !llvm::EnableSingleByteCoverage) {
// When instrumenting for profiling, the fallthrough to certain
// statements needs to skip over the instrumentation code so that we
// get an accurate count.
@@ -1353,6 +1428,8 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
FunctionArgList Args;
QualType ResTy = BuildFunctionArgList(GD, Args);
+ CGM.getTargetCodeGenInfo().checkFunctionABI(CGM, FD);
+
if (FD->isInlineBuiltinDeclaration()) {
// When generating code for a builtin with an inline declaration, use a
// mangled name to hold the actual body, while keeping an external
@@ -1441,6 +1518,8 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
// Ensure that the function adheres to the forward progress guarantee, which
// is required by certain optimizations.
+ // In C++11 and up, the attribute will be removed if the body contains a
+ // trivial empty loop.
if (checkIfFunctionMustProgress())
CurFn->addFnAttr(llvm::Attribute::MustProgress);
@@ -2002,8 +2081,9 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
= llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
Address begin = dest.withElementType(CGF.Int8Ty);
- llvm::Value *end = Builder.CreateInBoundsGEP(
- begin.getElementType(), begin.getPointer(), sizeInChars, "vla.end");
+ llvm::Value *end = Builder.CreateInBoundsGEP(begin.getElementType(),
+ begin.emitRawPointer(CGF),
+ sizeInChars, "vla.end");
llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
@@ -2014,7 +2094,7 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
CGF.EmitBlock(loopBB);
llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
- cur->addIncoming(begin.getPointer(), originBB);
+ cur->addIncoming(begin.emitRawPointer(CGF), originBB);
CharUnits curAlign =
dest.getAlignment().alignmentOfArrayElement(baseSize);
@@ -2179,8 +2259,8 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
dyn_cast<llvm::ArrayType>(addr.getElementType());
while (llvmArrayType) {
assert(isa<ConstantArrayType>(arrayType));
- assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
- == llvmArrayType->getNumElements());
+ assert(cast<ConstantArrayType>(arrayType)->getZExtSize() ==
+ llvmArrayType->getNumElements());
gepIndices.push_back(zero);
countFromCLAs *= llvmArrayType->getNumElements();
@@ -2198,8 +2278,7 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
// as some other type (probably a packed struct). Compute the array
// size, and just emit the 'begin' expression as a bitcast.
while (arrayType) {
- countFromCLAs *=
- cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
+ countFromCLAs *= cast<ConstantArrayType>(arrayType)->getZExtSize();
eltType = arrayType->getElementType();
arrayType = getContext().getAsArrayType(eltType);
}
@@ -2208,10 +2287,10 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
addr = addr.withElementType(baseType);
} else {
// Create the actual GEP.
- addr = Address(Builder.CreateInBoundsGEP(
- addr.getElementType(), addr.getPointer(), gepIndices, "array.begin"),
- ConvertTypeForMem(eltType),
- addr.getAlignment());
+ addr = Address(Builder.CreateInBoundsGEP(addr.getElementType(),
+ addr.emitRawPointer(*this),
+ gepIndices, "array.begin"),
+ ConvertTypeForMem(eltType), addr.getAlignment());
}
baseType = eltType;
@@ -2339,6 +2418,7 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
type = cast<MemberPointerType>(ty)->getPointeeType();
break;
+ case Type::ArrayParameter:
case Type::ConstantArray:
case Type::IncompleteArray:
// Losing element qualification here is fine.
@@ -2399,6 +2479,7 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
case Type::BTFTagAttributed:
case Type::SubstTemplateTypeParm:
case Type::MacroQualified:
+ case Type::CountAttributed:
// Keep walking after single level desugaring.
type = type.getSingleStepDesugaredType(getContext());
break;
@@ -2407,6 +2488,7 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
case Type::Decltype:
case Type::Auto:
case Type::DeducedTemplateSpecialization:
+ case Type::PackIndexing:
// Stop walking: nothing to do.
return;
@@ -2429,11 +2511,11 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
Address CodeGenFunction::EmitVAListRef(const Expr* E) {
if (getContext().getBuiltinVaListType()->isArrayType())
return EmitPointerWithAlignment(E);
- return EmitLValue(E).getAddress(*this);
+ return EmitLValue(E).getAddress();
}
Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
- return EmitLValue(E).getAddress(*this);
+ return EmitLValue(E).getAddress();
}
void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
@@ -2550,7 +2632,7 @@ void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
Address Addr) {
assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
- llvm::Value *V = Addr.getPointer();
+ llvm::Value *V = Addr.emitRawPointer(*this);
llvm::Type *VTy = V->getType();
auto *PTy = dyn_cast<llvm::PointerType>(VTy);
unsigned AS = PTy ? PTy->getAddressSpace() : 0;
@@ -2586,7 +2668,6 @@ CodeGenFunction::SanitizerScope::~SanitizerScope() {
void CodeGenFunction::InsertHelper(llvm::Instruction *I,
const llvm::Twine &Name,
- llvm::BasicBlock *BB,
llvm::BasicBlock::iterator InsertPt) const {
LoopStack.InsertHelper(I);
if (IsSanitizerScope)
@@ -2594,17 +2675,35 @@ void CodeGenFunction::InsertHelper(llvm::Instruction *I,
}
void CGBuilderInserter::InsertHelper(
- llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
+ llvm::Instruction *I, const llvm::Twine &Name,
llvm::BasicBlock::iterator InsertPt) const {
- llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
+ llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, InsertPt);
if (CGF)
- CGF->InsertHelper(I, Name, BB, InsertPt);
+ CGF->InsertHelper(I, Name, InsertPt);
}
// Emits an error if we don't have a valid set of target features for the
// called function.
void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
const FunctionDecl *TargetDecl) {
+ // SemaChecking cannot handle below x86 builtins because they have different
+ // parameter ranges with different TargetAttribute of caller.
+ if (CGM.getContext().getTargetInfo().getTriple().isX86()) {
+ unsigned BuiltinID = TargetDecl->getBuiltinID();
+ if (BuiltinID == X86::BI__builtin_ia32_cmpps ||
+ BuiltinID == X86::BI__builtin_ia32_cmpss ||
+ BuiltinID == X86::BI__builtin_ia32_cmppd ||
+ BuiltinID == X86::BI__builtin_ia32_cmpsd) {
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
+ llvm::StringMap<bool> TargetFetureMap;
+ CGM.getContext().getFunctionFeatureMap(TargetFetureMap, FD);
+ llvm::APSInt Result =
+ *(E->getArg(2)->getIntegerConstantExpr(CGM.getContext()));
+ if (Result.getSExtValue() > 7 && !TargetFetureMap.lookup("avx"))
+ CGM.getDiags().Report(E->getBeginLoc(), diag::err_builtin_needs_feature)
+ << TargetDecl->getDeclName() << "avx";
+ }
+ }
return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
}
@@ -2707,11 +2806,8 @@ void CodeGenFunction::EmitKCFIOperandBundle(
llvm::Value *CodeGenFunction::FormAArch64ResolverCondition(
const MultiVersionResolverOption &RO) {
llvm::SmallVector<StringRef, 8> CondFeatures;
- for (const StringRef &Feature : RO.Conditions.Features) {
- // Form condition for features which are not yet enabled in target
- if (!getContext().getTargetInfo().hasFeature(Feature))
- CondFeatures.push_back(Feature);
- }
+ for (const StringRef &Feature : RO.Conditions.Features)
+ CondFeatures.push_back(Feature);
if (!CondFeatures.empty()) {
return EmitAArch64CpuSupports(CondFeatures);
}
@@ -2879,7 +2975,7 @@ void CodeGenFunction::emitAlignmentAssumptionCheck(
SourceLocation SecondaryLoc, llvm::Value *Alignment,
llvm::Value *OffsetValue, llvm::Value *TheCheck,
llvm::Instruction *Assumption) {
- assert(Assumption && isa<llvm::CallInst>(Assumption) &&
+ assert(isa_and_nonnull<llvm::CallInst>(Assumption) &&
cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
llvm::Intrinsic::getDeclaration(
Builder.GetInsertBlock()->getParent()->getParent(),
@@ -2969,3 +3065,82 @@ llvm::Value *CodeGenFunction::emitBoolVecConversion(llvm::Value *SrcVec,
return Builder.CreateShuffleVector(SrcVec, ShuffleMask, Name);
}
+
+void CodeGenFunction::EmitPointerAuthOperandBundle(
+ const CGPointerAuthInfo &PointerAuth,
+ SmallVectorImpl<llvm::OperandBundleDef> &Bundles) {
+ if (!PointerAuth.isSigned())
+ return;
+
+ auto *Key = Builder.getInt32(PointerAuth.getKey());
+
+ llvm::Value *Discriminator = PointerAuth.getDiscriminator();
+ if (!Discriminator)
+ Discriminator = Builder.getSize(0);
+
+ llvm::Value *Args[] = {Key, Discriminator};
+ Bundles.emplace_back("ptrauth", Args);
+}
+
+static llvm::Value *EmitPointerAuthCommon(CodeGenFunction &CGF,
+ const CGPointerAuthInfo &PointerAuth,
+ llvm::Value *Pointer,
+ unsigned IntrinsicID) {
+ if (!PointerAuth)
+ return Pointer;
+
+ auto Key = CGF.Builder.getInt32(PointerAuth.getKey());
+
+ llvm::Value *Discriminator = PointerAuth.getDiscriminator();
+ if (!Discriminator) {
+ Discriminator = CGF.Builder.getSize(0);
+ }
+
+ // Convert the pointer to intptr_t before signing it.
+ auto OrigType = Pointer->getType();
+ Pointer = CGF.Builder.CreatePtrToInt(Pointer, CGF.IntPtrTy);
+
+ // call i64 @llvm.ptrauth.sign.i64(i64 %pointer, i32 %key, i64 %discriminator)
+ auto Intrinsic = CGF.CGM.getIntrinsic(IntrinsicID);
+ Pointer = CGF.EmitRuntimeCall(Intrinsic, {Pointer, Key, Discriminator});
+
+ // Convert back to the original type.
+ Pointer = CGF.Builder.CreateIntToPtr(Pointer, OrigType);
+ return Pointer;
+}
+
+llvm::Value *
+CodeGenFunction::EmitPointerAuthSign(const CGPointerAuthInfo &PointerAuth,
+ llvm::Value *Pointer) {
+ if (!PointerAuth.shouldSign())
+ return Pointer;
+ return EmitPointerAuthCommon(*this, PointerAuth, Pointer,
+ llvm::Intrinsic::ptrauth_sign);
+}
+
+static llvm::Value *EmitStrip(CodeGenFunction &CGF,
+ const CGPointerAuthInfo &PointerAuth,
+ llvm::Value *Pointer) {
+ auto StripIntrinsic = CGF.CGM.getIntrinsic(llvm::Intrinsic::ptrauth_strip);
+
+ auto Key = CGF.Builder.getInt32(PointerAuth.getKey());
+ // Convert the pointer to intptr_t before signing it.
+ auto OrigType = Pointer->getType();
+ Pointer = CGF.EmitRuntimeCall(
+ StripIntrinsic, {CGF.Builder.CreatePtrToInt(Pointer, CGF.IntPtrTy), Key});
+ return CGF.Builder.CreateIntToPtr(Pointer, OrigType);
+}
+
+llvm::Value *
+CodeGenFunction::EmitPointerAuthAuth(const CGPointerAuthInfo &PointerAuth,
+ llvm::Value *Pointer) {
+ if (PointerAuth.shouldStrip()) {
+ return EmitStrip(*this, PointerAuth, Pointer);
+ }
+ if (!PointerAuth.shouldAuth()) {
+ return Pointer;
+ }
+
+ return EmitPointerAuthCommon(*this, PointerAuth, Pointer,
+ llvm::Intrinsic::ptrauth_auth);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
index 143ad64e8816..60e6841e1b3d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
@@ -26,6 +26,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
+#include "clang/AST/StmtOpenACC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/Type.h"
#include "clang/Basic/ABI.h"
@@ -38,6 +39,7 @@
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"
@@ -135,7 +137,8 @@ enum TypeEvaluationKind {
SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \
SANITIZER_CHECK(TypeMismatch, type_mismatch, 1) \
SANITIZER_CHECK(AlignmentAssumption, alignment_assumption, 0) \
- SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0)
+ SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0) \
+ SANITIZER_CHECK(BoundsSafety, bounds_safety, 0)
enum SanitizerHandler {
#define SANITIZER_CHECK(Enum, Name, Version) Enum,
@@ -150,6 +153,9 @@ struct DominatingLLVMValue {
/// Answer whether the given value needs extra work to be saved.
static bool needsSaving(llvm::Value *value) {
+ if (!value)
+ return false;
+
// If it's not an instruction, we don't need to save.
if (!isa<llvm::Instruction>(value)) return false;
@@ -176,21 +182,28 @@ template <> struct DominatingValue<Address> {
typedef Address type;
struct saved_type {
- DominatingLLVMValue::saved_type SavedValue;
+ DominatingLLVMValue::saved_type BasePtr;
llvm::Type *ElementType;
CharUnits Alignment;
+ DominatingLLVMValue::saved_type Offset;
+ llvm::PointerType *EffectiveType;
};
static bool needsSaving(type value) {
- return DominatingLLVMValue::needsSaving(value.getPointer());
+ if (DominatingLLVMValue::needsSaving(value.getBasePointer()) ||
+ DominatingLLVMValue::needsSaving(value.getOffset()))
+ return true;
+ return false;
}
static saved_type save(CodeGenFunction &CGF, type value) {
- return { DominatingLLVMValue::save(CGF, value.getPointer()),
- value.getElementType(), value.getAlignment() };
+ return {DominatingLLVMValue::save(CGF, value.getBasePointer()),
+ value.getElementType(), value.getAlignment(),
+ DominatingLLVMValue::save(CGF, value.getOffset()), value.getType()};
}
static type restore(CodeGenFunction &CGF, saved_type value) {
- return Address(DominatingLLVMValue::restore(CGF, value.SavedValue),
- value.ElementType, value.Alignment);
+ return Address(DominatingLLVMValue::restore(CGF, value.BasePtr),
+ value.ElementType, value.Alignment, CGPointerAuthInfo(),
+ DominatingLLVMValue::restore(CGF, value.Offset));
}
};
@@ -200,13 +213,24 @@ template <> struct DominatingValue<RValue> {
class saved_type {
enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
AggregateAddress, ComplexAddress };
-
- llvm::Value *Value;
- llvm::Type *ElementType;
+ union {
+ struct {
+ DominatingLLVMValue::saved_type first, second;
+ } Vals;
+ DominatingValue<Address>::saved_type AggregateAddr;
+ };
+ LLVM_PREFERRED_TYPE(Kind)
unsigned K : 3;
- unsigned Align : 29;
- saved_type(llvm::Value *v, llvm::Type *e, Kind k, unsigned a = 0)
- : Value(v), ElementType(e), K(k), Align(a) {}
+
+ saved_type(DominatingLLVMValue::saved_type Val1, unsigned K)
+ : Vals{Val1, DominatingLLVMValue::saved_type()}, K(K) {}
+
+ saved_type(DominatingLLVMValue::saved_type Val1,
+ DominatingLLVMValue::saved_type Val2)
+ : Vals{Val1, Val2}, K(ComplexAddress) {}
+
+ saved_type(DominatingValue<Address>::saved_type AggregateAddr, unsigned K)
+ : AggregateAddr(AggregateAddr), K(K) {}
public:
static bool needsSaving(RValue value);
@@ -290,6 +314,9 @@ public:
/// Stack to track the Logical Operator recursion nest for MC/DC.
SmallVector<const BinaryOperator *, 16> MCDCLogOpStack;
+ /// Stack to track the controlled convergence tokens.
+ SmallVector<llvm::IntrinsicInst *, 4> ConvergenceTokenStack;
+
/// Number of nested loop to be consumed by the last surrounding
/// loop-associated directive.
int ExpectedOMPLoopDepth = 0;
@@ -316,7 +343,6 @@ public:
/// CGBuilder insert helper. This function is called after an
/// instruction is created using Builder.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
- llvm::BasicBlock *BB,
llvm::BasicBlock::iterator InsertPt) const;
/// CurFuncDecl - Holds the Decl for the current outermost
@@ -350,6 +376,25 @@ public:
return isCoroutine() && CurCoro.InSuspendBlock;
}
+ // Holds FramePtr for await_suspend wrapper generation,
+ // so that __builtin_coro_frame call can be lowered
+ // directly to value of its second argument
+ struct AwaitSuspendWrapperInfo {
+ llvm::Value *FramePtr = nullptr;
+ };
+ AwaitSuspendWrapperInfo CurAwaitSuspendWrapper;
+
+ // Generates a wrapper function for `llvm.coro.await.suspend.*` intrinsics.
+ // It encapsulates SuspendExpr in a function, to separate its body
+ // from the main coroutine to avoid miscompilations. The intrinsic
+ // is lowered to this function call in the CoroSplit pass.
+ // Function signature is:
+ // <type> __await_suspend_wrapper_<name>(ptr %awaiter, ptr %hdl)
+ // where type is one of (void, i1, ptr)
+ llvm::Function *generateAwaitSuspendWrapper(Twine const &CoroName,
+ Twine const &SuspendPointName,
+ CoroutineSuspendExpr const &S);
+
/// CurGD - The GlobalDecl for the current function being compiled.
GlobalDecl CurGD;
@@ -592,28 +637,7 @@ public:
/// Returns true if a loop must make progress, which means the mustprogress
/// attribute can be added. \p HasConstantCond indicates whether the branch
/// condition is a known constant.
- bool checkIfLoopMustProgress(bool HasConstantCond) {
- if (CGM.getCodeGenOpts().getFiniteLoops() ==
- CodeGenOptions::FiniteLoopsKind::Always)
- return true;
- if (CGM.getCodeGenOpts().getFiniteLoops() ==
- CodeGenOptions::FiniteLoopsKind::Never)
- return false;
-
- // If the containing function must make progress, loops also must make
- // progress (as in C++11 and later).
- if (checkIfFunctionMustProgress())
- return true;
-
- // Now apply rules for plain C (see 6.8.5.6 in C11).
- // Loops with constant conditions do not have to make progress in any C
- // version.
- if (HasConstantCond)
- return false;
-
- // Loops with non-constant conditions must make progress in C11 and later.
- return getLangOpts().C11;
- }
+ bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody);
const CodeGen::CGBlockInfo *BlockInfo = nullptr;
llvm::Value *BlockPointer = nullptr;
@@ -627,6 +651,51 @@ public:
EHScopeStack EHStack;
llvm::SmallVector<char, 256> LifetimeExtendedCleanupStack;
+
+ // A stack of cleanups which were added to EHStack but have to be deactivated
+ // later before being popped or emitted. These are usually deactivated on
+ // exiting a `CleanupDeactivationScope` scope. For instance, after a
+ // full-expr.
+ //
+ // These are especially useful for correctly emitting cleanups while
+ // encountering branches out of expression (through stmt-expr or coroutine
+ // suspensions).
+ struct DeferredDeactivateCleanup {
+ EHScopeStack::stable_iterator Cleanup;
+ llvm::Instruction *DominatingIP;
+ };
+ llvm::SmallVector<DeferredDeactivateCleanup> DeferredDeactivationCleanupStack;
+
+ // Enters a new scope for capturing cleanups which are deferred to be
+ // deactivated, all of which will be deactivated once the scope is exited.
+ struct CleanupDeactivationScope {
+ CodeGenFunction &CGF;
+ size_t OldDeactivateCleanupStackSize;
+ bool Deactivated;
+ CleanupDeactivationScope(CodeGenFunction &CGF)
+ : CGF(CGF), OldDeactivateCleanupStackSize(
+ CGF.DeferredDeactivationCleanupStack.size()),
+ Deactivated(false) {}
+
+ void ForceDeactivate() {
+ assert(!Deactivated && "Deactivating already deactivated scope");
+ auto &Stack = CGF.DeferredDeactivationCleanupStack;
+ for (size_t I = Stack.size(); I > OldDeactivateCleanupStackSize; I--) {
+ CGF.DeactivateCleanupBlock(Stack[I - 1].Cleanup,
+ Stack[I - 1].DominatingIP);
+ Stack[I - 1].DominatingIP->eraseFromParent();
+ }
+ Stack.resize(OldDeactivateCleanupStackSize);
+ Deactivated = true;
+ }
+
+ ~CleanupDeactivationScope() {
+ if (Deactivated)
+ return;
+ ForceDeactivate();
+ }
+ };
+
llvm::SmallVector<const JumpDest *, 2> SEHTryEpilogueStack;
llvm::Instruction *CurrentFuncletPad = nullptr;
@@ -638,7 +707,7 @@ public:
llvm::Value *Size;
public:
- CallLifetimeEnd(Address addr, llvm::Value *size)
+ CallLifetimeEnd(RawAddress addr, llvm::Value *size)
: Addr(addr.getPointer()), Size(size) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
@@ -650,9 +719,11 @@ public:
struct LifetimeExtendedCleanupHeader {
/// The size of the following cleanup object.
unsigned Size;
- /// The kind of cleanup to push: a value from the CleanupKind enumeration.
+ /// The kind of cleanup to push.
+ LLVM_PREFERRED_TYPE(CleanupKind)
unsigned Kind : 31;
/// Whether this is a conditional cleanup.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsConditional : 1;
size_t getSize() const { return Size; }
@@ -661,7 +732,7 @@ public:
};
/// i32s containing the indexes of the cleanup destinations.
- Address NormalCleanupDest = Address::invalid();
+ RawAddress NormalCleanupDest = RawAddress::invalid();
unsigned NextCleanupDestIndex = 1;
@@ -796,10 +867,10 @@ public:
template <class T, class... As>
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A) {
if (!isInConditionalBranch())
- return pushCleanupAfterFullExprWithActiveFlag<T>(Kind, Address::invalid(),
- A...);
+ return pushCleanupAfterFullExprWithActiveFlag<T>(
+ Kind, RawAddress::invalid(), A...);
- Address ActiveFlag = createCleanupActiveFlag();
+ RawAddress ActiveFlag = createCleanupActiveFlag();
assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
"cleanup active flag should never need saving");
@@ -812,7 +883,7 @@ public:
template <class T, class... As>
void pushCleanupAfterFullExprWithActiveFlag(CleanupKind Kind,
- Address ActiveFlag, As... A) {
+ RawAddress ActiveFlag, As... A) {
LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind,
ActiveFlag.isValid()};
@@ -827,7 +898,20 @@ public:
new (Buffer) LifetimeExtendedCleanupHeader(Header);
new (Buffer + sizeof(Header)) T(A...);
if (Header.IsConditional)
- new (Buffer + sizeof(Header) + sizeof(T)) Address(ActiveFlag);
+ new (Buffer + sizeof(Header) + sizeof(T)) RawAddress(ActiveFlag);
+ }
+
+ // Push a cleanup onto EHStack and deactivate it later. It is usually
+ // deactivated when exiting a `CleanupDeactivationScope` (for example: after a
+ // full expression).
+ template <class T, class... As>
+ void pushCleanupAndDeferDeactivation(CleanupKind Kind, As... A) {
+ // Placeholder dominating IP for this cleanup.
+ llvm::Instruction *DominatingIP =
+ Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy));
+ EHStack.pushCleanup<T>(Kind, A...);
+ DeferredDeactivationCleanupStack.push_back(
+ {EHStack.stable_begin(), DominatingIP});
}
/// Set up the last cleanup that was pushed as a conditional
@@ -836,8 +920,8 @@ public:
initFullExprCleanupWithFlag(createCleanupActiveFlag());
}
- void initFullExprCleanupWithFlag(Address ActiveFlag);
- Address createCleanupActiveFlag();
+ void initFullExprCleanupWithFlag(RawAddress ActiveFlag);
+ RawAddress createCleanupActiveFlag();
/// PushDestructorCleanup - Push a cleanup to call the
/// complete-object destructor of an object of the given type at the
@@ -853,7 +937,8 @@ public:
/// PopCleanupBlock - Will pop the cleanup entry on the stack and
/// process all branch fixups.
- void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
+ void PopCleanupBlock(bool FallThroughIsBranchThrough = false,
+ bool ForDeactivation = false);
/// DeactivateCleanupBlock - Deactivates the given cleanup block.
/// The block cannot be reactivated. Pops it if it's the top of the
@@ -881,6 +966,7 @@ public:
class RunCleanupsScope {
EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
size_t LifetimeExtendedCleanupStackSize;
+ CleanupDeactivationScope DeactivateCleanups;
bool OldDidCallStackSave;
protected:
bool PerformCleanup;
@@ -895,8 +981,7 @@ public:
public:
/// Enter a new cleanup scope.
explicit RunCleanupsScope(CodeGenFunction &CGF)
- : PerformCleanup(true), CGF(CGF)
- {
+ : DeactivateCleanups(CGF), PerformCleanup(true), CGF(CGF) {
CleanupStackDepth = CGF.EHStack.stable_begin();
LifetimeExtendedCleanupStackSize =
CGF.LifetimeExtendedCleanupStack.size();
@@ -926,6 +1011,7 @@ public:
void ForceCleanup(std::initializer_list<llvm::Value**> ValuesToReload = {}) {
assert(PerformCleanup && "Already forced cleanup");
CGF.DidCallStackSave = OldDidCallStackSave;
+ DeactivateCleanups.ForceDeactivate();
CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
ValuesToReload);
PerformCleanup = false;
@@ -1025,7 +1111,7 @@ public:
QualType VarTy = LocalVD->getType();
if (VarTy->isReferenceType()) {
Address Temp = CGF.CreateMemTemp(VarTy);
- CGF.Builder.CreateStore(TempAddr.getPointer(), Temp);
+ CGF.Builder.CreateStore(TempAddr.emitRawPointer(CGF), Temp);
TempAddr = Temp;
}
SavedTempAddresses.try_emplace(LocalVD, TempAddr);
@@ -1220,10 +1306,12 @@ public:
/// one branch or the other of a conditional expression.
bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
- void setBeforeOutermostConditional(llvm::Value *value, Address addr) {
+ void setBeforeOutermostConditional(llvm::Value *value, Address addr,
+ CodeGenFunction &CGF) {
assert(isInConditionalBranch());
llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
- auto store = new llvm::StoreInst(value, addr.getPointer(), &block->back());
+ auto store =
+ new llvm::StoreInst(value, addr.emitRawPointer(CGF), &block->back());
store->setAlignment(addr.getAlignment().getAsAlign());
}
@@ -1540,8 +1628,10 @@ public:
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr) {
if (CGM.getCodeGenOpts().hasProfileClangInstr() &&
!CurFn->hasFnAttribute(llvm::Attribute::NoProfile) &&
- !CurFn->hasFnAttribute(llvm::Attribute::SkipProfile))
- PGO.emitCounterIncrement(Builder, S, StepV);
+ !CurFn->hasFnAttribute(llvm::Attribute::SkipProfile)) {
+ auto AL = ApplyDebugLocation::CreateArtificial(*this);
+ PGO.emitCounterSetOrIncrement(Builder, S, StepV);
+ }
PGO.setCurrentStmt(S);
}
@@ -1578,7 +1668,7 @@ public:
/// If \p StepV is null, the default increment is 1.
void maybeUpdateMCDCTestVectorBitmap(const Expr *E) {
if (isMCDCCoverageEnabled() && isBinaryLogicalOp(E)) {
- PGO.emitMCDCTestVectorBitmapUpdate(Builder, E, MCDCCondBitmapAddr);
+ PGO.emitMCDCTestVectorBitmapUpdate(Builder, E, MCDCCondBitmapAddr, *this);
PGO.setCurrentStmt(E);
}
}
@@ -1586,7 +1676,7 @@ public:
/// Update the MCDC temp value with the condition's evaluated result.
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val) {
if (isMCDCCoverageEnabled()) {
- PGO.emitMCDCCondBitmapUpdate(Builder, E, MCDCCondBitmapAddr, Val);
+ PGO.emitMCDCCondBitmapUpdate(Builder, E, MCDCCondBitmapAddr, Val, *this);
PGO.setCurrentStmt(E);
}
}
@@ -1681,7 +1771,7 @@ public:
: CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
OldCXXThisAlignment(CGF.CXXThisAlignment),
SourceLocScope(E, CGF.CurSourceLocExprScope) {
- CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getPointer();
+ CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getBasePointer();
CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
}
~CXXDefaultInitExprScope() {
@@ -2067,7 +2157,7 @@ public:
llvm::Value *getExceptionFromSlot();
llvm::Value *getSelectorFromSlot();
- Address getNormalCleanupDestSlot();
+ RawAddress getNormalCleanupDestSlot();
llvm::BasicBlock *getUnreachableBlock() {
if (!UnreachableBlock) {
@@ -2113,6 +2203,11 @@ public:
Address addr, QualType type);
void pushDestroy(CleanupKind kind, Address addr, QualType type,
Destroyer *destroyer, bool useEHCleanupForArray);
+ void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind,
+ Address addr, QualType type);
+ void pushDestroyAndDeferDeactivation(CleanupKind cleanupKind, Address addr,
+ QualType type, Destroyer *destroyer,
+ bool useEHCleanupForArray);
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr,
QualType type, Destroyer *destroyer,
bool useEHCleanupForArray);
@@ -2358,10 +2453,20 @@ public:
void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
+ // VTableAuthMode - whether we guarantee that loading the
+ // vtable will trap on authentication failure,
+ // even if the resulting vtable pointer is unused.
+ enum class VTableAuthMode {
+ Authenticate,
+ MustTrap,
+ UnsafeUbsanStrip // Should only be used for Vptr UBSan check
+ };
/// GetVTablePtr - Return the Value of the vtable pointer member pointed
/// to by This.
- llvm::Value *GetVTablePtr(Address This, llvm::Type *VTableTy,
- const CXXRecordDecl *VTableClass);
+ llvm::Value *
+ GetVTablePtr(Address This, llvm::Type *VTableTy,
+ const CXXRecordDecl *VTableClass,
+ VTableAuthMode AuthMode = VTableAuthMode::Authenticate);
enum CFITypeCheckKind {
CFITCK_VCall,
@@ -2471,6 +2576,8 @@ public:
llvm::Type *ConvertTypeForMem(QualType T);
llvm::Type *ConvertType(QualType T);
+ llvm::Type *convertTypeForLoadStore(QualType ASTTy,
+ llvm::Type *LLVMTy = nullptr);
llvm::Type *ConvertType(const TypeDecl *T) {
return ConvertType(getContext().getTypeDeclType(T));
}
@@ -2556,10 +2663,41 @@ public:
// Helpers
//===--------------------------------------------------------------------===//
+ Address mergeAddressesInConditionalExpr(Address LHS, Address RHS,
+ llvm::BasicBlock *LHSBlock,
+ llvm::BasicBlock *RHSBlock,
+ llvm::BasicBlock *MergeBlock,
+ QualType MergedType) {
+ Builder.SetInsertPoint(MergeBlock);
+ llvm::PHINode *PtrPhi = Builder.CreatePHI(LHS.getType(), 2, "cond");
+ PtrPhi->addIncoming(LHS.getBasePointer(), LHSBlock);
+ PtrPhi->addIncoming(RHS.getBasePointer(), RHSBlock);
+ LHS.replaceBasePointer(PtrPhi);
+ LHS.setAlignment(std::min(LHS.getAlignment(), RHS.getAlignment()));
+ return LHS;
+ }
+
+ /// Construct an address with the natural alignment of T. If a pointer to T
+ /// is expected to be signed, the pointer passed to this function must have
+ /// been signed, and the returned Address will have the pointer authentication
+ /// information needed to authenticate the signed pointer.
+ Address makeNaturalAddressForPointer(
+ llvm::Value *Ptr, QualType T, CharUnits Alignment = CharUnits::Zero(),
+ bool ForPointeeType = false, LValueBaseInfo *BaseInfo = nullptr,
+ TBAAAccessInfo *TBAAInfo = nullptr,
+ KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
+ if (Alignment.isZero())
+ Alignment =
+ CGM.getNaturalTypeAlignment(T, BaseInfo, TBAAInfo, ForPointeeType);
+ return Address(Ptr, ConvertTypeForMem(T), Alignment,
+ CGM.getPointerAuthInfoForPointeeType(T), /*Offset=*/nullptr,
+ IsKnownNonNull);
+ }
+
LValue MakeAddrLValue(Address Addr, QualType T,
AlignmentSource Source = AlignmentSource::Type) {
- return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
- CGM.getTBAAAccessInfo(T));
+ return MakeAddrLValue(Addr, T, LValueBaseInfo(Source),
+ CGM.getTBAAAccessInfo(T));
}
LValue MakeAddrLValue(Address Addr, QualType T, LValueBaseInfo BaseInfo,
@@ -2569,6 +2707,14 @@ public:
LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
AlignmentSource Source = AlignmentSource::Type) {
+ return MakeAddrLValue(makeNaturalAddressForPointer(V, T, Alignment), T,
+ LValueBaseInfo(Source), CGM.getTBAAAccessInfo(T));
+ }
+
+ /// Same as MakeAddrLValue above except that the pointer is known to be
+ /// unsigned.
+ LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
+ AlignmentSource Source = AlignmentSource::Type) {
Address Addr(V, ConvertTypeForMem(T), Alignment);
return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
CGM.getTBAAAccessInfo(T));
@@ -2581,8 +2727,19 @@ public:
TBAAAccessInfo());
}
+ /// Given a value of type T* that may not point to a complete object, construct
+ /// an l-value with the natural pointee alignment of T.
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);
- LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T);
+
+ LValue
+ MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T,
+ KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
+
+ /// Same as MakeNaturalAlignPointeeAddrLValue except that the pointer is known
+ /// to be unsigned.
+ LValue MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V, QualType T);
+
+ LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T);
Address EmitLoadOfReference(LValue RefLVal,
LValueBaseInfo *PointeeBaseInfo = nullptr,
@@ -2604,6 +2761,33 @@ public:
TBAAAccessInfo *TBAAInfo = nullptr);
LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy);
+private:
+ struct AllocaTracker {
+ void Add(llvm::AllocaInst *I) { Allocas.push_back(I); }
+ llvm::SmallVector<llvm::AllocaInst *> Take() { return std::move(Allocas); }
+
+ private:
+ llvm::SmallVector<llvm::AllocaInst *> Allocas;
+ };
+ AllocaTracker *Allocas = nullptr;
+
+public:
+ // Captures all the allocas created during the scope of its RAII object.
+ struct AllocaTrackerRAII {
+ AllocaTrackerRAII(CodeGenFunction &CGF)
+ : CGF(CGF), OldTracker(CGF.Allocas) {
+ CGF.Allocas = &Tracker;
+ }
+ ~AllocaTrackerRAII() { CGF.Allocas = OldTracker; }
+
+ llvm::SmallVector<llvm::AllocaInst *> Take() { return Tracker.Take(); }
+
+ private:
+ CodeGenFunction &CGF;
+ AllocaTracker *OldTracker;
+ AllocaTracker Tracker;
+ };
+
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
/// insertion point of the builder. The caller is responsible for setting an
@@ -2632,13 +2816,13 @@ public:
/// more efficient if the caller knows that the address will not be exposed.
llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
llvm::Value *ArraySize = nullptr);
- Address CreateTempAlloca(llvm::Type *Ty, CharUnits align,
- const Twine &Name = "tmp",
- llvm::Value *ArraySize = nullptr,
- Address *Alloca = nullptr);
- Address CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align,
- const Twine &Name = "tmp",
- llvm::Value *ArraySize = nullptr);
+ RawAddress CreateTempAlloca(llvm::Type *Ty, CharUnits align,
+ const Twine &Name = "tmp",
+ llvm::Value *ArraySize = nullptr,
+ RawAddress *Alloca = nullptr);
+ RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align,
+ const Twine &Name = "tmp",
+ llvm::Value *ArraySize = nullptr);
/// CreateDefaultAlignedTempAlloca - This creates an alloca with the
/// default ABI alignment of the given LLVM type.
@@ -2650,8 +2834,8 @@ public:
/// not hand this address off to arbitrary IRGen routines, and especially
/// do not pass it as an argument to a function that might expect a
/// properly ABI-aligned value.
- Address CreateDefaultAlignTempAlloca(llvm::Type *Ty,
- const Twine &Name = "tmp");
+ RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty,
+ const Twine &Name = "tmp");
/// CreateIRTemp - Create a temporary IR object of the given type, with
/// appropriate alignment. This routine should only be used when an temporary
@@ -2661,38 +2845,52 @@ public:
///
/// That is, this is exactly equivalent to CreateMemTemp, but calling
/// ConvertType instead of ConvertTypeForMem.
- Address CreateIRTemp(QualType T, const Twine &Name = "tmp");
+ RawAddress CreateIRTemp(QualType T, const Twine &Name = "tmp");
/// CreateMemTemp - Create a temporary memory object of the given type, with
/// appropriate alignmen and cast it to the default address space. Returns
/// the original alloca instruction by \p Alloca if it is not nullptr.
- Address CreateMemTemp(QualType T, const Twine &Name = "tmp",
- Address *Alloca = nullptr);
- Address CreateMemTemp(QualType T, CharUnits Align, const Twine &Name = "tmp",
- Address *Alloca = nullptr);
+ RawAddress CreateMemTemp(QualType T, const Twine &Name = "tmp",
+ RawAddress *Alloca = nullptr);
+ RawAddress CreateMemTemp(QualType T, CharUnits Align,
+ const Twine &Name = "tmp",
+ RawAddress *Alloca = nullptr);
/// CreateMemTemp - Create a temporary memory object of the given type, with
/// appropriate alignmen without casting it to the default address space.
- Address CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
- Address CreateMemTempWithoutCast(QualType T, CharUnits Align,
- const Twine &Name = "tmp");
+ RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
+ RawAddress CreateMemTempWithoutCast(QualType T, CharUnits Align,
+ const Twine &Name = "tmp");
/// CreateAggTemp - Create a temporary memory object for the given
/// aggregate type.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp",
- Address *Alloca = nullptr) {
- return AggValueSlot::forAddr(CreateMemTemp(T, Name, Alloca),
- T.getQualifiers(),
- AggValueSlot::IsNotDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased,
- AggValueSlot::DoesNotOverlap);
+ RawAddress *Alloca = nullptr) {
+ return AggValueSlot::forAddr(
+ CreateMemTemp(T, Name, Alloca), T.getQualifiers(),
+ AggValueSlot::IsNotDestructed, AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap);
}
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *EvaluateExprAsBool(const Expr *E);
+ /// Retrieve the implicit cast expression of the rhs in a binary operator
+ /// expression by passing pointers to Value and QualType
+ /// This is used for implicit bitfield conversion checks, which
+ /// must compare with the value before potential truncation.
+ llvm::Value *EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E,
+ llvm::Value **Previous,
+ QualType *SrcType);
+
+ /// Emit a check for an [implicit] conversion of a bitfield. Such a
+ /// conversion is not UB, so we use the value after conversion.
+ void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType,
+ llvm::Value *Dst, QualType DstType,
+ const CGBitFieldInfo &Info,
+ SourceLocation Loc);
+
/// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
void EmitIgnoredExpr(const Expr *E);
@@ -2830,7 +3028,8 @@ public:
/// \returns A pointer to the argument.
// FIXME: We should be able to get rid of this method and use the va_arg
// instruction in LLVM instead once it works well enough.
- Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr);
+ RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr,
+ AggValueSlot Slot = AggValueSlot::ignored());
/// emitArrayLength - Compute the length of an array, even if it's a
/// VLA, and drill down to the base element type.
@@ -3060,6 +3259,25 @@ public:
/// calls to EmitTypeCheck can be skipped.
bool sanitizePerformTypeCheck() const;
+ void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV,
+ QualType Type, SanitizerSet SkippedChecks = SanitizerSet(),
+ llvm::Value *ArraySize = nullptr) {
+ if (!sanitizePerformTypeCheck())
+ return;
+ EmitTypeCheck(TCK, Loc, LV.emitRawPointer(*this), Type, LV.getAlignment(),
+ SkippedChecks, ArraySize);
+ }
+
+ void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, Address Addr,
+ QualType Type, CharUnits Alignment = CharUnits::Zero(),
+ SanitizerSet SkippedChecks = SanitizerSet(),
+ llvm::Value *ArraySize = nullptr) {
+ if (!sanitizePerformTypeCheck())
+ return;
+ EmitTypeCheck(TCK, Loc, Addr.emitRawPointer(*this), Type, Alignment,
+ SkippedChecks, ArraySize);
+ }
+
/// Emit a check that \p V is the address of storage of the
/// appropriate size and alignment for an object of type \p Type
/// (or if ArraySize is provided, for an array of that bound).
@@ -3077,12 +3295,12 @@ public:
llvm::Value *Index, QualType IndexType,
QualType IndexedType, bool Accessed);
- // Find a struct's flexible array member. It may be embedded inside multiple
- // sub-structs, but must still be the last field.
- const FieldDecl *FindFlexibleArrayMemberField(ASTContext &Ctx,
- const RecordDecl *RD,
- StringRef Name,
- uint64_t &Offset);
+ // Find a struct's flexible array member and get its offset. It may be
+ // embedded inside multiple sub-structs, but must still be the last field.
+ const FieldDecl *
+ FindFlexibleArrayMemberFieldAndOffset(ASTContext &Ctx, const RecordDecl *RD,
+ const FieldDecl *FAMDecl,
+ uint64_t &Offset);
/// Find the FieldDecl specified in a FAM's "counted_by" attribute. Returns
/// \p nullptr if either the attribute or the field doesn't exist.
@@ -3160,17 +3378,17 @@ public:
/// Address with original alloca instruction. Invalid if the variable was
/// emitted as a global constant.
- Address AllocaAddr;
+ RawAddress AllocaAddr;
struct Invalid {};
AutoVarEmission(Invalid)
: Variable(nullptr), Addr(Address::invalid()),
- AllocaAddr(Address::invalid()) {}
+ AllocaAddr(RawAddress::invalid()) {}
AutoVarEmission(const VarDecl &variable)
: Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
IsEscapingByRef(false), IsConstantAggregate(false),
- SizeForLifetimeMarkers(nullptr), AllocaAddr(Address::invalid()) {}
+ SizeForLifetimeMarkers(nullptr), AllocaAddr(RawAddress::invalid()) {}
bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
@@ -3193,7 +3411,7 @@ public:
}
/// Returns the address for the original alloca instruction.
- Address getOriginalAllocatedAddress() const { return AllocaAddr; }
+ RawAddress getOriginalAllocatedAddress() const { return AllocaAddr; }
/// Returns the address of the object within this declaration.
/// Note that this does not chase the forwarding pointer for
@@ -3223,23 +3441,32 @@ public:
llvm::GlobalValue::LinkageTypes Linkage);
class ParamValue {
- llvm::Value *Value;
- llvm::Type *ElementType;
- unsigned Alignment;
- ParamValue(llvm::Value *V, llvm::Type *T, unsigned A)
- : Value(V), ElementType(T), Alignment(A) {}
+ union {
+ Address Addr;
+ llvm::Value *Value;
+ };
+
+ bool IsIndirect;
+
+ ParamValue(llvm::Value *V) : Value(V), IsIndirect(false) {}
+ ParamValue(Address A) : Addr(A), IsIndirect(true) {}
+
public:
static ParamValue forDirect(llvm::Value *value) {
- return ParamValue(value, nullptr, 0);
+ return ParamValue(value);
}
static ParamValue forIndirect(Address addr) {
assert(!addr.getAlignment().isZero());
- return ParamValue(addr.getPointer(), addr.getElementType(),
- addr.getAlignment().getQuantity());
+ return ParamValue(addr);
}
- bool isIndirect() const { return Alignment != 0; }
- llvm::Value *getAnyValue() const { return Value; }
+ bool isIndirect() const { return IsIndirect; }
+ llvm::Value *getAnyValue() const {
+ if (!isIndirect())
+ return Value;
+ assert(!Addr.hasOffset() && "unexpected offset");
+ return Addr.getBasePointer();
+ }
llvm::Value *getDirectValue() const {
assert(!isIndirect());
@@ -3248,8 +3475,7 @@ public:
Address getIndirectAddress() const {
assert(isIndirect());
- return Address(Value, ElementType, CharUnits::fromQuantity(Alignment),
- KnownNonNull);
+ return Addr;
}
};
@@ -3594,6 +3820,8 @@ public:
void EmitOMPSimdDirective(const OMPSimdDirective &S);
void EmitOMPTileDirective(const OMPTileDirective &S);
void EmitOMPUnrollDirective(const OMPUnrollDirective &S);
+ void EmitOMPReverseDirective(const OMPReverseDirective &S);
+ void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S);
void EmitOMPForDirective(const OMPForDirective &S);
void EmitOMPForSimdDirective(const OMPForSimdDirective &S);
void EmitOMPSectionsDirective(const OMPSectionsDirective &S);
@@ -3808,6 +4036,8 @@ private:
Expr *NextLB = nullptr;
/// Update of UB after a whole chunk has been executed
Expr *NextUB = nullptr;
+ /// Distinguish between the for distribute and sections
+ OpenMPDirectiveKind DKind = llvm::omp::OMPD_unknown;
OMPLoopArguments() = default;
OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
@@ -3837,6 +4067,22 @@ private:
void EmitSections(const OMPExecutableDirective &S);
public:
+ //===--------------------------------------------------------------------===//
+ // OpenACC Emission
+ //===--------------------------------------------------------------------===//
+ void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S) {
+ // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
+ // simply emitting its structured block, but in the future we will implement
+ // some sort of IR.
+ EmitStmt(S.getStructuredBlock());
+ }
+
+ void EmitOpenACCLoopConstruct(const OpenACCLoopConstruct &S) {
+ // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
+ // simply emitting its loop, but in the future we will implement
+ // some sort of IR.
+ EmitStmt(S.getLoop());
+ }
//===--------------------------------------------------------------------===//
// LValue Expression Emission
@@ -3986,6 +4232,11 @@ public:
RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc);
RValue EmitLoadOfGlobalRegLValue(LValue LV);
+ /// Like EmitLoadOfLValue but also handles complex and aggregate types.
+ RValue EmitLoadOfAnyValue(LValue V,
+ AggValueSlot Slot = AggValueSlot::ignored(),
+ SourceLocation Loc = {});
+
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to the have the same type, and that type
/// is 'Ty'.
@@ -4023,8 +4274,8 @@ public:
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
bool Accessed = false);
LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E);
- LValue EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
- bool IsLowerBound = true);
+ LValue EmitArraySectionExpr(const ArraySectionExpr *E,
+ bool IsLowerBound = true);
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
LValue EmitMemberExpr(const MemberExpr *E);
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
@@ -4123,7 +4374,8 @@ public:
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
ReturnValueSlot ReturnValue, const CallArgList &Args,
llvm::CallBase **callOrInvoke, bool IsMustTail,
- SourceLocation Loc);
+ SourceLocation Loc,
+ bool IsVirtualFunctionPointerThunk = false);
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
ReturnValueSlot ReturnValue, const CallArgList &Args,
llvm::CallBase **callOrInvoke = nullptr,
@@ -4149,6 +4401,9 @@ public:
llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
const Twine &name = "");
llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
+ ArrayRef<Address> args,
+ const Twine &name = "");
+ llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
ArrayRef<llvm::Value *> args,
const Twine &name = "");
@@ -4174,6 +4429,45 @@ public:
CXXDtorType Type,
const CXXRecordDecl *RD);
+ bool isPointerKnownNonNull(const Expr *E);
+
+ /// Create the discriminator from the storage address and the entity hash.
+ llvm::Value *EmitPointerAuthBlendDiscriminator(llvm::Value *StorageAddress,
+ llvm::Value *Discriminator);
+ CGPointerAuthInfo EmitPointerAuthInfo(const PointerAuthSchema &Schema,
+ llvm::Value *StorageAddress,
+ GlobalDecl SchemaDecl,
+ QualType SchemaType);
+
+ llvm::Value *EmitPointerAuthSign(const CGPointerAuthInfo &Info,
+ llvm::Value *Pointer);
+
+ llvm::Value *EmitPointerAuthAuth(const CGPointerAuthInfo &Info,
+ llvm::Value *Pointer);
+
+ llvm::Value *emitPointerAuthResign(llvm::Value *Pointer, QualType PointerType,
+ const CGPointerAuthInfo &CurAuthInfo,
+ const CGPointerAuthInfo &NewAuthInfo,
+ bool IsKnownNonNull);
+ llvm::Value *emitPointerAuthResignCall(llvm::Value *Pointer,
+ const CGPointerAuthInfo &CurInfo,
+ const CGPointerAuthInfo &NewInfo);
+
+ void EmitPointerAuthOperandBundle(
+ const CGPointerAuthInfo &Info,
+ SmallVectorImpl<llvm::OperandBundleDef> &Bundles);
+
+ llvm::Value *authPointerToPointerCast(llvm::Value *ResultPtr,
+ QualType SourceType, QualType DestType);
+ Address authPointerToPointerCast(Address Ptr, QualType SourceType,
+ QualType DestType);
+
+ Address getAsNaturalAddressOf(Address Addr, QualType PointeeTy);
+
+ llvm::Value *getAsNaturalPointerTo(Address Addr, QualType PointeeType) {
+ return getAsNaturalAddressOf(Addr, PointeeType).getBasePointer();
+ }
+
// Return the copy constructor name with the prefix "__copy_constructor_"
// removed.
static std::string getNonTrivialCopyConstructorStr(QualType QT,
@@ -4392,6 +4686,7 @@ public:
llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx,
const CallExpr *E);
llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
@@ -4401,6 +4696,9 @@ public:
llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
ReturnValueSlot ReturnValue);
+
+ void AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
+ const CallExpr *E);
void ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
llvm::AtomicOrdering &AO,
llvm::SyncScope::ID &SSID);
@@ -4533,9 +4831,17 @@ public:
/// aggregate type into a temporary LValue.
LValue EmitAggExprToLValue(const Expr *E);
- /// Build all the stores needed to initialize an aggregate at Dest with the
- /// value Val.
- void EmitAggregateStore(llvm::Value *Val, Address Dest, bool DestIsVolatile);
+ enum ExprValueKind { EVK_RValue, EVK_NonRValue };
+
+ /// EmitAggFinalDestCopy - Emit copy of the specified aggregate into
+ /// destination address.
+ void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src,
+ ExprValueKind SrcKind);
+
+ /// Create a store to \arg DstPtr from \arg Src, truncating the stored value
+ /// to at most \arg DstSize bytes.
+ void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize,
+ bool DstIsVolatile);
/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
/// make sure it survives garbage collection until this point.
@@ -4581,7 +4887,7 @@ public:
void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::GlobalVariable *GV,
bool PerformInit);
- llvm::Function *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor,
+ llvm::Constant *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor,
llvm::Constant *Addr);
llvm::Function *createTLSAtExitStub(const VarDecl &VD,
@@ -4745,6 +5051,11 @@ public:
SourceLocation Loc,
const Twine &Name = "");
+ Address EmitCheckedInBoundsGEP(Address Addr, ArrayRef<llvm::Value *> IdxList,
+ llvm::Type *elementType, bool SignedIndices,
+ bool IsSubtraction, SourceLocation Loc,
+ CharUnits Align, const Twine &Name = "");
+
/// Specifies which type of sanitizer check to apply when handling a
/// particular builtin.
enum BuiltinCheckKind {
@@ -4807,6 +5118,10 @@ public:
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc,
AbstractCallee AC, unsigned ParmNum);
+ void EmitNonNullArgCheck(Address Addr, QualType ArgType,
+ SourceLocation ArgLoc, AbstractCallee AC,
+ unsigned ParmNum);
+
/// EmitCallArg - Emit a single call argument.
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
@@ -4835,6 +5150,29 @@ public:
llvm::Value *emitBoolVecConversion(llvm::Value *SrcVec,
unsigned NumElementsDst,
const llvm::Twine &Name = "");
+ // Adds a convergence_ctrl token to |Input| and emits the required parent
+ // convergence instructions.
+ template <typename CallType>
+ CallType *addControlledConvergenceToken(CallType *Input) {
+ return cast<CallType>(
+ addConvergenceControlToken(Input, ConvergenceTokenStack.back()));
+ }
+
+private:
+ // Emits a convergence_loop instruction for the given |BB|, with |ParentToken|
+ // as it's parent convergence instr.
+ llvm::IntrinsicInst *emitConvergenceLoopToken(llvm::BasicBlock *BB,
+ llvm::Value *ParentToken);
+ // Adds a convergence_ctrl token with |ParentToken| as parent convergence
+ // instr to the call |Input|.
+ llvm::CallBase *addConvergenceControlToken(llvm::CallBase *Input,
+ llvm::Value *ParentToken);
+ // Find the convergence_entry instruction |F|, or emits ones if none exists.
+ // Returns the convergence instruction.
+ llvm::IntrinsicInst *getOrEmitConvergenceEntryToken(llvm::Function *F);
+ // Find the convergence_loop instruction for the loop defined by |LI|, or
+ // emits one if none exists. Returns the convergence instruction.
+ llvm::IntrinsicInst *getOrEmitConvergenceLoopToken(const LoopInfo *LI);
private:
llvm::MDNode *getRangeForLoadFromType(QualType Ty);
@@ -5000,10 +5338,10 @@ private:
llvm::Value *EmitAArch64CpuInit();
llvm::Value *
FormAArch64ResolverCondition(const MultiVersionResolverOption &RO);
+ llvm::Value *EmitAArch64CpuSupports(const CallExpr *E);
llvm::Value *EmitAArch64CpuSupports(ArrayRef<StringRef> FeatureStrs);
};
-
inline DominatingLLVMValue::saved_type
DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
if (!needsSaving(value)) return saved_type(value, false);
@@ -5015,7 +5353,7 @@ DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
CGF.Builder.CreateStore(value, alloca);
- return saved_type(alloca.getPointer(), true);
+ return saved_type(alloca.emitRawPointer(CGF), true);
}
inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
index 1280bcd36de9..2a5d5f9083ae 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
@@ -30,6 +30,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/CharUnits.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
@@ -52,6 +53,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/BinaryFormat/ELF.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/CallingConv.h"
@@ -69,8 +71,10 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/xxhash.h"
+#include "llvm/TargetParser/RISCVISAInfo.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/TargetParser/X86TargetParser.h"
+#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include <optional>
using namespace clang;
@@ -143,6 +147,10 @@ createTargetCodeGenInfo(CodeGenModule &CGM) {
Kind = AArch64ABIKind::DarwinPCS;
else if (Triple.isOSWindows())
return createWindowsAArch64TargetCodeGenInfo(CGM, AArch64ABIKind::Win64);
+ else if (Target.getABI() == "aapcs-soft")
+ Kind = AArch64ABIKind::AAPCSSoft;
+ else if (Target.getABI() == "pauthtest")
+ Kind = AArch64ABIKind::PAuthTest;
return createAArch64TargetCodeGenInfo(CGM, Kind);
}
@@ -169,10 +177,7 @@ createTargetCodeGenInfo(CodeGenModule &CGM) {
else if (ABIStr == "aapcs16")
Kind = ARMABIKind::AAPCS16_VFP;
else if (CodeGenOpts.FloatABI == "hard" ||
- (CodeGenOpts.FloatABI != "soft" &&
- (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
- Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
- Triple.getEnvironment() == llvm::Triple::EABIHF)))
+ (CodeGenOpts.FloatABI != "soft" && Triple.isHardFloatABI()))
Kind = ARMABIKind::AAPCS_VFP;
return createARMTargetCodeGenInfo(CGM, Kind);
@@ -335,10 +340,11 @@ CodeGenModule::CodeGenModule(ASTContext &C,
: Context(C), LangOpts(C.getLangOpts()), FS(FS), HeaderSearchOpts(HSO),
PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags),
Target(C.getTargetInfo()), ABI(createCXXABI(*this)),
- VMContext(M.getContext()), Types(*this), VTables(*this),
+ VMContext(M.getContext()), VTables(*this),
SanitizerMD(new SanitizerMetadata(*this)) {
// Initialize the type cache.
+ Types.reset(new CodeGenTypes(*this));
llvm::LLVMContext &LLVMContext = M.getContext();
VoidTy = llvm::Type::getVoidTy(LLVMContext);
Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
@@ -362,7 +368,8 @@ CodeGenModule::CodeGenModule(ASTContext &C,
IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
IntPtrTy = llvm::IntegerType::get(LLVMContext,
C.getTargetInfo().getMaxPointerWidth());
- Int8PtrTy = llvm::PointerType::get(LLVMContext, 0);
+ Int8PtrTy = llvm::PointerType::get(LLVMContext,
+ C.getTargetAddressSpace(LangAS::Default));
const llvm::DataLayout &DL = M.getDataLayout();
AllocaInt8PtrTy =
llvm::PointerType::get(LLVMContext, DL.getAllocaAddrSpace());
@@ -395,8 +402,8 @@ CodeGenModule::CodeGenModule(ASTContext &C,
// Enable TBAA unless it's suppressed. ThreadSanitizer needs TBAA even at O0.
if (LangOpts.Sanitize.has(SanitizerKind::Thread) ||
(!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0))
- TBAA.reset(new CodeGenTBAA(Context, TheModule, CodeGenOpts, getLangOpts(),
- getCXXABI().getMangleContext()));
+ TBAA.reset(new CodeGenTBAA(Context, getTypes(), TheModule, CodeGenOpts,
+ getLangOpts()));
// If debug info or coverage generation is enabled, create the CGDebugInfo
// object.
@@ -438,6 +445,11 @@ CodeGenModule::CodeGenModule(ASTContext &C,
}
ModuleNameHash = llvm::getUniqueInternalLinkagePostfix(Path);
}
+
+ // Record mregparm value now so it is visible through all of codegen.
+ if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
+ getModule().addModuleFlag(llvm::Module::Error, "NumRegisterParameters",
+ CodeGenOpts.NumRegisterParameters);
}
CodeGenModule::~CodeGenModule() {}
@@ -624,6 +636,26 @@ static bool checkAliasedGlobal(
return true;
}
+// Emit a warning if toc-data attribute is requested for global variables that
+// have aliases and remove the toc-data attribute.
+static void checkAliasForTocData(llvm::GlobalVariable *GVar,
+ const CodeGenOptions &CodeGenOpts,
+ DiagnosticsEngine &Diags,
+ SourceLocation Location) {
+ if (GVar->hasAttribute("toc-data")) {
+ auto GVId = GVar->getName();
+ // Is this a global variable specified by the user as local?
+ if ((llvm::binary_search(CodeGenOpts.TocDataVarsUserSpecified, GVId))) {
+ Diags.Report(Location, diag::warn_toc_unsupported_type)
+ << GVId << "the variable has an alias";
+ }
+ llvm::AttributeSet CurrAttributes = GVar->getAttributes();
+ llvm::AttributeSet NewAttributes =
+ CurrAttributes.removeAttribute(GVar->getContext(), "toc-data");
+ GVar->setAttributes(NewAttributes);
+ }
+}
+
void CodeGenModule::checkAliases() {
// Check if the constructed aliases are well formed. It is really unfortunate
// that we have to do this in CodeGen, but we only construct mangled names
@@ -650,6 +682,12 @@ void CodeGenModule::checkAliases() {
continue;
}
+ if (getContext().getTargetInfo().getTriple().isOSAIX())
+ if (const llvm::GlobalVariable *GVar =
+ dyn_cast<const llvm::GlobalVariable>(GV))
+ checkAliasForTocData(const_cast<llvm::GlobalVariable *>(GVar),
+ getCodeGenOpts(), Diags, Location);
+
llvm::Constant *Aliasee =
IsIFunc ? cast<llvm::GlobalIFunc>(Alias)->getResolver()
: cast<llvm::GlobalAlias>(Alias)->getAliasee();
@@ -685,6 +723,11 @@ void CodeGenModule::checkAliases() {
cast<llvm::GlobalAlias>(Alias)->setAliasee(Aliasee);
}
}
+ // ifunc resolvers are usually implemented to run before sanitizer
+ // initialization. Disable instrumentation to prevent the ordering issue.
+ if (IsIFunc)
+ cast<llvm::Function>(Aliasee)->addFnAttr(
+ llvm::Attribute::DisableSanitizerInstrumentation);
}
if (!Error)
return;
@@ -836,10 +879,6 @@ void CodeGenModule::Release() {
AddGlobalCtor(CudaCtorFunction);
}
if (OpenMPRuntime) {
- if (llvm::Function *OpenMPRequiresDirectiveRegFun =
- OpenMPRuntime->emitRequiresDirectiveRegFun()) {
- AddGlobalCtor(OpenMPRequiresDirectiveRegFun, 0);
- }
OpenMPRuntime->createOffloadEntriesAndInfoMetadata();
OpenMPRuntime->clear();
}
@@ -860,6 +899,7 @@ void CodeGenModule::Release() {
checkAliases();
EmitDeferredUnusedCoverageMappings();
CodeGenPGO(*this).setValueProfilingFlag(getModule());
+ CodeGenPGO(*this).setProfileVersion(getModule());
if (CoverageMapping)
CoverageMapping->emit();
if (CodeGenOpts.SanitizeCfiCrossDso) {
@@ -872,13 +912,14 @@ void CodeGenModule::Release() {
if (Context.getTargetInfo().getTriple().isWasm())
EmitMainVoidAlias();
- if (getTriple().isAMDGPU()) {
- // Emit amdgpu_code_object_version module flag, which is code object version
+ if (getTriple().isAMDGPU() ||
+ (getTriple().isSPIRV() && getTriple().getVendor() == llvm::Triple::AMD)) {
+ // Emit amdhsa_code_object_version module flag, which is code object version
// times 100.
if (getTarget().getTargetOpts().CodeObjectVersion !=
llvm::CodeObjectVersionKind::COV_None) {
getModule().addModuleFlag(llvm::Module::Error,
- "amdgpu_code_object_version",
+ "amdhsa_code_object_version",
getTarget().getTargetOpts().CodeObjectVersion);
}
@@ -917,7 +958,15 @@ void CodeGenModule::Release() {
llvm::ConstantArray::get(ATy, UsedArray), "__clang_gpu_used_external");
addCompilerUsedGlobal(GV);
}
-
+ if (LangOpts.HIP && !getLangOpts().OffloadingNewDriver) {
+ // Emit a unique ID so that host and device binaries from the same
+ // compilation unit can be associated.
+ auto *GV = new llvm::GlobalVariable(
+ getModule(), Int8Ty, false, llvm::GlobalValue::ExternalLinkage,
+ llvm::Constant::getNullValue(Int8Ty),
+ "__hip_cuid_" + getContext().getCUIDHash());
+ addCompilerUsedGlobal(GV);
+ }
emitLLVMUsed();
if (SanStats)
SanStats->finish();
@@ -945,11 +994,6 @@ void CodeGenModule::Release() {
NMD->addOperand(MD);
}
- // Record mregparm value now so it is visible through rest of codegen.
- if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
- getModule().addModuleFlag(llvm::Module::Error, "NumRegisterParameters",
- CodeGenOpts.NumRegisterParameters);
-
if (CodeGenOpts.DwarfVersion) {
getModule().addModuleFlag(llvm::Module::Max, "Dwarf Version",
CodeGenOpts.DwarfVersion);
@@ -1044,21 +1088,31 @@ void CodeGenModule::Release() {
llvm::MDString::get(VMContext, "ascii"));
}
- llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
- if ( Arch == llvm::Triple::arm
- || Arch == llvm::Triple::armeb
- || Arch == llvm::Triple::thumb
- || Arch == llvm::Triple::thumbeb) {
+ llvm::Triple T = Context.getTargetInfo().getTriple();
+ if (T.isARM() || T.isThumb()) {
// The minimum width of an enum in bytes
uint64_t EnumWidth = Context.getLangOpts().ShortEnums ? 1 : 4;
getModule().addModuleFlag(llvm::Module::Error, "min_enum_size", EnumWidth);
}
- if (Arch == llvm::Triple::riscv32 || Arch == llvm::Triple::riscv64) {
+ if (T.isRISCV()) {
StringRef ABIStr = Target.getABI();
llvm::LLVMContext &Ctx = TheModule.getContext();
getModule().addModuleFlag(llvm::Module::Error, "target-abi",
llvm::MDString::get(Ctx, ABIStr));
+
+ // Add the canonical ISA string as metadata so the backend can set the ELF
+ // attributes correctly. We use AppendUnique so LTO will keep all of the
+ // unique ISA strings that were linked together.
+ const std::vector<std::string> &Features =
+ getTarget().getTargetOpts().Features;
+ auto ParseResult =
+ llvm::RISCVISAInfo::parseFeatures(T.isRISCV64() ? 64 : 32, Features);
+ if (!errorToBool(ParseResult.takeError()))
+ getModule().addModuleFlag(
+ llvm::Module::AppendUnique, "riscv-isa",
+ llvm::MDNode::get(
+ Ctx, llvm::MDString::get(Ctx, (*ParseResult)->toString())));
}
if (CodeGenOpts.SanitizeCfiCrossDso) {
@@ -1080,6 +1134,11 @@ void CodeGenModule::Release() {
CodeGenOpts.SanitizeCfiCanonicalJumpTables);
}
+ if (CodeGenOpts.SanitizeCfiICallNormalizeIntegers) {
+ getModule().addModuleFlag(llvm::Module::Override, "cfi-normalize-integers",
+ 1);
+ }
+
if (LangOpts.Sanitize.has(SanitizerKind::KCFI)) {
getModule().addModuleFlag(llvm::Module::Override, "kcfi", 1);
// KCFI assumes patchable-function-prefix is the same for all indirectly
@@ -1127,10 +1186,7 @@ void CodeGenModule::Release() {
getModule().addModuleFlag(llvm::Module::Override,
"tag-stack-memory-buildattr", 1);
- if (Arch == llvm::Triple::thumb || Arch == llvm::Triple::thumbeb ||
- Arch == llvm::Triple::arm || Arch == llvm::Triple::armeb ||
- Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_32 ||
- Arch == llvm::Triple::aarch64_be) {
+ if (T.isARM() || T.isThumb() || T.isAArch64()) {
if (LangOpts.BranchTargetEnforcement)
getModule().addModuleFlag(llvm::Module::Min, "branch-target-enforcement",
1);
@@ -1147,6 +1203,37 @@ void CodeGenModule::Release() {
if (!LangOpts.isSignReturnAddressWithAKey())
getModule().addModuleFlag(llvm::Module::Min,
"sign-return-address-with-bkey", 1);
+
+ if (getTriple().isOSLinux()) {
+ assert(getTriple().isOSBinFormatELF());
+ using namespace llvm::ELF;
+ uint64_t PAuthABIVersion =
+ (LangOpts.PointerAuthIntrinsics
+ << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INTRINSICS) |
+ (LangOpts.PointerAuthCalls
+ << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_CALLS) |
+ (LangOpts.PointerAuthReturns
+ << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_RETURNS) |
+ (LangOpts.PointerAuthAuthTraps
+ << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_AUTHTRAPS) |
+ (LangOpts.PointerAuthVTPtrAddressDiscrimination
+ << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_VPTRADDRDISCR) |
+ (LangOpts.PointerAuthVTPtrTypeDiscrimination
+ << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_VPTRTYPEDISCR) |
+ (LangOpts.PointerAuthInitFini
+ << AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINI);
+ static_assert(AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_INITFINI ==
+ AARCH64_PAUTH_PLATFORM_LLVM_LINUX_VERSION_LAST,
+ "Update when new enum items are defined");
+ if (PAuthABIVersion != 0) {
+ getModule().addModuleFlag(llvm::Module::Error,
+ "aarch64-elf-pauthabi-platform",
+ AARCH64_PAUTH_PLATFORM_LLVM_LINUX);
+ getModule().addModuleFlag(llvm::Module::Error,
+ "aarch64-elf-pauthabi-version",
+ PAuthABIVersion);
+ }
+ }
}
if (CodeGenOpts.StackClashProtector)
@@ -1252,6 +1339,9 @@ void CodeGenModule::Release() {
case CodeGenOptions::FramePointerKind::None:
// 0 ("none") is the default.
break;
+ case CodeGenOptions::FramePointerKind::Reserved:
+ getModule().setFramePointer(llvm::FramePointerKind::Reserved);
+ break;
case CodeGenOptions::FramePointerKind::NonLeaf:
getModule().setFramePointer(llvm::FramePointerKind::NonLeaf);
break;
@@ -1314,22 +1404,45 @@ void CodeGenModule::Release() {
// that might affect the DLL storage class or the visibility, and
// before anything that might act on these.
setVisibilityFromDLLStorageClass(LangOpts, getModule());
+
+ // Check the tail call symbols are truly undefined.
+ if (getTriple().isPPC() && !MustTailCallUndefinedGlobals.empty()) {
+ for (auto &I : MustTailCallUndefinedGlobals) {
+ if (!I.first->isDefined())
+ getDiags().Report(I.second, diag::err_ppc_impossible_musttail) << 2;
+ else {
+ StringRef MangledName = getMangledName(GlobalDecl(I.first));
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (!Entry || Entry->isWeakForLinker() ||
+ Entry->isDeclarationForLinker())
+ getDiags().Report(I.second, diag::err_ppc_impossible_musttail) << 2;
+ }
+ }
+ }
}
void CodeGenModule::EmitOpenCLMetadata() {
// SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the
// opencl.ocl.version named metadata node.
- // C++ for OpenCL has a distinct mapping for versions compatibile with OpenCL.
- auto Version = LangOpts.getOpenCLCompatibleVersion();
- llvm::Metadata *OCLVerElts[] = {
- llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- Int32Ty, Version / 100)),
- llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- Int32Ty, (Version % 100) / 10))};
- llvm::NamedMDNode *OCLVerMD =
- TheModule.getOrInsertNamedMetadata("opencl.ocl.version");
- llvm::LLVMContext &Ctx = TheModule.getContext();
- OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts));
+ // C++ for OpenCL has a distinct mapping for versions compatible with OpenCL.
+ auto CLVersion = LangOpts.getOpenCLCompatibleVersion();
+
+ auto EmitVersion = [this](StringRef MDName, int Version) {
+ llvm::Metadata *OCLVerElts[] = {
+ llvm::ConstantAsMetadata::get(
+ llvm::ConstantInt::get(Int32Ty, Version / 100)),
+ llvm::ConstantAsMetadata::get(
+ llvm::ConstantInt::get(Int32Ty, (Version % 100) / 10))};
+ llvm::NamedMDNode *OCLVerMD = TheModule.getOrInsertNamedMetadata(MDName);
+ llvm::LLVMContext &Ctx = TheModule.getContext();
+ OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts));
+ };
+
+ EmitVersion("opencl.ocl.version", CLVersion);
+ if (LangOpts.OpenCLCPlusPlus) {
+ // In addition to the OpenCL compatible version, emit the C++ version.
+ EmitVersion("opencl.cxx.version", LangOpts.OpenCLCPlusPlusVersion);
+ }
}
void CodeGenModule::EmitBackendOptionsMetadata(
@@ -1342,12 +1455,12 @@ void CodeGenModule::EmitBackendOptionsMetadata(
void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
// Make sure that this type is translated.
- Types.UpdateCompletedType(TD);
+ getTypes().UpdateCompletedType(TD);
}
void CodeGenModule::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
// Make sure that this type is translated.
- Types.RefreshTypeCacheForClass(RD);
+ getTypes().RefreshTypeCacheForClass(RD);
}
llvm::MDNode *CodeGenModule::getTBAATypeInfo(QualType QTy) {
@@ -1713,59 +1826,6 @@ static void AppendCPUSpecificCPUDispatchMangling(const CodeGenModule &CGM,
Out << ".resolver";
}
-static void AppendTargetVersionMangling(const CodeGenModule &CGM,
- const TargetVersionAttr *Attr,
- raw_ostream &Out) {
- if (Attr->isDefaultVersion()) {
- Out << ".default";
- return;
- }
- Out << "._";
- const TargetInfo &TI = CGM.getTarget();
- llvm::SmallVector<StringRef, 8> Feats;
- Attr->getFeatures(Feats);
- llvm::stable_sort(Feats, [&TI](const StringRef FeatL, const StringRef FeatR) {
- return TI.multiVersionSortPriority(FeatL) <
- TI.multiVersionSortPriority(FeatR);
- });
- for (const auto &Feat : Feats) {
- Out << 'M';
- Out << Feat;
- }
-}
-
-static void AppendTargetMangling(const CodeGenModule &CGM,
- const TargetAttr *Attr, raw_ostream &Out) {
- if (Attr->isDefaultVersion())
- return;
-
- Out << '.';
- const TargetInfo &Target = CGM.getTarget();
- ParsedTargetAttr Info = Target.parseTargetAttr(Attr->getFeaturesStr());
- llvm::sort(Info.Features, [&Target](StringRef LHS, StringRef RHS) {
- // Multiversioning doesn't allow "no-${feature}", so we can
- // only have "+" prefixes here.
- assert(LHS.starts_with("+") && RHS.starts_with("+") &&
- "Features should always have a prefix.");
- return Target.multiVersionSortPriority(LHS.substr(1)) >
- Target.multiVersionSortPriority(RHS.substr(1));
- });
-
- bool IsFirst = true;
-
- if (!Info.CPU.empty()) {
- IsFirst = false;
- Out << "arch_" << Info.CPU;
- }
-
- for (StringRef Feat : Info.Features) {
- if (!IsFirst)
- Out << '_';
- IsFirst = false;
- Out << Feat.substr(1);
- }
-}
-
// Returns true if GD is a function decl with internal linkage and
// needs a unique suffix after the mangled name.
static bool isUniqueInternalLinkageDecl(GlobalDecl GD,
@@ -1775,41 +1835,6 @@ static bool isUniqueInternalLinkageDecl(GlobalDecl GD,
(CGM.getFunctionLinkage(GD) == llvm::GlobalValue::InternalLinkage);
}
-static void AppendTargetClonesMangling(const CodeGenModule &CGM,
- const TargetClonesAttr *Attr,
- unsigned VersionIndex,
- raw_ostream &Out) {
- const TargetInfo &TI = CGM.getTarget();
- if (TI.getTriple().isAArch64()) {
- StringRef FeatureStr = Attr->getFeatureStr(VersionIndex);
- if (FeatureStr == "default") {
- Out << ".default";
- return;
- }
- Out << "._";
- SmallVector<StringRef, 8> Features;
- FeatureStr.split(Features, "+");
- llvm::stable_sort(Features,
- [&TI](const StringRef FeatL, const StringRef FeatR) {
- return TI.multiVersionSortPriority(FeatL) <
- TI.multiVersionSortPriority(FeatR);
- });
- for (auto &Feat : Features) {
- Out << 'M';
- Out << Feat;
- }
- } else {
- Out << '.';
- StringRef FeatureStr = Attr->getFeatureStr(VersionIndex);
- if (FeatureStr.starts_with("arch="))
- Out << "arch_" << FeatureStr.substr(sizeof("arch=") - 1);
- else
- Out << FeatureStr;
-
- Out << '.' << Attr->getMangledIndex(VersionIndex);
- }
-}
-
static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
const NamedDecl *ND,
bool OmitMultiVersionMangling = false) {
@@ -1863,16 +1888,31 @@ static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
FD->getAttr<CPUSpecificAttr>(),
GD.getMultiVersionIndex(), Out);
break;
- case MultiVersionKind::Target:
- AppendTargetMangling(CGM, FD->getAttr<TargetAttr>(), Out);
+ case MultiVersionKind::Target: {
+ auto *Attr = FD->getAttr<TargetAttr>();
+ assert(Attr && "Expected TargetAttr to be present "
+ "for attribute mangling");
+ const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo();
+ Info.appendAttributeMangling(Attr, Out);
break;
- case MultiVersionKind::TargetVersion:
- AppendTargetVersionMangling(CGM, FD->getAttr<TargetVersionAttr>(), Out);
+ }
+ case MultiVersionKind::TargetVersion: {
+ auto *Attr = FD->getAttr<TargetVersionAttr>();
+ assert(Attr && "Expected TargetVersionAttr to be present "
+ "for attribute mangling");
+ const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo();
+ Info.appendAttributeMangling(Attr, Out);
break;
- case MultiVersionKind::TargetClones:
- AppendTargetClonesMangling(CGM, FD->getAttr<TargetClonesAttr>(),
- GD.getMultiVersionIndex(), Out);
+ }
+ case MultiVersionKind::TargetClones: {
+ auto *Attr = FD->getAttr<TargetClonesAttr>();
+ assert(Attr && "Expected TargetClonesAttr to be present "
+ "for attribute mangling");
+ unsigned Index = GD.getMultiVersionIndex();
+ const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo();
+ Info.appendAttributeMangling(Attr, Index, Out);
break;
+ }
case MultiVersionKind::None:
llvm_unreachable("None multiversion type isn't valid here");
}
@@ -2124,6 +2164,14 @@ void CodeGenModule::SetLLVMFunctionAttributes(GlobalDecl GD,
llvm::AttributeList PAL;
ConstructAttributeList(F->getName(), Info, GD, PAL, CallingConv,
/*AttrOnCallSite=*/false, IsThunk);
+ if (CallingConv == llvm::CallingConv::X86_VectorCall &&
+ getTarget().getTriple().isWindowsArm64EC()) {
+ SourceLocation Loc;
+ if (const Decl *D = GD.getDecl())
+ Loc = D->getLocation();
+
+ Error(Loc, "__vectorcall calling convention is not currently supported");
+ }
F->setAttributes(PAL);
F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
}
@@ -2414,7 +2462,7 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
if (auto *Attr = D->getAttr<ArmNewAttr>()) {
if (Attr->isNewZA())
- B.addAttribute("aarch64_pstate_za_new");
+ B.addAttribute("aarch64_new_za");
if (Attr->isNewZT0())
B.addAttribute("aarch64_new_zt0");
}
@@ -2664,7 +2712,7 @@ void CodeGenModule::setNonAliasAttributes(GlobalDecl GD,
addUsedGlobal(F);
if (auto *SA = D->getAttr<PragmaClangTextSectionAttr>())
if (!D->getAttr<SectionAttr>())
- F->addFnAttr("implicit-section-name", SA->getName());
+ F->setSection(SA->getName());
llvm::AttrBuilder Attrs(F->getContext());
if (GetCPUAndFeaturesAttributes(GD, Attrs)) {
@@ -3486,6 +3534,9 @@ bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
// Implicit template instantiations may change linkage if they are later
// explicitly instantiated, so they should not be emitted eagerly.
return false;
+ // Defer until all versions have been semantically checked.
+ if (FD->hasAttr<TargetVersionAttr>() && !FD->isMultiVersion())
+ return false;
}
if (const auto *VD = dyn_cast<VarDecl>(Global)) {
if (Context.getInlineVariableDefinitionKind(VD) ==
@@ -3661,6 +3712,19 @@ template <typename AttrT> static bool hasImplicitAttr(const ValueDecl *D) {
return D->isImplicit();
}
+bool CodeGenModule::shouldEmitCUDAGlobalVar(const VarDecl *Global) const {
+ assert(LangOpts.CUDA && "Should not be called by non-CUDA languages");
+ // We need to emit host-side 'shadows' for all global
+ // device-side variables because the CUDA runtime needs their
+ // size and host-side address in order to provide access to
+ // their device-side incarnations.
+ return !LangOpts.CUDAIsDevice || Global->hasAttr<CUDADeviceAttr>() ||
+ Global->hasAttr<CUDAConstantAttr>() ||
+ Global->hasAttr<CUDASharedAttr>() ||
+ Global->getType()->isCUDADeviceBuiltinSurfaceType() ||
+ Global->getType()->isCUDADeviceBuiltinTextureType();
+}
+
void CodeGenModule::EmitGlobal(GlobalDecl GD) {
const auto *Global = cast<ValueDecl>(GD.getDecl());
@@ -3685,36 +3749,27 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
// Non-constexpr non-lambda implicit host device functions are not emitted
// unless they are used on device side.
if (LangOpts.CUDA) {
- if (LangOpts.CUDAIsDevice) {
+ assert((isa<FunctionDecl>(Global) || isa<VarDecl>(Global)) &&
+ "Expected Variable or Function");
+ if (const auto *VD = dyn_cast<VarDecl>(Global)) {
+ if (!shouldEmitCUDAGlobalVar(VD))
+ return;
+ } else if (LangOpts.CUDAIsDevice) {
const auto *FD = dyn_cast<FunctionDecl>(Global);
if ((!Global->hasAttr<CUDADeviceAttr>() ||
- (LangOpts.OffloadImplicitHostDeviceTemplates && FD &&
+ (LangOpts.OffloadImplicitHostDeviceTemplates &&
hasImplicitAttr<CUDAHostAttr>(FD) &&
hasImplicitAttr<CUDADeviceAttr>(FD) && !FD->isConstexpr() &&
!isLambdaCallOperator(FD) &&
!getContext().CUDAImplicitHostDeviceFunUsedByDevice.count(FD))) &&
!Global->hasAttr<CUDAGlobalAttr>() &&
- !Global->hasAttr<CUDAConstantAttr>() &&
- !Global->hasAttr<CUDASharedAttr>() &&
- !Global->getType()->isCUDADeviceBuiltinSurfaceType() &&
- !Global->getType()->isCUDADeviceBuiltinTextureType() &&
!(LangOpts.HIPStdPar && isa<FunctionDecl>(Global) &&
!Global->hasAttr<CUDAHostAttr>()))
return;
- } else {
- // We need to emit host-side 'shadows' for all global
- // device-side variables because the CUDA runtime needs their
- // size and host-side address in order to provide access to
- // their device-side incarnations.
-
- // So device-only functions are the only things we skip.
- if (isa<FunctionDecl>(Global) && !Global->hasAttr<CUDAHostAttr>() &&
- Global->hasAttr<CUDADeviceAttr>())
- return;
-
- assert((isa<FunctionDecl>(Global) || isa<VarDecl>(Global)) &&
- "Expected Variable or Function");
- }
+ // Device-only functions are the only things we skip.
+ } else if (!Global->hasAttr<CUDAHostAttr>() &&
+ Global->hasAttr<CUDADeviceAttr>())
+ return;
}
if (LangOpts.OpenMP) {
@@ -3745,7 +3800,8 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
// Forward declarations are emitted lazily on first use.
if (!FD->doesThisDeclarationHaveABody()) {
- if (!FD->doesDeclarationForceExternallyVisibleDefinition())
+ if (!FD->doesDeclarationForceExternallyVisibleDefinition() &&
+ (!FD->isMultiVersion() || !getTarget().getTriple().isAArch64()))
return;
StringRef MangledName = getMangledName(GD);
@@ -3976,9 +4032,20 @@ bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
// behavior may break ABI compatibility of the current unit.
if (const Module *M = F->getOwningModule();
M && M->getTopLevelModule()->isNamedModule() &&
- getContext().getCurrentNamedModule() != M->getTopLevelModule() &&
- !F->hasAttr<AlwaysInlineAttr>())
- return false;
+ getContext().getCurrentNamedModule() != M->getTopLevelModule()) {
+ // There are practices to mark template member function as always-inline
+ // and mark the template as extern explicit instantiation but not give
+ // the definition for member function. So we have to emit the function
+ // from explicitly instantiation with always-inline.
+ //
+ // See https://github.com/llvm/llvm-project/issues/86893 for details.
+ //
+ // TODO: Maybe it is better to give it a warning if we call a non-inline
+ // function from other module units which is marked as always-inline.
+ if (!F->isTemplateInstantiation() || !F->hasAttr<AlwaysInlineAttr>()) {
+ return false;
+ }
+ }
if (F->hasAttr<NoInlineAttr>())
return false;
@@ -4028,17 +4095,21 @@ void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD,
auto *Spec = FD->getAttr<CPUSpecificAttr>();
for (unsigned I = 0; I < Spec->cpus_size(); ++I)
EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
- } else if (FD->isTargetClonesMultiVersion()) {
- auto *Clone = FD->getAttr<TargetClonesAttr>();
- for (unsigned I = 0; I < Clone->featuresStrs_size(); ++I)
- if (Clone->isFirstOfVersion(I))
+ } else if (auto *TC = FD->getAttr<TargetClonesAttr>()) {
+ for (unsigned I = 0; I < TC->featuresStrs_size(); ++I)
+ // AArch64 favors the default target version over the clone if any.
+ if ((!TC->isDefaultVersion(I) || !getTarget().getTriple().isAArch64()) &&
+ TC->isFirstOfVersion(I))
EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
// Ensure that the resolver function is also emitted.
GetOrCreateMultiVersionResolver(GD);
- } else if (FD->hasAttr<TargetVersionAttr>()) {
- GetOrCreateMultiVersionResolver(GD);
} else
EmitGlobalFunctionDefinition(GD, GV);
+
+ // Defer the resolver emission until we can reason whether the TU
+ // contains a default target version implementation.
+ if (FD->isTargetVersionMultiVersion())
+ AddDeferredMultiVersionResolverToEmit(GD);
}
void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
@@ -4131,105 +4202,92 @@ void CodeGenModule::emitMultiVersionFunctions() {
const auto *FD = cast<FunctionDecl>(GD.getDecl());
assert(FD && "Expected a FunctionDecl");
- SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
- if (FD->isTargetMultiVersion()) {
- getContext().forEachMultiversionedFunctionVersion(
- FD, [this, &GD, &Options](const FunctionDecl *CurFD) {
- GlobalDecl CurGD{
- (CurFD->isDefined() ? CurFD->getDefinition() : CurFD)};
- StringRef MangledName = getMangledName(CurGD);
- llvm::Constant *Func = GetGlobalValue(MangledName);
- if (!Func) {
- if (CurFD->isDefined()) {
- EmitGlobalFunctionDefinition(CurGD, nullptr);
- Func = GetGlobalValue(MangledName);
- } else {
- const CGFunctionInfo &FI =
- getTypes().arrangeGlobalDeclaration(GD);
- llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
- Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
- /*DontDefer=*/false, ForDefinition);
- }
- assert(Func && "This should have just been created");
- }
- if (CurFD->getMultiVersionKind() == MultiVersionKind::Target) {
- const auto *TA = CurFD->getAttr<TargetAttr>();
- llvm::SmallVector<StringRef, 8> Feats;
- TA->getAddedFeatures(Feats);
- Options.emplace_back(cast<llvm::Function>(Func),
- TA->getArchitecture(), Feats);
- } else {
- const auto *TVA = CurFD->getAttr<TargetVersionAttr>();
- llvm::SmallVector<StringRef, 8> Feats;
- TVA->getFeatures(Feats);
- Options.emplace_back(cast<llvm::Function>(Func),
- /*Architecture*/ "", Feats);
- }
- });
- } else if (FD->isTargetClonesMultiVersion()) {
- const auto *TC = FD->getAttr<TargetClonesAttr>();
- for (unsigned VersionIndex = 0; VersionIndex < TC->featuresStrs_size();
- ++VersionIndex) {
- if (!TC->isFirstOfVersion(VersionIndex))
- continue;
- GlobalDecl CurGD{(FD->isDefined() ? FD->getDefinition() : FD),
- VersionIndex};
- StringRef Version = TC->getFeatureStr(VersionIndex);
- StringRef MangledName = getMangledName(CurGD);
- llvm::Constant *Func = GetGlobalValue(MangledName);
- if (!Func) {
- if (FD->isDefined()) {
- EmitGlobalFunctionDefinition(CurGD, nullptr);
- Func = GetGlobalValue(MangledName);
- } else {
- const CGFunctionInfo &FI =
- getTypes().arrangeGlobalDeclaration(CurGD);
- llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
- Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
- /*DontDefer=*/false, ForDefinition);
- }
- assert(Func && "This should have just been created");
+ auto createFunction = [&](const FunctionDecl *Decl, unsigned MVIdx = 0) {
+ GlobalDecl CurGD{Decl->isDefined() ? Decl->getDefinition() : Decl, MVIdx};
+ StringRef MangledName = getMangledName(CurGD);
+ llvm::Constant *Func = GetGlobalValue(MangledName);
+ if (!Func) {
+ if (Decl->isDefined()) {
+ EmitGlobalFunctionDefinition(CurGD, nullptr);
+ Func = GetGlobalValue(MangledName);
+ } else {
+ const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(CurGD);
+ llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
+ Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
+ /*DontDefer=*/false, ForDefinition);
}
+ assert(Func && "This should have just been created");
+ }
+ return cast<llvm::Function>(Func);
+ };
- StringRef Architecture;
- llvm::SmallVector<StringRef, 1> Feature;
+ // For AArch64, a resolver is only emitted if a function marked with
+ // target_version("default")) or target_clones() is present and defined
+ // in this TU. For other architectures it is always emitted.
+ bool ShouldEmitResolver = !getTarget().getTriple().isAArch64();
+ SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
- if (getTarget().getTriple().isAArch64()) {
- if (Version != "default") {
- llvm::SmallVector<StringRef, 8> VerFeats;
- Version.split(VerFeats, "+");
- for (auto &CurFeat : VerFeats)
- Feature.push_back(CurFeat.trim());
- }
- } else {
- if (Version.starts_with("arch="))
- Architecture = Version.drop_front(sizeof("arch=") - 1);
- else if (Version != "default")
- Feature.push_back(Version);
- }
+ getContext().forEachMultiversionedFunctionVersion(
+ FD, [&](const FunctionDecl *CurFD) {
+ llvm::SmallVector<StringRef, 8> Feats;
+ bool IsDefined = CurFD->doesThisDeclarationHaveABody();
+
+ if (const auto *TA = CurFD->getAttr<TargetAttr>()) {
+ TA->getAddedFeatures(Feats);
+ llvm::Function *Func = createFunction(CurFD);
+ Options.emplace_back(Func, TA->getArchitecture(), Feats);
+ } else if (const auto *TVA = CurFD->getAttr<TargetVersionAttr>()) {
+ if (TVA->isDefaultVersion() && IsDefined)
+ ShouldEmitResolver = true;
+ TVA->getFeatures(Feats);
+ llvm::Function *Func = createFunction(CurFD);
+ Options.emplace_back(Func, /*Architecture*/ "", Feats);
+ } else if (const auto *TC = CurFD->getAttr<TargetClonesAttr>()) {
+ if (IsDefined)
+ ShouldEmitResolver = true;
+ for (unsigned I = 0; I < TC->featuresStrs_size(); ++I) {
+ if (!TC->isFirstOfVersion(I))
+ continue;
+
+ llvm::Function *Func = createFunction(CurFD, I);
+ StringRef Architecture;
+ Feats.clear();
+ if (getTarget().getTriple().isAArch64())
+ TC->getFeatures(Feats, I);
+ else {
+ StringRef Version = TC->getFeatureStr(I);
+ if (Version.starts_with("arch="))
+ Architecture = Version.drop_front(sizeof("arch=") - 1);
+ else if (Version != "default")
+ Feats.push_back(Version);
+ }
+ Options.emplace_back(Func, Architecture, Feats);
+ }
+ } else
+ llvm_unreachable("unexpected MultiVersionKind");
+ });
- Options.emplace_back(cast<llvm::Function>(Func), Architecture, Feature);
- }
- } else {
- assert(0 && "Expected a target or target_clones multiversion function");
+ if (!ShouldEmitResolver)
continue;
- }
llvm::Constant *ResolverConstant = GetOrCreateMultiVersionResolver(GD);
if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(ResolverConstant)) {
ResolverConstant = IFunc->getResolver();
- if (FD->isTargetClonesMultiVersion()) {
- const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
- llvm::FunctionType *DeclTy = getTypes().GetFunctionType(FI);
+ if (FD->isTargetClonesMultiVersion() &&
+ !getTarget().getTriple().isAArch64()) {
std::string MangledName = getMangledNameImpl(
*this, GD, FD, /*OmitMultiVersionMangling=*/true);
- // In prior versions of Clang, the mangling for ifuncs incorrectly
- // included an .ifunc suffix. This alias is generated for backward
- // compatibility. It is deprecated, and may be removed in the future.
- auto *Alias = llvm::GlobalAlias::create(
- DeclTy, 0, getMultiversionLinkage(*this, GD),
- MangledName + ".ifunc", IFunc, &getModule());
- SetCommonAttributes(FD, Alias);
+ if (!GetGlobalValue(MangledName + ".ifunc")) {
+ const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
+ llvm::FunctionType *DeclTy = getTypes().GetFunctionType(FI);
+ // In prior versions of Clang, the mangling for ifuncs incorrectly
+ // included an .ifunc suffix. This alias is generated for backward
+ // compatibility. It is deprecated, and may be removed in the future.
+ auto *Alias = llvm::GlobalAlias::create(
+ DeclTy, 0, getMultiversionLinkage(*this, GD),
+ MangledName + ".ifunc", IFunc, &getModule());
+ SetCommonAttributes(FD, Alias);
+ }
}
}
llvm::Function *ResolverFunc = cast<llvm::Function>(ResolverConstant);
@@ -4262,6 +4320,14 @@ void CodeGenModule::emitMultiVersionFunctions() {
emitMultiVersionFunctions();
}
+static void replaceDeclarationWith(llvm::GlobalValue *Old,
+ llvm::Constant *New) {
+ assert(cast<llvm::Function>(Old)->isDeclaration() && "Not a declaration");
+ New->takeName(Old);
+ Old->replaceAllUsesWith(New);
+ Old->eraseFromParent();
+}
+
void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
const auto *FD = cast<FunctionDecl>(GD.getDecl());
assert(FD && "Not a FunctionDecl?");
@@ -4366,12 +4432,9 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
// Fix up function declarations that were created for cpu_specific before
// cpu_dispatch was known
if (!isa<llvm::GlobalIFunc>(IFunc)) {
- assert(cast<llvm::Function>(IFunc)->isDeclaration());
auto *GI = llvm::GlobalIFunc::create(DeclTy, 0, Linkage, "", ResolverFunc,
&getModule());
- GI->takeName(IFunc);
- IFunc->replaceAllUsesWith(GI);
- IFunc->eraseFromParent();
+ replaceDeclarationWith(IFunc, GI);
IFunc = GI;
}
@@ -4386,8 +4449,23 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
}
}
+/// Adds a declaration to the list of multi version functions if not present.
+void CodeGenModule::AddDeferredMultiVersionResolverToEmit(GlobalDecl GD) {
+ const auto *FD = cast<FunctionDecl>(GD.getDecl());
+ assert(FD && "Not a FunctionDecl?");
+
+ if (FD->isTargetVersionMultiVersion() || FD->isTargetClonesMultiVersion()) {
+ std::string MangledName =
+ getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
+ if (!DeferredResolversToEmit.insert(MangledName).second)
+ return;
+ }
+ MultiVersionFuncs.push_back(GD);
+}
+
/// If a dispatcher for the specified mangled name is not in the module, create
-/// and return an llvm Function with the specified type.
+/// and return it. The dispatcher is either an llvm Function with the specified
+/// type, or a global ifunc.
llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(GlobalDecl GD) {
const auto *FD = cast<FunctionDecl>(GD.getDecl());
assert(FD && "Not a FunctionDecl?");
@@ -4399,14 +4477,31 @@ llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(GlobalDecl GD) {
// a separate resolver).
std::string ResolverName = MangledName;
if (getTarget().supportsIFunc()) {
- if (!FD->isTargetClonesMultiVersion())
+ switch (FD->getMultiVersionKind()) {
+ case MultiVersionKind::None:
+ llvm_unreachable("unexpected MultiVersionKind::None for resolver");
+ case MultiVersionKind::Target:
+ case MultiVersionKind::CPUSpecific:
+ case MultiVersionKind::CPUDispatch:
ResolverName += ".ifunc";
+ break;
+ case MultiVersionKind::TargetClones:
+ case MultiVersionKind::TargetVersion:
+ break;
+ }
} else if (FD->isTargetMultiVersion()) {
ResolverName += ".resolver";
}
- // If the resolver has already been created, just return it.
- if (llvm::GlobalValue *ResolverGV = GetGlobalValue(ResolverName))
+ // If the resolver has already been created, just return it. This lookup may
+ // yield a function declaration instead of a resolver on AArch64. That is
+ // because we didn't know whether a resolver will be generated when we first
+ // encountered a use of the symbol named after this resolver. Therefore,
+ // targets which support ifuncs should not return here unless we actually
+ // found an ifunc.
+ llvm::GlobalValue *ResolverGV = GetGlobalValue(ResolverName);
+ if (ResolverGV &&
+ (isa<llvm::GlobalIFunc>(ResolverGV) || !getTarget().supportsIFunc()))
return ResolverGV;
const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
@@ -4415,7 +4510,7 @@ llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(GlobalDecl GD) {
// The resolver needs to be created. For target and target_clones, defer
// creation until the end of the TU.
if (FD->isTargetMultiVersion() || FD->isTargetClonesMultiVersion())
- MultiVersionFuncs.push_back(GD);
+ AddDeferredMultiVersionResolverToEmit(GD);
// For cpu_specific, don't create an ifunc yet because we don't know if the
// cpu_dispatch will be emitted in this translation unit.
@@ -4432,7 +4527,8 @@ llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(GlobalDecl GD) {
"", Resolver, &getModule());
GIF->setName(ResolverName);
SetCommonAttributes(FD, GIF);
-
+ if (ResolverGV)
+ replaceDeclarationWith(ResolverGV, GIF);
return GIF;
}
@@ -4441,9 +4537,24 @@ llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(GlobalDecl GD) {
assert(isa<llvm::GlobalValue>(Resolver) &&
"Resolver should be created for the first time");
SetCommonAttributes(FD, cast<llvm::GlobalValue>(Resolver));
+ if (ResolverGV)
+ replaceDeclarationWith(ResolverGV, Resolver);
return Resolver;
}
+bool CodeGenModule::shouldDropDLLAttribute(const Decl *D,
+ const llvm::GlobalValue *GV) const {
+ auto SC = GV->getDLLStorageClass();
+ if (SC == llvm::GlobalValue::DefaultStorageClass)
+ return false;
+ const Decl *MRD = D->getMostRecentDecl();
+ return (((SC == llvm::GlobalValue::DLLImportStorageClass &&
+ !MRD->hasAttr<DLLImportAttr>()) ||
+ (SC == llvm::GlobalValue::DLLExportStorageClass &&
+ !MRD->hasAttr<DLLExportAttr>())) &&
+ !shouldMapVisibilityToDLLExport(cast<NamedDecl>(MRD)));
+}
+
/// GetOrCreateLLVMFunction - If the specified mangled name is not in the
/// module, create and return an llvm Function with the specified type. If there
/// is something in the module with the specified name, return it potentially
@@ -4457,6 +4568,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
ForDefinition_t IsForDefinition) {
const Decl *D = GD.getDecl();
+ std::string NameWithoutMultiVersionMangling;
// Any attempts to use a MultiVersion function should result in retrieving
// the iFunc instead. Name Mangling will handle the rest of the changes.
if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(D)) {
@@ -4478,11 +4590,24 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
if (FD->isMultiVersion()) {
UpdateMultiVersionNames(GD, FD, MangledName);
- if (!IsForDefinition)
- return GetOrCreateMultiVersionResolver(GD);
+ if (!IsForDefinition) {
+ // On AArch64 we do not immediatelly emit an ifunc resolver when a
+ // function is used. Instead we defer the emission until we see a
+ // default definition. In the meantime we just reference the symbol
+ // without FMV mangling (it may or may not be replaced later).
+ if (getTarget().getTriple().isAArch64()) {
+ AddDeferredMultiVersionResolverToEmit(GD);
+ NameWithoutMultiVersionMangling = getMangledNameImpl(
+ *this, GD, FD, /*OmitMultiVersionMangling=*/true);
+ } else
+ return GetOrCreateMultiVersionResolver(GD);
+ }
}
}
+ if (!NameWithoutMultiVersionMangling.empty())
+ MangledName = NameWithoutMultiVersionMangling;
+
// Lookup the entry, lazily creating it if necessary.
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry) {
@@ -4493,8 +4618,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
}
// Handle dropped DLL attributes.
- if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>() &&
- !shouldMapVisibilityToDLLExport(cast_or_null<NamedDecl>(D))) {
+ if (D && shouldDropDLLAttribute(D, Entry)) {
Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
setDSOLocal(Entry);
}
@@ -4753,6 +4877,10 @@ CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
}
}
setDSOLocal(F);
+ // FIXME: We should use CodeGenModule::SetLLVMFunctionAttributes() instead
+ // of trying to approximate the attributes using the LLVM function
+ // signature. This requires revising the API of CreateRuntimeFunction().
+ markRegisterParameterAttributes(F);
}
}
@@ -4784,8 +4912,7 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
}
// Handle dropped DLL attributes.
- if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>() &&
- !shouldMapVisibilityToDLLExport(D))
+ if (D && shouldDropDLLAttribute(D, Entry))
Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
if (LangOpts.OpenMP && !LangOpts.OpenMPSimd && D)
@@ -5090,8 +5217,11 @@ void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
EmitGlobalVarDefinition(D);
}
-void CodeGenModule::EmitExternalDeclaration(const VarDecl *D) {
- EmitExternalVarDeclaration(D);
+void CodeGenModule::EmitExternalDeclaration(const DeclaratorDecl *D) {
+ if (auto const *V = dyn_cast<const VarDecl>(D))
+ EmitExternalVarDeclaration(V);
+ if (auto const *FD = dyn_cast<const FunctionDecl>(D))
+ EmitExternalFunctionDeclaration(FD);
}
CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
@@ -5249,6 +5379,10 @@ void CodeGenModule::maybeSetTrivialComdat(const Decl &D,
GO.setComdat(TheModule.getOrInsertComdat(GO.getName()));
}
+const ABIInfo &CodeGenModule::getABIInfo() {
+ return getTargetCodeGenInfo().getABIInfo();
+}
+
/// Pass IsTentative as true if you want to create a tentative definition.
void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
bool IsTentative) {
@@ -5275,6 +5409,18 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
!IsDefinitionAvailableExternally &&
D->needsDestruction(getContext()) == QualType::DK_cxx_destructor;
+ // It is helpless to emit the definition for an available_externally variable
+ // which can't be marked as const.
+ // We don't need to check if it needs global ctor or dtor. See the above
+ // comment for ideas.
+ if (IsDefinitionAvailableExternally &&
+ (!D->hasConstantInitialization() ||
+ // TODO: Update this when we have interface to check constexpr
+ // destructor.
+ D->needsDestruction(getContext()) ||
+ !D->getType().isConstantStorage(getContext(), true, true)))
+ return;
+
const VarDecl *InitDecl;
const Expr *InitExpr = D->getAnyInitializer(InitDecl);
@@ -5515,6 +5661,18 @@ void CodeGenModule::EmitExternalVarDeclaration(const VarDecl *D) {
}
}
+void CodeGenModule::EmitExternalFunctionDeclaration(const FunctionDecl *FD) {
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ if (getCodeGenOpts().hasReducedDebugInfo()) {
+ auto *Ty = getTypes().ConvertType(FD->getType());
+ StringRef MangledName = getMangledName(FD);
+ auto *Fn = dyn_cast<llvm::Function>(
+ GetOrCreateLLVMFunction(MangledName, Ty, FD, /* ForVTable */ false));
+ if (!Fn->getSubprogram())
+ DI->EmitFunctionDecl(FD, FD->getLocation(), FD->getType(), Fn);
+ }
+}
+
static bool isVarDeclStrongDefinition(const ASTContext &Context,
CodeGenModule &CGM, const VarDecl *D,
bool NoCommon) {
@@ -5674,15 +5832,17 @@ CodeGenModule::getLLVMLinkageVarDefinition(const VarDecl *VD) {
static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
llvm::Function *newFn) {
// Fast path.
- if (old->use_empty()) return;
+ if (old->use_empty())
+ return;
llvm::Type *newRetTy = newFn->getReturnType();
- SmallVector<llvm::Value*, 4> newArgs;
+ SmallVector<llvm::Value *, 4> newArgs;
+
+ SmallVector<llvm::CallBase *> callSitesToBeRemovedFromParent;
for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end();
- ui != ue; ) {
- llvm::Value::use_iterator use = ui++; // Increment before the use is erased.
- llvm::User *user = use->getUser();
+ ui != ue; ui++) {
+ llvm::User *user = ui->getUser();
// Recognize and replace uses of bitcasts. Most calls to
// unprototyped functions will use bitcasts.
@@ -5694,8 +5854,9 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
// Recognize calls to the function.
llvm::CallBase *callSite = dyn_cast<llvm::CallBase>(user);
- if (!callSite) continue;
- if (!callSite->isCallee(&*use))
+ if (!callSite)
+ continue;
+ if (!callSite->isCallee(&*ui))
continue;
// If the return types don't match exactly, then we can't
@@ -5764,6 +5925,10 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
if (callSite->getDebugLoc())
newCall->setDebugLoc(callSite->getDebugLoc());
+ callSitesToBeRemovedFromParent.push_back(callSite);
+ }
+
+ for (auto *callSite : callSitesToBeRemovedFromParent) {
callSite->eraseFromParent();
}
}
@@ -5787,7 +5952,8 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
auto DK = VD->isThisDeclarationADefinition();
- if (DK == VarDecl::Definition && VD->hasAttr<DLLImportAttr>())
+ if ((DK == VarDecl::Definition && VD->hasAttr<DLLImportAttr>()) ||
+ (LangOpts.CUDA && !shouldEmitCUDAGlobalVar(VD)))
return;
TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind();
@@ -5959,11 +6125,14 @@ void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) {
Aliases.push_back(GD);
- llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
- llvm::Type *ResolverTy = llvm::GlobalIFunc::getResolverFunctionType(DeclTy);
+ // The resolver might not be visited yet. Specify a dummy non-function type to
+ // indicate IsIncompleteFunction. Either the type is ignored (if the resolver
+ // was emitted) or the whole function will be replaced (if the resolver has
+ // not been emitted).
llvm::Constant *Resolver =
- GetOrCreateLLVMFunction(IFA->getResolver(), ResolverTy, {},
+ GetOrCreateLLVMFunction(IFA->getResolver(), VoidTy, {},
/*ForVTable=*/false);
+ llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
llvm::GlobalIFunc *GIF =
llvm::GlobalIFunc::create(DeclTy, 0, llvm::Function::ExternalLinkage,
"", Resolver, &getModule());
@@ -5987,9 +6156,6 @@ void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) {
Entry->eraseFromParent();
} else
GIF->setName(MangledName);
- if (auto *F = dyn_cast<llvm::Function>(Resolver)) {
- F->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
- }
SetCommonAttributes(GD, GIF);
}
@@ -6046,9 +6212,6 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
return ConstantAddress(
C, C->getValueType(), CharUnits::fromQuantity(C->getAlignment()));
- llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
- llvm::Constant *Zeros[] = { Zero, Zero };
-
const ASTContext &Context = getContext();
const llvm::Triple &Triple = getTriple();
@@ -6119,8 +6282,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
// Decay array -> ptr
CFConstantStringClassRef =
- IsSwiftABI ? llvm::ConstantExpr::getPtrToInt(C, Ty)
- : llvm::ConstantExpr::getGetElementPtr(Ty, C, Zeros);
+ IsSwiftABI ? llvm::ConstantExpr::getPtrToInt(C, Ty) : C;
}
QualType CFTy = Context.getCFConstantStringType();
@@ -6176,10 +6338,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
GV->setSection(".rodata");
// String.
- llvm::Constant *Str =
- llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);
-
- Fields.add(Str);
+ Fields.add(GV);
// String length.
llvm::IntegerType *LengthTy =
@@ -6273,7 +6432,7 @@ CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) {
// Resize the string to the right size, which is indicated by its type.
const ConstantArrayType *CAT = Context.getAsConstantArrayType(E->getType());
assert(CAT && "String literal not of constant array type!");
- Str.resize(CAT->getSize().getZExtValue());
+ Str.resize(CAT->getZExtSize());
return llvm::ConstantDataArray::getString(VMContext, Str, false);
}
@@ -6330,7 +6489,8 @@ GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
ConstantAddress
CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
StringRef Name) {
- CharUnits Alignment = getContext().getAlignOfGlobalVarInChars(S->getType());
+ CharUnits Alignment =
+ getContext().getAlignOfGlobalVarInChars(S->getType(), /*VD=*/nullptr);
llvm::Constant *C = GetConstantArrayFromStringLiteral(S);
llvm::GlobalVariable **Entry = nullptr;
@@ -6393,8 +6553,8 @@ CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
ConstantAddress CodeGenModule::GetAddrOfConstantCString(
const std::string &Str, const char *GlobalName) {
StringRef StrWithNull(Str.c_str(), Str.size() + 1);
- CharUnits Alignment =
- getContext().getAlignOfGlobalVarInChars(getContext().CharTy);
+ CharUnits Alignment = getContext().getAlignOfGlobalVarInChars(
+ getContext().CharTy, /*VD=*/nullptr);
llvm::Constant *C =
llvm::ConstantDataArray::getString(getLLVMContext(), StrWithNull, false);
@@ -6609,7 +6769,7 @@ static bool AllTrivialInitializers(CodeGenModule &CGM,
void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
// We might need a .cxx_destruct even if we don't have any ivar initializers.
if (needsDestructMethod(D)) {
- IdentifierInfo *II = &getContext().Idents.get(".cxx_destruct");
+ const IdentifierInfo *II = &getContext().Idents.get(".cxx_destruct");
Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
ObjCMethodDecl *DTORMethod = ObjCMethodDecl::Create(
getContext(), D->getLocation(), D->getLocation(), cxxSelector,
@@ -6629,7 +6789,7 @@ void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
AllTrivialInitializers(*this, D))
return;
- IdentifierInfo *II = &getContext().Idents.get(".cxx_construct");
+ const IdentifierInfo *II = &getContext().Idents.get(".cxx_construct");
Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
// The constructor returns 'self'.
ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create(
@@ -6920,8 +7080,8 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
// For C++ standard modules we are done - we will call the module
// initializer for imported modules, and that will likewise call those for
// any imports it has.
- if (CXX20ModuleInits && Import->getImportedOwningModule() &&
- !Import->getImportedOwningModule()->isModuleMapModule())
+ if (CXX20ModuleInits && Import->getImportedModule() &&
+ Import->getImportedModule()->isNamedModule())
break;
// For clang C++ module map modules the initializers for sub-modules are
@@ -7027,6 +7187,9 @@ void CodeGenModule::AddDeferredUnusedCoverageMapping(Decl *D) {
SourceManager &SM = getContext().getSourceManager();
if (LimitedCoverage && SM.getMainFileID() != SM.getFileID(D->getBeginLoc()))
break;
+ if (!llvm::coverage::SystemHeadersCoverage &&
+ SM.isInSystemHeader(D->getBeginLoc()))
+ break;
DeferredEmptyCoverageMappingDecls.try_emplace(D, true);
break;
}
@@ -7197,7 +7360,7 @@ void CodeGenModule::EmitStaticExternCAliases() {
if (!getTargetCodeGenInfo().shouldEmitStaticExternCAliases())
return;
for (auto &I : StaticExternCValues) {
- IdentifierInfo *Name = I.first;
+ const IdentifierInfo *Name = I.first;
llvm::GlobalValue *Val = I.second;
// If Val is null, that implies there were multiple declarations that each
@@ -7258,7 +7421,7 @@ void CodeGenFunction::EmitDeclMetadata() {
for (auto &I : LocalDeclMap) {
const Decl *D = I.first;
- llvm::Value *Addr = I.second.getPointer();
+ llvm::Value *Addr = I.second.emitRawPointer(*this);
if (auto *Alloca = dyn_cast<llvm::AllocaInst>(Addr)) {
llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D);
Alloca->setMetadata(
@@ -7628,7 +7791,5 @@ void CodeGenModule::moveLazyEmissionStates(CodeGenModule *NewBuilder) {
NewBuilder->WeakRefReferences = std::move(WeakRefReferences);
- NewBuilder->TBAA = std::move(TBAA);
-
NewBuilder->ABI->MangleCtx = std::move(ABI->MangleCtx);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
index ec34680fd3f7..c58bb88035ca 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
@@ -24,7 +24,6 @@
#include "clang/AST/Mangle.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/LangOptions.h"
-#include "clang/Basic/Module.h"
#include "clang/Basic/NoSanitizeList.h"
#include "clang/Basic/ProfileList.h"
#include "clang/Basic/TargetInfo.h"
@@ -70,6 +69,7 @@ class Expr;
class Stmt;
class StringLiteral;
class NamedDecl;
+class PointerAuthSchema;
class ValueDecl;
class VarDecl;
class LangOptions;
@@ -320,7 +320,7 @@ private:
// This should not be moved earlier, since its initialization depends on some
// of the previous reference members being already initialized and also checks
// if TheTargetCodeGenInfo is NULL
- CodeGenTypes Types;
+ std::unique_ptr<CodeGenTypes> Types;
/// Holds information about C++ vtables.
CodeGenVTables VTables;
@@ -348,6 +348,8 @@ private:
/// yet.
llvm::DenseMap<StringRef, GlobalDecl> DeferredDecls;
+ llvm::StringSet<llvm::BumpPtrAllocator> DeferredResolversToEmit;
+
/// This is a list of deferred decls which we have seen that *are* actually
/// referenced. These get code generated when the module is done.
std::vector<GlobalDecl> DeferredDeclsToEmit;
@@ -433,7 +435,7 @@ private:
// Store deferred function annotations so they can be emitted at the end with
// most up to date ValueDecl that will have all the inherited annotations.
- llvm::DenseMap<StringRef, const ValueDecl *> DeferredAnnotations;
+ llvm::MapVector<StringRef, const ValueDecl *> DeferredAnnotations;
/// Map used to get unique annotation strings.
llvm::StringMap<llvm::Constant*> AnnotationStrings;
@@ -483,6 +485,14 @@ private:
typedef std::pair<OrderGlobalInitsOrStermFinalizers, llvm::Function *>
GlobalInitData;
+ // When a tail call is performed on an "undefined" symbol, on PPC without pc
+ // relative feature, the tail call is not allowed. In "EmitCall" for such
+ // tail calls, the "undefined" symbols may be forward declarations, their
+ // definitions are provided in the module after the callsites. For such tail
+ // calls, diagnose message should not be emitted.
+ llvm::SmallSetVector<std::pair<const FunctionDecl *, SourceLocation>, 4>
+ MustTailCallUndefinedGlobals;
+
struct GlobalInitPriorityCmp {
bool operator()(const GlobalInitData &LHS,
const GlobalInitData &RHS) const {
@@ -553,6 +563,9 @@ private:
bool isTriviallyRecursive(const FunctionDecl *F);
bool shouldEmitFunction(GlobalDecl GD);
+ // Whether a global variable should be emitted by CUDA/HIP host/device
+ // related attributes.
+ bool shouldEmitCUDAGlobalVar(const VarDecl *VD) const;
bool shouldOpportunisticallyEmitVTables();
/// Map used to be sure we don't emit the same CompoundLiteral twice.
llvm::DenseMap<const CompoundLiteralExpr *, llvm::GlobalVariable *>
@@ -607,6 +620,13 @@ private:
std::pair<std::unique_ptr<CodeGenFunction>, const TopLevelStmtDecl *>
GlobalTopLevelStmtBlockInFlight;
+ llvm::DenseMap<GlobalDecl, uint16_t> PtrAuthDiscriminatorHashes;
+
+ llvm::DenseMap<const CXXRecordDecl *, std::optional<PointerAuthQualifier>>
+ VTablePtrAuthInfos;
+ std::optional<PointerAuthQualifier>
+ computeVTPointerAuthentication(const CXXRecordDecl *ThisClass);
+
public:
CodeGenModule(ASTContext &C, IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
const HeaderSearchOptions &headersearchopts,
@@ -756,6 +776,7 @@ public:
bool supportsCOMDAT() const;
void maybeSetTrivialComdat(const Decl &D, llvm::GlobalObject &GO);
+ const ABIInfo &getABIInfo();
CGCXXABI &getCXXABI() const { return *ABI; }
llvm::LLVMContext &getLLVMContext() { return VMContext; }
@@ -763,7 +784,7 @@ public:
const TargetCodeGenInfo &getTargetCodeGenInfo();
- CodeGenTypes &getTypes() { return Types; }
+ CodeGenTypes &getTypes() { return *Types; }
CodeGenVTables &getVTables() { return VTables; }
@@ -936,11 +957,69 @@ public:
// Return the function body address of the given function.
llvm::Constant *GetFunctionStart(const ValueDecl *Decl);
+ /// Return a function pointer for a reference to the given function.
+ /// This correctly handles weak references, but does not apply a
+ /// pointer signature.
+ llvm::Constant *getRawFunctionPointer(GlobalDecl GD,
+ llvm::Type *Ty = nullptr);
+
+ /// Return the ABI-correct function pointer value for a reference
+ /// to the given function. This will apply a pointer signature if
+ /// necessary, caching the result for the given function.
+ llvm::Constant *getFunctionPointer(GlobalDecl GD, llvm::Type *Ty = nullptr);
+
+ /// Return the ABI-correct function pointer value for a reference
+ /// to the given function. This will apply a pointer signature if
+ /// necessary.
+ llvm::Constant *getFunctionPointer(llvm::Constant *Pointer,
+ QualType FunctionType);
+
+ llvm::Constant *getMemberFunctionPointer(const FunctionDecl *FD,
+ llvm::Type *Ty = nullptr);
+
+ llvm::Constant *getMemberFunctionPointer(llvm::Constant *Pointer,
+ QualType FT);
+
+ CGPointerAuthInfo getFunctionPointerAuthInfo(QualType T);
+
+ CGPointerAuthInfo getMemberFunctionPointerAuthInfo(QualType FT);
+
+ CGPointerAuthInfo getPointerAuthInfoForPointeeType(QualType type);
+
+ CGPointerAuthInfo getPointerAuthInfoForType(QualType type);
+
+ bool shouldSignPointer(const PointerAuthSchema &Schema);
+ llvm::Constant *getConstantSignedPointer(llvm::Constant *Pointer,
+ const PointerAuthSchema &Schema,
+ llvm::Constant *StorageAddress,
+ GlobalDecl SchemaDecl,
+ QualType SchemaType);
+
+ llvm::Constant *
+ getConstantSignedPointer(llvm::Constant *Pointer, unsigned Key,
+ llvm::Constant *StorageAddress,
+ llvm::ConstantInt *OtherDiscriminator);
+
+ llvm::ConstantInt *
+ getPointerAuthOtherDiscriminator(const PointerAuthSchema &Schema,
+ GlobalDecl SchemaDecl, QualType SchemaType);
+
+ uint16_t getPointerAuthDeclDiscriminator(GlobalDecl GD);
+ std::optional<CGPointerAuthInfo>
+ getVTablePointerAuthInfo(CodeGenFunction *Context,
+ const CXXRecordDecl *Record,
+ llvm::Value *StorageAddress);
+
+ std::optional<PointerAuthQualifier>
+ getVTablePointerAuthentication(const CXXRecordDecl *thisClass);
+
+ CGPointerAuthInfo EmitPointerAuthInfo(const RecordDecl *RD);
+
// Return whether RTTI information should be emitted for this target.
bool shouldEmitRTTI(bool ForEH = false) {
return (ForEH || getLangOpts().RTTI) && !getLangOpts().CUDAIsDevice &&
!(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
- getTriple().isNVPTX());
+ (getTriple().isNVPTX() || getTriple().isAMDGPU()));
}
/// Get the address of the RTTI descriptor for the given type.
@@ -1239,6 +1318,9 @@ public:
/// Return true iff the given type uses 'sret' when used as a return type.
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI);
+ /// Return true iff the given type has `inreg` set.
+ bool ReturnTypeHasInReg(const CGFunctionInfo &FI);
+
/// Return true iff the given type uses an argument slot when 'sret' is used
/// as a return type.
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI);
@@ -1280,7 +1362,7 @@ public:
void EmitTentativeDefinition(const VarDecl *D);
- void EmitExternalDeclaration(const VarDecl *D);
+ void EmitExternalDeclaration(const DeclaratorDecl *D);
void EmitVTable(CXXRecordDecl *Class);
@@ -1581,13 +1663,31 @@ public:
void AddGlobalDtor(llvm::Function *Dtor, int Priority = 65535,
bool IsDtorAttrFunc = false);
+ // Return whether structured convergence intrinsics should be generated for
+ // this target.
+ bool shouldEmitConvergenceTokens() const {
+ // TODO: this should probably become unconditional once the controlled
+ // convergence becomes the norm.
+ return getTriple().isSPIRVLogical();
+ }
+
+ void addUndefinedGlobalForTailCall(
+ std::pair<const FunctionDecl *, SourceLocation> Global) {
+ MustTailCallUndefinedGlobals.insert(Global);
+ }
+
private:
+ bool shouldDropDLLAttribute(const Decl *D, const llvm::GlobalValue *GV) const;
+
llvm::Constant *GetOrCreateLLVMFunction(
StringRef MangledName, llvm::Type *Ty, GlobalDecl D, bool ForVTable,
bool DontDefer = false, bool IsThunk = false,
llvm::AttributeList ExtraAttrs = llvm::AttributeList(),
ForDefinition_t IsForDefinition = NotForDefinition);
+ // Adds a declaration to the list of multi version functions if not present.
+ void AddDeferredMultiVersionResolverToEmit(GlobalDecl GD);
+
// References to multiversion functions are resolved through an implicitly
// defined resolver function. This function is responsible for creating
// the resolver symbol for the provided declaration. The value returned
@@ -1619,6 +1719,7 @@ private:
void EmitGlobalVarDefinition(const VarDecl *D, bool IsTentative = false);
void EmitExternalVarDeclaration(const VarDecl *D);
+ void EmitExternalFunctionDeclaration(const FunctionDecl *D);
void EmitAliasDefinition(GlobalDecl GD);
void emitIFuncDefinition(GlobalDecl GD);
void emitCPUDispatchDefinition(GlobalDecl GD);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
index fb4e86e8bd80..cfcdb5911b58 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
@@ -23,13 +23,15 @@
#include "llvm/Support/MD5.h"
#include <optional>
+namespace llvm {
+extern cl::opt<bool> EnableSingleByteCoverage;
+} // namespace llvm
+
static llvm::cl::opt<bool>
EnableValueProfiling("enable-value-profiling",
llvm::cl::desc("Enable value profiling"),
llvm::cl::Hidden, llvm::cl::init(false));
-extern llvm::cl::opt<bool> SystemHeadersCoverage;
-
using namespace clang;
using namespace CodeGen;
@@ -163,10 +165,8 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
PGOHash Hash;
/// The map of statements to counters.
llvm::DenseMap<const Stmt *, unsigned> &CounterMap;
- /// The next bitmap byte index to assign.
- unsigned NextMCDCBitmapIdx;
- /// The map of statements to MC/DC bitmap coverage objects.
- llvm::DenseMap<const Stmt *, unsigned> &MCDCBitmapMap;
+ /// The state of MC/DC Coverage in this function.
+ MCDC::State &MCDCState;
/// Maximum number of supported MC/DC conditions in a boolean expression.
unsigned MCDCMaxCond;
/// The profile version.
@@ -176,11 +176,11 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
MapRegionCounters(PGOHashVersion HashVersion, uint64_t ProfileVersion,
llvm::DenseMap<const Stmt *, unsigned> &CounterMap,
- llvm::DenseMap<const Stmt *, unsigned> &MCDCBitmapMap,
- unsigned MCDCMaxCond, DiagnosticsEngine &Diag)
+ MCDC::State &MCDCState, unsigned MCDCMaxCond,
+ DiagnosticsEngine &Diag)
: NextCounter(0), Hash(HashVersion), CounterMap(CounterMap),
- NextMCDCBitmapIdx(0), MCDCBitmapMap(MCDCBitmapMap),
- MCDCMaxCond(MCDCMaxCond), ProfileVersion(ProfileVersion), Diag(Diag) {}
+ MCDCState(MCDCState), MCDCMaxCond(MCDCMaxCond),
+ ProfileVersion(ProfileVersion), Diag(Diag) {}
// Blocks and lambdas are handled as separate functions, so we need not
// traverse them in the parent context.
@@ -310,11 +310,8 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
return true;
}
- // Otherwise, allocate the number of bytes required for the bitmap
- // based on the number of conditions. Must be at least 1-byte long.
- MCDCBitmapMap[BinOp] = NextMCDCBitmapIdx;
- unsigned SizeInBits = std::max<unsigned>(1L << NumCond, CHAR_BIT);
- NextMCDCBitmapIdx += SizeInBits / CHAR_BIT;
+ // Otherwise, allocate the Decision.
+ MCDCState.DecisionByStmt[BinOp].BitmapIdx = 0;
}
return true;
}
@@ -346,6 +343,14 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
return Base::VisitBinaryOperator(S);
}
+ bool VisitConditionalOperator(ConditionalOperator *S) {
+ if (llvm::EnableSingleByteCoverage && S->getTrueExpr())
+ CounterMap[S->getTrueExpr()] = NextCounter++;
+ if (llvm::EnableSingleByteCoverage && S->getFalseExpr())
+ CounterMap[S->getFalseExpr()] = NextCounter++;
+ return Base::VisitConditionalOperator(S);
+ }
+
/// Include \p S in the function hash.
bool VisitStmt(Stmt *S) {
auto Type = updateCounterMappings(S);
@@ -361,8 +366,21 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
if (Hash.getHashVersion() == PGO_HASH_V1)
return Base::TraverseIfStmt(If);
+ // When single byte coverage mode is enabled, add a counter to then and
+ // else.
+ bool NoSingleByteCoverage = !llvm::EnableSingleByteCoverage;
+ for (Stmt *CS : If->children()) {
+ if (!CS || NoSingleByteCoverage)
+ continue;
+ if (CS == If->getThen())
+ CounterMap[If->getThen()] = NextCounter++;
+ else if (CS == If->getElse())
+ CounterMap[If->getElse()] = NextCounter++;
+ }
+
// Otherwise, keep track of which branch we're in while traversing.
VisitStmt(If);
+
for (Stmt *CS : If->children()) {
if (!CS)
continue;
@@ -376,6 +394,81 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
return true;
}
+ bool TraverseWhileStmt(WhileStmt *While) {
+ // When single byte coverage mode is enabled, add a counter to condition and
+ // body.
+ bool NoSingleByteCoverage = !llvm::EnableSingleByteCoverage;
+ for (Stmt *CS : While->children()) {
+ if (!CS || NoSingleByteCoverage)
+ continue;
+ if (CS == While->getCond())
+ CounterMap[While->getCond()] = NextCounter++;
+ else if (CS == While->getBody())
+ CounterMap[While->getBody()] = NextCounter++;
+ }
+
+ Base::TraverseWhileStmt(While);
+ if (Hash.getHashVersion() != PGO_HASH_V1)
+ Hash.combine(PGOHash::EndOfScope);
+ return true;
+ }
+
+ bool TraverseDoStmt(DoStmt *Do) {
+ // When single byte coverage mode is enabled, add a counter to condition and
+ // body.
+ bool NoSingleByteCoverage = !llvm::EnableSingleByteCoverage;
+ for (Stmt *CS : Do->children()) {
+ if (!CS || NoSingleByteCoverage)
+ continue;
+ if (CS == Do->getCond())
+ CounterMap[Do->getCond()] = NextCounter++;
+ else if (CS == Do->getBody())
+ CounterMap[Do->getBody()] = NextCounter++;
+ }
+
+ Base::TraverseDoStmt(Do);
+ if (Hash.getHashVersion() != PGO_HASH_V1)
+ Hash.combine(PGOHash::EndOfScope);
+ return true;
+ }
+
+ bool TraverseForStmt(ForStmt *For) {
+ // When single byte coverage mode is enabled, add a counter to condition,
+ // increment and body.
+ bool NoSingleByteCoverage = !llvm::EnableSingleByteCoverage;
+ for (Stmt *CS : For->children()) {
+ if (!CS || NoSingleByteCoverage)
+ continue;
+ if (CS == For->getCond())
+ CounterMap[For->getCond()] = NextCounter++;
+ else if (CS == For->getInc())
+ CounterMap[For->getInc()] = NextCounter++;
+ else if (CS == For->getBody())
+ CounterMap[For->getBody()] = NextCounter++;
+ }
+
+ Base::TraverseForStmt(For);
+ if (Hash.getHashVersion() != PGO_HASH_V1)
+ Hash.combine(PGOHash::EndOfScope);
+ return true;
+ }
+
+ bool TraverseCXXForRangeStmt(CXXForRangeStmt *ForRange) {
+ // When single byte coverage mode is enabled, add a counter to body.
+ bool NoSingleByteCoverage = !llvm::EnableSingleByteCoverage;
+ for (Stmt *CS : ForRange->children()) {
+ if (!CS || NoSingleByteCoverage)
+ continue;
+ if (CS == ForRange->getBody())
+ CounterMap[ForRange->getBody()] = NextCounter++;
+ }
+
+ Base::TraverseCXXForRangeStmt(ForRange);
+ if (Hash.getHashVersion() != PGO_HASH_V1)
+ Hash.combine(PGOHash::EndOfScope);
+ return true;
+ }
+
// If the statement type \p N is nestable, and its nesting impacts profile
// stability, define a custom traversal which tracks the end of the statement
// in the hash (provided we're not using the V1 hash).
@@ -387,10 +480,6 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
return true; \
}
- DEFINE_NESTABLE_TRAVERSAL(WhileStmt)
- DEFINE_NESTABLE_TRAVERSAL(DoStmt)
- DEFINE_NESTABLE_TRAVERSAL(ForStmt)
- DEFINE_NESTABLE_TRAVERSAL(CXXForRangeStmt)
DEFINE_NESTABLE_TRAVERSAL(ObjCForCollectionStmt)
DEFINE_NESTABLE_TRAVERSAL(CXXTryStmt)
DEFINE_NESTABLE_TRAVERSAL(CXXCatchStmt)
@@ -955,13 +1044,17 @@ void CodeGenPGO::assignRegionCounters(GlobalDecl GD, llvm::Function *Fn) {
if (Fn->hasFnAttribute(llvm::Attribute::SkipProfile))
return;
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ if (!llvm::coverage::SystemHeadersCoverage &&
+ SM.isInSystemHeader(D->getLocation()))
+ return;
+
setFuncName(Fn);
mapRegionCounters(D);
if (CGM.getCodeGenOpts().CoverageMapping)
emitCounterRegionMapping(D);
if (PGOReader) {
- SourceManager &SM = CGM.getContext().getSourceManager();
loadRegionCounts(PGOReader, SM.isInMainFile(D->getLocation()));
computeRegionCounts(D);
applyFunctionAttributes(PGOReader, Fn);
@@ -987,13 +1080,14 @@ void CodeGenPGO::mapRegionCounters(const Decl *D) {
// for most embedded applications. Setting a maximum value prevents the
// bitmap footprint from growing too large without the user's knowledge. In
// the future, this value could be adjusted with a command-line option.
- unsigned MCDCMaxConditions = (CGM.getCodeGenOpts().MCDCCoverage) ? 6 : 0;
+ unsigned MCDCMaxConditions =
+ (CGM.getCodeGenOpts().MCDCCoverage ? CGM.getCodeGenOpts().MCDCMaxConds
+ : 0);
RegionCounterMap.reset(new llvm::DenseMap<const Stmt *, unsigned>);
- RegionMCDCBitmapMap.reset(new llvm::DenseMap<const Stmt *, unsigned>);
+ RegionMCDCState.reset(new MCDC::State);
MapRegionCounters Walker(HashVersion, ProfileVersion, *RegionCounterMap,
- *RegionMCDCBitmapMap, MCDCMaxConditions,
- CGM.getDiags());
+ *RegionMCDCState, MCDCMaxConditions, CGM.getDiags());
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
Walker.TraverseDecl(const_cast<FunctionDecl *>(FD));
else if (const ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(D))
@@ -1004,7 +1098,6 @@ void CodeGenPGO::mapRegionCounters(const Decl *D) {
Walker.TraverseDecl(const_cast<CapturedDecl *>(CD));
assert(Walker.NextCounter > 0 && "no entry counter mapped for decl");
NumRegionCounters = Walker.NextCounter;
- MCDCBitmapBytes = Walker.NextMCDCBitmapIdx;
FunctionHash = Walker.Hash.finalize();
}
@@ -1027,7 +1120,7 @@ bool CodeGenPGO::skipRegionMappingForDecl(const Decl *D) {
// Don't map the functions in system headers.
const auto &SM = CGM.getContext().getSourceManager();
auto Loc = D->getBody()->getBeginLoc();
- return !SystemHeadersCoverage && SM.isInSystemHeader(Loc);
+ return !llvm::coverage::SystemHeadersCoverage && SM.isInSystemHeader(Loc);
}
void CodeGenPGO::emitCounterRegionMapping(const Decl *D) {
@@ -1036,11 +1129,10 @@ void CodeGenPGO::emitCounterRegionMapping(const Decl *D) {
std::string CoverageMapping;
llvm::raw_string_ostream OS(CoverageMapping);
- RegionCondIDMap.reset(new llvm::DenseMap<const Stmt *, unsigned>);
+ RegionMCDCState->BranchByStmt.clear();
CoverageMappingGen MappingGen(
*CGM.getCoverageMapping(), CGM.getContext().getSourceManager(),
- CGM.getLangOpts(), RegionCounterMap.get(), RegionMCDCBitmapMap.get(),
- RegionCondIDMap.get());
+ CGM.getLangOpts(), RegionCounterMap.get(), RegionMCDCState.get());
MappingGen.emitCounterMapping(D, OS);
OS.flush();
@@ -1096,8 +1188,8 @@ CodeGenPGO::applyFunctionAttributes(llvm::IndexedInstrProfReader *PGOReader,
Fn->setEntryCount(FunctionCount);
}
-void CodeGenPGO::emitCounterIncrement(CGBuilderTy &Builder, const Stmt *S,
- llvm::Value *StepV) {
+void CodeGenPGO::emitCounterSetOrIncrement(CGBuilderTy &Builder, const Stmt *S,
+ llvm::Value *StepV) {
if (!RegionCounterMap || !Builder.GetInsertBlock())
return;
@@ -1107,13 +1199,18 @@ void CodeGenPGO::emitCounterIncrement(CGBuilderTy &Builder, const Stmt *S,
Builder.getInt64(FunctionHash),
Builder.getInt32(NumRegionCounters),
Builder.getInt32(Counter), StepV};
- if (!StepV)
- Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::instrprof_increment),
+
+ if (llvm::EnableSingleByteCoverage)
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::instrprof_cover),
ArrayRef(Args, 4));
- else
- Builder.CreateCall(
- CGM.getIntrinsic(llvm::Intrinsic::instrprof_increment_step),
- ArrayRef(Args));
+ else {
+ if (!StepV)
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::instrprof_increment),
+ ArrayRef(Args, 4));
+ else
+ Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::instrprof_increment_step), Args);
+ }
}
bool CodeGenPGO::canEmitMCDCCoverage(const CGBuilderTy &Builder) {
@@ -1122,7 +1219,7 @@ bool CodeGenPGO::canEmitMCDCCoverage(const CGBuilderTy &Builder) {
}
void CodeGenPGO::emitMCDCParameters(CGBuilderTy &Builder) {
- if (!canEmitMCDCCoverage(Builder) || !RegionMCDCBitmapMap)
+ if (!canEmitMCDCCoverage(Builder) || !RegionMCDCState)
return;
auto *I8PtrTy = llvm::PointerType::getUnqual(CGM.getLLVMContext());
@@ -1132,25 +1229,31 @@ void CodeGenPGO::emitMCDCParameters(CGBuilderTy &Builder) {
// anything.
llvm::Value *Args[3] = {llvm::ConstantExpr::getBitCast(FuncNameVar, I8PtrTy),
Builder.getInt64(FunctionHash),
- Builder.getInt32(MCDCBitmapBytes)};
+ Builder.getInt32(RegionMCDCState->BitmapBits)};
Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::instrprof_mcdc_parameters), Args);
}
void CodeGenPGO::emitMCDCTestVectorBitmapUpdate(CGBuilderTy &Builder,
const Expr *S,
- Address MCDCCondBitmapAddr) {
- if (!canEmitMCDCCoverage(Builder) || !RegionMCDCBitmapMap)
+ Address MCDCCondBitmapAddr,
+ CodeGenFunction &CGF) {
+ if (!canEmitMCDCCoverage(Builder) || !RegionMCDCState)
return;
S = S->IgnoreParens();
- auto ExprMCDCBitmapMapIterator = RegionMCDCBitmapMap->find(S);
- if (ExprMCDCBitmapMapIterator == RegionMCDCBitmapMap->end())
+ auto DecisionStateIter = RegionMCDCState->DecisionByStmt.find(S);
+ if (DecisionStateIter == RegionMCDCState->DecisionByStmt.end())
return;
- // Extract the ID of the global bitmap associated with this expression.
- unsigned MCDCTestVectorBitmapID = ExprMCDCBitmapMapIterator->second;
+ // Don't create tvbitmap_update if the record is allocated but excluded.
+ // Or `bitmap |= (1 << 0)` would be wrongly executed to the next bitmap.
+ if (DecisionStateIter->second.Indices.size() == 0)
+ return;
+
+ // Extract the offset of the global bitmap associated with this expression.
+ unsigned MCDCTestVectorBitmapOffset = DecisionStateIter->second.BitmapIdx;
auto *I8PtrTy = llvm::PointerType::getUnqual(CGM.getLLVMContext());
// Emit intrinsic responsible for updating the global bitmap corresponding to
@@ -1158,23 +1261,22 @@ void CodeGenPGO::emitMCDCTestVectorBitmapUpdate(CGBuilderTy &Builder,
// from a pointer to a dedicated temporary value on the stack that is itself
// updated via emitMCDCCondBitmapReset() and emitMCDCCondBitmapUpdate(). The
// index represents an executed test vector.
- llvm::Value *Args[5] = {llvm::ConstantExpr::getBitCast(FuncNameVar, I8PtrTy),
+ llvm::Value *Args[4] = {llvm::ConstantExpr::getBitCast(FuncNameVar, I8PtrTy),
Builder.getInt64(FunctionHash),
- Builder.getInt32(MCDCBitmapBytes),
- Builder.getInt32(MCDCTestVectorBitmapID),
- MCDCCondBitmapAddr.getPointer()};
+ Builder.getInt32(MCDCTestVectorBitmapOffset),
+ MCDCCondBitmapAddr.emitRawPointer(CGF)};
Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::instrprof_mcdc_tvbitmap_update), Args);
}
void CodeGenPGO::emitMCDCCondBitmapReset(CGBuilderTy &Builder, const Expr *S,
Address MCDCCondBitmapAddr) {
- if (!canEmitMCDCCoverage(Builder) || !RegionMCDCBitmapMap)
+ if (!canEmitMCDCCoverage(Builder) || !RegionMCDCState)
return;
S = S->IgnoreParens();
- if (RegionMCDCBitmapMap->find(S) == RegionMCDCBitmapMap->end())
+ if (!RegionMCDCState->DecisionByStmt.contains(S))
return;
// Emit intrinsic that resets a dedicated temporary value on the stack to 0.
@@ -1183,8 +1285,9 @@ void CodeGenPGO::emitMCDCCondBitmapReset(CGBuilderTy &Builder, const Expr *S,
void CodeGenPGO::emitMCDCCondBitmapUpdate(CGBuilderTy &Builder, const Expr *S,
Address MCDCCondBitmapAddr,
- llvm::Value *Val) {
- if (!canEmitMCDCCoverage(Builder) || !RegionCondIDMap)
+ llvm::Value *Val,
+ CodeGenFunction &CGF) {
+ if (!canEmitMCDCCoverage(Builder) || !RegionMCDCState)
return;
// Even though, for simplicity, parentheses and unary logical-NOT operators
@@ -1196,26 +1299,29 @@ void CodeGenPGO::emitMCDCCondBitmapUpdate(CGBuilderTy &Builder, const Expr *S,
// also make debugging a bit easier.
S = CodeGenFunction::stripCond(S);
- auto ExprMCDCConditionIDMapIterator = RegionCondIDMap->find(S);
- if (ExprMCDCConditionIDMapIterator == RegionCondIDMap->end())
+ auto BranchStateIter = RegionMCDCState->BranchByStmt.find(S);
+ if (BranchStateIter == RegionMCDCState->BranchByStmt.end())
return;
// Extract the ID of the condition we are setting in the bitmap.
- unsigned CondID = ExprMCDCConditionIDMapIterator->second;
- assert(CondID > 0 && "Condition has no ID!");
+ const auto &Branch = BranchStateIter->second;
+ assert(Branch.ID >= 0 && "Condition has no ID!");
+ assert(Branch.DecisionStmt);
+
+ // Cancel the emission if the Decision is erased after the allocation.
+ const auto DecisionIter =
+ RegionMCDCState->DecisionByStmt.find(Branch.DecisionStmt);
+ if (DecisionIter == RegionMCDCState->DecisionByStmt.end())
+ return;
- auto *I8PtrTy = llvm::PointerType::getUnqual(CGM.getLLVMContext());
+ const auto &TVIdxs = DecisionIter->second.Indices[Branch.ID];
- // Emit intrinsic that updates a dedicated temporary value on the stack after
- // a condition is evaluated. After the set of conditions has been updated,
- // the resulting value is used to update the boolean expression's bitmap.
- llvm::Value *Args[5] = {llvm::ConstantExpr::getBitCast(FuncNameVar, I8PtrTy),
- Builder.getInt64(FunctionHash),
- Builder.getInt32(CondID - 1),
- MCDCCondBitmapAddr.getPointer(), Val};
- Builder.CreateCall(
- CGM.getIntrinsic(llvm::Intrinsic::instrprof_mcdc_condbitmap_update),
- Args);
+ auto *CurTV = Builder.CreateLoad(MCDCCondBitmapAddr,
+ "mcdc." + Twine(Branch.ID + 1) + ".cur");
+ auto *NewTV = Builder.CreateAdd(CurTV, Builder.getInt32(TVIdxs[true]));
+ NewTV = Builder.CreateSelect(
+ Val, NewTV, Builder.CreateAdd(CurTV, Builder.getInt32(TVIdxs[false])));
+ Builder.CreateStore(NewTV, MCDCCondBitmapAddr);
}
void CodeGenPGO::setValueProfilingFlag(llvm::Module &M) {
@@ -1224,6 +1330,30 @@ void CodeGenPGO::setValueProfilingFlag(llvm::Module &M) {
uint32_t(EnableValueProfiling));
}
+void CodeGenPGO::setProfileVersion(llvm::Module &M) {
+ if (CGM.getCodeGenOpts().hasProfileClangInstr() &&
+ llvm::EnableSingleByteCoverage) {
+ const StringRef VarName(INSTR_PROF_QUOTE(INSTR_PROF_RAW_VERSION_VAR));
+ llvm::Type *IntTy64 = llvm::Type::getInt64Ty(M.getContext());
+ uint64_t ProfileVersion =
+ (INSTR_PROF_RAW_VERSION | VARIANT_MASK_BYTE_COVERAGE);
+
+ auto IRLevelVersionVariable = new llvm::GlobalVariable(
+ M, IntTy64, true, llvm::GlobalValue::WeakAnyLinkage,
+ llvm::Constant::getIntegerValue(IntTy64,
+ llvm::APInt(64, ProfileVersion)),
+ VarName);
+
+ IRLevelVersionVariable->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ llvm::Triple TT(M.getTargetTriple());
+ if (TT.supportsCOMDAT()) {
+ IRLevelVersionVariable->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ IRLevelVersionVariable->setComdat(M.getOrInsertComdat(VarName));
+ }
+ IRLevelVersionVariable->setDSOLocal(true);
+ }
+}
+
// This method either inserts a call to the profile run-time during
// instrumentation or puts profile data into metadata for PGO use.
void CodeGenPGO::valueProfile(CGBuilderTy &Builder, uint32_t ValueKind,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.h
index 6596b6c35277..9d66ffad6f43 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.h
@@ -16,6 +16,7 @@
#include "CGBuilder.h"
#include "CodeGenModule.h"
#include "CodeGenTypes.h"
+#include "MCDCState.h"
#include "llvm/ProfileData/InstrProfReader.h"
#include <array>
#include <memory>
@@ -33,21 +34,18 @@ private:
std::array <unsigned, llvm::IPVK_Last + 1> NumValueSites;
unsigned NumRegionCounters;
- unsigned MCDCBitmapBytes;
uint64_t FunctionHash;
std::unique_ptr<llvm::DenseMap<const Stmt *, unsigned>> RegionCounterMap;
- std::unique_ptr<llvm::DenseMap<const Stmt *, unsigned>> RegionMCDCBitmapMap;
- std::unique_ptr<llvm::DenseMap<const Stmt *, unsigned>> RegionCondIDMap;
std::unique_ptr<llvm::DenseMap<const Stmt *, uint64_t>> StmtCountMap;
std::unique_ptr<llvm::InstrProfRecord> ProfRecord;
+ std::unique_ptr<MCDC::State> RegionMCDCState;
std::vector<uint64_t> RegionCounts;
uint64_t CurrentRegionCount;
public:
CodeGenPGO(CodeGenModule &CGModule)
: CGM(CGModule), FuncNameVar(nullptr), NumValueSites({{0}}),
- NumRegionCounters(0), MCDCBitmapBytes(0), FunctionHash(0),
- CurrentRegionCount(0) {}
+ NumRegionCounters(0), FunctionHash(0), CurrentRegionCount(0) {}
/// Whether or not we have PGO region data for the current function. This is
/// false both when we have no data at all and when our data has been
@@ -96,6 +94,8 @@ public:
// Set a module flag indicating if value profiling is enabled.
void setValueProfilingFlag(llvm::Module &M);
+ void setProfileVersion(llvm::Module &M);
+
private:
void setFuncName(llvm::Function *Fn);
void setFuncName(StringRef Name, llvm::GlobalValue::LinkageTypes Linkage);
@@ -110,15 +110,17 @@ private:
bool canEmitMCDCCoverage(const CGBuilderTy &Builder);
public:
- void emitCounterIncrement(CGBuilderTy &Builder, const Stmt *S,
- llvm::Value *StepV);
+ void emitCounterSetOrIncrement(CGBuilderTy &Builder, const Stmt *S,
+ llvm::Value *StepV);
void emitMCDCTestVectorBitmapUpdate(CGBuilderTy &Builder, const Expr *S,
- Address MCDCCondBitmapAddr);
+ Address MCDCCondBitmapAddr,
+ CodeGenFunction &CGF);
void emitMCDCParameters(CGBuilderTy &Builder);
void emitMCDCCondBitmapReset(CGBuilderTy &Builder, const Expr *S,
Address MCDCCondBitmapAddr);
void emitMCDCCondBitmapUpdate(CGBuilderTy &Builder, const Expr *S,
- Address MCDCCondBitmapAddr, llvm::Value *Val);
+ Address MCDCCondBitmapAddr, llvm::Value *Val,
+ CodeGenFunction &CGF);
/// Return the region count for the counter at the given index.
uint64_t getRegionCount(const Stmt *S) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.cpp
index dc288bc3f615..2ce558d4bdf3 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.cpp
@@ -15,27 +15,32 @@
//===----------------------------------------------------------------------===//
#include "CodeGenTBAA.h"
+#include "ABIInfoImpl.h"
+#include "CGCXXABI.h"
+#include "CGRecordLayout.h"
+#include "CodeGenTypes.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
+#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
+#include "llvm/Support/Debug.h"
using namespace clang;
using namespace CodeGen;
-CodeGenTBAA::CodeGenTBAA(ASTContext &Ctx, llvm::Module &M,
- const CodeGenOptions &CGO,
- const LangOptions &Features, MangleContext &MContext)
- : Context(Ctx), Module(M), CodeGenOpts(CGO),
- Features(Features), MContext(MContext), MDHelper(M.getContext()),
- Root(nullptr), Char(nullptr)
-{}
+CodeGenTBAA::CodeGenTBAA(ASTContext &Ctx, CodeGenTypes &CGTypes,
+ llvm::Module &M, const CodeGenOptions &CGO,
+ const LangOptions &Features)
+ : Context(Ctx), CGTypes(CGTypes), Module(M), CodeGenOpts(CGO),
+ Features(Features), MDHelper(M.getContext()), Root(nullptr),
+ Char(nullptr) {}
CodeGenTBAA::~CodeGenTBAA() {
}
@@ -95,8 +100,6 @@ static bool TypeHasMayAlias(QualType QTy) {
/// Check if the given type is a valid base type to be used in access tags.
static bool isValidBaseType(QualType QTy) {
- if (QTy->isReferenceType())
- return false;
if (const RecordType *TTy = QTy->getAs<RecordType>()) {
const RecordDecl *RD = TTy->getDecl()->getDefinition();
// Incomplete types are not valid base access types.
@@ -184,10 +187,56 @@ llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type *Ty) {
return getChar();
// Handle pointers and references.
- // TODO: Implement C++'s type "similarity" and consider dis-"similar"
- // pointers distinct.
- if (Ty->isPointerType() || Ty->isReferenceType())
- return createScalarTypeNode("any pointer", getChar(), Size);
+ //
+ // C has a very strict rule for pointer aliasing. C23 6.7.6.1p2:
+ // For two pointer types to be compatible, both shall be identically
+ // qualified and both shall be pointers to compatible types.
+ //
+ // This rule is impractically strict; we want to at least ignore CVR
+ // qualifiers. Distinguishing by CVR qualifiers would make it UB to
+ // e.g. cast a `char **` to `const char * const *` and dereference it,
+ // which is too common and useful to invalidate. C++'s similar types
+ // rule permits qualifier differences in these nested positions; in fact,
+ // C++ even allows that cast as an implicit conversion.
+ //
+ // Other qualifiers could theoretically be distinguished, especially if
+ // they involve a significant representation difference. We don't
+ // currently do so, however.
+ //
+ // Computing the pointee type string recursively is implicitly more
+ // forgiving than the standards require. Effectively, we are turning
+ // the question "are these types compatible/similar" into "are
+ // accesses to these types allowed to alias". In both C and C++,
+ // the latter question has special carve-outs for signedness
+ // mismatches that only apply at the top level. As a result, we are
+ // allowing e.g. `int *` l-values to access `unsigned *` objects.
+ if (Ty->isPointerType() || Ty->isReferenceType()) {
+ llvm::MDNode *AnyPtr = createScalarTypeNode("any pointer", getChar(), Size);
+ if (!CodeGenOpts.PointerTBAA)
+ return AnyPtr;
+ // Compute the depth of the pointer and generate a tag of the form "p<depth>
+ // <base type tag>".
+ unsigned PtrDepth = 0;
+ do {
+ PtrDepth++;
+ Ty = Ty->getPointeeType().getTypePtr();
+ } while (Ty->isPointerType());
+ // TODO: Implement C++'s type "similarity" and consider dis-"similar"
+ // pointers distinct for non-builtin types.
+ if (isa<BuiltinType>(Ty)) {
+ llvm::MDNode *ScalarMD = getTypeInfoHelper(Ty);
+ StringRef Name =
+ cast<llvm::MDString>(
+ ScalarMD->getOperand(CodeGenOpts.NewStructPathTBAA ? 2 : 0))
+ ->getString();
+ SmallString<256> OutName("p");
+ OutName += std::to_string(PtrDepth);
+ OutName += " ";
+ OutName += Name;
+ return createScalarTypeNode(OutName, AnyPtr, Size);
+ }
+ return AnyPtr;
+ }
// Accesses to arrays are accesses to objects of their element types.
if (CodeGenOpts.NewStructPathTBAA && Ty->isArrayType())
@@ -208,7 +257,8 @@ llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type *Ty) {
SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
- MContext.mangleCanonicalTypeName(QualType(ETy, 0), Out);
+ CGTypes.getCXXABI().getMangleContext().mangleCanonicalTypeName(
+ QualType(ETy, 0), Out);
return createScalarTypeNode(OutName, getChar(), Size);
}
@@ -240,9 +290,10 @@ llvm::MDNode *CodeGenTBAA::getTypeInfo(QualType QTy) {
// aggregate will result into the may-alias access descriptor, meaning all
// subsequent accesses to direct and indirect members of that aggregate will
// be considered may-alias too.
- // TODO: Combine getTypeInfo() and getBaseTypeInfo() into a single function.
+ // TODO: Combine getTypeInfo() and getValidBaseTypeInfo() into a single
+ // function.
if (isValidBaseType(QTy))
- return getBaseTypeInfo(QTy);
+ return getValidBaseTypeInfo(QTy);
const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
if (llvm::MDNode *N = MetadataCache[Ty])
@@ -284,6 +335,14 @@ CodeGenTBAA::CollectFields(uint64_t BaseOffset,
/* Things not handled yet include: C++ base classes, bitfields, */
if (const RecordType *TTy = QTy->getAs<RecordType>()) {
+ if (TTy->isUnionType()) {
+ uint64_t Size = Context.getTypeSizeInChars(QTy).getQuantity();
+ llvm::MDNode *TBAAType = getChar();
+ llvm::MDNode *TBAATag = getAccessTagInfo(TBAAAccessInfo(TBAAType, Size));
+ Fields.push_back(
+ llvm::MDBuilder::TBAAStructField(BaseOffset, Size, TBAATag));
+ return true;
+ }
const RecordDecl *RD = TTy->getDecl()->getDefinition();
if (RD->hasFlexibleArrayMember())
return false;
@@ -294,14 +353,40 @@ CodeGenTBAA::CollectFields(uint64_t BaseOffset,
return false;
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CGRecordLayout &CGRL = CGTypes.getCGRecordLayout(RD);
unsigned idx = 0;
- for (RecordDecl::field_iterator i = RD->field_begin(),
- e = RD->field_end(); i != e; ++i, ++idx) {
- if ((*i)->isZeroSize(Context) || (*i)->isUnnamedBitfield())
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ if (isEmptyFieldForLayout(Context, *i))
+ continue;
+
+ uint64_t Offset =
+ BaseOffset + Layout.getFieldOffset(idx) / Context.getCharWidth();
+
+ // Create a single field for consecutive named bitfields using char as
+ // base type.
+ if ((*i)->isBitField()) {
+ const CGBitFieldInfo &Info = CGRL.getBitFieldInfo(*i);
+ // For big endian targets the first bitfield in the consecutive run is
+ // at the most-significant end; see CGRecordLowering::setBitFieldInfo
+ // for more information.
+ bool IsBE = Context.getTargetInfo().isBigEndian();
+ bool IsFirst = IsBE ? Info.StorageSize - (Info.Offset + Info.Size) == 0
+ : Info.Offset == 0;
+ if (!IsFirst)
+ continue;
+ unsigned CurrentBitFieldSize = Info.StorageSize;
+ uint64_t Size =
+ llvm::divideCeil(CurrentBitFieldSize, Context.getCharWidth());
+ llvm::MDNode *TBAAType = getChar();
+ llvm::MDNode *TBAATag =
+ getAccessTagInfo(TBAAAccessInfo(TBAAType, Size));
+ Fields.push_back(
+ llvm::MDBuilder::TBAAStructField(Offset, Size, TBAATag));
continue;
- uint64_t Offset = BaseOffset +
- Layout.getFieldOffset(idx) / Context.getCharWidth();
+ }
+
QualType FieldQTy = i->getType();
if (!CollectFields(Offset, FieldQTy, Fields,
MayAlias || TypeHasMayAlias(FieldQTy)))
@@ -321,6 +406,9 @@ CodeGenTBAA::CollectFields(uint64_t BaseOffset,
llvm::MDNode *
CodeGenTBAA::getTBAAStructInfo(QualType QTy) {
+ if (CodeGenOpts.OptimizationLevel == 0 || CodeGenOpts.RelaxedAliasing)
+ return nullptr;
+
const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
if (llvm::MDNode *N = StructMetadataCache[Ty])
@@ -354,7 +442,7 @@ llvm::MDNode *CodeGenTBAA::getBaseTypeInfoHelper(const Type *Ty) {
if (BaseRD->isEmpty())
continue;
llvm::MDNode *TypeNode = isValidBaseType(BaseQTy)
- ? getBaseTypeInfo(BaseQTy)
+ ? getValidBaseTypeInfo(BaseQTy)
: getTypeInfo(BaseQTy);
if (!TypeNode)
return nullptr;
@@ -375,11 +463,12 @@ llvm::MDNode *CodeGenTBAA::getBaseTypeInfoHelper(const Type *Ty) {
});
}
for (FieldDecl *Field : RD->fields()) {
- if (Field->isZeroSize(Context) || Field->isUnnamedBitfield())
+ if (Field->isZeroSize(Context) || Field->isUnnamedBitField())
continue;
QualType FieldQTy = Field->getType();
- llvm::MDNode *TypeNode = isValidBaseType(FieldQTy) ?
- getBaseTypeInfo(FieldQTy) : getTypeInfo(FieldQTy);
+ llvm::MDNode *TypeNode = isValidBaseType(FieldQTy)
+ ? getValidBaseTypeInfo(FieldQTy)
+ : getTypeInfo(FieldQTy);
if (!TypeNode)
return nullptr;
@@ -394,7 +483,8 @@ llvm::MDNode *CodeGenTBAA::getBaseTypeInfoHelper(const Type *Ty) {
if (Features.CPlusPlus) {
// Don't use the mangler for C code.
llvm::raw_svector_ostream Out(OutName);
- MContext.mangleCanonicalTypeName(QualType(Ty, 0), Out);
+ CGTypes.getCXXABI().getMangleContext().mangleCanonicalTypeName(
+ QualType(Ty, 0), Out);
} else {
OutName = RD->getName();
}
@@ -416,9 +506,8 @@ llvm::MDNode *CodeGenTBAA::getBaseTypeInfoHelper(const Type *Ty) {
return nullptr;
}
-llvm::MDNode *CodeGenTBAA::getBaseTypeInfo(QualType QTy) {
- if (!isValidBaseType(QTy))
- return nullptr;
+llvm::MDNode *CodeGenTBAA::getValidBaseTypeInfo(QualType QTy) {
+ assert(isValidBaseType(QTy) && "Must be a valid base type");
const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
@@ -437,6 +526,10 @@ llvm::MDNode *CodeGenTBAA::getBaseTypeInfo(QualType QTy) {
return TypeNode;
}
+llvm::MDNode *CodeGenTBAA::getBaseTypeInfo(QualType QTy) {
+ return isValidBaseType(QTy) ? getValidBaseTypeInfo(QTy) : nullptr;
+}
+
llvm::MDNode *CodeGenTBAA::getAccessTagInfo(TBAAAccessInfo Info) {
assert(!Info.isIncomplete() && "Access to an object of an incomplete type!");
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.h
index a65963596fe9..ba74a39a4d25 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.h
@@ -24,11 +24,11 @@ namespace clang {
class ASTContext;
class CodeGenOptions;
class LangOptions;
- class MangleContext;
class QualType;
class Type;
namespace CodeGen {
+class CodeGenTypes;
// TBAAAccessKind - A kind of TBAA memory access descriptor.
enum class TBAAAccessKind : unsigned {
@@ -115,10 +115,10 @@ struct TBAAAccessInfo {
/// while lowering AST types to LLVM types.
class CodeGenTBAA {
ASTContext &Context;
+ CodeGenTypes &CGTypes;
llvm::Module &Module;
const CodeGenOptions &CodeGenOpts;
const LangOptions &Features;
- MangleContext &MContext;
// MDHelper - Helper for creating metadata.
llvm::MDBuilder MDHelper;
@@ -166,9 +166,13 @@ class CodeGenTBAA {
/// used to describe accesses to objects of the given base type.
llvm::MDNode *getBaseTypeInfoHelper(const Type *Ty);
+ /// getValidBaseTypeInfo - Return metadata that describes the given base
+ /// access type. The type must be suitable.
+ llvm::MDNode *getValidBaseTypeInfo(QualType QTy);
+
public:
- CodeGenTBAA(ASTContext &Ctx, llvm::Module &M, const CodeGenOptions &CGO,
- const LangOptions &Features, MangleContext &MContext);
+ CodeGenTBAA(ASTContext &Ctx, CodeGenTypes &CGTypes, llvm::Module &M,
+ const CodeGenOptions &CGO, const LangOptions &Features);
~CodeGenTBAA();
/// getTypeInfo - Get metadata used to describe accesses to objects of the
@@ -187,8 +191,9 @@ public:
/// the given type.
llvm::MDNode *getTBAAStructInfo(QualType QTy);
- /// getBaseTypeInfo - Get metadata that describes the given base access type.
- /// Return null if the type is not suitable for use in TBAA access tags.
+ /// getBaseTypeInfo - Get metadata that describes the given base access
+ /// type. Return null if the type is not suitable for use in TBAA access
+ /// tags.
llvm::MDNode *getBaseTypeInfo(QualType QTy);
/// getAccessTagInfo - Get TBAA tag for a given memory access.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypeCache.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypeCache.h
index 083d69214fb3..e273ebe3b060 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypeCache.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypeCache.h
@@ -51,7 +51,7 @@ struct CodeGenTypeCache {
llvm::IntegerType *PtrDiffTy;
};
- /// void*, void** in address space 0
+ /// void*, void** in the target's default address space (often 0)
union {
llvm::PointerType *UnqualPtrTy;
llvm::PointerType *VoidPtrTy;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
index a6b51bfef876..f5deccdc1ba7 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -30,9 +30,8 @@ using namespace clang;
using namespace CodeGen;
CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
- : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
- Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()),
- TheABIInfo(cgm.getTargetCodeGenInfo().getABIInfo()) {
+ : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
+ Target(cgm.getTarget()) {
SkippedLayout = false;
LongDoubleReferenced = false;
}
@@ -43,6 +42,8 @@ CodeGenTypes::~CodeGenTypes() {
delete &*I++;
}
+CGCXXABI &CodeGenTypes::getCXXABI() const { return getCGM().getCXXABI(); }
+
const CodeGenOptions &CodeGenTypes::getCodeGenOpts() const {
return CGM.getCodeGenOpts();
}
@@ -89,7 +90,14 @@ void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
-llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool ForBitField) {
+///
+/// We generally assume that the alloc size of this type under the LLVM
+/// data layout is the same as the size of the AST type. The alignment
+/// does not have to match: Clang should always use explicit alignments
+/// and packed structs as necessary to produce the layout it needs.
+/// But the size does need to be exactly right or else things like struct
+/// layout will break.
+llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
if (T->isConstantMatrixType()) {
const Type *Ty = Context.getCanonicalType(T).getTypePtr();
const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
@@ -107,10 +115,28 @@ llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool ForBitField) {
return llvm::IntegerType::get(FixedVT->getContext(), BytePadded);
}
- // If this is a bool type, or a bit-precise integer type in a bitfield
- // representation, map this integer to the target-specified size.
- if ((ForBitField && T->isBitIntType()) ||
- (!T->isBitIntType() && R->isIntegerTy(1)))
+ // If T is _Bool or a _BitInt type, ConvertType will produce an IR type
+ // with the exact semantic bit-width of the AST type; for example,
+ // _BitInt(17) will turn into i17. In memory, however, we need to store
+ // such values extended to their full storage size as decided by AST
+ // layout; this is an ABI requirement. Ideally, we would always use an
+ // integer type that's just the bit-size of the AST type; for example, if
+ // sizeof(_BitInt(17)) == 4, _BitInt(17) would turn into i32. That is what's
+ // returned by convertTypeForLoadStore. However, that type does not
+ // always satisfy the size requirement on memory representation types
+ // describe above. For example, a 32-bit platform might reasonably set
+ // sizeof(_BitInt(65)) == 12, but i96 is likely to have to have an alloc size
+ // of 16 bytes in the LLVM data layout. In these cases, we simply return
+ // a byte array of the appropriate size.
+ if (T->isBitIntType()) {
+ if (typeRequiresSplitIntoByteArray(T, R))
+ return llvm::ArrayType::get(CGM.Int8Ty,
+ Context.getTypeSizeInChars(T).getQuantity());
+ return llvm::IntegerType::get(getLLVMContext(),
+ (unsigned)Context.getTypeSize(T));
+ }
+
+ if (R->isIntegerTy(1))
return llvm::IntegerType::get(getLLVMContext(),
(unsigned)Context.getTypeSize(T));
@@ -118,6 +144,36 @@ llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool ForBitField) {
return R;
}
+bool CodeGenTypes::typeRequiresSplitIntoByteArray(QualType ASTTy,
+ llvm::Type *LLVMTy) {
+ if (!LLVMTy)
+ LLVMTy = ConvertType(ASTTy);
+
+ CharUnits ASTSize = Context.getTypeSizeInChars(ASTTy);
+ CharUnits LLVMSize =
+ CharUnits::fromQuantity(getDataLayout().getTypeAllocSize(LLVMTy));
+ return ASTSize != LLVMSize;
+}
+
+llvm::Type *CodeGenTypes::convertTypeForLoadStore(QualType T,
+ llvm::Type *LLVMTy) {
+ if (!LLVMTy)
+ LLVMTy = ConvertType(T);
+
+ if (T->isBitIntType())
+ return llvm::Type::getIntNTy(
+ getLLVMContext(), Context.getTypeSizeInChars(T).getQuantity() * 8);
+
+ if (LLVMTy->isIntegerTy(1))
+ return llvm::IntegerType::get(getLLVMContext(),
+ (unsigned)Context.getTypeSize(T));
+
+ if (T->isExtVectorBoolType())
+ return ConvertTypeForMem(T);
+
+ return LLVMTy;
+}
+
/// isRecordLayoutComplete - Return true if the specified type is already
/// completely laid out.
bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
@@ -409,7 +465,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
break;
case BuiltinType::LongDouble:
LongDoubleReferenced = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case BuiltinType::BFloat16:
case BuiltinType::Float:
case BuiltinType::Double:
@@ -523,8 +579,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
return llvm::StructType::get(getLLVMContext(), EltTys);
}
return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
- Info.EC.getKnownMinValue() *
- Info.NumVectors);
+ Info.EC.getKnownMinValue());
}
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \
case BuiltinType::Id: { \
@@ -534,6 +589,11 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
llvm_unreachable("Unexpected wasm reference builtin type!"); \
} break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
+#define AMDGPU_OPAQUE_PTR_TYPE(Name, MangledName, AS, Width, Align, Id, \
+ SingletonId) \
+ case BuiltinType::Id: \
+ return llvm::PointerType::get(getLLVMContext(), AS);
+#include "clang/Basic/AMDGPUTypes.def"
case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
@@ -590,6 +650,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
ResultType = llvm::ArrayType::get(ResultType, 0);
break;
}
+ case Type::ArrayParameter:
case Type::ConstantArray: {
const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());
@@ -601,7 +662,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
EltTy = llvm::Type::getInt8Ty(getLLVMContext());
}
- ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue());
+ ResultType = llvm::ArrayType::get(EltTy, A->getZExtSize());
break;
}
case Type::ExtVector:
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
index 01c0c673795c..5aebf9a21223 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
@@ -57,11 +57,6 @@ class CodeGenTypes {
ASTContext &Context;
llvm::Module &TheModule;
const TargetInfo &Target;
- CGCXXABI &TheCXXABI;
-
- // This should not be moved earlier, since its initialization depends on some
- // of the previous reference members being already initialized
- const ABIInfo &TheABIInfo;
/// The opaque type map for Objective-C interfaces. All direct
/// manipulation is done by the runtime interfaces, which are
@@ -106,9 +101,8 @@ public:
}
CodeGenModule &getCGM() const { return CGM; }
ASTContext &getContext() const { return Context; }
- const ABIInfo &getABIInfo() const { return TheABIInfo; }
const TargetInfo &getTarget() const { return Target; }
- CGCXXABI &getCXXABI() const { return TheCXXABI; }
+ CGCXXABI &getCXXABI() const;
llvm::LLVMContext &getLLVMContext() { return TheModule.getContext(); }
const CodeGenOptions &getCodeGenOpts() const;
@@ -126,7 +120,30 @@ public:
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
- llvm::Type *ConvertTypeForMem(QualType T, bool ForBitField = false);
+ llvm::Type *ConvertTypeForMem(QualType T);
+
+ /// Check whether the given type needs to be laid out in memory
+ /// using an opaque byte-array type because its load/store type
+ /// does not have the correct alloc size in the LLVM data layout.
+ /// If this is false, the load/store type (convertTypeForLoadStore)
+ /// and memory representation type (ConvertTypeForMem) will
+ /// be the same type.
+ bool typeRequiresSplitIntoByteArray(QualType ASTTy,
+ llvm::Type *LLVMTy = nullptr);
+
+ /// Given that T is a scalar type, return the IR type that should
+ /// be used for load and store operations. For example, this might
+ /// be i8 for _Bool or i96 for _BitInt(65). The store size of the
+ /// load/store type (as reported by LLVM's data layout) is always
+ /// the same as the alloc size of the memory representation type
+ /// returned by ConvertTypeForMem.
+ ///
+ /// As an optimization, if you already know the scalar value type
+ /// for T (as would be returned by ConvertType), you can pass
+ /// it as the second argument so that it does not need to be
+ /// recomputed in common cases where the value type and
+ /// load/store type are the same.
+ llvm::Type *convertTypeForLoadStore(QualType T, llvm::Type *LLVMTy = nullptr);
/// GetFunctionType - Get the LLVM function type for \arg Info.
llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ConstantEmitter.h b/contrib/llvm-project/clang/lib/CodeGen/ConstantEmitter.h
index a55da0dcad79..581b05ae87ad 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ConstantEmitter.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/ConstantEmitter.h
@@ -103,8 +103,9 @@ public:
/// expression is known to be a constant expression with either a fairly
/// simple type or a known simple form.
llvm::Constant *emitAbstract(const Expr *E, QualType T);
- llvm::Constant *emitAbstract(SourceLocation loc, const APValue &value,
- QualType T);
+ llvm::Constant *
+ emitAbstract(SourceLocation loc, const APValue &value, QualType T,
+ bool EnablePtrAuthFunctionTypeDiscrimination = true);
/// Try to emit the result of the given expression as an abstract constant.
llvm::Constant *tryEmitAbstract(const Expr *E, QualType T);
@@ -113,6 +114,9 @@ public:
llvm::Constant *tryEmitAbstract(const APValue &value, QualType T);
llvm::Constant *tryEmitAbstractForMemory(const APValue &value, QualType T);
+ llvm::Constant *tryEmitConstantSignedPointer(llvm::Constant *Ptr,
+ PointerAuthQualifier Auth);
+
llvm::Constant *tryEmitConstantExpr(const ConstantExpr *CE);
llvm::Constant *emitNullForMemory(QualType T) {
@@ -135,7 +139,9 @@ public:
llvm::Constant *tryEmitPrivate(const Expr *E, QualType T);
llvm::Constant *tryEmitPrivateForMemory(const Expr *E, QualType T);
- llvm::Constant *tryEmitPrivate(const APValue &value, QualType T);
+ llvm::Constant *
+ tryEmitPrivate(const APValue &value, QualType T,
+ bool EnablePtrAuthFunctionTypeDiscrimination = true);
llvm::Constant *tryEmitPrivateForMemory(const APValue &value, QualType T);
/// Get the address of the current location. This is a constant
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ConstantInitBuilder.cpp b/contrib/llvm-project/clang/lib/CodeGen/ConstantInitBuilder.cpp
index 3cf69f3b6415..549d5dd66b12 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ConstantInitBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ConstantInitBuilder.cpp
@@ -296,3 +296,21 @@ ConstantAggregateBuilderBase::finishStruct(llvm::StructType *ty) {
buffer.erase(buffer.begin() + Begin, buffer.end());
return constant;
}
+
+/// Sign the given pointer and add it to the constant initializer
+/// currently being built.
+void ConstantAggregateBuilderBase::addSignedPointer(
+ llvm::Constant *Pointer, const PointerAuthSchema &Schema,
+ GlobalDecl CalleeDecl, QualType CalleeType) {
+ if (!Schema || !Builder.CGM.shouldSignPointer(Schema))
+ return add(Pointer);
+
+ llvm::Constant *StorageAddress = nullptr;
+ if (Schema.isAddressDiscriminated()) {
+ StorageAddress = getAddrOfCurrentPosition(Pointer->getType());
+ }
+
+ llvm::Constant *SignedPointer = Builder.CGM.getConstantSignedPointer(
+ Pointer, Schema, StorageAddress, CalleeDecl, CalleeType);
+ add(SignedPointer);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp b/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp
index 0c43317642bc..67a9caf8b4ec 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp
@@ -17,6 +17,7 @@
#include "clang/Basic/FileManager.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ProfileData/Coverage/CoverageMapping.h"
@@ -31,16 +32,26 @@
// is textually included.
#define COVMAP_V3
+namespace llvm {
+cl::opt<bool>
+ EnableSingleByteCoverage("enable-single-byte-coverage",
+ llvm::cl::ZeroOrMore,
+ llvm::cl::desc("Enable single byte coverage"),
+ llvm::cl::Hidden, llvm::cl::init(false));
+} // namespace llvm
+
static llvm::cl::opt<bool> EmptyLineCommentCoverage(
"emptyline-comment-coverage",
llvm::cl::desc("Emit emptylines and comment lines as skipped regions (only "
"disable it on test)"),
llvm::cl::init(true), llvm::cl::Hidden);
-llvm::cl::opt<bool> SystemHeadersCoverage(
+namespace llvm::coverage {
+cl::opt<bool> SystemHeadersCoverage(
"system-headers-coverage",
- llvm::cl::desc("Enable collecting coverage from system headers"),
- llvm::cl::init(false), llvm::cl::Hidden);
+ cl::desc("Enable collecting coverage from system headers"), cl::init(false),
+ cl::Hidden);
+}
using namespace clang;
using namespace CodeGen;
@@ -95,9 +106,6 @@ void CoverageSourceInfo::updateNextTokLoc(SourceLocation Loc) {
}
namespace {
-using MCDCConditionID = CounterMappingRegion::MCDCConditionID;
-using MCDCParameters = CounterMappingRegion::MCDCParameters;
-
/// A region of source code that can be mapped to a counter.
class SourceMappingRegion {
/// Primary Counter that is also used for Branch Regions for "True" branches.
@@ -107,7 +115,7 @@ class SourceMappingRegion {
std::optional<Counter> FalseCount;
/// Parameters used for Modified Condition/Decision Coverage
- MCDCParameters MCDCParams;
+ mcdc::Parameters MCDCParams;
/// The region's starting location.
std::optional<SourceLocation> LocStart;
@@ -131,7 +139,7 @@ public:
SkippedRegion(false) {}
SourceMappingRegion(Counter Count, std::optional<Counter> FalseCount,
- MCDCParameters MCDCParams,
+ mcdc::Parameters MCDCParams,
std::optional<SourceLocation> LocStart,
std::optional<SourceLocation> LocEnd,
bool GapRegion = false)
@@ -139,7 +147,7 @@ public:
LocStart(LocStart), LocEnd(LocEnd), GapRegion(GapRegion),
SkippedRegion(false) {}
- SourceMappingRegion(MCDCParameters MCDCParams,
+ SourceMappingRegion(mcdc::Parameters MCDCParams,
std::optional<SourceLocation> LocStart,
std::optional<SourceLocation> LocEnd)
: MCDCParams(MCDCParams), LocStart(LocStart), LocEnd(LocEnd),
@@ -185,9 +193,25 @@ public:
bool isBranch() const { return FalseCount.has_value(); }
- bool isMCDCDecision() const { return MCDCParams.NumConditions != 0; }
+ bool isMCDCBranch() const {
+ return std::holds_alternative<mcdc::BranchParameters>(MCDCParams);
+ }
- const MCDCParameters &getMCDCParams() const { return MCDCParams; }
+ const auto &getMCDCBranchParams() const {
+ return mcdc::getParams<const mcdc::BranchParameters>(MCDCParams);
+ }
+
+ bool isMCDCDecision() const {
+ return std::holds_alternative<mcdc::DecisionParameters>(MCDCParams);
+ }
+
+ const auto &getMCDCDecisionParams() const {
+ return mcdc::getParams<const mcdc::DecisionParameters>(MCDCParams);
+ }
+
+ const mcdc::Parameters &getMCDCParams() const { return MCDCParams; }
+
+ void resetMCDCParams() { MCDCParams = mcdc::Parameters(); }
};
/// Spelling locations for the start and end of a source region.
@@ -278,10 +302,36 @@ public:
return SM.getLocForEndOfFile(SM.getFileID(Loc));
}
- /// Find out where the current file is included or macro is expanded.
- SourceLocation getIncludeOrExpansionLoc(SourceLocation Loc) {
- return Loc.isMacroID() ? SM.getImmediateExpansionRange(Loc).getBegin()
- : SM.getIncludeLoc(SM.getFileID(Loc));
+ /// Find out where a macro is expanded. If the immediate result is a
+ /// <scratch space>, keep looking until the result isn't. Return a pair of
+ /// \c SourceLocation. The first object is always the begin sloc of found
+ /// result. The second should be checked by the caller: if it has value, it's
+ /// the end sloc of the found result. Otherwise the while loop didn't get
+ /// executed, which means the location wasn't changed and the caller has to
+ /// learn the end sloc from somewhere else.
+ std::pair<SourceLocation, std::optional<SourceLocation>>
+ getNonScratchExpansionLoc(SourceLocation Loc) {
+ std::optional<SourceLocation> EndLoc = std::nullopt;
+ while (Loc.isMacroID() &&
+ SM.isWrittenInScratchSpace(SM.getSpellingLoc(Loc))) {
+ auto ExpansionRange = SM.getImmediateExpansionRange(Loc);
+ Loc = ExpansionRange.getBegin();
+ EndLoc = ExpansionRange.getEnd();
+ }
+ return std::make_pair(Loc, EndLoc);
+ }
+
+ /// Find out where the current file is included or macro is expanded. If
+ /// \c AcceptScratch is set to false, keep looking for expansions until the
+ /// found sloc is not a <scratch space>.
+ SourceLocation getIncludeOrExpansionLoc(SourceLocation Loc,
+ bool AcceptScratch = true) {
+ if (!Loc.isMacroID())
+ return SM.getIncludeLoc(SM.getFileID(Loc));
+ Loc = SM.getImmediateExpansionRange(Loc).getBegin();
+ if (AcceptScratch)
+ return Loc;
+ return getNonScratchExpansionLoc(Loc).first;
}
/// Return true if \c Loc is a location in a built-in macro.
@@ -325,16 +375,35 @@ public:
llvm::SmallSet<FileID, 8> Visited;
SmallVector<std::pair<SourceLocation, unsigned>, 8> FileLocs;
- for (const auto &Region : SourceRegions) {
+ for (auto &Region : SourceRegions) {
SourceLocation Loc = Region.getBeginLoc();
+
+ // Replace Region with its definition if it is in <scratch space>.
+ auto NonScratchExpansionLoc = getNonScratchExpansionLoc(Loc);
+ auto EndLoc = NonScratchExpansionLoc.second;
+ if (EndLoc.has_value()) {
+ Loc = NonScratchExpansionLoc.first;
+ Region.setStartLoc(Loc);
+ Region.setEndLoc(EndLoc.value());
+ }
+
+ // Replace Loc with FileLoc if it is expanded with system headers.
+ if (!SystemHeadersCoverage && SM.isInSystemMacro(Loc)) {
+ auto BeginLoc = SM.getSpellingLoc(Loc);
+ auto EndLoc = SM.getSpellingLoc(Region.getEndLoc());
+ if (SM.isWrittenInSameFile(BeginLoc, EndLoc)) {
+ Loc = SM.getFileLoc(Loc);
+ Region.setStartLoc(Loc);
+ Region.setEndLoc(SM.getFileLoc(Region.getEndLoc()));
+ }
+ }
+
FileID File = SM.getFileID(Loc);
if (!Visited.insert(File).second)
continue;
- // Do not map FileID's associated with system headers unless collecting
- // coverage from system headers is explicitly enabled.
- if (!SystemHeadersCoverage && SM.isInSystemHeader(SM.getSpellingLoc(Loc)))
- continue;
+ assert(SystemHeadersCoverage ||
+ !SM.isInSystemHeader(SM.getSpellingLoc(Loc)));
unsigned Depth = 0;
for (SourceLocation Parent = getIncludeOrExpansionLoc(Loc);
@@ -450,13 +519,19 @@ public:
// Ignore regions from system headers unless collecting coverage from
// system headers is explicitly enabled.
if (!SystemHeadersCoverage &&
- SM.isInSystemHeader(SM.getSpellingLoc(LocStart)))
+ SM.isInSystemHeader(SM.getSpellingLoc(LocStart))) {
+ assert(!Region.isMCDCBranch() && !Region.isMCDCDecision() &&
+ "Don't suppress the condition in system headers");
continue;
+ }
auto CovFileID = getCoverageFileID(LocStart);
// Ignore regions that don't have a file, such as builtin macros.
- if (!CovFileID)
+ if (!CovFileID) {
+ assert(!Region.isMCDCBranch() && !Region.isMCDCDecision() &&
+ "Don't suppress the condition in non-file regions");
continue;
+ }
SourceLocation LocEnd = Region.getEndLoc();
assert(SM.isWrittenInSameFile(LocStart, LocEnd) &&
@@ -466,8 +541,11 @@ public:
// This not only suppresses redundant regions, but sometimes prevents
// creating regions with wrong counters if, for example, a statement's
// body ends at the end of a nested macro.
- if (Filter.count(std::make_pair(LocStart, LocEnd)))
+ if (Filter.count(std::make_pair(LocStart, LocEnd))) {
+ assert(!Region.isMCDCBranch() && !Region.isMCDCDecision() &&
+ "Don't suppress the condition");
continue;
+ }
// Find the spelling locations for the mapping region.
SpellingRegion SR{SM, LocStart, LocEnd};
@@ -483,13 +561,13 @@ public:
SR.ColumnEnd));
} else if (Region.isBranch()) {
MappingRegions.push_back(CounterMappingRegion::makeBranchRegion(
- Region.getCounter(), Region.getFalseCounter(),
- Region.getMCDCParams(), *CovFileID, SR.LineStart, SR.ColumnStart,
- SR.LineEnd, SR.ColumnEnd));
+ Region.getCounter(), Region.getFalseCounter(), *CovFileID,
+ SR.LineStart, SR.ColumnStart, SR.LineEnd, SR.ColumnEnd,
+ Region.getMCDCParams()));
} else if (Region.isMCDCDecision()) {
MappingRegions.push_back(CounterMappingRegion::makeDecisionRegion(
- Region.getMCDCParams(), *CovFileID, SR.LineStart, SR.ColumnStart,
- SR.LineEnd, SR.ColumnEnd));
+ Region.getMCDCDecisionParams(), *CovFileID, SR.LineStart,
+ SR.ColumnStart, SR.LineEnd, SR.ColumnEnd));
} else {
MappingRegions.push_back(CounterMappingRegion::makeRegion(
Region.getCounter(), *CovFileID, SR.LineStart, SR.ColumnStart,
@@ -503,7 +581,7 @@ public:
SourceRegionFilter Filter;
for (const auto &FM : FileIDMapping) {
SourceLocation ExpandedLoc = FM.second.second;
- SourceLocation ParentLoc = getIncludeOrExpansionLoc(ExpandedLoc);
+ SourceLocation ParentLoc = getIncludeOrExpansionLoc(ExpandedLoc, false);
if (ParentLoc.isInvalid())
continue;
@@ -586,11 +664,6 @@ struct EmptyCoverageMappingBuilder : public CoverageMappingBuilder {
/// creation.
struct MCDCCoverageBuilder {
- struct DecisionIDPair {
- MCDCConditionID TrueID = 0;
- MCDCConditionID FalseID = 0;
- };
-
/// The AST walk recursively visits nested logical-AND or logical-OR binary
/// operator nodes and then visits their LHS and RHS children nodes. As this
/// happens, the algorithm will assign IDs to each operator's LHS and RHS side
@@ -681,14 +754,15 @@ struct MCDCCoverageBuilder {
private:
CodeGenModule &CGM;
- llvm::SmallVector<DecisionIDPair> DecisionStack;
- llvm::DenseMap<const Stmt *, MCDCConditionID> &CondIDs;
- llvm::DenseMap<const Stmt *, unsigned> &MCDCBitmapMap;
- MCDCConditionID NextID = 1;
+ llvm::SmallVector<mcdc::ConditionIDs> DecisionStack;
+ MCDC::State &MCDCState;
+ const Stmt *DecisionStmt = nullptr;
+ mcdc::ConditionID NextID = 0;
bool NotMapped = false;
- /// Represent a sentinel value of [0,0] for the bottom of DecisionStack.
- static constexpr DecisionIDPair DecisionStackSentinel{0, 0};
+ /// Represent a sentinel value as a pair of final decisions for the bottom
+ // of DecisionStack.
+ static constexpr mcdc::ConditionIDs DecisionStackSentinel{-1, -1};
/// Is this a logical-AND operation?
bool isLAnd(const BinaryOperator *E) const {
@@ -696,38 +770,37 @@ private:
}
public:
- MCDCCoverageBuilder(CodeGenModule &CGM,
- llvm::DenseMap<const Stmt *, MCDCConditionID> &CondIDMap,
- llvm::DenseMap<const Stmt *, unsigned> &MCDCBitmapMap)
- : CGM(CGM), DecisionStack(1, DecisionStackSentinel), CondIDs(CondIDMap),
- MCDCBitmapMap(MCDCBitmapMap) {}
+ MCDCCoverageBuilder(CodeGenModule &CGM, MCDC::State &MCDCState)
+ : CGM(CGM), DecisionStack(1, DecisionStackSentinel),
+ MCDCState(MCDCState) {}
/// Return whether the build of the control flow map is at the top-level
/// (root) of a logical operator nest in a boolean expression prior to the
/// assignment of condition IDs.
- bool isIdle() const { return (NextID == 1 && !NotMapped); }
+ bool isIdle() const { return (NextID == 0 && !NotMapped); }
/// Return whether any IDs have been assigned in the build of the control
/// flow map, indicating that the map is being generated for this boolean
/// expression.
- bool isBuilding() const { return (NextID > 1); }
+ bool isBuilding() const { return (NextID > 0); }
/// Set the given condition's ID.
- void setCondID(const Expr *Cond, MCDCConditionID ID) {
- CondIDs[CodeGenFunction::stripCond(Cond)] = ID;
+ void setCondID(const Expr *Cond, mcdc::ConditionID ID) {
+ MCDCState.BranchByStmt[CodeGenFunction::stripCond(Cond)] = {ID,
+ DecisionStmt};
}
/// Return the ID of a given condition.
- MCDCConditionID getCondID(const Expr *Cond) const {
- auto I = CondIDs.find(CodeGenFunction::stripCond(Cond));
- if (I == CondIDs.end())
- return 0;
+ mcdc::ConditionID getCondID(const Expr *Cond) const {
+ auto I = MCDCState.BranchByStmt.find(CodeGenFunction::stripCond(Cond));
+ if (I == MCDCState.BranchByStmt.end())
+ return -1;
else
- return I->second;
+ return I->second.ID;
}
/// Return the LHS Decision ([0,0] if not set).
- const DecisionIDPair &back() const { return DecisionStack.back(); }
+ const mcdc::ConditionIDs &back() const { return DecisionStack.back(); }
/// Push the binary operator statement to track the nest level and assign IDs
/// to the operator's LHS and RHS. The RHS may be a larger subtree that is
@@ -737,43 +810,47 @@ public:
return;
// If binary expression is disqualified, don't do mapping.
- if (!isBuilding() && !MCDCBitmapMap.contains(CodeGenFunction::stripCond(E)))
+ if (!isBuilding() &&
+ !MCDCState.DecisionByStmt.contains(CodeGenFunction::stripCond(E)))
NotMapped = true;
// Don't go any further if we don't need to map condition IDs.
if (NotMapped)
return;
- const DecisionIDPair &ParentDecision = DecisionStack.back();
+ if (NextID == 0) {
+ DecisionStmt = E;
+ assert(MCDCState.DecisionByStmt.contains(E));
+ }
+
+ const mcdc::ConditionIDs &ParentDecision = DecisionStack.back();
// If the operator itself has an assigned ID, this means it represents a
// larger subtree. In this case, assign that ID to its LHS node. Its RHS
// will receive a new ID below. Otherwise, assign ID+1 to LHS.
- if (CondIDs.contains(CodeGenFunction::stripCond(E)))
+ if (MCDCState.BranchByStmt.contains(CodeGenFunction::stripCond(E)))
setCondID(E->getLHS(), getCondID(E));
else
setCondID(E->getLHS(), NextID++);
// Assign a ID+1 for the RHS.
- MCDCConditionID RHSid = NextID++;
+ mcdc::ConditionID RHSid = NextID++;
setCondID(E->getRHS(), RHSid);
// Push the LHS decision IDs onto the DecisionStack.
if (isLAnd(E))
- DecisionStack.push_back({RHSid, ParentDecision.FalseID});
+ DecisionStack.push_back({ParentDecision[false], RHSid});
else
- DecisionStack.push_back({ParentDecision.TrueID, RHSid});
+ DecisionStack.push_back({RHSid, ParentDecision[true]});
}
/// Pop and return the LHS Decision ([0,0] if not set).
- DecisionIDPair pop() {
+ mcdc::ConditionIDs pop() {
if (!CGM.getCodeGenOpts().MCDCCoverage || NotMapped)
- return DecisionStack.front();
+ return DecisionStackSentinel;
assert(DecisionStack.size() > 1);
- DecisionIDPair D = DecisionStack.back();
- DecisionStack.pop_back();
- return D;
+ return DecisionStack.pop_back_val();
}
/// Return the total number of conditions and reset the state. The number of
@@ -788,15 +865,15 @@ public:
// Reset state if not doing mapping.
if (NotMapped) {
NotMapped = false;
- assert(NextID == 1);
+ assert(NextID == 0);
return 0;
}
// Set number of conditions and reset.
- unsigned TotalConds = NextID - 1;
+ unsigned TotalConds = NextID;
// Reset ID back to beginning.
- NextID = 1;
+ NextID = 0;
return TotalConds;
}
@@ -810,12 +887,15 @@ struct CounterCoverageMappingBuilder
/// The map of statements to count values.
llvm::DenseMap<const Stmt *, unsigned> &CounterMap;
- /// The map of statements to bitmap coverage object values.
- llvm::DenseMap<const Stmt *, unsigned> &MCDCBitmapMap;
+ MCDC::State &MCDCState;
/// A stack of currently live regions.
llvm::SmallVector<SourceMappingRegion> RegionStack;
+ /// Set if the Expr should be handled as a leaf even if it is kind of binary
+ /// logical ops (&&, ||).
+ llvm::DenseSet<const Stmt *> LeafExprSet;
+
/// An object to manage MCDC regions.
MCDCCoverageBuilder MCDCBuilder;
@@ -835,16 +915,22 @@ struct CounterCoverageMappingBuilder
/// Return a counter for the subtraction of \c RHS from \c LHS
Counter subtractCounters(Counter LHS, Counter RHS, bool Simplify = true) {
+ assert(!llvm::EnableSingleByteCoverage &&
+ "cannot add counters when single byte coverage mode is enabled");
return Builder.subtract(LHS, RHS, Simplify);
}
/// Return a counter for the sum of \c LHS and \c RHS.
Counter addCounters(Counter LHS, Counter RHS, bool Simplify = true) {
+ assert(!llvm::EnableSingleByteCoverage &&
+ "cannot add counters when single byte coverage mode is enabled");
return Builder.add(LHS, RHS, Simplify);
}
Counter addCounters(Counter C1, Counter C2, Counter C3,
bool Simplify = true) {
+ assert(!llvm::EnableSingleByteCoverage &&
+ "cannot add counters when single byte coverage mode is enabled");
return addCounters(addCounters(C1, C2, Simplify), C3, Simplify);
}
@@ -855,8 +941,6 @@ struct CounterCoverageMappingBuilder
return Counter::getCounter(CounterMap[S]);
}
- unsigned getRegionBitmap(const Stmt *S) { return MCDCBitmapMap[S]; }
-
/// Push a region onto the stack.
///
/// Returns the index on the stack where the region was pushed. This can be
@@ -865,8 +949,7 @@ struct CounterCoverageMappingBuilder
std::optional<SourceLocation> StartLoc = std::nullopt,
std::optional<SourceLocation> EndLoc = std::nullopt,
std::optional<Counter> FalseCount = std::nullopt,
- MCDCConditionID ID = 0, MCDCConditionID TrueID = 0,
- MCDCConditionID FalseID = 0) {
+ const mcdc::Parameters &BranchParams = std::monostate()) {
if (StartLoc && !FalseCount) {
MostRecentLocation = *StartLoc;
@@ -885,19 +968,16 @@ struct CounterCoverageMappingBuilder
StartLoc = std::nullopt;
if (EndLoc && EndLoc->isInvalid())
EndLoc = std::nullopt;
- RegionStack.emplace_back(Count, FalseCount,
- MCDCParameters{0, 0, ID, TrueID, FalseID},
- StartLoc, EndLoc);
+ RegionStack.emplace_back(Count, FalseCount, BranchParams, StartLoc, EndLoc);
return RegionStack.size() - 1;
}
- size_t pushRegion(unsigned BitmapIdx, unsigned Conditions,
+ size_t pushRegion(const mcdc::DecisionParameters &DecisionParams,
std::optional<SourceLocation> StartLoc = std::nullopt,
std::optional<SourceLocation> EndLoc = std::nullopt) {
- RegionStack.emplace_back(MCDCParameters{BitmapIdx, Conditions}, StartLoc,
- EndLoc);
+ RegionStack.emplace_back(DecisionParams, StartLoc, EndLoc);
return RegionStack.size() - 1;
}
@@ -1024,15 +1104,12 @@ struct CounterCoverageMappingBuilder
return (Cond->EvaluateAsInt(Result, CVM.getCodeGenModule().getContext()));
}
- using MCDCDecisionIDPair = MCDCCoverageBuilder::DecisionIDPair;
-
/// Create a Branch Region around an instrumentable condition for coverage
/// and add it to the function's SourceRegions. A branch region tracks a
/// "True" counter and a "False" counter for boolean expressions that
/// result in the generation of a branch.
- void
- createBranchRegion(const Expr *C, Counter TrueCnt, Counter FalseCnt,
- const MCDCDecisionIDPair &IDPair = MCDCDecisionIDPair()) {
+ void createBranchRegion(const Expr *C, Counter TrueCnt, Counter FalseCnt,
+ const mcdc::ConditionIDs &Conds = {}) {
// Check for NULL conditions.
if (!C)
return;
@@ -1041,10 +1118,14 @@ struct CounterCoverageMappingBuilder
// region onto RegionStack but immediately pop it (which adds it to the
// function's SourceRegions) because it doesn't apply to any other source
// code other than the Condition.
- if (CodeGenFunction::isInstrumentedCondition(C)) {
- MCDCConditionID ID = MCDCBuilder.getCondID(C);
- MCDCConditionID TrueID = IDPair.TrueID;
- MCDCConditionID FalseID = IDPair.FalseID;
+ // With !SystemHeadersCoverage, binary logical ops in system headers may be
+ // treated as instrumentable conditions.
+ if (CodeGenFunction::isInstrumentedCondition(C) ||
+ LeafExprSet.count(CodeGenFunction::stripCond(C))) {
+ mcdc::Parameters BranchParams;
+ mcdc::ConditionID ID = MCDCBuilder.getCondID(C);
+ if (ID >= 0)
+ BranchParams = mcdc::BranchParameters{ID, Conds};
// If a condition can fold to true or false, the corresponding branch
// will be removed. Create a region with both counters hard-coded to
@@ -1054,19 +1135,20 @@ struct CounterCoverageMappingBuilder
// CodeGenFunction.c always returns false, but that is very heavy-handed.
if (ConditionFoldsToBool(C))
popRegions(pushRegion(Counter::getZero(), getStart(C), getEnd(C),
- Counter::getZero(), ID, TrueID, FalseID));
+ Counter::getZero(), BranchParams));
else
// Otherwise, create a region with the True counter and False counter.
- popRegions(pushRegion(TrueCnt, getStart(C), getEnd(C), FalseCnt, ID,
- TrueID, FalseID));
+ popRegions(pushRegion(TrueCnt, getStart(C), getEnd(C), FalseCnt,
+ BranchParams));
}
}
/// Create a Decision Region with a BitmapIdx and number of Conditions. This
/// type of region "contains" branch regions, one for each of the conditions.
/// The visualization tool will group everything together.
- void createDecisionRegion(const Expr *C, unsigned BitmapIdx, unsigned Conds) {
- popRegions(pushRegion(BitmapIdx, Conds, getStart(C), getEnd(C)));
+ void createDecisionRegion(const Expr *C,
+ const mcdc::DecisionParameters &DecisionParams) {
+ popRegions(pushRegion(DecisionParams, getStart(C), getEnd(C)));
}
/// Create a Branch Region around a SwitchCase for code coverage
@@ -1149,12 +1231,9 @@ struct CounterCoverageMappingBuilder
// we've seen this region.
if (StartLocs.insert(Loc).second) {
if (I.isBranch())
- SourceRegions.emplace_back(
- I.getCounter(), I.getFalseCounter(),
- MCDCParameters{0, 0, I.getMCDCParams().ID,
- I.getMCDCParams().TrueID,
- I.getMCDCParams().FalseID},
- Loc, getEndOfFileOrMacro(Loc), I.isBranch());
+ SourceRegions.emplace_back(I.getCounter(), I.getFalseCounter(),
+ I.getMCDCParams(), Loc,
+ getEndOfFileOrMacro(Loc), I.isBranch());
else
SourceRegions.emplace_back(I.getCounter(), Loc,
getEndOfFileOrMacro(Loc));
@@ -1207,6 +1286,12 @@ struct CounterCoverageMappingBuilder
/// Find a valid gap range between \p AfterLoc and \p BeforeLoc.
std::optional<SourceRange> findGapAreaBetween(SourceLocation AfterLoc,
SourceLocation BeforeLoc) {
+ // Some statements (like AttributedStmt and ImplicitValueInitExpr) don't
+ // have valid source locations. Do not emit a gap region if this is the case
+ // in either AfterLoc end or BeforeLoc end.
+ if (AfterLoc.isInvalid() || BeforeLoc.isInvalid())
+ return std::nullopt;
+
// If AfterLoc is in function-like macro, use the right parenthesis
// location.
if (AfterLoc.isMacroID()) {
@@ -1321,7 +1406,7 @@ struct CounterCoverageMappingBuilder
return;
assert(SpellingRegion(SM, NewStartLoc, EndLoc).isInSourceOrder());
handleFileExit(NewStartLoc);
- size_t Index = pushRegion({}, NewStartLoc, EndLoc);
+ size_t Index = pushRegion(Counter{}, NewStartLoc, EndLoc);
getRegion().setSkipped(true);
handleFileExit(EndLoc);
popRegions(Index);
@@ -1337,12 +1422,9 @@ struct CounterCoverageMappingBuilder
CounterCoverageMappingBuilder(
CoverageMappingModuleGen &CVM,
llvm::DenseMap<const Stmt *, unsigned> &CounterMap,
- llvm::DenseMap<const Stmt *, unsigned> &MCDCBitmapMap,
- llvm::DenseMap<const Stmt *, MCDCConditionID> &CondIDMap,
- SourceManager &SM, const LangOptions &LangOpts)
+ MCDC::State &MCDCState, SourceManager &SM, const LangOptions &LangOpts)
: CoverageMappingBuilder(CVM, SM, LangOpts), CounterMap(CounterMap),
- MCDCBitmapMap(MCDCBitmapMap),
- MCDCBuilder(CVM.getCodeGenModule(), CondIDMap, MCDCBitmapMap) {}
+ MCDCState(MCDCState), MCDCBuilder(CVM.getCodeGenModule(), MCDCState) {}
/// Write the mapping data to the output stream
void write(llvm::raw_ostream &OS) {
@@ -1370,9 +1452,8 @@ struct CounterCoverageMappingBuilder
for (const Stmt *Child : S->children())
if (Child) {
// If last statement contains terminate statements, add a gap area
- // between the two statements. Skipping attributed statements, because
- // they don't have valid start location.
- if (LastStmt && HasTerminateStmt && !isa<AttributedStmt>(Child)) {
+ // between the two statements.
+ if (LastStmt && HasTerminateStmt) {
auto Gap = findGapAreaBetween(getEnd(LastStmt), getStart(Child));
if (Gap)
fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(),
@@ -1439,6 +1520,10 @@ struct CounterCoverageMappingBuilder
terminateRegion(S);
}
+ void VisitCoroutineSuspendExpr(const CoroutineSuspendExpr *E) {
+ Visit(E->getOperand());
+ }
+
void VisitCXXThrowExpr(const CXXThrowExpr *E) {
extendRegion(E);
if (E->getSubExpr())
@@ -1459,8 +1544,9 @@ struct CounterCoverageMappingBuilder
void VisitBreakStmt(const BreakStmt *S) {
assert(!BreakContinueStack.empty() && "break not in a loop or switch!");
- BreakContinueStack.back().BreakCount = addCounters(
- BreakContinueStack.back().BreakCount, getRegion().getCounter());
+ if (!llvm::EnableSingleByteCoverage)
+ BreakContinueStack.back().BreakCount = addCounters(
+ BreakContinueStack.back().BreakCount, getRegion().getCounter());
// FIXME: a break in a switch should terminate regions for all preceding
// case statements, not just the most recent one.
terminateRegion(S);
@@ -1468,8 +1554,9 @@ struct CounterCoverageMappingBuilder
void VisitContinueStmt(const ContinueStmt *S) {
assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
- BreakContinueStack.back().ContinueCount = addCounters(
- BreakContinueStack.back().ContinueCount, getRegion().getCounter());
+ if (!llvm::EnableSingleByteCoverage)
+ BreakContinueStack.back().ContinueCount = addCounters(
+ BreakContinueStack.back().ContinueCount, getRegion().getCounter());
terminateRegion(S);
}
@@ -1487,7 +1574,9 @@ struct CounterCoverageMappingBuilder
extendRegion(S);
Counter ParentCount = getRegion().getCounter();
- Counter BodyCount = getRegionCounter(S);
+ Counter BodyCount = llvm::EnableSingleByteCoverage
+ ? getRegionCounter(S->getBody())
+ : getRegionCounter(S);
// Handle the body first so that we can get the backedge count.
BreakContinueStack.push_back(BreakContinue());
@@ -1500,7 +1589,9 @@ struct CounterCoverageMappingBuilder
// Go back to handle the condition.
Counter CondCount =
- addCounters(ParentCount, BackedgeCount, BC.ContinueCount);
+ llvm::EnableSingleByteCoverage
+ ? getRegionCounter(S->getCond())
+ : addCounters(ParentCount, BackedgeCount, BC.ContinueCount);
propagateCounts(CondCount, S->getCond());
adjustForOutOfOrderTraversal(getEnd(S));
@@ -1510,7 +1601,11 @@ struct CounterCoverageMappingBuilder
fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), BodyCount);
Counter OutCount =
- addCounters(BC.BreakCount, subtractCounters(CondCount, BodyCount));
+ llvm::EnableSingleByteCoverage
+ ? getRegionCounter(S)
+ : addCounters(BC.BreakCount,
+ subtractCounters(CondCount, BodyCount));
+
if (OutCount != ParentCount) {
pushRegion(OutCount);
GapRegionCounter = OutCount;
@@ -1519,38 +1614,53 @@ struct CounterCoverageMappingBuilder
}
// Create Branch Region around condition.
- createBranchRegion(S->getCond(), BodyCount,
- subtractCounters(CondCount, BodyCount));
+ if (!llvm::EnableSingleByteCoverage)
+ createBranchRegion(S->getCond(), BodyCount,
+ subtractCounters(CondCount, BodyCount));
}
void VisitDoStmt(const DoStmt *S) {
extendRegion(S);
Counter ParentCount = getRegion().getCounter();
- Counter BodyCount = getRegionCounter(S);
+ Counter BodyCount = llvm::EnableSingleByteCoverage
+ ? getRegionCounter(S->getBody())
+ : getRegionCounter(S);
BreakContinueStack.push_back(BreakContinue());
extendRegion(S->getBody());
- Counter BackedgeCount =
- propagateCounts(addCounters(ParentCount, BodyCount), S->getBody());
+
+ Counter BackedgeCount;
+ if (llvm::EnableSingleByteCoverage)
+ propagateCounts(BodyCount, S->getBody());
+ else
+ BackedgeCount =
+ propagateCounts(addCounters(ParentCount, BodyCount), S->getBody());
+
BreakContinue BC = BreakContinueStack.pop_back_val();
bool BodyHasTerminateStmt = HasTerminateStmt;
HasTerminateStmt = false;
- Counter CondCount = addCounters(BackedgeCount, BC.ContinueCount);
+ Counter CondCount = llvm::EnableSingleByteCoverage
+ ? getRegionCounter(S->getCond())
+ : addCounters(BackedgeCount, BC.ContinueCount);
propagateCounts(CondCount, S->getCond());
Counter OutCount =
- addCounters(BC.BreakCount, subtractCounters(CondCount, BodyCount));
+ llvm::EnableSingleByteCoverage
+ ? getRegionCounter(S)
+ : addCounters(BC.BreakCount,
+ subtractCounters(CondCount, BodyCount));
if (OutCount != ParentCount) {
pushRegion(OutCount);
GapRegionCounter = OutCount;
}
// Create Branch Region around condition.
- createBranchRegion(S->getCond(), BodyCount,
- subtractCounters(CondCount, BodyCount));
+ if (!llvm::EnableSingleByteCoverage)
+ createBranchRegion(S->getCond(), BodyCount,
+ subtractCounters(CondCount, BodyCount));
if (BodyHasTerminateStmt)
HasTerminateStmt = true;
@@ -1562,7 +1672,9 @@ struct CounterCoverageMappingBuilder
Visit(S->getInit());
Counter ParentCount = getRegion().getCounter();
- Counter BodyCount = getRegionCounter(S);
+ Counter BodyCount = llvm::EnableSingleByteCoverage
+ ? getRegionCounter(S->getBody())
+ : getRegionCounter(S);
// The loop increment may contain a break or continue.
if (S->getInc())
@@ -1581,14 +1693,23 @@ struct CounterCoverageMappingBuilder
// the count for all the continue statements.
BreakContinue IncrementBC;
if (const Stmt *Inc = S->getInc()) {
- propagateCounts(addCounters(BackedgeCount, BodyBC.ContinueCount), Inc);
+ Counter IncCount;
+ if (llvm::EnableSingleByteCoverage)
+ IncCount = getRegionCounter(S->getInc());
+ else
+ IncCount = addCounters(BackedgeCount, BodyBC.ContinueCount);
+ propagateCounts(IncCount, Inc);
IncrementBC = BreakContinueStack.pop_back_val();
}
// Go back to handle the condition.
- Counter CondCount = addCounters(
- addCounters(ParentCount, BackedgeCount, BodyBC.ContinueCount),
- IncrementBC.ContinueCount);
+ Counter CondCount =
+ llvm::EnableSingleByteCoverage
+ ? getRegionCounter(S->getCond())
+ : addCounters(
+ addCounters(ParentCount, BackedgeCount, BodyBC.ContinueCount),
+ IncrementBC.ContinueCount);
+
if (const Expr *Cond = S->getCond()) {
propagateCounts(CondCount, Cond);
adjustForOutOfOrderTraversal(getEnd(S));
@@ -1599,8 +1720,11 @@ struct CounterCoverageMappingBuilder
if (Gap)
fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), BodyCount);
- Counter OutCount = addCounters(BodyBC.BreakCount, IncrementBC.BreakCount,
- subtractCounters(CondCount, BodyCount));
+ Counter OutCount =
+ llvm::EnableSingleByteCoverage
+ ? getRegionCounter(S)
+ : addCounters(BodyBC.BreakCount, IncrementBC.BreakCount,
+ subtractCounters(CondCount, BodyCount));
if (OutCount != ParentCount) {
pushRegion(OutCount);
GapRegionCounter = OutCount;
@@ -1609,8 +1733,9 @@ struct CounterCoverageMappingBuilder
}
// Create Branch Region around condition.
- createBranchRegion(S->getCond(), BodyCount,
- subtractCounters(CondCount, BodyCount));
+ if (!llvm::EnableSingleByteCoverage)
+ createBranchRegion(S->getCond(), BodyCount,
+ subtractCounters(CondCount, BodyCount));
}
void VisitCXXForRangeStmt(const CXXForRangeStmt *S) {
@@ -1621,7 +1746,9 @@ struct CounterCoverageMappingBuilder
Visit(S->getRangeStmt());
Counter ParentCount = getRegion().getCounter();
- Counter BodyCount = getRegionCounter(S);
+ Counter BodyCount = llvm::EnableSingleByteCoverage
+ ? getRegionCounter(S->getBody())
+ : getRegionCounter(S);
BreakContinueStack.push_back(BreakContinue());
extendRegion(S->getBody());
@@ -1636,10 +1763,15 @@ struct CounterCoverageMappingBuilder
if (Gap)
fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), BodyCount);
- Counter LoopCount =
- addCounters(ParentCount, BackedgeCount, BC.ContinueCount);
- Counter OutCount =
- addCounters(BC.BreakCount, subtractCounters(LoopCount, BodyCount));
+ Counter OutCount;
+ Counter LoopCount;
+ if (llvm::EnableSingleByteCoverage)
+ OutCount = getRegionCounter(S);
+ else {
+ LoopCount = addCounters(ParentCount, BackedgeCount, BC.ContinueCount);
+ OutCount =
+ addCounters(BC.BreakCount, subtractCounters(LoopCount, BodyCount));
+ }
if (OutCount != ParentCount) {
pushRegion(OutCount);
GapRegionCounter = OutCount;
@@ -1648,8 +1780,9 @@ struct CounterCoverageMappingBuilder
}
// Create Branch Region around condition.
- createBranchRegion(S->getCond(), BodyCount,
- subtractCounters(LoopCount, BodyCount));
+ if (!llvm::EnableSingleByteCoverage)
+ createBranchRegion(S->getCond(), BodyCount,
+ subtractCounters(LoopCount, BodyCount));
}
void VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S) {
@@ -1710,7 +1843,7 @@ struct CounterCoverageMappingBuilder
propagateCounts(Counter::getZero(), Body);
BreakContinue BC = BreakContinueStack.pop_back_val();
- if (!BreakContinueStack.empty())
+ if (!BreakContinueStack.empty() && !llvm::EnableSingleByteCoverage)
BreakContinueStack.back().ContinueCount = addCounters(
BreakContinueStack.back().ContinueCount, BC.ContinueCount);
@@ -1725,6 +1858,11 @@ struct CounterCoverageMappingBuilder
MostRecentLocation = getStart(S);
handleFileExit(ExitLoc);
+ // When single byte coverage mode is enabled, do not create branch region by
+ // early returning.
+ if (llvm::EnableSingleByteCoverage)
+ return;
+
// Create a Branch Region around each Case. Subtract the case's
// counter from the Parent counter to track the "False" branch count.
Counter CaseCountSum;
@@ -1757,8 +1895,10 @@ struct CounterCoverageMappingBuilder
extendRegion(S);
SourceMappingRegion &Parent = getRegion();
+ Counter Count = llvm::EnableSingleByteCoverage
+ ? getRegionCounter(S)
+ : addCounters(Parent.getCounter(), getRegionCounter(S));
- Counter Count = addCounters(Parent.getCounter(), getRegionCounter(S));
// Reuse the existing region if it starts at our label. This is typical of
// the first case in a switch.
if (Parent.hasStartLoc() && Parent.getBeginLoc() == getStart(S))
@@ -1876,7 +2016,9 @@ struct CounterCoverageMappingBuilder
extendRegion(S->getCond());
Counter ParentCount = getRegion().getCounter();
- Counter ThenCount = getRegionCounter(S);
+ Counter ThenCount = llvm::EnableSingleByteCoverage
+ ? getRegionCounter(S->getThen())
+ : getRegionCounter(S);
// Emitting a counter for the condition makes it easier to interpret the
// counter for the body when looking at the coverage.
@@ -1890,7 +2032,12 @@ struct CounterCoverageMappingBuilder
extendRegion(S->getThen());
Counter OutCount = propagateCounts(ThenCount, S->getThen());
- Counter ElseCount = subtractCounters(ParentCount, ThenCount);
+
+ Counter ElseCount;
+ if (!llvm::EnableSingleByteCoverage)
+ ElseCount = subtractCounters(ParentCount, ThenCount);
+ else if (S->getElse())
+ ElseCount = getRegionCounter(S->getElse());
if (const Stmt *Else = S->getElse()) {
bool ThenHasTerminateStmt = HasTerminateStmt;
@@ -1901,21 +2048,28 @@ struct CounterCoverageMappingBuilder
if (Gap)
fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), ElseCount);
extendRegion(Else);
- OutCount = addCounters(OutCount, propagateCounts(ElseCount, Else));
+
+ Counter ElseOutCount = propagateCounts(ElseCount, Else);
+ if (!llvm::EnableSingleByteCoverage)
+ OutCount = addCounters(OutCount, ElseOutCount);
if (ThenHasTerminateStmt)
HasTerminateStmt = true;
- } else
+ } else if (!llvm::EnableSingleByteCoverage)
OutCount = addCounters(OutCount, ElseCount);
+ if (llvm::EnableSingleByteCoverage)
+ OutCount = getRegionCounter(S);
+
if (OutCount != ParentCount) {
pushRegion(OutCount);
GapRegionCounter = OutCount;
}
- // Create Branch Region around condition.
- createBranchRegion(S->getCond(), ThenCount,
- subtractCounters(ParentCount, ThenCount));
+ if (!S->isConsteval() && !llvm::EnableSingleByteCoverage)
+ // Create Branch Region around condition.
+ createBranchRegion(S->getCond(), ThenCount,
+ subtractCounters(ParentCount, ThenCount));
}
void VisitCXXTryStmt(const CXXTryStmt *S) {
@@ -1941,12 +2095,16 @@ struct CounterCoverageMappingBuilder
extendRegion(E);
Counter ParentCount = getRegion().getCounter();
- Counter TrueCount = getRegionCounter(E);
-
- propagateCounts(ParentCount, E->getCond());
+ Counter TrueCount = llvm::EnableSingleByteCoverage
+ ? getRegionCounter(E->getTrueExpr())
+ : getRegionCounter(E);
Counter OutCount;
- if (!isa<BinaryConditionalOperator>(E)) {
+ if (const auto *BCO = dyn_cast<BinaryConditionalOperator>(E)) {
+ propagateCounts(ParentCount, BCO->getCommon());
+ OutCount = TrueCount;
+ } else {
+ propagateCounts(ParentCount, E->getCond());
// The 'then' count applies to the area immediately after the condition.
auto Gap =
findGapAreaBetween(E->getQuestionLoc(), getStart(E->getTrueExpr()));
@@ -1958,9 +2116,15 @@ struct CounterCoverageMappingBuilder
}
extendRegion(E->getFalseExpr());
- OutCount = addCounters(
- OutCount, propagateCounts(subtractCounters(ParentCount, TrueCount),
- E->getFalseExpr()));
+ Counter FalseCount = llvm::EnableSingleByteCoverage
+ ? getRegionCounter(E->getFalseExpr())
+ : subtractCounters(ParentCount, TrueCount);
+
+ Counter FalseOutCount = propagateCounts(FalseCount, E->getFalseExpr());
+ if (llvm::EnableSingleByteCoverage)
+ OutCount = getRegionCounter(E);
+ else
+ OutCount = addCounters(OutCount, FalseOutCount);
if (OutCount != ParentCount) {
pushRegion(OutCount);
@@ -1968,13 +2132,93 @@ struct CounterCoverageMappingBuilder
}
// Create Branch Region around condition.
- createBranchRegion(E->getCond(), TrueCount,
- subtractCounters(ParentCount, TrueCount));
+ if (!llvm::EnableSingleByteCoverage)
+ createBranchRegion(E->getCond(), TrueCount,
+ subtractCounters(ParentCount, TrueCount));
+ }
+
+ void createOrCancelDecision(const BinaryOperator *E, unsigned Since) {
+ unsigned NumConds = MCDCBuilder.getTotalConditionsAndReset(E);
+ if (NumConds == 0)
+ return;
+
+ // Extract [ID, Conds] to construct the graph.
+ llvm::SmallVector<mcdc::ConditionIDs> CondIDs(NumConds);
+ for (const auto &SR : ArrayRef(SourceRegions).slice(Since)) {
+ if (SR.isMCDCBranch()) {
+ auto [ID, Conds] = SR.getMCDCBranchParams();
+ CondIDs[ID] = Conds;
+ }
+ }
+
+ // Construct the graph and calculate `Indices`.
+ mcdc::TVIdxBuilder Builder(CondIDs);
+ unsigned NumTVs = Builder.NumTestVectors;
+ unsigned MaxTVs = CVM.getCodeGenModule().getCodeGenOpts().MCDCMaxTVs;
+ assert(MaxTVs < mcdc::TVIdxBuilder::HardMaxTVs);
+
+ if (NumTVs > MaxTVs) {
+ // NumTVs exceeds MaxTVs -- warn and cancel the Decision.
+ cancelDecision(E, Since, NumTVs, MaxTVs);
+ return;
+ }
+
+ // Update the state for CodeGenPGO
+ assert(MCDCState.DecisionByStmt.contains(E));
+ MCDCState.DecisionByStmt[E] = {
+ MCDCState.BitmapBits, // Top
+ std::move(Builder.Indices),
+ };
+
+ auto DecisionParams = mcdc::DecisionParameters{
+ MCDCState.BitmapBits += NumTVs, // Tail
+ NumConds,
+ };
+
+ // Create MCDC Decision Region.
+ createDecisionRegion(E, DecisionParams);
+ }
+
+ // Warn and cancel the Decision.
+ void cancelDecision(const BinaryOperator *E, unsigned Since, int NumTVs,
+ int MaxTVs) {
+ auto &Diag = CVM.getCodeGenModule().getDiags();
+ unsigned DiagID =
+ Diag.getCustomDiagID(DiagnosticsEngine::Warning,
+ "unsupported MC/DC boolean expression; "
+ "number of test vectors (%0) exceeds max (%1). "
+ "Expression will not be covered");
+ Diag.Report(E->getBeginLoc(), DiagID) << NumTVs << MaxTVs;
+
+ // Restore MCDCBranch to Branch.
+ for (auto &SR : MutableArrayRef(SourceRegions).slice(Since)) {
+ assert(!SR.isMCDCDecision() && "Decision shouldn't be seen here");
+ if (SR.isMCDCBranch())
+ SR.resetMCDCParams();
+ }
+
+ // Tell CodeGenPGO not to instrument.
+ MCDCState.DecisionByStmt.erase(E);
+ }
+
+ /// Check if E belongs to system headers.
+ bool isExprInSystemHeader(const BinaryOperator *E) const {
+ return (!SystemHeadersCoverage &&
+ SM.isInSystemHeader(SM.getSpellingLoc(E->getOperatorLoc())) &&
+ SM.isInSystemHeader(SM.getSpellingLoc(E->getBeginLoc())) &&
+ SM.isInSystemHeader(SM.getSpellingLoc(E->getEndLoc())));
}
void VisitBinLAnd(const BinaryOperator *E) {
+ if (isExprInSystemHeader(E)) {
+ LeafExprSet.insert(E);
+ return;
+ }
+
bool IsRootNode = MCDCBuilder.isIdle();
+ unsigned SourceRegionsSince = SourceRegions.size();
+
// Keep track of Binary Operator and assign MCDC condition IDs.
MCDCBuilder.pushAndAssignIDs(E);
@@ -1992,11 +2236,6 @@ struct CounterCoverageMappingBuilder
// Track RHS True/False Decision.
const auto DecisionRHS = MCDCBuilder.back();
- // Create MCDC Decision Region if at top-level (root).
- unsigned NumConds = 0;
- if (IsRootNode && (NumConds = MCDCBuilder.getTotalConditionsAndReset(E)))
- createDecisionRegion(E, getRegionBitmap(E), NumConds);
-
// Extract the RHS's Execution Counter.
Counter RHSExecCnt = getRegionCounter(E);
@@ -2007,12 +2246,18 @@ struct CounterCoverageMappingBuilder
Counter ParentCnt = getRegion().getCounter();
// Create Branch Region around LHS condition.
- createBranchRegion(E->getLHS(), RHSExecCnt,
- subtractCounters(ParentCnt, RHSExecCnt), DecisionLHS);
+ if (!llvm::EnableSingleByteCoverage)
+ createBranchRegion(E->getLHS(), RHSExecCnt,
+ subtractCounters(ParentCnt, RHSExecCnt), DecisionLHS);
// Create Branch Region around RHS condition.
- createBranchRegion(E->getRHS(), RHSTrueCnt,
- subtractCounters(RHSExecCnt, RHSTrueCnt), DecisionRHS);
+ if (!llvm::EnableSingleByteCoverage)
+ createBranchRegion(E->getRHS(), RHSTrueCnt,
+ subtractCounters(RHSExecCnt, RHSTrueCnt), DecisionRHS);
+
+ // Create MCDC Decision Region if at top-level (root).
+ if (IsRootNode)
+ createOrCancelDecision(E, SourceRegionsSince);
}
// Determine whether the right side of OR operation need to be visited.
@@ -2026,8 +2271,15 @@ struct CounterCoverageMappingBuilder
}
void VisitBinLOr(const BinaryOperator *E) {
+ if (isExprInSystemHeader(E)) {
+ LeafExprSet.insert(E);
+ return;
+ }
+
bool IsRootNode = MCDCBuilder.isIdle();
+ unsigned SourceRegionsSince = SourceRegions.size();
+
// Keep track of Binary Operator and assign MCDC condition IDs.
MCDCBuilder.pushAndAssignIDs(E);
@@ -2045,11 +2297,6 @@ struct CounterCoverageMappingBuilder
// Track RHS True/False Decision.
const auto DecisionRHS = MCDCBuilder.back();
- // Create MCDC Decision Region if at top-level (root).
- unsigned NumConds = 0;
- if (IsRootNode && (NumConds = MCDCBuilder.getTotalConditionsAndReset(E)))
- createDecisionRegion(E, getRegionBitmap(E), NumConds);
-
// Extract the RHS's Execution Counter.
Counter RHSExecCnt = getRegionCounter(E);
@@ -2064,12 +2311,18 @@ struct CounterCoverageMappingBuilder
Counter ParentCnt = getRegion().getCounter();
// Create Branch Region around LHS condition.
- createBranchRegion(E->getLHS(), subtractCounters(ParentCnt, RHSExecCnt),
- RHSExecCnt, DecisionLHS);
+ if (!llvm::EnableSingleByteCoverage)
+ createBranchRegion(E->getLHS(), subtractCounters(ParentCnt, RHSExecCnt),
+ RHSExecCnt, DecisionLHS);
// Create Branch Region around RHS condition.
- createBranchRegion(E->getRHS(), subtractCounters(RHSExecCnt, RHSFalseCnt),
- RHSFalseCnt, DecisionRHS);
+ if (!llvm::EnableSingleByteCoverage)
+ createBranchRegion(E->getRHS(), subtractCounters(RHSExecCnt, RHSFalseCnt),
+ RHSFalseCnt, DecisionRHS);
+
+ // Create MCDC Decision Region if at top-level (root).
+ if (IsRootNode)
+ createOrCancelDecision(E, SourceRegionsSince);
}
void VisitLambdaExpr(const LambdaExpr *LE) {
@@ -2077,13 +2330,18 @@ struct CounterCoverageMappingBuilder
// propagate counts into them.
}
+ void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *AILE) {
+ Visit(AILE->getCommonExpr()->getSourceExpr());
+ }
+
void VisitPseudoObjectExpr(const PseudoObjectExpr *POE) {
// Just visit syntatic expression as this is what users actually write.
VisitStmt(POE->getSyntacticForm());
}
void VisitOpaqueValueExpr(const OpaqueValueExpr* OVE) {
- Visit(OVE->getSourceExpr());
+ if (OVE->isUnique())
+ Visit(OVE->getSourceExpr());
}
};
@@ -2120,9 +2378,10 @@ static void dump(llvm::raw_ostream &OS, StringRef FunctionName,
OS << "File " << R.FileID << ", " << R.LineStart << ":" << R.ColumnStart
<< " -> " << R.LineEnd << ":" << R.ColumnEnd << " = ";
- if (R.Kind == CounterMappingRegion::MCDCDecisionRegion) {
- OS << "M:" << R.MCDCParams.BitmapIdx;
- OS << ", C:" << R.MCDCParams.NumConditions;
+ if (const auto *DecisionParams =
+ std::get_if<mcdc::DecisionParameters>(&R.MCDCParams)) {
+ OS << "M:" << DecisionParams->BitmapIdx;
+ OS << ", C:" << DecisionParams->NumConditions;
} else {
Ctx.dump(R.Count, OS);
@@ -2133,9 +2392,11 @@ static void dump(llvm::raw_ostream &OS, StringRef FunctionName,
}
}
- if (R.Kind == CounterMappingRegion::MCDCBranchRegion) {
- OS << " [" << R.MCDCParams.ID << "," << R.MCDCParams.TrueID;
- OS << "," << R.MCDCParams.FalseID << "] ";
+ if (const auto *BranchParams =
+ std::get_if<mcdc::BranchParameters>(&R.MCDCParams)) {
+ OS << " [" << BranchParams->ID + 1 << ","
+ << BranchParams->Conds[true] + 1;
+ OS << "," << BranchParams->Conds[false] + 1 << "] ";
}
if (R.Kind == CounterMappingRegion::ExpansionRegion)
@@ -2344,9 +2605,9 @@ unsigned CoverageMappingModuleGen::getFileID(FileEntryRef File) {
void CoverageMappingGen::emitCounterMapping(const Decl *D,
llvm::raw_ostream &OS) {
- assert(CounterMap && MCDCBitmapMap);
- CounterCoverageMappingBuilder Walker(CVM, *CounterMap, *MCDCBitmapMap,
- *CondIDMap, SM, LangOpts);
+ assert(CounterMap && MCDCState);
+ CounterCoverageMappingBuilder Walker(CVM, *CounterMap, *MCDCState, SM,
+ LangOpts);
Walker.VisitDecl(D);
Walker.write(OS);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h b/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h
index 62cea173c9fc..fe4b93f3af85 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h
@@ -19,8 +19,13 @@
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/GlobalValue.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
+namespace llvm::coverage {
+extern cl::opt<bool> SystemHeadersCoverage;
+}
+
namespace clang {
class LangOptions;
@@ -91,6 +96,10 @@ namespace CodeGen {
class CodeGenModule;
+namespace MCDC {
+struct State;
+}
+
/// Organizes the cross-function state that is used while generating
/// code coverage mapping data.
class CoverageMappingModuleGen {
@@ -150,22 +159,20 @@ class CoverageMappingGen {
SourceManager &SM;
const LangOptions &LangOpts;
llvm::DenseMap<const Stmt *, unsigned> *CounterMap;
- llvm::DenseMap<const Stmt *, unsigned> *MCDCBitmapMap;
- llvm::DenseMap<const Stmt *, unsigned> *CondIDMap;
+ MCDC::State *MCDCState;
public:
CoverageMappingGen(CoverageMappingModuleGen &CVM, SourceManager &SM,
const LangOptions &LangOpts)
: CVM(CVM), SM(SM), LangOpts(LangOpts), CounterMap(nullptr),
- MCDCBitmapMap(nullptr), CondIDMap(nullptr) {}
+ MCDCState(nullptr) {}
CoverageMappingGen(CoverageMappingModuleGen &CVM, SourceManager &SM,
const LangOptions &LangOpts,
llvm::DenseMap<const Stmt *, unsigned> *CounterMap,
- llvm::DenseMap<const Stmt *, unsigned> *MCDCBitmapMap,
- llvm::DenseMap<const Stmt *, unsigned> *CondIDMap)
+ MCDC::State *MCDCState)
: CVM(CVM), SM(SM), LangOpts(LangOpts), CounterMap(CounterMap),
- MCDCBitmapMap(MCDCBitmapMap), CondIDMap(CondIDMap) {}
+ MCDCState(MCDCState) {}
/// Emit the coverage mapping data which maps the regions of
/// code to counters that will be used to find the execution
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
index d173806ec8ce..0be92fb2e275 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -23,6 +23,7 @@
#include "CGVTables.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Mangle.h"
@@ -178,7 +179,7 @@ public:
return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
}
- bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
+ bool shouldTypeidBeNullChecked(QualType SrcRecordTy) override;
void EmitBadTypeidCall(CodeGenFunction &CGF) override;
llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
Address ThisPtr,
@@ -307,10 +308,6 @@ public:
CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
BaseSubobject Base, const CXXRecordDecl *NearestVBase);
- llvm::Constant *
- getVTableAddressPointForConstExpr(BaseSubobject Base,
- const CXXRecordDecl *VTableClass) override;
-
llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
CharUnits VPtrOffset) override;
@@ -340,9 +337,11 @@ public:
bool exportThunk() override { return true; }
llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
- const ThisAdjustment &TA) override;
+ const CXXRecordDecl *UnadjustedThisClass,
+ const ThunkInfo &TI) override;
llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
+ const CXXRecordDecl *UnadjustedRetClass,
const ReturnAdjustment &RA) override;
size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
@@ -389,6 +388,9 @@ public:
bool NeedsVTTParameter(GlobalDecl GD) override;
+ llvm::Constant *
+ getOrCreateVirtualFunctionPointerThunk(const CXXMethodDecl *MD);
+
/**************************** RTTI Uniqueness ******************************/
protected:
@@ -427,6 +429,9 @@ public:
const CXXRecordDecl *RD) override;
private:
+ llvm::Constant *
+ getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD);
+
bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
const auto &VtableLayout =
CGM.getItaniumVTableContext().getVTableLayout(RD);
@@ -646,7 +651,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// Apply the adjustment and cast back to the original struct type
// for consistency.
- llvm::Value *This = ThisAddr.getPointer();
+ llvm::Value *This = ThisAddr.emitRawPointer(CGF);
This = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), This, Adj);
ThisPtrForCall = This;
@@ -836,7 +841,25 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
CalleePtr->addIncoming(VirtualFn, FnVirtual);
CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
- CGCallee Callee(FPT, CalleePtr);
+ CGPointerAuthInfo PointerAuth;
+
+ if (const auto &Schema =
+ CGM.getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers) {
+ llvm::PHINode *DiscriminatorPHI = Builder.CreatePHI(CGF.IntPtrTy, 2);
+ DiscriminatorPHI->addIncoming(llvm::ConstantInt::get(CGF.IntPtrTy, 0),
+ FnVirtual);
+ const auto &AuthInfo =
+ CGM.getMemberFunctionPointerAuthInfo(QualType(MPT, 0));
+ assert(Schema.getKey() == AuthInfo.getKey() &&
+ "Keys for virtual and non-virtual member functions must match");
+ auto *NonVirtualDiscriminator = AuthInfo.getDiscriminator();
+ DiscriminatorPHI->addIncoming(NonVirtualDiscriminator, FnNonVirtual);
+ PointerAuth = CGPointerAuthInfo(
+ Schema.getKey(), Schema.getAuthenticationMode(), Schema.isIsaPointer(),
+ Schema.authenticatesNullValues(), DiscriminatorPHI);
+ }
+
+ CGCallee Callee(FPT, CalleePtr, PointerAuth);
return Callee;
}
@@ -850,10 +873,29 @@ llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
CGBuilderTy &Builder = CGF.Builder;
// Apply the offset, which we assume is non-null.
- return Builder.CreateInBoundsGEP(CGF.Int8Ty, Base.getPointer(), MemPtr,
+ return Builder.CreateInBoundsGEP(CGF.Int8Ty, Base.emitRawPointer(CGF), MemPtr,
"memptr.offset");
}
+// See if it's possible to return a constant signed pointer.
+static llvm::Constant *pointerAuthResignConstant(
+ llvm::Value *Ptr, const CGPointerAuthInfo &CurAuthInfo,
+ const CGPointerAuthInfo &NewAuthInfo, CodeGenModule &CGM) {
+ const auto *CPA = dyn_cast<llvm::ConstantPtrAuth>(Ptr);
+
+ if (!CPA)
+ return nullptr;
+
+ assert(CPA->getKey()->getZExtValue() == CurAuthInfo.getKey() &&
+ CPA->getAddrDiscriminator()->isZeroValue() &&
+ CPA->getDiscriminator() == CurAuthInfo.getDiscriminator() &&
+ "unexpected key or discriminators");
+
+ return CGM.getConstantSignedPointer(
+ CPA->getPointer(), NewAuthInfo.getKey(), nullptr,
+ cast<llvm::ConstantInt>(NewAuthInfo.getDiscriminator()));
+}
+
/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
/// conversion.
///
@@ -881,21 +923,63 @@ llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
const CastExpr *E,
llvm::Value *src) {
+ // Use constant emission if we can.
+ if (isa<llvm::Constant>(src))
+ return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
+
assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
E->getCastKind() == CK_BaseToDerivedMemberPointer ||
E->getCastKind() == CK_ReinterpretMemberPointer);
+ CGBuilderTy &Builder = CGF.Builder;
+ QualType DstType = E->getType();
+
+ if (DstType->isMemberFunctionPointerType()) {
+ if (const auto &NewAuthInfo =
+ CGM.getMemberFunctionPointerAuthInfo(DstType)) {
+ QualType SrcType = E->getSubExpr()->getType();
+ assert(SrcType->isMemberFunctionPointerType());
+ const auto &CurAuthInfo = CGM.getMemberFunctionPointerAuthInfo(SrcType);
+ llvm::Value *MemFnPtr = Builder.CreateExtractValue(src, 0, "memptr.ptr");
+ llvm::Type *OrigTy = MemFnPtr->getType();
+
+ llvm::BasicBlock *StartBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *ResignBB = CGF.createBasicBlock("resign");
+ llvm::BasicBlock *MergeBB = CGF.createBasicBlock("merge");
+
+ // Check whether we have a virtual offset or a pointer to a function.
+ assert(UseARMMethodPtrABI && "ARM ABI expected");
+ llvm::Value *Adj = Builder.CreateExtractValue(src, 1, "memptr.adj");
+ llvm::Constant *Ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
+ llvm::Value *AndVal = Builder.CreateAnd(Adj, Ptrdiff_1);
+ llvm::Value *IsVirtualOffset =
+ Builder.CreateIsNotNull(AndVal, "is.virtual.offset");
+ Builder.CreateCondBr(IsVirtualOffset, MergeBB, ResignBB);
+
+ CGF.EmitBlock(ResignBB);
+ llvm::Type *PtrTy = llvm::PointerType::getUnqual(CGM.Int8Ty);
+ MemFnPtr = Builder.CreateIntToPtr(MemFnPtr, PtrTy);
+ MemFnPtr =
+ CGF.emitPointerAuthResign(MemFnPtr, SrcType, CurAuthInfo, NewAuthInfo,
+ isa<llvm::Constant>(src));
+ MemFnPtr = Builder.CreatePtrToInt(MemFnPtr, OrigTy);
+ llvm::Value *ResignedVal = Builder.CreateInsertValue(src, MemFnPtr, 0);
+ ResignBB = Builder.GetInsertBlock();
+
+ CGF.EmitBlock(MergeBB);
+ llvm::PHINode *NewSrc = Builder.CreatePHI(src->getType(), 2);
+ NewSrc->addIncoming(src, StartBB);
+ NewSrc->addIncoming(ResignedVal, ResignBB);
+ src = NewSrc;
+ }
+ }
+
// Under Itanium, reinterprets don't require any additional processing.
if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
- // Use constant emission if we can.
- if (isa<llvm::Constant>(src))
- return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
-
llvm::Constant *adj = getMemberPointerAdjustment(E);
if (!adj) return src;
- CGBuilderTy &Builder = CGF.Builder;
bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
const MemberPointerType *destTy =
@@ -933,6 +1017,34 @@ ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
return Builder.CreateInsertValue(src, dstAdj, 1);
}
+static llvm::Constant *
+pointerAuthResignMemberFunctionPointer(llvm::Constant *Src, QualType DestType,
+ QualType SrcType, CodeGenModule &CGM) {
+ assert(DestType->isMemberFunctionPointerType() &&
+ SrcType->isMemberFunctionPointerType() &&
+ "member function pointers expected");
+ if (DestType == SrcType)
+ return Src;
+
+ const auto &NewAuthInfo = CGM.getMemberFunctionPointerAuthInfo(DestType);
+ const auto &CurAuthInfo = CGM.getMemberFunctionPointerAuthInfo(SrcType);
+
+ if (!NewAuthInfo && !CurAuthInfo)
+ return Src;
+
+ llvm::Constant *MemFnPtr = Src->getAggregateElement(0u);
+ if (MemFnPtr->getNumOperands() == 0) {
+ // src must be a pair of null pointers.
+ assert(isa<llvm::ConstantInt>(MemFnPtr) && "constant int expected");
+ return Src;
+ }
+
+ llvm::Constant *ConstPtr = pointerAuthResignConstant(
+ cast<llvm::User>(MemFnPtr)->getOperand(0), CurAuthInfo, NewAuthInfo, CGM);
+ ConstPtr = llvm::ConstantExpr::getPtrToInt(ConstPtr, MemFnPtr->getType());
+ return ConstantFoldInsertValueInstruction(Src, ConstPtr, 0);
+}
+
llvm::Constant *
ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
llvm::Constant *src) {
@@ -940,6 +1052,12 @@ ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
E->getCastKind() == CK_BaseToDerivedMemberPointer ||
E->getCastKind() == CK_ReinterpretMemberPointer);
+ QualType DstType = E->getType();
+
+ if (DstType->isMemberFunctionPointerType())
+ src = pointerAuthResignMemberFunctionPointer(
+ src, DstType, E->getSubExpr()->getType(), CGM);
+
// Under Itanium, reinterprets don't require any additional processing.
if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
@@ -1037,9 +1155,32 @@ llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
// least significant bit of adj then makes exactly the same
// discrimination as the least significant bit of ptr does for
// Itanium.
- MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
- MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
- 2 * ThisAdjustment.getQuantity() + 1);
+
+ // We cannot use the Itanium ABI's representation for virtual member
+ // function pointers under pointer authentication because it would
+ // require us to store both the virtual offset and the constant
+ // discriminator in the pointer, which would be immediately vulnerable
+ // to attack. Instead we introduce a thunk that does the virtual dispatch
+ // and store it as if it were a non-virtual member function. This means
+ // that virtual function pointers may not compare equal anymore, but
+ // fortunately they aren't required to by the standard, and we do make
+ // a best-effort attempt to re-use the thunk.
+ //
+ // To support interoperation with code in which pointer authentication
+ // is disabled, derefencing a member function pointer must still handle
+ // the virtual case, but it can use a discriminator which should never
+ // be valid.
+ const auto &Schema =
+ CGM.getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers;
+ if (Schema)
+ MemPtr[0] = llvm::ConstantExpr::getPtrToInt(
+ getSignedVirtualMemberFunctionPointer(MD), CGM.PtrDiffTy);
+ else
+ MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
+ // Don't set the LSB of adj to 1 if pointer authentication for member
+ // function pointers is enabled.
+ MemPtr[1] = llvm::ConstantInt::get(
+ CGM.PtrDiffTy, 2 * ThisAdjustment.getQuantity() + !Schema);
} else {
// Itanium C++ ABI 2.3:
// For a virtual function, [the pointer field] is 1 plus the
@@ -1061,7 +1202,7 @@ llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
// function type is incomplete.
Ty = CGM.PtrDiffTy;
}
- llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
+ llvm::Constant *addr = CGM.getMemberFunctionPointer(MD, Ty);
MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
@@ -1081,8 +1222,12 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
- return BuildMemberPointer(MD, ThisAdjustment);
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD)) {
+ llvm::Constant *Src = BuildMemberPointer(MD, ThisAdjustment);
+ QualType SrcType = getContext().getMemberPointerType(
+ MD->getType(), MD->getParent()->getTypeForDecl());
+ return pointerAuthResignMemberFunctionPointer(Src, MPType, SrcType, CGM);
+ }
CharUnits FieldOffset =
getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
@@ -1245,7 +1390,7 @@ void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
CGF.getPointerAlign());
// Apply the offset.
- llvm::Value *CompletePtr = Ptr.getPointer();
+ llvm::Value *CompletePtr = Ptr.emitRawPointer(CGF);
CompletePtr =
CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);
@@ -1322,8 +1467,16 @@ void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
if (!Record->hasTrivialDestructor()) {
+ // __cxa_throw is declared to take its destructor as void (*)(void *). We
+ // must match that if function pointers can be authenticated with a
+ // discriminator based on their type.
+ const ASTContext &Ctx = getContext();
+ QualType DtorTy = Ctx.getFunctionType(Ctx.VoidTy, {Ctx.VoidPtrTy},
+ FunctionProtoType::ExtProtoInfo());
+
CXXDestructorDecl *DtorD = Record->getDestructor();
Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
+ Dtor = CGM.getFunctionPointer(Dtor, DtorTy);
}
}
if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
@@ -1347,9 +1500,10 @@ static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
- // Mark the function as nounwind readonly.
+ // Mark the function as nounwind willreturn readonly.
llvm::AttrBuilder FuncAttrs(CGF.getLLVMContext());
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
+ FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
llvm::AttributeList Attrs = llvm::AttributeList::get(
CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
@@ -1422,9 +1576,8 @@ static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}
-bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
- QualType SrcRecordTy) {
- return IsDeref;
+bool ItaniumCXXABI::shouldTypeidBeNullChecked(QualType SrcRecordTy) {
+ return true;
}
void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
@@ -1481,9 +1634,22 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastCall(
computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
// Emit the call to __dynamic_cast.
- llvm::Value *Args[] = {ThisAddr.getPointer(), SrcRTTI, DestRTTI, OffsetHint};
- llvm::Value *Value =
- CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), Args);
+ llvm::Value *Value = ThisAddr.emitRawPointer(CGF);
+ if (CGM.getCodeGenOpts().PointerAuth.CXXVTablePointers) {
+ // We perform a no-op load of the vtable pointer here to force an
+ // authentication. In environments that do not support pointer
+ // authentication this is a an actual no-op that will be elided. When
+ // pointer authentication is supported and enforced on vtable pointers this
+ // load can trap.
+ llvm::Value *Vtable =
+ CGF.GetVTablePtr(ThisAddr, CGM.Int8PtrTy, SrcDecl,
+ CodeGenFunction::VTableAuthMode::MustTrap);
+ assert(Vtable);
+ (void)Vtable;
+ }
+
+ llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
+ Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
/// C++ [expr.dynamic.cast]p9:
/// A failed cast to reference type throws std::bad_cast
@@ -1570,7 +1736,7 @@ llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
VPtr, CGM.getTBAAVTablePtrAccessInfo(CGF.VoidPtrPtrTy));
llvm::Value *Success = CGF.Builder.CreateICmpEQ(
VPtr, getVTableAddressPoint(BaseSubobject(SrcDecl, *Offset), DestDecl));
- llvm::Value *Result = ThisAddr.getPointer();
+ llvm::Value *Result = ThisAddr.emitRawPointer(CGF);
if (!Offset->isZero())
Result = CGF.Builder.CreateInBoundsGEP(
CGF.CharTy, Result,
@@ -1610,7 +1776,7 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
}
// Finally, add the offset to the pointer.
- return CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ThisAddr.getPointer(),
+ return CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ThisAddr.emitRawPointer(CGF),
OffsetToTop);
}
@@ -1791,8 +1957,39 @@ void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
else
Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
- CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
- nullptr);
+ CGF.EmitCXXDestructorCall(GD, Callee, CGF.getAsNaturalPointerTo(This, ThisTy),
+ ThisTy, VTT, VTTTy, nullptr);
+}
+
+// Check if any non-inline method has the specified attribute.
+template <typename T>
+static bool CXXRecordNonInlineHasAttr(const CXXRecordDecl *RD) {
+ for (const auto *D : RD->noload_decls()) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isInlined() || FD->doesThisDeclarationHaveABody() ||
+ FD->isPureVirtual())
+ continue;
+ if (D->hasAttr<T>())
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void setVTableSelectiveDLLImportExport(CodeGenModule &CGM,
+ llvm::GlobalVariable *VTable,
+ const CXXRecordDecl *RD) {
+ if (VTable->getDLLStorageClass() !=
+ llvm::GlobalVariable::DefaultStorageClass ||
+ RD->hasAttr<DLLImportAttr>() || RD->hasAttr<DLLExportAttr>())
+ return;
+
+ if (CGM.getVTables().isVTableExternal(RD)) {
+ if (CXXRecordNonInlineHasAttr<DLLImportAttr>(RD))
+ VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ } else if (CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
+ VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
}
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
@@ -1820,6 +2017,9 @@ void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
+ if (CGM.getTarget().hasPS4DLLImportExport())
+ setVTableSelectiveDLLImportExport(CGM, VTable, RD);
+
// Set the right visibility.
CGM.setGVProperties(VTable, RD);
@@ -1884,42 +2084,27 @@ ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
// Find the appropriate vtable within the vtable group, and the address point
// within that vtable.
+ const VTableLayout &Layout =
+ CGM.getItaniumVTableContext().getVTableLayout(VTableClass);
VTableLayout::AddressPointLocation AddressPoint =
- CGM.getItaniumVTableContext()
- .getVTableLayout(VTableClass)
- .getAddressPoint(Base);
+ Layout.getAddressPoint(Base);
llvm::Value *Indices[] = {
llvm::ConstantInt::get(CGM.Int32Ty, 0),
llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
};
- return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
- Indices, /*InBounds=*/true,
- /*InRangeIndex=*/1);
-}
-
-// Check whether all the non-inline virtual methods for the class have the
-// specified attribute.
-template <typename T>
-static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
- bool FoundNonInlineVirtualMethodWithAttr = false;
- for (const auto *D : RD->noload_decls()) {
- if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
- if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
- FD->doesThisDeclarationHaveABody())
- continue;
- if (!D->hasAttr<T>())
- return false;
- FoundNonInlineVirtualMethodWithAttr = true;
- }
- }
-
- // We didn't find any non-inline virtual methods missing the attribute. We
- // will return true when we found at least one non-inline virtual with the
- // attribute. (This lets our caller know that the attribute needs to be
- // propagated up to the vtable.)
- return FoundNonInlineVirtualMethodWithAttr;
+ // Add inrange attribute to indicate that only the VTableIndex can be
+ // accessed.
+ unsigned ComponentSize =
+ CGM.getDataLayout().getTypeAllocSize(CGM.getVTableComponentType());
+ unsigned VTableSize =
+ ComponentSize * Layout.getVTableSize(AddressPoint.VTableIndex);
+ unsigned Offset = ComponentSize * AddressPoint.AddressPointIndex;
+ llvm::ConstantRange InRange(llvm::APInt(32, -Offset, true),
+ llvm::APInt(32, VTableSize - Offset, true));
+ return llvm::ConstantExpr::getGetElementPtr(
+ VTable->getValueType(), VTable, Indices, /*InBounds=*/true, InRange);
}
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
@@ -1939,13 +2124,18 @@ llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
VirtualPointerIndex);
// And load the address point from the VTT.
- return CGF.Builder.CreateAlignedLoad(CGF.GlobalsVoidPtrTy, VTT,
- CGF.getPointerAlign());
-}
+ llvm::Value *AP =
+ CGF.Builder.CreateAlignedLoad(CGF.GlobalsVoidPtrTy, VTT,
+ CGF.getPointerAlign());
+
+ if (auto &Schema = CGF.CGM.getCodeGenOpts().PointerAuth.CXXVTTVTablePointers) {
+ CGPointerAuthInfo PointerAuth = CGF.EmitPointerAuthInfo(Schema, VTT,
+ GlobalDecl(),
+ QualType());
+ AP = CGF.EmitPointerAuthAuth(PointerAuth, AP);
+ }
-llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
- BaseSubobject Base, const CXXRecordDecl *VTableClass) {
- return getVTableAddressPoint(Base, VTableClass);
+ return AP;
}
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
@@ -1980,26 +2170,10 @@ llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
getContext().toCharUnitsFromBits(PAlign).getAsAlign());
VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- // In MS C++ if you have a class with virtual functions in which you are using
- // selective member import/export, then all virtual functions must be exported
- // unless they are inline, otherwise a link error will result. To match this
- // behavior, for such classes, we dllimport the vtable if it is defined
- // externally and all the non-inline virtual methods are marked dllimport, and
- // we dllexport the vtable if it is defined in this TU and all the non-inline
- // virtual methods are marked dllexport.
- if (CGM.getTarget().hasPS4DLLImportExport()) {
- if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) {
- if (CGM.getVTables().isVTableExternal(RD)) {
- if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD))
- VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
- } else {
- if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD))
- VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
- }
- }
- }
- CGM.setGVProperties(VTable, RD);
+ if (CGM.getTarget().hasPS4DLLImportExport())
+ setVTableSelectiveDLLImportExport(CGM, VTable, RD);
+ CGM.setGVProperties(VTable, RD);
return VTable;
}
@@ -2013,8 +2187,9 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
llvm::Value *VTable = CGF.GetVTablePtr(This, PtrTy, MethodDecl->getParent());
uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
- llvm::Value *VFunc;
- if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
+ llvm::Value *VFunc, *VTableSlotPtr = nullptr;
+ auto &Schema = CGM.getCodeGenOpts().PointerAuth.CXXVirtualFunctionPointers;
+ if (!Schema && CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
VFunc = CGF.EmitVTableTypeCheckedLoad(
MethodDecl->getParent(), VTable, PtrTy,
VTableIndex *
@@ -2029,7 +2204,7 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
{VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
} else {
- llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
+ VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
PtrTy, VTable, VTableIndex, "vfn");
VFuncLoad = CGF.Builder.CreateAlignedLoad(PtrTy, VTableSlotPtr,
CGF.getPointerAlign());
@@ -2053,7 +2228,13 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
VFunc = VFuncLoad;
}
- CGCallee Callee(GD, VFunc);
+ CGPointerAuthInfo PointerAuth;
+ if (Schema) {
+ assert(VTableSlotPtr && "virtual function pointer not set");
+ GD = CGM.getItaniumVTableContext().findOriginalMethod(GD.getCanonicalDecl());
+ PointerAuth = CGF.EmitPointerAuthInfo(Schema, VTableSlotPtr, GD, QualType());
+ }
+ CGCallee Callee(GD, VFunc, PointerAuth);
return Callee;
}
@@ -2079,8 +2260,8 @@ llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
ThisTy = D->getDestroyedType();
}
- CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
- QualType(), nullptr);
+ CGF.EmitCXXDestructorCall(GD, Callee, This.emitRawPointer(CGF), ThisTy,
+ nullptr, QualType(), nullptr);
return nullptr;
}
@@ -2134,6 +2315,9 @@ bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
if (!canSpeculativelyEmitVTableAsBaseClass(RD))
return false;
+ if (RD->shouldEmitInExternalSource())
+ return false;
+
// For a complete-object vtable (or more specifically, for the VTT), we need
// to be able to speculatively emit the vtables of all dynamic virtual bases.
for (const auto &B : RD->vbases()) {
@@ -2149,11 +2333,12 @@ bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
}
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
Address InitialPtr,
+ const CXXRecordDecl *UnadjustedClass,
int64_t NonVirtualAdjustment,
int64_t VirtualAdjustment,
bool IsReturnAdjustment) {
if (!NonVirtualAdjustment && !VirtualAdjustment)
- return InitialPtr.getPointer();
+ return InitialPtr.emitRawPointer(CGF);
Address V = InitialPtr.withElementType(CGF.Int8Ty);
@@ -2166,8 +2351,8 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
// Perform the virtual adjustment if we have one.
llvm::Value *ResultPtr;
if (VirtualAdjustment) {
- Address VTablePtrPtr = V.withElementType(CGF.Int8PtrTy);
- llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
+ llvm::Value *VTablePtr =
+ CGF.GetVTablePtr(V, CGF.Int8PtrTy, UnadjustedClass);
llvm::Value *Offset;
llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
@@ -2186,10 +2371,10 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
CGF.getPointerAlign());
}
// Adjust our pointer.
- ResultPtr = CGF.Builder.CreateInBoundsGEP(
- V.getElementType(), V.getPointer(), Offset);
+ ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getElementType(),
+ V.emitRawPointer(CGF), Offset);
} else {
- ResultPtr = V.getPointer();
+ ResultPtr = V.emitRawPointer(CGF);
}
// In a derived-to-base conversion, the non-virtual adjustment is
@@ -2202,18 +2387,20 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
return ResultPtr;
}
-llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
- Address This,
- const ThisAdjustment &TA) {
- return performTypeAdjustment(CGF, This, TA.NonVirtual,
- TA.Virtual.Itanium.VCallOffsetOffset,
+llvm::Value *
+ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF, Address This,
+ const CXXRecordDecl *UnadjustedClass,
+ const ThunkInfo &TI) {
+ return performTypeAdjustment(CGF, This, UnadjustedClass, TI.This.NonVirtual,
+ TI.This.Virtual.Itanium.VCallOffsetOffset,
/*IsReturnAdjustment=*/false);
}
llvm::Value *
ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
+ const CXXRecordDecl *UnadjustedClass,
const ReturnAdjustment &RA) {
- return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
+ return performTypeAdjustment(CGF, Ret, UnadjustedClass, RA.NonVirtual,
RA.Virtual.Itanium.VBaseOffsetOffset,
/*IsReturnAdjustment=*/true);
}
@@ -2275,7 +2462,7 @@ Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
llvm::FunctionCallee F =
CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
- CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
+ CGF.Builder.CreateCall(F, NumElementsPtr.emitRawPointer(CGF));
}
// Finally, compute a pointer to the actual data buffer by skipping
@@ -2306,7 +2493,7 @@ llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
llvm::FunctionType::get(CGF.SizeTy, CGF.UnqualPtrTy, false);
llvm::FunctionCallee F =
CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
- return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
+ return CGF.Builder.CreateCall(F, numElementsPtr.emitRawPointer(CGF));
}
CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
@@ -2618,7 +2805,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// Call __cxa_guard_release. This cannot throw.
CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
- guardAddr.getPointer());
+ guardAddr.emitRawPointer(CGF));
} else if (D.isLocalVarDecl()) {
// For local variables, store 1 into the first byte of the guard variable
// after the object initialization completes so that initialization is
@@ -2669,6 +2856,14 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
fn->setDoesNotThrow();
+ const auto &Context = CGF.CGM.getContext();
+ FunctionProtoType::ExtProtoInfo EPI(Context.getDefaultCallingConvention(
+ /*IsVariadic=*/false, /*IsCXXMethod=*/false));
+ QualType fnType =
+ Context.getFunctionType(Context.VoidTy, {Context.VoidPtrTy}, EPI);
+ llvm::Constant *dtorCallee = cast<llvm::Constant>(dtor.getCallee());
+ dtorCallee = CGF.CGM.getFunctionPointer(dtorCallee, fnType);
+
if (!addr)
// addr is null when we are trying to register a dtor annotated with
// __attribute__((destructor)) in a constructor function. Using null here is
@@ -2676,7 +2871,7 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
// function.
addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);
- llvm::Value *args[] = {dtor.getCallee(), addr, handle};
+ llvm::Value *args[] = {dtorCallee, addr, handle};
CGF.EmitNounwindRuntimeCall(atexit, args);
}
@@ -3111,10 +3306,10 @@ LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
LValue LV;
if (VD->getType()->isReferenceType())
- LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
+ LV = CGF.MakeNaturalAlignRawAddrLValue(CallVal, LValType);
else
- LV = CGF.MakeAddrLValue(CallVal, LValType,
- CGF.getContext().getDeclAlign(VD));
+ LV = CGF.MakeRawAddrLValue(CallVal, LValType,
+ CGF.getContext().getDeclAlign(VD));
// FIXME: need setObjCGCLValueClass?
return LV;
}
@@ -3139,6 +3334,78 @@ bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
return false;
}
+llvm::Constant *
+ItaniumCXXABI::getOrCreateVirtualFunctionPointerThunk(const CXXMethodDecl *MD) {
+ SmallString<256> MethodName;
+ llvm::raw_svector_ostream Out(MethodName);
+ getMangleContext().mangleCXXName(MD, Out);
+ MethodName += "_vfpthunk_";
+ StringRef ThunkName = MethodName.str();
+ llvm::Function *ThunkFn;
+ if ((ThunkFn = cast_or_null<llvm::Function>(
+ CGM.getModule().getNamedValue(ThunkName))))
+ return ThunkFn;
+
+ const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeCXXMethodDeclaration(MD);
+ llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo);
+ llvm::GlobalValue::LinkageTypes Linkage =
+ MD->isExternallyVisible() ? llvm::GlobalValue::LinkOnceODRLinkage
+ : llvm::GlobalValue::InternalLinkage;
+ ThunkFn =
+ llvm::Function::Create(ThunkTy, Linkage, ThunkName, &CGM.getModule());
+ if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
+ ThunkFn->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ assert(ThunkFn->getName() == ThunkName && "name was uniqued!");
+
+ CGM.SetLLVMFunctionAttributes(MD, FnInfo, ThunkFn, /*IsThunk=*/true);
+ CGM.SetLLVMFunctionAttributesForDefinition(MD, ThunkFn);
+
+ // Stack protection sometimes gets inserted after the musttail call.
+ ThunkFn->removeFnAttr(llvm::Attribute::StackProtect);
+ ThunkFn->removeFnAttr(llvm::Attribute::StackProtectStrong);
+ ThunkFn->removeFnAttr(llvm::Attribute::StackProtectReq);
+
+ // Start codegen.
+ CodeGenFunction CGF(CGM);
+ CGF.CurGD = GlobalDecl(MD);
+ CGF.CurFuncIsThunk = true;
+
+ // Build FunctionArgs.
+ FunctionArgList FunctionArgs;
+ CGF.BuildFunctionArgList(CGF.CurGD, FunctionArgs);
+
+ CGF.StartFunction(GlobalDecl(), FnInfo.getReturnType(), ThunkFn, FnInfo,
+ FunctionArgs, MD->getLocation(), SourceLocation());
+ llvm::Value *ThisVal = loadIncomingCXXThis(CGF);
+ setCXXABIThisValue(CGF, ThisVal);
+
+ CallArgList CallArgs;
+ for (const VarDecl *VD : FunctionArgs)
+ CGF.EmitDelegateCallArg(CallArgs, VD, SourceLocation());
+
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, /*this*/ 1);
+ const CGFunctionInfo &CallInfo =
+ CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT, Required, 0);
+ CGCallee Callee = CGCallee::forVirtual(nullptr, GlobalDecl(MD),
+ getThisAddress(CGF), ThunkTy);
+ llvm::CallBase *CallOrInvoke;
+ CGF.EmitCall(CallInfo, Callee, ReturnValueSlot(), CallArgs, &CallOrInvoke,
+ /*IsMustTail=*/true, SourceLocation(), true);
+ auto *Call = cast<llvm::CallInst>(CallOrInvoke);
+ Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
+ if (Call->getType()->isVoidTy())
+ CGF.Builder.CreateRetVoid();
+ else
+ CGF.Builder.CreateRet(Call);
+
+ // Finish the function to maintain CodeGenFunction invariants.
+ // FIXME: Don't emit unreachable code.
+ CGF.EmitBlock(CGF.createBasicBlock());
+ CGF.FinishFunction();
+ return ThunkFn;
+}
+
namespace {
class ItaniumRTTIBuilder {
CodeGenModule &CGM; // Per-module state.
@@ -3284,7 +3551,7 @@ ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
// Import the typeinfo symbol when all non-inline virtual methods are
// imported.
if (CGM.getTarget().hasPS4DLLImportExport()) {
- if (RD && CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD)) {
+ if (RD && CXXRecordNonInlineHasAttr<DLLImportAttr>(RD)) {
GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
CGM.setDSOLocal(GV);
}
@@ -3364,6 +3631,8 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
+#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/AMDGPUTypes.def"
case BuiltinType::ShortAccum:
case BuiltinType::Accum:
case BuiltinType::LongAccum:
@@ -3583,6 +3852,9 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
case Type::Pipe:
llvm_unreachable("Pipe types shouldn't get here");
+ case Type::ArrayParameter:
+ llvm_unreachable("Array Parameter types should not get here.");
+
case Type::Builtin:
case Type::BitInt:
// GCC treats vector and complex types as fundamental types.
@@ -3691,6 +3963,10 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
VTable, Two);
}
+ if (auto &Schema = CGM.getCodeGenOpts().PointerAuth.CXXTypeInfoVTablePointer)
+ VTable = CGM.getConstantSignedPointer(VTable, Schema, nullptr, GlobalDecl(),
+ QualType(Ty, 0));
+
Fields.push_back(VTable);
}
@@ -3867,6 +4143,7 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
case Type::ConstantArray:
case Type::IncompleteArray:
case Type::VariableArray:
+ case Type::ArrayParameter:
// Itanium C++ ABI 2.9.5p5:
// abi::__array_type_info adds no data members to std::type_info.
break;
@@ -3933,13 +4210,13 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
// Export the typeinfo in the same circumstances as the vtable is exported.
auto GVDLLStorageClass = DLLStorageClass;
- if (CGM.getTarget().hasPS4DLLImportExport()) {
+ if (CGM.getTarget().hasPS4DLLImportExport() &&
+ GVDLLStorageClass != llvm::GlobalVariable::DLLExportStorageClass) {
if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
if (RD->hasAttr<DLLExportAttr>() ||
- CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD)) {
+ CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
- }
}
}
@@ -3979,9 +4256,7 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
CGM.setDSOLocal(GV);
TypeName->setDLLStorageClass(DLLStorageClass);
- GV->setDLLStorageClass(CGM.getTarget().hasPS4DLLImportExport()
- ? GVDLLStorageClass
- : DLLStorageClass);
+ GV->setDLLStorageClass(GVDLLStorageClass);
TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
@@ -4595,7 +4870,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
CGF.Builder.CreateStore(Casted, ExnPtrTmp);
// Bind the reference to the temporary.
- AdjustedExn = ExnPtrTmp.getPointer();
+ AdjustedExn = ExnPtrTmp.emitRawPointer(CGF);
}
}
@@ -4821,6 +5096,18 @@ ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
}
+llvm::Constant *
+ItaniumCXXABI::getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD) {
+ const CXXMethodDecl *origMD =
+ cast<CXXMethodDecl>(CGM.getItaniumVTableContext()
+ .findOriginalMethod(MD->getCanonicalDecl())
+ .getDecl());
+ llvm::Constant *thunk = getOrCreateVirtualFunctionPointerThunk(origMD);
+ QualType funcType = CGM.getContext().getMemberPointerType(
+ MD->getType(), MD->getParent()->getTypeForDecl());
+ return CGM.getMemberFunctionPointer(thunk, funcType);
+}
+
void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
const CXXCatchStmt *C) {
if (CGF.getTarget().hasFeature("exception-handling"))
@@ -4869,7 +5156,8 @@ void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
}
// Create __dtor function for the var decl.
- llvm::Function *DtorStub = CGF.createAtExitStub(D, Dtor, Addr);
+ llvm::Function *DtorStub =
+ cast<llvm::Function>(CGF.createAtExitStub(D, Dtor, Addr));
// Register above __dtor with atexit().
CGF.registerGlobalDtorWithAtExit(DtorStub);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/LinkInModulesPass.cpp b/contrib/llvm-project/clang/lib/CodeGen/LinkInModulesPass.cpp
index 6ce2b94c1db8..44b2df52f001 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/LinkInModulesPass.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/LinkInModulesPass.cpp
@@ -14,16 +14,20 @@
#include "LinkInModulesPass.h"
#include "BackendConsumer.h"
+#include "clang/Basic/CodeGenOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+
using namespace llvm;
-LinkInModulesPass::LinkInModulesPass(clang::BackendConsumer *BC,
- bool ShouldLinkFiles)
- : BC(BC), ShouldLinkFiles(ShouldLinkFiles) {}
+LinkInModulesPass::LinkInModulesPass(clang::BackendConsumer *BC) : BC(BC) {}
PreservedAnalyses LinkInModulesPass::run(Module &M, ModuleAnalysisManager &AM) {
+ if (!BC)
+ return PreservedAnalyses::all();
- if (BC && BC->LinkInModules(&M, ShouldLinkFiles))
- report_fatal_error("Bitcode module linking failed, compilation aborted!");
+ if (BC->LinkInModules(&M))
+ report_fatal_error("Bitcode module postopt linking failed, aborted!");
- return PreservedAnalyses::all();
+ return PreservedAnalyses::none();
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/LinkInModulesPass.h b/contrib/llvm-project/clang/lib/CodeGen/LinkInModulesPass.h
index 7fe94d625058..3edbfd076e15 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/LinkInModulesPass.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/LinkInModulesPass.h
@@ -28,10 +28,9 @@ class Pass;
/// for use with the legacy pass manager.
class LinkInModulesPass : public PassInfoMixin<LinkInModulesPass> {
clang::BackendConsumer *BC;
- bool ShouldLinkFiles;
public:
- LinkInModulesPass(clang::BackendConsumer *BC, bool ShouldLinkFiles = true);
+ LinkInModulesPass(clang::BackendConsumer *BC);
PreservedAnalyses run(Module &M, AnalysisManager<Module> &);
static bool isRequired() { return true; }
diff --git a/contrib/llvm-project/clang/lib/CodeGen/MCDCState.h b/contrib/llvm-project/clang/lib/CodeGen/MCDCState.h
new file mode 100644
index 000000000000..e0dd28ff90ed
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/MCDCState.h
@@ -0,0 +1,49 @@
+//===---- MCDCState.h - Per-Function MC/DC state ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Per-Function MC/DC state for PGO
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_MCDCSTATE_H
+#define LLVM_CLANG_LIB_CODEGEN_MCDCSTATE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ProfileData/Coverage/MCDCTypes.h"
+
+namespace clang {
+class Stmt;
+} // namespace clang
+
+namespace clang::CodeGen::MCDC {
+
+using namespace llvm::coverage::mcdc;
+
+/// Per-Function MC/DC state
+struct State {
+ unsigned BitmapBits = 0;
+
+ struct Decision {
+ unsigned BitmapIdx;
+ llvm::SmallVector<std::array<int, 2>> Indices;
+ };
+
+ llvm::DenseMap<const Stmt *, Decision> DecisionByStmt;
+
+ struct Branch {
+ ConditionID ID;
+ const Stmt *DecisionStmt;
+ };
+
+ llvm::DenseMap<const Stmt *, Branch> BranchByStmt;
+};
+
+} // namespace clang::CodeGen::MCDC
+
+#endif // LLVM_CLANG_LIB_CODEGEN_MCDCSTATE_H
diff --git a/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.cpp b/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.cpp
index 8589869f6e2f..c5d1e3ad5a20 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.cpp
@@ -168,8 +168,8 @@ void MacroPPCallbacks::FileChanged(SourceLocation Loc, FileChangeReason Reason,
void MacroPPCallbacks::InclusionDirective(
SourceLocation HashLoc, const Token &IncludeTok, StringRef FileName,
bool IsAngled, CharSourceRange FilenameRange, OptionalFileEntryRef File,
- StringRef SearchPath, StringRef RelativePath, const Module *Imported,
- SrcMgr::CharacteristicKind FileType) {
+ StringRef SearchPath, StringRef RelativePath, const Module *SuggestedModule,
+ bool ModuleImported, SrcMgr::CharacteristicKind FileType) {
// Record the line location of the current included file.
LastHashLoc = HashLoc;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h b/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h
index 5af177d0c3fa..5f468648da04 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h
@@ -102,7 +102,8 @@ public:
StringRef FileName, bool IsAngled,
CharSourceRange FilenameRange,
OptionalFileEntryRef File, StringRef SearchPath,
- StringRef RelativePath, const Module *Imported,
+ StringRef RelativePath, const Module *SuggestedModule,
+ bool ModuleImported,
SrcMgr::CharacteristicKind FileType) override;
/// Hook called whenever a macro definition is seen.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 172c4c937b97..76d0191a7e63 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -144,7 +144,7 @@ public:
return CatchTypeInfo{nullptr, 0x40};
}
- bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
+ bool shouldTypeidBeNullChecked(QualType SrcRecordTy) override;
void EmitBadTypeidCall(CodeGenFunction &CGF) override;
llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
Address ThisPtr,
@@ -327,10 +327,6 @@ public:
CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
- llvm::Constant *
- getVTableAddressPointForConstExpr(BaseSubobject Base,
- const CXXRecordDecl *VTableClass) override;
-
llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
CharUnits VPtrOffset) override;
@@ -419,9 +415,11 @@ public:
bool exportThunk() override { return false; }
llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
- const ThisAdjustment &TA) override;
+ const CXXRecordDecl * /*UnadjustedClass*/,
+ const ThunkInfo &TI) override;
llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
+ const CXXRecordDecl * /*UnadjustedClass*/,
const ReturnAdjustment &RA) override;
void EmitThreadLocalInitFuncs(
@@ -937,7 +935,7 @@ void MicrosoftCXXABI::emitBeginCatch(CodeGenFunction &CGF,
}
CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
- CPI->setArgOperand(2, var.getObjectAddress(CGF).getPointer());
+ CPI->setArgOperand(2, var.getObjectAddress(CGF).emitRawPointer(CGF));
CGF.EHStack.pushCleanup<CatchRetScope>(NormalCleanup, CPI);
CGF.EmitAutoVarCleanups(var);
}
@@ -974,18 +972,16 @@ MicrosoftCXXABI::performBaseAdjustment(CodeGenFunction &CGF, Address Value,
llvm::Value *Offset =
GetVirtualBaseClassOffset(CGF, Value, SrcDecl, PolymorphicBase);
llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(
- Value.getElementType(), Value.getPointer(), Offset);
+ Value.getElementType(), Value.emitRawPointer(CGF), Offset);
CharUnits VBaseAlign =
CGF.CGM.getVBaseAlignment(Value.getAlignment(), SrcDecl, PolymorphicBase);
return std::make_tuple(Address(Ptr, CGF.Int8Ty, VBaseAlign), Offset,
PolymorphicBase);
}
-bool MicrosoftCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
- QualType SrcRecordTy) {
+bool MicrosoftCXXABI::shouldTypeidBeNullChecked(QualType SrcRecordTy) {
const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
- return IsDeref &&
- !getContext().getASTRecordLayout(SrcDecl).hasExtendableVFPtr();
+ return !getContext().getASTRecordLayout(SrcDecl).hasExtendableVFPtr();
}
static llvm::CallBase *emitRTtypeidCall(CodeGenFunction &CGF,
@@ -1011,7 +1007,7 @@ llvm::Value *MicrosoftCXXABI::EmitTypeid(CodeGenFunction &CGF,
llvm::Type *StdTypeInfoPtrTy) {
std::tie(ThisPtr, std::ignore, std::ignore) =
performBaseAdjustment(CGF, ThisPtr, SrcRecordTy);
- llvm::CallBase *Typeid = emitRTtypeidCall(CGF, ThisPtr.getPointer());
+ llvm::CallBase *Typeid = emitRTtypeidCall(CGF, ThisPtr.emitRawPointer(CGF));
return CGF.Builder.CreateBitCast(Typeid, StdTypeInfoPtrTy);
}
@@ -1033,7 +1029,7 @@ llvm::Value *MicrosoftCXXABI::emitDynamicCastCall(
llvm::Value *Offset;
std::tie(This, Offset, std::ignore) =
performBaseAdjustment(CGF, This, SrcRecordTy);
- llvm::Value *ThisPtr = This.getPointer();
+ llvm::Value *ThisPtr = This.emitRawPointer(CGF);
Offset = CGF.Builder.CreateTrunc(Offset, CGF.Int32Ty);
// PVOID __RTDynamicCast(
@@ -1065,7 +1061,7 @@ llvm::Value *MicrosoftCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
llvm::FunctionCallee Function = CGF.CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false),
"__RTCastToVoid");
- llvm::Value *Args[] = {Value.getPointer()};
+ llvm::Value *Args[] = {Value.emitRawPointer(CGF)};
return CGF.EmitRuntimeCall(Function, Args);
}
@@ -1115,7 +1111,7 @@ static bool isTrivialForMSVC(const CXXRecordDecl *RD, QualType Ty,
const Type *Base = nullptr;
uint64_t NumElts = 0;
if (CGM.getTarget().getTriple().isAArch64() &&
- CGM.getTypes().getABIInfo().isHomogeneousAggregate(Ty, Base, NumElts) &&
+ CGM.getABIInfo().isHomogeneousAggregate(Ty, Base, NumElts) &&
isa<VectorType>(Base)) {
return true;
}
@@ -1126,7 +1122,22 @@ static bool isTrivialForMSVC(const CXXRecordDecl *RD, QualType Ty,
// No base classes
// No virtual functions
// Additionally, we need to ensure that there is a trivial copy assignment
- // operator, a trivial destructor and no user-provided constructors.
+ // operator, a trivial destructor, no user-provided constructors and no
+ // deleted copy assignment operator.
+
+ // We need to cover two cases when checking for a deleted copy assignment
+ // operator.
+ //
+ // struct S { int& r; };
+ // The above will have an implicit copy assignment operator that is deleted
+ // and there will not be a `CXXMethodDecl` for the copy assignment operator.
+ // This is handled by the `needsImplicitCopyAssignment()` check below.
+ //
+ // struct S { S& operator=(const S&) = delete; int i; };
+ // The above will not have an implicit copy assignment operator that is
+ // deleted but there is a deleted `CXXMethodDecl` for the declared copy
+ // assignment operator. This is handled by the `isDeleted()` check below.
+
if (RD->hasProtectedFields() || RD->hasPrivateFields())
return false;
if (RD->getNumBases() > 0)
@@ -1135,9 +1146,20 @@ static bool isTrivialForMSVC(const CXXRecordDecl *RD, QualType Ty,
return false;
if (RD->hasNonTrivialCopyAssignment())
return false;
- for (const CXXConstructorDecl *Ctor : RD->ctors())
- if (Ctor->isUserProvided())
- return false;
+ if (RD->needsImplicitCopyAssignment() && !RD->hasSimpleCopyAssignment())
+ return false;
+ for (const Decl *D : RD->decls()) {
+ if (auto *Ctor = dyn_cast<CXXConstructorDecl>(D)) {
+ if (Ctor->isUserProvided())
+ return false;
+ } else if (auto *Template = dyn_cast<FunctionTemplateDecl>(D)) {
+ if (isa<CXXConstructorDecl>(Template->getTemplatedDecl()))
+ return false;
+ } else if (auto *MethodDecl = dyn_cast<CXXMethodDecl>(D)) {
+ if (MethodDecl->isCopyAssignmentOperator() && MethodDecl->isDeleted())
+ return false;
+ }
+ }
if (RD->hasNonTrivialDestructor())
return false;
return true;
@@ -1493,7 +1515,7 @@ Address MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
llvm::Value *VBaseOffset =
GetVirtualBaseClassOffset(CGF, Result, Derived, VBase);
llvm::Value *VBasePtr = CGF.Builder.CreateInBoundsGEP(
- Result.getElementType(), Result.getPointer(), VBaseOffset);
+ Result.getElementType(), Result.emitRawPointer(CGF), VBaseOffset);
CharUnits VBaseAlign =
CGF.CGM.getVBaseAlignment(Result.getAlignment(), Derived, VBase);
Result = Address(VBasePtr, CGF.Int8Ty, VBaseAlign);
@@ -1660,7 +1682,8 @@ void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
llvm::Value *Implicit =
getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase,
Delegating); // = nullptr
- CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy,
+ CGF.EmitCXXDestructorCall(GD, Callee, CGF.getAsNaturalPointerTo(This, ThisTy),
+ ThisTy,
/*ImplicitParam=*/Implicit,
/*ImplicitParamTy=*/QualType(), nullptr);
if (BaseDtorEndBB) {
@@ -1791,13 +1814,6 @@ MicrosoftCXXABI::getVTableAddressPoint(BaseSubobject Base,
return VFTablesMap[ID];
}
-llvm::Constant *MicrosoftCXXABI::getVTableAddressPointForConstExpr(
- BaseSubobject Base, const CXXRecordDecl *VTableClass) {
- llvm::Constant *VFTable = getVTableAddressPoint(Base, VTableClass);
- assert(VFTable && "Couldn't find a vftable for the given base?");
- return VFTable;
-}
-
llvm::GlobalVariable *MicrosoftCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
CharUnits VPtrOffset) {
// getAddrOfVTable may return 0 if asked to get an address of a vtable which
@@ -2013,8 +2029,9 @@ llvm::Value *MicrosoftCXXABI::EmitVirtualDestructorCall(
}
This = adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
- RValue RV = CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy,
- ImplicitParam, Context.IntTy, CE);
+ RValue RV =
+ CGF.EmitCXXDestructorCall(GD, Callee, This.emitRawPointer(CGF), ThisTy,
+ ImplicitParam, Context.IntTy, CE);
return RV.getScalarVal();
}
@@ -2208,17 +2225,18 @@ void MicrosoftCXXABI::emitVBTableDefinition(const VPtrInfo &VBT,
GV->setLinkage(llvm::GlobalVariable::AvailableExternallyLinkage);
}
-llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
- Address This,
- const ThisAdjustment &TA) {
+llvm::Value *MicrosoftCXXABI::performThisAdjustment(
+ CodeGenFunction &CGF, Address This,
+ const CXXRecordDecl * /*UnadjustedClass*/, const ThunkInfo &TI) {
+ const ThisAdjustment &TA = TI.This;
if (TA.isEmpty())
- return This.getPointer();
+ return This.emitRawPointer(CGF);
This = This.withElementType(CGF.Int8Ty);
llvm::Value *V;
if (TA.Virtual.isEmpty()) {
- V = This.getPointer();
+ V = This.emitRawPointer(CGF);
} else {
assert(TA.Virtual.Microsoft.VtordispOffset < 0);
// Adjust the this argument based on the vtordisp value.
@@ -2227,7 +2245,7 @@ llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
CharUnits::fromQuantity(TA.Virtual.Microsoft.VtordispOffset));
VtorDispPtr = VtorDispPtr.withElementType(CGF.Int32Ty);
llvm::Value *VtorDisp = CGF.Builder.CreateLoad(VtorDispPtr, "vtordisp");
- V = CGF.Builder.CreateGEP(This.getElementType(), This.getPointer(),
+ V = CGF.Builder.CreateGEP(This.getElementType(), This.emitRawPointer(CGF),
CGF.Builder.CreateNeg(VtorDisp));
// Unfortunately, having applied the vtordisp means that we no
@@ -2260,15 +2278,16 @@ llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
return V;
}
-llvm::Value *
-MicrosoftCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
- const ReturnAdjustment &RA) {
+llvm::Value *MicrosoftCXXABI::performReturnAdjustment(
+ CodeGenFunction &CGF, Address Ret,
+ const CXXRecordDecl * /*UnadjustedClass*/, const ReturnAdjustment &RA) {
+
if (RA.isEmpty())
- return Ret.getPointer();
+ return Ret.emitRawPointer(CGF);
Ret = Ret.withElementType(CGF.Int8Ty);
- llvm::Value *V = Ret.getPointer();
+ llvm::Value *V = Ret.emitRawPointer(CGF);
if (RA.Virtual.Microsoft.VBIndex) {
assert(RA.Virtual.Microsoft.VBIndex > 0);
int32_t IntSize = CGF.getIntSize().getQuantity();
@@ -2583,7 +2602,7 @@ struct ResetGuardBit final : EHScopeStack::Cleanup {
struct CallInitThreadAbort final : EHScopeStack::Cleanup {
llvm::Value *Guard;
- CallInitThreadAbort(Address Guard) : Guard(Guard.getPointer()) {}
+ CallInitThreadAbort(RawAddress Guard) : Guard(Guard.getPointer()) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Calling _Init_thread_abort will reset the guard's state.
@@ -3123,8 +3142,8 @@ MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
llvm::Value **VBPtrOut) {
CGBuilderTy &Builder = CGF.Builder;
// Load the vbtable pointer from the vbptr in the instance.
- llvm::Value *VBPtr = Builder.CreateInBoundsGEP(CGM.Int8Ty, This.getPointer(),
- VBPtrOffset, "vbptr");
+ llvm::Value *VBPtr = Builder.CreateInBoundsGEP(
+ CGM.Int8Ty, This.emitRawPointer(CGF), VBPtrOffset, "vbptr");
if (VBPtrOut)
*VBPtrOut = VBPtr;
@@ -3203,7 +3222,7 @@ llvm::Value *MicrosoftCXXABI::AdjustVirtualBase(
Builder.CreateBr(SkipAdjustBB);
CGF.EmitBlock(SkipAdjustBB);
llvm::PHINode *Phi = Builder.CreatePHI(CGM.Int8PtrTy, 2, "memptr.base");
- Phi->addIncoming(Base.getPointer(), OriginalBB);
+ Phi->addIncoming(Base.emitRawPointer(CGF), OriginalBB);
Phi->addIncoming(AdjustedBase, VBaseAdjustBB);
return Phi;
}
@@ -3238,7 +3257,7 @@ llvm::Value *MicrosoftCXXABI::EmitMemberDataPointerAddress(
Addr = AdjustVirtualBase(CGF, E, RD, Base, VirtualBaseAdjustmentOffset,
VBPtrOffset);
} else {
- Addr = Base.getPointer();
+ Addr = Base.emitRawPointer(CGF);
}
// Apply the offset, which we assume is non-null.
@@ -3526,7 +3545,7 @@ CGCallee MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer(
ThisPtrForCall = AdjustVirtualBase(CGF, E, RD, This,
VirtualBaseAdjustmentOffset, VBPtrOffset);
} else {
- ThisPtrForCall = This.getPointer();
+ ThisPtrForCall = This.emitRawPointer(CGF);
}
if (NonVirtualBaseAdjustment)
@@ -4445,10 +4464,7 @@ void MicrosoftCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
llvm::GlobalVariable *TI = getThrowInfo(ThrowType);
// Call into the runtime to throw the exception.
- llvm::Value *Args[] = {
- AI.getPointer(),
- TI
- };
+ llvm::Value *Args[] = {AI.emitRawPointer(CGF), TI};
CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(), Args);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp b/contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp
index 3594f4c66e67..d4e0ab0339a8 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp
@@ -180,7 +180,7 @@ namespace {
bool HandleTopLevelDecl(DeclGroupRef DG) override {
// FIXME: Why not return false and abort parsing?
- if (Diags.hasErrorOccurred())
+ if (Diags.hasUnrecoverableErrorOccurred())
return true;
HandlingTopLevelDeclRAII HandlingDecl(*this);
@@ -206,7 +206,7 @@ namespace {
}
void HandleInlineFunctionDefinition(FunctionDecl *D) override {
- if (Diags.hasErrorOccurred())
+ if (Diags.hasUnrecoverableErrorOccurred())
return;
assert(D->doesThisDeclarationHaveABody());
@@ -233,7 +233,7 @@ namespace {
/// client hack on the type, which can occur at any point in the file
/// (because these can be defined in declspecs).
void HandleTagDeclDefinition(TagDecl *D) override {
- if (Diags.hasErrorOccurred())
+ if (Diags.hasUnrecoverableErrorOccurred())
return;
// Don't allow re-entrant calls to CodeGen triggered by PCH
@@ -269,7 +269,7 @@ namespace {
}
void HandleTagDeclRequiredDefinition(const TagDecl *D) override {
- if (Diags.hasErrorOccurred())
+ if (Diags.hasUnrecoverableErrorOccurred())
return;
// Don't allow re-entrant calls to CodeGen triggered by PCH
@@ -283,7 +283,7 @@ namespace {
void HandleTranslationUnit(ASTContext &Ctx) override {
// Release the Builder when there is no error.
- if (!Diags.hasErrorOccurred() && Builder)
+ if (!Diags.hasUnrecoverableErrorOccurred() && Builder)
Builder->Release();
// If there are errors before or when releasing the Builder, reset
@@ -297,25 +297,25 @@ namespace {
}
void AssignInheritanceModel(CXXRecordDecl *RD) override {
- if (Diags.hasErrorOccurred())
+ if (Diags.hasUnrecoverableErrorOccurred())
return;
Builder->RefreshTypeCacheForClass(RD);
}
void CompleteTentativeDefinition(VarDecl *D) override {
- if (Diags.hasErrorOccurred())
+ if (Diags.hasUnrecoverableErrorOccurred())
return;
Builder->EmitTentativeDefinition(D);
}
- void CompleteExternalDeclaration(VarDecl *D) override {
+ void CompleteExternalDeclaration(DeclaratorDecl *D) override {
Builder->EmitExternalDeclaration(D);
}
void HandleVTable(CXXRecordDecl *RD) override {
- if (Diags.hasErrorOccurred())
+ if (Diags.hasUnrecoverableErrorOccurred())
return;
Builder->EmitVTable(RD);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp b/contrib/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp
index 16fbf52a517d..ab2e2bd0b306 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp
@@ -78,7 +78,7 @@ void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
QualType eltType = arrayType->getElementType();
auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
- for (uint64_t i = 0, e = arrayType->getSize().getZExtValue(); i != e; ++i) {
+ for (uint64_t i = 0, e = arrayType->getZExtSize(); i != e; ++i) {
addTypedData(eltType, begin + i * eltSize);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
index 60224d458f6a..64a9a5554caf 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
@@ -19,6 +19,7 @@
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
@@ -206,6 +207,51 @@ llvm::Value *TargetCodeGenInfo::createEnqueuedBlockKernel(
return F;
}
+void TargetCodeGenInfo::setBranchProtectionFnAttributes(
+ const TargetInfo::BranchProtectionInfo &BPI, llvm::Function &F) {
+ // Called on already created and initialized function where attributes already
+ // set from command line attributes but some might need to be removed as the
+ // actual BPI is different.
+ if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
+ F.addFnAttr("sign-return-address", BPI.getSignReturnAddrStr());
+ F.addFnAttr("sign-return-address-key", BPI.getSignKeyStr());
+ } else {
+ if (F.hasFnAttribute("sign-return-address"))
+ F.removeFnAttr("sign-return-address");
+ if (F.hasFnAttribute("sign-return-address-key"))
+ F.removeFnAttr("sign-return-address-key");
+ }
+
+ auto AddRemoveAttributeAsSet = [&](bool Set, const StringRef &ModAttr) {
+ if (Set)
+ F.addFnAttr(ModAttr);
+ else if (F.hasFnAttribute(ModAttr))
+ F.removeFnAttr(ModAttr);
+ };
+
+ AddRemoveAttributeAsSet(BPI.BranchTargetEnforcement,
+ "branch-target-enforcement");
+ AddRemoveAttributeAsSet(BPI.BranchProtectionPAuthLR,
+ "branch-protection-pauth-lr");
+ AddRemoveAttributeAsSet(BPI.GuardedControlStack, "guarded-control-stack");
+}
+
+void TargetCodeGenInfo::initBranchProtectionFnAttributes(
+ const TargetInfo::BranchProtectionInfo &BPI, llvm::AttrBuilder &FuncAttrs) {
+ // Only used for initializing attributes in the AttrBuilder, which will not
+ // contain any of these attributes so no need to remove anything.
+ if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
+ FuncAttrs.addAttribute("sign-return-address", BPI.getSignReturnAddrStr());
+ FuncAttrs.addAttribute("sign-return-address-key", BPI.getSignKeyStr());
+ }
+ if (BPI.BranchTargetEnforcement)
+ FuncAttrs.addAttribute("branch-target-enforcement");
+ if (BPI.BranchProtectionPAuthLR)
+ FuncAttrs.addAttribute("branch-protection-pauth-lr");
+ if (BPI.GuardedControlStack)
+ FuncAttrs.addAttribute("guarded-control-stack");
+}
+
namespace {
class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
diff --git a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
index 7682f197041c..156b4ff4353b 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
@@ -15,11 +15,12 @@
#define LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H
#include "CGBuilder.h"
-#include "CodeGenModule.h"
#include "CGValue.h"
+#include "CodeGenModule.h"
#include "clang/AST/Type.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SyncScope.h"
+#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
@@ -84,12 +85,18 @@ public:
/// Provides a convenient hook to handle extra target-specific globals.
virtual void emitTargetGlobals(CodeGen::CodeGenModule &CGM) const {}
+ /// Any further codegen related checks that need to be done on a function
+ /// signature in a target specific manner.
+ virtual void checkFunctionABI(CodeGenModule &CGM,
+ const FunctionDecl *Decl) const {}
+
/// Any further codegen related checks that need to be done on a function call
/// in a target specific manner.
virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
const FunctionDecl *Caller,
const FunctionDecl *Callee,
- const CallArgList &Args) const {}
+ const CallArgList &Args,
+ QualType ReturnType) const {}
/// Determines the size of struct _Unwind_Exception on this platform,
/// in 8-bit units. The Itanium ABI defines this as:
@@ -290,6 +297,11 @@ public:
/// Get the AST address space for alloca.
virtual LangAS getASTAllocaAddressSpace() const { return LangAS::Default; }
+ Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr,
+ LangAS SrcAddr, LangAS DestAddr,
+ llvm::Type *DestTy,
+ bool IsNonNull = false) const;
+
/// Perform address space cast of an expression of pointer type.
/// \param V is the LLVM value to be casted to another address space.
/// \param SrcAddr is the language address space of \p V.
@@ -402,6 +414,17 @@ public:
return nullptr;
}
+ // Set the Branch Protection Attributes of the Function accordingly to the
+ // BPI. Remove attributes that contradict with current BPI.
+ static void
+ setBranchProtectionFnAttributes(const TargetInfo::BranchProtectionInfo &BPI,
+ llvm::Function &F);
+
+ // Add the Branch Protection Attributes of the FuncAttrs.
+ static void
+ initBranchProtectionFnAttributes(const TargetInfo::BranchProtectionInfo &BPI,
+ llvm::AttrBuilder &FuncAttrs);
+
protected:
static std::string qualifyWindowsLibrary(StringRef Lib);
@@ -416,6 +439,8 @@ enum class AArch64ABIKind {
AAPCS = 0,
DarwinPCS,
Win64,
+ AAPCSSoft,
+ PAuthTest,
};
std::unique_ptr<TargetCodeGenInfo>
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/AArch64.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/AArch64.cpp
index ee7f95084d2e..97381f673c28 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Targets/AArch64.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/AArch64.cpp
@@ -8,6 +8,9 @@
#include "ABIInfoImpl.h"
#include "TargetInfo.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/DiagnosticFrontend.h"
+#include "llvm/TargetParser/AArch64TargetParser.h"
using namespace clang;
using namespace clang::CodeGen;
@@ -25,6 +28,8 @@ public:
AArch64ABIInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
: ABIInfo(CGT), Kind(Kind) {}
+ bool isSoftFloat() const { return Kind == AArch64ABIKind::AAPCSSoft; }
+
private:
AArch64ABIKind getABIKind() const { return Kind; }
bool isDarwinPCS() const { return Kind == AArch64ABIKind::DarwinPCS; }
@@ -50,30 +55,37 @@ private:
FI.getCallingConvention());
}
- Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
+ RValue EmitDarwinVAArg(Address VAListAddr, QualType Ty, CodeGenFunction &CGF,
+ AggValueSlot Slot) const;
- Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
+ RValue EmitAAPCSVAArg(Address VAListAddr, QualType Ty, CodeGenFunction &CGF,
+ AArch64ABIKind Kind, AggValueSlot Slot) const;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override {
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override {
llvm::Type *BaseTy = CGF.ConvertType(Ty);
if (isa<llvm::ScalableVectorType>(BaseTy))
llvm::report_fatal_error("Passing SVE types to variadic functions is "
"currently not supported");
- return Kind == AArch64ABIKind::Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
- : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
- : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
+ return Kind == AArch64ABIKind::Win64
+ ? EmitMSVAArg(CGF, VAListAddr, Ty, Slot)
+ : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF, Slot)
+ : EmitAAPCSVAArg(VAListAddr, Ty, CGF, Kind, Slot);
}
- Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
bool allowBFloatArgsAndRet() const override {
return getTarget().hasBFloat16Type();
}
+
+ using ABIInfo::appendAttributeMangling;
+ void appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
+ raw_ostream &Out) const override;
+ void appendAttributeMangling(StringRef AttrStr,
+ raw_ostream &Out) const override;
};
class AArch64SwiftABIInfo : public SwiftABIInfo {
@@ -108,38 +120,20 @@ public:
if (!FD)
return;
- const auto *TA = FD->getAttr<TargetAttr>();
- if (TA == nullptr)
- return;
+ TargetInfo::BranchProtectionInfo BPI(CGM.getLangOpts());
- ParsedTargetAttr Attr =
- CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
- if (Attr.BranchProtection.empty())
- return;
-
- TargetInfo::BranchProtectionInfo BPI;
- StringRef Error;
- (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
- Attr.CPU, BPI, Error);
- assert(Error.empty());
-
- auto *Fn = cast<llvm::Function>(GV);
- static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
- Fn->addFnAttr("sign-return-address", SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
-
- if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
- Fn->addFnAttr("sign-return-address-key",
- BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey
- ? "a_key"
- : "b_key");
+ if (const auto *TA = FD->getAttr<TargetAttr>()) {
+ ParsedTargetAttr Attr =
+ CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
+ if (!Attr.BranchProtection.empty()) {
+ StringRef Error;
+ (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
+ Attr.CPU, BPI, Error);
+ assert(Error.empty());
+ }
}
-
- Fn->addFnAttr("branch-target-enforcement",
- BPI.BranchTargetEnforcement ? "true" : "false");
- Fn->addFnAttr("branch-protection-pauth-lr",
- BPI.BranchProtectionPAuthLR ? "true" : "false");
- Fn->addFnAttr("guarded-control-stack",
- BPI.GuardedControlStack ? "true" : "false");
+ auto *Fn = cast<llvm::Function>(GV);
+ setBranchProtectionFnAttributes(BPI, *Fn);
}
bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
@@ -155,6 +149,28 @@ public:
}
return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty);
}
+
+ void checkFunctionABI(CodeGenModule &CGM,
+ const FunctionDecl *Decl) const override;
+
+ void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
+ const FunctionDecl *Caller,
+ const FunctionDecl *Callee, const CallArgList &Args,
+ QualType ReturnType) const override;
+
+private:
+ // Diagnose calls between functions with incompatible Streaming SVE
+ // attributes.
+ void checkFunctionCallABIStreaming(CodeGenModule &CGM, SourceLocation CallLoc,
+ const FunctionDecl *Caller,
+ const FunctionDecl *Callee) const;
+ // Diagnose calls which must pass arguments in floating-point registers when
+ // the selected target does not have floating-point registers.
+ void checkFunctionCallABISoftFloat(CodeGenModule &CGM, SourceLocation CallLoc,
+ const FunctionDecl *Caller,
+ const FunctionDecl *Callee,
+ const CallArgList &Args,
+ QualType ReturnType) const;
};
class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
@@ -285,7 +301,7 @@ AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadic,
if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() > 128)
- return getNaturalAlignIndirect(Ty);
+ return getNaturalAlignIndirect(Ty, false);
return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
? ABIArgInfo::getExtend(Ty)
@@ -482,6 +498,11 @@ bool AArch64SwiftABIInfo::isLegalVectorType(CharUnits VectorSize,
}
bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
+ // For the soft-float ABI variant, no types are considered to be homogeneous
+ // aggregates.
+ if (Kind == AArch64ABIKind::AAPCSSoft)
+ return false;
+
// Homogeneous aggregates for AAPCS64 must have base types of a floating
// point type or a short-vector type. This is the same as the 32-bit ABI,
// but with the difference that any floating-point type is allowed,
@@ -512,18 +533,14 @@ bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
return true;
}
-Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
+RValue AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
+ CodeGenFunction &CGF, AArch64ABIKind Kind,
+ AggValueSlot Slot) const {
ABIArgInfo AI = classifyArgumentType(Ty, /*IsVariadic=*/true,
CGF.CurFnInfo->getCallingConvention());
// Empty records are ignored for parameter passing purposes.
- if (AI.isIgnore()) {
- uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
- CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
- VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);
- auto *Load = CGF.Builder.CreateLoad(VAListAddr);
- return Address(Load, CGF.ConvertTypeForMem(Ty), SlotSize);
- }
+ if (AI.isIgnore())
+ return Slot.asRValue();
bool IsIndirect = AI.isIndirect();
@@ -538,7 +555,8 @@ Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
BaseTy = ArrTy->getElementType();
NumRegs = ArrTy->getNumElements();
}
- bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
+ bool IsFPR = Kind != AArch64ABIKind::AAPCSSoft &&
+ (BaseTy->isFloatingPointTy() || BaseTy->isVectorTy());
// The AArch64 va_list type and handling is specified in the Procedure Call
// Standard, section B.4:
@@ -711,18 +729,7 @@ Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
// Again, stack arguments may need realignment. In this case both integer and
// floating-point ones might be affected.
if (!IsIndirect && TyAlign.getQuantity() > 8) {
- int Align = TyAlign.getQuantity();
-
- OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
-
- OnStackPtr = CGF.Builder.CreateAdd(
- OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
- "align_stack");
- OnStackPtr = CGF.Builder.CreateAnd(
- OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
- "align_stack");
-
- OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
+ OnStackPtr = emitRoundPointerUpToAlignment(CGF, OnStackPtr, TyAlign);
}
Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty,
std::max(CharUnits::fromQuantity(8), TyAlign));
@@ -761,27 +768,34 @@ Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
OnStackBlock, "vaargs.addr");
if (IsIndirect)
- return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), ElementTy,
- TyAlign);
-
- return ResAddr;
+ return CGF.EmitLoadOfAnyValue(
+ CGF.MakeAddrLValue(
+ Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), ElementTy,
+ TyAlign),
+ Ty),
+ Slot);
+
+ return CGF.EmitLoadOfAnyValue(CGF.MakeAddrLValue(ResAddr, Ty), Slot);
}
-Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
+RValue AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
+ CodeGenFunction &CGF,
+ AggValueSlot Slot) const {
// The backend's lowering doesn't support va_arg for aggregates or
// illegal vector types. Lower VAArg here for these cases and use
// the LLVM va_arg instruction for everything else.
if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
- return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
+ return CGF.EmitLoadOfAnyValue(
+ CGF.MakeAddrLValue(
+ EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect()), Ty),
+ Slot);
uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
// Empty records are ignored for parameter passing purposes.
if (isEmptyRecord(getContext(), Ty, true))
- return Address(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"),
- CGF.ConvertTypeForMem(Ty), SlotSize);
+ return Slot.asRValue();
// The size of the actual thing passed, which might end up just
// being a pointer for indirect types.
@@ -796,12 +810,12 @@ Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
}
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
- TyInfo, SlotSize, /*AllowHigherAlign*/ true);
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, SlotSize,
+ /*AllowHigherAlign*/ true, Slot);
}
-Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
+RValue AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
bool IsIndirect = false;
// Composites larger than 16 bytes are passed by reference.
@@ -811,7 +825,136 @@ Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
CGF.getContext().getTypeInfoInChars(Ty),
CharUnits::fromQuantity(8),
- /*allowHigherAlign*/ false);
+ /*allowHigherAlign*/ false, Slot);
+}
+
+static bool isStreamingCompatible(const FunctionDecl *F) {
+ if (const auto *T = F->getType()->getAs<FunctionProtoType>())
+ return T->getAArch64SMEAttributes() &
+ FunctionType::SME_PStateSMCompatibleMask;
+ return false;
+}
+
+// Report an error if an argument or return value of type Ty would need to be
+// passed in a floating-point register.
+static void diagnoseIfNeedsFPReg(DiagnosticsEngine &Diags,
+ const StringRef ABIName,
+ const AArch64ABIInfo &ABIInfo,
+ const QualType &Ty, const NamedDecl *D,
+ SourceLocation loc) {
+ const Type *HABase = nullptr;
+ uint64_t HAMembers = 0;
+ if (Ty->isFloatingType() || Ty->isVectorType() ||
+ ABIInfo.isHomogeneousAggregate(Ty, HABase, HAMembers)) {
+ Diags.Report(loc, diag::err_target_unsupported_type_for_abi)
+ << D->getDeclName() << Ty << ABIName;
+ }
+}
+
+// If we are using a hard-float ABI, but do not have floating point registers,
+// then report an error for any function arguments or returns which would be
+// passed in floating-pint registers.
+void AArch64TargetCodeGenInfo::checkFunctionABI(
+ CodeGenModule &CGM, const FunctionDecl *FuncDecl) const {
+ const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
+ const TargetInfo &TI = ABIInfo.getContext().getTargetInfo();
+
+ if (!TI.hasFeature("fp") && !ABIInfo.isSoftFloat()) {
+ diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo,
+ FuncDecl->getReturnType(), FuncDecl,
+ FuncDecl->getLocation());
+ for (ParmVarDecl *PVD : FuncDecl->parameters()) {
+ diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, PVD->getType(),
+ PVD, FuncDecl->getLocation());
+ }
+ }
+}
+
+void AArch64TargetCodeGenInfo::checkFunctionCallABIStreaming(
+ CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
+ const FunctionDecl *Callee) const {
+ if (!Caller || !Callee || !Callee->hasAttr<AlwaysInlineAttr>())
+ return;
+
+ bool CallerIsStreaming =
+ IsArmStreamingFunction(Caller, /*IncludeLocallyStreaming=*/true);
+ bool CalleeIsStreaming =
+ IsArmStreamingFunction(Callee, /*IncludeLocallyStreaming=*/true);
+ bool CallerIsStreamingCompatible = isStreamingCompatible(Caller);
+ bool CalleeIsStreamingCompatible = isStreamingCompatible(Callee);
+
+ if (!CalleeIsStreamingCompatible &&
+ (CallerIsStreaming != CalleeIsStreaming || CallerIsStreamingCompatible))
+ CGM.getDiags().Report(
+ CallLoc, CalleeIsStreaming
+ ? diag::err_function_always_inline_attribute_mismatch
+ : diag::warn_function_always_inline_attribute_mismatch)
+ << Caller->getDeclName() << Callee->getDeclName() << "streaming";
+ if (auto *NewAttr = Callee->getAttr<ArmNewAttr>())
+ if (NewAttr->isNewZA())
+ CGM.getDiags().Report(CallLoc, diag::err_function_always_inline_new_za)
+ << Callee->getDeclName();
+}
+
+// If the target does not have floating-point registers, but we are using a
+// hard-float ABI, there is no way to pass floating-point, vector or HFA values
+// to functions, so we report an error.
+void AArch64TargetCodeGenInfo::checkFunctionCallABISoftFloat(
+ CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
+ const FunctionDecl *Callee, const CallArgList &Args,
+ QualType ReturnType) const {
+ const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
+ const TargetInfo &TI = ABIInfo.getContext().getTargetInfo();
+
+ if (!Caller || TI.hasFeature("fp") || ABIInfo.isSoftFloat())
+ return;
+
+ diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, ReturnType,
+ Callee ? Callee : Caller, CallLoc);
+
+ for (const CallArg &Arg : Args)
+ diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, Arg.getType(),
+ Callee ? Callee : Caller, CallLoc);
+}
+
+void AArch64TargetCodeGenInfo::checkFunctionCallABI(CodeGenModule &CGM,
+ SourceLocation CallLoc,
+ const FunctionDecl *Caller,
+ const FunctionDecl *Callee,
+ const CallArgList &Args,
+ QualType ReturnType) const {
+ checkFunctionCallABIStreaming(CGM, CallLoc, Caller, Callee);
+ checkFunctionCallABISoftFloat(CGM, CallLoc, Caller, Callee, Args, ReturnType);
+}
+
+void AArch64ABIInfo::appendAttributeMangling(TargetClonesAttr *Attr,
+ unsigned Index,
+ raw_ostream &Out) const {
+ appendAttributeMangling(Attr->getFeatureStr(Index), Out);
+}
+
+void AArch64ABIInfo::appendAttributeMangling(StringRef AttrStr,
+ raw_ostream &Out) const {
+ if (AttrStr == "default") {
+ Out << ".default";
+ return;
+ }
+
+ Out << "._";
+ SmallVector<StringRef, 8> Features;
+ AttrStr.split(Features, "+");
+ for (auto &Feat : Features)
+ Feat = Feat.trim();
+
+ llvm::sort(Features, [](const StringRef LHS, const StringRef RHS) {
+ return LHS.compare(RHS) < 0;
+ });
+
+ llvm::SmallDenseSet<StringRef, 8> UniqueFeats;
+ for (auto &Feat : Features)
+ if (auto Ext = llvm::AArch64::parseFMVExtension(Feat))
+ if (UniqueFeats.insert(Ext->Name).second)
+ Out << 'M' << Ext->Name;
}
std::unique_ptr<TargetCodeGenInfo>
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/AMDGPU.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/AMDGPU.cpp
index 03ac6b78598f..4d3275e17c38 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Targets/AMDGPU.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/AMDGPU.cpp
@@ -45,11 +45,12 @@ public:
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
- ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const;
+ ABIArgInfo classifyArgumentType(QualType Ty, bool Variadic,
+ unsigned &NumRegsLeft) const;
void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
};
bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
@@ -103,19 +104,27 @@ void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
if (!getCXXABI().classifyReturnType(FI))
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ unsigned ArgumentIndex = 0;
+ const unsigned numFixedArguments = FI.getNumRequiredArgs();
+
unsigned NumRegsLeft = MaxNumRegsForArgsRet;
for (auto &Arg : FI.arguments()) {
if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
Arg.info = classifyKernelArgumentType(Arg.type);
} else {
- Arg.info = classifyArgumentType(Arg.type, NumRegsLeft);
+ bool FixedArgument = ArgumentIndex++ < numFixedArguments;
+ Arg.info = classifyArgumentType(Arg.type, !FixedArgument, NumRegsLeft);
}
}
}
-Address AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- llvm_unreachable("AMDGPU does not support varargs");
+RValue AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
+ const bool IsIndirect = false;
+ const bool AllowHigherAlign = false;
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
+ getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(4), AllowHigherAlign, Slot);
}
ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
@@ -197,12 +206,20 @@ ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
}
-ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty,
+ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty, bool Variadic,
unsigned &NumRegsLeft) const {
assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");
Ty = useFirstFieldIfTransparentUnion(Ty);
+ if (Variadic) {
+ return ABIArgInfo::getDirect(/*T=*/nullptr,
+ /*Offset=*/0,
+ /*Padding=*/nullptr,
+ /*CanBeFlattened=*/false,
+ /*Align=*/0);
+ }
+
if (isAggregateTypeForABI(Ty)) {
// Records with non-trivial destructors/copy-constructors should not be
// passed by value.
@@ -356,6 +373,29 @@ void AMDGPUTargetCodeGenInfo::setFunctionDeclAttributes(
if (NumVGPR != 0)
F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
}
+
+ if (const auto *Attr = FD->getAttr<AMDGPUMaxNumWorkGroupsAttr>()) {
+ uint32_t X = Attr->getMaxNumWorkGroupsX()
+ ->EvaluateKnownConstInt(M.getContext())
+ .getExtValue();
+ // Y and Z dimensions default to 1 if not specified
+ uint32_t Y = Attr->getMaxNumWorkGroupsY()
+ ? Attr->getMaxNumWorkGroupsY()
+ ->EvaluateKnownConstInt(M.getContext())
+ .getExtValue()
+ : 1;
+ uint32_t Z = Attr->getMaxNumWorkGroupsZ()
+ ? Attr->getMaxNumWorkGroupsZ()
+ ->EvaluateKnownConstInt(M.getContext())
+ .getExtValue()
+ : 1;
+
+ llvm::SmallString<32> AttrVal;
+ llvm::raw_svector_ostream OS(AttrVal);
+ OS << X << ',' << Y << ',' << Z;
+
+ F->addFnAttr("amdgpu-max-num-workgroups", AttrVal.str());
+ }
}
/// Emits control constants used to change per-architecture behaviour in the
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/ARC.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/ARC.cpp
index 550eb4068f25..1904e8fdb388 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Targets/ARC.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/ARC.cpp
@@ -24,8 +24,8 @@ public:
using DefaultABIInfo::DefaultABIInfo;
private:
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const {
if (!State.FreeRegs)
@@ -81,11 +81,11 @@ ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const {
TypeAlign > MinABIStackAlignInBytes);
}
-Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
+RValue ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
getContext().getTypeInfoInChars(Ty),
- CharUnits::fromQuantity(4), true);
+ CharUnits::fromQuantity(4), true, Slot);
}
ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/ARM.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/ARM.cpp
index d7d175ff1724..457d761039a0 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Targets/ARM.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/ARM.cpp
@@ -35,7 +35,9 @@ public:
case llvm::Triple::EABI:
case llvm::Triple::EABIHF:
case llvm::Triple::GNUEABI:
+ case llvm::Triple::GNUEABIT64:
case llvm::Triple::GNUEABIHF:
+ case llvm::Triple::GNUEABIHFT64:
case llvm::Triple::MuslEABI:
case llvm::Triple::MuslEABIHF:
return true;
@@ -48,6 +50,7 @@ public:
switch (getTarget().getTriple().getEnvironment()) {
case llvm::Triple::EABIHF:
case llvm::Triple::GNUEABIHF:
+ case llvm::Triple::GNUEABIHFT64:
case llvm::Triple::MuslEABIHF:
return true;
default:
@@ -81,8 +84,8 @@ private:
void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
llvm::CallingConv::ID getLLVMDefaultCC() const;
llvm::CallingConv::ID getABIDefaultCC() const;
@@ -141,7 +144,7 @@ public:
ParsedTargetAttr Attr =
CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
if (!Attr.BranchProtection.empty()) {
- TargetInfo::BranchProtectionInfo BPI;
+ TargetInfo::BranchProtectionInfo BPI{};
StringRef DiagMsg;
StringRef Arch =
Attr.CPU.empty() ? CGM.getTarget().getTargetOpts().CPU : Attr.CPU;
@@ -151,17 +154,8 @@ public:
D->getLocation(),
diag::warn_target_unsupported_branch_protection_attribute)
<< Arch;
- } else {
- static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
- assert(static_cast<unsigned>(BPI.SignReturnAddr) <= 2 &&
- "Unexpected SignReturnAddressScopeKind");
- Fn->addFnAttr(
- "sign-return-address",
- SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
-
- Fn->addFnAttr("branch-target-enforcement",
- BPI.BranchTargetEnforcement ? "true" : "false");
- }
+ } else
+ setBranchProtectionFnAttributes(BPI, (*Fn));
} else if (CGM.getLangOpts().BranchTargetEnforcement ||
CGM.getLangOpts().hasSignReturnAddress()) {
// If the Branch Protection attribute is missing, validate the target
@@ -173,6 +167,10 @@ public:
diag::warn_target_unsupported_branch_protection_attribute)
<< Attr.CPU;
}
+ } else if (CGM.getTarget().isBranchProtectionSupportedArch(
+ CGM.getTarget().getTargetOpts().CPU)) {
+ TargetInfo::BranchProtectionInfo BPI(CGM.getLangOpts());
+ setBranchProtectionFnAttributes(BPI, (*Fn));
}
const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
@@ -677,7 +675,7 @@ bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
/// Return true if a type contains any 16-bit floating point vectors
bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
- uint64_t NElements = AT->getSize().getZExtValue();
+ uint64_t NElements = AT->getZExtSize();
if (NElements == 0)
return false;
return containsAnyFP16Vectors(AT->getElementType());
@@ -759,16 +757,13 @@ bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
(acceptHalf && (getABIKind() == ARMABIKind::AAPCS16_VFP));
}
-Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
+RValue ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
CharUnits SlotSize = CharUnits::fromQuantity(4);
// Empty records are ignored for parameter passing purposes.
- if (isEmptyRecord(getContext(), Ty, true)) {
- VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);
- auto *Load = CGF.Builder.CreateLoad(VAListAddr);
- return Address(Load, CGF.ConvertTypeForMem(Ty), SlotSize);
- }
+ if (isEmptyRecord(getContext(), Ty, true))
+ return Slot.asRValue();
CharUnits TySize = getContext().getTypeSizeInChars(Ty);
CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty);
@@ -804,8 +799,8 @@ Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
}
TypeInfoChars TyInfo(TySize, TyAlignForABI, AlignRequirementKind::None);
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
- SlotSize, /*AllowHigherAlign*/ true);
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, SlotSize,
+ /*AllowHigherAlign*/ true, Slot);
}
std::unique_ptr<TargetCodeGenInfo>
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/CSKY.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/CSKY.cpp
index 924eced700e1..d8720afd1a71 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Targets/CSKY.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/CSKY.cpp
@@ -33,8 +33,8 @@ public:
bool isReturnType = false) const;
ABIArgInfo classifyReturnType(QualType RetTy) const;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
};
} // end anonymous namespace
@@ -57,20 +57,18 @@ void CSKYABIInfo::computeInfo(CGFunctionInfo &FI) const {
}
}
-Address CSKYABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
+RValue CSKYABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);
// Empty records are ignored for parameter passing purposes.
- if (isEmptyRecord(getContext(), Ty, true)) {
- return Address(CGF.Builder.CreateLoad(VAListAddr),
- CGF.ConvertTypeForMem(Ty), SlotSize);
- }
+ if (isEmptyRecord(getContext(), Ty, true))
+ return Slot.asRValue();
auto TInfo = getContext().getTypeInfoInChars(Ty);
return emitVoidPtrVAArg(CGF, VAListAddr, Ty, false, TInfo, SlotSize,
- /*AllowHigherAlign=*/true);
+ /*AllowHigherAlign=*/true, Slot);
}
ABIArgInfo CSKYABIInfo::classifyArgumentType(QualType Ty, int &ArgGPRsLeft,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/Hexagon.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/Hexagon.cpp
index 944a8d002ecf..8fd2a81494d9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Targets/Hexagon.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/Hexagon.cpp
@@ -29,8 +29,8 @@ private:
void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
Address EmitVAArgFromMemory(CodeGenFunction &CFG, Address VAListAddr,
QualType Ty) const;
Address EmitVAArgForHexagon(CodeGenFunction &CFG, Address VAListAddr,
@@ -408,13 +408,16 @@ Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
return Address(ArgAddr, MemTy, CharUnits::fromQuantity(ArgAlign));
}
-Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
+RValue HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
if (getTarget().getTriple().isMusl())
- return EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty);
+ return CGF.EmitLoadOfAnyValue(
+ CGF.MakeAddrLValue(EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty), Ty),
+ Slot);
- return EmitVAArgForHexagon(CGF, VAListAddr, Ty);
+ return CGF.EmitLoadOfAnyValue(
+ CGF.MakeAddrLValue(EmitVAArgForHexagon(CGF, VAListAddr, Ty), Ty), Slot);
}
std::unique_ptr<TargetCodeGenInfo>
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/LoongArch.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/LoongArch.cpp
index 63b9a1fdb988..6af9375461f0 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Targets/LoongArch.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/LoongArch.cpp
@@ -44,8 +44,8 @@ public:
int &FARsLeft) const;
ABIArgInfo classifyReturnType(QualType RetTy) const;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
ABIArgInfo extendType(QualType Ty) const;
@@ -146,7 +146,7 @@ bool LoongArchABIInfo::detectFARsEligibleStructHelper(
}
if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
- uint64_t ArraySize = ATy->getSize().getZExtValue();
+ uint64_t ArraySize = ATy->getZExtSize();
QualType EltTy = ATy->getElementType();
// Non-zero-length arrays of empty records make the struct ineligible to be
// passed via FARs in C++.
@@ -417,14 +417,13 @@ ABIArgInfo LoongArchABIInfo::classifyReturnType(QualType RetTy) const {
return classifyArgumentType(RetTy, /*IsFixed=*/true, GARsLeft, FARsLeft);
}
-Address LoongArchABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
+RValue LoongArchABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
CharUnits SlotSize = CharUnits::fromQuantity(GRLen / 8);
// Empty records are ignored for parameter passing purposes.
if (isEmptyRecord(getContext(), Ty, true))
- return Address(CGF.Builder.CreateLoad(VAListAddr),
- CGF.ConvertTypeForMem(Ty), SlotSize);
+ return Slot.asRValue();
auto TInfo = getContext().getTypeInfoInChars(Ty);
@@ -432,7 +431,7 @@ Address LoongArchABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
return emitVoidPtrVAArg(CGF, VAListAddr, Ty,
/*IsIndirect=*/TInfo.Width > 2 * SlotSize, TInfo,
SlotSize,
- /*AllowHigherAlign=*/true);
+ /*AllowHigherAlign=*/true, Slot);
}
ABIArgInfo LoongArchABIInfo::extendType(QualType Ty) const {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/MSP430.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/MSP430.cpp
index bb67d97f4421..8ce70e2111cc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Targets/MSP430.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/MSP430.cpp
@@ -51,9 +51,12 @@ public:
I.info = classifyArgumentType(I.type);
}
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override {
- return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override {
+ return CGF.EmitLoadOfAnyValue(
+ CGF.MakeAddrLValue(
+ EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty)), Ty),
+ Slot);
}
};
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/Mips.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/Mips.cpp
index 8f11c63dcd85..06d9b6d4a576 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Targets/Mips.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/Mips.cpp
@@ -34,8 +34,8 @@ public:
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
ABIArgInfo extendType(QualType Ty) const;
};
@@ -346,8 +346,8 @@ void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
I.info = classifyArgumentType(I.type, Offset);
}
-Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType OrigTy) const {
+RValue MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType OrigTy, AggValueSlot Slot) const {
QualType Ty = OrigTy;
// Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
@@ -373,28 +373,25 @@ Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// MinABIStackAlignInBytes is the size of argument slots on the stack.
CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
- Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
- TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
+ RValue Res = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, TyInfo,
+ ArgSlotSize, /*AllowHigherAlign*/ true, Slot);
-
- // If there was a promotion, "unpromote" into a temporary.
+ // If there was a promotion, "unpromote".
// TODO: can we just use a pointer into a subset of the original slot?
if (DidPromote) {
- Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
- llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
+ llvm::Type *ValTy = CGF.ConvertType(OrigTy);
+ llvm::Value *Promoted = Res.getScalarVal();
// Truncate down to the right width.
- llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
- : CGF.IntPtrTy);
+ llvm::Type *IntTy = (OrigTy->isIntegerType() ? ValTy : CGF.IntPtrTy);
llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
if (OrigTy->isPointerType())
- V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
+ V = CGF.Builder.CreateIntToPtr(V, ValTy);
- CGF.Builder.CreateStore(V, Temp);
- Addr = Temp;
+ return RValue::get(V);
}
- return Addr;
+ return Res;
}
ABIArgInfo MipsABIInfo::extendType(QualType Ty) const {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/NVPTX.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/NVPTX.cpp
index d0dc7c258a03..ec7f1c439b18 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Targets/NVPTX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/NVPTX.cpp
@@ -32,8 +32,8 @@ public:
ABIArgInfo classifyArgumentType(QualType Ty) const;
void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
bool isUnsupportedType(QualType T) const;
ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, unsigned MaxSize) const;
};
@@ -47,6 +47,10 @@ public:
CodeGen::CodeGenModule &M) const override;
bool shouldEmitStaticExternCAliases() const override;
+ llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
+ llvm::PointerType *T,
+ QualType QT) const override;
+
llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const override {
// On the device side, surface reference is represented as an object handle
// in 64-bit integer.
@@ -81,7 +85,7 @@ private:
LValue Src) {
llvm::Value *Handle = nullptr;
llvm::Constant *C =
- llvm::dyn_cast<llvm::Constant>(Src.getAddress(CGF).getPointer());
+ llvm::dyn_cast<llvm::Constant>(Src.getAddress().emitRawPointer(CGF));
// Lookup `addrspacecast` through the constant pointer if any.
if (auto *ASC = llvm::dyn_cast_or_null<llvm::AddrSpaceCastOperator>(C))
C = llvm::cast<llvm::Constant>(ASC->getPointerOperand());
@@ -199,8 +203,11 @@ ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
if (!getCXXABI().classifyReturnType(FI))
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type);
+
+ for (auto &&[ArgumentsCount, I] : llvm::enumerate(FI.arguments()))
+ I.info = ArgumentsCount < FI.getNumRequiredArgs()
+ ? classifyArgumentType(I.type)
+ : ABIArgInfo::getDirect();
// Always honor user-specified calling convention.
if (FI.getCallingConvention() != llvm::CallingConv::C)
@@ -209,9 +216,12 @@ void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
FI.setEffectiveCallingConvention(getRuntimeCC());
}
-Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- llvm_unreachable("NVPTX does not support varargs");
+RValue NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*IsIndirect=*/false,
+ getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(1),
+ /*AllowHigherAlign=*/true, Slot);
}
void NVPTXTargetCodeGenInfo::setTargetAttributes(
@@ -285,6 +295,20 @@ void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::GlobalValue *GV,
bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
return false;
}
+
+llvm::Constant *
+NVPTXTargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
+ llvm::PointerType *PT,
+ QualType QT) const {
+ auto &Ctx = CGM.getContext();
+ if (PT->getAddressSpace() != Ctx.getTargetAddressSpace(LangAS::opencl_local))
+ return llvm::ConstantPointerNull::get(PT);
+
+ auto NPT = llvm::PointerType::get(
+ PT->getContext(), Ctx.getTargetAddressSpace(LangAS::opencl_generic));
+ return llvm::ConstantExpr::getAddrSpaceCast(
+ llvm::ConstantPointerNull::get(NPT), PT);
+}
}
void CodeGenModule::handleCUDALaunchBoundsAttr(llvm::Function *F,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/PNaCl.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/PNaCl.cpp
index 771aa7469da2..9b7d757df3a3 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Targets/PNaCl.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/PNaCl.cpp
@@ -27,8 +27,8 @@ class PNaClABIInfo : public ABIInfo {
ABIArgInfo classifyArgumentType(QualType RetTy) const;
void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF,
- Address VAListAddr, QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
};
class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -45,15 +45,18 @@ void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
I.info = classifyArgumentType(I.type);
}
-Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
+RValue PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
// The PNaCL ABI is a bit odd, in that varargs don't use normal
// function classification. Structs get passed directly for varargs
// functions, through a rewriting transform in
// pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
// this target to actually support a va_arg instructions with an
// aggregate type, unlike other targets.
- return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
+ return CGF.EmitLoadOfAnyValue(
+ CGF.MakeAddrLValue(
+ EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect()), Ty),
+ Slot);
}
/// Classify argument of given type \p Ty.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/PPC.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/PPC.cpp
index 40dddde508c1..e4155810963e 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Targets/PPC.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/PPC.cpp
@@ -8,13 +8,14 @@
#include "ABIInfoImpl.h"
#include "TargetInfo.h"
+#include "clang/Basic/DiagnosticFrontend.h"
using namespace clang;
using namespace clang::CodeGen;
-static Address complexTempStructure(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty, CharUnits SlotSize,
- CharUnits EltSize, const ComplexType *CTy) {
+static RValue complexTempStructure(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, CharUnits SlotSize,
+ CharUnits EltSize, const ComplexType *CTy) {
Address Addr =
emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty, SlotSize * 2,
SlotSize, SlotSize, /*AllowHigher*/ true);
@@ -36,10 +37,7 @@ static Address complexTempStructure(CodeGenFunction &CGF, Address VAListAddr,
llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
- Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
- CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
- /*init*/ true);
- return Temp;
+ return RValue::getComplex(Real, Imag);
}
static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
@@ -128,8 +126,8 @@ public:
I.info = classifyArgumentType(I.type);
}
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
};
class AIXTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -145,6 +143,9 @@ public:
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const override;
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const override;
};
} // namespace
@@ -235,8 +236,8 @@ CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const {
return CharUnits::fromQuantity(PtrByteSize);
}
-Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
+RValue AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
auto TypeInfo = getContext().getTypeInfoInChars(Ty);
TypeInfo.Align = getParamTypeAlignment(Ty);
@@ -257,7 +258,7 @@ Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
}
return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
- SlotSize, /*AllowHigher*/ true);
+ SlotSize, /*AllowHigher*/ true, Slot);
}
bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(
@@ -265,6 +266,61 @@ bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(
return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true);
}
+void AIXTargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
+ if (!isa<llvm::GlobalVariable>(GV))
+ return;
+
+ auto *GVar = cast<llvm::GlobalVariable>(GV);
+ auto GVId = GV->getName();
+
+ // Is this a global variable specified by the user as toc-data?
+ bool UserSpecifiedTOC =
+ llvm::binary_search(M.getCodeGenOpts().TocDataVarsUserSpecified, GVId);
+ // Assumes the same variable cannot be in both TocVarsUserSpecified and
+ // NoTocVars.
+ if (UserSpecifiedTOC ||
+ ((M.getCodeGenOpts().AllTocData) &&
+ !llvm::binary_search(M.getCodeGenOpts().NoTocDataVars, GVId))) {
+ const unsigned long PointerSize =
+ GV->getParent()->getDataLayout().getPointerSizeInBits() / 8;
+ auto *VarD = dyn_cast<VarDecl>(D);
+ assert(VarD && "Invalid declaration of global variable.");
+
+ ASTContext &Context = D->getASTContext();
+ unsigned Alignment = Context.toBits(Context.getDeclAlign(D)) / 8;
+ const auto *Ty = VarD->getType().getTypePtr();
+ const RecordDecl *RDecl =
+ Ty->isRecordType() ? Ty->getAs<RecordType>()->getDecl() : nullptr;
+
+ bool EmitDiagnostic = UserSpecifiedTOC && GV->hasExternalLinkage();
+ auto reportUnsupportedWarning = [&](bool ShouldEmitWarning, StringRef Msg) {
+ if (ShouldEmitWarning)
+ M.getDiags().Report(D->getLocation(), diag::warn_toc_unsupported_type)
+ << GVId << Msg;
+ };
+ if (!Ty || Ty->isIncompleteType())
+ reportUnsupportedWarning(EmitDiagnostic, "of incomplete type");
+ else if (RDecl && RDecl->hasFlexibleArrayMember())
+ reportUnsupportedWarning(EmitDiagnostic,
+ "it contains a flexible array member");
+ else if (VarD->getTLSKind() != VarDecl::TLS_None)
+ reportUnsupportedWarning(EmitDiagnostic, "of thread local storage");
+ else if (PointerSize < Context.getTypeInfo(VarD->getType()).Width / 8)
+ reportUnsupportedWarning(EmitDiagnostic,
+ "variable is larger than a pointer");
+ else if (PointerSize < Alignment)
+ reportUnsupportedWarning(EmitDiagnostic,
+ "variable is aligned wider than a pointer");
+ else if (D->hasAttr<SectionAttr>())
+ reportUnsupportedWarning(EmitDiagnostic,
+ "variable has a section attribute");
+ else if (GV->hasExternalLinkage() ||
+ (M.getCodeGenOpts().AllTocData && !GV->hasLocalLinkage()))
+ GVar->addAttribute("toc-data");
+ }
+}
+
// PowerPC-32
namespace {
/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
@@ -289,8 +345,8 @@ public:
I.info = classifyArgumentType(I.type);
}
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
};
class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
@@ -367,8 +423,8 @@ ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
// TODO: this implementation is now likely redundant with
// DefaultABIInfo::EmitVAArg.
-Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
- QualType Ty) const {
+RValue PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
+ QualType Ty, AggValueSlot Slot) const {
if (getTarget().getTriple().isOSDarwin()) {
auto TI = getContext().getTypeInfoInChars(Ty);
TI.Align = getParamTypeAlignment(Ty);
@@ -376,14 +432,14 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
CharUnits SlotSize = CharUnits::fromQuantity(4);
return emitVoidPtrVAArg(CGF, VAList, Ty,
classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
- /*AllowHigherAlign=*/true);
+ /*AllowHigherAlign=*/true, Slot);
}
const unsigned OverflowLimit = 8;
if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
// TODO: Implement this. For now ignore.
(void)CTy;
- return Address::invalid(); // FIXME?
+ return RValue::getAggregate(Address::invalid()); // FIXME?
}
// struct __va_list_tag {
@@ -454,9 +510,10 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
llvm::Value *RegOffset =
Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
- RegAddr = Address(
- Builder.CreateInBoundsGEP(CGF.Int8Ty, RegAddr.getPointer(), RegOffset),
- DirectTy, RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
+ RegAddr = Address(Builder.CreateInBoundsGEP(
+ CGF.Int8Ty, RegAddr.emitRawPointer(CGF), RegOffset),
+ DirectTy,
+ RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
// Increase the used-register count.
NumRegs =
@@ -492,7 +549,7 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
// Round up address of argument to alignment
CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
if (Align > OverflowAreaAlign) {
- llvm::Value *Ptr = OverflowArea.getPointer();
+ llvm::Value *Ptr = OverflowArea.emitRawPointer(CGF);
OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
OverflowArea.getElementType(), Align);
}
@@ -501,7 +558,7 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
// Increase the overflow area.
OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
- Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
+ Builder.CreateStore(OverflowArea.emitRawPointer(CGF), OverflowAreaAddr);
CGF.EmitBranch(Cont);
}
@@ -517,7 +574,7 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
getContext().getTypeAlignInChars(Ty));
}
- return Result;
+ return CGF.EmitLoadOfAnyValue(CGF.MakeAddrLValue(Result, Ty), Slot);
}
bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
@@ -598,8 +655,8 @@ public:
}
}
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
};
class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
@@ -898,8 +955,8 @@ PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
}
// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
-Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
+RValue PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
auto TypeInfo = getContext().getTypeInfoInChars(Ty);
TypeInfo.Align = getParamTypeAlignment(Ty);
@@ -931,7 +988,7 @@ Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// types this way, and so right-alignment only applies to fundamental types.
// So on PPC64, we must force the use of right-alignment even for aggregates.
return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
- SlotSize, /*AllowHigher*/ true,
+ SlotSize, /*AllowHigher*/ true, Slot,
/*ForceRightAdjust*/ true);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/RISCV.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/RISCV.cpp
index 02c86ad2e58c..f2add9351c03 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Targets/RISCV.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/RISCV.cpp
@@ -48,8 +48,8 @@ public:
int &ArgFPRsLeft) const;
ABIArgInfo classifyReturnType(QualType RetTy) const;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
ABIArgInfo extendType(QualType Ty) const;
@@ -152,7 +152,7 @@ bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
}
if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
- uint64_t ArraySize = ATy->getSize().getZExtValue();
+ uint64_t ArraySize = ATy->getZExtSize();
QualType EltTy = ATy->getElementType();
// Non-zero-length arrays of empty records make the struct ineligible for
// the FP calling convention in C++.
@@ -361,12 +361,13 @@ ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
CGCXXABI::RAA_DirectInMemory);
}
- // Ignore empty structs/unions.
- if (isEmptyRecord(getContext(), Ty, true))
- return ABIArgInfo::getIgnore();
-
uint64_t Size = getContext().getTypeSize(Ty);
+ // Ignore empty structs/unions whose size is zero. According to the calling
+ // convention empty structs/unions are required to be sized types in C++.
+ if (isEmptyRecord(getContext(), Ty, true) && Size == 0)
+ return ABIArgInfo::getIgnore();
+
// Pass floating point values via FPRs if possible.
if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
FLen >= Size && ArgFPRsLeft) {
@@ -441,7 +442,13 @@ ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
- return ABIArgInfo::getDirect();
+ ABIArgInfo Info = ABIArgInfo::getDirect();
+
+ // If it is tuple type, it can't be flattened.
+ if (llvm::StructType *STy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty)))
+ Info.setCanBeFlattened(!STy->containsHomogeneousScalableVectorTypes());
+
+ return Info;
}
if (const VectorType *VT = Ty->getAs<VectorType>())
@@ -483,15 +490,13 @@ ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
ArgFPRsLeft);
}
-Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
+RValue RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);
// Empty records are ignored for parameter passing purposes.
- if (isEmptyRecord(getContext(), Ty, true)) {
- return Address(CGF.Builder.CreateLoad(VAListAddr),
- CGF.ConvertTypeForMem(Ty), SlotSize);
- }
+ if (isEmptyRecord(getContext(), Ty, true))
+ return Slot.asRValue();
auto TInfo = getContext().getTypeInfoInChars(Ty);
@@ -505,8 +510,8 @@ Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// Arguments bigger than 2*Xlen bytes are passed indirectly.
bool IsIndirect = TInfo.Width > 2 * SlotSize;
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo,
- SlotSize, /*AllowHigherAlign=*/true);
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo, SlotSize,
+ /*AllowHigherAlign=*/true, Slot);
}
ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
@@ -523,7 +528,10 @@ public:
RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
unsigned FLen, bool EABI)
: TargetCodeGenInfo(
- std::make_unique<RISCVABIInfo>(CGT, XLen, FLen, EABI)) {}
+ std::make_unique<RISCVABIInfo>(CGT, XLen, FLen, EABI)) {
+ SwiftInfo =
+ std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/false);
+ }
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/Sparc.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/Sparc.cpp
index a337a52a94ec..da8c7219be26 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Targets/Sparc.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/Sparc.cpp
@@ -111,8 +111,8 @@ public:
private:
ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
// Coercion type builder for structs passed in registers. The coercion type
// serves two purposes:
@@ -263,7 +263,11 @@ SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
CoerceBuilder CB(getVMContext(), getDataLayout());
CB.addStruct(0, StrTy);
- CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));
+ // All structs, even empty ones, should take up a register argument slot,
+ // so pin the minimum struct size to one bit.
+ CB.pad(llvm::alignTo(
+ std::max(CB.DL.getTypeSizeInBits(StrTy).getKnownMinValue(), uint64_t(1)),
+ 64));
// Try to use the original type for coercion.
llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
@@ -274,8 +278,8 @@ SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
return ABIArgInfo::getDirect(CoerceTy);
}
-Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
+RValue SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
ABIArgInfo AI = classifyType(Ty, 16 * 8);
llvm::Type *ArgTy = CGT.ConvertType(Ty);
if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
@@ -321,14 +325,15 @@ Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
break;
case ABIArgInfo::Ignore:
- return Address(llvm::UndefValue::get(ArgPtrTy), ArgTy, TypeInfo.Align);
+ return Slot.asRValue();
}
// Update VAList.
Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next");
- Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
+ Builder.CreateStore(NextPtr.emitRawPointer(CGF), VAListAddr);
- return ArgAddr.withElementType(ArgTy);
+ return CGF.EmitLoadOfAnyValue(
+ CGF.MakeAddrLValue(ArgAddr.withElementType(ArgTy), Ty), Slot);
}
void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/SystemZ.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/SystemZ.cpp
index 6eb0c6ef2f7d..4d61f5137934 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Targets/SystemZ.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/SystemZ.cpp
@@ -38,8 +38,8 @@ public:
ABIArgInfo classifyArgumentType(QualType ArgTy) const;
void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
};
class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -243,8 +243,8 @@ QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
return Ty;
}
-Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
+RValue SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
// Assume that va_list type is correct; should be pointer to LLVM type:
// struct {
// i64 __gpr;
@@ -306,11 +306,11 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// Update overflow_arg_area_ptr pointer
llvm::Value *NewOverflowArgArea = CGF.Builder.CreateGEP(
- OverflowArgArea.getElementType(), OverflowArgArea.getPointer(),
+ OverflowArgArea.getElementType(), OverflowArgArea.emitRawPointer(CGF),
PaddedSizeV, "overflow_arg_area");
CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
- return MemAddr;
+ return CGF.EmitLoadOfAnyValue(CGF.MakeAddrLValue(MemAddr, Ty), Slot);
}
assert(PaddedSize.getQuantity() == 8);
@@ -382,10 +382,9 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
Address MemAddr = RawMemAddr.withElementType(DirectTy);
// Update overflow_arg_area_ptr pointer
- llvm::Value *NewOverflowArgArea =
- CGF.Builder.CreateGEP(OverflowArgArea.getElementType(),
- OverflowArgArea.getPointer(), PaddedSizeV,
- "overflow_arg_area");
+ llvm::Value *NewOverflowArgArea = CGF.Builder.CreateGEP(
+ OverflowArgArea.getElementType(), OverflowArgArea.emitRawPointer(CGF),
+ PaddedSizeV, "overflow_arg_area");
CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
CGF.EmitBranch(ContBlock);
@@ -398,7 +397,7 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"), ArgTy,
TyInfo.Align);
- return ResAddr;
+ return CGF.EmitLoadOfAnyValue(CGF.MakeAddrLValue(ResAddr, Ty), Slot);
}
ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
@@ -413,13 +412,16 @@ ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
}
ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
+ // Handle transparent union types.
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
// Handle the generic C++ ABI.
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
// Integers and enums are extended to full register width.
if (isPromotableIntegerTypeForABI(Ty))
- return ABIArgInfo::getExtend(Ty);
+ return ABIArgInfo::getExtend(Ty, CGT.ConvertType(Ty));
// Handle vector types and vector-like structure types. Note that
// as opposed to float-like structure types, we do not allow any
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/WebAssembly.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/WebAssembly.cpp
index bd332228ce5b..70a968fe93ca 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Targets/WebAssembly.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/WebAssembly.cpp
@@ -41,8 +41,8 @@ private:
Arg.info = classifyArgumentType(Arg.type);
}
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
};
class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
@@ -155,15 +155,15 @@ ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
return defaultInfo.classifyReturnType(RetTy);
}
-Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
+RValue WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
bool IsIndirect = isAggregateTypeForABI(Ty) &&
!isEmptyRecord(getContext(), Ty, true) &&
!isSingleElementStruct(Ty, getContext());
return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
getContext().getTypeInfoInChars(Ty),
CharUnits::fromQuantity(4),
- /*AllowHigherAlign=*/true);
+ /*AllowHigherAlign=*/true, Slot);
}
std::unique_ptr<TargetCodeGenInfo>
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/X86.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/X86.cpp
index 2291c991fb11..1dc3172a6bdf 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Targets/X86.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/X86.cpp
@@ -173,8 +173,8 @@ class X86_32ABIInfo : public ABIInfo {
public:
void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
bool RetSmallStructInRegABI, bool Win32StructABI,
@@ -327,7 +327,7 @@ void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
ResultTruncRegTypes.push_back(CoerceTy);
// Coerce the integer by bitcasting the return slot pointer.
- ReturnSlot.setAddress(ReturnSlot.getAddress(CGF).withElementType(CoerceTy));
+ ReturnSlot.setAddress(ReturnSlot.getAddress().withElementType(CoerceTy));
ResultRegDests.push_back(ReturnSlot);
rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
@@ -469,7 +469,8 @@ bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const {
// If the return value is indirect, then the hidden argument is consuming one
// integer register.
- if (State.FreeRegs) {
+ if (State.CC != llvm::CallingConv::X86_FastCall &&
+ State.CC != llvm::CallingConv::X86_VectorCall && State.FreeRegs) {
--State.FreeRegs;
if (!IsMCUABI)
return getNaturalAlignIndirectInReg(RetTy);
@@ -792,6 +793,8 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,
return ABIArgInfo::getDirect();
return ABIArgInfo::getExpand();
}
+ if (IsVectorCall && Ty->isBuiltinType())
+ return ABIArgInfo::getDirect();
return getIndirectResult(Ty, /*ByVal=*/false, State);
}
@@ -1064,11 +1067,17 @@ void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
StackAlign);
}
-Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
- Address VAListAddr, QualType Ty) const {
+RValue X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
auto TypeInfo = getContext().getTypeInfoInChars(Ty);
+ CCState State(*const_cast<CGFunctionInfo *>(CGF.CurFnInfo));
+ ABIArgInfo AI = classifyArgumentType(Ty, State, /*ArgIndex*/ 0);
+ // Empty records are ignored for parameter passing purposes.
+ if (AI.isIgnore())
+ return Slot.asRValue();
+
// x86-32 changes the alignment of certain arguments on the stack.
//
// Just messing with TypeInfo like this works because we never pass
@@ -1076,9 +1085,9 @@ Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
TypeInfo.Align = CharUnits::fromQuantity(
getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
- TypeInfo, CharUnits::fromQuantity(4),
- /*AllowHigherAlign*/ true);
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
+ CharUnits::fromQuantity(4),
+ /*AllowHigherAlign*/ true, Slot);
}
bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
@@ -1359,10 +1368,10 @@ public:
void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
- Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
+ RValue EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
bool has64BitPointers() const {
return Has64BitPointers;
@@ -1378,8 +1387,8 @@ public:
void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
bool isHomogeneousAggregateBaseType(QualType Ty) const override {
// FIXME: Assumes vectorcall is in use.
@@ -1476,8 +1485,8 @@ public:
void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
const FunctionDecl *Caller,
- const FunctionDecl *Callee,
- const CallArgList &Args) const override;
+ const FunctionDecl *Callee, const CallArgList &Args,
+ QualType ReturnType) const override;
};
} // namespace
@@ -1552,9 +1561,15 @@ static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx,
return false;
}
-void X86_64TargetCodeGenInfo::checkFunctionCallABI(
- CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
- const FunctionDecl *Callee, const CallArgList &Args) const {
+void X86_64TargetCodeGenInfo::checkFunctionCallABI(CodeGenModule &CGM,
+ SourceLocation CallLoc,
+ const FunctionDecl *Caller,
+ const FunctionDecl *Callee,
+ const CallArgList &Args,
+ QualType ReturnType) const {
+ if (!Callee)
+ return;
+
llvm::StringMap<bool> CallerMap;
llvm::StringMap<bool> CalleeMap;
unsigned ArgIndex = 0;
@@ -1993,7 +2008,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
// this, but it isn't worth it and would be harder to verify.
Current = NoClass;
uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
- uint64_t ArraySize = AT->getSize().getZExtValue();
+ uint64_t ArraySize = AT->getZExtSize();
// The only case a 256-bit wide vector could be used is when the array
// contains a single 256-bit element. Since Lo and Hi logic isn't extended
@@ -2081,7 +2096,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
bool BitField = i->isBitField();
// Ignore padding bit-fields.
- if (BitField && i->isUnnamedBitfield())
+ if (BitField && i->isUnnamedBitField())
continue;
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
@@ -2100,8 +2115,11 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
postMerge(Size, Lo, Hi);
return;
}
+
+ bool IsInMemory =
+ Offset % getContext().getTypeAlign(i->getType().getCanonicalType());
// Note, skip this test for bit-fields, see below.
- if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
+ if (!BitField && IsInMemory) {
Lo = Memory;
postMerge(Size, Lo, Hi);
return;
@@ -2119,7 +2137,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
// structure to be passed in memory even if unaligned, and
// therefore they can straddle an eightbyte.
if (BitField) {
- assert(!i->isUnnamedBitfield());
+ assert(!i->isUnnamedBitField());
uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
uint64_t Size = i->getBitWidthValue(getContext());
@@ -2295,7 +2313,7 @@ static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
- unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
+ unsigned NumElts = (unsigned)AT->getZExtSize();
// Check each element to see if the element overlaps with the queried range.
for (unsigned i = 0; i != NumElts; ++i) {
@@ -2788,12 +2806,11 @@ X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,
// memory), except in situations involving unions.
case X87Up:
case SSE:
+ ++neededSSE;
HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
-
- ++neededSSE;
break;
// AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
@@ -3004,8 +3021,8 @@ static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
return Address(Res, LTy, Align);
}
-Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
+RValue X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
// Assume that va_list type is correct; should be pointer to LLVM type:
// struct {
// i32 gp_offset;
@@ -3019,10 +3036,16 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
/*isNamedArg*/false);
+ // Empty records are ignored for parameter passing purposes.
+ if (AI.isIgnore())
+ return Slot.asRValue();
+
// AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
// in the registers. If not go to step 7.
if (!neededInt && !neededSSE)
- return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
+ return CGF.EmitLoadOfAnyValue(
+ CGF.MakeAddrLValue(EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty), Ty),
+ Slot);
// AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
// general purpose registers needed to pass type and num_fp to hold
@@ -3185,11 +3208,11 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
CGF.EmitBlock(ContBlock);
Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
"vaarg.addr");
- return ResAddr;
+ return CGF.EmitLoadOfAnyValue(CGF.MakeAddrLValue(ResAddr, Ty), Slot);
}
-Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
+RValue X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
// MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
// not 1, 2, 4, or 8 bytes, must be passed by reference."
uint64_t Width = getContext().getTypeSize(Ty);
@@ -3198,7 +3221,7 @@ Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
CGF.getContext().getTypeInfoInChars(Ty),
CharUnits::fromQuantity(8),
- /*allowHigherAlign*/ false);
+ /*allowHigherAlign*/ false, Slot);
}
ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
@@ -3390,8 +3413,8 @@ void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
}
}
-Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
+RValue WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
// MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
// not 1, 2, 4, or 8 bytes, must be passed by reference."
uint64_t Width = getContext().getTypeSize(Ty);
@@ -3400,7 +3423,7 @@ Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
CGF.getContext().getTypeInfoInChars(Ty),
CharUnits::fromQuantity(8),
- /*allowHigherAlign*/ false);
+ /*allowHigherAlign*/ false, Slot);
}
std::unique_ptr<TargetCodeGenInfo> CodeGen::createX86_32TargetCodeGenInfo(
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/XCore.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/XCore.cpp
index aeb48f851e16..f3e241171b87 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Targets/XCore.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/XCore.cpp
@@ -113,8 +113,8 @@ public:
class XCoreABIInfo : public DefaultABIInfo {
public:
XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
+ RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ AggValueSlot Slot) const override;
};
class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -134,8 +134,8 @@ public:
// TODO: this implementation is likely now redundant with the default
// EmitVAArg.
-Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
+RValue XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, AggValueSlot Slot) const {
CGBuilderTy &Builder = CGF.Builder;
// Get the VAList.
@@ -180,10 +180,10 @@ Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// Increment the VAList.
if (!ArgSize.isZero()) {
Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize);
- Builder.CreateStore(APN.getPointer(), VAListAddr);
+ Builder.CreateStore(APN.emitRawPointer(CGF), VAListAddr);
}
- return Val;
+ return CGF.EmitLoadOfAnyValue(CGF.MakeAddrLValue(Val, Ty), Slot);
}
/// During the expansion of a RecordType, an incomplete TypeString is placed
diff --git a/contrib/llvm-project/clang/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp b/contrib/llvm-project/clang/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp
index beca9586988b..2ffbc1a22695 100644
--- a/contrib/llvm-project/clang/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp
+++ b/contrib/llvm-project/clang/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp
@@ -333,8 +333,7 @@ llvm::Expected<std::unique_ptr<DirectoryWatcher>> clang::DirectoryWatcher::creat
const int InotifyFD = inotify_init1(IN_CLOEXEC);
if (InotifyFD == -1)
return llvm::make_error<llvm::StringError>(
- std::string("inotify_init1() error: ") + strerror(errno),
- llvm::inconvertibleErrorCode());
+ llvm::errnoAsErrorCode(), std::string(": inotify_init1()"));
const int InotifyWD = inotify_add_watch(
InotifyFD, Path.str().c_str(),
@@ -346,15 +345,13 @@ llvm::Expected<std::unique_ptr<DirectoryWatcher>> clang::DirectoryWatcher::creat
);
if (InotifyWD == -1)
return llvm::make_error<llvm::StringError>(
- std::string("inotify_add_watch() error: ") + strerror(errno),
- llvm::inconvertibleErrorCode());
+ llvm::errnoAsErrorCode(), std::string(": inotify_add_watch()"));
auto InotifyPollingStopper = SemaphorePipe::create();
if (!InotifyPollingStopper)
return llvm::make_error<llvm::StringError>(
- std::string("SemaphorePipe::create() error: ") + strerror(errno),
- llvm::inconvertibleErrorCode());
+ llvm::errnoAsErrorCode(), std::string(": SemaphorePipe::create()"));
return std::make_unique<DirectoryWatcherLinux>(
Path, Receiver, WaitForInitialSync, InotifyFD, InotifyWD,
diff --git a/contrib/llvm-project/clang/lib/Driver/Distro.cpp b/contrib/llvm-project/clang/lib/Driver/Distro.cpp
index a7e7f169dc14..6f49e641104c 100644
--- a/contrib/llvm-project/clang/lib/Driver/Distro.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Distro.cpp
@@ -95,6 +95,7 @@ static Distro::DistroType DetectLsbRelease(llvm::vfs::FileSystem &VFS) {
.Case("lunar", Distro::UbuntuLunar)
.Case("mantic", Distro::UbuntuMantic)
.Case("noble", Distro::UbuntuNoble)
+ .Case("oracular", Distro::UbuntuOracular)
.Default(Distro::UnknownDistro);
return Version;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/Driver.cpp b/contrib/llvm-project/clang/lib/Driver/Driver.cpp
index 93cddf742d52..f9dc8ab24fa9 100644
--- a/contrib/llvm-project/clang/lib/Driver/Driver.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Driver.cpp
@@ -49,6 +49,7 @@
#include "ToolChains/WebAssembly.h"
#include "ToolChains/XCore.h"
#include "ToolChains/ZOS.h"
+#include "clang/Basic/DiagnosticDriver.h"
#include "clang/Basic/TargetID.h"
#include "clang/Basic/Version.h"
#include "clang/Config/config.h"
@@ -86,11 +87,12 @@
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
-#include "llvm/Support/RISCVISAInfo.h"
+#include "llvm/Support/Regex.h"
#include "llvm/Support/StringSaver.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/RISCVISAInfo.h"
#include <cstdlib> // ::getenv
#include <map>
#include <memory>
@@ -145,6 +147,14 @@ getNVIDIAOffloadTargetTriple(const Driver &D, const ArgList &Args,
static std::optional<llvm::Triple>
getHIPOffloadTargetTriple(const Driver &D, const ArgList &Args) {
if (!Args.hasArg(options::OPT_offload_EQ)) {
+ auto OffloadArchs = Args.getAllArgValues(options::OPT_offload_arch_EQ);
+ if (llvm::find(OffloadArchs, "amdgcnspirv") != OffloadArchs.cend()) {
+ if (OffloadArchs.size() == 1)
+ return llvm::Triple("spirv64-amd-amdhsa");
+ // Mixing specific & SPIR-V compilation is not supported for now.
+ D.Diag(diag::err_drv_only_one_offload_target_supported);
+ return std::nullopt;
+ }
return llvm::Triple("amdgcn-amd-amdhsa"); // Default HIP triple.
}
auto TT = getOffloadTargetTriple(D, Args);
@@ -209,11 +219,10 @@ Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
Name = std::string(llvm::sys::path::filename(ClangExecutable));
Dir = std::string(llvm::sys::path::parent_path(ClangExecutable));
- InstalledDir = Dir; // Provide a sensible default installed dir.
if ((!SysRoot.empty()) && llvm::sys::path::is_relative(SysRoot)) {
// Prepend InstalledDir if SysRoot is relative
- SmallString<128> P(InstalledDir);
+ SmallString<128> P(Dir);
llvm::sys::path::append(P, SysRoot);
SysRoot = std::string(P);
}
@@ -354,12 +363,14 @@ phases::ID Driver::getFinalPhase(const DerivedArgList &DAL,
// -{fsyntax-only,-analyze,emit-ast} only run up to the compiler.
} else if ((PhaseArg = DAL.getLastArg(options::OPT_fsyntax_only)) ||
(PhaseArg = DAL.getLastArg(options::OPT_print_supported_cpus)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_print_enabled_extensions)) ||
(PhaseArg = DAL.getLastArg(options::OPT_module_file_info)) ||
(PhaseArg = DAL.getLastArg(options::OPT_verify_pch)) ||
(PhaseArg = DAL.getLastArg(options::OPT_rewrite_objc)) ||
(PhaseArg = DAL.getLastArg(options::OPT_rewrite_legacy_objc)) ||
(PhaseArg = DAL.getLastArg(options::OPT__migrate)) ||
(PhaseArg = DAL.getLastArg(options::OPT__analyze)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_emit_cir)) ||
(PhaseArg = DAL.getLastArg(options::OPT_emit_ast))) {
FinalPhase = phases::Compile;
@@ -562,9 +573,9 @@ static llvm::Triple computeTargetTriple(const Driver &D,
StringRef ObjectMode = *ObjectModeValue;
llvm::Triple::ArchType AT = llvm::Triple::UnknownArch;
- if (ObjectMode.equals("64")) {
+ if (ObjectMode == "64") {
AT = Target.get64BitArchVariant().getArch();
- } else if (ObjectMode.equals("32")) {
+ } else if (ObjectMode == "32") {
AT = Target.get32BitArchVariant().getArch();
} else {
D.Diag(diag::err_drv_invalid_object_mode) << ObjectMode;
@@ -591,7 +602,8 @@ static llvm::Triple computeTargetTriple(const Driver &D,
if (A->getOption().matches(options::OPT_m64) ||
A->getOption().matches(options::OPT_maix64)) {
AT = Target.get64BitArchVariant().getArch();
- if (Target.getEnvironment() == llvm::Triple::GNUX32)
+ if (Target.getEnvironment() == llvm::Triple::GNUX32 ||
+ Target.getEnvironment() == llvm::Triple::GNUT64)
Target.setEnvironment(llvm::Triple::GNU);
else if (Target.getEnvironment() == llvm::Triple::MuslX32)
Target.setEnvironment(llvm::Triple::Musl);
@@ -654,11 +666,13 @@ static llvm::Triple computeTargetTriple(const Driver &D,
} else if (ABIName == "n32") {
Target = Target.get64BitArchVariant();
if (Target.getEnvironment() == llvm::Triple::GNU ||
+ Target.getEnvironment() == llvm::Triple::GNUT64 ||
Target.getEnvironment() == llvm::Triple::GNUABI64)
Target.setEnvironment(llvm::Triple::GNUABIN32);
} else if (ABIName == "64") {
Target = Target.get64BitArchVariant();
if (Target.getEnvironment() == llvm::Triple::GNU ||
+ Target.getEnvironment() == llvm::Triple::GNUT64 ||
Target.getEnvironment() == llvm::Triple::GNUABIN32)
Target.setEnvironment(llvm::Triple::GNUABI64);
}
@@ -670,7 +684,7 @@ static llvm::Triple computeTargetTriple(const Driver &D,
if (Target.isRISCV()) {
if (Args.hasArg(options::OPT_march_EQ) ||
Args.hasArg(options::OPT_mcpu_EQ)) {
- StringRef ArchName = tools::riscv::getRISCVArch(Args, Target);
+ std::string ArchName = tools::riscv::getRISCVArch(Args, Target);
auto ISAInfo = llvm::RISCVISAInfo::parseArchString(
ArchName, /*EnableExperimentalExtensions=*/true);
if (!llvm::errorToBool(ISAInfo.takeError())) {
@@ -888,11 +902,11 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
}
for (StringRef Arch : Archs) {
- if (NVPTXTriple && IsNVIDIAGpuArch(StringToCudaArch(
+ if (NVPTXTriple && IsNVIDIAOffloadArch(StringToOffloadArch(
getProcessorFromTargetID(*NVPTXTriple, Arch)))) {
DerivedArchs[NVPTXTriple->getTriple()].insert(Arch);
} else if (AMDTriple &&
- IsAMDGpuArch(StringToCudaArch(
+ IsAMDOffloadArch(StringToOffloadArch(
getProcessorFromTargetID(*AMDTriple, Arch)))) {
DerivedArchs[AMDTriple->getTriple()].insert(Arch);
} else {
@@ -1253,6 +1267,14 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
if (VFS->setCurrentWorkingDirectory(WD->getValue()))
Diag(diag::err_drv_unable_to_set_working_directory) << WD->getValue();
+ // Check for missing include directories.
+ if (!Diags.isIgnored(diag::warn_missing_include_dirs, SourceLocation())) {
+ for (auto IncludeDir : Args.getAllArgValues(options::OPT_I_Group)) {
+ if (!VFS->exists(IncludeDir))
+ Diag(diag::warn_missing_include_dirs) << IncludeDir;
+ }
+ }
+
// FIXME: This stuff needs to go into the Compilation, not the driver.
bool CCCPrintPhases;
@@ -1337,7 +1359,7 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
if (const Arg *A = Args.getLastArg(options::OPT_target))
TargetTriple = A->getValue();
if (const Arg *A = Args.getLastArg(options::OPT_ccc_install_dir))
- Dir = InstalledDir = A->getValue();
+ Dir = Dir = A->getValue();
for (const Arg *A : Args.filtered(options::OPT_B)) {
A->claim();
PrefixDirs.push_back(A->getValue(0));
@@ -1443,11 +1465,14 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
const ToolChain &TC = getToolChain(
*UArgs, computeTargetTriple(*this, TargetTriple, *UArgs));
- if (TC.getTriple().isAndroid()) {
- llvm::Triple Triple = TC.getTriple();
+ // Check if the environment version is valid except wasm case.
+ llvm::Triple Triple = TC.getTriple();
+ if (!Triple.isWasm()) {
StringRef TripleVersionName = Triple.getEnvironmentVersionString();
-
- if (Triple.getEnvironmentVersion().empty() && TripleVersionName != "") {
+ StringRef TripleObjectFormat =
+ Triple.getObjectFormatTypeName(Triple.getObjectFormat());
+ if (Triple.getEnvironmentVersion().empty() && TripleVersionName != "" &&
+ TripleVersionName != TripleObjectFormat) {
Diags.Report(diag::err_drv_triple_version_invalid)
<< TripleVersionName << TC.getTripleString();
ContainsError = true;
@@ -1997,7 +2022,13 @@ void Driver::PrintVersion(const Compilation &C, raw_ostream &OS) const {
OS << '\n';
// Print out the install directory.
- OS << "InstalledDir: " << InstalledDir << '\n';
+ OS << "InstalledDir: " << Dir << '\n';
+
+ // Print the build config if it's non-default.
+ // Intended to help LLVM developers understand the configs of compilers
+ // they're investigating.
+ if (!llvm::cl::getCompilerBuildConfig().empty())
+ llvm::cl::printBuildConfig(OS);
// If configuration files were used, print their paths.
for (auto ConfigFile : ConfigFiles)
@@ -2100,7 +2131,7 @@ void Driver::HandleAutocompletions(StringRef PassedFlags) const {
llvm::outs() << llvm::join(SuggestedCompletions, "\n") << '\n';
}
-bool Driver::HandleImmediateArgs(const Compilation &C) {
+bool Driver::HandleImmediateArgs(Compilation &C) {
// The order these options are handled in gcc is all over the place, but we
// don't expect inconsistencies w.r.t. that to matter in practice.
@@ -2136,7 +2167,8 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
if (C.getArgs().hasArg(options::OPT_v) ||
C.getArgs().hasArg(options::OPT__HASH_HASH_HASH) ||
C.getArgs().hasArg(options::OPT_print_supported_cpus) ||
- C.getArgs().hasArg(options::OPT_print_supported_extensions)) {
+ C.getArgs().hasArg(options::OPT_print_supported_extensions) ||
+ C.getArgs().hasArg(options::OPT_print_enabled_extensions)) {
PrintVersion(C, llvm::errs());
SuppressMissingInputWarning = true;
}
@@ -2194,6 +2226,12 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
return false;
}
+ if (C.getArgs().hasArg(options::OPT_print_std_module_manifest_path)) {
+ llvm::outs() << GetStdModuleManifestPath(C, C.getDefaultToolChain())
+ << '\n';
+ return false;
+ }
+
if (C.getArgs().hasArg(options::OPT_print_runtime_dir)) {
if (std::optional<std::string> RuntimePath = TC.getRuntimePath())
llvm::outs() << *RuntimePath << '\n';
@@ -2236,6 +2274,14 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
if (C.getArgs().hasArg(options::OPT_print_libgcc_file_name)) {
ToolChain::RuntimeLibType RLT = TC.GetRuntimeLibType(C.getArgs());
const llvm::Triple Triple(TC.ComputeEffectiveClangTriple(C.getArgs()));
+ // The 'Darwin' toolchain is initialized only when its arguments are
+ // computed. Get the default arguments for OFK_None to ensure that
+ // initialization is performed before trying to access properties of
+ // the toolchain in the functions below.
+ // FIXME: Remove when darwin's toolchain is initialized during construction.
+ // FIXME: For some more esoteric targets the default toolchain is not the
+ // correct one.
+ C.getArgsForToolChain(&TC, Triple.getArchName(), Action::OFK_None);
RegisterEffectiveTriple TripleRAII(TC, Triple);
switch (RLT) {
case ToolChain::RLT_CompilerRT:
@@ -2620,22 +2666,13 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
Diag(clang::diag::note_drv_t_option_is_global);
}
- // CUDA/HIP and their preprocessor expansions can be accepted by CL mode.
// Warn -x after last input file has no effect
- auto LastXArg = Args.getLastArgValue(options::OPT_x);
- const llvm::StringSet<> ValidXArgs = {"cuda", "hip", "cui", "hipi"};
- if (!IsCLMode() || ValidXArgs.contains(LastXArg)) {
+ {
Arg *LastXArg = Args.getLastArgNoClaim(options::OPT_x);
Arg *LastInputArg = Args.getLastArgNoClaim(options::OPT_INPUT);
if (LastXArg && LastInputArg &&
LastInputArg->getIndex() < LastXArg->getIndex())
Diag(clang::diag::warn_drv_unused_x) << LastXArg->getValue();
- } else {
- // In CL mode suggest /TC or /TP since -x doesn't make sense if passed via
- // /clang:.
- if (auto *A = Args.getLastArg(options::OPT_x))
- Diag(diag::err_drv_unsupported_opt_with_suggestion)
- << A->getAsString(Args) << "/TC' or '/TP";
}
for (Arg *A : Args) {
@@ -2922,7 +2959,7 @@ class OffloadingActionBuilder final {
struct TargetID {
/// Target ID string which is persistent throughout the compilation.
const char *ID;
- TargetID(CudaArch Arch) { ID = CudaArchToString(Arch); }
+ TargetID(OffloadArch Arch) { ID = OffloadArchToString(Arch); }
TargetID(const char *ID) : ID(ID) {}
operator const char *() { return ID; }
operator StringRef() { return StringRef(ID); }
@@ -2943,7 +2980,7 @@ class OffloadingActionBuilder final {
bool Relocatable = false;
/// Default GPU architecture if there's no one specified.
- CudaArch DefaultCudaArch = CudaArch::UNKNOWN;
+ OffloadArch DefaultOffloadArch = OffloadArch::UNKNOWN;
/// Method to generate compilation unit ID specified by option
/// '-fuse-cuid='.
@@ -3072,7 +3109,7 @@ class OffloadingActionBuilder final {
// If we have a fat binary, add it to the list.
if (CudaFatBinary) {
- AddTopLevel(CudaFatBinary, CudaArch::UNUSED);
+ AddTopLevel(CudaFatBinary, OffloadArch::UNUSED);
CudaDeviceActions.clear();
CudaFatBinary = nullptr;
return;
@@ -3215,10 +3252,14 @@ class OffloadingActionBuilder final {
// supported GPUs. sm_20 code should work correctly, if
// suboptimally, on all newer GPUs.
if (GpuArchList.empty()) {
- if (ToolChains.front()->getTriple().isSPIRV())
- GpuArchList.push_back(CudaArch::Generic);
- else
- GpuArchList.push_back(DefaultCudaArch);
+ if (ToolChains.front()->getTriple().isSPIRV()) {
+ if (ToolChains.front()->getTriple().getVendor() == llvm::Triple::AMD)
+ GpuArchList.push_back(OffloadArch::AMDGCNSPIRV);
+ else
+ GpuArchList.push_back(OffloadArch::Generic);
+ } else {
+ GpuArchList.push_back(DefaultOffloadArch);
+ }
}
return Error;
@@ -3232,16 +3273,16 @@ class OffloadingActionBuilder final {
CudaActionBuilder(Compilation &C, DerivedArgList &Args,
const Driver::InputList &Inputs)
: CudaActionBuilderBase(C, Args, Inputs, Action::OFK_Cuda) {
- DefaultCudaArch = CudaArch::SM_35;
+ DefaultOffloadArch = OffloadArch::CudaDefault;
}
StringRef getCanonicalOffloadArch(StringRef ArchStr) override {
- CudaArch Arch = StringToCudaArch(ArchStr);
- if (Arch == CudaArch::UNKNOWN || !IsNVIDIAGpuArch(Arch)) {
+ OffloadArch Arch = StringToOffloadArch(ArchStr);
+ if (Arch == OffloadArch::UNKNOWN || !IsNVIDIAOffloadArch(Arch)) {
C.getDriver().Diag(clang::diag::err_drv_cuda_bad_gpu_arch) << ArchStr;
return StringRef();
}
- return CudaArchToString(Arch);
+ return OffloadArchToString(Arch);
}
std::optional<std::pair<llvm::StringRef, llvm::StringRef>>
@@ -3371,7 +3412,7 @@ class OffloadingActionBuilder final {
const Driver::InputList &Inputs)
: CudaActionBuilderBase(C, Args, Inputs, Action::OFK_HIP) {
- DefaultCudaArch = CudaArch::GFX906;
+ DefaultOffloadArch = OffloadArch::HIPDefault;
if (Args.hasArg(options::OPT_fhip_emit_relocatable,
options::OPT_fno_hip_emit_relocatable)) {
@@ -4319,13 +4360,14 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
}
for (auto Opt : {options::OPT_print_supported_cpus,
- options::OPT_print_supported_extensions}) {
+ options::OPT_print_supported_extensions,
+ options::OPT_print_enabled_extensions}) {
// If --print-supported-cpus, -mcpu=? or -mtune=? is specified, build a
// custom Compile phase that prints out supported cpu models and quits.
//
- // If --print-supported-extensions is specified, call the helper function
- // RISCVMarchHelp in RISCVISAInfo.cpp that prints out supported extensions
- // and quits.
+ // If either --print-supported-extensions or --print-enabled-extensions is
+ // specified, call the corresponding helper function that prints out the
+ // supported/enabled extensions and quits.
if (Arg *A = Args.getLastArg(Opt)) {
if (Opt == options::OPT_print_supported_extensions &&
!C.getDefaultToolChain().getTriple().isRISCV() &&
@@ -4335,6 +4377,13 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
<< "--print-supported-extensions";
return;
}
+ if (Opt == options::OPT_print_enabled_extensions &&
+ !C.getDefaultToolChain().getTriple().isRISCV() &&
+ !C.getDefaultToolChain().getTriple().isAArch64()) {
+ C.getDriver().Diag(diag::err_opt_not_valid_on_target)
+ << "--print-enabled-extensions";
+ return;
+ }
// Use the -mcpu=? flag as the dummy input to cc1.
Actions.clear();
@@ -4371,23 +4420,24 @@ static StringRef getCanonicalArchString(Compilation &C,
bool SuppressError = false) {
// Lookup the CUDA / HIP architecture string. Only report an error if we were
// expecting the triple to be only NVPTX / AMDGPU.
- CudaArch Arch = StringToCudaArch(getProcessorFromTargetID(Triple, ArchStr));
+ OffloadArch Arch =
+ StringToOffloadArch(getProcessorFromTargetID(Triple, ArchStr));
if (!SuppressError && Triple.isNVPTX() &&
- (Arch == CudaArch::UNKNOWN || !IsNVIDIAGpuArch(Arch))) {
+ (Arch == OffloadArch::UNKNOWN || !IsNVIDIAOffloadArch(Arch))) {
C.getDriver().Diag(clang::diag::err_drv_offload_bad_gpu_arch)
<< "CUDA" << ArchStr;
return StringRef();
} else if (!SuppressError && Triple.isAMDGPU() &&
- (Arch == CudaArch::UNKNOWN || !IsAMDGpuArch(Arch))) {
+ (Arch == OffloadArch::UNKNOWN || !IsAMDOffloadArch(Arch))) {
C.getDriver().Diag(clang::diag::err_drv_offload_bad_gpu_arch)
<< "HIP" << ArchStr;
return StringRef();
}
- if (IsNVIDIAGpuArch(Arch))
- return Args.MakeArgStringRef(CudaArchToString(Arch));
+ if (IsNVIDIAOffloadArch(Arch))
+ return Args.MakeArgStringRef(OffloadArchToString(Arch));
- if (IsAMDGpuArch(Arch)) {
+ if (IsAMDOffloadArch(Arch)) {
llvm::StringMap<bool> Features;
auto HIPTriple = getHIPOffloadTargetTriple(C.getDriver(), C.getInputArgs());
if (!HIPTriple)
@@ -4508,9 +4558,9 @@ Driver::getOffloadArchs(Compilation &C, const llvm::opt::DerivedArgList &Args,
if (Archs.empty()) {
if (Kind == Action::OFK_Cuda)
- Archs.insert(CudaArchToString(CudaArch::CudaDefault));
+ Archs.insert(OffloadArchToString(OffloadArch::CudaDefault));
else if (Kind == Action::OFK_HIP)
- Archs.insert(CudaArchToString(CudaArch::HIPDefault));
+ Archs.insert(OffloadArchToString(OffloadArch::HIPDefault));
else if (Kind == Action::OFK_OpenMP)
Archs.insert(StringRef());
} else {
@@ -4560,9 +4610,13 @@ Action *Driver::BuildOffloadingActions(Compilation &C,
// Get the product of all bound architectures and toolchains.
SmallVector<std::pair<const ToolChain *, StringRef>> TCAndArchs;
- for (const ToolChain *TC : ToolChains)
- for (StringRef Arch : getOffloadArchs(C, Args, Kind, TC))
+ for (const ToolChain *TC : ToolChains) {
+ llvm::DenseSet<StringRef> Arches = getOffloadArchs(C, Args, Kind, TC);
+ SmallVector<StringRef, 0> Sorted(Arches.begin(), Arches.end());
+ llvm::sort(Sorted);
+ for (StringRef Arch : Sorted)
TCAndArchs.push_back(std::make_pair(TC, Arch));
+ }
for (unsigned I = 0, E = TCAndArchs.size(); I != E; ++I)
DeviceActions.push_back(C.MakeAction<InputAction>(*InputArg, InputType));
@@ -4623,12 +4677,30 @@ Action *Driver::BuildOffloadingActions(Compilation &C,
DDeps.add(*A, *TCAndArch->first, TCAndArch->second.data(), Kind);
OffloadAction::DeviceDependences DDep;
DDep.add(*A, *TCAndArch->first, TCAndArch->second.data(), Kind);
+
+ // Compiling CUDA in non-RDC mode uses the PTX output if available.
+ for (Action *Input : A->getInputs())
+ if (Kind == Action::OFK_Cuda && A->getType() == types::TY_Object &&
+ !Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
+ false))
+ DDep.add(*Input, *TCAndArch->first, TCAndArch->second.data(), Kind);
OffloadActions.push_back(C.MakeAction<OffloadAction>(DDep, A->getType()));
+
++TCAndArch;
}
}
- if (offloadDeviceOnly())
+ // HIP code in non-RDC mode will bundle the output if it invoked the linker.
+ bool ShouldBundleHIP =
+ C.isOffloadingHostKind(Action::OFK_HIP) &&
+ Args.hasFlag(options::OPT_gpu_bundle_output,
+ options::OPT_no_gpu_bundle_output, true) &&
+ !Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc, false) &&
+ !llvm::any_of(OffloadActions,
+ [](Action *A) { return A->getType() != types::TY_Image; });
+
+ // All kinds exit now in device-only mode except for non-RDC mode HIP.
+ if (offloadDeviceOnly() && !ShouldBundleHIP)
return C.MakeAction<OffloadAction>(DDeps, types::TY_Nothing);
if (OffloadActions.empty())
@@ -4661,6 +4733,10 @@ Action *Driver::BuildOffloadingActions(Compilation &C,
nullptr, C.getActiveOffloadKinds());
}
+ // HIP wants '--offload-device-only' to create a fatbinary by default.
+ if (offloadDeviceOnly())
+ return C.MakeAction<OffloadAction>(DDep, types::TY_Nothing);
+
// If we are unable to embed a single device output into the host, we need to
// add each device output as a host dependency to ensure they are still built.
bool SingleDeviceOutput = !llvm::any_of(OffloadActions, [](Action *A) {
@@ -4718,6 +4794,14 @@ Action *Driver::ConstructPhaseAction(
if (Args.hasArg(options::OPT_extract_api))
return C.MakeAction<ExtractAPIJobAction>(Input, types::TY_API_INFO);
+ // With 'fexperimental-modules-reduced-bmi', we don't want to run the
+ // precompile phase unless the user specified '--precompile'. In the case
+ // the '--precompile' flag is enabled, we will try to emit the reduced BMI
+ // as a by product in GenerateModuleInterfaceAction.
+ if (Args.hasArg(options::OPT_modules_reduced_bmi) &&
+ !Args.getLastArg(options::OPT__precompile))
+ return Input;
+
types::ID OutputTy = getPrecompiledType(Input->getType());
assert(OutputTy != types::TY_INVALID &&
"Cannot precompile this input type!");
@@ -4753,6 +4837,8 @@ Action *Driver::ConstructPhaseAction(
return C.MakeAction<MigrateJobAction>(Input, types::TY_Remap);
if (Args.hasArg(options::OPT_emit_ast))
return C.MakeAction<CompileJobAction>(Input, types::TY_AST);
+ if (Args.hasArg(options::OPT_emit_cir))
+ return C.MakeAction<CompileJobAction>(Input, types::TY_CIR);
if (Args.hasArg(options::OPT_module_file_info))
return C.MakeAction<CompileJobAction>(Input, types::TY_ModuleFile);
if (Args.hasArg(options::OPT_verify_pch))
@@ -5783,19 +5869,9 @@ static const char *GetModuleOutputPath(Compilation &C, const JobAction &JA,
(C.getArgs().hasArg(options::OPT_fmodule_output) ||
C.getArgs().hasArg(options::OPT_fmodule_output_EQ)));
- if (Arg *ModuleOutputEQ =
- C.getArgs().getLastArg(options::OPT_fmodule_output_EQ))
- return C.addResultFile(ModuleOutputEQ->getValue(), &JA);
+ SmallString<256> OutputPath =
+ tools::getCXX20NamedModuleOutputPath(C.getArgs(), BaseInput);
- SmallString<64> OutputPath;
- Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o);
- if (FinalOutput && C.getArgs().hasArg(options::OPT_c))
- OutputPath = FinalOutput->getValue();
- else
- OutputPath = BaseInput;
-
- const char *Extension = types::getTypeTempSuffix(JA.getType());
- llvm::sys::path::replace_extension(OutputPath, Extension);
return C.addResultFile(C.getArgs().MakeArgString(OutputPath.c_str()), &JA);
}
@@ -5868,6 +5944,12 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
&JA);
}
+ if (JA.getType() == types::TY_API_INFO &&
+ C.getArgs().hasArg(options::OPT_emit_extension_symbol_graphs) &&
+ C.getArgs().hasArg(options::OPT_o))
+ Diag(clang::diag::err_drv_unexpected_symbol_graph_output)
+ << C.getArgs().getLastArgValue(options::OPT_o);
+
// DXC defaults to standard out when generating assembly. We check this after
// any DXC flags that might specify a file.
if (AtTopLevel && JA.getType() == types::TY_PP_Asm && IsDXCMode())
@@ -5882,8 +5964,10 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
// If we're emitting a module output with the specified option
// `-fmodule-output`.
if (!AtTopLevel && isa<PrecompileJobAction>(JA) &&
- JA.getType() == types::TY_ModuleFile && SpecifiedModuleOutput)
+ JA.getType() == types::TY_ModuleFile && SpecifiedModuleOutput) {
+ assert(!C.getArgs().hasArg(options::OPT_modules_reduced_bmi));
return GetModuleOutputPath(C, JA, BaseInput);
+ }
// Output to a temporary file?
if ((!AtTopLevel && !isSaveTempsEnabled() &&
@@ -6105,6 +6189,11 @@ std::string Driver::GetFilePath(StringRef Name, const ToolChain &TC) const {
if (auto P = SearchPaths(TC.getFilePaths()))
return *P;
+ SmallString<128> R2(ResourceDir);
+ llvm::sys::path::append(R2, "..", "..", Name);
+ if (llvm::sys::fs::exists(Twine(R2)))
+ return std::string(R2);
+
return std::string(Name);
}
@@ -6166,6 +6255,51 @@ std::string Driver::GetProgramPath(StringRef Name, const ToolChain &TC) const {
return std::string(Name);
}
+std::string Driver::GetStdModuleManifestPath(const Compilation &C,
+ const ToolChain &TC) const {
+ std::string error = "<NOT PRESENT>";
+
+ switch (TC.GetCXXStdlibType(C.getArgs())) {
+ case ToolChain::CST_Libcxx: {
+ auto evaluate = [&](const char *library) -> std::optional<std::string> {
+ std::string lib = GetFilePath(library, TC);
+
+      // Note: when there are multiple flavours of libc++, this lookup needs
+      // to inspect the command-line arguments to select the proper json.
+      // These flavours do not exist at the moment, but there are plans to
+      // provide a variant built with sanitizer instrumentation enabled.
+
+ // For example
+ // StringRef modules = [&] {
+ // const SanitizerArgs &Sanitize = TC.getSanitizerArgs(C.getArgs());
+ // if (Sanitize.needsAsanRt())
+ // return "libc++.modules-asan.json";
+ // return "libc++.modules.json";
+ // }();
+
+ SmallString<128> path(lib.begin(), lib.end());
+ llvm::sys::path::remove_filename(path);
+ llvm::sys::path::append(path, "libc++.modules.json");
+ if (TC.getVFS().exists(path))
+ return static_cast<std::string>(path);
+
+ return {};
+ };
+
+ if (std::optional<std::string> result = evaluate("libc++.so"); result)
+ return *result;
+
+ return evaluate("libc++.a").value_or(error);
+ }
+
+ case ToolChain::CST_Libstdcxx:
+ // libstdc++ does not provide Standard library modules yet.
+ return error;
+ }
+
+ return error;
+}
+
std::string Driver::GetTemporaryPath(StringRef Prefix, StringRef Suffix) const {
SmallString<128> Path;
std::error_code EC = llvm::sys::fs::createTemporaryFile(Prefix, Suffix, Path);
@@ -6409,9 +6543,11 @@ const ToolChain &Driver::getOffloadingDeviceToolChain(
// things.
switch (TargetDeviceOffloadKind) {
case Action::OFK_HIP: {
- if (Target.getArch() == llvm::Triple::amdgcn &&
- Target.getVendor() == llvm::Triple::AMD &&
- Target.getOS() == llvm::Triple::AMDHSA)
+ if (((Target.getArch() == llvm::Triple::amdgcn ||
+ Target.getArch() == llvm::Triple::spirv64) &&
+ Target.getVendor() == llvm::Triple::AMD &&
+ Target.getOS() == llvm::Triple::AMDHSA) ||
+ !Args.hasArgNoClaim(options::OPT_offload_EQ))
TC = std::make_unique<toolchains::HIPAMDToolChain>(*this, Target,
HostTC, Args);
else if (Target.getArch() == llvm::Triple::spirv64 &&
@@ -6482,18 +6618,15 @@ bool Driver::GetReleaseVersion(StringRef Str, unsigned &Major, unsigned &Minor,
return false;
if (Str.empty())
return true;
- if (Str[0] != '.')
+ if (!Str.consume_front("."))
return false;
- Str = Str.drop_front(1);
-
if (Str.consumeInteger(10, Minor))
return false;
if (Str.empty())
return true;
- if (Str[0] != '.')
+ if (!Str.consume_front("."))
return false;
- Str = Str.drop_front(1);
if (Str.consumeInteger(10, Micro))
return false;
@@ -6521,9 +6654,8 @@ bool Driver::GetReleaseVersion(StringRef Str,
Digits[CurDigit] = Digit;
if (Str.empty())
return true;
- if (Str[0] != '.')
+ if (!Str.consume_front("."))
return false;
- Str = Str.drop_front(1);
CurDigit++;
}
@@ -6606,7 +6738,7 @@ llvm::StringRef clang::driver::getDriverMode(StringRef ProgName,
return Opt.consume_front(OptName) ? Opt : "";
}
-bool driver::IsClangCL(StringRef DriverMode) { return DriverMode.equals("cl"); }
+bool driver::IsClangCL(StringRef DriverMode) { return DriverMode == "cl"; }
llvm::Error driver::expandResponseFiles(SmallVectorImpl<const char *> &Args,
bool ClangCLMode,
@@ -6662,3 +6794,131 @@ llvm::Error driver::expandResponseFiles(SmallVectorImpl<const char *> &Args,
return llvm::Error::success();
}
+
+static const char *GetStableCStr(llvm::StringSet<> &SavedStrings, StringRef S) {
+ return SavedStrings.insert(S).first->getKeyData();
+}
+
+/// Apply a list of edits to the input argument lists.
+///
+/// The input string is a space separated list of edits to perform,
+/// they are applied in order to the input argument lists. Edits
+/// should be one of the following forms:
+///
+/// '#': Silence information about the changes to the command line arguments.
+///
+/// '^FOO': Add FOO as a new argument at the beginning of the command line.
+///
+/// '+FOO': Add FOO as a new argument at the end of the command line.
+///
+/// 's/XXX/YYY/': Substitute the regular expression XXX with YYY in the command
+/// line.
+///
+/// 'xOPTION': Removes all instances of the literal argument OPTION.
+///
+/// 'XOPTION': Removes all instances of the literal argument OPTION,
+/// and the following argument.
+///
+/// 'Ox': Removes all flags matching 'O' or 'O[sz0-9]' and adds 'Ox'
+/// at the end of the command line.
+///
+/// \param OS - The stream to write edit information to.
+/// \param Args - The vector of command line arguments.
+/// \param Edit - The override command to perform.
+/// \param SavedStrings - Set to use for storing string representations.
+static void applyOneOverrideOption(raw_ostream &OS,
+ SmallVectorImpl<const char *> &Args,
+ StringRef Edit,
+ llvm::StringSet<> &SavedStrings) {
+ // This does not need to be efficient.
+
+ if (Edit[0] == '^') {
+ const char *Str = GetStableCStr(SavedStrings, Edit.substr(1));
+ OS << "### Adding argument " << Str << " at beginning\n";
+ Args.insert(Args.begin() + 1, Str);
+ } else if (Edit[0] == '+') {
+ const char *Str = GetStableCStr(SavedStrings, Edit.substr(1));
+ OS << "### Adding argument " << Str << " at end\n";
+ Args.push_back(Str);
+ } else if (Edit[0] == 's' && Edit[1] == '/' && Edit.ends_with("/") &&
+ Edit.slice(2, Edit.size() - 1).contains('/')) {
+ StringRef MatchPattern = Edit.substr(2).split('/').first;
+ StringRef ReplPattern = Edit.substr(2).split('/').second;
+ ReplPattern = ReplPattern.slice(0, ReplPattern.size() - 1);
+
+ for (unsigned i = 1, e = Args.size(); i != e; ++i) {
+ // Ignore end-of-line response file markers
+ if (Args[i] == nullptr)
+ continue;
+ std::string Repl = llvm::Regex(MatchPattern).sub(ReplPattern, Args[i]);
+
+ if (Repl != Args[i]) {
+ OS << "### Replacing '" << Args[i] << "' with '" << Repl << "'\n";
+ Args[i] = GetStableCStr(SavedStrings, Repl);
+ }
+ }
+ } else if (Edit[0] == 'x' || Edit[0] == 'X') {
+ auto Option = Edit.substr(1);
+ for (unsigned i = 1; i < Args.size();) {
+ if (Option == Args[i]) {
+ OS << "### Deleting argument " << Args[i] << '\n';
+ Args.erase(Args.begin() + i);
+ if (Edit[0] == 'X') {
+ if (i < Args.size()) {
+ OS << "### Deleting argument " << Args[i] << '\n';
+ Args.erase(Args.begin() + i);
+ } else
+ OS << "### Invalid X edit, end of command line!\n";
+ }
+ } else
+ ++i;
+ }
+ } else if (Edit[0] == 'O') {
+ for (unsigned i = 1; i < Args.size();) {
+ const char *A = Args[i];
+      // Ignore end-of-line response file markers (FIXME: 'continue' never advances 'i' since the loop header has no increment, so a null entry here loops forever)
+ if (A == nullptr)
+ continue;
+ if (A[0] == '-' && A[1] == 'O' &&
+ (A[2] == '\0' || (A[3] == '\0' && (A[2] == 's' || A[2] == 'z' ||
+ ('0' <= A[2] && A[2] <= '9'))))) {
+ OS << "### Deleting argument " << Args[i] << '\n';
+ Args.erase(Args.begin() + i);
+ } else
+ ++i;
+ }
+ OS << "### Adding argument " << Edit << " at end\n";
+ Args.push_back(GetStableCStr(SavedStrings, '-' + Edit.str()));
+ } else {
+ OS << "### Unrecognized edit: " << Edit << "\n";
+ }
+}
+
+void driver::applyOverrideOptions(SmallVectorImpl<const char *> &Args,
+ const char *OverrideStr,
+ llvm::StringSet<> &SavedStrings,
+ raw_ostream *OS) {
+ if (!OS)
+ OS = &llvm::nulls();
+
+ if (OverrideStr[0] == '#') {
+ ++OverrideStr;
+ OS = &llvm::nulls();
+ }
+
+ *OS << "### CCC_OVERRIDE_OPTIONS: " << OverrideStr << "\n";
+
+ // This does not need to be efficient.
+
+ const char *S = OverrideStr;
+ while (*S) {
+ const char *End = ::strchr(S, ' ');
+ if (!End)
+ End = S + strlen(S);
+ if (End != S)
+ applyOneOverrideOption(*OS, Args, std::string(S, End), SavedStrings);
+ S = End;
+ if (*S != '\0')
+ ++S;
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/OffloadBundler.cpp b/contrib/llvm-project/clang/lib/Driver/OffloadBundler.cpp
index b1091aca5616..a4ab846ed2c5 100644
--- a/contrib/llvm-project/clang/lib/Driver/OffloadBundler.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/OffloadBundler.cpp
@@ -79,7 +79,8 @@ OffloadTargetInfo::OffloadTargetInfo(const StringRef Target,
auto TargetFeatures = Target.split(':');
auto TripleOrGPU = TargetFeatures.first.rsplit('-');
- if (clang::StringToCudaArch(TripleOrGPU.second) != clang::CudaArch::UNKNOWN) {
+ if (clang::StringToOffloadArch(TripleOrGPU.second) !=
+ clang::OffloadArch::UNKNOWN) {
auto KindTriple = TripleOrGPU.first.split('-');
this->OffloadKind = KindTriple.first;
@@ -113,8 +114,11 @@ bool OffloadTargetInfo::isOffloadKindValid() const {
bool OffloadTargetInfo::isOffloadKindCompatible(
const StringRef TargetOffloadKind) const {
- if (OffloadKind == TargetOffloadKind)
+ if ((OffloadKind == TargetOffloadKind) ||
+ (OffloadKind == "hip" && TargetOffloadKind == "hipv4") ||
+ (OffloadKind == "hipv4" && TargetOffloadKind == "hip"))
return true;
+
if (BundlerConfig.HipOpenmpCompatible) {
bool HIPCompatibleWithOpenMP = OffloadKind.starts_with_insensitive("hip") &&
TargetOffloadKind == "openmp";
@@ -588,8 +592,16 @@ public:
StringRef Content = *ContentOrErr;
// Copy fat object contents to the output when extracting host bundle.
- if (Content.size() == 1u && Content.front() == 0)
- Content = StringRef(Input.getBufferStart(), Input.getBufferSize());
+ std::string ModifiedContent;
+ if (Content.size() == 1u && Content.front() == 0) {
+ auto HostBundleOrErr = getHostBundle(
+ StringRef(Input.getBufferStart(), Input.getBufferSize()));
+ if (!HostBundleOrErr)
+ return HostBundleOrErr.takeError();
+
+ ModifiedContent = std::move(*HostBundleOrErr);
+ Content = ModifiedContent;
+ }
OS.write(Content.data(), Content.size());
return Error::success();
@@ -692,6 +704,52 @@ private:
}
return Error::success();
}
+
+ Expected<std::string> getHostBundle(StringRef Input) {
+ TempFileHandlerRAII TempFiles;
+
+ auto ModifiedObjPathOrErr = TempFiles.Create(std::nullopt);
+ if (!ModifiedObjPathOrErr)
+ return ModifiedObjPathOrErr.takeError();
+ StringRef ModifiedObjPath = *ModifiedObjPathOrErr;
+
+ BumpPtrAllocator Alloc;
+ StringSaver SS{Alloc};
+ SmallVector<StringRef, 16> ObjcopyArgs{"llvm-objcopy"};
+
+ ObjcopyArgs.push_back("--regex");
+ ObjcopyArgs.push_back("--remove-section=__CLANG_OFFLOAD_BUNDLE__.*");
+ ObjcopyArgs.push_back("--");
+
+ StringRef ObjcopyInputFileName;
+  // When unbundling an archive, the content of each object file in the
+  // archive is passed to this function via the Input parameter, which differs
+  // from the content of the original input archive file; therefore it needs
+  // to be saved to a temporary file before being passed to llvm-objcopy.
+  // Otherwise, Input is the same as the content of the original input file,
+  // so a temporary file is not needed.
+ if (StringRef(BundlerConfig.FilesType).starts_with("a")) {
+ auto InputFileOrErr =
+ TempFiles.Create(ArrayRef<char>(Input.data(), Input.size()));
+ if (!InputFileOrErr)
+ return InputFileOrErr.takeError();
+ ObjcopyInputFileName = *InputFileOrErr;
+ } else
+ ObjcopyInputFileName = BundlerConfig.InputFileNames.front();
+
+ ObjcopyArgs.push_back(ObjcopyInputFileName);
+ ObjcopyArgs.push_back(ModifiedObjPath);
+
+ if (Error Err = executeObjcopy(BundlerConfig.ObjcopyPath, ObjcopyArgs))
+ return std::move(Err);
+
+ auto BufOrErr = MemoryBuffer::getFile(ModifiedObjPath);
+ if (!BufOrErr)
+ return createStringError(BufOrErr.getError(),
+ "Failed to read back the modified object file");
+
+ return BufOrErr->get()->getBuffer().str();
+ }
};
/// Handler for text files. The bundled file will have the following format.
@@ -870,6 +928,17 @@ CreateFileHandler(MemoryBuffer &FirstInput,
}
OffloadBundlerConfig::OffloadBundlerConfig() {
+ if (llvm::compression::zstd::isAvailable()) {
+ CompressionFormat = llvm::compression::Format::Zstd;
+ // Compression level 3 is usually sufficient for zstd since long distance
+ // matching is enabled.
+ CompressionLevel = 3;
+ } else if (llvm::compression::zlib::isAvailable()) {
+ CompressionFormat = llvm::compression::Format::Zlib;
+    // Use the default level for zlib since higher levels do not yield a
+    // significant improvement.
+ CompressionLevel = llvm::compression::zlib::DefaultCompression;
+ }
auto IgnoreEnvVarOpt =
llvm::sys::Process::GetEnv("OFFLOAD_BUNDLER_IGNORE_ENV_VAR");
if (IgnoreEnvVarOpt.has_value() && IgnoreEnvVarOpt.value() == "1")
@@ -883,11 +952,41 @@ OffloadBundlerConfig::OffloadBundlerConfig() {
llvm::sys::Process::GetEnv("OFFLOAD_BUNDLER_COMPRESS");
if (CompressEnvVarOpt.has_value())
Compress = CompressEnvVarOpt.value() == "1";
+
+ auto CompressionLevelEnvVarOpt =
+ llvm::sys::Process::GetEnv("OFFLOAD_BUNDLER_COMPRESSION_LEVEL");
+ if (CompressionLevelEnvVarOpt.has_value()) {
+ llvm::StringRef CompressionLevelStr = CompressionLevelEnvVarOpt.value();
+ int Level;
+ if (!CompressionLevelStr.getAsInteger(10, Level))
+ CompressionLevel = Level;
+ else
+ llvm::errs()
+ << "Warning: Invalid value for OFFLOAD_BUNDLER_COMPRESSION_LEVEL: "
+ << CompressionLevelStr.str() << ". Ignoring it.\n";
+ }
+}
+
+// Utility function to format numbers with commas
+static std::string formatWithCommas(unsigned long long Value) {
+ std::string Num = std::to_string(Value);
+ int InsertPosition = Num.length() - 3;
+ while (InsertPosition > 0) {
+ Num.insert(InsertPosition, ",");
+ InsertPosition -= 3;
+ }
+ return Num;
}
llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>>
-CompressedOffloadBundle::compress(const llvm::MemoryBuffer &Input,
+CompressedOffloadBundle::compress(llvm::compression::Params P,
+ const llvm::MemoryBuffer &Input,
bool Verbose) {
+ if (!llvm::compression::zstd::isAvailable() &&
+ !llvm::compression::zlib::isAvailable())
+ return createStringError(llvm::inconvertibleErrorCode(),
+ "Compression not supported");
+
llvm::Timer HashTimer("Hash Calculation Timer", "Hash calculation time",
ClangOffloadBundlerTimerGroup);
if (Verbose)
@@ -905,26 +1004,20 @@ CompressedOffloadBundle::compress(const llvm::MemoryBuffer &Input,
reinterpret_cast<const uint8_t *>(Input.getBuffer().data()),
Input.getBuffer().size());
- llvm::compression::Format CompressionFormat;
-
- if (llvm::compression::zstd::isAvailable())
- CompressionFormat = llvm::compression::Format::Zstd;
- else if (llvm::compression::zlib::isAvailable())
- CompressionFormat = llvm::compression::Format::Zlib;
- else
- return createStringError(llvm::inconvertibleErrorCode(),
- "Compression not supported");
-
llvm::Timer CompressTimer("Compression Timer", "Compression time",
ClangOffloadBundlerTimerGroup);
if (Verbose)
CompressTimer.startTimer();
- llvm::compression::compress(CompressionFormat, BufferUint8, CompressedBuffer);
+ llvm::compression::compress(P, BufferUint8, CompressedBuffer);
if (Verbose)
CompressTimer.stopTimer();
- uint16_t CompressionMethod = static_cast<uint16_t>(CompressionFormat);
+ uint16_t CompressionMethod = static_cast<uint16_t>(P.format);
uint32_t UncompressedSize = Input.getBuffer().size();
+ uint32_t TotalFileSize = MagicNumber.size() + sizeof(TotalFileSize) +
+ sizeof(Version) + sizeof(CompressionMethod) +
+ sizeof(UncompressedSize) + sizeof(TruncatedHash) +
+ CompressedBuffer.size();
SmallVector<char, 0> FinalBuffer;
llvm::raw_svector_ostream OS(FinalBuffer);
@@ -932,6 +1025,8 @@ CompressedOffloadBundle::compress(const llvm::MemoryBuffer &Input,
OS.write(reinterpret_cast<const char *>(&Version), sizeof(Version));
OS.write(reinterpret_cast<const char *>(&CompressionMethod),
sizeof(CompressionMethod));
+ OS.write(reinterpret_cast<const char *>(&TotalFileSize),
+ sizeof(TotalFileSize));
OS.write(reinterpret_cast<const char *>(&UncompressedSize),
sizeof(UncompressedSize));
OS.write(reinterpret_cast<const char *>(&TruncatedHash),
@@ -941,17 +1036,31 @@ CompressedOffloadBundle::compress(const llvm::MemoryBuffer &Input,
if (Verbose) {
auto MethodUsed =
- CompressionFormat == llvm::compression::Format::Zstd ? "zstd" : "zlib";
+ P.format == llvm::compression::Format::Zstd ? "zstd" : "zlib";
+ double CompressionRate =
+ static_cast<double>(UncompressedSize) / CompressedBuffer.size();
+ double CompressionTimeSeconds = CompressTimer.getTotalTime().getWallTime();
+ double CompressionSpeedMBs =
+ (UncompressedSize / (1024.0 * 1024.0)) / CompressionTimeSeconds;
+
llvm::errs() << "Compressed bundle format version: " << Version << "\n"
+ << "Total file size (including headers): "
+ << formatWithCommas(TotalFileSize) << " bytes\n"
<< "Compression method used: " << MethodUsed << "\n"
- << "Binary size before compression: " << UncompressedSize
- << " bytes\n"
- << "Binary size after compression: " << CompressedBuffer.size()
- << " bytes\n"
+ << "Compression level: " << P.level << "\n"
+ << "Binary size before compression: "
+ << formatWithCommas(UncompressedSize) << " bytes\n"
+ << "Binary size after compression: "
+ << formatWithCommas(CompressedBuffer.size()) << " bytes\n"
+ << "Compression rate: "
+ << llvm::format("%.2lf", CompressionRate) << "\n"
+ << "Compression ratio: "
+ << llvm::format("%.2lf%%", 100.0 / CompressionRate) << "\n"
+ << "Compression speed: "
+ << llvm::format("%.2lf MB/s", CompressionSpeedMBs) << "\n"
<< "Truncated MD5 hash: "
<< llvm::format_hex(TruncatedHash, 16) << "\n";
}
-
return llvm::MemoryBuffer::getMemBufferCopy(
llvm::StringRef(FinalBuffer.data(), FinalBuffer.size()));
}
@@ -962,9 +1071,9 @@ CompressedOffloadBundle::decompress(const llvm::MemoryBuffer &Input,
StringRef Blob = Input.getBuffer();
- if (Blob.size() < HeaderSize) {
+ if (Blob.size() < V1HeaderSize)
return llvm::MemoryBuffer::getMemBufferCopy(Blob);
- }
+
if (llvm::identify_magic(Blob) !=
llvm::file_magic::offload_bundle_compressed) {
if (Verbose)
@@ -972,21 +1081,32 @@ CompressedOffloadBundle::decompress(const llvm::MemoryBuffer &Input,
return llvm::MemoryBuffer::getMemBufferCopy(Blob);
}
+ size_t CurrentOffset = MagicSize;
+
uint16_t ThisVersion;
+ memcpy(&ThisVersion, Blob.data() + CurrentOffset, sizeof(uint16_t));
+ CurrentOffset += VersionFieldSize;
+
uint16_t CompressionMethod;
+ memcpy(&CompressionMethod, Blob.data() + CurrentOffset, sizeof(uint16_t));
+ CurrentOffset += MethodFieldSize;
+
+ uint32_t TotalFileSize;
+ if (ThisVersion >= 2) {
+ if (Blob.size() < V2HeaderSize)
+ return createStringError(inconvertibleErrorCode(),
+ "Compressed bundle header size too small");
+ memcpy(&TotalFileSize, Blob.data() + CurrentOffset, sizeof(uint32_t));
+ CurrentOffset += FileSizeFieldSize;
+ }
+
uint32_t UncompressedSize;
+ memcpy(&UncompressedSize, Blob.data() + CurrentOffset, sizeof(uint32_t));
+ CurrentOffset += UncompressedSizeFieldSize;
+
uint64_t StoredHash;
- memcpy(&ThisVersion, Input.getBuffer().data() + MagicNumber.size(),
- sizeof(uint16_t));
- memcpy(&CompressionMethod, Blob.data() + MagicSize + VersionFieldSize,
- sizeof(uint16_t));
- memcpy(&UncompressedSize,
- Blob.data() + MagicSize + VersionFieldSize + MethodFieldSize,
- sizeof(uint32_t));
- memcpy(&StoredHash,
- Blob.data() + MagicSize + VersionFieldSize + MethodFieldSize +
- SizeFieldSize,
- sizeof(uint64_t));
+ memcpy(&StoredHash, Blob.data() + CurrentOffset, sizeof(uint64_t));
+ CurrentOffset += HashFieldSize;
llvm::compression::Format CompressionFormat;
if (CompressionMethod ==
@@ -1005,7 +1125,7 @@ CompressedOffloadBundle::decompress(const llvm::MemoryBuffer &Input,
DecompressTimer.startTimer();
SmallVector<uint8_t, 0> DecompressedData;
- StringRef CompressedData = Blob.substr(HeaderSize);
+ StringRef CompressedData = Blob.substr(CurrentOffset);
if (llvm::Error DecompressionError = llvm::compression::decompress(
CompressionFormat, llvm::arrayRefFromStringRef(CompressedData),
DecompressedData, UncompressedSize))
@@ -1016,7 +1136,10 @@ CompressedOffloadBundle::decompress(const llvm::MemoryBuffer &Input,
if (Verbose) {
DecompressTimer.stopTimer();
- // Recalculate MD5 hash
+ double DecompressionTimeSeconds =
+ DecompressTimer.getTotalTime().getWallTime();
+
+ // Recalculate MD5 hash for integrity check
llvm::Timer HashRecalcTimer("Hash Recalculation Timer",
"Hash recalculation time",
ClangOffloadBundlerTimerGroup);
@@ -1030,16 +1153,30 @@ CompressedOffloadBundle::decompress(const llvm::MemoryBuffer &Input,
HashRecalcTimer.stopTimer();
bool HashMatch = (StoredHash == RecalculatedHash);
- llvm::errs() << "Compressed bundle format version: " << ThisVersion << "\n"
- << "Decompression method: "
+ double CompressionRate =
+ static_cast<double>(UncompressedSize) / CompressedData.size();
+ double DecompressionSpeedMBs =
+ (UncompressedSize / (1024.0 * 1024.0)) / DecompressionTimeSeconds;
+
+ llvm::errs() << "Compressed bundle format version: " << ThisVersion << "\n";
+ if (ThisVersion >= 2)
+ llvm::errs() << "Total file size (from header): "
+ << formatWithCommas(TotalFileSize) << " bytes\n";
+ llvm::errs() << "Decompression method: "
<< (CompressionFormat == llvm::compression::Format::Zlib
? "zlib"
: "zstd")
<< "\n"
- << "Size before decompression: " << CompressedData.size()
- << " bytes\n"
- << "Size after decompression: " << UncompressedSize
- << " bytes\n"
+ << "Size before decompression: "
+ << formatWithCommas(CompressedData.size()) << " bytes\n"
+ << "Size after decompression: "
+ << formatWithCommas(UncompressedSize) << " bytes\n"
+ << "Compression rate: "
+ << llvm::format("%.2lf", CompressionRate) << "\n"
+ << "Compression ratio: "
+ << llvm::format("%.2lf%%", 100.0 / CompressionRate) << "\n"
+ << "Decompression speed: "
+ << llvm::format("%.2lf MB/s", DecompressionSpeedMBs) << "\n"
<< "Stored hash: " << llvm::format_hex(StoredHash, 16) << "\n"
<< "Recalculated hash: "
<< llvm::format_hex(RecalculatedHash, 16) << "\n"
@@ -1233,8 +1370,10 @@ Error OffloadBundler::BundleFiles() {
std::unique_ptr<llvm::MemoryBuffer> BufferMemory =
llvm::MemoryBuffer::getMemBufferCopy(
llvm::StringRef(Buffer.data(), Buffer.size()));
- auto CompressionResult =
- CompressedOffloadBundle::compress(*BufferMemory, BundlerConfig.Verbose);
+ auto CompressionResult = CompressedOffloadBundle::compress(
+ {BundlerConfig.CompressionFormat, BundlerConfig.CompressionLevel,
+ /*zstdEnableLdm=*/true},
+ *BufferMemory, BundlerConfig.Verbose);
if (auto Error = CompressionResult.takeError())
return Error;
@@ -1592,10 +1731,8 @@ Error OffloadBundler::UnbundleArchive() {
while (!CodeObject.empty()) {
SmallVector<StringRef> CompatibleTargets;
auto CodeObjectInfo = OffloadTargetInfo(CodeObject, BundlerConfig);
- if (CodeObjectInfo.hasHostKind()) {
- // Do nothing, we don't extract host code yet.
- } else if (getCompatibleOffloadTargets(CodeObjectInfo, CompatibleTargets,
- BundlerConfig)) {
+ if (getCompatibleOffloadTargets(CodeObjectInfo, CompatibleTargets,
+ BundlerConfig)) {
std::string BundleData;
raw_string_ostream DataStream(BundleData);
if (Error Err = FileHandler->ReadBundle(DataStream, CodeObjectBuffer))
diff --git a/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp b/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
index 56d497eb4c32..1fd870b72286 100644
--- a/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
@@ -41,7 +41,8 @@ static const SanitizerMask NotAllowedWithExecuteOnly =
SanitizerKind::Function | SanitizerKind::KCFI;
static const SanitizerMask NeedsUnwindTables =
SanitizerKind::Address | SanitizerKind::HWAddress | SanitizerKind::Thread |
- SanitizerKind::Memory | SanitizerKind::DataFlow;
+ SanitizerKind::Memory | SanitizerKind::DataFlow |
+ SanitizerKind::NumericalStability;
static const SanitizerMask SupportsCoverage =
SanitizerKind::Address | SanitizerKind::HWAddress |
SanitizerKind::KernelAddress | SanitizerKind::KernelHWAddress |
@@ -53,7 +54,8 @@ static const SanitizerMask SupportsCoverage =
SanitizerKind::DataFlow | SanitizerKind::Fuzzer |
SanitizerKind::FuzzerNoLink | SanitizerKind::FloatDivideByZero |
SanitizerKind::SafeStack | SanitizerKind::ShadowCallStack |
- SanitizerKind::Thread | SanitizerKind::ObjCCast | SanitizerKind::KCFI;
+ SanitizerKind::Thread | SanitizerKind::ObjCCast | SanitizerKind::KCFI |
+ SanitizerKind::NumericalStability;
static const SanitizerMask RecoverableByDefault =
SanitizerKind::Undefined | SanitizerKind::Integer |
SanitizerKind::ImplicitConversion | SanitizerKind::Nullability |
@@ -175,6 +177,7 @@ static void addDefaultIgnorelists(const Driver &D, SanitizerMask Kinds,
{"hwasan_ignorelist.txt", SanitizerKind::HWAddress},
{"memtag_ignorelist.txt", SanitizerKind::MemTag},
{"msan_ignorelist.txt", SanitizerKind::Memory},
+ {"nsan_ignorelist.txt", SanitizerKind::NumericalStability},
{"tsan_ignorelist.txt", SanitizerKind::Thread},
{"dfsan_abilist.txt", SanitizerKind::DataFlow},
{"cfi_ignorelist.txt", SanitizerKind::CFI},
@@ -282,8 +285,8 @@ bool SanitizerArgs::needsFuzzerInterceptors() const {
bool SanitizerArgs::needsUbsanRt() const {
// All of these include ubsan.
- if (needsAsanRt() || needsMsanRt() || needsHwasanRt() || needsTsanRt() ||
- needsDfsanRt() || needsLsanRt() || needsCfiDiagRt() ||
+ if (needsAsanRt() || needsMsanRt() || needsNsanRt() || needsHwasanRt() ||
+ needsTsanRt() || needsDfsanRt() || needsLsanRt() || needsCfiDiagRt() ||
(needsScudoRt() && !requiresMinimalRuntime()))
return false;
@@ -487,6 +490,14 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
Add &= ~NotAllowedWithExecuteOnly;
if (CfiCrossDso)
Add &= ~SanitizerKind::CFIMFCall;
+ // -fsanitize=undefined does not expand to signed-integer-overflow in
+ // -fwrapv (implied by -fno-strict-overflow) mode.
+ if (Add & SanitizerKind::UndefinedGroup) {
+ bool S = Args.hasFlagNoClaim(options::OPT_fno_strict_overflow,
+ options::OPT_fstrict_overflow, false);
+ if (Args.hasFlagNoClaim(options::OPT_fwrapv, options::OPT_fno_wrapv, S))
+ Add &= ~SanitizerKind::SignedIntegerOverflow;
+ }
Add &= Supported;
if (Add & SanitizerKind::Fuzzer)
@@ -789,7 +800,8 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
Arg->claim();
if (LegacySanitizeCoverage != 0 && DiagnoseErrors) {
D.Diag(diag::warn_drv_deprecated_arg)
- << Arg->getAsString(Args) << "-fsanitize-coverage=trace-pc-guard";
+ << Arg->getAsString(Args) << /*hasReplacement=*/true
+ << "-fsanitize-coverage=trace-pc-guard";
}
continue;
}
@@ -825,11 +837,11 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
// enabled.
if (CoverageFeatures & CoverageTraceBB)
D.Diag(clang::diag::warn_drv_deprecated_arg)
- << "-fsanitize-coverage=trace-bb"
+ << "-fsanitize-coverage=trace-bb" << /*hasReplacement=*/true
<< "-fsanitize-coverage=trace-pc-guard";
if (CoverageFeatures & Coverage8bitCounters)
D.Diag(clang::diag::warn_drv_deprecated_arg)
- << "-fsanitize-coverage=8bit-counters"
+ << "-fsanitize-coverage=8bit-counters" << /*hasReplacement=*/true
<< "-fsanitize-coverage=trace-pc-guard";
}
@@ -841,7 +853,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
if ((CoverageFeatures & InsertionPointTypes) &&
!(CoverageFeatures & InstrumentationTypes) && DiagnoseErrors) {
D.Diag(clang::diag::warn_drv_deprecated_arg)
- << "-fsanitize-coverage=[func|bb|edge]"
+ << "-fsanitize-coverage=[func|bb|edge]" << /*hasReplacement=*/true
<< "-fsanitize-coverage=[func|bb|edge],[trace-pc-guard|trace-pc],["
"control-flow]";
}
@@ -1184,7 +1196,9 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
BinaryMetadataIgnorelistFiles);
}
- if (TC.getTriple().isOSWindows() && needsUbsanRt()) {
+ if (TC.getTriple().isOSWindows() && needsUbsanRt() &&
+ Args.hasFlag(options::OPT_frtlib_defaultlib,
+ options::OPT_fno_rtlib_defaultlib, true)) {
// Instruct the code generator to embed linker directives in the object file
// that cause the required runtime libraries to be linked.
CmdArgs.push_back(
@@ -1195,7 +1209,9 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
"--dependent-lib=" +
TC.getCompilerRTBasename(Args, "ubsan_standalone_cxx")));
}
- if (TC.getTriple().isOSWindows() && needsStatsRt()) {
+ if (TC.getTriple().isOSWindows() && needsStatsRt() &&
+ Args.hasFlag(options::OPT_frtlib_defaultlib,
+ options::OPT_fno_rtlib_defaultlib, true)) {
CmdArgs.push_back(Args.MakeArgString(
"--dependent-lib=" + TC.getCompilerRTBasename(Args, "stats_client")));
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
index 388030592b48..20a555afb809 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
@@ -77,10 +77,19 @@ static ToolChain::RTTIMode CalculateRTTIMode(const ArgList &Args,
return NoRTTI ? ToolChain::RM_Disabled : ToolChain::RM_Enabled;
}
+static ToolChain::ExceptionsMode CalculateExceptionsMode(const ArgList &Args) {
+ if (Args.hasFlag(options::OPT_fexceptions, options::OPT_fno_exceptions,
+ true)) {
+ return ToolChain::EM_Enabled;
+ }
+ return ToolChain::EM_Disabled;
+}
+
ToolChain::ToolChain(const Driver &D, const llvm::Triple &T,
const ArgList &Args)
: D(D), Triple(T), Args(Args), CachedRTTIArg(GetRTTIArgument(Args)),
- CachedRTTIMode(CalculateRTTIMode(Args, Triple, CachedRTTIArg)) {
+ CachedRTTIMode(CalculateRTTIMode(Args, Triple, CachedRTTIArg)),
+ CachedExceptionsMode(CalculateExceptionsMode(Args)) {
auto addIfExists = [this](path_list &List, const std::string &Path) {
if (getVFS().exists(Path))
List.push_back(Path);
@@ -95,7 +104,8 @@ ToolChain::ToolChain(const Driver &D, const llvm::Triple &T,
}
llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>>
-ToolChain::executeToolChainProgram(StringRef Executable) const {
+ToolChain::executeToolChainProgram(StringRef Executable,
+ unsigned SecondsToWait) const {
llvm::SmallString<64> OutputFile;
llvm::sys::fs::createTemporaryFile("toolchain-program", "txt", OutputFile);
llvm::FileRemover OutputRemover(OutputFile.c_str());
@@ -106,9 +116,8 @@ ToolChain::executeToolChainProgram(StringRef Executable) const {
};
std::string ErrorMessage;
- if (llvm::sys::ExecuteAndWait(Executable, {}, {}, Redirects,
- /* SecondsToWait */ 0,
- /*MemoryLimit*/ 0, &ErrorMessage))
+ if (llvm::sys::ExecuteAndWait(Executable, {}, {}, Redirects, SecondsToWait,
+ /*MemoryLimit=*/0, &ErrorMessage))
return llvm::createStringError(std::error_code(),
Executable + ": " + ErrorMessage);
@@ -186,12 +195,19 @@ static void getAArch64MultilibFlags(const Driver &D,
UnifiedFeatures.end());
std::vector<std::string> MArch;
for (const auto &Ext : AArch64::Extensions)
- if (FeatureSet.contains(Ext.Feature))
- MArch.push_back(Ext.Name.str());
+ if (!Ext.UserVisibleName.empty())
+ if (FeatureSet.contains(Ext.PosTargetFeature))
+ MArch.push_back(Ext.UserVisibleName.str());
for (const auto &Ext : AArch64::Extensions)
- if (FeatureSet.contains(Ext.NegFeature))
- MArch.push_back(("no" + Ext.Name).str());
- MArch.insert(MArch.begin(), ("-march=" + Triple.getArchName()).str());
+ if (!Ext.UserVisibleName.empty())
+ if (FeatureSet.contains(Ext.NegTargetFeature))
+ MArch.push_back(("no" + Ext.UserVisibleName).str());
+ StringRef ArchName;
+ for (const auto &ArchInfo : AArch64::ArchInfos)
+ if (FeatureSet.contains(ArchInfo->ArchFeature))
+ ArchName = ArchInfo->Name;
+ assert(!ArchName.empty() && "at least one architecture should be found");
+ MArch.insert(MArch.begin(), ("-march=" + ArchName).str());
Result.push_back(llvm::join(MArch, "+"));
}
@@ -207,11 +223,13 @@ static void getARMMultilibFlags(const Driver &D,
UnifiedFeatures.end());
std::vector<std::string> MArch;
for (const auto &Ext : ARM::ARCHExtNames)
- if (FeatureSet.contains(Ext.Feature))
- MArch.push_back(Ext.Name.str());
+ if (!Ext.Name.empty())
+ if (FeatureSet.contains(Ext.Feature))
+ MArch.push_back(Ext.Name.str());
for (const auto &Ext : ARM::ARCHExtNames)
- if (FeatureSet.contains(Ext.NegFeature))
- MArch.push_back(("no" + Ext.Name).str());
+ if (!Ext.Name.empty())
+ if (FeatureSet.contains(Ext.NegFeature))
+ MArch.push_back(("no" + Ext.Name).str());
MArch.insert(MArch.begin(), ("-march=" + Triple.getArchName()).str());
Result.push_back(llvm::join(MArch, "+"));
@@ -264,6 +282,18 @@ ToolChain::getMultilibFlags(const llvm::opt::ArgList &Args) const {
break;
}
+ // Include fno-exceptions and fno-rtti
+ // to improve multilib selection
+ if (getRTTIMode() == ToolChain::RTTIMode::RM_Disabled)
+ Result.push_back("-fno-rtti");
+ else
+ Result.push_back("-frtti");
+
+ if (getExceptionsMode() == ToolChain::ExceptionsMode::EM_Disabled)
+ Result.push_back("-fno-exceptions");
+ else
+ Result.push_back("-fexceptions");
+
// Sort and remove duplicates.
std::sort(Result.begin(), Result.end());
Result.erase(std::unique(Result.begin(), Result.end()), Result.end());
@@ -427,12 +457,6 @@ ToolChain::getDefaultUnwindTableLevel(const ArgList &Args) const {
return UnwindTableLevel::None;
}
-unsigned ToolChain::GetDefaultDwarfVersion() const {
- // TODO: Remove the RISC-V special case when R_RISCV_SET_ULEB128 linker
- // support becomes more widely available.
- return getTriple().isRISCV() ? 4 : 5;
-}
-
Tool *ToolChain::getClang() const {
if (!Clang)
Clang.reset(new tools::Clang(*this, useIntegratedBackend()));
@@ -655,19 +679,29 @@ std::string ToolChain::getCompilerRT(const ArgList &Args, StringRef Component,
// Check for runtime files in the new layout without the architecture first.
std::string CRTBasename =
buildCompilerRTBasename(Args, Component, Type, /*AddArch=*/false);
+ SmallString<128> Path;
for (const auto &LibPath : getLibraryPaths()) {
SmallString<128> P(LibPath);
llvm::sys::path::append(P, CRTBasename);
if (getVFS().exists(P))
return std::string(P);
+ if (Path.empty())
+ Path = P;
}
+ if (getTriple().isOSAIX())
+ Path.clear();
- // Fall back to the old expected compiler-rt name if the new one does not
- // exist.
+ // Check the filename for the old layout if the new one does not exist.
CRTBasename =
buildCompilerRTBasename(Args, Component, Type, /*AddArch=*/true);
- SmallString<128> Path(getCompilerRTPath());
- llvm::sys::path::append(Path, CRTBasename);
+ SmallString<128> OldPath(getCompilerRTPath());
+ llvm::sys::path::append(OldPath, CRTBasename);
+ if (Path.empty() || getVFS().exists(OldPath))
+ return std::string(OldPath);
+
+ // If none is found, use a file name from the new layout, which may get
+ // printed in an error message, aiding users in knowing what Clang is
+ // looking for.
return std::string(Path);
}
@@ -766,7 +800,13 @@ ToolChain::getTargetSubDirPath(StringRef BaseDir) const {
std::optional<std::string> ToolChain::getRuntimePath() const {
SmallString<128> P(D.ResourceDir);
llvm::sys::path::append(P, "lib");
- return getTargetSubDirPath(P);
+ if (auto Ret = getTargetSubDirPath(P))
+ return Ret;
+ // Darwin does not use per-target runtime directory.
+ if (Triple.isOSDarwin())
+ return {};
+ llvm::sys::path::append(P, Triple.str());
+ return std::string(P);
}
std::optional<std::string> ToolChain::getStdlibPath() const {
@@ -775,6 +815,12 @@ std::optional<std::string> ToolChain::getStdlibPath() const {
return getTargetSubDirPath(P);
}
+std::optional<std::string> ToolChain::getStdlibIncludePath() const {
+ SmallString<128> P(D.Dir);
+ llvm::sys::path::append(P, "..", "include");
+ return getTargetSubDirPath(P);
+}
+
ToolChain::path_list ToolChain::getArchSpecificLibPaths() const {
path_list Paths;
@@ -985,11 +1031,12 @@ std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
}
case llvm::Triple::aarch64: {
llvm::Triple Triple = getTriple();
+ tools::aarch64::setPAuthABIInTriple(getDriver(), Args, Triple);
if (!Triple.isOSBinFormatMachO())
- return getTripleString();
+ return Triple.getTriple();
if (Triple.isArm64e())
- return getTripleString();
+ return Triple.getTriple();
// FIXME: older versions of ld64 expect the "arm64" component in the actual
// triple string and query it to determine whether an LTO file can be
@@ -1271,19 +1318,35 @@ void ToolChain::AddCCKextLibArgs(const ArgList &Args,
bool ToolChain::isFastMathRuntimeAvailable(const ArgList &Args,
std::string &Path) const {
+ // Don't implicitly link in mode-changing libraries in a shared library, since
+ // this can have very deleterious effects. See the various links from
+ // https://github.com/llvm/llvm-project/issues/57589 for more information.
+ bool Default = !Args.hasArgNoClaim(options::OPT_shared);
+
// Do not check for -fno-fast-math or -fno-unsafe-math when -Ofast passed
// (to keep the linker options consistent with gcc and clang itself).
- if (!isOptimizationLevelFast(Args)) {
+ if (Default && !isOptimizationLevelFast(Args)) {
// Check if -ffast-math or -funsafe-math.
- Arg *A =
- Args.getLastArg(options::OPT_ffast_math, options::OPT_fno_fast_math,
- options::OPT_funsafe_math_optimizations,
- options::OPT_fno_unsafe_math_optimizations);
+ Arg *A = Args.getLastArg(
+ options::OPT_ffast_math, options::OPT_fno_fast_math,
+ options::OPT_funsafe_math_optimizations,
+ options::OPT_fno_unsafe_math_optimizations, options::OPT_ffp_model_EQ);
if (!A || A->getOption().getID() == options::OPT_fno_fast_math ||
A->getOption().getID() == options::OPT_fno_unsafe_math_optimizations)
- return false;
+ Default = false;
+ if (A && A->getOption().getID() == options::OPT_ffp_model_EQ) {
+ StringRef Model = A->getValue();
+ if (Model != "fast")
+ Default = false;
+ }
}
+
+ // Whatever decision came as a result of the above implicit settings, either
+ // -mdaz-ftz or -mno-daz-ftz is capable of overriding it.
+ if (!Args.hasFlag(options::OPT_mdaz_ftz, options::OPT_mno_daz_ftz, Default))
+ return false;
+
// If crtfastmath.o exists add it to the arguments.
Path = GetFilePath("crtfastmath.o");
return (Path != "crtfastmath.o"); // Not found.
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp
index e6126ff62db3..b04502a57a9f 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp
@@ -17,6 +17,8 @@
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/Path.h"
+#include <set>
+
using AIX = clang::driver::toolchains::AIX;
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -342,9 +344,7 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
/// AIX - AIX tool chain which can call as(1) and ld(1) directly.
AIX::AIX(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: ToolChain(D, Triple, Args) {
- getProgramPaths().push_back(getDriver().getInstalledDir());
- if (getDriver().getInstalledDir() != getDriver().Dir)
- getProgramPaths().push_back(getDriver().Dir);
+ getProgramPaths().push_back(getDriver().Dir);
ParseInlineAsmUsingAsmParser = Args.hasFlag(
options::OPT_fintegrated_as, options::OPT_fno_integrated_as, true);
@@ -362,6 +362,28 @@ AIX::GetHeaderSysroot(const llvm::opt::ArgList &DriverArgs) const {
return "/";
}
+void AIX::AddOpenMPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ // Add OpenMP include paths if -fopenmp is specified.
+ if (DriverArgs.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
+ options::OPT_fno_openmp, false)) {
+ SmallString<128> PathOpenMP;
+ switch (getDriver().getOpenMPRuntime(DriverArgs)) {
+ case Driver::OMPRT_OMP:
+ PathOpenMP = GetHeaderSysroot(DriverArgs);
+ llvm::sys::path::append(PathOpenMP, "opt/IBM/openxlCSDK", "include",
+ "openmp");
+ addSystemInclude(DriverArgs, CC1Args, PathOpenMP.str());
+ break;
+ case Driver::OMPRT_IOMP5:
+ case Driver::OMPRT_GOMP:
+ case Driver::OMPRT_Unknown:
+ // Unknown / unsupported include paths.
+ break;
+ }
+ }
+}
+
void AIX::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
// Return if -nostdinc is specified as a driver option.
@@ -380,6 +402,11 @@ void AIX::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
addSystemInclude(DriverArgs, CC1Args, path::parent_path(P.str()));
}
+ // Add the include directory containing omp.h. This needs to be before
+ // adding the system include directory because other compilers put their
+ // omp.h in /usr/include.
+ AddOpenMPIncludeArgs(DriverArgs, CC1Args);
+
// Return if -nostdlibinc is specified as a driver option.
if (DriverArgs.hasArg(options::OPT_nostdlibinc))
return;
@@ -435,6 +462,80 @@ void AIX::AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm_unreachable("Unexpected C++ library type; only libc++ is supported.");
}
+// This function processes all the mtocdata options to build the final
+// simplified toc data options to pass to CC1.
+static void addTocDataOptions(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CC1Args,
+ const Driver &D) {
+
+ // Check the global toc-data setting. The default is -mno-tocdata.
+ // To enable toc-data globally, -mtocdata must be specified.
+ // Additionally, it must be last to take effect.
+ const bool TOCDataGloballyinEffect = [&Args]() {
+ if (const Arg *LastArg =
+ Args.getLastArg(options::OPT_mtocdata, options::OPT_mno_tocdata))
+ return LastArg->getOption().matches(options::OPT_mtocdata);
+ else
+ return false;
+ }();
+
+ enum TOCDataSetting {
+ AddressInTOC = 0, // Address of the symbol stored in the TOC.
+ DataInTOC = 1 // Symbol defined in the TOC.
+ };
+
+ const TOCDataSetting DefaultTocDataSetting =
+ TOCDataGloballyinEffect ? DataInTOC : AddressInTOC;
+
+ // Process the list of variables in the explicitly specified options
+ // -mtocdata= and -mno-tocdata= to see which variables are opposite to
+ // the global setting of tocdata in TOCDataGloballyinEffect.
+ // Those that have the opposite setting to TOCDataGloballyinEffect, are added
+ // to ExplicitlySpecifiedGlobals.
+ std::set<llvm::StringRef> ExplicitlySpecifiedGlobals;
+ for (const auto Arg :
+ Args.filtered(options::OPT_mtocdata_EQ, options::OPT_mno_tocdata_EQ)) {
+ TOCDataSetting ArgTocDataSetting =
+ Arg->getOption().matches(options::OPT_mtocdata_EQ) ? DataInTOC
+ : AddressInTOC;
+
+ if (ArgTocDataSetting != DefaultTocDataSetting)
+ for (const char *Val : Arg->getValues())
+ ExplicitlySpecifiedGlobals.insert(Val);
+ else
+ for (const char *Val : Arg->getValues())
+ ExplicitlySpecifiedGlobals.erase(Val);
+ }
+
+ auto buildExceptionList = [](const std::set<llvm::StringRef> &ExplicitValues,
+ const char *OptionSpelling) {
+ std::string Option(OptionSpelling);
+ bool IsFirst = true;
+ for (const auto &E : ExplicitValues) {
+ if (!IsFirst)
+ Option += ",";
+
+ IsFirst = false;
+ Option += E.str();
+ }
+ return Option;
+ };
+
+ // Pass the final tocdata options to CC1 consisting of the default
+ // tocdata option (-mtocdata/-mno-tocdata) along with the list
+ // option (-mno-tocdata=/-mtocdata=) if there are any explicitly specified
+ // variables which would be exceptions to the default setting.
+ const char *TocDataGlobalOption =
+ TOCDataGloballyinEffect ? "-mtocdata" : "-mno-tocdata";
+ CC1Args.push_back(TocDataGlobalOption);
+
+ const char *TocDataListOption =
+ TOCDataGloballyinEffect ? "-mno-tocdata=" : "-mtocdata=";
+ if (!ExplicitlySpecifiedGlobals.empty())
+ CC1Args.push_back(Args.MakeArgString(llvm::Twine(
+ buildExceptionList(ExplicitlySpecifiedGlobals, TocDataListOption))));
+}
+
void AIX::addClangTargetOptions(
const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadingKind) const {
@@ -442,9 +543,20 @@ void AIX::addClangTargetOptions(
Args.AddLastArg(CC1Args, options::OPT_mdefault_visibility_export_mapping_EQ);
Args.addOptInFlag(CC1Args, options::OPT_mxcoff_roptr, options::OPT_mno_xcoff_roptr);
+ // Forward last mtocdata/mno_tocdata options to -cc1.
+ if (Args.hasArg(options::OPT_mtocdata_EQ, options::OPT_mno_tocdata_EQ,
+ options::OPT_mtocdata))
+ addTocDataOptions(Args, CC1Args, getDriver());
+
if (Args.hasFlag(options::OPT_fxl_pragma_pack,
options::OPT_fno_xl_pragma_pack, true))
CC1Args.push_back("-fxl-pragma-pack");
+
+ // Pass "-fno-sized-deallocation" only when the user hasn't manually enabled
+ // or disabled sized deallocations.
+ if (!Args.getLastArgNoClaim(options::OPT_fsized_deallocation,
+ options::OPT_fno_sized_deallocation))
+ CC1Args.push_back("-fno-sized-deallocation");
}
void AIX::addProfileRTLibs(const llvm::opt::ArgList &Args,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.h
index 755d87e07ec5..8f130f6b5454 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.h
@@ -105,6 +105,8 @@ protected:
private:
llvm::StringRef GetHeaderSysroot(const llvm::opt::ArgList &DriverArgs) const;
bool ParseInlineAsmUsingAsmParser;
+ void AddOpenMPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp
index b3c9d5908654..453daed7cc7d 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp
@@ -14,6 +14,7 @@
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
+#include "clang/Driver/SanitizerArgs.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/Error.h"
@@ -209,7 +210,7 @@ RocmInstallationDetector::getInstallationPathCandidates() {
}
// Try to find relative to the compiler binary.
- const char *InstallDir = D.getInstalledDir();
+ StringRef InstallDir = D.Dir;
// Check both a normal Unix prefix position of the clang binary, as well as
// the Windows-esque layout the ROCm packages use with the host architecture
@@ -486,10 +487,16 @@ void RocmInstallationDetector::detectHIPRuntime() {
return newpath;
};
// If HIP version file can be found and parsed, use HIP version from there.
- for (const auto &VersionFilePath :
- {Append(SharePath, "hip", "version"),
- Append(ParentSharePath, "hip", "version"),
- Append(BinPath, ".hipVersion")}) {
+ std::vector<SmallString<0>> VersionFilePaths = {
+ Append(SharePath, "hip", "version"),
+ InstallPath != D.SysRoot + "/usr/local"
+ ? Append(ParentSharePath, "hip", "version")
+ : SmallString<0>(),
+ Append(BinPath, ".hipVersion")};
+
+ for (const auto &VersionFilePath : VersionFilePaths) {
+ if (VersionFilePath.empty())
+ continue;
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> VersionFile =
FS.getBufferForFile(VersionFilePath);
if (!VersionFile)
@@ -611,14 +618,14 @@ void amdgpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
-
- std::string Linker = getToolChain().GetProgramPath(getShortName());
+ std::string Linker = getToolChain().GetLinkerPath();
ArgStringList CmdArgs;
CmdArgs.push_back("--no-undefined");
CmdArgs.push_back("-shared");
addLinkerCompressDebugSectionsOption(getToolChain(), Args, CmdArgs);
Args.AddAllArgs(CmdArgs, options::OPT_L);
+ getToolChain().AddFilePathLibArgs(Args, CmdArgs);
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
if (C.getDriver().isUsingLTO())
addLTOOptions(getToolChain(), Args, CmdArgs, Output, Inputs[0],
@@ -639,7 +646,11 @@ void amdgpu::getAMDGPUTargetFeatures(const Driver &D,
std::vector<StringRef> &Features) {
// Add target ID features to -target-feature options. No diagnostics should
// be emitted here since invalid target ID is diagnosed at other places.
- StringRef TargetID = Args.getLastArgValue(options::OPT_mcpu_EQ);
+ StringRef TargetID;
+ if (Args.hasArg(options::OPT_mcpu_EQ))
+ TargetID = Args.getLastArgValue(options::OPT_mcpu_EQ);
+ else if (Args.hasArg(options::OPT_march_EQ))
+ TargetID = Args.getLastArgValue(options::OPT_march_EQ);
if (!TargetID.empty()) {
llvm::StringMap<bool> FeatureMap;
auto OptionalGpuArch = parseTargetID(Triple, TargetID, &FeatureMap);
@@ -663,6 +674,10 @@ void amdgpu::getAMDGPUTargetFeatures(const Driver &D,
options::OPT_mno_wavefrontsize64, false))
Features.push_back("+wavefrontsize64");
+ if (Args.hasFlag(options::OPT_mamdgpu_precise_memory_op,
+ options::OPT_mno_amdgpu_precise_memory_op, false))
+ Features.push_back("+precise-memory");
+
handleTargetFeaturesGroup(D, Triple, Args, Features,
options::OPT_m_amdgpu_Features_Group);
}
@@ -722,7 +737,7 @@ AMDGPUToolChain::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
checkTargetID(*DAL);
- if (!Args.getLastArgValue(options::OPT_x).equals("cl"))
+ if (Args.getLastArgValue(options::OPT_x) != "cl")
return DAL;
// Phase 1 (.cl -> .bc)
@@ -823,6 +838,12 @@ void AMDGPUToolChain::addClangTargetOptions(
}
}
+void AMDGPUToolChain::addClangWarningOptions(ArgStringList &CC1Args) const {
+ // AMDGPU does not support atomic lib call. Treat atomic alignment
+ // warnings as errors.
+ CC1Args.push_back("-Werror=atomic-alignment");
+}
+
StringRef
AMDGPUToolChain::getGPUArch(const llvm::opt::ArgList &DriverArgs) const {
return getProcessorFromTargetID(
@@ -861,7 +882,7 @@ AMDGPUToolChain::getSystemGPUArchs(const ArgList &Args) const {
else
Program = GetProgramPath("amdgpu-arch");
- auto StdoutOrErr = executeToolChainProgram(Program);
+ auto StdoutOrErr = executeToolChainProgram(Program, /*SecondsToWait=*/10);
if (!StdoutOrErr)
return StdoutOrErr.takeError();
@@ -926,6 +947,11 @@ void ROCMToolChain::addClangTargetOptions(
DriverArgs, LibDeviceFile, Wave64, DAZ, FiniteOnly, UnsafeMathOpt,
FastRelaxedMath, CorrectSqrt, ABIVer, false));
+ if (getSanitizerArgs(DriverArgs).needsAsanRt()) {
+ CC1Args.push_back("-mlink-bitcode-file");
+ CC1Args.push_back(
+ DriverArgs.MakeArgString(RocmInstallation->getAsanRTLPath()));
+ }
for (StringRef BCFile : BCLibs) {
CC1Args.push_back("-mlink-builtin-bitcode");
CC1Args.push_back(DriverArgs.MakeArgString(BCFile));
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h
index b3361b1e3607..7e70dae8ce15 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h
@@ -121,6 +121,9 @@ protected:
/// Get GPU arch from -mcpu without checking.
StringRef getGPUArch(const llvm::opt::ArgList &DriverArgs) const;
+ /// Common warning options shared by AMDGPU HIP, OpenCL and OpenMP toolchains.
+ /// Language specific warning options should go to derived classes.
+ void addClangWarningOptions(llvm::opt::ArgStringList &CC1Args) const override;
};
class LLVM_LIBRARY_VISIBILITY ROCMToolChain : public AMDGPUToolChain {
@@ -137,6 +140,9 @@ public:
getCommonDeviceLibNames(const llvm::opt::ArgList &DriverArgs,
const std::string &GPUArch,
bool isOpenMP = false) const;
+ SanitizerMask getSupportedSanitizers() const override {
+ return SanitizerKind::Address;
+ }
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
index b012b7cb7293..b75d400e6ce9 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
@@ -44,16 +44,9 @@ void AMDGPUOpenMPToolChain::addClangTargetOptions(
Action::OffloadKind DeviceOffloadingKind) const {
HostTC.addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadingKind);
- StringRef GPUArch = DriverArgs.getLastArgValue(options::OPT_march_EQ);
- assert(!GPUArch.empty() && "Must have an explicit GPU arch.");
-
assert(DeviceOffloadingKind == Action::OFK_OpenMP &&
"Only OpenMP offloading kinds are supported.");
- CC1Args.push_back("-target-cpu");
- CC1Args.push_back(DriverArgs.MakeArgStringRef(GPUArch));
- CC1Args.push_back("-fcuda-is-device");
-
if (DriverArgs.hasArg(options::OPT_nogpulib))
return;
@@ -92,7 +85,7 @@ llvm::opt::DerivedArgList *AMDGPUOpenMPToolChain::TranslateArgs(
llvm::formatv("{0}", llvm::fmt_consume(ArchsOrErr.takeError()));
getDriver().Diag(diag::err_drv_undetermined_gpu_arch)
<< llvm::Triple::getArchTypeName(getArch()) << ErrMsg << "-march";
- Arch = CudaArchToString(CudaArch::HIPDefault);
+ Arch = OffloadArchToString(OffloadArch::HIPDefault);
} else {
Arch = Args.MakeArgString(ArchsOrErr->front());
}
@@ -118,6 +111,7 @@ llvm::opt::DerivedArgList *AMDGPUOpenMPToolChain::TranslateArgs(
void AMDGPUOpenMPToolChain::addClangWarningOptions(
ArgStringList &CC1Args) const {
+ AMDGPUToolChain::addClangWarningOptions(CC1Args);
HostTC.addClangWarningOptions(CC1Args);
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
index 0cf96bb5c9cb..f083e40df131 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -98,16 +98,12 @@ static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU,
if (CPU == "native")
CPU = llvm::sys::getHostCPUName();
- if (CPU == "generic") {
- Extensions.enable(llvm::AArch64::AEK_SIMD);
- } else {
- const std::optional<llvm::AArch64::CpuInfo> CpuInfo =
- llvm::AArch64::parseCpu(CPU);
- if (!CpuInfo)
- return false;
+ const std::optional<llvm::AArch64::CpuInfo> CpuInfo =
+ llvm::AArch64::parseCpu(CPU);
+ if (!CpuInfo)
+ return false;
- Extensions.addCPUDefaults(*CpuInfo);
- }
+ Extensions.addCPUDefaults(*CpuInfo);
if (Split.second.size() &&
!DecodeAArch64Features(D, Split.second, Extensions))
@@ -165,11 +161,14 @@ getAArch64MicroArchFeaturesFromMtune(const Driver &D, StringRef Mtune,
// Handle CPU name is 'native'.
if (MtuneLowerCase == "native")
MtuneLowerCase = std::string(llvm::sys::getHostCPUName());
+
+ // 'cyclone' and later have zero-cycle register moves and zeroing.
if (MtuneLowerCase == "cyclone" ||
StringRef(MtuneLowerCase).starts_with("apple")) {
Features.push_back("+zcm");
Features.push_back("+zcz");
}
+
return true;
}
@@ -318,9 +317,11 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
}
}
- if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
- options::OPT_munaligned_access)) {
- if (A->getOption().matches(options::OPT_mno_unaligned_access))
+ if (Arg *A = Args.getLastArg(
+ options::OPT_mstrict_align, options::OPT_mno_strict_align,
+ options::OPT_mno_unaligned_access, options::OPT_munaligned_access)) {
+ if (A->getOption().matches(options::OPT_mstrict_align) ||
+ A->getOption().matches(options::OPT_mno_unaligned_access))
Features.push_back("+strict-align");
} else if (Triple.isOSOpenBSD())
Features.push_back("+strict-align");
@@ -397,8 +398,8 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
if (Args.hasArg(options::OPT_ffixed_x28))
Features.push_back("+reserve-x28");
- if (Args.hasArg(options::OPT_ffixed_x30))
- Features.push_back("+reserve-x30");
+ if (Args.hasArg(options::OPT_mlr_for_calls_only))
+ Features.push_back("+reserve-lr-for-ra");
if (Args.hasArg(options::OPT_fcall_saved_x8))
Features.push_back("+call-saved-x8");
@@ -448,3 +449,24 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
if (Args.getLastArg(options::OPT_mno_bti_at_return_twice))
Features.push_back("+no-bti-at-return-twice");
}
+
+void aarch64::setPAuthABIInTriple(const Driver &D, const ArgList &Args,
+ llvm::Triple &Triple) {
+ Arg *ABIArg = Args.getLastArg(options::OPT_mabi_EQ);
+ bool HasPAuthABI =
+ ABIArg ? (StringRef(ABIArg->getValue()) == "pauthtest") : false;
+
+ switch (Triple.getEnvironment()) {
+ case llvm::Triple::UnknownEnvironment:
+ if (HasPAuthABI)
+ Triple.setEnvironment(llvm::Triple::PAuthTest);
+ break;
+ case llvm::Triple::PAuthTest:
+ break;
+ default:
+ if (HasPAuthABI)
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << ABIArg->getAsString(Args) << Triple.getTriple();
+ break;
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.h
index d47c402d4a42..6d071167bd39 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.h
@@ -28,6 +28,9 @@ void getAArch64TargetFeatures(const Driver &D, const llvm::Triple &Triple,
std::string getAArch64TargetCPU(const llvm::opt::ArgList &Args,
const llvm::Triple &Triple, llvm::opt::Arg *&A);
+void setPAuthABIInTriple(const Driver &D, const llvm::opt::ArgList &Args,
+ llvm::Triple &triple);
+
} // end namespace aarch64
} // end namespace target
} // end namespace driver
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
index e6ee2f88a84e..0489911ecd9d 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
@@ -327,6 +327,11 @@ void arm::setFloatABIInTriple(const Driver &D, const ArgList &Args,
Triple.setEnvironment(isHardFloat ? llvm::Triple::GNUEABIHF
: llvm::Triple::GNUEABI);
break;
+ case llvm::Triple::GNUEABIT64:
+ case llvm::Triple::GNUEABIHFT64:
+ Triple.setEnvironment(isHardFloat ? llvm::Triple::GNUEABIHFT64
+ : llvm::Triple::GNUEABIT64);
+ break;
case llvm::Triple::EABI:
case llvm::Triple::EABIHF:
Triple.setEnvironment(isHardFloat ? llvm::Triple::EABIHF
@@ -414,10 +419,12 @@ arm::FloatABI arm::getDefaultFloatABI(const llvm::Triple &Triple) {
return FloatABI::Soft;
switch (Triple.getEnvironment()) {
case llvm::Triple::GNUEABIHF:
+ case llvm::Triple::GNUEABIHFT64:
case llvm::Triple::MuslEABIHF:
case llvm::Triple::EABIHF:
return FloatABI::Hard;
case llvm::Triple::GNUEABI:
+ case llvm::Triple::GNUEABIT64:
case llvm::Triple::MuslEABI:
case llvm::Triple::EABI:
// EABI is always AAPCS, and if it was not marked 'hard', it's softfp
@@ -591,11 +598,9 @@ llvm::ARM::FPUKind arm::getARMTargetFeatures(const Driver &D,
// Add CPU features for generic CPUs
if (CPUName == "native") {
- llvm::StringMap<bool> HostFeatures;
- if (llvm::sys::getHostCPUFeatures(HostFeatures))
- for (auto &F : HostFeatures)
- Features.push_back(
- Args.MakeArgString((F.second ? "+" : "-") + F.first()));
+ for (auto &F : llvm::sys::getHostCPUFeatures())
+ Features.push_back(
+ Args.MakeArgString((F.second ? "+" : "-") + F.first()));
} else if (!CPUName.empty()) {
// This sets the default features for the specified CPU. We certainly don't
// want to override the features that have been explicitly specified on the
@@ -799,8 +804,6 @@ fp16_fml_fallthrough:
StringRef FrameChainOption = A->getValue();
if (FrameChainOption.starts_with("aapcs"))
Features.push_back("+aapcs-frame-chain");
- if (FrameChainOption == "aapcs+leaf")
- Features.push_back("+aapcs-frame-chain-leaf");
}
// CMSE: Check for target 8M (for -mcmse to be applicable) is performed later.
@@ -868,12 +871,16 @@ fp16_fml_fallthrough:
}
}
- // Kernel code has more strict alignment requirements.
- if (KernelOrKext) {
- Features.push_back("+strict-align");
- } else if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
- options::OPT_munaligned_access)) {
- if (A->getOption().matches(options::OPT_munaligned_access)) {
+ if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
+ options::OPT_munaligned_access,
+ options::OPT_mstrict_align,
+ options::OPT_mno_strict_align)) {
+ // Kernel code has more strict alignment requirements.
+ if (KernelOrKext ||
+ A->getOption().matches(options::OPT_mno_unaligned_access) ||
+ A->getOption().matches(options::OPT_mstrict_align)) {
+ Features.push_back("+strict-align");
+ } else {
// No v6M core supports unaligned memory access (v6M ARM ARM A3.2).
if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m)
D.Diag(diag::err_target_unsupported_unaligned) << "v6m";
@@ -881,8 +888,7 @@ fp16_fml_fallthrough:
// access either.
else if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v8m_baseline)
D.Diag(diag::err_target_unsupported_unaligned) << "v8m.base";
- } else
- Features.push_back("+strict-align");
+ }
} else {
// Assume pre-ARMv6 doesn't support unaligned accesses.
//
@@ -890,25 +896,25 @@ fp16_fml_fallthrough:
// SCTLR.U bit, which is architecture-specific. We assume ARMv6
// Darwin and NetBSD targets support unaligned accesses, and others don't.
//
- // ARMv7 always has SCTLR.U set to 1, but it has a new SCTLR.A bit
- // which raises an alignment fault on unaligned accesses. Linux
- // defaults this bit to 0 and handles it as a system-wide (not
- // per-process) setting. It is therefore safe to assume that ARMv7+
- // Linux targets support unaligned accesses. The same goes for NaCl
- // and Windows.
+ // ARMv7 always has SCTLR.U set to 1, but it has a new SCTLR.A bit which
+ // raises an alignment fault on unaligned accesses. Assume ARMv7+ supports
+ // unaligned accesses, except ARMv6-M, and ARMv8-M without the Main
+ // Extension. This aligns with the default behavior of ARM's downstream
+ // versions of GCC and Clang.
//
- // The above behavior is consistent with GCC.
+    // Users can change the default behavior via -m[no-]unaligned-access.
int VersionNum = getARMSubArchVersionNumber(Triple);
if (Triple.isOSDarwin() || Triple.isOSNetBSD()) {
if (VersionNum < 6 ||
Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m)
Features.push_back("+strict-align");
- } else if (Triple.isOSLinux() || Triple.isOSNaCl() ||
- Triple.isOSWindows()) {
- if (VersionNum < 7)
- Features.push_back("+strict-align");
- } else
+ } else if (VersionNum < 7 ||
+ Triple.getSubArch() ==
+ llvm::Triple::SubArchType::ARMSubArch_v6m ||
+ Triple.getSubArch() ==
+ llvm::Triple::SubArchType::ARMSubArch_v8m_baseline) {
Features.push_back("+strict-align");
+ }
}
// llvm does not support reserving registers in general. There is support
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp
index 31153a67ad28..1e8aac71dc9b 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp
@@ -127,6 +127,11 @@ void loongarch::getLoongArchTargetFeatures(const Driver &D,
const llvm::Triple &Triple,
const ArgList &Args,
std::vector<StringRef> &Features) {
+ // Enable the `lsx` feature on 64-bit LoongArch by default.
+ if (Triple.isLoongArch64() &&
+ (!Args.hasArgNoClaim(clang::driver::options::OPT_march_EQ)))
+ Features.push_back("+lsx");
+
std::string ArchName;
if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
ArchName = A->getValue();
@@ -145,9 +150,11 @@ void loongarch::getLoongArchTargetFeatures(const Driver &D,
} else if (A->getOption().matches(options::OPT_msingle_float)) {
Features.push_back("+f");
Features.push_back("-d");
+ Features.push_back("-lsx");
} else /*Soft-float*/ {
Features.push_back("-f");
Features.push_back("-d");
+ Features.push_back("-lsx");
}
} else if (const Arg *A = Args.getLastArg(options::OPT_mfpu_EQ)) {
StringRef FPU = A->getValue();
@@ -157,24 +164,61 @@ void loongarch::getLoongArchTargetFeatures(const Driver &D,
} else if (FPU == "32") {
Features.push_back("+f");
Features.push_back("-d");
+ Features.push_back("-lsx");
} else if (FPU == "0" || FPU == "none") {
Features.push_back("-f");
Features.push_back("-d");
+ Features.push_back("-lsx");
} else {
D.Diag(diag::err_drv_loongarch_invalid_mfpu_EQ) << FPU;
}
}
- // Select the `ual` feature determined by -m[no-]unaligned-access
- // or the alias -m[no-]strict-align.
- AddTargetFeature(Args, Features, options::OPT_munaligned_access,
- options::OPT_mno_unaligned_access, "ual");
+ // Select the `ual` feature determined by -m[no-]strict-align.
+ AddTargetFeature(Args, Features, options::OPT_mno_strict_align,
+ options::OPT_mstrict_align, "ual");
// Accept but warn about these TargetSpecific options.
if (Arg *A = Args.getLastArgNoClaim(options::OPT_mabi_EQ))
A->ignoreTargetSpecific();
if (Arg *A = Args.getLastArgNoClaim(options::OPT_mfpu_EQ))
A->ignoreTargetSpecific();
+ if (Arg *A = Args.getLastArgNoClaim(options::OPT_msimd_EQ))
+ A->ignoreTargetSpecific();
+
+ // Select lsx/lasx feature determined by -msimd=.
+ // Option -msimd= precedes -m[no-]lsx and -m[no-]lasx.
+ if (const Arg *A = Args.getLastArg(options::OPT_msimd_EQ)) {
+ StringRef MSIMD = A->getValue();
+ if (MSIMD == "lsx") {
+ // Option -msimd=lsx depends on 64-bit FPU.
+ // -m*-float and -mfpu=none/0/32 conflict with -msimd=lsx.
+ if (llvm::find(Features, "-d") != Features.end())
+ D.Diag(diag::err_drv_loongarch_wrong_fpu_width) << /*LSX*/ 0;
+ else
+ Features.push_back("+lsx");
+ } else if (MSIMD == "lasx") {
+ // Option -msimd=lasx depends on 64-bit FPU and LSX.
+ // -m*-float, -mfpu=none/0/32 and -mno-lsx conflict with -msimd=lasx.
+ if (llvm::find(Features, "-d") != Features.end())
+ D.Diag(diag::err_drv_loongarch_wrong_fpu_width) << /*LASX*/ 1;
+ else if (llvm::find(Features, "-lsx") != Features.end())
+ D.Diag(diag::err_drv_loongarch_invalid_simd_option_combination);
+
+ // The command options do not contain -mno-lasx.
+ if (!Args.getLastArg(options::OPT_mno_lasx)) {
+ Features.push_back("+lsx");
+ Features.push_back("+lasx");
+ }
+ } else if (MSIMD == "none") {
+ if (llvm::find(Features, "+lsx") != Features.end())
+ Features.push_back("-lsx");
+ if (llvm::find(Features, "+lasx") != Features.end())
+ Features.push_back("-lasx");
+ } else {
+ D.Diag(diag::err_drv_loongarch_invalid_msimd_EQ) << MSIMD;
+ }
+ }
// Select lsx feature determined by -m[no-]lsx.
if (const Arg *A = Args.getLastArg(options::OPT_mlsx, options::OPT_mno_lsx)) {
@@ -182,7 +226,7 @@ void loongarch::getLoongArchTargetFeatures(const Driver &D,
// -m*-float and -mfpu=none/0/32 conflict with -mlsx.
if (A->getOption().matches(options::OPT_mlsx)) {
if (llvm::find(Features, "-d") != Features.end())
- D.Diag(diag::err_drv_loongarch_wrong_fpu_width_for_lsx);
+ D.Diag(diag::err_drv_loongarch_wrong_fpu_width) << /*LSX*/ 0;
else /*-mlsx*/
Features.push_back("+lsx");
} else /*-mno-lsx*/ {
@@ -197,9 +241,7 @@ void loongarch::getLoongArchTargetFeatures(const Driver &D,
// -mno-lsx conflicts with -mlasx.
if (A->getOption().matches(options::OPT_mlasx)) {
if (llvm::find(Features, "-d") != Features.end())
- D.Diag(diag::err_drv_loongarch_wrong_fpu_width_for_lasx);
- else if (llvm::find(Features, "-lsx") != Features.end())
- D.Diag(diag::err_drv_loongarch_invalid_simd_option_combination);
+ D.Diag(diag::err_drv_loongarch_wrong_fpu_width) << /*LASX*/ 1;
else { /*-mlasx*/
Features.push_back("+lsx");
Features.push_back("+lasx");
@@ -225,8 +267,14 @@ std::string loongarch::postProcessTargetCPUString(const std::string &CPU,
std::string loongarch::getLoongArchTargetCPU(const llvm::opt::ArgList &Args,
const llvm::Triple &Triple) {
std::string CPU;
+ std::string Arch;
// If we have -march, use that.
- if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
- CPU = A->getValue();
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
+ Arch = A->getValue();
+ if (Arch == "la64v1.0" || Arch == "la64v1.1")
+ CPU = llvm::LoongArch::getDefaultArch(Triple.isLoongArch64());
+ else
+ CPU = Arch;
+ }
return postProcessTargetCPUString(CPU, Triple);
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.cpp
index fe9d112b8800..79a00711e6f5 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.cpp
@@ -341,6 +341,15 @@ void mips::getMIPSTargetFeatures(const Driver &D, const llvm::Triple &Triple,
"dspr2");
AddTargetFeature(Args, Features, options::OPT_mmsa, options::OPT_mno_msa,
"msa");
+ if (Arg *A = Args.getLastArg(
+ options::OPT_mstrict_align, options::OPT_mno_strict_align,
+ options::OPT_mno_unaligned_access, options::OPT_munaligned_access)) {
+ if (A->getOption().matches(options::OPT_mstrict_align) ||
+ A->getOption().matches(options::OPT_mno_unaligned_access))
+ Features.push_back(Args.MakeArgString("+strict-align"));
+ else
+ Features.push_back(Args.MakeArgString("-strict-align"));
+ }
// Add the last -mfp32/-mfpxx/-mfp64, if none are given and the ABI is O32
// pass -mfpxx, or if none are given and fp64a is default, pass fp64 and
@@ -360,6 +369,9 @@ void mips::getMIPSTargetFeatures(const Driver &D, const llvm::Triple &Triple,
} else if (mips::isFP64ADefault(Triple, CPUName)) {
Features.push_back("+fp64");
Features.push_back("+nooddspreg");
+ } else if (Arg *A = Args.getLastArg(options::OPT_mmsa)) {
+ if (A->getOption().matches(options::OPT_mmsa))
+ Features.push_back("+fp64");
}
AddTargetFeature(Args, Features, options::OPT_mno_odd_spreg,
@@ -490,6 +502,13 @@ bool mips::shouldUseFPXX(const ArgList &Args, const llvm::Triple &Triple,
options::OPT_mdouble_float))
if (A->getOption().matches(options::OPT_msingle_float))
UseFPXX = false;
+ // FP64 should be used for MSA.
+ if (Arg *A = Args.getLastArg(options::OPT_mmsa))
+ if (A->getOption().matches(options::OPT_mmsa))
+ UseFPXX = llvm::StringSwitch<bool>(CPUName)
+ .Cases("mips32r2", "mips32r3", "mips32r5", false)
+ .Cases("mips64r2", "mips64r3", "mips64r5", false)
+ .Default(UseFPXX);
return UseFPXX;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.h
index 62211c711420..674c21744b52 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.h
@@ -1,4 +1,4 @@
-//===--- Mips.h - Mips-specific Tool Helpers ----------------------*- C++ -*-===//
+//===--- Mips.h - Mips-specific Tool Helpers --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp
index ab24d14992cd..acd5757d6ea9 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp
@@ -70,6 +70,7 @@ static std::string normalizeCPUName(StringRef CPUName, const llvm::Triple &T) {
.Case("power8", "pwr8")
.Case("power9", "pwr9")
.Case("power10", "pwr10")
+ .Case("power11", "pwr11")
.Case("future", "future")
.Case("powerpc", "ppc")
.Case("powerpc64", "ppc64")
@@ -103,6 +104,8 @@ const char *ppc::getPPCAsmModeForCPU(StringRef Name) {
.Case("power9", "-mpower9")
.Case("pwr10", "-mpower10")
.Case("power10", "-mpower10")
+ .Case("pwr11", "-mpower11")
+ .Case("power11", "-mpower11")
.Default("-many");
}
@@ -122,6 +125,26 @@ void ppc::getPPCTargetFeatures(const Driver &D, const llvm::Triple &Triple,
ppc::ReadGOTPtrMode ReadGOT = ppc::getPPCReadGOTPtrMode(D, Triple, Args);
if (ReadGOT == ppc::ReadGOTPtrMode::SecurePlt)
Features.push_back("+secure-plt");
+
+ bool UseSeparateSections = isUseSeparateSections(Triple);
+ bool HasDefaultDataSections = Triple.isOSBinFormatXCOFF();
+ if (Args.hasArg(options::OPT_maix_small_local_exec_tls) ||
+ Args.hasArg(options::OPT_maix_small_local_dynamic_tls)) {
+ if (!Triple.isOSAIX() || !Triple.isArch64Bit())
+ D.Diag(diag::err_opt_not_valid_on_target)
+ << "-maix-small-local-[exec|dynamic]-tls";
+
+ // The -maix-small-local-[exec|dynamic]-tls option should only be used with
+ // -fdata-sections, as having data sections turned off with this option
+ // is not ideal for performance. Moreover, the
+ // small-local-[exec|dynamic]-tls region is a limited resource, and should
+ // not be used for variables that may be replaced.
+ if (!Args.hasFlag(options::OPT_fdata_sections,
+ options::OPT_fno_data_sections,
+ UseSeparateSections || HasDefaultDataSections))
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << "-maix-small-local-[exec|dynamic]-tls" << "-fdata-sections";
+ }
}
ppc::ReadGOTPtrMode ppc::getPPCReadGOTPtrMode(const Driver &D, const llvm::Triple &Triple,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
index 47b29e1577c2..7f5771e21615 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
@@ -15,9 +15,9 @@
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/Error.h"
-#include "llvm/Support/RISCVISAInfo.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/RISCVISAInfo.h"
#include "llvm/TargetParser/RISCVTargetParser.h"
using namespace clang::driver;
@@ -67,19 +67,19 @@ static void getRISCFeaturesFromMcpu(const Driver &D, const Arg *A,
D.Diag(clang::diag::err_drv_unsupported_option_argument)
<< A->getSpelling() << Mcpu;
}
-
- if (llvm::RISCV::hasFastUnalignedAccess(Mcpu))
- Features.push_back("+fast-unaligned-access");
}
void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args,
std::vector<StringRef> &Features) {
- StringRef MArch = getRISCVArch(Args, Triple);
+ std::string MArch = getRISCVArch(Args, Triple);
if (!getArchFeatures(D, MArch, Features, Args))
return;
+ bool CPUFastScalarUnaligned = false;
+ bool CPUFastVectorUnaligned = false;
+
// If users give march and mcpu, get std extension feature from MArch
// and other features (ex. mirco architecture feature) from mcpu
if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
@@ -88,6 +88,11 @@ void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
CPU = llvm::sys::getHostCPUName();
getRISCFeaturesFromMcpu(D, A, Triple, CPU, Features);
+
+ if (llvm::RISCV::hasFastScalarUnalignedAccess(CPU))
+ CPUFastScalarUnaligned = true;
+ if (llvm::RISCV::hasFastVectorUnalignedAccess(CPU))
+ CPUFastVectorUnaligned = true;
}
// Handle features corresponding to "-ffixed-X" options
@@ -168,9 +173,39 @@ void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
Features.push_back("-relax");
}
- // -mno-unaligned-access is default, unless -munaligned-access is specified.
- AddTargetFeature(Args, Features, options::OPT_munaligned_access,
- options::OPT_mno_unaligned_access, "fast-unaligned-access");
+ // If -mstrict-align, -mno-strict-align, -mscalar-strict-align, or
+ // -mno-scalar-strict-align is passed, use it. Otherwise, the
+ // unaligned-scalar-mem is enabled if the CPU supports it or the target is
+ // Android.
+ if (const Arg *A = Args.getLastArg(
+ options::OPT_mno_strict_align, options::OPT_mscalar_strict_align,
+ options::OPT_mstrict_align, options::OPT_mno_scalar_strict_align)) {
+ if (A->getOption().matches(options::OPT_mno_strict_align) ||
+ A->getOption().matches(options::OPT_mno_scalar_strict_align)) {
+ Features.push_back("+unaligned-scalar-mem");
+ } else {
+ Features.push_back("-unaligned-scalar-mem");
+ }
+ } else if (CPUFastScalarUnaligned || Triple.isAndroid()) {
+ Features.push_back("+unaligned-scalar-mem");
+ }
+
+ // If -mstrict-align, -mno-strict-align, -mvector-strict-align, or
+ // -mno-vector-strict-align is passed, use it. Otherwise, the
+ // unaligned-vector-mem is enabled if the CPU supports it or the target is
+ // Android.
+ if (const Arg *A = Args.getLastArg(
+ options::OPT_mno_strict_align, options::OPT_mvector_strict_align,
+ options::OPT_mstrict_align, options::OPT_mno_vector_strict_align)) {
+ if (A->getOption().matches(options::OPT_mno_strict_align) ||
+ A->getOption().matches(options::OPT_mno_vector_strict_align)) {
+ Features.push_back("+unaligned-vector-mem");
+ } else {
+ Features.push_back("-unaligned-vector-mem");
+ }
+ } else if (CPUFastVectorUnaligned || Triple.isAndroid()) {
+ Features.push_back("+unaligned-vector-mem");
+ }
// Now add any that the user explicitly requested on the command line,
// which may override the defaults.
@@ -213,7 +248,7 @@ StringRef riscv::getRISCVABI(const ArgList &Args, const llvm::Triple &Triple) {
// rv64g | rv64*d -> lp64d
// rv64e -> lp64e
// rv64* -> lp64
- StringRef Arch = getRISCVArch(Args, Triple);
+ std::string Arch = getRISCVArch(Args, Triple);
auto ParseResult = llvm::RISCVISAInfo::parseArchString(
Arch, /* EnableExperimentalExtension */ true);
@@ -239,8 +274,8 @@ StringRef riscv::getRISCVABI(const ArgList &Args, const llvm::Triple &Triple) {
}
}
-StringRef riscv::getRISCVArch(const llvm::opt::ArgList &Args,
- const llvm::Triple &Triple) {
+std::string riscv::getRISCVArch(const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple) {
assert(Triple.isRISCV() && "Unexpected triple");
// GCC's logic around choosing a default `-march=` is complex. If GCC is not
@@ -276,12 +311,28 @@ StringRef riscv::getRISCVArch(const llvm::opt::ArgList &Args,
// 2. Get march (isa string) based on `-mcpu=`
if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
StringRef CPU = A->getValue();
- if (CPU == "native")
+ if (CPU == "native") {
CPU = llvm::sys::getHostCPUName();
+ // If the target cpu is unrecognized, use target features.
+ if (CPU.starts_with("generic")) {
+ auto FeatureMap = llvm::sys::getHostCPUFeatures();
+ // hwprobe may be unavailable on older Linux versions.
+ if (!FeatureMap.empty()) {
+ std::vector<std::string> Features;
+ for (auto &F : FeatureMap)
+ Features.push_back(((F.second ? "+" : "-") + F.first()).str());
+ auto ParseResult = llvm::RISCVISAInfo::parseFeatures(
+ Triple.isRISCV32() ? 32 : 64, Features);
+ if (ParseResult)
+ return (*ParseResult)->toString();
+ }
+ }
+ }
+
StringRef MArch = llvm::RISCV::getMArchFromMcpu(CPU);
// Bypass if target cpu's default march is empty.
if (MArch != "")
- return MArch;
+ return MArch.str();
}
// 3. Choose a default based on `-mabi=`
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.h
index fcaf9d57ad13..388786b9c4c1 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.h
@@ -24,8 +24,8 @@ void getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
std::vector<llvm::StringRef> &Features);
StringRef getRISCVABI(const llvm::opt::ArgList &Args,
const llvm::Triple &Triple);
-StringRef getRISCVArch(const llvm::opt::ArgList &Args,
- const llvm::Triple &Triple);
+std::string getRISCVArch(const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple);
std::string getRISCVTargetCPU(const llvm::opt::ArgList &Args,
const llvm::Triple &Triple);
} // end namespace riscv
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Sparc.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Sparc.h
index 44658c4259c6..2b178d9df1ee 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Sparc.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Sparc.h
@@ -1,4 +1,4 @@
-//===--- Sparc.h - Sparc-specific Tool Helpers ----------------------*- C++ -*-===//
+//===--- Sparc.h - Sparc-specific Tool Helpers ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp
index 588bc3176d73..2213f431eb81 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp
@@ -71,4 +71,12 @@ void systemz::getSystemZTargetFeatures(const Driver &D, const ArgList &Args,
systemz::FloatABI FloatABI = systemz::getSystemZFloatABI(D, Args);
if (FloatABI == systemz::FloatABI::Soft)
Features.push_back("+soft-float");
+
+ if (const Arg *A = Args.getLastArg(options::OPT_munaligned_symbols,
+ options::OPT_mno_unaligned_symbols)) {
+ if (A->getOption().matches(options::OPT_munaligned_symbols))
+ Features.push_back("+unaligned-symbols");
+ else
+ Features.push_back("-unaligned-symbols");
+ }
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp
index 53e26a9f8e22..2f63333b732f 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp
@@ -131,11 +131,9 @@ void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
// If -march=native, autodetect the feature list.
if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_march_EQ)) {
if (StringRef(A->getValue()) == "native") {
- llvm::StringMap<bool> HostFeatures;
- if (llvm::sys::getHostCPUFeatures(HostFeatures))
- for (auto &F : HostFeatures)
- Features.push_back(
- Args.MakeArgString((F.second ? "+" : "-") + F.first()));
+ for (auto &F : llvm::sys::getHostCPUFeatures())
+ Features.push_back(
+ Args.MakeArgString((F.second ? "+" : "-") + F.first()));
}
}
@@ -273,7 +271,8 @@ void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
for (StringRef Value : A->getValues()) {
if (Value == "egpr" || Value == "push2pop2" || Value == "ppx" ||
- Value == "ndd" || Value == "ccmp" || Value == "cf") {
+ Value == "ndd" || Value == "ccmp" || Value == "nf" ||
+ Value == "cf" || Value == "zu") {
Features.push_back(
Args.MakeArgString((IsNegative ? "-" : "+") + Value));
continue;
@@ -309,4 +308,19 @@ void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
Features.push_back("+prefer-no-gather");
if (Args.hasArg(options::OPT_mno_scatter))
Features.push_back("+prefer-no-scatter");
+ if (Args.hasArg(options::OPT_mapx_inline_asm_use_gpr32))
+ Features.push_back("+inline-asm-use-gpr32");
+
+ // Warn for removed 3dnow support
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_m3dnowa, options::OPT_mno_3dnowa,
+ options::OPT_mno_3dnow)) {
+ if (A->getOption().matches(options::OPT_m3dnowa))
+ D.Diag(diag::warn_drv_clang_unsupported) << A->getAsString(Args);
+ }
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_m3dnow, options::OPT_mno_3dnow)) {
+ if (A->getOption().matches(options::OPT_m3dnow))
+ D.Diag(diag::warn_drv_clang_unsupported) << A->getAsString(Args);
+ }
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.cpp
index 391c47f88bde..852e0442f50a 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.cpp
@@ -37,7 +37,7 @@ static bool findRISCVMultilibs(const Driver &D,
const llvm::Triple &TargetTriple,
const ArgList &Args, DetectedMultilibs &Result) {
Multilib::flags_list Flags;
- StringRef Arch = riscv::getRISCVArch(Args, TargetTriple);
+ std::string Arch = riscv::getRISCVArch(Args, TargetTriple);
StringRef Abi = tools::riscv::getRISCVABI(Args, TargetTriple);
if (TargetTriple.isRISCV64()) {
@@ -100,9 +100,7 @@ static bool findRISCVMultilibs(const Driver &D,
BareMetal::BareMetal(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: ToolChain(D, Triple, Args) {
- getProgramPaths().push_back(getDriver().getInstalledDir());
- if (getDriver().getInstalledDir() != getDriver().Dir)
- getProgramPaths().push_back(getDriver().Dir);
+ getProgramPaths().push_back(getDriver().Dir);
findMultilibs(D, Triple, Args);
SmallString<128> SysRoot(computeSysRoot());
@@ -272,15 +270,19 @@ void BareMetal::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
addSystemInclude(DriverArgs, CC1Args, Dir.str());
}
- if (!DriverArgs.hasArg(options::OPT_nostdlibinc)) {
- const SmallString<128> SysRoot(computeSysRoot());
- if (!SysRoot.empty()) {
- for (const Multilib &M : getOrderedMultilibs()) {
- SmallString<128> Dir(SysRoot);
- llvm::sys::path::append(Dir, M.includeSuffix());
- llvm::sys::path::append(Dir, "include");
- addSystemInclude(DriverArgs, CC1Args, Dir.str());
- }
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ if (std::optional<std::string> Path = getStdlibIncludePath())
+ addSystemInclude(DriverArgs, CC1Args, *Path);
+
+ const SmallString<128> SysRoot(computeSysRoot());
+ if (!SysRoot.empty()) {
+ for (const Multilib &M : getOrderedMultilibs()) {
+ SmallString<128> Dir(SysRoot);
+ llvm::sys::path::append(Dir, M.includeSuffix());
+ llvm::sys::path::append(Dir, "include");
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
}
}
}
@@ -298,6 +300,40 @@ void BareMetal::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
return;
const Driver &D = getDriver();
+ std::string Target = getTripleString();
+
+ auto AddCXXIncludePath = [&](StringRef Path) {
+ std::string Version = detectLibcxxVersion(Path);
+ if (Version.empty())
+ return;
+
+ {
+ // First the per-target include dir: include/<target>/c++/v1.
+ SmallString<128> TargetDir(Path);
+ llvm::sys::path::append(TargetDir, Target, "c++", Version);
+ addSystemInclude(DriverArgs, CC1Args, TargetDir);
+ }
+
+ {
+ // Then the generic dir: include/c++/v1.
+ SmallString<128> Dir(Path);
+ llvm::sys::path::append(Dir, "c++", Version);
+ addSystemInclude(DriverArgs, CC1Args, Dir);
+ }
+ };
+
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx: {
+ SmallString<128> P(D.Dir);
+ llvm::sys::path::append(P, "..", "include");
+ AddCXXIncludePath(P);
+ break;
+ }
+ case ToolChain::CST_Libstdcxx:
+ // We only support libc++ toolchain installation.
+ break;
+ }
+
std::string SysRoot(computeSysRoot());
if (SysRoot.empty())
return;
@@ -368,11 +404,7 @@ void BareMetal::AddLinkRuntimeLib(const ArgList &Args,
ToolChain::RuntimeLibType RLT = GetRuntimeLibType(Args);
switch (RLT) {
case ToolChain::RLT_CompilerRT: {
- const std::string FileName = getCompilerRT(Args, "builtins");
- llvm::StringRef BaseName = llvm::sys::path::filename(FileName);
- BaseName.consume_front("lib");
- BaseName.consume_back(".a");
- CmdArgs.push_back(Args.MakeArgString("-l" + BaseName));
+ CmdArgs.push_back(getCompilerRTArgString(Args, "builtins"));
return;
}
case ToolChain::RLT_Libgcc:
@@ -435,6 +467,7 @@ void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
ArgStringList CmdArgs;
auto &TC = static_cast<const toolchains::BareMetal &>(getToolChain());
+ const Driver &D = getToolChain().getDriver();
const llvm::Triple::ArchType Arch = TC.getArch();
const llvm::Triple &Triple = getToolChain().getEffectiveTriple();
@@ -442,6 +475,9 @@ void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-Bstatic");
+ if (TC.getTriple().isRISCV() && Args.hasArg(options::OPT_mno_relax))
+ CmdArgs.push_back("--no-relax");
+
if (Triple.isARM() || Triple.isThumb()) {
bool IsBigEndian = arm::isARMBigEndian(Triple, Args);
if (IsBigEndian)
@@ -459,11 +495,6 @@ void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
for (const auto &LibPath : TC.getLibraryPaths())
CmdArgs.push_back(Args.MakeArgString(llvm::Twine("-L", LibPath)));
- const std::string FileName = TC.getCompilerRT(Args, "builtins");
- llvm::SmallString<128> PathBuf{FileName};
- llvm::sys::path::remove_filename(PathBuf);
- CmdArgs.push_back(Args.MakeArgString("-L" + PathBuf));
-
if (TC.ShouldLinkCXXStdlib(Args))
TC.AddCXXStdlibLibArgs(Args, CmdArgs);
@@ -474,6 +505,19 @@ void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
TC.AddLinkRuntimeLib(Args, CmdArgs);
}
+ if (D.isUsingLTO()) {
+ assert(!Inputs.empty() && "Must have at least one input.");
+ // Find the first filename InputInfo object.
+ auto Input = llvm::find_if(
+ Inputs, [](const InputInfo &II) -> bool { return II.isFilename(); });
+ if (Input == Inputs.end())
+ // For a very rare case, all of the inputs to the linker are
+ // InputArg. If that happens, just use the first InputInfo.
+ Input = Inputs.begin();
+
+ addLTOOptions(TC, Args, CmdArgs, Output, *Input,
+ D.getLTOMode() == LTOK_Thin);
+ }
if (TC.getTriple().isRISCV())
CmdArgs.push_back("-X");
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
index aa344b3465ab..8858c318aba7 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
@@ -45,6 +45,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Magic.h"
#include "llvm/Config/llvm-config.h"
+#include "llvm/Frontend/Debug/Options.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/CodeGen.h"
@@ -54,11 +55,12 @@
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
-#include "llvm/Support/RISCVISAInfo.h"
#include "llvm/Support/YAMLParser.h"
+#include "llvm/TargetParser/AArch64TargetParser.h"
#include "llvm/TargetParser/ARMTargetParserCommon.h"
#include "llvm/TargetParser/Host.h"
#include "llvm/TargetParser/LoongArchTargetParser.h"
+#include "llvm/TargetParser/RISCVISAInfo.h"
#include "llvm/TargetParser/RISCVTargetParser.h"
#include <cctype>
@@ -346,11 +348,14 @@ static bool addExceptionArgs(const ArgList &Args, types::ID InputType,
bool EH = Args.hasFlag(options::OPT_fexceptions, options::OPT_fno_exceptions,
false);
- bool EHa = Args.hasFlag(options::OPT_fasync_exceptions,
- options::OPT_fno_async_exceptions, false);
- if (EHa) {
- CmdArgs.push_back("-fasync-exceptions");
- EH = true;
+ // Async exceptions are Windows MSVC only.
+ if (Triple.isWindowsMSVCEnvironment()) {
+ bool EHa = Args.hasFlag(options::OPT_fasync_exceptions,
+ options::OPT_fno_async_exceptions, false);
+ if (EHa) {
+ CmdArgs.push_back("-fasync-exceptions");
+ EH = true;
+ }
}
// Obj-C exceptions are enabled by default, regardless of -fexceptions. This
@@ -634,7 +639,9 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
ProfileGenerateArg->getValue()));
// The default is to use Clang Instrumentation.
CmdArgs.push_back("-fprofile-instrument=clang");
- if (TC.getTriple().isWindowsMSVCEnvironment()) {
+ if (TC.getTriple().isWindowsMSVCEnvironment() &&
+ Args.hasFlag(options::OPT_frtlib_defaultlib,
+ options::OPT_fno_rtlib_defaultlib, true)) {
// Add dependent lib for clang_rt.profile
CmdArgs.push_back(Args.MakeArgString(
"--dependent-lib=" + TC.getCompilerRTBasename(Args, "profile")));
@@ -653,7 +660,9 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
CmdArgs.push_back("-fprofile-instrument=csllvm");
}
if (PGOGenArg) {
- if (TC.getTriple().isWindowsMSVCEnvironment()) {
+ if (TC.getTriple().isWindowsMSVCEnvironment() &&
+ Args.hasFlag(options::OPT_frtlib_defaultlib,
+ options::OPT_fno_rtlib_defaultlib, true)) {
// Add dependent lib for clang_rt.profile
CmdArgs.push_back(Args.MakeArgString(
"--dependent-lib=" + TC.getCompilerRTBasename(Args, "profile")));
@@ -828,36 +837,6 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
}
}
-/// Check whether the given input tree contains any compilation actions.
-static bool ContainsCompileAction(const Action *A) {
- if (isa<CompileJobAction>(A) || isa<BackendJobAction>(A))
- return true;
-
- return llvm::any_of(A->inputs(), ContainsCompileAction);
-}
-
-/// Check if -relax-all should be passed to the internal assembler.
-/// This is done by default when compiling non-assembler source with -O0.
-static bool UseRelaxAll(Compilation &C, const ArgList &Args) {
- bool RelaxDefault = true;
-
- if (Arg *A = Args.getLastArg(options::OPT_O_Group))
- RelaxDefault = A->getOption().matches(options::OPT_O0);
-
- if (RelaxDefault) {
- RelaxDefault = false;
- for (const auto &Act : C.getActions()) {
- if (ContainsCompileAction(Act)) {
- RelaxDefault = true;
- break;
- }
- }
- }
-
- return Args.hasFlag(options::OPT_mrelax_all, options::OPT_mno_relax_all,
- RelaxDefault);
-}
-
static void
RenderDebugEnablingArgs(const ArgList &Args, ArgStringList &CmdArgs,
llvm::codegenoptions::DebugInfoKind DebugInfoKind,
@@ -1052,7 +1031,7 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
// If user provided -o, that is the dependency target, except
// when we are only generating a dependency file.
- Arg *OutputOpt = Args.getLastArg(options::OPT_o);
+ Arg *OutputOpt = Args.getLastArg(options::OPT_o, options::OPT__SLASH_Fo);
if (OutputOpt && Output.getType() != types::TY_Dependencies) {
DepTarget = OutputOpt->getValue();
} else {
@@ -1098,33 +1077,6 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
if (JA.isOffloading(Action::OFK_HIP))
getToolChain().AddHIPIncludeArgs(Args, CmdArgs);
- // If we are compiling for a GPU target we want to override the system headers
- // with ones created by the 'libc' project if present.
- if (!Args.hasArg(options::OPT_nostdinc) &&
- !Args.hasArg(options::OPT_nogpuinc) &&
- !Args.hasArg(options::OPT_nobuiltininc)) {
- // Without an offloading language we will include these headers directly.
- // Offloading languages will instead only use the declarations stored in
- // the resource directory at clang/lib/Headers/llvm_libc_wrappers.
- if ((getToolChain().getTriple().isNVPTX() ||
- getToolChain().getTriple().isAMDGCN()) &&
- C.getActiveOffloadKinds() == Action::OFK_None) {
- SmallString<128> P(llvm::sys::path::parent_path(D.InstalledDir));
- llvm::sys::path::append(P, "include");
- llvm::sys::path::append(P, "gpu-none-llvm");
- CmdArgs.push_back("-c-isystem");
- CmdArgs.push_back(Args.MakeArgString(P));
- } else if (C.getActiveOffloadKinds() == Action::OFK_OpenMP) {
- // TODO: CUDA / HIP include their own headers for some common functions
- // implemented here. We'll need to clean those up so they do not conflict.
- SmallString<128> P(D.ResourceDir);
- llvm::sys::path::append(P, "include");
- llvm::sys::path::append(P, "llvm_libc_wrappers");
- CmdArgs.push_back("-internal-isystem");
- CmdArgs.push_back(Args.MakeArgString(P));
- }
- }
-
// If we are offloading to a target via OpenMP we need to include the
// openmp_wrappers folder which contains alternative system headers.
if (JA.isDeviceOffloading(Action::OFK_OpenMP) &&
@@ -1241,7 +1193,8 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
Args.addAllArgs(CmdArgs,
{options::OPT_D, options::OPT_U, options::OPT_I_Group,
- options::OPT_F, options::OPT_index_header_map});
+ options::OPT_F, options::OPT_index_header_map,
+ options::OPT_embed_dir_EQ});
// Add -Wp, and -Xpreprocessor if using the preprocessor.
@@ -1296,6 +1249,35 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
});
}
+ // If we are compiling for a GPU target we want to override the system headers
+ // with ones created by the 'libc' project if present.
+ // TODO: This should be moved to `AddClangSystemIncludeArgs` by passing the
+ // OffloadKind as an argument.
+ if (!Args.hasArg(options::OPT_nostdinc) &&
+ !Args.hasArg(options::OPT_nogpuinc) &&
+ !Args.hasArg(options::OPT_nobuiltininc)) {
+ // Without an offloading language we will include these headers directly.
+ // Offloading languages will instead only use the declarations stored in
+ // the resource directory at clang/lib/Headers/llvm_libc_wrappers.
+ if ((getToolChain().getTriple().isNVPTX() ||
+ getToolChain().getTriple().isAMDGCN()) &&
+ C.getActiveOffloadKinds() == Action::OFK_None) {
+ SmallString<128> P(llvm::sys::path::parent_path(D.Dir));
+ llvm::sys::path::append(P, "include");
+ llvm::sys::path::append(P, getToolChain().getTripleString());
+ CmdArgs.push_back("-internal-isystem");
+ CmdArgs.push_back(Args.MakeArgString(P));
+ } else if (C.getActiveOffloadKinds() == Action::OFK_OpenMP) {
+ // TODO: CUDA / HIP include their own headers for some common functions
+ // implemented here. We'll need to clean those up so they do not conflict.
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "include");
+ llvm::sys::path::append(P, "llvm_libc_wrappers");
+ CmdArgs.push_back("-internal-isystem");
+ CmdArgs.push_back(Args.MakeArgString(P));
+ }
+ }
+
// Add system include arguments for all targets but IAMCU.
if (!IsIAMCU)
forAllAssociatedToolChains(C, JA, getToolChain(),
@@ -1504,6 +1486,45 @@ void AddUnalignedAccessWarning(ArgStringList &CmdArgs) {
}
}
+// Each combination of options here forms a signing schema, and in most cases
+// each signing schema is its own incompatible ABI. The default values of the
+// options represent the default signing schema.
+static void handlePAuthABI(const ArgList &DriverArgs, ArgStringList &CC1Args) {
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_intrinsics,
+ options::OPT_fno_ptrauth_intrinsics))
+ CC1Args.push_back("-fptrauth-intrinsics");
+
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_calls,
+ options::OPT_fno_ptrauth_calls))
+ CC1Args.push_back("-fptrauth-calls");
+
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_returns,
+ options::OPT_fno_ptrauth_returns))
+ CC1Args.push_back("-fptrauth-returns");
+
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_auth_traps,
+ options::OPT_fno_ptrauth_auth_traps))
+ CC1Args.push_back("-fptrauth-auth-traps");
+
+ if (!DriverArgs.hasArg(
+ options::OPT_fptrauth_vtable_pointer_address_discrimination,
+ options::OPT_fno_ptrauth_vtable_pointer_address_discrimination))
+ CC1Args.push_back("-fptrauth-vtable-pointer-address-discrimination");
+
+ if (!DriverArgs.hasArg(
+ options::OPT_fptrauth_vtable_pointer_type_discrimination,
+ options::OPT_fno_ptrauth_vtable_pointer_type_discrimination))
+ CC1Args.push_back("-fptrauth-vtable-pointer-type-discrimination");
+
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_indirect_gotos,
+ options::OPT_fno_ptrauth_indirect_gotos))
+ CC1Args.push_back("-fptrauth-indirect-gotos");
+
+ if (!DriverArgs.hasArg(options::OPT_fptrauth_init_fini,
+ options::OPT_fno_ptrauth_init_fini))
+ CC1Args.push_back("-fptrauth-init-fini");
+}
+
static void CollectARMPACBTIOptions(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs, bool isAArch64) {
const Arg *A = isAArch64
@@ -1534,7 +1555,24 @@ static void CollectARMPACBTIOptions(const ToolChain &TC, const ArgList &Args,
} else {
StringRef DiagMsg;
llvm::ARM::ParsedBranchProtection PBP;
- if (!llvm::ARM::parseBranchProtection(A->getValue(), PBP, DiagMsg))
+ bool EnablePAuthLR = false;
+
+ // To know if we need to enable PAuth-LR As part of the standard branch
+ // protection option, it needs to be determined if the feature has been
+ // activated in the `march` argument. This information is stored within the
+ // CmdArgs variable and can be found using a search.
+ if (isAArch64) {
+ auto isPAuthLR = [](const char *member) {
+ llvm::AArch64::ExtensionInfo pauthlr_extension =
+ llvm::AArch64::getExtensionByID(llvm::AArch64::AEK_PAUTHLR);
+ return pauthlr_extension.PosTargetFeature == member;
+ };
+
+ if (std::any_of(CmdArgs.begin(), CmdArgs.end(), isPAuthLR))
+ EnablePAuthLR = true;
+ }
+ if (!llvm::ARM::parseBranchProtection(A->getValue(), PBP, DiagMsg,
+ EnablePAuthLR))
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getSpelling() << DiagMsg;
if (!isAArch64 && PBP.Key == "b_key")
@@ -1549,16 +1587,30 @@ static void CollectARMPACBTIOptions(const ToolChain &TC, const ArgList &Args,
CmdArgs.push_back(
Args.MakeArgString(Twine("-msign-return-address=") + Scope));
- if (!Scope.equals("none"))
+ if (Scope != "none") {
+ if (Triple.getEnvironment() == llvm::Triple::PAuthTest)
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << Triple.getTriple();
CmdArgs.push_back(
Args.MakeArgString(Twine("-msign-return-address-key=") + Key));
- if (BranchProtectionPAuthLR)
+ }
+ if (BranchProtectionPAuthLR) {
+ if (Triple.getEnvironment() == llvm::Triple::PAuthTest)
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << Triple.getTriple();
CmdArgs.push_back(
Args.MakeArgString(Twine("-mbranch-protection-pauth-lr")));
+ }
if (IndirectBranches)
CmdArgs.push_back("-mbranch-target-enforce");
- if (GuardedControlStack)
+ // GCS is currently untested with PAuthABI, but enabling this could be allowed
+ // in future after testing with a suitable system.
+ if (GuardedControlStack) {
+ if (Triple.getEnvironment() == llvm::Triple::PAuthTest)
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << Triple.getTriple();
CmdArgs.push_back("-mguarded-control-stack");
+ }
}
void Clang::AddARMTargetArgs(const llvm::Triple &Triple, const ArgList &Args,
@@ -1702,6 +1754,8 @@ void RenderAArch64ABI(const llvm::Triple &Triple, const ArgList &Args,
ABIName = A->getValue();
else if (Triple.isOSDarwin())
ABIName = "darwinpcs";
+ else if (Triple.getEnvironment() == llvm::Triple::PAuthTest)
+ ABIName = "pauthtest";
else
ABIName = "aapcs";
@@ -1738,18 +1792,18 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
// Enable/disable return address signing and indirect branch targets.
CollectARMPACBTIOptions(getToolChain(), Args, CmdArgs, true /*isAArch64*/);
+ if (Triple.getEnvironment() == llvm::Triple::PAuthTest)
+ handlePAuthABI(Args, CmdArgs);
+
// Handle -msve_vector_bits=<bits>
if (Arg *A = Args.getLastArg(options::OPT_msve_vector_bits_EQ)) {
StringRef Val = A->getValue();
const Driver &D = getToolChain().getDriver();
- if (Val.equals("128") || Val.equals("256") || Val.equals("512") ||
- Val.equals("1024") || Val.equals("2048") || Val.equals("128+") ||
- Val.equals("256+") || Val.equals("512+") || Val.equals("1024+") ||
- Val.equals("2048+")) {
+ if (Val == "128" || Val == "256" || Val == "512" || Val == "1024" ||
+ Val == "2048" || Val == "128+" || Val == "256+" || Val == "512+" ||
+ Val == "1024+" || Val == "2048+") {
unsigned Bits = 0;
- if (Val.ends_with("+"))
- Val = Val.substr(0, Val.size() - 1);
- else {
+ if (!Val.consume_back("+")) {
bool Invalid = Val.getAsInteger(10, Bits); (void)Invalid;
assert(!Invalid && "Failed to parse value");
CmdArgs.push_back(
@@ -1761,7 +1815,7 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
CmdArgs.push_back(
Args.MakeArgString("-mvscale-min=" + llvm::Twine(Bits / 128)));
// Silently drop requests for vector-length agnostic code as it's implied.
- } else if (!Val.equals("scalable"))
+ } else if (Val != "scalable")
// Handle the unsupported values passed to msve-vector-bits.
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getSpelling() << Val;
@@ -1778,6 +1832,32 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
}
AddUnalignedAccessWarning(CmdArgs);
+
+ Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_intrinsics,
+ options::OPT_fno_ptrauth_intrinsics);
+ Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_calls,
+ options::OPT_fno_ptrauth_calls);
+ Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_returns,
+ options::OPT_fno_ptrauth_returns);
+ Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_auth_traps,
+ options::OPT_fno_ptrauth_auth_traps);
+ Args.addOptInFlag(
+ CmdArgs, options::OPT_fptrauth_vtable_pointer_address_discrimination,
+ options::OPT_fno_ptrauth_vtable_pointer_address_discrimination);
+ Args.addOptInFlag(
+ CmdArgs, options::OPT_fptrauth_vtable_pointer_type_discrimination,
+ options::OPT_fno_ptrauth_vtable_pointer_type_discrimination);
+ Args.addOptInFlag(
+ CmdArgs, options::OPT_fptrauth_type_info_vtable_pointer_discrimination,
+ options::OPT_fno_ptrauth_type_info_vtable_pointer_discrimination);
+ Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_init_fini,
+ options::OPT_fno_ptrauth_init_fini);
+ Args.addOptInFlag(
+ CmdArgs, options::OPT_fptrauth_function_pointer_type_discrimination,
+ options::OPT_fno_ptrauth_function_pointer_type_discrimination);
+
+ Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_indirect_gotos,
+ options::OPT_fno_ptrauth_indirect_gotos);
}
void Clang::AddLoongArchTargetArgs(const ArgList &Args,
@@ -2096,7 +2176,7 @@ void Clang::AddRISCVTargetArgs(const ArgList &Args,
// Get minimum VLen from march.
unsigned MinVLen = 0;
- StringRef Arch = riscv::getRISCVArch(Args, Triple);
+ std::string Arch = riscv::getRISCVArch(Args, Triple);
auto ISAInfo = llvm::RISCVISAInfo::parseArchString(
Arch, /*EnableExperimentalExtensions*/ true);
// Ignore parsing error.
@@ -2106,7 +2186,7 @@ void Clang::AddRISCVTargetArgs(const ArgList &Args,
// If the value is "zvl", use MinVLen from march. Otherwise, try to parse
// as integer as long as we have a MinVLen.
unsigned Bits = 0;
- if (Val.equals("zvl") && MinVLen >= llvm::RISCV::RVVBitsPerBlock) {
+ if (Val == "zvl" && MinVLen >= llvm::RISCV::RVVBitsPerBlock) {
Bits = MinVLen;
} else if (!Val.getAsInteger(10, Bits)) {
// Only accept power of 2 values beteen RVVBitsPerBlock and 65536 that
@@ -2123,7 +2203,7 @@ void Clang::AddRISCVTargetArgs(const ArgList &Args,
Args.MakeArgString("-mvscale-max=" + llvm::Twine(VScaleMin)));
CmdArgs.push_back(
Args.MakeArgString("-mvscale-min=" + llvm::Twine(VScaleMin)));
- } else if (!Val.equals("scalable")) {
+ } else if (Val != "scalable") {
// Handle the unsupported values passed to mrvv-vector-bits.
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getSpelling() << Val;
@@ -2454,8 +2534,16 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
const ArgList &Args,
ArgStringList &CmdArgs,
const Driver &D) {
- if (UseRelaxAll(C, Args))
- CmdArgs.push_back("-mrelax-all");
+ // Default to -mno-relax-all.
+ //
+ // Note: RISC-V requires an indirect jump for offsets larger than 1MiB. This
+ // cannot be done by assembler branch relaxation as it needs a free temporary
+ // register. Because of this, branch relaxation is handled by a MachineIR pass
+ // before the assembler. Forcing assembler branch relaxation for -O0 makes the
+ // MachineIR branch relaxation inaccurate and it will miss cases where an
+ // indirect branch is necessary.
+ Args.addOptInFlag(CmdArgs, options::OPT_mrelax_all,
+ options::OPT_mno_relax_all);
// Only default to -mincremental-linker-compatible if we think we are
// targeting the MSVC linker.
@@ -2481,6 +2569,8 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
// arg after parsing the '-I' arg.
bool TakeNextArg = false;
+ const llvm::Triple &Triple = C.getDefaultToolChain().getTriple();
+ bool Crel = false, ExperimentalCrel = false;
bool UseRelaxRelocations = C.getDefaultToolChain().useRelaxRelocations();
bool UseNoExecStack = false;
const char *MipsTargetFeature = nullptr;
@@ -2521,6 +2611,13 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
switch (C.getDefaultToolChain().getArch()) {
default:
break;
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ if (Value == "-msse2avx") {
+ CmdArgs.push_back("-msse2avx");
+ continue;
+ }
+ break;
case llvm::Triple::wasm32:
case llvm::Triple::wasm64:
if (Value == "--no-type-check") {
@@ -2604,6 +2701,12 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
Value == "-nocompress-debug-sections" ||
Value == "--nocompress-debug-sections") {
CmdArgs.push_back(Value.data());
+ } else if (Value == "--crel") {
+ Crel = true;
+ } else if (Value == "--no-crel") {
+ Crel = false;
+ } else if (Value == "--allow-experimental-crel") {
+ ExperimentalCrel = true;
} else if (Value == "-mrelax-relocations=yes" ||
Value == "--mrelax-relocations=yes") {
UseRelaxRelocations = true;
@@ -2629,7 +2732,7 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
} else if (Value.starts_with("-mcpu") || Value.starts_with("-mfpu") ||
Value.starts_with("-mhwdiv") || Value.starts_with("-march")) {
// Do nothing, we'll validate it later.
- } else if (Value == "-defsym") {
+ } else if (Value == "-defsym" || Value == "--defsym") {
if (A->getNumValues() != 2) {
D.Diag(diag::err_drv_defsym_invalid_format) << Value;
break;
@@ -2648,7 +2751,7 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
D.Diag(diag::err_drv_defsym_invalid_symval) << SVal;
break;
}
- CmdArgs.push_back(Value.data());
+ CmdArgs.push_back("--defsym");
TakeNextArg = true;
} else if (Value == "-fdebug-compilation-dir") {
CmdArgs.push_back("-fdebug-compilation-dir");
@@ -2669,6 +2772,16 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
}
if (ImplicitIt.size())
AddARMImplicitITArgs(Args, CmdArgs, ImplicitIt);
+ if (Crel) {
+ if (!ExperimentalCrel)
+ D.Diag(diag::err_drv_experimental_crel);
+ if (Triple.isOSBinFormatELF() && !Triple.isMIPS()) {
+ CmdArgs.push_back("--crel");
+ } else {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << "-Wa,--crel" << D.getTargetTriple();
+ }
+ }
if (!UseRelaxRelocations)
CmdArgs.push_back("-mrelax-relocations=no");
if (UseNoExecStack)
@@ -2689,45 +2802,43 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
}
}
-static StringRef EnumComplexRangeToStr(LangOptions::ComplexRangeKind Range) {
- StringRef RangeStr = "";
+static std::string ComplexRangeKindToStr(LangOptions::ComplexRangeKind Range) {
switch (Range) {
- case LangOptions::ComplexRangeKind::CX_Limited:
- return "-fcx-limited-range";
+ case LangOptions::ComplexRangeKind::CX_Full:
+ return "full";
break;
- case LangOptions::ComplexRangeKind::CX_Fortran:
- return "-fcx-fortran-rules";
+ case LangOptions::ComplexRangeKind::CX_Basic:
+ return "basic";
break;
- default:
- return RangeStr;
+ case LangOptions::ComplexRangeKind::CX_Improved:
+ return "improved";
+ break;
+ case LangOptions::ComplexRangeKind::CX_Promoted:
+ return "promoted";
break;
+ default:
+ return "";
}
}
-static void EmitComplexRangeDiag(const Driver &D,
- LangOptions::ComplexRangeKind Range1,
- LangOptions::ComplexRangeKind Range2) {
- if (Range1 != LangOptions::ComplexRangeKind::CX_Full)
- D.Diag(clang::diag::warn_drv_overriding_option)
- << EnumComplexRangeToStr(Range1) << EnumComplexRangeToStr(Range2);
+static std::string ComplexArithmeticStr(LangOptions::ComplexRangeKind Range) {
+ return (Range == LangOptions::ComplexRangeKind::CX_None)
+ ? ""
+ : "-fcomplex-arithmetic=" + ComplexRangeKindToStr(Range);
+}
+
+static void EmitComplexRangeDiag(const Driver &D, std::string str1,
+ std::string str2) {
+ if ((str1.compare(str2) != 0) && !str2.empty() && !str1.empty()) {
+ D.Diag(clang::diag::warn_drv_overriding_option) << str1 << str2;
+ }
}
static std::string
RenderComplexRangeOption(LangOptions::ComplexRangeKind Range) {
- std::string ComplexRangeStr = "-complex-range=";
- switch (Range) {
- case LangOptions::ComplexRangeKind::CX_Full:
- ComplexRangeStr += "full";
- break;
- case LangOptions::ComplexRangeKind::CX_Limited:
- ComplexRangeStr += "limited";
- break;
- case LangOptions::ComplexRangeKind::CX_Fortran:
- ComplexRangeStr += "fortran";
- break;
- default:
- assert(0 && "Unexpected range option");
- }
+ std::string ComplexRangeStr = ComplexRangeKindToStr(Range);
+ if (!ComplexRangeStr.empty())
+ return "-complex-range=" + ComplexRangeStr;
return ComplexRangeStr;
}
@@ -2752,20 +2863,17 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
bool TrappingMathPresent = false; // Is trapping-math in args, and not
// overriden by ffp-exception-behavior?
bool RoundingFPMath = false;
- bool RoundingMathPresent = false; // Is rounding-math in args?
// -ffp-model values: strict, fast, precise
StringRef FPModel = "";
// -ffp-exception-behavior options: strict, maytrap, ignore
StringRef FPExceptionBehavior = "";
// -ffp-eval-method options: double, extended, source
StringRef FPEvalMethod = "";
- const llvm::DenormalMode DefaultDenormalFPMath =
+ llvm::DenormalMode DenormalFPMath =
TC.getDefaultDenormalModeForType(Args, JA);
- const llvm::DenormalMode DefaultDenormalFP32Math =
+ llvm::DenormalMode DenormalFP32Math =
TC.getDefaultDenormalModeForType(Args, JA, &llvm::APFloat::IEEEsingle());
- llvm::DenormalMode DenormalFPMath = DefaultDenormalFPMath;
- llvm::DenormalMode DenormalFP32Math = DefaultDenormalFP32Math;
// CUDA and HIP don't rely on the frontend to pass an ffp-contract option.
// If one wasn't given by the user, don't pass it here.
StringRef FPContract;
@@ -2779,6 +2887,37 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
StringRef BFloat16ExcessPrecision = "";
LangOptions::ComplexRangeKind Range = LangOptions::ComplexRangeKind::CX_None;
std::string ComplexRangeStr = "";
+ std::string GccRangeComplexOption = "";
+
+ // Lambda to set fast-math options. This is also used by -ffp-model=fast
+ auto applyFastMath = [&]() {
+ HonorINFs = false;
+ HonorNaNs = false;
+ MathErrno = false;
+ AssociativeMath = true;
+ ReciprocalMath = true;
+ ApproxFunc = true;
+ SignedZeros = false;
+ TrappingMath = false;
+ RoundingFPMath = false;
+ FPExceptionBehavior = "";
+ // If fast-math is set then set the fp-contract mode to fast.
+ FPContract = "fast";
+ // ffast-math enables basic range rules for complex multiplication and
+ // division.
+ // Warn if user expects to perform full implementation of complex
+ // multiplication or division in the presence of nan or ninf flags.
+ if (Range == LangOptions::ComplexRangeKind::CX_Full ||
+ Range == LangOptions::ComplexRangeKind::CX_Improved ||
+ Range == LangOptions::ComplexRangeKind::CX_Promoted)
+ EmitComplexRangeDiag(
+ D, ComplexArithmeticStr(Range),
+ !GccRangeComplexOption.empty()
+ ? GccRangeComplexOption
+ : ComplexArithmeticStr(LangOptions::ComplexRangeKind::CX_Basic));
+ Range = LangOptions::ComplexRangeKind::CX_Basic;
+ SeenUnsafeMathModeOption = true;
+ };
if (const Arg *A = Args.getLastArg(options::OPT_flimited_precision_EQ)) {
CmdArgs.push_back("-mlimit-float-precision");
@@ -2786,29 +2925,91 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
}
for (const Arg *A : Args) {
- auto optID = A->getOption().getID();
- bool PreciseFPModel = false;
- switch (optID) {
- default:
- break;
- case options::OPT_fcx_limited_range: {
- EmitComplexRangeDiag(D, Range, LangOptions::ComplexRangeKind::CX_Limited);
- Range = LangOptions::ComplexRangeKind::CX_Limited;
+ switch (A->getOption().getID()) {
+ // If this isn't an FP option skip the claim below
+ default: continue;
+
+ case options::OPT_fcx_limited_range:
+ if (GccRangeComplexOption.empty()) {
+ if (Range != LangOptions::ComplexRangeKind::CX_Basic)
+ EmitComplexRangeDiag(D, RenderComplexRangeOption(Range),
+ "-fcx-limited-range");
+ } else {
+ if (GccRangeComplexOption != "-fno-cx-limited-range")
+ EmitComplexRangeDiag(D, GccRangeComplexOption, "-fcx-limited-range");
+ }
+ GccRangeComplexOption = "-fcx-limited-range";
+ Range = LangOptions::ComplexRangeKind::CX_Basic;
break;
- }
case options::OPT_fno_cx_limited_range:
- EmitComplexRangeDiag(D, Range, LangOptions::ComplexRangeKind::CX_Full);
+ if (GccRangeComplexOption.empty()) {
+ EmitComplexRangeDiag(D, RenderComplexRangeOption(Range),
+ "-fno-cx-limited-range");
+ } else {
+ if (GccRangeComplexOption.compare("-fcx-limited-range") != 0 &&
+ GccRangeComplexOption.compare("-fno-cx-fortran-rules") != 0)
+ EmitComplexRangeDiag(D, GccRangeComplexOption,
+ "-fno-cx-limited-range");
+ }
+ GccRangeComplexOption = "-fno-cx-limited-range";
Range = LangOptions::ComplexRangeKind::CX_Full;
break;
- case options::OPT_fcx_fortran_rules: {
- EmitComplexRangeDiag(D, Range, LangOptions::ComplexRangeKind::CX_Fortran);
- Range = LangOptions::ComplexRangeKind::CX_Fortran;
+ case options::OPT_fcx_fortran_rules:
+ if (GccRangeComplexOption.empty())
+ EmitComplexRangeDiag(D, RenderComplexRangeOption(Range),
+ "-fcx-fortran-rules");
+ else
+ EmitComplexRangeDiag(D, GccRangeComplexOption, "-fcx-fortran-rules");
+ GccRangeComplexOption = "-fcx-fortran-rules";
+ Range = LangOptions::ComplexRangeKind::CX_Improved;
break;
- }
case options::OPT_fno_cx_fortran_rules:
- EmitComplexRangeDiag(D, Range, LangOptions::ComplexRangeKind::CX_Full);
+ if (GccRangeComplexOption.empty()) {
+ EmitComplexRangeDiag(D, RenderComplexRangeOption(Range),
+ "-fno-cx-fortran-rules");
+ } else {
+ if (GccRangeComplexOption != "-fno-cx-limited-range")
+ EmitComplexRangeDiag(D, GccRangeComplexOption,
+ "-fno-cx-fortran-rules");
+ }
+ GccRangeComplexOption = "-fno-cx-fortran-rules";
Range = LangOptions::ComplexRangeKind::CX_Full;
break;
+ case options::OPT_fcomplex_arithmetic_EQ: {
+ LangOptions::ComplexRangeKind RangeVal;
+ StringRef Val = A->getValue();
+ if (Val == "full")
+ RangeVal = LangOptions::ComplexRangeKind::CX_Full;
+ else if (Val == "improved")
+ RangeVal = LangOptions::ComplexRangeKind::CX_Improved;
+ else if (Val == "promoted")
+ RangeVal = LangOptions::ComplexRangeKind::CX_Promoted;
+ else if (Val == "basic")
+ RangeVal = LangOptions::ComplexRangeKind::CX_Basic;
+ else {
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << Val;
+ break;
+ }
+ if (!GccRangeComplexOption.empty()) {
+ if (GccRangeComplexOption.compare("-fcx-limited-range") != 0) {
+ if (GccRangeComplexOption.compare("-fcx-fortran-rules") != 0) {
+ if (RangeVal != LangOptions::ComplexRangeKind::CX_Improved)
+ EmitComplexRangeDiag(D, GccRangeComplexOption,
+ ComplexArithmeticStr(RangeVal));
+ } else {
+ EmitComplexRangeDiag(D, GccRangeComplexOption,
+ ComplexArithmeticStr(RangeVal));
+ }
+ } else {
+ if (RangeVal != LangOptions::ComplexRangeKind::CX_Basic)
+ EmitComplexRangeDiag(D, GccRangeComplexOption,
+ ComplexArithmeticStr(RangeVal));
+ }
+ }
+ Range = RangeVal;
+ break;
+ }
case options::OPT_ffp_model_EQ: {
// If -ffp-model= is seen, reset to fno-fast-math
HonorINFs = true;
@@ -2819,56 +3020,38 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
AssociativeMath = false;
ReciprocalMath = false;
SignedZeros = true;
- // -fno_fast_math restores default denormal and fpcontract handling
FPContract = "on";
- DenormalFPMath = llvm::DenormalMode::getIEEE();
-
- // FIXME: The target may have picked a non-IEEE default mode here based on
- // -cl-denorms-are-zero. Should the target consider -fp-model interaction?
- DenormalFP32Math = llvm::DenormalMode::getIEEE();
StringRef Val = A->getValue();
- if (OFastEnabled && !Val.equals("fast")) {
- // Only -ffp-model=fast is compatible with OFast, ignore.
+ if (OFastEnabled && Val != "fast") {
+ // Only -ffp-model=fast is compatible with OFast, ignore.
D.Diag(clang::diag::warn_drv_overriding_option)
<< Args.MakeArgString("-ffp-model=" + Val) << "-Ofast";
break;
}
StrictFPModel = false;
- PreciseFPModel = true;
- // ffp-model= is a Driver option, it is entirely rewritten into more
- // granular options before being passed into cc1.
- // Use the gcc option in the switch below.
- if (!FPModel.empty() && !FPModel.equals(Val))
+ if (!FPModel.empty() && FPModel != Val)
D.Diag(clang::diag::warn_drv_overriding_option)
<< Args.MakeArgString("-ffp-model=" + FPModel)
<< Args.MakeArgString("-ffp-model=" + Val);
- if (Val.equals("fast")) {
- optID = options::OPT_ffast_math;
+ if (Val == "fast") {
FPModel = Val;
- FPContract = "fast";
- } else if (Val.equals("precise")) {
- optID = options::OPT_ffp_contract;
+ applyFastMath();
+ } else if (Val == "precise") {
FPModel = Val;
FPContract = "on";
- PreciseFPModel = true;
- } else if (Val.equals("strict")) {
+ } else if (Val == "strict") {
StrictFPModel = true;
- optID = options::OPT_frounding_math;
FPExceptionBehavior = "strict";
FPModel = Val;
FPContract = "off";
TrappingMath = true;
+ RoundingFPMath = true;
} else
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getSpelling() << Val;
break;
}
- }
-
- switch (optID) {
- // If this isn't an FP option skip the claim below
- default: continue;
// Options controlling individual features
case options::OPT_fhonor_infinities: HonorINFs = true; break;
@@ -2887,7 +3070,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
case options::OPT_fno_signed_zeros: SignedZeros = false; break;
case options::OPT_ftrapping_math:
if (!TrappingMathPresent && !FPExceptionBehavior.empty() &&
- !FPExceptionBehavior.equals("strict"))
+ FPExceptionBehavior != "strict")
// Warn that previous value of option is overridden.
D.Diag(clang::diag::warn_drv_overriding_option)
<< Args.MakeArgString("-ffp-exception-behavior=" +
@@ -2899,7 +3082,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
break;
case options::OPT_fno_trapping_math:
if (!TrappingMathPresent && !FPExceptionBehavior.empty() &&
- !FPExceptionBehavior.equals("ignore"))
+ FPExceptionBehavior != "ignore")
// Warn that previous value of option is overridden.
D.Diag(clang::diag::warn_drv_overriding_option)
<< Args.MakeArgString("-ffp-exception-behavior=" +
@@ -2912,12 +3095,10 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
case options::OPT_frounding_math:
RoundingFPMath = true;
- RoundingMathPresent = true;
break;
case options::OPT_fno_rounding_math:
RoundingFPMath = false;
- RoundingMathPresent = false;
break;
case options::OPT_fdenormal_fp_math_EQ:
@@ -2940,13 +3121,8 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
// Validate and pass through -ffp-contract option.
case options::OPT_ffp_contract: {
StringRef Val = A->getValue();
- if (PreciseFPModel) {
- // -ffp-model=precise enables ffp-contract=on.
- // -ffp-model=precise sets PreciseFPModel to on and Val to
- // "precise". FPContract is set.
- ;
- } else if (Val.equals("fast") || Val.equals("on") || Val.equals("off") ||
- Val.equals("fast-honor-pragmas")) {
+ if (Val == "fast" || Val == "on" || Val == "off" ||
+ Val == "fast-honor-pragmas") {
FPContract = Val;
LastSeenFfpContractOption = Val;
} else
@@ -2955,27 +3131,20 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
break;
}
- // Validate and pass through -ffp-model option.
- case options::OPT_ffp_model_EQ:
- // This should only occur in the error case
- // since the optID has been replaced by a more granular
- // floating point option.
- break;
-
// Validate and pass through -ffp-exception-behavior option.
case options::OPT_ffp_exception_behavior_EQ: {
StringRef Val = A->getValue();
if (!TrappingMathPresent && !FPExceptionBehavior.empty() &&
- !FPExceptionBehavior.equals(Val))
+ FPExceptionBehavior != Val)
// Warn that previous value of option is overridden.
D.Diag(clang::diag::warn_drv_overriding_option)
<< Args.MakeArgString("-ffp-exception-behavior=" +
FPExceptionBehavior)
<< Args.MakeArgString("-ffp-exception-behavior=" + Val);
TrappingMath = TrappingMathPresent = false;
- if (Val.equals("ignore") || Val.equals("maytrap"))
+ if (Val == "ignore" || Val == "maytrap")
FPExceptionBehavior = Val;
- else if (Val.equals("strict")) {
+ else if (Val == "strict") {
FPExceptionBehavior = Val;
TrappingMath = TrappingMathPresent = true;
} else
@@ -2987,8 +3156,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
// Validate and pass through -ffp-eval-method option.
case options::OPT_ffp_eval_method_EQ: {
StringRef Val = A->getValue();
- if (Val.equals("double") || Val.equals("extended") ||
- Val.equals("source"))
+ if (Val == "double" || Val == "extended" || Val == "source")
FPEvalMethod = Val;
else
D.Diag(diag::err_drv_unsupported_option_argument)
@@ -3000,18 +3168,18 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
StringRef Val = A->getValue();
const llvm::Triple::ArchType Arch = TC.getArch();
if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64) {
- if (Val.equals("standard") || Val.equals("fast"))
+ if (Val == "standard" || Val == "fast")
Float16ExcessPrecision = Val;
// To make it GCC compatible, allow the value of "16" which
// means disable excess precision, the same meaning than clang's
// equivalent value "none".
- else if (Val.equals("16"))
+ else if (Val == "16")
Float16ExcessPrecision = "none";
else
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getSpelling() << Val;
} else {
- if (!(Val.equals("standard") || Val.equals("fast")))
+ if (!(Val == "standard" || Val == "fast"))
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getSpelling() << Val;
}
@@ -3042,12 +3210,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
ReciprocalMath = false;
SignedZeros = true;
ApproxFunc = false;
- TrappingMath = true;
- FPExceptionBehavior = "strict";
- // The target may have opted to flush by default, so force IEEE.
- DenormalFPMath = llvm::DenormalMode::getIEEE();
- DenormalFP32Math = llvm::DenormalMode::getIEEE();
if (!JA.isDeviceOffloading(Action::OFK_Cuda) &&
!JA.isOffloading(Action::OFK_HIP)) {
if (LastSeenFfpContractOption != "") {
@@ -3063,22 +3226,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
continue;
[[fallthrough]];
case options::OPT_ffast_math: {
- HonorINFs = false;
- HonorNaNs = false;
- MathErrno = false;
- AssociativeMath = true;
- ReciprocalMath = true;
- ApproxFunc = true;
- SignedZeros = false;
- TrappingMath = false;
- RoundingFPMath = false;
- FPExceptionBehavior = "";
- // If fast-math is set then set the fp-contract mode to fast.
- FPContract = "fast";
- SeenUnsafeMathModeOption = true;
- // ffast-math enables fortran rules for complex multiplication and
- // division.
- Range = LangOptions::ComplexRangeKind::CX_Limited;
+ applyFastMath();
break;
}
case options::OPT_fno_fast_math:
@@ -3092,9 +3240,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
ReciprocalMath = false;
ApproxFunc = false;
SignedZeros = true;
- // -fno_fast_math restores default denormal and fpcontract handling
- DenormalFPMath = DefaultDenormalFPMath;
- DenormalFP32Math = llvm::DenormalMode::getIEEE();
+ // -fno_fast_math restores default fpcontract handling
if (!JA.isDeviceOffloading(Action::OFK_Cuda) &&
!JA.isOffloading(Action::OFK_HIP)) {
if (LastSeenFfpContractOption != "") {
@@ -3104,14 +3250,18 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
}
break;
}
+ // The StrictFPModel local variable is needed to report warnings
+ // in the way we intend. If -ffp-model=strict has been used, we
+ // want to report a warning for the next option encountered that
+ // takes us out of the settings described by fp-model=strict, but
+ // we don't want to continue issuing warnings for other conflicting
+ // options after that.
if (StrictFPModel) {
// If -ffp-model=strict has been specified on command line but
// subsequent options conflict then emit warning diagnostic.
if (HonorINFs && HonorNaNs && !AssociativeMath && !ReciprocalMath &&
SignedZeros && TrappingMath && RoundingFPMath && !ApproxFunc &&
- DenormalFPMath == llvm::DenormalMode::getIEEE() &&
- DenormalFP32Math == llvm::DenormalMode::getIEEE() &&
- FPContract.equals("off"))
+ FPContract == "off")
// OK: Current Arg doesn't conflict with -ffp-model=strict
;
else {
@@ -3157,7 +3307,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
if (TrappingMath) {
// FP Exception Behavior is also set to strict
- assert(FPExceptionBehavior.equals("strict"));
+ assert(FPExceptionBehavior == "strict");
}
// The default is IEEE.
@@ -3179,11 +3329,10 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
if (!FPContract.empty())
CmdArgs.push_back(Args.MakeArgString("-ffp-contract=" + FPContract));
- if (!RoundingFPMath)
- CmdArgs.push_back(Args.MakeArgString("-fno-rounding-math"));
-
- if (RoundingFPMath && RoundingMathPresent)
+ if (RoundingFPMath)
CmdArgs.push_back(Args.MakeArgString("-frounding-math"));
+ else
+ CmdArgs.push_back(Args.MakeArgString("-fno-rounding-math"));
if (!FPExceptionBehavior.empty())
CmdArgs.push_back(Args.MakeArgString("-ffp-exception-behavior=" +
@@ -3207,8 +3356,8 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
if (!HonorINFs && !HonorNaNs && !MathErrno && AssociativeMath && ApproxFunc &&
ReciprocalMath && !SignedZeros && !TrappingMath && !RoundingFPMath) {
CmdArgs.push_back("-ffast-math");
- if (FPModel.equals("fast")) {
- if (FPContract.equals("fast"))
+ if (FPModel == "fast") {
+ if (FPContract == "fast")
// All set, do nothing.
;
else if (FPContract.empty())
@@ -3237,8 +3386,12 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
if (Range != LangOptions::ComplexRangeKind::CX_None)
ComplexRangeStr = RenderComplexRangeOption(Range);
- if (!ComplexRangeStr.empty())
+ if (!ComplexRangeStr.empty()) {
CmdArgs.push_back(Args.MakeArgString(ComplexRangeStr));
+ if (Args.hasArg(options::OPT_fcomplex_arithmetic_EQ))
+ CmdArgs.push_back(Args.MakeArgString("-fcomplex-arithmetic=" +
+ ComplexRangeKindToStr(Range)));
+ }
if (Args.hasArg(options::OPT_fcx_limited_range))
CmdArgs.push_back("-fcx-limited-range");
if (Args.hasArg(options::OPT_fcx_fortran_rules))
@@ -3607,7 +3760,6 @@ static void RenderHLSLOptions(const ArgList &Args, ArgStringList &CmdArgs,
const unsigned ForwardedArguments[] = {options::OPT_dxil_validator_version,
options::OPT_D,
options::OPT_I,
- options::OPT_S,
options::OPT_O,
options::OPT_emit_llvm,
options::OPT_emit_obj,
@@ -3758,6 +3910,24 @@ bool Driver::getDefaultModuleCachePath(SmallVectorImpl<char> &Result) {
return false;
}
+llvm::SmallString<256>
+clang::driver::tools::getCXX20NamedModuleOutputPath(const ArgList &Args,
+ const char *BaseInput) {
+ if (Arg *ModuleOutputEQ = Args.getLastArg(options::OPT_fmodule_output_EQ))
+ return StringRef(ModuleOutputEQ->getValue());
+
+ SmallString<256> OutputPath;
+ if (Arg *FinalOutput = Args.getLastArg(options::OPT_o);
+ FinalOutput && Args.hasArg(options::OPT_c))
+ OutputPath = FinalOutput->getValue();
+ else
+ OutputPath = BaseInput;
+
+ const char *Extension = types::getTypeTempSuffix(types::TY_ModuleFile);
+ llvm::sys::path::replace_extension(OutputPath, Extension);
+ return OutputPath;
+}
+
static bool RenderModulesOptions(Compilation &C, const Driver &D,
const ArgList &Args, const InputInfo &Input,
const InputInfo &Output, bool HaveStd20,
@@ -3946,9 +4116,36 @@ static bool RenderModulesOptions(Compilation &C, const Driver &D,
// module fragment.
CmdArgs.push_back("-fskip-odr-check-in-gmf");
- // Claim `-fmodule-output` and `-fmodule-output=` to avoid unused warnings.
- Args.ClaimAllArgs(options::OPT_fmodule_output);
- Args.ClaimAllArgs(options::OPT_fmodule_output_EQ);
+ if (Args.hasArg(options::OPT_modules_reduced_bmi) &&
+ (Input.getType() == driver::types::TY_CXXModule ||
+ Input.getType() == driver::types::TY_PP_CXXModule)) {
+ CmdArgs.push_back("-fexperimental-modules-reduced-bmi");
+
+ if (Args.hasArg(options::OPT_fmodule_output_EQ))
+ Args.AddLastArg(CmdArgs, options::OPT_fmodule_output_EQ);
+ else
+ CmdArgs.push_back(Args.MakeArgString(
+ "-fmodule-output=" +
+ getCXX20NamedModuleOutputPath(Args, Input.getBaseInput())));
+ }
+
+ // Noop if we see '-fexperimental-modules-reduced-bmi' with other translation
+ // units than module units. This is more user friendly to allow end users to
+ // enable this feature without asking for help from build systems.
+ Args.ClaimAllArgs(options::OPT_modules_reduced_bmi);
+
+ // We need to include the case the input file is a module file here.
+ // Since the default compilation model for C++ module interface unit will
+ // create temporary module file and compile the temporary module file
+ // to get the object file. Then the `-fmodule-output` flag will be
+ // brought to the second compilation process. So we have to claim it for
+ // the case too.
+ if (Input.getType() == driver::types::TY_CXXModule ||
+ Input.getType() == driver::types::TY_PP_CXXModule ||
+ Input.getType() == driver::types::TY_ModuleFile) {
+ Args.ClaimAllArgs(options::OPT_fmodule_output);
+ Args.ClaimAllArgs(options::OPT_fmodule_output_EQ);
+ }
return HaveModules;
}
@@ -4476,14 +4673,20 @@ renderDebugOptions(const ToolChain &TC, const Driver &D, const llvm::Triple &T,
Args.getLastArg(options::OPT_ggnu_pubnames, options::OPT_gno_gnu_pubnames,
options::OPT_gpubnames, options::OPT_gno_pubnames);
if (DwarfFission != DwarfFissionKind::None ||
- (PubnamesArg && checkDebugInfoOption(PubnamesArg, Args, D, TC)))
- if (!PubnamesArg ||
- (!PubnamesArg->getOption().matches(options::OPT_gno_gnu_pubnames) &&
- !PubnamesArg->getOption().matches(options::OPT_gno_pubnames)))
+ (PubnamesArg && checkDebugInfoOption(PubnamesArg, Args, D, TC))) {
+ const bool OptionSet =
+ (PubnamesArg &&
+ (PubnamesArg->getOption().matches(options::OPT_gpubnames) ||
+ PubnamesArg->getOption().matches(options::OPT_ggnu_pubnames)));
+ if ((DebuggerTuning != llvm::DebuggerKind::LLDB || OptionSet) &&
+ (!PubnamesArg ||
+ (!PubnamesArg->getOption().matches(options::OPT_gno_gnu_pubnames) &&
+ !PubnamesArg->getOption().matches(options::OPT_gno_pubnames))))
CmdArgs.push_back(PubnamesArg && PubnamesArg->getOption().matches(
options::OPT_gpubnames)
? "-gpubnames"
: "-ggnu-pubnames");
+ }
const auto *SimpleTemplateNamesArg =
Args.getLastArg(options::OPT_gsimple_template_names,
options::OPT_gno_simple_template_names);
@@ -4497,6 +4700,21 @@ renderDebugOptions(const ToolChain &TC, const Driver &D, const llvm::Triple &T,
}
}
+ // Emit DW_TAG_template_alias for template aliases? True by default for SCE.
+ bool UseDebugTemplateAlias =
+ DebuggerTuning == llvm::DebuggerKind::SCE && RequestedDWARFVersion >= 4;
+ if (const auto *DebugTemplateAlias = Args.getLastArg(
+ options::OPT_gtemplate_alias, options::OPT_gno_template_alias)) {
+ // DW_TAG_template_alias is only supported from DWARFv5 but if a user
+ // asks for it we should let them have it (if the target supports it).
+ if (checkDebugInfoOption(DebugTemplateAlias, Args, D, TC)) {
+ const auto &Opt = DebugTemplateAlias->getOption();
+ UseDebugTemplateAlias = Opt.matches(options::OPT_gtemplate_alias);
+ }
+ }
+ if (UseDebugTemplateAlias)
+ CmdArgs.push_back("-gtemplate-alias");
+
if (const Arg *A = Args.getLastArg(options::OPT_gsrc_hash_EQ)) {
StringRef v = A->getValue();
CmdArgs.push_back(Args.MakeArgString("-gsrc-hash=" + v));
@@ -4519,6 +4737,7 @@ renderDebugOptions(const ToolChain &TC, const Driver &D, const llvm::Triple &T,
Args.addOptInFlag(CmdArgs, options::OPT_fforce_dwarf_frame,
options::OPT_fno_force_dwarf_frame);
+ bool EnableTypeUnits = false;
if (Args.hasFlag(options::OPT_fdebug_types_section,
options::OPT_fno_debug_types_section, false)) {
if (!(T.isOSBinFormatELF() || T.isOSBinFormatWasm())) {
@@ -4529,11 +4748,24 @@ renderDebugOptions(const ToolChain &TC, const Driver &D, const llvm::Triple &T,
} else if (checkDebugInfoOption(
Args.getLastArg(options::OPT_fdebug_types_section), Args, D,
TC)) {
+ EnableTypeUnits = true;
CmdArgs.push_back("-mllvm");
CmdArgs.push_back("-generate-type-units");
}
}
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_gomit_unreferenced_methods,
+ options::OPT_gno_omit_unreferenced_methods))
+ (void)checkDebugInfoOption(A, Args, D, TC);
+ if (Args.hasFlag(options::OPT_gomit_unreferenced_methods,
+ options::OPT_gno_omit_unreferenced_methods, false) &&
+ (DebugInfoKind == llvm::codegenoptions::DebugInfoConstructor ||
+ DebugInfoKind == llvm::codegenoptions::LimitedDebugInfo) &&
+ !EnableTypeUnits) {
+ CmdArgs.push_back("-gomit-unreferenced-methods");
+ }
+
// To avoid join/split of directory+filename, the integrated assembler prefers
// the directory form of .file on all DWARF versions. GNU as doesn't allow the
// form before DWARF v5.
@@ -4583,7 +4815,7 @@ renderDebugOptions(const ToolChain &TC, const Driver &D, const llvm::Triple &T,
Output.getFilename());
}
-static void ProcessVSRuntimeLibrary(const ArgList &Args,
+static void ProcessVSRuntimeLibrary(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs) {
unsigned RTOptionID = options::OPT__SLASH_MT;
@@ -4646,6 +4878,12 @@ static void ProcessVSRuntimeLibrary(const ArgList &Args,
// implemented in clang.
CmdArgs.push_back("--dependent-lib=oldnames");
}
+
+ // All Arm64EC object files implicitly add softintrin.lib. This is necessary
+ // even if the file doesn't actually refer to any of the routines because
+ // the CRT itself has incomplete dependency markings.
+ if (TC.getTriple().isWindowsArm64EC())
+ CmdArgs.push_back("--dependent-lib=softintrin");
}
void Clang::ConstructJob(Compilation &C, const JobAction &JA,
@@ -4794,7 +5032,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(NormalizedTriple));
if (JA.isDeviceOffloading(Action::OFK_HIP) &&
- getToolChain().getTriple().isAMDGPU()) {
+ (getToolChain().getTriple().isAMDGPU() ||
+ (getToolChain().getTriple().isSPIRV() &&
+ getToolChain().getTriple().getVendor() == llvm::Triple::AMD))) {
// Device side compilation printf
if (Args.getLastArg(options::OPT_mprintf_kind_EQ)) {
CmdArgs.push_back(Args.MakeArgString(
@@ -4932,11 +5172,26 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
assert(JA.getType() == types::TY_API_INFO &&
"Extract API actions must generate a API information.");
CmdArgs.push_back("-extract-api");
+
+ if (Arg *PrettySGFArg = Args.getLastArg(options::OPT_emit_pretty_sgf))
+ PrettySGFArg->render(Args, CmdArgs);
+
+ Arg *SymbolGraphDirArg = Args.getLastArg(options::OPT_symbol_graph_dir_EQ);
+
if (Arg *ProductNameArg = Args.getLastArg(options::OPT_product_name_EQ))
ProductNameArg->render(Args, CmdArgs);
if (Arg *ExtractAPIIgnoresFileArg =
Args.getLastArg(options::OPT_extract_api_ignores_EQ))
ExtractAPIIgnoresFileArg->render(Args, CmdArgs);
+ if (Arg *EmitExtensionSymbolGraphs =
+ Args.getLastArg(options::OPT_emit_extension_symbol_graphs)) {
+ if (!SymbolGraphDirArg)
+ D.Diag(diag::err_drv_missing_symbol_graph_dir);
+
+ EmitExtensionSymbolGraphs->render(Args, CmdArgs);
+ }
+ if (SymbolGraphDirArg)
+ SymbolGraphDirArg->render(Args, CmdArgs);
} else {
assert((isa<CompileJobAction>(JA) || isa<BackendJobAction>(JA)) &&
"Invalid action for clang tool.");
@@ -5519,6 +5774,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
case CodeGenOptions::FramePointerKind::None:
FPKeepKindStr = "-mframe-pointer=none";
break;
+ case CodeGenOptions::FramePointerKind::Reserved:
+ FPKeepKindStr = "-mframe-pointer=reserved";
+ break;
case CodeGenOptions::FramePointerKind::NonLeaf:
FPKeepKindStr = "-mframe-pointer=non-leaf";
break;
@@ -5533,16 +5791,20 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_zero_initialized_in_bss);
bool OFastEnabled = isOptimizationLevelFast(Args);
+ if (OFastEnabled)
+ D.Diag(diag::warn_drv_deprecated_arg_ofast);
// If -Ofast is the optimization level, then -fstrict-aliasing should be
// enabled. This alias option is being used to simplify the hasFlag logic.
OptSpecifier StrictAliasingAliasOption =
OFastEnabled ? options::OPT_Ofast : options::OPT_fstrict_aliasing;
- // We turn strict aliasing off by default if we're in CL mode, since MSVC
+ // We turn strict aliasing off by default if we're Windows MSVC since MSVC
// doesn't do any TBAA.
- bool TBAAOnByDefault = !D.IsCLMode();
if (!Args.hasFlag(options::OPT_fstrict_aliasing, StrictAliasingAliasOption,
- options::OPT_fno_strict_aliasing, TBAAOnByDefault))
+ options::OPT_fno_strict_aliasing, !IsWindowsMSVC))
CmdArgs.push_back("-relaxed-aliasing");
+ if (Args.hasFlag(options::OPT_fpointer_tbaa, options::OPT_fno_pointer_tbaa,
+ false))
+ CmdArgs.push_back("-pointer-tbaa");
if (!Args.hasFlag(options::OPT_fstruct_path_tbaa,
options::OPT_fno_struct_path_tbaa, true))
CmdArgs.push_back("-no-struct-path-tbaa");
@@ -5570,6 +5832,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fexperimental_omit_vtable_rtti,
options::OPT_fno_experimental_omit_vtable_rtti);
+ Args.AddLastArg(CmdArgs, options::OPT_fdisable_block_signature_string,
+ options::OPT_fno_disable_block_signature_string);
+
// Handle segmented stacks.
Args.addOptInFlag(CmdArgs, options::OPT_fsplit_stack,
options::OPT_fno_split_stack);
@@ -5638,7 +5903,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
// If toolchain choose to use MCAsmParser for inline asm don't pass the
- // option to disable integrated-as explictly.
+ // option to disable integrated-as explicitly.
if (!TC.useIntegratedAs() && !TC.parseInlineAsmUsingAsmParser())
CmdArgs.push_back("-no-integrated-as");
@@ -5746,80 +6011,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
TC.addClangTargetOptions(Args, CmdArgs, JA.getOffloadingDeviceKind());
- if (Arg *A = Args.getLastArg(options::OPT_mcmodel_EQ)) {
- StringRef CM = A->getValue();
- bool Ok = false;
- if (Triple.isOSAIX() && CM == "medium")
- CM = "large";
- if (Triple.isAArch64(64)) {
- Ok = CM == "tiny" || CM == "small" || CM == "large";
- if (CM == "large" && RelocationModel != llvm::Reloc::Static)
- D.Diag(diag::err_drv_argument_only_allowed_with)
- << A->getAsString(Args) << "-fno-pic";
- } else if (Triple.isLoongArch()) {
- if (CM == "extreme" &&
- Args.hasFlagNoClaim(options::OPT_fplt, options::OPT_fno_plt, false))
- D.Diag(diag::err_drv_argument_not_allowed_with)
- << A->getAsString(Args) << "-fplt";
- Ok = CM == "normal" || CM == "medium" || CM == "extreme";
- // Convert to LLVM recognizable names.
- if (Ok)
- CM = llvm::StringSwitch<StringRef>(CM)
- .Case("normal", "small")
- .Case("extreme", "large")
- .Default(CM);
- } else if (Triple.isPPC64() || Triple.isOSAIX()) {
- Ok = CM == "small" || CM == "medium" || CM == "large";
- } else if (Triple.isRISCV()) {
- if (CM == "medlow")
- CM = "small";
- else if (CM == "medany")
- CM = "medium";
- Ok = CM == "small" || CM == "medium";
- } else if (Triple.getArch() == llvm::Triple::x86_64) {
- Ok = llvm::is_contained({"small", "kernel", "medium", "large", "tiny"},
- CM);
- } else if (Triple.isNVPTX() || Triple.isAMDGPU()) {
- // NVPTX/AMDGPU does not care about the code model and will accept
- // whatever works for the host.
- Ok = true;
- } else if (Triple.isSPARC64()) {
- if (CM == "medlow")
- CM = "small";
- else if (CM == "medmid")
- CM = "medium";
- else if (CM == "medany")
- CM = "large";
- Ok = CM == "small" || CM == "medium" || CM == "large";
- }
- if (Ok) {
- CmdArgs.push_back(Args.MakeArgString("-mcmodel=" + CM));
- } else {
- D.Diag(diag::err_drv_unsupported_option_argument_for_target)
- << A->getSpelling() << CM << TripleStr;
- }
- }
-
- if (Triple.getArch() == llvm::Triple::x86_64) {
- bool IsMediumCM = false;
- bool IsLargeCM = false;
- if (Arg *A = Args.getLastArg(options::OPT_mcmodel_EQ)) {
- IsMediumCM = StringRef(A->getValue()) == "medium";
- IsLargeCM = StringRef(A->getValue()) == "large";
- }
- if (Arg *A = Args.getLastArg(options::OPT_mlarge_data_threshold_EQ)) {
- if (!IsMediumCM && !IsLargeCM) {
- D.Diag(diag::warn_drv_large_data_threshold_invalid_code_model)
- << A->getOption().getRenderName();
- } else {
- A->render(Args, CmdArgs);
- }
- } else if (IsMediumCM) {
- CmdArgs.push_back("-mlarge-data-threshold=65536");
- } else if (IsLargeCM) {
- CmdArgs.push_back("-mlarge-data-threshold=0");
- }
- }
+ addMCModel(D, Args, Triple, RelocationModel, CmdArgs);
if (Arg *A = Args.getLastArg(options::OPT_mtls_size_EQ)) {
StringRef Value = A->getValue();
@@ -5954,6 +6146,17 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-ffunction-sections");
}
+ if (Arg *A = Args.getLastArg(options::OPT_fbasic_block_address_map,
+ options::OPT_fno_basic_block_address_map)) {
+ if ((Triple.isX86() || Triple.isAArch64()) && Triple.isOSBinFormatELF()) {
+ if (A->getOption().matches(options::OPT_fbasic_block_address_map))
+ A->render(Args, CmdArgs);
+ } else {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << TripleStr;
+ }
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_fbasic_block_sections_EQ)) {
StringRef Val = A->getValue();
if (Triple.isX86() && Triple.isOSBinFormatELF()) {
@@ -5963,6 +6166,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
<< A->getAsString(Args) << A->getValue();
else
A->render(Args, CmdArgs);
+ } else if (Triple.isAArch64() && Triple.isOSBinFormatELF()) {
+ // "all" is not supported on AArch64 since branch relaxation creates new
+ // basic blocks for some cross-section branches.
+ if (Val != "labels" && Val != "none" && !Val.starts_with("list="))
+ D.Diag(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ else
+ A->render(Args, CmdArgs);
} else if (Triple.isNVPTX()) {
// Do not pass the option to the GPU compilation. We still want it enabled
// for the host-side compilation, so seeing it here is not an error.
@@ -5982,6 +6193,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.addOptOutFlag(CmdArgs, options::OPT_funique_section_names,
options::OPT_fno_unique_section_names);
+ Args.addOptInFlag(CmdArgs, options::OPT_fseparate_named_sections,
+ options::OPT_fno_separate_named_sections);
Args.addOptInFlag(CmdArgs, options::OPT_funique_internal_linkage_names,
options::OPT_fno_unique_internal_linkage_names);
Args.addOptInFlag(CmdArgs, options::OPT_funique_basic_block_section_names,
@@ -6356,7 +6569,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (const Arg *A =
Args.getLastArg(options::OPT_fvisibility_global_new_delete_hidden)) {
D.Diag(diag::warn_drv_deprecated_arg)
- << A->getAsString(Args)
+ << A->getAsString(Args) << /*hasReplacement=*/true
<< "-fvisibility-global-new-delete=force-hidden";
}
@@ -6387,6 +6600,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fheinous_gnu_extensions);
Args.AddLastArg(CmdArgs, options::OPT_fdigraphs, options::OPT_fno_digraphs);
Args.AddLastArg(CmdArgs, options::OPT_fzero_call_used_regs_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_fraw_string_literals,
+ options::OPT_fno_raw_string_literals);
if (Args.hasFlag(options::OPT_femulated_tls, options::OPT_fno_emulated_tls,
Triple.hasDefaultEmulatedTLS()))
@@ -6515,7 +6730,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
StringRef S0 = A->getValue(), S = S0;
unsigned Size, Offset = 0;
if (!Triple.isAArch64() && !Triple.isLoongArch() && !Triple.isRISCV() &&
- !Triple.isX86())
+ !Triple.isX86() &&
+ !(!Triple.isOSAIX() && (Triple.getArch() == llvm::Triple::ppc ||
+ Triple.getArch() == llvm::Triple::ppc64)))
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getAsString(Args) << TripleStr;
else if (S.consumeInteger(10, Size) ||
@@ -6608,6 +6825,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (const char *Name = C.getTimeTraceFile(&JA)) {
CmdArgs.push_back(Args.MakeArgString("-ftime-trace=" + Twine(Name)));
Args.AddLastArg(CmdArgs, options::OPT_ftime_trace_granularity_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_ftime_trace_verbose);
}
if (Arg *A = Args.getLastArg(options::OPT_ftrapv_handler_EQ)) {
@@ -6628,11 +6846,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fwrapv");
}
- if (Arg *A = Args.getLastArg(options::OPT_freroll_loops,
- options::OPT_fno_reroll_loops))
- if (A->getOption().matches(options::OPT_freroll_loops))
- CmdArgs.push_back("-freroll-loops");
-
Args.AddLastArg(CmdArgs, options::OPT_ffinite_loops,
options::OPT_fno_finite_loops);
@@ -6866,12 +7079,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fms_compatibility, options::OPT_fno_ms_compatibility,
(IsWindowsMSVC && Args.hasFlag(options::OPT_fms_extensions,
options::OPT_fno_ms_extensions, true)));
- if (IsMSVCCompat)
+ if (IsMSVCCompat) {
CmdArgs.push_back("-fms-compatibility");
+ if (!types::isCXX(Input.getType()) &&
+ Args.hasArg(options::OPT_fms_define_stdc))
+ CmdArgs.push_back("-fms-define-stdc");
+ }
if (Triple.isWindowsMSVCEnvironment() && !D.IsCLMode() &&
Args.hasArg(options::OPT_fms_runtime_lib_EQ))
- ProcessVSRuntimeLibrary(Args, CmdArgs);
+ ProcessVSRuntimeLibrary(getToolChain(), Args, CmdArgs);
// Handle -fgcc-version, if present.
VersionTuple GNUCVer;
@@ -6960,6 +7177,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
(!IsWindowsMSVC || IsMSVC2015Compatible)))
CmdArgs.push_back("-fno-threadsafe-statics");
+ // Add -fno-assumptions, if it was specified.
+ if (!Args.hasFlag(options::OPT_fassumptions, options::OPT_fno_assumptions,
+ true))
+ CmdArgs.push_back("-fno-assumptions");
+
// -fgnu-keywords default varies depending on language; only pass if
// specified.
Args.AddLastArg(CmdArgs, options::OPT_fgnu_keywords,
@@ -7083,16 +7305,24 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.addOptOutFlag(CmdArgs, options::OPT_fassume_unique_vtables,
options::OPT_fno_assume_unique_vtables);
- // -frelaxed-template-template-args is off by default, as it is a severe
- // breaking change until a corresponding change to template partial ordering
- // is provided.
- Args.addOptInFlag(CmdArgs, options::OPT_frelaxed_template_template_args,
- options::OPT_fno_relaxed_template_template_args);
+ // -frelaxed-template-template-args is deprecated.
+ if (Arg *A =
+ Args.getLastArg(options::OPT_frelaxed_template_template_args,
+ options::OPT_fno_relaxed_template_template_args)) {
+ if (A->getOption().matches(
+ options::OPT_fno_relaxed_template_template_args)) {
+ D.Diag(diag::warn_drv_deprecated_arg_no_relaxed_template_template_args);
+ CmdArgs.push_back("-fno-relaxed-template-template-args");
+ } else {
+ D.Diag(diag::warn_drv_deprecated_arg)
+ << A->getAsString(Args) << /*hasReplacement=*/false;
+ }
+ }
- // -fsized-deallocation is off by default, as it is an ABI-breaking change for
- // most platforms.
- Args.addOptInFlag(CmdArgs, options::OPT_fsized_deallocation,
- options::OPT_fno_sized_deallocation);
+ // -fsized-deallocation is on by default in C++14 onwards and otherwise off
+ // by default.
+ Args.addLastArg(CmdArgs, options::OPT_fsized_deallocation,
+ options::OPT_fno_sized_deallocation);
// -faligned-allocation is on by default in C++17 onwards and otherwise off
// by default.
@@ -7331,6 +7561,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.addOptInFlag(CmdArgs, options::OPT_fsafe_buffer_usage_suggestions,
options::OPT_fno_safe_buffer_usage_suggestions);
+ Args.addOptInFlag(CmdArgs, options::OPT_fexperimental_late_parse_attributes,
+ options::OPT_fno_experimental_late_parse_attributes);
+
// Setup statistics file output.
SmallString<128> StatsFile = getStatsFileName(Args, Output, Input, D);
if (!StatsFile.empty()) {
@@ -7664,26 +7897,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
addMachineOutlinerArgs(D, Args, CmdArgs, Triple, /*IsLTO=*/false);
- if (Arg *A = Args.getLastArg(options::OPT_moutline_atomics,
- options::OPT_mno_outline_atomics)) {
- // Option -moutline-atomics supported for AArch64 target only.
- if (!Triple.isAArch64()) {
- D.Diag(diag::warn_drv_moutline_atomics_unsupported_opt)
- << Triple.getArchName() << A->getOption().getName();
- } else {
- if (A->getOption().matches(options::OPT_moutline_atomics)) {
- CmdArgs.push_back("-target-feature");
- CmdArgs.push_back("+outline-atomics");
- } else {
- CmdArgs.push_back("-target-feature");
- CmdArgs.push_back("-outline-atomics");
- }
- }
- } else if (Triple.isAArch64() &&
- getToolChain().IsAArch64OutlineAtomicsDefault(Args)) {
- CmdArgs.push_back("-target-feature");
- CmdArgs.push_back("+outline-atomics");
- }
+ addOutlineAtomicsArgs(D, getToolChain(), Args, CmdArgs, Triple);
if (Triple.isAArch64() &&
(Args.hasArg(options::OPT_mno_fmv) ||
@@ -7954,18 +8168,26 @@ struct EHFlags {
/// The 'a' modifier is unimplemented and fundamentally hard in LLVM IR.
/// - c: Assume that extern "C" functions are implicitly nounwind.
/// The default is /EHs-c-, meaning cleanups are disabled.
-static EHFlags parseClangCLEHFlags(const Driver &D, const ArgList &Args) {
+static EHFlags parseClangCLEHFlags(const Driver &D, const ArgList &Args,
+ bool isWindowsMSVC) {
EHFlags EH;
std::vector<std::string> EHArgs =
Args.getAllArgValues(options::OPT__SLASH_EH);
- for (auto EHVal : EHArgs) {
+ for (const auto &EHVal : EHArgs) {
for (size_t I = 0, E = EHVal.size(); I != E; ++I) {
switch (EHVal[I]) {
case 'a':
EH.Asynch = maybeConsumeDash(EHVal, I);
- if (EH.Asynch)
+ if (EH.Asynch) {
+ // Async exceptions are Windows MSVC only.
+ if (!isWindowsMSVC) {
+ EH.Asynch = false;
+ D.Diag(clang::diag::warn_drv_unused_argument) << "/EHa" << EHVal;
+ continue;
+ }
EH.Synch = false;
+ }
continue;
case 'c':
EH.NoUnwindC = maybeConsumeDash(EHVal, I);
@@ -8004,7 +8226,7 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
ArgStringList &CmdArgs) const {
bool isNVPTX = getToolChain().getTriple().isNVPTX();
- ProcessVSRuntimeLibrary(Args, CmdArgs);
+ ProcessVSRuntimeLibrary(getToolChain(), Args, CmdArgs);
if (Arg *ShowIncludes =
Args.getLastArg(options::OPT__SLASH_showIncludes,
@@ -8029,7 +8251,8 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
const Driver &D = getToolChain().getDriver();
- EHFlags EH = parseClangCLEHFlags(D, Args);
+ bool IsWindowsMSVC = getToolChain().getTriple().isWindowsMSVCEnvironment();
+ EHFlags EH = parseClangCLEHFlags(D, Args, IsWindowsMSVC);
if (!isNVPTX && (EH.Synch || EH.Asynch)) {
if (types::isCXX(InputType))
CmdArgs.push_back("-fcxx-exceptions");
@@ -8317,6 +8540,9 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
// Pass along any -I options so we get proper .include search paths.
Args.AddAllArgs(CmdArgs, options::OPT_I_Group);
+ // Pass along any --embed-dir or similar options so we get proper embed paths.
+ Args.AddAllArgs(CmdArgs, options::OPT_embed_dir_EQ);
+
// Determine the original source input.
auto FindSource = [](const Action *S) -> const Action * {
while (S->getKind() != Action::InputClass) {
@@ -8335,6 +8561,32 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
WantDebug = !A->getOption().matches(options::OPT_g0) &&
!A->getOption().matches(options::OPT_ggdb0);
+ // If a -gdwarf argument appeared, remember it.
+ bool EmitDwarf = false;
+ if (const Arg *A = getDwarfNArg(Args))
+ EmitDwarf = checkDebugInfoOption(A, Args, D, getToolChain());
+
+ bool EmitCodeView = false;
+ if (const Arg *A = Args.getLastArg(options::OPT_gcodeview))
+ EmitCodeView = checkDebugInfoOption(A, Args, D, getToolChain());
+
+ // If the user asked for debug info but did not explicitly specify -gcodeview
+ // or -gdwarf, ask the toolchain for the default format.
+ if (!EmitCodeView && !EmitDwarf && WantDebug) {
+ switch (getToolChain().getDefaultDebugFormat()) {
+ case llvm::codegenoptions::DIF_CodeView:
+ EmitCodeView = true;
+ break;
+ case llvm::codegenoptions::DIF_DWARF:
+ EmitDwarf = true;
+ break;
+ }
+ }
+
+ // If the arguments don't imply DWARF, don't emit any debug info here.
+ if (!EmitDwarf)
+ WantDebug = false;
+
llvm::codegenoptions::DebugInfoKind DebugInfoKind =
llvm::codegenoptions::NoDebugInfo;
@@ -8452,6 +8704,14 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::riscv64:
AddRISCVTargetArgs(Args, CmdArgs);
break;
+
+ case llvm::Triple::hexagon:
+ if (Args.hasFlag(options::OPT_mdefault_build_attributes,
+ options::OPT_mno_default_build_attributes, true)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-hexagon-add-build-attributes");
+ }
+ break;
}
// Consume all the warning flags. Usually this would be handled more
@@ -8521,7 +8781,6 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
}
// Begin OffloadBundler
-
void OffloadBundler::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -8619,11 +8878,7 @@ void OffloadBundler::ConstructJob(Compilation &C, const JobAction &JA,
}
CmdArgs.push_back(TCArgs.MakeArgString(UB));
}
- if (TCArgs.hasFlag(options::OPT_offload_compress,
- options::OPT_no_offload_compress, false))
- CmdArgs.push_back("-compress");
- if (TCArgs.hasArg(options::OPT_v))
- CmdArgs.push_back("-verbose");
+ addOffloadCompressArgs(TCArgs, CmdArgs);
// All the inputs are encoded as commands.
C.addCommand(std::make_unique<Command>(
JA, *this, ResponseFileSupport::None(),
@@ -8889,10 +9144,11 @@ void LinkerWrapper::ConstructJob(Compilation &C, const JobAction &JA,
// Add the linker arguments to be forwarded by the wrapper.
CmdArgs.push_back(Args.MakeArgString(Twine("--linker-path=") +
LinkCommand->getExecutable()));
- CmdArgs.push_back("--");
for (const char *LinkArg : LinkCommand->getArguments())
CmdArgs.push_back(LinkArg);
+ addOffloadCompressArgs(Args, CmdArgs);
+
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("clang-linker-wrapper"));
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
index 0f503c4bd1c4..18f6c5ed06a5 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
@@ -193,6 +193,21 @@ DwarfFissionKind getDebugFissionKind(const Driver &D,
const llvm::opt::ArgList &Args,
llvm::opt::Arg *&Arg);
+// Calculate the output path of the module file when compiling a module unit
+// with the `-fmodule-output` option or `-fmodule-output=` option specified.
+// The behavior is:
+// - If `-fmodule-output=` is specified, then the module file is
+// written to the value.
+// - Otherwise if the output object file of the module unit is specified, the
+// output path
+// of the module file should be the same as the output object file except
+// the corresponding suffix. This requires both `-o` and `-c` are specified.
+// - Otherwise, the output path of the module file will be the same as the
+// input with the corresponding suffix.
+llvm::SmallString<256>
+getCXX20NamedModuleOutputPath(const llvm::opt::ArgList &Args,
+ const char *BaseInput);
+
} // end namespace tools
} // end namespace driver
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 2b916f000336..019df16a909f 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -78,19 +78,8 @@ static bool useFramePointerForTargetByDefault(const llvm::opt::ArgList &Args,
!Args.hasArg(clang::driver::options::OPT_mfentry))
return true;
- if (Triple.isAndroid()) {
- switch (Triple.getArch()) {
- case llvm::Triple::aarch64:
- case llvm::Triple::arm:
- case llvm::Triple::armeb:
- case llvm::Triple::thumb:
- case llvm::Triple::thumbeb:
- case llvm::Triple::riscv64:
- return true;
- default:
- break;
- }
- }
+ if (Triple.isAndroid())
+ return true;
switch (Triple.getArch()) {
case llvm::Triple::xcore:
@@ -114,6 +103,7 @@ static bool useFramePointerForTargetByDefault(const llvm::opt::ArgList &Args,
case llvm::Triple::csky:
case llvm::Triple::loongarch32:
case llvm::Triple::loongarch64:
+ case llvm::Triple::m68k:
return !clang::driver::tools::areOptimizationsEnabled(Args);
default:
break;
@@ -163,6 +153,14 @@ static bool useFramePointerForTargetByDefault(const llvm::opt::ArgList &Args,
return true;
}
+static bool useLeafFramePointerForTargetByDefault(const llvm::Triple &Triple) {
+ if (Triple.isAArch64() || Triple.isPS() || Triple.isVE() ||
+ (Triple.isAndroid() && !Triple.isARM()))
+ return false;
+
+ return true;
+}
+
static bool mustUseNonLeafFramePointerForTarget(const llvm::Triple &Triple) {
switch (Triple.getArch()) {
default:
@@ -175,38 +173,91 @@ static bool mustUseNonLeafFramePointerForTarget(const llvm::Triple &Triple) {
}
}
+// True if a target-specific option requires the frame chain to be preserved,
+// even if new frame records are not created.
+static bool mustMaintainValidFrameChain(const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple) {
+ if (Triple.isARM() || Triple.isThumb()) {
+ // For 32-bit Arm, the -mframe-chain=aapcs and -mframe-chain=aapcs+leaf
+ // options require the frame pointer register to be reserved (or point to a
+ // new AAPCS-compliant frame record), even with -fno-omit-frame-pointer.
+ if (Arg *A = Args.getLastArg(options::OPT_mframe_chain)) {
+ StringRef V = A->getValue();
+ return V != "none";
+ }
+ return false;
+ }
+ return false;
+}
+
+// True if a target-specific option causes -fno-omit-frame-pointer to also
+// cause frame records to be created in leaf functions.
+static bool framePointerImpliesLeafFramePointer(const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple) {
+ if (Triple.isARM() || Triple.isThumb()) {
+ // For 32-bit Arm, the -mframe-chain=aapcs+leaf option causes the
+ // -fno-omit-frame-pointer option to imply -mno-omit-leaf-frame-pointer,
+ // but does not by itself imply either option.
+ if (Arg *A = Args.getLastArg(options::OPT_mframe_chain)) {
+ StringRef V = A->getValue();
+ return V == "aapcs+leaf";
+ }
+ return false;
+ }
+ return false;
+}
+
clang::CodeGenOptions::FramePointerKind
getFramePointerKind(const llvm::opt::ArgList &Args,
const llvm::Triple &Triple) {
- // We have 4 states:
+ // There are three things to consider here:
+ // * Should a frame record be created for non-leaf functions?
+ // * Should a frame record be created for leaf functions?
+ // * Is the frame pointer register reserved, i.e. must it always point to
+ // either a new, valid frame record or be un-modified?
//
- // 00) leaf retained, non-leaf retained
- // 01) leaf retained, non-leaf omitted (this is invalid)
- // 10) leaf omitted, non-leaf retained
- // (what -momit-leaf-frame-pointer was designed for)
- // 11) leaf omitted, non-leaf omitted
+ // Not all combinations of these are valid:
+ // * It's not useful to have leaf frame records without non-leaf ones.
+ // * It's not useful to have frame records without reserving the frame
+ // pointer.
//
- // "omit" options taking precedence over "no-omit" options is the only way
- // to make 3 valid states representable
- llvm::opt::Arg *A =
- Args.getLastArg(clang::driver::options::OPT_fomit_frame_pointer,
- clang::driver::options::OPT_fno_omit_frame_pointer);
-
- bool OmitFP = A && A->getOption().matches(
- clang::driver::options::OPT_fomit_frame_pointer);
- bool NoOmitFP = A && A->getOption().matches(
- clang::driver::options::OPT_fno_omit_frame_pointer);
- bool OmitLeafFP =
- Args.hasFlag(clang::driver::options::OPT_momit_leaf_frame_pointer,
- clang::driver::options::OPT_mno_omit_leaf_frame_pointer,
- Triple.isAArch64() || Triple.isPS() || Triple.isVE() ||
- (Triple.isAndroid() && Triple.isRISCV64()));
- if (NoOmitFP || mustUseNonLeafFramePointerForTarget(Triple) ||
- (!OmitFP && useFramePointerForTargetByDefault(Args, Triple))) {
- if (OmitLeafFP)
- return clang::CodeGenOptions::FramePointerKind::NonLeaf;
- return clang::CodeGenOptions::FramePointerKind::All;
- }
+ // | Non-leaf | Leaf | Reserved |
+ // | N | N | N | FramePointerKind::None
+ // | N | N | Y | FramePointerKind::Reserved
+ // | N | Y | N | Invalid
+ // | N | Y | Y | Invalid
+ // | Y | N | N | Invalid
+ // | Y | N | Y | FramePointerKind::NonLeaf
+ // | Y | Y | N | Invalid
+ // | Y | Y | Y | FramePointerKind::All
+ //
+ // The FramePointerKind::Reserved case is currently only reachable for Arm,
+ // which has the -mframe-chain= option which can (in combination with
+ // -fno-omit-frame-pointer) specify that the frame chain must be valid,
+ // without requiring new frame records to be created.
+
+ bool DefaultFP = useFramePointerForTargetByDefault(Args, Triple);
+ bool EnableFP =
+ mustUseNonLeafFramePointerForTarget(Triple) ||
+ Args.hasFlag(clang::driver::options::OPT_fno_omit_frame_pointer,
+ clang::driver::options::OPT_fomit_frame_pointer, DefaultFP);
+
+ bool DefaultLeafFP =
+ useLeafFramePointerForTargetByDefault(Triple) ||
+ (EnableFP && framePointerImpliesLeafFramePointer(Args, Triple));
+ bool EnableLeafFP = Args.hasFlag(
+ clang::driver::options::OPT_mno_omit_leaf_frame_pointer,
+ clang::driver::options::OPT_momit_leaf_frame_pointer, DefaultLeafFP);
+
+ bool FPRegReserved = EnableFP || mustMaintainValidFrameChain(Args, Triple);
+
+ if (EnableFP) {
+ if (EnableLeafFP)
+ return clang::CodeGenOptions::FramePointerKind::All;
+ return clang::CodeGenOptions::FramePointerKind::NonLeaf;
+ }
+ if (FPRegReserved)
+ return clang::CodeGenOptions::FramePointerKind::Reserved;
return clang::CodeGenOptions::FramePointerKind::None;
}
@@ -317,9 +368,7 @@ void tools::handleTargetFeaturesGroup(const Driver &D,
continue;
}
- bool IsNegative = Name.starts_with("no-");
- if (IsNegative)
- Name = Name.substr(3);
+ bool IsNegative = Name.consume_front("no-");
Features.push_back(Args.MakeArgString((IsNegative ? "-" : "+") + Name));
}
@@ -347,7 +396,7 @@ void tools::addDirectoryList(const ArgList &Args, ArgStringList &CmdArgs,
return; // Nothing to do.
StringRef Name(ArgName);
- if (Name.equals("-I") || Name.equals("-L") || Name.empty())
+ if (Name == "-I" || Name == "-L" || Name.empty())
CombinedArg = true;
StringRef Dirs(DirList);
@@ -738,11 +787,12 @@ bool tools::isTLSDESCEnabled(const ToolChain &TC,
StringRef V = A->getValue();
bool SupportedArgument = false, EnableTLSDESC = false;
bool Unsupported = !Triple.isOSBinFormatELF();
- if (Triple.isRISCV()) {
+ if (Triple.isLoongArch() || Triple.isRISCV()) {
SupportedArgument = V == "desc" || V == "trad";
EnableTLSDESC = V == "desc";
} else if (Triple.isX86()) {
- SupportedArgument = V == "gnu";
+ SupportedArgument = V == "gnu" || V == "gnu2";
+ EnableTLSDESC = V == "gnu2";
} else {
Unsupported = true;
}
@@ -759,15 +809,15 @@ bool tools::isTLSDESCEnabled(const ToolChain &TC,
void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
ArgStringList &CmdArgs, const InputInfo &Output,
const InputInfo &Input, bool IsThinLTO) {
- const bool IsOSAIX = ToolChain.getTriple().isOSAIX();
- const bool IsAMDGCN = ToolChain.getTriple().isAMDGCN();
+ const llvm::Triple &Triple = ToolChain.getTriple();
+ const bool IsOSAIX = Triple.isOSAIX();
+ const bool IsAMDGCN = Triple.isAMDGCN();
const char *Linker = Args.MakeArgString(ToolChain.GetLinkerPath());
const Driver &D = ToolChain.getDriver();
const bool IsFatLTO = Args.hasArg(options::OPT_ffat_lto_objects);
const bool IsUnifiedLTO = Args.hasArg(options::OPT_funified_lto);
if (llvm::sys::path::filename(Linker) != "ld.lld" &&
- llvm::sys::path::stem(Linker) != "ld.lld" &&
- !ToolChain.getTriple().isOSOpenBSD()) {
+ llvm::sys::path::stem(Linker) != "ld.lld" && !Triple.isOSOpenBSD()) {
// Tell the linker to load the plugin. This has to come before
// AddLinkerInputs as gold requires -plugin and AIX ld requires -bplugin to
// come before any -plugin-opt/-bplugin_opt that -Wl might forward.
@@ -836,7 +886,7 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
// the plugin.
// Handle flags for selecting CPU variants.
- std::string CPU = getCPUName(D, Args, ToolChain.getTriple());
+ std::string CPU = getCPUName(D, Args, Triple);
if (!CPU.empty())
CmdArgs.push_back(
Args.MakeArgString(Twine(PluginOptPrefix) + ExtraDash + "mcpu=" + CPU));
@@ -967,10 +1017,9 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
bool HasRoptr = Args.hasFlag(options::OPT_mxcoff_roptr,
options::OPT_mno_xcoff_roptr, false);
StringRef OptStr = HasRoptr ? "-mxcoff-roptr" : "-mno-xcoff-roptr";
-
if (!IsOSAIX)
D.Diag(diag::err_drv_unsupported_opt_for_target)
- << OptStr << ToolChain.getTriple().str();
+ << OptStr << Triple.str();
if (HasRoptr) {
// The data sections option is on by default on AIX. We only need to error
@@ -1033,7 +1082,7 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
}
if (Args.hasFlag(options::OPT_femulated_tls, options::OPT_fno_emulated_tls,
- ToolChain.getTriple().hasDefaultEmulatedTLS())) {
+ Triple.hasDefaultEmulatedTLS())) {
CmdArgs.push_back(
Args.MakeArgString(Twine(PluginOptPrefix) + "-emulated-tls"));
}
@@ -1073,25 +1122,62 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
addMachineOutlinerArgs(D, Args, CmdArgs, ToolChain.getEffectiveTriple(),
/*IsLTO=*/true, PluginOptPrefix);
+
+ for (const Arg *A : Args.filtered(options::OPT_Wa_COMMA)) {
+ bool Crel = false;
+ for (StringRef V : A->getValues()) {
+ if (V == "--crel")
+ Crel = true;
+ else if (V == "--no-crel")
+ Crel = false;
+ else
+ continue;
+ A->claim();
+ }
+ if (Crel) {
+ if (Triple.isOSBinFormatELF() && !Triple.isMIPS()) {
+ CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) + "-crel"));
+ } else {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << "-Wa,--crel" << D.getTargetTriple();
+ }
+ }
+ }
}
/// Adds the '-lcgpu' and '-lmgpu' libraries to the compilation to include the
/// LLVM C library for GPUs.
-static void addOpenMPDeviceLibC(const ToolChain &TC, const ArgList &Args,
+static void addOpenMPDeviceLibC(const Compilation &C, const ArgList &Args,
ArgStringList &CmdArgs) {
if (Args.hasArg(options::OPT_nogpulib) || Args.hasArg(options::OPT_nolibc))
return;
// Check the resource directory for the LLVM libc GPU declarations. If it's
// found we can assume that LLVM was built with support for the GPU libc.
- SmallString<256> LibCDecls(TC.getDriver().ResourceDir);
+ SmallString<256> LibCDecls(C.getDriver().ResourceDir);
llvm::sys::path::append(LibCDecls, "include", "llvm_libc_wrappers",
"llvm-libc-decls");
bool HasLibC = llvm::sys::fs::exists(LibCDecls) &&
llvm::sys::fs::is_directory(LibCDecls);
- if (Args.hasFlag(options::OPT_gpulibc, options::OPT_nogpulibc, HasLibC)) {
- CmdArgs.push_back("-lcgpu");
- CmdArgs.push_back("-lmgpu");
+ if (!Args.hasFlag(options::OPT_gpulibc, options::OPT_nogpulibc, HasLibC))
+ return;
+
+ SmallVector<const ToolChain *> ToolChains;
+ auto TCRange = C.getOffloadToolChains(Action::OFK_OpenMP);
+ for (auto TI = TCRange.first, TE = TCRange.second; TI != TE; ++TI)
+ ToolChains.push_back(TI->second);
+
+ if (llvm::any_of(ToolChains, [](const ToolChain *TC) {
+ return TC->getTriple().isAMDGPU();
+ })) {
+ CmdArgs.push_back("-lcgpu-amdgpu");
+ CmdArgs.push_back("-lmgpu-amdgpu");
+ }
+ if (llvm::any_of(ToolChains, [](const ToolChain *TC) {
+ return TC->getTriple().isNVPTX();
+ })) {
+ CmdArgs.push_back("-lcgpu-nvptx");
+ CmdArgs.push_back("-lmgpu-nvptx");
}
}
@@ -1112,7 +1198,11 @@ void tools::addArchSpecificRPath(const ToolChain &TC, const ArgList &Args,
options::OPT_fno_rtlib_add_rpath, false))
return;
- for (const auto &CandidateRPath : TC.getArchSpecificLibPaths()) {
+ SmallVector<std::string> CandidateRPaths(TC.getArchSpecificLibPaths());
+ if (const auto CandidateRPath = TC.getStdlibPath())
+ CandidateRPaths.emplace_back(*CandidateRPath);
+
+ for (const auto &CandidateRPath : CandidateRPaths) {
if (TC.getVFS().exists(CandidateRPath)) {
CmdArgs.push_back("-rpath");
CmdArgs.push_back(Args.MakeArgString(CandidateRPath));
@@ -1120,9 +1210,10 @@ void tools::addArchSpecificRPath(const ToolChain &TC, const ArgList &Args,
}
}
-bool tools::addOpenMPRuntime(ArgStringList &CmdArgs, const ToolChain &TC,
- const ArgList &Args, bool ForceStaticHostRuntime,
- bool IsOffloadingHost, bool GompNeedsRT) {
+bool tools::addOpenMPRuntime(const Compilation &C, ArgStringList &CmdArgs,
+ const ToolChain &TC, const ArgList &Args,
+ bool ForceStaticHostRuntime, bool IsOffloadingHost,
+ bool GompNeedsRT) {
if (!Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
options::OPT_fno_openmp, false))
return false;
@@ -1163,7 +1254,7 @@ bool tools::addOpenMPRuntime(ArgStringList &CmdArgs, const ToolChain &TC,
CmdArgs.push_back("-lomptarget.devicertl");
if (IsOffloadingHost)
- addOpenMPDeviceLibC(TC, Args, CmdArgs);
+ addOpenMPDeviceLibC(C, Args, CmdArgs);
addArchSpecificRPath(TC, Args, CmdArgs);
addOpenMPRuntimeLibraryPath(TC, Args, CmdArgs);
@@ -1171,122 +1262,25 @@ bool tools::addOpenMPRuntime(ArgStringList &CmdArgs, const ToolChain &TC,
return true;
}
-/// Determines if --whole-archive is active in the list of arguments.
-static bool isWholeArchivePresent(const ArgList &Args) {
- bool WholeArchiveActive = false;
- for (auto *Arg : Args.filtered(options::OPT_Wl_COMMA)) {
- if (Arg) {
- for (StringRef ArgValue : Arg->getValues()) {
- if (ArgValue == "--whole-archive")
- WholeArchiveActive = true;
- if (ArgValue == "--no-whole-archive")
- WholeArchiveActive = false;
- }
- }
- }
-
- return WholeArchiveActive;
-}
-
-/// Determine if driver is invoked to create a shared object library (-static)
-static bool isSharedLinkage(const ArgList &Args) {
- return Args.hasArg(options::OPT_shared);
-}
-
-/// Determine if driver is invoked to create a static object library (-shared)
-static bool isStaticLinkage(const ArgList &Args) {
- return Args.hasArg(options::OPT_static);
-}
-
-/// Add Fortran runtime libs for MSVC
-static void addFortranRuntimeLibsMSVC(const ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs) {
- unsigned RTOptionID = options::OPT__SLASH_MT;
- if (auto *rtl = Args.getLastArg(options::OPT_fms_runtime_lib_EQ)) {
- RTOptionID = llvm::StringSwitch<unsigned>(rtl->getValue())
- .Case("static", options::OPT__SLASH_MT)
- .Case("static_dbg", options::OPT__SLASH_MTd)
- .Case("dll", options::OPT__SLASH_MD)
- .Case("dll_dbg", options::OPT__SLASH_MDd)
- .Default(options::OPT__SLASH_MT);
- }
- switch (RTOptionID) {
- case options::OPT__SLASH_MT:
- CmdArgs.push_back("/WHOLEARCHIVE:Fortran_main.static.lib");
- break;
- case options::OPT__SLASH_MTd:
- CmdArgs.push_back("/WHOLEARCHIVE:Fortran_main.static_dbg.lib");
- break;
- case options::OPT__SLASH_MD:
- CmdArgs.push_back("/WHOLEARCHIVE:Fortran_main.dynamic.lib");
- break;
- case options::OPT__SLASH_MDd:
- CmdArgs.push_back("/WHOLEARCHIVE:Fortran_main.dynamic_dbg.lib");
- break;
- }
-}
-
-// Add FortranMain runtime lib
-static void addFortranMain(const ToolChain &TC, const ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs) {
- // 0. Shared-library linkage
- // If we are attempting to link a library, we should not add
- // -lFortran_main.a to the link line, as the `main` symbol is not
- // required for a library and should also be provided by one of
- // the translation units of the code that this shared library
- // will be linked against eventually.
- if (isSharedLinkage(Args) || isStaticLinkage(Args)) {
- return;
- }
-
- // 1. MSVC
- if (TC.getTriple().isKnownWindowsMSVCEnvironment()) {
- addFortranRuntimeLibsMSVC(Args, CmdArgs);
- return;
- }
-
- // 2. GNU and similar
- const Driver &D = TC.getDriver();
- const char *FortranMainLinkFlag = "-lFortran_main";
-
- // Warn if the user added `-lFortran_main` - this library is an implementation
- // detail of Flang and should be handled automaticaly by the driver.
- for (const char *arg : CmdArgs) {
- if (strncmp(arg, FortranMainLinkFlag, strlen(FortranMainLinkFlag)) == 0)
- D.Diag(diag::warn_drv_deprecated_custom)
- << FortranMainLinkFlag
- << "see the Flang driver documentation for correct usage";
- }
-
- // The --whole-archive option needs to be part of the link line to make
- // sure that the main() function from Fortran_main.a is pulled in by the
- // linker. However, it shouldn't be used if it's already active.
- // TODO: Find an equivalent of `--whole-archive` for Darwin and AIX.
- if (!isWholeArchivePresent(Args) && !TC.getTriple().isMacOSX() &&
- !TC.getTriple().isOSAIX()) {
- CmdArgs.push_back("--whole-archive");
- CmdArgs.push_back(FortranMainLinkFlag);
- CmdArgs.push_back("--no-whole-archive");
- return;
- }
-
- CmdArgs.push_back(FortranMainLinkFlag);
-}
-
/// Add Fortran runtime libs
void tools::addFortranRuntimeLibs(const ToolChain &TC, const ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) {
- // 1. Link FortranMain
- // FortranMain depends on FortranRuntime, so needs to be listed first. If
- // -fno-fortran-main has been passed, skip linking Fortran_main.a
- if (!Args.hasArg(options::OPT_no_fortran_main))
- addFortranMain(TC, Args, CmdArgs);
-
- // 2. Link FortranRuntime and FortranDecimal
+ // Link FortranRuntime and FortranDecimal
// These are handled earlier on Windows by telling the frontend driver to
// add the correct libraries to link against as dependents in the object
// file.
if (!TC.getTriple().isKnownWindowsMSVCEnvironment()) {
+ StringRef F128LibName = TC.getDriver().getFlangF128MathLibrary();
+ F128LibName.consume_front_insensitive("lib");
+ if (!F128LibName.empty()) {
+ bool AsNeeded = !TC.getTriple().isOSAIX();
+ CmdArgs.push_back("-lFortranFloat128Math");
+ if (AsNeeded)
+ addAsNeededOption(TC, Args, CmdArgs, /*as_needed=*/true);
+ CmdArgs.push_back(Args.MakeArgString("-l" + F128LibName));
+ if (AsNeeded)
+ addAsNeededOption(TC, Args, CmdArgs, /*as_needed=*/false);
+ }
CmdArgs.push_back("-lFortranRuntime");
CmdArgs.push_back("-lFortranDecimal");
}
@@ -1415,6 +1409,8 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
if (!Args.hasArg(options::OPT_shared) && !TC.getTriple().isAndroid())
HelperStaticRuntimes.push_back("memprof-preinit");
}
+ if (SanArgs.needsNsanRt())
+ SharedRuntimes.push_back("nsan");
if (SanArgs.needsUbsanRt()) {
if (SanArgs.requiresMinimalRuntime())
SharedRuntimes.push_back("ubsan_minimal");
@@ -1485,6 +1481,8 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
if (SanArgs.linkCXXRuntimes())
StaticRuntimes.push_back("msan_cxx");
}
+ if (!SanArgs.needsSharedRt() && SanArgs.needsNsanRt())
+ StaticRuntimes.push_back("nsan");
if (!SanArgs.needsSharedRt() && SanArgs.needsTsanRt()) {
StaticRuntimes.push_back("tsan");
if (SanArgs.linkCXXRuntimes())
@@ -1536,6 +1534,12 @@ bool tools::addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
RequiredSymbols);
}
+ // -u options must be added before the runtime libs that resolve them.
+ for (auto S : RequiredSymbols) {
+ CmdArgs.push_back("-u");
+ CmdArgs.push_back(Args.MakeArgString(S));
+ }
+
// Inject libfuzzer dependencies.
if (SanArgs.needsFuzzer() && SanArgs.linkRuntimes() &&
!Args.hasArg(options::OPT_shared)) {
@@ -1568,10 +1572,6 @@ bool tools::addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
addSanitizerRuntime(TC, Args, CmdArgs, RT, false, false);
AddExportDynamic |= !addSanitizerDynamicList(TC, Args, CmdArgs, RT);
}
- for (auto S : RequiredSymbols) {
- CmdArgs.push_back("-u");
- CmdArgs.push_back(Args.MakeArgString(S));
- }
// If there is a static runtime with no dynamic list, force all the symbols
// to be dynamic to be sure we export sanitizer interface functions.
if (AddExportDynamic)
@@ -2085,8 +2085,12 @@ unsigned tools::getDwarfVersion(const ToolChain &TC,
const llvm::opt::ArgList &Args) {
unsigned DwarfVersion = ParseDebugDefaultVersion(TC, Args);
if (const Arg *GDwarfN = getDwarfNArg(Args))
- if (int N = DwarfVersionNum(GDwarfN->getSpelling()))
+ if (int N = DwarfVersionNum(GDwarfN->getSpelling())) {
DwarfVersion = N;
+ if (DwarfVersion == 5 && TC.getTriple().isOSAIX())
+ TC.getDriver().Diag(diag::err_drv_unsupported_opt_for_target)
+ << GDwarfN->getSpelling() << TC.getTriple().str();
+ }
if (DwarfVersion == 0) {
DwarfVersion = TC.GetDefaultDwarfVersion();
assert(DwarfVersion && "toolchain default DWARF version must be nonzero");
@@ -2652,7 +2656,7 @@ getAMDGPUCodeObjectArgument(const Driver &D, const llvm::opt::ArgList &Args) {
void tools::checkAMDGPUCodeObjectVersion(const Driver &D,
const llvm::opt::ArgList &Args) {
const unsigned MinCodeObjVer = 4;
- const unsigned MaxCodeObjVer = 5;
+ const unsigned MaxCodeObjVer = 6;
if (auto *CodeObjArg = getAMDGPUCodeObjectArgument(D, Args)) {
if (CodeObjArg->getOption().getID() ==
@@ -2663,6 +2667,12 @@ void tools::checkAMDGPUCodeObjectVersion(const Driver &D,
if (Remnant || CodeObjVer < MinCodeObjVer || CodeObjVer > MaxCodeObjVer)
D.Diag(diag::err_drv_invalid_int_value)
<< CodeObjArg->getAsString(Args) << CodeObjArg->getValue();
+
+ // COV6 is only supported by LLVM at the time of writing this, and it's
+ // expected to take some time before all ROCm components fully
+ // support it. In the meantime, make sure users are aware of this.
+ if (CodeObjVer == 6)
+ D.Diag(diag::warn_drv_amdgpu_cov6);
}
}
}
@@ -2717,14 +2727,10 @@ void tools::addOpenMPDeviceRTL(const Driver &D,
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
StringRef BitcodeSuffix,
- const llvm::Triple &Triple) {
+ const llvm::Triple &Triple,
+ const ToolChain &HostTC) {
SmallVector<StringRef, 8> LibraryPaths;
- // Add path to clang lib / lib64 folder.
- SmallString<256> DefaultLibPath = llvm::sys::path::parent_path(D.Dir);
- llvm::sys::path::append(DefaultLibPath, CLANG_INSTALL_LIBDIR_BASENAME);
- LibraryPaths.emplace_back(DefaultLibPath.c_str());
-
// Add user defined library paths from LIBRARY_PATH.
std::optional<std::string> LibPath =
llvm::sys::Process::GetEnv("LIBRARY_PATH");
@@ -2736,6 +2742,10 @@ void tools::addOpenMPDeviceRTL(const Driver &D,
LibraryPaths.emplace_back(Path.trim());
}
+ // Check all of the standard library search paths used by the compiler.
+ for (const auto &LibPath : HostTC.getFilePaths())
+ LibraryPaths.emplace_back(LibPath);
+
OptSpecifier LibomptargetBCPathOpt =
Triple.isAMDGCN() ? options::OPT_libomptarget_amdgpu_bc_path_EQ
: options::OPT_libomptarget_nvptx_bc_path_EQ;
@@ -2783,7 +2793,7 @@ void tools::addHIPRuntimeLibArgs(const ToolChain &TC, Compilation &C,
llvm::opt::ArgStringList &CmdArgs) {
if ((C.getActiveOffloadKinds() & Action::OFK_HIP) &&
!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_no_hip_rt)) {
+ !Args.hasArg(options::OPT_no_hip_rt) && !Args.hasArg(options::OPT_r)) {
TC.AddHIPRuntimeLibArgs(Args, CmdArgs);
} else {
// Claim "no HIP libraries" arguments if any
@@ -2792,3 +2802,121 @@ void tools::addHIPRuntimeLibArgs(const ToolChain &TC, Compilation &C,
}
}
}
+
+void tools::addOutlineAtomicsArgs(const Driver &D, const ToolChain &TC,
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ const llvm::Triple &Triple) {
+ if (Arg *A = Args.getLastArg(options::OPT_moutline_atomics,
+ options::OPT_mno_outline_atomics)) {
+ // Option -moutline-atomics supported for AArch64 target only.
+ if (!Triple.isAArch64()) {
+ D.Diag(diag::warn_drv_moutline_atomics_unsupported_opt)
+ << Triple.getArchName() << A->getOption().getName();
+ } else {
+ if (A->getOption().matches(options::OPT_moutline_atomics)) {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+outline-atomics");
+ } else {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-outline-atomics");
+ }
+ }
+ } else if (Triple.isAArch64() && TC.IsAArch64OutlineAtomicsDefault(Args)) {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+outline-atomics");
+ }
+}
+
+void tools::addOffloadCompressArgs(const llvm::opt::ArgList &TCArgs,
+ llvm::opt::ArgStringList &CmdArgs) {
+ if (TCArgs.hasFlag(options::OPT_offload_compress,
+ options::OPT_no_offload_compress, false))
+ CmdArgs.push_back("-compress");
+ if (TCArgs.hasArg(options::OPT_v))
+ CmdArgs.push_back("-verbose");
+ if (auto *Arg = TCArgs.getLastArg(options::OPT_offload_compression_level_EQ))
+ CmdArgs.push_back(
+ TCArgs.MakeArgString(Twine("-compression-level=") + Arg->getValue()));
+}
+
+void tools::addMCModel(const Driver &D, const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple,
+ const llvm::Reloc::Model &RelocationModel,
+ llvm::opt::ArgStringList &CmdArgs) {
+ if (Arg *A = Args.getLastArg(options::OPT_mcmodel_EQ)) {
+ StringRef CM = A->getValue();
+ bool Ok = false;
+ if (Triple.isOSAIX() && CM == "medium")
+ CM = "large";
+ if (Triple.isAArch64(64)) {
+ Ok = CM == "tiny" || CM == "small" || CM == "large";
+ if (CM == "large" && !Triple.isOSBinFormatMachO() &&
+ RelocationModel != llvm::Reloc::Static)
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << A->getAsString(Args) << "-fno-pic";
+ } else if (Triple.isLoongArch()) {
+ if (CM == "extreme" &&
+ Args.hasFlagNoClaim(options::OPT_fplt, options::OPT_fno_plt, false))
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "-fplt";
+ Ok = CM == "normal" || CM == "medium" || CM == "extreme";
+ // Convert to LLVM recognizable names.
+ if (Ok)
+ CM = llvm::StringSwitch<StringRef>(CM)
+ .Case("normal", "small")
+ .Case("extreme", "large")
+ .Default(CM);
+ } else if (Triple.isPPC64() || Triple.isOSAIX()) {
+ Ok = CM == "small" || CM == "medium" || CM == "large";
+ } else if (Triple.isRISCV()) {
+ if (CM == "medlow")
+ CM = "small";
+ else if (CM == "medany")
+ CM = "medium";
+ Ok = CM == "small" || CM == "medium";
+ } else if (Triple.getArch() == llvm::Triple::x86_64) {
+ Ok = llvm::is_contained({"small", "kernel", "medium", "large", "tiny"},
+ CM);
+ } else if (Triple.isNVPTX() || Triple.isAMDGPU() || Triple.isSPIRV()) {
+ // NVPTX/AMDGPU/SPIRV does not care about the code model and will accept
+ // whatever works for the host.
+ Ok = true;
+ } else if (Triple.isSPARC64()) {
+ if (CM == "medlow")
+ CM = "small";
+ else if (CM == "medmid")
+ CM = "medium";
+ else if (CM == "medany")
+ CM = "large";
+ Ok = CM == "small" || CM == "medium" || CM == "large";
+ }
+ if (Ok) {
+ CmdArgs.push_back(Args.MakeArgString("-mcmodel=" + CM));
+ } else {
+ D.Diag(diag::err_drv_unsupported_option_argument_for_target)
+ << A->getSpelling() << CM << Triple.getTriple();
+ }
+ }
+
+ if (Triple.getArch() == llvm::Triple::x86_64) {
+ bool IsMediumCM = false;
+ bool IsLargeCM = false;
+ if (Arg *A = Args.getLastArg(options::OPT_mcmodel_EQ)) {
+ IsMediumCM = StringRef(A->getValue()) == "medium";
+ IsLargeCM = StringRef(A->getValue()) == "large";
+ }
+ if (Arg *A = Args.getLastArg(options::OPT_mlarge_data_threshold_EQ)) {
+ if (!IsMediumCM && !IsLargeCM) {
+ D.Diag(diag::warn_drv_large_data_threshold_invalid_code_model)
+ << A->getOption().getRenderName();
+ } else {
+ A->render(Args, CmdArgs);
+ }
+ } else if (IsMediumCM) {
+ CmdArgs.push_back("-mlarge-data-threshold=65536");
+ } else if (IsLargeCM) {
+ CmdArgs.push_back("-mlarge-data-threshold=0");
+ }
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h
index 807867f13a5c..52818ecde924 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h
@@ -111,8 +111,8 @@ void addOpenMPRuntimeLibraryPath(const ToolChain &TC,
const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs);
/// Returns true, if an OpenMP runtime has been added.
-bool addOpenMPRuntime(llvm::opt::ArgStringList &CmdArgs, const ToolChain &TC,
- const llvm::opt::ArgList &Args,
+bool addOpenMPRuntime(const Compilation &C, llvm::opt::ArgStringList &CmdArgs,
+ const ToolChain &TC, const llvm::opt::ArgList &Args,
bool ForceStaticHostRuntime = false,
bool IsOffloadingHost = false, bool GompNeedsRT = false);
@@ -214,7 +214,20 @@ void addMachineOutlinerArgs(const Driver &D, const llvm::opt::ArgList &Args,
void addOpenMPDeviceRTL(const Driver &D, const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
- StringRef BitcodeSuffix, const llvm::Triple &Triple);
+ StringRef BitcodeSuffix, const llvm::Triple &Triple,
+ const ToolChain &HostTC);
+
+void addOutlineAtomicsArgs(const Driver &D, const ToolChain &TC,
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ const llvm::Triple &Triple);
+void addOffloadCompressArgs(const llvm::opt::ArgList &TCArgs,
+ llvm::opt::ArgStringList &CmdArgs);
+void addMCModel(const Driver &D, const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple,
+ const llvm::Reloc::Model &RelocationModel,
+ llvm::opt::ArgStringList &CmdArgs);
+
} // end namespace tools
} // end namespace driver
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
index 1462576ca870..61d12b10dfb6 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
@@ -82,6 +82,10 @@ CudaVersion getCudaVersion(uint32_t raw_version) {
return CudaVersion::CUDA_122;
if (raw_version < 12040)
return CudaVersion::CUDA_123;
+ if (raw_version < 12050)
+ return CudaVersion::CUDA_124;
+ if (raw_version < 12060)
+ return CudaVersion::CUDA_125;
return CudaVersion::NEW;
}
@@ -219,13 +223,13 @@ CudaInstallationDetector::CudaInstallationDetector(
// CUDA-9+ uses single libdevice file for all GPU variants.
std::string FilePath = LibDevicePath + "/libdevice.10.bc";
if (FS.exists(FilePath)) {
- for (int Arch = (int)CudaArch::SM_30, E = (int)CudaArch::LAST; Arch < E;
- ++Arch) {
- CudaArch GpuArch = static_cast<CudaArch>(Arch);
- if (!IsNVIDIAGpuArch(GpuArch))
+ for (int Arch = (int)OffloadArch::SM_30, E = (int)OffloadArch::LAST;
+ Arch < E; ++Arch) {
+ OffloadArch OA = static_cast<OffloadArch>(Arch);
+ if (!IsNVIDIAOffloadArch(OA))
continue;
- std::string GpuArchName(CudaArchToString(GpuArch));
- LibDeviceMap[GpuArchName] = FilePath;
+ std::string OffloadArchName(OffloadArchToString(OA));
+ LibDeviceMap[OffloadArchName] = FilePath;
}
}
} else {
@@ -308,17 +312,17 @@ void CudaInstallationDetector::AddCudaIncludeArgs(
}
void CudaInstallationDetector::CheckCudaVersionSupportsArch(
- CudaArch Arch) const {
- if (Arch == CudaArch::UNKNOWN || Version == CudaVersion::UNKNOWN ||
+ OffloadArch Arch) const {
+ if (Arch == OffloadArch::UNKNOWN || Version == CudaVersion::UNKNOWN ||
ArchsWithBadVersion[(int)Arch])
return;
- auto MinVersion = MinVersionForCudaArch(Arch);
- auto MaxVersion = MaxVersionForCudaArch(Arch);
+ auto MinVersion = MinVersionForOffloadArch(Arch);
+ auto MaxVersion = MaxVersionForOffloadArch(Arch);
if (Version < MinVersion || Version > MaxVersion) {
ArchsWithBadVersion[(int)Arch] = true;
D.Diag(diag::err_drv_cuda_version_unsupported)
- << CudaArchToString(Arch) << CudaVersionToString(MinVersion)
+ << OffloadArchToString(Arch) << CudaVersionToString(MinVersion)
<< CudaVersionToString(MaxVersion) << InstallPath
<< CudaVersionToString(Version);
}
@@ -389,12 +393,16 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
GPUArchName = JA.getOffloadingArch();
} else {
GPUArchName = Args.getLastArgValue(options::OPT_march_EQ);
- assert(!GPUArchName.empty() && "Must have an architecture passed in.");
+ if (GPUArchName.empty()) {
+ C.getDriver().Diag(diag::err_drv_offload_missing_gpu_arch)
+ << getToolChain().getArchName() << getShortName();
+ return;
+ }
}
// Obtain architecture from the action.
- CudaArch gpu_arch = StringToCudaArch(GPUArchName);
- assert(gpu_arch != CudaArch::UNKNOWN &&
+ OffloadArch gpu_arch = StringToOffloadArch(GPUArchName);
+ assert(gpu_arch != OffloadArch::UNKNOWN &&
"Device action expected to have an architecture.");
// Check that our installation's ptxas supports gpu_arch.
@@ -449,17 +457,10 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-v");
CmdArgs.push_back("--gpu-name");
- CmdArgs.push_back(Args.MakeArgString(CudaArchToString(gpu_arch)));
+ CmdArgs.push_back(Args.MakeArgString(OffloadArchToString(gpu_arch)));
CmdArgs.push_back("--output-file");
std::string OutputFileName = TC.getInputFilename(Output);
- // If we are invoking `nvlink` internally we need to output a `.cubin` file.
- // FIXME: This should hopefully be removed if NVIDIA updates their tooling.
- if (!C.getInputArgs().getLastArg(options::OPT_c)) {
- SmallString<256> Filename(Output.getFilename());
- llvm::sys::path::replace_extension(Filename, "cubin");
- OutputFileName = Filename.str();
- }
if (Output.isFilename() && OutputFileName != Output.getFilename())
C.addTempFile(Args.MakeArgString(OutputFileName));
@@ -499,18 +500,20 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
Exec, CmdArgs, Inputs, Output));
}
-static bool shouldIncludePTX(const ArgList &Args, const char *gpu_arch) {
- bool includePTX = true;
- for (Arg *A : Args) {
- if (!(A->getOption().matches(options::OPT_cuda_include_ptx_EQ) ||
- A->getOption().matches(options::OPT_no_cuda_include_ptx_EQ)))
- continue;
+static bool shouldIncludePTX(const ArgList &Args, StringRef InputArch) {
+ // The new driver does not include PTX by default to avoid overhead.
+ bool includePTX = !Args.hasFlag(options::OPT_offload_new_driver,
+ options::OPT_no_offload_new_driver, false);
+ for (Arg *A : Args.filtered(options::OPT_cuda_include_ptx_EQ,
+ options::OPT_no_cuda_include_ptx_EQ)) {
A->claim();
const StringRef ArchStr = A->getValue();
- if (ArchStr == "all" || ArchStr == gpu_arch) {
- includePTX = A->getOption().matches(options::OPT_cuda_include_ptx_EQ);
- continue;
- }
+ if (A->getOption().matches(options::OPT_cuda_include_ptx_EQ) &&
+ (ArchStr == "all" || ArchStr == InputArch))
+ includePTX = true;
+ else if (A->getOption().matches(options::OPT_no_cuda_include_ptx_EQ) &&
+ (ArchStr == "all" || ArchStr == InputArch))
+ includePTX = false;
}
return includePTX;
}
@@ -543,7 +546,7 @@ void NVPTX::FatBinary::ConstructJob(Compilation &C, const JobAction &JA,
const char *gpu_arch_str = A->getOffloadingArch();
assert(gpu_arch_str &&
"Device action expected to have associated a GPU architecture!");
- CudaArch gpu_arch = StringToCudaArch(gpu_arch_str);
+ OffloadArch gpu_arch = StringToOffloadArch(gpu_arch_str);
if (II.getType() == types::TY_PP_Asm &&
!shouldIncludePTX(Args, gpu_arch_str))
@@ -551,7 +554,7 @@ void NVPTX::FatBinary::ConstructJob(Compilation &C, const JobAction &JA,
// We need to pass an Arch of the form "sm_XX" for cubin files and
// "compute_XX" for ptx.
const char *Arch = (II.getType() == types::TY_PP_Asm)
- ? CudaArchToVirtualArchString(gpu_arch)
+ ? OffloadArchToVirtualArchString(gpu_arch)
: gpu_arch_str;
CmdArgs.push_back(
Args.MakeArgString(llvm::Twine("--image=profile=") + Arch +
@@ -593,66 +596,47 @@ void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-v");
StringRef GPUArch = Args.getLastArgValue(options::OPT_march_EQ);
- assert(!GPUArch.empty() && "At least one GPU Arch required for nvlink.");
+ if (GPUArch.empty()) {
+ C.getDriver().Diag(diag::err_drv_offload_missing_gpu_arch)
+ << getToolChain().getArchName() << getShortName();
+ return;
+ }
CmdArgs.push_back("-arch");
CmdArgs.push_back(Args.MakeArgString(GPUArch));
+ if (Args.hasArg(options::OPT_ptxas_path_EQ))
+ CmdArgs.push_back(Args.MakeArgString(
+ "--pxtas-path=" + Args.getLastArgValue(options::OPT_ptxas_path_EQ)));
+
+ if (Args.hasArg(options::OPT_cuda_path_EQ))
+ CmdArgs.push_back(Args.MakeArgString(
+ "--cuda-path=" + Args.getLastArgValue(options::OPT_cuda_path_EQ)));
+
// Add paths specified in LIBRARY_PATH environment variable as -L options.
addDirectoryList(Args, CmdArgs, "-L", "LIBRARY_PATH");
+ // Add standard library search paths passed on the command line.
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ getToolChain().AddFilePathLibArgs(Args, CmdArgs);
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
+
+ if (C.getDriver().isUsingLTO())
+ addLTOOptions(getToolChain(), Args, CmdArgs, Output, Inputs[0],
+ C.getDriver().getLTOMode() == LTOK_Thin);
+
// Add paths for the default clang library path.
SmallString<256> DefaultLibPath =
llvm::sys::path::parent_path(TC.getDriver().Dir);
llvm::sys::path::append(DefaultLibPath, CLANG_INSTALL_LIBDIR_BASENAME);
CmdArgs.push_back(Args.MakeArgString(Twine("-L") + DefaultLibPath));
- for (const auto &II : Inputs) {
- if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
- II.getType() == types::TY_LTO_BC || II.getType() == types::TY_LLVM_BC) {
- C.getDriver().Diag(diag::err_drv_no_linker_llvm_support)
- << getToolChain().getTripleString();
- continue;
- }
-
- // Currently, we only pass the input files to the linker, we do not pass
- // any libraries that may be valid only for the host.
- if (!II.isFilename())
- continue;
-
- // The 'nvlink' application performs RDC-mode linking when given a '.o'
- // file and device linking when given a '.cubin' file. We always want to
- // perform device linking, so just rename any '.o' files.
- // FIXME: This should hopefully be removed if NVIDIA updates their tooling.
- auto InputFile = getToolChain().getInputFilename(II);
- if (llvm::sys::path::extension(InputFile) != ".cubin") {
- // If there are no actions above this one then this is direct input and we
- // can copy it. Otherwise the input is internal so a `.cubin` file should
- // exist.
- if (II.getAction() && II.getAction()->getInputs().size() == 0) {
- const char *CubinF =
- Args.MakeArgString(getToolChain().getDriver().GetTemporaryPath(
- llvm::sys::path::stem(InputFile), "cubin"));
- if (llvm::sys::fs::copy_file(InputFile, C.addTempFile(CubinF)))
- continue;
-
- CmdArgs.push_back(CubinF);
- } else {
- SmallString<256> Filename(InputFile);
- llvm::sys::path::replace_extension(Filename, "cubin");
- CmdArgs.push_back(Args.MakeArgString(Filename));
- }
- } else {
- CmdArgs.push_back(Args.MakeArgString(InputFile));
- }
- }
-
C.addCommand(std::make_unique<Command>(
JA, *this,
ResponseFileSupport{ResponseFileSupport::RF_Full, llvm::sys::WEM_UTF8,
"--options-file"},
- Args.MakeArgString(getToolChain().GetProgramPath("nvlink")), CmdArgs,
- Inputs, Output));
+ Args.MakeArgString(getToolChain().GetProgramPath("clang-nvlink-wrapper")),
+ CmdArgs, Inputs, Output));
}
void NVPTX::getNVPTXTargetFeatures(const Driver &D, const llvm::Triple &Triple,
@@ -675,6 +659,8 @@ void NVPTX::getNVPTXTargetFeatures(const Driver &D, const llvm::Triple &Triple,
case CudaVersion::CUDA_##CUDA_VER: \
PtxFeature = "+ptx" #PTX_VER; \
break;
+ CASE_CUDA_VERSION(125, 85);
+ CASE_CUDA_VERSION(124, 84);
CASE_CUDA_VERSION(123, 83);
CASE_CUDA_VERSION(122, 82);
CASE_CUDA_VERSION(121, 81);
@@ -726,9 +712,8 @@ NVPTXToolChain::NVPTXToolChain(const Driver &D, const llvm::Triple &Triple,
llvm::opt::DerivedArgList *
NVPTXToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
StringRef BoundArch,
- Action::OffloadKind DeviceOffloadKind) const {
- DerivedArgList *DAL =
- ToolChain::TranslateArgs(Args, BoundArch, DeviceOffloadKind);
+ Action::OffloadKind OffloadKind) const {
+ DerivedArgList *DAL = ToolChain::TranslateArgs(Args, BoundArch, OffloadKind);
if (!DAL)
DAL = new DerivedArgList(Args.getBaseArgs());
@@ -738,9 +723,25 @@ NVPTXToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
if (!llvm::is_contained(*DAL, A))
DAL->append(A);
- if (!DAL->hasArg(options::OPT_march_EQ))
+ if (!DAL->hasArg(options::OPT_march_EQ) && OffloadKind != Action::OFK_None) {
DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ),
- CudaArchToString(CudaArch::CudaDefault));
+ OffloadArchToString(OffloadArch::CudaDefault));
+ } else if (DAL->getLastArgValue(options::OPT_march_EQ) == "generic" &&
+ OffloadKind == Action::OFK_None) {
+ DAL->eraseArg(options::OPT_march_EQ);
+ } else if (DAL->getLastArgValue(options::OPT_march_EQ) == "native") {
+ auto GPUsOrErr = getSystemGPUArchs(Args);
+ if (!GPUsOrErr) {
+ getDriver().Diag(diag::err_drv_undetermined_gpu_arch)
+ << getArchName() << llvm::toString(GPUsOrErr.takeError()) << "-march";
+ } else {
+ if (GPUsOrErr->size() > 1)
+ getDriver().Diag(diag::warn_drv_multi_gpu_arch)
+ << getArchName() << llvm::join(*GPUsOrErr, ", ") << "-march";
+ DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ),
+ Args.MakeArgString(GPUsOrErr->front()));
+ }
+ }
return DAL;
}
@@ -783,6 +784,31 @@ void NVPTXToolChain::adjustDebugInfoKind(
}
}
+Expected<SmallVector<std::string>>
+NVPTXToolChain::getSystemGPUArchs(const ArgList &Args) const {
+ // Detect NVIDIA GPUs available on the system.
+ std::string Program;
+ if (Arg *A = Args.getLastArg(options::OPT_nvptx_arch_tool_EQ))
+ Program = A->getValue();
+ else
+ Program = GetProgramPath("nvptx-arch");
+
+ auto StdoutOrErr = executeToolChainProgram(Program, /*SecondsToWait=*/10);
+ if (!StdoutOrErr)
+ return StdoutOrErr.takeError();
+
+ SmallVector<std::string, 1> GPUArchs;
+ for (StringRef Arch : llvm::split((*StdoutOrErr)->getBuffer(), "\n"))
+ if (!Arch.empty())
+ GPUArchs.push_back(Arch.str());
+
+ if (GPUArchs.empty())
+ return llvm::createStringError(std::error_code(),
+ "No NVIDIA GPU detected in the system");
+
+ return std::move(GPUArchs);
+}
+
/// CUDA toolchain. Our assembler is ptxas, and our "linker" is fatbinary,
/// which isn't properly a linker but nonetheless performs the step of stitching
/// together object files from the assembler into a single blob.
@@ -854,7 +880,7 @@ void CudaToolChain::addClangTargetOptions(
return;
addOpenMPDeviceRTL(getDriver(), DriverArgs, CC1Args, GpuArch.str(),
- getTriple());
+ getTriple(), HostTC);
}
}
@@ -879,7 +905,7 @@ void CudaToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
!DriverArgs.hasArg(options::OPT_no_cuda_version_check)) {
StringRef Arch = DriverArgs.getLastArgValue(options::OPT_march_EQ);
assert(!Arch.empty() && "Must have an explicit GPU arch.");
- CudaInstallation.CheckCudaVersionSupportsArch(StringToCudaArch(Arch));
+ CudaInstallation.CheckCudaVersionSupportsArch(StringToOffloadArch(Arch));
}
CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
}
@@ -890,11 +916,7 @@ std::string CudaToolChain::getInputFilename(const InputInfo &Input) const {
if (Input.getType() != types::TY_Object || getDriver().offloadDeviceOnly())
return ToolChain::getInputFilename(Input);
- // Replace extension for object files with cubin because nvlink relies on
- // these particular file names.
- SmallString<256> Filename(ToolChain::getInputFilename(Input));
- llvm::sys::path::replace_extension(Filename, "cubin");
- return std::string(Filename);
+ return ToolChain::getInputFilename(Input);
}
llvm::opt::DerivedArgList *
@@ -925,7 +947,7 @@ CudaToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
llvm::formatv("{0}", llvm::fmt_consume(ArchsOrErr.takeError()));
getDriver().Diag(diag::err_drv_undetermined_gpu_arch)
<< llvm::Triple::getArchTypeName(getArch()) << ErrMsg << "-march";
- Arch = CudaArchToString(CudaArch::CudaDefault);
+ Arch = OffloadArchToString(OffloadArch::CudaDefault);
} else {
Arch = Args.MakeArgString(ArchsOrErr->front());
}
@@ -937,7 +959,10 @@ CudaToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
}
for (Arg *A : Args) {
- DAL->append(A);
+ // Make sure flags are not duplicated.
+ if (!llvm::is_contained(*DAL, A)) {
+ DAL->append(A);
+ }
}
if (!BoundArch.empty()) {
@@ -948,31 +973,6 @@ CudaToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
return DAL;
}
-Expected<SmallVector<std::string>>
-CudaToolChain::getSystemGPUArchs(const ArgList &Args) const {
- // Detect NVIDIA GPUs availible on the system.
- std::string Program;
- if (Arg *A = Args.getLastArg(options::OPT_nvptx_arch_tool_EQ))
- Program = A->getValue();
- else
- Program = GetProgramPath("nvptx-arch");
-
- auto StdoutOrErr = executeToolChainProgram(Program);
- if (!StdoutOrErr)
- return StdoutOrErr.takeError();
-
- SmallVector<std::string, 1> GPUArchs;
- for (StringRef Arch : llvm::split((*StdoutOrErr)->getBuffer(), "\n"))
- if (!Arch.empty())
- GPUArchs.push_back(Arch.str());
-
- if (GPUArchs.empty())
- return llvm::createStringError(std::error_code(),
- "No NVIDIA GPU detected in the system");
-
- return std::move(GPUArchs);
-}
-
Tool *NVPTXToolChain::buildAssembler() const {
return new tools::NVPTX::Assembler(*this);
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h
index 8a053f3393e1..7a6a6fb20901 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h
@@ -37,7 +37,7 @@ private:
// CUDA architectures for which we have raised an error in
// CheckCudaVersionSupportsArch.
- mutable std::bitset<(int)CudaArch::LAST> ArchsWithBadVersion;
+ mutable std::bitset<(int)OffloadArch::LAST> ArchsWithBadVersion;
public:
CudaInstallationDetector(const Driver &D, const llvm::Triple &HostTriple,
@@ -50,7 +50,7 @@ public:
///
/// If either Version or Arch is unknown, does not emit an error. Emits at
/// most one error per Arch.
- void CheckCudaVersionSupportsArch(CudaArch Arch) const;
+ void CheckCudaVersionSupportsArch(OffloadArch Arch) const;
/// Check whether we detected a valid Cuda install.
bool isValid() const { return IsValid; }
@@ -155,6 +155,7 @@ public:
bool isPIEDefault(const llvm::opt::ArgList &Args) const override {
return false;
}
+ bool HasNativeLLVMSupport() const override { return true; }
bool isPICDefaultForced() const override { return false; }
bool SupportsProfiling() const override { return false; }
@@ -168,6 +169,11 @@ public:
unsigned GetDefaultDwarfVersion() const override { return 2; }
unsigned getMaxDwarfVersion() const override { return 2; }
+ /// Uses nvptx-arch tool to get arch of the system GPU. Will return error
+ /// if unable to find one.
+ virtual Expected<SmallVector<std::string>>
+ getSystemGPUArchs(const llvm::opt::ArgList &Args) const override;
+
CudaInstallationDetector CudaInstallation;
protected:
@@ -187,6 +193,8 @@ public:
return &HostTC.getTriple();
}
+ bool HasNativeLLVMSupport() const override { return false; }
+
std::string getInputFilename(const InputInfo &Input) const override;
llvm::opt::DerivedArgList *
@@ -223,11 +231,6 @@ public:
const ToolChain &HostTC;
- /// Uses nvptx-arch tool to get arch of the system GPU. Will return error
- /// if unable to find one.
- virtual Expected<SmallVector<std::string>>
- getSystemGPUArchs(const llvm::opt::ArgList &Args) const override;
-
protected:
Tool *buildAssembler() const override; // ptxas
Tool *buildLinker() const override; // fatbinary (ok, not really a linker)
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp
index fae8ad1a958a..e576efaf5ca8 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp
@@ -643,9 +643,8 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// It seems that the 'e' option is completely ignored for dynamic executables
// (the default), and with static executables, the last one wins, as expected.
- Args.addAllArgs(CmdArgs,
- {options::OPT_d_Flag, options::OPT_s, options::OPT_t,
- options::OPT_Z_Flag, options::OPT_u_Group, options::OPT_r});
+ Args.addAllArgs(CmdArgs, {options::OPT_d_Flag, options::OPT_s, options::OPT_t,
+ options::OPT_Z_Flag, options::OPT_u_Group});
// Forward -ObjC when either -ObjC or -ObjC++ is used, to force loading
// members of static archive libraries which implement Objective-C classes or
@@ -687,7 +686,7 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs))
- addOpenMPRuntime(CmdArgs, getToolChain(), Args);
+ addOpenMPRuntime(C, CmdArgs, getToolChain(), Args);
if (isObjCRuntimeLinked(Args) &&
!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
@@ -926,9 +925,7 @@ void darwin::VerifyDebug::ConstructJob(Compilation &C, const JobAction &JA,
MachO::MachO(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: ToolChain(D, Triple, Args) {
// We expect 'as', 'ld', etc. to be adjacent to our install dir.
- getProgramPaths().push_back(getDriver().getInstalledDir());
- if (getDriver().getInstalledDir() != getDriver().Dir)
- getProgramPaths().push_back(getDriver().Dir);
+ getProgramPaths().push_back(getDriver().Dir);
}
/// Darwin - Darwin tool chain for i386 and x86_64.
@@ -1260,29 +1257,23 @@ unsigned DarwinClang::GetDefaultDwarfVersion() const {
if ((isTargetMacOSBased() && isMacosxVersionLT(10, 11)) ||
(isTargetIOSBased() && isIPhoneOSVersionLT(9)))
return 2;
- return 4;
+ // Default to use DWARF 4 on OS X 10.11 - macOS 14 / iOS 9 - iOS 17.
+ if ((isTargetMacOSBased() && isMacosxVersionLT(15)) ||
+ (isTargetIOSBased() && isIPhoneOSVersionLT(18)) ||
+ (isTargetWatchOSBased() && TargetVersion < llvm::VersionTuple(11)) ||
+ (isTargetXROS() && TargetVersion < llvm::VersionTuple(2)) ||
+ (isTargetDriverKit() && TargetVersion < llvm::VersionTuple(24)) ||
+ (isTargetMacOSBased() &&
+ TargetVersion.empty())) // apple-darwin, no version.
+ return 4;
+ return 5;
}
void MachO::AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs,
StringRef Component, RuntimeLinkOptions Opts,
bool IsShared) const {
- SmallString<64> DarwinLibName = StringRef("libclang_rt.");
- // an Darwin the builtins compomnent is not in the library name
- if (Component != "builtins") {
- DarwinLibName += Component;
- if (!(Opts & RLO_IsEmbedded))
- DarwinLibName += "_";
- }
-
- DarwinLibName += getOSLibraryNameSuffix();
- DarwinLibName += IsShared ? "_dynamic.dylib" : ".a";
- SmallString<128> Dir(getDriver().ResourceDir);
- llvm::sys::path::append(Dir, "lib", "darwin");
- if (Opts & RLO_IsEmbedded)
- llvm::sys::path::append(Dir, "macho_embedded");
-
- SmallString<128> P(Dir);
- llvm::sys::path::append(P, DarwinLibName);
+ std::string P = getCompilerRT(
+ Args, Component, IsShared ? ToolChain::FT_Shared : ToolChain::FT_Static);
// For now, allow missing resource libraries to support developers who may
// not have compiler-rt checked out or integrated into their build (unless
@@ -1297,18 +1288,56 @@ void MachO::AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs,
// rpaths. This is currently true from this place, but we need to be
// careful if this function is ever called before user's rpaths are emitted.
if (Opts & RLO_AddRPath) {
- assert(DarwinLibName.ends_with(".dylib") && "must be a dynamic library");
+ assert(StringRef(P).ends_with(".dylib") && "must be a dynamic library");
// Add @executable_path to rpath to support having the dylib copied with
// the executable.
CmdArgs.push_back("-rpath");
CmdArgs.push_back("@executable_path");
- // Add the path to the resource dir to rpath to support using the dylib
- // from the default location without copying.
+ // Add the compiler-rt library's directory to rpath to support using the
+ // dylib from the default location without copying.
CmdArgs.push_back("-rpath");
- CmdArgs.push_back(Args.MakeArgString(Dir));
+ CmdArgs.push_back(Args.MakeArgString(llvm::sys::path::parent_path(P)));
+ }
+}
+
+std::string MachO::getCompilerRT(const ArgList &, StringRef Component,
+ FileType Type) const {
+ assert(Type != ToolChain::FT_Object &&
+ "it doesn't make sense to ask for the compiler-rt library name as an "
+ "object file");
+ SmallString<64> MachOLibName = StringRef("libclang_rt");
+ // On MachO, the builtins component is not in the library name
+ if (Component != "builtins") {
+ MachOLibName += '.';
+ MachOLibName += Component;
}
+ MachOLibName += Type == ToolChain::FT_Shared ? "_dynamic.dylib" : ".a";
+
+ SmallString<128> FullPath(getDriver().ResourceDir);
+ llvm::sys::path::append(FullPath, "lib", "darwin", "macho_embedded",
+ MachOLibName);
+ return std::string(FullPath);
+}
+
+std::string Darwin::getCompilerRT(const ArgList &, StringRef Component,
+ FileType Type) const {
+ assert(Type != ToolChain::FT_Object &&
+ "it doesn't make sense to ask for the compiler-rt library name as an "
+ "object file");
+ SmallString<64> DarwinLibName = StringRef("libclang_rt.");
+ // On Darwin, the builtins component is not in the library name
+ if (Component != "builtins") {
+ DarwinLibName += Component;
+ DarwinLibName += '_';
+ }
+ DarwinLibName += getOSLibraryNameSuffix();
+ DarwinLibName += Type == ToolChain::FT_Shared ? "_dynamic.dylib" : ".a";
+
+ SmallString<128> FullPath(getDriver().ResourceDir);
+ llvm::sys::path::append(FullPath, "lib", "darwin", DarwinLibName);
+ return std::string(FullPath);
}
StringRef Darwin::getPlatformFamily() const {
@@ -1902,6 +1931,7 @@ getDeploymentTargetFromEnvironmentVariables(const Driver &TheDriver,
"TVOS_DEPLOYMENT_TARGET",
"WATCHOS_DEPLOYMENT_TARGET",
"DRIVERKIT_DEPLOYMENT_TARGET",
+ "XROS_DEPLOYMENT_TARGET"
};
static_assert(std::size(EnvVars) == Darwin::LastDarwinPlatform + 1,
"Missing platform");
@@ -1914,14 +1944,15 @@ getDeploymentTargetFromEnvironmentVariables(const Driver &TheDriver,
// default platform.
if (!Targets[Darwin::MacOS].empty() &&
(!Targets[Darwin::IPhoneOS].empty() ||
- !Targets[Darwin::WatchOS].empty() || !Targets[Darwin::TvOS].empty())) {
+ !Targets[Darwin::WatchOS].empty() || !Targets[Darwin::TvOS].empty() ||
+ !Targets[Darwin::XROS].empty())) {
if (Triple.getArch() == llvm::Triple::arm ||
Triple.getArch() == llvm::Triple::aarch64 ||
Triple.getArch() == llvm::Triple::thumb)
Targets[Darwin::MacOS] = "";
else
Targets[Darwin::IPhoneOS] = Targets[Darwin::WatchOS] =
- Targets[Darwin::TvOS] = "";
+ Targets[Darwin::TvOS] = Targets[Darwin::XROS] = "";
} else {
// Don't allow conflicts in any other platform.
unsigned FirstTarget = std::size(Targets);
@@ -2515,25 +2546,19 @@ void DarwinClang::AddClangCXXStdlibIncludeArgs(
switch (GetCXXStdlibType(DriverArgs)) {
case ToolChain::CST_Libcxx: {
// On Darwin, libc++ can be installed in one of the following places:
- // 1. Alongside the compiler in <install>/include/c++/v1
- // 2. Alongside the compiler in <clang-executable-folder>/../include/c++/v1
- // 3. In a SDK (or a custom sysroot) in <sysroot>/usr/include/c++/v1
+ // 1. Alongside the compiler in <clang-executable-folder>/../include/c++/v1
+ // 2. In a SDK (or a custom sysroot) in <sysroot>/usr/include/c++/v1
//
// The precedence of paths is as listed above, i.e. we take the first path
// that exists. Note that we never include libc++ twice -- we take the first
// path that exists and don't send the other paths to CC1 (otherwise
// include_next could break).
- //
- // Also note that in most cases, (1) and (2) are exactly the same path.
- // Those two paths will differ only when the `clang` program being run
- // is actually a symlink to the real executable.
// Check for (1)
// Get from '<install>/bin' to '<install>/include/c++/v1'.
// Note that InstallBin can be relative, so we use '..' instead of
// parent_path.
- llvm::SmallString<128> InstallBin =
- llvm::StringRef(getDriver().getInstalledDir()); // <install>/bin
+ llvm::SmallString<128> InstallBin(getDriver().Dir); // <install>/bin
llvm::sys::path::append(InstallBin, "..", "include", "c++", "v1");
if (getVFS().exists(InstallBin)) {
addSystemInclude(DriverArgs, CC1Args, InstallBin);
@@ -2543,20 +2568,7 @@ void DarwinClang::AddClangCXXStdlibIncludeArgs(
<< "\"\n";
}
- // (2) Check for the folder where the executable is located, if different.
- if (getDriver().getInstalledDir() != getDriver().Dir) {
- InstallBin = llvm::StringRef(getDriver().Dir);
- llvm::sys::path::append(InstallBin, "..", "include", "c++", "v1");
- if (getVFS().exists(InstallBin)) {
- addSystemInclude(DriverArgs, CC1Args, InstallBin);
- return;
- } else if (DriverArgs.hasArg(options::OPT_v)) {
- llvm::errs() << "ignoring nonexistent directory \"" << InstallBin
- << "\"\n";
- }
- }
-
- // Otherwise, check for (3)
+ // Otherwise, check for (2)
llvm::SmallString<128> SysrootUsr = Sysroot;
llvm::sys::path::append(SysrootUsr, "usr", "include", "c++", "v1");
if (getVFS().exists(SysrootUsr)) {
@@ -2911,30 +2923,106 @@ bool Darwin::isAlignedAllocationUnavailable() const {
return TargetVersion < alignedAllocMinVersion(OS);
}
-static bool sdkSupportsBuiltinModules(const Darwin::DarwinPlatformKind &TargetPlatform, const std::optional<DarwinSDKInfo> &SDKInfo) {
+static bool sdkSupportsBuiltinModules(
+ const Darwin::DarwinPlatformKind &TargetPlatform,
+ const Darwin::DarwinEnvironmentKind &TargetEnvironment,
+ const std::optional<DarwinSDKInfo> &SDKInfo) {
+ if (TargetEnvironment == Darwin::NativeEnvironment ||
+ TargetEnvironment == Darwin::Simulator ||
+ TargetEnvironment == Darwin::MacCatalyst) {
+ // Standard xnu/Mach/Darwin based environments
+ // depend on the SDK version.
+ } else {
+ // All other environments support builtin modules from the start.
+ return true;
+ }
+
if (!SDKInfo)
+ // If there is no SDK info, assume this is building against a
+ // pre-SDK version of macOS (i.e. before Mac OS X 10.4). Those
+ // don't support modules anyway, but the headers definitely
+ // don't support builtin modules either. It might also be some
+ // kind of degenerate build environment, err on the side of
+ // the old behavior which is to not use builtin modules.
return false;
VersionTuple SDKVersion = SDKInfo->getVersion();
switch (TargetPlatform) {
+ // Existing SDKs added support for builtin modules in the fall
+ // 2024 major releases.
case Darwin::MacOS:
- return SDKVersion >= VersionTuple(99U);
+ return SDKVersion >= VersionTuple(15U);
case Darwin::IPhoneOS:
- return SDKVersion >= VersionTuple(99U);
+ switch (TargetEnvironment) {
+ case Darwin::MacCatalyst:
+ // Mac Catalyst uses `-target arm64-apple-ios18.0-macabi` so the platform
+ // is iOS, but it builds with the macOS SDK, so it's the macOS SDK version
+ // that's relevant.
+ return SDKVersion >= VersionTuple(15U);
+ default:
+ return SDKVersion >= VersionTuple(18U);
+ }
case Darwin::TvOS:
- return SDKVersion >= VersionTuple(99U);
+ return SDKVersion >= VersionTuple(18U);
case Darwin::WatchOS:
- return SDKVersion >= VersionTuple(99U);
+ return SDKVersion >= VersionTuple(11U);
case Darwin::XROS:
- return SDKVersion >= VersionTuple(99U);
+ return SDKVersion >= VersionTuple(2U);
+
+ // New SDKs support builtin modules from the start.
default:
return true;
}
}
-void Darwin::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args,
- Action::OffloadKind DeviceOffloadKind) const {
+static inline llvm::VersionTuple
+sizedDeallocMinVersion(llvm::Triple::OSType OS) {
+ switch (OS) {
+ default:
+ break;
+ case llvm::Triple::Darwin:
+ case llvm::Triple::MacOSX: // Earliest supporting version is 10.12.
+ return llvm::VersionTuple(10U, 12U);
+ case llvm::Triple::IOS:
+ case llvm::Triple::TvOS: // Earliest supporting version is 10.0.0.
+ return llvm::VersionTuple(10U);
+ case llvm::Triple::WatchOS: // Earliest supporting version is 3.0.0.
+ return llvm::VersionTuple(3U);
+ }
+
+ llvm_unreachable("Unexpected OS");
+}
+
+bool Darwin::isSizedDeallocationUnavailable() const {
+ llvm::Triple::OSType OS;
+
+ if (isTargetMacCatalyst())
+ return TargetVersion < sizedDeallocMinVersion(llvm::Triple::MacOSX);
+ switch (TargetPlatform) {
+ case MacOS: // Earlier than 10.12.
+ OS = llvm::Triple::MacOSX;
+ break;
+ case IPhoneOS:
+ OS = llvm::Triple::IOS;
+ break;
+ case TvOS: // Earlier than 10.0.
+ OS = llvm::Triple::TvOS;
+ break;
+ case WatchOS: // Earlier than 3.0.
+ OS = llvm::Triple::WatchOS;
+ break;
+ case DriverKit:
+ case XROS:
+ // Always available.
+ return false;
+ }
+
+ return TargetVersion < sizedDeallocMinVersion(OS);
+}
+
+void Darwin::addClangTargetOptions(
+ const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const {
// Pass "-faligned-alloc-unavailable" only when the user hasn't manually
// enabled or disabled aligned allocations.
if (!DriverArgs.hasArgNoClaim(options::OPT_faligned_allocation,
@@ -2942,6 +3030,13 @@ void Darwin::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
isAlignedAllocationUnavailable())
CC1Args.push_back("-faligned-alloc-unavailable");
+ // Pass "-fno-sized-deallocation" only when the user hasn't manually enabled
+ // or disabled sized deallocations.
+ if (!DriverArgs.hasArgNoClaim(options::OPT_fsized_deallocation,
+ options::OPT_fno_sized_deallocation) &&
+ isSizedDeallocationUnavailable())
+ CC1Args.push_back("-fno-sized-deallocation");
+
addClangCC1ASTargetOptions(DriverArgs, CC1Args);
// Enable compatibility mode for NSItemProviderCompletionHandler in
@@ -2966,7 +3061,7 @@ void Darwin::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
// i.e. when the builtin stdint.h is in the Darwin module too, the cycle
// goes away. Note that -fbuiltin-headers-in-system-modules does nothing
// to fix the same problem with C++ headers, and is generally fragile.
- if (!sdkSupportsBuiltinModules(TargetPlatform, SDKInfo))
+ if (!sdkSupportsBuiltinModules(TargetPlatform, TargetEnvironment, SDKInfo))
CC1Args.push_back("-fbuiltin-headers-in-system-modules");
if (!DriverArgs.hasArgNoClaim(options::OPT_fdefine_target_os_macros,
@@ -2988,7 +3083,7 @@ void Darwin::addClangCC1ASTargetOptions(
std::string Arg;
llvm::raw_string_ostream OS(Arg);
OS << "-target-sdk-version=" << V;
- CC1ASArgs.push_back(Args.MakeArgString(OS.str()));
+ CC1ASArgs.push_back(Args.MakeArgString(Arg));
};
if (isTargetMacCatalyst()) {
@@ -3011,7 +3106,7 @@ void Darwin::addClangCC1ASTargetOptions(
std::string Arg;
llvm::raw_string_ostream OS(Arg);
OS << "-darwin-target-variant-sdk-version=" << SDKInfo->getVersion();
- CC1ASArgs.push_back(Args.MakeArgString(OS.str()));
+ CC1ASArgs.push_back(Args.MakeArgString(Arg));
} else if (const auto *MacOStoMacCatalystMapping =
SDKInfo->getVersionMapping(
DarwinSDKInfo::OSEnvPair::macOStoMacCatalystPair())) {
@@ -3022,7 +3117,7 @@ void Darwin::addClangCC1ASTargetOptions(
std::string Arg;
llvm::raw_string_ostream OS(Arg);
OS << "-darwin-target-variant-sdk-version=" << *SDKVersion;
- CC1ASArgs.push_back(Args.MakeArgString(OS.str()));
+ CC1ASArgs.push_back(Args.MakeArgString(Arg));
}
}
}
@@ -3432,6 +3527,10 @@ SanitizerMask Darwin::getSupportedSanitizers() const {
isTargetTvOSSimulator() || isTargetWatchOSSimulator())) {
Res |= SanitizerKind::Thread;
}
+
+ if (IsX86_64)
+ Res |= SanitizerKind::NumericalStability;
+
return Res;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h
index 5e60b0841d6d..2e55b49682a7 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h
@@ -223,6 +223,13 @@ public:
// There aren't any profiling libs for embedded targets currently.
}
+ // Return the full path of the compiler-rt library on a non-Darwin MachO
+ // system. Those are under
+ // <resourcedir>/lib/darwin/macho_embedded/<...>(.dylib|.a).
+ std::string
+ getCompilerRT(const llvm::opt::ArgList &Args, StringRef Component,
+ FileType Type = ToolChain::FT_Static) const override;
+
/// }
/// @name ToolChain Implementation
/// {
@@ -300,7 +307,7 @@ public:
WatchOS,
DriverKit,
XROS,
- LastDarwinPlatform = DriverKit
+ LastDarwinPlatform = XROS
};
enum DarwinEnvironmentKind {
NativeEnvironment,
@@ -356,6 +363,12 @@ public:
void addProfileRTLibs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
+ // Return the full path of the compiler-rt library on a Darwin MachO system.
+ // Those are under <resourcedir>/lib/darwin/<...>(.dylib|.a).
+ std::string
+ getCompilerRT(const llvm::opt::ArgList &Args, StringRef Component,
+ FileType Type = ToolChain::FT_Static) const override;
+
protected:
/// }
/// @name Darwin specific Toolchain functions
@@ -511,6 +524,10 @@ protected:
/// targeting.
bool isAlignedAllocationUnavailable() const;
+ /// Return true if C++14 sized deallocation functions are not implemented in
+ /// the C++ standard library of the deployment target we are targeting.
+ bool isSizedDeallocationUnavailable() const;
+
void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadKind) const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.cpp
index 9942fc632e0a..1dbc46763c11 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.cpp
@@ -122,7 +122,7 @@ void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
Args.addAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
- options::OPT_s, options::OPT_t, options::OPT_r});
+ options::OPT_s, options::OPT_t});
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
@@ -136,7 +136,7 @@ void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// Use the static OpenMP runtime with -static-openmp
bool StaticOpenMP = Args.hasArg(options::OPT_static_openmp) && !Static;
- addOpenMPRuntime(CmdArgs, ToolChain, Args, StaticOpenMP);
+ addOpenMPRuntime(C, CmdArgs, ToolChain, Args, StaticOpenMP);
if (D.CCCIsCXX()) {
if (ToolChain.ShouldLinkCXXStdlib(Args))
@@ -205,11 +205,8 @@ void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
DragonFly::DragonFly(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
-
// Path mangling to find libexec
- getProgramPaths().push_back(getDriver().getInstalledDir());
- if (getDriver().getInstalledDir() != getDriver().Dir)
- getProgramPaths().push_back(getDriver().Dir);
+ getProgramPaths().push_back(getDriver().Dir);
getFilePaths().push_back(getDriver().Dir + "/../lib");
getFilePaths().push_back(concat(getDriver().SysRoot, "/usr/lib"));
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp
index 03d68c3df7fb..c4f2375c6403 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp
@@ -15,7 +15,8 @@
#include "llvm/Frontend/Debug/Options.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
-#include "llvm/Support/RISCVISAInfo.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/RISCVISAInfo.h"
#include "llvm/TargetParser/RISCVTargetParser.h"
#include <cassert>
@@ -38,8 +39,6 @@ void Flang::addFortranDialectOptions(const ArgList &Args,
Args.addAllArgs(CmdArgs, {options::OPT_ffixed_form,
options::OPT_ffree_form,
options::OPT_ffixed_line_length_EQ,
- options::OPT_fopenmp,
- options::OPT_fopenmp_version_EQ,
options::OPT_fopenacc,
options::OPT_finput_charset_EQ,
options::OPT_fimplicit_none,
@@ -55,7 +54,8 @@ void Flang::addFortranDialectOptions(const ArgList &Args,
options::OPT_fdefault_integer_8,
options::OPT_fdefault_double_8,
options::OPT_flarge_sizes,
- options::OPT_fno_automatic});
+ options::OPT_fno_automatic,
+ options::OPT_fhermetic_module_files});
}
void Flang::addPreprocessingOptions(const ArgList &Args,
@@ -127,7 +127,7 @@ void Flang::addOtherOptions(const ArgList &Args, ArgStringList &CmdArgs) const {
Arg *gNArg = Args.getLastArg(options::OPT_gN_Group);
DebugInfoKind = debugLevelToInfoKind(*gNArg);
} else if (Args.hasArg(options::OPT_g_Flag)) {
- DebugInfoKind = llvm::codegenoptions::DebugLineTablesOnly;
+ DebugInfoKind = llvm::codegenoptions::FullDebugInfo;
} else {
DebugInfoKind = llvm::codegenoptions::NoDebugInfo;
}
@@ -148,7 +148,7 @@ void Flang::addCodegenOptions(const ArgList &Args,
Args.addAllArgs(CmdArgs, {options::OPT_flang_experimental_hlfir,
options::OPT_flang_deprecated_no_hlfir,
- options::OPT_flang_experimental_polymorphism,
+ options::OPT_flang_experimental_integer_overflow,
options::OPT_fno_ppc_native_vec_elem_order,
options::OPT_fppc_native_vec_elem_order});
}
@@ -180,14 +180,11 @@ void Flang::AddAArch64TargetArgs(const ArgList &Args,
if (Arg *A = Args.getLastArg(options::OPT_msve_vector_bits_EQ)) {
StringRef Val = A->getValue();
const Driver &D = getToolChain().getDriver();
- if (Val.equals("128") || Val.equals("256") || Val.equals("512") ||
- Val.equals("1024") || Val.equals("2048") || Val.equals("128+") ||
- Val.equals("256+") || Val.equals("512+") || Val.equals("1024+") ||
- Val.equals("2048+")) {
+ if (Val == "128" || Val == "256" || Val == "512" || Val == "1024" ||
+ Val == "2048" || Val == "128+" || Val == "256+" || Val == "512+" ||
+ Val == "1024+" || Val == "2048+") {
unsigned Bits = 0;
- if (Val.ends_with("+"))
- Val = Val.substr(0, Val.size() - 1);
- else {
+ if (!Val.consume_back("+")) {
[[maybe_unused]] bool Invalid = Val.getAsInteger(10, Bits);
assert(!Invalid && "Failed to parse value");
CmdArgs.push_back(
@@ -199,7 +196,7 @@ void Flang::AddAArch64TargetArgs(const ArgList &Args,
CmdArgs.push_back(
Args.MakeArgString("-mvscale-min=" + llvm::Twine(Bits / 128)));
// Silently drop requests for vector-length agnostic code as it's implied.
- } else if (!Val.equals("scalable"))
+ } else if (Val != "scalable")
// Handle the unsupported values passed to msve-vector-bits.
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getSpelling() << Val;
@@ -216,7 +213,7 @@ void Flang::AddRISCVTargetArgs(const ArgList &Args,
// Get minimum VLen from march.
unsigned MinVLen = 0;
- StringRef Arch = riscv::getRISCVArch(Args, Triple);
+ std::string Arch = riscv::getRISCVArch(Args, Triple);
auto ISAInfo = llvm::RISCVISAInfo::parseArchString(
Arch, /*EnableExperimentalExtensions*/ true);
// Ignore parsing error.
@@ -226,7 +223,7 @@ void Flang::AddRISCVTargetArgs(const ArgList &Args,
// If the value is "zvl", use MinVLen from march. Otherwise, try to parse
// as integer as long as we have a MinVLen.
unsigned Bits = 0;
- if (Val.equals("zvl") && MinVLen >= llvm::RISCV::RVVBitsPerBlock) {
+ if (Val == "zvl" && MinVLen >= llvm::RISCV::RVVBitsPerBlock) {
Bits = MinVLen;
} else if (!Val.getAsInteger(10, Bits)) {
// Only accept power of 2 values beteen RVVBitsPerBlock and 65536 that
@@ -243,7 +240,7 @@ void Flang::AddRISCVTargetArgs(const ArgList &Args,
Args.MakeArgString("-mvscale-max=" + llvm::Twine(VScaleMin)));
CmdArgs.push_back(
Args.MakeArgString("-mvscale-min=" + llvm::Twine(VScaleMin)));
- } else if (!Val.equals("scalable")) {
+ } else if (Val != "scalable") {
// Handle the unsupported values passed to mrvv-vector-bits.
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getSpelling() << Val;
@@ -251,6 +248,20 @@ void Flang::AddRISCVTargetArgs(const ArgList &Args,
}
}
+void Flang::AddX86_64TargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ if (Arg *A = Args.getLastArg(options::OPT_masm_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value == "intel" || Value == "att") {
+ CmdArgs.push_back(Args.MakeArgString("-mllvm"));
+ CmdArgs.push_back(Args.MakeArgString("-x86-asm-syntax=" + Value));
+ } else {
+ getToolChain().getDriver().Diag(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << Value;
+ }
+ }
+}
+
static void addVSDefines(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs) {
@@ -262,7 +273,7 @@ static void addVSDefines(const ToolChain &TC, const ArgList &Args,
CmdArgs.push_back(Args.MakeArgString("-D_MSC_FULL_VER=" + Twine(ver)));
CmdArgs.push_back(Args.MakeArgString("-D_WIN32"));
- llvm::Triple triple = TC.getTriple();
+ const llvm::Triple &triple = TC.getTriple();
if (triple.isAArch64()) {
CmdArgs.push_back("-D_M_ARM64=1");
} else if (triple.isX86() && triple.isArch32Bit()) {
@@ -280,7 +291,6 @@ static void processVSRuntimeLibrary(const ToolChain &TC, const ArgList &Args,
assert(TC.getTriple().isKnownWindowsMSVCEnvironment() &&
"can only add VS runtime library on Windows!");
// if -fno-fortran-main has been passed, skip linking Fortran_main.a
- bool LinkFortranMain = !Args.hasArg(options::OPT_no_fortran_main);
if (TC.getTriple().isKnownWindowsMSVCEnvironment()) {
CmdArgs.push_back(Args.MakeArgString(
"--dependent-lib=" + TC.getCompilerRTBasename(Args, "builtins")));
@@ -298,8 +308,6 @@ static void processVSRuntimeLibrary(const ToolChain &TC, const ArgList &Args,
case options::OPT__SLASH_MT:
CmdArgs.push_back("-D_MT");
CmdArgs.push_back("--dependent-lib=libcmt");
- if (LinkFortranMain)
- CmdArgs.push_back("--dependent-lib=Fortran_main.static.lib");
CmdArgs.push_back("--dependent-lib=FortranRuntime.static.lib");
CmdArgs.push_back("--dependent-lib=FortranDecimal.static.lib");
break;
@@ -307,8 +315,6 @@ static void processVSRuntimeLibrary(const ToolChain &TC, const ArgList &Args,
CmdArgs.push_back("-D_MT");
CmdArgs.push_back("-D_DEBUG");
CmdArgs.push_back("--dependent-lib=libcmtd");
- if (LinkFortranMain)
- CmdArgs.push_back("--dependent-lib=Fortran_main.static_dbg.lib");
CmdArgs.push_back("--dependent-lib=FortranRuntime.static_dbg.lib");
CmdArgs.push_back("--dependent-lib=FortranDecimal.static_dbg.lib");
break;
@@ -316,8 +322,6 @@ static void processVSRuntimeLibrary(const ToolChain &TC, const ArgList &Args,
CmdArgs.push_back("-D_MT");
CmdArgs.push_back("-D_DLL");
CmdArgs.push_back("--dependent-lib=msvcrt");
- if (LinkFortranMain)
- CmdArgs.push_back("--dependent-lib=Fortran_main.dynamic.lib");
CmdArgs.push_back("--dependent-lib=FortranRuntime.dynamic.lib");
CmdArgs.push_back("--dependent-lib=FortranDecimal.dynamic.lib");
break;
@@ -326,8 +330,6 @@ static void processVSRuntimeLibrary(const ToolChain &TC, const ArgList &Args,
CmdArgs.push_back("-D_DEBUG");
CmdArgs.push_back("-D_DLL");
CmdArgs.push_back("--dependent-lib=msvcrtd");
- if (LinkFortranMain)
- CmdArgs.push_back("--dependent-lib=Fortran_main.dynamic_dbg.lib");
CmdArgs.push_back("--dependent-lib=FortranRuntime.dynamic_dbg.lib");
CmdArgs.push_back("--dependent-lib=FortranDecimal.dynamic_dbg.lib");
break;
@@ -354,6 +356,8 @@ void Flang::addTargetOptions(const ArgList &Args,
CmdArgs.push_back(Args.MakeArgString(CPU));
}
+ addOutlineAtomicsArgs(D, getToolChain(), Args, CmdArgs, Triple);
+
// Add the target features.
switch (TC.getArch()) {
default:
@@ -374,6 +378,7 @@ void Flang::addTargetOptions(const ArgList &Args,
break;
case llvm::Triple::x86_64:
getTargetFeatures(D, Triple, Args, CmdArgs, /*ForAs*/ false);
+ AddX86_64TargetArgs(Args, CmdArgs);
break;
}
@@ -415,6 +420,13 @@ void Flang::addTargetOptions(const ArgList &Args,
}
// TODO: Add target specific flags, ABI, mtune option etc.
+ if (const Arg *A = Args.getLastArg(options::OPT_mtune_EQ)) {
+ CmdArgs.push_back("-tune-cpu");
+ if (A->getValue() == StringRef{"native"})
+ CmdArgs.push_back(Args.MakeArgString(llvm::sys::getHostCPUName()));
+ else
+ CmdArgs.push_back(A->getValue());
+ }
}
void Flang::addOffloadOptions(Compilation &C, const InputInfoList &Inputs,
@@ -584,7 +596,7 @@ static void addFloatingPointOptions(const Driver &D, const ArgList &Args,
if (!HonorINFs && !HonorNaNs && AssociativeMath && ReciprocalMath &&
ApproxFunc && !SignedZeros &&
- (FPContract == "fast" || FPContract == "")) {
+ (FPContract == "fast" || FPContract.empty())) {
CmdArgs.push_back("-ffast-math");
return;
}
@@ -674,7 +686,10 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(TripleStr));
if (isa<PreprocessJobAction>(JA)) {
- CmdArgs.push_back("-E");
+ CmdArgs.push_back("-E");
+ if (Args.getLastArg(options::OPT_dM)) {
+ CmdArgs.push_back("-dM");
+ }
} else if (isa<CompileJobAction>(JA) || isa<BackendJobAction>(JA)) {
if (JA.getType() == types::TY_Nothing) {
CmdArgs.push_back("-fsyntax-only");
@@ -736,6 +751,11 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
// Add target args, features, etc.
addTargetOptions(Args, CmdArgs);
+ llvm::Reloc::Model RelocationModel =
+ std::get<0>(ParsePICArgs(getToolChain(), Args));
+ // Add MCModel information
+ addMCModel(D, Args, Triple, RelocationModel, CmdArgs);
+
// Add Codegen options
addCodegenOptions(Args, CmdArgs);
@@ -749,6 +769,46 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
// Add other compile options
addOtherOptions(Args, CmdArgs);
+ // Disable all warnings
+ // TODO: Handle interactions between -w, -pedantic, -Wall, -WOption
+ Args.AddLastArg(CmdArgs, options::OPT_w);
+
+ // Forward flags for OpenMP. We don't do this if the current action is an
+ // device offloading action other than OpenMP.
+ if (Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
+ options::OPT_fno_openmp, false) &&
+ (JA.isDeviceOffloading(Action::OFK_None) ||
+ JA.isDeviceOffloading(Action::OFK_OpenMP))) {
+ switch (D.getOpenMPRuntime(Args)) {
+ case Driver::OMPRT_OMP:
+ case Driver::OMPRT_IOMP5:
+ // Clang can generate useful OpenMP code for these two runtime libraries.
+ CmdArgs.push_back("-fopenmp");
+ Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_version_EQ);
+
+ if (Args.hasArg(options::OPT_fopenmp_force_usm))
+ CmdArgs.push_back("-fopenmp-force-usm");
+
+ // FIXME: Clang supports a whole bunch more flags here.
+ break;
+ default:
+ // By default, if Clang doesn't know how to generate useful OpenMP code
+ // for a specific runtime library, we just don't pass the '-fopenmp' flag
+ // down to the actual compilation.
+ // FIXME: It would be better to have a mode which *only* omits IR
+ // generation based on the OpenMP support so that we get consistent
+ // semantic analysis, etc.
+ const Arg *A = Args.getLastArg(options::OPT_fopenmp_EQ);
+ D.Diag(diag::warn_drv_unsupported_openmp_library)
+ << A->getSpelling() << A->getValue();
+ break;
+ }
+ }
+
+ // Pass the path to compiler resource files.
+ CmdArgs.push_back("-resource-dir");
+ CmdArgs.push_back(D.ResourceDir.c_str());
+
// Offloading related options
addOffloadOptions(C, Inputs, JA, Args, CmdArgs);
@@ -763,6 +823,9 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
case CodeGenOptions::FramePointerKind::None:
FPKeepKindStr = "-mframe-pointer=none";
break;
+ case CodeGenOptions::FramePointerKind::Reserved:
+ FPKeepKindStr = "-mframe-pointer=reserved";
+ break;
case CodeGenOptions::FramePointerKind::NonLeaf:
FPKeepKindStr = "-mframe-pointer=non-leaf";
break;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.h
index ec2e545a1d0b..9f5e26b86083 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.h
@@ -77,6 +77,13 @@ private:
void AddRISCVTargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
+ /// Add specific options for X86_64 target.
+ ///
+ /// \param [in] Args The list of input driver arguments
+ /// \param [out] CmdArgs The list of output command arguments
+ void AddX86_64TargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+
/// Extract offload options from the driver arguments and add them to
/// the command arguments.
/// \param [in] C The current compilation for the driver invocation
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
index b7c9e0e51cdb..a8ee6540001e 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
@@ -133,6 +133,7 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *LinkingOutput) const {
const auto &ToolChain = static_cast<const FreeBSD &>(getToolChain());
const Driver &D = ToolChain.getDriver();
+ const llvm::Triple &Triple = ToolChain.getTriple();
const llvm::Triple::ArchType Arch = ToolChain.getArch();
const bool IsPIE =
!Args.hasArg(options::OPT_shared) &&
@@ -165,8 +166,7 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-dynamic-linker");
CmdArgs.push_back("/libexec/ld-elf.so.1");
}
- const llvm::Triple &T = ToolChain.getTriple();
- if (Arch == llvm::Triple::arm || T.isX86())
+ if (Arch == llvm::Triple::arm || Triple.isX86())
CmdArgs.push_back("--hash-style=both");
CmdArgs.push_back("--enable-new-dtags");
}
@@ -212,12 +212,17 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::riscv64:
CmdArgs.push_back("-m");
CmdArgs.push_back("elf64lriscv");
- CmdArgs.push_back("-X");
break;
default:
break;
}
+ if (Triple.isRISCV64()) {
+ CmdArgs.push_back("-X");
+ if (Args.hasArg(options::OPT_mno_relax))
+ CmdArgs.push_back("--no-relax");
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_G)) {
if (ToolChain.getTriple().isMIPS()) {
StringRef v = A->getValue();
@@ -261,8 +266,8 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
- Args.addAllArgs(CmdArgs, {options::OPT_T_Group, options::OPT_s,
- options::OPT_t, options::OPT_r});
+ Args.addAllArgs(CmdArgs,
+ {options::OPT_T_Group, options::OPT_s, options::OPT_t});
if (D.isUsingLTO()) {
assert(!Inputs.empty() && "Must have at least one input.");
@@ -290,7 +295,7 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// Use the static OpenMP runtime with -static-openmp
bool StaticOpenMP = Args.hasArg(options::OPT_static_openmp) &&
!Args.hasArg(options::OPT_static);
- addOpenMPRuntime(CmdArgs, ToolChain, Args, StaticOpenMP);
+ addOpenMPRuntime(C, CmdArgs, ToolChain, Args, StaticOpenMP);
if (D.CCCIsCXX()) {
if (ToolChain.ShouldLinkCXXStdlib(Args))
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
index 14b838500bec..6daa73c7a54c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
@@ -119,8 +119,11 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(Dyld));
}
- if (ToolChain.getArch() == llvm::Triple::riscv64)
+ if (Triple.isRISCV64()) {
CmdArgs.push_back("-X");
+ if (Args.hasArg(options::OPT_mno_relax))
+ CmdArgs.push_back("--no-relax");
+ }
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
@@ -251,9 +254,7 @@ void fuchsia::StaticLibTool::ConstructJob(Compilation &C, const JobAction &JA,
Fuchsia::Fuchsia(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: ToolChain(D, Triple, Args) {
- getProgramPaths().push_back(getDriver().getInstalledDir());
- if (getDriver().getInstalledDir() != D.Dir)
- getProgramPaths().push_back(D.Dir);
+ getProgramPaths().push_back(getDriver().Dir);
if (!D.SysRoot.empty()) {
SmallString<128> P(D.SysRoot);
@@ -432,13 +433,23 @@ void Fuchsia::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
if (Version.empty())
return;
- // First add the per-target include path.
+ // First add the per-target multilib include dir.
+ if (!SelectedMultilibs.empty() && !SelectedMultilibs.back().isDefault()) {
+ const Multilib &M = SelectedMultilibs.back();
+ SmallString<128> TargetDir(Path);
+ llvm::sys::path::append(TargetDir, Target, M.gccSuffix(), "c++", Version);
+ if (getVFS().exists(TargetDir)) {
+ addSystemInclude(DriverArgs, CC1Args, TargetDir);
+ }
+ }
+
+ // Second add the per-target include dir.
SmallString<128> TargetDir(Path);
llvm::sys::path::append(TargetDir, Target, "c++", Version);
if (getVFS().exists(TargetDir))
addSystemInclude(DriverArgs, CC1Args, TargetDir);
- // Second add the generic one.
+ // Third, add the generic one.
SmallString<128> Dir(Path);
llvm::sys::path::append(Dir, "c++", Version);
addSystemInclude(DriverArgs, CC1Args, Dir);
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp
index e5e1b1d77269..5e9a655eaf82 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp
@@ -30,8 +30,8 @@
#include "llvm/Option/ArgList.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Path.h"
-#include "llvm/Support/RISCVISAInfo.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/TargetParser/RISCVISAInfo.h"
#include "llvm/TargetParser/TargetParser.h"
#include <system_error>
@@ -423,8 +423,12 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
D.Diag(diag::err_target_unknown_triple) << Triple.str();
return;
}
- if (Triple.isRISCV())
+
+ if (Triple.isRISCV()) {
CmdArgs.push_back("-X");
+ if (Args.hasArg(options::OPT_mno_relax))
+ CmdArgs.push_back("--no-relax");
+ }
const bool IsShared = Args.hasArg(options::OPT_shared);
if (IsShared)
@@ -594,7 +598,7 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// FIXME: Only pass GompNeedsRT = true for platforms with libgomp that
// require librt. Most modern Linux platforms do, but some may not.
- if (addOpenMPRuntime(CmdArgs, ToolChain, Args, StaticOpenMP,
+ if (addOpenMPRuntime(C, CmdArgs, ToolChain, Args, StaticOpenMP,
JA.isHostOffloading(Action::OFK_OpenMP),
/* GompNeedsRT= */ true))
// OpenMP runtimes implies pthreads when using the GNU toolchain.
@@ -765,9 +769,10 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
StringRef ABIName = riscv::getRISCVABI(Args, getToolChain().getTriple());
CmdArgs.push_back("-mabi");
CmdArgs.push_back(ABIName.data());
- StringRef MArchName = riscv::getRISCVArch(Args, getToolChain().getTriple());
+ std::string MArchName =
+ riscv::getRISCVArch(Args, getToolChain().getTriple());
CmdArgs.push_back("-march");
- CmdArgs.push_back(MArchName.data());
+ CmdArgs.push_back(Args.MakeArgString(MArchName));
if (!Args.hasFlag(options::OPT_mrelax, options::OPT_mno_relax, true))
Args.addOptOutFlag(CmdArgs, options::OPT_mrelax, options::OPT_mno_relax);
break;
@@ -1792,9 +1797,7 @@ selectRISCVMultilib(const MultilibSet &RISCVMultilibSet, StringRef Arch,
}
auto &MLConfigISAInfo = *MLConfigParseResult;
- const llvm::RISCVISAInfo::OrderedExtensionMap &MLConfigArchExts =
- MLConfigISAInfo->getExtensions();
- for (auto MLConfigArchExt : MLConfigArchExts) {
+ for (auto &MLConfigArchExt : MLConfigISAInfo->getExtensions()) {
auto ExtName = MLConfigArchExt.first;
NewMultilib.flag(Twine("-", ExtName).str());
@@ -1880,7 +1883,7 @@ static void findRISCVBareMetalMultilibs(const Driver &D,
Multilib::flags_list Flags;
llvm::StringSet<> Added_ABIs;
StringRef ABIName = tools::riscv::getRISCVABI(Args, TargetTriple);
- StringRef MArch = tools::riscv::getRISCVArch(Args, TargetTriple);
+ std::string MArch = tools::riscv::getRISCVArch(Args, TargetTriple);
for (auto Element : RISCVMultilibSet) {
addMultilibFlag(MArch == Element.march,
Twine("-march=", Element.march).str().c_str(), Flags);
@@ -2225,10 +2228,19 @@ void Generic_GCC::GCCInstallationDetector::init(
SmallVector<StringRef, 16> CandidateBiarchTripleAliases;
// Add some triples that we want to check first.
CandidateTripleAliases.push_back(TargetTriple.str());
- std::string TripleNoVendor = TargetTriple.getArchName().str() + "-" +
- TargetTriple.getOSAndEnvironmentName().str();
- if (TargetTriple.getVendor() == llvm::Triple::UnknownVendor)
+ std::string TripleNoVendor, BiarchTripleNoVendor;
+ if (TargetTriple.getVendor() == llvm::Triple::UnknownVendor) {
+ StringRef OSEnv = TargetTriple.getOSAndEnvironmentName();
+ if (TargetTriple.getEnvironment() == llvm::Triple::GNUX32)
+ OSEnv = "linux-gnu";
+ TripleNoVendor = (TargetTriple.getArchName().str() + '-' + OSEnv).str();
CandidateTripleAliases.push_back(TripleNoVendor);
+ if (BiarchVariantTriple.getArch() != llvm::Triple::UnknownArch) {
+ BiarchTripleNoVendor =
+ (BiarchVariantTriple.getArchName().str() + '-' + OSEnv).str();
+ CandidateBiarchTripleAliases.push_back(BiarchTripleNoVendor);
+ }
+ }
CollectLibDirsAndTriples(TargetTriple, BiarchVariantTriple, CandidateLibDirs,
CandidateTripleAliases, CandidateBiarchLibDirs,
@@ -2281,7 +2293,7 @@ void Generic_GCC::GCCInstallationDetector::init(
}
// Then look for gcc installed alongside clang.
- Prefixes.push_back(D.InstalledDir + "/..");
+ Prefixes.push_back(D.Dir + "/..");
// Next, look for prefix(es) that correspond to distribution-supplied gcc
// installations.
@@ -2454,8 +2466,7 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
"aarch64-none-linux-gnu", "aarch64-linux-gnu", "aarch64-redhat-linux",
"aarch64-suse-linux"};
static const char *const AArch64beLibDirs[] = {"/lib"};
- static const char *const AArch64beTriples[] = {"aarch64_be-none-linux-gnu",
- "aarch64_be-linux-gnu"};
+ static const char *const AArch64beTriples[] = {"aarch64_be-none-linux-gnu"};
static const char *const ARMLibDirs[] = {"/lib"};
static const char *const ARMTriples[] = {"arm-linux-gnueabi"};
@@ -2480,9 +2491,8 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
"x86_64-linux-gnu", "x86_64-unknown-linux-gnu",
"x86_64-pc-linux-gnu", "x86_64-redhat-linux6E",
"x86_64-redhat-linux", "x86_64-suse-linux",
- "x86_64-manbo-linux-gnu", "x86_64-linux-gnu",
- "x86_64-slackware-linux", "x86_64-unknown-linux",
- "x86_64-amazon-linux"};
+ "x86_64-manbo-linux-gnu", "x86_64-slackware-linux",
+ "x86_64-unknown-linux", "x86_64-amazon-linux"};
static const char *const X32Triples[] = {"x86_64-linux-gnux32",
"x86_64-pc-linux-gnux32"};
static const char *const X32LibDirs[] = {"/libx32", "/lib"};
@@ -2498,26 +2508,24 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
"loongarch64-linux-gnu", "loongarch64-unknown-linux-gnu"};
static const char *const M68kLibDirs[] = {"/lib"};
- static const char *const M68kTriples[] = {
- "m68k-linux-gnu", "m68k-unknown-linux-gnu", "m68k-suse-linux"};
+ static const char *const M68kTriples[] = {"m68k-unknown-linux-gnu",
+ "m68k-suse-linux"};
static const char *const MIPSLibDirs[] = {"/libo32", "/lib"};
static const char *const MIPSTriples[] = {
"mips-linux-gnu", "mips-mti-linux", "mips-mti-linux-gnu",
"mips-img-linux-gnu", "mipsisa32r6-linux-gnu"};
static const char *const MIPSELLibDirs[] = {"/libo32", "/lib"};
- static const char *const MIPSELTriples[] = {
- "mipsel-linux-gnu", "mips-img-linux-gnu", "mipsisa32r6el-linux-gnu"};
+ static const char *const MIPSELTriples[] = {"mipsel-linux-gnu",
+ "mips-img-linux-gnu"};
static const char *const MIPS64LibDirs[] = {"/lib64", "/lib"};
static const char *const MIPS64Triples[] = {
- "mips64-linux-gnu", "mips-mti-linux-gnu",
- "mips-img-linux-gnu", "mips64-linux-gnuabi64",
+ "mips-mti-linux-gnu", "mips-img-linux-gnu", "mips64-linux-gnuabi64",
"mipsisa64r6-linux-gnu", "mipsisa64r6-linux-gnuabi64"};
static const char *const MIPS64ELLibDirs[] = {"/lib64", "/lib"};
static const char *const MIPS64ELTriples[] = {
- "mips64el-linux-gnu", "mips-mti-linux-gnu",
- "mips-img-linux-gnu", "mips64el-linux-gnuabi64",
+ "mips-mti-linux-gnu", "mips-img-linux-gnu", "mips64el-linux-gnuabi64",
"mipsisa64r6el-linux-gnu", "mipsisa64r6el-linux-gnuabi64"};
static const char *const MIPSN32LibDirs[] = {"/lib32"};
@@ -2532,32 +2540,28 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
static const char *const PPCLibDirs[] = {"/lib32", "/lib"};
static const char *const PPCTriples[] = {
- "powerpc-linux-gnu", "powerpc-unknown-linux-gnu", "powerpc-linux-gnuspe",
+ "powerpc-unknown-linux-gnu",
// On 32-bit PowerPC systems running SUSE Linux, gcc is configured as a
// 64-bit compiler which defaults to "-m32", hence "powerpc64-suse-linux".
"powerpc64-suse-linux", "powerpc-montavista-linuxspe"};
static const char *const PPCLELibDirs[] = {"/lib32", "/lib"};
- static const char *const PPCLETriples[] = {"powerpcle-linux-gnu",
- "powerpcle-unknown-linux-gnu",
+ static const char *const PPCLETriples[] = {"powerpcle-unknown-linux-gnu",
"powerpcle-linux-musl"};
static const char *const PPC64LibDirs[] = {"/lib64", "/lib"};
- static const char *const PPC64Triples[] = {
- "powerpc64-linux-gnu", "powerpc64-unknown-linux-gnu",
- "powerpc64-suse-linux", "ppc64-redhat-linux"};
+ static const char *const PPC64Triples[] = {"powerpc64-unknown-linux-gnu",
+ "powerpc64-suse-linux",
+ "ppc64-redhat-linux"};
static const char *const PPC64LELibDirs[] = {"/lib64", "/lib"};
static const char *const PPC64LETriples[] = {
- "powerpc64le-linux-gnu", "powerpc64le-unknown-linux-gnu",
- "powerpc64le-none-linux-gnu", "powerpc64le-suse-linux",
- "ppc64le-redhat-linux"};
+ "powerpc64le-unknown-linux-gnu", "powerpc64le-none-linux-gnu",
+ "powerpc64le-suse-linux", "ppc64le-redhat-linux"};
static const char *const RISCV32LibDirs[] = {"/lib32", "/lib"};
static const char *const RISCV32Triples[] = {"riscv32-unknown-linux-gnu",
- "riscv32-linux-gnu",
"riscv32-unknown-elf"};
static const char *const RISCV64LibDirs[] = {"/lib64", "/lib"};
static const char *const RISCV64Triples[] = {"riscv64-unknown-linux-gnu",
- "riscv64-linux-gnu",
"riscv64-unknown-elf"};
static const char *const SPARCv8LibDirs[] = {"/lib32", "/lib"};
@@ -2569,9 +2573,8 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
static const char *const SystemZLibDirs[] = {"/lib64", "/lib"};
static const char *const SystemZTriples[] = {
- "s390x-linux-gnu", "s390x-unknown-linux-gnu", "s390x-ibm-linux-gnu",
- "s390x-suse-linux", "s390x-redhat-linux"};
-
+ "s390x-unknown-linux-gnu", "s390x-ibm-linux-gnu", "s390x-suse-linux",
+ "s390x-redhat-linux"};
using std::begin;
using std::end;
@@ -2691,6 +2694,7 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
case llvm::Triple::thumb:
LibDirs.append(begin(ARMLibDirs), end(ARMLibDirs));
if (TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHF ||
+ TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHFT64 ||
TargetTriple.getEnvironment() == llvm::Triple::MuslEABIHF ||
TargetTriple.getEnvironment() == llvm::Triple::EABIHF) {
TripleAliases.append(begin(ARMHFTriples), end(ARMHFTriples));
@@ -2702,6 +2706,7 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
case llvm::Triple::thumbeb:
LibDirs.append(begin(ARMebLibDirs), end(ARMebLibDirs));
if (TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHF ||
+ TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHFT64 ||
TargetTriple.getEnvironment() == llvm::Triple::MuslEABIHF ||
TargetTriple.getEnvironment() == llvm::Triple::EABIHF) {
TripleAliases.append(begin(ARMebHFTriples), end(ARMebHFTriples));
@@ -3050,9 +3055,7 @@ Generic_GCC::Generic_GCC(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: ToolChain(D, Triple, Args), GCCInstallation(D),
CudaInstallation(D, Triple, Args), RocmInstallation(D, Triple, Args) {
- getProgramPaths().push_back(getDriver().getInstalledDir());
- if (getDriver().getInstalledDir() != getDriver().Dir)
- getProgramPaths().push_back(getDriver().Dir);
+ getProgramPaths().push_back(getDriver().Dir);
}
Generic_GCC::~Generic_GCC() {}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp
index ccb36a6c846c..c35b0febb262 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp
@@ -10,6 +10,7 @@
#include "AMDGPU.h"
#include "CommonArgs.h"
#include "HIPUtility.h"
+#include "SPIRV.h"
#include "clang/Basic/Cuda.h"
#include "clang/Basic/TargetID.h"
#include "clang/Driver/Compilation.h"
@@ -142,6 +143,11 @@ void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
if (IsThinLTO)
LldArgs.push_back(Args.MakeArgString("-plugin-opt=-force-import-all"));
+ for (const Arg *A : Args.filtered(options::OPT_mllvm)) {
+ LldArgs.push_back(
+ Args.MakeArgString(Twine("-plugin-opt=") + A->getValue(0)));
+ }
+
if (C.getDriver().isSaveTempsEnabled())
LldArgs.push_back("-save-temps");
@@ -188,6 +194,33 @@ void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
Lld, LldArgs, Inputs, Output));
}
+// For SPIR-V, the inputs to the job are device AMDGCN SPIR-V flavoured bitcode
+// and the output is either a compiled SPIR-V binary or bitcode (-emit-llvm). It
+// calls llvm-link and then the llvm-spirv translator. Once the SPIR-V BE is
+// promoted from experimental, we will switch to using that. TODO: consider
+// whether we want to run any targeted optimisations over the IR here, over
+// generic SPIR-V.
+void AMDGCN::Linker::constructLinkAndEmitSpirvCommand(
+ Compilation &C, const JobAction &JA, const InputInfoList &Inputs,
+ const InputInfo &Output, const llvm::opt::ArgList &Args) const {
+ assert(!Inputs.empty() && "Must have at least one input.");
+
+ constructLlvmLinkCommand(C, JA, Inputs, Output, Args);
+
+ // Linked BC is now in Output
+
+ // Emit SPIR-V binary.
+ llvm::opt::ArgStringList TrArgs{
+ "--spirv-max-version=1.6",
+ "--spirv-ext=+all",
+ "--spirv-allow-extra-diexpressions",
+ "--spirv-allow-unknown-intrinsics",
+ "--spirv-lower-const-expr",
+ "--spirv-preserve-auxdata",
+ "--spirv-debug-info-version=nonsemantic-shader-200"};
+ SPIRV::constructTranslateCommand(C, *this, JA, Output, Output, TrArgs);
+}
+
// For amdgcn the inputs of the linker job are device bitcode and output is
// either an object file or bitcode (-emit-llvm). It calls llvm-link, opt,
// llc, then lld steps.
@@ -209,6 +242,9 @@ void AMDGCN::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (JA.getType() == types::TY_LLVM_BC)
return constructLlvmLinkCommand(C, JA, Inputs, Output, Args);
+ if (getToolChain().getTriple().isSPIRV())
+ return constructLinkAndEmitSpirvCommand(C, JA, Inputs, Output, Args);
+
return constructLldCommand(C, JA, Inputs, Output, Args);
}
@@ -265,6 +301,13 @@ void HIPAMDToolChain::addClangTargetOptions(
CC1Args.push_back("-fapply-global-visibility-to-externs");
}
+  // For SPIR-V we embed the command line into the generated binary, in order
+  // to retrieve it at JIT time and be able to do target-specific compilation
+  // with options that match the user-supplied ones.
+ if (getTriple().isSPIRV() &&
+ !DriverArgs.hasArg(options::OPT_fembed_bitcode_marker))
+ CC1Args.push_back("-fembed-bitcode=marker");
+
for (auto BCFile : getDeviceLibs(DriverArgs)) {
CC1Args.push_back(BCFile.ShouldInternalize ? "-mlink-builtin-bitcode"
: "-mlink-bitcode-file");
@@ -298,11 +341,13 @@ HIPAMDToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
}
Tool *HIPAMDToolChain::buildLinker() const {
- assert(getTriple().getArch() == llvm::Triple::amdgcn);
+ assert(getTriple().getArch() == llvm::Triple::amdgcn ||
+ getTriple().getArch() == llvm::Triple::spirv64);
return new tools::AMDGCN::Linker(*this);
}
void HIPAMDToolChain::addClangWarningOptions(ArgStringList &CC1Args) const {
+ AMDGPUToolChain::addClangWarningOptions(CC1Args);
HostTC.addClangWarningOptions(CC1Args);
}
@@ -352,7 +397,9 @@ VersionTuple HIPAMDToolChain::computeMSVCVersion(const Driver *D,
llvm::SmallVector<ToolChain::BitCodeLibraryInfo, 12>
HIPAMDToolChain::getDeviceLibs(const llvm::opt::ArgList &DriverArgs) const {
llvm::SmallVector<BitCodeLibraryInfo, 12> BCLibs;
- if (DriverArgs.hasArg(options::OPT_nogpulib))
+ if (DriverArgs.hasArg(options::OPT_nogpulib) ||
+ (getTriple().getArch() == llvm::Triple::spirv64 &&
+ getTriple().getVendor() == llvm::Triple::AMD))
return {};
ArgStringList LibraryPaths;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.h
index d81a9733014c..c31894e22c5c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.h
@@ -40,6 +40,10 @@ private:
const InputInfoList &Inputs,
const InputInfo &Output,
const llvm::opt::ArgList &Args) const;
+ void constructLinkAndEmitSpirvCommand(Compilation &C, const JobAction &JA,
+ const InputInfoList &Inputs,
+ const InputInfo &Output,
+ const llvm::opt::ArgList &Args) const;
};
} // end namespace AMDGCN
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.cpp
index a144b28057f4..bdbcf9109129 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.cpp
@@ -193,7 +193,7 @@ void HIPSPVToolChain::AddHIPIncludeArgs(const ArgList &DriverArgs,
StringRef hipPath = DriverArgs.getLastArgValue(options::OPT_hip_path_EQ);
if (hipPath.empty()) {
- getDriver().Diag(diag::err_drv_hipspv_no_hip_path) << 1 << "'-nogpuinc'";
+ getDriver().Diag(diag::err_drv_hipspv_no_hip_path);
return;
}
SmallString<128> P(hipPath);
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp
index f692458b775d..f32a23f111e4 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp
@@ -7,15 +7,27 @@
//===----------------------------------------------------------------------===//
#include "HIPUtility.h"
+#include "Clang.h"
#include "CommonArgs.h"
#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Options.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/MD5.h"
+#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
+#include <deque>
+#include <set>
+using namespace clang;
using namespace clang::driver;
using namespace clang::driver::tools;
using namespace llvm::opt;
+using llvm::dyn_cast;
#if defined(_WIN32) || defined(_WIN64)
#define NULL_FILE "nul"
@@ -36,6 +48,169 @@ static std::string normalizeForBundler(const llvm::Triple &T,
: T.normalize();
}
+// Collect undefined __hip_fatbin* and __hip_gpubin_handle* symbols from all
+// input object or archive files.
+class HIPUndefinedFatBinSymbols {
+public:
+ HIPUndefinedFatBinSymbols(const Compilation &C)
+ : C(C), DiagID(C.getDriver().getDiags().getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "Error collecting HIP undefined fatbin symbols: %0")),
+ Quiet(C.getArgs().hasArg(options::OPT__HASH_HASH_HASH)),
+ Verbose(C.getArgs().hasArg(options::OPT_v)) {
+ populateSymbols();
+ if (Verbose) {
+ for (const auto &Name : FatBinSymbols)
+ llvm::errs() << "Found undefined HIP fatbin symbol: " << Name << "\n";
+ for (const auto &Name : GPUBinHandleSymbols)
+ llvm::errs() << "Found undefined HIP gpubin handle symbol: " << Name
+ << "\n";
+ }
+ }
+
+ const std::set<std::string> &getFatBinSymbols() const {
+ return FatBinSymbols;
+ }
+
+ const std::set<std::string> &getGPUBinHandleSymbols() const {
+ return GPUBinHandleSymbols;
+ }
+
+private:
+ const Compilation &C;
+ unsigned DiagID;
+ bool Quiet;
+ bool Verbose;
+ std::set<std::string> FatBinSymbols;
+ std::set<std::string> GPUBinHandleSymbols;
+ std::set<std::string> DefinedFatBinSymbols;
+ std::set<std::string> DefinedGPUBinHandleSymbols;
+ const std::string FatBinPrefix = "__hip_fatbin";
+ const std::string GPUBinHandlePrefix = "__hip_gpubin_handle";
+
+ void populateSymbols() {
+ std::deque<const Action *> WorkList;
+ std::set<const Action *> Visited;
+
+ for (const auto &Action : C.getActions())
+ WorkList.push_back(Action);
+
+ while (!WorkList.empty()) {
+ const Action *CurrentAction = WorkList.front();
+ WorkList.pop_front();
+
+ if (!CurrentAction || !Visited.insert(CurrentAction).second)
+ continue;
+
+ if (const auto *IA = dyn_cast<InputAction>(CurrentAction)) {
+ std::string ID = IA->getId().str();
+ if (!ID.empty()) {
+ ID = llvm::utohexstr(llvm::MD5Hash(ID), /*LowerCase=*/true);
+ FatBinSymbols.insert((FatBinPrefix + Twine('_') + ID).str());
+ GPUBinHandleSymbols.insert(
+ (GPUBinHandlePrefix + Twine('_') + ID).str());
+ continue;
+ }
+ if (IA->getInputArg().getNumValues() == 0)
+ continue;
+ const char *Filename = IA->getInputArg().getValue();
+ if (!Filename)
+ continue;
+ auto BufferOrErr = llvm::MemoryBuffer::getFile(Filename);
+        // The input action could be options for the linker; therefore, ignore
+        // it if it cannot be read. If it turns out to be a file that cannot be
+        // read, the error will be caught by the linker.
+ if (!BufferOrErr)
+ continue;
+
+ processInput(BufferOrErr.get()->getMemBufferRef());
+ } else
+ WorkList.insert(WorkList.end(), CurrentAction->getInputs().begin(),
+ CurrentAction->getInputs().end());
+ }
+ }
+
+ void processInput(const llvm::MemoryBufferRef &Buffer) {
+ // Try processing as object file first.
+ auto ObjFileOrErr = llvm::object::ObjectFile::createObjectFile(Buffer);
+ if (ObjFileOrErr) {
+ processSymbols(**ObjFileOrErr);
+ return;
+ }
+
+ // Then try processing as archive files.
+ llvm::consumeError(ObjFileOrErr.takeError());
+ auto ArchiveOrErr = llvm::object::Archive::create(Buffer);
+ if (ArchiveOrErr) {
+ llvm::Error Err = llvm::Error::success();
+ llvm::object::Archive &Archive = *ArchiveOrErr.get();
+ for (auto &Child : Archive.children(Err)) {
+ auto ChildBufOrErr = Child.getMemoryBufferRef();
+ if (ChildBufOrErr)
+ processInput(*ChildBufOrErr);
+ else
+ errorHandler(ChildBufOrErr.takeError());
+ }
+
+ if (Err)
+ errorHandler(std::move(Err));
+ return;
+ }
+
+ // Ignore other files.
+ llvm::consumeError(ArchiveOrErr.takeError());
+ }
+
+ void processSymbols(const llvm::object::ObjectFile &Obj) {
+ for (const auto &Symbol : Obj.symbols()) {
+ auto FlagOrErr = Symbol.getFlags();
+ if (!FlagOrErr) {
+ errorHandler(FlagOrErr.takeError());
+ continue;
+ }
+
+ auto NameOrErr = Symbol.getName();
+ if (!NameOrErr) {
+ errorHandler(NameOrErr.takeError());
+ continue;
+ }
+ llvm::StringRef Name = *NameOrErr;
+
+ bool isUndefined =
+ FlagOrErr.get() & llvm::object::SymbolRef::SF_Undefined;
+ bool isFatBinSymbol = Name.starts_with(FatBinPrefix);
+ bool isGPUBinHandleSymbol = Name.starts_with(GPUBinHandlePrefix);
+
+ // Handling for defined symbols
+ if (!isUndefined) {
+ if (isFatBinSymbol) {
+ DefinedFatBinSymbols.insert(Name.str());
+ FatBinSymbols.erase(Name.str());
+ } else if (isGPUBinHandleSymbol) {
+ DefinedGPUBinHandleSymbols.insert(Name.str());
+ GPUBinHandleSymbols.erase(Name.str());
+ }
+ continue;
+ }
+
+ // Add undefined symbols if they are not in the defined sets
+ if (isFatBinSymbol &&
+ DefinedFatBinSymbols.find(Name.str()) == DefinedFatBinSymbols.end())
+ FatBinSymbols.insert(Name.str());
+ else if (isGPUBinHandleSymbol &&
+ DefinedGPUBinHandleSymbols.find(Name.str()) ==
+ DefinedGPUBinHandleSymbols.end())
+ GPUBinHandleSymbols.insert(Name.str());
+ }
+ }
+
+ void errorHandler(llvm::Error Err) {
+ if (Quiet)
+ return;
+ C.getDriver().Diag(DiagID) << llvm::toString(std::move(Err));
+ }
+};
+
// Construct a clang-offload-bundler command to bundle code objects for
// different devices into a HIP fat binary.
void HIP::constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
@@ -84,11 +259,7 @@ void HIP::constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
Args.MakeArgString(std::string("-output=").append(Output));
BundlerArgs.push_back(BundlerOutputArg);
- if (Args.hasFlag(options::OPT_offload_compress,
- options::OPT_no_offload_compress, false))
- BundlerArgs.push_back("-compress");
- if (Args.hasArg(options::OPT_v))
- BundlerArgs.push_back("-verbose");
+ addOffloadCompressArgs(Args, BundlerArgs);
const char *Bundler = Args.MakeArgString(
T.getToolChain().GetProgramPath("clang-offload-bundler"));
@@ -130,26 +301,84 @@ void HIP::constructGenerateObjFileFromHIPFatBinary(
auto HostTriple =
C.getSingleOffloadToolChain<Action::OFK_Host>()->getTriple();
+ HIPUndefinedFatBinSymbols Symbols(C);
+
+ std::string PrimaryHipFatbinSymbol;
+ std::string PrimaryGpuBinHandleSymbol;
+ bool FoundPrimaryHipFatbinSymbol = false;
+ bool FoundPrimaryGpuBinHandleSymbol = false;
+
+ std::vector<std::string> AliasHipFatbinSymbols;
+ std::vector<std::string> AliasGpuBinHandleSymbols;
+
+ // Iterate through symbols to find the primary ones and collect others for
+ // aliasing
+ for (const auto &Symbol : Symbols.getFatBinSymbols()) {
+ if (!FoundPrimaryHipFatbinSymbol) {
+ PrimaryHipFatbinSymbol = Symbol;
+ FoundPrimaryHipFatbinSymbol = true;
+ } else
+ AliasHipFatbinSymbols.push_back(Symbol);
+ }
+
+ for (const auto &Symbol : Symbols.getGPUBinHandleSymbols()) {
+ if (!FoundPrimaryGpuBinHandleSymbol) {
+ PrimaryGpuBinHandleSymbol = Symbol;
+ FoundPrimaryGpuBinHandleSymbol = true;
+ } else
+ AliasGpuBinHandleSymbols.push_back(Symbol);
+ }
+
// Add MC directives to embed target binaries. We ensure that each
// section and image is 16-byte aligned. This is not mandatory, but
// increases the likelihood of data to be aligned with a cache block
// in several main host machines.
ObjStream << "# HIP Object Generator\n";
ObjStream << "# *** Automatically generated by Clang ***\n";
- if (HostTriple.isWindowsMSVCEnvironment()) {
- ObjStream << " .section .hip_fatbin, \"dw\"\n";
- } else {
- ObjStream << " .protected __hip_fatbin\n";
- ObjStream << " .type __hip_fatbin,@object\n";
- ObjStream << " .section .hip_fatbin,\"a\",@progbits\n";
+ if (FoundPrimaryGpuBinHandleSymbol) {
+ // Define the first gpubin handle symbol
+ if (HostTriple.isWindowsMSVCEnvironment())
+ ObjStream << " .section .hip_gpubin_handle,\"dw\"\n";
+ else {
+ ObjStream << " .protected " << PrimaryGpuBinHandleSymbol << "\n";
+ ObjStream << " .type " << PrimaryGpuBinHandleSymbol << ",@object\n";
+ ObjStream << " .section .hip_gpubin_handle,\"aw\"\n";
+ }
+ ObjStream << " .globl " << PrimaryGpuBinHandleSymbol << "\n";
+ ObjStream << " .p2align 3\n"; // Align 8
+ ObjStream << PrimaryGpuBinHandleSymbol << ":\n";
+ ObjStream << " .zero 8\n"; // Size 8
+
+ // Generate alias directives for other gpubin handle symbols
+ for (const auto &AliasSymbol : AliasGpuBinHandleSymbols) {
+ ObjStream << " .globl " << AliasSymbol << "\n";
+ ObjStream << " .set " << AliasSymbol << "," << PrimaryGpuBinHandleSymbol
+ << "\n";
+ }
+ }
+ if (FoundPrimaryHipFatbinSymbol) {
+ // Define the first fatbin symbol
+ if (HostTriple.isWindowsMSVCEnvironment())
+ ObjStream << " .section .hip_fatbin,\"dw\"\n";
+ else {
+ ObjStream << " .protected " << PrimaryHipFatbinSymbol << "\n";
+ ObjStream << " .type " << PrimaryHipFatbinSymbol << ",@object\n";
+ ObjStream << " .section .hip_fatbin,\"a\",@progbits\n";
+ }
+ ObjStream << " .globl " << PrimaryHipFatbinSymbol << "\n";
+ ObjStream << " .p2align " << llvm::Log2(llvm::Align(HIPCodeObjectAlign))
+ << "\n";
+ // Generate alias directives for other fatbin symbols
+ for (const auto &AliasSymbol : AliasHipFatbinSymbols) {
+ ObjStream << " .globl " << AliasSymbol << "\n";
+ ObjStream << " .set " << AliasSymbol << "," << PrimaryHipFatbinSymbol
+ << "\n";
+ }
+ ObjStream << PrimaryHipFatbinSymbol << ":\n";
+ ObjStream << " .incbin ";
+ llvm::sys::printArg(ObjStream, BundleFile, /*Quote=*/true);
+ ObjStream << "\n";
}
- ObjStream << " .globl __hip_fatbin\n";
- ObjStream << " .p2align " << llvm::Log2(llvm::Align(HIPCodeObjectAlign))
- << "\n";
- ObjStream << "__hip_fatbin:\n";
- ObjStream << " .incbin ";
- llvm::sys::printArg(ObjStream, BundleFile, /*Quote=*/true);
- ObjStream << "\n";
if (HostTriple.isOSLinux() && HostTriple.isOSBinFormatELF())
ObjStream << " .section .note.GNU-stack, \"\", @progbits\n";
ObjStream.flush();
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.cpp
index c6ad862b2294..8286e3be2180 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.cpp
@@ -98,9 +98,49 @@ std::optional<std::string> tryParseProfile(StringRef Profile) {
else if (llvm::getAsUnsignedInteger(Parts[2], 0, Minor))
return std::nullopt;
- // dxil-unknown-shadermodel-hull
+ // Determine DXIL version using the minor version number of Shader
+ // Model version specified in target profile. Prior to decoupling DXIL version
+ // numbering from that of Shader Model DXIL version 1.Y corresponds to SM 6.Y.
+ // E.g., dxilv1.Y-unknown-shadermodelX.Y-hull
llvm::Triple T;
- T.setArch(Triple::ArchType::dxil);
+ Triple::SubArchType SubArch = llvm::Triple::NoSubArch;
+ switch (Minor) {
+ case 0:
+ SubArch = llvm::Triple::DXILSubArch_v1_0;
+ break;
+ case 1:
+ SubArch = llvm::Triple::DXILSubArch_v1_1;
+ break;
+ case 2:
+ SubArch = llvm::Triple::DXILSubArch_v1_2;
+ break;
+ case 3:
+ SubArch = llvm::Triple::DXILSubArch_v1_3;
+ break;
+ case 4:
+ SubArch = llvm::Triple::DXILSubArch_v1_4;
+ break;
+ case 5:
+ SubArch = llvm::Triple::DXILSubArch_v1_5;
+ break;
+ case 6:
+ SubArch = llvm::Triple::DXILSubArch_v1_6;
+ break;
+ case 7:
+ SubArch = llvm::Triple::DXILSubArch_v1_7;
+ break;
+ case 8:
+ SubArch = llvm::Triple::DXILSubArch_v1_8;
+ break;
+ case OfflineLibMinor:
+ // Always consider minor version x as the latest supported DXIL version
+ SubArch = llvm::Triple::LatestDXILSubArch;
+ break;
+ default:
+ // No DXIL Version corresponding to specified Shader Model version found
+ return std::nullopt;
+ }
+ T.setArch(Triple::ArchType::dxil, SubArch);
T.setOSName(Triple::getOSTypeName(Triple::OSType::ShaderModel).str() +
VersionTuple(Major, Minor).getAsString());
T.setEnvironment(Kind);
@@ -218,14 +258,28 @@ HLSLToolChain::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
}
}
if (A->getOption().getID() == options::OPT_emit_pristine_llvm) {
- // Translate fcgl into -S -emit-llvm and -disable-llvm-passes.
- DAL->AddFlagArg(nullptr, Opts.getOption(options::OPT_S));
+ // Translate -fcgl into -emit-llvm and -disable-llvm-passes.
DAL->AddFlagArg(nullptr, Opts.getOption(options::OPT_emit_llvm));
DAL->AddFlagArg(nullptr,
Opts.getOption(options::OPT_disable_llvm_passes));
A->claim();
continue;
}
+ if (A->getOption().getID() == options::OPT_dxc_hlsl_version) {
+ // Translate -HV into -std for llvm
+ // depending on the value given
+ LangStandard::Kind LangStd = LangStandard::getHLSLLangKind(A->getValue());
+ if (LangStd != LangStandard::lang_unspecified) {
+ LangStandard l = LangStandard::getLangStandardForKind(LangStd);
+ DAL->AddSeparateArg(nullptr, Opts.getOption(options::OPT_std_EQ),
+ l.getName());
+ } else {
+ getDriver().Diag(diag::err_drv_invalid_value) << "HV" << A->getValue();
+ }
+
+ A->claim();
+ continue;
+ }
DAL->append(A);
}
@@ -240,9 +294,7 @@ HLSLToolChain::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
if (!DAL->hasArg(options::OPT_O_Group)) {
DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_O), "3");
}
- // FIXME: add validation for enable_16bit_types should be after HLSL 2018 and
- // shader model 6.2.
- // See: https://github.com/llvm/llvm-project/issues/57876
+
return DAL;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.h
index 7b775b897431..b2a31aabab7d 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.h
@@ -52,6 +52,9 @@ public:
static std::optional<std::string> parseTargetProfile(StringRef TargetProfile);
bool requiresValidation(llvm::opt::DerivedArgList &Args) const;
+  // Set the default DWARF version to 4, since DXIL uses DWARF version 4.
+ unsigned GetDefaultDwarfVersion() const override { return 4; }
+
private:
mutable std::unique_ptr<tools::hlsl::Validator> Validator;
};
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Haiku.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Haiku.cpp
index e0d94035823f..346652a7e4bd 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Haiku.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Haiku.cpp
@@ -25,7 +25,7 @@ void haiku::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *LinkingOutput) const {
const auto &ToolChain = static_cast<const Haiku &>(getToolChain());
const Driver &D = ToolChain.getDriver();
- const llvm::Triple::ArchType Arch = ToolChain.getArch();
+ const llvm::Triple &Triple = ToolChain.getTriple();
const bool Static = Args.hasArg(options::OPT_static);
const bool Shared = Args.hasArg(options::OPT_shared);
ArgStringList CmdArgs;
@@ -61,8 +61,11 @@ void haiku::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (!Shared)
CmdArgs.push_back("--no-undefined");
- if (Arch == llvm::Triple::riscv64)
+ if (Triple.isRISCV64()) {
CmdArgs.push_back("-X");
+ if (Args.hasArg(options::OPT_mno_relax))
+ CmdArgs.push_back("--no-relax");
+ }
assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
if (Output.isFilename()) {
@@ -80,7 +83,7 @@ void haiku::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
Args.addAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
- options::OPT_s, options::OPT_t, options::OPT_r});
+ options::OPT_s, options::OPT_t});
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
if (D.isUsingLTO()) {
@@ -104,7 +107,7 @@ void haiku::Linker::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_r)) {
// Use the static OpenMP runtime with -static-openmp
bool StaticOpenMP = Args.hasArg(options::OPT_static_openmp) && !Static;
- addOpenMPRuntime(CmdArgs, ToolChain, Args, StaticOpenMP);
+ addOpenMPRuntime(C, CmdArgs, ToolChain, Args, StaticOpenMP);
if (D.CCCIsCXX() && ToolChain.ShouldLinkCXXStdlib(Args))
ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp
index d1eed931be5f..76cedf312d68 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp
@@ -54,8 +54,7 @@ static void handleHVXTargetFeatures(const Driver &D, const ArgList &Args,
auto makeFeature = [&Args](Twine T, bool Enable) -> StringRef {
const std::string &S = T.str();
StringRef Opt(S);
- if (Opt.ends_with("="))
- Opt = Opt.drop_back(1);
+ Opt.consume_back("=");
if (Opt.starts_with("mno-"))
Opt = Opt.drop_front(4);
else if (Opt.starts_with("m"))
@@ -295,9 +294,10 @@ constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
bool IncStartFiles = !Args.hasArg(options::OPT_nostartfiles);
bool IncDefLibs = !Args.hasArg(options::OPT_nodefaultlibs);
bool UseG0 = false;
- const char *Exec = Args.MakeArgString(HTC.GetLinkerPath());
- bool UseLLD = (llvm::sys::path::filename(Exec).equals_insensitive("ld.lld") ||
- llvm::sys::path::stem(Exec).equals_insensitive("ld.lld"));
+ bool UseLLD = false;
+ const char *Exec = Args.MakeArgString(HTC.GetLinkerPath(&UseLLD));
+ UseLLD = UseLLD || llvm::sys::path::filename(Exec).ends_with("ld.lld") ||
+ llvm::sys::path::stem(Exec).ends_with("ld.lld");
bool UseShared = IsShared && !IsStatic;
StringRef CpuVer = toolchains::HexagonToolChain::GetTargetCPUVersion(Args);
@@ -367,18 +367,21 @@ constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
options::OPT_t, options::OPT_u_Group});
AddLinkerInputs(HTC, Inputs, Args, CmdArgs, JA);
+ ToolChain::UnwindLibType UNW = HTC.GetUnwindLibType(Args);
+
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
if (NeedsSanitizerDeps) {
linkSanitizerRuntimeDeps(HTC, Args, CmdArgs);
- CmdArgs.push_back("-lunwind");
+ if (UNW != ToolChain::UNW_None)
+ CmdArgs.push_back("-lunwind");
}
if (NeedsXRayDeps)
linkXRayRuntimeDeps(HTC, Args, CmdArgs);
- CmdArgs.push_back("-lclang_rt.builtins-hexagon");
if (!Args.hasArg(options::OPT_nolibc))
CmdArgs.push_back("-lc");
+ CmdArgs.push_back("-lclang_rt.builtins-hexagon");
}
if (D.CCCIsCXX()) {
if (HTC.ShouldLinkCXXStdlib(Args))
@@ -412,7 +415,7 @@ constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
const std::string MCpuSuffix = "/" + CpuVer.str();
const std::string MCpuG0Suffix = MCpuSuffix + "/G0";
const std::string RootDir =
- HTC.getHexagonTargetDir(D.InstalledDir, D.PrefixDirs) + "/";
+ HTC.getHexagonTargetDir(D.Dir, D.PrefixDirs) + "/";
const std::string StartSubDir =
"hexagon/lib" + (UseG0 ? MCpuG0Suffix : MCpuSuffix);
@@ -570,8 +573,7 @@ void HexagonToolChain::getHexagonLibraryPaths(const ArgList &Args,
std::copy(D.PrefixDirs.begin(), D.PrefixDirs.end(),
std::back_inserter(RootDirs));
- std::string TargetDir = getHexagonTargetDir(D.getInstalledDir(),
- D.PrefixDirs);
+ std::string TargetDir = getHexagonTargetDir(D.Dir, D.PrefixDirs);
if (!llvm::is_contained(RootDirs, TargetDir))
RootDirs.push_back(TargetDir);
@@ -598,8 +600,7 @@ void HexagonToolChain::getHexagonLibraryPaths(const ArgList &Args,
HexagonToolChain::HexagonToolChain(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args)
: Linux(D, Triple, Args) {
- const std::string TargetDir = getHexagonTargetDir(D.getInstalledDir(),
- D.PrefixDirs);
+ const std::string TargetDir = getHexagonTargetDir(D.Dir, D.PrefixDirs);
// Note: Generic_GCC::Generic_GCC adds InstalledDir and getDriver().Dir to
// program paths
@@ -621,13 +622,24 @@ HexagonToolChain::~HexagonToolChain() {}
void HexagonToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
CXXStdlibType Type = GetCXXStdlibType(Args);
+ ToolChain::UnwindLibType UNW = GetUnwindLibType(Args);
+ if (UNW != ToolChain::UNW_None && UNW != ToolChain::UNW_CompilerRT) {
+ const Arg *A = Args.getLastArg(options::OPT_unwindlib_EQ);
+ if (A) {
+ getDriver().Diag(diag::err_drv_unsupported_unwind_for_platform)
+ << A->getValue() << getTriple().normalize();
+ return;
+ }
+ }
+
switch (Type) {
case ToolChain::CST_Libcxx:
CmdArgs.push_back("-lc++");
if (Args.hasArg(options::OPT_fexperimental_library))
CmdArgs.push_back("-lc++experimental");
CmdArgs.push_back("-lc++abi");
- CmdArgs.push_back("-lunwind");
+ if (UNW != ToolChain::UNW_None)
+ CmdArgs.push_back("-lunwind");
break;
case ToolChain::CST_Libstdcxx:
@@ -729,8 +741,7 @@ void HexagonToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
if (HasSysRoot)
return;
- std::string TargetDir = getHexagonTargetDir(D.getInstalledDir(),
- D.PrefixDirs);
+ std::string TargetDir = getHexagonTargetDir(D.Dir, D.PrefixDirs);
addExternCSystemInclude(DriverArgs, CC1Args, TargetDir + "/hexagon/include");
}
@@ -745,7 +756,7 @@ void HexagonToolChain::addLibCxxIncludePaths(
addLibStdCXXIncludePaths("/usr/include/c++/v1", "", "", DriverArgs,
CC1Args);
else {
- std::string TargetDir = getHexagonTargetDir(D.InstalledDir, D.PrefixDirs);
+ std::string TargetDir = getHexagonTargetDir(D.Dir, D.PrefixDirs);
addLibStdCXXIncludePaths(TargetDir + "/hexagon/include/c++/v1", "", "",
DriverArgs, CC1Args);
}
@@ -754,7 +765,7 @@ void HexagonToolChain::addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
const Driver &D = getDriver();
- std::string TargetDir = getHexagonTargetDir(D.InstalledDir, D.PrefixDirs);
+ std::string TargetDir = getHexagonTargetDir(D.Dir, D.PrefixDirs);
addLibStdCXXIncludePaths(TargetDir + "/hexagon/include/c++", "", "",
DriverArgs, CC1Args);
}
@@ -801,7 +812,6 @@ StringRef HexagonToolChain::GetTargetCPUVersion(const ArgList &Args) {
CpuArg = A;
StringRef CPU = CpuArg ? CpuArg->getValue() : GetDefaultCPU();
- if (CPU.starts_with("hexagon"))
- return CPU.substr(sizeof("hexagon") - 1);
+ CPU.consume_front("hexagon");
return CPU;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
index 4300a2bdff17..35bf39069605 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
@@ -86,6 +86,9 @@ std::string Linux::getMultiarchTriple(const Driver &D,
case llvm::Triple::aarch64:
if (IsAndroid)
return "aarch64-linux-android";
+ if (hasEffectiveTriple() &&
+ getEffectiveTriple().getEnvironment() == llvm::Triple::PAuthTest)
+ return "aarch64-linux-pauthtest";
return "aarch64-linux-gnu";
case llvm::Triple::aarch64_be:
return "aarch64_be-linux-gnu";
@@ -237,11 +240,19 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
ExtraOpts.push_back("relro");
}
- // Android ARM/AArch64 use max-page-size=4096 to reduce VMA usage. Note, lld
- // from 11 onwards default max-page-size to 65536 for both ARM and AArch64.
- if ((Triple.isARM() || Triple.isAArch64()) && Triple.isAndroid()) {
- ExtraOpts.push_back("-z");
- ExtraOpts.push_back("max-page-size=4096");
+ // Note, lld from 11 onwards default max-page-size to 65536 for both ARM and
+ // AArch64.
+ if (Triple.isAndroid()) {
+ if (Triple.isARM()) {
+ // Android ARM uses max-page-size=4096 to reduce VMA usage.
+ ExtraOpts.push_back("-z");
+ ExtraOpts.push_back("max-page-size=4096");
+ } else if (Triple.isAArch64() || Triple.getArch() == llvm::Triple::x86_64) {
+ // Android AArch64 uses max-page-size=16384 to support 4k/16k page sizes.
+ // Android emulates a 16k page size for app testing on x86_64 machines.
+ ExtraOpts.push_back("-z");
+ ExtraOpts.push_back("max-page-size=16384");
+ }
}
if (GCCInstallation.getParentLibPath().contains("opt/rh/"))
@@ -377,7 +388,7 @@ std::string Linux::computeSysRoot() const {
if (getTriple().isAndroid()) {
// Android toolchains typically include a sysroot at ../sysroot relative to
// the clang binary.
- const StringRef ClangDir = getDriver().getInstalledDir();
+ const StringRef ClangDir = getDriver().Dir;
std::string AndroidSysRootPath = (ClangDir + "/../sysroot").str();
if (getVFS().exists(AndroidSysRootPath))
return AndroidSysRootPath;
@@ -497,6 +508,7 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
case llvm::Triple::thumbeb: {
const bool HF =
Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
+ Triple.getEnvironment() == llvm::Triple::GNUEABIHFT64 ||
tools::arm::getARMFloatABI(*this, Args) == tools::arm::FloatABI::Hard;
LibDir = "lib";
@@ -560,16 +572,12 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
Loader =
(tools::ppc::hasPPCAbiArg(Args, "elfv1")) ? "ld64.so.1" : "ld64.so.2";
break;
- case llvm::Triple::riscv32: {
- StringRef ABIName = tools::riscv::getRISCVABI(Args, Triple);
- LibDir = "lib";
- Loader = ("ld-linux-riscv32-" + ABIName + ".so.1").str();
- break;
- }
+ case llvm::Triple::riscv32:
case llvm::Triple::riscv64: {
+ StringRef ArchName = llvm::Triple::getArchTypeName(Arch);
StringRef ABIName = tools::riscv::getRISCVABI(Args, Triple);
LibDir = "lib";
- Loader = ("ld-linux-riscv64-" + ABIName + ".so.1").str();
+ Loader = ("ld-linux-" + ArchName + "-" + ABIName + ".so.1").str();
break;
}
case llvm::Triple::sparc:
@@ -807,7 +815,7 @@ SanitizerMask Linux::getSupportedSanitizers() const {
if (IsX86_64 || IsMIPS64 || IsAArch64 || IsPowerPC64 || IsSystemZ ||
IsLoongArch64 || IsRISCV64)
Res |= SanitizerKind::Thread;
- if (IsX86_64 || IsSystemZ)
+ if (IsX86_64 || IsSystemZ || IsPowerPC64)
Res |= SanitizerKind::KernelMemory;
if (IsX86_64 || IsMIPS64 || IsAArch64 || IsX86 || IsMIPS || IsArmArch ||
IsPowerPC64 || IsHexagon || IsLoongArch64 || IsRISCV64)
@@ -818,6 +826,9 @@ SanitizerMask Linux::getSupportedSanitizers() const {
if (IsX86_64 || IsAArch64) {
Res |= SanitizerKind::KernelHWAddress;
}
+ if (IsX86_64)
+ Res |= SanitizerKind::NumericalStability;
+
// Work around "Cannot represent a difference across sections".
if (getTriple().getArch() == llvm::Triple::ppc64)
Res &= ~SanitizerKind::Function;
@@ -834,25 +845,6 @@ void Linux::addProfileRTLibs(const llvm::opt::ArgList &Args,
ToolChain::addProfileRTLibs(Args, CmdArgs);
}
-llvm::DenormalMode
-Linux::getDefaultDenormalModeForType(const llvm::opt::ArgList &DriverArgs,
- const JobAction &JA,
- const llvm::fltSemantics *FPType) const {
- switch (getTriple().getArch()) {
- case llvm::Triple::x86:
- case llvm::Triple::x86_64: {
- std::string Unused;
- // DAZ and FTZ are turned on in crtfastmath.o
- if (!DriverArgs.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles) &&
- isFastMathRuntimeAvailable(DriverArgs, Unused))
- return llvm::DenormalMode::getPreserveSign();
- return llvm::DenormalMode::getIEEE();
- }
- default:
- return llvm::DenormalMode::getIEEE();
- }
-}
-
void Linux::addExtraOpts(llvm::opt::ArgStringList &CmdArgs) const {
for (const auto &Opt : ExtraOpts)
CmdArgs.push_back(Opt.c_str());
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h
index 524391743090..2d9e674e50a6 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h
@@ -59,10 +59,6 @@ public:
std::vector<std::string> ExtraOpts;
- llvm::DenormalMode getDefaultDenormalModeForType(
- const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
- const llvm::fltSemantics *FPType = nullptr) const override;
-
const char *getDefaultLinker() const override;
protected:
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
index 396522225158..ca266e3e1d1d 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
@@ -79,6 +79,11 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(
Args.MakeArgString(std::string("-out:") + Output.getFilename()));
+ if (Args.hasArg(options::OPT_marm64x))
+ CmdArgs.push_back("-machine:arm64x");
+ else if (TC.getTriple().isWindowsArm64EC())
+ CmdArgs.push_back("-machine:arm64ec");
+
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles) &&
!C.getDriver().IsCLMode() && !C.getDriver().IsFlangMode()) {
CmdArgs.push_back("-defaultlib:libcmt");
@@ -129,6 +134,10 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(std::string("-libpath:") + WindowsSdkLibPath));
}
+ if (!C.getDriver().IsCLMode() && Args.hasArg(options::OPT_L))
+ for (const auto &LibPath : Args.getAllArgValues(options::OPT_L))
+ CmdArgs.push_back(Args.MakeArgString("-libpath:" + LibPath));
+
if (C.getDriver().IsFlangMode()) {
addFortranRuntimeLibraryPath(TC, Args, CmdArgs);
addFortranRuntimeLibs(TC, Args, CmdArgs);
@@ -149,10 +158,6 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (TC.getVFS().exists(CRTPath))
CmdArgs.push_back(Args.MakeArgString("-libpath:" + CRTPath));
- if (!C.getDriver().IsCLMode() && Args.hasArg(options::OPT_L))
- for (const auto &LibPath : Args.getAllArgValues(options::OPT_L))
- CmdArgs.push_back(Args.MakeArgString("-libpath:" + LibPath));
-
CmdArgs.push_back("-nologo");
if (Args.hasArg(options::OPT_g_Group, options::OPT__SLASH_Z7))
@@ -423,9 +428,7 @@ MSVCToolChain::MSVCToolChain(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: ToolChain(D, Triple, Args), CudaInstallation(D, Triple, Args),
RocmInstallation(D, Triple, Args) {
- getProgramPaths().push_back(getDriver().getInstalledDir());
- if (getDriver().getInstalledDir() != getDriver().Dir)
- getProgramPaths().push_back(getDriver().Dir);
+ getProgramPaths().push_back(getDriver().Dir);
std::optional<llvm::StringRef> VCToolsDir, VCToolsVersion;
if (Arg *A = Args.getLastArg(options::OPT__SLASH_vctoolsdir))
@@ -858,7 +861,7 @@ static void TranslateOptArg(Arg *A, llvm::opt::DerivedArgList &DAL,
DAL.AddJoinedArg(A, Opts.getOption(options::OPT_O), "s");
} else if (OptChar == '2' || OptChar == 'x') {
DAL.AddFlagArg(A, Opts.getOption(options::OPT_fbuiltin));
- DAL.AddJoinedArg(A, Opts.getOption(options::OPT_O), "2");
+ DAL.AddJoinedArg(A, Opts.getOption(options::OPT_O), "3");
}
if (SupportsForcingFramePointer &&
!DAL.hasArgNoClaim(options::OPT_fno_omit_frame_pointer))
@@ -877,6 +880,7 @@ static void TranslateOptArg(Arg *A, llvm::opt::DerivedArgList &DAL,
DAL.AddFlagArg(A, Opts.getOption(options::OPT_finline_hint_functions));
break;
case '2':
+ case '3':
DAL.AddFlagArg(A, Opts.getOption(options::OPT_finline_functions));
break;
}
@@ -898,7 +902,7 @@ static void TranslateOptArg(Arg *A, llvm::opt::DerivedArgList &DAL,
DAL.AddJoinedArg(A, Opts.getOption(options::OPT_O), "s");
break;
case 't':
- DAL.AddJoinedArg(A, Opts.getOption(options::OPT_O), "2");
+ DAL.AddJoinedArg(A, Opts.getOption(options::OPT_O), "3");
break;
case 'y': {
bool OmitFramePointer = true;
@@ -1019,4 +1023,7 @@ void MSVCToolChain::addClangTargetOptions(
if (DriverArgs.hasFlag(options::OPT_fno_rtti, options::OPT_frtti,
/*Default=*/false))
CC1Args.push_back("-D_HAS_STATIC_RTTI=0");
+
+ if (Arg *A = DriverArgs.getLastArgNoClaim(options::OPT_marm64x))
+ A->ignoreTargetSpecific();
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h
index 48369e030aad..3950a8ed38e8 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h
@@ -61,9 +61,8 @@ public:
/// formats, and to DWARF otherwise. Users can use -gcodeview and -gdwarf to
/// override the default.
llvm::codegenoptions::DebugInfoFormat getDefaultDebugFormat() const override {
- return getTriple().isOSBinFormatMachO()
- ? llvm::codegenoptions::DIF_DWARF
- : llvm::codegenoptions::DIF_CodeView;
+ return getTriple().isOSBinFormatCOFF() ? llvm::codegenoptions::DIF_CodeView
+ : llvm::codegenoptions::DIF_DWARF;
}
/// Set the debugger tuning to "default", since we're definitely not tuning
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
index 067758c05e97..c81a7ed17029 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
@@ -132,7 +132,10 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("thumb2pe");
break;
case llvm::Triple::aarch64:
- CmdArgs.push_back("arm64pe");
+ if (TC.getEffectiveTriple().isWindowsArm64EC())
+ CmdArgs.push_back("arm64ecpe");
+ else
+ CmdArgs.push_back("arm64pe");
break;
default:
D.Diag(diag::err_target_unknown_triple) << TC.getEffectiveTriple().str();
@@ -460,7 +463,7 @@ findClangRelativeSysroot(const Driver &D, const llvm::Triple &LiteralTriple,
Subdirs.back() += "-w64-mingw32";
Subdirs.emplace_back(T.getArchName());
Subdirs.back() += "-w64-mingw32ucrt";
- StringRef ClangRoot = llvm::sys::path::parent_path(D.getInstalledDir());
+ StringRef ClangRoot = llvm::sys::path::parent_path(D.Dir);
StringRef Sep = llvm::sys::path::get_separator();
for (StringRef CandidateSubdir : Subdirs) {
if (llvm::sys::fs::is_directory(ClangRoot + Sep + CandidateSubdir)) {
@@ -484,10 +487,10 @@ toolchains::MinGW::MinGW(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: ToolChain(D, Triple, Args), CudaInstallation(D, Triple, Args),
RocmInstallation(D, Triple, Args) {
- getProgramPaths().push_back(getDriver().getInstalledDir());
+ getProgramPaths().push_back(getDriver().Dir);
std::string InstallBase =
- std::string(llvm::sys::path::parent_path(getDriver().getInstalledDir()));
+ std::string(llvm::sys::path::parent_path(getDriver().Dir));
// The sequence for detecting a sysroot here should be kept in sync with
// the testTriple function below.
llvm::Triple LiteralTriple = getLiteralTriple(D, getTriple());
@@ -723,6 +726,30 @@ void toolchains::MinGW::addClangTargetOptions(
}
}
+ // Default to not enabling sized deallocation, but let user provided options
+ // override it.
+ //
+ // If using sized deallocation, user code that invokes delete will end up
+ // calling delete(void*,size_t). If the user wanted to override the
+ // operator delete(void*), there may be a fallback operator
+ // delete(void*,size_t) which calls the regular operator delete(void*).
+ //
+ // However, if the C++ standard library is linked in the form of a DLL,
+ // and the fallback operator delete(void*,size_t) is within this DLL (which is
+ // the case for libc++ at least) it will only redirect towards the library's
+ // default operator delete(void*), not towards the user's provided operator
+ // delete(void*).
+ //
+ // This issue can be avoided, if the fallback operators are linked statically
+ // into the callers, even if the C++ standard library is linked as a DLL.
+ //
+ // This is meant as a temporary workaround until libc++ implements this
+ // technique, which is tracked in
+ // https://github.com/llvm/llvm-project/issues/96899.
+ if (!DriverArgs.hasArgNoClaim(options::OPT_fsized_deallocation,
+ options::OPT_fno_sized_deallocation))
+ CC1Args.push_back("-fno-sized-deallocation");
+
CC1Args.push_back("-fno-use-init-array");
for (auto Opt : {options::OPT_mthreads, options::OPT_mwindows,
@@ -793,8 +820,7 @@ static bool testTriple(const Driver &D, const llvm::Triple &Triple,
if (D.SysRoot.size())
return true;
llvm::Triple LiteralTriple = getLiteralTriple(D, Triple);
- std::string InstallBase =
- std::string(llvm::sys::path::parent_path(D.getInstalledDir()));
+ std::string InstallBase = std::string(llvm::sys::path::parent_path(D.Dir));
if (llvm::ErrorOr<std::string> TargetSubdir =
findClangRelativeSysroot(D, LiteralTriple, Triple, SubdirName))
return true;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.cpp
index 4183eccceedb..f61ae471b86d 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.cpp
@@ -57,8 +57,7 @@ void MipsLLVMToolChain::AddClangSystemIncludeArgs(
const auto &Callback = Multilibs.includeDirsCallback();
if (Callback) {
for (const auto &Path : Callback(SelectedMultilibs.back()))
- addExternCSystemIncludeIfExists(DriverArgs, CC1Args,
- D.getInstalledDir() + Path);
+ addExternCSystemIncludeIfExists(DriverArgs, CC1Args, D.Dir + Path);
}
}
@@ -70,7 +69,7 @@ std::string MipsLLVMToolChain::computeSysRoot() const {
if (!getDriver().SysRoot.empty())
return getDriver().SysRoot + SelectedMultilibs.back().osSuffix();
- const std::string InstalledDir(getDriver().getInstalledDir());
+ const std::string InstalledDir(getDriver().Dir);
std::string SysRootPath =
InstalledDir + "/../sysroot" + SelectedMultilibs.back().osSuffix();
if (llvm::sys::fs::exists(SysRootPath))
@@ -97,7 +96,7 @@ void MipsLLVMToolChain::addLibCxxIncludePaths(
llvm::opt::ArgStringList &CC1Args) const {
if (const auto &Callback = Multilibs.includeDirsCallback()) {
for (std::string Path : Callback(SelectedMultilibs.back())) {
- Path = getDriver().getInstalledDir() + Path + "/c++/v1";
+ Path = getDriver().Dir + Path + "/c++/v1";
if (llvm::sys::fs::exists(Path)) {
addSystemInclude(DriverArgs, CC1Args, Path);
return;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp
index 240bf5764b9c..d54f22882949 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp
@@ -240,8 +240,11 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
break;
}
- if (Triple.isRISCV())
+ if (Triple.isRISCV()) {
CmdArgs.push_back("-X");
+ if (Args.hasArg(options::OPT_mno_relax))
+ CmdArgs.push_back("--no-relax");
+ }
assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
if (Output.isFilename()) {
@@ -268,7 +271,7 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
Args.addAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
- options::OPT_s, options::OPT_t, options::OPT_r});
+ options::OPT_s, options::OPT_t});
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
@@ -308,7 +311,7 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_r)) {
// Use the static OpenMP runtime with -static-openmp
bool StaticOpenMP = Args.hasArg(options::OPT_static_openmp) && !Static;
- addOpenMPRuntime(CmdArgs, ToolChain, Args, StaticOpenMP);
+ addOpenMPRuntime(C, CmdArgs, ToolChain, Args, StaticOpenMP);
if (D.CCCIsCXX()) {
if (ToolChain.ShouldLinkCXXStdlib(Args))
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.cpp
index 1e50c9d71d59..4ceafa9e7139 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.cpp
@@ -274,7 +274,7 @@ std::string OHOS::computeSysRoot() const {
std::string SysRoot =
!getDriver().SysRoot.empty()
? getDriver().SysRoot
- : makePath({getDriver().getInstalledDir(), "..", "..", "sysroot"});
+ : makePath({getDriver().Dir, "..", "..", "sysroot"});
if (!llvm::sys::fs::exists(SysRoot))
return std::string();
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp
index fd6aa4d7e684..3770471bae7c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp
@@ -111,6 +111,7 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *LinkingOutput) const {
const auto &ToolChain = static_cast<const OpenBSD &>(getToolChain());
const Driver &D = ToolChain.getDriver();
+ const llvm::Triple &Triple = ToolChain.getTriple();
const llvm::Triple::ArchType Arch = ToolChain.getArch();
const bool Static = Args.hasArg(options::OPT_static);
const bool Shared = Args.hasArg(options::OPT_shared);
@@ -160,8 +161,11 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Nopie || Profiling)
CmdArgs.push_back("-nopie");
- if (Arch == llvm::Triple::riscv64)
+ if (Triple.isRISCV64()) {
CmdArgs.push_back("-X");
+ if (Args.hasArg(options::OPT_mno_relax))
+ CmdArgs.push_back("--no-relax");
+ }
assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
if (Output.isFilename()) {
@@ -192,8 +196,8 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
- Args.addAllArgs(CmdArgs, {options::OPT_T_Group, options::OPT_s,
- options::OPT_t, options::OPT_r});
+ Args.addAllArgs(CmdArgs,
+ {options::OPT_T_Group, options::OPT_s, options::OPT_t});
if (D.isUsingLTO()) {
assert(!Inputs.empty() && "Must have at least one input.");
@@ -217,7 +221,7 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_r)) {
// Use the static OpenMP runtime with -static-openmp
bool StaticOpenMP = Args.hasArg(options::OPT_static_openmp) && !Static;
- addOpenMPRuntime(CmdArgs, ToolChain, Args, StaticOpenMP);
+ addOpenMPRuntime(C, CmdArgs, ToolChain, Args, StaticOpenMP);
if (D.CCCIsCXX()) {
if (ToolChain.ShouldLinkCXXStdlib(Args))
@@ -371,7 +375,8 @@ std::string OpenBSD::getCompilerRT(const ArgList &Args, StringRef Component,
if (Component == "builtins") {
SmallString<128> Path(getDriver().SysRoot);
llvm::sys::path::append(Path, "/usr/lib/libcompiler_rt.a");
- return std::string(Path);
+ if (getVFS().exists(Path))
+ return std::string(Path);
}
SmallString<128> P(getDriver().ResourceDir);
std::string CRTBasename =
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp
index 8ba8b80cfec7..974e486a0082 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp
@@ -118,11 +118,11 @@ void toolchains::PS5CPU::addSanitizerArgs(const ArgList &Args,
CmdArgs.push_back(arg("SceThreadSanitizer_nosubmission_stub_weak"));
}
-void tools::PScpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
+void tools::PS4cpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
auto &TC = static_cast<const toolchains::PS4PS5Base &>(getToolChain());
const Driver &D = TC.getDriver();
ArgStringList CmdArgs;
@@ -155,14 +155,120 @@ void tools::PScpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const bool UseLTO = D.isUsingLTO();
const bool UseJMC =
Args.hasFlag(options::OPT_fjmc, options::OPT_fno_jmc, false);
- const bool IsPS4 = TC.getTriple().isPS4();
- const char *PS4LTOArgs = "";
+ const char *LTOArgs = "";
auto AddCodeGenFlag = [&](Twine Flag) {
- if (IsPS4)
- PS4LTOArgs = Args.MakeArgString(Twine(PS4LTOArgs) + " " + Flag);
+ LTOArgs = Args.MakeArgString(Twine(LTOArgs) + " " + Flag);
+ };
+
+ if (UseLTO) {
+ // We default to creating the arange section, but LTO does not. Enable it
+ // here.
+ AddCodeGenFlag("-generate-arange-section");
+
+ // This tells LTO to perform JustMyCode instrumentation.
+ if (UseJMC)
+ AddCodeGenFlag("-enable-jmc-instrument");
+
+ if (Arg *A = Args.getLastArg(options::OPT_fcrash_diagnostics_dir))
+ AddCodeGenFlag(Twine("-crash-diagnostics-dir=") + A->getValue());
+
+ StringRef Parallelism = getLTOParallelism(Args, D);
+ if (!Parallelism.empty())
+ AddCodeGenFlag(Twine("-threads=") + Parallelism);
+
+ const char *Prefix = nullptr;
+ if (D.getLTOMode() == LTOK_Thin)
+ Prefix = "-lto-thin-debug-options=";
+ else if (D.getLTOMode() == LTOK_Full)
+ Prefix = "-lto-debug-options=";
else
- CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=") + Flag));
+ llvm_unreachable("new LTO mode?");
+
+ CmdArgs.push_back(Args.MakeArgString(Twine(Prefix) + LTOArgs));
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs))
+ TC.addSanitizerArgs(Args, CmdArgs, "-l", "");
+
+ if (D.isUsingLTO() && Args.hasArg(options::OPT_funified_lto)) {
+ if (D.getLTOMode() == LTOK_Thin)
+ CmdArgs.push_back("--lto=thin");
+ else if (D.getLTOMode() == LTOK_Full)
+ CmdArgs.push_back("--lto=full");
+ }
+
+ Args.addAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
+ options::OPT_s, options::OPT_t});
+
+ if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
+ CmdArgs.push_back("--no-demangle");
+
+ AddLinkerInputs(TC, Inputs, Args, CmdArgs, JA);
+
+ if (Args.hasArg(options::OPT_pthread)) {
+ CmdArgs.push_back("-lpthread");
+ }
+
+ if (UseJMC) {
+ CmdArgs.push_back("--whole-archive");
+ CmdArgs.push_back("-lSceDbgJmc");
+ CmdArgs.push_back("--no-whole-archive");
+ }
+
+ if (Args.hasArg(options::OPT_fuse_ld_EQ)) {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << "-fuse-ld" << TC.getTriple().str();
+ }
+
+ std::string LdName = TC.qualifyPSCmdName(TC.getLinkerBaseName());
+ const char *Exec = Args.MakeArgString(TC.GetProgramPath(LdName.c_str()));
+
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileUTF8(),
+ Exec, CmdArgs, Inputs, Output));
+}
+
+void tools::PS5cpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ auto &TC = static_cast<const toolchains::PS4PS5Base &>(getToolChain());
+ const Driver &D = TC.getDriver();
+ ArgStringList CmdArgs;
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo". Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ if (Args.hasArg(options::OPT_pie))
+ CmdArgs.push_back("-pie");
+
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+ if (Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back("--shared");
+
+ assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ }
+
+ const bool UseLTO = D.isUsingLTO();
+ const bool UseJMC =
+ Args.hasFlag(options::OPT_fjmc, options::OPT_fno_jmc, false);
+
+ auto AddCodeGenFlag = [&](Twine Flag) {
+ CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=") + Flag));
};
if (UseLTO) {
@@ -178,24 +284,8 @@ void tools::PScpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
AddCodeGenFlag(Twine("-crash-diagnostics-dir=") + A->getValue());
StringRef Parallelism = getLTOParallelism(Args, D);
- if (!Parallelism.empty()) {
- if (IsPS4)
- AddCodeGenFlag(Twine("-threads=") + Parallelism);
- else
- CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=jobs=") + Parallelism));
- }
-
- if (IsPS4) {
- const char *Prefix = nullptr;
- if (D.getLTOMode() == LTOK_Thin)
- Prefix = "-lto-thin-debug-options=";
- else if (D.getLTOMode() == LTOK_Full)
- Prefix = "-lto-debug-options=";
- else
- llvm_unreachable("new LTO mode?");
-
- CmdArgs.push_back(Args.MakeArgString(Twine(Prefix) + PS4LTOArgs));
- }
+ if (!Parallelism.empty())
+ CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=jobs=") + Parallelism));
}
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs))
@@ -208,9 +298,8 @@ void tools::PScpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("--lto=full");
}
- Args.addAllArgs(CmdArgs,
- {options::OPT_L, options::OPT_T_Group, options::OPT_s,
- options::OPT_t, options::OPT_r});
+ Args.addAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
+ options::OPT_s, options::OPT_t});
if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
CmdArgs.push_back("--no-demangle");
@@ -223,10 +312,7 @@ void tools::PScpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (UseJMC) {
CmdArgs.push_back("--whole-archive");
- if (IsPS4)
- CmdArgs.push_back("-lSceDbgJmc");
- else
- CmdArgs.push_back("-lSceJmc_nosubmission");
+ CmdArgs.push_back("-lSceJmc_nosubmission");
CmdArgs.push_back("--no-whole-archive");
}
@@ -322,14 +408,18 @@ Tool *toolchains::PS4CPU::buildAssembler() const {
return new tools::PScpu::Assembler(*this);
}
+Tool *toolchains::PS4CPU::buildLinker() const {
+ return new tools::PS4cpu::Linker(*this);
+}
+
Tool *toolchains::PS5CPU::buildAssembler() const {
// PS5 does not support an external assembler.
getDriver().Diag(clang::diag::err_no_external_assembler);
return nullptr;
}
-Tool *toolchains::PS4PS5Base::buildLinker() const {
- return new tools::PScpu::Linker(*this);
+Tool *toolchains::PS5CPU::buildLinker() const {
+ return new tools::PS5cpu::Linker(*this);
}
SanitizerMask toolchains::PS4PS5Base::getSupportedSanitizers() const {
@@ -359,6 +449,12 @@ void toolchains::PS4PS5Base::addClangTargetOptions(
CC1Args.push_back("-fno-use-init-array");
+ // Default to `hidden` visibility for PS5.
+ if (getTriple().isPS5() &&
+ !DriverArgs.hasArg(options::OPT_fvisibility_EQ,
+ options::OPT_fvisibility_ms_compat))
+ CC1Args.push_back("-fvisibility=hidden");
+
// Default to -fvisibility-global-new-delete=source for PS5.
if (getTriple().isPS5() &&
!DriverArgs.hasArg(options::OPT_fvisibility_global_new_delete_EQ,
@@ -377,11 +473,15 @@ void toolchains::PS4PS5Base::addClangTargetOptions(
else
CC1Args.push_back("-fvisibility-dllexport=protected");
+ // For PS4 we override the visibilty of globals definitions without
+ // dllimport or dllexport annotations.
if (DriverArgs.hasArg(options::OPT_fvisibility_nodllstorageclass_EQ))
DriverArgs.AddLastArg(CC1Args,
options::OPT_fvisibility_nodllstorageclass_EQ);
- else
+ else if (getTriple().isPS4())
CC1Args.push_back("-fvisibility-nodllstorageclass=hidden");
+ else
+ CC1Args.push_back("-fvisibility-nodllstorageclass=keep");
if (DriverArgs.hasArg(options::OPT_fvisibility_externs_dllimport_EQ))
DriverArgs.AddLastArg(CC1Args,
@@ -389,12 +489,16 @@ void toolchains::PS4PS5Base::addClangTargetOptions(
else
CC1Args.push_back("-fvisibility-externs-dllimport=default");
+ // For PS4 we override the visibilty of external globals without
+ // dllimport or dllexport annotations.
if (DriverArgs.hasArg(
options::OPT_fvisibility_externs_nodllstorageclass_EQ))
DriverArgs.AddLastArg(
CC1Args, options::OPT_fvisibility_externs_nodllstorageclass_EQ);
- else
+ else if (getTriple().isPS4())
CC1Args.push_back("-fvisibility-externs-nodllstorageclass=default");
+ else
+ CC1Args.push_back("-fvisibility-externs-nodllstorageclass=keep");
}
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h
index fee80e77462f..0be90183c637 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h
@@ -38,10 +38,12 @@ public:
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
+} // namespace PScpu
+namespace PS4cpu {
class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
- Linker(const ToolChain &TC) : Tool("PScpu::Linker", "linker", TC) {}
+ Linker(const ToolChain &TC) : Tool("PS4cpu::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -51,7 +53,23 @@ public:
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
-} // namespace PScpu
+} // namespace PS4cpu
+
+namespace PS5cpu {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
+public:
+ Linker(const ToolChain &TC) : Tool("PS5cpu::Linker", "linker", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // namespace PS5cpu
+
} // namespace tools
namespace toolchains {
@@ -110,9 +128,6 @@ public:
const char *Suffix) const = 0;
virtual const char *getProfileRTLibName() const = 0;
-protected:
- Tool *buildLinker() const override;
-
private:
// We compute the SDK root dir in the ctor, and use it later.
std::string SDKRootDir;
@@ -143,6 +158,7 @@ public:
protected:
Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
};
// PS5-specific Toolchain class.
@@ -168,6 +184,7 @@ public:
protected:
Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
index 5e4fa4d5331f..624099d21ae1 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
@@ -86,6 +86,11 @@ RISCVToolChain::GetUnwindLibType(const llvm::opt::ArgList &Args) const {
return ToolChain::UNW_None;
}
+ToolChain::UnwindTableLevel RISCVToolChain::getDefaultUnwindTableLevel(
+ const llvm::opt::ArgList &Args) const {
+ return UnwindTableLevel::None;
+}
+
void RISCVToolChain::addClangTargetOptions(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
@@ -156,6 +161,9 @@ void RISCV::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (!D.SysRoot.empty())
CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+ if (Args.hasArg(options::OPT_mno_relax))
+ CmdArgs.push_back("--no-relax");
+
bool IsRV64 = ToolChain.getArch() == llvm::Triple::riscv64;
CmdArgs.push_back("-m");
if (IsRV64) {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h
index cec817ef7190..fa0aa265d842 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h
@@ -28,6 +28,8 @@ public:
RuntimeLibType GetDefaultRuntimeLibType() const override;
UnwindLibType
GetUnwindLibType(const llvm::opt::ArgList &Args) const override;
+ UnwindTableLevel
+ getDefaultUnwindTableLevel(const llvm::opt::ArgList &Args) const override;
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.cpp
index 27de69550853..ce900600cbee 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "SPIRV.h"
#include "CommonArgs.h"
+#include "clang/Basic/Version.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/InputInfo.h"
@@ -32,8 +33,15 @@ void SPIRV::constructTranslateCommand(Compilation &C, const Tool &T,
CmdArgs.append({"-o", Output.getFilename()});
- const char *Exec =
- C.getArgs().MakeArgString(T.getToolChain().GetProgramPath("llvm-spirv"));
+ // Try to find "llvm-spirv-<LLVM_VERSION_MAJOR>". Otherwise, fall back to
+ // plain "llvm-spirv".
+ using namespace std::string_literals;
+ auto VersionedTool = "llvm-spirv-"s + std::to_string(LLVM_VERSION_MAJOR);
+ std::string ExeCand = T.getToolChain().GetProgramPath(VersionedTool.c_str());
+ if (!llvm::sys::fs::can_execute(ExeCand))
+ ExeCand = T.getToolChain().GetProgramPath("llvm-spirv");
+
+ const char *Exec = C.getArgs().MakeArgString(ExeCand);
C.addCommand(std::make_unique<Command>(JA, T, ResponseFileSupport::None(),
Exec, CmdArgs, Input, Output));
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp
index 200ac46aa534..e82ed2ca79ff 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp
@@ -201,8 +201,7 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
- Args.addAllArgs(CmdArgs,
- {options::OPT_L, options::OPT_T_Group, options::OPT_r});
+ Args.addAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group});
bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
@@ -212,7 +211,7 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// Use the static OpenMP runtime with -static-openmp
bool StaticOpenMP = Args.hasArg(options::OPT_static_openmp) &&
!Args.hasArg(options::OPT_static);
- addOpenMPRuntime(CmdArgs, ToolChain, Args, StaticOpenMP);
+ addOpenMPRuntime(C, CmdArgs, ToolChain, Args, StaticOpenMP);
if (D.CCCIsCXX()) {
if (ToolChain.ShouldLinkCXXStdlib(Args))
@@ -342,6 +341,7 @@ SanitizerMask Solaris::getSupportedSanitizers() const {
Res |= SanitizerKind::PointerCompare;
Res |= SanitizerKind::PointerSubtract;
}
+ Res |= SanitizerKind::SafeStack;
Res |= SanitizerKind::Vptr;
return Res;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp
index 0b16b660364f..60bd97e0ee98 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp
@@ -158,44 +158,46 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- // When optimizing, if wasm-opt is available, run it.
- std::string WasmOptPath;
- if (Args.getLastArg(options::OPT_O_Group)) {
- WasmOptPath = ToolChain.GetProgramPath("wasm-opt");
- if (WasmOptPath == "wasm-opt") {
- WasmOptPath = {};
+ if (Args.hasFlag(options::OPT_wasm_opt, options::OPT_no_wasm_opt, true)) {
+ // When optimizing, if wasm-opt is available, run it.
+ std::string WasmOptPath;
+ if (Args.getLastArg(options::OPT_O_Group)) {
+ WasmOptPath = ToolChain.GetProgramPath("wasm-opt");
+ if (WasmOptPath == "wasm-opt") {
+ WasmOptPath = {};
+ }
}
- }
-
- if (!WasmOptPath.empty()) {
- CmdArgs.push_back("--keep-section=target_features");
- }
-
- C.addCommand(std::make_unique<Command>(JA, *this,
- ResponseFileSupport::AtFileCurCP(),
- Linker, CmdArgs, Inputs, Output));
- if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
if (!WasmOptPath.empty()) {
- StringRef OOpt = "s";
- if (A->getOption().matches(options::OPT_O4) ||
- A->getOption().matches(options::OPT_Ofast))
- OOpt = "4";
- else if (A->getOption().matches(options::OPT_O0))
- OOpt = "0";
- else if (A->getOption().matches(options::OPT_O))
- OOpt = A->getValue();
-
- if (OOpt != "0") {
- const char *WasmOpt = Args.MakeArgString(WasmOptPath);
- ArgStringList OptArgs;
- OptArgs.push_back(Output.getFilename());
- OptArgs.push_back(Args.MakeArgString(llvm::Twine("-O") + OOpt));
- OptArgs.push_back("-o");
- OptArgs.push_back(Output.getFilename());
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), WasmOpt, OptArgs,
- Inputs, Output));
+ CmdArgs.push_back("--keep-section=target_features");
+ }
+
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Linker, CmdArgs, Inputs, Output));
+
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
+ if (!WasmOptPath.empty()) {
+ StringRef OOpt = "s";
+ if (A->getOption().matches(options::OPT_O4) ||
+ A->getOption().matches(options::OPT_Ofast))
+ OOpt = "4";
+ else if (A->getOption().matches(options::OPT_O0))
+ OOpt = "0";
+ else if (A->getOption().matches(options::OPT_O))
+ OOpt = A->getValue();
+
+ if (OOpt != "0") {
+ const char *WasmOpt = Args.MakeArgString(WasmOptPath);
+ ArgStringList OptArgs;
+ OptArgs.push_back(Output.getFilename());
+ OptArgs.push_back(Args.MakeArgString(llvm::Twine("-O") + OOpt));
+ OptArgs.push_back("-o");
+ OptArgs.push_back(Output.getFilename());
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), WasmOpt, OptArgs,
+ Inputs, Output));
+ }
}
}
}
@@ -215,7 +217,7 @@ WebAssembly::WebAssembly(const Driver &D, const llvm::Triple &Triple,
assert(Triple.isArch32Bit() != Triple.isArch64Bit());
- getProgramPaths().push_back(getDriver().getInstalledDir());
+ getProgramPaths().push_back(getDriver().Dir);
auto SysRoot = getDriver().SysRoot;
if (getTriple().getOS() == llvm::Triple::UnknownOS) {
@@ -347,6 +349,23 @@ void WebAssembly::addClangTargetOptions(const ArgList &DriverArgs,
// Backend needs -wasm-enable-eh to enable Wasm EH
CC1Args.push_back("-mllvm");
CC1Args.push_back("-wasm-enable-eh");
+
+ // New Wasm EH spec (adopted in Oct 2023) requires multivalue and
+ // reference-types.
+ if (DriverArgs.hasFlag(options::OPT_mno_multivalue,
+ options::OPT_mmultivalue, false)) {
+ getDriver().Diag(diag::err_drv_argument_not_allowed_with)
+ << "-fwasm-exceptions" << "-mno-multivalue";
+ }
+ if (DriverArgs.hasFlag(options::OPT_mno_reference_types,
+ options::OPT_mreference_types, false)) {
+ getDriver().Diag(diag::err_drv_argument_not_allowed_with)
+ << "-fwasm-exceptions" << "-mno-reference-types";
+ }
+ CC1Args.push_back("-target-feature");
+ CC1Args.push_back("+multivalue");
+ CC1Args.push_back("-target-feature");
+ CC1Args.push_back("+reference-types");
}
for (const Arg *A : DriverArgs.filtered(options::OPT_mllvm)) {
@@ -408,6 +427,23 @@ void WebAssembly::addClangTargetOptions(const ArgList &DriverArgs,
CC1Args.push_back("+exception-handling");
// Backend needs '-exception-model=wasm' to use Wasm EH instructions
CC1Args.push_back("-exception-model=wasm");
+
+ // New Wasm EH spec (adopted in Oct 2023) requires multivalue and
+ // reference-types.
+ if (DriverArgs.hasFlag(options::OPT_mno_multivalue,
+ options::OPT_mmultivalue, false)) {
+ getDriver().Diag(diag::err_drv_argument_not_allowed_with)
+ << "-mllvm -wasm-enable-sjlj" << "-mno-multivalue";
+ }
+ if (DriverArgs.hasFlag(options::OPT_mno_reference_types,
+ options::OPT_mreference_types, false)) {
+ getDriver().Diag(diag::err_drv_argument_not_allowed_with)
+ << "-mllvm -wasm-enable-sjlj" << "-mno-reference-types";
+ }
+ CC1Args.push_back("-target-feature");
+ CC1Args.push_back("+multivalue");
+ CC1Args.push_back("-target-feature");
+ CC1Args.push_back("+reference-types");
}
}
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.cpp
index 96dbf602e7c1..074e0556ecd2 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.cpp
@@ -36,6 +36,12 @@ void ZOS::addClangTargetOptions(const ArgList &DriverArgs,
if (!DriverArgs.hasArgNoClaim(options::OPT_faligned_allocation,
options::OPT_fno_aligned_allocation))
CC1Args.push_back("-faligned-alloc-unavailable");
+
+ // Pass "-fno-sized-deallocation" only when the user hasn't manually enabled
+ // or disabled sized deallocations.
+ if (!DriverArgs.hasArgNoClaim(options::OPT_fsized_deallocation,
+ options::OPT_fno_sized_deallocation))
+ CC1Args.push_back("-fno-sized-deallocation");
}
void zos::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
@@ -325,8 +331,7 @@ void ZOS::AddClangCXXStdlibIncludeArgs(
switch (GetCXXStdlibType(DriverArgs)) {
case ToolChain::CST_Libcxx: {
// <install>/bin/../include/c++/v1
- llvm::SmallString<128> InstallBin =
- llvm::StringRef(getDriver().getInstalledDir());
+ llvm::SmallString<128> InstallBin(getDriver().Dir);
llvm::sys::path::append(InstallBin, "..", "include", "c++", "v1");
TryAddIncludeFromPath(InstallBin, DriverArgs, CC1Args);
break;
diff --git a/contrib/llvm-project/clang/lib/Driver/Types.cpp b/contrib/llvm-project/clang/lib/Driver/Types.cpp
index a7b6b9000e1d..2b9b391c19c9 100644
--- a/contrib/llvm-project/clang/lib/Driver/Types.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Types.cpp
@@ -242,7 +242,9 @@ bool types::isCXX(ID Id) {
case TY_CXXHUHeader:
case TY_PP_CXXHeaderUnit:
case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader:
- case TY_CXXModule: case TY_PP_CXXModule:
+ case TY_CXXModule:
+ case TY_PP_CXXModule:
+ case TY_ModuleFile:
case TY_PP_CLCXX:
case TY_CUDA: case TY_PP_CUDA: case TY_CUDA_DEVICE:
case TY_HIP:
diff --git a/contrib/llvm-project/clang/lib/Edit/RewriteObjCFoundationAPI.cpp b/contrib/llvm-project/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
index d5bf553e2412..81797c8c4dc7 100644
--- a/contrib/llvm-project/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
+++ b/contrib/llvm-project/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
@@ -697,11 +697,7 @@ static bool getLiteralInfo(SourceRange literalRange,
struct Suff {
static bool has(StringRef suff, StringRef &text) {
- if (text.ends_with(suff)) {
- text = text.substr(0, text.size()-suff.size());
- return true;
- }
- return false;
+ return text.consume_back(suff);
}
};
@@ -1004,6 +1000,7 @@ static bool rewriteToNumericBoxedExpression(const ObjCMessageExpr *Msg,
case CK_LValueToRValue:
case CK_NoOp:
case CK_UserDefinedConversion:
+ case CK_HLSLArrayRValue:
break;
case CK_IntegralCast: {
@@ -1087,6 +1084,10 @@ static bool rewriteToNumericBoxedExpression(const ObjCMessageExpr *Msg,
case CK_BooleanToSignedIntegral:
llvm_unreachable("OpenCL-specific cast in Objective-C?");
+ case CK_HLSLVectorTruncation:
+ llvm_unreachable("HLSL-specific cast in Objective-C?");
+ break;
+
case CK_FloatingToFixedPoint:
case CK_FixedPointToFloating:
case CK_FixedPointCast:
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/API.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/API.cpp
index aa7a1e9360f4..ab1108f663de 100644
--- a/contrib/llvm-project/clang/lib/ExtractAPI/API.cpp
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/API.cpp
@@ -13,514 +13,87 @@
//===----------------------------------------------------------------------===//
#include "clang/ExtractAPI/API.h"
-#include "clang/AST/CommentCommandTraits.h"
-#include "clang/AST/CommentLexer.h"
#include "clang/AST/RawCommentList.h"
#include "clang/Index/USRGeneration.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/ErrorHandling.h"
#include <memory>
using namespace clang::extractapi;
using namespace llvm;
-namespace {
+SymbolReference::SymbolReference(const APIRecord *R)
+ : Name(R->Name), USR(R->USR), Record(R) {}
-template <typename RecordTy, typename... CtorArgsTy>
-RecordTy *addTopLevelRecord(DenseMap<StringRef, APIRecord *> &USRLookupTable,
- APISet::RecordMap<RecordTy> &RecordMap,
- StringRef USR, CtorArgsTy &&...CtorArgs) {
- auto Result = RecordMap.insert({USR, nullptr});
-
- // Create the record if it does not already exist
- if (Result.second)
- Result.first->second =
- std::make_unique<RecordTy>(USR, std::forward<CtorArgsTy>(CtorArgs)...);
-
- auto *Record = Result.first->second.get();
- USRLookupTable.insert({USR, Record});
- return Record;
-}
-
-} // namespace
-
-NamespaceRecord *
-APISet::addNamespace(APIRecord *Parent, StringRef Name, StringRef USR,
- PresumedLoc Loc, AvailabilityInfo Availability,
- LinkageInfo Linkage, const DocComment &Comment,
- DeclarationFragments Declaration,
- DeclarationFragments SubHeading, bool IsFromSystemHeader) {
- auto *Record = addTopLevelRecord(
- USRBasedLookupTable, Namespaces, USR, Name, Loc, std::move(Availability),
- Linkage, Comment, Declaration, SubHeading, IsFromSystemHeader);
-
- if (Parent)
- Record->ParentInformation = APIRecord::HierarchyInformation(
- Parent->USR, Parent->Name, Parent->getKind(), Parent);
- return Record;
-}
-
-GlobalVariableRecord *
-APISet::addGlobalVar(StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability, LinkageInfo Linkage,
- const DocComment &Comment, DeclarationFragments Fragments,
- DeclarationFragments SubHeading, bool IsFromSystemHeader) {
- return addTopLevelRecord(USRBasedLookupTable, GlobalVariables, USR, Name, Loc,
- std::move(Availability), Linkage, Comment, Fragments,
- SubHeading, IsFromSystemHeader);
-}
-
-GlobalVariableTemplateRecord *APISet::addGlobalVariableTemplate(
- StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability, LinkageInfo Linkage,
- const DocComment &Comment, DeclarationFragments Declaration,
- DeclarationFragments SubHeading, Template Template,
- bool IsFromSystemHeader) {
- return addTopLevelRecord(USRBasedLookupTable, GlobalVariableTemplates, USR,
- Name, Loc, std::move(Availability), Linkage, Comment,
- Declaration, SubHeading, Template,
- IsFromSystemHeader);
-}
-
-GlobalFunctionRecord *APISet::addGlobalFunction(
- StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability, LinkageInfo Linkage,
- const DocComment &Comment, DeclarationFragments Fragments,
- DeclarationFragments SubHeading, FunctionSignature Signature,
- bool IsFromSystemHeader) {
- return addTopLevelRecord(USRBasedLookupTable, GlobalFunctions, USR, Name, Loc,
- std::move(Availability), Linkage, Comment, Fragments,
- SubHeading, Signature, IsFromSystemHeader);
-}
-
-GlobalFunctionTemplateRecord *APISet::addGlobalFunctionTemplate(
- StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability, LinkageInfo Linkage,
- const DocComment &Comment, DeclarationFragments Declaration,
- DeclarationFragments SubHeading, FunctionSignature Signature,
- Template Template, bool IsFromSystemHeader) {
- return addTopLevelRecord(USRBasedLookupTable, GlobalFunctionTemplates, USR,
- Name, Loc, std::move(Availability), Linkage, Comment,
- Declaration, SubHeading, Signature, Template,
- IsFromSystemHeader);
-}
-
-GlobalFunctionTemplateSpecializationRecord *
-APISet::addGlobalFunctionTemplateSpecialization(
- StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability, LinkageInfo Linkage,
- const DocComment &Comment, DeclarationFragments Declaration,
- DeclarationFragments SubHeading, FunctionSignature Signature,
- bool IsFromSystemHeader) {
- return addTopLevelRecord(
- USRBasedLookupTable, GlobalFunctionTemplateSpecializations, USR, Name,
- Loc, std::move(Availability), Linkage, Comment, Declaration, SubHeading,
- Signature, IsFromSystemHeader);
-}
-
-EnumConstantRecord *APISet::addEnumConstant(EnumRecord *Enum, StringRef Name,
- StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability,
- const DocComment &Comment,
- DeclarationFragments Declaration,
- DeclarationFragments SubHeading,
- bool IsFromSystemHeader) {
- auto Record = std::make_unique<EnumConstantRecord>(
- USR, Name, Loc, std::move(Availability), Comment, Declaration, SubHeading,
- IsFromSystemHeader);
- Record->ParentInformation = APIRecord::HierarchyInformation(
- Enum->USR, Enum->Name, Enum->getKind(), Enum);
- USRBasedLookupTable.insert({USR, Record.get()});
- return Enum->Constants.emplace_back(std::move(Record)).get();
-}
-
-EnumRecord *APISet::addEnum(StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability,
- const DocComment &Comment,
- DeclarationFragments Declaration,
- DeclarationFragments SubHeading,
- bool IsFromSystemHeader) {
- return addTopLevelRecord(USRBasedLookupTable, Enums, USR, Name, Loc,
- std::move(Availability), Comment, Declaration,
- SubHeading, IsFromSystemHeader);
-}
-
-RecordFieldRecord *APISet::addRecordField(
- RecordRecord *Record, StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability, const DocComment &Comment,
- DeclarationFragments Declaration, DeclarationFragments SubHeading,
- APIRecord::RecordKind Kind, bool IsFromSystemHeader) {
- auto RecordField = std::make_unique<RecordFieldRecord>(
- USR, Name, Loc, std::move(Availability), Comment, Declaration, SubHeading,
- Kind, IsFromSystemHeader);
- RecordField->ParentInformation = APIRecord::HierarchyInformation(
- Record->USR, Record->Name, Record->getKind(), Record);
- USRBasedLookupTable.insert({USR, RecordField.get()});
- return Record->Fields.emplace_back(std::move(RecordField)).get();
-}
-
-RecordRecord *APISet::addRecord(StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability,
- const DocComment &Comment,
- DeclarationFragments Declaration,
- DeclarationFragments SubHeading,
- APIRecord::RecordKind Kind,
- bool IsFromSystemHeader) {
- return addTopLevelRecord(USRBasedLookupTable, Records, USR, Name, Loc,
- std::move(Availability), Comment, Declaration,
- SubHeading, Kind, IsFromSystemHeader);
-}
-
-StaticFieldRecord *
-APISet::addStaticField(StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability, LinkageInfo Linkage,
- const DocComment &Comment,
- DeclarationFragments Declaration,
- DeclarationFragments SubHeading, SymbolReference Context,
- AccessControl Access, bool IsFromSystemHeader) {
- return addTopLevelRecord(USRBasedLookupTable, StaticFields, USR, Name, Loc,
- std::move(Availability), Linkage, Comment,
- Declaration, SubHeading, Context, Access,
- IsFromSystemHeader);
-}
-
-CXXFieldRecord *
-APISet::addCXXField(APIRecord *CXXClass, StringRef Name, StringRef USR,
- PresumedLoc Loc, AvailabilityInfo Availability,
- const DocComment &Comment, DeclarationFragments Declaration,
- DeclarationFragments SubHeading, AccessControl Access,
- bool IsFromSystemHeader) {
- auto *Record = addTopLevelRecord(
- USRBasedLookupTable, CXXFields, USR, Name, Loc, std::move(Availability),
- Comment, Declaration, SubHeading, Access, IsFromSystemHeader);
- Record->ParentInformation = APIRecord::HierarchyInformation(
- CXXClass->USR, CXXClass->Name, CXXClass->getKind(), CXXClass);
- return Record;
-}
-
-CXXFieldTemplateRecord *APISet::addCXXFieldTemplate(
- APIRecord *Parent, StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability, const DocComment &Comment,
- DeclarationFragments Declaration, DeclarationFragments SubHeading,
- AccessControl Access, Template Template, bool IsFromSystemHeader) {
- auto *Record =
- addTopLevelRecord(USRBasedLookupTable, CXXFieldTemplates, USR, Name, Loc,
- std::move(Availability), Comment, Declaration,
- SubHeading, Access, Template, IsFromSystemHeader);
- Record->ParentInformation = APIRecord::HierarchyInformation(
- Parent->USR, Parent->Name, Parent->getKind(), Parent);
-
- return Record;
-}
-
-CXXClassRecord *
-APISet::addCXXClass(APIRecord *Parent, StringRef Name, StringRef USR,
- PresumedLoc Loc, AvailabilityInfo Availability,
- const DocComment &Comment, DeclarationFragments Declaration,
- DeclarationFragments SubHeading, APIRecord::RecordKind Kind,
- AccessControl Access, bool IsFromSystemHeader) {
- auto *Record = addTopLevelRecord(
- USRBasedLookupTable, CXXClasses, USR, Name, Loc, std::move(Availability),
- Comment, Declaration, SubHeading, Kind, Access, IsFromSystemHeader);
- if (Parent)
- Record->ParentInformation = APIRecord::HierarchyInformation(
- Parent->USR, Parent->Name, Parent->getKind(), Parent);
- return Record;
-}
-
-ClassTemplateRecord *APISet::addClassTemplate(
- APIRecord *Parent, StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability, const DocComment &Comment,
- DeclarationFragments Declaration, DeclarationFragments SubHeading,
- Template Template, AccessControl Access, bool IsFromSystemHeader) {
- auto *Record =
- addTopLevelRecord(USRBasedLookupTable, ClassTemplates, USR, Name, Loc,
- std::move(Availability), Comment, Declaration,
- SubHeading, Template, Access, IsFromSystemHeader);
- if (Parent)
- Record->ParentInformation = APIRecord::HierarchyInformation(
- Parent->USR, Parent->Name, Parent->getKind(), Parent);
- return Record;
-}
-
-ClassTemplateSpecializationRecord *APISet::addClassTemplateSpecialization(
- APIRecord *Parent, StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability, const DocComment &Comment,
- DeclarationFragments Declaration, DeclarationFragments SubHeading,
- AccessControl Access, bool IsFromSystemHeader) {
- auto *Record =
- addTopLevelRecord(USRBasedLookupTable, ClassTemplateSpecializations, USR,
- Name, Loc, std::move(Availability), Comment,
- Declaration, SubHeading, Access, IsFromSystemHeader);
- if (Parent)
- Record->ParentInformation = APIRecord::HierarchyInformation(
- Parent->USR, Parent->Name, Parent->getKind(), Parent);
- return Record;
-}
-
-ClassTemplatePartialSpecializationRecord *
-APISet::addClassTemplatePartialSpecialization(
- APIRecord *Parent, StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability, const DocComment &Comment,
- DeclarationFragments Declaration, DeclarationFragments SubHeading,
- Template Template, AccessControl Access, bool IsFromSystemHeader) {
- auto *Record = addTopLevelRecord(
- USRBasedLookupTable, ClassTemplatePartialSpecializations, USR, Name, Loc,
- std::move(Availability), Comment, Declaration, SubHeading, Template,
- Access, IsFromSystemHeader);
- if (Parent)
- Record->ParentInformation = APIRecord::HierarchyInformation(
- Parent->USR, Parent->Name, Parent->getKind(), Parent);
- return Record;
-}
-
-GlobalVariableTemplateSpecializationRecord *
-APISet::addGlobalVariableTemplateSpecialization(
- StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability, LinkageInfo Linkage,
- const DocComment &Comment, DeclarationFragments Declaration,
- DeclarationFragments SubHeading, bool IsFromSystemHeader) {
- return addTopLevelRecord(USRBasedLookupTable,
- GlobalVariableTemplateSpecializations, USR, Name,
- Loc, std::move(Availability), Linkage, Comment,
- Declaration, SubHeading, IsFromSystemHeader);
-}
-
-GlobalVariableTemplatePartialSpecializationRecord *
-APISet::addGlobalVariableTemplatePartialSpecialization(
- StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability, LinkageInfo Linkage,
- const DocComment &Comment, DeclarationFragments Declaration,
- DeclarationFragments SubHeading, Template Template,
- bool IsFromSystemHeader) {
- return addTopLevelRecord(
- USRBasedLookupTable, GlobalVariableTemplatePartialSpecializations, USR,
- Name, Loc, std::move(Availability), Linkage, Comment, Declaration,
- SubHeading, Template, IsFromSystemHeader);
-}
-
-ConceptRecord *APISet::addConcept(StringRef Name, StringRef USR,
- PresumedLoc Loc,
- AvailabilityInfo Availability,
- const DocComment &Comment,
- DeclarationFragments Declaration,
- DeclarationFragments SubHeading,
- Template Template, bool IsFromSystemHeader) {
- return addTopLevelRecord(USRBasedLookupTable, Concepts, USR, Name, Loc,
- std::move(Availability), Comment, Declaration,
- SubHeading, Template, IsFromSystemHeader);
-}
-
-CXXMethodRecord *APISet::addCXXInstanceMethod(
- APIRecord *CXXClassRecord, StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability, const DocComment &Comment,
- DeclarationFragments Declaration, DeclarationFragments SubHeading,
- FunctionSignature Signature, AccessControl Access,
- bool IsFromSystemHeader) {
- CXXMethodRecord *Record =
- addTopLevelRecord(USRBasedLookupTable, CXXInstanceMethods, USR, Name, Loc,
- std::move(Availability), Comment, Declaration,
- SubHeading, Signature, Access, IsFromSystemHeader);
-
- Record->ParentInformation = APIRecord::HierarchyInformation(
- CXXClassRecord->USR, CXXClassRecord->Name, CXXClassRecord->getKind(),
- CXXClassRecord);
- return Record;
-}
-
-CXXMethodRecord *APISet::addCXXStaticMethod(
- APIRecord *CXXClassRecord, StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability, const DocComment &Comment,
- DeclarationFragments Declaration, DeclarationFragments SubHeading,
- FunctionSignature Signature, AccessControl Access,
- bool IsFromSystemHeader) {
- CXXMethodRecord *Record =
- addTopLevelRecord(USRBasedLookupTable, CXXStaticMethods, USR, Name, Loc,
- std::move(Availability), Comment, Declaration,
- SubHeading, Signature, Access, IsFromSystemHeader);
-
- Record->ParentInformation = APIRecord::HierarchyInformation(
- CXXClassRecord->USR, CXXClassRecord->Name, CXXClassRecord->getKind(),
- CXXClassRecord);
- return Record;
-}
-
-CXXMethodTemplateRecord *APISet::addCXXMethodTemplate(
- APIRecord *Parent, StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability, const DocComment &Comment,
- DeclarationFragments Declaration, DeclarationFragments SubHeading,
- FunctionSignature Signature, AccessControl Access, Template Template,
- bool IsFromSystemHeader) {
- auto *Record = addTopLevelRecord(USRBasedLookupTable, CXXMethodTemplates, USR,
- Name, Loc, std::move(Availability), Comment,
- Declaration, SubHeading, Signature, Access,
- Template, IsFromSystemHeader);
- Record->ParentInformation = APIRecord::HierarchyInformation(
- Parent->USR, Parent->Name, Parent->getKind(), Parent);
-
- return Record;
-}
-
-CXXMethodTemplateSpecializationRecord *APISet::addCXXMethodTemplateSpec(
- APIRecord *Parent, StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability, const DocComment &Comment,
- DeclarationFragments Declaration, DeclarationFragments SubHeading,
- FunctionSignature Signature, AccessControl Access,
- bool IsFromSystemHeader) {
-
- auto *Record = addTopLevelRecord(
- USRBasedLookupTable, CXXMethodTemplateSpecializations, USR, Name, Loc,
- std::move(Availability), Comment, Declaration, SubHeading, Signature,
- Access, IsFromSystemHeader);
- Record->ParentInformation = APIRecord::HierarchyInformation(
- Parent->USR, Parent->Name, Parent->getKind(), Parent);
-
- return Record;
+APIRecord *APIRecord::castFromRecordContext(const RecordContext *Ctx) {
+ switch (Ctx->getKind()) {
+#define RECORD_CONTEXT(CLASS, KIND) \
+ case KIND: \
+ return static_cast<CLASS *>(const_cast<RecordContext *>(Ctx));
+#include "clang/ExtractAPI/APIRecords.inc"
+ default:
+ return nullptr;
+ // llvm_unreachable("RecordContext derived class isn't propertly
+ // implemented");
+ }
}
-ObjCCategoryRecord *APISet::addObjCCategory(
- StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability, const DocComment &Comment,
- DeclarationFragments Declaration, DeclarationFragments SubHeading,
- SymbolReference Interface, bool IsFromSystemHeader,
- bool IsFromExternalModule) {
- // Create the category record.
- auto *Record =
- addTopLevelRecord(USRBasedLookupTable, ObjCCategories, USR, Name, Loc,
- std::move(Availability), Comment, Declaration,
- SubHeading, Interface, IsFromSystemHeader);
-
- Record->IsFromExternalModule = IsFromExternalModule;
-
- auto It = ObjCInterfaces.find(Interface.USR);
- if (It != ObjCInterfaces.end())
- It->second->Categories.push_back(Record);
-
- return Record;
+RecordContext *APIRecord::castToRecordContext(const APIRecord *Record) {
+ if (!Record)
+ return nullptr;
+ switch (Record->getKind()) {
+#define RECORD_CONTEXT(CLASS, KIND) \
+ case KIND: \
+ return static_cast<CLASS *>(const_cast<APIRecord *>(Record));
+#include "clang/ExtractAPI/APIRecords.inc"
+ default:
+ return nullptr;
+ // llvm_unreachable("RecordContext derived class isn't propertly
+ // implemented");
+ }
}
-ObjCInterfaceRecord *
-APISet::addObjCInterface(StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability, LinkageInfo Linkage,
- const DocComment &Comment,
- DeclarationFragments Declaration,
- DeclarationFragments SubHeading,
- SymbolReference SuperClass, bool IsFromSystemHeader) {
- return addTopLevelRecord(USRBasedLookupTable, ObjCInterfaces, USR, Name, Loc,
- std::move(Availability), Linkage, Comment,
- Declaration, SubHeading, SuperClass,
- IsFromSystemHeader);
+bool RecordContext::IsWellFormed() const {
+ // Check that First and Last are both null or both non-null.
+ return (First == nullptr) == (Last == nullptr);
}
-ObjCMethodRecord *APISet::addObjCMethod(
- ObjCContainerRecord *Container, StringRef Name, StringRef USR,
- PresumedLoc Loc, AvailabilityInfo Availability, const DocComment &Comment,
- DeclarationFragments Declaration, DeclarationFragments SubHeading,
- FunctionSignature Signature, bool IsInstanceMethod,
- bool IsFromSystemHeader) {
- std::unique_ptr<ObjCMethodRecord> Record;
- if (IsInstanceMethod)
- Record = std::make_unique<ObjCInstanceMethodRecord>(
- USR, Name, Loc, std::move(Availability), Comment, Declaration,
- SubHeading, Signature, IsFromSystemHeader);
+void RecordContext::stealRecordChain(RecordContext &Other) {
+ assert(IsWellFormed());
+ // If we don't have an empty chain append Other's chain into ours.
+ if (First)
+ Last->NextInContext = Other.First;
else
- Record = std::make_unique<ObjCClassMethodRecord>(
- USR, Name, Loc, std::move(Availability), Comment, Declaration,
- SubHeading, Signature, IsFromSystemHeader);
+ First = Other.First;
- Record->ParentInformation = APIRecord::HierarchyInformation(
- Container->USR, Container->Name, Container->getKind(), Container);
- USRBasedLookupTable.insert({USR, Record.get()});
- return Container->Methods.emplace_back(std::move(Record)).get();
-}
+ Last = Other.Last;
-ObjCPropertyRecord *APISet::addObjCProperty(
- ObjCContainerRecord *Container, StringRef Name, StringRef USR,
- PresumedLoc Loc, AvailabilityInfo Availability, const DocComment &Comment,
- DeclarationFragments Declaration, DeclarationFragments SubHeading,
- ObjCPropertyRecord::AttributeKind Attributes, StringRef GetterName,
- StringRef SetterName, bool IsOptional, bool IsInstanceProperty,
- bool IsFromSystemHeader) {
- std::unique_ptr<ObjCPropertyRecord> Record;
- if (IsInstanceProperty)
- Record = std::make_unique<ObjCInstancePropertyRecord>(
- USR, Name, Loc, std::move(Availability), Comment, Declaration,
- SubHeading, Attributes, GetterName, SetterName, IsOptional,
- IsFromSystemHeader);
- else
- Record = std::make_unique<ObjCClassPropertyRecord>(
- USR, Name, Loc, std::move(Availability), Comment, Declaration,
- SubHeading, Attributes, GetterName, SetterName, IsOptional,
- IsFromSystemHeader);
- Record->ParentInformation = APIRecord::HierarchyInformation(
- Container->USR, Container->Name, Container->getKind(), Container);
- USRBasedLookupTable.insert({USR, Record.get()});
- return Container->Properties.emplace_back(std::move(Record)).get();
+ // Delete Other's chain to ensure we don't accidentally traverse it.
+ Other.First = nullptr;
+ Other.Last = nullptr;
}
-ObjCInstanceVariableRecord *APISet::addObjCInstanceVariable(
- ObjCContainerRecord *Container, StringRef Name, StringRef USR,
- PresumedLoc Loc, AvailabilityInfo Availability, const DocComment &Comment,
- DeclarationFragments Declaration, DeclarationFragments SubHeading,
- ObjCInstanceVariableRecord::AccessControl Access, bool IsFromSystemHeader) {
- auto Record = std::make_unique<ObjCInstanceVariableRecord>(
- USR, Name, Loc, std::move(Availability), Comment, Declaration, SubHeading,
- Access, IsFromSystemHeader);
- Record->ParentInformation = APIRecord::HierarchyInformation(
- Container->USR, Container->Name, Container->getKind(), Container);
- USRBasedLookupTable.insert({USR, Record.get()});
- return Container->Ivars.emplace_back(std::move(Record)).get();
-}
+void RecordContext::addToRecordChain(APIRecord *Record) const {
+ assert(IsWellFormed());
+ if (!First) {
+ First = Record;
+ Last = Record;
+ return;
+ }
-ObjCProtocolRecord *APISet::addObjCProtocol(StringRef Name, StringRef USR,
- PresumedLoc Loc,
- AvailabilityInfo Availability,
- const DocComment &Comment,
- DeclarationFragments Declaration,
- DeclarationFragments SubHeading,
- bool IsFromSystemHeader) {
- return addTopLevelRecord(USRBasedLookupTable, ObjCProtocols, USR, Name, Loc,
- std::move(Availability), Comment, Declaration,
- SubHeading, IsFromSystemHeader);
-}
-
-MacroDefinitionRecord *
-APISet::addMacroDefinition(StringRef Name, StringRef USR, PresumedLoc Loc,
- DeclarationFragments Declaration,
- DeclarationFragments SubHeading,
- bool IsFromSystemHeader) {
- return addTopLevelRecord(USRBasedLookupTable, Macros, USR, Name, Loc,
- Declaration, SubHeading, IsFromSystemHeader);
-}
-
-TypedefRecord *
-APISet::addTypedef(StringRef Name, StringRef USR, PresumedLoc Loc,
- AvailabilityInfo Availability, const DocComment &Comment,
- DeclarationFragments Declaration,
- DeclarationFragments SubHeading,
- SymbolReference UnderlyingType, bool IsFromSystemHeader) {
- return addTopLevelRecord(USRBasedLookupTable, Typedefs, USR, Name, Loc,
- std::move(Availability), Comment, Declaration,
- SubHeading, UnderlyingType, IsFromSystemHeader);
+ Last->NextInContext = Record;
+ Last = Record;
}
APIRecord *APISet::findRecordForUSR(StringRef USR) const {
if (USR.empty())
return nullptr;
- return USRBasedLookupTable.lookup(USR);
-}
+ auto FindIt = USRBasedLookupTable.find(USR);
+ if (FindIt != USRBasedLookupTable.end())
+ return FindIt->getSecond().get();
-StringRef APISet::recordUSR(const Decl *D) {
- SmallString<128> USR;
- index::generateUSRForDecl(D, USR);
- return copyString(USR);
-}
-
-StringRef APISet::recordUSRForMacro(StringRef Name, SourceLocation SL,
- const SourceManager &SM) {
- SmallString<128> USR;
- index::generateUSRForMacro(Name, SL, SM, USR);
- return copyString(USR);
+ return nullptr;
}
StringRef APISet::copyString(StringRef String) {
@@ -528,15 +101,23 @@ StringRef APISet::copyString(StringRef String) {
return {};
// No need to allocate memory and copy if the string has already been stored.
- if (StringAllocator.identifyObject(String.data()))
+ if (Allocator.identifyObject(String.data()))
return String;
- void *Ptr = StringAllocator.Allocate(String.size(), 1);
+ void *Ptr = Allocator.Allocate(String.size(), 1);
memcpy(Ptr, String.data(), String.size());
return StringRef(reinterpret_cast<const char *>(Ptr), String.size());
}
+SymbolReference APISet::createSymbolReference(StringRef Name, StringRef USR,
+ StringRef Source) {
+ return SymbolReference(copyString(Name), copyString(USR), copyString(Source));
+}
+
APIRecord::~APIRecord() {}
+TagRecord::~TagRecord() {}
+RecordRecord::~RecordRecord() {}
+RecordFieldRecord::~RecordFieldRecord() {}
ObjCContainerRecord::~ObjCContainerRecord() {}
ObjCMethodRecord::~ObjCMethodRecord() {}
ObjCPropertyRecord::~ObjCPropertyRecord() {}
@@ -546,8 +127,10 @@ void GlobalFunctionRecord::anchor() {}
void GlobalVariableRecord::anchor() {}
void EnumConstantRecord::anchor() {}
void EnumRecord::anchor() {}
-void RecordFieldRecord::anchor() {}
-void RecordRecord::anchor() {}
+void StructFieldRecord::anchor() {}
+void StructRecord::anchor() {}
+void UnionFieldRecord::anchor() {}
+void UnionRecord::anchor() {}
void CXXFieldRecord::anchor() {}
void CXXClassRecord::anchor() {}
void CXXConstructorRecord::anchor() {}
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/DeclarationFragments.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/DeclarationFragments.cpp
index 56c1f5bf5eab..6b85c7db9034 100644
--- a/contrib/llvm-project/clang/lib/ExtractAPI/DeclarationFragments.cpp
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/DeclarationFragments.cpp
@@ -12,16 +12,19 @@
//===----------------------------------------------------------------------===//
#include "clang/ExtractAPI/DeclarationFragments.h"
+#include "clang/AST/ASTFwd.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
-#include "clang/AST/QualTypeNames.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
-#include "clang/Basic/OperatorKinds.h"
#include "clang/ExtractAPI/TypedefUnderlyingTypeResolver.h"
#include "clang/Index/USRGeneration.h"
#include "llvm/ADT/StringSwitch.h"
-#include <typeinfo>
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <optional>
using namespace clang::extractapi;
using namespace llvm;
@@ -60,23 +63,44 @@ void findTypeLocForBlockDecl(const clang::TypeSourceInfo *TSInfo,
} // namespace
-DeclarationFragments &DeclarationFragments::appendSpace() {
+DeclarationFragments &
+DeclarationFragments::appendUnduplicatedTextCharacter(char Character) {
if (!Fragments.empty()) {
Fragment &Last = Fragments.back();
if (Last.Kind == FragmentKind::Text) {
// Merge the extra space into the last fragment if the last fragment is
// also text.
- if (Last.Spelling.back() != ' ') { // avoid extra trailing spaces.
- Last.Spelling.push_back(' ');
+ if (Last.Spelling.back() != Character) { // avoid duplicates at end
+ Last.Spelling.push_back(Character);
}
} else {
- append(" ", FragmentKind::Text);
+ append("", FragmentKind::Text);
+ Fragments.back().Spelling.push_back(Character);
}
}
return *this;
}
+DeclarationFragments &DeclarationFragments::appendSpace() {
+ return appendUnduplicatedTextCharacter(' ');
+}
+
+DeclarationFragments &DeclarationFragments::appendSemicolon() {
+ return appendUnduplicatedTextCharacter(';');
+}
+
+DeclarationFragments &DeclarationFragments::removeTrailingSemicolon() {
+ if (Fragments.empty())
+ return *this;
+
+ Fragment &Last = Fragments.back();
+ if (Last.Kind == FragmentKind::Text && Last.Spelling.back() == ';')
+ Last.Spelling.pop_back();
+
+ return *this;
+}
+
StringRef DeclarationFragments::getFragmentKindString(
DeclarationFragments::FragmentKind Kind) {
switch (Kind) {
@@ -368,6 +392,25 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForType(
getFragmentsForType(AT->getElementType(), Context, After));
}
+ if (const TemplateSpecializationType *TemplSpecTy =
+ dyn_cast<TemplateSpecializationType>(T)) {
+ const auto TemplName = TemplSpecTy->getTemplateName();
+ std::string Str;
+ raw_string_ostream Stream(Str);
+ TemplName.print(Stream, Context.getPrintingPolicy(),
+ TemplateName::Qualified::AsWritten);
+ SmallString<64> USR("");
+ if (const auto *TemplDecl = TemplName.getAsTemplateDecl())
+ index::generateUSRForDecl(TemplDecl, USR);
+
+ return Fragments
+ .append(Str, DeclarationFragments::FragmentKind::TypeIdentifier, USR)
+ .append("<", DeclarationFragments::FragmentKind::Text)
+ .append(getFragmentsForTemplateArguments(
+ TemplSpecTy->template_arguments(), Context, std::nullopt))
+ .append(">", DeclarationFragments::FragmentKind::Text);
+ }
+
// Everything we care about has been handled now, reduce to the canonical
// unqualified base type.
QualType Base = T->getCanonicalTypeUnqualified();
@@ -378,7 +421,8 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForType(
const TagDecl *Decl = TagTy->getDecl();
// Anonymous decl, skip this fragment.
if (Decl->getName().empty())
- return Fragments;
+ return Fragments.append("{ ... }",
+ DeclarationFragments::FragmentKind::Text);
SmallString<128> TagUSR;
clang::index::generateUSRForDecl(Decl, TagUSR);
return Fragments.append(Decl->getName(),
@@ -469,7 +513,7 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForNamespace(
if (!Decl->isAnonymousNamespace())
Fragments.appendSpace().append(
Decl->getName(), DeclarationFragments::FragmentKind::Identifier);
- return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+ return Fragments.appendSemicolon();
}
DeclarationFragments
@@ -511,7 +555,7 @@ DeclarationFragmentsBuilder::getFragmentsForVar(const VarDecl *Var) {
return Fragments
.append(Var->getName(), DeclarationFragments::FragmentKind::Identifier)
.append(std::move(After))
- .append(";", DeclarationFragments::FragmentKind::Text);
+ .appendSemicolon();
}
DeclarationFragments
@@ -533,17 +577,15 @@ DeclarationFragmentsBuilder::getFragmentsForVarTemplate(const VarDecl *Var) {
DeclarationFragments After;
DeclarationFragments ArgumentFragment =
getFragmentsForType(T, Var->getASTContext(), After);
- if (ArgumentFragment.begin()->Spelling.substr(0, 14).compare(
- "type-parameter") == 0) {
- std::string ProperArgName = getNameForTemplateArgument(
- Var->getDescribedVarTemplate()->getTemplateParameters()->asArray(),
- ArgumentFragment.begin()->Spelling);
+ if (StringRef(ArgumentFragment.begin()->Spelling)
+ .starts_with("type-parameter")) {
+ std::string ProperArgName = T.getAsString();
ArgumentFragment.begin()->Spelling.swap(ProperArgName);
}
Fragments.append(std::move(ArgumentFragment))
.appendSpace()
.append(Var->getName(), DeclarationFragments::FragmentKind::Identifier)
- .append(";", DeclarationFragments::FragmentKind::Text);
+ .appendSemicolon();
return Fragments;
}
@@ -568,14 +610,9 @@ DeclarationFragmentsBuilder::getFragmentsForParam(const ParmVarDecl *Param) {
else
TypeFragments.append(getFragmentsForType(T, Param->getASTContext(), After));
- if (TypeFragments.begin()->Spelling.substr(0, 14).compare("type-parameter") ==
- 0) {
- std::string ProperArgName = getNameForTemplateArgument(
- dyn_cast<FunctionDecl>(Param->getDeclContext())
- ->getDescribedFunctionTemplate()
- ->getTemplateParameters()
- ->asArray(),
- TypeFragments.begin()->Spelling);
+ if (StringRef(TypeFragments.begin()->Spelling)
+ .starts_with("type-parameter")) {
+ std::string ProperArgName = Param->getOriginalType().getAsString();
TypeFragments.begin()->Spelling.swap(ProperArgName);
}
@@ -638,7 +675,6 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForBlock(
DeclarationFragments
DeclarationFragmentsBuilder::getFragmentsForFunction(const FunctionDecl *Func) {
DeclarationFragments Fragments;
- // FIXME: Handle template specialization
switch (Func->getStorageClass()) {
case SC_None:
case SC_PrivateExtern:
@@ -666,19 +702,16 @@ DeclarationFragmentsBuilder::getFragmentsForFunction(const FunctionDecl *Func) {
DeclarationFragments After;
auto ReturnValueFragment =
getFragmentsForType(Func->getReturnType(), Func->getASTContext(), After);
- if (ReturnValueFragment.begin()->Spelling.substr(0, 14).compare(
- "type-parameter") == 0) {
- std::string ProperArgName =
- getNameForTemplateArgument(Func->getDescribedFunctionTemplate()
- ->getTemplateParameters()
- ->asArray(),
- ReturnValueFragment.begin()->Spelling);
+ if (StringRef(ReturnValueFragment.begin()->Spelling)
+ .starts_with("type-parameter")) {
+ std::string ProperArgName = Func->getReturnType().getAsString();
ReturnValueFragment.begin()->Spelling.swap(ProperArgName);
}
Fragments.append(std::move(ReturnValueFragment))
.appendSpace()
- .append(Func->getName(), DeclarationFragments::FragmentKind::Identifier);
+ .append(Func->getNameAsString(),
+ DeclarationFragments::FragmentKind::Identifier);
if (Func->getTemplateSpecializationInfo()) {
Fragments.append("<", DeclarationFragments::FragmentKind::Text);
@@ -712,7 +745,7 @@ DeclarationFragmentsBuilder::getFragmentsForFunction(const FunctionDecl *Func) {
Fragments.append(DeclarationFragments::getExceptionSpecificationString(
Func->getExceptionSpecType()));
- return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+ return Fragments.appendSemicolon();
}
DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForEnumConstant(
@@ -736,12 +769,17 @@ DeclarationFragmentsBuilder::getFragmentsForEnum(const EnumDecl *EnumDecl) {
QualType IntegerType = EnumDecl->getIntegerType();
if (!IntegerType.isNull())
- Fragments.append(": ", DeclarationFragments::FragmentKind::Text)
+ Fragments.appendSpace()
+ .append(": ", DeclarationFragments::FragmentKind::Text)
.append(
getFragmentsForType(IntegerType, EnumDecl->getASTContext(), After))
.append(std::move(After));
- return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+ if (EnumDecl->getName().empty())
+ Fragments.appendSpace().append("{ ... }",
+ DeclarationFragments::FragmentKind::Text);
+
+ return Fragments.appendSemicolon();
}
DeclarationFragments
@@ -757,7 +795,7 @@ DeclarationFragmentsBuilder::getFragmentsForField(const FieldDecl *Field) {
.appendSpace()
.append(Field->getName(), DeclarationFragments::FragmentKind::Identifier)
.append(std::move(After))
- .append(";", DeclarationFragments::FragmentKind::Text);
+ .appendSemicolon();
}
DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForRecordDecl(
@@ -771,11 +809,14 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForRecordDecl(
else
Fragments.append("struct", DeclarationFragments::FragmentKind::Keyword);
+ Fragments.appendSpace();
if (!Record->getName().empty())
- Fragments.appendSpace().append(
- Record->getName(), DeclarationFragments::FragmentKind::Identifier);
+ Fragments.append(Record->getName(),
+ DeclarationFragments::FragmentKind::Identifier);
+ else
+ Fragments.append("{ ... }", DeclarationFragments::FragmentKind::Text);
- return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+ return Fragments.appendSemicolon();
}
DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForCXXClass(
@@ -790,7 +831,7 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForCXXClass(
Fragments.appendSpace().append(
Record->getName(), DeclarationFragments::FragmentKind::Identifier);
- return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+ return Fragments.appendSemicolon();
}
DeclarationFragments
@@ -820,7 +861,7 @@ DeclarationFragmentsBuilder::getFragmentsForSpecialCXXMethod(
Fragments.append(DeclarationFragments::getExceptionSpecificationString(
Method->getExceptionSpecType()));
- return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+ return Fragments.appendSemicolon();
}
DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForCXXMethod(
@@ -860,7 +901,7 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForCXXMethod(
Fragments.append(DeclarationFragments::getExceptionSpecificationString(
Method->getExceptionSpecType()));
- return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+ return Fragments.appendSemicolon();
}
DeclarationFragments
@@ -891,7 +932,7 @@ DeclarationFragmentsBuilder::getFragmentsForConversionFunction(
Fragments.appendSpace().append("const",
DeclarationFragments::FragmentKind::Keyword);
- return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+ return Fragments.appendSemicolon();
}
DeclarationFragments
@@ -923,7 +964,7 @@ DeclarationFragmentsBuilder::getFragmentsForOverloadedOperator(
Fragments.append(DeclarationFragments::getExceptionSpecificationString(
Method->getExceptionSpecType()));
- return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+ return Fragments.appendSemicolon();
}
// Get fragments for template parameters, e.g. T in tempalte<typename T> ...
@@ -936,50 +977,89 @@ DeclarationFragmentsBuilder::getFragmentsForTemplateParameters(
Fragments.append(",", DeclarationFragments::FragmentKind::Text)
.appendSpace();
- const auto *TemplateParam =
- dyn_cast<TemplateTypeParmDecl>(ParameterArray[i]);
- if (!TemplateParam)
- continue;
- if (TemplateParam->hasTypeConstraint())
- Fragments.append(TemplateParam->getTypeConstraint()
- ->getNamedConcept()
- ->getName()
- .str(),
- DeclarationFragments::FragmentKind::TypeIdentifier);
- else if (TemplateParam->wasDeclaredWithTypename())
- Fragments.append("typename", DeclarationFragments::FragmentKind::Keyword);
- else
- Fragments.append("class", DeclarationFragments::FragmentKind::Keyword);
-
- if (TemplateParam->isParameterPack())
- Fragments.append("...", DeclarationFragments::FragmentKind::Text);
-
- Fragments.appendSpace().append(
- TemplateParam->getName(),
- DeclarationFragments::FragmentKind::GenericParameter);
+ if (const auto *TemplateParam =
+ dyn_cast<TemplateTypeParmDecl>(ParameterArray[i])) {
+ if (TemplateParam->hasTypeConstraint())
+ Fragments.append(TemplateParam->getTypeConstraint()
+ ->getNamedConcept()
+ ->getName()
+ .str(),
+ DeclarationFragments::FragmentKind::TypeIdentifier);
+ else if (TemplateParam->wasDeclaredWithTypename())
+ Fragments.append("typename",
+ DeclarationFragments::FragmentKind::Keyword);
+ else
+ Fragments.append("class", DeclarationFragments::FragmentKind::Keyword);
+
+ if (TemplateParam->isParameterPack())
+ Fragments.append("...", DeclarationFragments::FragmentKind::Text);
+
+ if (!TemplateParam->getName().empty())
+ Fragments.appendSpace().append(
+ TemplateParam->getName(),
+ DeclarationFragments::FragmentKind::GenericParameter);
+
+ if (TemplateParam->hasDefaultArgument()) {
+ const auto Default = TemplateParam->getDefaultArgument();
+ Fragments.append(" = ", DeclarationFragments::FragmentKind::Text)
+ .append(getFragmentsForTemplateArguments(
+ {Default.getArgument()}, TemplateParam->getASTContext(),
+ {Default}));
+ }
+ } else if (const auto *NTP =
+ dyn_cast<NonTypeTemplateParmDecl>(ParameterArray[i])) {
+ DeclarationFragments After;
+ const auto TyFragments =
+ getFragmentsForType(NTP->getType(), NTP->getASTContext(), After);
+ Fragments.append(std::move(TyFragments)).append(std::move(After));
+
+ if (NTP->isParameterPack())
+ Fragments.append("...", DeclarationFragments::FragmentKind::Text);
+
+ if (!NTP->getName().empty())
+ Fragments.appendSpace().append(
+ NTP->getName(),
+ DeclarationFragments::FragmentKind::GenericParameter);
+
+ if (NTP->hasDefaultArgument()) {
+ SmallString<8> ExprStr;
+ raw_svector_ostream Output(ExprStr);
+ NTP->getDefaultArgument().getArgument().print(
+ NTP->getASTContext().getPrintingPolicy(), Output,
+ /*IncludeType=*/false);
+ Fragments.append(" = ", DeclarationFragments::FragmentKind::Text)
+ .append(ExprStr, DeclarationFragments::FragmentKind::Text);
+ }
+ } else if (const auto *TTP =
+ dyn_cast<TemplateTemplateParmDecl>(ParameterArray[i])) {
+ Fragments.append("template", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace()
+ .append("<", DeclarationFragments::FragmentKind::Text)
+ .append(getFragmentsForTemplateParameters(
+ TTP->getTemplateParameters()->asArray()))
+ .append(">", DeclarationFragments::FragmentKind::Text)
+ .appendSpace()
+ .append(TTP->wasDeclaredWithTypename() ? "typename" : "class",
+ DeclarationFragments::FragmentKind::Keyword);
+
+ if (TTP->isParameterPack())
+ Fragments.append("...", DeclarationFragments::FragmentKind::Text);
+
+ if (!TTP->getName().empty())
+ Fragments.appendSpace().append(
+ TTP->getName(),
+ DeclarationFragments::FragmentKind::GenericParameter);
+ if (TTP->hasDefaultArgument()) {
+ const auto Default = TTP->getDefaultArgument();
+ Fragments.append(" = ", DeclarationFragments::FragmentKind::Text)
+ .append(getFragmentsForTemplateArguments(
+ {Default.getArgument()}, TTP->getASTContext(), {Default}));
+ }
+ }
}
return Fragments;
}
-// Find the name of a template argument from the template's parameters.
-std::string DeclarationFragmentsBuilder::getNameForTemplateArgument(
- const ArrayRef<NamedDecl *> TemplateParameters, std::string TypeParameter) {
- // The arg is a generic parameter from a partial spec, e.g.
- // T in template<typename T> Foo<T, int>.
- //
- // Those names appear as "type-parameter-<index>-<depth>", so we must find its
- // name from the template's parameter list.
- for (unsigned i = 0; i < TemplateParameters.size(); ++i) {
- const auto *Parameter =
- dyn_cast<TemplateTypeParmDecl>(TemplateParameters[i]);
- if (TypeParameter.compare("type-parameter-" +
- std::to_string(Parameter->getDepth()) + "-" +
- std::to_string(Parameter->getIndex())) == 0)
- return std::string(TemplateParameters[i]->getName());
- }
- llvm_unreachable("Could not find the name of a template argument.");
-}
-
// Get fragments for template arguments, e.g. int in template<typename T>
// Foo<int>;
//
@@ -989,28 +1069,102 @@ std::string DeclarationFragmentsBuilder::getNameForTemplateArgument(
DeclarationFragments
DeclarationFragmentsBuilder::getFragmentsForTemplateArguments(
const ArrayRef<TemplateArgument> TemplateArguments, ASTContext &Context,
- const std::optional<ArrayRef<NamedDecl *>> TemplateParameters) {
+ const std::optional<ArrayRef<TemplateArgumentLoc>> TemplateArgumentLocs) {
DeclarationFragments Fragments;
for (unsigned i = 0, end = TemplateArguments.size(); i != end; ++i) {
if (i)
Fragments.append(",", DeclarationFragments::FragmentKind::Text)
.appendSpace();
- std::string Type = TemplateArguments[i].getAsType().getAsString();
- DeclarationFragments After;
- DeclarationFragments ArgumentFragment =
- getFragmentsForType(TemplateArguments[i].getAsType(), Context, After);
+ const auto &CTA = TemplateArguments[i];
+ switch (CTA.getKind()) {
+ case TemplateArgument::Type: {
+ DeclarationFragments After;
+ DeclarationFragments ArgumentFragment =
+ getFragmentsForType(CTA.getAsType(), Context, After);
+
+ if (StringRef(ArgumentFragment.begin()->Spelling)
+ .starts_with("type-parameter")) {
+ if (TemplateArgumentLocs.has_value() &&
+ TemplateArgumentLocs->size() > i) {
+ std::string ProperArgName = TemplateArgumentLocs.value()[i]
+ .getTypeSourceInfo()
+ ->getType()
+ .getAsString();
+ ArgumentFragment.begin()->Spelling.swap(ProperArgName);
+ } else {
+ auto &Spelling = ArgumentFragment.begin()->Spelling;
+ Spelling.clear();
+ raw_string_ostream OutStream(Spelling);
+ CTA.print(Context.getPrintingPolicy(), OutStream, false);
+ OutStream.flush();
+ }
+ }
- if (ArgumentFragment.begin()->Spelling.substr(0, 14).compare(
- "type-parameter") == 0) {
- std::string ProperArgName = getNameForTemplateArgument(
- TemplateParameters.value(), ArgumentFragment.begin()->Spelling);
- ArgumentFragment.begin()->Spelling.swap(ProperArgName);
+ Fragments.append(std::move(ArgumentFragment));
+ break;
}
- Fragments.append(std::move(ArgumentFragment));
+ case TemplateArgument::Declaration: {
+ const auto *VD = CTA.getAsDecl();
+ SmallString<128> USR;
+ index::generateUSRForDecl(VD, USR);
+ Fragments.append(VD->getNameAsString(),
+ DeclarationFragments::FragmentKind::Identifier, USR);
+ break;
+ }
+ case TemplateArgument::NullPtr:
+ Fragments.append("nullptr", DeclarationFragments::FragmentKind::Keyword);
+ break;
- if (TemplateArguments[i].isPackExpansion())
- Fragments.append("...", DeclarationFragments::FragmentKind::Text);
+ case TemplateArgument::Integral: {
+ SmallString<4> Str;
+ CTA.getAsIntegral().toString(Str);
+ Fragments.append(Str, DeclarationFragments::FragmentKind::Text);
+ break;
+ }
+
+ case TemplateArgument::StructuralValue: {
+ const auto SVTy = CTA.getStructuralValueType();
+ Fragments.append(CTA.getAsStructuralValue().getAsString(Context, SVTy),
+ DeclarationFragments::FragmentKind::Text);
+ break;
+ }
+
+ case TemplateArgument::TemplateExpansion:
+ case TemplateArgument::Template: {
+ std::string Str;
+ raw_string_ostream Stream(Str);
+ CTA.getAsTemplate().print(Stream, Context.getPrintingPolicy());
+ SmallString<64> USR("");
+ if (const auto *TemplDecl =
+ CTA.getAsTemplateOrTemplatePattern().getAsTemplateDecl())
+ index::generateUSRForDecl(TemplDecl, USR);
+ Fragments.append(Str, DeclarationFragments::FragmentKind::TypeIdentifier,
+ USR);
+ if (CTA.getKind() == TemplateArgument::TemplateExpansion)
+ Fragments.append("...", DeclarationFragments::FragmentKind::Text);
+ break;
+ }
+
+ case TemplateArgument::Pack:
+ Fragments.append("<", DeclarationFragments::FragmentKind::Text)
+ .append(getFragmentsForTemplateArguments(CTA.pack_elements(), Context,
+ {}))
+ .append(">", DeclarationFragments::FragmentKind::Text);
+ break;
+
+ case TemplateArgument::Expression: {
+ SmallString<8> ExprStr;
+ raw_svector_ostream Output(ExprStr);
+ CTA.getAsExpr()->printPretty(Output, nullptr,
+ Context.getPrintingPolicy());
+ Fragments.append(ExprStr, DeclarationFragments::FragmentKind::Text);
+ break;
+ }
+
+ case TemplateArgument::Null:
+ break;
+ }
}
return Fragments;
}
@@ -1020,15 +1174,17 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForConcept(
DeclarationFragments Fragments;
return Fragments
.append("template", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace()
.append("<", DeclarationFragments::FragmentKind::Text)
.append(getFragmentsForTemplateParameters(
Concept->getTemplateParameters()->asArray()))
.append("> ", DeclarationFragments::FragmentKind::Text)
+ .appendSpace()
.append("concept", DeclarationFragments::FragmentKind::Keyword)
.appendSpace()
.append(Concept->getName().str(),
DeclarationFragments::FragmentKind::Identifier)
- .append(";", DeclarationFragments::FragmentKind::Text);
+ .appendSemicolon();
}
DeclarationFragments
@@ -1036,6 +1192,7 @@ DeclarationFragmentsBuilder::getFragmentsForRedeclarableTemplate(
const RedeclarableTemplateDecl *RedeclarableTemplate) {
DeclarationFragments Fragments;
Fragments.append("template", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace()
.append("<", DeclarationFragments::FragmentKind::Text)
.append(getFragmentsForTemplateParameters(
RedeclarableTemplate->getTemplateParameters()->asArray()))
@@ -1058,6 +1215,7 @@ DeclarationFragmentsBuilder::getFragmentsForClassTemplateSpecialization(
DeclarationFragments Fragments;
return Fragments
.append("template", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace()
.append("<", DeclarationFragments::FragmentKind::Text)
.append(">", DeclarationFragments::FragmentKind::Text)
.appendSpace()
@@ -1065,11 +1223,11 @@ DeclarationFragmentsBuilder::getFragmentsForClassTemplateSpecialization(
cast<CXXRecordDecl>(Decl)))
.pop_back() // there is an extra semicolon now
.append("<", DeclarationFragments::FragmentKind::Text)
- .append(
- getFragmentsForTemplateArguments(Decl->getTemplateArgs().asArray(),
- Decl->getASTContext(), std::nullopt))
+ .append(getFragmentsForTemplateArguments(
+ Decl->getTemplateArgs().asArray(), Decl->getASTContext(),
+ Decl->getTemplateArgsAsWritten()->arguments()))
.append(">", DeclarationFragments::FragmentKind::Text)
- .append(";", DeclarationFragments::FragmentKind::Text);
+ .appendSemicolon();
}
DeclarationFragments
@@ -1078,6 +1236,7 @@ DeclarationFragmentsBuilder::getFragmentsForClassTemplatePartialSpecialization(
DeclarationFragments Fragments;
return Fragments
.append("template", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace()
.append("<", DeclarationFragments::FragmentKind::Text)
.append(getFragmentsForTemplateParameters(
Decl->getTemplateParameters()->asArray()))
@@ -1089,9 +1248,9 @@ DeclarationFragmentsBuilder::getFragmentsForClassTemplatePartialSpecialization(
.append("<", DeclarationFragments::FragmentKind::Text)
.append(getFragmentsForTemplateArguments(
Decl->getTemplateArgs().asArray(), Decl->getASTContext(),
- Decl->getTemplateParameters()->asArray()))
+ Decl->getTemplateArgsAsWritten()->arguments()))
.append(">", DeclarationFragments::FragmentKind::Text)
- .append(";", DeclarationFragments::FragmentKind::Text);
+ .appendSemicolon();
}
DeclarationFragments
@@ -1100,17 +1259,18 @@ DeclarationFragmentsBuilder::getFragmentsForVarTemplateSpecialization(
DeclarationFragments Fragments;
return Fragments
.append("template", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace()
.append("<", DeclarationFragments::FragmentKind::Text)
.append(">", DeclarationFragments::FragmentKind::Text)
.appendSpace()
.append(DeclarationFragmentsBuilder::getFragmentsForVarTemplate(Decl))
.pop_back() // there is an extra semicolon now
.append("<", DeclarationFragments::FragmentKind::Text)
- .append(
- getFragmentsForTemplateArguments(Decl->getTemplateArgs().asArray(),
- Decl->getASTContext(), std::nullopt))
+ .append(getFragmentsForTemplateArguments(
+ Decl->getTemplateArgs().asArray(), Decl->getASTContext(),
+ Decl->getTemplateArgsAsWritten()->arguments()))
.append(">", DeclarationFragments::FragmentKind::Text)
- .append(";", DeclarationFragments::FragmentKind::Text);
+ .appendSemicolon();
}
DeclarationFragments
@@ -1119,6 +1279,7 @@ DeclarationFragmentsBuilder::getFragmentsForVarTemplatePartialSpecialization(
DeclarationFragments Fragments;
return Fragments
.append("template", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace()
.append("<", DeclarationFragments::FragmentKind::Text)
// Partial specs may have new params.
.append(getFragmentsForTemplateParameters(
@@ -1130,9 +1291,9 @@ DeclarationFragmentsBuilder::getFragmentsForVarTemplatePartialSpecialization(
.append("<", DeclarationFragments::FragmentKind::Text)
.append(getFragmentsForTemplateArguments(
Decl->getTemplateArgs().asArray(), Decl->getASTContext(),
- Decl->getTemplateParameters()->asArray()))
+ Decl->getTemplateArgsAsWritten()->arguments()))
.append(">", DeclarationFragments::FragmentKind::Text)
- .append(";", DeclarationFragments::FragmentKind::Text);
+ .appendSemicolon();
}
DeclarationFragments
@@ -1141,6 +1302,7 @@ DeclarationFragmentsBuilder::getFragmentsForFunctionTemplate(
DeclarationFragments Fragments;
return Fragments
.append("template", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace()
.append("<", DeclarationFragments::FragmentKind::Text)
// Partial specs may have new params.
.append(getFragmentsForTemplateParameters(
@@ -1157,6 +1319,7 @@ DeclarationFragmentsBuilder::getFragmentsForFunctionTemplateSpecialization(
DeclarationFragments Fragments;
return Fragments
.append("template", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace()
.append("<>", DeclarationFragments::FragmentKind::Text)
.appendSpace()
.append(DeclarationFragmentsBuilder::getFragmentsForFunction(Decl));
@@ -1203,7 +1366,7 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForObjCCategory(
Fragments.append("@interface", DeclarationFragments::FragmentKind::Keyword)
.appendSpace()
- .append(Category->getClassInterface()->getName(),
+ .append(Interface->getName(),
DeclarationFragments::FragmentKind::TypeIdentifier, InterfaceUSR,
Interface)
.append(" (", DeclarationFragments::FragmentKind::Text)
@@ -1277,7 +1440,7 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForObjCMethod(
Fragments.append(getFragmentsForParam(Param));
}
- return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+ return Fragments.appendSemicolon();
}
DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForObjCProperty(
@@ -1378,7 +1541,7 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForObjCProperty(
.append(Property->getName(),
DeclarationFragments::FragmentKind::Identifier)
.append(std::move(After))
- .append(";", DeclarationFragments::FragmentKind::Text);
+ .appendSemicolon();
}
DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForObjCProtocol(
@@ -1422,7 +1585,7 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForTypedef(
.appendSpace()
.append(Decl->getName(), DeclarationFragments::FragmentKind::Identifier);
- return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+ return Fragments.appendSemicolon();
}
// Instantiate template for FunctionDecl.
@@ -1448,9 +1611,12 @@ DeclarationFragmentsBuilder::getSubHeading(const NamedDecl *Decl) {
cast<CXXMethodDecl>(Decl)->isOverloadedOperator()) {
Fragments.append(Decl->getNameAsString(),
DeclarationFragments::FragmentKind::Identifier);
- } else if (!Decl->getName().empty())
+ } else if (Decl->getIdentifier()) {
Fragments.append(Decl->getName(),
DeclarationFragments::FragmentKind::Identifier);
+ } else
+ Fragments.append(Decl->getDeclName().getAsString(),
+ DeclarationFragments::FragmentKind::Identifier);
return Fragments;
}
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp
index fd62d841197d..d6335854cbf2 100644
--- a/contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp
@@ -30,6 +30,8 @@
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendOptions.h"
#include "clang/Frontend/MultiplexConsumer.h"
+#include "clang/Index/USRGeneration.h"
+#include "clang/InstallAPI/HeaderFile.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Preprocessor.h"
@@ -38,6 +40,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FileSystem.h"
@@ -61,9 +64,6 @@ std::optional<std::string> getRelativeIncludeName(const CompilerInstance &CI,
"CompilerInstance does not have a FileNamager!");
using namespace llvm::sys;
- // Matches framework include patterns
- const llvm::Regex Rule("/(.+)\\.framework/(.+)?Headers/(.+)");
-
const auto &FS = CI.getVirtualFileSystem();
SmallString<128> FilePath(File.begin(), File.end());
@@ -147,7 +147,8 @@ std::optional<std::string> getRelativeIncludeName(const CompilerInstance &CI,
// include name `<Framework/Header.h>`
if (Entry.IsFramework) {
SmallVector<StringRef, 4> Matches;
- Rule.match(File, &Matches);
+ clang::installapi::HeaderFile::getFrameworkIncludeRule().match(
+ File, &Matches);
// Returned matches are always in stable order.
if (Matches.size() != 4)
return std::nullopt;
@@ -328,11 +329,12 @@ public:
StringRef Name = PM.MacroNameToken.getIdentifierInfo()->getName();
PresumedLoc Loc = SM.getPresumedLoc(PM.MacroNameToken.getLocation());
- StringRef USR =
- API.recordUSRForMacro(Name, PM.MacroNameToken.getLocation(), SM);
+ SmallString<128> USR;
+ index::generateUSRForMacro(Name, PM.MacroNameToken.getLocation(), SM,
+ USR);
- API.addMacroDefinition(
- Name, USR, Loc,
+ API.createRecord<extractapi::MacroDefinitionRecord>(
+ USR, Name, SymbolReference(), Loc,
DeclarationFragmentsBuilder::getFragmentsForMacro(Name, PM.MD),
DeclarationFragmentsBuilder::getSubHeadingForMacro(Name),
SM.isInSystemHeader(PM.MacroNameToken.getLocation()));
@@ -373,40 +375,57 @@ private:
LocationFileChecker &LCF;
};
+std::unique_ptr<llvm::raw_pwrite_stream>
+createAdditionalSymbolGraphFile(CompilerInstance &CI, Twine BaseName) {
+ auto OutputDirectory = CI.getFrontendOpts().SymbolGraphOutputDir;
+
+ SmallString<256> FileName;
+ llvm::sys::path::append(FileName, OutputDirectory,
+ BaseName + ".symbols.json");
+ return CI.createOutputFile(
+ FileName, /*Binary*/ false, /*RemoveFileOnSignal*/ false,
+ /*UseTemporary*/ true, /*CreateMissingDirectories*/ true);
+}
+
} // namespace
-void ExtractAPIActionBase::ImplEndSourceFileAction() {
- if (!OS)
- return;
+void ExtractAPIActionBase::ImplEndSourceFileAction(CompilerInstance &CI) {
+ SymbolGraphSerializerOption SerializationOptions;
+ SerializationOptions.Compact = !CI.getFrontendOpts().EmitPrettySymbolGraphs;
+ SerializationOptions.EmitSymbolLabelsForTesting =
+ CI.getFrontendOpts().EmitSymbolGraphSymbolLabelsForTesting;
+
+ if (CI.getFrontendOpts().EmitExtensionSymbolGraphs) {
+ auto ConstructOutputFile = [&CI](Twine BaseName) {
+ return createAdditionalSymbolGraphFile(CI, BaseName);
+ };
+
+ SymbolGraphSerializer::serializeWithExtensionGraphs(
+ *OS, *API, IgnoresList, ConstructOutputFile, SerializationOptions);
+ } else {
+ SymbolGraphSerializer::serializeMainSymbolGraph(*OS, *API, IgnoresList,
+ SerializationOptions);
+ }
- // Setup a SymbolGraphSerializer to write out collected API information in
- // the Symbol Graph format.
- // FIXME: Make the kind of APISerializer configurable.
- SymbolGraphSerializer SGSerializer(*API, IgnoresList);
- SGSerializer.serialize(*OS);
+ // Flush the stream and close the main output stream.
OS.reset();
}
-std::unique_ptr<raw_pwrite_stream>
-ExtractAPIAction::CreateOutputFile(CompilerInstance &CI, StringRef InFile) {
- std::unique_ptr<raw_pwrite_stream> OS;
- OS = CI.createDefaultOutputFile(/*Binary=*/false, InFile,
- /*Extension=*/"json",
- /*RemoveFileOnSignal=*/false);
- if (!OS)
- return nullptr;
- return OS;
-}
-
std::unique_ptr<ASTConsumer>
ExtractAPIAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
- OS = CreateOutputFile(CI, InFile);
+ auto ProductName = CI.getFrontendOpts().ProductName;
+
+ if (CI.getFrontendOpts().SymbolGraphOutputDir.empty())
+ OS = CI.createDefaultOutputFile(/*Binary*/ false, InFile,
+ /*Extension*/ "symbols.json",
+ /*RemoveFileOnSignal*/ false,
+ /*CreateMissingDirectories*/ true);
+ else
+ OS = createAdditionalSymbolGraphFile(CI, ProductName);
if (!OS)
return nullptr;
- auto ProductName = CI.getFrontendOpts().ProductName;
-
// Now that we have enough information about the language options and the
// target triple, let's create the APISet before anyone uses it.
API = std::make_unique<APISet>(
@@ -496,7 +515,9 @@ bool ExtractAPIAction::PrepareToExecuteAction(CompilerInstance &CI) {
return true;
}
-void ExtractAPIAction::EndSourceFileAction() { ImplEndSourceFileAction(); }
+void ExtractAPIAction::EndSourceFileAction() {
+ ImplEndSourceFileAction(getCompilerInstance());
+}
std::unique_ptr<ASTConsumer>
WrappingExtractAPIAction::CreateASTConsumer(CompilerInstance &CI,
@@ -507,11 +528,9 @@ WrappingExtractAPIAction::CreateASTConsumer(CompilerInstance &CI,
CreatedASTConsumer = true;
- OS = CreateOutputFile(CI, InFile);
- if (!OS)
- return nullptr;
-
- auto ProductName = CI.getFrontendOpts().ProductName;
+ ProductName = CI.getFrontendOpts().ProductName;
+ auto InputFilename = llvm::sys::path::filename(InFile);
+ OS = createAdditionalSymbolGraphFile(CI, InputFilename);
// Now that we have enough information about the language options and the
// target triple, let's create the APISet before anyone uses it.
@@ -553,32 +572,6 @@ void WrappingExtractAPIAction::EndSourceFileAction() {
WrapperFrontendAction::EndSourceFileAction();
if (CreatedASTConsumer) {
- ImplEndSourceFileAction();
+ ImplEndSourceFileAction(getCompilerInstance());
}
}
-
-std::unique_ptr<raw_pwrite_stream>
-WrappingExtractAPIAction::CreateOutputFile(CompilerInstance &CI,
- StringRef InFile) {
- std::unique_ptr<raw_pwrite_stream> OS;
- std::string OutputDir = CI.getFrontendOpts().SymbolGraphOutputDir;
-
- // The symbol graphs need to be generated as a side effect of regular
- // compilation so the output should be dumped in the directory provided with
- // the command line option.
- llvm::SmallString<128> OutFilePath(OutputDir);
- auto Seperator = llvm::sys::path::get_separator();
- auto Infilename = llvm::sys::path::filename(InFile);
- OutFilePath.append({Seperator, Infilename});
- llvm::sys::path::replace_extension(OutFilePath, "json");
- // StringRef outputFilePathref = *OutFilePath;
-
- // don't use the default output file
- OS = CI.createOutputFile(/*OutputPath=*/OutFilePath, /*Binary=*/false,
- /*RemoveFileOnSignal=*/true,
- /*UseTemporary=*/true,
- /*CreateMissingDirectories=*/true);
- if (!OS)
- return nullptr;
- return OS;
-}
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp
index 349b93e2a232..6e56ee5b573f 100644
--- a/contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp
@@ -14,45 +14,49 @@
#include "clang/ExtractAPI/Serialization/SymbolGraphSerializer.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Version.h"
+#include "clang/ExtractAPI/API.h"
#include "clang/ExtractAPI/DeclarationFragments.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/STLFunctionalExtras.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/VersionTuple.h"
+#include "llvm/Support/raw_ostream.h"
+#include <iterator>
#include <optional>
#include <type_traits>
using namespace clang;
using namespace clang::extractapi;
using namespace llvm;
-using namespace llvm::json;
namespace {
/// Helper function to inject a JSON object \p Obj into another object \p Paren
/// at position \p Key.
-void serializeObject(Object &Paren, StringRef Key, std::optional<Object> Obj) {
+void serializeObject(Object &Paren, StringRef Key,
+ std::optional<Object> &&Obj) {
if (Obj)
Paren[Key] = std::move(*Obj);
}
-/// Helper function to inject a StringRef \p String into an object \p Paren at
-/// position \p Key
-void serializeString(Object &Paren, StringRef Key,
- std::optional<std::string> String) {
- if (String)
- Paren[Key] = std::move(*String);
-}
-
/// Helper function to inject a JSON array \p Array into object \p Paren at
/// position \p Key.
-void serializeArray(Object &Paren, StringRef Key, std::optional<Array> Array) {
+void serializeArray(Object &Paren, StringRef Key,
+ std::optional<Array> &&Array) {
if (Array)
Paren[Key] = std::move(*Array);
}
+/// Helper function to inject a JSON array composed of the values in \p C into
+/// object \p Paren at position \p Key.
+template <typename ContainerTy>
+void serializeArray(Object &Paren, StringRef Key, ContainerTy &&C) {
+ Paren[Key] = Array(C);
+}
+
/// Serialize a \c VersionTuple \p V with the Symbol Graph semantic version
/// format.
///
@@ -159,27 +163,29 @@ std::optional<Array> serializeAvailability(const AvailabilityInfo &Avail) {
if (Avail.isDefault())
return std::nullopt;
- Object Availability;
Array AvailabilityArray;
- Availability["domain"] = Avail.Domain;
- serializeObject(Availability, "introduced",
- serializeSemanticVersion(Avail.Introduced));
- serializeObject(Availability, "deprecated",
- serializeSemanticVersion(Avail.Deprecated));
- serializeObject(Availability, "obsoleted",
- serializeSemanticVersion(Avail.Obsoleted));
+
if (Avail.isUnconditionallyDeprecated()) {
Object UnconditionallyDeprecated;
UnconditionallyDeprecated["domain"] = "*";
UnconditionallyDeprecated["isUnconditionallyDeprecated"] = true;
AvailabilityArray.emplace_back(std::move(UnconditionallyDeprecated));
}
- if (Avail.isUnconditionallyUnavailable()) {
- Object UnconditionallyUnavailable;
- UnconditionallyUnavailable["domain"] = "*";
- UnconditionallyUnavailable["isUnconditionallyUnavailable"] = true;
- AvailabilityArray.emplace_back(std::move(UnconditionallyUnavailable));
+ Object Availability;
+
+ Availability["domain"] = Avail.Domain;
+
+ if (Avail.isUnavailable()) {
+ Availability["isUnconditionallyUnavailable"] = true;
+ } else {
+ serializeObject(Availability, "introduced",
+ serializeSemanticVersion(Avail.Introduced));
+ serializeObject(Availability, "deprecated",
+ serializeSemanticVersion(Avail.Deprecated));
+ serializeObject(Availability, "obsoleted",
+ serializeSemanticVersion(Avail.Obsoleted));
}
+
AvailabilityArray.emplace_back(std::move(Availability));
return AvailabilityArray;
}
@@ -208,6 +214,7 @@ StringRef getLanguageName(Language Lang) {
case Language::Unknown:
case Language::Asm:
case Language::LLVM_IR:
+ case Language::CIR:
llvm_unreachable("Unsupported language kind");
}
@@ -247,6 +254,7 @@ std::optional<Object> serializeDocComment(const DocComment &Comment) {
return std::nullopt;
Object DocComment;
+
Array LinesArray;
for (const auto &CommentLine : Comment) {
Object Line;
@@ -255,7 +263,8 @@ std::optional<Object> serializeDocComment(const DocComment &Comment) {
serializeSourceRange(CommentLine.Begin, CommentLine.End));
LinesArray.emplace_back(std::move(Line));
}
- serializeArray(DocComment, "lines", LinesArray);
+
+ serializeArray(DocComment, "lines", std::move(LinesArray));
return DocComment;
}
@@ -321,19 +330,14 @@ serializeDeclarationFragments(const DeclarationFragments &DF) {
/// - \c subHeading : An array of declaration fragments that provides tags,
/// and potentially more tokens (for example the \c +/- symbol for
/// Objective-C methods). Can be used as sub-headings for documentation.
-Object serializeNames(const APIRecord &Record) {
+Object serializeNames(const APIRecord *Record) {
Object Names;
- if (auto *CategoryRecord =
- dyn_cast_or_null<const ObjCCategoryRecord>(&Record))
- Names["title"] =
- (CategoryRecord->Interface.Name + " (" + Record.Name + ")").str();
- else
- Names["title"] = Record.Name;
+ Names["title"] = Record->Name;
serializeArray(Names, "subHeading",
- serializeDeclarationFragments(Record.SubHeading));
+ serializeDeclarationFragments(Record->SubHeading));
DeclarationFragments NavigatorFragments;
- NavigatorFragments.append(Record.Name,
+ NavigatorFragments.append(Record->Name,
DeclarationFragments::FragmentKind::Identifier,
/*PreciseIdentifier*/ "");
serializeArray(Names, "navigator",
@@ -350,7 +354,8 @@ Object serializeSymbolKind(APIRecord::RecordKind RK, Language Lang) {
Object Kind;
switch (RK) {
case APIRecord::RK_Unknown:
- llvm_unreachable("Records should have an explicit kind");
+ Kind["identifier"] = AddLangPrefix("unknown");
+ Kind["displayName"] = "Unknown";
break;
case APIRecord::RK_Namespace:
Kind["identifier"] = AddLangPrefix("namespace");
@@ -483,10 +488,6 @@ Object serializeSymbolKind(APIRecord::RecordKind RK, Language Lang) {
Kind["identifier"] = AddLangPrefix("class.extension");
Kind["displayName"] = "Class Extension";
break;
- case APIRecord::RK_ObjCCategoryModule:
- Kind["identifier"] = AddLangPrefix("module.extension");
- Kind["displayName"] = "Module Extension";
- break;
case APIRecord::RK_ObjCProtocol:
Kind["identifier"] = AddLangPrefix("protocol");
Kind["displayName"] = "Protocol";
@@ -499,6 +500,8 @@ Object serializeSymbolKind(APIRecord::RecordKind RK, Language Lang) {
Kind["identifier"] = AddLangPrefix("typealias");
Kind["displayName"] = "Type Alias";
break;
+ default:
+ llvm_unreachable("API Record with uninstantiable kind");
}
return Kind;
@@ -510,15 +513,21 @@ Object serializeSymbolKind(APIRecord::RecordKind RK, Language Lang) {
/// which is prefixed by the source language name, useful for tooling to parse
/// the kind, and a \c displayName for rendering human-readable names.
Object serializeSymbolKind(const APIRecord &Record, Language Lang) {
- return serializeSymbolKind(Record.getKind(), Lang);
+ return serializeSymbolKind(Record.KindForDisplay, Lang);
}
+/// Serialize the function signature field, as specified by the
+/// Symbol Graph format.
+///
+/// The Symbol Graph function signature property contains two arrays.
+/// - The \c returns array is the declaration fragments of the return type;
+/// - The \c parameters array contains names and declaration fragments of the
+/// parameters.
template <typename RecordTy>
-std::optional<Object>
-serializeFunctionSignatureMixinImpl(const RecordTy &Record, std::true_type) {
+void serializeFunctionSignatureMixin(Object &Paren, const RecordTy &Record) {
const auto &FS = Record.Signature;
if (FS.empty())
- return std::nullopt;
+ return;
Object Signature;
serializeArray(Signature, "returns",
@@ -536,63 +545,14 @@ serializeFunctionSignatureMixinImpl(const RecordTy &Record, std::true_type) {
if (!Parameters.empty())
Signature["parameters"] = std::move(Parameters);
- return Signature;
+ serializeObject(Paren, "functionSignature", std::move(Signature));
}
template <typename RecordTy>
-std::optional<Object>
-serializeFunctionSignatureMixinImpl(const RecordTy &Record, std::false_type) {
- return std::nullopt;
-}
-
-/// Serialize the function signature field, as specified by the
-/// Symbol Graph format.
-///
-/// The Symbol Graph function signature property contains two arrays.
-/// - The \c returns array is the declaration fragments of the return type;
-/// - The \c parameters array contains names and declaration fragments of the
-/// parameters.
-///
-/// \returns \c std::nullopt if \p FS is empty, or an \c Object containing the
-/// formatted function signature.
-template <typename RecordTy>
-void serializeFunctionSignatureMixin(Object &Paren, const RecordTy &Record) {
- serializeObject(Paren, "functionSignature",
- serializeFunctionSignatureMixinImpl(
- Record, has_function_signature<RecordTy>()));
-}
-
-template <typename RecordTy>
-std::optional<std::string> serializeAccessMixinImpl(const RecordTy &Record,
- std::true_type) {
- const auto &AccessControl = Record.Access;
- std::string Access;
- if (AccessControl.empty())
- return std::nullopt;
- Access = AccessControl.getAccess();
- return Access;
-}
-
-template <typename RecordTy>
-std::optional<std::string> serializeAccessMixinImpl(const RecordTy &Record,
- std::false_type) {
- return std::nullopt;
-}
-
-template <typename RecordTy>
-void serializeAccessMixin(Object &Paren, const RecordTy &Record) {
- auto accessLevel = serializeAccessMixinImpl(Record, has_access<RecordTy>());
- if (!accessLevel.has_value())
- accessLevel = "public";
- serializeString(Paren, "accessLevel", accessLevel);
-}
-
-template <typename RecordTy>
-std::optional<Object> serializeTemplateMixinImpl(const RecordTy &Record,
- std::true_type) {
+void serializeTemplateMixin(Object &Paren, const RecordTy &Record) {
const auto &Template = Record.Templ;
if (Template.empty())
- return std::nullopt;
+ return;
Object Generics;
Array GenericParameters;
@@ -618,97 +578,66 @@ std::optional<Object> serializeTemplateMixinImpl(const RecordTy &Record,
if (!GenericConstraints.empty())
Generics["constraints"] = std::move(GenericConstraints);
- return Generics;
+ serializeObject(Paren, "swiftGenerics", Generics);
}
-template <typename RecordTy>
-std::optional<Object> serializeTemplateMixinImpl(const RecordTy &Record,
- std::false_type) {
- return std::nullopt;
-}
-
-template <typename RecordTy>
-void serializeTemplateMixin(Object &Paren, const RecordTy &Record) {
- serializeObject(Paren, "swiftGenerics",
- serializeTemplateMixinImpl(Record, has_template<RecordTy>()));
-}
+Array generateParentContexts(const SmallVectorImpl<SymbolReference> &Parents,
+ Language Lang) {
+ Array ParentContexts;
-struct PathComponent {
- StringRef USR;
- StringRef Name;
- APIRecord::RecordKind Kind;
+ for (const auto &Parent : Parents) {
+ Object Elem;
+ Elem["usr"] = Parent.USR;
+ Elem["name"] = Parent.Name;
+ if (Parent.Record)
+ Elem["kind"] = serializeSymbolKind(Parent.Record->KindForDisplay,
+ Lang)["identifier"];
+ else
+ Elem["kind"] =
+ serializeSymbolKind(APIRecord::RK_Unknown, Lang)["identifier"];
+ ParentContexts.emplace_back(std::move(Elem));
+ }
- PathComponent(StringRef USR, StringRef Name, APIRecord::RecordKind Kind)
- : USR(USR), Name(Name), Kind(Kind) {}
-};
+ return ParentContexts;
+}
-template <typename RecordTy>
-bool generatePathComponents(
- const RecordTy &Record, const APISet &API,
- function_ref<void(const PathComponent &)> ComponentTransformer) {
- SmallVector<PathComponent, 4> ReverseComponenents;
- ReverseComponenents.emplace_back(Record.USR, Record.Name, Record.getKind());
- const auto *CurrentParent = &Record.ParentInformation;
- bool FailedToFindParent = false;
- while (CurrentParent && !CurrentParent->empty()) {
- PathComponent CurrentParentComponent(CurrentParent->ParentUSR,
- CurrentParent->ParentName,
- CurrentParent->ParentKind);
-
- auto *ParentRecord = CurrentParent->ParentRecord;
- // Slow path if we don't have a direct reference to the ParentRecord
- if (!ParentRecord)
- ParentRecord = API.findRecordForUSR(CurrentParent->ParentUSR);
-
- // If the parent is a category extended from internal module then we need to
- // pretend this belongs to the associated interface.
- if (auto *CategoryRecord =
- dyn_cast_or_null<ObjCCategoryRecord>(ParentRecord)) {
- if (!CategoryRecord->IsFromExternalModule) {
- ParentRecord = API.findRecordForUSR(CategoryRecord->Interface.USR);
- CurrentParentComponent = PathComponent(CategoryRecord->Interface.USR,
- CategoryRecord->Interface.Name,
- APIRecord::RK_ObjCInterface);
- }
- }
-
- // The parent record doesn't exist which means the symbol shouldn't be
- // treated as part of the current product.
- if (!ParentRecord) {
- FailedToFindParent = true;
- break;
- }
-
- ReverseComponenents.push_back(std::move(CurrentParentComponent));
- CurrentParent = &ParentRecord->ParentInformation;
+/// Walk the record's parent information in reverse to generate a hierarchy
+/// suitable for serialization.
+SmallVector<SymbolReference, 8>
+generateHierarchyFromRecord(const APIRecord *Record) {
+ SmallVector<SymbolReference, 8> ReverseHierarchy;
+ for (const auto *Current = Record; Current != nullptr;
+ Current = Current->Parent.Record)
+ ReverseHierarchy.emplace_back(Current);
+
+ return SmallVector<SymbolReference, 8>(
+ std::make_move_iterator(ReverseHierarchy.rbegin()),
+ std::make_move_iterator(ReverseHierarchy.rend()));
+}
+
+SymbolReference getHierarchyReference(const APIRecord *Record,
+ const APISet &API) {
+ // If the parent is a category extended from internal module then we need to
+ // pretend this belongs to the associated interface.
+ if (auto *CategoryRecord = dyn_cast_or_null<ObjCCategoryRecord>(Record)) {
+ return CategoryRecord->Interface;
+ // FIXME: TODO generate path components correctly for categories extending
+ // an external module.
}
- for (const auto &PC : reverse(ReverseComponenents))
- ComponentTransformer(PC);
-
- return FailedToFindParent;
+ return SymbolReference(Record);
}
-Object serializeParentContext(const PathComponent &PC, Language Lang) {
- Object ParentContextElem;
- ParentContextElem["usr"] = PC.USR;
- ParentContextElem["name"] = PC.Name;
- ParentContextElem["kind"] = serializeSymbolKind(PC.Kind, Lang)["identifier"];
- return ParentContextElem;
-}
+} // namespace
-template <typename RecordTy>
-Array generateParentContexts(const RecordTy &Record, const APISet &API,
- Language Lang) {
- Array ParentContexts;
- generatePathComponents(
- Record, API, [Lang, &ParentContexts](const PathComponent &PC) {
- ParentContexts.push_back(serializeParentContext(PC, Lang));
- });
+Object *ExtendedModule::addSymbol(Object &&Symbol) {
+ Symbols.emplace_back(std::move(Symbol));
+ return Symbols.back().getAsObject();
+}
- return ParentContexts;
+void ExtendedModule::addRelationship(Object &&Relationship) {
+ Relationships.emplace_back(std::move(Relationship));
}
-} // namespace
/// Defines the format version emitted by SymbolGraphSerializer.
const VersionTuple SymbolGraphSerializer::FormatVersion{0, 5, 3};
@@ -721,84 +650,52 @@ Object SymbolGraphSerializer::serializeMetadata() const {
return Metadata;
}
-Object SymbolGraphSerializer::serializeModule() const {
+Object
+SymbolGraphSerializer::serializeModuleObject(StringRef ModuleName) const {
Object Module;
- // The user is expected to always pass `--product-name=` on the command line
- // to populate this field.
- Module["name"] = API.ProductName;
+ Module["name"] = ModuleName;
serializeObject(Module, "platform", serializePlatform(API.getTarget()));
return Module;
}
-bool SymbolGraphSerializer::shouldSkip(const APIRecord &Record) const {
- // Skip explicitly ignored symbols.
- if (IgnoresList.shouldIgnore(Record.Name))
+bool SymbolGraphSerializer::shouldSkip(const APIRecord *Record) const {
+ if (!Record)
return true;
// Skip unconditionally unavailable symbols
- if (Record.Availability.isUnconditionallyUnavailable())
+ if (Record->Availability.isUnconditionallyUnavailable())
return true;
+  // Filter out symbols without a name as we can't generate correct symbol
+  // graphs for them. In practice these are anonymous record types that aren't
+  // attached to a declaration.
+ if (auto *Tag = dyn_cast<TagRecord>(Record)) {
+ if (Tag->IsEmbeddedInVarDeclarator)
+ return true;
+ }
+
// Filter out symbols prefixed with an underscored as they are understood to
// be symbols clients should not use.
- if (Record.Name.starts_with("_"))
+ if (Record->Name.starts_with("_"))
+ return true;
+
+ // Skip explicitly ignored symbols.
+ if (IgnoresList.shouldIgnore(Record->Name))
return true;
return false;
}
-template <typename RecordTy>
-std::optional<Object>
-SymbolGraphSerializer::serializeAPIRecord(const RecordTy &Record) const {
- if (shouldSkip(Record))
- return std::nullopt;
+ExtendedModule &SymbolGraphSerializer::getModuleForCurrentSymbol() {
+ if (!ForceEmitToMainModule && ModuleForCurrentSymbol)
+ return *ModuleForCurrentSymbol;
- Object Obj;
- serializeObject(Obj, "identifier",
- serializeIdentifier(Record, API.getLanguage()));
- serializeObject(Obj, "kind", serializeSymbolKind(Record, API.getLanguage()));
- serializeObject(Obj, "names", serializeNames(Record));
- serializeObject(
- Obj, "location",
- serializeSourceLocation(Record.Location, /*IncludeFileURI=*/true));
- serializeArray(Obj, "availability",
- serializeAvailability(Record.Availability));
- serializeObject(Obj, "docComment", serializeDocComment(Record.Comment));
- serializeArray(Obj, "declarationFragments",
- serializeDeclarationFragments(Record.Declaration));
- SmallVector<StringRef, 4> PathComponentsNames;
- // If this returns true it indicates that we couldn't find a symbol in the
- // hierarchy.
- if (generatePathComponents(Record, API,
- [&PathComponentsNames](const PathComponent &PC) {
- PathComponentsNames.push_back(PC.Name);
- }))
- return {};
-
- serializeArray(Obj, "pathComponents", Array(PathComponentsNames));
-
- serializeFunctionSignatureMixin(Obj, Record);
- serializeAccessMixin(Obj, Record);
- serializeTemplateMixin(Obj, Record);
-
- return Obj;
+ return MainModule;
}
-template <typename MemberTy>
-void SymbolGraphSerializer::serializeMembers(
- const APIRecord &Record,
- const SmallVector<std::unique_ptr<MemberTy>> &Members) {
- // Members should not be serialized if we aren't recursing.
- if (!ShouldRecurse)
- return;
- for (const auto &Member : Members) {
- auto MemberRecord = serializeAPIRecord(*Member);
- if (!MemberRecord)
- continue;
-
- Symbols.emplace_back(std::move(*MemberRecord));
- serializeRelationship(RelationshipKind::MemberOf, *Member, Record);
- }
+Array SymbolGraphSerializer::serializePathComponents(
+ const APIRecord *Record) const {
+ return Array(map_range(Hierarchy, [](auto Elt) { return Elt.Name; }));
}
StringRef SymbolGraphSerializer::getRelationshipString(RelationshipKind Kind) {
@@ -815,6 +712,33 @@ StringRef SymbolGraphSerializer::getRelationshipString(RelationshipKind Kind) {
llvm_unreachable("Unhandled relationship kind");
}
+void SymbolGraphSerializer::serializeRelationship(RelationshipKind Kind,
+ const SymbolReference &Source,
+ const SymbolReference &Target,
+ ExtendedModule &Into) {
+ Object Relationship;
+ SmallString<64> TestRelLabel;
+ if (EmitSymbolLabelsForTesting) {
+ llvm::raw_svector_ostream OS(TestRelLabel);
+ OS << SymbolGraphSerializer::getRelationshipString(Kind) << " $ "
+ << Source.USR << " $ ";
+ if (Target.USR.empty())
+ OS << Target.Name;
+ else
+ OS << Target.USR;
+ Relationship["!testRelLabel"] = TestRelLabel;
+ }
+ Relationship["source"] = Source.USR;
+ Relationship["target"] = Target.USR;
+ Relationship["targetFallback"] = Target.Name;
+ Relationship["kind"] = SymbolGraphSerializer::getRelationshipString(Kind);
+
+ if (ForceEmitToMainModule)
+ MainModule.addRelationship(std::move(Relationship));
+ else
+ Into.addRelationship(std::move(Relationship));
+}
+
StringRef SymbolGraphSerializer::getConstraintString(ConstraintKind Kind) {
switch (Kind) {
case ConstraintKind::Conformance:
@@ -825,430 +749,331 @@ StringRef SymbolGraphSerializer::getConstraintString(ConstraintKind Kind) {
llvm_unreachable("Unhandled constraint kind");
}
-void SymbolGraphSerializer::serializeRelationship(RelationshipKind Kind,
- SymbolReference Source,
- SymbolReference Target) {
- Object Relationship;
- Relationship["source"] = Source.USR;
- Relationship["target"] = Target.USR;
- Relationship["targetFallback"] = Target.Name;
- Relationship["kind"] = getRelationshipString(Kind);
-
- Relationships.emplace_back(std::move(Relationship));
-}
+void SymbolGraphSerializer::serializeAPIRecord(const APIRecord *Record) {
+ Object Obj;
-void SymbolGraphSerializer::visitNamespaceRecord(
- const NamespaceRecord &Record) {
- auto Namespace = serializeAPIRecord(Record);
- if (!Namespace)
- return;
- Symbols.emplace_back(std::move(*Namespace));
- if (!Record.ParentInformation.empty())
- serializeRelationship(RelationshipKind::MemberOf, Record,
- Record.ParentInformation.ParentRecord);
-}
+ // If we need symbol labels for testing emit the USR as the value and the key
+ // starts with '!'' to ensure it ends up at the top of the object.
+ if (EmitSymbolLabelsForTesting)
+ Obj["!testLabel"] = Record->USR;
-void SymbolGraphSerializer::visitGlobalFunctionRecord(
- const GlobalFunctionRecord &Record) {
- auto Obj = serializeAPIRecord(Record);
- if (!Obj)
- return;
+ serializeObject(Obj, "identifier",
+ serializeIdentifier(*Record, API.getLanguage()));
+ serializeObject(Obj, "kind", serializeSymbolKind(*Record, API.getLanguage()));
+ serializeObject(Obj, "names", serializeNames(Record));
+ serializeObject(
+ Obj, "location",
+ serializeSourceLocation(Record->Location, /*IncludeFileURI=*/true));
+ serializeArray(Obj, "availability",
+ serializeAvailability(Record->Availability));
+ serializeObject(Obj, "docComment", serializeDocComment(Record->Comment));
+ serializeArray(Obj, "declarationFragments",
+ serializeDeclarationFragments(Record->Declaration));
- Symbols.emplace_back(std::move(*Obj));
-}
+ Obj["pathComponents"] = serializePathComponents(Record);
+ Obj["accessLevel"] = Record->Access.getAccess();
-void SymbolGraphSerializer::visitGlobalVariableRecord(
- const GlobalVariableRecord &Record) {
- auto Obj = serializeAPIRecord(Record);
- if (!Obj)
- return;
+ ExtendedModule &Module = getModuleForCurrentSymbol();
+ // If the hierarchy has at least one parent and child.
+ if (Hierarchy.size() >= 2)
+ serializeRelationship(MemberOf, Hierarchy.back(),
+ Hierarchy[Hierarchy.size() - 2], Module);
- Symbols.emplace_back(std::move(*Obj));
+ CurrentSymbol = Module.addSymbol(std::move(Obj));
}
-void SymbolGraphSerializer::visitEnumRecord(const EnumRecord &Record) {
- auto Enum = serializeAPIRecord(Record);
- if (!Enum)
- return;
-
- Symbols.emplace_back(std::move(*Enum));
- serializeMembers(Record, Record.Constants);
+bool SymbolGraphSerializer::traverseAPIRecord(const APIRecord *Record) {
+ if (!Record)
+ return true;
+ if (shouldSkip(Record))
+ return true;
+ Hierarchy.push_back(getHierarchyReference(Record, API));
+ // Defer traversal mechanics to APISetVisitor base implementation
+ auto RetVal = Base::traverseAPIRecord(Record);
+ Hierarchy.pop_back();
+ return RetVal;
}
-void SymbolGraphSerializer::visitRecordRecord(const RecordRecord &Record) {
- auto SerializedRecord = serializeAPIRecord(Record);
- if (!SerializedRecord)
- return;
-
- Symbols.emplace_back(std::move(*SerializedRecord));
- serializeMembers(Record, Record.Fields);
+bool SymbolGraphSerializer::visitAPIRecord(const APIRecord *Record) {
+ serializeAPIRecord(Record);
+ return true;
}
-void SymbolGraphSerializer::visitStaticFieldRecord(
- const StaticFieldRecord &Record) {
- auto StaticField = serializeAPIRecord(Record);
- if (!StaticField)
- return;
- Symbols.emplace_back(std::move(*StaticField));
- serializeRelationship(RelationshipKind::MemberOf, Record, Record.Context);
+bool SymbolGraphSerializer::visitGlobalFunctionRecord(
+ const GlobalFunctionRecord *Record) {
+ if (!CurrentSymbol)
+ return true;
+
+ serializeFunctionSignatureMixin(*CurrentSymbol, *Record);
+ return true;
}
-void SymbolGraphSerializer::visitCXXClassRecord(const CXXClassRecord &Record) {
- auto Class = serializeAPIRecord(Record);
- if (!Class)
- return;
+bool SymbolGraphSerializer::visitCXXClassRecord(const CXXClassRecord *Record) {
+ if (!CurrentSymbol)
+ return true;
- Symbols.emplace_back(std::move(*Class));
- for (const auto &Base : Record.Bases)
- serializeRelationship(RelationshipKind::InheritsFrom, Record, Base);
- if (!Record.ParentInformation.empty())
- serializeRelationship(RelationshipKind::MemberOf, Record,
- Record.ParentInformation.ParentRecord);
+ for (const auto &Base : Record->Bases)
+ serializeRelationship(RelationshipKind::InheritsFrom, Record, Base,
+ getModuleForCurrentSymbol());
+ return true;
}
-void SymbolGraphSerializer::visitClassTemplateRecord(
- const ClassTemplateRecord &Record) {
- auto Class = serializeAPIRecord(Record);
- if (!Class)
- return;
+bool SymbolGraphSerializer::visitClassTemplateRecord(
+ const ClassTemplateRecord *Record) {
+ if (!CurrentSymbol)
+ return true;
- Symbols.emplace_back(std::move(*Class));
- for (const auto &Base : Record.Bases)
- serializeRelationship(RelationshipKind::InheritsFrom, Record, Base);
- if (!Record.ParentInformation.empty())
- serializeRelationship(RelationshipKind::MemberOf, Record,
- Record.ParentInformation.ParentRecord);
+ serializeTemplateMixin(*CurrentSymbol, *Record);
+ return true;
}
-void SymbolGraphSerializer::visitClassTemplateSpecializationRecord(
- const ClassTemplateSpecializationRecord &Record) {
- auto Class = serializeAPIRecord(Record);
- if (!Class)
- return;
-
- Symbols.emplace_back(std::move(*Class));
+bool SymbolGraphSerializer::visitClassTemplatePartialSpecializationRecord(
+ const ClassTemplatePartialSpecializationRecord *Record) {
+ if (!CurrentSymbol)
+ return true;
- for (const auto &Base : Record.Bases)
- serializeRelationship(RelationshipKind::InheritsFrom, Record, Base);
- if (!Record.ParentInformation.empty())
- serializeRelationship(RelationshipKind::MemberOf, Record,
- Record.ParentInformation.ParentRecord);
+ serializeTemplateMixin(*CurrentSymbol, *Record);
+ return true;
}
-void SymbolGraphSerializer::visitClassTemplatePartialSpecializationRecord(
- const ClassTemplatePartialSpecializationRecord &Record) {
- auto Class = serializeAPIRecord(Record);
- if (!Class)
- return;
-
- Symbols.emplace_back(std::move(*Class));
+bool SymbolGraphSerializer::visitCXXMethodRecord(
+ const CXXMethodRecord *Record) {
+ if (!CurrentSymbol)
+ return true;
- for (const auto &Base : Record.Bases)
- serializeRelationship(RelationshipKind::InheritsFrom, Record, Base);
- if (!Record.ParentInformation.empty())
- serializeRelationship(RelationshipKind::MemberOf, Record,
- Record.ParentInformation.ParentRecord);
+ serializeFunctionSignatureMixin(*CurrentSymbol, *Record);
+ return true;
}
-void SymbolGraphSerializer::visitCXXInstanceMethodRecord(
- const CXXInstanceMethodRecord &Record) {
- auto InstanceMethod = serializeAPIRecord(Record);
- if (!InstanceMethod)
- return;
+bool SymbolGraphSerializer::visitCXXMethodTemplateRecord(
+ const CXXMethodTemplateRecord *Record) {
+ if (!CurrentSymbol)
+ return true;
- Symbols.emplace_back(std::move(*InstanceMethod));
- serializeRelationship(RelationshipKind::MemberOf, Record,
- Record.ParentInformation.ParentRecord);
+ serializeTemplateMixin(*CurrentSymbol, *Record);
+ return true;
}
-void SymbolGraphSerializer::visitCXXStaticMethodRecord(
- const CXXStaticMethodRecord &Record) {
- auto StaticMethod = serializeAPIRecord(Record);
- if (!StaticMethod)
- return;
+bool SymbolGraphSerializer::visitCXXFieldTemplateRecord(
+ const CXXFieldTemplateRecord *Record) {
+ if (!CurrentSymbol)
+ return true;
- Symbols.emplace_back(std::move(*StaticMethod));
- serializeRelationship(RelationshipKind::MemberOf, Record,
- Record.ParentInformation.ParentRecord);
+ serializeTemplateMixin(*CurrentSymbol, *Record);
+ return true;
}
-void SymbolGraphSerializer::visitMethodTemplateRecord(
- const CXXMethodTemplateRecord &Record) {
- if (!ShouldRecurse)
- // Ignore child symbols
- return;
- auto MethodTemplate = serializeAPIRecord(Record);
- if (!MethodTemplate)
- return;
- Symbols.emplace_back(std::move(*MethodTemplate));
- serializeRelationship(RelationshipKind::MemberOf, Record,
- Record.ParentInformation.ParentRecord);
-}
+bool SymbolGraphSerializer::visitConceptRecord(const ConceptRecord *Record) {
+ if (!CurrentSymbol)
+ return true;
-void SymbolGraphSerializer::visitMethodTemplateSpecializationRecord(
- const CXXMethodTemplateSpecializationRecord &Record) {
- if (!ShouldRecurse)
- // Ignore child symbols
- return;
- auto MethodTemplateSpecialization = serializeAPIRecord(Record);
- if (!MethodTemplateSpecialization)
- return;
- Symbols.emplace_back(std::move(*MethodTemplateSpecialization));
- serializeRelationship(RelationshipKind::MemberOf, Record,
- Record.ParentInformation.ParentRecord);
+ serializeTemplateMixin(*CurrentSymbol, *Record);
+ return true;
}
-void SymbolGraphSerializer::visitCXXFieldRecord(const CXXFieldRecord &Record) {
- if (!ShouldRecurse)
- return;
- auto CXXField = serializeAPIRecord(Record);
- if (!CXXField)
- return;
- Symbols.emplace_back(std::move(*CXXField));
- serializeRelationship(RelationshipKind::MemberOf, Record,
- Record.ParentInformation.ParentRecord);
-}
+bool SymbolGraphSerializer::visitGlobalVariableTemplateRecord(
+ const GlobalVariableTemplateRecord *Record) {
+ if (!CurrentSymbol)
+ return true;
-void SymbolGraphSerializer::visitCXXFieldTemplateRecord(
- const CXXFieldTemplateRecord &Record) {
- if (!ShouldRecurse)
- // Ignore child symbols
- return;
- auto CXXFieldTemplate = serializeAPIRecord(Record);
- if (!CXXFieldTemplate)
- return;
- Symbols.emplace_back(std::move(*CXXFieldTemplate));
- serializeRelationship(RelationshipKind::MemberOf, Record,
- Record.ParentInformation.ParentRecord);
+ serializeTemplateMixin(*CurrentSymbol, *Record);
+ return true;
}
-void SymbolGraphSerializer::visitConceptRecord(const ConceptRecord &Record) {
- auto Concept = serializeAPIRecord(Record);
- if (!Concept)
- return;
+bool SymbolGraphSerializer::
+ visitGlobalVariableTemplatePartialSpecializationRecord(
+ const GlobalVariableTemplatePartialSpecializationRecord *Record) {
+ if (!CurrentSymbol)
+ return true;
- Symbols.emplace_back(std::move(*Concept));
+ serializeTemplateMixin(*CurrentSymbol, *Record);
+ return true;
}
-void SymbolGraphSerializer::visitGlobalVariableTemplateRecord(
- const GlobalVariableTemplateRecord &Record) {
- auto GlobalVariableTemplate = serializeAPIRecord(Record);
- if (!GlobalVariableTemplate)
- return;
- Symbols.emplace_back(std::move(*GlobalVariableTemplate));
-}
+bool SymbolGraphSerializer::visitGlobalFunctionTemplateRecord(
+ const GlobalFunctionTemplateRecord *Record) {
+ if (!CurrentSymbol)
+ return true;
-void SymbolGraphSerializer::visitGlobalVariableTemplateSpecializationRecord(
- const GlobalVariableTemplateSpecializationRecord &Record) {
- auto GlobalVariableTemplateSpecialization = serializeAPIRecord(Record);
- if (!GlobalVariableTemplateSpecialization)
- return;
- Symbols.emplace_back(std::move(*GlobalVariableTemplateSpecialization));
+ serializeTemplateMixin(*CurrentSymbol, *Record);
+ return true;
}
-void SymbolGraphSerializer::
- visitGlobalVariableTemplatePartialSpecializationRecord(
- const GlobalVariableTemplatePartialSpecializationRecord &Record) {
- auto GlobalVariableTemplatePartialSpecialization = serializeAPIRecord(Record);
- if (!GlobalVariableTemplatePartialSpecialization)
- return;
- Symbols.emplace_back(std::move(*GlobalVariableTemplatePartialSpecialization));
-}
+bool SymbolGraphSerializer::visitObjCContainerRecord(
+ const ObjCContainerRecord *Record) {
+ if (!CurrentSymbol)
+ return true;
-void SymbolGraphSerializer::visitGlobalFunctionTemplateRecord(
- const GlobalFunctionTemplateRecord &Record) {
- auto GlobalFunctionTemplate = serializeAPIRecord(Record);
- if (!GlobalFunctionTemplate)
- return;
- Symbols.emplace_back(std::move(*GlobalFunctionTemplate));
-}
+ for (const auto &Protocol : Record->Protocols)
+ serializeRelationship(ConformsTo, Record, Protocol,
+ getModuleForCurrentSymbol());
-void SymbolGraphSerializer::visitGlobalFunctionTemplateSpecializationRecord(
- const GlobalFunctionTemplateSpecializationRecord &Record) {
- auto GlobalFunctionTemplateSpecialization = serializeAPIRecord(Record);
- if (!GlobalFunctionTemplateSpecialization)
- return;
- Symbols.emplace_back(std::move(*GlobalFunctionTemplateSpecialization));
+ return true;
}
-void SymbolGraphSerializer::visitObjCContainerRecord(
- const ObjCContainerRecord &Record) {
- auto ObjCContainer = serializeAPIRecord(Record);
- if (!ObjCContainer)
- return;
+bool SymbolGraphSerializer::visitObjCInterfaceRecord(
+ const ObjCInterfaceRecord *Record) {
+ if (!CurrentSymbol)
+ return true;
- Symbols.emplace_back(std::move(*ObjCContainer));
-
- serializeMembers(Record, Record.Ivars);
- serializeMembers(Record, Record.Methods);
- serializeMembers(Record, Record.Properties);
-
- for (const auto &Protocol : Record.Protocols)
- // Record that Record conforms to Protocol.
- serializeRelationship(RelationshipKind::ConformsTo, Record, Protocol);
-
- if (auto *ObjCInterface = dyn_cast<ObjCInterfaceRecord>(&Record)) {
- if (!ObjCInterface->SuperClass.empty())
- // If Record is an Objective-C interface record and it has a super class,
- // record that Record is inherited from SuperClass.
- serializeRelationship(RelationshipKind::InheritsFrom, Record,
- ObjCInterface->SuperClass);
-
- // Members of categories extending an interface are serialized as members of
- // the interface.
- for (const auto *Category : ObjCInterface->Categories) {
- serializeMembers(Record, Category->Ivars);
- serializeMembers(Record, Category->Methods);
- serializeMembers(Record, Category->Properties);
-
- // Surface the protocols of the category to the interface.
- for (const auto &Protocol : Category->Protocols)
- serializeRelationship(RelationshipKind::ConformsTo, Record, Protocol);
- }
- }
+ if (!Record->SuperClass.empty())
+ serializeRelationship(InheritsFrom, Record, Record->SuperClass,
+ getModuleForCurrentSymbol());
+ return true;
}
-void SymbolGraphSerializer::visitObjCCategoryRecord(
- const ObjCCategoryRecord &Record) {
- if (!Record.IsFromExternalModule)
- return;
+bool SymbolGraphSerializer::traverseObjCCategoryRecord(
+ const ObjCCategoryRecord *Record) {
+ if (SkipSymbolsInCategoriesToExternalTypes &&
+ !API.findRecordForUSR(Record->Interface.USR))
+ return true;
- // Check if the current Category' parent has been visited before, if so skip.
- if (!visitedCategories.contains(Record.Interface.Name)) {
- visitedCategories.insert(Record.Interface.Name);
- Object Obj;
- serializeObject(Obj, "identifier",
- serializeIdentifier(Record, API.getLanguage()));
- serializeObject(Obj, "kind",
- serializeSymbolKind(APIRecord::RK_ObjCCategoryModule,
- API.getLanguage()));
- Obj["accessLevel"] = "public";
- Symbols.emplace_back(std::move(Obj));
- }
+ auto *CurrentModule = ModuleForCurrentSymbol;
+ if (Record->isExtendingExternalModule())
+ ModuleForCurrentSymbol = &ExtendedModules[Record->Interface.Source];
- Object Relationship;
- Relationship["source"] = Record.USR;
- Relationship["target"] = Record.Interface.USR;
- Relationship["targetFallback"] = Record.Interface.Name;
- Relationship["kind"] = getRelationshipString(RelationshipKind::ExtensionTo);
- Relationships.emplace_back(std::move(Relationship));
+ if (!walkUpFromObjCCategoryRecord(Record))
+ return false;
- auto ObjCCategory = serializeAPIRecord(Record);
+ bool RetVal = traverseRecordContext(Record);
+ ModuleForCurrentSymbol = CurrentModule;
+ return RetVal;
+}
- if (!ObjCCategory)
- return;
+bool SymbolGraphSerializer::walkUpFromObjCCategoryRecord(
+ const ObjCCategoryRecord *Record) {
+ return visitObjCCategoryRecord(Record);
+}
- Symbols.emplace_back(std::move(*ObjCCategory));
- serializeMembers(Record, Record.Methods);
- serializeMembers(Record, Record.Properties);
+bool SymbolGraphSerializer::visitObjCCategoryRecord(
+ const ObjCCategoryRecord *Record) {
+ // If we need to create a record for the category in the future do so here,
+ // otherwise everything is set up to pretend that the category is in fact the
+ // interface it extends.
+ for (const auto &Protocol : Record->Protocols)
+ serializeRelationship(ConformsTo, Record->Interface, Protocol,
+ getModuleForCurrentSymbol());
- // Surface the protocols of the category to the interface.
- for (const auto &Protocol : Record.Protocols)
- serializeRelationship(RelationshipKind::ConformsTo, Record, Protocol);
+ return true;
}
-void SymbolGraphSerializer::visitMacroDefinitionRecord(
- const MacroDefinitionRecord &Record) {
- auto Macro = serializeAPIRecord(Record);
+bool SymbolGraphSerializer::visitObjCMethodRecord(
+ const ObjCMethodRecord *Record) {
+ if (!CurrentSymbol)
+ return true;
- if (!Macro)
- return;
+ serializeFunctionSignatureMixin(*CurrentSymbol, *Record);
+ return true;
+}
- Symbols.emplace_back(std::move(*Macro));
+bool SymbolGraphSerializer::visitObjCInstanceVariableRecord(
+ const ObjCInstanceVariableRecord *Record) {
+ // FIXME: serialize ivar access control here.
+ return true;
}
-void SymbolGraphSerializer::serializeSingleRecord(const APIRecord *Record) {
- switch (Record->getKind()) {
- case APIRecord::RK_Unknown:
- llvm_unreachable("Records should have a known kind!");
- case APIRecord::RK_GlobalFunction:
- visitGlobalFunctionRecord(*cast<GlobalFunctionRecord>(Record));
- break;
- case APIRecord::RK_GlobalVariable:
- visitGlobalVariableRecord(*cast<GlobalVariableRecord>(Record));
- break;
- case APIRecord::RK_Enum:
- visitEnumRecord(*cast<EnumRecord>(Record));
- break;
- case APIRecord::RK_Struct:
- LLVM_FALLTHROUGH;
- case APIRecord::RK_Union:
- visitRecordRecord(*cast<RecordRecord>(Record));
- break;
- case APIRecord::RK_StaticField:
- visitStaticFieldRecord(*cast<StaticFieldRecord>(Record));
- break;
- case APIRecord::RK_CXXClass:
- visitCXXClassRecord(*cast<CXXClassRecord>(Record));
- break;
- case APIRecord::RK_ObjCInterface:
- visitObjCContainerRecord(*cast<ObjCInterfaceRecord>(Record));
- break;
- case APIRecord::RK_ObjCProtocol:
- visitObjCContainerRecord(*cast<ObjCProtocolRecord>(Record));
- break;
- case APIRecord::RK_ObjCCategory:
- visitObjCCategoryRecord(*cast<ObjCCategoryRecord>(Record));
- break;
- case APIRecord::RK_MacroDefinition:
- visitMacroDefinitionRecord(*cast<MacroDefinitionRecord>(Record));
- break;
- case APIRecord::RK_Typedef:
- visitTypedefRecord(*cast<TypedefRecord>(Record));
- break;
- default:
- if (auto Obj = serializeAPIRecord(*Record)) {
- Symbols.emplace_back(std::move(*Obj));
- auto &ParentInformation = Record->ParentInformation;
- if (!ParentInformation.empty())
- serializeRelationship(RelationshipKind::MemberOf, *Record,
- *ParentInformation.ParentRecord);
- }
- break;
- }
+bool SymbolGraphSerializer::walkUpFromTypedefRecord(
+ const TypedefRecord *Record) {
+ // Short-circuit walking up the class hierarchy and handle creating typedef
+ // symbol objects manually as there are additional symbol dropping rules to
+ // respect.
+ return visitTypedefRecord(Record);
}
-void SymbolGraphSerializer::visitTypedefRecord(const TypedefRecord &Record) {
+bool SymbolGraphSerializer::visitTypedefRecord(const TypedefRecord *Record) {
// Typedefs of anonymous types have their entries unified with the underlying
// type.
- bool ShouldDrop = Record.UnderlyingType.Name.empty();
+ bool ShouldDrop = Record->UnderlyingType.Name.empty();
// enums declared with `NS_OPTION` have a named enum and a named typedef, with
// the same name
- ShouldDrop |= (Record.UnderlyingType.Name == Record.Name);
+ ShouldDrop |= (Record->UnderlyingType.Name == Record->Name);
if (ShouldDrop)
- return;
+ return true;
- auto Typedef = serializeAPIRecord(Record);
- if (!Typedef)
- return;
+ // Create the symbol record if the other symbol droppping rules permit it.
+ serializeAPIRecord(Record);
+ if (!CurrentSymbol)
+ return true;
- (*Typedef)["type"] = Record.UnderlyingType.USR;
+ (*CurrentSymbol)["type"] = Record->UnderlyingType.USR;
- Symbols.emplace_back(std::move(*Typedef));
+ return true;
}
-Object SymbolGraphSerializer::serialize() {
- traverseAPISet();
- return serializeCurrentGraph();
+void SymbolGraphSerializer::serializeSingleRecord(const APIRecord *Record) {
+ switch (Record->getKind()) {
+ // dispatch to the relevant walkUpFromMethod
+#define CONCRETE_RECORD(CLASS, BASE, KIND) \
+ case APIRecord::KIND: { \
+ walkUpFrom##CLASS(static_cast<const CLASS *>(Record)); \
+ break; \
+ }
+#include "clang/ExtractAPI/APIRecords.inc"
+ // otherwise fallback on the only behavior we can implement safely.
+ case APIRecord::RK_Unknown:
+ visitAPIRecord(Record);
+ break;
+ default:
+ llvm_unreachable("API Record with uninstantiable kind");
+ }
}
-Object SymbolGraphSerializer::serializeCurrentGraph() {
+Object SymbolGraphSerializer::serializeGraph(StringRef ModuleName,
+ ExtendedModule &&EM) {
Object Root;
serializeObject(Root, "metadata", serializeMetadata());
- serializeObject(Root, "module", serializeModule());
+ serializeObject(Root, "module", serializeModuleObject(ModuleName));
- Root["symbols"] = std::move(Symbols);
- Root["relationships"] = std::move(Relationships);
+ Root["symbols"] = std::move(EM.Symbols);
+ Root["relationships"] = std::move(EM.Relationships);
return Root;
}
-void SymbolGraphSerializer::serialize(raw_ostream &os) {
- Object root = serialize();
+void SymbolGraphSerializer::serializeGraphToStream(
+ raw_ostream &OS, SymbolGraphSerializerOption Options, StringRef ModuleName,
+ ExtendedModule &&EM) {
+ Object Root = serializeGraph(ModuleName, std::move(EM));
if (Options.Compact)
- os << formatv("{0}", Value(std::move(root))) << "\n";
+ OS << formatv("{0}", json::Value(std::move(Root))) << "\n";
else
- os << formatv("{0:2}", Value(std::move(root))) << "\n";
+ OS << formatv("{0:2}", json::Value(std::move(Root))) << "\n";
+}
+
+void SymbolGraphSerializer::serializeMainSymbolGraph(
+ raw_ostream &OS, const APISet &API, const APIIgnoresList &IgnoresList,
+ SymbolGraphSerializerOption Options) {
+ SymbolGraphSerializer Serializer(
+ API, IgnoresList, Options.EmitSymbolLabelsForTesting,
+ /*ForceEmitToMainModule=*/true,
+ /*SkipSymbolsInCategoriesToExternalTypes=*/true);
+
+ Serializer.traverseAPISet();
+ Serializer.serializeGraphToStream(OS, Options, API.ProductName,
+ std::move(Serializer.MainModule));
+ // FIXME: TODO handle extended modules here
+}
+
+void SymbolGraphSerializer::serializeWithExtensionGraphs(
+ raw_ostream &MainOutput, const APISet &API,
+ const APIIgnoresList &IgnoresList,
+ llvm::function_ref<std::unique_ptr<llvm::raw_pwrite_stream>(Twine BaseName)>
+ CreateOutputStream,
+ SymbolGraphSerializerOption Options) {
+ SymbolGraphSerializer Serializer(API, IgnoresList,
+ Options.EmitSymbolLabelsForTesting);
+ Serializer.traverseAPISet();
+
+ Serializer.serializeGraphToStream(MainOutput, Options, API.ProductName,
+ std::move(Serializer.MainModule));
+
+ for (auto &ExtensionSGF : Serializer.ExtendedModules) {
+ if (auto ExtensionOS =
+ CreateOutputStream(ExtensionSGF.getKey() + "@" + API.ProductName))
+ Serializer.serializeGraphToStream(*ExtensionOS, Options,
+ ExtensionSGF.getKey(),
+ std::move(ExtensionSGF.getValue()));
+ }
}
std::optional<Object>
@@ -1261,14 +1086,20 @@ SymbolGraphSerializer::serializeSingleSymbolSGF(StringRef USR,
Object Root;
APIIgnoresList EmptyIgnores;
SymbolGraphSerializer Serializer(API, EmptyIgnores,
- /*Options.Compact*/ {true},
- /*ShouldRecurse*/ false);
+ /*EmitSymbolLabelsForTesting*/ false,
+ /*ForceEmitToMainModule*/ true);
+
+ // Set up serializer parent chain
+ Serializer.Hierarchy = generateHierarchyFromRecord(Record);
+
Serializer.serializeSingleRecord(Record);
- serializeObject(Root, "symbolGraph", Serializer.serializeCurrentGraph());
+ serializeObject(Root, "symbolGraph",
+ Serializer.serializeGraph(API.ProductName,
+ std::move(Serializer.MainModule)));
Language Lang = API.getLanguage();
serializeArray(Root, "parentContexts",
- generateParentContexts(*Record, API, Lang));
+ generateParentContexts(Serializer.Hierarchy, Lang));
Array RelatedSymbols;
@@ -1286,14 +1117,15 @@ SymbolGraphSerializer::serializeSingleSymbolSGF(StringRef USR,
Object RelatedSymbol;
RelatedSymbol["usr"] = RelatedRecord->USR;
RelatedSymbol["declarationLanguage"] = getLanguageName(Lang);
- // TODO: once we record this properly let's serialize it right.
- RelatedSymbol["accessLevel"] = "public";
+ RelatedSymbol["accessLevel"] = RelatedRecord->Access.getAccess();
RelatedSymbol["filePath"] = RelatedRecord->Location.getFilename();
RelatedSymbol["moduleName"] = API.ProductName;
RelatedSymbol["isSystem"] = RelatedRecord->IsFromSystemHeader;
serializeArray(RelatedSymbol, "parentContexts",
- generateParentContexts(*RelatedRecord, API, Lang));
+ generateParentContexts(
+ generateHierarchyFromRecord(RelatedRecord), Lang));
+
RelatedSymbols.push_back(std::move(RelatedSymbol));
}
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp
index 3a5f62c9b2e6..41e4e0cf1795 100644
--- a/contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/ExtractAPI/TypedefUnderlyingTypeResolver.h"
+#include "clang/Basic/Module.h"
#include "clang/Index/USRGeneration.h"
using namespace clang;
@@ -50,17 +51,20 @@ TypedefUnderlyingTypeResolver::getSymbolReferenceForType(QualType Type,
SmallString<128> TypeUSR;
const NamedDecl *TypeDecl = getUnderlyingTypeDecl(Type);
const TypedefType *TypedefTy = Type->getAs<TypedefType>();
+ StringRef OwningModuleName;
if (TypeDecl) {
if (!TypedefTy)
TypeName = TypeDecl->getName().str();
clang::index::generateUSRForDecl(TypeDecl, TypeUSR);
+ if (auto *OwningModule = TypeDecl->getImportedOwningModule())
+ OwningModuleName = OwningModule->Name;
} else {
clang::index::generateUSRForType(Type, Context, TypeUSR);
}
- return {API.copyString(TypeName), API.copyString(TypeUSR)};
+ return API.createSymbolReference(TypeName, TypeUSR, OwningModuleName);
}
std::string TypedefUnderlyingTypeResolver::getUSRForType(QualType Type) const {
diff --git a/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp b/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp
index 473908e8fee3..75304908dc65 100644
--- a/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp
+++ b/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp
@@ -449,11 +449,11 @@ const FormatToken &BreakableComment::tokenAt(unsigned LineIndex) const {
static bool mayReflowContent(StringRef Content) {
Content = Content.trim(Blanks);
- // Lines starting with '@' commonly have special meaning.
+ // Lines starting with '@' or '\' commonly have special meaning.
// Lines starting with '-', '-#', '+' or '*' are bulleted/numbered lists.
bool hasSpecialMeaningPrefix = false;
for (StringRef Prefix :
- {"@", "TODO", "FIXME", "XXX", "-# ", "- ", "+ ", "* "}) {
+ {"@", "\\", "TODO", "FIXME", "XXX", "-# ", "- ", "+ ", "* "}) {
if (Content.starts_with(Prefix)) {
hasSpecialMeaningPrefix = true;
break;
diff --git a/contrib/llvm-project/clang/lib/Format/BreakableToken.h b/contrib/llvm-project/clang/lib/Format/BreakableToken.h
index e7c0680641e2..8b9360a3335e 100644
--- a/contrib/llvm-project/clang/lib/Format/BreakableToken.h
+++ b/contrib/llvm-project/clang/lib/Format/BreakableToken.h
@@ -18,11 +18,8 @@
#define LLVM_CLANG_LIB_FORMAT_BREAKABLETOKEN_H
#include "Encoding.h"
-#include "TokenAnnotator.h"
#include "WhitespaceManager.h"
#include "llvm/ADT/StringSet.h"
-#include "llvm/Support/Regex.h"
-#include <utility>
namespace clang {
namespace format {
diff --git a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
index a3eb9138b218..7d89f0e63dd2 100644
--- a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
+++ b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
@@ -328,9 +328,17 @@ bool ContinuationIndenter::canBreak(const LineState &State) {
// Don't break after very short return types (e.g. "void") as that is often
// unexpected.
- if (Current.is(TT_FunctionDeclarationName) && State.Column < 6) {
- if (Style.AlwaysBreakAfterReturnType == FormatStyle::RTBS_None)
+ if (Current.is(TT_FunctionDeclarationName)) {
+ if (Style.BreakAfterReturnType == FormatStyle::RTBS_None &&
+ State.Column < 6) {
return false;
+ }
+
+ if (Style.BreakAfterReturnType == FormatStyle::RTBS_ExceptShortType) {
+ assert(State.Column >= State.FirstIndent);
+ if (State.Column - State.FirstIndent < 6)
+ return false;
+ }
}
// If binary operators are moved to the next line (including commas for some
@@ -561,7 +569,9 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
return true;
}
}
- return Style.AlwaysBreakTemplateDeclarations != FormatStyle::BTDS_No;
+ return Style.BreakTemplateDeclarations != FormatStyle::BTDS_No &&
+ (Style.BreakTemplateDeclarations != FormatStyle::BTDS_Leave ||
+ Current.NewlinesBefore > 0);
}
if (Previous.is(TT_FunctionAnnotationRParen) &&
State.Line->Type != LT_PreprocessorDirective) {
@@ -587,7 +597,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
!State.Line->ReturnTypeWrapped &&
// Don't break before a C# function when no break after return type.
(!Style.isCSharp() ||
- Style.AlwaysBreakAfterReturnType != FormatStyle::RTBS_None) &&
+ Style.BreakAfterReturnType > FormatStyle::RTBS_ExceptShortType) &&
// Don't always break between a JavaScript `function` and the function
// name.
!Style.isJavaScript() && Previous.isNot(tok::kw_template) &&
@@ -674,7 +684,13 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
// arguments to function calls. We do this by ensuring that either all
// arguments (including any lambdas) go on the same line as the function
// call, or we break before the first argument.
- auto PrevNonComment = Current.getPreviousNonComment();
+ const auto *Prev = Current.Previous;
+ if (!Prev)
+ return false;
+ // For example, `/*Newline=*/false`.
+ if (Prev->is(TT_BlockComment) && Current.SpacesRequiredBefore == 0)
+ return false;
+ const auto *PrevNonComment = Current.getPreviousNonComment();
if (!PrevNonComment || PrevNonComment->isNot(tok::l_paren))
return false;
if (Current.isOneOf(tok::comment, tok::l_paren, TT_LambdaLSquare))
@@ -811,6 +827,8 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
if (Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign &&
!CurrentState.IsCSharpGenericTypeConstraint && Previous.opensScope() &&
Previous.isNot(TT_ObjCMethodExpr) && Previous.isNot(TT_RequiresClause) &&
+ Previous.isNot(TT_TableGenDAGArgOpener) &&
+ Previous.isNot(TT_TableGenDAGArgOpenerToBreak) &&
!(Current.MacroParent && Previous.MacroParent) &&
(Current.isNot(TT_LineComment) ||
Previous.isOneOf(BK_BracedInit, TT_VerilogMultiLineListLParen))) {
@@ -824,10 +842,8 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
CurrentState.ContainsUnwrappedBuilder = true;
}
- if (Current.is(TT_TrailingReturnArrow) &&
- Style.Language == FormatStyle::LK_Java) {
+ if (Current.is(TT_LambdaArrow) && Style.Language == FormatStyle::LK_Java)
CurrentState.NoLineBreak = true;
- }
if (Current.isMemberAccess() && Previous.is(tok::r_paren) &&
(Previous.MatchingParen &&
(Previous.TotalLength - Previous.MatchingParen->TotalLength > 10))) {
@@ -982,7 +998,7 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
//
// is common and should be formatted like a free-standing function. The same
// goes for wrapping before the lambda return type arrow.
- if (Current.isNot(TT_TrailingReturnArrow) &&
+ if (Current.isNot(TT_LambdaArrow) &&
(!Style.isJavaScript() || Current.NestingLevel != 0 ||
!PreviousNonComment || PreviousNonComment->isNot(tok::equal) ||
!Current.isOneOf(Keywords.kw_async, Keywords.kw_function))) {
@@ -1239,8 +1255,13 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
}
return CurrentState.Indent;
}
+ if (Current.is(TT_LambdaArrow) &&
+ Previous.isOneOf(tok::kw_noexcept, tok::kw_mutable, tok::kw_constexpr,
+ tok::kw_consteval, tok::kw_static, TT_AttributeSquare)) {
+ return ContinuationIndent;
+ }
if ((Current.isOneOf(tok::r_brace, tok::r_square) ||
- (Current.is(tok::greater) && Style.isProto())) &&
+ (Current.is(tok::greater) && (Style.isProto() || Style.isTableGen()))) &&
State.Stack.size() > 1) {
if (Current.closesBlockOrBlockTypeList(Style))
return State.Stack[State.Stack.size() - 2].NestedBlockIndent;
@@ -1268,6 +1289,12 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
Current.Next->isOneOf(tok::semi, tok::kw_const, tok::l_brace))) {
return State.Stack[State.Stack.size() - 2].LastSpace;
}
+ // When DAGArg closer exists top of line, it should be aligned in the similar
+ // way as function call above.
+ if (Style.isTableGen() && Current.is(TT_TableGenDAGArgCloser) &&
+ State.Stack.size() > 1) {
+ return State.Stack[State.Stack.size() - 2].LastSpace;
+ }
if (Style.AlignAfterOpenBracket == FormatStyle::BAS_BlockIndent &&
(Current.is(tok::r_paren) ||
(Current.is(tok::r_brace) && Current.MatchingParen &&
@@ -1398,7 +1425,7 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
// the next line.
if (State.Line->InPragmaDirective) {
FormatToken *PragmaType = State.Line->First->Next->Next;
- if (PragmaType && PragmaType->TokenText.equals("omp"))
+ if (PragmaType && PragmaType->TokenText == "omp")
return CurrentState.Indent + Style.ContinuationIndentWidth;
}
@@ -1427,7 +1454,9 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
Style.BreakInheritanceList == FormatStyle::BILS_AfterColon) {
return CurrentState.Indent;
}
- if (Previous.is(tok::r_paren) && !Current.isBinaryOperator() &&
+ if (Previous.is(tok::r_paren) &&
+ Previous.isNot(TT_TableGenDAGArgOperatorToBreak) &&
+ !Current.isBinaryOperator() &&
!Current.isOneOf(tok::colon, tok::comment)) {
return ContinuationIndent;
}
@@ -1559,7 +1588,7 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
}
if (Current.isOneOf(TT_BinaryOperator, TT_ConditionalExpr) && Newline)
CurrentState.NestedBlockIndent = State.Column + Current.ColumnWidth + 1;
- if (Current.isOneOf(TT_LambdaLSquare, TT_TrailingReturnArrow))
+ if (Current.isOneOf(TT_LambdaLSquare, TT_LambdaArrow))
CurrentState.LastSpace = State.Column;
if (Current.is(TT_RequiresExpression) &&
Style.RequiresExpressionIndentation == FormatStyle::REI_Keyword) {
@@ -1686,7 +1715,10 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
(!Previous || Previous->isNot(tok::kw_return) ||
(Style.Language != FormatStyle::LK_Java && PrecedenceLevel > 0)) &&
(Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign ||
- PrecedenceLevel != prec::Comma || Current.NestingLevel == 0)) {
+ PrecedenceLevel > prec::Comma || Current.NestingLevel == 0) &&
+ (!Style.isTableGen() ||
+ (Previous && Previous->isOneOf(TT_TableGenDAGArgListComma,
+ TT_TableGenDAGArgListCommaToBreak)))) {
NewParenState.Indent = std::max(
std::max(State.Column, NewParenState.Indent), CurrentState.LastSpace);
}
@@ -1694,8 +1726,11 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
// Special case for generic selection expressions, its comma-separated
// expressions are not aligned to the opening paren like regular calls, but
// rather continuation-indented relative to the _Generic keyword.
- if (Previous && Previous->endsSequence(tok::l_paren, tok::kw__Generic))
- NewParenState.Indent = CurrentState.LastSpace;
+ if (Previous && Previous->endsSequence(tok::l_paren, tok::kw__Generic) &&
+ State.Stack.size() > 1) {
+ NewParenState.Indent = State.Stack[State.Stack.size() - 2].Indent +
+ Style.ContinuationIndentWidth;
+ }
if ((shouldUnindentNextOperator(Current) ||
(Previous &&
@@ -1781,7 +1816,7 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
}
if (Current.MatchingParen && Current.is(BK_Block)) {
- moveStateToNewBlock(State);
+ moveStateToNewBlock(State, Newline);
return;
}
@@ -1820,6 +1855,17 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
Style.ContinuationIndentWidth +
std::max(CurrentState.LastSpace, CurrentState.StartOfFunctionCall);
+ if (Style.isTableGen() && Current.is(TT_TableGenDAGArgOpenerToBreak) &&
+ Style.TableGenBreakInsideDAGArg == FormatStyle::DAS_BreakElements) {
+ // For the case the next token is a TableGen DAGArg operator identifier
+ // that is not marked to have a line break after it.
+ // In this case the option DAS_BreakElements requires to align the
+ // DAGArg elements to the operator.
+ const FormatToken *Next = Current.Next;
+ if (Next && Next->is(TT_TableGenDAGArgOperatorID))
+ NewIndent = State.Column + Next->TokenText.size() + 2;
+ }
+
// Ensure that different different brackets force relative alignment, e.g.:
// void SomeFunction(vector< // break
// int> v);
@@ -1929,6 +1975,7 @@ void ContinuationIndenter::moveStatePastScopeCloser(LineState &State) {
(Current.isOneOf(tok::r_paren, tok::r_square, TT_TemplateString) ||
(Current.is(tok::r_brace) && State.NextToken != State.Line->First) ||
State.NextToken->is(TT_TemplateCloser) ||
+ State.NextToken->is(TT_TableGenListCloser) ||
(Current.is(tok::greater) && Current.is(TT_DictLiteral)))) {
State.Stack.pop_back();
}
@@ -1968,7 +2015,7 @@ void ContinuationIndenter::moveStatePastScopeCloser(LineState &State) {
}
}
-void ContinuationIndenter::moveStateToNewBlock(LineState &State) {
+void ContinuationIndenter::moveStateToNewBlock(LineState &State, bool NewLine) {
if (Style.LambdaBodyIndentation == FormatStyle::LBI_OuterScope &&
State.NextToken->is(TT_LambdaLBrace) &&
!State.Line->MightBeFunctionDecl) {
@@ -1980,10 +2027,18 @@ void ContinuationIndenter::moveStateToNewBlock(LineState &State) {
NestedBlockIndent + (State.NextToken->is(TT_ObjCBlockLBrace)
? Style.ObjCBlockIndentWidth
: Style.IndentWidth);
+
+ // Even when wrapping before lambda body, the left brace can still be added to
+ // the same line. This occurs when checking whether the whole lambda body can
+ // go on a single line. In this case we have to make sure there are no line
+ // breaks in the body, otherwise we could just end up with a regular lambda
+ // body without the brace wrapped.
+ bool NoLineBreak = Style.BraceWrapping.BeforeLambdaBody && !NewLine &&
+ State.NextToken->is(TT_LambdaLBrace);
+
State.Stack.push_back(ParenState(State.NextToken, NewIndent,
State.Stack.back().LastSpace,
- /*AvoidBinPacking=*/true,
- /*NoLineBreak=*/false));
+ /*AvoidBinPacking=*/true, NoLineBreak));
State.Stack.back().NestedBlockIndent = NestedBlockIndent;
State.Stack.back().BreakBeforeParameter = true;
}
diff --git a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h
index 057b85bd32d5..18441e10a124 100644
--- a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h
+++ b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h
@@ -17,11 +17,6 @@
#include "Encoding.h"
#include "FormatToken.h"
-#include "clang/Format/Format.h"
-#include "llvm/Support/Regex.h"
-#include <map>
-#include <optional>
-#include <tuple>
namespace clang {
class SourceManager;
@@ -104,7 +99,7 @@ private:
/// Update 'State' according to the next token being one of ")>}]".
void moveStatePastScopeCloser(LineState &State);
/// Update 'State' with the next token opening a nested block.
- void moveStateToNewBlock(LineState &State);
+ void moveStateToNewBlock(LineState &State, bool NewLine);
/// Reformats a raw string literal.
///
diff --git a/contrib/llvm-project/clang/lib/Format/Encoding.h b/contrib/llvm-project/clang/lib/Format/Encoding.h
index a0d664121b2b..12f9043bb95a 100644
--- a/contrib/llvm-project/clang/lib/Format/Encoding.h
+++ b/contrib/llvm-project/clang/lib/Format/Encoding.h
@@ -16,7 +16,6 @@
#define LLVM_CLANG_LIB_FORMAT_ENCODING_H
#include "clang/Basic/LLVM.h"
-#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/Unicode.h"
diff --git a/contrib/llvm-project/clang/lib/Format/Format.cpp b/contrib/llvm-project/clang/lib/Format/Format.cpp
index 10fe35c79a4f..7fd42e46e0cc 100644
--- a/contrib/llvm-project/clang/lib/Format/Format.cpp
+++ b/contrib/llvm-project/clang/lib/Format/Format.cpp
@@ -13,50 +13,22 @@
//===----------------------------------------------------------------------===//
#include "clang/Format/Format.h"
-#include "AffectedRangeManager.h"
-#include "BreakableToken.h"
-#include "ContinuationIndenter.h"
#include "DefinitionBlockSeparator.h"
-#include "FormatInternal.h"
-#include "FormatToken.h"
-#include "FormatTokenLexer.h"
#include "IntegerLiteralSeparatorFixer.h"
#include "NamespaceEndCommentsFixer.h"
#include "ObjCPropertyAttributeOrderFixer.h"
#include "QualifierAlignmentFixer.h"
#include "SortJavaScriptImports.h"
-#include "TokenAnalyzer.h"
-#include "TokenAnnotator.h"
#include "UnwrappedLineFormatter.h"
-#include "UnwrappedLineParser.h"
#include "UsingDeclarationsSorter.h"
-#include "WhitespaceManager.h"
-#include "clang/Basic/Diagnostic.h"
-#include "clang/Basic/DiagnosticOptions.h"
-#include "clang/Basic/SourceManager.h"
-#include "clang/Lex/Lexer.h"
#include "clang/Tooling/Inclusions/HeaderIncludes.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/Allocator.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/Path.h"
-#include "llvm/Support/Regex.h"
-#include "llvm/Support/VirtualFileSystem.h"
-#include "llvm/Support/YAMLTraits.h"
-#include <algorithm>
-#include <memory>
-#include <mutex>
-#include <optional>
-#include <string>
-#include <unordered_map>
#define DEBUG_TYPE "format-formatter"
using clang::format::FormatStyle;
-LLVM_YAML_IS_SEQUENCE_VECTOR(clang::format::FormatStyle::RawStringFormat)
+LLVM_YAML_IS_SEQUENCE_VECTOR(FormatStyle::RawStringFormat)
namespace llvm {
namespace yaml {
@@ -128,6 +100,7 @@ struct MappingTraits<FormatStyle::ShortCaseStatementsAlignmentStyle> {
IO.mapOptional("Enabled", Value.Enabled);
IO.mapOptional("AcrossEmptyLines", Value.AcrossEmptyLines);
IO.mapOptional("AcrossComments", Value.AcrossComments);
+ IO.mapOptional("AlignCaseArrows", Value.AlignCaseArrows);
IO.mapOptional("AlignCaseColons", Value.AlignCaseColons);
}
};
@@ -296,6 +269,7 @@ template <>
struct ScalarEnumerationTraits<FormatStyle::BreakTemplateDeclarationsStyle> {
static void enumeration(IO &IO,
FormatStyle::BreakTemplateDeclarationsStyle &Value) {
+ IO.enumCase(Value, "Leave", FormatStyle::BTDS_Leave);
IO.enumCase(Value, "No", FormatStyle::BTDS_No);
IO.enumCase(Value, "MultiLine", FormatStyle::BTDS_MultiLine);
IO.enumCase(Value, "Yes", FormatStyle::BTDS_Yes);
@@ -306,6 +280,14 @@ struct ScalarEnumerationTraits<FormatStyle::BreakTemplateDeclarationsStyle> {
}
};
+template <> struct ScalarEnumerationTraits<FormatStyle::DAGArgStyle> {
+ static void enumeration(IO &IO, FormatStyle::DAGArgStyle &Value) {
+ IO.enumCase(Value, "DontBreak", FormatStyle::DAS_DontBreak);
+ IO.enumCase(Value, "BreakElements", FormatStyle::DAS_BreakElements);
+ IO.enumCase(Value, "BreakAll", FormatStyle::DAS_BreakAll);
+ }
+};
+
template <>
struct ScalarEnumerationTraits<FormatStyle::DefinitionReturnTypeBreakingStyle> {
static void
@@ -326,6 +308,7 @@ struct ScalarEnumerationTraits<FormatStyle::EscapedNewlineAlignmentStyle> {
FormatStyle::EscapedNewlineAlignmentStyle &Value) {
IO.enumCase(Value, "DontAlign", FormatStyle::ENAS_DontAlign);
IO.enumCase(Value, "Left", FormatStyle::ENAS_Left);
+ IO.enumCase(Value, "LeftWithLastLine", FormatStyle::ENAS_LeftWithLastLine);
IO.enumCase(Value, "Right", FormatStyle::ENAS_Right);
// For backward compatibility.
@@ -386,6 +369,14 @@ template <> struct ScalarEnumerationTraits<FormatStyle::JavaScriptQuoteStyle> {
}
};
+template <> struct MappingTraits<FormatStyle::KeepEmptyLinesStyle> {
+ static void mapping(IO &IO, FormatStyle::KeepEmptyLinesStyle &Value) {
+ IO.mapOptional("AtEndOfFile", Value.AtEndOfFile);
+ IO.mapOptional("AtStartOfBlock", Value.AtStartOfBlock);
+ IO.mapOptional("AtStartOfFile", Value.AtStartOfFile);
+ }
+};
+
template <> struct ScalarEnumerationTraits<FormatStyle::LanguageKind> {
static void enumeration(IO &IO, FormatStyle::LanguageKind &Value) {
IO.enumCase(Value, "Cpp", FormatStyle::LK_Cpp);
@@ -558,6 +549,8 @@ template <>
struct ScalarEnumerationTraits<FormatStyle::ReturnTypeBreakingStyle> {
static void enumeration(IO &IO, FormatStyle::ReturnTypeBreakingStyle &Value) {
IO.enumCase(Value, "None", FormatStyle::RTBS_None);
+ IO.enumCase(Value, "Automatic", FormatStyle::RTBS_Automatic);
+ IO.enumCase(Value, "ExceptShortType", FormatStyle::RTBS_ExceptShortType);
IO.enumCase(Value, "All", FormatStyle::RTBS_All);
IO.enumCase(Value, "TopLevel", FormatStyle::RTBS_TopLevel);
IO.enumCase(Value, "TopLevelDefinitions",
@@ -736,6 +729,7 @@ template <> struct MappingTraits<FormatStyle::SpacesInLineComment> {
template <> struct MappingTraits<FormatStyle::SpacesInParensCustom> {
static void mapping(IO &IO, FormatStyle::SpacesInParensCustom &Spaces) {
+ IO.mapOptional("ExceptDoubleParentheses", Spaces.ExceptDoubleParentheses);
IO.mapOptional("InCStyleCasts", Spaces.InCStyleCasts);
IO.mapOptional("InConditionalStatements", Spaces.InConditionalStatements);
IO.mapOptional("InEmptyParentheses", Spaces.InEmptyParentheses);
@@ -824,7 +818,6 @@ template <> struct MappingTraits<FormatStyle> {
FormatStyle PredefinedStyle;
if (getPredefinedStyle(StyleName, Style.Language, &PredefinedStyle) &&
Style == PredefinedStyle) {
- IO.mapOptional("# BasedOnStyle", StyleName);
BasedOnStyle = StyleName;
break;
}
@@ -874,6 +867,9 @@ template <> struct MappingTraits<FormatStyle> {
if (!IO.outputting()) {
IO.mapOptional("AlignEscapedNewlinesLeft", Style.AlignEscapedNewlines);
IO.mapOptional("AllowAllConstructorInitializersOnNextLine", OnNextLine);
+ IO.mapOptional("AlwaysBreakAfterReturnType", Style.BreakAfterReturnType);
+ IO.mapOptional("AlwaysBreakTemplateDeclarations",
+ Style.BreakTemplateDeclarations);
IO.mapOptional("BreakBeforeInheritanceComma",
BreakBeforeInheritanceComma);
IO.mapOptional("BreakConstructorInitializersBeforeComma",
@@ -882,6 +878,9 @@ template <> struct MappingTraits<FormatStyle> {
OnCurrentLine);
IO.mapOptional("DeriveLineEnding", DeriveLineEnding);
IO.mapOptional("DerivePointerBinding", Style.DerivePointerAlignment);
+ IO.mapOptional("KeepEmptyLinesAtEOF", Style.KeepEmptyLines.AtEndOfFile);
+ IO.mapOptional("KeepEmptyLinesAtTheStartOfBlocks",
+ Style.KeepEmptyLines.AtStartOfBlock);
IO.mapOptional("IndentFunctionDeclarationAfterType",
Style.IndentWrappedFunctionNames);
IO.mapOptional("IndentRequires", Style.IndentRequiresClause);
@@ -909,6 +908,12 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("AlignConsecutiveMacros", Style.AlignConsecutiveMacros);
IO.mapOptional("AlignConsecutiveShortCaseStatements",
Style.AlignConsecutiveShortCaseStatements);
+ IO.mapOptional("AlignConsecutiveTableGenBreakingDAGArgColons",
+ Style.AlignConsecutiveTableGenBreakingDAGArgColons);
+ IO.mapOptional("AlignConsecutiveTableGenCondOperatorColons",
+ Style.AlignConsecutiveTableGenCondOperatorColons);
+ IO.mapOptional("AlignConsecutiveTableGenDefinitionColons",
+ Style.AlignConsecutiveTableGenDefinitionColons);
IO.mapOptional("AlignEscapedNewlines", Style.AlignEscapedNewlines);
IO.mapOptional("AlignOperands", Style.AlignOperands);
IO.mapOptional("AlignTrailingComments", Style.AlignTrailingComments);
@@ -920,6 +925,8 @@ template <> struct MappingTraits<FormatStyle> {
Style.AllowBreakBeforeNoexceptSpecifier);
IO.mapOptional("AllowShortBlocksOnASingleLine",
Style.AllowShortBlocksOnASingleLine);
+ IO.mapOptional("AllowShortCaseExpressionOnASingleLine",
+ Style.AllowShortCaseExpressionOnASingleLine);
IO.mapOptional("AllowShortCaseLabelsOnASingleLine",
Style.AllowShortCaseLabelsOnASingleLine);
IO.mapOptional("AllowShortCompoundRequirementOnASingleLine",
@@ -936,12 +943,8 @@ template <> struct MappingTraits<FormatStyle> {
Style.AllowShortLoopsOnASingleLine);
IO.mapOptional("AlwaysBreakAfterDefinitionReturnType",
Style.AlwaysBreakAfterDefinitionReturnType);
- IO.mapOptional("AlwaysBreakAfterReturnType",
- Style.AlwaysBreakAfterReturnType);
IO.mapOptional("AlwaysBreakBeforeMultilineStrings",
Style.AlwaysBreakBeforeMultilineStrings);
- IO.mapOptional("AlwaysBreakTemplateDeclarations",
- Style.AlwaysBreakTemplateDeclarations);
IO.mapOptional("AttributeMacros", Style.AttributeMacros);
IO.mapOptional("BinPackArguments", Style.BinPackArguments);
IO.mapOptional("BinPackParameters", Style.BinPackParameters);
@@ -954,6 +957,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("BreakAfterAttributes", Style.BreakAfterAttributes);
IO.mapOptional("BreakAfterJavaFieldAnnotations",
Style.BreakAfterJavaFieldAnnotations);
+ IO.mapOptional("BreakAfterReturnType", Style.BreakAfterReturnType);
IO.mapOptional("BreakArrays", Style.BreakArrays);
IO.mapOptional("BreakBeforeBinaryOperators",
Style.BreakBeforeBinaryOperators);
@@ -966,8 +970,12 @@ template <> struct MappingTraits<FormatStyle> {
Style.BreakBeforeTernaryOperators);
IO.mapOptional("BreakConstructorInitializers",
Style.BreakConstructorInitializers);
+ IO.mapOptional("BreakFunctionDefinitionParameters",
+ Style.BreakFunctionDefinitionParameters);
IO.mapOptional("BreakInheritanceList", Style.BreakInheritanceList);
IO.mapOptional("BreakStringLiterals", Style.BreakStringLiterals);
+ IO.mapOptional("BreakTemplateDeclarations",
+ Style.BreakTemplateDeclarations);
IO.mapOptional("ColumnLimit", Style.ColumnLimit);
IO.mapOptional("CommentPragmas", Style.CommentPragmas);
IO.mapOptional("CompactNamespaces", Style.CompactNamespaces);
@@ -1008,14 +1016,13 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("JavaImportGroups", Style.JavaImportGroups);
IO.mapOptional("JavaScriptQuotes", Style.JavaScriptQuotes);
IO.mapOptional("JavaScriptWrapImports", Style.JavaScriptWrapImports);
- IO.mapOptional("KeepEmptyLinesAtTheStartOfBlocks",
- Style.KeepEmptyLinesAtTheStartOfBlocks);
- IO.mapOptional("KeepEmptyLinesAtEOF", Style.KeepEmptyLinesAtEOF);
+ IO.mapOptional("KeepEmptyLines", Style.KeepEmptyLines);
IO.mapOptional("LambdaBodyIndentation", Style.LambdaBodyIndentation);
IO.mapOptional("LineEnding", Style.LineEnding);
IO.mapOptional("MacroBlockBegin", Style.MacroBlockBegin);
IO.mapOptional("MacroBlockEnd", Style.MacroBlockEnd);
IO.mapOptional("Macros", Style.Macros);
+ IO.mapOptional("MainIncludeChar", Style.IncludeStyle.MainIncludeChar);
IO.mapOptional("MaxEmptyLinesToKeep", Style.MaxEmptyLinesToKeep);
IO.mapOptional("NamespaceIndentation", Style.NamespaceIndentation);
IO.mapOptional("NamespaceMacros", Style.NamespaceMacros);
@@ -1110,6 +1117,10 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("StatementAttributeLikeMacros",
Style.StatementAttributeLikeMacros);
IO.mapOptional("StatementMacros", Style.StatementMacros);
+ IO.mapOptional("TableGenBreakingDAGArgOperators",
+ Style.TableGenBreakingDAGArgOperators);
+ IO.mapOptional("TableGenBreakInsideDAGArg",
+ Style.TableGenBreakInsideDAGArg);
IO.mapOptional("TabWidth", Style.TabWidth);
IO.mapOptional("TypeNames", Style.TypeNames);
IO.mapOptional("TypenameMacros", Style.TypenameMacros);
@@ -1120,17 +1131,16 @@ template <> struct MappingTraits<FormatStyle> {
Style.WhitespaceSensitiveMacros);
// If AlwaysBreakAfterDefinitionReturnType was specified but
- // AlwaysBreakAfterReturnType was not, initialize the latter from the
- // former for backwards compatibility.
+ // BreakAfterReturnType was not, initialize the latter from the former for
+ // backwards compatibility.
if (Style.AlwaysBreakAfterDefinitionReturnType != FormatStyle::DRTBS_None &&
- Style.AlwaysBreakAfterReturnType == FormatStyle::RTBS_None) {
+ Style.BreakAfterReturnType == FormatStyle::RTBS_None) {
if (Style.AlwaysBreakAfterDefinitionReturnType ==
FormatStyle::DRTBS_All) {
- Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_AllDefinitions;
+ Style.BreakAfterReturnType = FormatStyle::RTBS_AllDefinitions;
} else if (Style.AlwaysBreakAfterDefinitionReturnType ==
FormatStyle::DRTBS_TopLevel) {
- Style.AlwaysBreakAfterReturnType =
- FormatStyle::RTBS_TopLevelDefinitions;
+ Style.BreakAfterReturnType = FormatStyle::RTBS_TopLevelDefinitions;
}
}
@@ -1175,8 +1185,8 @@ template <> struct MappingTraits<FormatStyle> {
(SpacesInParentheses || SpaceInEmptyParentheses ||
SpacesInConditionalStatement || SpacesInCStyleCastParentheses)) {
if (SpacesInParentheses) {
- // set all options except InCStyleCasts and InEmptyParentheses
- // to true for backward compatibility.
+ // For backward compatibility.
+ Style.SpacesInParensOptions.ExceptDoubleParentheses = false;
Style.SpacesInParensOptions.InConditionalStatements = true;
Style.SpacesInParensOptions.InCStyleCasts =
SpacesInCStyleCastParentheses;
@@ -1236,7 +1246,7 @@ std::error_code make_error_code(ParseError e) {
return std::error_code(static_cast<int>(e), getParseCategory());
}
-inline llvm::Error make_string_error(const llvm::Twine &Message) {
+inline llvm::Error make_string_error(const Twine &Message) {
return llvm::make_error<llvm::StringError>(Message,
llvm::inconvertibleErrorCode());
}
@@ -1401,30 +1411,33 @@ static void expandPresetsSpacesInParens(FormatStyle &Expanded) {
FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
FormatStyle LLVMStyle;
- LLVMStyle.InheritsParentConfig = false;
- LLVMStyle.Language = Language;
LLVMStyle.AccessModifierOffset = -2;
- LLVMStyle.AlignEscapedNewlines = FormatStyle::ENAS_Right;
LLVMStyle.AlignAfterOpenBracket = FormatStyle::BAS_Align;
LLVMStyle.AlignArrayOfStructures = FormatStyle::AIAS_None;
- LLVMStyle.AlignOperands = FormatStyle::OAS_Align;
LLVMStyle.AlignConsecutiveAssignments = {};
- LLVMStyle.AlignConsecutiveAssignments.Enabled = false;
- LLVMStyle.AlignConsecutiveAssignments.AcrossEmptyLines = false;
LLVMStyle.AlignConsecutiveAssignments.AcrossComments = false;
+ LLVMStyle.AlignConsecutiveAssignments.AcrossEmptyLines = false;
LLVMStyle.AlignConsecutiveAssignments.AlignCompound = false;
LLVMStyle.AlignConsecutiveAssignments.AlignFunctionPointers = false;
+ LLVMStyle.AlignConsecutiveAssignments.Enabled = false;
LLVMStyle.AlignConsecutiveAssignments.PadOperators = true;
LLVMStyle.AlignConsecutiveBitFields = {};
LLVMStyle.AlignConsecutiveDeclarations = {};
LLVMStyle.AlignConsecutiveMacros = {};
LLVMStyle.AlignConsecutiveShortCaseStatements = {};
+ LLVMStyle.AlignConsecutiveTableGenBreakingDAGArgColons = {};
+ LLVMStyle.AlignConsecutiveTableGenCondOperatorColons = {};
+ LLVMStyle.AlignConsecutiveTableGenDefinitionColons = {};
+ LLVMStyle.AlignEscapedNewlines = FormatStyle::ENAS_Right;
+ LLVMStyle.AlignOperands = FormatStyle::OAS_Align;
LLVMStyle.AlignTrailingComments = {};
LLVMStyle.AlignTrailingComments.Kind = FormatStyle::TCAS_Always;
LLVMStyle.AlignTrailingComments.OverEmptyLines = 0;
LLVMStyle.AllowAllArgumentsOnNextLine = true;
LLVMStyle.AllowAllParametersOfDeclarationOnNextLine = true;
+ LLVMStyle.AllowBreakBeforeNoexceptSpecifier = FormatStyle::BBNSS_Never;
LLVMStyle.AllowShortBlocksOnASingleLine = FormatStyle::SBS_Never;
+ LLVMStyle.AllowShortCaseExpressionOnASingleLine = true;
LLVMStyle.AllowShortCaseLabelsOnASingleLine = false;
LLVMStyle.AllowShortCompoundRequirementOnASingleLine = true;
LLVMStyle.AllowShortEnumsOnASingleLine = true;
@@ -1432,14 +1445,12 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.AllowShortIfStatementsOnASingleLine = FormatStyle::SIS_Never;
LLVMStyle.AllowShortLambdasOnASingleLine = FormatStyle::SLS_All;
LLVMStyle.AllowShortLoopsOnASingleLine = false;
- LLVMStyle.AlwaysBreakAfterReturnType = FormatStyle::RTBS_None;
LLVMStyle.AlwaysBreakAfterDefinitionReturnType = FormatStyle::DRTBS_None;
LLVMStyle.AlwaysBreakBeforeMultilineStrings = false;
- LLVMStyle.AlwaysBreakTemplateDeclarations = FormatStyle::BTDS_MultiLine;
LLVMStyle.AttributeMacros.push_back("__capability");
- LLVMStyle.BitFieldColonSpacing = FormatStyle::BFCS_Both;
LLVMStyle.BinPackArguments = true;
LLVMStyle.BinPackParameters = true;
+ LLVMStyle.BitFieldColonSpacing = FormatStyle::BFCS_Both;
LLVMStyle.BracedInitializerIndentWidth = std::nullopt;
LLVMStyle.BraceWrapping = {/*AfterCaseLabel=*/false,
/*AfterClass=*/false,
@@ -1462,16 +1473,18 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.BreakAdjacentStringLiterals = true;
LLVMStyle.BreakAfterAttributes = FormatStyle::ABS_Leave;
LLVMStyle.BreakAfterJavaFieldAnnotations = false;
+ LLVMStyle.BreakAfterReturnType = FormatStyle::RTBS_None;
LLVMStyle.BreakArrays = true;
LLVMStyle.BreakBeforeBinaryOperators = FormatStyle::BOS_None;
LLVMStyle.BreakBeforeBraces = FormatStyle::BS_Attach;
LLVMStyle.BreakBeforeConceptDeclarations = FormatStyle::BBCDS_Always;
LLVMStyle.BreakBeforeInlineASMColon = FormatStyle::BBIAS_OnlyMultiline;
- LLVMStyle.AllowBreakBeforeNoexceptSpecifier = FormatStyle::BBNSS_Never;
LLVMStyle.BreakBeforeTernaryOperators = true;
LLVMStyle.BreakConstructorInitializers = FormatStyle::BCIS_BeforeColon;
+ LLVMStyle.BreakFunctionDefinitionParameters = false;
LLVMStyle.BreakInheritanceList = FormatStyle::BILS_BeforeColon;
LLVMStyle.BreakStringLiterals = true;
+ LLVMStyle.BreakTemplateDeclarations = FormatStyle::BTDS_MultiLine;
LLVMStyle.ColumnLimit = 80;
LLVMStyle.CommentPragmas = "^ IWYU pragma:";
LLVMStyle.CompactNamespaces = false;
@@ -1488,21 +1501,23 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.ForEachMacros.push_back("Q_FOREACH");
LLVMStyle.ForEachMacros.push_back("BOOST_FOREACH");
LLVMStyle.IfMacros.push_back("KJ_IF_MAYBE");
+ LLVMStyle.IncludeStyle.IncludeBlocks = tooling::IncludeStyle::IBS_Preserve;
LLVMStyle.IncludeStyle.IncludeCategories = {
{"^\"(llvm|llvm-c|clang|clang-c)/", 2, 0, false},
{"^(<|\"(gtest|gmock|isl|json)/)", 3, 0, false},
{".*", 1, 0, false}};
LLVMStyle.IncludeStyle.IncludeIsMainRegex = "(Test)?$";
- LLVMStyle.IncludeStyle.IncludeBlocks = tooling::IncludeStyle::IBS_Preserve;
+ LLVMStyle.IncludeStyle.MainIncludeChar = tooling::IncludeStyle::MICD_Quote;
LLVMStyle.IndentAccessModifiers = false;
- LLVMStyle.IndentCaseLabels = false;
LLVMStyle.IndentCaseBlocks = false;
+ LLVMStyle.IndentCaseLabels = false;
LLVMStyle.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
LLVMStyle.IndentGotoLabels = true;
LLVMStyle.IndentPPDirectives = FormatStyle::PPDIS_None;
LLVMStyle.IndentRequiresClause = true;
LLVMStyle.IndentWidth = 2;
LLVMStyle.IndentWrappedFunctionNames = false;
+ LLVMStyle.InheritsParentConfig = false;
LLVMStyle.InsertBraces = false;
LLVMStyle.InsertNewlineAtEOF = false;
LLVMStyle.InsertTrailingCommas = FormatStyle::TCS_None;
@@ -1512,9 +1527,13 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
/*Hex=*/0, /*HexMinDigits=*/0};
LLVMStyle.JavaScriptQuotes = FormatStyle::JSQS_Leave;
LLVMStyle.JavaScriptWrapImports = true;
- LLVMStyle.KeepEmptyLinesAtEOF = false;
- LLVMStyle.KeepEmptyLinesAtTheStartOfBlocks = true;
+ LLVMStyle.KeepEmptyLines = {
+ /*AtEndOfFile=*/false,
+ /*AtStartOfBlock=*/true,
+ /*AtStartOfFile=*/true,
+ };
LLVMStyle.LambdaBodyIndentation = FormatStyle::LBI_Signature;
+ LLVMStyle.Language = Language;
LLVMStyle.LineEnding = FormatStyle::LE_DeriveLF;
LLVMStyle.MaxEmptyLinesToKeep = 1;
LLVMStyle.NamespaceIndentation = FormatStyle::NI_None;
@@ -1544,7 +1563,9 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.SpaceAfterLogicalNot = false;
LLVMStyle.SpaceAfterTemplateKeyword = true;
LLVMStyle.SpaceAroundPointerQualifiers = FormatStyle::SAPQ_Default;
+ LLVMStyle.SpaceBeforeAssignmentOperators = true;
LLVMStyle.SpaceBeforeCaseColon = false;
+ LLVMStyle.SpaceBeforeCpp11BracedList = false;
LLVMStyle.SpaceBeforeCtorInitializerColon = true;
LLVMStyle.SpaceBeforeInheritanceColon = true;
LLVMStyle.SpaceBeforeJsonColon = false;
@@ -1554,8 +1575,6 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.SpaceBeforeParensOptions.AfterForeachMacros = true;
LLVMStyle.SpaceBeforeParensOptions.AfterIfMacros = true;
LLVMStyle.SpaceBeforeRangeBasedForLoopColon = true;
- LLVMStyle.SpaceBeforeAssignmentOperators = true;
- LLVMStyle.SpaceBeforeCpp11BracedList = false;
LLVMStyle.SpaceBeforeSquareBrackets = false;
LLVMStyle.SpaceInEmptyBlock = false;
LLVMStyle.SpacesBeforeTrailingComments = 1;
@@ -1568,6 +1587,8 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.StatementAttributeLikeMacros.push_back("Q_EMIT");
LLVMStyle.StatementMacros.push_back("Q_UNUSED");
LLVMStyle.StatementMacros.push_back("QT_REQUIRE_VERSION");
+ LLVMStyle.TableGenBreakingDAGArgOperators = {};
+ LLVMStyle.TableGenBreakInsideDAGArg = FormatStyle::DAS_DontBreak;
LLVMStyle.TabWidth = 8;
LLVMStyle.UseTab = FormatStyle::UT_Never;
LLVMStyle.VerilogBreakBetweenInstancePorts = true;
@@ -1578,16 +1599,16 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.WhitespaceSensitiveMacros.push_back("STRINGIZE");
LLVMStyle.PenaltyBreakAssignment = prec::Assignment;
+ LLVMStyle.PenaltyBreakBeforeFirstCallParameter = 19;
LLVMStyle.PenaltyBreakComment = 300;
LLVMStyle.PenaltyBreakFirstLessLess = 120;
- LLVMStyle.PenaltyBreakString = 1000;
- LLVMStyle.PenaltyExcessCharacter = 1000000;
- LLVMStyle.PenaltyReturnTypeOnItsOwnLine = 60;
- LLVMStyle.PenaltyBreakBeforeFirstCallParameter = 19;
LLVMStyle.PenaltyBreakOpenParenthesis = 0;
LLVMStyle.PenaltyBreakScopeResolution = 500;
+ LLVMStyle.PenaltyBreakString = 1000;
LLVMStyle.PenaltyBreakTemplateDeclaration = prec::Relational;
+ LLVMStyle.PenaltyExcessCharacter = 1'000'000;
LLVMStyle.PenaltyIndentedWhitespace = 0;
+ LLVMStyle.PenaltyReturnTypeOnItsOwnLine = 60;
// Defaults that differ when not C++.
switch (Language) {
@@ -1624,16 +1645,16 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
FormatStyle::SIS_WithoutElse;
GoogleStyle.AllowShortLoopsOnASingleLine = true;
GoogleStyle.AlwaysBreakBeforeMultilineStrings = true;
- GoogleStyle.AlwaysBreakTemplateDeclarations = FormatStyle::BTDS_Yes;
+ GoogleStyle.BreakTemplateDeclarations = FormatStyle::BTDS_Yes;
GoogleStyle.DerivePointerAlignment = true;
+ GoogleStyle.IncludeStyle.IncludeBlocks = tooling::IncludeStyle::IBS_Regroup;
GoogleStyle.IncludeStyle.IncludeCategories = {{"^<ext/.*\\.h>", 2, 0, false},
{"^<.*\\.h>", 1, 0, false},
{"^<.*", 2, 0, false},
{".*", 3, 0, false}};
GoogleStyle.IncludeStyle.IncludeIsMainRegex = "([-_](test|unittest))?$";
- GoogleStyle.IncludeStyle.IncludeBlocks = tooling::IncludeStyle::IBS_Regroup;
GoogleStyle.IndentCaseLabels = true;
- GoogleStyle.KeepEmptyLinesAtTheStartOfBlocks = false;
+ GoogleStyle.KeepEmptyLines.AtStartOfBlock = false;
GoogleStyle.ObjCBinPackProtocolList = FormatStyle::BPS_Never;
GoogleStyle.ObjCSpaceAfterProperty = false;
GoogleStyle.ObjCSpaceBeforeProtocolList = true;
@@ -1686,8 +1707,8 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
GoogleStyle.SpacesBeforeTrailingComments = 2;
GoogleStyle.Standard = FormatStyle::LS_Auto;
- GoogleStyle.PenaltyReturnTypeOnItsOwnLine = 200;
GoogleStyle.PenaltyBreakBeforeFirstCallParameter = 1;
+ GoogleStyle.PenaltyReturnTypeOnItsOwnLine = 200;
if (Language == FormatStyle::LK_Java) {
GoogleStyle.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
@@ -1715,22 +1736,22 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
// TODO: enable once decided, in particular re disabling bin packing.
// https://google.github.io/styleguide/jsguide.html#features-arrays-trailing-comma
// GoogleStyle.InsertTrailingCommas = FormatStyle::TCS_Wrapped;
+ GoogleStyle.JavaScriptQuotes = FormatStyle::JSQS_Single;
+ GoogleStyle.JavaScriptWrapImports = false;
GoogleStyle.MaxEmptyLinesToKeep = 3;
GoogleStyle.NamespaceIndentation = FormatStyle::NI_All;
GoogleStyle.SpacesInContainerLiterals = false;
- GoogleStyle.JavaScriptQuotes = FormatStyle::JSQS_Single;
- GoogleStyle.JavaScriptWrapImports = false;
} else if (Language == FormatStyle::LK_Proto) {
GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Empty;
GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
- GoogleStyle.SpacesInContainerLiterals = false;
- GoogleStyle.Cpp11BracedListStyle = false;
// This affects protocol buffer options specifications and text protos.
// Text protos are currently mostly formatted inside C++ raw string literals
// and often the current breaking behavior of string literals is not
// beneficial there. Investigate turning this on once proper string reflow
// has been implemented.
GoogleStyle.BreakStringLiterals = false;
+ GoogleStyle.Cpp11BracedListStyle = false;
+ GoogleStyle.SpacesInContainerLiterals = false;
} else if (Language == FormatStyle::LK_ObjC) {
GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
GoogleStyle.ColumnLimit = 100;
@@ -1814,15 +1835,15 @@ FormatStyle getMozillaStyle() {
FormatStyle MozillaStyle = getLLVMStyle();
MozillaStyle.AllowAllParametersOfDeclarationOnNextLine = false;
MozillaStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Inline;
- MozillaStyle.AlwaysBreakAfterReturnType = FormatStyle::RTBS_TopLevel;
MozillaStyle.AlwaysBreakAfterDefinitionReturnType =
FormatStyle::DRTBS_TopLevel;
- MozillaStyle.AlwaysBreakTemplateDeclarations = FormatStyle::BTDS_Yes;
- MozillaStyle.BinPackParameters = false;
MozillaStyle.BinPackArguments = false;
+ MozillaStyle.BinPackParameters = false;
+ MozillaStyle.BreakAfterReturnType = FormatStyle::RTBS_TopLevel;
MozillaStyle.BreakBeforeBraces = FormatStyle::BS_Mozilla;
MozillaStyle.BreakConstructorInitializers = FormatStyle::BCIS_BeforeComma;
MozillaStyle.BreakInheritanceList = FormatStyle::BILS_BeforeComma;
+ MozillaStyle.BreakTemplateDeclarations = FormatStyle::BTDS_Yes;
MozillaStyle.ConstructorInitializerIndentWidth = 2;
MozillaStyle.ContinuationIndentWidth = 2;
MozillaStyle.Cpp11BracedListStyle = false;
@@ -1847,8 +1868,8 @@ FormatStyle getWebKitStyle() {
Style.BreakBeforeBinaryOperators = FormatStyle::BOS_All;
Style.BreakBeforeBraces = FormatStyle::BS_WebKit;
Style.BreakConstructorInitializers = FormatStyle::BCIS_BeforeComma;
- Style.Cpp11BracedListStyle = false;
Style.ColumnLimit = 0;
+ Style.Cpp11BracedListStyle = false;
Style.FixNamespaceComments = false;
Style.IndentWidth = 4;
Style.NamespaceIndentation = FormatStyle::NI_Inner;
@@ -1863,12 +1884,12 @@ FormatStyle getWebKitStyle() {
FormatStyle getGNUStyle() {
FormatStyle Style = getLLVMStyle();
Style.AlwaysBreakAfterDefinitionReturnType = FormatStyle::DRTBS_All;
- Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_AllDefinitions;
+ Style.BreakAfterReturnType = FormatStyle::RTBS_AllDefinitions;
Style.BreakBeforeBinaryOperators = FormatStyle::BOS_All;
Style.BreakBeforeBraces = FormatStyle::BS_GNU;
Style.BreakBeforeTernaryOperators = true;
- Style.Cpp11BracedListStyle = false;
Style.ColumnLimit = 79;
+ Style.Cpp11BracedListStyle = false;
Style.FixNamespaceComments = false;
Style.SpaceBeforeParens = FormatStyle::SBPO_Always;
Style.Standard = FormatStyle::LS_Cpp03;
@@ -1900,7 +1921,7 @@ FormatStyle getMicrosoftStyle(FormatStyle::LanguageKind Language) {
Style.AllowShortIfStatementsOnASingleLine = FormatStyle::SIS_Never;
Style.AllowShortLoopsOnASingleLine = false;
Style.AlwaysBreakAfterDefinitionReturnType = FormatStyle::DRTBS_None;
- Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_None;
+ Style.BreakAfterReturnType = FormatStyle::RTBS_None;
return Style;
}
@@ -1908,9 +1929,12 @@ FormatStyle getClangFormatStyle() {
FormatStyle Style = getLLVMStyle();
Style.InsertBraces = true;
Style.InsertNewlineAtEOF = true;
+ Style.IntegerLiteralSeparator.Decimal = 3;
+ Style.IntegerLiteralSeparator.DecimalMinDigits = 5;
Style.LineEnding = FormatStyle::LE_LF;
Style.RemoveBracesLLVM = true;
Style.RemoveParentheses = FormatStyle::RPS_ReturnStatement;
+ Style.RemoveSemicolon = true;
return Style;
}
@@ -2255,27 +2279,36 @@ public:
FormatTokenLexer &Tokens) override {
AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
tooling::Replacements Result;
- removeSemi(AnnotatedLines, Result);
+ removeSemi(Annotator, AnnotatedLines, Result);
return {Result, 0};
}
private:
- void removeSemi(SmallVectorImpl<AnnotatedLine *> &Lines,
+ void removeSemi(TokenAnnotator &Annotator,
+ SmallVectorImpl<AnnotatedLine *> &Lines,
tooling::Replacements &Result) {
+ auto PrecededByFunctionRBrace = [](const FormatToken &Tok) {
+ const auto *Prev = Tok.Previous;
+ if (!Prev || Prev->isNot(tok::r_brace))
+ return false;
+ const auto *LBrace = Prev->MatchingParen;
+ return LBrace && LBrace->is(TT_FunctionLBrace);
+ };
const auto &SourceMgr = Env.getSourceManager();
const auto End = Lines.end();
for (auto I = Lines.begin(); I != End; ++I) {
const auto Line = *I;
- removeSemi(Line->Children, Result);
+ removeSemi(Annotator, Line->Children, Result);
if (!Line->Affected)
continue;
+ Annotator.calculateFormattingInformation(*Line);
const auto NextLine = I + 1 == End ? nullptr : I[1];
for (auto Token = Line->First; Token && !Token->Finalized;
Token = Token->Next) {
- if (!Token->Optional)
- continue;
- if (Token->isNot(tok::semi))
+ if (Token->isNot(tok::semi) ||
+ (!Token->Optional && !PrecededByFunctionRBrace(*Token))) {
continue;
+ }
auto Next = Token->Next;
assert(Next || Token == Line->Last);
if (!Next && NextLine)
@@ -2342,7 +2375,7 @@ private:
// FIXME: handle error. For now, print error message and skip the
// replacement for release version.
if (Err) {
- llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ llvm::errs() << toString(std::move(Err)) << "\n";
assert(false);
}
};
@@ -2783,7 +2816,7 @@ private:
// FIXME: better error handling. for now just print error message and skip
// for the release version.
if (Err) {
- llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ llvm::errs() << toString(std::move(Err)) << "\n";
assert(false && "Fixes must not conflict!");
}
Idx = End + 1;
@@ -2942,9 +2975,9 @@ private:
<< getTokenTypeName(FormatTok->getType()) << "\n");
return true;
}
- if (guessIsObjC(SourceManager, Line->Children, Keywords))
- return true;
}
+ if (guessIsObjC(SourceManager, Line->Children, Keywords))
+ return true;
}
return false;
}
@@ -3055,7 +3088,7 @@ static void sortCppIncludes(const FormatStyle &Style,
llvm::to_vector<16>(llvm::seq<unsigned>(0, Includes.size()));
if (Style.SortIncludes == FormatStyle::SI_CaseInsensitive) {
- llvm::stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
+ stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
const auto LHSFilenameLower = Includes[LHSI].Filename.lower();
const auto RHSFilenameLower = Includes[RHSI].Filename.lower();
return std::tie(Includes[LHSI].Priority, LHSFilenameLower,
@@ -3064,7 +3097,7 @@ static void sortCppIncludes(const FormatStyle &Style,
Includes[RHSI].Filename);
});
} else {
- llvm::stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
+ stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
return std::tie(Includes[LHSI].Priority, Includes[LHSI].Filename) <
std::tie(Includes[RHSI].Priority, Includes[RHSI].Filename);
});
@@ -3096,11 +3129,12 @@ static void sortCppIncludes(const FormatStyle &Style,
// enough as additional newlines might be added or removed across #include
// blocks. This we handle below by generating the updated #include blocks and
// comparing it to the original.
- if (Indices.size() == Includes.size() && llvm::is_sorted(Indices) &&
+ if (Indices.size() == Includes.size() && is_sorted(Indices) &&
Style.IncludeStyle.IncludeBlocks == tooling::IncludeStyle::IBS_Preserve) {
return;
}
+ const auto OldCursor = Cursor ? *Cursor : 0;
std::string result;
for (unsigned Index : Indices) {
if (!result.empty()) {
@@ -3124,6 +3158,8 @@ static void sortCppIncludes(const FormatStyle &Style,
// the entire range of blocks. Otherwise, no replacement is generated.
if (replaceCRLF(result) == replaceCRLF(std::string(Code.substr(
IncludesBeginOffset, IncludesBlockSize)))) {
+ if (Cursor)
+ *Cursor = OldCursor;
return;
}
@@ -3132,7 +3168,7 @@ static void sortCppIncludes(const FormatStyle &Style,
// FIXME: better error handling. For now, just skip the replacement for the
// release version.
if (Err) {
- llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ llvm::errs() << toString(std::move(Err)) << "\n";
assert(false);
}
}
@@ -3186,10 +3222,16 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
if (Trimmed.contains(RawStringTermination))
FormattingOff = false;
- if (isClangFormatOff(Trimmed))
+ bool IsBlockComment = false;
+
+ if (isClangFormatOff(Trimmed)) {
FormattingOff = true;
- else if (isClangFormatOn(Trimmed))
+ } else if (isClangFormatOn(Trimmed)) {
FormattingOff = false;
+ } else if (Trimmed.starts_with("/*")) {
+ IsBlockComment = true;
+ Pos = Code.find("*/", SearchFrom + 2);
+ }
const bool EmptyLineSkipped =
Trimmed.empty() &&
@@ -3199,9 +3241,10 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
bool MergeWithNextLine = Trimmed.ends_with("\\");
if (!FormattingOff && !MergeWithNextLine) {
- if (tooling::HeaderIncludes::IncludeRegex.match(Line, &Matches)) {
+ if (!IsBlockComment &&
+ tooling::HeaderIncludes::IncludeRegex.match(Trimmed, &Matches)) {
StringRef IncludeName = Matches[2];
- if (Line.contains("/*") && !Line.contains("*/")) {
+ if (Trimmed.contains("/*") && !Trimmed.contains("*/")) {
// #include with a start of a block comment, but without the end.
// Need to keep all the lines until the end of the comment together.
// FIXME: This is somehow simplified check that probably does not work
@@ -3285,7 +3328,7 @@ static void sortJavaImports(const FormatStyle &Style,
bool StaticImportAfterNormalImport =
Style.SortJavaStaticImport == FormatStyle::SJSIO_After;
- llvm::sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
+ sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
// Negating IsStatic to push static imports above non-static imports.
return std::make_tuple(!Imports[LHSI].IsStatic ^
StaticImportAfterNormalImport,
@@ -3335,7 +3378,7 @@ static void sortJavaImports(const FormatStyle &Style,
// FIXME: better error handling. For now, just skip the replacement for the
// release version.
if (Err) {
- llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ llvm::errs() << toString(std::move(Err)) << "\n";
assert(false);
}
}
@@ -3429,7 +3472,7 @@ tooling::Replacements sortIncludes(const FormatStyle &Style, StringRef Code,
}
template <typename T>
-static llvm::Expected<tooling::Replacements>
+static Expected<tooling::Replacements>
processReplacements(T ProcessFunc, StringRef Code,
const tooling::Replacements &Replaces,
const FormatStyle &Style) {
@@ -3448,7 +3491,7 @@ processReplacements(T ProcessFunc, StringRef Code,
return Replaces.merge(FormatReplaces);
}
-llvm::Expected<tooling::Replacements>
+Expected<tooling::Replacements>
formatReplacements(StringRef Code, const tooling::Replacements &Replaces,
const FormatStyle &Style) {
// We need to use lambda function here since there are two versions of
@@ -3493,13 +3536,13 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
return Replaces;
tooling::Replacements HeaderInsertions;
- std::set<llvm::StringRef> HeadersToDelete;
+ std::set<StringRef> HeadersToDelete;
tooling::Replacements Result;
for (const auto &R : Replaces) {
if (isHeaderInsertion(R)) {
// Replacements from \p Replaces must be conflict-free already, so we can
// simply consume the error.
- llvm::consumeError(HeaderInsertions.add(R));
+ consumeError(HeaderInsertions.add(R));
} else if (isHeaderDeletion(R)) {
HeadersToDelete.insert(R.getReplacementText());
} else if (R.getOffset() == UINT_MAX) {
@@ -3507,7 +3550,7 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
"not supported! "
<< R.getReplacementText() << "\n";
} else {
- llvm::consumeError(Result.add(R));
+ consumeError(Result.add(R));
}
}
if (HeaderInsertions.empty() && HeadersToDelete.empty())
@@ -3524,13 +3567,12 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
if (Err) {
// Ignore the deletion on conflict.
llvm::errs() << "Failed to add header deletion replacement for "
- << Header << ": " << llvm::toString(std::move(Err))
- << "\n";
+ << Header << ": " << toString(std::move(Err)) << "\n";
}
}
}
- llvm::SmallVector<StringRef, 4> Matches;
+ SmallVector<StringRef, 4> Matches;
for (const auto &R : HeaderInsertions) {
auto IncludeDirective = R.getReplacementText();
bool Matched =
@@ -3545,7 +3587,7 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
if (Replace) {
auto Err = Result.add(*Replace);
if (Err) {
- llvm::consumeError(std::move(Err));
+ consumeError(std::move(Err));
unsigned NewOffset =
Result.getShiftedCodePosition(Replace->getOffset());
auto Shifted = tooling::Replacement(FileName, NewOffset, 0,
@@ -3559,13 +3601,13 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
} // anonymous namespace
-llvm::Expected<tooling::Replacements>
+Expected<tooling::Replacements>
cleanupAroundReplacements(StringRef Code, const tooling::Replacements &Replaces,
const FormatStyle &Style) {
// We need to use lambda function here since there are two versions of
// `cleanup`.
auto Cleanup = [](const FormatStyle &Style, StringRef Code,
- std::vector<tooling::Range> Ranges,
+ ArrayRef<tooling::Range> Ranges,
StringRef FileName) -> tooling::Replacements {
return cleanup(Style, Code, Ranges, FileName);
};
@@ -3671,7 +3713,7 @@ reformat(const FormatStyle &Style, StringRef Code,
FormatStyle S = Expanded;
S.RemoveSemicolon = true;
Passes.emplace_back([&, S = std::move(S)](const Environment &Env) {
- return SemiRemover(Env, S).process(/*SkipAnnotation=*/true);
+ return SemiRemover(Env, S).process();
});
}
@@ -3748,11 +3790,11 @@ reformat(const FormatStyle &Style, StringRef Code,
tooling::Replacements NonNoOpFixes;
for (const tooling::Replacement &Fix : Fixes) {
StringRef OriginalCode = Code.substr(Fix.getOffset(), Fix.getLength());
- if (!OriginalCode.equals(Fix.getReplacementText())) {
+ if (OriginalCode != Fix.getReplacementText()) {
auto Err = NonNoOpFixes.add(Fix);
if (Err) {
llvm::errs() << "Error adding replacements : "
- << llvm::toString(std::move(Err)) << "\n";
+ << toString(std::move(Err)) << "\n";
}
}
}
@@ -3836,8 +3878,7 @@ LangOptions getFormattingLangOpts(const FormatStyle &Style) {
LangOpts.Digraphs = LexingStd >= FormatStyle::LS_Cpp11;
LangOpts.LineComment = 1;
- bool AlternativeOperators = Style.isCpp();
- LangOpts.CXXOperatorNames = AlternativeOperators ? 1 : 0;
+ LangOpts.CXXOperatorNames = Style.isCpp();
LangOpts.Bool = 1;
LangOpts.ObjC = 1;
LangOpts.MicrosoftExt = 1; // To get kw___try, kw___finally.
@@ -3875,7 +3916,11 @@ static FormatStyle::LanguageKind getLanguageByFileName(StringRef FileName) {
FileName.ends_with_insensitive(".protodevel")) {
return FormatStyle::LK_Proto;
}
- if (FileName.ends_with_insensitive(".textpb") ||
+ // txtpb is the canonical extension, and textproto is the legacy canonical
+ // extension.
+ // https://protobuf.dev/reference/protobuf/textformat-spec/#text-format-files
+ if (FileName.ends_with_insensitive(".txtpb") ||
+ FileName.ends_with_insensitive(".textpb") ||
FileName.ends_with_insensitive(".pb.txt") ||
FileName.ends_with_insensitive(".textproto") ||
FileName.ends_with_insensitive(".asciipb")) {
@@ -3902,7 +3947,7 @@ FormatStyle::LanguageKind guessLanguage(StringRef FileName, StringRef Code) {
auto Extension = llvm::sys::path::extension(FileName);
// If there's no file extension (or it's .h), we need to check the contents
// of the code to see if it contains Objective-C.
- if (Extension.empty() || Extension == ".h") {
+ if (!Code.empty() && (Extension.empty() || Extension == ".h")) {
auto NonEmptyFileName = FileName.empty() ? "guess.h" : FileName;
Environment Env(Code, NonEmptyFileName, /*Ranges=*/{});
ObjCHeaderStyleGuesser Guesser(Env, getLLVMStyle());
@@ -3921,34 +3966,37 @@ const char *DefaultFallbackStyle = "LLVM";
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
loadAndParseConfigFile(StringRef ConfigFile, llvm::vfs::FileSystem *FS,
- FormatStyle *Style, bool AllowUnknownOptions) {
+ FormatStyle *Style, bool AllowUnknownOptions,
+ llvm::SourceMgr::DiagHandlerTy DiagHandler) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Text =
FS->getBufferForFile(ConfigFile.str());
if (auto EC = Text.getError())
return EC;
- if (auto EC = parseConfiguration(*Text.get(), Style, AllowUnknownOptions))
+ if (auto EC = parseConfiguration(*Text.get(), Style, AllowUnknownOptions,
+ DiagHandler)) {
return EC;
+ }
return Text;
}
-llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
- StringRef FallbackStyleName,
- StringRef Code, llvm::vfs::FileSystem *FS,
- bool AllowUnknownOptions) {
+Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
+ StringRef FallbackStyleName, StringRef Code,
+ llvm::vfs::FileSystem *FS,
+ bool AllowUnknownOptions,
+ llvm::SourceMgr::DiagHandlerTy DiagHandler) {
FormatStyle Style = getLLVMStyle(guessLanguage(FileName, Code));
FormatStyle FallbackStyle = getNoStyle();
if (!getPredefinedStyle(FallbackStyleName, Style.Language, &FallbackStyle))
return make_string_error("Invalid fallback style: " + FallbackStyleName);
- llvm::SmallVector<std::unique_ptr<llvm::MemoryBuffer>, 1>
- ChildFormatTextToApply;
+ SmallVector<std::unique_ptr<llvm::MemoryBuffer>, 1> ChildFormatTextToApply;
if (StyleName.starts_with("{")) {
// Parse YAML/JSON style from the command line.
StringRef Source = "<command-line>";
if (std::error_code ec =
parseConfiguration(llvm::MemoryBufferRef(StyleName, Source), &Style,
- AllowUnknownOptions)) {
+ AllowUnknownOptions, DiagHandler)) {
return make_string_error("Error parsing -style: " + ec.message());
}
@@ -3968,7 +4016,8 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
StyleName.starts_with_insensitive("file:")) {
auto ConfigFile = StyleName.substr(5);
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Text =
- loadAndParseConfigFile(ConfigFile, FS, &Style, AllowUnknownOptions);
+ loadAndParseConfigFile(ConfigFile, FS, &Style, AllowUnknownOptions,
+ DiagHandler);
if (auto EC = Text.getError()) {
return make_string_error("Error reading " + ConfigFile + ": " +
EC.message());
@@ -4007,8 +4056,9 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
auto applyChildFormatTexts = [&](FormatStyle *Style) {
for (const auto &MemBuf : llvm::reverse(ChildFormatTextToApply)) {
- auto EC = parseConfiguration(*MemBuf, Style, AllowUnknownOptions,
- dropDiagnosticHandler);
+ auto EC =
+ parseConfiguration(*MemBuf, Style, AllowUnknownOptions,
+ DiagHandler ? DiagHandler : dropDiagnosticHandler);
// It was already correctly parsed.
assert(!EC);
static_cast<void>(EC);
@@ -4016,7 +4066,7 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
};
// Look for .clang-format/_clang-format file in the file's parent directories.
- llvm::SmallVector<std::string, 2> FilesToLookFor;
+ SmallVector<std::string, 2> FilesToLookFor;
FilesToLookFor.push_back(".clang-format");
FilesToLookFor.push_back("_clang-format");
@@ -4042,7 +4092,8 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
}
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Text =
- loadAndParseConfigFile(ConfigFile, FS, &Style, AllowUnknownOptions);
+ loadAndParseConfigFile(ConfigFile, FS, &Style, AllowUnknownOptions,
+ DiagHandler);
if (auto EC = Text.getError()) {
if (EC != ParseError::Unsuitable) {
return make_string_error("Error reading " + ConfigFile + ": " +
diff --git a/contrib/llvm-project/clang/lib/Format/FormatInternal.h b/contrib/llvm-project/clang/lib/Format/FormatInternal.h
index 9043ce32e9e3..60c5bf6b786b 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatInternal.h
+++ b/contrib/llvm-project/clang/lib/Format/FormatInternal.h
@@ -15,7 +15,9 @@
#ifndef LLVM_CLANG_LIB_FORMAT_FORMATINTERNAL_H
#define LLVM_CLANG_LIB_FORMAT_FORMATINTERNAL_H
-#include "BreakableToken.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Format/Format.h"
+#include "clang/Tooling/Core/Replacement.h"
#include <utility>
namespace clang {
diff --git a/contrib/llvm-project/clang/lib/Format/FormatToken.cpp b/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
index b791c5a26bbe..85bec71ffbbc 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
+++ b/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
@@ -34,45 +34,23 @@ const char *getTokenTypeName(TokenType Type) {
return nullptr;
}
-// FIXME: This is copy&pasted from Sema. Put it in a common place and remove
-// duplication.
-bool FormatToken::isSimpleTypeSpecifier() const {
- switch (Tok.getKind()) {
- case tok::kw_short:
- case tok::kw_long:
- case tok::kw___int64:
- case tok::kw___int128:
- case tok::kw_signed:
- case tok::kw_unsigned:
- case tok::kw_void:
- case tok::kw_char:
- case tok::kw_int:
- case tok::kw_half:
- case tok::kw_float:
- case tok::kw_double:
- case tok::kw___bf16:
- case tok::kw__Float16:
- case tok::kw___float128:
- case tok::kw___ibm128:
- case tok::kw_wchar_t:
- case tok::kw_bool:
-#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) case tok::kw___##Trait:
-#include "clang/Basic/TransformTypeTraits.def"
- case tok::annot_typename:
- case tok::kw_char8_t:
- case tok::kw_char16_t:
- case tok::kw_char32_t:
- case tok::kw_typeof:
- case tok::kw_decltype:
- case tok::kw__Atomic:
- return true;
- default:
- return false;
- }
+// Sorted common C++ non-keyword types.
+static SmallVector<StringRef> CppNonKeywordTypes = {
+ "clock_t", "int16_t", "int32_t", "int64_t", "int8_t",
+ "intptr_t", "ptrdiff_t", "size_t", "time_t", "uint16_t",
+ "uint32_t", "uint64_t", "uint8_t", "uintptr_t",
+};
+
+bool FormatToken::isTypeName(const LangOptions &LangOpts) const {
+ const bool IsCpp = LangOpts.CXXOperatorNames;
+ return is(TT_TypeName) || Tok.isSimpleTypeSpecifier(LangOpts) ||
+ (IsCpp && is(tok::identifier) &&
+ std::binary_search(CppNonKeywordTypes.begin(),
+ CppNonKeywordTypes.end(), TokenText));
}
-bool FormatToken::isTypeOrIdentifier() const {
- return isSimpleTypeSpecifier() || Tok.isOneOf(tok::kw_auto, tok::identifier);
+bool FormatToken::isTypeOrIdentifier(const LangOptions &LangOpts) const {
+ return isTypeName(LangOpts) || isOneOf(tok::kw_auto, tok::identifier);
}
bool FormatToken::isBlockIndentedInitRBrace(const FormatStyle &Style) const {
@@ -137,7 +115,7 @@ unsigned CommaSeparatedList::formatAfterToken(LineState &State,
// bin-packed. Add a severe penalty to this so that column layouts are
// preferred if possible.
if (!Format)
- return 10000;
+ return 10'000;
// Format the entire list.
unsigned Penalty = 0;
diff --git a/contrib/llvm-project/clang/lib/Format/FormatToken.h b/contrib/llvm-project/clang/lib/Format/FormatToken.h
index dede89f26001..9bfeb2052164 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatToken.h
+++ b/contrib/llvm-project/clang/lib/Format/FormatToken.h
@@ -19,8 +19,6 @@
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Format/Format.h"
#include "clang/Lex/Lexer.h"
-#include <memory>
-#include <optional>
#include <unordered_set>
namespace clang {
@@ -37,7 +35,10 @@ namespace format {
TYPE(BinaryOperator) \
TYPE(BitFieldColon) \
TYPE(BlockComment) \
+ /* l_brace of a block that is not the body of a statement (e.g. a loop). */ \
+ TYPE(BlockLBrace) \
TYPE(BracedListLBrace) \
+ TYPE(CaseLabelArrow) \
/* The colon at the end of a case label. */ \
TYPE(CaseLabelColon) \
TYPE(CastRParen) \
@@ -76,6 +77,7 @@ namespace format {
TYPE(ForEachMacro) \
TYPE(FunctionAnnotationRParen) \
TYPE(FunctionDeclarationName) \
+ TYPE(FunctionDeclarationLParen) \
TYPE(FunctionLBrace) \
TYPE(FunctionLikeOrFreestandingMacro) \
TYPE(FunctionTypeLParen) \
@@ -100,6 +102,7 @@ namespace format {
TYPE(JsTypeColon) \
TYPE(JsTypeOperator) \
TYPE(JsTypeOptionalQuestion) \
+ TYPE(LambdaArrow) \
TYPE(LambdaLBrace) \
TYPE(LambdaLSquare) \
TYPE(LeadingJavaAnnotation) \
@@ -148,7 +151,26 @@ namespace format {
TYPE(StructLBrace) \
TYPE(StructRBrace) \
TYPE(StructuredBindingLSquare) \
+ TYPE(SwitchExpressionLabel) \
+ TYPE(SwitchExpressionLBrace) \
+ TYPE(TableGenBangOperator) \
+ TYPE(TableGenCondOperator) \
+ TYPE(TableGenCondOperatorColon) \
+ TYPE(TableGenCondOperatorComma) \
+ TYPE(TableGenDAGArgCloser) \
+ TYPE(TableGenDAGArgListColon) \
+ TYPE(TableGenDAGArgListColonToAlign) \
+ TYPE(TableGenDAGArgListComma) \
+ TYPE(TableGenDAGArgListCommaToBreak) \
+ TYPE(TableGenDAGArgOpener) \
+ TYPE(TableGenDAGArgOpenerToBreak) \
+ TYPE(TableGenDAGArgOperatorID) \
+ TYPE(TableGenDAGArgOperatorToBreak) \
+ TYPE(TableGenListCloser) \
+ TYPE(TableGenListOpener) \
TYPE(TableGenMultiLineString) \
+ TYPE(TableGenTrailingPasteOperator) \
+ TYPE(TableGenValueSuffix) \
TYPE(TemplateCloser) \
TYPE(TemplateOpener) \
TYPE(TemplateString) \
@@ -559,6 +581,9 @@ public:
/// Is optional and can be removed.
bool Optional = false;
+ /// Might be the opening or closing paren of a function declaration.
+ bool MightBeFunctionDeclParen = false;
+
/// Number of optional braces to be inserted after this token:
/// -1: a single left brace
/// 0: no braces
@@ -644,12 +669,16 @@ public:
return Tok.isObjCAtKeyword(Kind);
}
+ bool isAccessSpecifierKeyword() const {
+ return isOneOf(tok::kw_public, tok::kw_protected, tok::kw_private);
+ }
+
bool isAccessSpecifier(bool ColonRequired = true) const {
- if (!isOneOf(tok::kw_public, tok::kw_protected, tok::kw_private))
+ if (!isAccessSpecifierKeyword())
return false;
if (!ColonRequired)
return true;
- const auto NextNonComment = getNextNonComment();
+ const auto *NextNonComment = getNextNonComment();
return NextNonComment && NextNonComment->is(tok::colon);
}
@@ -661,10 +690,8 @@ public:
isAttribute();
}
- /// Determine whether the token is a simple-type-specifier.
- [[nodiscard]] bool isSimpleTypeSpecifier() const;
-
- [[nodiscard]] bool isTypeOrIdentifier() const;
+ [[nodiscard]] bool isTypeName(const LangOptions &LangOpts) const;
+ [[nodiscard]] bool isTypeOrIdentifier(const LangOptions &LangOpts) const;
bool isObjCAccessSpecifier() const {
return is(tok::at) && Next &&
@@ -699,13 +726,36 @@ public:
bool isMemberAccess() const {
return isOneOf(tok::arrow, tok::period, tok::arrowstar) &&
!isOneOf(TT_DesignatedInitializerPeriod, TT_TrailingReturnArrow,
- TT_LeadingJavaAnnotation);
+ TT_LambdaArrow, TT_LeadingJavaAnnotation);
}
bool isPointerOrReference() const {
return isOneOf(tok::star, tok::amp, tok::ampamp);
}
+ bool isCppAlternativeOperatorKeyword() const {
+ assert(!TokenText.empty());
+ if (!isalpha(TokenText[0]))
+ return false;
+
+ switch (Tok.getKind()) {
+ case tok::ampamp:
+ case tok::ampequal:
+ case tok::amp:
+ case tok::pipe:
+ case tok::tilde:
+ case tok::exclaim:
+ case tok::exclaimequal:
+ case tok::pipepipe:
+ case tok::pipeequal:
+ case tok::caret:
+ case tok::caretequal:
+ return true;
+ default:
+ return false;
+ }
+ }
+
bool isUnaryOperator() const {
switch (Tok.getKind()) {
case tok::plus:
@@ -809,8 +859,8 @@ public:
/// Returns whether the token is the left square bracket of a C++
/// structured binding declaration.
- bool isCppStructuredBinding(const FormatStyle &Style) const {
- if (!Style.isCpp() || isNot(tok::l_square))
+ bool isCppStructuredBinding(bool IsCpp) const {
+ if (!IsCpp || isNot(tok::l_square))
return false;
const FormatToken *T = this;
do {
@@ -1601,10 +1651,10 @@ struct AdditionalKeywords {
IdentifierInfo *kw_then;
/// Returns \c true if \p Tok is a keyword or an identifier.
- bool isWordLike(const FormatToken &Tok) const {
+ bool isWordLike(const FormatToken &Tok, bool IsVerilog = true) const {
// getIdentifierinfo returns non-null for keywords as well as identifiers.
return Tok.Tok.getIdentifierInfo() &&
- !Tok.isOneOf(kw_verilogHash, kw_verilogHashHash, kw_apostrophe);
+ (!IsVerilog || !isVerilogKeywordSymbol(Tok));
}
/// Returns \c true if \p Tok is a true JavaScript identifier, returns
@@ -1612,10 +1662,12 @@ struct AdditionalKeywords {
/// If \c AcceptIdentifierName is true, returns true not only for keywords,
// but also for IdentifierName tokens (aka pseudo-keywords), such as
// ``yield``.
- bool IsJavaScriptIdentifier(const FormatToken &Tok,
+ bool isJavaScriptIdentifier(const FormatToken &Tok,
bool AcceptIdentifierName = true) const {
// Based on the list of JavaScript & TypeScript keywords here:
// https://github.com/microsoft/TypeScript/blob/main/src/compiler/scanner.ts#L74
+ if (Tok.isAccessSpecifierKeyword())
+ return false;
switch (Tok.Tok.getKind()) {
case tok::kw_break:
case tok::kw_case:
@@ -1635,9 +1687,6 @@ struct AdditionalKeywords {
case tok::kw_import:
case tok::kw_module:
case tok::kw_new:
- case tok::kw_private:
- case tok::kw_protected:
- case tok::kw_public:
case tok::kw_return:
case tok::kw_static:
case tok::kw_switch:
@@ -1680,6 +1729,8 @@ struct AdditionalKeywords {
/// Returns \c true if \p Tok is a C# keyword, returns
/// \c false if it is a anything else.
bool isCSharpKeyword(const FormatToken &Tok) const {
+ if (Tok.isAccessSpecifierKeyword())
+ return true;
switch (Tok.Tok.getKind()) {
case tok::kw_bool:
case tok::kw_break:
@@ -1706,9 +1757,6 @@ struct AdditionalKeywords {
case tok::kw_namespace:
case tok::kw_new:
case tok::kw_operator:
- case tok::kw_private:
- case tok::kw_protected:
- case tok::kw_public:
case tok::kw_return:
case tok::kw_short:
case tok::kw_sizeof:
@@ -1733,6 +1781,10 @@ struct AdditionalKeywords {
}
}
+ bool isVerilogKeywordSymbol(const FormatToken &Tok) const {
+ return Tok.isOneOf(kw_verilogHash, kw_verilogHashHash, kw_apostrophe);
+ }
+
bool isVerilogWordOperator(const FormatToken &Tok) const {
return Tok.isOneOf(kw_before, kw_intersect, kw_dist, kw_iff, kw_inside,
kw_with);
diff --git a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
index 52a55ea23b5f..63949b2e26bd 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
@@ -100,6 +100,13 @@ ArrayRef<FormatToken *> FormatTokenLexer::lex() {
if (Tokens.back()->NewlinesBefore > 0 || Tokens.back()->IsMultiline)
FirstInLineIndex = Tokens.size() - 1;
} while (Tokens.back()->isNot(tok::eof));
+ if (Style.InsertNewlineAtEOF) {
+ auto &TokEOF = *Tokens.back();
+ if (TokEOF.NewlinesBefore == 0) {
+ TokEOF.NewlinesBefore = 1;
+ TokEOF.OriginalColumn = 0;
+ }
+ }
return Tokens;
}
@@ -276,13 +283,44 @@ void FormatTokenLexer::tryMergePreviousTokens() {
return;
}
}
- // TableGen's Multi line string starts with [{
- if (Style.isTableGen() && tryMergeTokens({tok::l_square, tok::l_brace},
- TT_TableGenMultiLineString)) {
- // Set again with finalizing. This must never be annotated as other types.
- Tokens.back()->setFinalizedType(TT_TableGenMultiLineString);
- Tokens.back()->Tok.setKind(tok::string_literal);
- return;
+ if (Style.isTableGen()) {
+ // TableGen's Multi line string starts with [{
+ if (tryMergeTokens({tok::l_square, tok::l_brace},
+ TT_TableGenMultiLineString)) {
+ // Set again with finalizing. This must never be annotated as other types.
+ Tokens.back()->setFinalizedType(TT_TableGenMultiLineString);
+ Tokens.back()->Tok.setKind(tok::string_literal);
+ return;
+ }
+ // TableGen's bang operator is the form !<name>.
+ // !cond is a special case with specific syntax.
+ if (tryMergeTokens({tok::exclaim, tok::identifier},
+ TT_TableGenBangOperator)) {
+ Tokens.back()->Tok.setKind(tok::identifier);
+ Tokens.back()->Tok.setIdentifierInfo(nullptr);
+ if (Tokens.back()->TokenText == "!cond")
+ Tokens.back()->setFinalizedType(TT_TableGenCondOperator);
+ else
+ Tokens.back()->setFinalizedType(TT_TableGenBangOperator);
+ return;
+ }
+ if (tryMergeTokens({tok::exclaim, tok::kw_if}, TT_TableGenBangOperator)) {
+ // Here, "! if" becomes "!if": the bang operator absorbs the "if" keyword
+ // even across whitespace, since "!if" is the only valid reading in TableGen.
+ Tokens.back()->Tok.setKind(tok::identifier);
+ Tokens.back()->Tok.setIdentifierInfo(nullptr);
+ Tokens.back()->setFinalizedType(TT_TableGenBangOperator);
+ return;
+ }
+ // +, - with numbers are literals. Not unary operators.
+ if (tryMergeTokens({tok::plus, tok::numeric_constant}, TT_Unknown)) {
+ Tokens.back()->Tok.setKind(tok::numeric_constant);
+ return;
+ }
+ if (tryMergeTokens({tok::minus, tok::numeric_constant}, TT_Unknown)) {
+ Tokens.back()->Tok.setKind(tok::numeric_constant);
+ return;
+ }
}
}
@@ -373,7 +411,7 @@ bool FormatTokenLexer::tryMergeNullishCoalescingEqual() {
return false;
auto &NullishCoalescing = *(Tokens.end() - 2);
auto &Equal = *(Tokens.end() - 1);
- if (NullishCoalescing->getType() != TT_NullCoalescingOperator ||
+ if (NullishCoalescing->isNot(TT_NullCoalescingOperator) ||
Equal->isNot(tok::equal)) {
return false;
}
@@ -785,7 +823,7 @@ void FormatTokenLexer::handleTableGenMultilineString() {
auto CloseOffset = Lex->getBuffer().find("}]", OpenOffset);
if (CloseOffset == StringRef::npos)
return;
- auto Text = Lex->getBuffer().substr(OpenOffset, CloseOffset + 2);
+ auto Text = Lex->getBuffer().substr(OpenOffset, CloseOffset - OpenOffset + 2);
MultiLineString->TokenText = Text;
resetLexer(SourceMgr.getFileOffset(
Lex->getSourceLocation(Lex->getBufferLocation() - 2 + Text.size())));
@@ -1389,7 +1427,7 @@ void FormatTokenLexer::readRawToken(FormatToken &Tok) {
// For formatting, treat unterminated string literals like normal string
// literals.
if (Tok.is(tok::unknown)) {
- if (!Tok.TokenText.empty() && Tok.TokenText[0] == '"') {
+ if (Tok.TokenText.starts_with("\"")) {
Tok.Tok.setKind(tok::string_literal);
Tok.IsUnterminatedLiteral = true;
} else if (Style.isJavaScript() && Tok.TokenText == "''") {
@@ -1411,7 +1449,6 @@ void FormatTokenLexer::readRawToken(FormatToken &Tok) {
void FormatTokenLexer::resetLexer(unsigned Offset) {
StringRef Buffer = SourceMgr.getBufferData(ID);
- LangOpts = getFormattingLangOpts(Style);
Lex.reset(new Lexer(SourceMgr.getLocForStartOfFile(ID), LangOpts,
Buffer.begin(), Buffer.begin() + Offset, Buffer.end()));
Lex->SetKeepWhitespaceMode(true);
diff --git a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h
index 65dd733bd533..277cc0a2dfde 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h
+++ b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h
@@ -17,14 +17,9 @@
#include "Encoding.h"
#include "FormatToken.h"
-#include "clang/Basic/LangOptions.h"
-#include "clang/Basic/SourceLocation.h"
-#include "clang/Basic/SourceManager.h"
-#include "clang/Format/Format.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringSet.h"
-#include "llvm/Support/Regex.h"
#include <stack>
diff --git a/contrib/llvm-project/clang/lib/Format/FormatTokenSource.h b/contrib/llvm-project/clang/lib/Format/FormatTokenSource.h
index 7819244eb7d1..8f00e5f4582c 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatTokenSource.h
+++ b/contrib/llvm-project/clang/lib/Format/FormatTokenSource.h
@@ -15,10 +15,7 @@
#ifndef LLVM_CLANG_LIB_FORMAT_FORMATTOKENSOURCE_H
#define LLVM_CLANG_LIB_FORMAT_FORMATTOKENSOURCE_H
-#include "FormatToken.h"
#include "UnwrappedLineParser.h"
-#include "llvm/ADT/DenseMap.h"
-#include <cstddef>
#define DEBUG_TYPE "format-token-source"
@@ -75,6 +72,15 @@ public:
// getNextToken() -> a1
// getNextToken() -> a2
virtual FormatToken *insertTokens(ArrayRef<FormatToken *> Tokens) = 0;
+
+ [[nodiscard]] FormatToken *getNextNonComment() {
+ FormatToken *Tok;
+ do {
+ Tok = getNextToken();
+ assert(Tok);
+ } while (Tok->is(tok::comment));
+ return Tok;
+ }
};
class IndexedTokenSource : public FormatTokenSource {
@@ -167,7 +173,7 @@ private:
return Next;
}
- void dbgToken(int Position, llvm::StringRef Indent = "") {
+ void dbgToken(int Position, StringRef Indent = "") {
FormatToken *Tok = Tokens[Position];
llvm::dbgs() << Indent << "[" << Position
<< "] Token: " << Tok->Tok.getName() << " / " << Tok->TokenText
diff --git a/contrib/llvm-project/clang/lib/Format/MacroCallReconstructor.cpp b/contrib/llvm-project/clang/lib/Format/MacroCallReconstructor.cpp
index cbdd1683c54d..101acefdfe7a 100644
--- a/contrib/llvm-project/clang/lib/Format/MacroCallReconstructor.cpp
+++ b/contrib/llvm-project/clang/lib/Format/MacroCallReconstructor.cpp
@@ -33,7 +33,7 @@ void forEachToken(const UnwrappedLine &Line, const T &Call,
FormatToken *Parent = nullptr) {
bool First = true;
for (const auto &N : Line.Tokens) {
- Call(N.Tok, Parent, First);
+ Call(N.Tok, Parent, First, Line.Level);
First = false;
for (const auto &Child : N.Children)
forEachToken(Child, Call, N.Tok);
@@ -44,7 +44,7 @@ MacroCallReconstructor::MacroCallReconstructor(
unsigned Level,
const llvm::DenseMap<FormatToken *, std::unique_ptr<UnwrappedLine>>
&ActiveExpansions)
- : Level(Level), IdToReconstructed(ActiveExpansions) {
+ : Result(Level), IdToReconstructed(ActiveExpansions) {
Result.Tokens.push_back(std::make_unique<LineNode>());
ActiveReconstructedLines.push_back(&Result);
}
@@ -52,9 +52,8 @@ MacroCallReconstructor::MacroCallReconstructor(
void MacroCallReconstructor::addLine(const UnwrappedLine &Line) {
assert(State != Finalized);
LLVM_DEBUG(llvm::dbgs() << "MCR: new line...\n");
- forEachToken(Line, [&](FormatToken *Token, FormatToken *Parent, bool First) {
- add(Token, Parent, First);
- });
+ forEachToken(Line, [&](FormatToken *Token, FormatToken *Parent, bool First,
+ unsigned Level) { add(Token, Parent, First, Level); });
assert(InProgress || finished());
}
@@ -62,8 +61,8 @@ UnwrappedLine MacroCallReconstructor::takeResult() && {
finalize();
assert(Result.Tokens.size() == 1 &&
Result.Tokens.front()->Children.size() == 1);
- UnwrappedLine Final =
- createUnwrappedLine(*Result.Tokens.front()->Children.front(), Level);
+ UnwrappedLine Final = createUnwrappedLine(
+ *Result.Tokens.front()->Children.front(), Result.Level);
assert(!Final.Tokens.empty());
return Final;
}
@@ -72,7 +71,8 @@ UnwrappedLine MacroCallReconstructor::takeResult() && {
// ExpandedParent in the incoming unwrapped line. \p First specifies whether it
// is the first token in a given unwrapped line.
void MacroCallReconstructor::add(FormatToken *Token,
- FormatToken *ExpandedParent, bool First) {
+ FormatToken *ExpandedParent, bool First,
+ unsigned Level) {
LLVM_DEBUG(
llvm::dbgs() << "MCR: Token: " << Token->TokenText << ", Parent: "
<< (ExpandedParent ? ExpandedParent->TokenText : "<null>")
@@ -102,7 +102,7 @@ void MacroCallReconstructor::add(FormatToken *Token,
First = true;
}
- prepareParent(ExpandedParent, First);
+ prepareParent(ExpandedParent, First, Level);
if (Token->MacroCtx) {
// If this token was generated by a macro call, add the reconstructed
@@ -129,7 +129,7 @@ void MacroCallReconstructor::add(FormatToken *Token,
// is the parent of ActiveReconstructedLines.back() in the reconstructed
// unwrapped line.
void MacroCallReconstructor::prepareParent(FormatToken *ExpandedParent,
- bool NewLine) {
+ bool NewLine, unsigned Level) {
LLVM_DEBUG({
llvm::dbgs() << "ParentMap:\n";
debugParentMap();
@@ -172,7 +172,7 @@ void MacroCallReconstructor::prepareParent(FormatToken *ExpandedParent,
}
assert(!ActiveReconstructedLines.empty());
ActiveReconstructedLines.back()->Tokens.back()->Children.push_back(
- std::make_unique<ReconstructedLine>());
+ std::make_unique<ReconstructedLine>(Level));
ActiveReconstructedLines.push_back(
&*ActiveReconstructedLines.back()->Tokens.back()->Children.back());
} else if (parentLine().Tokens.back()->Tok != Parent) {
@@ -424,7 +424,8 @@ bool MacroCallReconstructor::processNextReconstructed() {
SpelledParentToReconstructedParent[MacroCallStructure.back()
.ParentLastToken] = Token;
appendToken(Token);
- prepareParent(Token, /*NewLine=*/true);
+ prepareParent(Token, /*NewLine=*/true,
+ MacroCallStructure.back().Line->Level);
Token->MacroParent = true;
return false;
}
@@ -435,7 +436,8 @@ bool MacroCallReconstructor::processNextReconstructed() {
[MacroCallStructure.back().Line->Tokens.back()->Tok] = Token;
Token->MacroParent = true;
appendToken(Token, MacroCallStructure.back().Line);
- prepareParent(Token, /*NewLine=*/true);
+ prepareParent(Token, /*NewLine=*/true,
+ MacroCallStructure.back().Line->Level);
return true;
}
if (Token->is(tok::r_paren)) {
@@ -509,16 +511,36 @@ MacroCallReconstructor::createUnwrappedLine(const ReconstructedLine &Line,
for (const auto &N : Line.Tokens) {
Result.Tokens.push_back(N->Tok);
UnwrappedLineNode &Current = Result.Tokens.back();
- for (const auto &Child : N->Children) {
- if (Child->Tokens.empty())
- continue;
- Current.Children.push_back(createUnwrappedLine(*Child, Level + 1));
- }
- if (Current.Children.size() == 1 &&
- Current.Tok->isOneOf(tok::l_paren, tok::comma)) {
- Result.Tokens.splice(Result.Tokens.end(),
- Current.Children.front().Tokens);
- Current.Children.clear();
+ auto NumChildren =
+ std::count_if(N->Children.begin(), N->Children.end(),
+ [](const auto &Child) { return !Child->Tokens.empty(); });
+ if (NumChildren == 1 && Current.Tok->isOneOf(tok::l_paren, tok::comma)) {
+ // If we only have one child, and the child is due to a macro expansion
+ // (either attached to a left parenthesis or comma), merge the child into
+ // the current line to prevent forced breaks for macro arguments.
+ auto *Child = std::find_if(
+ N->Children.begin(), N->Children.end(),
+ [](const auto &Child) { return !Child->Tokens.empty(); });
+ auto Line = createUnwrappedLine(**Child, Level);
+ Result.Tokens.splice(Result.Tokens.end(), Line.Tokens);
+ } else if (NumChildren > 0) {
+ // When there are multiple children with different indent, make sure that
+ // we indent them:
+ // 1. One level below the current line's level.
+ // 2. At the correct level relative to each other.
+ unsigned MinChildLevel =
+ std::min_element(N->Children.begin(), N->Children.end(),
+ [](const auto &E1, const auto &E2) {
+ return E1->Level < E2->Level;
+ })
+ ->get()
+ ->Level;
+ for (const auto &Child : N->Children) {
+ if (Child->Tokens.empty())
+ continue;
+ Current.Children.push_back(createUnwrappedLine(
+ *Child, Level + 1 + (Child->Level - MinChildLevel)));
+ }
}
}
return Result;
diff --git a/contrib/llvm-project/clang/lib/Format/MacroExpander.cpp b/contrib/llvm-project/clang/lib/Format/MacroExpander.cpp
index 5a1cdd884c5e..5768ff37fefc 100644
--- a/contrib/llvm-project/clang/lib/Format/MacroExpander.cpp
+++ b/contrib/llvm-project/clang/lib/Format/MacroExpander.cpp
@@ -119,7 +119,7 @@ private:
};
MacroExpander::MacroExpander(
- const std::vector<std::string> &Macros, clang::SourceManager &SourceMgr,
+ const std::vector<std::string> &Macros, SourceManager &SourceMgr,
const FormatStyle &Style,
llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
IdentifierTable &IdentTable)
@@ -134,7 +134,7 @@ MacroExpander::~MacroExpander() = default;
void MacroExpander::parseDefinition(const std::string &Macro) {
Buffers.push_back(
llvm::MemoryBuffer::getMemBufferCopy(Macro, "<scratch space>"));
- clang::FileID FID = SourceMgr.createFileID(Buffers.back()->getMemBufferRef());
+ FileID FID = SourceMgr.createFileID(Buffers.back()->getMemBufferRef());
FormatTokenLexer Lex(SourceMgr, FID, 0, Style, encoding::Encoding_UTF8,
Allocator, IdentTable);
const auto Tokens = Lex.lex();
@@ -150,20 +150,20 @@ void MacroExpander::parseDefinition(const std::string &Macro) {
}
}
-bool MacroExpander::defined(llvm::StringRef Name) const {
+bool MacroExpander::defined(StringRef Name) const {
return FunctionLike.contains(Name) || ObjectLike.contains(Name);
}
-bool MacroExpander::objectLike(llvm::StringRef Name) const {
+bool MacroExpander::objectLike(StringRef Name) const {
return ObjectLike.contains(Name);
}
-bool MacroExpander::hasArity(llvm::StringRef Name, unsigned Arity) const {
+bool MacroExpander::hasArity(StringRef Name, unsigned Arity) const {
auto it = FunctionLike.find(Name);
return it != FunctionLike.end() && it->second.contains(Arity);
}
-llvm::SmallVector<FormatToken *, 8>
+SmallVector<FormatToken *, 8>
MacroExpander::expand(FormatToken *ID,
std::optional<ArgsList> OptionalArgs) const {
if (OptionalArgs)
diff --git a/contrib/llvm-project/clang/lib/Format/Macros.h b/contrib/llvm-project/clang/lib/Format/Macros.h
index 1964624e828c..e05f734b0db8 100644
--- a/contrib/llvm-project/clang/lib/Format/Macros.h
+++ b/contrib/llvm-project/clang/lib/Format/Macros.h
@@ -39,15 +39,9 @@
#define CLANG_LIB_FORMAT_MACROS_H
#include <list>
-#include <map>
-#include <string>
-#include <vector>
#include "FormatToken.h"
-#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringRef.h"
namespace clang {
namespace format {
@@ -85,7 +79,7 @@ struct UnwrappedLineNode;
///
class MacroExpander {
public:
- using ArgsList = llvm::ArrayRef<llvm::SmallVector<FormatToken *, 8>>;
+ using ArgsList = ArrayRef<SmallVector<FormatToken *, 8>>;
/// Construct a macro expander from a set of macro definitions.
/// Macro definitions must be encoded as UTF-8.
@@ -101,27 +95,27 @@ public:
/// Macros that cannot be parsed will be silently discarded.
///
MacroExpander(const std::vector<std::string> &Macros,
- clang::SourceManager &SourceMgr, const FormatStyle &Style,
+ SourceManager &SourceMgr, const FormatStyle &Style,
llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
IdentifierTable &IdentTable);
~MacroExpander();
/// Returns whether any macro \p Name is defined, regardless of overloads.
- bool defined(llvm::StringRef Name) const;
+ bool defined(StringRef Name) const;
  /// Returns whether there is an object-like overload, i.e. where the macro
/// has no arguments and should not consume subsequent parentheses.
- bool objectLike(llvm::StringRef Name) const;
+ bool objectLike(StringRef Name) const;
/// Returns whether macro \p Name provides an overload with the given arity.
- bool hasArity(llvm::StringRef Name, unsigned Arity) const;
+ bool hasArity(StringRef Name, unsigned Arity) const;
/// Returns the expanded stream of format tokens for \p ID, where
/// each element in \p Args is a positional argument to the macro call.
/// If \p Args is not set, the object-like overload is used.
/// If \p Args is set, the overload with the arity equal to \c Args.size() is
/// used.
- llvm::SmallVector<FormatToken *, 8>
+ SmallVector<FormatToken *, 8>
expand(FormatToken *ID, std::optional<ArgsList> OptionalArgs) const;
private:
@@ -130,7 +124,7 @@ private:
void parseDefinition(const std::string &Macro);
- clang::SourceManager &SourceMgr;
+ SourceManager &SourceMgr;
const FormatStyle &Style;
llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator;
IdentifierTable &IdentTable;
@@ -231,8 +225,9 @@ public:
UnwrappedLine takeResult() &&;
private:
- void add(FormatToken *Token, FormatToken *ExpandedParent, bool First);
- void prepareParent(FormatToken *ExpandedParent, bool First);
+ void add(FormatToken *Token, FormatToken *ExpandedParent, bool First,
+ unsigned Level);
+ void prepareParent(FormatToken *ExpandedParent, bool First, unsigned Level);
FormatToken *getParentInResult(FormatToken *Parent);
void reconstruct(FormatToken *Token);
void startReconstruction(FormatToken *Token);
@@ -265,14 +260,16 @@ private:
LineNode() = default;
LineNode(FormatToken *Tok) : Tok(Tok) {}
FormatToken *Tok = nullptr;
- llvm::SmallVector<std::unique_ptr<ReconstructedLine>> Children;
+ SmallVector<std::unique_ptr<ReconstructedLine>> Children;
};
// Line in which we build up the resulting unwrapped line.
// FIXME: Investigate changing UnwrappedLine to a pointer type and using it
// instead of rolling our own type.
struct ReconstructedLine {
- llvm::SmallVector<std::unique_ptr<LineNode>> Tokens;
+ explicit ReconstructedLine(unsigned Level) : Level(Level) {}
+ unsigned Level;
+ SmallVector<std::unique_ptr<LineNode>> Tokens;
};
// The line in which we collect the resulting reconstructed output.
@@ -288,7 +285,7 @@ private:
// Stack of currently "open" lines, where each line's predecessor's last
// token is the parent token for that line.
- llvm::SmallVector<ReconstructedLine *> ActiveReconstructedLines;
+ SmallVector<ReconstructedLine *> ActiveReconstructedLines;
// Maps from the expanded token to the token that takes its place in the
// reconstructed token stream in terms of parent-child relationships.
@@ -328,7 +325,7 @@ private:
};
// Stack of macro calls for which we're in the middle of an expansion.
- llvm::SmallVector<Expansion> ActiveExpansions;
+ SmallVector<Expansion> ActiveExpansions;
struct MacroCallState {
MacroCallState(ReconstructedLine *Line, FormatToken *ParentLastToken,
@@ -371,10 +368,7 @@ private:
// |- ,
// | \- <argument>
// \- )
- llvm::SmallVector<MacroCallState> MacroCallStructure;
-
- // Level the generated UnwrappedLine will be at.
- const unsigned Level;
+ SmallVector<MacroCallState> MacroCallStructure;
// Maps from identifier of the macro call to an unwrapped line containing
// all tokens of the macro call.
diff --git a/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp b/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp
index 84941746f0df..593f8efff25a 100644
--- a/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp
@@ -272,7 +272,7 @@ const FormatToken *LeftRightQualifierAlignmentFixer::analyzeRight(
// The case `long const long int volatile` -> `long long int const volatile`
// The case `long long volatile int const` -> `long long int const volatile`
// The case `const long long volatile int` -> `long long int const volatile`
- if (TypeToken->isSimpleTypeSpecifier()) {
+ if (TypeToken->isTypeName(LangOpts)) {
// The case `const decltype(foo)` -> `const decltype(foo)`
// The case `const typeof(foo)` -> `const typeof(foo)`
// The case `const _Atomic(foo)` -> `const _Atomic(foo)`
@@ -280,8 +280,10 @@ const FormatToken *LeftRightQualifierAlignmentFixer::analyzeRight(
return Tok;
const FormatToken *LastSimpleTypeSpecifier = TypeToken;
- while (isQualifierOrType(LastSimpleTypeSpecifier->getNextNonComment()))
+ while (isQualifierOrType(LastSimpleTypeSpecifier->getNextNonComment(),
+ LangOpts)) {
LastSimpleTypeSpecifier = LastSimpleTypeSpecifier->getNextNonComment();
+ }
rotateTokens(SourceMgr, Fixes, Tok, LastSimpleTypeSpecifier,
/*Left=*/false);
@@ -291,7 +293,7 @@ const FormatToken *LeftRightQualifierAlignmentFixer::analyzeRight(
// The case `unsigned short const` -> `unsigned short const`
// The case:
// `unsigned short volatile const` -> `unsigned short const volatile`
- if (PreviousCheck && PreviousCheck->isSimpleTypeSpecifier()) {
+ if (PreviousCheck && PreviousCheck->isTypeName(LangOpts)) {
if (LastQual != Tok)
rotateTokens(SourceMgr, Fixes, Tok, LastQual, /*Left=*/false);
return Tok;
@@ -408,11 +410,11 @@ const FormatToken *LeftRightQualifierAlignmentFixer::analyzeLeft(
// The case `volatile long long const int` -> `const volatile long long int`
// The case `const long long volatile int` -> `const volatile long long int`
// The case `long volatile long int const` -> `const volatile long long int`
- if (TypeToken->isSimpleTypeSpecifier()) {
+ if (TypeToken->isTypeName(LangOpts)) {
const FormatToken *LastSimpleTypeSpecifier = TypeToken;
while (isConfiguredQualifierOrType(
LastSimpleTypeSpecifier->getPreviousNonComment(),
- ConfiguredQualifierTokens)) {
+ ConfiguredQualifierTokens, LangOpts)) {
LastSimpleTypeSpecifier =
LastSimpleTypeSpecifier->getPreviousNonComment();
}
@@ -561,6 +563,8 @@ void LeftRightQualifierAlignmentFixer::fixQualifierAlignment(
for (const auto *Tok = First; Tok && Tok != Last && Tok->Next;
Tok = Tok->Next) {
+ if (Tok->MustBreakBefore)
+ break;
if (Tok->is(tok::comment))
continue;
if (RightAlign) {
@@ -608,22 +612,21 @@ void prepareLeftRightOrderingForQualifierAlignmentFixer(
}
}
-bool LeftRightQualifierAlignmentFixer::isQualifierOrType(
- const FormatToken *const Tok) {
- return Tok && (Tok->isSimpleTypeSpecifier() || Tok->is(tok::kw_auto) ||
+bool isQualifierOrType(const FormatToken *Tok, const LangOptions &LangOpts) {
+ return Tok && (Tok->isTypeName(LangOpts) || Tok->is(tok::kw_auto) ||
isQualifier(Tok));
}
-bool LeftRightQualifierAlignmentFixer::isConfiguredQualifierOrType(
- const FormatToken *const Tok,
- const std::vector<tok::TokenKind> &Qualifiers) {
- return Tok && (Tok->isSimpleTypeSpecifier() || Tok->is(tok::kw_auto) ||
+bool isConfiguredQualifierOrType(const FormatToken *Tok,
+ const std::vector<tok::TokenKind> &Qualifiers,
+ const LangOptions &LangOpts) {
+ return Tok && (Tok->isTypeName(LangOpts) || Tok->is(tok::kw_auto) ||
isConfiguredQualifier(Tok, Qualifiers));
}
// If a token is an identifier and it's upper case, it could
// be a macro and hence we need to be able to ignore it.
-bool LeftRightQualifierAlignmentFixer::isPossibleMacro(const FormatToken *Tok) {
+bool isPossibleMacro(const FormatToken *Tok) {
if (!Tok)
return false;
if (Tok->isNot(tok::identifier))
diff --git a/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.h b/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.h
index e922d8005595..a0a0d597ebf3 100644
--- a/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.h
+++ b/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.h
@@ -32,6 +32,15 @@ void prepareLeftRightOrderingForQualifierAlignmentFixer(
std::vector<std::string> &RightOrder,
std::vector<tok::TokenKind> &Qualifiers);
+// Is the Token a simple or qualifier type
+bool isQualifierOrType(const FormatToken *Tok, const LangOptions &LangOpts);
+bool isConfiguredQualifierOrType(const FormatToken *Tok,
+ const std::vector<tok::TokenKind> &Qualifiers,
+ const LangOptions &LangOpts);
+
+// Is the Token likely a Macro
+bool isPossibleMacro(const FormatToken *Tok);
+
class LeftRightQualifierAlignmentFixer : public TokenAnalyzer {
std::string Qualifier;
bool RightAlign;
@@ -69,15 +78,6 @@ public:
const FormatToken *Tok,
const std::string &Qualifier,
tok::TokenKind QualifierType);
-
- // Is the Token a simple or qualifier type
- static bool isQualifierOrType(const FormatToken *Tok);
- static bool
- isConfiguredQualifierOrType(const FormatToken *Tok,
- const std::vector<tok::TokenKind> &Qualifiers);
-
- // Is the Token likely a Macro
- static bool isPossibleMacro(const FormatToken *Tok);
};
} // end namespace format
diff --git a/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp b/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp
index 1a6a1b19e702..1acce26ff279 100644
--- a/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp
+++ b/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp
@@ -34,8 +34,6 @@ namespace format {
class FormatTokenLexer;
-using clang::format::FormatStyle;
-
// An imported symbol in a JavaScript ES6 import/export, possibly aliased.
struct JsImportedSymbol {
StringRef Symbol;
@@ -178,7 +176,7 @@ public:
}
}
}
- llvm::StringRef PreviousText = getSourceText(InsertionPoint);
+ StringRef PreviousText = getSourceText(InsertionPoint);
if (ReferencesText == PreviousText)
return {Result, 0};
@@ -209,7 +207,7 @@ public:
// FIXME: better error handling. For now, just print error message and skip
// the replacement for the release version.
if (Err) {
- llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ llvm::errs() << toString(std::move(Err)) << "\n";
assert(false);
}
@@ -276,7 +274,7 @@ private:
SortChunk.push_back(*Start);
++Start;
}
- llvm::stable_sort(SortChunk);
+ stable_sort(SortChunk);
mergeModuleReferences(SortChunk);
ReferencesSorted.insert(ReferencesSorted.end(), SortChunk.begin(),
SortChunk.end());
@@ -334,10 +332,10 @@ private:
// Sort the individual symbols within the import.
// E.g. `import {b, a} from 'x';` -> `import {a, b} from 'x';`
SmallVector<JsImportedSymbol, 1> Symbols = Reference.Symbols;
- llvm::stable_sort(
- Symbols, [&](const JsImportedSymbol &LHS, const JsImportedSymbol &RHS) {
- return LHS.Symbol.compare_insensitive(RHS.Symbol) < 0;
- });
+ stable_sort(Symbols,
+ [&](const JsImportedSymbol &LHS, const JsImportedSymbol &RHS) {
+ return LHS.Symbol.compare_insensitive(RHS.Symbol) < 0;
+ });
if (!Reference.SymbolsMerged && Symbols == Reference.Symbols) {
// Symbols didn't change, just emit the entire module reference.
StringRef ReferenceStmt = getSourceText(Reference.Range);
@@ -349,7 +347,7 @@ private:
// ... then the references in order ...
if (!Symbols.empty()) {
Buffer += getSourceText(Symbols.front().Range);
- for (const JsImportedSymbol &Symbol : llvm::drop_begin(Symbols)) {
+ for (const JsImportedSymbol &Symbol : drop_begin(Symbols)) {
Buffer += ",";
Buffer += getSourceText(Symbol.Range);
}
diff --git a/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.h b/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.h
index 7336db9537b0..b55b149aab4c 100644
--- a/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.h
+++ b/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.h
@@ -14,10 +14,7 @@
#ifndef LLVM_CLANG_LIB_FORMAT_SORTJAVASCRIPTIMPORTS_H
#define LLVM_CLANG_LIB_FORMAT_SORTJAVASCRIPTIMPORTS_H
-#include "clang/Basic/LLVM.h"
#include "clang/Format/Format.h"
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/StringRef.h"
namespace clang {
namespace format {
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp b/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp
index bd648c430f9b..804a2b0f5e8c 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp
@@ -84,7 +84,7 @@ Environment::Environment(StringRef Code, StringRef FileName,
NextStartColumn(NextStartColumn), LastStartColumn(LastStartColumn) {}
TokenAnalyzer::TokenAnalyzer(const Environment &Env, const FormatStyle &Style)
- : Style(Style), Env(Env),
+ : Style(Style), LangOpts(getFormattingLangOpts(Style)), Env(Env),
AffectedRangeMgr(Env.getSourceManager(), Env.getCharRanges()),
UnwrappedLines(1),
Encoding(encoding::detectEncoding(
@@ -101,7 +101,7 @@ std::pair<tooling::Replacements, unsigned>
TokenAnalyzer::process(bool SkipAnnotation) {
tooling::Replacements Result;
llvm::SpecificBumpPtrAllocator<FormatToken> Allocator;
- IdentifierTable IdentTable(getFormattingLangOpts(Style));
+ IdentifierTable IdentTable(LangOpts);
FormatTokenLexer Lex(Env.getSourceManager(), Env.getFileID(),
Env.getFirstStartColumn(), Style, Encoding, Allocator,
IdentTable);
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.h b/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.h
index 4086dab1c94c..ef559099d325 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.h
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.h
@@ -17,19 +17,8 @@
#define LLVM_CLANG_LIB_FORMAT_TOKENANALYZER_H
#include "AffectedRangeManager.h"
-#include "Encoding.h"
-#include "FormatToken.h"
#include "FormatTokenLexer.h"
#include "TokenAnnotator.h"
-#include "UnwrappedLineParser.h"
-#include "clang/Basic/Diagnostic.h"
-#include "clang/Basic/DiagnosticOptions.h"
-#include "clang/Basic/FileManager.h"
-#include "clang/Basic/SourceManager.h"
-#include "clang/Format/Format.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/Support/Debug.h"
-#include <memory>
namespace clang {
namespace format {
@@ -103,6 +92,7 @@ protected:
void finishRun() override;
FormatStyle Style;
+ LangOptions LangOpts;
// Stores Style, FileID and SourceManager etc.
const Environment &Env;
// AffectedRangeMgr stores ranges to be fixed.
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
index 4d482e6543d6..ad9ed7b47d00 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
@@ -126,7 +126,9 @@ public:
const AdditionalKeywords &Keywords,
SmallVector<ScopeType> &Scopes)
: Style(Style), Line(Line), CurrentToken(Line.First), AutoFound(false),
- Keywords(Keywords), Scopes(Scopes) {
+ IsCpp(Style.isCpp()), LangOpts(getFormattingLangOpts(Style)),
+ Keywords(Keywords), Scopes(Scopes), TemplateDeclarationDepth(0) {
+ assert(IsCpp == LangOpts.CXXOperatorNames);
Contexts.push_back(Context(tok::unknown, 1, /*IsExpression=*/false));
resetTokenMetadata();
}
@@ -152,8 +154,8 @@ private:
if (NonTemplateLess.count(CurrentToken->Previous) > 0)
return false;
- const FormatToken &Previous = *CurrentToken->Previous; // The '<'.
- if (Previous.Previous) {
+ if (const auto &Previous = *CurrentToken->Previous; // The '<'.
+ Previous.Previous) {
if (Previous.Previous->Tok.isLiteral())
return false;
if (Previous.Previous->is(tok::r_brace))
@@ -173,15 +175,13 @@ private:
FormatToken *Left = CurrentToken->Previous;
Left->ParentBracket = Contexts.back().ContextKind;
ScopedContextCreator ContextCreator(*this, tok::less, 12);
+ Contexts.back().IsExpression = false;
- // If this angle is in the context of an expression, we need to be more
- // hesitant to detect it as opening template parameters.
- bool InExprContext = Contexts.back().IsExpression;
+ const auto *BeforeLess = Left->Previous;
- Contexts.back().IsExpression = false;
// If there's a template keyword before the opening angle bracket, this is a
// template parameter, not an argument.
- if (Left->Previous && Left->Previous->isNot(tok::kw_template))
+ if (BeforeLess && BeforeLess->isNot(tok::kw_template))
Contexts.back().ContextType = Context::TemplateArgument;
if (Style.Language == FormatStyle::LK_Java &&
@@ -189,20 +189,29 @@ private:
next();
}
- while (CurrentToken) {
+ for (bool SeenTernaryOperator = false, MaybeAngles = true; CurrentToken;) {
+ const bool InExpr = Contexts[Contexts.size() - 2].IsExpression;
if (CurrentToken->is(tok::greater)) {
- // Try to do a better job at looking for ">>" within the condition of
- // a statement. Conservatively insert spaces between consecutive ">"
- // tokens to prevent splitting right bitshift operators and potentially
- // altering program semantics. This check is overly conservative and
- // will prevent spaces from being inserted in select nested template
- // parameter cases, but should not alter program semantics.
- if (CurrentToken->Next && CurrentToken->Next->is(tok::greater) &&
- Left->ParentBracket != tok::less &&
- CurrentToken->getStartOfNonWhitespace() ==
- CurrentToken->Next->getStartOfNonWhitespace().getLocWithOffset(
- -1)) {
- return false;
+ const auto *Next = CurrentToken->Next;
+ if (CurrentToken->isNot(TT_TemplateCloser)) {
+ // Try to do a better job at looking for ">>" within the condition of
+ // a statement. Conservatively insert spaces between consecutive ">"
+ // tokens to prevent splitting right shift operators and potentially
+ // altering program semantics. This check is overly conservative and
+ // will prevent spaces from being inserted in select nested template
+ // parameter cases, but should not alter program semantics.
+ if (Next && Next->is(tok::greater) &&
+ Left->ParentBracket != tok::less &&
+ CurrentToken->getStartOfNonWhitespace() ==
+ Next->getStartOfNonWhitespace().getLocWithOffset(-1)) {
+ return false;
+ }
+ if (InExpr && SeenTernaryOperator &&
+ (!Next || !Next->isOneOf(tok::l_paren, tok::l_brace))) {
+ return false;
+ }
+ if (!MaybeAngles)
+ return false;
}
Left->MatchingParen = CurrentToken;
CurrentToken->MatchingParen = Left;
@@ -212,14 +221,14 @@ private:
// msg: < item: data >
// In TT_TextProto, map<key, value> does not occur.
if (Style.Language == FormatStyle::LK_TextProto ||
- (Style.Language == FormatStyle::LK_Proto && Left->Previous &&
- Left->Previous->isOneOf(TT_SelectorName, TT_DictLiteral))) {
+ (Style.Language == FormatStyle::LK_Proto && BeforeLess &&
+ BeforeLess->isOneOf(TT_SelectorName, TT_DictLiteral))) {
CurrentToken->setType(TT_DictLiteral);
} else {
CurrentToken->setType(TT_TemplateCloser);
CurrentToken->Tok.setLength(1);
}
- if (CurrentToken->Next && CurrentToken->Next->Tok.isLiteral())
+ if (Next && Next->Tok.isLiteral())
return false;
next();
return true;
@@ -229,23 +238,23 @@ private:
next();
continue;
}
- if (CurrentToken->isOneOf(tok::r_paren, tok::r_square, tok::r_brace) ||
- (CurrentToken->isOneOf(tok::colon, tok::question) && InExprContext &&
- !Style.isCSharp() && !Style.isProto())) {
+ if (CurrentToken->isOneOf(tok::r_paren, tok::r_square, tok::r_brace))
return false;
- }
+ const auto &Prev = *CurrentToken->Previous;
// If a && or || is found and interpreted as a binary operator, this set
// of angles is likely part of something like "a < b && c > d". If the
// angles are inside an expression, the ||/&& might also be a binary
// operator that was misinterpreted because we are parsing template
// parameters.
// FIXME: This is getting out of hand, write a decent parser.
- if (CurrentToken->Previous->isOneOf(tok::pipepipe, tok::ampamp) &&
- CurrentToken->Previous->is(TT_BinaryOperator) &&
- Contexts[Contexts.size() - 2].IsExpression &&
- !Line.startsWith(tok::kw_template)) {
- return false;
+ if (MaybeAngles && InExpr && !Line.startsWith(tok::kw_template) &&
+ Prev.is(TT_BinaryOperator)) {
+ const auto Precedence = Prev.getPrecedence();
+ if (Precedence > prec::Conditional && Precedence < prec::Relational)
+ MaybeAngles = false;
}
+ if (Prev.isOneOf(tok::question, tok::colon) && !Style.isProto())
+ SeenTernaryOperator = true;
updateParameterCount(Left, CurrentToken);
if (Style.Language == FormatStyle::LK_Proto) {
if (FormatToken *Previous = CurrentToken->getPreviousNonComment()) {
@@ -256,6 +265,18 @@ private:
}
}
}
+ if (Style.isTableGen()) {
+ if (CurrentToken->isOneOf(tok::comma, tok::equal)) {
+ // They appear as separators. Unless they are not in class definition.
+ next();
+ continue;
+ }
+ // In angle, there must be Value like tokens. Types are also able to be
+ // parsed in the same way with Values.
+ if (!parseTableGenValue())
+ return false;
+ continue;
+ }
if (!consumeToken())
return false;
}
@@ -388,6 +409,28 @@ private:
Contexts.back().IsExpression = !IsForOrCatch;
}
+ if (Style.isTableGen()) {
+ if (FormatToken *Prev = OpeningParen.Previous) {
+ if (Prev->is(TT_TableGenCondOperator)) {
+ Contexts.back().IsTableGenCondOpe = true;
+ Contexts.back().IsExpression = true;
+ } else if (Contexts.size() > 1 &&
+ Contexts[Contexts.size() - 2].IsTableGenBangOpe) {
+ // Hack to handle bang operators. The parent context's flag
+ // was set by parseTableGenSimpleValue().
+ // We have to specify the context outside because the prev of "(" may
+ // be ">", not the bang operator in this case.
+ Contexts.back().IsTableGenBangOpe = true;
+ Contexts.back().IsExpression = true;
+ } else {
+ // Otherwise, this paren seems to be a DAGArg.
+ if (!parseTableGenDAGArg())
+ return false;
+ return parseTableGenDAGArgAndList(&OpeningParen);
+ }
+ }
+ }
+
// Infer the role of the l_paren based on the previous token if we haven't
// detected one yet.
if (PrevNonComment && OpeningParen.is(TT_Unknown)) {
@@ -528,7 +571,7 @@ private:
(CurrentToken->is(tok::l_paren) && CurrentToken->Next &&
CurrentToken->Next->isOneOf(tok::star, tok::amp, tok::caret));
if ((CurrentToken->Previous->isOneOf(tok::kw_const, tok::kw_auto) ||
- CurrentToken->Previous->isSimpleTypeSpecifier()) &&
+ CurrentToken->Previous->isTypeName(LangOpts)) &&
!(CurrentToken->is(tok::l_brace) ||
(CurrentToken->is(tok::l_paren) && !ProbablyFunctionTypeLParen))) {
Contexts.back().IsExpression = false;
@@ -549,6 +592,22 @@ private:
if (CurrentToken->is(tok::comma))
Contexts.back().CanBeExpression = true;
+ if (Style.isTableGen()) {
+ if (CurrentToken->is(tok::comma)) {
+ if (Contexts.back().IsTableGenCondOpe)
+ CurrentToken->setType(TT_TableGenCondOperatorComma);
+ next();
+ } else if (CurrentToken->is(tok::colon)) {
+ if (Contexts.back().IsTableGenCondOpe)
+ CurrentToken->setType(TT_TableGenCondOperatorColon);
+ next();
+ }
+ // In TableGen there must be Values in parens.
+ if (!parseTableGenValue())
+ return false;
+ continue;
+ }
+
FormatToken *Tok = CurrentToken;
if (!consumeToken())
return false;
@@ -595,8 +654,8 @@ private:
return true;
// Limit this to being an access modifier that follows.
- if (AttrTok->isOneOf(tok::kw_public, tok::kw_private, tok::kw_protected,
- tok::comment, tok::kw_class, tok::kw_static,
+ if (AttrTok->isAccessSpecifierKeyword() ||
+ AttrTok->isOneOf(tok::comment, tok::kw_class, tok::kw_static,
tok::l_square, Keywords.kw_internal)) {
return true;
}
@@ -626,13 +685,13 @@ private:
// In C++, this can happen either in array of templates (foo<int>[10])
// or when array is a nested template type (unique_ptr<type1<type2>[]>).
bool CppArrayTemplates =
- Style.isCpp() && Parent && Parent->is(TT_TemplateCloser) &&
+ IsCpp && Parent && Parent->is(TT_TemplateCloser) &&
(Contexts.back().CanBeExpression || Contexts.back().IsExpression ||
Contexts.back().ContextType == Context::TemplateArgument);
const bool IsInnerSquare = Contexts.back().InCpp11AttributeSpecifier;
const bool IsCpp11AttributeSpecifier =
- isCppAttribute(Style.isCpp(), *Left) || IsInnerSquare;
+ isCppAttribute(IsCpp, *Left) || IsInnerSquare;
// Treat C# Attributes [STAThread] much like C++ attributes [[...]].
bool IsCSharpAttributeSpecifier =
@@ -640,12 +699,11 @@ private:
Contexts.back().InCSharpAttributeSpecifier;
bool InsideInlineASM = Line.startsWith(tok::kw_asm);
- bool IsCppStructuredBinding = Left->isCppStructuredBinding(Style);
+ bool IsCppStructuredBinding = Left->isCppStructuredBinding(IsCpp);
bool StartsObjCMethodExpr =
!IsCppStructuredBinding && !InsideInlineASM && !CppArrayTemplates &&
- Style.isCpp() && !IsCpp11AttributeSpecifier &&
- !IsCSharpAttributeSpecifier && Contexts.back().CanBeExpression &&
- Left->isNot(TT_LambdaLSquare) &&
+ IsCpp && !IsCpp11AttributeSpecifier && !IsCSharpAttributeSpecifier &&
+ Contexts.back().CanBeExpression && Left->isNot(TT_LambdaLSquare) &&
!CurrentToken->isOneOf(tok::l_brace, tok::r_square) &&
(!Parent ||
Parent->isOneOf(tok::colon, tok::l_square, tok::l_paren,
@@ -673,7 +731,7 @@ private:
Contexts.back().ContextKind == tok::l_brace &&
Parent->isOneOf(tok::l_brace, tok::comma)) {
Left->setType(TT_JsComputedPropertyName);
- } else if (Style.isCpp() && Contexts.back().ContextKind == tok::l_brace &&
+ } else if (IsCpp && Contexts.back().ContextKind == tok::l_brace &&
Parent && Parent->isOneOf(tok::l_brace, tok::comma)) {
Left->setType(TT_DesignatedInitializerLSquare);
} else if (IsCSharpAttributeSpecifier) {
@@ -776,9 +834,8 @@ private:
Parent->overwriteFixedType(TT_BinaryOperator);
}
// An arrow after an ObjC method expression is not a lambda arrow.
- if (CurrentToken->getType() == TT_ObjCMethodExpr &&
- CurrentToken->Next &&
- CurrentToken->Next->is(TT_TrailingReturnArrow)) {
+ if (CurrentToken->is(TT_ObjCMethodExpr) && CurrentToken->Next &&
+ CurrentToken->Next->is(TT_LambdaArrow)) {
CurrentToken->Next->overwriteFixedType(TT_Unknown);
}
Left->MatchingParen = CurrentToken;
@@ -803,6 +860,8 @@ private:
if (Left->BlockParameterCount > 1)
Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName = 0;
}
+ if (Style.isTableGen() && Left->is(TT_TableGenListOpener))
+ CurrentToken->setType(TT_TableGenListCloser);
next();
return true;
}
@@ -833,6 +892,19 @@ private:
Left->setType(TT_ArrayInitializerLSquare);
}
FormatToken *Tok = CurrentToken;
+ if (Style.isTableGen()) {
+ if (CurrentToken->isOneOf(tok::comma, tok::minus, tok::ellipsis)) {
+ // '-' and '...' appears as a separator in slice.
+ next();
+ } else {
+ // In TableGen there must be a list of Values in square brackets.
+ // It must be ValueList or SliceElements.
+ if (!parseTableGenValue())
+ return false;
+ }
+ updateParameterCount(Left, Tok);
+ continue;
+ }
if (!consumeToken())
return false;
updateParameterCount(Left, Tok);
@@ -840,6 +912,242 @@ private:
return false;
}
+ void skipToNextNonComment() {
+ next();
+ while (CurrentToken && CurrentToken->is(tok::comment))
+ next();
+ }
+
+ // Simplified parser for TableGen Value. Returns true on success.
+ // It consists of SimpleValues, SimpleValues with Suffixes, and Value followed
+ // by '#', paste operator.
+ // There also exists the case the Value is parsed as NameValue.
+ // In this case, the Value ends if '{' is found.
+ bool parseTableGenValue(bool ParseNameMode = false) {
+ if (!CurrentToken)
+ return false;
+ while (CurrentToken->is(tok::comment))
+ next();
+ if (!parseTableGenSimpleValue())
+ return false;
+ if (!CurrentToken)
+ return true;
+ // Value "#" [Value]
+ if (CurrentToken->is(tok::hash)) {
+ if (CurrentToken->Next &&
+ CurrentToken->Next->isOneOf(tok::colon, tok::semi, tok::l_brace)) {
+ // Trailing paste operator.
+ // These are only the allowed cases in TGParser::ParseValue().
+ CurrentToken->setType(TT_TableGenTrailingPasteOperator);
+ next();
+ return true;
+ }
+ FormatToken *HashTok = CurrentToken;
+ skipToNextNonComment();
+ HashTok->setType(TT_Unknown);
+ if (!parseTableGenValue(ParseNameMode))
+ return false;
+ }
+ // In name mode, '{' is regarded as the end of the value.
+ // See TGParser::ParseValue in TGParser.cpp
+ if (ParseNameMode && CurrentToken->is(tok::l_brace))
+ return true;
+ // These tokens indicates this is a value with suffixes.
+ if (CurrentToken->isOneOf(tok::l_brace, tok::l_square, tok::period)) {
+ CurrentToken->setType(TT_TableGenValueSuffix);
+ FormatToken *Suffix = CurrentToken;
+ skipToNextNonComment();
+ if (Suffix->is(tok::l_square))
+ return parseSquare();
+ if (Suffix->is(tok::l_brace)) {
+ Scopes.push_back(getScopeType(*Suffix));
+ return parseBrace();
+ }
+ }
+ return true;
+ }
+
+ // TokVarName ::= "$" ualpha (ualpha | "0"..."9")*
+ // Appears as a part of DagArg.
+ // This does not change the current token on fail.
+ bool tryToParseTableGenTokVar() {
+ if (!CurrentToken)
+ return false;
+ if (CurrentToken->is(tok::identifier) &&
+ CurrentToken->TokenText.front() == '$') {
+ skipToNextNonComment();
+ return true;
+ }
+ return false;
+ }
+
+ // DagArg ::= Value [":" TokVarName] | TokVarName
+ // Appears as a part of SimpleValue6.
+ bool parseTableGenDAGArg(bool AlignColon = false) {
+ if (tryToParseTableGenTokVar())
+ return true;
+ if (parseTableGenValue()) {
+ if (CurrentToken && CurrentToken->is(tok::colon)) {
+ if (AlignColon)
+ CurrentToken->setType(TT_TableGenDAGArgListColonToAlign);
+ else
+ CurrentToken->setType(TT_TableGenDAGArgListColon);
+ skipToNextNonComment();
+ return tryToParseTableGenTokVar();
+ }
+ return true;
+ }
+ return false;
+ }
+
+ // Judge if the token is a operator ID to insert line break in DAGArg.
+ // That is, TableGenBreakingDAGArgOperators is empty (by the definition of the
+ // option) or the token is in the list.
+ bool isTableGenDAGArgBreakingOperator(const FormatToken &Tok) {
+ auto &Opes = Style.TableGenBreakingDAGArgOperators;
+ // If the list is empty, all operators are breaking operators.
+ if (Opes.empty())
+ return true;
+ // Otherwise, the operator is limited to normal identifiers.
+ if (Tok.isNot(tok::identifier) ||
+ Tok.isOneOf(TT_TableGenBangOperator, TT_TableGenCondOperator)) {
+ return false;
+ }
+ // The case next is colon, it is not a operator of identifier.
+ if (!Tok.Next || Tok.Next->is(tok::colon))
+ return false;
+ return std::find(Opes.begin(), Opes.end(), Tok.TokenText.str()) !=
+ Opes.end();
+ }
+
+ // SimpleValue6 ::= "(" DagArg [DagArgList] ")"
+ // This parses SimpleValue 6's inside part of "(" ")"
+ bool parseTableGenDAGArgAndList(FormatToken *Opener) {
+ FormatToken *FirstTok = CurrentToken;
+ if (!parseTableGenDAGArg())
+ return false;
+ bool BreakInside = false;
+ if (Style.TableGenBreakInsideDAGArg != FormatStyle::DAS_DontBreak) {
+ // Specialized detection for DAGArgOperator, that determines the way of
+ // line break for this DAGArg elements.
+ if (isTableGenDAGArgBreakingOperator(*FirstTok)) {
+ // Special case for identifier DAGArg operator.
+ BreakInside = true;
+ Opener->setType(TT_TableGenDAGArgOpenerToBreak);
+ if (FirstTok->isOneOf(TT_TableGenBangOperator,
+ TT_TableGenCondOperator)) {
+ // Special case for bang/cond operators. Set the whole operator as
+ // the DAGArg operator. Always break after it.
+ CurrentToken->Previous->setType(TT_TableGenDAGArgOperatorToBreak);
+ } else if (FirstTok->is(tok::identifier)) {
+ if (Style.TableGenBreakInsideDAGArg == FormatStyle::DAS_BreakAll)
+ FirstTok->setType(TT_TableGenDAGArgOperatorToBreak);
+ else
+ FirstTok->setType(TT_TableGenDAGArgOperatorID);
+ }
+ }
+ }
+ // Parse the [DagArgList] part
+ bool FirstDAGArgListElm = true;
+ while (CurrentToken) {
+ if (!FirstDAGArgListElm && CurrentToken->is(tok::comma)) {
+ CurrentToken->setType(BreakInside ? TT_TableGenDAGArgListCommaToBreak
+ : TT_TableGenDAGArgListComma);
+ skipToNextNonComment();
+ }
+ if (CurrentToken && CurrentToken->is(tok::r_paren)) {
+ CurrentToken->setType(TT_TableGenDAGArgCloser);
+ Opener->MatchingParen = CurrentToken;
+ CurrentToken->MatchingParen = Opener;
+ skipToNextNonComment();
+ return true;
+ }
+ if (!parseTableGenDAGArg(
+ BreakInside &&
+ Style.AlignConsecutiveTableGenBreakingDAGArgColons.Enabled)) {
+ return false;
+ }
+ FirstDAGArgListElm = false;
+ }
+ return false;
+ }
+
+ bool parseTableGenSimpleValue() {
+ assert(Style.isTableGen());
+ if (!CurrentToken)
+ return false;
+ FormatToken *Tok = CurrentToken;
+ skipToNextNonComment();
+ // SimpleValue 1, 2, 3: Literals
+ if (Tok->isOneOf(tok::numeric_constant, tok::string_literal,
+ TT_TableGenMultiLineString, tok::kw_true, tok::kw_false,
+ tok::question, tok::kw_int)) {
+ return true;
+ }
+ // SimpleValue 4: ValueList, Type
+ if (Tok->is(tok::l_brace)) {
+ Scopes.push_back(getScopeType(*Tok));
+ return parseBrace();
+ }
+ // SimpleValue 5: List initializer
+ if (Tok->is(tok::l_square)) {
+ Tok->setType(TT_TableGenListOpener);
+ if (!parseSquare())
+ return false;
+ if (Tok->is(tok::less)) {
+ CurrentToken->setType(TT_TemplateOpener);
+ return parseAngle();
+ }
+ return true;
+ }
+ // SimpleValue 6: DAGArg [DAGArgList]
+ // SimpleValue6 ::= "(" DagArg [DagArgList] ")"
+ if (Tok->is(tok::l_paren)) {
+ Tok->setType(TT_TableGenDAGArgOpener);
+ return parseTableGenDAGArgAndList(Tok);
+ }
+ // SimpleValue 9: Bang operator
+ if (Tok->is(TT_TableGenBangOperator)) {
+ if (CurrentToken && CurrentToken->is(tok::less)) {
+ CurrentToken->setType(TT_TemplateOpener);
+ skipToNextNonComment();
+ if (!parseAngle())
+ return false;
+ }
+ if (!CurrentToken || CurrentToken->isNot(tok::l_paren))
+ return false;
+ skipToNextNonComment();
+ // FIXME: Hack using inheritance to child context
+ Contexts.back().IsTableGenBangOpe = true;
+ bool Result = parseParens();
+ Contexts.back().IsTableGenBangOpe = false;
+ return Result;
+ }
+ // SimpleValue 9: Cond operator
+ if (Tok->is(TT_TableGenCondOperator)) {
+ Tok = CurrentToken;
+ skipToNextNonComment();
+ if (!Tok || Tok->isNot(tok::l_paren))
+ return false;
+ bool Result = parseParens();
+ return Result;
+ }
+ // We have to check identifier at the last because the kind of bang/cond
+ // operators are also identifier.
+ // SimpleValue 7: Identifiers
+ if (Tok->is(tok::identifier)) {
+ // SimpleValue 8: Anonymous record
+ if (CurrentToken && CurrentToken->is(tok::less)) {
+ CurrentToken->setType(TT_TemplateOpener);
+ skipToNextNonComment();
+ return parseAngle();
+ }
+ return true;
+ }
+
+ return false;
+ }
+
bool couldBeInStructArrayInitializer() const {
if (Contexts.size() < 2)
return false;
@@ -880,6 +1188,8 @@ private:
OpeningBrace.getPreviousNonComment()->isNot(Keywords.kw_apostrophe))) {
Contexts.back().VerilogMayBeConcatenation = true;
}
+ if (Style.isTableGen())
+ Contexts.back().ColonIsDictLiteral = false;
unsigned CommaCount = 0;
while (CurrentToken) {
@@ -906,8 +1216,8 @@ private:
FormatToken *Previous = CurrentToken->getPreviousNonComment();
if (Previous->is(TT_JsTypeOptionalQuestion))
Previous = Previous->getPreviousNonComment();
- if ((CurrentToken->is(tok::colon) &&
- (!Contexts.back().ColonIsDictLiteral || !Style.isCpp())) ||
+ if ((CurrentToken->is(tok::colon) && !Style.isTableGen() &&
+ (!Contexts.back().ColonIsDictLiteral || !IsCpp)) ||
Style.isProto()) {
OpeningBrace.setType(TT_DictLiteral);
if (Previous->Tok.getIdentifierInfo() ||
@@ -915,10 +1225,12 @@ private:
Previous->setType(TT_SelectorName);
}
}
- if (CurrentToken->is(tok::colon) && OpeningBrace.is(TT_Unknown))
+ if (CurrentToken->is(tok::colon) && OpeningBrace.is(TT_Unknown) &&
+ !Style.isTableGen()) {
OpeningBrace.setType(TT_DictLiteral);
- else if (Style.isJavaScript())
+ } else if (Style.isJavaScript()) {
OpeningBrace.overwriteFixedType(TT_DictLiteral);
+ }
}
if (CurrentToken->is(tok::comma)) {
if (Style.isJavaScript())
@@ -961,20 +1273,26 @@ private:
}
bool parseTemplateDeclaration() {
- if (CurrentToken && CurrentToken->is(tok::less)) {
- CurrentToken->setType(TT_TemplateOpener);
- next();
- if (!parseAngle())
- return false;
- if (CurrentToken)
- CurrentToken->Previous->ClosesTemplateDeclaration = true;
- return true;
- }
- return false;
+ if (!CurrentToken || CurrentToken->isNot(tok::less))
+ return false;
+
+ CurrentToken->setType(TT_TemplateOpener);
+ next();
+
+ TemplateDeclarationDepth++;
+ const bool WellFormed = parseAngle();
+ TemplateDeclarationDepth--;
+ if (!WellFormed)
+ return false;
+
+ if (CurrentToken && TemplateDeclarationDepth == 0)
+ CurrentToken->Previous->ClosesTemplateDeclaration = true;
+
+ return true;
}
bool consumeToken() {
- if (Style.isCpp()) {
+ if (IsCpp) {
const auto *Prev = CurrentToken->getPreviousNonComment();
if (Prev && Prev->is(tok::r_square) && Prev->is(TT_AttributeSquare) &&
CurrentToken->isOneOf(tok::kw_if, tok::kw_switch, tok::kw_case,
@@ -989,6 +1307,9 @@ private:
// operators.
if (Tok->is(TT_VerilogTableItem))
return true;
+ // Multi-line string itself is a single annotated token.
+ if (Tok->is(TT_TableGenMultiLineString))
+ return true;
switch (Tok->Tok.getKind()) {
case tok::plus:
case tok::minus:
@@ -1050,6 +1371,8 @@ private:
Line.First->startsSequence(tok::kw_export, Keywords.kw_module) ||
Line.First->startsSequence(tok::kw_export, Keywords.kw_import)) {
Tok->setType(TT_ModulePartitionColon);
+ } else if (Line.First->is(tok::kw_asm)) {
+ Tok->setType(TT_InlineASMColon);
} else if (Contexts.back().ColonIsDictLiteral || Style.isProto()) {
Tok->setType(TT_DictLiteral);
if (Style.Language == FormatStyle::LK_TextProto) {
@@ -1109,6 +1432,8 @@ private:
Tok->setType(TT_CtorInitializerColon);
} else {
Tok->setType(TT_InheritanceColon);
+ if (Prev->isAccessSpecifierKeyword())
+ Line.Type = LT_AccessModifier;
}
} else if (canBeObjCSelectorComponent(*Tok->Previous) && Tok->Next &&
(Tok->Next->isOneOf(tok::r_paren, tok::comma) ||
@@ -1117,9 +1442,6 @@ private:
// This handles a special macro in ObjC code where selectors including
// the colon are passed as macro arguments.
Tok->setType(TT_ObjCMethodExpr);
- } else if (Contexts.back().ContextKind == tok::l_paren &&
- !Line.InPragmaDirective) {
- Tok->setType(TT_InlineASMColon);
}
break;
case tok::pipe:
@@ -1130,6 +1452,14 @@ private:
Tok->setType(TT_JsTypeOperator);
break;
case tok::kw_if:
+ if (Style.isTableGen()) {
+ // In TableGen it has the form 'if' <value> 'then'.
+ if (!parseTableGenValue())
+ return false;
+ if (CurrentToken && CurrentToken->is(Keywords.kw_then))
+ next(); // skip then
+ break;
+ }
if (CurrentToken &&
CurrentToken->isOneOf(tok::kw_constexpr, tok::identifier)) {
next();
@@ -1153,7 +1483,7 @@ private:
if (CurrentToken && CurrentToken->is(Keywords.kw_await))
next();
}
- if (Style.isCpp() && CurrentToken && CurrentToken->is(tok::kw_co_await))
+ if (IsCpp && CurrentToken && CurrentToken->is(tok::kw_co_await))
next();
Contexts.back().ColonIsForRangeExpr = true;
if (!CurrentToken || CurrentToken->isNot(tok::l_paren))
@@ -1225,23 +1555,27 @@ private:
return false;
if (Line.MustBeDeclaration && Contexts.size() == 1 &&
!Contexts.back().IsExpression && !Line.startsWith(TT_ObjCProperty) &&
+ !Line.startsWith(tok::l_paren) &&
!Tok->isOneOf(TT_TypeDeclarationParen, TT_RequiresExpressionLParen)) {
if (const auto *Previous = Tok->Previous;
!Previous ||
(!Previous->isAttribute() &&
!Previous->isOneOf(TT_RequiresClause, TT_LeadingJavaAnnotation))) {
Line.MightBeFunctionDecl = true;
+ Tok->MightBeFunctionDeclParen = true;
}
}
break;
case tok::l_square:
+ if (Style.isTableGen())
+ Tok->setType(TT_TableGenListOpener);
if (!parseSquare())
return false;
break;
case tok::l_brace:
if (Style.Language == FormatStyle::LK_TextProto) {
FormatToken *Previous = Tok->getPreviousNonComment();
- if (Previous && Previous->getType() != TT_DictLiteral)
+ if (Previous && Previous->isNot(TT_DictLiteral))
Previous->setType(TT_SelectorName);
}
Scopes.push_back(getScopeType(*Tok));
@@ -1261,9 +1595,11 @@ private:
Tok->Previous->isOneOf(TT_SelectorName, TT_DictLiteral))) {
Tok->setType(TT_DictLiteral);
FormatToken *Previous = Tok->getPreviousNonComment();
- if (Previous && Previous->getType() != TT_DictLiteral)
+ if (Previous && Previous->isNot(TT_DictLiteral))
Previous->setType(TT_SelectorName);
}
+ if (Style.isTableGen())
+ Tok->setType(TT_TemplateOpener);
} else {
Tok->setType(TT_BinaryOperator);
NonTemplateLess.insert(Tok);
@@ -1283,7 +1619,7 @@ private:
return false;
break;
case tok::greater:
- if (Style.Language != FormatStyle::LK_TextProto)
+ if (Style.Language != FormatStyle::LK_TextProto && Tok->is(TT_Unknown))
Tok->setType(TT_BinaryOperator);
if (Tok->Previous && Tok->Previous->is(TT_TemplateCloser))
Tok->SpacesRequiredBefore = 1;
@@ -1423,10 +1759,29 @@ private:
if (!Tok->getPreviousNonComment())
Line.IsContinuation = true;
}
+ if (Style.isTableGen()) {
+ if (Tok->is(Keywords.kw_assert)) {
+ if (!parseTableGenValue())
+ return false;
+ } else if (Tok->isOneOf(Keywords.kw_def, Keywords.kw_defm) &&
+ (!Tok->Next ||
+ !Tok->Next->isOneOf(tok::colon, tok::l_brace))) {
+ // The case NameValue appears.
+ if (!parseTableGenValue(true))
+ return false;
+ }
+ }
break;
case tok::arrow:
- if (Tok->Previous && Tok->Previous->is(tok::kw_noexcept))
+ if (Tok->isNot(TT_LambdaArrow) && Tok->Previous &&
+ Tok->Previous->is(tok::kw_noexcept)) {
Tok->setType(TT_TrailingReturnArrow);
+ }
+ break;
+ case tok::equal:
+ // In TableGen, there must be a value after "=";
+ if (Style.isTableGen() && !parseTableGenValue())
+ return false;
break;
default:
break;
@@ -1564,6 +1919,8 @@ private:
case tok::pp_elif:
Contexts.back().IsExpression = true;
next();
+ if (CurrentToken)
+ CurrentToken->SpacesRequiredBefore = true;
parseLine();
break;
default:
@@ -1658,6 +2015,8 @@ public:
if (!consumeToken())
return LT_Invalid;
}
+ if (Line.Type == LT_AccessModifier)
+ return LT_AccessModifier;
if (KeywordVirtualFound)
return LT_VirtualFunctionDecl;
if (ImportStatement)
@@ -1703,11 +2062,11 @@ private:
TT_LambdaLSquare, TT_LambdaLBrace, TT_AttributeMacro, TT_IfMacro,
TT_ForEachMacro, TT_TypenameMacro, TT_FunctionLBrace,
TT_ImplicitStringLiteral, TT_InlineASMBrace, TT_FatArrow,
- TT_NamespaceMacro, TT_OverloadedOperator, TT_RegexLiteral,
- TT_TemplateString, TT_ObjCStringLiteral, TT_UntouchableMacroFunc,
- TT_StatementAttributeLikeMacro, TT_FunctionLikeOrFreestandingMacro,
- TT_ClassLBrace, TT_EnumLBrace, TT_RecordLBrace, TT_StructLBrace,
- TT_UnionLBrace, TT_RequiresClause,
+ TT_LambdaArrow, TT_NamespaceMacro, TT_OverloadedOperator,
+ TT_RegexLiteral, TT_TemplateString, TT_ObjCStringLiteral,
+ TT_UntouchableMacroFunc, TT_StatementAttributeLikeMacro,
+ TT_FunctionLikeOrFreestandingMacro, TT_ClassLBrace, TT_EnumLBrace,
+ TT_RecordLBrace, TT_StructLBrace, TT_UnionLBrace, TT_RequiresClause,
TT_RequiresClauseInARequiresExpression, TT_RequiresExpression,
TT_RequiresExpressionLParen, TT_RequiresExpressionLBrace,
TT_BracedListLBrace)) {
@@ -1757,6 +2116,9 @@ private:
// Whether the braces may mean concatenation instead of structure or array
// literal.
bool VerilogMayBeConcatenation = false;
+ bool IsTableGenDAGArg = false;
+ bool IsTableGenBangOpe = false;
+ bool IsTableGenCondOpe = false;
enum {
Unknown,
// Like the part after `:` in a constructor.
@@ -1890,7 +2252,7 @@ private:
Contexts.back().IsExpression = true;
} else if (Current.is(TT_TrailingReturnArrow)) {
Contexts.back().IsExpression = false;
- } else if (Current.is(Keywords.kw_assert)) {
+ } else if (Current.isOneOf(TT_LambdaArrow, Keywords.kw_assert)) {
Contexts.back().IsExpression = Style.Language == FormatStyle::LK_Java;
} else if (Current.Previous &&
Current.Previous->is(TT_CtorInitializerColon)) {
@@ -1986,7 +2348,7 @@ private:
if (Current.Previous) {
bool IsIdentifier =
Style.isJavaScript()
- ? Keywords.IsJavaScriptIdentifier(
+ ? Keywords.isJavaScriptIdentifier(
*Current.Previous, /* AcceptIdentifierName= */ true)
: Current.Previous->is(tok::identifier);
if (IsIdentifier ||
@@ -2009,7 +2371,8 @@ private:
// Line.MightBeFunctionDecl can only be true after the parentheses of a
// function declaration have been found. In this case, 'Current' is a
// trailing token of this declaration and thus cannot be a name.
- if (Current.is(Keywords.kw_instanceof)) {
+ if ((Style.isJavaScript() || Style.Language == FormatStyle::LK_Java) &&
+ Current.is(Keywords.kw_instanceof)) {
Current.setType(TT_BinaryOperator);
} else if (isStartOfName(Current) &&
(!Line.MightBeFunctionDecl || Current.NestingLevel != 0)) {
@@ -2024,7 +2387,7 @@ private:
AutoFound = true;
} else if (Current.is(tok::arrow) &&
Style.Language == FormatStyle::LK_Java) {
- Current.setType(TT_TrailingReturnArrow);
+ Current.setType(TT_LambdaArrow);
} else if (Current.is(tok::arrow) && Style.isVerilog()) {
// The implication operator.
Current.setType(TT_BinaryOperator);
@@ -2061,6 +2424,9 @@ private:
// In JavaScript, `interface X { foo?(): bar; }` is an optional method
// on the interface, not a ternary expression.
Current.setType(TT_JsTypeOptionalQuestion);
+ } else if (Style.isTableGen()) {
+ // In TableGen, '?' is just an identifier like token.
+ Current.setType(TT_Unknown);
} else {
Current.setType(TT_ConditionalExpr);
}
@@ -2108,9 +2474,9 @@ private:
Current.setType(TT_CastRParen);
if (Current.MatchingParen && Current.Next &&
!Current.Next->isBinaryOperator() &&
- !Current.Next->isOneOf(tok::semi, tok::colon, tok::l_brace,
- tok::comma, tok::period, tok::arrow,
- tok::coloncolon, tok::kw_noexcept)) {
+ !Current.Next->isOneOf(
+ tok::semi, tok::colon, tok::l_brace, tok::l_paren, tok::comma,
+ tok::period, tok::arrow, tok::coloncolon, tok::kw_noexcept)) {
if (FormatToken *AfterParen = Current.MatchingParen->Next;
AfterParen && AfterParen->isNot(tok::caret)) {
// Make sure this isn't the return type of an Obj-C block declaration.
@@ -2239,6 +2605,9 @@ private:
// keywords such as let and def* defines names.
if (Keywords.isTableGenDefinition(*PreviousNotConst))
return true;
+ // Otherwise C++ style declarations is available only inside the brace.
+ if (Contexts.back().ContextKind != tok::l_brace)
+ return false;
}
bool IsPPKeyword = PreviousNotConst->is(tok::identifier) &&
@@ -2263,15 +2632,17 @@ private:
return false;
// int a or auto a.
- if (PreviousNotConst->isOneOf(tok::identifier, tok::kw_auto))
+ if (PreviousNotConst->isOneOf(tok::identifier, tok::kw_auto) &&
+ PreviousNotConst->isNot(TT_StatementAttributeLikeMacro)) {
return true;
+ }
// *a or &a or &&a.
if (PreviousNotConst->is(TT_PointerOrReference))
return true;
// MyClass a;
- if (PreviousNotConst->isSimpleTypeSpecifier())
+ if (PreviousNotConst->isTypeName(LangOpts))
return true;
// type[] a in Java
@@ -2287,7 +2658,7 @@ private:
/// Determine whether '(' is starting a C++ cast.
bool lParenStartsCppCast(const FormatToken &Tok) {
// C-style casts are only used in C++.
- if (!Style.isCpp())
+ if (!IsCpp)
return false;
FormatToken *LeftOfParens = Tok.getPreviousNonComment();
@@ -2307,20 +2678,27 @@ private:
/// Determine whether ')' is ending a cast.
bool rParenEndsCast(const FormatToken &Tok) {
+ assert(Tok.is(tok::r_paren));
+
+ if (!Tok.MatchingParen || !Tok.Previous)
+ return false;
+
// C-style casts are only used in C++, C# and Java.
- if (!Style.isCSharp() && !Style.isCpp() &&
- Style.Language != FormatStyle::LK_Java) {
+ if (!IsCpp && !Style.isCSharp() && Style.Language != FormatStyle::LK_Java)
return false;
- }
+
+ const auto *LParen = Tok.MatchingParen;
+ const auto *BeforeRParen = Tok.Previous;
+ const auto *AfterRParen = Tok.Next;
// Empty parens aren't casts and there are no casts at the end of the line.
- if (Tok.Previous == Tok.MatchingParen || !Tok.Next || !Tok.MatchingParen)
+ if (BeforeRParen == LParen || !AfterRParen)
return false;
- if (Tok.MatchingParen->is(TT_OverloadedOperatorLParen))
+ if (LParen->is(TT_OverloadedOperatorLParen))
return false;
- FormatToken *LeftOfParens = Tok.MatchingParen->getPreviousNonComment();
+ auto *LeftOfParens = LParen->getPreviousNonComment();
if (LeftOfParens) {
// If there is a closing parenthesis left of the current
// parentheses, look past it as these might be chained casts.
@@ -2376,37 +2754,41 @@ private:
}
}
- if (Tok.Next->isOneOf(tok::question, tok::ampamp))
+ if (AfterRParen->is(tok::question) ||
+ (AfterRParen->is(tok::ampamp) && !BeforeRParen->isTypeName(LangOpts))) {
return false;
+ }
// `foreach((A a, B b) in someList)` should not be seen as a cast.
- if (Tok.Next->is(Keywords.kw_in) && Style.isCSharp())
+ if (AfterRParen->is(Keywords.kw_in) && Style.isCSharp())
return false;
// Functions which end with decorations like volatile, noexcept are unlikely
// to be casts.
- if (Tok.Next->isOneOf(tok::kw_noexcept, tok::kw_volatile, tok::kw_const,
- tok::kw_requires, tok::kw_throw, tok::arrow,
- Keywords.kw_override, Keywords.kw_final) ||
- isCppAttribute(Style.isCpp(), *Tok.Next)) {
+ if (AfterRParen->isOneOf(tok::kw_noexcept, tok::kw_volatile, tok::kw_const,
+ tok::kw_requires, tok::kw_throw, tok::arrow,
+ Keywords.kw_override, Keywords.kw_final) ||
+ isCppAttribute(IsCpp, *AfterRParen)) {
return false;
}
// As Java has no function types, a "(" after the ")" likely means that this
// is a cast.
- if (Style.Language == FormatStyle::LK_Java && Tok.Next->is(tok::l_paren))
+ if (Style.Language == FormatStyle::LK_Java && AfterRParen->is(tok::l_paren))
return true;
// If a (non-string) literal follows, this is likely a cast.
- if (Tok.Next->isOneOf(tok::kw_sizeof, tok::kw_alignof) ||
- (Tok.Next->Tok.isLiteral() && Tok.Next->isNot(tok::string_literal))) {
+ if (AfterRParen->isOneOf(tok::kw_sizeof, tok::kw_alignof) ||
+ (AfterRParen->Tok.isLiteral() &&
+ AfterRParen->isNot(tok::string_literal))) {
return true;
}
// Heuristically try to determine whether the parentheses contain a type.
- auto IsQualifiedPointerOrReference = [](FormatToken *T) {
+ auto IsQualifiedPointerOrReference = [](const FormatToken *T,
+ const LangOptions &LangOpts) {
// This is used to handle cases such as x = (foo *const)&y;
- assert(!T->isSimpleTypeSpecifier() && "Should have already been checked");
+ assert(!T->isTypeName(LangOpts) && "Should have already been checked");
// Strip trailing qualifiers such as const or volatile when checking
// whether the parens could be a cast to a pointer/reference type.
while (T) {
@@ -2436,12 +2818,11 @@ private:
return T && T->is(TT_PointerOrReference);
};
bool ParensAreType =
- !Tok.Previous ||
- Tok.Previous->isOneOf(TT_TemplateCloser, TT_TypeDeclarationParen) ||
- Tok.Previous->isSimpleTypeSpecifier() ||
- IsQualifiedPointerOrReference(Tok.Previous);
+ BeforeRParen->isOneOf(TT_TemplateCloser, TT_TypeDeclarationParen) ||
+ BeforeRParen->isTypeName(LangOpts) ||
+ IsQualifiedPointerOrReference(BeforeRParen, LangOpts);
bool ParensCouldEndDecl =
- Tok.Next->isOneOf(tok::equal, tok::semi, tok::l_brace, tok::greater);
+ AfterRParen->isOneOf(tok::equal, tok::semi, tok::l_brace, tok::greater);
if (ParensAreType && !ParensCouldEndDecl)
return true;
@@ -2453,49 +2834,65 @@ private:
// Certain token types inside the parentheses mean that this can't be a
// cast.
- for (const FormatToken *Token = Tok.MatchingParen->Next; Token != &Tok;
- Token = Token->Next) {
+ for (const auto *Token = LParen->Next; Token != &Tok; Token = Token->Next)
if (Token->is(TT_BinaryOperator))
return false;
- }
// If the following token is an identifier or 'this', this is a cast. All
// cases where this can be something else are handled above.
- if (Tok.Next->isOneOf(tok::identifier, tok::kw_this))
+ if (AfterRParen->isOneOf(tok::identifier, tok::kw_this))
return true;
// Look for a cast `( x ) (`.
- if (Tok.Next->is(tok::l_paren) && Tok.Previous && Tok.Previous->Previous) {
- if (Tok.Previous->is(tok::identifier) &&
- Tok.Previous->Previous->is(tok::l_paren)) {
+ if (AfterRParen->is(tok::l_paren) && BeforeRParen->Previous) {
+ if (BeforeRParen->is(tok::identifier) &&
+ BeforeRParen->Previous->is(tok::l_paren)) {
return true;
}
}
- if (!Tok.Next->Next)
+ if (!AfterRParen->Next)
return false;
+ if (AfterRParen->is(tok::l_brace) &&
+ AfterRParen->getBlockKind() == BK_BracedInit) {
+ return true;
+ }
+
// If the next token after the parenthesis is a unary operator, assume
// that this is cast, unless there are unexpected tokens inside the
// parenthesis.
- const bool NextIsAmpOrStar = Tok.Next->isOneOf(tok::amp, tok::star);
- if (!(Tok.Next->isUnaryOperator() || NextIsAmpOrStar) ||
- Tok.Next->is(tok::plus) ||
- !Tok.Next->Next->isOneOf(tok::identifier, tok::numeric_constant)) {
+ const bool NextIsAmpOrStar = AfterRParen->isOneOf(tok::amp, tok::star);
+ if (!(AfterRParen->isUnaryOperator() || NextIsAmpOrStar) ||
+ AfterRParen->is(tok::plus) ||
+ !AfterRParen->Next->isOneOf(tok::identifier, tok::numeric_constant)) {
return false;
}
+
if (NextIsAmpOrStar &&
- (Tok.Next->Next->is(tok::numeric_constant) || Line.InPPDirective)) {
+ (AfterRParen->Next->is(tok::numeric_constant) || Line.InPPDirective)) {
return false;
}
- if (Line.InPPDirective && Tok.Next->is(tok::minus))
+
+ if (Line.InPPDirective && AfterRParen->is(tok::minus))
return false;
+
// Search for unexpected tokens.
- for (FormatToken *Prev = Tok.Previous; Prev != Tok.MatchingParen;
- Prev = Prev->Previous) {
+ for (auto *Prev = BeforeRParen; Prev != LParen; Prev = Prev->Previous) {
+ if (Prev->is(tok::r_paren)) {
+ if (Prev->is(TT_CastRParen))
+ return false;
+ Prev = Prev->MatchingParen;
+ if (!Prev)
+ return false;
+ if (Prev->is(TT_FunctionTypeLParen))
+ break;
+ continue;
+ }
if (!Prev->isOneOf(tok::kw_const, tok::identifier, tok::coloncolon))
return false;
}
+
return true;
}
@@ -2562,6 +2959,8 @@ private:
return TT_UnaryOperator;
if (PrevToken->is(TT_TypeName))
return TT_PointerOrReference;
+ if (PrevToken->isOneOf(tok::kw_new, tok::kw_delete) && Tok.is(tok::ampamp))
+ return TT_BinaryOperator;
const FormatToken *NextToken = Tok.getNextNonComment();
@@ -2709,6 +3108,8 @@ private:
AnnotatedLine &Line;
FormatToken *CurrentToken;
bool AutoFound;
+ bool IsCpp;
+ LangOptions LangOpts;
const AdditionalKeywords &Keywords;
SmallVector<ScopeType> &Scopes;
@@ -2718,6 +3119,8 @@ private:
// same decision irrespective of the decisions for tokens leading up to it.
// Store this information to prevent this from causing exponential runtime.
llvm::SmallPtrSet<FormatToken *, 16> NonTemplateLess;
+
+ int TemplateDeclarationDepth;
};
static const int PrecedenceUnaryOperator = prec::PointerToMember + 1;
@@ -2881,7 +3284,7 @@ private:
}
if (Current->is(TT_JsComputedPropertyName))
return prec::Assignment;
- if (Current->is(TT_TrailingReturnArrow))
+ if (Current->is(TT_LambdaArrow))
return prec::Comma;
if (Current->is(TT_FatArrow))
return prec::Assignment;
@@ -3055,7 +3458,8 @@ private:
} else {
break;
}
- } else if (Tok->is(tok::hash)) {
+ } else if (Tok->is(Keywords.kw_verilogHash)) {
+ // Delay control.
if (Next->is(tok::l_paren))
Next = Next->MatchingParen;
if (Next)
@@ -3164,7 +3568,8 @@ static unsigned maxNestingDepth(const AnnotatedLine &Line) {
// Returns the name of a function with no return type, e.g. a constructor or
// destructor.
-static FormatToken *getFunctionName(const AnnotatedLine &Line) {
+static FormatToken *getFunctionName(const AnnotatedLine &Line,
+ FormatToken *&OpeningParen) {
for (FormatToken *Tok = Line.getFirstNonComment(), *Name = nullptr; Tok;
Tok = Tok->getNextNonComment()) {
// Skip C++11 attributes both before and after the function name.
@@ -3177,10 +3582,12 @@ static FormatToken *getFunctionName(const AnnotatedLine &Line) {
// Make sure the name is followed by a pair of parentheses.
if (Name) {
- return Tok->is(tok::l_paren) && Tok->isNot(TT_FunctionTypeLParen) &&
- Tok->MatchingParen
- ? Name
- : nullptr;
+ if (Tok->is(tok::l_paren) && Tok->isNot(TT_FunctionTypeLParen) &&
+ Tok->MatchingParen) {
+ OpeningParen = Tok;
+ return Name;
+ }
+ return nullptr;
}
// Skip keywords that may precede the constructor/destructor name.
@@ -3256,11 +3663,14 @@ void TokenAnnotator::annotate(AnnotatedLine &Line) {
ExpressionParser ExprParser(Style, Keywords, Line);
ExprParser.parse();
- if (Style.isCpp()) {
- auto *Tok = getFunctionName(Line);
+ if (IsCpp) {
+ FormatToken *OpeningParen = nullptr;
+ auto *Tok = getFunctionName(Line, OpeningParen);
if (Tok && ((!Scopes.empty() && Scopes.back() == ST_Class) ||
Line.endsWith(TT_FunctionLBrace) || isCtorOrDtorName(Tok))) {
Tok->setFinalizedType(TT_CtorDtorDeclName);
+ assert(OpeningParen);
+ OpeningParen->setFinalizedType(TT_FunctionDeclarationLParen);
}
}
@@ -3274,16 +3684,12 @@ void TokenAnnotator::annotate(AnnotatedLine &Line) {
auto *First = Line.First;
First->SpacesRequiredBefore = 1;
First->CanBreakBefore = First->MustBreakBefore;
-
- if (First->is(tok::eof) && First->NewlinesBefore == 0 &&
- Style.InsertNewlineAtEOF) {
- First->NewlinesBefore = 1;
- }
}
// This function heuristically determines whether 'Current' starts the name of a
// function declaration.
-static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
+static bool isFunctionDeclarationName(const LangOptions &LangOpts,
+ const FormatToken &Current,
const AnnotatedLine &Line,
FormatToken *&ClosingParen) {
assert(Current.Previous);
@@ -3294,7 +3700,15 @@ static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
if (!Current.Tok.getIdentifierInfo())
return false;
- auto skipOperatorName = [](const FormatToken *Next) -> const FormatToken * {
+ const auto &Previous = *Current.Previous;
+
+ if (const auto *PrevPrev = Previous.Previous;
+ PrevPrev && PrevPrev->is(TT_ObjCDecl)) {
+ return false;
+ }
+
+ auto skipOperatorName =
+ [&LangOpts](const FormatToken *Next) -> const FormatToken * {
for (; Next; Next = Next->Next) {
if (Next->is(TT_OverloadedOperatorLParen))
return Next;
@@ -3313,7 +3727,7 @@ static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
Next = Next->Next;
continue;
}
- if ((Next->isSimpleTypeSpecifier() || Next->is(tok::identifier)) &&
+ if ((Next->isTypeName(LangOpts) || Next->is(tok::identifier)) &&
Next->Next && Next->Next->isPointerOrReference()) {
// For operator void*(), operator char*(), operator Foo*().
Next = Next->Next;
@@ -3329,21 +3743,22 @@ static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
return nullptr;
};
+ const auto *Next = Current.Next;
+ const bool IsCpp = LangOpts.CXXOperatorNames;
+
// Find parentheses of parameter list.
- const FormatToken *Next = Current.Next;
if (Current.is(tok::kw_operator)) {
- const auto *Previous = Current.Previous;
- if (Previous->Tok.getIdentifierInfo() &&
- !Previous->isOneOf(tok::kw_return, tok::kw_co_return)) {
+ if (Previous.Tok.getIdentifierInfo() &&
+ !Previous.isOneOf(tok::kw_return, tok::kw_co_return)) {
return true;
}
- if (Previous->is(tok::r_paren) && Previous->is(TT_TypeDeclarationParen)) {
- assert(Previous->MatchingParen);
- assert(Previous->MatchingParen->is(tok::l_paren));
- assert(Previous->MatchingParen->is(TT_TypeDeclarationParen));
+ if (Previous.is(tok::r_paren) && Previous.is(TT_TypeDeclarationParen)) {
+ assert(Previous.MatchingParen);
+ assert(Previous.MatchingParen->is(tok::l_paren));
+ assert(Previous.MatchingParen->is(TT_TypeDeclarationParen));
return true;
}
- if (!Previous->isPointerOrReference() && Previous->isNot(TT_TemplateCloser))
+ if (!Previous.isPointerOrReference() && Previous.isNot(TT_TemplateCloser))
return false;
Next = skipOperatorName(Next);
} else {
@@ -3411,9 +3826,8 @@ static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
Tok = Tok->MatchingParen;
continue;
}
- if (Tok->is(tok::kw_const) || Tok->isSimpleTypeSpecifier() ||
- Tok->isOneOf(TT_PointerOrReference, TT_StartOfName, tok::ellipsis,
- TT_TypeName)) {
+ if (Tok->is(tok::kw_const) || Tok->isTypeName(LangOpts) ||
+ Tok->isOneOf(TT_PointerOrReference, TT_StartOfName, tok::ellipsis)) {
return true;
}
if (Tok->isOneOf(tok::l_brace, TT_ObjCMethodExpr) || Tok->Tok.isLiteral())
@@ -3425,15 +3839,16 @@ static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
bool TokenAnnotator::mustBreakForReturnType(const AnnotatedLine &Line) const {
assert(Line.MightBeFunctionDecl);
- if ((Style.AlwaysBreakAfterReturnType == FormatStyle::RTBS_TopLevel ||
- Style.AlwaysBreakAfterReturnType ==
- FormatStyle::RTBS_TopLevelDefinitions) &&
+ if ((Style.BreakAfterReturnType == FormatStyle::RTBS_TopLevel ||
+ Style.BreakAfterReturnType == FormatStyle::RTBS_TopLevelDefinitions) &&
Line.Level > 0) {
return false;
}
- switch (Style.AlwaysBreakAfterReturnType) {
+ switch (Style.BreakAfterReturnType) {
case FormatStyle::RTBS_None:
+ case FormatStyle::RTBS_Automatic:
+ case FormatStyle::RTBS_ExceptShortType:
return false;
case FormatStyle::RTBS_All:
case FormatStyle::RTBS_TopLevel:
@@ -3462,7 +3877,6 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) const {
if (AlignArrayOfStructures)
calculateArrayInitializerColumnList(Line);
- const bool IsCpp = Style.isCpp();
bool SeenName = false;
bool LineIsFunctionDeclaration = false;
FormatToken *ClosingParen = nullptr;
@@ -3475,11 +3889,17 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) const {
AfterLastAttribute = Tok;
if (const bool IsCtorOrDtor = Tok->is(TT_CtorDtorDeclName);
IsCtorOrDtor ||
- isFunctionDeclarationName(Style.isCpp(), *Tok, Line, ClosingParen)) {
+ isFunctionDeclarationName(LangOpts, *Tok, Line, ClosingParen)) {
if (!IsCtorOrDtor)
Tok->setFinalizedType(TT_FunctionDeclarationName);
LineIsFunctionDeclaration = true;
SeenName = true;
+ if (ClosingParen) {
+ auto *OpeningParen = ClosingParen->MatchingParen;
+ assert(OpeningParen);
+ if (OpeningParen->is(TT_Unknown))
+ OpeningParen->setType(TT_FunctionDeclarationLParen);
+ }
break;
}
}
@@ -3513,7 +3933,7 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) const {
do {
Tok = Tok->Next;
} while (Tok && Tok->isNot(TT_OverloadedOperatorLParen));
- if (!Tok)
+ if (!Tok || !Tok->MatchingParen)
break;
const auto *LeftParen = Tok;
for (Tok = Tok->Next; Tok && Tok != LeftParen->MatchingParen;
@@ -3532,6 +3952,8 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) const {
}
} else if (ClosingParen) {
for (auto *Tok = ClosingParen->Next; Tok; Tok = Tok->Next) {
+ if (Tok->is(TT_CtorInitializerColon))
+ break;
if (Tok->is(tok::arrow)) {
Tok->setType(TT_TrailingReturnArrow);
break;
@@ -3781,7 +4203,7 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
}
if (Right.is(TT_PointerOrReference))
return 190;
- if (Right.is(TT_TrailingReturnArrow))
+ if (Right.is(TT_LambdaArrow))
return 110;
if (Left.is(tok::equal) && Right.is(tok::l_brace))
return 160;
@@ -3966,11 +4388,24 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return Left.is(tok::hash);
if (Left.isOneOf(tok::hashhash, tok::hash))
return Right.is(tok::hash);
+ if (Left.is(BK_Block) && Right.is(tok::r_brace) &&
+ Right.MatchingParen == &Left && Line.Children.empty()) {
+ return Style.SpaceInEmptyBlock;
+ }
if ((Left.is(tok::l_paren) && Right.is(tok::r_paren)) ||
(Left.is(tok::l_brace) && Left.isNot(BK_Block) &&
Right.is(tok::r_brace) && Right.isNot(BK_Block))) {
return Style.SpacesInParensOptions.InEmptyParentheses;
}
+ if (Style.SpacesInParens == FormatStyle::SIPO_Custom &&
+ Style.SpacesInParensOptions.ExceptDoubleParentheses &&
+ Left.is(tok::r_paren) && Right.is(tok::r_paren)) {
+ auto *InnerLParen = Left.MatchingParen;
+ if (InnerLParen && InnerLParen->Previous == Right.MatchingParen) {
+ InnerLParen->SpacesRequiredBefore = 0;
+ return false;
+ }
+ }
if (Style.SpacesInParensOptions.InConditionalStatements) {
const FormatToken *LeftParen = nullptr;
if (Left.is(tok::l_paren))
@@ -3996,9 +4431,11 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Left.is(tok::kw_auto) && Right.isOneOf(tok::l_paren, tok::l_brace))
return false;
+ const auto *BeforeLeft = Left.Previous;
+
// operator co_await(x)
- if (Right.is(tok::l_paren) && Left.is(tok::kw_co_await) && Left.Previous &&
- Left.Previous->is(tok::kw_operator)) {
+ if (Right.is(tok::l_paren) && Left.is(tok::kw_co_await) && BeforeLeft &&
+ BeforeLeft->is(tok::kw_operator)) {
return false;
}
// co_await (x), co_yield (x), co_return (x)
@@ -4049,8 +4486,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return false;
}
if (Right.is(tok::ellipsis)) {
- return Left.Tok.isLiteral() || (Left.is(tok::identifier) && Left.Previous &&
- Left.Previous->is(tok::kw_case));
+ return Left.Tok.isLiteral() || (Left.is(tok::identifier) && BeforeLeft &&
+ BeforeLeft->is(tok::kw_case));
}
if (Left.is(tok::l_square) && Right.is(tok::amp))
return Style.SpacesInSquareBrackets;
@@ -4075,7 +4512,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Left.Tok.isLiteral())
return true;
// for (auto a = 0, b = 0; const auto & c : {1, 2, 3})
- if (Left.isTypeOrIdentifier() && Right.Next && Right.Next->Next &&
+ if (Left.isTypeOrIdentifier(LangOpts) && Right.Next && Right.Next->Next &&
Right.Next->Next->is(TT_RangeBasedForLoopColon)) {
return getTokenPointerOrReferenceAlignment(Right) !=
FormatStyle::PAS_Left;
@@ -4118,7 +4555,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Right.is(tok::l_brace) && Right.is(BK_Block))
return true;
// for (auto a = 0, b = 0; const auto& c : {1, 2, 3})
- if (Left.Previous && Left.Previous->isTypeOrIdentifier() && Right.Next &&
+ if (BeforeLeft && BeforeLeft->isTypeOrIdentifier(LangOpts) && Right.Next &&
Right.Next->is(TT_RangeBasedForLoopColon)) {
return getTokenPointerOrReferenceAlignment(Left) !=
FormatStyle::PAS_Right;
@@ -4141,12 +4578,23 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
startsWithInitStatement(Line)))) {
return false;
}
- return Left.Previous && !Left.Previous->isOneOf(
- tok::l_paren, tok::coloncolon, tok::l_square);
+ if (!BeforeLeft)
+ return false;
+ if (BeforeLeft->is(tok::coloncolon)) {
+ if (Left.isNot(tok::star))
+ return false;
+ assert(Style.PointerAlignment != FormatStyle::PAS_Right);
+ if (!Right.startsSequence(tok::identifier, tok::r_paren))
+ return true;
+ assert(Right.Next);
+ const auto *LParen = Right.Next->MatchingParen;
+ return !LParen || LParen->isNot(TT_FunctionTypeLParen);
+ }
+ return !BeforeLeft->isOneOf(tok::l_paren, tok::l_square);
}
// Ensure right pointer alignment with ellipsis e.g. int *...P
- if (Left.is(tok::ellipsis) && Left.Previous &&
- Left.Previous->isPointerOrReference()) {
+ if (Left.is(tok::ellipsis) && BeforeLeft &&
+ BeforeLeft->isPointerOrReference()) {
return Style.PointerAlignment != FormatStyle::PAS_Right;
}
@@ -4157,7 +4605,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Right.isPointerOrReference()) {
const FormatToken *Previous = &Left;
while (Previous && Previous->isNot(tok::kw_operator)) {
- if (Previous->is(tok::identifier) || Previous->isSimpleTypeSpecifier()) {
+ if (Previous->is(tok::identifier) || Previous->isTypeName(LangOpts)) {
Previous = Previous->getPreviousNonComment();
continue;
}
@@ -4293,14 +4741,13 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Right.is(TT_OverloadedOperatorLParen))
return spaceRequiredBeforeParens(Right);
// Function declaration or definition
- if (Line.MightBeFunctionDecl && (Left.is(TT_FunctionDeclarationName))) {
- if (Line.mightBeFunctionDefinition()) {
- return Style.SpaceBeforeParensOptions.AfterFunctionDefinitionName ||
- spaceRequiredBeforeParens(Right);
- } else {
- return Style.SpaceBeforeParensOptions.AfterFunctionDeclarationName ||
- spaceRequiredBeforeParens(Right);
- }
+ if (Line.MightBeFunctionDecl && Right.is(TT_FunctionDeclarationLParen)) {
+ if (spaceRequiredBeforeParens(Right))
+ return true;
+ const auto &Options = Style.SpaceBeforeParensOptions;
+ return Line.mightBeFunctionDefinition()
+ ? Options.AfterFunctionDefinitionName
+ : Options.AfterFunctionDeclarationName;
}
// Lambda
if (Line.Type != LT_PreprocessorDirective && Left.is(tok::r_square) &&
@@ -4308,13 +4755,13 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return Style.SpaceBeforeParensOptions.AfterFunctionDefinitionName ||
spaceRequiredBeforeParens(Right);
}
- if (!Left.Previous || Left.Previous->isNot(tok::period)) {
+ if (!BeforeLeft || !BeforeLeft->isOneOf(tok::period, tok::arrow)) {
if (Left.isOneOf(tok::kw_try, Keywords.kw___except, tok::kw_catch)) {
return Style.SpaceBeforeParensOptions.AfterControlStatements ||
spaceRequiredBeforeParens(Right);
}
if (Left.isOneOf(tok::kw_new, tok::kw_delete)) {
- return ((!Line.MightBeFunctionDecl || !Left.Previous) &&
+ return ((!Line.MightBeFunctionDecl || !BeforeLeft) &&
Style.SpaceBeforeParens != FormatStyle::SBPO_Never) ||
spaceRequiredBeforeParens(Right);
}
@@ -4346,7 +4793,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (!Style.isVerilog() &&
(Left.isOneOf(tok::identifier, tok::greater, tok::r_square,
tok::r_paren) ||
- Left.isSimpleTypeSpecifier()) &&
+ Left.isTypeName(LangOpts)) &&
Right.is(tok::l_brace) && Right.getNextNonComment() &&
Right.isNot(BK_Block)) {
return false;
@@ -4382,8 +4829,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
// Objective-C dictionary literal -> no space before closing brace.
return false;
}
- if (Right.getType() == TT_TrailingAnnotation &&
- Right.isOneOf(tok::amp, tok::ampamp) &&
+ if (Right.is(TT_TrailingAnnotation) && Right.isOneOf(tok::amp, tok::ampamp) &&
Left.isOneOf(tok::kw_const, tok::kw_volatile) &&
(!Right.Next || Right.Next->is(tok::semi))) {
// Match const and volatile ref-qualifiers without any additional
@@ -4404,16 +4850,21 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.Finalized)
return Right.hasWhitespaceBefore();
+ const bool IsVerilog = Style.isVerilog();
+ assert(!IsVerilog || !IsCpp);
+
// Never ever merge two words.
- if (Keywords.isWordLike(Right) && Keywords.isWordLike(Left))
+ if (Keywords.isWordLike(Right, IsVerilog) &&
+ Keywords.isWordLike(Left, IsVerilog)) {
return true;
+ }
// Leave a space between * and /* to avoid C4138 `comment end` found outside
// of comment.
if (Left.is(tok::star) && Right.is(tok::comment))
return true;
- if (Style.isCpp()) {
+ if (IsCpp) {
if (Left.is(TT_OverloadedOperator) &&
Right.isOneOf(TT_TemplateOpener, TT_TemplateCloser)) {
return true;
@@ -4458,6 +4909,12 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
Right.is(TT_TemplateOpener)) {
return true;
}
+ // C++ Core Guidelines suppression tag, e.g. `[[suppress(type.5)]]`.
+ if (Left.is(tok::identifier) && Right.is(tok::numeric_constant))
+ return Right.TokenText[0] != '.';
+ // `Left` is a keyword (including C++ alternative operator) or identifier.
+ if (Left.Tok.getIdentifierInfo() && Right.Tok.isLiteral())
+ return true;
} else if (Style.isProto()) {
if (Right.is(tok::period) &&
Left.isOneOf(Keywords.kw_optional, Keywords.kw_required,
@@ -4555,11 +5012,11 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
// space between method modifier and opening parenthesis of a tuple return
// type
- if (Left.isOneOf(tok::kw_public, tok::kw_private, tok::kw_protected,
- tok::kw_virtual, tok::kw_extern, tok::kw_static,
- Keywords.kw_internal, Keywords.kw_abstract,
- Keywords.kw_sealed, Keywords.kw_override,
- Keywords.kw_async, Keywords.kw_unsafe) &&
+ if ((Left.isAccessSpecifierKeyword() ||
+ Left.isOneOf(tok::kw_virtual, tok::kw_extern, tok::kw_static,
+ Keywords.kw_internal, Keywords.kw_abstract,
+ Keywords.kw_sealed, Keywords.kw_override,
+ Keywords.kw_async, Keywords.kw_unsafe)) &&
Right.is(tok::l_paren)) {
return true;
}
@@ -4585,7 +5042,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
}
// In tagged template literals ("html`bar baz`"), there is no space between
// the tag identifier and the template string.
- if (Keywords.IsJavaScriptIdentifier(Left,
+ if (Keywords.isJavaScriptIdentifier(Left,
/* AcceptIdentifierName= */ false) &&
Right.is(TT_TemplateString)) {
return false;
@@ -4668,6 +5125,8 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return true; // "x! as string", "x! in y"
}
} else if (Style.Language == FormatStyle::LK_Java) {
+ if (Left.is(TT_CaseLabelArrow) || Right.is(TT_CaseLabelArrow))
+ return true;
if (Left.is(tok::r_square) && Right.is(tok::l_brace))
return true;
// spaces inside square brackets.
@@ -4678,19 +5137,16 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return Style.SpaceBeforeParensOptions.AfterControlStatements ||
spaceRequiredBeforeParens(Right);
}
- if ((Left.isOneOf(tok::kw_static, tok::kw_public, tok::kw_private,
- tok::kw_protected) ||
- Left.isOneOf(Keywords.kw_final, Keywords.kw_abstract,
+ if ((Left.isAccessSpecifierKeyword() ||
+ Left.isOneOf(tok::kw_static, Keywords.kw_final, Keywords.kw_abstract,
Keywords.kw_native)) &&
Right.is(TT_TemplateOpener)) {
return true;
}
- } else if (Style.isVerilog()) {
+ } else if (IsVerilog) {
// An escaped identifier ends with whitespace.
- if (Style.isVerilog() && Left.is(tok::identifier) &&
- Left.TokenText[0] == '\\') {
+ if (Left.is(tok::identifier) && Left.TokenText[0] == '\\')
return true;
- }
// Add space between things in a primitive's state table unless in a
// transition like `(0?)`.
if ((Left.is(TT_VerilogTableItem) &&
@@ -4768,7 +5224,45 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
Left.endsSequence(tok::greatergreater, tok::l_brace))) {
return false;
}
+ } else if (Style.isTableGen()) {
+ // Avoid to connect [ and {. [{ is start token of multiline string.
+ if (Left.is(tok::l_square) && Right.is(tok::l_brace))
+ return true;
+ if (Left.is(tok::r_brace) && Right.is(tok::r_square))
+ return true;
+ // Do not insert around colon in DAGArg and cond operator.
+ if (Right.isOneOf(TT_TableGenDAGArgListColon,
+ TT_TableGenDAGArgListColonToAlign) ||
+ Left.isOneOf(TT_TableGenDAGArgListColon,
+ TT_TableGenDAGArgListColonToAlign)) {
+ return false;
+ }
+ if (Right.is(TT_TableGenCondOperatorColon))
+ return false;
+ if (Left.isOneOf(TT_TableGenDAGArgOperatorID,
+ TT_TableGenDAGArgOperatorToBreak) &&
+ Right.isNot(TT_TableGenDAGArgCloser)) {
+ return true;
+ }
+ // Do not insert bang operators and consequent openers.
+ if (Right.isOneOf(tok::l_paren, tok::less) &&
+ Left.isOneOf(TT_TableGenBangOperator, TT_TableGenCondOperator)) {
+ return false;
+ }
+ // Trailing paste requires space before '{' or ':', the case in name values.
+ // Not before ';', the case in normal values.
+ if (Left.is(TT_TableGenTrailingPasteOperator) &&
+ Right.isOneOf(tok::l_brace, tok::colon)) {
+ return true;
+ }
+ // Otherwise paste operator does not prefer space around.
+ if (Left.is(tok::hash) || Right.is(tok::hash))
+ return false;
+ // Sure not to connect after defining keywords.
+ if (Keywords.isTableGenDefinition(Left))
+ return true;
}
+
if (Left.is(TT_ImplicitStringLiteral))
return Right.hasWhitespaceBefore();
if (Line.Type == LT_ObjCMethodDecl) {
@@ -4787,9 +5281,10 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return false;
}
- if (Right.is(TT_TrailingReturnArrow) || Left.is(TT_TrailingReturnArrow))
+ if (Right.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow) ||
+ Left.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow)) {
return true;
-
+ }
if (Left.is(tok::comma) && Right.isNot(TT_OverloadedOperatorLParen) &&
// In an unexpanded macro call we only find the parentheses and commas
// in a line; the commas and closing parenthesis do not require a space.
@@ -4848,21 +5343,11 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return true;
}
if (Left.is(TT_UnaryOperator)) {
- if (Right.isNot(tok::l_paren)) {
- // The alternative operators for ~ and ! are "compl" and "not".
- // If they are used instead, we do not want to combine them with
- // the token to the right, unless that is a left paren.
- if (Left.is(tok::exclaim) && Left.TokenText == "not")
- return true;
- if (Left.is(tok::tilde) && Left.TokenText == "compl")
- return true;
- // Lambda captures allow for a lone &, so "&]" needs to be properly
- // handled.
- if (Left.is(tok::amp) && Right.is(tok::r_square))
- return Style.SpacesInSquareBrackets;
- }
- return (Style.SpaceAfterLogicalNot && Left.is(tok::exclaim)) ||
- Right.is(TT_BinaryOperator);
+ // Lambda captures allow for a lone &, so "&]" needs to be properly
+ // handled.
+ if (Left.is(tok::amp) && Right.is(tok::r_square))
+ return Style.SpacesInSquareBrackets;
+ return Style.SpaceAfterLogicalNot && Left.is(tok::exclaim);
}
// If the next token is a binary operator or a selector name, we have
@@ -4981,6 +5466,15 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
if (Right.NewlinesBefore > 1 && Style.MaxEmptyLinesToKeep > 0)
return true;
+ if (Style.BreakFunctionDefinitionParameters && Line.MightBeFunctionDecl &&
+ Line.mightBeFunctionDefinition() && Left.MightBeFunctionDeclParen &&
+ Left.ParameterCount > 0) {
+ return true;
+ }
+
+ const auto *BeforeLeft = Left.Previous;
+ const auto *AfterRight = Right.Next;
+
if (Style.isCSharp()) {
if (Left.is(TT_FatArrow) && Right.is(tok::l_brace) &&
Style.BraceWrapping.AfterFunction) {
@@ -4992,7 +5486,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
}
if (Right.is(TT_CSharpGenericTypeConstraint))
return true;
- if (Right.Next && Right.Next->is(TT_FatArrow) &&
+ if (AfterRight && AfterRight->is(TT_FatArrow) &&
(Right.is(tok::numeric_constant) ||
(Right.is(tok::identifier) && Right.TokenText == "_"))) {
return true;
@@ -5009,15 +5503,14 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
Left.is(tok::r_square) && Right.is(tok::l_square)) {
return true;
}
-
} else if (Style.isJavaScript()) {
// FIXME: This might apply to other languages and token kinds.
- if (Right.is(tok::string_literal) && Left.is(tok::plus) && Left.Previous &&
- Left.Previous->is(tok::string_literal)) {
+ if (Right.is(tok::string_literal) && Left.is(tok::plus) && BeforeLeft &&
+ BeforeLeft->is(tok::string_literal)) {
return true;
}
if (Left.is(TT_DictLiteral) && Left.is(tok::l_brace) && Line.Level == 0 &&
- Left.Previous && Left.Previous->is(tok::equal) &&
+ BeforeLeft && BeforeLeft->is(tok::equal) &&
Line.First->isOneOf(tok::identifier, Keywords.kw_import, tok::kw_export,
tok::kw_const) &&
// kw_var/kw_let are pseudo-tokens that are tok::identifier, so match
@@ -5036,8 +5529,8 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
// instead of bin-packing.
return true;
}
- if (Right.is(tok::r_brace) && Left.is(tok::l_brace) && Left.Previous &&
- Left.Previous->is(TT_FatArrow)) {
+ if (Right.is(tok::r_brace) && Left.is(tok::l_brace) && BeforeLeft &&
+ BeforeLeft->is(TT_FatArrow)) {
// JS arrow function (=> {...}).
switch (Style.AllowShortLambdasOnASingleLine) {
case FormatStyle::SLS_All:
@@ -5065,8 +5558,8 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
FormatStyle::SFS_InlineOnly);
}
} else if (Style.Language == FormatStyle::LK_Java) {
- if (Right.is(tok::plus) && Left.is(tok::string_literal) && Right.Next &&
- Right.Next->is(tok::string_literal)) {
+ if (Right.is(tok::plus) && Left.is(tok::string_literal) && AfterRight &&
+ AfterRight->is(tok::string_literal)) {
return true;
}
} else if (Style.isVerilog()) {
@@ -5090,7 +5583,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
if (!Keywords.isVerilogBegin(Right) && Keywords.isVerilogEndOfLabel(Left))
return true;
} else if (Style.BreakAdjacentStringLiterals &&
- (Style.isCpp() || Style.isProto() ||
+ (IsCpp || Style.isProto() ||
Style.Language == FormatStyle::LK_TableGen)) {
if (Left.isStringLiteral() && Right.isStringLiteral())
return true;
@@ -5119,6 +5612,24 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
}
return Style.BreakArrays;
}
+ } else if (Style.isTableGen()) {
+ // Break the comma in side cond operators.
+ // !cond(case1:1,
+ // case2:0);
+ if (Left.is(TT_TableGenCondOperatorComma))
+ return true;
+ if (Left.is(TT_TableGenDAGArgOperatorToBreak) &&
+ Right.isNot(TT_TableGenDAGArgCloser)) {
+ return true;
+ }
+ if (Left.is(TT_TableGenDAGArgListCommaToBreak))
+ return true;
+ if (Right.is(TT_TableGenDAGArgCloser) && Right.MatchingParen &&
+ Right.MatchingParen->is(TT_TableGenDAGArgOpenerToBreak) &&
+ &Left != Right.MatchingParen->Next) {
+ // Check to avoid empty DAGArg such as (ins).
+ return Style.TableGenBreakInsideDAGArg == FormatStyle::DAS_BreakAll;
+ }
}
if (Line.startsWith(tok::kw_asm) && Right.is(TT_InlineASMColon) &&
@@ -5157,14 +5668,13 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
return true;
if (Left.IsUnterminatedLiteral)
return true;
- // FIXME: Breaking after newlines seems useful in general. Turn this into an
- // option and recognize more cases like endl etc, and break independent of
- // what comes after operator lessless.
- if (Right.is(tok::lessless) && Right.Next &&
- Right.Next->is(tok::string_literal) && Left.is(tok::string_literal) &&
- Left.TokenText.ends_with("\\n\"")) {
- return true;
+
+ if (BeforeLeft && BeforeLeft->is(tok::lessless) &&
+ Left.is(tok::string_literal) && Right.is(tok::lessless) && AfterRight &&
+ AfterRight->is(tok::string_literal)) {
+ return Right.NewlinesBefore > 0;
}
+
if (Right.is(TT_RequiresClause)) {
switch (Style.RequiresClausePosition) {
case FormatStyle::RCPS_OwnLine:
@@ -5182,7 +5692,9 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
// concept ...
if (Right.is(tok::kw_concept))
return Style.BreakBeforeConceptDeclarations == FormatStyle::BBCDS_Always;
- return Style.AlwaysBreakTemplateDeclarations == FormatStyle::BTDS_Yes;
+ return Style.BreakTemplateDeclarations == FormatStyle::BTDS_Yes ||
+ (Style.BreakTemplateDeclarations == FormatStyle::BTDS_Leave &&
+ Right.NewlinesBefore > 0);
}
if (Left.ClosesRequiresClause && Right.isNot(tok::semi)) {
switch (Style.RequiresClausePosition) {
@@ -5237,8 +5749,8 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
// string literal accordingly. Thus, we try keep existing line breaks.
return Right.IsMultiline && Right.NewlinesBefore > 0;
}
- if ((Left.is(tok::l_brace) || (Left.is(tok::less) && Left.Previous &&
- Left.Previous->is(tok::equal))) &&
+ if ((Left.is(tok::l_brace) ||
+ (Left.is(tok::less) && BeforeLeft && BeforeLeft->is(tok::equal))) &&
Right.NestingLevel == 1 && Style.Language == FormatStyle::LK_Proto) {
// Don't put enums or option definitions onto single lines in protocol
// buffers.
@@ -5250,9 +5762,8 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
if (isAllmanBrace(Left) || isAllmanBrace(Right)) {
auto *FirstNonComment = Line.getFirstNonComment();
bool AccessSpecifier =
- FirstNonComment &&
- FirstNonComment->isOneOf(Keywords.kw_internal, tok::kw_public,
- tok::kw_private, tok::kw_protected);
+ FirstNonComment && (FirstNonComment->is(Keywords.kw_internal) ||
+ FirstNonComment->isAccessSpecifierKeyword());
if (Style.BraceWrapping.AfterEnum) {
if (Line.startsWith(tok::kw_enum) ||
@@ -5352,7 +5863,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
//
// We ensure elsewhere that extensions are always on their own line.
if (Style.isProto() && Right.is(TT_SelectorName) &&
- Right.isNot(tok::r_square) && Right.Next) {
+ Right.isNot(tok::r_square) && AfterRight) {
// Keep `@submessage` together in:
// @submessage { key: value }
if (Left.is(tok::at))
@@ -5361,7 +5872,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
// selector { ...
// selector: { ...
// selector: @base { ...
- FormatToken *LBrace = Right.Next;
+ const auto *LBrace = AfterRight;
if (LBrace && LBrace->is(tok::colon)) {
LBrace = LBrace->Next;
if (LBrace && LBrace->is(tok::at)) {
@@ -5438,13 +5949,13 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
} else if (Style.isJavaScript()) {
const FormatToken *NonComment = Right.getPreviousNonComment();
if (NonComment &&
- NonComment->isOneOf(
- tok::kw_return, Keywords.kw_yield, tok::kw_continue, tok::kw_break,
- tok::kw_throw, Keywords.kw_interface, Keywords.kw_type,
- tok::kw_static, tok::kw_public, tok::kw_private, tok::kw_protected,
- Keywords.kw_readonly, Keywords.kw_override, Keywords.kw_abstract,
- Keywords.kw_get, Keywords.kw_set, Keywords.kw_async,
- Keywords.kw_await)) {
+ (NonComment->isAccessSpecifierKeyword() ||
+ NonComment->isOneOf(
+ tok::kw_return, Keywords.kw_yield, tok::kw_continue, tok::kw_break,
+ tok::kw_throw, Keywords.kw_interface, Keywords.kw_type,
+ tok::kw_static, Keywords.kw_readonly, Keywords.kw_override,
+ Keywords.kw_abstract, Keywords.kw_get, Keywords.kw_set,
+ Keywords.kw_async, Keywords.kw_await))) {
return false; // Otherwise automatic semicolon insertion would trigger.
}
if (Right.NestingLevel == 0 &&
@@ -5516,6 +6027,23 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return false;
if (Left.is(TT_TemplateString) && Left.opensScope())
return true;
+ } else if (Style.isTableGen()) {
+ // Avoid to break after "def", "class", "let" and so on.
+ if (Keywords.isTableGenDefinition(Left))
+ return false;
+ // Avoid to break after '(' in the cases that is in bang operators.
+ if (Right.is(tok::l_paren)) {
+ return !Left.isOneOf(TT_TableGenBangOperator, TT_TableGenCondOperator,
+ TT_TemplateCloser);
+ }
+ // Avoid to break between the value and its suffix part.
+ if (Left.is(TT_TableGenValueSuffix))
+ return false;
+ // Avoid to break around paste operator.
+ if (Left.is(tok::hash) || Right.is(tok::hash))
+ return false;
+ if (Left.isOneOf(TT_TableGenBangOperator, TT_TableGenCondOperator))
+ return false;
}
if (Left.is(tok::at))
@@ -5615,7 +6143,11 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return Style.BreakBeforeConceptDeclarations != FormatStyle::BBCDS_Never;
if (Right.is(TT_RequiresClause))
return true;
- if (Left.ClosesTemplateDeclaration || Left.is(TT_FunctionAnnotationRParen))
+ if (Left.ClosesTemplateDeclaration) {
+ return Style.BreakTemplateDeclarations != FormatStyle::BTDS_Leave ||
+ Right.NewlinesBefore > 0;
+ }
+ if (Left.is(TT_FunctionAnnotationRParen))
return true;
if (Left.ClosesRequiresClause)
return true;
@@ -5770,8 +6302,8 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return Left.isOneOf(tok::comma, tok::coloncolon, tok::semi, tok::l_brace,
tok::kw_class, tok::kw_struct, tok::comment) ||
Right.isMemberAccess() ||
- Right.isOneOf(TT_TrailingReturnArrow, tok::lessless, tok::colon,
- tok::l_square, tok::at) ||
+ Right.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow, tok::lessless,
+ tok::colon, tok::l_square, tok::at) ||
(Left.is(tok::r_paren) &&
Right.isOneOf(tok::identifier, tok::kw_const)) ||
(Left.is(tok::l_paren) && Right.isNot(tok::r_paren)) ||
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.h b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.h
index 05a6daa87d80..f4f2bba0eb21 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.h
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.h
@@ -16,13 +16,14 @@
#define LLVM_CLANG_LIB_FORMAT_TOKENANNOTATOR_H
#include "UnwrappedLineParser.h"
-#include "clang/Format/Format.h"
namespace clang {
namespace format {
enum LineType {
LT_Invalid,
+ // Contains public/private/protected followed by TT_InheritanceColon.
+ LT_AccessModifier,
LT_ImportStatement,
LT_ObjCDecl, // An @interface, @implementation, or @protocol line.
LT_ObjCMethodDecl,
@@ -46,7 +47,7 @@ enum ScopeType {
class AnnotatedLine {
public:
AnnotatedLine(const UnwrappedLine &Line)
- : First(Line.Tokens.front().Tok), Level(Line.Level),
+ : First(Line.Tokens.front().Tok), Type(LT_Other), Level(Line.Level),
PPLevel(Line.PPLevel),
MatchingOpeningBlockLineIndex(Line.MatchingOpeningBlockLineIndex),
MatchingClosingBlockLineIndex(Line.MatchingClosingBlockLineIndex),
@@ -212,7 +213,10 @@ private:
class TokenAnnotator {
public:
TokenAnnotator(const FormatStyle &Style, const AdditionalKeywords &Keywords)
- : Style(Style), Keywords(Keywords) {}
+ : Style(Style), IsCpp(Style.isCpp()),
+ LangOpts(getFormattingLangOpts(Style)), Keywords(Keywords) {
+ assert(IsCpp == LangOpts.CXXOperatorNames);
+ }
/// Adapts the indent levels of comment lines to the indent of the
/// subsequent line.
@@ -260,6 +264,9 @@ private:
const FormatStyle &Style;
+ bool IsCpp;
+ LangOptions LangOpts;
+
const AdditionalKeywords &Keywords;
SmallVector<ScopeType> Scopes;
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp b/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
index adeb07243487..1804c1437fd4 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
@@ -57,7 +57,7 @@ public:
/// Update the indent state given that \p Line is going to be formatted
/// next.
void nextLine(const AnnotatedLine &Line) {
- Offset = getIndentOffset(*Line.First);
+ Offset = getIndentOffset(Line);
// Update the indent level cache size so that we can rely on it
// having the right size in adjustToUnmodifiedline.
if (Line.Level >= IndentForLevel.size())
@@ -111,42 +111,41 @@ private:
///
/// For example, 'public:' labels in classes are offset by 1 or 2
/// characters to the left from their level.
- int getIndentOffset(const FormatToken &RootToken) {
+ int getIndentOffset(const AnnotatedLine &Line) {
if (Style.Language == FormatStyle::LK_Java || Style.isJavaScript() ||
Style.isCSharp()) {
return 0;
}
- auto IsAccessModifier = [this, &RootToken]() {
- if (RootToken.isAccessSpecifier(Style.isCpp())) {
+ auto IsAccessModifier = [&](const FormatToken &RootToken) {
+ if (Line.Type == LT_AccessModifier || RootToken.isObjCAccessSpecifier())
return true;
- } else if (RootToken.isObjCAccessSpecifier()) {
- return true;
- }
+
+ const auto *Next = RootToken.Next;
+
// Handle Qt signals.
- else if (RootToken.isOneOf(Keywords.kw_signals, Keywords.kw_qsignals) &&
- RootToken.Next && RootToken.Next->is(tok::colon)) {
- return true;
- } else if (RootToken.Next &&
- RootToken.Next->isOneOf(Keywords.kw_slots,
- Keywords.kw_qslots) &&
- RootToken.Next->Next && RootToken.Next->Next->is(tok::colon)) {
+ if (RootToken.isOneOf(Keywords.kw_signals, Keywords.kw_qsignals) &&
+ Next && Next->is(tok::colon)) {
return true;
}
- // Handle malformed access specifier e.g. 'private' without trailing ':'.
- else if (!RootToken.Next && RootToken.isAccessSpecifier(false)) {
+
+ if (Next && Next->isOneOf(Keywords.kw_slots, Keywords.kw_qslots) &&
+ Next->Next && Next->Next->is(tok::colon)) {
return true;
}
- return false;
+
+ // Handle malformed access specifier e.g. 'private' without trailing ':'.
+ return !Next && RootToken.isAccessSpecifier(false);
};
- if (IsAccessModifier()) {
+ if (IsAccessModifier(*Line.First)) {
// The AccessModifierOffset may be overridden by IndentAccessModifiers,
// in which case we take a negative value of the IndentWidth to simulate
// the upper indent level.
return Style.IndentAccessModifiers ? -Style.IndentWidth
: Style.AccessModifierOffset;
}
+
return 0;
}
@@ -515,6 +514,12 @@ private:
}
}
+ if (TheLine->First->is(TT_SwitchExpressionLabel)) {
+ return Style.AllowShortCaseExpressionOnASingleLine
+ ? tryMergeShortCaseLabels(I, E, Limit)
+ : 0;
+ }
+
if (TheLine->Last->is(tok::l_brace)) {
bool ShouldMerge = false;
// Try to merge records.
@@ -796,8 +801,12 @@ private:
}
}
- if (const auto *LastNonComment = Line.getLastNonComment();
- LastNonComment && LastNonComment->is(tok::l_brace)) {
+ if (Line.endsWith(tok::l_brace)) {
+ if (Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Never &&
+ Line.First->is(TT_BlockLBrace)) {
+ return 0;
+ }
+
if (IsSplitBlock && Line.First == Line.Last &&
I > AnnotatedLines.begin() &&
(I[-1]->endsWith(tok::kw_else) || IsCtrlStmt(*I[-1]))) {
@@ -1221,7 +1230,7 @@ private:
// While not empty, take first element and follow edges.
while (!Queue.empty()) {
// Quit if we still haven't found a solution by now.
- if (Count > 25000000)
+ if (Count > 25'000'000)
return 0;
Penalty = Queue.top().first.first;
@@ -1235,7 +1244,7 @@ private:
// Cut off the analysis of certain solutions if the analysis gets too
// complex. See description of IgnoreStackForComparison.
- if (Count > 50000)
+ if (Count > 50'000)
Node->State.IgnoreStackForComparison = true;
if (!Seen.insert(&Node->State).second) {
@@ -1469,11 +1478,13 @@ static auto computeNewlines(const AnnotatedLine &Line,
Newlines = std::min(Newlines, 1u);
if (Newlines == 0 && !RootToken.IsFirst)
Newlines = 1;
- if (RootToken.IsFirst && !RootToken.HasUnescapedNewline)
+ if (RootToken.IsFirst &&
+ (!Style.KeepEmptyLines.AtStartOfFile || !RootToken.HasUnescapedNewline)) {
Newlines = 0;
+ }
// Remove empty lines after "{".
- if (!Style.KeepEmptyLinesAtTheStartOfBlocks && PreviousLine &&
+ if (!Style.KeepEmptyLines.AtStartOfBlock && PreviousLine &&
PreviousLine->Last->is(tok::l_brace) &&
!PreviousLine->startsWithNamespace() &&
!(PrevPrevLine && PrevPrevLine->startsWithNamespace() &&
@@ -1545,9 +1556,9 @@ void UnwrappedLineFormatter::formatFirstToken(
unsigned NewlineIndent) {
FormatToken &RootToken = *Line.First;
if (RootToken.is(tok::eof)) {
- unsigned Newlines =
- std::min(RootToken.NewlinesBefore,
- Style.KeepEmptyLinesAtEOF ? Style.MaxEmptyLinesToKeep + 1 : 1);
+ unsigned Newlines = std::min(
+ RootToken.NewlinesBefore,
+ Style.KeepEmptyLines.AtEndOfFile ? Style.MaxEmptyLinesToKeep + 1 : 1);
unsigned TokenIndent = Newlines ? NewlineIndent : 0;
Whitespaces->replaceWhitespace(RootToken, Newlines, TokenIndent,
TokenIndent);
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.h b/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.h
index ee6d31de8c42..9b8acf427a2a 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.h
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.h
@@ -16,8 +16,6 @@
#define LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEFORMATTER_H
#include "ContinuationIndenter.h"
-#include "clang/Format/Format.h"
-#include <map>
namespace clang {
namespace format {
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
index 573919798870..e3fb976ee1cc 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
@@ -47,7 +47,8 @@ void printLine(llvm::raw_ostream &OS, const UnwrappedLine &Line,
OS << Prefix;
NewLine = false;
}
- OS << I->Tok->Tok.getName() << "[" << "T=" << (unsigned)I->Tok->getType()
+ OS << I->Tok->Tok.getName() << "["
+ << "T=" << (unsigned)I->Tok->getType()
<< ", OC=" << I->Tok->OriginalColumn << ", \"" << I->Tok->TokenText
<< "\"] ";
for (SmallVectorImpl<UnwrappedLine>::const_iterator
@@ -90,6 +91,12 @@ private:
} // end anonymous namespace
+std::ostream &operator<<(std::ostream &Stream, const UnwrappedLine &Line) {
+ llvm::raw_os_ostream OS(Stream);
+ printLine(OS, Line);
+ return Stream;
+}
+
class ScopedLineState {
public:
ScopedLineState(UnwrappedLineParser &Parser,
@@ -105,6 +112,7 @@ public:
Parser.Line->PPLevel = PreBlockLine->PPLevel;
Parser.Line->InPPDirective = PreBlockLine->InPPDirective;
Parser.Line->InMacroBody = PreBlockLine->InMacroBody;
+ Parser.Line->UnbracedBodyLevel = PreBlockLine->UnbracedBodyLevel;
}
~ScopedLineState() {
@@ -153,14 +161,17 @@ UnwrappedLineParser::UnwrappedLineParser(
llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
IdentifierTable &IdentTable)
: Line(new UnwrappedLine), MustBreakBeforeNextToken(false),
- CurrentLines(&Lines), Style(Style), Keywords(Keywords),
+ CurrentLines(&Lines), Style(Style), IsCpp(Style.isCpp()),
+ LangOpts(getFormattingLangOpts(Style)), Keywords(Keywords),
CommentPragmasRegex(Style.CommentPragmas), Tokens(nullptr),
Callback(Callback), AllTokens(Tokens), PPBranchLevel(-1),
IncludeGuard(Style.IndentPPDirectives == FormatStyle::PPDIS_None
? IG_Rejected
: IG_Inited),
IncludeGuardToken(nullptr), FirstStartColumn(FirstStartColumn),
- Macros(Style.Macros, SourceMgr, Style, Allocator, IdentTable) {}
+ Macros(Style.Macros, SourceMgr, Style, Allocator, IdentTable) {
+ assert(IsCpp == LangOpts.CXXOperatorNames);
+}
void UnwrappedLineParser::reset() {
PPBranchLevel = -1;
@@ -357,13 +368,15 @@ bool UnwrappedLineParser::parseLevel(const FormatToken *OpeningBrace,
do {
if (FormatTok->isAttribute()) {
nextToken();
+ if (FormatTok->is(tok::l_paren))
+ parseParens();
continue;
}
- tok::TokenKind kind = FormatTok->Tok.getKind();
- if (FormatTok->getType() == TT_MacroBlockBegin)
- kind = tok::l_brace;
- else if (FormatTok->getType() == TT_MacroBlockEnd)
- kind = tok::r_brace;
+ tok::TokenKind Kind = FormatTok->Tok.getKind();
+ if (FormatTok->is(TT_MacroBlockBegin))
+ Kind = tok::l_brace;
+ else if (FormatTok->is(TT_MacroBlockEnd))
+ Kind = tok::r_brace;
auto ParseDefault = [this, OpeningBrace, IfKind, &IfLBrace, &HasDoWhile,
&HasLabel, &StatementCount] {
@@ -374,7 +387,7 @@ bool UnwrappedLineParser::parseLevel(const FormatToken *OpeningBrace,
assert(StatementCount > 0 && "StatementCount overflow!");
};
- switch (kind) {
+ switch (Kind) {
case tok::comment:
nextToken();
addUnwrappedLine();
@@ -389,9 +402,10 @@ bool UnwrappedLineParser::parseLevel(const FormatToken *OpeningBrace,
ParseDefault();
continue;
}
- if (!InRequiresExpression && FormatTok->isNot(TT_MacroBlockBegin) &&
- tryToParseBracedList()) {
- continue;
+ if (!InRequiresExpression && FormatTok->isNot(TT_MacroBlockBegin)) {
+ if (tryToParseBracedList())
+ continue;
+ FormatTok->setFinalizedType(TT_BlockLBrace);
}
parseBlock();
++StatementCount;
@@ -421,15 +435,11 @@ bool UnwrappedLineParser::parseLevel(const FormatToken *OpeningBrace,
break;
case tok::kw_default: {
unsigned StoredPosition = Tokens->getPosition();
- FormatToken *Next;
- do {
- Next = Tokens->getNextToken();
- assert(Next);
- } while (Next->is(tok::comment));
+ auto *Next = Tokens->getNextNonComment();
FormatTok = Tokens->setPosition(StoredPosition);
- if (Next->isNot(tok::colon)) {
- // default not followed by ':' is not a case label; treat it like
- // an identifier.
+ if (!Next->isOneOf(tok::colon, tok::arrow)) {
+ // default not followed by `:` or `->` is not a case label; treat it
+ // like an identifier.
parseStructuralElement();
break;
}
@@ -448,6 +458,7 @@ bool UnwrappedLineParser::parseLevel(const FormatToken *OpeningBrace,
}
if (!SwitchLabelEncountered &&
(Style.IndentCaseLabels ||
+ (OpeningBrace && OpeningBrace->is(TT_SwitchExpressionLBrace)) ||
(Line->InPPDirective && Line->Level == 1))) {
++Line->Level;
}
@@ -489,18 +500,23 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
};
SmallVector<StackEntry, 8> LBraceStack;
assert(Tok->is(tok::l_brace));
+
do {
- // Get next non-comment, non-preprocessor token.
- FormatToken *NextTok;
- do {
- NextTok = Tokens->getNextToken();
- } while (NextTok->is(tok::comment));
- while (NextTok->is(tok::hash) && !Line->InMacroBody) {
- NextTok = Tokens->getNextToken();
- do {
+ auto *NextTok = Tokens->getNextNonComment();
+
+ if (!Line->InMacroBody && !Style.isTableGen()) {
+ // Skip PPDirective lines and comments.
+ while (NextTok->is(tok::hash)) {
NextTok = Tokens->getNextToken();
- } while (NextTok->is(tok::comment) ||
- (NextTok->NewlinesBefore == 0 && NextTok->isNot(tok::eof)));
+ if (NextTok->is(tok::pp_not_keyword))
+ break;
+ do {
+ NextTok = Tokens->getNextToken();
+ } while (!NextTok->HasUnescapedNewline && NextTok->isNot(tok::eof));
+
+ while (NextTok->is(tok::comment))
+ NextTok = Tokens->getNextToken();
+ }
}
switch (Tok->Tok.getKind()) {
@@ -529,21 +545,11 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
case tok::r_brace:
if (LBraceStack.empty())
break;
- if (LBraceStack.back().Tok->is(BK_Unknown)) {
+ if (auto *LBrace = LBraceStack.back().Tok; LBrace->is(BK_Unknown)) {
bool ProbablyBracedList = false;
if (Style.Language == FormatStyle::LK_Proto) {
ProbablyBracedList = NextTok->isOneOf(tok::comma, tok::r_square);
- } else {
- // Skip NextTok over preprocessor lines, otherwise we may not
- // properly diagnose the block as a braced intializer
- // if the comma separator appears after the pp directive.
- while (NextTok->is(tok::hash)) {
- ScopedMacroState MacroState(*Line, Tokens, NextTok);
- do {
- NextTok = Tokens->getNextToken();
- } while (NextTok->isNot(tok::eof));
- }
-
+ } else if (LBrace->isNot(TT_EnumLBrace)) {
// Using OriginalColumn to distinguish between ObjC methods and
// binary operators is a bit hacky.
bool NextIsObjCMethod = NextTok->isOneOf(tok::plus, tok::minus) &&
@@ -557,14 +563,15 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
// If we already marked the opening brace as braced list, the closing
// must also be part of it.
- ProbablyBracedList = LBraceStack.back().Tok->is(TT_BracedListLBrace);
+ ProbablyBracedList = LBrace->is(TT_BracedListLBrace);
ProbablyBracedList = ProbablyBracedList ||
(Style.isJavaScript() &&
NextTok->isOneOf(Keywords.kw_of, Keywords.kw_in,
Keywords.kw_as));
- ProbablyBracedList = ProbablyBracedList ||
- (Style.isCpp() && NextTok->is(tok::l_paren));
+ ProbablyBracedList =
+ ProbablyBracedList || (IsCpp && (PrevTok->Tok.isLiteral() ||
+ NextTok->is(tok::l_paren)));
// If there is a comma, semicolon or right paren after the closing
// brace, we assume this is a braced initializer list.
@@ -602,14 +609,20 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
NextTok = Tokens->getNextToken();
ProbablyBracedList = NextTok->isNot(tok::l_square);
}
+
+ // Cpp macro definition body that is a nonempty braced list or block:
+ if (IsCpp && Line->InMacroBody && PrevTok != FormatTok &&
+ !FormatTok->Previous && NextTok->is(tok::eof) &&
+ // A statement can end with only `;` (simple statement), a block
+ // closing brace (compound statement), or `:` (label statement).
+ // If PrevTok is a block opening brace, Tok ends an empty block.
+ !PrevTok->isOneOf(tok::semi, BK_Block, tok::colon)) {
+ ProbablyBracedList = true;
+ }
}
- if (ProbablyBracedList) {
- Tok->setBlockKind(BK_BracedInit);
- LBraceStack.back().Tok->setBlockKind(BK_BracedInit);
- } else {
- Tok->setBlockKind(BK_Block);
- LBraceStack.back().Tok->setBlockKind(BK_Block);
- }
+ const auto BlockKind = ProbablyBracedList ? BK_BracedInit : BK_Block;
+ Tok->setBlockKind(BlockKind);
+ LBrace->setBlockKind(BlockKind);
}
LBraceStack.pop_back();
break;
@@ -631,6 +644,7 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
default:
break;
}
+
PrevTok = Tok;
Tok = NextTok;
} while (Tok->isNot(tok::eof) && !LBraceStack.empty());
@@ -813,8 +827,11 @@ FormatToken *UnwrappedLineParser::parseBlock(bool MustBeDeclaration,
return IfLBrace;
}
- if (FormatTok->is(tok::r_brace) && Tok->is(TT_NamespaceLBrace))
- FormatTok->setFinalizedType(TT_NamespaceRBrace);
+ if (FormatTok->is(tok::r_brace)) {
+ FormatTok->setBlockKind(BK_Block);
+ if (Tok->is(TT_NamespaceLBrace))
+ FormatTok->setFinalizedType(TT_NamespaceRBrace);
+ }
const bool IsFunctionRBrace =
FormatTok->is(tok::r_brace) && Tok->is(TT_FunctionLBrace);
@@ -1171,20 +1188,14 @@ void UnwrappedLineParser::parsePPDefine() {
Line->InMacroBody = true;
if (Style.SkipMacroDefinitionBody) {
- do {
+ while (!eof()) {
FormatTok->Finalized = true;
- nextToken();
- } while (!eof());
+ FormatTok = Tokens->getNextToken();
+ }
addUnwrappedLine();
return;
}
- if (FormatTok->is(tok::identifier) &&
- Tokens->peekNextToken()->is(tok::colon)) {
- nextToken();
- nextToken();
- }
-
// Errors during a preprocessor directive can only affect the layout of the
// preprocessor directive, and thus we ignore them. An alternative approach
// would be to use the same approach we use on the file level (no
@@ -1213,7 +1224,6 @@ void UnwrappedLineParser::parsePPUnknown() {
static bool tokenCanStartNewLine(const FormatToken &Tok) {
// Semicolon can be a null-statement, l_square can be a start of a macro or
// a C++11 attribute, but this doesn't seem to be common.
- assert(Tok.isNot(TT_AttributeSquare));
return !Tok.isOneOf(tok::semi, tok::l_brace,
// Tokens that can only be used as binary operators and a
// part of overloaded operator names.
@@ -1419,7 +1429,7 @@ void UnwrappedLineParser::parseStructuralElement(
return;
}
- if (Style.isCpp()) {
+ if (IsCpp) {
while (FormatTok->is(tok::l_square) && handleCppAttributes()) {
}
} else if (Style.isVerilog()) {
@@ -1452,6 +1462,15 @@ void UnwrappedLineParser::parseStructuralElement(
}
// Tokens that only make sense at the beginning of a line.
+ if (FormatTok->isAccessSpecifierKeyword()) {
+ if (Style.Language == FormatStyle::LK_Java || Style.isJavaScript() ||
+ Style.isCSharp()) {
+ nextToken();
+ } else {
+ parseAccessSpecifier();
+ }
+ return;
+ }
switch (FormatTok->Tok.getKind()) {
case tok::kw_asm:
nextToken();
@@ -1473,16 +1492,6 @@ void UnwrappedLineParser::parseStructuralElement(
case tok::kw_namespace:
parseNamespace();
return;
- case tok::kw_public:
- case tok::kw_protected:
- case tok::kw_private:
- if (Style.Language == FormatStyle::LK_Java || Style.isJavaScript() ||
- Style.isCSharp()) {
- nextToken();
- } else {
- parseAccessSpecifier();
- }
- return;
case tok::kw_if: {
if (Style.isJavaScript() && Line->MustBeDeclaration) {
// field/method declaration.
@@ -1515,9 +1524,9 @@ void UnwrappedLineParser::parseStructuralElement(
// 'switch: string' field declaration.
break;
}
- parseSwitch();
+ parseSwitch(/*IsExpr=*/false);
return;
- case tok::kw_default:
+ case tok::kw_default: {
// In Verilog default along with other labels are handled in the next loop.
if (Style.isVerilog())
break;
@@ -1525,14 +1534,22 @@ void UnwrappedLineParser::parseStructuralElement(
// 'default: string' field declaration.
break;
}
+ auto *Default = FormatTok;
nextToken();
if (FormatTok->is(tok::colon)) {
FormatTok->setFinalizedType(TT_CaseLabelColon);
parseLabel();
return;
}
+ if (FormatTok->is(tok::arrow)) {
+ FormatTok->setFinalizedType(TT_CaseLabelArrow);
+ Default->setFinalizedType(TT_SwitchExpressionLabel);
+ parseLabel();
+ return;
+ }
// e.g. "default void f() {}" in a Java interface.
break;
+ }
case tok::kw_case:
// Proto: there are no switch/case statements.
if (Style.Language == FormatStyle::LK_Proto) {
@@ -1593,7 +1610,7 @@ void UnwrappedLineParser::parseStructuralElement(
parseJavaScriptEs6ImportExport();
return;
}
- if (Style.isCpp()) {
+ if (IsCpp) {
nextToken();
if (FormatTok->is(tok::kw_namespace)) {
parseNamespace();
@@ -1637,12 +1654,11 @@ void UnwrappedLineParser::parseStructuralElement(
addUnwrappedLine();
return;
}
- if (Style.isCpp() && parseModuleImport())
+ if (IsCpp && parseModuleImport())
return;
}
- if (Style.isCpp() &&
- FormatTok->isOneOf(Keywords.kw_signals, Keywords.kw_qsignals,
- Keywords.kw_slots, Keywords.kw_qslots)) {
+ if (IsCpp && FormatTok->isOneOf(Keywords.kw_signals, Keywords.kw_qsignals,
+ Keywords.kw_slots, Keywords.kw_qslots)) {
nextToken();
if (FormatTok->is(tok::colon)) {
nextToken();
@@ -1650,11 +1666,11 @@ void UnwrappedLineParser::parseStructuralElement(
return;
}
}
- if (Style.isCpp() && FormatTok->is(TT_StatementMacro)) {
+ if (IsCpp && FormatTok->is(TT_StatementMacro)) {
parseStatementMacro();
return;
}
- if (Style.isCpp() && FormatTok->is(TT_NamespaceMacro)) {
+ if (IsCpp && FormatTok->is(TT_NamespaceMacro)) {
parseNamespace();
return;
}
@@ -1665,7 +1681,8 @@ void UnwrappedLineParser::parseStructuralElement(
if (!Style.isJavaScript() && !Style.isVerilog() && !Style.isTableGen() &&
Tokens->peekNextToken()->is(tok::colon) && !Line->MustBeDeclaration) {
nextToken();
- Line->Tokens.begin()->Tok->MustBreakBefore = true;
+ if (!Line->InMacroBody || CurrentLines->size() > 1)
+ Line->Tokens.begin()->Tok->MustBreakBefore = true;
FormatTok->setFinalizedType(TT_GotoLabelColon);
parseLabel(!Style.IndentGotoLabels);
if (HasLabel)
@@ -1678,9 +1695,15 @@ void UnwrappedLineParser::parseStructuralElement(
break;
}
- const bool InRequiresExpression =
- OpeningBrace && OpeningBrace->is(TT_RequiresExpressionLBrace);
- do {
+ for (const bool InRequiresExpression =
+ OpeningBrace && OpeningBrace->is(TT_RequiresExpressionLBrace);
+ !eof();) {
+ if (IsCpp && FormatTok->isCppAlternativeOperatorKeyword()) {
+ if (auto *Next = Tokens->peekNextToken(/*SkipComment=*/true);
+ Next && Next->isBinaryOperator()) {
+ FormatTok->Tok.setKind(tok::identifier);
+ }
+ }
const FormatToken *Previous = FormatTok->Previous;
switch (FormatTok->Tok.getKind()) {
case tok::at:
@@ -1750,7 +1773,7 @@ void UnwrappedLineParser::parseStructuralElement(
}
break;
case tok::kw_requires: {
- if (Style.isCpp()) {
+ if (IsCpp) {
bool ParsedClause = parseRequires();
if (ParsedClause)
return;
@@ -1760,8 +1783,9 @@ void UnwrappedLineParser::parseStructuralElement(
break;
}
case tok::kw_enum:
- // Ignore if this is part of "template <enum ...".
- if (Previous && Previous->is(tok::less)) {
+ // Ignore if this is part of "template <enum ..." or "... -> enum" or
+ // "template <..., enum ...>".
+ if (Previous && Previous->isOneOf(tok::less, tok::arrow, tok::comma)) {
nextToken();
break;
}
@@ -1771,7 +1795,7 @@ void UnwrappedLineParser::parseStructuralElement(
if (!parseEnum())
break;
// This only applies to C++ and Verilog.
- if (!Style.isCpp() && !Style.isVerilog()) {
+ if (!IsCpp && !Style.isVerilog()) {
addUnwrappedLine();
return;
}
@@ -1839,7 +1863,7 @@ void UnwrappedLineParser::parseStructuralElement(
parseParens();
// Break the unwrapped line if a K&R C function definition has a parameter
// declaration.
- if (OpeningBrace || !Style.isCpp() || !Previous || eof())
+ if (OpeningBrace || !IsCpp || !Previous || eof())
break;
if (isC78ParameterDecl(FormatTok,
Tokens->peekNextToken(/*SkipComment=*/true),
@@ -1857,8 +1881,7 @@ void UnwrappedLineParser::parseStructuralElement(
case tok::caret:
nextToken();
// Block return type.
- if (FormatTok->Tok.isAnyIdentifier() ||
- FormatTok->isSimpleTypeSpecifier()) {
+ if (FormatTok->Tok.isAnyIdentifier() || FormatTok->isTypeName(LangOpts)) {
nextToken();
// Return types: pointers are ok too.
while (FormatTok->is(tok::star))
@@ -1891,7 +1914,8 @@ void UnwrappedLineParser::parseStructuralElement(
} else if (Style.BraceWrapping.AfterFunction) {
addUnwrappedLine();
}
- FormatTok->setFinalizedType(TT_FunctionLBrace);
+ if (!Previous || Previous->isNot(TT_TypeDeclarationParen))
+ FormatTok->setFinalizedType(TT_FunctionLBrace);
parseBlock();
IsDecltypeAutoFunction = false;
addUnwrappedLine();
@@ -1968,13 +1992,13 @@ void UnwrappedLineParser::parseStructuralElement(
}
}
- if (!Style.isCpp() && FormatTok->is(Keywords.kw_interface)) {
+ if (!IsCpp && FormatTok->is(Keywords.kw_interface)) {
if (parseStructLike())
return;
break;
}
- if (Style.isCpp() && FormatTok->is(TT_StatementMacro)) {
+ if (IsCpp && FormatTok->is(TT_StatementMacro)) {
parseStatementMacro();
return;
}
@@ -2059,6 +2083,12 @@ void UnwrappedLineParser::parseStructuralElement(
case tok::kw_new:
parseNew();
break;
+ case tok::kw_switch:
+ if (Style.Language == FormatStyle::LK_Java)
+ parseSwitch(/*IsExpr=*/true);
+ else
+ nextToken();
+ break;
case tok::kw_case:
// Proto: there are no switch/case statements.
if (Style.Language == FormatStyle::LK_Proto) {
@@ -2102,11 +2132,16 @@ void UnwrappedLineParser::parseStructuralElement(
return;
}
break;
+ case tok::greater:
+ nextToken();
+ if (FormatTok->is(tok::l_brace))
+ FormatTok->Previous->setFinalizedType(TT_TemplateCloser);
+ break;
default:
nextToken();
break;
}
- } while (!eof());
+ }
}
bool UnwrappedLineParser::tryToParsePropertyAccessor() {
@@ -2131,8 +2166,8 @@ bool UnwrappedLineParser::tryToParsePropertyAccessor() {
bool HasSpecialAccessor = false;
bool IsTrivialPropertyAccessor = true;
while (!eof()) {
- if (Tok->isOneOf(tok::semi, tok::kw_public, tok::kw_private,
- tok::kw_protected, Keywords.kw_internal, Keywords.kw_get,
+ if (Tok->isAccessSpecifierKeyword() ||
+ Tok->isOneOf(tok::semi, Keywords.kw_internal, Keywords.kw_get,
Keywords.kw_init, Keywords.kw_set)) {
if (Tok->isOneOf(Keywords.kw_get, Keywords.kw_init, Keywords.kw_set))
HasSpecialAccessor = true;
@@ -2202,7 +2237,7 @@ bool UnwrappedLineParser::tryToParsePropertyAccessor() {
bool UnwrappedLineParser::tryToParseLambda() {
assert(FormatTok->is(tok::l_square));
- if (!Style.isCpp()) {
+ if (!IsCpp) {
nextToken();
return false;
}
@@ -2214,7 +2249,7 @@ bool UnwrappedLineParser::tryToParseLambda() {
bool InTemplateParameterList = false;
while (FormatTok->isNot(tok::l_brace)) {
- if (FormatTok->isSimpleTypeSpecifier()) {
+ if (FormatTok->isTypeName(LangOpts) || FormatTok->isAttribute()) {
nextToken();
continue;
}
@@ -2235,6 +2270,8 @@ bool UnwrappedLineParser::tryToParseLambda() {
break;
case tok::kw_auto:
case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw_union:
case tok::kw_template:
case tok::kw_typename:
case tok::amp:
@@ -2292,7 +2329,7 @@ bool UnwrappedLineParser::tryToParseLambda() {
// This might or might not actually be a lambda arrow (this could be an
// ObjC method invocation followed by a dereferencing arrow). We might
// reset this back to TT_Unknown in TokenAnnotator.
- FormatTok->setFinalizedType(TT_TrailingReturnArrow);
+ FormatTok->setFinalizedType(TT_LambdaArrow);
SeenArrow = true;
nextToken();
break;
@@ -2331,7 +2368,7 @@ bool UnwrappedLineParser::tryToParseLambdaIntroducer() {
!Previous->isOneOf(tok::kw_return, tok::kw_co_await,
tok::kw_co_yield, tok::kw_co_return)) ||
Previous->closesScope())) ||
- LeftSquare->isCppStructuredBinding(Style)) {
+ LeftSquare->isCppStructuredBinding(IsCpp)) {
return false;
}
if (FormatTok->is(tok::l_square) || tok::isLiteral(FormatTok->Tok.getKind()))
@@ -2412,6 +2449,7 @@ bool UnwrappedLineParser::tryToParseChildBlock() {
}
bool UnwrappedLineParser::parseBracedList(bool IsAngleBracket, bool IsEnum) {
+ assert(!IsAngleBracket || !IsEnum);
bool HasError = false;
// FIXME: Once we have an expression parser in the UnwrappedLineParser,
@@ -2434,8 +2472,11 @@ bool UnwrappedLineParser::parseBracedList(bool IsAngleBracket, bool IsEnum) {
}
}
if (FormatTok->is(IsAngleBracket ? tok::greater : tok::r_brace)) {
- if (IsEnum && !Style.AllowShortEnumsOnASingleLine)
- addUnwrappedLine();
+ if (IsEnum) {
+ FormatTok->setBlockKind(BK_Block);
+ if (!Style.AllowShortEnumsOnASingleLine)
+ addUnwrappedLine();
+ }
nextToken();
return !HasError;
}
@@ -2460,6 +2501,11 @@ bool UnwrappedLineParser::parseBracedList(bool IsAngleBracket, bool IsEnum) {
// Assume there are no blocks inside a braced init list apart
// from the ones we explicitly parse out (like lambdas).
FormatTok->setBlockKind(BK_BracedInit);
+ if (!IsAngleBracket) {
+ auto *Prev = FormatTok->Previous;
+ if (Prev && Prev->is(tok::greater))
+ Prev->setFinalizedType(TT_TemplateCloser);
+ }
nextToken();
parseBracedList();
break;
@@ -2504,6 +2550,7 @@ bool UnwrappedLineParser::parseParens(TokenType AmpAmpTokenType) {
assert(FormatTok->is(tok::l_paren) && "'(' expected.");
auto *LeftParen = FormatTok;
bool SeenEqual = false;
+ bool MightBeFoldExpr = false;
const bool MightBeStmtExpr = Tokens->peekNextToken()->is(tok::l_brace);
nextToken();
do {
@@ -2514,10 +2561,10 @@ bool UnwrappedLineParser::parseParens(TokenType AmpAmpTokenType) {
if (Style.Language == FormatStyle::LK_Java && FormatTok->is(tok::l_brace))
parseChildBlock();
break;
- case tok::r_paren:
- if (!MightBeStmtExpr && !Line->InMacroBody &&
+ case tok::r_paren: {
+ auto *Prev = LeftParen->Previous;
+ if (!MightBeStmtExpr && !MightBeFoldExpr && !Line->InMacroBody &&
Style.RemoveParentheses > FormatStyle::RPS_Leave) {
- const auto *Prev = LeftParen->Previous;
const auto *Next = Tokens->peekNextToken();
const bool DoubleParens =
Prev && Prev->is(tok::l_paren) && Next && Next->is(tok::r_paren);
@@ -2539,8 +2586,17 @@ bool UnwrappedLineParser::parseParens(TokenType AmpAmpTokenType) {
FormatTok->Optional = true;
}
}
+ if (Prev) {
+ if (Prev->is(TT_TypenameMacro)) {
+ LeftParen->setFinalizedType(TT_TypeDeclarationParen);
+ FormatTok->setFinalizedType(TT_TypeDeclarationParen);
+ } else if (Prev->is(tok::greater) && FormatTok->Previous == LeftParen) {
+ Prev->setFinalizedType(TT_TemplateCloser);
+ }
+ }
nextToken();
return SeenEqual;
+ }
case tok::r_brace:
// A "}" inside parenthesis is an error if there wasn't a matching "{".
return SeenEqual;
@@ -2558,6 +2614,10 @@ bool UnwrappedLineParser::parseParens(TokenType AmpAmpTokenType) {
parseBracedList();
}
break;
+ case tok::ellipsis:
+ MightBeFoldExpr = true;
+ nextToken();
+ break;
case tok::equal:
SeenEqual = true;
if (Style.isCSharp() && FormatTok->is(TT_FatArrow))
@@ -2577,6 +2637,12 @@ bool UnwrappedLineParser::parseParens(TokenType AmpAmpTokenType) {
else
nextToken();
break;
+ case tok::kw_switch:
+ if (Style.Language == FormatStyle::LK_Java)
+ parseSwitch(/*IsExpr=*/true);
+ else
+ nextToken();
+ break;
case tok::kw_requires: {
auto RequiresToken = FormatTok;
nextToken();
@@ -2621,6 +2687,7 @@ void UnwrappedLineParser::parseSquare(bool LambdaIntroducer) {
break;
}
case tok::at:
+ case tok::colon:
nextToken();
if (FormatTok->is(tok::l_brace)) {
nextToken();
@@ -2672,7 +2739,9 @@ void UnwrappedLineParser::parseUnbracedBody(bool CheckEOF) {
addUnwrappedLine();
++Line->Level;
+ ++Line->UnbracedBodyLevel;
parseStructuralElement();
+ --Line->UnbracedBodyLevel;
if (Tok) {
assert(!Line->InPPDirective);
@@ -2915,9 +2984,15 @@ void UnwrappedLineParser::parseTryCatch() {
assert(FormatTok->isOneOf(tok::kw_try, tok::kw___try) && "'try' expected");
nextToken();
bool NeedsUnwrappedLine = false;
+ bool HasCtorInitializer = false;
if (FormatTok->is(tok::colon)) {
+ auto *Colon = FormatTok;
// We are in a function try block, what comes is an initializer list.
nextToken();
+ if (FormatTok->is(tok::identifier)) {
+ HasCtorInitializer = true;
+ Colon->setFinalizedType(TT_CtorInitializerColon);
+ }
// In case identifiers were removed by clang-tidy, what might follow is
// multiple commas in sequence - before the first identifier.
@@ -2926,14 +3001,11 @@ void UnwrappedLineParser::parseTryCatch() {
while (FormatTok->is(tok::identifier)) {
nextToken();
- if (FormatTok->is(tok::l_paren))
+ if (FormatTok->is(tok::l_paren)) {
parseParens();
- if (FormatTok->Previous && FormatTok->Previous->is(tok::identifier) &&
- FormatTok->is(tok::l_brace)) {
- do {
- nextToken();
- } while (FormatTok->isNot(tok::r_brace));
+ } else if (FormatTok->is(tok::l_brace)) {
nextToken();
+ parseBracedList();
}
// In case identifiers were removed by clang-tidy, what might follow is
@@ -2949,6 +3021,8 @@ void UnwrappedLineParser::parseTryCatch() {
keepAncestorBraces();
if (FormatTok->is(tok::l_brace)) {
+ if (HasCtorInitializer)
+ FormatTok->setFinalizedType(TT_FunctionLBrace);
CompoundStatementIndenter Indenter(this, Style, Line->Level);
parseBlock();
if (Style.BraceWrapping.BeforeCatch)
@@ -3144,7 +3218,7 @@ void UnwrappedLineParser::parseForOrWhileLoop(bool HasParens) {
// JS' for await ( ...
if (Style.isJavaScript() && FormatTok->is(Keywords.kw_await))
nextToken();
- if (Style.isCpp() && FormatTok->is(tok::kw_co_await))
+ if (IsCpp && FormatTok->is(tok::kw_co_await))
nextToken();
if (HasParens && FormatTok->is(tok::l_paren)) {
// The type is only set for Verilog basically because we were afraid to
@@ -3195,10 +3269,11 @@ void UnwrappedLineParser::parseDoWhile() {
void UnwrappedLineParser::parseLabel(bool LeftAlignLabel) {
nextToken();
unsigned OldLineLevel = Line->Level;
- if (Line->Level > 1 || (!Line->InPPDirective && Line->Level > 0))
- --Line->Level;
+
if (LeftAlignLabel)
Line->Level = 0;
+ else if (Line->Level > 1 || (!Line->InPPDirective && Line->Level > 0))
+ --Line->Level;
if (!Style.IndentCaseBlocks && CommentsBeforeNextToken.empty() &&
FormatTok->is(tok::l_brace)) {
@@ -3233,6 +3308,7 @@ void UnwrappedLineParser::parseLabel(bool LeftAlignLabel) {
void UnwrappedLineParser::parseCaseLabel() {
assert(FormatTok->is(tok::kw_case) && "'case' expected");
+ auto *Case = FormatTok;
// FIXME: fix handling of complex expressions here.
do {
@@ -3241,11 +3317,16 @@ void UnwrappedLineParser::parseCaseLabel() {
FormatTok->setFinalizedType(TT_CaseLabelColon);
break;
}
+ if (Style.Language == FormatStyle::LK_Java && FormatTok->is(tok::arrow)) {
+ FormatTok->setFinalizedType(TT_CaseLabelArrow);
+ Case->setFinalizedType(TT_SwitchExpressionLabel);
+ break;
+ }
} while (!eof());
parseLabel();
}
-void UnwrappedLineParser::parseSwitch() {
+void UnwrappedLineParser::parseSwitch(bool IsExpr) {
assert(FormatTok->is(tok::kw_switch) && "'switch' expected");
nextToken();
if (FormatTok->is(tok::l_paren))
@@ -3255,10 +3336,15 @@ void UnwrappedLineParser::parseSwitch() {
if (FormatTok->is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
- FormatTok->setFinalizedType(TT_ControlStatementLBrace);
- parseBlock();
+ FormatTok->setFinalizedType(IsExpr ? TT_SwitchExpressionLBrace
+ : TT_ControlStatementLBrace);
+ if (IsExpr)
+ parseChildBlock();
+ else
+ parseBlock();
setPreviousRBraceType(TT_ControlStatementRBrace);
- addUnwrappedLine();
+ if (!IsExpr)
+ addUnwrappedLine();
} else {
addUnwrappedLine();
++Line->Level;
@@ -3271,8 +3357,8 @@ void UnwrappedLineParser::parseSwitch() {
}
// Operators that can follow a C variable.
-static bool isCOperatorFollowingVar(tok::TokenKind kind) {
- switch (kind) {
+static bool isCOperatorFollowingVar(tok::TokenKind Kind) {
+ switch (Kind) {
case tok::ampamp:
case tok::ampequal:
case tok::arrow:
@@ -3344,7 +3430,7 @@ void UnwrappedLineParser::parseAccessSpecifier() {
/// \brief Parses a requires, decides if it is a clause or an expression.
/// \pre The current token has to be the requires keyword.
/// \returns true if it parsed a clause.
-bool clang::format::UnwrappedLineParser::parseRequires() {
+bool UnwrappedLineParser::parseRequires() {
assert(FormatTok->is(tok::kw_requires) && "'requires' expected");
auto RequiresToken = FormatTok;
@@ -3407,7 +3493,7 @@ bool clang::format::UnwrappedLineParser::parseRequires() {
break;
}
default:
- if (PreviousNonComment->isTypeOrIdentifier()) {
+ if (PreviousNonComment->isTypeOrIdentifier(LangOpts)) {
// This is a requires clause.
parseRequiresClause(RequiresToken);
return true;
@@ -3446,11 +3532,6 @@ bool clang::format::UnwrappedLineParser::parseRequires() {
return false;
}
break;
- case tok::r_paren:
- case tok::pipepipe:
- FormatTok = Tokens->setPosition(StoredPosition);
- parseRequiresClause(RequiresToken);
- return true;
case tok::eof:
// Break out of the loop.
Lookahead = 50;
@@ -3458,6 +3539,7 @@ bool clang::format::UnwrappedLineParser::parseRequires() {
case tok::coloncolon:
LastWasColonColon = true;
break;
+ case tok::kw_decltype:
case tok::identifier:
if (FoundType && !LastWasColonColon && OpenAngles == 0) {
FormatTok = Tokens->setPosition(StoredPosition);
@@ -3474,7 +3556,7 @@ bool clang::format::UnwrappedLineParser::parseRequires() {
--OpenAngles;
break;
default:
- if (NextToken->isSimpleTypeSpecifier()) {
+ if (NextToken->isTypeName(LangOpts)) {
FormatTok = Tokens->setPosition(StoredPosition);
parseRequiresExpression(RequiresToken);
return false;
@@ -3707,14 +3789,19 @@ bool UnwrappedLineParser::parseEnum() {
if (Style.Language == FormatStyle::LK_Proto && FormatTok->is(tok::equal))
return false;
- // Eat up enum class ...
- if (FormatTok->isOneOf(tok::kw_class, tok::kw_struct))
- nextToken();
+ if (IsCpp) {
+ // Eat up enum class ...
+ if (FormatTok->isOneOf(tok::kw_class, tok::kw_struct))
+ nextToken();
+ while (FormatTok->is(tok::l_square))
+ if (!handleCppAttributes())
+ return false;
+ }
while (FormatTok->Tok.getIdentifierInfo() ||
FormatTok->isOneOf(tok::colon, tok::coloncolon, tok::less,
tok::greater, tok::comma, tok::question,
- tok::l_square, tok::r_square)) {
+ tok::l_square)) {
if (Style.isVerilog()) {
FormatTok->setFinalizedType(TT_VerilogDimensionedTypeName);
nextToken();
@@ -3727,12 +3814,11 @@ bool UnwrappedLineParser::parseEnum() {
// We can have macros or attributes in between 'enum' and the enum name.
if (FormatTok->is(tok::l_paren))
parseParens();
- assert(FormatTok->isNot(TT_AttributeSquare));
if (FormatTok->is(tok::identifier)) {
nextToken();
// If there are two identifiers in a row, this is likely an elaborate
// return type. In Java, this can be "implements", etc.
- if (Style.isCpp() && FormatTok->is(tok::identifier))
+ if (IsCpp && FormatTok->is(tok::identifier))
return false;
}
}
@@ -3907,9 +3993,14 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
const FormatToken &InitialToken = *FormatTok;
nextToken();
+ const FormatToken *ClassName = nullptr;
+ bool IsDerived = false;
auto IsNonMacroIdentifier = [](const FormatToken *Tok) {
return Tok->is(tok::identifier) && Tok->TokenText != Tok->TokenText.upper();
};
+ // JavaScript/TypeScript supports anonymous classes like:
+ // a = class extends foo { }
+ bool JSPastExtendsOrImplements = false;
// The actual identifier can be a nested name specifier, and in macros
// it is often token-pasted.
// An [[attribute]] can be before the identifier.
@@ -3920,6 +4011,7 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
FormatTok->isOneOf(tok::period, tok::comma))) {
if (Style.isJavaScript() &&
FormatTok->isOneOf(Keywords.kw_extends, Keywords.kw_implements)) {
+ JSPastExtendsOrImplements = true;
// JavaScript/TypeScript supports inline object types in
// extends/implements positions:
// class Foo implements {bar: number} { }
@@ -3931,14 +4023,38 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
}
if (FormatTok->is(tok::l_square) && handleCppAttributes())
continue;
+ const auto *Previous = FormatTok;
nextToken();
- // We can have macros in between 'class' and the class name.
- if (!IsNonMacroIdentifier(FormatTok->Previous) &&
- FormatTok->is(tok::l_paren)) {
- parseParens();
+ switch (FormatTok->Tok.getKind()) {
+ case tok::l_paren:
+ // We can have macros in between 'class' and the class name.
+ if (!IsNonMacroIdentifier(Previous) ||
+ // e.g. `struct macro(a) S { int i; };`
+ Previous->Previous == &InitialToken) {
+ parseParens();
+ }
+ break;
+ case tok::coloncolon:
+ case tok::hashhash:
+ break;
+ default:
+ if (!JSPastExtendsOrImplements && !ClassName &&
+ Previous->is(tok::identifier) && Previous->isNot(TT_AttributeMacro)) {
+ ClassName = Previous;
+ }
}
}
+ auto IsListInitialization = [&] {
+ if (!ClassName || IsDerived)
+ return false;
+ assert(FormatTok->is(tok::l_brace));
+ const auto *Prev = FormatTok->getPreviousNonComment();
+ assert(Prev);
+ return Prev != ClassName && Prev->is(tok::identifier) &&
+ Prev->isNot(Keywords.kw_final) && tryToParseBracedList();
+ };
+
if (FormatTok->isOneOf(tok::colon, tok::less)) {
int AngleNestingLevel = 0;
do {
@@ -3947,19 +4063,28 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
else if (FormatTok->is(tok::greater))
--AngleNestingLevel;
- if (AngleNestingLevel == 0 && FormatTok->is(tok::l_paren) &&
- IsNonMacroIdentifier(FormatTok->Previous)) {
- break;
+ if (AngleNestingLevel == 0) {
+ if (FormatTok->is(tok::colon)) {
+ IsDerived = true;
+ } else if (FormatTok->is(tok::identifier) &&
+ FormatTok->Previous->is(tok::coloncolon)) {
+ ClassName = FormatTok;
+ } else if (FormatTok->is(tok::l_paren) &&
+ IsNonMacroIdentifier(FormatTok->Previous)) {
+ break;
+ }
}
if (FormatTok->is(tok::l_brace)) {
+ if (AngleNestingLevel == 0 && IsListInitialization())
+ return;
calculateBraceTypes(/*ExpectClassBody=*/true);
if (!tryToParseBracedList())
break;
}
if (FormatTok->is(tok::l_square)) {
FormatToken *Previous = FormatTok->Previous;
- if (!Previous ||
- !(Previous->is(tok::r_paren) || Previous->isTypeOrIdentifier())) {
+ if (!Previous || (Previous->isNot(tok::r_paren) &&
+ !Previous->isTypeOrIdentifier(LangOpts))) {
// Don't try parsing a lambda if we had a closing parenthesis before,
// it was probably a pointer to an array: int (*)[].
if (!tryToParseLambda())
@@ -3996,6 +4121,8 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
}
};
if (FormatTok->is(tok::l_brace)) {
+ if (IsListInitialization())
+ return;
auto [OpenBraceType, ClosingBraceType] = GetBraceTypes(InitialToken);
FormatTok->setFinalizedType(OpenBraceType);
if (ParseAsExpr) {
@@ -4700,14 +4827,13 @@ void UnwrappedLineParser::readToken(int LevelDifference) {
do {
FormatTok = Tokens->getNextToken();
assert(FormatTok);
- while (FormatTok->getType() == TT_ConflictStart ||
- FormatTok->getType() == TT_ConflictEnd ||
- FormatTok->getType() == TT_ConflictAlternative) {
- if (FormatTok->getType() == TT_ConflictStart)
+ while (FormatTok->isOneOf(TT_ConflictStart, TT_ConflictEnd,
+ TT_ConflictAlternative)) {
+ if (FormatTok->is(TT_ConflictStart))
conditionalCompilationStart(/*Unreachable=*/false);
- else if (FormatTok->getType() == TT_ConflictAlternative)
+ else if (FormatTok->is(TT_ConflictAlternative))
conditionalCompilationAlternative();
- else if (FormatTok->getType() == TT_ConflictEnd)
+ else if (FormatTok->is(TT_ConflictEnd))
conditionalCompilationEnd();
FormatTok = Tokens->getNextToken();
FormatTok->MustBreakBefore = true;
@@ -4753,6 +4879,8 @@ void UnwrappedLineParser::readToken(int LevelDifference) {
PPBranchLevel > 0) {
Line->Level += PPBranchLevel;
}
+ assert(Line->Level >= Line->UnbracedBodyLevel);
+ Line->Level -= Line->UnbracedBodyLevel;
flushComments(isOnNewLine(*FormatTok));
parsePPDirective();
PreviousWasComment = FormatTok->is(tok::comment);
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
index 739298690bbd..d5eeb3d57149 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
@@ -15,17 +15,8 @@
#ifndef LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEPARSER_H
#define LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEPARSER_H
-#include "Encoding.h"
-#include "FormatToken.h"
#include "Macros.h"
-#include "clang/Basic/IdentifierTable.h"
-#include "clang/Format/Format.h"
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/BitVector.h"
-#include "llvm/Support/Regex.h"
-#include <list>
#include <stack>
-#include <vector>
namespace clang {
namespace format {
@@ -58,6 +49,9 @@ struct UnwrappedLine {
/// Whether it is part of a macro body.
bool InMacroBody = false;
+ /// Nesting level of unbraced body of a control statement.
+ unsigned UnbracedBodyLevel = 0;
+
bool MustBeDeclaration = false;
/// Whether the parser has seen \c decltype(auto) in this line.
@@ -166,7 +160,7 @@ private:
void parseDoWhile();
void parseLabel(bool LeftAlignLabel = false);
void parseCaseLabel();
- void parseSwitch();
+ void parseSwitch(bool IsExpr);
void parseNamespace();
bool parseModuleImport();
void parseNew();
@@ -324,6 +318,8 @@ private:
llvm::BitVector DeclarationScopeStack;
const FormatStyle &Style;
+ bool IsCpp;
+ LangOptions LangOpts;
const AdditionalKeywords &Keywords;
llvm::Regex CommentPragmasRegex;
@@ -420,6 +416,8 @@ struct UnwrappedLineNode {
SmallVector<UnwrappedLine, 0> Children;
};
+std::ostream &operator<<(std::ostream &Stream, const UnwrappedLine &Line);
+
} // end namespace format
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
index df84f97a8e8a..fd4a40a86082 100644
--- a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
+++ b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
@@ -107,10 +107,16 @@ const tooling::Replacements &WhitespaceManager::generateReplacements() {
llvm::sort(Changes, Change::IsBeforeInFile(SourceMgr));
calculateLineBreakInformation();
alignConsecutiveMacros();
- alignConsecutiveShortCaseStatements();
+ alignConsecutiveShortCaseStatements(/*IsExpr=*/true);
+ alignConsecutiveShortCaseStatements(/*IsExpr=*/false);
alignConsecutiveDeclarations();
alignConsecutiveBitFields();
alignConsecutiveAssignments();
+ if (Style.isTableGen()) {
+ alignConsecutiveTableGenBreakingDAGArgColons();
+ alignConsecutiveTableGenCondOperatorColons();
+ alignConsecutiveTableGenDefinitions();
+ }
alignChainedConditionals();
alignTrailingComments();
alignEscapedNewlines();
@@ -123,11 +129,14 @@ const tooling::Replacements &WhitespaceManager::generateReplacements() {
void WhitespaceManager::calculateLineBreakInformation() {
Changes[0].PreviousEndOfTokenColumn = 0;
Change *LastOutsideTokenChange = &Changes[0];
- for (unsigned i = 1, e = Changes.size(); i != e; ++i) {
+ for (unsigned I = 1, e = Changes.size(); I != e; ++I) {
+ auto &C = Changes[I];
+ auto &P = Changes[I - 1];
+ auto &PrevTokLength = P.TokenLength;
SourceLocation OriginalWhitespaceStart =
- Changes[i].OriginalWhitespaceRange.getBegin();
+ C.OriginalWhitespaceRange.getBegin();
SourceLocation PreviousOriginalWhitespaceEnd =
- Changes[i - 1].OriginalWhitespaceRange.getEnd();
+ P.OriginalWhitespaceRange.getEnd();
unsigned OriginalWhitespaceStartOffset =
SourceMgr.getFileOffset(OriginalWhitespaceStart);
unsigned PreviousOriginalWhitespaceEndOffset =
@@ -162,31 +171,28 @@ void WhitespaceManager::calculateLineBreakInformation() {
// line of the token.
auto NewlinePos = Text.find_first_of('\n');
if (NewlinePos == StringRef::npos) {
- Changes[i - 1].TokenLength = OriginalWhitespaceStartOffset -
- PreviousOriginalWhitespaceEndOffset +
- Changes[i].PreviousLinePostfix.size() +
- Changes[i - 1].CurrentLinePrefix.size();
+ PrevTokLength = OriginalWhitespaceStartOffset -
+ PreviousOriginalWhitespaceEndOffset +
+ C.PreviousLinePostfix.size() + P.CurrentLinePrefix.size();
+ if (!P.IsInsideToken)
+ PrevTokLength = std::min(PrevTokLength, P.Tok->ColumnWidth);
} else {
- Changes[i - 1].TokenLength =
- NewlinePos + Changes[i - 1].CurrentLinePrefix.size();
+ PrevTokLength = NewlinePos + P.CurrentLinePrefix.size();
}
// If there are multiple changes in this token, sum up all the changes until
// the end of the line.
- if (Changes[i - 1].IsInsideToken && Changes[i - 1].NewlinesBefore == 0) {
- LastOutsideTokenChange->TokenLength +=
- Changes[i - 1].TokenLength + Changes[i - 1].Spaces;
- } else {
- LastOutsideTokenChange = &Changes[i - 1];
- }
+ if (P.IsInsideToken && P.NewlinesBefore == 0)
+ LastOutsideTokenChange->TokenLength += PrevTokLength + P.Spaces;
+ else
+ LastOutsideTokenChange = &P;
- Changes[i].PreviousEndOfTokenColumn =
- Changes[i - 1].StartOfTokenColumn + Changes[i - 1].TokenLength;
+ C.PreviousEndOfTokenColumn = P.StartOfTokenColumn + PrevTokLength;
- Changes[i - 1].IsTrailingComment =
- (Changes[i].NewlinesBefore > 0 || Changes[i].Tok->is(tok::eof) ||
- (Changes[i].IsInsideToken && Changes[i].Tok->is(tok::comment))) &&
- Changes[i - 1].Tok->is(tok::comment) &&
+ P.IsTrailingComment =
+ (C.NewlinesBefore > 0 || C.Tok->is(tok::eof) ||
+ (C.IsInsideToken && C.Tok->is(tok::comment))) &&
+ P.Tok->is(tok::comment) &&
// FIXME: This is a dirty hack. The problem is that
// BreakableLineCommentSection does comment reflow changes and here is
// the aligning of trailing comments. Consider the case where we reflow
@@ -459,16 +465,18 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
if (i + 1 != Changes.size())
Changes[i + 1].PreviousEndOfTokenColumn += Shift;
- // If PointerAlignment is PAS_Right, keep *s or &s next to the token
+ // If PointerAlignment is PAS_Right, keep *s or &s next to the token,
+ // except if the token is equal, then a space is needed.
if ((Style.PointerAlignment == FormatStyle::PAS_Right ||
Style.ReferenceAlignment == FormatStyle::RAS_Right) &&
- CurrentChange.Spaces != 0) {
+ CurrentChange.Spaces != 0 &&
+ !CurrentChange.Tok->isOneOf(tok::equal, tok::r_paren,
+ TT_TemplateCloser)) {
const bool ReferenceNotRightAligned =
Style.ReferenceAlignment != FormatStyle::RAS_Right &&
Style.ReferenceAlignment != FormatStyle::RAS_Pointer;
for (int Previous = i - 1;
- Previous >= 0 &&
- Changes[Previous].Tok->getType() == TT_PointerOrReference;
+ Previous >= 0 && Changes[Previous].Tok->is(TT_PointerOrReference);
--Previous) {
assert(Changes[Previous].Tok->isPointerOrReference());
if (Changes[Previous].Tok->isNot(tok::star)) {
@@ -849,7 +857,12 @@ void WhitespaceManager::alignConsecutiveAssignments() {
}
void WhitespaceManager::alignConsecutiveBitFields() {
- if (!Style.AlignConsecutiveBitFields.Enabled)
+ alignConsecutiveColons(Style.AlignConsecutiveBitFields, TT_BitFieldColon);
+}
+
+void WhitespaceManager::alignConsecutiveColons(
+ const FormatStyle::AlignConsecutiveStyle &AlignStyle, TokenType Type) {
+ if (!AlignStyle.Enabled)
return;
AlignTokens(
@@ -863,27 +876,32 @@ void WhitespaceManager::alignConsecutiveBitFields() {
if (&C != &Changes.back() && (&C + 1)->NewlinesBefore > 0)
return false;
- return C.Tok->is(TT_BitFieldColon);
+ return C.Tok->is(Type);
},
- Changes, /*StartAt=*/0, Style.AlignConsecutiveBitFields);
+ Changes, /*StartAt=*/0, AlignStyle);
}
-void WhitespaceManager::alignConsecutiveShortCaseStatements() {
+void WhitespaceManager::alignConsecutiveShortCaseStatements(bool IsExpr) {
if (!Style.AlignConsecutiveShortCaseStatements.Enabled ||
- !Style.AllowShortCaseLabelsOnASingleLine) {
+ !(IsExpr ? Style.AllowShortCaseExpressionOnASingleLine
+ : Style.AllowShortCaseLabelsOnASingleLine)) {
return;
}
+ const auto Type = IsExpr ? TT_CaseLabelArrow : TT_CaseLabelColon;
+ const auto &Option = Style.AlignConsecutiveShortCaseStatements;
+ const bool AlignArrowOrColon =
+ IsExpr ? Option.AlignCaseArrows : Option.AlignCaseColons;
+
auto Matches = [&](const Change &C) {
- if (Style.AlignConsecutiveShortCaseStatements.AlignCaseColons)
- return C.Tok->is(TT_CaseLabelColon);
+ if (AlignArrowOrColon)
+ return C.Tok->is(Type);
// Ignore 'IsInsideToken' to allow matching trailing comments which
// need to be reflowed as that causes the token to appear in two
// different changes, which will cause incorrect alignment as we'll
// reflow early due to detecting multiple aligning tokens per line.
- return !C.IsInsideToken && C.Tok->Previous &&
- C.Tok->Previous->is(TT_CaseLabelColon);
+ return !C.IsInsideToken && C.Tok->Previous && C.Tok->Previous->is(Type);
};
unsigned MinColumn = 0;
@@ -934,7 +952,7 @@ void WhitespaceManager::alignConsecutiveShortCaseStatements() {
if (Changes[I].Tok->isNot(tok::comment))
LineIsComment = false;
- if (Changes[I].Tok->is(TT_CaseLabelColon)) {
+ if (Changes[I].Tok->is(Type)) {
LineIsEmptyCase =
!Changes[I].Tok->Next || Changes[I].Tok->Next->isTrailingComment();
@@ -972,6 +990,21 @@ void WhitespaceManager::alignConsecutiveShortCaseStatements() {
Changes);
}
+void WhitespaceManager::alignConsecutiveTableGenBreakingDAGArgColons() {
+ alignConsecutiveColons(Style.AlignConsecutiveTableGenBreakingDAGArgColons,
+ TT_TableGenDAGArgListColonToAlign);
+}
+
+void WhitespaceManager::alignConsecutiveTableGenCondOperatorColons() {
+ alignConsecutiveColons(Style.AlignConsecutiveTableGenCondOperatorColons,
+ TT_TableGenCondOperatorColon);
+}
+
+void WhitespaceManager::alignConsecutiveTableGenDefinitions() {
+ alignConsecutiveColons(Style.AlignConsecutiveTableGenDefinitionColons,
+ TT_InheritanceColon);
+}
+
void WhitespaceManager::alignConsecutiveDeclarations() {
if (!Style.AlignConsecutiveDeclarations.Enabled)
return;
@@ -1085,7 +1118,7 @@ void WhitespaceManager::alignTrailingComments() {
// leave the comments.
if (RestoredLineLength >= Style.ColumnLimit && Style.ColumnLimit > 0)
break;
- C.Spaces = OriginalSpaces;
+ C.Spaces = C.NewlinesBefore > 0 ? C.Tok->OriginalColumn : OriginalSpaces;
continue;
}
@@ -1214,22 +1247,29 @@ void WhitespaceManager::alignTrailingComments(unsigned Start, unsigned End,
}
void WhitespaceManager::alignEscapedNewlines() {
- if (Style.AlignEscapedNewlines == FormatStyle::ENAS_DontAlign)
+ const auto Align = Style.AlignEscapedNewlines;
+ if (Align == FormatStyle::ENAS_DontAlign)
return;
- bool AlignLeft = Style.AlignEscapedNewlines == FormatStyle::ENAS_Left;
- unsigned MaxEndOfLine = AlignLeft ? 0 : Style.ColumnLimit;
+ const bool WithLastLine = Align == FormatStyle::ENAS_LeftWithLastLine;
+ const bool AlignLeft = Align == FormatStyle::ENAS_Left || WithLastLine;
+ const auto MaxColumn = Style.ColumnLimit;
+ unsigned MaxEndOfLine = AlignLeft ? 0 : MaxColumn;
unsigned StartOfMacro = 0;
for (unsigned i = 1, e = Changes.size(); i < e; ++i) {
Change &C = Changes[i];
- if (C.NewlinesBefore > 0) {
- if (C.ContinuesPPDirective) {
- MaxEndOfLine = std::max(C.PreviousEndOfTokenColumn + 2, MaxEndOfLine);
- } else {
- alignEscapedNewlines(StartOfMacro + 1, i, MaxEndOfLine);
- MaxEndOfLine = AlignLeft ? 0 : Style.ColumnLimit;
- StartOfMacro = i;
- }
+ if (C.NewlinesBefore == 0 && (!WithLastLine || C.Tok->isNot(tok::eof)))
+ continue;
+ const bool InPPDirective = C.ContinuesPPDirective;
+ const auto BackslashColumn = C.PreviousEndOfTokenColumn + 2;
+ if (InPPDirective ||
+ (WithLastLine && (MaxColumn == 0 || BackslashColumn <= MaxColumn))) {
+ MaxEndOfLine = std::max(BackslashColumn, MaxEndOfLine);
+ }
+ if (!InPPDirective) {
+ alignEscapedNewlines(StartOfMacro + 1, i, MaxEndOfLine);
+ MaxEndOfLine = AlignLeft ? 0 : MaxColumn;
+ StartOfMacro = i;
}
}
alignEscapedNewlines(StartOfMacro + 1, Changes.size(), MaxEndOfLine);
@@ -1466,10 +1506,10 @@ WhitespaceManager::CellDescriptions WhitespaceManager::getCells(unsigned Start,
: Cell);
// Go to the next non-comment and ensure there is a break in front
const auto *NextNonComment = C.Tok->getNextNonComment();
- while (NextNonComment->is(tok::comma))
+ while (NextNonComment && NextNonComment->is(tok::comma))
NextNonComment = NextNonComment->getNextNonComment();
auto j = i;
- while (Changes[j].Tok != NextNonComment && j < End)
+ while (j < End && Changes[j].Tok != NextNonComment)
++j;
if (j < End && Changes[j].NewlinesBefore == 0 &&
Changes[j].Tok->isNot(tok::r_brace)) {
diff --git a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h
index 8ac73305871a..7b91d8bf4db7 100644
--- a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h
+++ b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h
@@ -17,11 +17,6 @@
#include "TokenAnnotator.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Format/Format.h"
-#include "llvm/ADT/SmallVector.h"
-#include <algorithm>
-#include <string>
-#include <tuple>
namespace clang {
namespace format {
@@ -226,6 +221,11 @@ private:
/// Align consecutive bitfields over all \c Changes.
void alignConsecutiveBitFields();
+ /// Align consecutive colons. For bitfields, TableGen DAGArgs and definitions.
+ void
+ alignConsecutiveColons(const FormatStyle::AlignConsecutiveStyle &AlignStyle,
+ TokenType Type);
+
/// Align consecutive declarations over all \c Changes.
void alignConsecutiveDeclarations();
@@ -233,7 +233,16 @@ private:
void alignChainedConditionals();
/// Align consecutive short case statements over all \c Changes.
- void alignConsecutiveShortCaseStatements();
+ void alignConsecutiveShortCaseStatements(bool IsExpr);
+
+ /// Align consecutive TableGen DAGArg colon over all \c Changes.
+ void alignConsecutiveTableGenBreakingDAGArgColons();
+
+ /// Align consecutive TableGen cond operator colon over all \c Changes.
+ void alignConsecutiveTableGenCondOperatorColons();
+
+ /// Align consecutive TableGen definitions over all \c Changes.
+ void alignConsecutiveTableGenDefinitions();
/// Align trailing comments over all \c Changes.
void alignTrailingComments();
diff --git a/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp b/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp
index f09a01b5dd4a..67d4c07d1ce3 100644
--- a/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp
@@ -56,6 +56,7 @@
#include "clang/Sema/CodeCompleteConsumer.h"
#include "clang/Sema/CodeCompleteOptions.h"
#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaCodeCompletion.h"
#include "clang/Serialization/ASTBitCodes.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/ASTWriter.h"
@@ -375,8 +376,8 @@ void ASTUnit::CacheCodeCompletionResults() {
SmallVector<Result, 8> Results;
CachedCompletionAllocator = std::make_shared<GlobalCodeCompletionAllocator>();
CodeCompletionTUInfo CCTUInfo(CachedCompletionAllocator);
- TheSema->GatherGlobalCodeCompletions(*CachedCompletionAllocator,
- CCTUInfo, Results);
+ TheSema->CodeCompletion().GatherGlobalCodeCompletions(
+ *CachedCompletionAllocator, CCTUInfo, Results);
// Translate global code completions into cached completions.
llvm::DenseMap<CanQualType, unsigned> CompletionTypes;
@@ -540,7 +541,17 @@ public:
if (InitializedLanguage)
return false;
+ // FIXME: We did similar things in ReadHeaderSearchOptions too. But such
+ // style is not scaling. Probably we need to invite some mechanism to
+ // handle such patterns generally.
+ auto PICLevel = LangOpt.PICLevel;
+ auto PIE = LangOpt.PIE;
+
LangOpt = LangOpts;
+
+ LangOpt.PICLevel = PICLevel;
+ LangOpt.PIE = PIE;
+
InitializedLanguage = true;
updated();
@@ -790,7 +801,8 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
const std::string &Filename, const PCHContainerReader &PCHContainerRdr,
WhatToLoad ToLoad, IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
const FileSystemOptions &FileSystemOpts,
- std::shared_ptr<HeaderSearchOptions> HSOpts, bool OnlyLocalDecls,
+ std::shared_ptr<HeaderSearchOptions> HSOpts,
+ std::shared_ptr<LangOptions> LangOpts, bool OnlyLocalDecls,
CaptureDiagsKind CaptureDiagnostics, bool AllowASTWithCompilerErrors,
bool UserFilesAreVolatile, IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
std::unique_ptr<ASTUnit> AST(new ASTUnit(true));
@@ -804,7 +816,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
ConfigureDiags(Diags, *AST, CaptureDiagnostics);
- AST->LangOpts = std::make_shared<LangOptions>();
+ AST->LangOpts = LangOpts ? LangOpts : std::make_shared<LangOptions>();
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->CaptureDiagnostics = CaptureDiagnostics;
AST->Diagnostics = Diags;
@@ -1056,7 +1068,7 @@ public:
std::vector<Decl *> takeTopLevelDecls() { return std::move(TopLevelDecls); }
- std::vector<serialization::DeclID> takeTopLevelDeclIDs() {
+ std::vector<LocalDeclID> takeTopLevelDeclIDs() {
return std::move(TopLevelDeclIDs);
}
@@ -1090,7 +1102,7 @@ public:
private:
unsigned Hash = 0;
std::vector<Decl *> TopLevelDecls;
- std::vector<serialization::DeclID> TopLevelDeclIDs;
+ std::vector<LocalDeclID> TopLevelDeclIDs;
llvm::SmallVector<ASTUnit::StandaloneDiagnostic, 4> PreambleDiags;
};
@@ -1456,11 +1468,12 @@ void ASTUnit::RealizeTopLevelDeclsFromPreamble() {
std::vector<Decl *> Resolved;
Resolved.reserve(TopLevelDeclsInPreamble.size());
- ExternalASTSource &Source = *getASTContext().getExternalSource();
+ // The module file of the preamble.
+ serialization::ModuleFile &MF = Reader->getModuleManager().getPrimaryModule();
for (const auto TopLevelDecl : TopLevelDeclsInPreamble) {
// Resolve the declaration ID to an actual declaration, possibly
// deserializing the declaration in the process.
- if (Decl *D = Source.GetExternalDecl(TopLevelDecl))
+ if (Decl *D = Reader->GetLocalDecl(MF, TopLevelDecl))
Resolved.push_back(D);
}
TopLevelDeclsInPreamble.clear();
@@ -2362,8 +2375,6 @@ bool ASTUnit::serialize(raw_ostream &OS) {
return serializeUnit(Writer, Buffer, getSema(), OS);
}
-using SLocRemap = ContinuousRangeMap<unsigned, int, 2>;
-
void ASTUnit::TranslateStoredDiagnostics(
FileManager &FileMgr,
SourceManager &SrcMgr,
diff --git a/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp b/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
index a25aa88bd85e..6242b5a7d9fe 100644
--- a/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
@@ -411,8 +411,7 @@ static void InitializeFileRemapping(DiagnosticsEngine &Diags,
SourceMgr.overrideFileContents(FromFile, RB.second->getMemBufferRef());
else
SourceMgr.overrideFileContents(
- FromFile, std::unique_ptr<llvm::MemoryBuffer>(
- const_cast<llvm::MemoryBuffer *>(RB.second)));
+ FromFile, std::unique_ptr<llvm::MemoryBuffer>(RB.second));
}
// Remap files in the source manager (with other files).
@@ -1047,6 +1046,11 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
if (getFrontendOpts().ShowStats || !getFrontendOpts().StatsFile.empty())
llvm::EnableStatistics(false);
+ // Sort vectors containing toc data and no toc data variables to facilitate
+ // binary search later.
+ llvm::sort(getCodeGenOpts().TocDataVarsUserSpecified);
+ llvm::sort(getCodeGenOpts().NoTocDataVars);
+
for (const FrontendInputFile &FIF : getFrontendOpts().Inputs) {
// Reset the ID tables if we are reusing the SourceManager and parsing
// regular files.
@@ -1061,30 +1065,7 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
}
}
- if (getDiagnosticOpts().ShowCarets) {
- // We can have multiple diagnostics sharing one diagnostic client.
- // Get the total number of warnings/errors from the client.
- unsigned NumWarnings = getDiagnostics().getClient()->getNumWarnings();
- unsigned NumErrors = getDiagnostics().getClient()->getNumErrors();
-
- if (NumWarnings)
- OS << NumWarnings << " warning" << (NumWarnings == 1 ? "" : "s");
- if (NumWarnings && NumErrors)
- OS << " and ";
- if (NumErrors)
- OS << NumErrors << " error" << (NumErrors == 1 ? "" : "s");
- if (NumWarnings || NumErrors) {
- OS << " generated";
- if (getLangOpts().CUDA) {
- if (!getLangOpts().CUDAIsDevice) {
- OS << " when compiling for host";
- } else {
- OS << " when compiling for " << getTargetOpts().CPU;
- }
- }
- OS << ".\n";
- }
- }
+ printDiagnosticStats();
if (getFrontendOpts().ShowStats) {
if (hasFileManager()) {
@@ -1112,6 +1093,36 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
return !getDiagnostics().getClient()->getNumErrors();
}
+void CompilerInstance::printDiagnosticStats() {
+ if (!getDiagnosticOpts().ShowCarets)
+ return;
+
+ raw_ostream &OS = getVerboseOutputStream();
+
+ // We can have multiple diagnostics sharing one diagnostic client.
+ // Get the total number of warnings/errors from the client.
+ unsigned NumWarnings = getDiagnostics().getClient()->getNumWarnings();
+ unsigned NumErrors = getDiagnostics().getClient()->getNumErrors();
+
+ if (NumWarnings)
+ OS << NumWarnings << " warning" << (NumWarnings == 1 ? "" : "s");
+ if (NumWarnings && NumErrors)
+ OS << " and ";
+ if (NumErrors)
+ OS << NumErrors << " error" << (NumErrors == 1 ? "" : "s");
+ if (NumWarnings || NumErrors) {
+ OS << " generated";
+ if (getLangOpts().CUDA) {
+ if (!getLangOpts().CUDAIsDevice) {
+ OS << " when compiling for host";
+ } else {
+ OS << " when compiling for " << getTargetOpts().CPU;
+ }
+ }
+ OS << ".\n";
+ }
+}
+
void CompilerInstance::LoadRequestedPlugins() {
// Load any requested plugins.
for (const std::string &Path : getFrontendOpts().Plugins) {
@@ -1194,16 +1205,6 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
// Note the name of the module we're building.
Invocation->getLangOpts().CurrentModule = std::string(ModuleName);
- // Make sure that the failed-module structure has been allocated in
- // the importing instance, and propagate the pointer to the newly-created
- // instance.
- PreprocessorOptions &ImportingPPOpts
- = ImportingInstance.getInvocation().getPreprocessorOpts();
- if (!ImportingPPOpts.FailedModules)
- ImportingPPOpts.FailedModules =
- std::make_shared<PreprocessorOptions::FailedModulesSet>();
- PPOpts.FailedModules = ImportingPPOpts.FailedModules;
-
// If there is a module map file, build the module using the module map.
// Set up the inputs/outputs so that we build the module from its umbrella
// header.
@@ -1257,6 +1258,13 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
SourceMgr.pushModuleBuildStack(ModuleName,
FullSourceLoc(ImportLoc, ImportingInstance.getSourceManager()));
+ // Make sure that the failed-module structure has been allocated in
+ // the importing instance, and propagate the pointer to the newly-created
+ // instance.
+ if (!ImportingInstance.hasFailedModulesSet())
+ ImportingInstance.createFailedModulesSet();
+ Instance.setFailedModulesSet(ImportingInstance.getFailedModulesSetPtr());
+
// If we're collecting module dependencies, we need to share a collector
// between all of the module CompilerInstances. Other than that, we don't
// want to produce any dependency output from the module build.
@@ -1284,6 +1292,10 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
diag::remark_module_build_done)
<< ModuleName;
+ // Propagate the statistics to the parent FileManager.
+ if (!FrontendOpts.ModulesShareFileManager)
+ ImportingInstance.getFileManager().AddStats(Instance.getFileManager());
+
if (Crashed) {
// Clear the ASTConsumer if it hasn't been already, in case it owns streams
// that must be closed before clearing output files.
@@ -1325,9 +1337,24 @@ static bool compileModule(CompilerInstance &ImportingInstance,
// Get or create the module map that we'll use to build this module.
ModuleMap &ModMap
= ImportingInstance.getPreprocessor().getHeaderSearchInfo().getModuleMap();
+ SourceManager &SourceMgr = ImportingInstance.getSourceManager();
bool Result;
- if (OptionalFileEntryRef ModuleMapFile =
- ModMap.getContainingModuleMapFile(Module)) {
+ if (FileID ModuleMapFID = ModMap.getContainingModuleMapFileID(Module);
+ ModuleMapFID.isValid()) {
+ // We want to use the top-level module map. If we don't, the compiling
+ // instance may think the containing module map is a top-level one, while
+ // the importing instance knows it's included from a parent module map via
+ // the extern directive. This mismatch could bite us later.
+ SourceLocation Loc = SourceMgr.getIncludeLoc(ModuleMapFID);
+ while (Loc.isValid() && isModuleMap(SourceMgr.getFileCharacteristic(Loc))) {
+ ModuleMapFID = SourceMgr.getFileID(Loc);
+ Loc = SourceMgr.getIncludeLoc(ModuleMapFID);
+ }
+
+ OptionalFileEntryRef ModuleMapFile =
+ SourceMgr.getFileEntryRefForID(ModuleMapFID);
+ assert(ModuleMapFile && "Top-level module map with no FileID");
+
// Canonicalize compilation to start with the public module map. This is
// vital for submodules declarations in the private module maps to be
// correctly parsed when depending on a top level module in the public one.
@@ -1584,6 +1611,14 @@ static void checkConfigMacro(Preprocessor &PP, StringRef ConfigMacro,
}
}
+static void checkConfigMacros(Preprocessor &PP, Module *M,
+ SourceLocation ImportLoc) {
+ clang::Module *TopModule = M->getTopLevelModule();
+ for (const StringRef ConMacro : TopModule->ConfigMacros) {
+ checkConfigMacro(PP, ConMacro, M, ImportLoc);
+ }
+}
+
/// Write a new timestamp file with the given path.
static void writeTimestampFile(StringRef TimestampFile) {
std::error_code EC;
@@ -1822,6 +1857,13 @@ ModuleLoadResult CompilerInstance::findOrCompileModuleAndReadAST(
Module *M =
HS.lookupModule(ModuleName, ImportLoc, true, !IsInclusionDirective);
+ // Check for any configuration macros that have changed. This is done
+ // immediately before potentially building a module in case this module
+ // depends on having one of its configuration macros defined to successfully
+ // build. If this is not done the user will never see the warning.
+ if (M)
+ checkConfigMacros(getPreprocessor(), M, ImportLoc);
+
// Select the source and filename for loading the named module.
std::string ModuleFilename;
ModuleSource Source =
@@ -1950,10 +1992,8 @@ ModuleLoadResult CompilerInstance::findOrCompileModuleAndReadAST(
return nullptr;
}
- // Check whether we have already attempted to build this module (but
- // failed).
- if (getPreprocessorOpts().FailedModules &&
- getPreprocessorOpts().FailedModules->hasAlreadyFailed(ModuleName)) {
+ // Check whether we have already attempted to build this module (but failed).
+ if (FailedModules && FailedModules->hasAlreadyFailed(ModuleName)) {
getDiagnostics().Report(ModuleNameLoc, diag::err_module_not_built)
<< ModuleName << SourceRange(ImportLoc, ModuleNameLoc);
return nullptr;
@@ -1964,8 +2004,8 @@ ModuleLoadResult CompilerInstance::findOrCompileModuleAndReadAST(
ModuleFilename)) {
assert(getDiagnostics().hasErrorOccurred() &&
"undiagnosed error in compileModuleAndReadAST");
- if (getPreprocessorOpts().FailedModules)
- getPreprocessorOpts().FailedModules->addFailed(ModuleName);
+ if (FailedModules)
+ FailedModules->addFailed(ModuleName);
return nullptr;
}
@@ -1999,12 +2039,23 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
if (auto MaybeModule = MM.getCachedModuleLoad(*Path[0].first)) {
// Use the cached result, which may be nullptr.
Module = *MaybeModule;
+ // Config macros are already checked before building a module, but they need
+ // to be checked at each import location in case any of the config macros
+ // have a new value at the current `ImportLoc`.
+ if (Module)
+ checkConfigMacros(getPreprocessor(), Module, ImportLoc);
} else if (ModuleName == getLangOpts().CurrentModule) {
// This is the module we're building.
Module = PP->getHeaderSearchInfo().lookupModule(
ModuleName, ImportLoc, /*AllowSearch*/ true,
/*AllowExtraModuleMapSearch*/ !IsInclusionDirective);
+ // Config macros do not need to be checked here for two reasons.
+ // * This will always be textual inclusion, and thus the config macros
+ // actually do impact the content of the header.
+ // * `Preprocessor::HandleHeaderIncludeOrImport` will never call this
+ // function as the `#include` or `#import` is textual.
+
MM.cacheModuleLoad(*Path[0].first, Module);
} else {
ModuleLoadResult Result = findOrCompileModuleAndReadAST(
@@ -2139,18 +2190,11 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
TheASTReader->makeModuleVisible(Module, Visibility, ImportLoc);
}
- // Check for any configuration macros that have changed.
- clang::Module *TopModule = Module->getTopLevelModule();
- for (unsigned I = 0, N = TopModule->ConfigMacros.size(); I != N; ++I) {
- checkConfigMacro(getPreprocessor(), TopModule->ConfigMacros[I],
- Module, ImportLoc);
- }
-
// Resolve any remaining module using export_as for this one.
getPreprocessor()
.getHeaderSearchInfo()
.getModuleMap()
- .resolveLinkAsDependencies(TopModule);
+ .resolveLinkAsDependencies(Module->getTopLevelModule());
LastModuleImportLoc = ImportLoc;
LastModuleImportResult = ModuleLoadResult(Module);
diff --git a/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp b/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
index feb4de2084b8..028fdb2cc6b9 100644
--- a/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
@@ -191,6 +191,17 @@ CompilerInvocationBase::shallow_copy_assign(const CompilerInvocationBase &X) {
return *this;
}
+CompilerInvocation::CompilerInvocation(const CowCompilerInvocation &X)
+ : CompilerInvocationBase(EmptyConstructor{}) {
+ CompilerInvocationBase::deep_copy_assign(X);
+}
+
+CompilerInvocation &
+CompilerInvocation::operator=(const CowCompilerInvocation &X) {
+ CompilerInvocationBase::deep_copy_assign(X);
+ return *this;
+}
+
namespace {
template <typename T>
T &ensureOwned(std::shared_ptr<T> &Storage) {
@@ -522,10 +533,10 @@ static T extractMaskValue(T KeyPath) {
#define PARSE_OPTION_WITH_MARSHALLING( \
ARGS, DIAGS, PREFIX_TYPE, SPELLING, ID, KIND, GROUP, ALIAS, ALIASARGS, \
- FLAGS, VISIBILITY, PARAM, HELPTEXT, METAVAR, VALUES, SHOULD_PARSE, \
- ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, \
- NORMALIZER, DENORMALIZER, MERGER, EXTRACTOR, TABLE_INDEX) \
- if ((VISIBILITY)&options::CC1Option) { \
+ FLAGS, VISIBILITY, PARAM, HELPTEXT, HELPTEXTSFORVARIANTS, METAVAR, VALUES, \
+ SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, IMPLIED_CHECK, \
+ IMPLIED_VALUE, NORMALIZER, DENORMALIZER, MERGER, EXTRACTOR, TABLE_INDEX) \
+ if ((VISIBILITY) & options::CC1Option) { \
KEYPATH = MERGER(KEYPATH, DEFAULT_VALUE); \
if (IMPLIED_CHECK) \
KEYPATH = MERGER(KEYPATH, IMPLIED_VALUE); \
@@ -539,10 +550,10 @@ static T extractMaskValue(T KeyPath) {
// with lifetime extension of the reference.
#define GENERATE_OPTION_WITH_MARSHALLING( \
CONSUMER, PREFIX_TYPE, SPELLING, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, \
- VISIBILITY, PARAM, HELPTEXT, METAVAR, VALUES, SHOULD_PARSE, ALWAYS_EMIT, \
- KEYPATH, DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, \
- DENORMALIZER, MERGER, EXTRACTOR, TABLE_INDEX) \
- if ((VISIBILITY)&options::CC1Option) { \
+ VISIBILITY, PARAM, HELPTEXT, HELPTEXTSFORVARIANTS, METAVAR, VALUES, \
+ SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, IMPLIED_CHECK, \
+ IMPLIED_VALUE, NORMALIZER, DENORMALIZER, MERGER, EXTRACTOR, TABLE_INDEX) \
+ if ((VISIBILITY) & options::CC1Option) { \
[&](const auto &Extracted) { \
if (ALWAYS_EMIT || \
(Extracted != \
@@ -599,6 +610,19 @@ static bool FixupInvocation(CompilerInvocation &Invocation,
LangOpts.NewAlignOverride = 0;
}
+ // The -f[no-]raw-string-literals option is only valid in C and in C++
+ // standards before C++11.
+ if (LangOpts.CPlusPlus11) {
+ if (Args.hasArg(OPT_fraw_string_literals, OPT_fno_raw_string_literals)) {
+ Args.claimAllArgs(OPT_fraw_string_literals, OPT_fno_raw_string_literals);
+ Diags.Report(diag::warn_drv_fraw_string_literals_in_cxx11)
+ << bool(LangOpts.RawStringLiterals);
+ }
+
+ // Do not allow disabling raw string literals in C++11 or later.
+ LangOpts.RawStringLiterals = true;
+ }
+
// Prevent the user from specifying both -fsycl-is-device and -fsycl-is-host.
if (LangOpts.SYCLIsDevice && LangOpts.SYCLIsHost)
Diags.Report(diag::err_drv_argument_not_allowed_with) << "-fsycl-is-device"
@@ -1447,6 +1471,55 @@ static void setPGOUseInstrumentor(CodeGenOptions &Opts,
Opts.setProfileUse(CodeGenOptions::ProfileClangInstr);
}
+void CompilerInvocation::setDefaultPointerAuthOptions(
+ PointerAuthOptions &Opts, const LangOptions &LangOpts,
+ const llvm::Triple &Triple) {
+ assert(Triple.getArch() == llvm::Triple::aarch64);
+ if (LangOpts.PointerAuthCalls) {
+ using Key = PointerAuthSchema::ARM8_3Key;
+ using Discrimination = PointerAuthSchema::Discrimination;
+ // If you change anything here, be sure to update <ptrauth.h>.
+ Opts.FunctionPointers = PointerAuthSchema(
+ Key::ASIA, false,
+ LangOpts.PointerAuthFunctionTypeDiscrimination ? Discrimination::Type
+ : Discrimination::None);
+
+ Opts.CXXVTablePointers = PointerAuthSchema(
+ Key::ASDA, LangOpts.PointerAuthVTPtrAddressDiscrimination,
+ LangOpts.PointerAuthVTPtrTypeDiscrimination ? Discrimination::Type
+ : Discrimination::None);
+
+ if (LangOpts.PointerAuthTypeInfoVTPtrDiscrimination)
+ Opts.CXXTypeInfoVTablePointer =
+ PointerAuthSchema(Key::ASDA, true, Discrimination::Constant,
+ StdTypeInfoVTablePointerConstantDiscrimination);
+ else
+ Opts.CXXTypeInfoVTablePointer =
+ PointerAuthSchema(Key::ASDA, false, Discrimination::None);
+
+ Opts.CXXVTTVTablePointers =
+ PointerAuthSchema(Key::ASDA, false, Discrimination::None);
+ Opts.CXXVirtualFunctionPointers = Opts.CXXVirtualVariadicFunctionPointers =
+ PointerAuthSchema(Key::ASIA, true, Discrimination::Decl);
+ Opts.CXXMemberFunctionPointers =
+ PointerAuthSchema(Key::ASIA, false, Discrimination::Type);
+ }
+ Opts.ReturnAddresses = LangOpts.PointerAuthReturns;
+ Opts.AuthTraps = LangOpts.PointerAuthAuthTraps;
+ Opts.IndirectGotos = LangOpts.PointerAuthIndirectGotos;
+}
+
+static void parsePointerAuthOptions(PointerAuthOptions &Opts,
+ const LangOptions &LangOpts,
+ const llvm::Triple &Triple,
+ DiagnosticsEngine &Diags) {
+ if (!LangOpts.PointerAuthCalls && !LangOpts.PointerAuthReturns &&
+ !LangOpts.PointerAuthAuthTraps && !LangOpts.PointerAuthIndirectGotos)
+ return;
+
+ CompilerInvocation::setDefaultPointerAuthOptions(Opts, LangOpts, Triple);
+}
+
void CompilerInvocationBase::GenerateCodeGenArgs(const CodeGenOptions &Opts,
ArgumentConsumer Consumer,
const llvm::Triple &T,
@@ -1545,6 +1618,9 @@ void CompilerInvocationBase::GenerateCodeGenArgs(const CodeGenOptions &Opts,
llvm::DICompileUnit::DebugNameTableKind::Default))
GenerateArg(Consumer, OPT_gpubnames);
+ if (Opts.DebugTemplateAlias)
+ GenerateArg(Consumer, OPT_gtemplate_alias);
+
auto TNK = Opts.getDebugSimpleTemplateNames();
if (TNK != llvm::codegenoptions::DebugTemplateNamesKind::Full) {
if (TNK == llvm::codegenoptions::DebugTemplateNamesKind::Simple)
@@ -1816,6 +1892,8 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.BinutilsVersion =
std::string(Args.getLastArgValue(OPT_fbinutils_version_EQ));
+ Opts.DebugTemplateAlias = Args.hasArg(OPT_gtemplate_alias);
+
Opts.DebugNameTable = static_cast<unsigned>(
Args.hasArg(OPT_ggnu_pubnames)
? llvm::DICompileUnit::DebugNameTableKind::GNU
@@ -1953,7 +2031,7 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
else if (Val == llvm::FunctionReturnThunksKind::Extern &&
- Args.getLastArgValue(OPT_mcmodel_EQ).equals("large"))
+ Args.getLastArgValue(OPT_mcmodel_EQ) == "large")
Diags.Report(diag::err_drv_argument_not_allowed_with)
<< A->getAsString(Args)
<< Args.getLastArg(OPT_mcmodel_EQ)->getAsString(Args);
@@ -1975,14 +2053,6 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.LinkBitcodeFiles.push_back(F);
}
- if (Arg *A = Args.getLastArg(OPT_ftlsmodel_EQ)) {
- if (T.isOSAIX()) {
- StringRef Name = A->getValue();
- if (Name == "local-dynamic")
- Diags.Report(diag::err_aix_unsupported_tls_model) << Name;
- }
- }
-
if (Arg *A = Args.getLastArg(OPT_fdenormal_fp_math_EQ)) {
StringRef Val = A->getValue();
Opts.FPDenormalMode = llvm::parseDenormalFPAttribute(Val);
@@ -2145,6 +2215,9 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.EmitVersionIdentMetadata = Args.hasFlag(OPT_Qy, OPT_Qn, true);
+ if (!LangOpts->CUDAIsDevice)
+ parsePointerAuthOptions(Opts.PointerAuth, *LangOpts, T, Diags);
+
if (Args.hasArg(options::OPT_ffinite_loops))
Opts.FiniteLoops = CodeGenOptions::FiniteLoopsKind::Always;
else if (Args.hasArg(options::OPT_fno_finite_loops))
@@ -2399,6 +2472,9 @@ void CompilerInvocationBase::GenerateDiagnosticArgs(
// This option is automatically generated from UndefPrefixes.
if (Warning == "undef-prefix")
continue;
+ // This option is automatically generated from CheckConstexprFunctionBodies.
+ if (Warning == "invalid-constexpr" || Warning == "no-invalid-constexpr")
+ continue;
Consumer(StringRef("-W") + Warning);
}
@@ -2541,6 +2617,7 @@ static const auto &getFrontendActionTable() {
{frontend::DumpTokens, OPT_dump_tokens},
{frontend::EmitAssembly, OPT_S},
{frontend::EmitBC, OPT_emit_llvm_bc},
+ {frontend::EmitCIR, OPT_emit_cir},
{frontend::EmitHTML, OPT_emit_html},
{frontend::EmitLLVM, OPT_emit_llvm},
{frontend::EmitLLVMOnly, OPT_emit_llvm_only},
@@ -2553,6 +2630,8 @@ static const auto &getFrontendActionTable() {
{frontend::GenerateModule, OPT_emit_module},
{frontend::GenerateModuleInterface, OPT_emit_module_interface},
+ {frontend::GenerateReducedModuleInterface,
+ OPT_emit_reduced_module_interface},
{frontend::GenerateHeaderUnit, OPT_emit_header_unit},
{frontend::GeneratePCH, OPT_emit_pch},
{frontend::GenerateInterfaceStubs, OPT_emit_interface_stubs},
@@ -2752,6 +2831,9 @@ static void GenerateFrontendArgs(const FrontendOptions &Opts,
case Language::HLSL:
Lang = "hlsl";
break;
+ case Language::CIR:
+ Lang = "cir";
+ break;
}
GenerateArg(Consumer, OPT_x,
@@ -2827,6 +2909,30 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
}
Opts.ProgramAction = *ProgramAction;
+
+ // Catch common mistakes when multiple actions are specified for cc1 (e.g.
+ // -S -emit-llvm means -emit-llvm while -emit-llvm -S means -S). However, to
+ // support driver `-c -Xclang ACTION` (-cc1 -emit-llvm file -main-file-name
+ // X ACTION), we suppress the error when the two actions are separated by
+ // -main-file-name.
+ //
+ // As an exception, accept composable -ast-dump*.
+ if (!A->getSpelling().starts_with("-ast-dump")) {
+ const Arg *SavedAction = nullptr;
+ for (const Arg *AA :
+ Args.filtered(OPT_Action_Group, OPT_main_file_name)) {
+ if (AA->getOption().matches(OPT_main_file_name)) {
+ SavedAction = nullptr;
+ } else if (!SavedAction) {
+ SavedAction = AA;
+ } else {
+ if (!A->getOption().matches(OPT_ast_dump_EQ))
+ Diags.Report(diag::err_fe_invalid_multiple_actions)
+ << SavedAction->getSpelling() << A->getSpelling();
+ break;
+ }
+ }
+ }
}
if (const Arg* A = Args.getLastArg(OPT_plugin)) {
@@ -2878,6 +2984,8 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
if (Opts.ProgramAction != frontend::GenerateModule && Opts.IsSystemModule)
Diags.Report(diag::err_drv_argument_only_allowed_with) << "-fsystem-module"
<< "-emit-module";
+ if (Args.hasArg(OPT_fclangir) || Args.hasArg(OPT_emit_cir))
+ Opts.UseClangIRPipeline = true;
if (Args.hasArg(OPT_aux_target_cpu))
Opts.AuxTargetCPU = std::string(Args.getLastArgValue(OPT_aux_target_cpu));
@@ -2953,6 +3061,7 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
.Cases("ast", "pcm", "precompiled-header",
InputKind(Language::Unknown, InputKind::Precompiled))
.Case("ir", Language::LLVM_IR)
+ .Case("cir", Language::CIR)
.Default(Language::Unknown);
if (DashX.isUnknown())
@@ -3186,6 +3295,22 @@ static bool ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
bool IsIndexHeaderMap = false;
bool IsSysrootSpecified =
Args.hasArg(OPT__sysroot_EQ) || Args.hasArg(OPT_isysroot);
+
+ // Expand a leading `=` to the sysroot if one was passed (and it's not a
+ // framework flag).
+ auto PrefixHeaderPath = [IsSysrootSpecified,
+ &Opts](const llvm::opt::Arg *A,
+ bool IsFramework = false) -> std::string {
+ assert(A->getNumValues() && "Unexpected empty search path flag!");
+ if (IsSysrootSpecified && !IsFramework && A->getValue()[0] == '=') {
+ SmallString<32> Buffer;
+ llvm::sys::path::append(Buffer, Opts.Sysroot,
+ llvm::StringRef(A->getValue()).substr(1));
+ return std::string(Buffer);
+ }
+ return A->getValue();
+ };
+
for (const auto *A : Args.filtered(OPT_I, OPT_F, OPT_index_header_map)) {
if (A->getOption().matches(OPT_index_header_map)) {
// -index-header-map applies to the next -I or -F.
@@ -3197,16 +3322,7 @@ static bool ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
IsIndexHeaderMap ? frontend::IndexHeaderMap : frontend::Angled;
bool IsFramework = A->getOption().matches(OPT_F);
- std::string Path = A->getValue();
-
- if (IsSysrootSpecified && !IsFramework && A->getValue()[0] == '=') {
- SmallString<32> Buffer;
- llvm::sys::path::append(Buffer, Opts.Sysroot,
- llvm::StringRef(A->getValue()).substr(1));
- Path = std::string(Buffer);
- }
-
- Opts.AddPath(Path, Group, IsFramework,
+ Opts.AddPath(PrefixHeaderPath(A, IsFramework), Group, IsFramework,
/*IgnoreSysroot*/ true);
IsIndexHeaderMap = false;
}
@@ -3224,12 +3340,18 @@ static bool ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
}
for (const auto *A : Args.filtered(OPT_idirafter))
- Opts.AddPath(A->getValue(), frontend::After, false, true);
+ Opts.AddPath(PrefixHeaderPath(A), frontend::After, false, true);
for (const auto *A : Args.filtered(OPT_iquote))
- Opts.AddPath(A->getValue(), frontend::Quoted, false, true);
- for (const auto *A : Args.filtered(OPT_isystem, OPT_iwithsysroot))
- Opts.AddPath(A->getValue(), frontend::System, false,
- !A->getOption().matches(OPT_iwithsysroot));
+ Opts.AddPath(PrefixHeaderPath(A), frontend::Quoted, false, true);
+
+ for (const auto *A : Args.filtered(OPT_isystem, OPT_iwithsysroot)) {
+ if (A->getOption().matches(OPT_iwithsysroot)) {
+ Opts.AddPath(A->getValue(), frontend::System, false,
+ /*IgnoreSysRoot=*/false);
+ continue;
+ }
+ Opts.AddPath(PrefixHeaderPath(A), frontend::System, false, true);
+ }
for (const auto *A : Args.filtered(OPT_iframework))
Opts.AddPath(A->getValue(), frontend::System, true, true);
for (const auto *A : Args.filtered(OPT_iframeworkwithsysroot))
@@ -3288,12 +3410,57 @@ static void ParseAPINotesArgs(APINotesOptions &Opts, ArgList &Args,
Opts.ModuleSearchPaths.push_back(A->getValue());
}
+static void GeneratePointerAuthArgs(const LangOptions &Opts,
+ ArgumentConsumer Consumer) {
+ if (Opts.PointerAuthIntrinsics)
+ GenerateArg(Consumer, OPT_fptrauth_intrinsics);
+ if (Opts.PointerAuthCalls)
+ GenerateArg(Consumer, OPT_fptrauth_calls);
+ if (Opts.PointerAuthReturns)
+ GenerateArg(Consumer, OPT_fptrauth_returns);
+ if (Opts.PointerAuthIndirectGotos)
+ GenerateArg(Consumer, OPT_fptrauth_indirect_gotos);
+ if (Opts.PointerAuthAuthTraps)
+ GenerateArg(Consumer, OPT_fptrauth_auth_traps);
+ if (Opts.PointerAuthVTPtrAddressDiscrimination)
+ GenerateArg(Consumer, OPT_fptrauth_vtable_pointer_address_discrimination);
+ if (Opts.PointerAuthVTPtrTypeDiscrimination)
+ GenerateArg(Consumer, OPT_fptrauth_vtable_pointer_type_discrimination);
+ if (Opts.PointerAuthTypeInfoVTPtrDiscrimination)
+ GenerateArg(Consumer, OPT_fptrauth_type_info_vtable_pointer_discrimination);
+
+ if (Opts.PointerAuthInitFini)
+ GenerateArg(Consumer, OPT_fptrauth_init_fini);
+ if (Opts.PointerAuthFunctionTypeDiscrimination)
+ GenerateArg(Consumer, OPT_fptrauth_function_pointer_type_discrimination);
+}
+
+static void ParsePointerAuthArgs(LangOptions &Opts, ArgList &Args,
+ DiagnosticsEngine &Diags) {
+ Opts.PointerAuthIntrinsics = Args.hasArg(OPT_fptrauth_intrinsics);
+ Opts.PointerAuthCalls = Args.hasArg(OPT_fptrauth_calls);
+ Opts.PointerAuthReturns = Args.hasArg(OPT_fptrauth_returns);
+ Opts.PointerAuthIndirectGotos = Args.hasArg(OPT_fptrauth_indirect_gotos);
+ Opts.PointerAuthAuthTraps = Args.hasArg(OPT_fptrauth_auth_traps);
+ Opts.PointerAuthVTPtrAddressDiscrimination =
+ Args.hasArg(OPT_fptrauth_vtable_pointer_address_discrimination);
+ Opts.PointerAuthVTPtrTypeDiscrimination =
+ Args.hasArg(OPT_fptrauth_vtable_pointer_type_discrimination);
+ Opts.PointerAuthTypeInfoVTPtrDiscrimination =
+ Args.hasArg(OPT_fptrauth_type_info_vtable_pointer_discrimination);
+
+ Opts.PointerAuthInitFini = Args.hasArg(OPT_fptrauth_init_fini);
+ Opts.PointerAuthFunctionTypeDiscrimination =
+ Args.hasArg(OPT_fptrauth_function_pointer_type_discrimination);
+}
+
/// Check if input file kind and language standard are compatible.
static bool IsInputCompatibleWithStandard(InputKind IK,
const LangStandard &S) {
switch (IK.getLanguage()) {
case Language::Unknown:
case Language::LLVM_IR:
+ case Language::CIR:
llvm_unreachable("should not parse language flags for this input");
case Language::C:
@@ -3359,6 +3526,8 @@ static StringRef GetInputKindName(InputKind IK) {
return "Asm";
case Language::LLVM_IR:
return "LLVM IR";
+ case Language::CIR:
+ return "Clang IR";
case Language::HLSL:
return "HLSL";
@@ -3374,7 +3543,8 @@ void CompilerInvocationBase::GenerateLangArgs(const LangOptions &Opts,
const llvm::Triple &T,
InputKind IK) {
if (IK.getFormat() == InputKind::Precompiled ||
- IK.getLanguage() == Language::LLVM_IR) {
+ IK.getLanguage() == Language::LLVM_IR ||
+ IK.getLanguage() == Language::CIR) {
if (Opts.ObjCAutoRefCount)
GenerateArg(Consumer, OPT_fobjc_arc);
if (Opts.PICLevel != 0)
@@ -3479,7 +3649,8 @@ void CompilerInvocationBase::GenerateLangArgs(const LangOptions &Opts,
GenerateArg(Consumer, OPT_fblocks);
if (Opts.ConvergentFunctions &&
- !(Opts.OpenCL || (Opts.CUDA && Opts.CUDAIsDevice) || Opts.SYCLIsDevice))
+ !(Opts.OpenCL || (Opts.CUDA && Opts.CUDAIsDevice) || Opts.SYCLIsDevice ||
+ Opts.HLSL))
GenerateArg(Consumer, OPT_fconvergent_functions);
if (Opts.NoBuiltin && !Opts.Freestanding)
@@ -3617,6 +3788,9 @@ void CompilerInvocationBase::GenerateLangArgs(const LangOptions &Opts,
case LangOptions::ClangABI::Ver17:
GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "17.0");
break;
+ case LangOptions::ClangABI::Ver18:
+ GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "18.0");
+ break;
case LangOptions::ClangABI::Latest:
break;
}
@@ -3660,7 +3834,8 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
unsigned NumErrorsBefore = Diags.getNumErrors();
if (IK.getFormat() == InputKind::Precompiled ||
- IK.getLanguage() == Language::LLVM_IR) {
+ IK.getLanguage() == Language::LLVM_IR ||
+ IK.getLanguage() == Language::CIR) {
// ObjCAAutoRefCount and Sanitize LangOpts are used to setup the
// PassManager in BackendUtil.cpp. They need to be initialized no matter
// what the input type is.
@@ -3876,7 +4051,7 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Opts.ConvergentFunctions = Args.hasArg(OPT_fconvergent_functions) ||
Opts.OpenCL || (Opts.CUDA && Opts.CUDAIsDevice) ||
- Opts.SYCLIsDevice;
+ Opts.SYCLIsDevice || Opts.HLSL;
Opts.NoBuiltin = Args.hasArg(OPT_fno_builtin) || Opts.Freestanding;
if (!Opts.NoBuiltin)
@@ -4009,6 +4184,7 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
if (TT.getArch() == llvm::Triple::UnknownArch ||
!(TT.getArch() == llvm::Triple::aarch64 || TT.isPPC() ||
+ TT.getArch() == llvm::Triple::systemz ||
TT.getArch() == llvm::Triple::nvptx ||
TT.getArch() == llvm::Triple::nvptx64 ||
TT.getArch() == llvm::Triple::amdgcn ||
@@ -4122,6 +4298,8 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Opts.setClangABICompat(LangOptions::ClangABI::Ver15);
else if (Major <= 17)
Opts.setClangABICompat(LangOptions::ClangABI::Ver17);
+ else if (Major <= 18)
+ Opts.setClangABICompat(LangOptions::ClangABI::Ver18);
} else if (Ver != "latest") {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
@@ -4245,11 +4423,30 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Diags.Report(diag::err_drv_hlsl_bad_shader_unsupported)
<< ShaderModel << T.getOSName() << T.str();
}
+ // Validate that if fnative-half-type is given, that
+ // the language standard is at least hlsl2018, and that
+ // the target shader model is at least 6.2.
+ if (Args.getLastArg(OPT_fnative_half_type)) {
+ const LangStandard &Std =
+ LangStandard::getLangStandardForKind(Opts.LangStd);
+ if (!(Opts.LangStd >= LangStandard::lang_hlsl2018 &&
+ T.getOSVersion() >= VersionTuple(6, 2)))
+ Diags.Report(diag::err_drv_hlsl_16bit_types_unsupported)
+ << "-enable-16bit-types" << true << Std.getName()
+ << T.getOSVersion().getAsString();
+ }
} else if (T.isSPIRVLogical()) {
if (!T.isVulkanOS() || T.getVulkanVersion() == VersionTuple(0)) {
Diags.Report(diag::err_drv_hlsl_bad_shader_unsupported)
<< VulkanEnv << T.getOSName() << T.str();
}
+ if (Args.getLastArg(OPT_fnative_half_type)) {
+ const LangStandard &Std =
+ LangStandard::getLangStandardForKind(Opts.LangStd);
+ if (!(Opts.LangStd >= LangStandard::lang_hlsl2018))
+ Diags.Report(diag::err_drv_hlsl_16bit_types_unsupported)
+ << "-fnative-half-type" << false << Std.getName();
+ }
} else {
llvm_unreachable("expected DXIL or SPIR-V target");
}
@@ -4268,6 +4465,7 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
case frontend::ASTView:
case frontend::EmitAssembly:
case frontend::EmitBC:
+ case frontend::EmitCIR:
case frontend::EmitHTML:
case frontend::EmitLLVM:
case frontend::EmitLLVMOnly:
@@ -4277,6 +4475,7 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
case frontend::FixIt:
case frontend::GenerateModule:
case frontend::GenerateModuleInterface:
+ case frontend::GenerateReducedModuleInterface:
case frontend::GenerateHeaderUnit:
case frontend::GeneratePCH:
case frontend::GenerateInterfaceStubs:
@@ -4371,6 +4570,9 @@ static void GeneratePreprocessorArgs(const PreprocessorOptions &Opts,
if (Opts.DefineTargetOSMacros)
GenerateArg(Consumer, OPT_fdefine_target_os_macros);
+ for (const auto &EmbedEntry : Opts.EmbedEntries)
+ GenerateArg(Consumer, OPT_embed_dir_EQ, EmbedEntry);
+
// Don't handle LexEditorPlaceholders. It is implied by the action that is
// generated elsewhere.
}
@@ -4463,6 +4665,11 @@ static bool ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
}
}
+ for (const auto *A : Args.filtered(OPT_embed_dir_EQ)) {
+ StringRef Val = A->getValue();
+ Opts.EmbedEntries.push_back(std::string(Val));
+ }
+
// Always avoid lexing editor placeholders when we're just running the
// preprocessor as we never want to emit the
// "editor placeholder in source file" error in PP only mode.
@@ -4606,6 +4813,8 @@ bool CompilerInvocation::CreateFromArgsImpl(
Res.getFileSystemOpts().WorkingDir);
ParseAPINotesArgs(Res.getAPINotesOpts(), Args, Diags);
+ ParsePointerAuthArgs(LangOpts, Args, Diags);
+
ParseLangArgs(LangOpts, Args, DashX, T, Res.getPreprocessorOpts().Includes,
Diags);
if (Res.getFrontendOpts().ProgramAction == frontend::RewriteObjC)
@@ -4763,6 +4972,7 @@ std::string CompilerInvocation::getModuleHash() const {
if (hsOpts.ModulesStrictContextHash) {
HBuilder.addRange(hsOpts.SystemHeaderPrefixes);
HBuilder.addRange(hsOpts.UserEntries);
+ HBuilder.addRange(hsOpts.VFSOverlayFiles);
const DiagnosticOptions &diagOpts = getDiagnosticOpts();
#define DIAGOPT(Name, Bits, Default) HBuilder.add(diagOpts.Name);
@@ -4835,6 +5045,7 @@ void CompilerInvocationBase::generateCC1CommandLine(
GenerateTargetArgs(getTargetOpts(), Consumer);
GenerateHeaderSearchArgs(getHeaderSearchOpts(), Consumer);
GenerateAPINotesArgs(getAPINotesOpts(), Consumer);
+ GeneratePointerAuthArgs(getLangOpts(), Consumer);
GenerateLangArgs(getLangOpts(), Consumer, T, getFrontendOpts().DashX);
GenerateCodeGenArgs(getCodeGenOpts(), Consumer, T,
getFrontendOpts().OutputFile, &getLangOpts());
diff --git a/contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp b/contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp
index 19abcac2befb..528eae2c5283 100644
--- a/contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp
@@ -62,11 +62,25 @@ struct DepCollectorPPCallbacks : public PPCallbacks {
/*IsMissing=*/false);
}
+ void EmbedDirective(SourceLocation, StringRef, bool,
+ OptionalFileEntryRef File,
+ const LexEmbedParametersResult &) override {
+ assert(File && "expected to only be called when the file is found");
+ StringRef FileName =
+ llvm::sys::path::remove_leading_dotslash(File->getName());
+ DepCollector.maybeAddDependency(FileName,
+ /*FromModule*/ false,
+ /*IsSystem*/ false,
+ /*IsModuleFile*/ false,
+ /*IsMissing*/ false);
+ }
+
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
CharSourceRange FilenameRange,
OptionalFileEntryRef File, StringRef SearchPath,
- StringRef RelativePath, const Module *Imported,
+ StringRef RelativePath, const Module *SuggestedModule,
+ bool ModuleImported,
SrcMgr::CharacteristicKind FileType) override {
if (!File)
DepCollector.maybeAddDependency(FileName, /*FromModule*/ false,
@@ -76,6 +90,18 @@ struct DepCollectorPPCallbacks : public PPCallbacks {
// Files that actually exist are handled by FileChanged.
}
+ void HasEmbed(SourceLocation, StringRef, bool,
+ OptionalFileEntryRef File) override {
+ if (!File)
+ return;
+ StringRef Filename =
+ llvm::sys::path::remove_leading_dotslash(File->getName());
+ DepCollector.maybeAddDependency(Filename,
+ /*FromModule=*/false, false,
+ /*IsModuleFile=*/false,
+ /*IsMissing=*/false);
+ }
+
void HasInclude(SourceLocation Loc, StringRef SpelledFilename, bool IsAngled,
OptionalFileEntryRef File,
SrcMgr::CharacteristicKind FileType) override {
diff --git a/contrib/llvm-project/clang/lib/Frontend/DependencyGraph.cpp b/contrib/llvm-project/clang/lib/Frontend/DependencyGraph.cpp
index b471471f3528..c23ce66a40dd 100644
--- a/contrib/llvm-project/clang/lib/Frontend/DependencyGraph.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/DependencyGraph.cpp
@@ -43,15 +43,20 @@ private:
public:
DependencyGraphCallback(const Preprocessor *_PP, StringRef OutputFile,
StringRef SysRoot)
- : PP(_PP), OutputFile(OutputFile.str()), SysRoot(SysRoot.str()) { }
+ : PP(_PP), OutputFile(OutputFile.str()), SysRoot(SysRoot.str()) {}
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
CharSourceRange FilenameRange,
OptionalFileEntryRef File, StringRef SearchPath,
- StringRef RelativePath, const Module *Imported,
+ StringRef RelativePath, const Module *SuggestedModule,
+ bool ModuleImported,
SrcMgr::CharacteristicKind FileType) override;
+ void EmbedDirective(SourceLocation HashLoc, StringRef FileName, bool IsAngled,
+ OptionalFileEntryRef File,
+ const LexEmbedParametersResult &Params) override;
+
void EndOfMainFile() override {
OutputGraphFile();
}
@@ -68,8 +73,26 @@ void clang::AttachDependencyGraphGen(Preprocessor &PP, StringRef OutputFile,
void DependencyGraphCallback::InclusionDirective(
SourceLocation HashLoc, const Token &IncludeTok, StringRef FileName,
bool IsAngled, CharSourceRange FilenameRange, OptionalFileEntryRef File,
- StringRef SearchPath, StringRef RelativePath, const Module *Imported,
- SrcMgr::CharacteristicKind FileType) {
+ StringRef SearchPath, StringRef RelativePath, const Module *SuggestedModule,
+ bool ModuleImported, SrcMgr::CharacteristicKind FileType) {
+ if (!File)
+ return;
+
+ SourceManager &SM = PP->getSourceManager();
+ OptionalFileEntryRef FromFile =
+ SM.getFileEntryRefForID(SM.getFileID(SM.getExpansionLoc(HashLoc)));
+ if (!FromFile)
+ return;
+
+ Dependencies[*FromFile].push_back(*File);
+
+ AllFiles.insert(*File);
+ AllFiles.insert(*FromFile);
+}
+
+void DependencyGraphCallback::EmbedDirective(SourceLocation HashLoc, StringRef,
+ bool, OptionalFileEntryRef File,
+ const LexEmbedParametersResult &) {
if (!File)
return;
diff --git a/contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp b/contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp
index eff785b99a09..a9c45e525c69 100644
--- a/contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp
@@ -71,7 +71,7 @@ public:
if (Previous)
Previous->ReaderInitialized(Reader);
}
- void IdentifierRead(serialization::IdentID ID,
+ void IdentifierRead(serialization::IdentifierID ID,
IdentifierInfo *II) override {
if (Previous)
Previous->IdentifierRead(ID, II);
@@ -80,7 +80,7 @@ public:
if (Previous)
Previous->TypeRead(Idx, T);
}
- void DeclRead(serialization::DeclID ID, const Decl *D) override {
+ void DeclRead(GlobalDeclID ID, const Decl *D) override {
if (Previous)
Previous->DeclRead(ID, D);
}
@@ -102,7 +102,7 @@ public:
bool DeletePrevious)
: DelegatingDeserializationListener(Previous, DeletePrevious) {}
- void DeclRead(serialization::DeclID ID, const Decl *D) override {
+ void DeclRead(GlobalDeclID ID, const Decl *D) override {
llvm::outs() << "PCH DECL: " << D->getDeclKindName();
if (const NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
llvm::outs() << " - ";
@@ -128,7 +128,7 @@ public:
: DelegatingDeserializationListener(Previous, DeletePrevious), Ctx(Ctx),
NamesToCheck(NamesToCheck) {}
- void DeclRead(serialization::DeclID ID, const Decl *D) override {
+ void DeclRead(GlobalDeclID ID, const Decl *D) override {
if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
if (NamesToCheck.find(ND->getNameAsString()) != NamesToCheck.end()) {
unsigned DiagID
@@ -535,8 +535,14 @@ static Module *prepareToBuildModule(CompilerInstance &CI,
if (*OriginalModuleMap != CI.getSourceManager().getFileEntryRefForID(
CI.getSourceManager().getMainFileID())) {
M->IsInferred = true;
- CI.getPreprocessor().getHeaderSearchInfo().getModuleMap()
- .setInferredModuleAllowedBy(M, *OriginalModuleMap);
+ auto FileCharacter =
+ M->IsSystem ? SrcMgr::C_System_ModuleMap : SrcMgr::C_User_ModuleMap;
+ FileID OriginalModuleMapFID = CI.getSourceManager().getOrCreateFileID(
+ *OriginalModuleMap, FileCharacter);
+ CI.getPreprocessor()
+ .getHeaderSearchInfo()
+ .getModuleMap()
+ .setInferredModuleAllowedBy(M, OriginalModuleMapFID);
}
}
@@ -689,7 +695,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
std::unique_ptr<ASTUnit> AST = ASTUnit::LoadFromASTFile(
std::string(InputFile), CI.getPCHContainerReader(),
ASTUnit::LoadEverything, Diags, CI.getFileSystemOpts(),
- CI.getHeaderSearchOptsPtr());
+ CI.getHeaderSearchOptsPtr(), CI.getLangOptsPtr());
if (!AST)
return false;
@@ -751,8 +757,11 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
// IR files bypass the rest of initialization.
if (Input.getKind().getLanguage() == Language::LLVM_IR) {
- assert(hasIRSupport() &&
- "This action does not have IR file support!");
+ if (!hasIRSupport()) {
+ CI.getDiagnostics().Report(diag::err_ast_action_on_llvm_ir)
+ << Input.getFile();
+ return false;
+ }
// Inform the diagnostic client we are processing a source file.
CI.getDiagnosticClient().BeginSourceFile(CI.getLangOpts(), nullptr);
diff --git a/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp b/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp
index c1d6e7145536..e70210d55fe2 100644
--- a/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp
@@ -184,12 +184,12 @@ bool GeneratePCHAction::BeginSourceFileAction(CompilerInstance &CI) {
return true;
}
-std::unique_ptr<ASTConsumer>
-GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
+std::vector<std::unique_ptr<ASTConsumer>>
+GenerateModuleAction::CreateMultiplexConsumer(CompilerInstance &CI,
+ StringRef InFile) {
std::unique_ptr<raw_pwrite_stream> OS = CreateOutputFile(CI, InFile);
if (!OS)
- return nullptr;
+ return {};
std::string OutputFile = CI.getFrontendOpts().OutputFile;
std::string Sysroot;
@@ -210,6 +210,17 @@ GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
+CI.getFrontendOpts().BuildingImplicitModule));
Consumers.push_back(CI.getPCHContainerWriter().CreatePCHContainerGenerator(
CI, std::string(InFile), OutputFile, std::move(OS), Buffer));
+ return Consumers;
+}
+
+std::unique_ptr<ASTConsumer>
+GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ std::vector<std::unique_ptr<ASTConsumer>> Consumers =
+ CreateMultiplexConsumer(CI, InFile);
+ if (Consumers.empty())
+ return nullptr;
+
return std::make_unique<MultiplexConsumer>(std::move(Consumers));
}
@@ -261,11 +272,20 @@ bool GenerateModuleInterfaceAction::BeginSourceFileAction(
std::unique_ptr<ASTConsumer>
GenerateModuleInterfaceAction::CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) {
- CI.getHeaderSearchOpts().ModulesSkipDiagnosticOptions = true;
- CI.getHeaderSearchOpts().ModulesSkipHeaderSearchPaths = true;
- CI.getHeaderSearchOpts().ModulesSkipPragmaDiagnosticMappings = true;
+ std::vector<std::unique_ptr<ASTConsumer>> Consumers;
- return GenerateModuleAction::CreateASTConsumer(CI, InFile);
+ if (CI.getFrontendOpts().GenReducedBMI &&
+ !CI.getFrontendOpts().ModuleOutputPath.empty()) {
+ Consumers.push_back(std::make_unique<ReducedBMIGenerator>(
+ CI.getPreprocessor(), CI.getModuleCache(),
+ CI.getFrontendOpts().ModuleOutputPath));
+ }
+
+ Consumers.push_back(std::make_unique<CXX20ModulesGenerator>(
+ CI.getPreprocessor(), CI.getModuleCache(),
+ CI.getFrontendOpts().OutputFile));
+
+ return std::make_unique<MultiplexConsumer>(std::move(Consumers));
}
std::unique_ptr<raw_pwrite_stream>
@@ -274,6 +294,14 @@ GenerateModuleInterfaceAction::CreateOutputFile(CompilerInstance &CI,
return CI.createDefaultOutputFile(/*Binary=*/true, InFile, "pcm");
}
+std::unique_ptr<ASTConsumer>
+GenerateReducedModuleInterfaceAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return std::make_unique<ReducedBMIGenerator>(CI.getPreprocessor(),
+ CI.getModuleCache(),
+ CI.getFrontendOpts().OutputFile);
+}
+
bool GenerateHeaderUnitAction::BeginSourceFileAction(CompilerInstance &CI) {
if (!CI.getLangOpts().CPlusPlusModules) {
CI.getDiagnostics().Report(diag::err_module_interface_requires_cpp_modules);
@@ -426,6 +454,8 @@ private:
return "BuildingBuiltinDumpStructCall";
case CodeSynthesisContext::BuildingDeductionGuides:
return "BuildingDeductionGuides";
+ case CodeSynthesisContext::TypeAliasTemplateInstantiation:
+ return "TypeAliasTemplateInstantiation";
}
return "";
}
@@ -811,9 +841,16 @@ static StringRef ModuleKindName(Module::ModuleKind MK) {
}
void DumpModuleInfoAction::ExecuteAction() {
- assert(isCurrentFileAST() && "dumping non-AST?");
- // Set up the output file.
CompilerInstance &CI = getCompilerInstance();
+
+ // Don't process files of type other than module to avoid crash
+ if (!isCurrentFileAST()) {
+ CI.getDiagnostics().Report(diag::err_file_is_not_module)
+ << getCurrentFile();
+ return;
+ }
+
+ // Set up the output file.
StringRef OutputFileName = CI.getFrontendOpts().OutputFile;
if (!OutputFileName.empty() && OutputFileName != "-") {
std::error_code EC;
@@ -826,8 +863,7 @@ void DumpModuleInfoAction::ExecuteAction() {
auto &FileMgr = CI.getFileManager();
auto Buffer = FileMgr.getBufferForFile(getCurrentFile());
StringRef Magic = (*Buffer)->getMemBufferRef().getBuffer();
- bool IsRaw = (Magic.size() >= 4 && Magic[0] == 'C' && Magic[1] == 'P' &&
- Magic[2] == 'C' && Magic[3] == 'H');
+ bool IsRaw = Magic.starts_with("CPCH");
Out << " Module format: " << (IsRaw ? "raw" : "obj") << "\n";
Preprocessor &PP = CI.getPreprocessor();
@@ -840,7 +876,6 @@ void DumpModuleInfoAction::ExecuteAction() {
const LangOptions &LO = getCurrentASTUnit().getLangOpts();
if (LO.CPlusPlusModules && !LO.CurrentModule.empty()) {
-
ASTReader *R = getCurrentASTUnit().getASTReader().get();
unsigned SubModuleCount = R->getTotalNumSubmodules();
serialization::ModuleFile &MF = R->getModuleManager().getPrimaryModule();
@@ -1061,6 +1096,7 @@ void PrintPreambleAction::ExecuteAction() {
case Language::CUDA:
case Language::HIP:
case Language::HLSL:
+ case Language::CIR:
break;
case Language::Unknown:
diff --git a/contrib/llvm-project/clang/lib/Frontend/FrontendOptions.cpp b/contrib/llvm-project/clang/lib/Frontend/FrontendOptions.cpp
index bf83b27c1367..32ed99571e85 100644
--- a/contrib/llvm-project/clang/lib/Frontend/FrontendOptions.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/FrontendOptions.cpp
@@ -34,5 +34,6 @@ InputKind FrontendOptions::getInputKindForExtension(StringRef Extension) {
.Case("hip", Language::HIP)
.Cases("ll", "bc", Language::LLVM_IR)
.Case("hlsl", Language::HLSL)
+ .Case("cir", Language::CIR)
.Default(Language::Unknown);
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp b/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
index 1b91c86f9139..3ed7243deba8 100644
--- a/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
@@ -113,7 +113,11 @@ static T PickFP(const llvm::fltSemantics *Sem, T IEEEHalfVal, T IEEESingleVal,
static void DefineFloatMacros(MacroBuilder &Builder, StringRef Prefix,
const llvm::fltSemantics *Sem, StringRef Ext) {
- const char *DenormMin, *Epsilon, *Max, *Min;
+ const char *DenormMin, *NormMax, *Epsilon, *Max, *Min;
+ NormMax = PickFP(Sem, "6.5504e+4", "3.40282347e+38",
+ "1.7976931348623157e+308", "1.18973149535723176502e+4932",
+ "8.98846567431157953864652595394501e+307",
+ "1.18973149535723176508575932662800702e+4932");
DenormMin = PickFP(Sem, "5.9604644775390625e-8", "1.40129846e-45",
"4.9406564584124654e-324", "3.64519953188247460253e-4951",
"4.94065645841246544176568792868221e-324",
@@ -144,6 +148,7 @@ static void DefineFloatMacros(MacroBuilder &Builder, StringRef Prefix,
DefPrefix += "_";
Builder.defineMacro(DefPrefix + "DENORM_MIN__", Twine(DenormMin)+Ext);
+ Builder.defineMacro(DefPrefix + "NORM_MAX__", Twine(NormMax)+Ext);
Builder.defineMacro(DefPrefix + "HAS_DENORM__");
Builder.defineMacro(DefPrefix + "DIG__", Twine(Digits));
Builder.defineMacro(DefPrefix + "DECIMAL_DIG__", Twine(DecimalDigits));
@@ -181,14 +186,21 @@ static void DefineTypeSize(const Twine &MacroName, TargetInfo::IntType Ty,
TI.isTypeSigned(Ty), Builder);
}
-static void DefineFmt(const Twine &Prefix, TargetInfo::IntType Ty,
- const TargetInfo &TI, MacroBuilder &Builder) {
- bool IsSigned = TI.isTypeSigned(Ty);
+static void DefineFmt(const LangOptions &LangOpts, const Twine &Prefix,
+ TargetInfo::IntType Ty, const TargetInfo &TI,
+ MacroBuilder &Builder) {
StringRef FmtModifier = TI.getTypeFormatModifier(Ty);
- for (const char *Fmt = IsSigned ? "di" : "ouxX"; *Fmt; ++Fmt) {
- Builder.defineMacro(Prefix + "_FMT" + Twine(*Fmt) + "__",
- Twine("\"") + FmtModifier + Twine(*Fmt) + "\"");
- }
+ auto Emitter = [&](char Fmt) {
+ Builder.defineMacro(Prefix + "_FMT" + Twine(Fmt) + "__",
+ Twine("\"") + FmtModifier + Twine(Fmt) + "\"");
+ };
+ bool IsSigned = TI.isTypeSigned(Ty);
+ llvm::for_each(StringRef(IsSigned ? "di" : "ouxX"), Emitter);
+
+ // C23 added the b and B modifiers for printing binary output of unsigned
+ // integers. Conditionally define those if compiling in C23 mode.
+ if (LangOpts.C23 && !IsSigned)
+ llvm::for_each(StringRef("bB"), Emitter);
}
static void DefineType(const Twine &MacroName, TargetInfo::IntType Ty,
@@ -217,7 +229,8 @@ static void DefineTypeSizeAndWidth(const Twine &Prefix, TargetInfo::IntType Ty,
DefineTypeWidth(Prefix + "_WIDTH__", Ty, TI, Builder);
}
-static void DefineExactWidthIntType(TargetInfo::IntType Ty,
+static void DefineExactWidthIntType(const LangOptions &LangOpts,
+ TargetInfo::IntType Ty,
const TargetInfo &TI,
MacroBuilder &Builder) {
int TypeWidth = TI.getTypeWidth(Ty);
@@ -236,7 +249,7 @@ static void DefineExactWidthIntType(TargetInfo::IntType Ty,
const char *Prefix = IsSigned ? "__INT" : "__UINT";
DefineType(Prefix + Twine(TypeWidth) + "_TYPE__", Ty, Builder);
- DefineFmt(Prefix + Twine(TypeWidth), Ty, TI, Builder);
+ DefineFmt(LangOpts, Prefix + Twine(TypeWidth), Ty, TI, Builder);
StringRef ConstSuffix(TI.getTypeConstantSuffix(Ty));
Builder.defineMacro(Prefix + Twine(TypeWidth) + "_C_SUFFIX__", ConstSuffix);
@@ -259,7 +272,8 @@ static void DefineExactWidthIntTypeSize(TargetInfo::IntType Ty,
DefineTypeSize(Prefix + Twine(TypeWidth) + "_MAX__", Ty, TI, Builder);
}
-static void DefineLeastWidthIntType(unsigned TypeWidth, bool IsSigned,
+static void DefineLeastWidthIntType(const LangOptions &LangOpts,
+ unsigned TypeWidth, bool IsSigned,
const TargetInfo &TI,
MacroBuilder &Builder) {
TargetInfo::IntType Ty = TI.getLeastIntTypeByWidth(TypeWidth, IsSigned);
@@ -274,11 +288,12 @@ static void DefineLeastWidthIntType(unsigned TypeWidth, bool IsSigned,
DefineTypeSizeAndWidth(Prefix + Twine(TypeWidth), Ty, TI, Builder);
else
DefineTypeSize(Prefix + Twine(TypeWidth) + "_MAX__", Ty, TI, Builder);
- DefineFmt(Prefix + Twine(TypeWidth), Ty, TI, Builder);
+ DefineFmt(LangOpts, Prefix + Twine(TypeWidth), Ty, TI, Builder);
}
-static void DefineFastIntType(unsigned TypeWidth, bool IsSigned,
- const TargetInfo &TI, MacroBuilder &Builder) {
+static void DefineFastIntType(const LangOptions &LangOpts, unsigned TypeWidth,
+ bool IsSigned, const TargetInfo &TI,
+ MacroBuilder &Builder) {
// stdint.h currently defines the fast int types as equivalent to the least
// types.
TargetInfo::IntType Ty = TI.getLeastIntTypeByWidth(TypeWidth, IsSigned);
@@ -293,7 +308,7 @@ static void DefineFastIntType(unsigned TypeWidth, bool IsSigned,
DefineTypeSizeAndWidth(Prefix + Twine(TypeWidth), Ty, TI, Builder);
else
DefineTypeSize(Prefix + Twine(TypeWidth) + "_MAX__", Ty, TI, Builder);
- DefineFmt(Prefix + Twine(TypeWidth), Ty, TI, Builder);
+ DefineFmt(LangOpts, Prefix + Twine(TypeWidth), Ty, TI, Builder);
}
@@ -379,8 +394,7 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
Twine((unsigned)LangOpts.getHLSLVersion()));
if (LangOpts.NativeHalfType)
- Builder.defineMacro("__HLSL_ENABLE_16_BIT",
- Twine((unsigned)LangOpts.getHLSLVersion()));
+ Builder.defineMacro("__HLSL_ENABLE_16_BIT", "1");
// Shader target information
// "enums" for shader stages
@@ -423,7 +437,8 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
// [C++] Whether __STDC__ is predefined and if so, what its value is,
// are implementation-defined.
// (Removed in C++20.)
- if (!LangOpts.MSVCCompat && !LangOpts.TraditionalCPP)
+ if ((!LangOpts.MSVCCompat || LangOpts.MSVCEnableStdcMacro) &&
+ !LangOpts.TraditionalCPP)
Builder.defineMacro("__STDC__");
// -- __STDC_HOSTED__
// The integer literal 1 if the implementation is a hosted
@@ -438,7 +453,9 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
// value is, are implementation-defined.
// (Removed in C++20.)
if (!LangOpts.CPlusPlus) {
- if (LangOpts.C23)
+ if (LangOpts.C2y)
+ Builder.defineMacro("__STDC_VERSION__", "202400L");
+ else if (LangOpts.C23)
Builder.defineMacro("__STDC_VERSION__", "202311L");
else if (LangOpts.C17)
Builder.defineMacro("__STDC_VERSION__", "201710L");
@@ -498,6 +515,14 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__STDC_UTF_16__", "1");
Builder.defineMacro("__STDC_UTF_32__", "1");
+ // __has_embed definitions
+ Builder.defineMacro("__STDC_EMBED_NOT_FOUND__",
+ llvm::itostr(static_cast<int>(EmbedResult::NotFound)));
+ Builder.defineMacro("__STDC_EMBED_FOUND__",
+ llvm::itostr(static_cast<int>(EmbedResult::Found)));
+ Builder.defineMacro("__STDC_EMBED_EMPTY__",
+ llvm::itostr(static_cast<int>(EmbedResult::Empty)));
+
if (LangOpts.ObjC)
Builder.defineMacro("__OBJC__");
@@ -643,7 +668,9 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
: "200704");
Builder.defineMacro("__cpp_constexpr_in_decltype", "201711L");
Builder.defineMacro("__cpp_range_based_for",
- LangOpts.CPlusPlus17 ? "201603L" : "200907");
+ LangOpts.CPlusPlus23 ? "202211L"
+ : LangOpts.CPlusPlus17 ? "201603L"
+ : "200907");
Builder.defineMacro("__cpp_static_assert", LangOpts.CPlusPlus26 ? "202306L"
: LangOpts.CPlusPlus17
? "201411L"
@@ -692,7 +719,7 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_nested_namespace_definitions", "201411L");
Builder.defineMacro("__cpp_variadic_using", "201611L");
Builder.defineMacro("__cpp_aggregate_bases", "201603L");
- Builder.defineMacro("__cpp_structured_bindings", "201606L");
+ Builder.defineMacro("__cpp_structured_bindings", "202403L");
Builder.defineMacro("__cpp_nontype_template_args",
"201411L"); // (not latest)
Builder.defineMacro("__cpp_fold_expressions", "201603L");
@@ -708,10 +735,7 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
if (LangOpts.CPlusPlus20) {
Builder.defineMacro("__cpp_aggregate_paren_init", "201902L");
- // P0848 is implemented, but we're still waiting for other concepts
- // issues to be addressed before bumping __cpp_concepts up to 202002L.
- // Refer to the discussion of this at https://reviews.llvm.org/D128619.
- Builder.defineMacro("__cpp_concepts", "201907L");
+ Builder.defineMacro("__cpp_concepts", "202002");
Builder.defineMacro("__cpp_conditional_explicit", "201806L");
Builder.defineMacro("__cpp_consteval", "202211L");
Builder.defineMacro("__cpp_constexpr_dynamic_alloc", "201907L");
@@ -724,7 +748,7 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
}
// C++23 features.
if (LangOpts.CPlusPlus23) {
- Builder.defineMacro("__cpp_implicit_move", "202011L");
+ Builder.defineMacro("__cpp_implicit_move", "202207L");
Builder.defineMacro("__cpp_size_t_suffix", "202011L");
Builder.defineMacro("__cpp_if_consteval", "202106L");
Builder.defineMacro("__cpp_multidimensional_subscript", "202211L");
@@ -738,6 +762,10 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_named_character_escapes", "202207L");
Builder.defineMacro("__cpp_placeholder_variables", "202306L");
+ // C++26 features supported in earlier language modes.
+ Builder.defineMacro("__cpp_pack_indexing", "202311L");
+ Builder.defineMacro("__cpp_deleted_function", "202403L");
+
if (LangOpts.Char8)
Builder.defineMacro("__cpp_char8_t", "202207L");
Builder.defineMacro("__cpp_impl_destroying_delete", "201806L");
@@ -766,6 +794,60 @@ void InitializeOpenCLFeatureTestMacros(const TargetInfo &TI,
Builder.defineMacro("__opencl_c_int64");
}
+llvm::SmallString<32> ConstructFixedPointLiteral(llvm::APFixedPoint Val,
+ llvm::StringRef Suffix) {
+ if (Val.isSigned() && Val == llvm::APFixedPoint::getMin(Val.getSemantics())) {
+ // When representing the min value of a signed fixed point type in source
+ // code, we cannot simply write `-<lowest value>`. For example, the min
+ // value of a `short _Fract` cannot be written as `-1.0hr`. This is because
+ // the parser will read this (and really any negative numerical literal) as
+ // a UnaryOperator that owns a FixedPointLiteral with a positive value
+ // rather than just a FixedPointLiteral with a negative value. Compiling
+ // `-1.0hr` results in an overflow to the maximal value of that fixed point
+ // type. The correct way to represent a signed min value is to instead split
+ // it into two halves, like `(-0.5hr-0.5hr)` which is what the standard
+ // defines SFRACT_MIN as.
+ llvm::SmallString<32> Literal;
+ Literal.push_back('(');
+ llvm::SmallString<32> HalfStr =
+ ConstructFixedPointLiteral(Val.shr(1), Suffix);
+ Literal += HalfStr;
+ Literal += HalfStr;
+ Literal.push_back(')');
+ return Literal;
+ }
+
+ llvm::SmallString<32> Str(Val.toString());
+ Str += Suffix;
+ return Str;
+}
+
+void DefineFixedPointMacros(const TargetInfo &TI, MacroBuilder &Builder,
+ llvm::StringRef TypeName, llvm::StringRef Suffix,
+ unsigned Width, unsigned Scale, bool Signed) {
+ // Saturation doesn't affect the size or scale of a fixed point type, so we
+ // don't need it here.
+ llvm::FixedPointSemantics FXSema(
+ Width, Scale, Signed, /*IsSaturated=*/false,
+ !Signed && TI.doUnsignedFixedPointTypesHavePadding());
+ llvm::SmallString<32> MacroPrefix("__");
+ MacroPrefix += TypeName;
+ Builder.defineMacro(MacroPrefix + "_EPSILON__",
+ ConstructFixedPointLiteral(
+ llvm::APFixedPoint::getEpsilon(FXSema), Suffix));
+ Builder.defineMacro(MacroPrefix + "_FBIT__", Twine(Scale));
+ Builder.defineMacro(
+ MacroPrefix + "_MAX__",
+ ConstructFixedPointLiteral(llvm::APFixedPoint::getMax(FXSema), Suffix));
+
+ // ISO/IEC TR 18037:2008 doesn't specify MIN macros for unsigned types since
+ // they're all just zero.
+ if (Signed)
+ Builder.defineMacro(
+ MacroPrefix + "_MIN__",
+ ConstructFixedPointLiteral(llvm::APFixedPoint::getMin(FXSema), Suffix));
+}
+
static void InitializePredefinedMacros(const TargetInfo &TI,
const LangOptions &LangOpts,
const FrontendOptions &FEOpts,
@@ -860,6 +942,12 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (LangOpts.GNUCVersion && LangOpts.CPlusPlus11)
Builder.defineMacro("__GXX_EXPERIMENTAL_CXX0X__");
+ if (TI.getTriple().isWindowsGNUEnvironment()) {
+ // Set ABI defining macros for libstdc++ for MinGW, where the
+ // default in libstdc++ differs from the defaults for this target.
+ Builder.defineMacro("__GXX_TYPEINFO_EQUALITY_INLINE", "0");
+ }
+
if (LangOpts.ObjC) {
if (LangOpts.ObjCRuntime.isNonFragile()) {
Builder.defineMacro("__OBJC2__");
@@ -941,6 +1029,8 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
else if (LangOpts.hasDWARFExceptions() &&
(TI.getTriple().isThumb() || TI.getTriple().isARM()))
Builder.defineMacro("__ARM_DWARF_EH__");
+ else if (LangOpts.hasWasmExceptions() && TI.getTriple().isWasm())
+ Builder.defineMacro("__WASM_EXCEPTIONS__");
if (LangOpts.Deprecated)
Builder.defineMacro("__DEPRECATED");
@@ -1064,27 +1154,30 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
DefineTypeSizeof("__SIZEOF_INT128__", 128, TI, Builder);
DefineType("__INTMAX_TYPE__", TI.getIntMaxType(), Builder);
- DefineFmt("__INTMAX", TI.getIntMaxType(), TI, Builder);
+ DefineFmt(LangOpts, "__INTMAX", TI.getIntMaxType(), TI, Builder);
Builder.defineMacro("__INTMAX_C_SUFFIX__",
TI.getTypeConstantSuffix(TI.getIntMaxType()));
DefineType("__UINTMAX_TYPE__", TI.getUIntMaxType(), Builder);
- DefineFmt("__UINTMAX", TI.getUIntMaxType(), TI, Builder);
+ DefineFmt(LangOpts, "__UINTMAX", TI.getUIntMaxType(), TI, Builder);
Builder.defineMacro("__UINTMAX_C_SUFFIX__",
TI.getTypeConstantSuffix(TI.getUIntMaxType()));
DefineType("__PTRDIFF_TYPE__", TI.getPtrDiffType(LangAS::Default), Builder);
- DefineFmt("__PTRDIFF", TI.getPtrDiffType(LangAS::Default), TI, Builder);
+ DefineFmt(LangOpts, "__PTRDIFF", TI.getPtrDiffType(LangAS::Default), TI,
+ Builder);
DefineType("__INTPTR_TYPE__", TI.getIntPtrType(), Builder);
- DefineFmt("__INTPTR", TI.getIntPtrType(), TI, Builder);
+ DefineFmt(LangOpts, "__INTPTR", TI.getIntPtrType(), TI, Builder);
DefineType("__SIZE_TYPE__", TI.getSizeType(), Builder);
- DefineFmt("__SIZE", TI.getSizeType(), TI, Builder);
+ DefineFmt(LangOpts, "__SIZE", TI.getSizeType(), TI, Builder);
DefineType("__WCHAR_TYPE__", TI.getWCharType(), Builder);
DefineType("__WINT_TYPE__", TI.getWIntType(), Builder);
DefineTypeSizeAndWidth("__SIG_ATOMIC", TI.getSigAtomicType(), TI, Builder);
+ if (LangOpts.C23)
+ DefineType("__CHAR8_TYPE__", TI.UnsignedChar, Builder);
DefineType("__CHAR16_TYPE__", TI.getChar16Type(), Builder);
DefineType("__CHAR32_TYPE__", TI.getChar32Type(), Builder);
DefineType("__UINTPTR_TYPE__", TI.getUIntPtrType(), Builder);
- DefineFmt("__UINTPTR", TI.getUIntPtrType(), TI, Builder);
+ DefineFmt(LangOpts, "__UINTPTR", TI.getUIntPtrType(), TI, Builder);
// The C standard requires the width of uintptr_t and intptr_t to be the same,
// per 7.20.2.4p1. Same for intmax_t and uintmax_t, per 7.20.2.5p1.
@@ -1095,6 +1188,47 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
TI.getTypeWidth(TI.getIntMaxType()) &&
"uintmax_t and intmax_t have different widths?");
+ if (LangOpts.FixedPoint) {
+ // Each unsigned type has the same width as their signed type.
+ DefineFixedPointMacros(TI, Builder, "SFRACT", "HR", TI.getShortFractWidth(),
+ TI.getShortFractScale(), /*Signed=*/true);
+ DefineFixedPointMacros(TI, Builder, "USFRACT", "UHR",
+ TI.getShortFractWidth(),
+ TI.getUnsignedShortFractScale(), /*Signed=*/false);
+ DefineFixedPointMacros(TI, Builder, "FRACT", "R", TI.getFractWidth(),
+ TI.getFractScale(), /*Signed=*/true);
+ DefineFixedPointMacros(TI, Builder, "UFRACT", "UR", TI.getFractWidth(),
+ TI.getUnsignedFractScale(), /*Signed=*/false);
+ DefineFixedPointMacros(TI, Builder, "LFRACT", "LR", TI.getLongFractWidth(),
+ TI.getLongFractScale(), /*Signed=*/true);
+ DefineFixedPointMacros(TI, Builder, "ULFRACT", "ULR",
+ TI.getLongFractWidth(),
+ TI.getUnsignedLongFractScale(), /*Signed=*/false);
+ DefineFixedPointMacros(TI, Builder, "SACCUM", "HK", TI.getShortAccumWidth(),
+ TI.getShortAccumScale(), /*Signed=*/true);
+ DefineFixedPointMacros(TI, Builder, "USACCUM", "UHK",
+ TI.getShortAccumWidth(),
+ TI.getUnsignedShortAccumScale(), /*Signed=*/false);
+ DefineFixedPointMacros(TI, Builder, "ACCUM", "K", TI.getAccumWidth(),
+ TI.getAccumScale(), /*Signed=*/true);
+ DefineFixedPointMacros(TI, Builder, "UACCUM", "UK", TI.getAccumWidth(),
+ TI.getUnsignedAccumScale(), /*Signed=*/false);
+ DefineFixedPointMacros(TI, Builder, "LACCUM", "LK", TI.getLongAccumWidth(),
+ TI.getLongAccumScale(), /*Signed=*/true);
+ DefineFixedPointMacros(TI, Builder, "ULACCUM", "ULK",
+ TI.getLongAccumWidth(),
+ TI.getUnsignedLongAccumScale(), /*Signed=*/false);
+
+ Builder.defineMacro("__SACCUM_IBIT__", Twine(TI.getShortAccumIBits()));
+ Builder.defineMacro("__USACCUM_IBIT__",
+ Twine(TI.getUnsignedShortAccumIBits()));
+ Builder.defineMacro("__ACCUM_IBIT__", Twine(TI.getAccumIBits()));
+ Builder.defineMacro("__UACCUM_IBIT__", Twine(TI.getUnsignedAccumIBits()));
+ Builder.defineMacro("__LACCUM_IBIT__", Twine(TI.getLongAccumIBits()));
+ Builder.defineMacro("__ULACCUM_IBIT__",
+ Twine(TI.getUnsignedLongAccumIBits()));
+ }
+
if (TI.hasFloat16Type())
DefineFloatMacros(Builder, "FLT16", &TI.getHalfFormat(), "F16");
DefineFloatMacros(Builder, "FLT", &TI.getFloatFormat(), "F");
@@ -1119,65 +1253,66 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__WINT_UNSIGNED__");
// Define exact-width integer types for stdint.h
- DefineExactWidthIntType(TargetInfo::SignedChar, TI, Builder);
+ DefineExactWidthIntType(LangOpts, TargetInfo::SignedChar, TI, Builder);
if (TI.getShortWidth() > TI.getCharWidth())
- DefineExactWidthIntType(TargetInfo::SignedShort, TI, Builder);
+ DefineExactWidthIntType(LangOpts, TargetInfo::SignedShort, TI, Builder);
if (TI.getIntWidth() > TI.getShortWidth())
- DefineExactWidthIntType(TargetInfo::SignedInt, TI, Builder);
+ DefineExactWidthIntType(LangOpts, TargetInfo::SignedInt, TI, Builder);
if (TI.getLongWidth() > TI.getIntWidth())
- DefineExactWidthIntType(TargetInfo::SignedLong, TI, Builder);
+ DefineExactWidthIntType(LangOpts, TargetInfo::SignedLong, TI, Builder);
if (TI.getLongLongWidth() > TI.getLongWidth())
- DefineExactWidthIntType(TargetInfo::SignedLongLong, TI, Builder);
+ DefineExactWidthIntType(LangOpts, TargetInfo::SignedLongLong, TI, Builder);
- DefineExactWidthIntType(TargetInfo::UnsignedChar, TI, Builder);
+ DefineExactWidthIntType(LangOpts, TargetInfo::UnsignedChar, TI, Builder);
DefineExactWidthIntTypeSize(TargetInfo::UnsignedChar, TI, Builder);
DefineExactWidthIntTypeSize(TargetInfo::SignedChar, TI, Builder);
if (TI.getShortWidth() > TI.getCharWidth()) {
- DefineExactWidthIntType(TargetInfo::UnsignedShort, TI, Builder);
+ DefineExactWidthIntType(LangOpts, TargetInfo::UnsignedShort, TI, Builder);
DefineExactWidthIntTypeSize(TargetInfo::UnsignedShort, TI, Builder);
DefineExactWidthIntTypeSize(TargetInfo::SignedShort, TI, Builder);
}
if (TI.getIntWidth() > TI.getShortWidth()) {
- DefineExactWidthIntType(TargetInfo::UnsignedInt, TI, Builder);
+ DefineExactWidthIntType(LangOpts, TargetInfo::UnsignedInt, TI, Builder);
DefineExactWidthIntTypeSize(TargetInfo::UnsignedInt, TI, Builder);
DefineExactWidthIntTypeSize(TargetInfo::SignedInt, TI, Builder);
}
if (TI.getLongWidth() > TI.getIntWidth()) {
- DefineExactWidthIntType(TargetInfo::UnsignedLong, TI, Builder);
+ DefineExactWidthIntType(LangOpts, TargetInfo::UnsignedLong, TI, Builder);
DefineExactWidthIntTypeSize(TargetInfo::UnsignedLong, TI, Builder);
DefineExactWidthIntTypeSize(TargetInfo::SignedLong, TI, Builder);
}
if (TI.getLongLongWidth() > TI.getLongWidth()) {
- DefineExactWidthIntType(TargetInfo::UnsignedLongLong, TI, Builder);
+ DefineExactWidthIntType(LangOpts, TargetInfo::UnsignedLongLong, TI,
+ Builder);
DefineExactWidthIntTypeSize(TargetInfo::UnsignedLongLong, TI, Builder);
DefineExactWidthIntTypeSize(TargetInfo::SignedLongLong, TI, Builder);
}
- DefineLeastWidthIntType(8, true, TI, Builder);
- DefineLeastWidthIntType(8, false, TI, Builder);
- DefineLeastWidthIntType(16, true, TI, Builder);
- DefineLeastWidthIntType(16, false, TI, Builder);
- DefineLeastWidthIntType(32, true, TI, Builder);
- DefineLeastWidthIntType(32, false, TI, Builder);
- DefineLeastWidthIntType(64, true, TI, Builder);
- DefineLeastWidthIntType(64, false, TI, Builder);
-
- DefineFastIntType(8, true, TI, Builder);
- DefineFastIntType(8, false, TI, Builder);
- DefineFastIntType(16, true, TI, Builder);
- DefineFastIntType(16, false, TI, Builder);
- DefineFastIntType(32, true, TI, Builder);
- DefineFastIntType(32, false, TI, Builder);
- DefineFastIntType(64, true, TI, Builder);
- DefineFastIntType(64, false, TI, Builder);
+ DefineLeastWidthIntType(LangOpts, 8, true, TI, Builder);
+ DefineLeastWidthIntType(LangOpts, 8, false, TI, Builder);
+ DefineLeastWidthIntType(LangOpts, 16, true, TI, Builder);
+ DefineLeastWidthIntType(LangOpts, 16, false, TI, Builder);
+ DefineLeastWidthIntType(LangOpts, 32, true, TI, Builder);
+ DefineLeastWidthIntType(LangOpts, 32, false, TI, Builder);
+ DefineLeastWidthIntType(LangOpts, 64, true, TI, Builder);
+ DefineLeastWidthIntType(LangOpts, 64, false, TI, Builder);
+
+ DefineFastIntType(LangOpts, 8, true, TI, Builder);
+ DefineFastIntType(LangOpts, 8, false, TI, Builder);
+ DefineFastIntType(LangOpts, 16, true, TI, Builder);
+ DefineFastIntType(LangOpts, 16, false, TI, Builder);
+ DefineFastIntType(LangOpts, 32, true, TI, Builder);
+ DefineFastIntType(LangOpts, 32, false, TI, Builder);
+ DefineFastIntType(LangOpts, 64, true, TI, Builder);
+ DefineFastIntType(LangOpts, 64, false, TI, Builder);
Builder.defineMacro("__USER_LABEL_PREFIX__", TI.getUserLabelPrefix());
@@ -1200,6 +1335,16 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__GCC_ATOMIC_TEST_AND_SET_TRUEVAL", "1");
}
+ // GCC defines these macros in both C and C++ modes despite them being needed
+ // mostly for STL implementations in C++.
+ auto [Destructive, Constructive] = TI.hardwareInterferenceSizes();
+ Builder.defineMacro("__GCC_DESTRUCTIVE_SIZE", Twine(Destructive));
+ Builder.defineMacro("__GCC_CONSTRUCTIVE_SIZE", Twine(Constructive));
+ // We need to use push_macro to allow users to redefine these macros from the
+ // command line with -D and not issue a -Wmacro-redefined warning.
+ Builder.append("#pragma push_macro(\"__GCC_DESTRUCTIVE_SIZE\")");
+ Builder.append("#pragma push_macro(\"__GCC_CONSTRUCTIVE_SIZE\")");
+
auto addLockFreeMacros = [&](const llvm::Twine &Prefix) {
// Used by libc++ and libstdc++ to implement ATOMIC_<foo>_LOCK_FREE.
#define DEFINE_LOCK_FREE_MACRO(TYPE, Type) \
@@ -1207,8 +1352,10 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
getLockFreeValue(TI.get##Type##Width(), TI));
DEFINE_LOCK_FREE_MACRO(BOOL, Bool);
DEFINE_LOCK_FREE_MACRO(CHAR, Char);
- if (LangOpts.Char8)
- DEFINE_LOCK_FREE_MACRO(CHAR8_T, Char); // Treat char8_t like char.
+ // char8_t has the same representation / width as unsigned
+ // char in C++ and is a typedef for unsigned char in C23
+ if (LangOpts.Char8 || LangOpts.C23)
+ DEFINE_LOCK_FREE_MACRO(CHAR8_T, Char);
DEFINE_LOCK_FREE_MACRO(CHAR16_T, Char16);
DEFINE_LOCK_FREE_MACRO(CHAR32_T, Char32);
DEFINE_LOCK_FREE_MACRO(WCHAR_T, WChar);
diff --git a/contrib/llvm-project/clang/lib/Frontend/InterfaceStubFunctionsConsumer.cpp b/contrib/llvm-project/clang/lib/Frontend/InterfaceStubFunctionsConsumer.cpp
index d58f5bb09199..d7cfd23bb0a7 100644
--- a/contrib/llvm-project/clang/lib/Frontend/InterfaceStubFunctionsConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/InterfaceStubFunctionsConsumer.cpp
@@ -33,7 +33,8 @@ class InterfaceStubFunctionsConsumer : public ASTConsumer {
MangledSymbol(const std::string &ParentName, uint8_t Type, uint8_t Binding,
std::vector<std::string> Names)
- : ParentName(ParentName), Type(Type), Binding(Binding), Names(Names) {}
+ : ParentName(ParentName), Type(Type), Binding(Binding),
+ Names(std::move(Names)) {}
};
using MangledSymbols = std::map<const NamedDecl *, MangledSymbol>;
@@ -295,7 +296,7 @@ public:
OS << "Symbols:\n";
for (const auto &E : Symbols) {
const MangledSymbol &Symbol = E.second;
- for (auto Name : Symbol.Names) {
+ for (const auto &Name : Symbol.Names) {
OS << " - { Name: \""
<< (Symbol.ParentName.empty() || Instance.getLangOpts().CPlusPlus
? ""
diff --git a/contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp b/contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp
index 939e611e5489..e2883f1e027e 100644
--- a/contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp
@@ -55,7 +55,8 @@ struct ModuleDependencyPPCallbacks : public PPCallbacks {
StringRef FileName, bool IsAngled,
CharSourceRange FilenameRange,
OptionalFileEntryRef File, StringRef SearchPath,
- StringRef RelativePath, const Module *Imported,
+ StringRef RelativePath, const Module *SuggestedModule,
+ bool ModuleImported,
SrcMgr::CharacteristicKind FileType) override {
if (!File)
return;
@@ -104,7 +105,7 @@ static bool isCaseSensitivePath(StringRef Path) {
// already expects when sensitivity isn't setup.
for (auto &C : Path)
UpperDest.push_back(toUppercase(C));
- if (!llvm::sys::fs::real_path(UpperDest, RealDest) && Path.equals(RealDest))
+ if (!llvm::sys::fs::real_path(UpperDest, RealDest) && Path == RealDest)
return false;
return true;
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/MultiplexConsumer.cpp b/contrib/llvm-project/clang/lib/Frontend/MultiplexConsumer.cpp
index 737877329c9c..2158d176d189 100644
--- a/contrib/llvm-project/clang/lib/Frontend/MultiplexConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/MultiplexConsumer.cpp
@@ -20,6 +20,9 @@ using namespace clang;
namespace clang {
+class NamespaceDecl;
+class TranslationUnitDecl;
+
MultiplexASTDeserializationListener::MultiplexASTDeserializationListener(
const std::vector<ASTDeserializationListener*>& L)
: Listeners(L) {
@@ -32,7 +35,7 @@ void MultiplexASTDeserializationListener::ReaderInitialized(
}
void MultiplexASTDeserializationListener::IdentifierRead(
- serialization::IdentID ID, IdentifierInfo *II) {
+ serialization::IdentifierID ID, IdentifierInfo *II) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->IdentifierRead(ID, II);
}
@@ -49,12 +52,17 @@ void MultiplexASTDeserializationListener::TypeRead(
Listeners[i]->TypeRead(Idx, T);
}
-void MultiplexASTDeserializationListener::DeclRead(
- serialization::DeclID ID, const Decl *D) {
+void MultiplexASTDeserializationListener::DeclRead(GlobalDeclID ID,
+ const Decl *D) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->DeclRead(ID, D);
}
+void MultiplexASTDeserializationListener::PredefinedDeclBuilt(PredefinedDeclIDs ID, const Decl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->PredefinedDeclBuilt(ID, D);
+}
+
void MultiplexASTDeserializationListener::SelectorRead(
serialization::SelectorID ID, Selector Sel) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
@@ -115,6 +123,11 @@ public:
void RedefinedHiddenDefinition(const NamedDecl *D, Module *M) override;
void AddedAttributeToRecord(const Attr *Attr,
const RecordDecl *Record) override;
+ void EnteringModulePurview() override;
+ void AddedManglingNumber(const Decl *D, unsigned) override;
+ void AddedStaticLocalNumbers(const Decl *D, unsigned) override;
+ void AddedAnonymousNamespace(const TranslationUnitDecl *,
+ NamespaceDecl *AnonNamespace) override;
private:
std::vector<ASTMutationListener*> Listeners;
@@ -238,6 +251,27 @@ void MultiplexASTMutationListener::AddedAttributeToRecord(
L->AddedAttributeToRecord(Attr, Record);
}
+void MultiplexASTMutationListener::EnteringModulePurview() {
+ for (auto *L : Listeners)
+ L->EnteringModulePurview();
+}
+
+void MultiplexASTMutationListener::AddedManglingNumber(const Decl *D,
+ unsigned Number) {
+ for (auto *L : Listeners)
+ L->AddedManglingNumber(D, Number);
+}
+void MultiplexASTMutationListener::AddedStaticLocalNumbers(const Decl *D,
+ unsigned Number) {
+ for (auto *L : Listeners)
+ L->AddedStaticLocalNumbers(D, Number);
+}
+void MultiplexASTMutationListener::AddedAnonymousNamespace(
+ const TranslationUnitDecl *TU, NamespaceDecl *AnonNamespace) {
+ for (auto *L : Listeners)
+ L->AddedAnonymousNamespace(TU, AnonNamespace);
+}
+
} // end namespace clang
MultiplexConsumer::MultiplexConsumer(
@@ -328,7 +362,7 @@ void MultiplexConsumer::CompleteTentativeDefinition(VarDecl *D) {
Consumer->CompleteTentativeDefinition(D);
}
-void MultiplexConsumer::CompleteExternalDeclaration(VarDecl *D) {
+void MultiplexConsumer::CompleteExternalDeclaration(DeclaratorDecl *D) {
for (auto &Consumer : Consumers)
Consumer->CompleteExternalDeclaration(D);
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp b/contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp
index 62373b23b82e..cab5838fceb2 100644
--- a/contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp
@@ -28,6 +28,7 @@
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/VirtualFileSystem.h"
@@ -98,7 +99,8 @@ public:
StringRef FileName, bool IsAngled,
CharSourceRange FilenameRange,
OptionalFileEntryRef File, StringRef SearchPath,
- StringRef RelativePath, const Module *Imported,
+ StringRef RelativePath, const Module *SuggestedModule,
+ bool ModuleImported,
SrcMgr::CharacteristicKind FileType) override {
// File is std::nullopt if it wasn't found.
// (We have some false negatives if PP recovered e.g. <foo> -> "foo")
@@ -289,8 +291,7 @@ private:
class PrecompilePreambleConsumer : public PCHGenerator {
public:
- PrecompilePreambleConsumer(PrecompilePreambleAction &Action,
- const Preprocessor &PP,
+ PrecompilePreambleConsumer(PrecompilePreambleAction &Action, Preprocessor &PP,
InMemoryModuleCache &ModuleCache,
StringRef isysroot,
std::shared_ptr<PCHBuffer> Buffer)
diff --git a/contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp b/contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp
index 7f5f66906823..0592423c12ec 100644
--- a/contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -11,11 +11,11 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/Utils.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Frontend/PreprocessorOutputOptions.h"
+#include "clang/Frontend/Utils.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Pragma.h"
@@ -93,6 +93,7 @@ private:
bool DisableLineMarkers;
bool DumpDefines;
bool DumpIncludeDirectives;
+ bool DumpEmbedDirectives;
bool UseLineDirectives;
bool IsFirstFileEntered;
bool MinimizeWhitespace;
@@ -100,6 +101,7 @@ private:
bool KeepSystemIncludes;
raw_ostream *OrigOS;
std::unique_ptr<llvm::raw_null_ostream> NullOS;
+ unsigned NumToksToSkip;
Token PrevTok;
Token PrevPrevTok;
@@ -107,14 +109,16 @@ private:
public:
PrintPPOutputPPCallbacks(Preprocessor &pp, raw_ostream *os, bool lineMarkers,
bool defines, bool DumpIncludeDirectives,
- bool UseLineDirectives, bool MinimizeWhitespace,
- bool DirectivesOnly, bool KeepSystemIncludes)
+ bool DumpEmbedDirectives, bool UseLineDirectives,
+ bool MinimizeWhitespace, bool DirectivesOnly,
+ bool KeepSystemIncludes)
: PP(pp), SM(PP.getSourceManager()), ConcatInfo(PP), OS(os),
DisableLineMarkers(lineMarkers), DumpDefines(defines),
DumpIncludeDirectives(DumpIncludeDirectives),
+ DumpEmbedDirectives(DumpEmbedDirectives),
UseLineDirectives(UseLineDirectives),
MinimizeWhitespace(MinimizeWhitespace), DirectivesOnly(DirectivesOnly),
- KeepSystemIncludes(KeepSystemIncludes), OrigOS(os) {
+ KeepSystemIncludes(KeepSystemIncludes), OrigOS(os), NumToksToSkip(0) {
CurLine = 0;
CurFilename += "<uninit>";
EmittedTokensOnThisLine = false;
@@ -129,6 +133,10 @@ public:
PrevPrevTok.startToken();
}
+ /// Returns true if #embed directives should be expanded into a comma-
+ /// delimited list of integer constants or not.
+ bool expandEmbedContents() const { return !DumpEmbedDirectives; }
+
bool isMinimizeWhitespace() const { return MinimizeWhitespace; }
void setEmittedTokensOnThisLine() { EmittedTokensOnThisLine = true; }
@@ -149,11 +157,15 @@ public:
void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
FileID PrevFID) override;
+ void EmbedDirective(SourceLocation HashLoc, StringRef FileName, bool IsAngled,
+ OptionalFileEntryRef File,
+ const LexEmbedParametersResult &Params) override;
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
CharSourceRange FilenameRange,
OptionalFileEntryRef File, StringRef SearchPath,
- StringRef RelativePath, const Module *Imported,
+ StringRef RelativePath, const Module *SuggestedModule,
+ bool ModuleImported,
SrcMgr::CharacteristicKind FileType) override;
void Ident(SourceLocation Loc, StringRef str) override;
void PragmaMessage(SourceLocation Loc, StringRef Namespace,
@@ -231,6 +243,9 @@ public:
void BeginModule(const Module *M);
void EndModule(const Module *M);
+
+ unsigned GetNumToksToSkip() const { return NumToksToSkip; }
+ void ResetSkipToks() { NumToksToSkip = 0; }
};
} // end anonymous namespace
@@ -398,11 +413,79 @@ void PrintPPOutputPPCallbacks::FileChanged(SourceLocation Loc,
}
}
+void PrintPPOutputPPCallbacks::EmbedDirective(
+ SourceLocation HashLoc, StringRef FileName, bool IsAngled,
+ OptionalFileEntryRef File, const LexEmbedParametersResult &Params) {
+ if (!DumpEmbedDirectives)
+ return;
+
+ // The EmbedDirective() callback is called before we produce the annotation
+ // token stream for the directive. We skip printing the annotation tokens
+ // within PrintPreprocessedTokens(), but we also need to skip the prefix,
+ // suffix, and if_empty tokens as those are inserted directly into the token
+ // stream and would otherwise be printed immediately after printing the
+ // #embed directive.
+ //
+ // FIXME: counting tokens to skip is a kludge but we have no way to know
+ // which tokens were inserted as part of the embed and which ones were
+ // explicitly written by the user.
+ MoveToLine(HashLoc, /*RequireStartOfLine=*/true);
+ *OS << "#embed " << (IsAngled ? '<' : '"') << FileName
+ << (IsAngled ? '>' : '"');
+
+ auto PrintToks = [&](llvm::ArrayRef<Token> Toks) {
+ SmallString<128> SpellingBuffer;
+ for (const Token &T : Toks) {
+ if (T.hasLeadingSpace())
+ *OS << " ";
+ *OS << PP.getSpelling(T, SpellingBuffer);
+ }
+ };
+ bool SkipAnnotToks = true;
+ if (Params.MaybeIfEmptyParam) {
+ *OS << " if_empty(";
+ PrintToks(Params.MaybeIfEmptyParam->Tokens);
+ *OS << ")";
+ // If the file is empty, we can skip those tokens. If the file is not
+ // empty, we skip the annotation tokens.
+ if (File && !File->getSize()) {
+ NumToksToSkip += Params.MaybeIfEmptyParam->Tokens.size();
+ SkipAnnotToks = false;
+ }
+ }
+
+ if (Params.MaybeLimitParam) {
+ *OS << " limit(" << Params.MaybeLimitParam->Limit << ")";
+ }
+ if (Params.MaybeOffsetParam) {
+ *OS << " clang::offset(" << Params.MaybeOffsetParam->Offset << ")";
+ }
+ if (Params.MaybePrefixParam) {
+ *OS << " prefix(";
+ PrintToks(Params.MaybePrefixParam->Tokens);
+ *OS << ")";
+ NumToksToSkip += Params.MaybePrefixParam->Tokens.size();
+ }
+ if (Params.MaybeSuffixParam) {
+ *OS << " suffix(";
+ PrintToks(Params.MaybeSuffixParam->Tokens);
+ *OS << ")";
+ NumToksToSkip += Params.MaybeSuffixParam->Tokens.size();
+ }
+
+ // We may need to skip the annotation token.
+ if (SkipAnnotToks)
+ NumToksToSkip++;
+
+ *OS << " /* clang -E -dE */";
+ setEmittedDirectiveOnThisLine();
+}
+
void PrintPPOutputPPCallbacks::InclusionDirective(
SourceLocation HashLoc, const Token &IncludeTok, StringRef FileName,
bool IsAngled, CharSourceRange FilenameRange, OptionalFileEntryRef File,
- StringRef SearchPath, StringRef RelativePath, const Module *Imported,
- SrcMgr::CharacteristicKind FileType) {
+ StringRef SearchPath, StringRef RelativePath, const Module *SuggestedModule,
+ bool ModuleImported, SrcMgr::CharacteristicKind FileType) {
// In -dI mode, dump #include directives prior to dumping their content or
// interpretation. Similar for -fkeep-system-includes.
if (DumpIncludeDirectives || (KeepSystemIncludes && isSystem(FileType))) {
@@ -418,14 +501,14 @@ void PrintPPOutputPPCallbacks::InclusionDirective(
}
// When preprocessing, turn implicit imports into module import pragmas.
- if (Imported) {
+ if (ModuleImported) {
switch (IncludeTok.getIdentifierInfo()->getPPKeywordID()) {
case tok::pp_include:
case tok::pp_import:
case tok::pp_include_next:
MoveToLine(HashLoc, /*RequireStartOfLine=*/true);
*OS << "#pragma clang module import "
- << Imported->getFullModuleName(true)
+ << SuggestedModule->getFullModuleName(true)
<< " /* clang -E: implicit import for "
<< "#" << PP.getSpelling(IncludeTok) << " "
<< (IsAngled ? '<' : '"') << FileName << (IsAngled ? '>' : '"')
@@ -677,7 +760,7 @@ void PrintPPOutputPPCallbacks::HandleWhitespaceBeforeTok(const Token &Tok,
if (Tok.is(tok::eof) ||
(Tok.isAnnotation() && !Tok.is(tok::annot_header_unit) &&
!Tok.is(tok::annot_module_begin) && !Tok.is(tok::annot_module_end) &&
- !Tok.is(tok::annot_repl_input_end)))
+ !Tok.is(tok::annot_repl_input_end) && !Tok.is(tok::annot_embed)))
return;
// EmittedDirectiveOnThisLine takes priority over RequireSameLine.
@@ -877,6 +960,27 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
std::string Name = M->getFullModuleName();
Callbacks->OS->write(Name.data(), Name.size());
Callbacks->HandleNewlinesInToken(Name.data(), Name.size());
+ } else if (Tok.is(tok::annot_embed)) {
+ // Manually explode the binary data out to a stream of comma-delimited
+ // integer values. If the user passed -dE, that is handled by the
+ // EmbedDirective() callback. We should only get here if the user did not
+ // pass -dE.
+ assert(Callbacks->expandEmbedContents() &&
+ "did not expect an embed annotation");
+ auto *Data =
+ reinterpret_cast<EmbedAnnotationData *>(Tok.getAnnotationValue());
+
+ // Loop over the contents and print them as a comma-delimited list of
+ // values.
+ bool PrintComma = false;
+ for (auto Iter = Data->BinaryData.begin(), End = Data->BinaryData.end();
+ Iter != End; ++Iter) {
+ if (PrintComma)
+ *Callbacks->OS << ", ";
+ *Callbacks->OS << static_cast<unsigned>(*Iter);
+ PrintComma = true;
+ }
+ IsStartOfLine = true;
} else if (Tok.isAnnotation()) {
// Ignore annotation tokens created by pragmas - the pragmas themselves
// will be reproduced in the preprocessed output.
@@ -925,6 +1029,10 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
if (Tok.is(tok::eof)) break;
PP.Lex(Tok);
+ // If lexing that token causes us to need to skip future tokens, do so now.
+ for (unsigned I = 0, Skip = Callbacks->GetNumToksToSkip(); I < Skip; ++I)
+ PP.Lex(Tok);
+ Callbacks->ResetSkipToks();
}
}
@@ -981,8 +1089,9 @@ void clang::DoPrintPreprocessedInput(Preprocessor &PP, raw_ostream *OS,
PrintPPOutputPPCallbacks *Callbacks = new PrintPPOutputPPCallbacks(
PP, OS, !Opts.ShowLineMarkers, Opts.ShowMacros,
- Opts.ShowIncludeDirectives, Opts.UseLineDirectives,
- Opts.MinimizeWhitespace, Opts.DirectivesOnly, Opts.KeepSystemIncludes);
+ Opts.ShowIncludeDirectives, Opts.ShowEmbedDirectives,
+ Opts.UseLineDirectives, Opts.MinimizeWhitespace, Opts.DirectivesOnly,
+ Opts.KeepSystemIncludes);
// Expand macros in pragmas with -fms-extensions. The assumption is that
// the majority of pragmas in such a file will be Microsoft pragmas.
diff --git a/contrib/llvm-project/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp b/contrib/llvm-project/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp
index b6b37461089e..1462058003b3 100644
--- a/contrib/llvm-project/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp
@@ -75,7 +75,8 @@ private:
StringRef FileName, bool IsAngled,
CharSourceRange FilenameRange,
OptionalFileEntryRef File, StringRef SearchPath,
- StringRef RelativePath, const Module *Imported,
+ StringRef RelativePath, const Module *SuggestedModule,
+ bool ModuleImported,
SrcMgr::CharacteristicKind FileType) override;
void If(SourceLocation Loc, SourceRange ConditionRange,
ConditionValueKind ConditionValue) override;
@@ -189,9 +190,10 @@ void InclusionRewriter::InclusionDirective(
StringRef /*FileName*/, bool /*IsAngled*/,
CharSourceRange /*FilenameRange*/, OptionalFileEntryRef /*File*/,
StringRef /*SearchPath*/, StringRef /*RelativePath*/,
- const Module *Imported, SrcMgr::CharacteristicKind FileType) {
- if (Imported) {
- auto P = ModuleIncludes.insert(std::make_pair(HashLoc, Imported));
+ const Module *SuggestedModule, bool ModuleImported,
+ SrcMgr::CharacteristicKind FileType) {
+ if (ModuleImported) {
+ auto P = ModuleIncludes.insert(std::make_pair(HashLoc, SuggestedModule));
(void)P;
assert(P.second && "Unexpected revisitation of the same include directive");
} else
diff --git a/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp b/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
index 1f40db785981..3849e4040b53 100644
--- a/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
@@ -273,10 +273,9 @@ namespace {
std::string SStr;
llvm::raw_string_ostream S(SStr);
New->printPretty(S, nullptr, PrintingPolicy(LangOpts));
- const std::string &Str = S.str();
// If replacement succeeded or warning disabled return with no warning.
- if (!Rewrite.ReplaceText(SrcRange.getBegin(), Size, Str)) {
+ if (!Rewrite.ReplaceText(SrcRange.getBegin(), Size, SStr)) {
ReplacedNodes[Old] = New;
return;
}
@@ -465,15 +464,15 @@ namespace {
std::string SynthesizeByrefCopyDestroyHelper(VarDecl *VD, int flag);
std::string SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
- StringRef funcName, std::string Tag);
- std::string SynthesizeBlockFunc(BlockExpr *CE, int i,
- StringRef funcName, std::string Tag);
- std::string SynthesizeBlockImpl(BlockExpr *CE,
- std::string Tag, std::string Desc);
- std::string SynthesizeBlockDescriptor(std::string DescTag,
- std::string ImplTag,
- int i, StringRef funcName,
- unsigned hasCopy);
+ StringRef funcName,
+ const std::string &Tag);
+ std::string SynthesizeBlockFunc(BlockExpr *CE, int i, StringRef funcName,
+ const std::string &Tag);
+ std::string SynthesizeBlockImpl(BlockExpr *CE, const std::string &Tag,
+ const std::string &Desc);
+ std::string SynthesizeBlockDescriptor(const std::string &DescTag,
+ const std::string &ImplTag, int i,
+ StringRef funcName, unsigned hasCopy);
Stmt *SynthesizeBlockCall(CallExpr *Exp, const Expr* BlockExp);
void SynthesizeBlockLiterals(SourceLocation FunLocStart,
StringRef FunName);
@@ -592,7 +591,7 @@ namespace {
}
bool ImplementationIsNonLazy(const ObjCImplDecl *OD) const {
- IdentifierInfo* II = &Context->Idents.get("load");
+ const IdentifierInfo *II = &Context->Idents.get("load");
Selector LoadSel = Context->Selectors.getSelector(0, &II);
return OD->getClassMethod(LoadSel) != nullptr;
}
@@ -2581,7 +2580,7 @@ Stmt *RewriteModernObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
std::string prettyBufS;
llvm::raw_string_ostream prettyBuf(prettyBufS);
Exp->getString()->printPretty(prettyBuf, nullptr, PrintingPolicy(LangOpts));
- Preamble += prettyBuf.str();
+ Preamble += prettyBufS;
Preamble += ",";
Preamble += utostr(Exp->getString()->getByteLength()) + "};\n";
@@ -4037,7 +4036,7 @@ static bool HasLocalVariableExternalStorage(ValueDecl *VD) {
std::string RewriteModernObjC::SynthesizeBlockFunc(BlockExpr *CE, int i,
StringRef funcName,
- std::string Tag) {
+ const std::string &Tag) {
const FunctionType *AFT = CE->getFunctionType();
QualType RT = AFT->getReturnType();
std::string StructRef = "struct " + Tag;
@@ -4131,9 +4130,8 @@ std::string RewriteModernObjC::SynthesizeBlockFunc(BlockExpr *CE, int i,
return S;
}
-std::string RewriteModernObjC::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
- StringRef funcName,
- std::string Tag) {
+std::string RewriteModernObjC::SynthesizeBlockHelperFuncs(
+ BlockExpr *CE, int i, StringRef funcName, const std::string &Tag) {
std::string StructRef = "struct " + Tag;
std::string S = "static void __";
@@ -4175,8 +4173,9 @@ std::string RewriteModernObjC::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
return S;
}
-std::string RewriteModernObjC::SynthesizeBlockImpl(BlockExpr *CE, std::string Tag,
- std::string Desc) {
+std::string RewriteModernObjC::SynthesizeBlockImpl(BlockExpr *CE,
+ const std::string &Tag,
+ const std::string &Desc) {
std::string S = "\nstruct " + Tag;
std::string Constructor = " " + Tag;
@@ -4290,10 +4289,9 @@ std::string RewriteModernObjC::SynthesizeBlockImpl(BlockExpr *CE, std::string Ta
return S;
}
-std::string RewriteModernObjC::SynthesizeBlockDescriptor(std::string DescTag,
- std::string ImplTag, int i,
- StringRef FunName,
- unsigned hasCopy) {
+std::string RewriteModernObjC::SynthesizeBlockDescriptor(
+ const std::string &DescTag, const std::string &ImplTag, int i,
+ StringRef FunName, unsigned hasCopy) {
std::string S = "\nstatic struct " + DescTag;
S += " {\n size_t reserved;\n";
@@ -4415,7 +4413,7 @@ void RewriteModernObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart,
llvm::raw_string_ostream constructorExprBuf(SStr);
GlobalConstructionExp->printPretty(constructorExprBuf, nullptr,
PrintingPolicy(LangOpts));
- globalBuf += constructorExprBuf.str();
+ globalBuf += SStr;
globalBuf += ";\n";
InsertText(FunLocStart, globalBuf);
GlobalConstructionExp = nullptr;
diff --git a/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp b/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
index b76728acb907..0887b5a504f0 100644
--- a/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
@@ -574,7 +574,7 @@ void SDiagsWriter::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
SmallString<256> diagnostic;
Info.FormatDiagnostic(diagnostic);
getMetaDiags()->Report(
- diag::warn_fe_serialized_diag_failure_during_finalisation)
+ diag::warn_fe_serialized_diag_failure_during_finalization)
<< diagnostic;
return;
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp b/contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp
index 779dead5d058..a264836a5439 100644
--- a/contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp
@@ -12,6 +12,7 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
+#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ConvertUTF.h"
@@ -41,6 +42,14 @@ static const enum raw_ostream::Colors fatalColor = raw_ostream::RED;
static const enum raw_ostream::Colors savedColor =
raw_ostream::SAVEDCOLOR;
+// Magenta is taken for 'warning'. Red is already 'error' and 'cyan'
+// is already taken for 'note'. Green is already used to underline
+// source ranges. White and black are bad because of the usual
+// terminal backgrounds. Which leaves us only with TWO options.
+static constexpr raw_ostream::Colors CommentColor = raw_ostream::YELLOW;
+static constexpr raw_ostream::Colors LiteralColor = raw_ostream::GREEN;
+static constexpr raw_ostream::Colors KeywordColor = raw_ostream::BLUE;
+
/// Add highlights to differences in template strings.
static void applyTemplateHighlighting(raw_ostream &OS, StringRef Str,
bool &Normal, bool Bold) {
@@ -136,7 +145,7 @@ printableTextForNextCharacter(StringRef SourceLine, size_t *I,
(void)Res;
assert(Res == llvm::conversionOK);
assert(OriginalBegin < Begin);
- assert((Begin - OriginalBegin) == CharSize);
+ assert(unsigned(Begin - OriginalBegin) == CharSize);
(*I) += (Begin - OriginalBegin);
@@ -644,10 +653,10 @@ static bool printWordWrapped(raw_ostream &OS, StringRef Str, unsigned Columns,
return Wrapped;
}
-TextDiagnostic::TextDiagnostic(raw_ostream &OS,
- const LangOptions &LangOpts,
- DiagnosticOptions *DiagOpts)
- : DiagnosticRenderer(LangOpts, DiagOpts), OS(OS) {}
+TextDiagnostic::TextDiagnostic(raw_ostream &OS, const LangOptions &LangOpts,
+ DiagnosticOptions *DiagOpts,
+ const Preprocessor *PP)
+ : DiagnosticRenderer(LangOpts, DiagOpts), OS(OS), PP(PP) {}
TextDiagnostic::~TextDiagnostic() {}
@@ -1112,6 +1121,162 @@ prepareAndFilterRanges(const SmallVectorImpl<CharSourceRange> &Ranges,
return LineRanges;
}
+/// Creates syntax highlighting information in form of StyleRanges.
+///
+/// The returned unique ptr has always exactly size
+/// (\p EndLineNumber - \p StartLineNumber + 1). Each SmallVector in there
+/// corresponds to syntax highlighting information in one line. In each line,
+/// the StyleRanges are non-overlapping and sorted from start to end of the
+/// line.
+static std::unique_ptr<llvm::SmallVector<TextDiagnostic::StyleRange>[]>
+highlightLines(StringRef FileData, unsigned StartLineNumber,
+ unsigned EndLineNumber, const Preprocessor *PP,
+ const LangOptions &LangOpts, bool ShowColors, FileID FID,
+ const SourceManager &SM) {
+ assert(StartLineNumber <= EndLineNumber);
+ auto SnippetRanges =
+ std::make_unique<SmallVector<TextDiagnostic::StyleRange>[]>(
+ EndLineNumber - StartLineNumber + 1);
+
+ if (!PP || !ShowColors)
+ return SnippetRanges;
+
+ // Might cause emission of another diagnostic.
+ if (PP->getIdentifierTable().getExternalIdentifierLookup())
+ return SnippetRanges;
+
+ auto Buff = llvm::MemoryBuffer::getMemBuffer(FileData);
+ Lexer L{FID, *Buff, SM, LangOpts};
+ L.SetKeepWhitespaceMode(true);
+
+ const char *FirstLineStart =
+ FileData.data() +
+ SM.getDecomposedLoc(SM.translateLineCol(FID, StartLineNumber, 1)).second;
+ if (const char *CheckPoint = PP->getCheckPoint(FID, FirstLineStart)) {
+ assert(CheckPoint >= Buff->getBufferStart() &&
+ CheckPoint <= Buff->getBufferEnd());
+ assert(CheckPoint <= FirstLineStart);
+ size_t Offset = CheckPoint - Buff->getBufferStart();
+ L.seek(Offset, /*IsAtStartOfLine=*/false);
+ }
+
+ // Classify the given token and append it to the given vector.
+ auto appendStyle =
+ [PP, &LangOpts](SmallVector<TextDiagnostic::StyleRange> &Vec,
+ const Token &T, unsigned Start, unsigned Length) -> void {
+ if (T.is(tok::raw_identifier)) {
+ StringRef RawIdent = T.getRawIdentifier();
+ // Special case true/false/nullptr/... literals, since they will otherwise
+ // be treated as keywords.
+ // FIXME: It would be good to have a programmatic way of getting this
+ // list.
+ if (llvm::StringSwitch<bool>(RawIdent)
+ .Case("true", true)
+ .Case("false", true)
+ .Case("nullptr", true)
+ .Case("__func__", true)
+ .Case("__objc_yes__", true)
+ .Case("__objc_no__", true)
+ .Case("__null", true)
+ .Case("__FUNCDNAME__", true)
+ .Case("__FUNCSIG__", true)
+ .Case("__FUNCTION__", true)
+ .Case("__FUNCSIG__", true)
+ .Default(false)) {
+ Vec.emplace_back(Start, Start + Length, LiteralColor);
+ } else {
+ const IdentifierInfo *II = PP->getIdentifierInfo(RawIdent);
+ assert(II);
+ if (II->isKeyword(LangOpts))
+ Vec.emplace_back(Start, Start + Length, KeywordColor);
+ }
+ } else if (tok::isLiteral(T.getKind())) {
+ Vec.emplace_back(Start, Start + Length, LiteralColor);
+ } else {
+ assert(T.is(tok::comment));
+ Vec.emplace_back(Start, Start + Length, CommentColor);
+ }
+ };
+
+ bool Stop = false;
+ while (!Stop) {
+ Token T;
+ Stop = L.LexFromRawLexer(T);
+ if (T.is(tok::unknown))
+ continue;
+
+ // We are only interested in identifiers, literals and comments.
+ if (!T.is(tok::raw_identifier) && !T.is(tok::comment) &&
+ !tok::isLiteral(T.getKind()))
+ continue;
+
+ bool Invalid = false;
+ unsigned TokenEndLine = SM.getSpellingLineNumber(T.getEndLoc(), &Invalid);
+ if (Invalid || TokenEndLine < StartLineNumber)
+ continue;
+
+ assert(TokenEndLine >= StartLineNumber);
+
+ unsigned TokenStartLine =
+ SM.getSpellingLineNumber(T.getLocation(), &Invalid);
+ if (Invalid)
+ continue;
+ // If this happens, we're done.
+ if (TokenStartLine > EndLineNumber)
+ break;
+
+ unsigned StartCol =
+ SM.getSpellingColumnNumber(T.getLocation(), &Invalid) - 1;
+ if (Invalid)
+ continue;
+
+ // Simple tokens.
+ if (TokenStartLine == TokenEndLine) {
+ SmallVector<TextDiagnostic::StyleRange> &LineRanges =
+ SnippetRanges[TokenStartLine - StartLineNumber];
+ appendStyle(LineRanges, T, StartCol, T.getLength());
+ continue;
+ }
+ assert((TokenEndLine - TokenStartLine) >= 1);
+
+ // For tokens that span multiple lines (think multiline comments), we
+ // divide them into multiple StyleRanges.
+ unsigned EndCol = SM.getSpellingColumnNumber(T.getEndLoc(), &Invalid) - 1;
+ if (Invalid)
+ continue;
+
+ std::string Spelling = Lexer::getSpelling(T, SM, LangOpts);
+
+ unsigned L = TokenStartLine;
+ unsigned LineLength = 0;
+ for (unsigned I = 0; I <= Spelling.size(); ++I) {
+ // This line is done.
+ if (I == Spelling.size() || isVerticalWhitespace(Spelling[I])) {
+ SmallVector<TextDiagnostic::StyleRange> &LineRanges =
+ SnippetRanges[L - StartLineNumber];
+
+ if (L >= StartLineNumber) {
+ if (L == TokenStartLine) // First line
+ appendStyle(LineRanges, T, StartCol, LineLength);
+ else if (L == TokenEndLine) // Last line
+ appendStyle(LineRanges, T, 0, EndCol);
+ else
+ appendStyle(LineRanges, T, 0, LineLength);
+ }
+
+ ++L;
+ if (L > EndLineNumber)
+ break;
+ LineLength = 0;
+ continue;
+ }
+ ++LineLength;
+ }
+ }
+
+ return SnippetRanges;
+}
+
/// Emit a code snippet and caret line.
///
/// This routine emits a single line's code snippet and caret line..
@@ -1181,6 +1346,12 @@ void TextDiagnostic::emitSnippetAndCaret(
OS.indent(MaxLineNoDisplayWidth + 2) << "| ";
};
+ // Prepare source highlighting information for the lines we're about to
+ // emit, starting from the first line.
+ std::unique_ptr<SmallVector<StyleRange>[]> SourceStyles =
+ highlightLines(BufData, Lines.first, Lines.second, PP, LangOpts,
+ DiagOpts->ShowColors, FID, SM);
+
SmallVector<LineRange> LineRanges =
prepareAndFilterRanges(Ranges, SM, Lines, FID, LangOpts);
@@ -1247,7 +1418,8 @@ void TextDiagnostic::emitSnippetAndCaret(
}
// Emit what we have computed.
- emitSnippet(SourceLine, MaxLineNoDisplayWidth, DisplayLineNo);
+ emitSnippet(SourceLine, MaxLineNoDisplayWidth, LineNo, DisplayLineNo,
+ SourceStyles[LineNo - Lines.first]);
if (!CaretLine.empty()) {
indentForLineNumbers();
@@ -1277,16 +1449,18 @@ void TextDiagnostic::emitSnippetAndCaret(
void TextDiagnostic::emitSnippet(StringRef SourceLine,
unsigned MaxLineNoDisplayWidth,
- unsigned LineNo) {
+ unsigned LineNo, unsigned DisplayLineNo,
+ ArrayRef<StyleRange> Styles) {
// Emit line number.
if (MaxLineNoDisplayWidth > 0) {
- unsigned LineNoDisplayWidth = getNumDisplayWidth(LineNo);
+ unsigned LineNoDisplayWidth = getNumDisplayWidth(DisplayLineNo);
OS.indent(MaxLineNoDisplayWidth - LineNoDisplayWidth + 1)
- << LineNo << " | ";
+ << DisplayLineNo << " | ";
}
// Print the source line one character at a time.
bool PrintReversed = false;
+ std::optional<llvm::raw_ostream::Colors> CurrentColor;
size_t I = 0;
while (I < SourceLine.size()) {
auto [Str, WasPrintable] =
@@ -1298,10 +1472,29 @@ void TextDiagnostic::emitSnippet(StringRef SourceLine,
PrintReversed = !PrintReversed;
if (PrintReversed)
OS.reverseColor();
- else
+ else {
OS.resetColor();
+ CurrentColor = std::nullopt;
+ }
+ }
+
+ // Apply syntax highlighting information.
+ const auto *CharStyle = llvm::find_if(Styles, [I](const StyleRange &R) {
+ return (R.Start < I && R.End >= I);
+ });
+
+ if (CharStyle != Styles.end()) {
+ if (!CurrentColor ||
+ (CurrentColor && *CurrentColor != CharStyle->Color)) {
+ OS.changeColor(CharStyle->Color, false);
+ CurrentColor = CharStyle->Color;
+ }
+ } else if (CurrentColor) {
+ OS.resetColor();
+ CurrentColor = std::nullopt;
}
}
+
OS << Str;
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/TextDiagnosticPrinter.cpp b/contrib/llvm-project/clang/lib/Frontend/TextDiagnosticPrinter.cpp
index 0ff5376098ff..b2fb76253757 100644
--- a/contrib/llvm-project/clang/lib/Frontend/TextDiagnosticPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/TextDiagnosticPrinter.cpp
@@ -36,7 +36,7 @@ TextDiagnosticPrinter::~TextDiagnosticPrinter() {
void TextDiagnosticPrinter::BeginSourceFile(const LangOptions &LO,
const Preprocessor *PP) {
// Build the TextDiagnostic utility.
- TextDiag.reset(new TextDiagnostic(OS, LO, &*DiagOpts));
+ TextDiag.reset(new TextDiagnostic(OS, LO, &*DiagOpts, PP));
}
void TextDiagnosticPrinter::EndSourceFile() {
diff --git a/contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp b/contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
index f508408ba706..48330e936171 100644
--- a/contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
@@ -396,6 +396,12 @@ public:
}
};
+static std::string DetailedErrorString(const DiagnosticsEngine &Diags) {
+ if (Diags.getDiagnosticOptions().VerifyPrefixes.empty())
+ return "expected";
+ return *Diags.getDiagnosticOptions().VerifyPrefixes.begin();
+}
+
/// ParseDirective - Go through the comment and see if it indicates expected
/// diagnostics. If so, then put them in the appropriate directive list.
///
@@ -445,10 +451,9 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
// others.
// Regex in initial directive token: -re
- if (DToken.ends_with("-re")) {
+ if (DToken.consume_back("-re")) {
D.RegexKind = true;
KindStr = "regex";
- DToken = DToken.substr(0, DToken.size()-3);
}
// Type in initial directive token: -{error|warning|note|no-diagnostics}
@@ -479,14 +484,14 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
if (NoDiag) {
if (Status == VerifyDiagnosticConsumer::HasOtherExpectedDirectives)
Diags.Report(Pos, diag::err_verify_invalid_no_diags)
- << /*IsExpectedNoDiagnostics=*/true;
+ << DetailedErrorString(Diags) << /*IsExpectedNoDiagnostics=*/true;
else
Status = VerifyDiagnosticConsumer::HasExpectedNoDiagnostics;
continue;
}
if (Status == VerifyDiagnosticConsumer::HasExpectedNoDiagnostics) {
Diags.Report(Pos, diag::err_verify_invalid_no_diags)
- << /*IsExpectedNoDiagnostics=*/false;
+ << DetailedErrorString(Diags) << /*IsExpectedNoDiagnostics=*/false;
continue;
}
Status = VerifyDiagnosticConsumer::HasOtherExpectedDirectives;
@@ -1105,7 +1110,8 @@ void VerifyDiagnosticConsumer::CheckDiagnostics() {
// Produce an error if no expected-* directives could be found in the
// source file(s) processed.
if (Status == HasNoDirectives) {
- Diags.Report(diag::err_verify_no_directives).setForceEmit();
+ Diags.Report(diag::err_verify_no_directives).setForceEmit()
+ << DetailedErrorString(Diags);
++NumErrors;
Status = HasNoDirectivesReported;
}
diff --git a/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index b280a1359d2f..7476b1076d10 100644
--- a/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -53,6 +53,8 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
case DumpTokens: return std::make_unique<DumpTokensAction>();
case EmitAssembly: return std::make_unique<EmitAssemblyAction>();
case EmitBC: return std::make_unique<EmitBCAction>();
+ case EmitCIR:
+ llvm_unreachable("CIR suppport not built into clang");
case EmitHTML: return std::make_unique<HTMLPrintAction>();
case EmitLLVM: return std::make_unique<EmitLLVMAction>();
case EmitLLVMOnly: return std::make_unique<EmitLLVMOnlyAction>();
@@ -65,6 +67,8 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
return std::make_unique<GenerateModuleFromModuleMapAction>();
case GenerateModuleInterface:
return std::make_unique<GenerateModuleInterfaceAction>();
+ case GenerateReducedModuleInterface:
+ return std::make_unique<GenerateReducedModuleInterfaceAction>();
case GenerateHeaderUnit:
return std::make_unique<GenerateHeaderUnitAction>();
case GeneratePCH: return std::make_unique<GeneratePCHAction>();
@@ -179,9 +183,13 @@ CreateFrontendAction(CompilerInstance &CI) {
#endif
// Wrap the base FE action in an extract api action to generate
- // symbol graph as a biproduct of comilation ( enabled with
- // --emit-symbol-graph option )
- if (!FEOpts.SymbolGraphOutputDir.empty()) {
+ // symbol graph as a biproduct of compilation (enabled with
+ // --emit-symbol-graph option)
+ if (FEOpts.EmitSymbolGraph) {
+ if (FEOpts.SymbolGraphOutputDir.empty()) {
+ CI.getDiagnostics().Report(diag::warn_missing_symbol_graph_dir);
+ CI.getFrontendOpts().SymbolGraphOutputDir = ".";
+ }
CI.getCodeGenOpts().ClearASTBeforeBackend = false;
Act = std::make_unique<WrappingExtractAPIAction>(std::move(Act));
}
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_intrinsics.h b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_intrinsics.h
index 3c3948863c1d..a04e8b6de44d 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_intrinsics.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_intrinsics.h
@@ -215,9 +215,7 @@ inline __device__ unsigned int __activemask() {
#if CUDA_VERSION < 9020
return __nvvm_vote_ballot(1);
#else
- unsigned int mask;
- asm volatile("activemask.b32 %0;" : "=r"(mask));
- return mask;
+ return __nvvm_activemask();
#endif
}
diff --git a/contrib/llvm-project/clang/lib/Headers/__stdarg_header_macro.h b/contrib/llvm-project/clang/lib/Headers/__stdarg_header_macro.h
new file mode 100644
index 000000000000..beb92ee02526
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__stdarg_header_macro.h
@@ -0,0 +1,12 @@
+/*===---- __stdarg_header_macro.h ------------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDARG_H
+#define __STDARG_H
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/__stddef_header_macro.h b/contrib/llvm-project/clang/lib/Headers/__stddef_header_macro.h
new file mode 100644
index 000000000000..db5fb3c0abc1
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__stddef_header_macro.h
@@ -0,0 +1,12 @@
+/*===---- __stddef_header_macro.h ------------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDDEF_H
+#define __STDDEF_H
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/arm_acle.h b/contrib/llvm-project/clang/lib/Headers/arm_acle.h
index 9cd34948e3c5..1518b0c4c842 100644
--- a/contrib/llvm-project/clang/lib/Headers/arm_acle.h
+++ b/contrib/llvm-project/clang/lib/Headers/arm_acle.h
@@ -75,6 +75,14 @@ static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(v
#define __dbg(t) __builtin_arm_dbg(t)
#endif
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
+#define _CHKFEAT_GCS 1
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__chkfeat(uint64_t __features) {
+ return __builtin_arm_chkfeat(__features) ^ __features;
+}
+#endif
+
/* 7.5 Swap */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__swp(uint32_t __x, volatile uint32_t *__p) {
@@ -109,7 +117,7 @@ __swp(uint32_t __x, volatile uint32_t *__p) {
#endif
/* 7.7 NOP */
-#if !defined(_MSC_VER) || !defined(__aarch64__)
+#if !defined(_MSC_VER) || (!defined(__aarch64__) && !defined(__arm64ec__))
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) {
__builtin_arm_nop();
}
@@ -313,7 +321,7 @@ __qdbl(int32_t __t) {
}
#endif
-/* 8.4.3 Accumultating multiplications */
+/* 8.4.3 Accumulating multiplications */
#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlabb(int32_t __a, int32_t __b, int32_t __c) {
@@ -545,7 +553,7 @@ __usub16(uint16x2_t __a, uint16x2_t __b) {
}
#endif
-/* 8.5.10 Parallel 16-bit multiplications */
+/* 8.5.10 Parallel 16-bit multiplication */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlad(int16x2_t __a, int16x2_t __b, int32_t __c) {
@@ -748,7 +756,7 @@ __arm_st64bv0(void *__addr, data512_t __value) {
#define __arm_wsrf(sysreg, v) __arm_wsr(sysreg, __builtin_bit_cast(uint32_t, v))
#define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v))
-/* 10.3 Memory Tagging Extensions (MTE) Intrinsics */
+/* 10.3 MTE intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
#define __arm_mte_create_random_tag(__ptr, __mask) __builtin_arm_irg(__ptr, __mask)
#define __arm_mte_increment_tag(__ptr, __tag_offset) __builtin_arm_addg(__ptr, __tag_offset)
@@ -757,7 +765,7 @@ __arm_st64bv0(void *__addr, data512_t __value) {
#define __arm_mte_set_tag(__ptr) __builtin_arm_stg(__ptr)
#define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb)
-/* 18 Memory Operations Intrinsics */
+/* 18 memcpy family of operations intrinsics - MOPS */
#define __arm_mops_memset_tag(__tagged_address, __value, __size) \
__builtin_arm_mops_memset_tag(__tagged_address, __value, __size)
#endif
@@ -855,6 +863,24 @@ __rndrrs(uint64_t *__p) {
}
#endif
+/* 11.2 Guarded Control Stack intrinsics */
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
+static __inline__ void * __attribute__((__always_inline__, __nodebug__))
+__gcspr() {
+ return (void *)__builtin_arm_rsr64("gcspr_el0");
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("gcs")))
+__gcspopm() {
+ return __builtin_arm_gcspopm(0);
+}
+
+static __inline__ const void * __attribute__((__always_inline__, __nodebug__, target("gcs")))
+__gcsss(const void *__stack) {
+ return __builtin_arm_gcsss(__stack);
+}
+#endif
+
#if defined(__cplusplus)
}
#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512erintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512erintrin.h
deleted file mode 100644
index 1c5a2d2d208f..000000000000
--- a/contrib/llvm-project/clang/lib/Headers/avx512erintrin.h
+++ /dev/null
@@ -1,271 +0,0 @@
-/*===---- avx512erintrin.h - AVX512ER intrinsics ---------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __IMMINTRIN_H
-#error "Never use <avx512erintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVX512ERINTRIN_H
-#define __AVX512ERINTRIN_H
-
-/* exp2a23 */
-#define _mm512_exp2a23_round_pd(A, R) \
- ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_exp2a23_round_pd(S, M, A, R) \
- ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(S), (__mmask8)(M), \
- (int)(R)))
-
-#define _mm512_maskz_exp2a23_round_pd(M, A, R) \
- ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm512_exp2a23_pd(A) \
- _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_exp2a23_pd(S, M, A) \
- _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_exp2a23_pd(M, A) \
- _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_exp2a23_round_ps(A, R) \
- ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_exp2a23_round_ps(S, M, A, R) \
- ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(S), (__mmask16)(M), \
- (int)(R)))
-
-#define _mm512_maskz_exp2a23_round_ps(M, A, R) \
- ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(M), (int)(R)))
-
-#define _mm512_exp2a23_ps(A) \
- _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_exp2a23_ps(S, M, A) \
- _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_exp2a23_ps(M, A) \
- _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
-
-/* rsqrt28 */
-#define _mm512_rsqrt28_round_pd(A, R) \
- ((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_rsqrt28_round_pd(S, M, A, R) \
- ((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(S), (__mmask8)(M), \
- (int)(R)))
-
-#define _mm512_maskz_rsqrt28_round_pd(M, A, R) \
- ((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm512_rsqrt28_pd(A) \
- _mm512_rsqrt28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_rsqrt28_pd(S, M, A) \
- _mm512_mask_rsqrt28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_rsqrt28_pd(M, A) \
- _mm512_maskz_rsqrt28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_rsqrt28_round_ps(A, R) \
- ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_rsqrt28_round_ps(S, M, A, R) \
- ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(S), (__mmask16)(M), \
- (int)(R)))
-
-#define _mm512_maskz_rsqrt28_round_ps(M, A, R) \
- ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(M), (int)(R)))
-
-#define _mm512_rsqrt28_ps(A) \
- _mm512_rsqrt28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_rsqrt28_ps(S, M, A) \
- _mm512_mask_rsqrt28_round_ps((S), (M), A, _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_rsqrt28_ps(M, A) \
- _mm512_maskz_rsqrt28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_rsqrt28_round_ss(A, B, R) \
- ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) \
- ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(S), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm_maskz_rsqrt28_round_ss(M, A, B, R) \
- ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm_rsqrt28_ss(A, B) \
- _mm_rsqrt28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_rsqrt28_ss(S, M, A, B) \
- _mm_mask_rsqrt28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_rsqrt28_ss(M, A, B) \
- _mm_maskz_rsqrt28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_rsqrt28_round_sd(A, B, R) \
- ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) \
- ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(S), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm_maskz_rsqrt28_round_sd(M, A, B, R) \
- ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm_rsqrt28_sd(A, B) \
- _mm_rsqrt28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_rsqrt28_sd(S, M, A, B) \
- _mm_mask_rsqrt28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_rsqrt28_sd(M, A, B) \
- _mm_maskz_rsqrt28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-/* rcp28 */
-#define _mm512_rcp28_round_pd(A, R) \
- ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_rcp28_round_pd(S, M, A, R) \
- ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(S), (__mmask8)(M), \
- (int)(R)))
-
-#define _mm512_maskz_rcp28_round_pd(M, A, R) \
- ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm512_rcp28_pd(A) \
- _mm512_rcp28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_rcp28_pd(S, M, A) \
- _mm512_mask_rcp28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_rcp28_pd(M, A) \
- _mm512_maskz_rcp28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_rcp28_round_ps(A, R) \
- ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_rcp28_round_ps(S, M, A, R) \
- ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(S), (__mmask16)(M), \
- (int)(R)))
-
-#define _mm512_maskz_rcp28_round_ps(M, A, R) \
- ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(M), (int)(R)))
-
-#define _mm512_rcp28_ps(A) \
- _mm512_rcp28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_rcp28_ps(S, M, A) \
- _mm512_mask_rcp28_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_rcp28_ps(M, A) \
- _mm512_maskz_rcp28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_rcp28_round_ss(A, B, R) \
- ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_rcp28_round_ss(S, M, A, B, R) \
- ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(S), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm_maskz_rcp28_round_ss(M, A, B, R) \
- ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm_rcp28_ss(A, B) \
- _mm_rcp28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_rcp28_ss(S, M, A, B) \
- _mm_mask_rcp28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_rcp28_ss(M, A, B) \
- _mm_maskz_rcp28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_rcp28_round_sd(A, B, R) \
- ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_rcp28_round_sd(S, M, A, B, R) \
- ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(S), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm_maskz_rcp28_round_sd(M, A, B, R) \
- ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm_rcp28_sd(A, B) \
- _mm_rcp28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_rcp28_sd(S, M, A, B) \
- _mm_mask_rcp28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_rcp28_sd(M, A, B) \
- _mm_maskz_rcp28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#endif /* __AVX512ERINTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512fp16intrin.h b/contrib/llvm-project/clang/lib/Headers/avx512fp16intrin.h
index 4123f10c3951..e136aa14a194 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512fp16intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512fp16intrin.h
@@ -96,8 +96,8 @@ _mm512_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4,
(h5), (h4), (h3), (h2), (h1))
static __inline __m512h __DEFAULT_FN_ATTRS512
-_mm512_set1_pch(_Float16 _Complex h) {
- return (__m512h)_mm512_set1_ps(__builtin_bit_cast(float, h));
+_mm512_set1_pch(_Float16 _Complex __h) {
+ return (__m512h)_mm512_set1_ps(__builtin_bit_cast(float, __h));
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_castph_ps(__m128h __a) {
@@ -282,75 +282,75 @@ _mm512_zextph256_ph512(__m256h __a) {
#define _mm_comi_sh(A, B, pred) \
_mm_comi_round_sh((A), (B), (pred), _MM_FROUND_CUR_DIRECTION)
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comieq_sh(__m128h A,
- __m128h B) {
- return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_EQ_OS,
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comieq_sh(__m128h __A,
+ __m128h __B) {
+ return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_EQ_OS,
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comilt_sh(__m128h A,
- __m128h B) {
- return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_LT_OS,
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comilt_sh(__m128h __A,
+ __m128h __B) {
+ return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_LT_OS,
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comile_sh(__m128h A,
- __m128h B) {
- return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_LE_OS,
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comile_sh(__m128h __A,
+ __m128h __B) {
+ return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_LE_OS,
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comigt_sh(__m128h A,
- __m128h B) {
- return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_GT_OS,
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comigt_sh(__m128h __A,
+ __m128h __B) {
+ return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_GT_OS,
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comige_sh(__m128h A,
- __m128h B) {
- return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_GE_OS,
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comige_sh(__m128h __A,
+ __m128h __B) {
+ return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_GE_OS,
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comineq_sh(__m128h A,
- __m128h B) {
- return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_NEQ_US,
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comineq_sh(__m128h __A,
+ __m128h __B) {
+ return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_NEQ_US,
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomieq_sh(__m128h A,
- __m128h B) {
- return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_EQ_OQ,
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomieq_sh(__m128h __A,
+ __m128h __B) {
+ return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_EQ_OQ,
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomilt_sh(__m128h A,
- __m128h B) {
- return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_LT_OQ,
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomilt_sh(__m128h __A,
+ __m128h __B) {
+ return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_LT_OQ,
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomile_sh(__m128h A,
- __m128h B) {
- return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_LE_OQ,
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomile_sh(__m128h __A,
+ __m128h __B) {
+ return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_LE_OQ,
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomigt_sh(__m128h A,
- __m128h B) {
- return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_GT_OQ,
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomigt_sh(__m128h __A,
+ __m128h __B) {
+ return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_GT_OQ,
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomige_sh(__m128h A,
- __m128h B) {
- return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_GE_OQ,
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomige_sh(__m128h __A,
+ __m128h __B) {
+ return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_GE_OQ,
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomineq_sh(__m128h A,
- __m128h B) {
- return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_NEQ_UQ,
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomineq_sh(__m128h __A,
+ __m128h __B) {
+ return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_NEQ_UQ,
_MM_FROUND_CUR_DIRECTION);
}
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512pfintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512pfintrin.h
deleted file mode 100644
index f853be021a2d..000000000000
--- a/contrib/llvm-project/clang/lib/Headers/avx512pfintrin.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*===------------- avx512pfintrin.h - PF intrinsics ------------------------===
- *
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __IMMINTRIN_H
-#error "Never use <avx512pfintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVX512PFINTRIN_H
-#define __AVX512PFINTRIN_H
-
-#define _mm512_mask_prefetch_i32gather_pd(index, mask, addr, scale, hint) \
- __builtin_ia32_gatherpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \
- (void const *)(addr), (int)(scale), \
- (int)(hint))
-
-#define _mm512_prefetch_i32gather_pd(index, addr, scale, hint) \
- __builtin_ia32_gatherpfdpd((__mmask8) -1, (__v8si)(__m256i)(index), \
- (void const *)(addr), (int)(scale), \
- (int)(hint))
-
-#define _mm512_mask_prefetch_i32gather_ps(index, mask, addr, scale, hint) \
- __builtin_ia32_gatherpfdps((__mmask16)(mask), \
- (__v16si)(__m512i)(index), (void const *)(addr), \
- (int)(scale), (int)(hint))
-
-#define _mm512_prefetch_i32gather_ps(index, addr, scale, hint) \
- __builtin_ia32_gatherpfdps((__mmask16) -1, \
- (__v16si)(__m512i)(index), (void const *)(addr), \
- (int)(scale), (int)(hint))
-
-#define _mm512_mask_prefetch_i64gather_pd(index, mask, addr, scale, hint) \
- __builtin_ia32_gatherpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \
- (void const *)(addr), (int)(scale), \
- (int)(hint))
-
-#define _mm512_prefetch_i64gather_pd(index, addr, scale, hint) \
- __builtin_ia32_gatherpfqpd((__mmask8) -1, (__v8di)(__m512i)(index), \
- (void const *)(addr), (int)(scale), \
- (int)(hint))
-
-#define _mm512_mask_prefetch_i64gather_ps(index, mask, addr, scale, hint) \
- __builtin_ia32_gatherpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \
- (void const *)(addr), (int)(scale), (int)(hint))
-
-#define _mm512_prefetch_i64gather_ps(index, addr, scale, hint) \
- __builtin_ia32_gatherpfqps((__mmask8) -1, (__v8di)(__m512i)(index), \
- (void const *)(addr), (int)(scale), (int)(hint))
-
-#define _mm512_prefetch_i32scatter_pd(addr, index, scale, hint) \
- __builtin_ia32_scatterpfdpd((__mmask8)-1, (__v8si)(__m256i)(index), \
- (void *)(addr), (int)(scale), \
- (int)(hint))
-
-#define _mm512_mask_prefetch_i32scatter_pd(addr, mask, index, scale, hint) \
- __builtin_ia32_scatterpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \
- (void *)(addr), (int)(scale), \
- (int)(hint))
-
-#define _mm512_prefetch_i32scatter_ps(addr, index, scale, hint) \
- __builtin_ia32_scatterpfdps((__mmask16)-1, (__v16si)(__m512i)(index), \
- (void *)(addr), (int)(scale), (int)(hint))
-
-#define _mm512_mask_prefetch_i32scatter_ps(addr, mask, index, scale, hint) \
- __builtin_ia32_scatterpfdps((__mmask16)(mask), \
- (__v16si)(__m512i)(index), (void *)(addr), \
- (int)(scale), (int)(hint))
-
-#define _mm512_prefetch_i64scatter_pd(addr, index, scale, hint) \
- __builtin_ia32_scatterpfqpd((__mmask8)-1, (__v8di)(__m512i)(index), \
- (void *)(addr), (int)(scale), \
- (int)(hint))
-
-#define _mm512_mask_prefetch_i64scatter_pd(addr, mask, index, scale, hint) \
- __builtin_ia32_scatterpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \
- (void *)(addr), (int)(scale), \
- (int)(hint))
-
-#define _mm512_prefetch_i64scatter_ps(addr, index, scale, hint) \
- __builtin_ia32_scatterpfqps((__mmask8)-1, (__v8di)(__m512i)(index), \
- (void *)(addr), (int)(scale), (int)(hint))
-
-#define _mm512_mask_prefetch_i64scatter_ps(addr, mask, index, scale, hint) \
- __builtin_ia32_scatterpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \
- (void *)(addr), (int)(scale), (int)(hint))
-
-#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/avxintrin.h b/contrib/llvm-project/clang/lib/Headers/avxintrin.h
index f116d8bc3a94..4983f3311370 100644
--- a/contrib/llvm-project/clang/lib/Headers/avxintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avxintrin.h
@@ -207,6 +207,8 @@ _mm256_div_ps(__m256 __a, __m256 __b)
/// Compares two 256-bit vectors of [4 x double] and returns the greater
/// of each pair of values.
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMAXPD </c> instruction.
@@ -226,6 +228,8 @@ _mm256_max_pd(__m256d __a, __m256d __b)
/// Compares two 256-bit vectors of [8 x float] and returns the greater
/// of each pair of values.
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMAXPS </c> instruction.
@@ -245,6 +249,8 @@ _mm256_max_ps(__m256 __a, __m256 __b)
/// Compares two 256-bit vectors of [4 x double] and returns the lesser
/// of each pair of values.
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMINPD </c> instruction.
@@ -264,6 +270,8 @@ _mm256_min_pd(__m256d __a, __m256d __b)
/// Compares two 256-bit vectors of [8 x float] and returns the lesser
/// of each pair of values.
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMINPS </c> instruction.
@@ -832,6 +840,7 @@ _mm256_permutevar_pd(__m256d __a, __m256i __c)
/// Copies the values stored in a 128-bit vector of [4 x float] as
/// specified by the 128-bit integer vector operand.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VPERMILPS </c> instruction.
@@ -1574,14 +1583,6 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
(__v4df)(__m256d)(b), (int)(mask)))
/* Compare */
-#define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */
-#define _CMP_LT_OS 0x01 /* Less-than (ordered, signaling) */
-#define _CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */
-#define _CMP_UNORD_Q 0x03 /* Unordered (non-signaling) */
-#define _CMP_NEQ_UQ 0x04 /* Not-equal (unordered, non-signaling) */
-#define _CMP_NLT_US 0x05 /* Not-less-than (unordered, signaling) */
-#define _CMP_NLE_US 0x06 /* Not-less-than-or-equal (unordered, signaling) */
-#define _CMP_ORD_Q 0x07 /* Ordered (non-signaling) */
#define _CMP_EQ_UQ 0x08 /* Equal (unordered, non-signaling) */
#define _CMP_NGE_US 0x09 /* Not-greater-than-or-equal (unordered, signaling) */
#define _CMP_NGT_US 0x0a /* Not-greater-than (unordered, signaling) */
@@ -1607,13 +1608,14 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
#define _CMP_GT_OQ 0x1e /* Greater-than (ordered, non-signaling) */
#define _CMP_TRUE_US 0x1f /* True (unordered, signaling) */
+/* Below intrinsic defined in emmintrin.h can be used for AVX */
/// Compares each of the corresponding double-precision values of two
/// 128-bit vectors of [2 x double], using the operation specified by the
/// immediate integer operand.
///
-/// Returns a [2 x double] vector consisting of two doubles corresponding to
-/// the two comparison results: zero if the comparison is false, and all 1's
-/// if the comparison is true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, comparisons that are ordered
+/// return false, and comparisons that are unordered return true.
///
/// \headerfile <x86intrin.h>
///
@@ -1663,17 +1665,16 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// 0x1E: Greater-than (ordered, non-signaling) \n
/// 0x1F: True (unordered, signaling)
/// \returns A 128-bit vector of [2 x double] containing the comparison results.
-#define _mm_cmp_pd(a, b, c) \
- ((__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \
- (__v2df)(__m128d)(b), (c)))
+/// \fn __m128d _mm_cmp_pd(__m128d a, __m128d b, const int c)
+/* Below intrinsic defined in xmmintrin.h can be used for AVX */
/// Compares each of the corresponding values of two 128-bit vectors of
/// [4 x float], using the operation specified by the immediate integer
/// operand.
///
-/// Returns a [4 x float] vector consisting of four floats corresponding to
-/// the four comparison results: zero if the comparison is false, and all 1's
-/// if the comparison is true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, comparisons that are ordered
+/// return false, and comparisons that are unordered return true.
///
/// \headerfile <x86intrin.h>
///
@@ -1723,17 +1724,15 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// 0x1E: Greater-than (ordered, non-signaling) \n
/// 0x1F: True (unordered, signaling)
/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-#define _mm_cmp_ps(a, b, c) \
- ((__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \
- (__v4sf)(__m128)(b), (c)))
+/// \fn __m128 _mm_cmp_ps(__m128 a, __m128 b, const int c)
/// Compares each of the corresponding double-precision values of two
/// 256-bit vectors of [4 x double], using the operation specified by the
/// immediate integer operand.
///
-/// Returns a [4 x double] vector consisting of four doubles corresponding to
-/// the four comparison results: zero if the comparison is false, and all 1's
-/// if the comparison is true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, comparisons that are ordered
+/// return false, and comparisons that are unordered return true.
///
/// \headerfile <x86intrin.h>
///
@@ -1791,9 +1790,9 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// [8 x float], using the operation specified by the immediate integer
/// operand.
///
-/// Returns a [8 x float] vector consisting of eight floats corresponding to
-/// the eight comparison results: zero if the comparison is false, and all
-/// 1's if the comparison is true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, comparisons that are ordered
+/// return false, and comparisons that are unordered return true.
///
/// \headerfile <x86intrin.h>
///
@@ -1847,12 +1846,14 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
((__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \
(__v8sf)(__m256)(b), (c)))
+/* The intrinsic below, defined in emmintrin.h, can be used for AVX. */
/// Compares each of the corresponding scalar double-precision values of
/// two 128-bit vectors of [2 x double], using the operation specified by the
/// immediate integer operand.
///
-/// If the result is true, all 64 bits of the destination vector are set;
-/// otherwise they are cleared.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, comparisons that are ordered
+/// return false, and comparisons that are unordered return true.
///
/// \headerfile <x86intrin.h>
///
@@ -1902,16 +1903,16 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// 0x1E: Greater-than (ordered, non-signaling) \n
/// 0x1F: True (unordered, signaling)
/// \returns A 128-bit vector of [2 x double] containing the comparison results.
-#define _mm_cmp_sd(a, b, c) \
- ((__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \
- (__v2df)(__m128d)(b), (c)))
+/// \fn __m128d _mm_cmp_sd(__m128d a, __m128d b, const int c)
+/* The intrinsic below, defined in xmmintrin.h, can be used for AVX. */
/// Compares each of the corresponding scalar values of two 128-bit
/// vectors of [4 x float], using the operation specified by the immediate
/// integer operand.
///
-/// If the result is true, all 32 bits of the destination vector are set;
-/// otherwise they are cleared.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, comparisons that are ordered
+/// return false, and comparisons that are unordered return true.
///
/// \headerfile <x86intrin.h>
///
@@ -1961,9 +1962,7 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// 0x1E: Greater-than (ordered, non-signaling) \n
/// 0x1F: True (unordered, signaling)
/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-#define _mm_cmp_ss(a, b, c) \
- ((__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \
- (__v4sf)(__m128)(b), (c)))
+/// \fn __m128 _mm_cmp_ss(__m128 a, __m128 b, const int c)
/// Takes a [8 x i32] vector and returns the vector element value
/// indexed by the immediate constant operand.
@@ -2213,6 +2212,10 @@ _mm256_cvtpd_ps(__m256d __a)
/// Converts a vector of [8 x float] into a vector of [8 x i32].
///
+/// If a converted value does not fit in a 32-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTPS2DQ </c> instruction.
@@ -2242,9 +2245,13 @@ _mm256_cvtps_pd(__m128 __a)
return (__m256d)__builtin_convertvector((__v4sf)__a, __v4df);
}
-/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4
-/// x i32], truncating the result by rounding towards zero when it is
-/// inexact.
+/// Converts a 256-bit vector of [4 x double] into four signed truncated
+/// (rounded toward zero) 32-bit integers returned in a 128-bit vector of
+/// [4 x i32].
+///
+/// If a converted value does not fit in a 32-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
///
/// \headerfile <x86intrin.h>
///
@@ -2259,9 +2266,12 @@ _mm256_cvttpd_epi32(__m256d __a)
return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a);
}
-/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4
-/// x i32]. When a conversion is inexact, the value returned is rounded
-/// according to the rounding control bits in the MXCSR register.
+/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of
+/// [4 x i32].
+///
+/// If a converted value does not fit in a 32-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
///
/// \headerfile <x86intrin.h>
///
@@ -2276,8 +2286,12 @@ _mm256_cvtpd_epi32(__m256d __a)
return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a);
}
-/// Converts a vector of [8 x float] into a vector of [8 x i32],
-/// truncating the result by rounding towards zero when it is inexact.
+/// Converts a vector of [8 x float] into eight signed truncated (rounded
+/// toward zero) 32-bit integers returned in a vector of [8 x i32].
+///
+/// If a converted value does not fit in a 32-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
///
/// \headerfile <x86intrin.h>
///
diff --git a/contrib/llvm-project/clang/lib/Headers/bmiintrin.h b/contrib/llvm-project/clang/lib/Headers/bmiintrin.h
index d8e57c0cb494..78bffe68e221 100644
--- a/contrib/llvm-project/clang/lib/Headers/bmiintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/bmiintrin.h
@@ -161,8 +161,7 @@ _mm_tzcnt_64(unsigned long long __X)
#undef __RELAXED_FN_ATTRS
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__BMI__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__BMI__)
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi")))
@@ -610,7 +609,6 @@ __blsr_u64(unsigned long long __X)
#undef __DEFAULT_FN_ATTRS
-#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \
- || defined(__BMI__) */
+#endif /* !defined(__SCE__) || __has_feature(modules) || defined(__BMI__) */
#endif /* __BMIINTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/builtins.h b/contrib/llvm-project/clang/lib/Headers/builtins.h
index 65095861ca9b..1e534e632c8e 100644
--- a/contrib/llvm-project/clang/lib/Headers/builtins.h
+++ b/contrib/llvm-project/clang/lib/Headers/builtins.h
@@ -13,4 +13,7 @@
#ifndef __BUILTINS_H
#define __BUILTINS_H
+#if defined(__MVS__) && __has_include_next(<builtins.h>)
+#include_next <builtins.h>
+#endif /* __MVS__ */
#endif /* __BUILTINS_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/cpuid.h b/contrib/llvm-project/clang/lib/Headers/cpuid.h
index 1ad6853a97c9..82d995f1b966 100644
--- a/contrib/llvm-project/clang/lib/Headers/cpuid.h
+++ b/contrib/llvm-project/clang/lib/Headers/cpuid.h
@@ -10,7 +10,7 @@
#ifndef __CPUID_H
#define __CPUID_H
-#if !(__x86_64__ || __i386__)
+#if !defined(__x86_64__) && !defined(__i386__)
#error this header is for x86 only
#endif
@@ -200,6 +200,9 @@
#define bit_AMXINT8 0x02000000
/* Features in %eax for leaf 7 sub-leaf 1 */
+#define bit_SHA512 0x00000001
+#define bit_SM3 0x00000002
+#define bit_SM4 0x00000004
#define bit_RAOINT 0x00000008
#define bit_AVXVNNI 0x00000010
#define bit_AVX512BF16 0x00000020
@@ -211,7 +214,12 @@
/* Features in %edx for leaf 7 sub-leaf 1 */
#define bit_AVXVNNIINT8 0x00000010
#define bit_AVXNECONVERT 0x00000020
+#define bit_AMXCOMPLEX 0x00000100
+#define bit_AVXVNNIINT16 0x00000400
#define bit_PREFETCHI 0x00004000
+#define bit_USERMSR 0x00008000
+#define bit_AVX10 0x00080000
+#define bit_APXF 0x00200000
/* Features in %eax for leaf 13 sub-leaf 1 */
#define bit_XSAVEOPT 0x00000001
@@ -244,8 +252,11 @@
#define bit_RDPRU 0x00000010
#define bit_WBNOINVD 0x00000200
+/* Features in %ebx for leaf 0x24 */
+#define bit_AVX10_256 0x00020000
+#define bit_AVX10_512 0x00040000
-#if __i386__
+#ifdef __i386__
#define __cpuid(__leaf, __eax, __ebx, __ecx, __edx) \
__asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
: "0"(__leaf))
@@ -274,7 +285,7 @@ static __inline unsigned int __get_cpuid_max (unsigned int __leaf,
unsigned int *__sig)
{
unsigned int __eax, __ebx, __ecx, __edx;
-#if __i386__
+#ifdef __i386__
int __cpuid_supported;
__asm(" pushfl\n"
@@ -328,4 +339,13 @@ static __inline int __get_cpuid_count (unsigned int __leaf,
return 1;
}
+// In some configurations, __cpuidex is defined as a builtin (primarily with
+// -fms-extensions), which will conflict with the __cpuidex definition below.
+#if !(__has_builtin(__cpuidex))
+static __inline void __cpuidex(int __cpu_info[4], int __leaf, int __subleaf) {
+ __cpuid_count(__leaf, __subleaf, __cpu_info[0], __cpu_info[1], __cpu_info[2],
+ __cpu_info[3]);
+}
+#endif
+
#endif /* __CPUID_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/algorithm b/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/algorithm
index f14a0b00bb04..3f59f28ae35b 100644
--- a/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/algorithm
+++ b/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/algorithm
@@ -99,7 +99,7 @@ template <class __T>
__attribute__((enable_if(true, "")))
inline _CPP14_CONSTEXPR __host__ __device__ const __T &
min(const __T &__a, const __T &__b) {
- return __a < __b ? __a : __b;
+ return __b < __a ? __b : __a;
}
#pragma pop_macro("_CPP14_CONSTEXPR")
diff --git a/contrib/llvm-project/clang/lib/Headers/emmintrin.h b/contrib/llvm-project/clang/lib/Headers/emmintrin.h
index 96e3ebdecbdf..4dff6421350c 100644
--- a/contrib/llvm-project/clang/lib/Headers/emmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/emmintrin.h
@@ -259,6 +259,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_pd(__m128d __a) {
/// result. The upper 64 bits of the result are copied from the upper
/// double-precision value of the first operand.
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMINSD / MINSD </c> instruction.
@@ -278,9 +280,11 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_sd(__m128d __a,
}
/// Performs element-by-element comparison of the two 128-bit vectors of
-/// [2 x double] and returns the vector containing the lesser of each pair of
+/// [2 x double] and returns a vector containing the lesser of each pair of
/// values.
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMINPD / MINPD </c> instruction.
@@ -301,6 +305,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_pd(__m128d __a,
/// result. The upper 64 bits of the result are copied from the upper
/// double-precision value of the first operand.
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMAXSD / MAXSD </c> instruction.
@@ -320,9 +326,11 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_sd(__m128d __a,
}
/// Performs element-by-element comparison of the two 128-bit vectors of
-/// [2 x double] and returns the vector containing the greater of each pair
+/// [2 x double] and returns a vector containing the greater of each pair
/// of values.
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMAXPD / MAXPD </c> instruction.
@@ -410,8 +418,10 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_xor_pd(__m128d __a,
}
/// Compares each of the corresponding double-precision values of the
-/// 128-bit vectors of [2 x double] for equality. Each comparison yields 0x0
-/// for false, 0xFFFFFFFFFFFFFFFF for true.
+/// 128-bit vectors of [2 x double] for equality.
+///
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -429,8 +439,10 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_pd(__m128d __a,
/// Compares each of the corresponding double-precision values of the
/// 128-bit vectors of [2 x double] to determine if the values in the first
-/// operand are less than those in the second operand. Each comparison
-/// yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// operand are less than those in the second operand.
+///
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -450,7 +462,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are less than or equal to those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -470,7 +483,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are greater than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -490,7 +504,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are greater than or equal to those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -510,8 +525,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are ordered with respect to those in the second operand.
///
-/// A pair of double-precision values are "ordered" with respect to each
-/// other if neither value is a NaN. Each comparison yields 0x0 for false,
+/// A pair of double-precision values are ordered with respect to each
+/// other if neither value is a NaN. Each comparison returns 0x0 for false,
/// 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
@@ -532,8 +547,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are unordered with respect to those in the second operand.
///
-/// A pair of double-precision values are "unordered" with respect to each
-/// other if one or both values are NaN. Each comparison yields 0x0 for
+/// A pair of double-precision values are unordered with respect to each
+/// other if one or both values are NaN. Each comparison returns 0x0 for
/// false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
@@ -555,7 +570,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are unequal to those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -575,7 +591,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are not less than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -595,7 +612,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are not less than or equal to those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -615,7 +633,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are not greater than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -635,7 +654,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are not greater than or equal to those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -654,7 +674,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_pd(__m128d __a,
/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] for equality.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -678,7 +699,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_sd(__m128d __a,
/// the value in the first parameter is less than the corresponding value in
/// the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -702,7 +724,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_sd(__m128d __a,
/// the value in the first parameter is less than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -726,7 +749,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_sd(__m128d __a,
/// the value in the first parameter is greater than the corresponding value
/// in the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -751,7 +775,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_sd(__m128d __a,
/// the value in the first parameter is greater than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -773,11 +798,11 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_sd(__m128d __a,
/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
-/// the value in the first parameter is "ordered" with respect to the
+/// the value in the first parameter is ordered with respect to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair
-/// of double-precision values are "ordered" with respect to each other if
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair
+/// of double-precision values are ordered with respect to each other if
/// neither value is a NaN.
///
/// \headerfile <x86intrin.h>
@@ -799,11 +824,11 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_sd(__m128d __a,
/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
-/// the value in the first parameter is "unordered" with respect to the
+/// the value in the first parameter is unordered with respect to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair
-/// of double-precision values are "unordered" with respect to each other if
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair
+/// of double-precision values are unordered with respect to each other if
/// one or both values are NaN.
///
/// \headerfile <x86intrin.h>
@@ -829,7 +854,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_sd(__m128d __a,
/// the value in the first parameter is unequal to the corresponding value in
/// the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -853,7 +879,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_sd(__m128d __a,
/// the value in the first parameter is not less than the corresponding
/// value in the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -877,7 +904,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_sd(__m128d __a,
/// the value in the first parameter is not less than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -901,7 +929,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_sd(__m128d __a,
/// the value in the first parameter is not greater than the corresponding
/// value in the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -926,7 +955,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_sd(__m128d __a,
/// the value in the first parameter is not greater than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -949,8 +979,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_sd(__m128d __a,
/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] for equality.
///
-/// The comparison yields 0 for false, 1 for true. If either of the two
-/// lower double-precision values is NaN, 0 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -962,8 +992,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_sd(__m128d __a,
/// \param __b
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-/// lower double-precision values is NaN, 0 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a,
__m128d __b) {
return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b);
@@ -974,8 +1003,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a,
/// the value in the first parameter is less than the corresponding value in
/// the second parameter.
///
-/// The comparison yields 0 for false, 1 for true. If either of the two
-/// lower double-precision values is NaN, 0 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -987,8 +1016,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a,
/// \param __b
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-/// lower double-precision values is NaN, 0 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a,
__m128d __b) {
return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b);
@@ -999,8 +1027,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a,
/// the value in the first parameter is less than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0 for false, 1 for true. If either of the two
-/// lower double-precision values is NaN, 0 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1012,8 +1040,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a,
/// \param __b
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-/// lower double-precision values is NaN, 0 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a,
__m128d __b) {
return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b);
@@ -1024,8 +1051,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a,
/// the value in the first parameter is greater than the corresponding value
/// in the second parameter.
///
-/// The comparison yields 0 for false, 1 for true. If either of the two
-/// lower double-precision values is NaN, 0 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1037,8 +1064,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a,
/// \param __b
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-/// lower double-precision values is NaN, 0 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a,
__m128d __b) {
return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b);
@@ -1049,8 +1075,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a,
/// the value in the first parameter is greater than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0 for false, 1 for true. If either of the two
-/// lower double-precision values is NaN, 0 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1062,8 +1088,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a,
/// \param __b
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-/// lower double-precision values is NaN, 0 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a,
__m128d __b) {
return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b);
@@ -1074,8 +1099,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a,
/// the value in the first parameter is unequal to the corresponding value in
/// the second parameter.
///
-/// The comparison yields 0 for false, 1 for true. If either of the two
-/// lower double-precision values is NaN, 1 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 1.
///
/// \headerfile <x86intrin.h>
///
@@ -1087,18 +1112,17 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a,
/// \param __b
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-/// lower double-precision values is NaN, 1 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_sd(__m128d __a,
__m128d __b) {
return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b);
}
/// Compares the lower double-precision floating-point values in each of
-/// the two 128-bit floating-point vectors of [2 x double] for equality. The
-/// comparison yields 0 for false, 1 for true.
+/// the two 128-bit floating-point vectors of [2 x double] for equality.
///
-/// If either of the two lower double-precision values is NaN, 0 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1110,8 +1134,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_sd(__m128d __a,
/// \param __b
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-/// lower double-precision values is NaN, 0 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a,
__m128d __b) {
return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b);
@@ -1122,8 +1145,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a,
/// the value in the first parameter is less than the corresponding value in
/// the second parameter.
///
-/// The comparison yields 0 for false, 1 for true. If either of the two lower
-/// double-precision values is NaN, 0 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1135,8 +1158,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a,
/// \param __b
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-/// lower double-precision values is NaN, 0 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a,
__m128d __b) {
return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b);
@@ -1147,8 +1169,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a,
/// the value in the first parameter is less than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0 for false, 1 for true. If either of the two lower
-/// double-precision values is NaN, 0 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1160,8 +1182,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a,
/// \param __b
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-/// lower double-precision values is NaN, 0 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a,
__m128d __b) {
return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b);
@@ -1172,8 +1193,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a,
/// the value in the first parameter is greater than the corresponding value
/// in the second parameter.
///
-/// The comparison yields 0 for false, 1 for true. If either of the two lower
-/// double-precision values is NaN, 0 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1185,8 +1206,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a,
/// \param __b
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-/// lower double-precision values is NaN, 0 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a,
__m128d __b) {
return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b);
@@ -1197,8 +1217,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a,
/// the value in the first parameter is greater than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0 for false, 1 for true. If either of the two
-/// lower double-precision values is NaN, 0 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1210,8 +1230,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a,
/// \param __b
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-/// lower double-precision values is NaN, 0 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a,
__m128d __b) {
return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b);
@@ -1222,8 +1241,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a,
/// the value in the first parameter is unequal to the corresponding value in
/// the second parameter.
///
-/// The comparison yields 0 for false, 1 for true. If either of the two lower
-/// double-precision values is NaN, 1 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 1.
///
/// \headerfile <x86intrin.h>
///
@@ -1235,8 +1254,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a,
/// \param __b
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-/// lower double-precision values is NaN, 1 is returned.
+/// \returns An integer containing the comparison result.
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomineq_sd(__m128d __a,
__m128d __b) {
return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b);
@@ -1304,6 +1322,10 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtepi32_pd(__m128i __a) {
/// returned in the lower 64 bits of a 128-bit vector of [4 x i32]. The upper
/// 64 bits of the result vector are set to zero.
///
+/// If a converted value does not fit in a 32-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTPD2DQ / CVTPD2DQ </c> instruction.
@@ -1319,6 +1341,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtpd_epi32(__m128d __a) {
/// Converts the low-order element of a 128-bit vector of [2 x double]
/// into a 32-bit signed integer value.
///
+/// If the converted value does not fit in a 32-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTSD2SI / CVTSD2SI </c> instruction.
@@ -1404,12 +1430,13 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtss_sd(__m128d __a,
}
/// Converts the two double-precision floating-point elements of a
-/// 128-bit vector of [2 x double] into two signed 32-bit integer values,
-/// returned in the lower 64 bits of a 128-bit vector of [4 x i32].
+/// 128-bit vector of [2 x double] into two signed truncated (rounded
+/// toward zero) 32-bit integer values, returned in the lower 64 bits
+/// of a 128-bit vector of [4 x i32].
///
-/// If the result of either conversion is inexact, the result is truncated
-/// (rounded towards zero) regardless of the current MXCSR setting. The upper
-/// 64 bits of the result vector are set to zero.
+/// If a converted value does not fit in a 32-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
///
/// \headerfile <x86intrin.h>
///
@@ -1425,7 +1452,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttpd_epi32(__m128d __a) {
}
/// Converts the low-order element of a [2 x double] vector into a 32-bit
-/// signed integer value, truncating the result when it is inexact.
+/// signed truncated (rounded toward zero) integer value.
+///
+/// If the converted value does not fit in a 32-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
///
/// \headerfile <x86intrin.h>
///
@@ -1444,6 +1475,10 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_cvttsd_si32(__m128d __a) {
/// 128-bit vector of [2 x double] into two signed 32-bit integer values,
/// returned in a 64-bit vector of [2 x i32].
///
+/// If a converted value does not fit in a 32-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> CVTPD2PI </c> instruction.
@@ -1456,11 +1491,12 @@ static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_cvtpd_pi32(__m128d __a) {
}
/// Converts the two double-precision floating-point elements of a
-/// 128-bit vector of [2 x double] into two signed 32-bit integer values,
-/// returned in a 64-bit vector of [2 x i32].
+/// 128-bit vector of [2 x double] into two signed truncated (rounded toward
+/// zero) 32-bit integer values, returned in a 64-bit vector of [2 x i32].
///
-/// If the result of either conversion is inexact, the result is truncated
-/// (rounded towards zero) regardless of the current MXCSR setting.
+/// If a converted value does not fit in a 32-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
///
/// \headerfile <x86intrin.h>
///
@@ -1735,7 +1771,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_undefined_pd(void) {
/// lower 64 bits contain the value of the parameter. The upper 64 bits are
/// set to zero.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_sd(double __w) {
- return __extension__(__m128d){__w, 0};
+ return __extension__(__m128d){__w, 0.0};
}
/// Constructs a 128-bit floating-point vector of [2 x double], with each
@@ -2099,9 +2135,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi64(__m128i __a,
}
/// Adds, with saturation, the corresponding elements of two 128-bit
-/// signed [16 x i8] vectors, saving each sum in the corresponding element of
-/// a 128-bit result vector of [16 x i8]. Positive sums greater than 0x7F are
-/// saturated to 0x7F. Negative sums less than 0x80 are saturated to 0x80.
+/// signed [16 x i8] vectors, saving each sum in the corresponding element
+/// of a 128-bit result vector of [16 x i8].
+///
+/// Positive sums greater than 0x7F are saturated to 0x7F. Negative sums
+/// less than 0x80 are saturated to 0x80.
///
/// \headerfile <x86intrin.h>
///
@@ -2119,10 +2157,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi8(__m128i __a,
}
/// Adds, with saturation, the corresponding elements of two 128-bit
-/// signed [8 x i16] vectors, saving each sum in the corresponding element of
-/// a 128-bit result vector of [8 x i16]. Positive sums greater than 0x7FFF
-/// are saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to
-/// 0x8000.
+/// signed [8 x i16] vectors, saving each sum in the corresponding element
+/// of a 128-bit result vector of [8 x i16].
+///
+/// Positive sums greater than 0x7FFF are saturated to 0x7FFF. Negative sums
+/// less than 0x8000 are saturated to 0x8000.
///
/// \headerfile <x86intrin.h>
///
@@ -2141,8 +2180,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi16(__m128i __a,
/// Adds, with saturation, the corresponding elements of two 128-bit
/// unsigned [16 x i8] vectors, saving each sum in the corresponding element
-/// of a 128-bit result vector of [16 x i8]. Positive sums greater than 0xFF
-/// are saturated to 0xFF. Negative sums are saturated to 0x00.
+/// of a 128-bit result vector of [16 x i8].
+///
+/// Positive sums greater than 0xFF are saturated to 0xFF. Negative sums are
+/// saturated to 0x00.
///
/// \headerfile <x86intrin.h>
///
@@ -2161,8 +2202,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu8(__m128i __a,
/// Adds, with saturation, the corresponding elements of two 128-bit
/// unsigned [8 x i16] vectors, saving each sum in the corresponding element
-/// of a 128-bit result vector of [8 x i16]. Positive sums greater than
-/// 0xFFFF are saturated to 0xFFFF. Negative sums are saturated to 0x0000.
+/// of a 128-bit result vector of [8 x i16].
+///
+/// Positive sums greater than 0xFFFF are saturated to 0xFFFF. Negative sums
+/// are saturated to 0x0000.
///
/// \headerfile <x86intrin.h>
///
@@ -2518,10 +2561,12 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi64(__m128i __a,
return (__m128i)((__v2du)__a - (__v2du)__b);
}
-/// Subtracts corresponding 8-bit signed integer values in the input and
-/// returns the differences in the corresponding bytes in the destination.
-/// Differences greater than 0x7F are saturated to 0x7F, and differences less
-/// than 0x80 are saturated to 0x80.
+/// Subtracts, with saturation, corresponding 8-bit signed integer values in
+/// the input and returns the differences in the corresponding bytes in the
+/// destination.
+///
+/// Differences greater than 0x7F are saturated to 0x7F, and differences
+/// less than 0x80 are saturated to 0x80.
///
/// \headerfile <x86intrin.h>
///
@@ -2538,8 +2583,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi8(__m128i __a,
return (__m128i)__builtin_elementwise_sub_sat((__v16qs)__a, (__v16qs)__b);
}
-/// Subtracts corresponding 16-bit signed integer values in the input and
-/// returns the differences in the corresponding bytes in the destination.
+/// Subtracts, with saturation, corresponding 16-bit signed integer values in
+/// the input and returns the differences in the corresponding bytes in the
+/// destination.
+///
/// Differences greater than 0x7FFF are saturated to 0x7FFF, and differences
/// less than 0x8000 are saturated to 0x8000.
///
@@ -2558,9 +2605,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi16(__m128i __a,
return (__m128i)__builtin_elementwise_sub_sat((__v8hi)__a, (__v8hi)__b);
}
-/// Subtracts corresponding 8-bit unsigned integer values in the input
-/// and returns the differences in the corresponding bytes in the
-/// destination. Differences less than 0x00 are saturated to 0x00.
+/// Subtracts, with saturation, corresponding 8-bit unsigned integer values in
+/// the input and returns the differences in the corresponding bytes in the
+/// destination.
+///
+/// Differences less than 0x00 are saturated to 0x00.
///
/// \headerfile <x86intrin.h>
///
@@ -2577,9 +2626,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu8(__m128i __a,
return (__m128i)__builtin_elementwise_sub_sat((__v16qu)__a, (__v16qu)__b);
}
-/// Subtracts corresponding 16-bit unsigned integer values in the input
-/// and returns the differences in the corresponding bytes in the
-/// destination. Differences less than 0x0000 are saturated to 0x0000.
+/// Subtracts, with saturation, corresponding 16-bit unsigned integer values in
+/// the input and returns the differences in the corresponding bytes in the
+/// destination.
+///
+/// Differences less than 0x0000 are saturated to 0x0000.
///
/// \headerfile <x86intrin.h>
///
@@ -3008,8 +3059,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi64(__m128i __a,
}
/// Compares each of the corresponding 8-bit values of the 128-bit
-/// integer vectors for equality. Each comparison yields 0x0 for false, 0xFF
-/// for true.
+/// integer vectors for equality.
+///
+/// Each comparison returns 0x0 for false, 0xFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3026,8 +3078,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi8(__m128i __a,
}
/// Compares each of the corresponding 16-bit values of the 128-bit
-/// integer vectors for equality. Each comparison yields 0x0 for false,
-/// 0xFFFF for true.
+/// integer vectors for equality.
+///
+/// Each comparison returns 0x0 for false, 0xFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3044,8 +3097,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi16(__m128i __a,
}
/// Compares each of the corresponding 32-bit values of the 128-bit
-/// integer vectors for equality. Each comparison yields 0x0 for false,
-/// 0xFFFFFFFF for true.
+/// integer vectors for equality.
+///
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3063,8 +3117,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi32(__m128i __a,
/// Compares each of the corresponding signed 8-bit values of the 128-bit
/// integer vectors to determine if the values in the first operand are
-/// greater than those in the second operand. Each comparison yields 0x0 for
-/// false, 0xFF for true.
+/// greater than those in the second operand.
+///
+/// Each comparison returns 0x0 for false, 0xFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3086,7 +3141,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi8(__m128i __a,
/// 128-bit integer vectors to determine if the values in the first operand
/// are greater than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3106,7 +3161,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi16(__m128i __a,
/// 128-bit integer vectors to determine if the values in the first operand
/// are greater than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3126,7 +3181,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi32(__m128i __a,
/// integer vectors to determine if the values in the first operand are less
/// than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFF for true.
+/// Each comparison returns 0x0 for false, 0xFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3146,7 +3201,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi8(__m128i __a,
/// 128-bit integer vectors to determine if the values in the first operand
/// are less than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3166,7 +3221,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi16(__m128i __a,
/// 128-bit integer vectors to determine if the values in the first operand
/// are less than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3207,7 +3262,11 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtsi64_sd(__m128d __a,
}
/// Converts the first (lower) element of a vector of [2 x double] into a
-/// 64-bit signed integer value, according to the current rounding mode.
+/// 64-bit signed integer value.
+///
+/// If the converted value does not fit in a 64-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
///
/// \headerfile <x86intrin.h>
///
@@ -3222,7 +3281,11 @@ static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsd_si64(__m128d __a) {
}
/// Converts the first (lower) element of a vector of [2 x double] into a
-/// 64-bit signed integer value, truncating the result when it is inexact.
+/// 64-bit signed truncated (rounded toward zero) integer value.
+///
+/// If the converted value does not fit in a 64-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
///
/// \headerfile <x86intrin.h>
///
@@ -3253,6 +3316,10 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtepi32_ps(__m128i __a) {
/// Converts a vector of [4 x float] into a vector of [4 x i32].
///
+/// If a converted value does not fit in a 32-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTPS2DQ / CVTPS2DQ </c> instruction.
@@ -3265,8 +3332,12 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtps_epi32(__m128 __a) {
return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a);
}
-/// Converts a vector of [4 x float] into a vector of [4 x i32],
-/// truncating the result when it is inexact.
+/// Converts a vector of [4 x float] into four signed truncated (rounded toward
+/// zero) 32-bit integers, returned in a vector of [4 x i32].
+///
+/// If a converted value does not fit in a 32-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
///
/// \headerfile <x86intrin.h>
///
@@ -4050,26 +4121,22 @@ void _mm_mfence(void);
} // extern "C"
#endif
-/// Converts 16-bit signed integers from both 128-bit integer vector
-/// operands into 8-bit signed integers, and packs the results into the
-/// destination. Positive values greater than 0x7F are saturated to 0x7F.
-/// Negative values less than 0x80 are saturated to 0x80.
+/// Converts, with saturation, 16-bit signed integers from both 128-bit integer
+/// vector operands into 8-bit signed integers, and packs the results into
+/// the destination.
+///
+/// Positive values greater than 0x7F are saturated to 0x7F. Negative values
+/// less than 0x80 are saturated to 0x80.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VPACKSSWB / PACKSSWB </c> instruction.
///
/// \param __a
-/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-/// a signed integer and is converted to a 8-bit signed integer with
-/// saturation. Values greater than 0x7F are saturated to 0x7F. Values less
-/// than 0x80 are saturated to 0x80. The converted [8 x i8] values are
+/// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are
/// written to the lower 64 bits of the result.
/// \param __b
-/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-/// a signed integer and is converted to a 8-bit signed integer with
-/// saturation. Values greater than 0x7F are saturated to 0x7F. Values less
-/// than 0x80 are saturated to 0x80. The converted [8 x i8] values are
+/// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are
/// written to the higher 64 bits of the result.
/// \returns A 128-bit vector of [16 x i8] containing the converted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a,
@@ -4077,26 +4144,22 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a,
return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b);
}
-/// Converts 32-bit signed integers from both 128-bit integer vector
-/// operands into 16-bit signed integers, and packs the results into the
-/// destination. Positive values greater than 0x7FFF are saturated to 0x7FFF.
-/// Negative values less than 0x8000 are saturated to 0x8000.
+/// Converts, with saturation, 32-bit signed integers from both 128-bit integer
+/// vector operands into 16-bit signed integers, and packs the results into
+/// the destination.
+///
+/// Positive values greater than 0x7FFF are saturated to 0x7FFF. Negative
+/// values less than 0x8000 are saturated to 0x8000.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VPACKSSDW / PACKSSDW </c> instruction.
///
/// \param __a
-/// A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as
-/// a signed integer and is converted to a 16-bit signed integer with
-/// saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values
-/// less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values
+/// A 128-bit integer vector of [4 x i32]. The converted [4 x i16] values
/// are written to the lower 64 bits of the result.
/// \param __b
-/// A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as
-/// a signed integer and is converted to a 16-bit signed integer with
-/// saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values
-/// less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values
+/// A 128-bit integer vector of [4 x i32]. The converted [4 x i16] values
/// are written to the higher 64 bits of the result.
/// \returns A 128-bit vector of [8 x i16] containing the converted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a,
@@ -4104,26 +4167,22 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a,
return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b);
}
-/// Converts 16-bit signed integers from both 128-bit integer vector
-/// operands into 8-bit unsigned integers, and packs the results into the
-/// destination. Values greater than 0xFF are saturated to 0xFF. Values less
-/// than 0x00 are saturated to 0x00.
+/// Converts, with saturation, 16-bit signed integers from both 128-bit integer
+/// vector operands into 8-bit unsigned integers, and packs the results into
+/// the destination.
+///
+/// Values greater than 0xFF are saturated to 0xFF. Values less than 0x00
+/// are saturated to 0x00.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VPACKUSWB / PACKUSWB </c> instruction.
///
/// \param __a
-/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-/// a signed integer and is converted to an 8-bit unsigned integer with
-/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less
-/// than 0x00 are saturated to 0x00. The converted [8 x i8] values are
+/// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are
/// written to the lower 64 bits of the result.
/// \param __b
-/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-/// a signed integer and is converted to an 8-bit unsigned integer with
-/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less
-/// than 0x00 are saturated to 0x00. The converted [8 x i8] values are
+/// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are
/// written to the higher 64 bits of the result.
/// \returns A 128-bit vector of [16 x i8] containing the converted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi16(__m128i __a,
@@ -4742,6 +4801,78 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castsi128_pd(__m128i __a) {
return (__m128d)__a;
}
+/// Compares each of the corresponding double-precision values of two
+/// 128-bit vectors of [2 x double], using the operation specified by the
+/// immediate integer operand.
+///
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, comparisons that are ordered
+/// return false, and comparisons that are unordered return true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128d _mm_cmp_pd(__m128d a, __m128d b, const int c);
+/// \endcode
+///
+/// This intrinsic corresponds to the <c> (V)CMPPD </c> instruction.
+///
+/// \param a
+/// A 128-bit vector of [2 x double].
+/// \param b
+/// A 128-bit vector of [2 x double].
+/// \param c
+/// An immediate integer operand, with bits [4:0] specifying which comparison
+/// operation to use: \n
+/// 0x00: Equal (ordered, non-signaling) \n
+/// 0x01: Less-than (ordered, signaling) \n
+/// 0x02: Less-than-or-equal (ordered, signaling) \n
+/// 0x03: Unordered (non-signaling) \n
+/// 0x04: Not-equal (unordered, non-signaling) \n
+/// 0x05: Not-less-than (unordered, signaling) \n
+/// 0x06: Not-less-than-or-equal (unordered, signaling) \n
+/// 0x07: Ordered (non-signaling) \n
+/// \returns A 128-bit vector of [2 x double] containing the comparison results.
+#define _mm_cmp_pd(a, b, c) \
+ ((__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
+ (c)))
+
+/// Compares each of the corresponding scalar double-precision values of
+/// two 128-bit vectors of [2 x double], using the operation specified by the
+/// immediate integer operand.
+///
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, comparisons that are ordered
+/// return false, and comparisons that are unordered return true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128d _mm_cmp_sd(__m128d a, __m128d b, const int c);
+/// \endcode
+///
+/// This intrinsic corresponds to the <c> (V)CMPSD </c> instruction.
+///
+/// \param a
+/// A 128-bit vector of [2 x double].
+/// \param b
+/// A 128-bit vector of [2 x double].
+/// \param c
+/// An immediate integer operand, with bits [4:0] specifying which comparison
+/// operation to use: \n
+/// 0x00: Equal (ordered, non-signaling) \n
+/// 0x01: Less-than (ordered, signaling) \n
+/// 0x02: Less-than-or-equal (ordered, signaling) \n
+/// 0x03: Unordered (non-signaling) \n
+/// 0x04: Not-equal (unordered, non-signaling) \n
+/// 0x05: Not-less-than (unordered, signaling) \n
+/// 0x06: Not-less-than-or-equal (unordered, signaling) \n
+/// 0x07: Ordered (non-signaling) \n
+/// \returns A 128-bit vector of [2 x double] containing the comparison results.
+#define _mm_cmp_sd(a, b, c) \
+ ((__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
+ (c)))
+
#if defined(__cplusplus)
extern "C" {
#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/float.h b/contrib/llvm-project/clang/lib/Headers/float.h
index 0e73bca0a2d6..e5c439a9d47a 100644
--- a/contrib/llvm-project/clang/lib/Headers/float.h
+++ b/contrib/llvm-project/clang/lib/Headers/float.h
@@ -10,6 +10,10 @@
#ifndef __CLANG_FLOAT_H
#define __CLANG_FLOAT_H
+#if defined(__MVS__) && __has_include_next(<float.h>)
+#include_next <float.h>
+#else
+
/* If we're on MinGW, fall back to the system's float.h, which might have
* additional definitions provided for Windows.
* For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
@@ -82,6 +86,18 @@
# undef DBL_HAS_SUBNORM
# undef LDBL_HAS_SUBNORM
# endif
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
+ !defined(__STRICT_ANSI__)
+# undef FLT_NORM_MAX
+# undef DBL_NORM_MAX
+# undef LDBL_NORM_MAX
+#endif
+#endif
+
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
+ !defined(__STRICT_ANSI__)
+# undef INFINITY
+# undef NAN
#endif
/* Characteristics of floating point types, C99 5.2.4.2.2 */
@@ -151,6 +167,17 @@
# define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__
#endif
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
+ !defined(__STRICT_ANSI__)
+ /* C23 5.2.5.3.3p29-30 */
+# define INFINITY (__builtin_inff())
+# define NAN (__builtin_nanf(""))
+ /* C23 5.2.5.3.3p32 */
+# define FLT_NORM_MAX __FLT_NORM_MAX__
+# define DBL_NORM_MAX __DBL_NORM_MAX__
+# define LDBL_NORM_MAX __LDBL_NORM_MAX__
+#endif
+
#ifdef __STDC_WANT_IEC_60559_TYPES_EXT__
# define FLT16_MANT_DIG __FLT16_MANT_DIG__
# define FLT16_DECIMAL_DIG __FLT16_DECIMAL_DIG__
@@ -165,4 +192,5 @@
# define FLT16_TRUE_MIN __FLT16_TRUE_MIN__
#endif /* __STDC_WANT_IEC_60559_TYPES_EXT__ */
+#endif /* __MVS__ */
#endif /* __CLANG_FLOAT_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/fmaintrin.h b/contrib/llvm-project/clang/lib/Headers/fmaintrin.h
index ea832fac4f99..22d1a780bbfd 100644
--- a/contrib/llvm-project/clang/lib/Headers/fmaintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/fmaintrin.h
@@ -60,7 +60,8 @@ _mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
/// Computes a scalar multiply-add of the single-precision values in the
/// low 32 bits of 128-bit vectors of [4 x float].
-/// \code
+///
+/// \code{.operation}
/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0]
/// result[127:32] = __A[127:32]
/// \endcode
@@ -88,7 +89,8 @@ _mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C)
/// Computes a scalar multiply-add of the double-precision values in the
/// low 64 bits of 128-bit vectors of [2 x double].
-/// \code
+///
+/// \code{.operation}
/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0]
/// result[127:64] = __A[127:64]
/// \endcode
@@ -156,7 +158,8 @@ _mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
/// Computes a scalar multiply-subtract of the single-precision values in
/// the low 32 bits of 128-bit vectors of [4 x float].
-/// \code
+///
+/// \code{.operation}
/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0]
/// result[127:32] = __A[127:32]
/// \endcode
@@ -184,7 +187,8 @@ _mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C)
/// Computes a scalar multiply-subtract of the double-precision values in
/// the low 64 bits of 128-bit vectors of [2 x double].
-/// \code
+///
+/// \code{.operation}
/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0]
/// result[127:64] = __A[127:64]
/// \endcode
@@ -252,7 +256,8 @@ _mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
/// Computes a scalar negated multiply-add of the single-precision values in
/// the low 32 bits of 128-bit vectors of [4 x float].
-/// \code
+///
+/// \code{.operation}
/// result[31:0] = -(__A[31:0] * __B[31:0]) + __C[31:0]
/// result[127:32] = __A[127:32]
/// \endcode
@@ -280,7 +285,8 @@ _mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C)
/// Computes a scalar negated multiply-add of the double-precision values
/// in the low 64 bits of 128-bit vectors of [2 x double].
-/// \code
+///
+/// \code{.operation}
/// result[63:0] = -(__A[63:0] * __B[63:0]) + __C[63:0]
/// result[127:64] = __A[127:64]
/// \endcode
@@ -348,7 +354,8 @@ _mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
/// Computes a scalar negated multiply-subtract of the single-precision
/// values in the low 32 bits of 128-bit vectors of [4 x float].
-/// \code
+///
+/// \code{.operation}
/// result[31:0] = -(__A[31:0] * __B[31:0]) - __C[31:0]
/// result[127:32] = __A[127:32]
/// \endcode
@@ -376,7 +383,8 @@ _mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C)
/// Computes a scalar negated multiply-subtract of the double-precision
/// values in the low 64 bits of 128-bit vectors of [2 x double].
-/// \code
+///
+/// \code{.operation}
/// result[63:0] = -(__A[63:0] * __B[63:0]) - __C[63:0]
/// result[127:64] = __A[127:64]
/// \endcode
@@ -404,7 +412,8 @@ _mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C)
/// Computes a multiply with alternating add/subtract of 128-bit vectors of
/// [4 x float].
-/// \code
+///
+/// \code{.operation}
/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0]
/// result[63:32] = (__A[63:32] * __B[63:32]) + __C[63:32]
/// result[95:64] = (__A[95:64] * __B[95:64]) - __C[95:64]
@@ -430,7 +439,8 @@ _mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C)
/// Computes a multiply with alternating add/subtract of 128-bit vectors of
/// [2 x double].
-/// \code
+///
+/// \code{.operation}
/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0]
/// result[127:64] = (__A[127:64] * __B[127:64]) + __C[127:64]
/// \endcode
@@ -454,7 +464,8 @@ _mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C)
/// Computes a multiply with alternating add/subtract of 128-bit vectors of
/// [4 x float].
-/// \code
+///
+/// \code{.operation}
/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0]
/// result[63:32] = (__A[63:32] * __B[63:32]) - __C[63:32]
/// result[95:64] = (__A[95:64] * __B[95:64]) + __C[95:64]
@@ -480,7 +491,8 @@ _mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C)
/// Computes a multiply with alternating add/subtract of 128-bit vectors of
/// [2 x double].
-/// \code
+///
+/// \code{.operation}
/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0]
/// result[127:64] = (__A[127:64] * __B[127:64]) - __C[127:64]
/// \endcode
@@ -664,7 +676,8 @@ _mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)
/// Computes a multiply with alternating add/subtract of 256-bit vectors of
/// [8 x float].
-/// \code
+///
+/// \code{.operation}
/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0]
/// result[63:32] = (__A[63:32] * __B[63:32]) + __C[63:32]
/// result[95:64] = (__A[95:64] * __B[95:64]) - __C[95:64]
@@ -694,7 +707,8 @@ _mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C)
/// Computes a multiply with alternating add/subtract of 256-bit vectors of
/// [4 x double].
-/// \code
+///
+/// \code{.operation}
/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0]
/// result[127:64] = (__A[127:64] * __B[127:64]) + __C[127:64]
/// result[191:128] = (__A[191:128] * __B[191:128]) - __C[191:128]
@@ -720,7 +734,8 @@ _mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C)
/// Computes a vector multiply with alternating add/subtract of 256-bit
/// vectors of [8 x float].
-/// \code
+///
+/// \code{.operation}
/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0]
/// result[63:32] = (__A[63:32] * __B[63:32]) - __C[63:32]
/// result[95:64] = (__A[95:64] * __B[95:64]) + __C[95:64]
@@ -750,7 +765,8 @@ _mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C)
/// Computes a vector multiply with alternating add/subtract of 256-bit
/// vectors of [4 x double].
-/// \code
+///
+/// \code{.operation}
/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0]
/// result[127:64] = (__A[127:64] * __B[127:64]) - __C[127:64]
/// result[191:128] = (__A[191:128] * __B[191:128]) + __C[191:128]
diff --git a/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_basic_types.h b/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_basic_types.h
index 9ea605cfa840..da6903df65ff 100644
--- a/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_basic_types.h
+++ b/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_basic_types.h
@@ -12,6 +12,13 @@
namespace hlsl {
// built-in scalar data types:
+/// \typedef template<typename Ty, int Size> using vector = Ty
+/// __attribute__((ext_vector_type(Size)))
+///
+/// \tparam Ty The base type of the vector may be any builtin integral or
+/// floating point type.
+/// \tparam Size The size of the vector may be any value between 1 and 4.
+
#ifdef __HLSL_ENABLE_16_BIT
// 16-bit integer.
typedef unsigned short uint16_t;
@@ -35,7 +42,9 @@ typedef vector<uint16_t, 2> uint16_t2;
typedef vector<uint16_t, 3> uint16_t3;
typedef vector<uint16_t, 4> uint16_t4;
#endif
-
+typedef vector<bool, 2> bool2;
+typedef vector<bool, 3> bool3;
+typedef vector<bool, 4> bool4;
typedef vector<int, 2> int2;
typedef vector<int, 3> int3;
typedef vector<int, 4> int4;
@@ -49,11 +58,9 @@ typedef vector<uint64_t, 2> uint64_t2;
typedef vector<uint64_t, 3> uint64_t3;
typedef vector<uint64_t, 4> uint64_t4;
-#ifdef __HLSL_ENABLE_16_BIT
typedef vector<half, 2> half2;
typedef vector<half, 3> half3;
typedef vector<half, 4> half4;
-#endif
typedef vector<float, 2> float2;
typedef vector<float, 3> float3;
diff --git a/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_intrinsics.h b/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_intrinsics.h
index da153d8f8e03..09f26a4588c1 100644
--- a/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_intrinsics.h
+++ b/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_intrinsics.h
@@ -18,31 +18,58 @@ namespace hlsl {
#define _HLSL_BUILTIN_ALIAS(builtin) \
__attribute__((clang_builtin_alias(builtin)))
-#define _HLSL_AVAILABILITY(environment, version) \
- __attribute__((availability(environment, introduced = version)))
+#define _HLSL_AVAILABILITY(platform, version) \
+ __attribute__((availability(platform, introduced = version)))
+#define _HLSL_AVAILABILITY_STAGE(platform, version, stage) \
+ __attribute__(( \
+ availability(platform, introduced = version, environment = stage)))
+
+#ifdef __HLSL_ENABLE_16_BIT
+#define _HLSL_16BIT_AVAILABILITY(platform, version) \
+ __attribute__((availability(platform, introduced = version)))
+#define _HLSL_16BIT_AVAILABILITY_STAGE(platform, version, stage) \
+ __attribute__(( \
+ availability(platform, introduced = version, environment = stage)))
+#else
+#define _HLSL_16BIT_AVAILABILITY(environment, version)
+#define _HLSL_16BIT_AVAILABILITY_STAGE(environment, version, stage)
+#endif
//===----------------------------------------------------------------------===//
// abs builtins
//===----------------------------------------------------------------------===//
+
+/// \fn T abs(T Val)
+/// \brief Returns the absolute value of the input value, \a Val.
+/// \param Val The input value.
+
#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
int16_t abs(int16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
int16_t2 abs(int16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
int16_t3 abs(int16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
int16_t4 abs(int16_t4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+#endif
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
half abs(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
half2 abs(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
half3 abs(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
half4 abs(half4);
-#endif
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
int abs(int);
@@ -81,18 +108,334 @@ _HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
double4 abs(double4);
//===----------------------------------------------------------------------===//
-// ceil builtins
+// acos builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T acos(T Val)
+/// \brief Returns the arccosine of the input value, \a Val.
+/// \param Val The input value.
+
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_acos)
+half acos(half);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_acos)
+half2 acos(half2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_acos)
+half3 acos(half3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_acos)
+half4 acos(half4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_acos)
+float acos(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_acos)
+float2 acos(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_acos)
+float3 acos(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_acos)
+float4 acos(float4);
+
//===----------------------------------------------------------------------===//
+// all builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn bool all(T x)
+/// \brief Returns True if all components of the \a x parameter are non-zero;
+/// otherwise, false. \param x The input value.
+
#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(int16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(int16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(int16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(int16_t4);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(uint16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(uint16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(uint16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(uint16_t4);
+#endif
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(half4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(bool);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(bool2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(bool3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(bool4);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(int);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(int2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(int3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(int4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(uint);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(uint2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(uint3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(uint4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(float);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(float4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(int64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(int64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(int64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(int64_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(uint64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(uint64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(uint64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(uint64_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(double);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(double2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(double3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_all)
+bool all(double4);
+
+//===----------------------------------------------------------------------===//
+// any builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn bool any(T x)
+/// \brief Returns True if any components of the \a x parameter are non-zero;
+/// otherwise, false. \param x The input value.
+
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(int16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(int16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(int16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(int16_t4);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(uint16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(uint16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(uint16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(uint16_t4);
+#endif
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(half4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(bool);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(bool2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(bool3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(bool4);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(int);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(int2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(int3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(int4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(uint);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(uint2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(uint3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(uint4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(float);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(float4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(int64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(int64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(int64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(int64_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(uint64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(uint64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(uint64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(uint64_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(double);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(double2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(double3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_any)
+bool any(double4);
+
+//===----------------------------------------------------------------------===//
+// asin builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T asin(T Val)
+/// \brief Returns the arcsine of the input value, \a Val.
+/// \param Val The input value.
+
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_asin)
+half asin(half);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_asin)
+half2 asin(half2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_asin)
+half3 asin(half3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_asin)
+half4 asin(half4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_asin)
+float asin(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_asin)
+float2 asin(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_asin)
+float3 asin(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_asin)
+float4 asin(float4);
+
+//===----------------------------------------------------------------------===//
+// atan builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T atan(T Val)
+/// \brief Returns the arctangent of the input value, \a Val.
+/// \param Val The input value.
+
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_atan)
+half atan(half);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_atan)
+half2 atan(half2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_atan)
+half3 atan(half3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_atan)
+half4 atan(half4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_atan)
+float atan(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_atan)
+float2 atan(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_atan)
+float3 atan(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_atan)
+float4 atan(float4);
+
+//===----------------------------------------------------------------------===//
+// ceil builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T ceil(T Val)
+/// \brief Returns the smallest integer value that is greater than or equal to
+/// the input value, \a Val.
+/// \param Val The input value.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
half ceil(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
half2 ceil(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
half3 ceil(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
half4 ceil(half4);
-#endif
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
float ceil(float);
@@ -103,28 +446,136 @@ float3 ceil(float3);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
float4 ceil(float4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
-double ceil(double);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
-double2 ceil(double2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
-double3 ceil(double3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
-double4 ceil(double4);
+//===----------------------------------------------------------------------===//
+// clamp builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T clamp(T X, T Min, T Max)
+/// \brief Clamps the specified value \a X to the specified
+/// minimum ( \a Min) and maximum ( \a Max) range.
+/// \param X A value to clamp.
+/// \param Min The specified minimum range.
+/// \param Max The specified maximum range.
+///
+/// Returns The clamped value for the \a X parameter.
+/// For values of -INF or INF, clamp will behave as expected.
+/// However for values of NaN, the results are undefined.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+half clamp(half, half, half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+half2 clamp(half2, half2, half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+half3 clamp(half3, half3, half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+half4 clamp(half4, half4, half4);
+
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+int16_t clamp(int16_t, int16_t, int16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+int16_t2 clamp(int16_t2, int16_t2, int16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+int16_t3 clamp(int16_t3, int16_t3, int16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+int16_t4 clamp(int16_t4, int16_t4, int16_t4);
+
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+uint16_t clamp(uint16_t, uint16_t, uint16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+uint16_t2 clamp(uint16_t2, uint16_t2, uint16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+uint16_t3 clamp(uint16_t3, uint16_t3, uint16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+uint16_t4 clamp(uint16_t4, uint16_t4, uint16_t4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+int clamp(int, int, int);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+int2 clamp(int2, int2, int2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+int3 clamp(int3, int3, int3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+int4 clamp(int4, int4, int4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+uint clamp(uint, uint, uint);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+uint2 clamp(uint2, uint2, uint2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+uint3 clamp(uint3, uint3, uint3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+uint4 clamp(uint4, uint4, uint4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+int64_t clamp(int64_t, int64_t, int64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+int64_t2 clamp(int64_t2, int64_t2, int64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+int64_t3 clamp(int64_t3, int64_t3, int64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+int64_t4 clamp(int64_t4, int64_t4, int64_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+uint64_t clamp(uint64_t, uint64_t, uint64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+uint64_t2 clamp(uint64_t2, uint64_t2, uint64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+uint64_t3 clamp(uint64_t3, uint64_t3, uint64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+uint64_t4 clamp(uint64_t4, uint64_t4, uint64_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+float clamp(float, float, float);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+float2 clamp(float2, float2, float2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+float3 clamp(float3, float3, float3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+float4 clamp(float4, float4, float4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+double clamp(double, double, double);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+double2 clamp(double2, double2, double2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+double3 clamp(double3, double3, double3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
+double4 clamp(double4, double4, double4);
//===----------------------------------------------------------------------===//
// cos builtins
//===----------------------------------------------------------------------===//
-#ifdef __HLSL_ENABLE_16_BIT
+
+/// \fn T cos(T Val)
+/// \brief Returns the cosine of the input value, \a Val.
+/// \param Val The input value.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
half cos(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
half2 cos(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
half3 cos(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
half4 cos(half4);
-#endif
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
float cos(float);
@@ -135,28 +586,217 @@ float3 cos(float3);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
float4 cos(float4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
-double cos(double);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
-double2 cos(double2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
-double3 cos(double3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
-double4 cos(double4);
+//===----------------------------------------------------------------------===//
+// cosh builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T cosh(T Val)
+/// \brief Returns the hyperbolic cosine of the input value, \a Val.
+/// \param Val The input value.
+
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cosh)
+half cosh(half);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cosh)
+half2 cosh(half2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cosh)
+half3 cosh(half3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cosh)
+half4 cosh(half4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cosh)
+float cosh(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cosh)
+float2 cosh(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cosh)
+float3 cosh(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cosh)
+float4 cosh(float4);
//===----------------------------------------------------------------------===//
-// floor builtins
+// dot product builtins
//===----------------------------------------------------------------------===//
+
+/// \fn K dot(T X, T Y)
+/// \brief Return the dot product (a scalar value) of \a X and \a Y.
+/// \param X The X input value.
+/// \param Y The Y input value.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+half dot(half, half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+half dot(half2, half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+half dot(half3, half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+half dot(half4, half4);
+
#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+int16_t dot(int16_t, int16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+int16_t dot(int16_t2, int16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+int16_t dot(int16_t3, int16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+int16_t dot(int16_t4, int16_t4);
+
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+uint16_t dot(uint16_t, uint16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+uint16_t dot(uint16_t2, uint16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+uint16_t dot(uint16_t3, uint16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+uint16_t dot(uint16_t4, uint16_t4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+float dot(float, float);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+float dot(float2, float2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+float dot(float3, float3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+float dot(float4, float4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+double dot(double, double);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+int dot(int, int);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+int dot(int2, int2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+int dot(int3, int3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+int dot(int4, int4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+uint dot(uint, uint);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+uint dot(uint2, uint2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+uint dot(uint3, uint3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+uint dot(uint4, uint4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+int64_t dot(int64_t, int64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+int64_t dot(int64_t2, int64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+int64_t dot(int64_t3, int64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+int64_t dot(int64_t4, int64_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+uint64_t dot(uint64_t, uint64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+uint64_t dot(uint64_t2, uint64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+uint64_t dot(uint64_t3, uint64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_dot)
+uint64_t dot(uint64_t4, uint64_t4);
+
+//===----------------------------------------------------------------------===//
+// exp builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T exp(T x)
+/// \brief Returns the base-e exponential, or \a e**x, of the specified value.
+/// \param x The specified input value.
+///
+/// The return value is the base-e exponential of the \a x parameter.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_exp)
+half exp(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_exp)
+half2 exp(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_exp)
+half3 exp(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_exp)
+half4 exp(half4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_exp)
+float exp(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_exp)
+float2 exp(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_exp)
+float3 exp(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_exp)
+float4 exp(float4);
+
+//===----------------------------------------------------------------------===//
+// exp2 builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T exp2(T x)
+/// \brief Returns the base 2 exponential, or \a 2**x, of the specified value.
+/// \param x The specified input value.
+///
+/// The base 2 exponential of the \a x parameter.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_exp2)
+half exp2(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_exp2)
+half2 exp2(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_exp2)
+half3 exp2(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_exp2)
+half4 exp2(half4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_exp2)
+float exp2(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_exp2)
+float2 exp2(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_exp2)
+float3 exp2(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_exp2)
+float4 exp2(float4);
+
+//===----------------------------------------------------------------------===//
+// floor builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T floor(T Val)
+/// \brief Returns the largest integer that is less than or equal to the input
+/// value, \a Val.
+/// \param Val The input value.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
half floor(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
half2 floor(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
half3 floor(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
half4 floor(half4);
-#endif
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
float floor(float);
@@ -167,28 +807,130 @@ float3 floor(float3);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
float4 floor(float4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
-double floor(double);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
-double2 floor(double2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
-double3 floor(double3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
-double4 floor(double4);
+//===----------------------------------------------------------------------===//
+// frac builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T frac(T x)
+/// \brief Returns the fractional (or decimal) part of the input value, \a x.
+/// \param x The specified input value.
+///
+/// The return value is greater than or equal to 0 and less than 1.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_frac)
+half frac(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_frac)
+half2 frac(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_frac)
+half3 frac(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_frac)
+half4 frac(half4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_frac)
+float frac(float);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_frac)
+float2 frac(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_frac)
+float3 frac(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_frac)
+float4 frac(float4);
+
+//===----------------------------------------------------------------------===//
+// isinf builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T isinf(T x)
+/// \brief Determines if the specified value \a x is infinite.
+/// \param x The specified input value.
+///
+/// Returns a value of the same size as the input, with a value set
+/// to True if the x parameter is +INF or -INF. Otherwise, False.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_isinf)
+bool isinf(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_isinf)
+bool2 isinf(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_isinf)
+bool3 isinf(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_isinf)
+bool4 isinf(half4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_isinf)
+bool isinf(float);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_isinf)
+bool2 isinf(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_isinf)
+bool3 isinf(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_isinf)
+bool4 isinf(float4);
+
+//===----------------------------------------------------------------------===//
+// lerp builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T lerp(T x, T y, T s)
+/// \brief Returns the linear interpolation of x to y by s.
+/// \param x [in] The first-floating point value.
+/// \param y [in] The second-floating point value.
+/// \param s [in] A value that linearly interpolates between the x parameter and
+/// the y parameter.
+///
+/// Linear interpolation is based on the following formula: x*(1-s) + y*s which
+/// can equivalently be written as x + s(y-x).
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_lerp)
+half lerp(half, half, half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_lerp)
+half2 lerp(half2, half2, half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_lerp)
+half3 lerp(half3, half3, half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_lerp)
+half4 lerp(half4, half4, half4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_lerp)
+float lerp(float, float, float);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_lerp)
+float2 lerp(float2, float2, float2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_lerp)
+float3 lerp(float3, float3, float3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_lerp)
+float4 lerp(float4, float4, float4);
//===----------------------------------------------------------------------===//
// log builtins
//===----------------------------------------------------------------------===//
-#ifdef __HLSL_ENABLE_16_BIT
+
+/// \fn T log(T Val)
+/// \brief The base-e logarithm of the input value, \a Val parameter.
+/// \param Val The input value.
+///
+/// If \a Val is negative, this result is undefined. If \a Val is 0, this
+/// function returns negative infinity.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
half log(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
half2 log(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
half3 log(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
half4 log(half4);
-#endif
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
float log(float);
@@ -199,28 +941,29 @@ float3 log(float3);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
float4 log(float4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
-double log(double);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
-double2 log(double2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
-double3 log(double3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
-double4 log(double4);
-
//===----------------------------------------------------------------------===//
// log10 builtins
//===----------------------------------------------------------------------===//
-#ifdef __HLSL_ENABLE_16_BIT
+
+/// \fn T log10(T Val)
+/// \brief The base-10 logarithm of the input value, \a Val parameter.
+/// \param Val The input value.
+///
+/// If \a Val is negative, this result is undefined. If \a Val is 0, this
+/// function returns negative infinity.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
half log10(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
half2 log10(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
half3 log10(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
half4 log10(half4);
-#endif
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
float log10(float);
@@ -231,28 +974,29 @@ float3 log10(float3);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
float4 log10(float4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
-double log10(double);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
-double2 log10(double2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
-double3 log10(double3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
-double4 log10(double4);
-
//===----------------------------------------------------------------------===//
// log2 builtins
//===----------------------------------------------------------------------===//
-#ifdef __HLSL_ENABLE_16_BIT
+
+/// \fn T log2(T Val)
+/// \brief The base-2 logarithm of the input value, \a Val parameter.
+/// \param Val The input value.
+///
+/// If \a Val is negative, this result is undefined. If \a Val is 0, this
+/// function returns negative infinity.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
half log2(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
half2 log2(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
half3 log2(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
half4 log2(half4);
-#endif
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
float log2(float);
@@ -263,43 +1007,157 @@ float3 log2(float3);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
float4 log2(float4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
-double log2(double);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
-double2 log2(double2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
-double3 log2(double3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
-double4 log2(double4);
+//===----------------------------------------------------------------------===//
+// mad builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T mad(T M, T A, T B)
+/// \brief The result of \a M * \a A + \a B.
+/// \param M The multiplication value.
+/// \param A The first addition value.
+/// \param B The second addition value.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+half mad(half, half, half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+half2 mad(half2, half2, half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+half3 mad(half3, half3, half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+half4 mad(half4, half4, half4);
+
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+int16_t mad(int16_t, int16_t, int16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+int16_t2 mad(int16_t2, int16_t2, int16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+int16_t3 mad(int16_t3, int16_t3, int16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+int16_t4 mad(int16_t4, int16_t4, int16_t4);
+
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+uint16_t mad(uint16_t, uint16_t, uint16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+uint16_t2 mad(uint16_t2, uint16_t2, uint16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+uint16_t3 mad(uint16_t3, uint16_t3, uint16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+uint16_t4 mad(uint16_t4, uint16_t4, uint16_t4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+int mad(int, int, int);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+int2 mad(int2, int2, int2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+int3 mad(int3, int3, int3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+int4 mad(int4, int4, int4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+uint mad(uint, uint, uint);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+uint2 mad(uint2, uint2, uint2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+uint3 mad(uint3, uint3, uint3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+uint4 mad(uint4, uint4, uint4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+int64_t mad(int64_t, int64_t, int64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+int64_t2 mad(int64_t2, int64_t2, int64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+int64_t3 mad(int64_t3, int64_t3, int64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+int64_t4 mad(int64_t4, int64_t4, int64_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+uint64_t mad(uint64_t, uint64_t, uint64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+uint64_t2 mad(uint64_t2, uint64_t2, uint64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+uint64_t3 mad(uint64_t3, uint64_t3, uint64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+uint64_t4 mad(uint64_t4, uint64_t4, uint64_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+float mad(float, float, float);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+float2 mad(float2, float2, float2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+float3 mad(float3, float3, float3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+float4 mad(float4, float4, float4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+double mad(double, double, double);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+double2 mad(double2, double2, double2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+double3 mad(double3, double3, double3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_mad)
+double4 mad(double4, double4, double4);
//===----------------------------------------------------------------------===//
// max builtins
//===----------------------------------------------------------------------===//
-#ifdef __HLSL_ENABLE_16_BIT
+
+/// \fn T max(T X, T Y)
+/// \brief Return the greater of \a X and \a Y.
+/// \param X The X input value.
+/// \param Y The Y input value.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
half max(half, half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
half2 max(half2, half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
half3 max(half3, half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
half4 max(half4, half4);
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
int16_t max(int16_t, int16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
int16_t2 max(int16_t2, int16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
int16_t3 max(int16_t3, int16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
int16_t4 max(int16_t4, int16_t4);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
uint16_t max(uint16_t, uint16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
uint16_t2 max(uint16_t2, uint16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
uint16_t3 max(uint16_t3, uint16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
uint16_t4 max(uint16_t4, uint16_t4);
#endif
@@ -361,31 +1219,49 @@ double4 max(double4, double4);
//===----------------------------------------------------------------------===//
// min builtins
//===----------------------------------------------------------------------===//
-#ifdef __HLSL_ENABLE_16_BIT
+
+/// \fn T min(T X, T Y)
+/// \brief Return the lesser of \a X and \a Y.
+/// \param X The X input value.
+/// \param Y The Y input value.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
half min(half, half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
half2 min(half2, half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
half3 min(half3, half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
half4 min(half4, half4);
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
int16_t min(int16_t, int16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
int16_t2 min(int16_t2, int16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
int16_t3 min(int16_t3, int16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
int16_t4 min(int16_t4, int16_t4);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
uint16_t min(uint16_t, uint16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
uint16_t2 min(uint16_t2, uint16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
uint16_t3 min(uint16_t3, uint16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
uint16_t4 min(uint16_t4, uint16_t4);
#endif
@@ -447,16 +1323,24 @@ double4 min(double4, double4);
//===----------------------------------------------------------------------===//
// pow builtins
//===----------------------------------------------------------------------===//
-#ifdef __HLSL_ENABLE_16_BIT
+
+/// \fn T pow(T Val, T Pow)
+/// \brief Return the value \a Val, raised to the power \a Pow.
+/// \param Val The input value.
+/// \param Pow The specified power.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
half pow(half, half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
half2 pow(half2, half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
half3 pow(half3, half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
half4 pow(half4, half4);
-#endif
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
float pow(float, float);
@@ -467,48 +1351,30 @@ float3 pow(float3, float3);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
float4 pow(float4, float4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
-double pow(double, double);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
-double2 pow(double2, double2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
-double3 pow(double3, double3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
-double4 pow(double4, double4);
-
//===----------------------------------------------------------------------===//
// reversebits builtins
//===----------------------------------------------------------------------===//
-#ifdef __HLSL_ENABLE_16_BIT
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int16_t reversebits(int16_t);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int16_t2 reversebits(int16_t2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int16_t3 reversebits(int16_t3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int16_t4 reversebits(int16_t4);
+/// \fn T reversebits(T Val)
+/// \brief Return the value \a Val with the bit order reversed.
+/// \param Val The input value.
+
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint16_t reversebits(uint16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint16_t2 reversebits(uint16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint16_t3 reversebits(uint16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint16_t4 reversebits(uint16_t4);
#endif
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int reversebits(int);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int2 reversebits(int2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int3 reversebits(int3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int4 reversebits(int4);
-
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint reversebits(uint);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint2 reversebits(uint2);
@@ -518,15 +1384,6 @@ _HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint4 reversebits(uint4);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int64_t reversebits(int64_t);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int64_t2 reversebits(int64_t2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int64_t3 reversebits(int64_t3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int64_t4 reversebits(int64_t4);
-
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint64_t reversebits(uint64_t);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint64_t2 reversebits(uint64_t2);
@@ -536,18 +1393,133 @@ _HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint64_t4 reversebits(uint64_t4);
//===----------------------------------------------------------------------===//
+// rcp builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T rcp(T x)
+/// \brief Calculates a fast, approximate, per-component reciprocal, i.e. 1 / \a x.
+/// \param x The specified input value.
+///
+/// The return value is the reciprocal of the \a x parameter.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_rcp)
+half rcp(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_rcp)
+half2 rcp(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_rcp)
+half3 rcp(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_rcp)
+half4 rcp(half4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_rcp)
+float rcp(float);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_rcp)
+float2 rcp(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_rcp)
+float3 rcp(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_rcp)
+float4 rcp(float4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_rcp)
+double rcp(double);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_rcp)
+double2 rcp(double2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_rcp)
+double3 rcp(double3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_rcp)
+double4 rcp(double4);
+
+//===----------------------------------------------------------------------===//
+// rsqrt builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T rsqrt(T x)
+/// \brief Returns the reciprocal of the square root of the specified value.
+/// i.e. 1 / sqrt(\a x).
+/// \param x The specified input value.
+///
+/// This function uses the following formula: 1 / sqrt(x).
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_rsqrt)
+half rsqrt(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_rsqrt)
+half2 rsqrt(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_rsqrt)
+half3 rsqrt(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_rsqrt)
+half4 rsqrt(half4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_rsqrt)
+float rsqrt(float);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_rsqrt)
+float2 rsqrt(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_rsqrt)
+float3 rsqrt(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_rsqrt)
+float4 rsqrt(float4);
+
+//===----------------------------------------------------------------------===//
+// round builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T round(T x)
+/// \brief Rounds the specified value \a x to the nearest integer.
+/// \param x The specified input value.
+///
+/// The return value is the \a x parameter, rounded to the nearest integer
+/// within a floating-point type. Halfway cases are
+/// rounded to the nearest even value.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_roundeven)
+half round(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_roundeven)
+half2 round(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_roundeven)
+half3 round(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_roundeven)
+half4 round(half4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_roundeven)
+float round(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_roundeven)
+float2 round(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_roundeven)
+float3 round(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_roundeven)
+float4 round(float4);
+
+//===----------------------------------------------------------------------===//
// sin builtins
//===----------------------------------------------------------------------===//
-#ifdef __HLSL_ENABLE_16_BIT
+
+/// \fn T sin(T Val)
+/// \brief Returns the sine of the input value, \a Val.
+/// \param Val The input value.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
half sin(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
half2 sin(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
half3 sin(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
half4 sin(half4);
-#endif
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
float sin(float);
@@ -558,42 +1530,140 @@ float3 sin(float3);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
float4 sin(float4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
-double sin(double);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
-double2 sin(double2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
-double3 sin(double3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
-double4 sin(double4);
+//===----------------------------------------------------------------------===//
+// sinh builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T sinh(T Val)
+/// \brief Returns the hyperbolic sine of the input value, \a Val.
+/// \param Val The input value.
+
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sinh)
+half sinh(half);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sinh)
+half2 sinh(half2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sinh)
+half3 sinh(half3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sinh)
+half4 sinh(half4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sinh)
+float sinh(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sinh)
+float2 sinh(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sinh)
+float3 sinh(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sinh)
+float4 sinh(float4);
//===----------------------------------------------------------------------===//
// sqrt builtins
//===----------------------------------------------------------------------===//
+
+/// \fn T sqrt(T Val)
+/// \brief Returns the square root of the input value, \a Val.
+/// \param Val The input value.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sqrt)
+half sqrt(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sqrt)
+half2 sqrt(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sqrt)
+half3 sqrt(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sqrt)
+half4 sqrt(half4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sqrt)
+float sqrt(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sqrt)
+float2 sqrt(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sqrt)
+float3 sqrt(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sqrt)
+float4 sqrt(float4);
+
+//===----------------------------------------------------------------------===//
+// tan builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T tan(T Val)
+/// \brief Returns the tangent of the input value, \a Val.
+/// \param Val The input value.
+
#ifdef __HLSL_ENABLE_16_BIT
-_HLSL_BUILTIN_ALIAS(__builtin_sqrtf16)
-half sqrt(half In);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_tan)
+half tan(half);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_tan)
+half2 tan(half2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_tan)
+half3 tan(half3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_tan)
+half4 tan(half4);
#endif
-_HLSL_BUILTIN_ALIAS(__builtin_sqrtf)
-float sqrt(float In);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_tan)
+float tan(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_tan)
+float2 tan(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_tan)
+float3 tan(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_tan)
+float4 tan(float4);
-_HLSL_BUILTIN_ALIAS(__builtin_sqrt)
-double sqrt(double In);
+//===----------------------------------------------------------------------===//
+// tanh builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T tanh(T Val)
+/// \brief Returns the hyperbolic tangent of the input value, \a Val.
+/// \param Val The input value.
+
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_tanh)
+half tanh(half);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_tanh)
+half2 tanh(half2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_tanh)
+half3 tanh(half3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_tanh)
+half4 tanh(half4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_tanh)
+float tanh(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_tanh)
+float2 tanh(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_tanh)
+float3 tanh(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_tanh)
+float4 tanh(float4);
//===----------------------------------------------------------------------===//
// trunc builtins
//===----------------------------------------------------------------------===//
-#ifdef __HLSL_ENABLE_16_BIT
+
+/// \fn T trunc(T Val)
+/// \brief Returns the truncated integer value of the input value, \a Val.
+/// \param Val The input value.
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
half trunc(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
half2 trunc(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
half3 trunc(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
half4 trunc(half4);
-#endif
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
float trunc(float);
@@ -604,21 +1674,24 @@ float3 trunc(float3);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
float4 trunc(float4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
-double trunc(double);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
-double2 trunc(double2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
-double3 trunc(double3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
-double4 trunc(double4);
-
//===----------------------------------------------------------------------===//
// Wave* builtins
//===----------------------------------------------------------------------===//
+
+/// \brief Counts the number of boolean variables which evaluate to true across
+/// all active lanes in the current wave.
+///
+/// \param Val The input boolean value.
+/// \return The number of lanes for which the boolean variable evaluates to
+/// true, across all active lanes in the current wave.
_HLSL_AVAILABILITY(shadermodel, 6.0)
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_count_bits)
-uint WaveActiveCountBits(bool bBit);
+__attribute__((convergent)) uint WaveActiveCountBits(bool Val);
+
+/// \brief Returns the index of the current lane within the current wave.
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_get_lane_index)
+__attribute__((convergent)) uint WaveGetLaneIndex();
} // namespace hlsl
#endif //_HLSL_HLSL_INTRINSICS_H_
diff --git a/contrib/llvm-project/clang/lib/Headers/ia32intrin.h b/contrib/llvm-project/clang/lib/Headers/ia32intrin.h
index 1b979770e196..8e65f232a0de 100644
--- a/contrib/llvm-project/clang/lib/Headers/ia32intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/ia32intrin.h
@@ -26,8 +26,8 @@
#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
#endif
-/// Find the first set bit starting from the lsb. Result is undefined if
-/// input is 0.
+/// Finds the first set bit starting from the least significant bit. The result
+/// is undefined if the input is 0.
///
/// \headerfile <x86intrin.h>
///
@@ -43,8 +43,8 @@ __bsfd(int __A) {
return __builtin_ctz((unsigned int)__A);
}
-/// Find the first set bit starting from the msb. Result is undefined if
-/// input is 0.
+/// Finds the first set bit starting from the most significant bit. The result
+/// is undefined if the input is 0.
///
/// \headerfile <x86intrin.h>
///
@@ -90,8 +90,8 @@ _bswap(int __A) {
return (int)__builtin_bswap32((unsigned int)__A);
}
-/// Find the first set bit starting from the lsb. Result is undefined if
-/// input is 0.
+/// Finds the first set bit starting from the least significant bit. The result
+/// is undefined if the input is 0.
///
/// \headerfile <x86intrin.h>
///
@@ -108,8 +108,8 @@ _bswap(int __A) {
/// \see __bsfd
#define _bit_scan_forward(A) __bsfd((A))
-/// Find the first set bit starting from the msb. Result is undefined if
-/// input is 0.
+/// Finds the first set bit starting from the most significant bit. The result
+/// is undefined if the input is 0.
///
/// \headerfile <x86intrin.h>
///
@@ -127,8 +127,8 @@ _bswap(int __A) {
#define _bit_scan_reverse(A) __bsrd((A))
#ifdef __x86_64__
-/// Find the first set bit starting from the lsb. Result is undefined if
-/// input is 0.
+/// Finds the first set bit starting from the least significant bit. The result
+/// is undefined if the input is 0.
///
/// \headerfile <x86intrin.h>
///
@@ -143,8 +143,8 @@ __bsfq(long long __A) {
return (long long)__builtin_ctzll((unsigned long long)__A);
}
-/// Find the first set bit starting from the msb. Result is undefined if
-/// input is 0.
+/// Finds the first set bit starting from the most significant bit. The result
+/// is undefined if the input is 0.
///
/// \headerfile <x86intrin.h>
///
@@ -159,7 +159,7 @@ __bsrq(long long __A) {
return 63 - __builtin_clzll((unsigned long long)__A);
}
-/// Swaps the bytes in the input. Converting little endian to big endian or
+/// Swaps the bytes in the input, converting little endian to big endian or
/// vice versa.
///
/// \headerfile <x86intrin.h>
@@ -175,7 +175,7 @@ __bswapq(long long __A) {
return (long long)__builtin_bswap64((unsigned long long)__A);
}
-/// Swaps the bytes in the input. Converting little endian to big endian or
+/// Swaps the bytes in the input, converting little endian to big endian or
/// vice versa.
///
/// \headerfile <x86intrin.h>
@@ -198,7 +198,7 @@ __bswapq(long long __A) {
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c POPCNT instruction or a
-/// a sequence of arithmetic and logic ops to calculate it.
+/// sequence of arithmetic and logic operations to calculate it.
///
/// \param __A
/// An unsigned 32-bit integer operand.
@@ -220,7 +220,7 @@ __popcntd(unsigned int __A)
/// \endcode
///
/// This intrinsic corresponds to the \c POPCNT instruction or a
-/// a sequence of arithmetic and logic ops to calculate it.
+/// sequence of arithmetic and logic operations to calculate it.
///
/// \param A
/// An unsigned 32-bit integer operand.
@@ -235,7 +235,7 @@ __popcntd(unsigned int __A)
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c POPCNT instruction or a
-/// a sequence of arithmetic and logic ops to calculate it.
+/// sequence of arithmetic and logic operations to calculate it.
///
/// \param __A
/// An unsigned 64-bit integer operand.
@@ -257,7 +257,7 @@ __popcntq(unsigned long long __A)
/// \endcode
///
/// This intrinsic corresponds to the \c POPCNT instruction or a
-/// a sequence of arithmetic and logic ops to calculate it.
+/// sequence of arithmetic and logic operations to calculate it.
///
/// \param A
/// An unsigned 64-bit integer operand.
@@ -268,7 +268,7 @@ __popcntq(unsigned long long __A)
#endif /* __x86_64__ */
#ifdef __x86_64__
-/// Returns the program status and control \c RFLAGS register with the \c VM
+/// Returns the program status-and-control \c RFLAGS register with the \c VM
/// and \c RF flags cleared.
///
/// \headerfile <x86intrin.h>
@@ -282,7 +282,7 @@ __readeflags(void)
return __builtin_ia32_readeflags_u64();
}
-/// Writes the specified value to the program status and control \c RFLAGS
+/// Writes the specified value to the program status-and-control \c RFLAGS
/// register. Reserved bits are not affected.
///
/// \headerfile <x86intrin.h>
@@ -298,7 +298,7 @@ __writeeflags(unsigned long long __f)
}
#else /* !__x86_64__ */
-/// Returns the program status and control \c EFLAGS register with the \c VM
+/// Returns the program status-and-control \c EFLAGS register with the \c VM
/// and \c RF flags cleared.
///
/// \headerfile <x86intrin.h>
@@ -312,7 +312,7 @@ __readeflags(void)
return __builtin_ia32_readeflags_u32();
}
-/// Writes the specified value to the program status and control \c EFLAGS
+/// Writes the specified value to the program status-and-control \c EFLAGS
/// register. Reserved bits are not affected.
///
/// \headerfile <x86intrin.h>
@@ -328,7 +328,7 @@ __writeeflags(unsigned int __f)
}
#endif /* !__x86_64__ */
-/// Cast a 32-bit float value to a 32-bit unsigned integer value.
+/// Casts a 32-bit float value to a 32-bit unsigned integer value.
///
/// \headerfile <x86intrin.h>
///
@@ -337,13 +337,13 @@ __writeeflags(unsigned int __f)
///
/// \param __A
/// A 32-bit float value.
-/// \returns a 32-bit unsigned integer containing the converted value.
+/// \returns A 32-bit unsigned integer containing the converted value.
static __inline__ unsigned int __DEFAULT_FN_ATTRS_CAST
_castf32_u32(float __A) {
return __builtin_bit_cast(unsigned int, __A);
}
-/// Cast a 64-bit float value to a 64-bit unsigned integer value.
+/// Casts a 64-bit float value to a 64-bit unsigned integer value.
///
/// \headerfile <x86intrin.h>
///
@@ -352,13 +352,13 @@ _castf32_u32(float __A) {
///
/// \param __A
/// A 64-bit float value.
-/// \returns a 64-bit unsigned integer containing the converted value.
+/// \returns A 64-bit unsigned integer containing the converted value.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CAST
_castf64_u64(double __A) {
return __builtin_bit_cast(unsigned long long, __A);
}
-/// Cast a 32-bit unsigned integer value to a 32-bit float value.
+/// Casts a 32-bit unsigned integer value to a 32-bit float value.
///
/// \headerfile <x86intrin.h>
///
@@ -367,13 +367,13 @@ _castf64_u64(double __A) {
///
/// \param __A
/// A 32-bit unsigned integer value.
-/// \returns a 32-bit float value containing the converted value.
+/// \returns A 32-bit float value containing the converted value.
static __inline__ float __DEFAULT_FN_ATTRS_CAST
_castu32_f32(unsigned int __A) {
return __builtin_bit_cast(float, __A);
}
-/// Cast a 64-bit unsigned integer value to a 64-bit float value.
+/// Casts a 64-bit unsigned integer value to a 64-bit float value.
///
/// \headerfile <x86intrin.h>
///
@@ -382,7 +382,7 @@ _castu32_f32(unsigned int __A) {
///
/// \param __A
/// A 64-bit unsigned integer value.
-/// \returns a 64-bit float value containing the converted value.
+/// \returns A 64-bit float value containing the converted value.
static __inline__ double __DEFAULT_FN_ATTRS_CAST
_castu64_f64(unsigned long long __A) {
return __builtin_bit_cast(double, __A);
@@ -470,7 +470,7 @@ __crc32q(unsigned long long __C, unsigned long long __D)
}
#endif /* __x86_64__ */
-/// Reads the specified performance monitoring counter. Refer to your
+/// Reads the specified performance-monitoring counter. Refer to your
/// processor's documentation to determine which performance counters are
/// supported.
///
@@ -487,7 +487,7 @@ __rdpmc(int __A) {
return __builtin_ia32_rdpmc(__A);
}
-/// Reads the processor's time stamp counter and the \c IA32_TSC_AUX MSR
+/// Reads the processor's time-stamp counter and the \c IA32_TSC_AUX MSR
/// \c (0xc0000103).
///
/// \headerfile <x86intrin.h>
@@ -495,14 +495,14 @@ __rdpmc(int __A) {
/// This intrinsic corresponds to the \c RDTSCP instruction.
///
/// \param __A
-/// Address of where to store the 32-bit \c IA32_TSC_AUX value.
-/// \returns The 64-bit value of the time stamp counter.
+/// The address of where to store the 32-bit \c IA32_TSC_AUX value.
+/// \returns The 64-bit value of the time-stamp counter.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
__rdtscp(unsigned int *__A) {
return __builtin_ia32_rdtscp(__A);
}
-/// Reads the processor's time stamp counter.
+/// Reads the processor's time-stamp counter.
///
/// \headerfile <x86intrin.h>
///
@@ -512,7 +512,7 @@ __rdtscp(unsigned int *__A) {
///
/// This intrinsic corresponds to the \c RDTSC instruction.
///
-/// \returns The 64-bit value of the time stamp counter.
+/// \returns The 64-bit value of the time-stamp counter.
#define _rdtsc() __rdtsc()
/// Reads the specified performance monitoring counter. Refer to your
diff --git a/contrib/llvm-project/clang/lib/Headers/immintrin.h b/contrib/llvm-project/clang/lib/Headers/immintrin.h
index 27800f7a8202..cd6cf09b90ca 100644
--- a/contrib/llvm-project/clang/lib/Headers/immintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/immintrin.h
@@ -16,281 +16,231 @@
#include <x86gprintrin.h>
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__MMX__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__MMX__)
#include <mmintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__SSE__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE__)
#include <xmmintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__SSE2__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE2__)
#include <emmintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__SSE3__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE3__)
#include <pmmintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__SSSE3__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__SSSE3__)
#include <tmmintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__SSE4_2__) || defined(__SSE4_1__))
#include <smmintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AES__) || defined(__PCLMUL__))
#include <wmmintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__CLFLUSHOPT__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__CLFLUSHOPT__)
#include <clflushoptintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__CLWB__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__CLWB__)
#include <clwbintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVX__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX__)
#include <avxintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVX2__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX2__)
#include <avx2intrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__F16C__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__F16C__)
#include <f16cintrin.h>
#endif
/* No feature check desired due to internal checks */
#include <bmiintrin.h>
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__BMI2__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__BMI2__)
#include <bmi2intrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__LZCNT__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__LZCNT__)
#include <lzcntintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__POPCNT__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__POPCNT__)
#include <popcntintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__FMA__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__FMA__)
#include <fmaintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVX512F__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512F__)
#include <avx512fintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVX512VL__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VL__)
#include <avx512vlintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVX512BW__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512BW__)
#include <avx512bwintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVX512BITALG__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512BITALG__)
#include <avx512bitalgintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVX512CD__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512CD__)
#include <avx512cdintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVX512VPOPCNTDQ__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VPOPCNTDQ__)
#include <avx512vpopcntdqintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512VPOPCNTDQ__))
#include <avx512vpopcntdqvlintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVX512VNNI__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VNNI__)
#include <avx512vnniintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512VNNI__))
#include <avx512vlvnniintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVXVNNI__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXVNNI__)
#include <avxvnniintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVX512DQ__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512DQ__)
#include <avx512dqintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512BITALG__))
#include <avx512vlbitalgintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512BW__))
#include <avx512vlbwintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512CD__))
#include <avx512vlcdintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512DQ__))
#include <avx512vldqintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVX512ER__)
-#include <avx512erintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVX512IFMA__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512IFMA__)
#include <avx512ifmaintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512IFMA__) && defined(__AVX512VL__))
#include <avx512ifmavlintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVXIFMA__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXIFMA__)
#include <avxifmaintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVX512VBMI__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VBMI__)
#include <avx512vbmiintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VBMI__) && defined(__AVX512VL__))
#include <avx512vbmivlintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVX512VBMI2__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VBMI2__)
#include <avx512vbmi2intrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VBMI2__) && defined(__AVX512VL__))
#include <avx512vlvbmi2intrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVX512PF__)
-#include <avx512pfintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVX512FP16__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512FP16__)
#include <avx512fp16intrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512FP16__))
#include <avx512vlfp16intrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVX512BF16__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512BF16__)
#include <avx512bf16intrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512BF16__))
#include <avx512vlbf16intrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__PKU__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__PKU__)
#include <pkuintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__VPCLMULQDQ__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__VPCLMULQDQ__)
#include <vpclmulqdqintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__VAES__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__VAES__)
#include <vaesintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__GFNI__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__GFNI__)
#include <gfniintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVXVNNIINT8__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXVNNIINT8__)
#include <avxvnniint8intrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVXNECONVERT__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXNECONVERT__)
#include <avxneconvertintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__SHA512__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__SHA512__)
#include <sha512intrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__SM3__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__SM3__)
#include <sm3intrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__SM4__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__SM4__)
#include <sm4intrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AVXVNNIINT16__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXVNNIINT16__)
#include <avxvnniint16intrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__RDPID__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__RDPID__)
/// Reads the value of the IA32_TSC_AUX MSR (0xc0000103).
///
/// \headerfile <immintrin.h>
@@ -304,8 +254,7 @@ _rdpid_u32(void) {
}
#endif // __RDPID__
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__RDRND__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__RDRND__)
/// Returns a 16-bit hardware-generated random value.
///
/// \headerfile <immintrin.h>
@@ -367,8 +316,7 @@ _rdrand64_step(unsigned long long *__p)
}
#endif /* __RDRND__ */
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__FSGSBASE__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__FSGSBASE__)
#ifdef __x86_64__
/// Reads the FS base register.
///
@@ -481,8 +429,7 @@ _writegsbase_u64(unsigned long long __V)
#endif
#endif /* __FSGSBASE__ */
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__MOVBE__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__MOVBE__)
/* The structs used below are to force the load/store to be unaligned. This
* is accomplished with the __packed__ attribute. The __may_alias__ prevents
@@ -598,139 +545,118 @@ _storebe_i64(void * __P, long long __D) {
#endif
#endif /* __MOVBE */
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__RTM__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__RTM__)
#include <rtmintrin.h>
#include <xtestintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__SHA__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__SHA__)
#include <shaintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__FXSR__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__FXSR__)
#include <fxsrintrin.h>
#endif
/* No feature check desired due to internal MSC_VER checks */
#include <xsaveintrin.h>
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__XSAVEOPT__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__XSAVEOPT__)
#include <xsaveoptintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__XSAVEC__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__XSAVEC__)
#include <xsavecintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__XSAVES__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__XSAVES__)
#include <xsavesintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__SHSTK__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__SHSTK__)
#include <cetintrin.h>
#endif
/* Intrinsics inside adcintrin.h are available at all times. */
#include <adcintrin.h>
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__ADX__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__ADX__)
#include <adxintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__RDSEED__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__RDSEED__)
#include <rdseedintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__WBNOINVD__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__WBNOINVD__)
#include <wbnoinvdintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__CLDEMOTE__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__CLDEMOTE__)
#include <cldemoteintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__WAITPKG__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__WAITPKG__)
#include <waitpkgintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__MOVDIRI__) || defined(__MOVDIR64B__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__MOVDIRI__) || \
+ defined(__MOVDIR64B__)
#include <movdirintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__PCONFIG__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__PCONFIG__)
#include <pconfigintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__SGX__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__SGX__)
#include <sgxintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__PTWRITE__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__PTWRITE__)
#include <ptwriteintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__INVPCID__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__INVPCID__)
#include <invpcidintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AMX_FP16__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_FP16__)
#include <amxfp16intrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__KL__) || defined(__WIDEKL__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__KL__) || \
+ defined(__WIDEKL__)
#include <keylockerintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AMX_TILE__) || defined(__AMX_INT8__) || defined(__AMX_BF16__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_TILE__) || \
+ defined(__AMX_INT8__) || defined(__AMX_BF16__)
#include <amxintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AMX_COMPLEX__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_COMPLEX__)
#include <amxcomplexintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+#if !defined(__SCE__) || __has_feature(modules) || \
defined(__AVX512VP2INTERSECT__)
#include <avx512vp2intersectintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+#if !defined(__SCE__) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__))
#include <avx512vlvp2intersectintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__ENQCMD__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__ENQCMD__)
#include <enqcmdintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__SERIALIZE__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__SERIALIZE__)
#include <serializeintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__TSXLDTRK__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__TSXLDTRK__)
#include <tsxldtrkintrin.h>
#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/intrin.h b/contrib/llvm-project/clang/lib/Headers/intrin.h
index 9ebaea9fee94..6308c865ca91 100644
--- a/contrib/llvm-project/clang/lib/Headers/intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/intrin.h
@@ -15,8 +15,10 @@
#ifndef __INTRIN_H
#define __INTRIN_H
+#include <intrin0.h>
+
/* First include the standard intrinsics. */
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__) || (defined(__x86_64__) && !defined(__arm64ec__))
#include <x86intrin.h>
#endif
@@ -24,7 +26,7 @@
#include <armintr.h>
#endif
-#if defined(__aarch64__)
+#if defined(__aarch64__) || defined(__arm64ec__)
#include <arm64intr.h>
#endif
@@ -131,8 +133,6 @@ void __writefsqword(unsigned long, unsigned __int64);
void __writefsword(unsigned long, unsigned short);
void __writemsr(unsigned long, unsigned __int64);
void *_AddressOfReturnAddress(void);
-unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
-unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
unsigned char _bittest(long const *, long);
unsigned char _bittestandcomplement(long *, long);
unsigned char _bittestandreset(long *, long);
@@ -151,7 +151,6 @@ long _InterlockedExchangeAdd_HLERelease(long volatile *, long);
__int64 _InterlockedExchangeAdd64_HLEAcquire(__int64 volatile *, __int64);
__int64 _InterlockedExchangeAdd64_HLERelease(__int64 volatile *, __int64);
void _ReadBarrier(void);
-void _ReadWriteBarrier(void);
unsigned int _rorx_u32(unsigned int, const unsigned int);
int _sarx_i32(int, unsigned int);
#if __STDC_HOSTED__
@@ -167,7 +166,7 @@ unsigned __int32 xbegin(void);
void _xend(void);
/* These additional intrinsics are turned on in x64/amd64/x86_64 mode. */
-#ifdef __x86_64__
+#if defined(__x86_64__) && !defined(__arm64ec__)
void __addgsbyte(unsigned long, unsigned char);
void __addgsdword(unsigned long, unsigned long);
void __addgsqword(unsigned long, unsigned __int64);
@@ -182,12 +181,6 @@ unsigned char __readgsbyte(unsigned long);
unsigned long __readgsdword(unsigned long);
unsigned __int64 __readgsqword(unsigned long);
unsigned short __readgsword(unsigned long);
-unsigned __int64 __shiftleft128(unsigned __int64 _LowPart,
- unsigned __int64 _HighPart,
- unsigned char _Shift);
-unsigned __int64 __shiftright128(unsigned __int64 _LowPart,
- unsigned __int64 _HighPart,
- unsigned char _Shift);
void __stosq(unsigned __int64 *, unsigned __int64, size_t);
unsigned char __vmx_on(unsigned __int64 *);
unsigned char __vmx_vmclear(unsigned __int64 *);
@@ -236,216 +229,15 @@ unsigned __int64 _shlx_u64(unsigned __int64, unsigned int);
unsigned __int64 _shrx_u64(unsigned __int64, unsigned int);
__int64 __mulh(__int64, __int64);
unsigned __int64 __umulh(unsigned __int64, unsigned __int64);
-__int64 _mul128(__int64, __int64, __int64*);
-unsigned __int64 _umul128(unsigned __int64,
- unsigned __int64,
- unsigned __int64*);
+__int64 _mul128(__int64, __int64, __int64 *);
#endif /* __x86_64__ */
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
-
-unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
-unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
-
-#endif
-
-#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
-__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
-__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value);
-__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value);
-__int64 _InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value);
-__int64 _InterlockedIncrement64(__int64 volatile *_Addend);
-__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask);
-
-#endif
-
-/*----------------------------------------------------------------------------*\
-|* Interlocked Exchange Add
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedExchangeAdd8_acq(char volatile *_Addend, char _Value);
-char _InterlockedExchangeAdd8_nf(char volatile *_Addend, char _Value);
-char _InterlockedExchangeAdd8_rel(char volatile *_Addend, char _Value);
-short _InterlockedExchangeAdd16_acq(short volatile *_Addend, short _Value);
-short _InterlockedExchangeAdd16_nf(short volatile *_Addend, short _Value);
-short _InterlockedExchangeAdd16_rel(short volatile *_Addend, short _Value);
-long _InterlockedExchangeAdd_acq(long volatile *_Addend, long _Value);
-long _InterlockedExchangeAdd_nf(long volatile *_Addend, long _Value);
-long _InterlockedExchangeAdd_rel(long volatile *_Addend, long _Value);
-__int64 _InterlockedExchangeAdd64_acq(__int64 volatile *_Addend, __int64 _Value);
-__int64 _InterlockedExchangeAdd64_nf(__int64 volatile *_Addend, __int64 _Value);
-__int64 _InterlockedExchangeAdd64_rel(__int64 volatile *_Addend, __int64 _Value);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Increment
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-short _InterlockedIncrement16_acq(short volatile *_Value);
-short _InterlockedIncrement16_nf(short volatile *_Value);
-short _InterlockedIncrement16_rel(short volatile *_Value);
-long _InterlockedIncrement_acq(long volatile *_Value);
-long _InterlockedIncrement_nf(long volatile *_Value);
-long _InterlockedIncrement_rel(long volatile *_Value);
-__int64 _InterlockedIncrement64_acq(__int64 volatile *_Value);
-__int64 _InterlockedIncrement64_nf(__int64 volatile *_Value);
-__int64 _InterlockedIncrement64_rel(__int64 volatile *_Value);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Decrement
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-short _InterlockedDecrement16_acq(short volatile *_Value);
-short _InterlockedDecrement16_nf(short volatile *_Value);
-short _InterlockedDecrement16_rel(short volatile *_Value);
-long _InterlockedDecrement_acq(long volatile *_Value);
-long _InterlockedDecrement_nf(long volatile *_Value);
-long _InterlockedDecrement_rel(long volatile *_Value);
-__int64 _InterlockedDecrement64_acq(__int64 volatile *_Value);
-__int64 _InterlockedDecrement64_nf(__int64 volatile *_Value);
-__int64 _InterlockedDecrement64_rel(__int64 volatile *_Value);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked And
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedAnd8_acq(char volatile *_Value, char _Mask);
-char _InterlockedAnd8_nf(char volatile *_Value, char _Mask);
-char _InterlockedAnd8_rel(char volatile *_Value, char _Mask);
-short _InterlockedAnd16_acq(short volatile *_Value, short _Mask);
-short _InterlockedAnd16_nf(short volatile *_Value, short _Mask);
-short _InterlockedAnd16_rel(short volatile *_Value, short _Mask);
-long _InterlockedAnd_acq(long volatile *_Value, long _Mask);
-long _InterlockedAnd_nf(long volatile *_Value, long _Mask);
-long _InterlockedAnd_rel(long volatile *_Value, long _Mask);
-__int64 _InterlockedAnd64_acq(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedAnd64_nf(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedAnd64_rel(__int64 volatile *_Value, __int64 _Mask);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Bit Counting and Testing
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-unsigned char _interlockedbittestandset_acq(long volatile *_BitBase,
- long _BitPos);
-unsigned char _interlockedbittestandset_nf(long volatile *_BitBase,
- long _BitPos);
-unsigned char _interlockedbittestandset_rel(long volatile *_BitBase,
- long _BitPos);
-unsigned char _interlockedbittestandreset_acq(long volatile *_BitBase,
- long _BitPos);
-unsigned char _interlockedbittestandreset_nf(long volatile *_BitBase,
- long _BitPos);
-unsigned char _interlockedbittestandreset_rel(long volatile *_BitBase,
- long _BitPos);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Or
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedOr8_acq(char volatile *_Value, char _Mask);
-char _InterlockedOr8_nf(char volatile *_Value, char _Mask);
-char _InterlockedOr8_rel(char volatile *_Value, char _Mask);
-short _InterlockedOr16_acq(short volatile *_Value, short _Mask);
-short _InterlockedOr16_nf(short volatile *_Value, short _Mask);
-short _InterlockedOr16_rel(short volatile *_Value, short _Mask);
-long _InterlockedOr_acq(long volatile *_Value, long _Mask);
-long _InterlockedOr_nf(long volatile *_Value, long _Mask);
-long _InterlockedOr_rel(long volatile *_Value, long _Mask);
-__int64 _InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedOr64_nf(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedOr64_rel(__int64 volatile *_Value, __int64 _Mask);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Xor
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedXor8_acq(char volatile *_Value, char _Mask);
-char _InterlockedXor8_nf(char volatile *_Value, char _Mask);
-char _InterlockedXor8_rel(char volatile *_Value, char _Mask);
-short _InterlockedXor16_acq(short volatile *_Value, short _Mask);
-short _InterlockedXor16_nf(short volatile *_Value, short _Mask);
-short _InterlockedXor16_rel(short volatile *_Value, short _Mask);
-long _InterlockedXor_acq(long volatile *_Value, long _Mask);
-long _InterlockedXor_nf(long volatile *_Value, long _Mask);
-long _InterlockedXor_rel(long volatile *_Value, long _Mask);
-__int64 _InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedXor64_nf(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedXor64_rel(__int64 volatile *_Value, __int64 _Mask);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Exchange
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedExchange8_acq(char volatile *_Target, char _Value);
-char _InterlockedExchange8_nf(char volatile *_Target, char _Value);
-char _InterlockedExchange8_rel(char volatile *_Target, char _Value);
-short _InterlockedExchange16_acq(short volatile *_Target, short _Value);
-short _InterlockedExchange16_nf(short volatile *_Target, short _Value);
-short _InterlockedExchange16_rel(short volatile *_Target, short _Value);
-long _InterlockedExchange_acq(long volatile *_Target, long _Value);
-long _InterlockedExchange_nf(long volatile *_Target, long _Value);
-long _InterlockedExchange_rel(long volatile *_Target, long _Value);
-__int64 _InterlockedExchange64_acq(__int64 volatile *_Target, __int64 _Value);
-__int64 _InterlockedExchange64_nf(__int64 volatile *_Target, __int64 _Value);
-__int64 _InterlockedExchange64_rel(__int64 volatile *_Target, __int64 _Value);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Compare Exchange
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedCompareExchange8_acq(char volatile *_Destination,
- char _Exchange, char _Comparand);
-char _InterlockedCompareExchange8_nf(char volatile *_Destination,
- char _Exchange, char _Comparand);
-char _InterlockedCompareExchange8_rel(char volatile *_Destination,
- char _Exchange, char _Comparand);
-short _InterlockedCompareExchange16_acq(short volatile *_Destination,
- short _Exchange, short _Comparand);
-short _InterlockedCompareExchange16_nf(short volatile *_Destination,
- short _Exchange, short _Comparand);
-short _InterlockedCompareExchange16_rel(short volatile *_Destination,
- short _Exchange, short _Comparand);
-long _InterlockedCompareExchange_acq(long volatile *_Destination,
- long _Exchange, long _Comparand);
-long _InterlockedCompareExchange_nf(long volatile *_Destination,
- long _Exchange, long _Comparand);
-long _InterlockedCompareExchange_rel(long volatile *_Destination,
- long _Exchange, long _Comparand);
-__int64 _InterlockedCompareExchange64_acq(__int64 volatile *_Destination,
- __int64 _Exchange, __int64 _Comparand);
-__int64 _InterlockedCompareExchange64_nf(__int64 volatile *_Destination,
- __int64 _Exchange, __int64 _Comparand);
-__int64 _InterlockedCompareExchange64_rel(__int64 volatile *_Destination,
- __int64 _Exchange, __int64 _Comparand);
-#endif
-#if defined(__x86_64__) || defined(__aarch64__)
-unsigned char _InterlockedCompareExchange128(__int64 volatile *_Destination,
- __int64 _ExchangeHigh,
- __int64 _ExchangeLow,
- __int64 *_ComparandResult);
-#endif
-#if defined(__aarch64__)
-unsigned char _InterlockedCompareExchange128_acq(__int64 volatile *_Destination,
- __int64 _ExchangeHigh,
- __int64 _ExchangeLow,
- __int64 *_ComparandResult);
-unsigned char _InterlockedCompareExchange128_nf(__int64 volatile *_Destination,
- __int64 _ExchangeHigh,
- __int64 _ExchangeLow,
- __int64 *_ComparandResult);
-unsigned char _InterlockedCompareExchange128_rel(__int64 volatile *_Destination,
- __int64 _ExchangeHigh,
- __int64 _ExchangeLow,
- __int64 *_ComparandResult);
-#endif
-
/*----------------------------------------------------------------------------*\
|* movs, stos
\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
+
+#if defined(__i386__) || (defined(__x86_64__) && !defined(__arm64ec__))
static __inline__ void __DEFAULT_FN_ATTRS __movsb(unsigned char *__dst,
unsigned char const *__src,
size_t __n) {
@@ -514,7 +306,7 @@ static __inline__ void __DEFAULT_FN_ATTRS __stosw(unsigned short *__dst,
: "memory");
}
#endif
-#ifdef __x86_64__
+#if defined(__x86_64__) && !defined(__arm64ec__)
static __inline__ void __DEFAULT_FN_ATTRS __movsq(
unsigned long long *__dst, unsigned long long const *__src, size_t __n) {
__asm__ __volatile__("rep movsq"
@@ -533,10 +325,40 @@ static __inline__ void __DEFAULT_FN_ATTRS __stosq(unsigned __int64 *__dst,
/*----------------------------------------------------------------------------*\
|* Misc
\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__) || (defined(__x86_64__) && !defined(__arm64ec__))
static __inline__ void __DEFAULT_FN_ATTRS __halt(void) {
__asm__ volatile("hlt");
}
+
+static inline unsigned char __inbyte(unsigned short port) {
+ unsigned char ret;
+ __asm__ __volatile__("inb %w1, %b0" : "=a"(ret) : "Nd"(port));
+ return ret;
+}
+
+static inline unsigned short __inword(unsigned short port) {
+ unsigned short ret;
+ __asm__ __volatile__("inw %w1, %w0" : "=a"(ret) : "Nd"(port));
+ return ret;
+}
+
+static inline unsigned long __indword(unsigned short port) {
+ unsigned long ret;
+ __asm__ __volatile__("inl %w1, %k0" : "=a"(ret) : "Nd"(port));
+ return ret;
+}
+
+static inline void __outbyte(unsigned short port, unsigned char data) {
+ __asm__ __volatile__("outb %b0, %w1" : : "a"(data), "Nd"(port));
+}
+
+static inline void __outword(unsigned short port, unsigned short data) {
+ __asm__ __volatile__("outw %w0, %w1" : : "a"(data), "Nd"(port));
+}
+
+static inline void __outdword(unsigned short port, unsigned long data) {
+ __asm__ __volatile__("outl %k0, %w1" : : "a"(data), "Nd"(port));
+}
#endif
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
@@ -548,9 +370,10 @@ static __inline__ void __DEFAULT_FN_ATTRS __nop(void) {
/*----------------------------------------------------------------------------*\
|* MS AArch64 specific
\*----------------------------------------------------------------------------*/
-#if defined(__aarch64__)
+#if defined(__aarch64__) || defined(__arm64ec__)
unsigned __int64 __getReg(int);
long _InterlockedAdd(long volatile *Addend, long Value);
+__int64 _InterlockedAdd64(__int64 volatile *Addend, __int64 Value);
__int64 _ReadStatusReg(int);
void _WriteStatusReg(int, __int64);
@@ -582,18 +405,19 @@ unsigned int _CountLeadingOnes(unsigned long);
unsigned int _CountLeadingOnes64(unsigned __int64);
unsigned int _CountLeadingSigns(long);
unsigned int _CountLeadingSigns64(__int64);
-unsigned int _CountLeadingZeros(unsigned long);
-unsigned int _CountLeadingZeros64(unsigned _int64);
unsigned int _CountOneBits(unsigned long);
unsigned int _CountOneBits64(unsigned __int64);
-void __cdecl __prefetch(void *);
+unsigned int __hlt(unsigned int, ...);
+
+void __cdecl __prefetch(const void *);
+
#endif
/*----------------------------------------------------------------------------*\
|* Privileged intrinsics
\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__) || (defined(__x86_64__) && !defined(__arm64ec__))
static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
__readmsr(unsigned long __register) {
// Loads the contents of a 64-bit model specific register (MSR) specified in
@@ -607,7 +431,6 @@ __readmsr(unsigned long __register) {
__asm__ ("rdmsr" : "=d"(__edx), "=a"(__eax) : "c"(__register));
return (((unsigned __int64)__edx) << 32) | (unsigned __int64)__eax;
}
-#endif
static __inline__ unsigned __LPTRINT_TYPE__ __DEFAULT_FN_ATTRS __readcr3(void) {
unsigned __LPTRINT_TYPE__ __cr3_val;
@@ -623,6 +446,7 @@ static __inline__ void __DEFAULT_FN_ATTRS
__writecr3(unsigned __INTPTR_TYPE__ __cr3_val) {
__asm__ ("mov {%0, %%cr3|cr3, %0}" : : "r"(__cr3_val) : "memory");
}
+#endif
#ifdef __cplusplus
}
diff --git a/contrib/llvm-project/clang/lib/Headers/intrin0.h b/contrib/llvm-project/clang/lib/Headers/intrin0.h
new file mode 100644
index 000000000000..6b01f3808652
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/intrin0.h
@@ -0,0 +1,247 @@
+/* ===-------- intrin.h ---------------------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Only include this if we're compiling for the windows platform. */
+#ifndef _MSC_VER
+#include_next <intrin0.h>
+#else
+
+#ifndef __INTRIN0_H
+#define __INTRIN0_H
+
+#if defined(__x86_64__) && !defined(__arm64ec__)
+#include <adcintrin.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
+unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
+void _ReadWriteBarrier(void);
+
+#if defined(__aarch64__) || defined(__arm64ec__)
+unsigned int _CountLeadingZeros(unsigned long);
+unsigned int _CountLeadingZeros64(unsigned _int64);
+unsigned char _InterlockedCompareExchange128_acq(__int64 volatile *_Destination,
+ __int64 _ExchangeHigh,
+ __int64 _ExchangeLow,
+ __int64 *_ComparandResult);
+unsigned char _InterlockedCompareExchange128_nf(__int64 volatile *_Destination,
+ __int64 _ExchangeHigh,
+ __int64 _ExchangeLow,
+ __int64 *_ComparandResult);
+unsigned char _InterlockedCompareExchange128_rel(__int64 volatile *_Destination,
+ __int64 _ExchangeHigh,
+ __int64 _ExchangeLow,
+ __int64 *_ComparandResult);
+#endif
+
+#if defined(__x86_64__) && !defined(__arm64ec__)
+unsigned __int64 _umul128(unsigned __int64, unsigned __int64,
+ unsigned __int64 *);
+unsigned __int64 __shiftleft128(unsigned __int64 _LowPart,
+ unsigned __int64 _HighPart,
+ unsigned char _Shift);
+unsigned __int64 __shiftright128(unsigned __int64 _LowPart,
+ unsigned __int64 _HighPart,
+ unsigned char _Shift);
+#endif
+
+#if defined(__i386__) || (defined(__x86_64__) && !defined(__arm64ec__))
+void _mm_pause(void);
+#endif
+
+#if defined(__x86_64__) || defined(__aarch64__)
+unsigned char _InterlockedCompareExchange128(__int64 volatile *_Destination,
+ __int64 _ExchangeHigh,
+ __int64 _ExchangeLow,
+ __int64 *_ComparandResult);
+#endif
+
+#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
+unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
+unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
+#endif
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \
+ defined(__aarch64__)
+__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
+__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value);
+__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value);
+__int64 _InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value);
+__int64 _InterlockedIncrement64(__int64 volatile *_Addend);
+__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask);
+#endif
+
+#if defined(__arm__) || defined(__aarch64__) || defined(__arm64ec__)
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange Add
+\*----------------------------------------------------------------------------*/
+char _InterlockedExchangeAdd8_acq(char volatile *_Addend, char _Value);
+char _InterlockedExchangeAdd8_nf(char volatile *_Addend, char _Value);
+char _InterlockedExchangeAdd8_rel(char volatile *_Addend, char _Value);
+short _InterlockedExchangeAdd16_acq(short volatile *_Addend, short _Value);
+short _InterlockedExchangeAdd16_nf(short volatile *_Addend, short _Value);
+short _InterlockedExchangeAdd16_rel(short volatile *_Addend, short _Value);
+long _InterlockedExchangeAdd_acq(long volatile *_Addend, long _Value);
+long _InterlockedExchangeAdd_nf(long volatile *_Addend, long _Value);
+long _InterlockedExchangeAdd_rel(long volatile *_Addend, long _Value);
+__int64 _InterlockedExchangeAdd64_acq(__int64 volatile *_Addend,
+ __int64 _Value);
+__int64 _InterlockedExchangeAdd64_nf(__int64 volatile *_Addend, __int64 _Value);
+__int64 _InterlockedExchangeAdd64_rel(__int64 volatile *_Addend,
+ __int64 _Value);
+
+/*----------------------------------------------------------------------------*\
+|* Interlocked Increment
+\*----------------------------------------------------------------------------*/
+short _InterlockedIncrement16_acq(short volatile *_Value);
+short _InterlockedIncrement16_nf(short volatile *_Value);
+short _InterlockedIncrement16_rel(short volatile *_Value);
+long _InterlockedIncrement_acq(long volatile *_Value);
+long _InterlockedIncrement_nf(long volatile *_Value);
+long _InterlockedIncrement_rel(long volatile *_Value);
+__int64 _InterlockedIncrement64_acq(__int64 volatile *_Value);
+__int64 _InterlockedIncrement64_nf(__int64 volatile *_Value);
+__int64 _InterlockedIncrement64_rel(__int64 volatile *_Value);
+
+/*----------------------------------------------------------------------------*\
+|* Interlocked Decrement
+\*----------------------------------------------------------------------------*/
+short _InterlockedDecrement16_acq(short volatile *_Value);
+short _InterlockedDecrement16_nf(short volatile *_Value);
+short _InterlockedDecrement16_rel(short volatile *_Value);
+long _InterlockedDecrement_acq(long volatile *_Value);
+long _InterlockedDecrement_nf(long volatile *_Value);
+long _InterlockedDecrement_rel(long volatile *_Value);
+__int64 _InterlockedDecrement64_acq(__int64 volatile *_Value);
+__int64 _InterlockedDecrement64_nf(__int64 volatile *_Value);
+__int64 _InterlockedDecrement64_rel(__int64 volatile *_Value);
+
+/*----------------------------------------------------------------------------*\
+|* Interlocked And
+\*----------------------------------------------------------------------------*/
+char _InterlockedAnd8_acq(char volatile *_Value, char _Mask);
+char _InterlockedAnd8_nf(char volatile *_Value, char _Mask);
+char _InterlockedAnd8_rel(char volatile *_Value, char _Mask);
+short _InterlockedAnd16_acq(short volatile *_Value, short _Mask);
+short _InterlockedAnd16_nf(short volatile *_Value, short _Mask);
+short _InterlockedAnd16_rel(short volatile *_Value, short _Mask);
+long _InterlockedAnd_acq(long volatile *_Value, long _Mask);
+long _InterlockedAnd_nf(long volatile *_Value, long _Mask);
+long _InterlockedAnd_rel(long volatile *_Value, long _Mask);
+__int64 _InterlockedAnd64_acq(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedAnd64_nf(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedAnd64_rel(__int64 volatile *_Value, __int64 _Mask);
+
+/*----------------------------------------------------------------------------*\
+|* Bit Counting and Testing
+\*----------------------------------------------------------------------------*/
+unsigned char _interlockedbittestandset_acq(long volatile *_BitBase,
+ long _BitPos);
+unsigned char _interlockedbittestandset_nf(long volatile *_BitBase,
+ long _BitPos);
+unsigned char _interlockedbittestandset_rel(long volatile *_BitBase,
+ long _BitPos);
+unsigned char _interlockedbittestandreset_acq(long volatile *_BitBase,
+ long _BitPos);
+unsigned char _interlockedbittestandreset_nf(long volatile *_BitBase,
+ long _BitPos);
+unsigned char _interlockedbittestandreset_rel(long volatile *_BitBase,
+ long _BitPos);
+
+/*----------------------------------------------------------------------------*\
+|* Interlocked Or
+\*----------------------------------------------------------------------------*/
+char _InterlockedOr8_acq(char volatile *_Value, char _Mask);
+char _InterlockedOr8_nf(char volatile *_Value, char _Mask);
+char _InterlockedOr8_rel(char volatile *_Value, char _Mask);
+short _InterlockedOr16_acq(short volatile *_Value, short _Mask);
+short _InterlockedOr16_nf(short volatile *_Value, short _Mask);
+short _InterlockedOr16_rel(short volatile *_Value, short _Mask);
+long _InterlockedOr_acq(long volatile *_Value, long _Mask);
+long _InterlockedOr_nf(long volatile *_Value, long _Mask);
+long _InterlockedOr_rel(long volatile *_Value, long _Mask);
+__int64 _InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedOr64_nf(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedOr64_rel(__int64 volatile *_Value, __int64 _Mask);
+
+/*----------------------------------------------------------------------------*\
+|* Interlocked Xor
+\*----------------------------------------------------------------------------*/
+char _InterlockedXor8_acq(char volatile *_Value, char _Mask);
+char _InterlockedXor8_nf(char volatile *_Value, char _Mask);
+char _InterlockedXor8_rel(char volatile *_Value, char _Mask);
+short _InterlockedXor16_acq(short volatile *_Value, short _Mask);
+short _InterlockedXor16_nf(short volatile *_Value, short _Mask);
+short _InterlockedXor16_rel(short volatile *_Value, short _Mask);
+long _InterlockedXor_acq(long volatile *_Value, long _Mask);
+long _InterlockedXor_nf(long volatile *_Value, long _Mask);
+long _InterlockedXor_rel(long volatile *_Value, long _Mask);
+__int64 _InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedXor64_nf(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedXor64_rel(__int64 volatile *_Value, __int64 _Mask);
+
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange
+\*----------------------------------------------------------------------------*/
+char _InterlockedExchange8_acq(char volatile *_Target, char _Value);
+char _InterlockedExchange8_nf(char volatile *_Target, char _Value);
+char _InterlockedExchange8_rel(char volatile *_Target, char _Value);
+short _InterlockedExchange16_acq(short volatile *_Target, short _Value);
+short _InterlockedExchange16_nf(short volatile *_Target, short _Value);
+short _InterlockedExchange16_rel(short volatile *_Target, short _Value);
+long _InterlockedExchange_acq(long volatile *_Target, long _Value);
+long _InterlockedExchange_nf(long volatile *_Target, long _Value);
+long _InterlockedExchange_rel(long volatile *_Target, long _Value);
+__int64 _InterlockedExchange64_acq(__int64 volatile *_Target, __int64 _Value);
+__int64 _InterlockedExchange64_nf(__int64 volatile *_Target, __int64 _Value);
+__int64 _InterlockedExchange64_rel(__int64 volatile *_Target, __int64 _Value);
+
+/*----------------------------------------------------------------------------*\
+|* Interlocked Compare Exchange
+\*----------------------------------------------------------------------------*/
+char _InterlockedCompareExchange8_acq(char volatile *_Destination,
+ char _Exchange, char _Comparand);
+char _InterlockedCompareExchange8_nf(char volatile *_Destination,
+ char _Exchange, char _Comparand);
+char _InterlockedCompareExchange8_rel(char volatile *_Destination,
+ char _Exchange, char _Comparand);
+short _InterlockedCompareExchange16_acq(short volatile *_Destination,
+ short _Exchange, short _Comparand);
+short _InterlockedCompareExchange16_nf(short volatile *_Destination,
+ short _Exchange, short _Comparand);
+short _InterlockedCompareExchange16_rel(short volatile *_Destination,
+ short _Exchange, short _Comparand);
+long _InterlockedCompareExchange_acq(long volatile *_Destination,
+ long _Exchange, long _Comparand);
+long _InterlockedCompareExchange_nf(long volatile *_Destination, long _Exchange,
+ long _Comparand);
+long _InterlockedCompareExchange_rel(long volatile *_Destination,
+ long _Exchange, long _Comparand);
+__int64 _InterlockedCompareExchange64_acq(__int64 volatile *_Destination,
+ __int64 _Exchange,
+ __int64 _Comparand);
+__int64 _InterlockedCompareExchange64_nf(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand);
+__int64 _InterlockedCompareExchange64_rel(__int64 volatile *_Destination,
+ __int64 _Exchange,
+ __int64 _Comparand);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INTRIN0_H */
+#endif /* _MSC_VER */
diff --git a/contrib/llvm-project/clang/lib/Headers/inttypes.h b/contrib/llvm-project/clang/lib/Headers/inttypes.h
index 1c894c4aca49..5150d22f8b2e 100644
--- a/contrib/llvm-project/clang/lib/Headers/inttypes.h
+++ b/contrib/llvm-project/clang/lib/Headers/inttypes.h
@@ -13,6 +13,9 @@
#if !defined(_AIX) || !defined(_STD_TYPES_T)
#define __CLANG_INTTYPES_H
#endif
+#if defined(__MVS__) && __has_include_next(<inttypes.h>)
+#include_next <inttypes.h>
+#else
#if defined(_MSC_VER) && _MSC_VER < 1800
#error MSVC does not have inttypes.h prior to Visual Studio 2013
@@ -94,4 +97,5 @@
#define SCNxFAST32 "x"
#endif
+#endif /* __MVS__ */
#endif /* __CLANG_INTTYPES_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/iso646.h b/contrib/llvm-project/clang/lib/Headers/iso646.h
index e0a20c6f1891..b53fcd9b4e53 100644
--- a/contrib/llvm-project/clang/lib/Headers/iso646.h
+++ b/contrib/llvm-project/clang/lib/Headers/iso646.h
@@ -9,6 +9,9 @@
#ifndef __ISO646_H
#define __ISO646_H
+#if defined(__MVS__) && __has_include_next(<iso646.h>)
+#include_next <iso646.h>
+#else
#ifndef __cplusplus
#define and &&
@@ -24,4 +27,5 @@
#define xor_eq ^=
#endif
+#endif /* __MVS__ */
#endif /* __ISO646_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/keylockerintrin.h b/contrib/llvm-project/clang/lib/Headers/keylockerintrin.h
index 1994ac42070a..f76e91b4d4b3 100644
--- a/contrib/llvm-project/clang/lib/Headers/keylockerintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/keylockerintrin.h
@@ -28,8 +28,7 @@
#ifndef _KEYLOCKERINTRIN_H
#define _KEYLOCKERINTRIN_H
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__KL__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__KL__)
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS \
@@ -327,11 +326,9 @@ _mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
#undef __DEFAULT_FN_ATTRS
-#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \
- || defined(__KL__) */
+#endif /* !defined(__SCE__ || __has_feature(modules) || defined(__KL__) */
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__WIDEKL__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__WIDEKL__)
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS \
@@ -524,7 +521,7 @@ _mm_aesdecwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void*
#undef __DEFAULT_FN_ATTRS
-#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \
- || defined(__WIDEKL__) */
+#endif /* !defined(__SCE__) || __has_feature(modules) || defined(__WIDEKL__) \
+ */
#endif /* _KEYLOCKERINTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/limits.h b/contrib/llvm-project/clang/lib/Headers/limits.h
index 15e6bbe0abcf..56dffe568486 100644
--- a/contrib/llvm-project/clang/lib/Headers/limits.h
+++ b/contrib/llvm-project/clang/lib/Headers/limits.h
@@ -9,6 +9,10 @@
#ifndef __CLANG_LIMITS_H
#define __CLANG_LIMITS_H
+#if defined(__MVS__) && __has_include_next(<limits.h>)
+#include_next <limits.h>
+#else
+
/* The system's limits.h may, in turn, try to #include_next GCC's limits.h.
Avert this #include_next madness. */
#if defined __GNUC__ && !defined _GCC_LIMITS_H_
@@ -122,4 +126,5 @@
#define ULONG_LONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)
#endif
+#endif /* __MVS__ */
#endif /* __CLANG_LIMITS_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/assert.h b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/assert.h
index de650ca8442a..610ed96a458c 100644
--- a/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/assert.h
+++ b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/assert.h
@@ -1,4 +1,4 @@
-//===-- Wrapper for C standard assert.h declarations on the GPU ------------===//
+//===-- Wrapper for C standard assert.h declarations on the GPU -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/clang/lib/Headers/mm3dnow.h b/contrib/llvm-project/clang/lib/Headers/mm3dnow.h
index 22ab13aa3340..afffba3a9c75 100644
--- a/contrib/llvm-project/clang/lib/Headers/mm3dnow.h
+++ b/contrib/llvm-project/clang/lib/Headers/mm3dnow.h
@@ -7,151 +7,16 @@
*===-----------------------------------------------------------------------===
*/
+// 3dNow intrinsics are no longer supported.
+
#ifndef _MM3DNOW_H_INCLUDED
#define _MM3DNOW_H_INCLUDED
+#ifndef _CLANG_DISABLE_CRT_DEPRECATION_WARNINGS
+#warning "The <mm3dnow.h> header is deprecated, and 3dNow! intrinsics are unsupported. For other intrinsics, include <x86intrin.h>, instead."
+#endif
+
#include <mmintrin.h>
#include <prfchwintrin.h>
-typedef float __v2sf __attribute__((__vector_size__(8)));
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnow"), __min_vector_width__(64)))
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("3dnow")))
-_m_femms(void) {
- __builtin_ia32_femms();
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pavgusb(__m64 __m1, __m64 __m2) {
- return (__m64)__builtin_ia32_pavgusb((__v8qi)__m1, (__v8qi)__m2);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pf2id(__m64 __m) {
- return (__m64)__builtin_ia32_pf2id((__v2sf)__m);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pfacc(__m64 __m1, __m64 __m2) {
- return (__m64)__builtin_ia32_pfacc((__v2sf)__m1, (__v2sf)__m2);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pfadd(__m64 __m1, __m64 __m2) {
- return (__m64)__builtin_ia32_pfadd((__v2sf)__m1, (__v2sf)__m2);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pfcmpeq(__m64 __m1, __m64 __m2) {
- return (__m64)__builtin_ia32_pfcmpeq((__v2sf)__m1, (__v2sf)__m2);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pfcmpge(__m64 __m1, __m64 __m2) {
- return (__m64)__builtin_ia32_pfcmpge((__v2sf)__m1, (__v2sf)__m2);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pfcmpgt(__m64 __m1, __m64 __m2) {
- return (__m64)__builtin_ia32_pfcmpgt((__v2sf)__m1, (__v2sf)__m2);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pfmax(__m64 __m1, __m64 __m2) {
- return (__m64)__builtin_ia32_pfmax((__v2sf)__m1, (__v2sf)__m2);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pfmin(__m64 __m1, __m64 __m2) {
- return (__m64)__builtin_ia32_pfmin((__v2sf)__m1, (__v2sf)__m2);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pfmul(__m64 __m1, __m64 __m2) {
- return (__m64)__builtin_ia32_pfmul((__v2sf)__m1, (__v2sf)__m2);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pfrcp(__m64 __m) {
- return (__m64)__builtin_ia32_pfrcp((__v2sf)__m);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pfrcpit1(__m64 __m1, __m64 __m2) {
- return (__m64)__builtin_ia32_pfrcpit1((__v2sf)__m1, (__v2sf)__m2);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pfrcpit2(__m64 __m1, __m64 __m2) {
- return (__m64)__builtin_ia32_pfrcpit2((__v2sf)__m1, (__v2sf)__m2);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pfrsqrt(__m64 __m) {
- return (__m64)__builtin_ia32_pfrsqrt((__v2sf)__m);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pfrsqrtit1(__m64 __m1, __m64 __m2) {
- return (__m64)__builtin_ia32_pfrsqit1((__v2sf)__m1, (__v2sf)__m2);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pfsub(__m64 __m1, __m64 __m2) {
- return (__m64)__builtin_ia32_pfsub((__v2sf)__m1, (__v2sf)__m2);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pfsubr(__m64 __m1, __m64 __m2) {
- return (__m64)__builtin_ia32_pfsubr((__v2sf)__m1, (__v2sf)__m2);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pi2fd(__m64 __m) {
- return (__m64)__builtin_ia32_pi2fd((__v2si)__m);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pmulhrw(__m64 __m1, __m64 __m2) {
- return (__m64)__builtin_ia32_pmulhrw((__v4hi)__m1, (__v4hi)__m2);
-}
-
-/* Handle the 3dnowa instructions here. */
-#undef __DEFAULT_FN_ATTRS
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnowa"), __min_vector_width__(64)))
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pf2iw(__m64 __m) {
- return (__m64)__builtin_ia32_pf2iw((__v2sf)__m);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pfnacc(__m64 __m1, __m64 __m2) {
- return (__m64)__builtin_ia32_pfnacc((__v2sf)__m1, (__v2sf)__m2);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pfpnacc(__m64 __m1, __m64 __m2) {
- return (__m64)__builtin_ia32_pfpnacc((__v2sf)__m1, (__v2sf)__m2);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pi2fw(__m64 __m) {
- return (__m64)__builtin_ia32_pi2fw((__v2si)__m);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pswapdsf(__m64 __m) {
- return (__m64)__builtin_ia32_pswapdsf((__v2sf)__m);
-}
-
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_m_pswapdsi(__m64 __m) {
- return (__m64)__builtin_ia32_pswapdsi((__v2si)__m);
-}
-
-#undef __DEFAULT_FN_ATTRS
-
#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/mmintrin.h b/contrib/llvm-project/clang/lib/Headers/mmintrin.h
index 08849f01071a..4e154e2d8593 100644
--- a/contrib/llvm-project/clang/lib/Headers/mmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/mmintrin.h
@@ -105,28 +105,23 @@ _mm_cvtm64_si64(__m64 __m)
return (long long)__m;
}
-/// Converts 16-bit signed integers from both 64-bit integer vector
-/// parameters of [4 x i16] into 8-bit signed integer values, and constructs
-/// a 64-bit integer vector of [8 x i8] as the result. Positive values
-/// greater than 0x7F are saturated to 0x7F. Negative values less than 0x80
-/// are saturated to 0x80.
+/// Converts, with saturation, 16-bit signed integers from both 64-bit integer
+/// vector parameters of [4 x i16] into 8-bit signed integer values, and
+/// constructs a 64-bit integer vector of [8 x i8] as the result.
+///
+/// Positive values greater than 0x7F are saturated to 0x7F. Negative values
+/// less than 0x80 are saturated to 0x80.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> PACKSSWB </c> instruction.
///
/// \param __m1
-/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a
-/// 16-bit signed integer and is converted to an 8-bit signed integer with
-/// saturation. Positive values greater than 0x7F are saturated to 0x7F.
-/// Negative values less than 0x80 are saturated to 0x80. The converted
-/// [4 x i8] values are written to the lower 32 bits of the result.
+/// A 64-bit integer vector of [4 x i16]. The converted [4 x i8] values are
+/// written to the lower 32 bits of the result.
/// \param __m2
-/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a
-/// 16-bit signed integer and is converted to an 8-bit signed integer with
-/// saturation. Positive values greater than 0x7F are saturated to 0x7F.
-/// Negative values less than 0x80 are saturated to 0x80. The converted
-/// [4 x i8] values are written to the upper 32 bits of the result.
+/// A 64-bit integer vector of [4 x i16]. The converted [4 x i8] values are
+/// written to the upper 32 bits of the result.
/// \returns A 64-bit integer vector of [8 x i8] containing the converted
/// values.
static __inline__ __m64 __DEFAULT_FN_ATTRS
@@ -135,28 +130,23 @@ _mm_packs_pi16(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_packsswb((__v4hi)__m1, (__v4hi)__m2);
}
-/// Converts 32-bit signed integers from both 64-bit integer vector
-/// parameters of [2 x i32] into 16-bit signed integer values, and constructs
-/// a 64-bit integer vector of [4 x i16] as the result. Positive values
-/// greater than 0x7FFF are saturated to 0x7FFF. Negative values less than
-/// 0x8000 are saturated to 0x8000.
+/// Converts, with saturation, 32-bit signed integers from both 64-bit integer
+/// vector parameters of [2 x i32] into 16-bit signed integer values, and
+/// constructs a 64-bit integer vector of [4 x i16] as the result.
+///
+/// Positive values greater than 0x7FFF are saturated to 0x7FFF. Negative
+/// values less than 0x8000 are saturated to 0x8000.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> PACKSSDW </c> instruction.
///
/// \param __m1
-/// A 64-bit integer vector of [2 x i32]. Each 32-bit element is treated as a
-/// 32-bit signed integer and is converted to a 16-bit signed integer with
-/// saturation. Positive values greater than 0x7FFF are saturated to 0x7FFF.
-/// Negative values less than 0x8000 are saturated to 0x8000. The converted
-/// [2 x i16] values are written to the lower 32 bits of the result.
+/// A 64-bit integer vector of [2 x i32]. The converted [2 x i16] values are
+/// written to the lower 32 bits of the result.
/// \param __m2
-/// A 64-bit integer vector of [2 x i32]. Each 32-bit element is treated as a
-/// 32-bit signed integer and is converted to a 16-bit signed integer with
-/// saturation. Positive values greater than 0x7FFF are saturated to 0x7FFF.
-/// Negative values less than 0x8000 are saturated to 0x8000. The converted
-/// [2 x i16] values are written to the upper 32 bits of the result.
+/// A 64-bit integer vector of [2 x i32]. The converted [2 x i16] values are
+/// written to the upper 32 bits of the result.
/// \returns A 64-bit integer vector of [4 x i16] containing the converted
/// values.
static __inline__ __m64 __DEFAULT_FN_ATTRS
@@ -165,28 +155,23 @@ _mm_packs_pi32(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_packssdw((__v2si)__m1, (__v2si)__m2);
}
-/// Converts 16-bit signed integers from both 64-bit integer vector
-/// parameters of [4 x i16] into 8-bit unsigned integer values, and
-/// constructs a 64-bit integer vector of [8 x i8] as the result. Values
-/// greater than 0xFF are saturated to 0xFF. Values less than 0 are saturated
-/// to 0.
+/// Converts, with saturation, 16-bit signed integers from both 64-bit integer
+/// vector parameters of [4 x i16] into 8-bit unsigned integer values, and
+/// constructs a 64-bit integer vector of [8 x i8] as the result.
+///
+/// Values greater than 0xFF are saturated to 0xFF. Values less than 0 are
+/// saturated to 0.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> PACKUSWB </c> instruction.
///
/// \param __m1
-/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a
-/// 16-bit signed integer and is converted to an 8-bit unsigned integer with
-/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less
-/// than 0 are saturated to 0. The converted [4 x i8] values are written to
-/// the lower 32 bits of the result.
+/// A 64-bit integer vector of [4 x i16]. The converted [4 x i8] values are
+/// written to the lower 32 bits of the result.
/// \param __m2
-/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a
-/// 16-bit signed integer and is converted to an 8-bit unsigned integer with
-/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less
-/// than 0 are saturated to 0. The converted [4 x i8] values are written to
-/// the upper 32 bits of the result.
+/// A 64-bit integer vector of [4 x i16]. The converted [4 x i8] values are
+/// written to the upper 32 bits of the result.
/// \returns A 64-bit integer vector of [8 x i8] containing the converted
/// values.
static __inline__ __m64 __DEFAULT_FN_ATTRS
@@ -400,11 +385,13 @@ _mm_add_pi32(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_paddd((__v2si)__m1, (__v2si)__m2);
}
-/// Adds each 8-bit signed integer element of the first 64-bit integer
-/// vector of [8 x i8] to the corresponding 8-bit signed integer element of
-/// the second 64-bit integer vector of [8 x i8]. Positive sums greater than
-/// 0x7F are saturated to 0x7F. Negative sums less than 0x80 are saturated to
-/// 0x80. The results are packed into a 64-bit integer vector of [8 x i8].
+/// Adds, with saturation, each 8-bit signed integer element of the first
+/// 64-bit integer vector of [8 x i8] to the corresponding 8-bit signed
+/// integer element of the second 64-bit integer vector of [8 x i8].
+///
+/// Positive sums greater than 0x7F are saturated to 0x7F. Negative sums
+/// less than 0x80 are saturated to 0x80. The results are packed into a
+/// 64-bit integer vector of [8 x i8].
///
/// \headerfile <x86intrin.h>
///
@@ -422,12 +409,13 @@ _mm_adds_pi8(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_paddsb((__v8qi)__m1, (__v8qi)__m2);
}
-/// Adds each 16-bit signed integer element of the first 64-bit integer
-/// vector of [4 x i16] to the corresponding 16-bit signed integer element of
-/// the second 64-bit integer vector of [4 x i16]. Positive sums greater than
-/// 0x7FFF are saturated to 0x7FFF. Negative sums less than 0x8000 are
-/// saturated to 0x8000. The results are packed into a 64-bit integer vector
-/// of [4 x i16].
+/// Adds, with saturation, each 16-bit signed integer element of the first
+/// 64-bit integer vector of [4 x i16] to the corresponding 16-bit signed
+/// integer element of the second 64-bit integer vector of [4 x i16].
+///
+/// Positive sums greater than 0x7FFF are saturated to 0x7FFF. Negative sums
+/// less than 0x8000 are saturated to 0x8000. The results are packed into a
+/// 64-bit integer vector of [4 x i16].
///
/// \headerfile <x86intrin.h>
///
@@ -445,11 +433,12 @@ _mm_adds_pi16(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2);
}
-/// Adds each 8-bit unsigned integer element of the first 64-bit integer
-/// vector of [8 x i8] to the corresponding 8-bit unsigned integer element of
-/// the second 64-bit integer vector of [8 x i8]. Sums greater than 0xFF are
-/// saturated to 0xFF. The results are packed into a 64-bit integer vector of
-/// [8 x i8].
+/// Adds, with saturation, each 8-bit unsigned integer element of the first
+/// 64-bit integer vector of [8 x i8] to the corresponding 8-bit unsigned
+/// integer element of the second 64-bit integer vector of [8 x i8].
+///
+/// Sums greater than 0xFF are saturated to 0xFF. The results are packed
+/// into a 64-bit integer vector of [8 x i8].
///
/// \headerfile <x86intrin.h>
///
@@ -467,11 +456,12 @@ _mm_adds_pu8(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_paddusb((__v8qi)__m1, (__v8qi)__m2);
}
-/// Adds each 16-bit unsigned integer element of the first 64-bit integer
-/// vector of [4 x i16] to the corresponding 16-bit unsigned integer element
-/// of the second 64-bit integer vector of [4 x i16]. Sums greater than
-/// 0xFFFF are saturated to 0xFFFF. The results are packed into a 64-bit
-/// integer vector of [4 x i16].
+/// Adds, with saturation, each 16-bit unsigned integer element of the first
+/// 64-bit integer vector of [4 x i16] to the corresponding 16-bit unsigned
+/// integer element of the second 64-bit integer vector of [4 x i16].
+///
+/// Sums greater than 0xFFFF are saturated to 0xFFFF. The results are packed
+/// into a 64-bit integer vector of [4 x i16].
///
/// \headerfile <x86intrin.h>
///
@@ -552,12 +542,13 @@ _mm_sub_pi32(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_psubd((__v2si)__m1, (__v2si)__m2);
}
-/// Subtracts each 8-bit signed integer element of the second 64-bit
-/// integer vector of [8 x i8] from the corresponding 8-bit signed integer
-/// element of the first 64-bit integer vector of [8 x i8]. Positive results
-/// greater than 0x7F are saturated to 0x7F. Negative results less than 0x80
-/// are saturated to 0x80. The results are packed into a 64-bit integer
-/// vector of [8 x i8].
+/// Subtracts, with saturation, each 8-bit signed integer element of the second
+/// 64-bit integer vector of [8 x i8] from the corresponding 8-bit signed
+/// integer element of the first 64-bit integer vector of [8 x i8].
+///
+/// Positive results greater than 0x7F are saturated to 0x7F. Negative
+/// results less than 0x80 are saturated to 0x80. The results are packed
+/// into a 64-bit integer vector of [8 x i8].
///
/// \headerfile <x86intrin.h>
///
@@ -575,12 +566,13 @@ _mm_subs_pi8(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_psubsb((__v8qi)__m1, (__v8qi)__m2);
}
-/// Subtracts each 16-bit signed integer element of the second 64-bit
-/// integer vector of [4 x i16] from the corresponding 16-bit signed integer
-/// element of the first 64-bit integer vector of [4 x i16]. Positive results
-/// greater than 0x7FFF are saturated to 0x7FFF. Negative results less than
-/// 0x8000 are saturated to 0x8000. The results are packed into a 64-bit
-/// integer vector of [4 x i16].
+/// Subtracts, with saturation, each 16-bit signed integer element of the
+/// second 64-bit integer vector of [4 x i16] from the corresponding 16-bit
+/// signed integer element of the first 64-bit integer vector of [4 x i16].
+///
+/// Positive results greater than 0x7FFF are saturated to 0x7FFF. Negative
+/// results less than 0x8000 are saturated to 0x8000. The results are packed
+/// into a 64-bit integer vector of [4 x i16].
///
/// \headerfile <x86intrin.h>
///
@@ -1149,7 +1141,7 @@ _mm_xor_si64(__m64 __m1, __m64 __m2)
/// [8 x i8] to determine if the element of the first vector is equal to the
/// corresponding element of the second vector.
///
-/// The comparison yields 0 for false, 0xFF for true.
+/// Each comparison returns 0 for false, 0xFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -1171,7 +1163,7 @@ _mm_cmpeq_pi8(__m64 __m1, __m64 __m2)
/// [4 x i16] to determine if the element of the first vector is equal to the
/// corresponding element of the second vector.
///
-/// The comparison yields 0 for false, 0xFFFF for true.
+/// Each comparison returns 0 for false, 0xFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -1193,7 +1185,7 @@ _mm_cmpeq_pi16(__m64 __m1, __m64 __m2)
/// [2 x i32] to determine if the element of the first vector is equal to the
/// corresponding element of the second vector.
///
-/// The comparison yields 0 for false, 0xFFFFFFFF for true.
+/// Each comparison returns 0 for false, 0xFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -1215,7 +1207,7 @@ _mm_cmpeq_pi32(__m64 __m1, __m64 __m2)
/// [8 x i8] to determine if the element of the first vector is greater than
/// the corresponding element of the second vector.
///
-/// The comparison yields 0 for false, 0xFF for true.
+/// Each comparison returns 0 for false, 0xFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -1237,7 +1229,7 @@ _mm_cmpgt_pi8(__m64 __m1, __m64 __m2)
/// [4 x i16] to determine if the element of the first vector is greater than
/// the corresponding element of the second vector.
///
-/// The comparison yields 0 for false, 0xFFFF for true.
+/// Each comparison returns 0 for false, 0xFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -1259,7 +1251,7 @@ _mm_cmpgt_pi16(__m64 __m1, __m64 __m2)
/// [2 x i32] to determine if the element of the first vector is greater than
/// the corresponding element of the second vector.
///
-/// The comparison yields 0 for false, 0xFFFFFFFF for true.
+/// Each comparison returns 0 for false, 0xFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
diff --git a/contrib/llvm-project/clang/lib/Headers/module.modulemap b/contrib/llvm-project/clang/lib/Headers/module.modulemap
index 56a13f69bc05..9ffc249c8d1a 100644
--- a/contrib/llvm-project/clang/lib/Headers/module.modulemap
+++ b/contrib/llvm-project/clang/lib/Headers/module.modulemap
@@ -44,7 +44,6 @@ module _Builtin_intrinsics [system] [extern_c] {
textual header "avxintrin.h"
textual header "avx2intrin.h"
textual header "avx512fintrin.h"
- textual header "avx512erintrin.h"
textual header "fmaintrin.h"
header "x86intrin.h"
@@ -203,6 +202,11 @@ module _Builtin_stdarg [system] {
export *
}
+ explicit module header_macro {
+ header "__stdarg_header_macro.h"
+ export *
+ }
+
explicit module va_arg {
header "__stdarg_va_arg.h"
export *
@@ -232,6 +236,10 @@ module _Builtin_stdbool [system] {
module _Builtin_stddef [system] {
textual header "stddef.h"
+ explicit module header_macro {
+ header "__stddef_header_macro.h"
+ export *
+ }
// __stddef_max_align_t.h is always in this module, even if
// -fbuiltin-headers-in-system-modules is passed.
explicit module max_align_t {
@@ -315,3 +323,8 @@ module opencl_c {
header "opencl-c.h"
header "opencl-c-base.h"
}
+
+module ptrauth {
+ header "ptrauth.h"
+ export *
+}
diff --git a/contrib/llvm-project/clang/lib/Headers/opencl-c-base.h b/contrib/llvm-project/clang/lib/Headers/opencl-c-base.h
index 2494f6213fc5..786678b9d8a7 100644
--- a/contrib/llvm-project/clang/lib/Headers/opencl-c-base.h
+++ b/contrib/llvm-project/clang/lib/Headers/opencl-c-base.h
@@ -46,6 +46,10 @@
#define __opencl_c_ext_fp32_global_atomic_min_max 1
#define __opencl_c_ext_fp32_local_atomic_min_max 1
#define __opencl_c_ext_image_raw10_raw12 1
+#define cl_khr_kernel_clock 1
+#define __opencl_c_kernel_clock_scope_device 1
+#define __opencl_c_kernel_clock_scope_work_group 1
+#define __opencl_c_kernel_clock_scope_sub_group 1
#endif // defined(__SPIR__) || defined(__SPIRV__)
#endif // (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ >= 200)
diff --git a/contrib/llvm-project/clang/lib/Headers/opencl-c.h b/contrib/llvm-project/clang/lib/Headers/opencl-c.h
index 288bb18bc654..20719b74b6b8 100644
--- a/contrib/llvm-project/clang/lib/Headers/opencl-c.h
+++ b/contrib/llvm-project/clang/lib/Headers/opencl-c.h
@@ -17314,6 +17314,21 @@ half __ovld __conv sub_group_clustered_rotate(half, int, uint);
#endif // cl_khr_fp16
#endif // cl_khr_subgroup_rotate
+#if defined(cl_khr_kernel_clock)
+#if defined(__opencl_c_kernel_clock_scope_device)
+ulong __ovld clock_read_device();
+uint2 __ovld clock_read_hilo_device();
+#endif // __opencl_c_kernel_clock_scope_device
+#if defined(__opencl_c_kernel_clock_scope_work_group)
+ulong __ovld clock_read_work_group();
+uint2 __ovld clock_read_hilo_work_group();
+#endif // __opencl_c_kernel_clock_scope_work_group
+#if defined(__opencl_c_kernel_clock_scope_sub_group)
+ulong __ovld clock_read_sub_group();
+uint2 __ovld clock_read_hilo_sub_group();
+#endif // __opencl_c_kernel_clock_scope_sub_group
+#endif // cl_khr_kernel_clock
+
#if defined(cl_intel_subgroups)
// Intel-Specific Sub Group Functions
float __ovld __conv intel_sub_group_shuffle( float , uint );
diff --git a/contrib/llvm-project/clang/lib/Headers/prfchwintrin.h b/contrib/llvm-project/clang/lib/Headers/prfchwintrin.h
index d2f91aa0123e..eaea5f3cf8fe 100644
--- a/contrib/llvm-project/clang/lib/Headers/prfchwintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/prfchwintrin.h
@@ -8,16 +8,17 @@
*/
#if !defined(__X86INTRIN_H) && !defined(_MM3DNOW_H_INCLUDED)
-#error "Never use <prfchwintrin.h> directly; include <x86intrin.h> or <mm3dnow.h> instead."
+#error "Never use <prfchwintrin.h> directly; include <x86intrin.h> instead."
#endif
#ifndef __PRFCHWINTRIN_H
#define __PRFCHWINTRIN_H
/// Loads a memory sequence containing the specified memory address into
-/// all data cache levels. The cache-coherency state is set to exclusive.
-/// Data can be read from and written to the cache line without additional
-/// delay.
+/// all data cache levels.
+///
+/// The cache-coherency state is set to exclusive. Data can be read from
+/// and written to the cache line without additional delay.
///
/// \headerfile <x86intrin.h>
///
@@ -32,10 +33,11 @@ _m_prefetch(void *__P)
}
/// Loads a memory sequence containing the specified memory address into
-/// the L1 data cache and sets the cache-coherency to modified. This
-/// provides a hint to the processor that the cache line will be modified.
-/// It is intended for use when the cache line will be written to shortly
-/// after the prefetch is performed.
+/// the L1 data cache and sets the cache-coherency state to modified.
+///
+/// This provides a hint to the processor that the cache line will be
+/// modified. It is intended for use when the cache line will be written to
+/// shortly after the prefetch is performed.
///
/// Note that the effect of this intrinsic is dependent on the processor
/// implementation.
diff --git a/contrib/llvm-project/clang/lib/Headers/ptrauth.h b/contrib/llvm-project/clang/lib/Headers/ptrauth.h
new file mode 100644
index 000000000000..154b599862a8
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/ptrauth.h
@@ -0,0 +1,330 @@
+/*===---- ptrauth.h - Pointer authentication -------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __PTRAUTH_H
+#define __PTRAUTH_H
+
+typedef enum {
+ ptrauth_key_asia = 0,
+ ptrauth_key_asib = 1,
+ ptrauth_key_asda = 2,
+ ptrauth_key_asdb = 3,
+
+ /* A process-independent key which can be used to sign code pointers. */
+ ptrauth_key_process_independent_code = ptrauth_key_asia,
+
+ /* A process-specific key which can be used to sign code pointers. */
+ ptrauth_key_process_dependent_code = ptrauth_key_asib,
+
+ /* A process-independent key which can be used to sign data pointers. */
+ ptrauth_key_process_independent_data = ptrauth_key_asda,
+
+ /* A process-specific key which can be used to sign data pointers. */
+ ptrauth_key_process_dependent_data = ptrauth_key_asdb,
+
+ /* The key used to sign return addresses on the stack.
+ The extra data is based on the storage address of the return address.
+ On AArch64, that is always the storage address of the return address + 8
+ (or, in other words, the value of the stack pointer on function entry) */
+ ptrauth_key_return_address = ptrauth_key_process_dependent_code,
+
+ /* The key used to sign C function pointers.
+ The extra data is always 0. */
+ ptrauth_key_function_pointer = ptrauth_key_process_independent_code,
+
+ /* The key used to sign C++ v-table pointers.
+ The extra data is always 0. */
+ ptrauth_key_cxx_vtable_pointer = ptrauth_key_process_independent_data,
+
+ /* Other pointers signed under the ABI use private ABI rules. */
+
+} ptrauth_key;
+
+/* An integer type of the appropriate size for a discriminator argument. */
+typedef __UINTPTR_TYPE__ ptrauth_extra_data_t;
+
+/* An integer type of the appropriate size for a generic signature. */
+typedef __UINTPTR_TYPE__ ptrauth_generic_signature_t;
+
+/* A signed pointer value embeds the original pointer together with
+ a signature that attests to the validity of that pointer. Because
+ this signature must use only "spare" bits of the pointer, a
+ signature's validity is probabilistic in practice: it is unlikely
+ but still plausible that an invalidly-derived signature will
+ somehow equal the correct signature and therefore successfully
+ authenticate. Nonetheless, this scheme provides a strong degree
+ of protection against certain kinds of attacks. */
+
+/* Authenticating a pointer that was not signed with the given key
+ and extra-data value will (likely) fail by trapping. */
+
+/* The null function pointer is always the all-zero bit pattern.
+ Signing an all-zero bit pattern will embed a (likely) non-zero
+ signature in the result, and so the result will not seem to be
+ a null function pointer. Authenticating this value will yield
+ a null function pointer back. However, authenticating an
+ all-zero bit pattern will probably fail, because the
+ authentication will expect a (likely) non-zero signature to
+ embedded in the value.
+
+ Because of this, if a pointer may validly be null, you should
+ check for null before attempting to authenticate it with one
+ of these intrinsics. This is not necessary when using the
+ __ptrauth qualifier; the compiler will perform this check
+ automatically. */
+
+#if __has_feature(ptrauth_intrinsics)
+
+/* Strip the signature from a value without authenticating it.
+
+ If the value is a function pointer, the result will not be a
+ legal function pointer because of the missing signature, and
+ attempting to call it will result in an authentication failure.
+
+ The value must be an expression of pointer type.
+ The key must be a constant expression of type ptrauth_key.
+ The result will have the same type as the original value. */
+#define ptrauth_strip(__value, __key) __builtin_ptrauth_strip(__value, __key)
+
+/* Blend a constant discriminator into the given pointer-like value
+ to form a new discriminator. Not all bits of the inputs are
+ guaranteed to contribute to the result.
+
+ On arm64e, the integer must fall within the range of a uint16_t;
+ other bits may be ignored.
+
+ For the purposes of ptrauth_sign_constant, the result of calling
+ this function is considered a constant expression if the arguments
+ are constant. Some restrictions may be imposed on the pointer.
+
+ The first argument must be an expression of pointer type.
+ The second argument must be an expression of integer type.
+ The result will have type uintptr_t. */
+#define ptrauth_blend_discriminator(__pointer, __integer) \
+ __builtin_ptrauth_blend_discriminator(__pointer, __integer)
+
+/* Return a signed pointer for a constant address in a manner which guarantees
+ a non-attackable sequence.
+
+ The value must be a constant expression of pointer type which evaluates to
+ a non-null pointer.
+ The key must be a constant expression of type ptrauth_key.
+ The extra data must be a constant expression of pointer or integer type;
+ if an integer, it will be coerced to ptrauth_extra_data_t.
+ The result will have the same type as the original value.
+
+ This can be used in constant expressions. */
+#define ptrauth_sign_constant(__value, __key, __data) \
+ __builtin_ptrauth_sign_constant(__value, __key, __data)
+
+/* Add a signature to the given pointer value using a specific key,
+ using the given extra data as a salt to the signing process.
+
+ This operation does not authenticate the original value and is
+ therefore potentially insecure if an attacker could possibly
+ control that value.
+
+ The value must be an expression of pointer type.
+ The key must be a constant expression of type ptrauth_key.
+ The extra data must be an expression of pointer or integer type;
+ if an integer, it will be coerced to ptrauth_extra_data_t.
+ The result will have the same type as the original value. */
+#define ptrauth_sign_unauthenticated(__value, __key, __data) \
+ __builtin_ptrauth_sign_unauthenticated(__value, __key, __data)
+
+/* Authenticate a pointer using one scheme and resign it using another.
+
+ If the result is subsequently authenticated using the new scheme, that
+ authentication is guaranteed to fail if and only if the initial
+ authentication failed.
+
+ The value must be an expression of pointer type.
+ The key must be a constant expression of type ptrauth_key.
+ The extra data must be an expression of pointer or integer type;
+ if an integer, it will be coerced to ptrauth_extra_data_t.
+ The result will have the same type as the original value.
+
+ This operation is guaranteed to not leave the intermediate value
+ available for attack before it is re-signed.
+
+ Do not pass a null pointer to this function. A null pointer
+ will not successfully authenticate.
+
+ This operation traps if the authentication fails. */
+#define ptrauth_auth_and_resign(__value, __old_key, __old_data, __new_key, \
+ __new_data) \
+ __builtin_ptrauth_auth_and_resign(__value, __old_key, __old_data, __new_key, \
+ __new_data)
+
+/* Authenticate a pointer using one scheme and resign it as a C
+ function pointer.
+
+ If the result is subsequently authenticated using the new scheme, that
+ authentication is guaranteed to fail if and only if the initial
+ authentication failed.
+
+ The value must be an expression of function pointer type.
+ The key must be a constant expression of type ptrauth_key.
+ The extra data must be an expression of pointer or integer type;
+ if an integer, it will be coerced to ptrauth_extra_data_t.
+ The result will have the same type as the original value.
+
+ This operation is guaranteed to not leave the intermediate value
+ available for attack before it is re-signed. Additionally, if this
+ expression is used syntactically as the function expression in a
+ call, only a single authentication will be performed. */
+#define ptrauth_auth_function(__value, __old_key, __old_data) \
+ ptrauth_auth_and_resign(__value, __old_key, __old_data, \
+ ptrauth_key_function_pointer, 0)
+
+/* Authenticate a data pointer.
+
+ The value must be an expression of non-function pointer type.
+ The key must be a constant expression of type ptrauth_key.
+ The extra data must be an expression of pointer or integer type;
+ if an integer, it will be coerced to ptrauth_extra_data_t.
+ The result will have the same type as the original value.
+
+ This operation traps if the authentication fails. */
+#define ptrauth_auth_data(__value, __old_key, __old_data) \
+ __builtin_ptrauth_auth(__value, __old_key, __old_data)
+
+/* Compute a constant discriminator from the given string.
+
+ The argument must be a string literal of char character type. The result
+ has type ptrauth_extra_data_t.
+
+ The result value is never zero and always within range for both the
+ __ptrauth qualifier and ptrauth_blend_discriminator.
+
+ This can be used in constant expressions.
+*/
+#define ptrauth_string_discriminator(__string) \
+ __builtin_ptrauth_string_discriminator(__string)
+
+/* Compute a constant discriminator from the given type.
+
+ The result can be used as the second argument to
+ ptrauth_blend_discriminator or the third argument to the
+ __ptrauth qualifier. It has type size_t.
+
+ If the type is a C++ member function pointer type, the result is
+   the discriminator used to sign member function pointers of that
+ type. If the type is a function, function pointer, or function
+ reference type, the result is the discriminator used to sign
+ functions of that type. It is ill-formed to use this macro with any
+ other type.
+
+ A call to this function is an integer constant expression. */
+#define ptrauth_type_discriminator(__type) \
+ __builtin_ptrauth_type_discriminator(__type)
+
+/* Compute a signature for the given pair of pointer-sized values.
+ The order of the arguments is significant.
+
+ Like a pointer signature, the resulting signature depends on
+ private key data and therefore should not be reliably reproducible
+ by attackers. That means that this can be used to validate the
+ integrity of arbitrary data by storing a signature for that data
+ alongside it, then checking that the signature is still valid later.
+ Data which exceeds two pointers in size can be signed by either
+ computing a tree of generic signatures or just signing an ordinary
+ cryptographic hash of the data.
+
+ The result has type ptrauth_generic_signature_t. However, it may
+ not have as many bits of entropy as that type's width would suggest;
+ some implementations are known to compute a compressed signature as
+ if the arguments were a pointer and a discriminator.
+
+ The arguments must be either pointers or integers; if integers, they
+   will be coerced to uintptr_t. */
+#define ptrauth_sign_generic_data(__value, __data) \
+ __builtin_ptrauth_sign_generic_data(__value, __data)
+
+/* C++ vtable pointer signing class attribute */
+#define ptrauth_cxx_vtable_pointer(key, address_discrimination, \
+ extra_discrimination...) \
+ [[clang::ptrauth_vtable_pointer(key, address_discrimination, \
+ extra_discrimination)]]
+
+#else
+
+#define ptrauth_strip(__value, __key) \
+ ({ \
+ (void)__key; \
+ __value; \
+ })
+
+#define ptrauth_blend_discriminator(__pointer, __integer) \
+ ({ \
+ (void)__pointer; \
+ (void)__integer; \
+ ((ptrauth_extra_data_t)0); \
+ })
+
+#define ptrauth_sign_constant(__value, __key, __data) \
+ ({ \
+ (void)__key; \
+ (void)__data; \
+ __value; \
+ })
+
+#define ptrauth_sign_unauthenticated(__value, __key, __data) \
+ ({ \
+ (void)__key; \
+ (void)__data; \
+ __value; \
+ })
+
+#define ptrauth_auth_and_resign(__value, __old_key, __old_data, __new_key, \
+ __new_data) \
+ ({ \
+ (void)__old_key; \
+ (void)__old_data; \
+ (void)__new_key; \
+ (void)__new_data; \
+ __value; \
+ })
+
+#define ptrauth_auth_function(__value, __old_key, __old_data) \
+ ({ \
+ (void)__old_key; \
+ (void)__old_data; \
+ __value; \
+ })
+
+#define ptrauth_auth_data(__value, __old_key, __old_data) \
+ ({ \
+ (void)__old_key; \
+ (void)__old_data; \
+ __value; \
+ })
+
+#define ptrauth_string_discriminator(__string) \
+ ({ \
+ (void)__string; \
+ ((ptrauth_extra_data_t)0); \
+ })
+
+#define ptrauth_type_discriminator(__type) ((ptrauth_extra_data_t)0)
+
+#define ptrauth_sign_generic_data(__value, __data) \
+ ({ \
+ (void)__value; \
+ (void)__data; \
+ ((ptrauth_generic_signature_t)0); \
+ })
+
+
+#define ptrauth_cxx_vtable_pointer(key, address_discrimination, \
+ extra_discrimination...)
+
+#endif /* __has_feature(ptrauth_intrinsics) */
+
+#endif /* __PTRAUTH_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/sifive_vector.h b/contrib/llvm-project/clang/lib/Headers/sifive_vector.h
index 42d7224db614..4e67ad6fca26 100644
--- a/contrib/llvm-project/clang/lib/Headers/sifive_vector.h
+++ b/contrib/llvm-project/clang/lib/Headers/sifive_vector.h
@@ -13,4 +13,106 @@
#pragma clang riscv intrinsic sifive_vector
+#define __riscv_sf_vc_x_se_u8mf4(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 6, vl)
+#define __riscv_sf_vc_x_se_u8mf2(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 7, vl)
+#define __riscv_sf_vc_x_se_u8m1(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 0, vl)
+#define __riscv_sf_vc_x_se_u8m2(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 1, vl)
+#define __riscv_sf_vc_x_se_u8m4(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 2, vl)
+#define __riscv_sf_vc_x_se_u8m8(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 3, vl)
+
+#define __riscv_sf_vc_x_se_u16mf2(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 7, vl)
+#define __riscv_sf_vc_x_se_u16m1(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 0, vl)
+#define __riscv_sf_vc_x_se_u16m2(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 1, vl)
+#define __riscv_sf_vc_x_se_u16m4(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 2, vl)
+#define __riscv_sf_vc_x_se_u16m8(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 3, vl)
+
+#define __riscv_sf_vc_x_se_u32m1(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint32_t)rs1, 32, 0, vl)
+#define __riscv_sf_vc_x_se_u32m2(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint32_t)rs1, 32, 1, vl)
+#define __riscv_sf_vc_x_se_u32m4(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint32_t)rs1, 32, 2, vl)
+#define __riscv_sf_vc_x_se_u32m8(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint32_t)rs1, 32, 3, vl)
+
+#define __riscv_sf_vc_i_se_u8mf4(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 6, vl)
+#define __riscv_sf_vc_i_se_u8mf2(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 7, vl)
+#define __riscv_sf_vc_i_se_u8m1(p27_26, p24_20, p11_7, simm5, vl) \
+ __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 0, vl)
+#define __riscv_sf_vc_i_se_u8m2(p27_26, p24_20, p11_7, simm5, vl) \
+ __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 1, vl)
+#define __riscv_sf_vc_i_se_u8m4(p27_26, p24_20, p11_7, simm5, vl) \
+ __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 2, vl)
+#define __riscv_sf_vc_i_se_u8m8(p27_26, p24_20, p11_7, simm5, vl) \
+ __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 3, vl)
+
+#define __riscv_sf_vc_i_se_u16mf2(p27_26, p24_20, p11_7, simm5, vl) \
+ __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 7, vl)
+#define __riscv_sf_vc_i_se_u16m1(p27_26, p24_20, p11_7, simm5, vl) \
+ __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 0, vl)
+#define __riscv_sf_vc_i_se_u16m2(p27_26, p24_20, p11_7, simm5, vl) \
+ __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 1, vl)
+#define __riscv_sf_vc_i_se_u16m4(p27_26, p24_20, p11_7, simm5, vl) \
+ __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 2, vl)
+#define __riscv_sf_vc_i_se_u16m8(p27_26, p24_20, p11_7, simm5, vl) \
+ __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 3, vl)
+
+#define __riscv_sf_vc_i_se_u32m1(p27_26, p24_20, p11_7, simm5, vl) \
+ __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 0, vl)
+#define __riscv_sf_vc_i_se_u32m2(p27_26, p24_20, p11_7, simm5, vl) \
+ __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 1, vl)
+#define __riscv_sf_vc_i_se_u32m4(p27_26, p24_20, p11_7, simm5, vl) \
+ __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 2, vl)
+#define __riscv_sf_vc_i_se_u32m8(p27_26, p24_20, p11_7, simm5, vl) \
+ __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 3, vl)
+
+#if __riscv_v_elen >= 64
+#define __riscv_sf_vc_x_se_u8mf8(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 5, vl)
+#define __riscv_sf_vc_x_se_u16mf4(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 6, vl)
+#define __riscv_sf_vc_x_se_u32mf2(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint32_t)rs1, 32, 7, vl)
+
+#define __riscv_sf_vc_i_se_u8mf8(p27_26, p24_20, p11_7, simm5, vl) \
+ __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 5, vl)
+#define __riscv_sf_vc_i_se_u16mf4(p27_26, p24_20, p11_7, simm5, vl) \
+ __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 6, vl)
+#define __riscv_sf_vc_i_se_u32mf2(p27_26, p24_20, p11_7, simm5, vl) \
+ __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 7, vl)
+
+#define __riscv_sf_vc_i_se_u64m1(p27_26, p24_20, p11_7, simm5, vl) \
+ __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 64, 0, vl)
+#define __riscv_sf_vc_i_se_u64m2(p27_26, p24_20, p11_7, simm5, vl) \
+ __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 64, 1, vl)
+#define __riscv_sf_vc_i_se_u64m4(p27_26, p24_20, p11_7, simm5, vl) \
+ __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 64, 2, vl)
+#define __riscv_sf_vc_i_se_u64m8(p27_26, p24_20, p11_7, simm5, vl) \
+ __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 64, 3, vl)
+
+#if __riscv_xlen >= 64
+#define __riscv_sf_vc_x_se_u64m1(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint64_t)rs1, 64, 0, vl)
+#define __riscv_sf_vc_x_se_u64m2(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint64_t)rs1, 64, 1, vl)
+#define __riscv_sf_vc_x_se_u64m4(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint64_t)rs1, 64, 2, vl)
+#define __riscv_sf_vc_x_se_u64m8(p27_26, p24_20, p11_7, rs1, vl) \
+ __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint64_t)rs1, 64, 3, vl)
+#endif
+#endif
+
#endif //_SIFIVE_VECTOR_H_
diff --git a/contrib/llvm-project/clang/lib/Headers/smmintrin.h b/contrib/llvm-project/clang/lib/Headers/smmintrin.h
index 005d7db9c3c3..b3fec474e35a 100644
--- a/contrib/llvm-project/clang/lib/Headers/smmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/smmintrin.h
@@ -1188,6 +1188,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_testnzc_si128(__m128i __M,
/// Compares each of the corresponding 64-bit values of the 128-bit
/// integer vectors for equality.
///
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VPCMPEQQ / PCMPEQQ </c> instruction.
@@ -1431,8 +1433,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V) {
}
/* SSE4 Pack with Unsigned Saturation. */
-/// Converts 32-bit signed integers from both 128-bit integer vector
-/// operands into 16-bit unsigned integers, and returns the packed result.
+/// Converts, with saturation, 32-bit signed integers from both 128-bit integer
+/// vector operands into 16-bit unsigned integers, and returns the packed
+/// result.
+///
/// Values greater than 0xFFFF are saturated to 0xFFFF. Values less than
/// 0x0000 are saturated to 0x0000.
///
@@ -1441,17 +1445,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V) {
/// This intrinsic corresponds to the <c> VPACKUSDW / PACKUSDW </c> instruction.
///
/// \param __V1
-/// A 128-bit vector of [4 x i32]. Each 32-bit element is treated as a
-/// signed integer and is converted to a 16-bit unsigned integer with
-/// saturation. Values greater than 0xFFFF are saturated to 0xFFFF. Values
-/// less than 0x0000 are saturated to 0x0000. The converted [4 x i16] values
-/// are written to the lower 64 bits of the result.
+/// A 128-bit vector of [4 x i32]. The converted [4 x i16] values are
+/// written to the lower 64 bits of the result.
/// \param __V2
-/// A 128-bit vector of [4 x i32]. Each 32-bit element is treated as a
-/// signed integer and is converted to a 16-bit unsigned integer with
-/// saturation. Values greater than 0xFFFF are saturated to 0xFFFF. Values
-/// less than 0x0000 are saturated to 0x0000. The converted [4 x i16] values
-/// are written to the higher 64 bits of the result.
+/// A 128-bit vector of [4 x i32]. The converted [4 x i16] values are
+/// written to the higher 64 bits of the result.
/// \returns A 128-bit vector of [8 x i16] containing the converted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi32(__m128i __V1,
__m128i __V2) {
@@ -2305,6 +2303,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V) {
/// integer vectors to determine if the values in the first operand are
/// greater than those in the second operand.
///
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VPCMPGTQ / PCMPGTQ </c> instruction.
diff --git a/contrib/llvm-project/clang/lib/Headers/stdalign.h b/contrib/llvm-project/clang/lib/Headers/stdalign.h
index 158508e65d2b..56cdfa52d4ba 100644
--- a/contrib/llvm-project/clang/lib/Headers/stdalign.h
+++ b/contrib/llvm-project/clang/lib/Headers/stdalign.h
@@ -10,6 +10,10 @@
#ifndef __STDALIGN_H
#define __STDALIGN_H
+#if defined(__MVS__) && __has_include_next(<stdalign.h>)
+#include_next <stdalign.h>
+#else
+
#if defined(__cplusplus) || \
(defined(__STDC_VERSION__) && __STDC_VERSION__ < 202311L)
#ifndef __cplusplus
@@ -21,4 +25,5 @@
#define __alignof_is_defined 1
#endif /* __STDC_VERSION__ */
+#endif /* __MVS__ */
#endif /* __STDALIGN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/stdarg.h b/contrib/llvm-project/clang/lib/Headers/stdarg.h
index 94b066566f08..6203d7a600a2 100644
--- a/contrib/llvm-project/clang/lib/Headers/stdarg.h
+++ b/contrib/llvm-project/clang/lib/Headers/stdarg.h
@@ -14,29 +14,24 @@
* need to use some of its interfaces. Otherwise this header provides all of
* the expected interfaces.
*
- * When clang modules are enabled, this header is a textual header. It ignores
- * its header guard so that multiple submodules can export its interfaces.
- * Take module SM with submodules A and B, whose headers both include stdarg.h
- * When SM.A builds, __STDARG_H will be defined. When SM.B builds, the
- * definition from SM.A will leak when building without local submodule
- * visibility. stdarg.h wouldn't include any of its implementation headers, and
- * SM.B wouldn't import any of the stdarg modules, and SM.B's `export *`
- * wouldn't export any stdarg interfaces as expected. However, since stdarg.h
- * ignores its header guard when building with modules, it all works as
- * expected.
- *
- * When clang modules are not enabled, the header guards can function in the
- * normal simple fashion.
+ * When clang modules are enabled, this header is a textual header to support
+ * the multiple include behavior. As such, it doesn't directly declare anything
+ * so that it doesn't add duplicate declarations to all of its includers'
+ * modules.
*/
-#if !defined(__STDARG_H) || __has_feature(modules) || \
- defined(__need___va_list) || defined(__need_va_list) || \
- defined(__need_va_arg) || defined(__need___va_copy) || \
- defined(__need_va_copy)
+#if defined(__MVS__) && __has_include_next(<stdarg.h>)
+#undef __need___va_list
+#undef __need_va_list
+#undef __need_va_arg
+#undef __need___va_copy
+#undef __need_va_copy
+#include <__stdarg_header_macro.h>
+#include_next <stdarg.h>
+#else
#if !defined(__need___va_list) && !defined(__need_va_list) && \
!defined(__need_va_arg) && !defined(__need___va_copy) && \
!defined(__need_va_copy)
-#define __STDARG_H
#define __need___va_list
#define __need_va_list
#define __need_va_arg
@@ -49,6 +44,7 @@
!defined(__STRICT_ANSI__)
#define __need_va_copy
#endif
+#include <__stdarg_header_macro.h>
#endif
#ifdef __need___va_list
@@ -76,4 +72,4 @@
#undef __need_va_copy
#endif /* defined(__need_va_copy) */
-#endif
+#endif /* __MVS__ */
diff --git a/contrib/llvm-project/clang/lib/Headers/stdatomic.h b/contrib/llvm-project/clang/lib/Headers/stdatomic.h
index 521c473dd169..1991351f9e9e 100644
--- a/contrib/llvm-project/clang/lib/Headers/stdatomic.h
+++ b/contrib/llvm-project/clang/lib/Headers/stdatomic.h
@@ -16,7 +16,7 @@
* Exclude the MSVC path as well as the MSVC header as of the 14.31.30818
* explicitly disallows `stdatomic.h` in the C mode via an `#error`. Fallback
* to the clang resource header until that is fully supported. The
- * `stdatomic.h` header requires C++ 23 or newer.
+ * `stdatomic.h` header requires C++23 or newer.
*/
#if __STDC_HOSTED__ && \
__has_include_next(<stdatomic.h>) && \
@@ -35,6 +35,9 @@ extern "C" {
#define ATOMIC_BOOL_LOCK_FREE __CLANG_ATOMIC_BOOL_LOCK_FREE
#define ATOMIC_CHAR_LOCK_FREE __CLANG_ATOMIC_CHAR_LOCK_FREE
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+#define ATOMIC_CHAR8_T_LOCK_FREE __CLANG_ATOMIC_CHAR8_T_LOCK_FREE
+#endif
#define ATOMIC_CHAR16_T_LOCK_FREE __CLANG_ATOMIC_CHAR16_T_LOCK_FREE
#define ATOMIC_CHAR32_T_LOCK_FREE __CLANG_ATOMIC_CHAR32_T_LOCK_FREE
#define ATOMIC_WCHAR_T_LOCK_FREE __CLANG_ATOMIC_WCHAR_T_LOCK_FREE
@@ -104,6 +107,9 @@ typedef _Atomic(long) atomic_long;
typedef _Atomic(unsigned long) atomic_ulong;
typedef _Atomic(long long) atomic_llong;
typedef _Atomic(unsigned long long) atomic_ullong;
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+typedef _Atomic(unsigned char) atomic_char8_t;
+#endif
typedef _Atomic(uint_least16_t) atomic_char16_t;
typedef _Atomic(uint_least32_t) atomic_char32_t;
typedef _Atomic(wchar_t) atomic_wchar_t;
@@ -166,7 +172,11 @@ typedef _Atomic(uintmax_t) atomic_uintmax_t;
typedef struct atomic_flag { atomic_bool _Value; } atomic_flag;
+#ifdef __cplusplus
+#define ATOMIC_FLAG_INIT {false}
+#else
#define ATOMIC_FLAG_INIT { 0 }
+#endif
/* These should be provided by the libc implementation. */
#ifdef __cplusplus
diff --git a/contrib/llvm-project/clang/lib/Headers/stdbool.h b/contrib/llvm-project/clang/lib/Headers/stdbool.h
index 9406aab0ca72..dfaad2b65a9b 100644
--- a/contrib/llvm-project/clang/lib/Headers/stdbool.h
+++ b/contrib/llvm-project/clang/lib/Headers/stdbool.h
@@ -12,6 +12,10 @@
#define __bool_true_false_are_defined 1
+#if defined(__MVS__) && __has_include_next(<stdbool.h>)
+#include_next <stdbool.h>
+#else
+
#if defined(__STDC_VERSION__) && __STDC_VERSION__ > 201710L
/* FIXME: We should be issuing a deprecation warning here, but cannot yet due
* to system headers which include this header file unconditionally.
@@ -31,4 +35,5 @@
#endif
#endif
+#endif /* __MVS__ */
#endif /* __STDBOOL_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/stddef.h b/contrib/llvm-project/clang/lib/Headers/stddef.h
index e0ad7b8d17af..99b275aebf5a 100644
--- a/contrib/llvm-project/clang/lib/Headers/stddef.h
+++ b/contrib/llvm-project/clang/lib/Headers/stddef.h
@@ -14,34 +14,32 @@
* need to use some of its interfaces. Otherwise this header provides all of
* the expected interfaces.
*
- * When clang modules are enabled, this header is a textual header. It ignores
- * its header guard so that multiple submodules can export its interfaces.
- * Take module SM with submodules A and B, whose headers both include stddef.h
- * When SM.A builds, __STDDEF_H will be defined. When SM.B builds, the
- * definition from SM.A will leak when building without local submodule
- * visibility. stddef.h wouldn't include any of its implementation headers, and
- * SM.B wouldn't import any of the stddef modules, and SM.B's `export *`
- * wouldn't export any stddef interfaces as expected. However, since stddef.h
- * ignores its header guard when building with modules, it all works as
- * expected.
- *
- * When clang modules are not enabled, the header guards can function in the
- * normal simple fashion.
+ * When clang modules are enabled, this header is a textual header to support
+ * the multiple include behavior. As such, it doesn't directly declare anything
+ * so that it doesn't add duplicate declarations to all of its includers'
+ * modules.
*/
-#if !defined(__STDDEF_H) || __has_feature(modules) || \
- (defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1) || \
- defined(__need_ptrdiff_t) || defined(__need_size_t) || \
- defined(__need_rsize_t) || defined(__need_wchar_t) || \
- defined(__need_NULL) || defined(__need_nullptr_t) || \
- defined(__need_unreachable) || defined(__need_max_align_t) || \
- defined(__need_offsetof) || defined(__need_wint_t)
+#if defined(__MVS__) && __has_include_next(<stddef.h>)
+#undef __need_ptrdiff_t
+#undef __need_size_t
+#undef __need_rsize_t
+#undef __need_wchar_t
+#undef __need_NULL
+#undef __need_nullptr_t
+#undef __need_unreachable
+#undef __need_max_align_t
+#undef __need_offsetof
+#undef __need_wint_t
+#include <__stddef_header_macro.h>
+#include_next <stddef.h>
+
+#else
#if !defined(__need_ptrdiff_t) && !defined(__need_size_t) && \
!defined(__need_rsize_t) && !defined(__need_wchar_t) && \
!defined(__need_NULL) && !defined(__need_nullptr_t) && \
!defined(__need_unreachable) && !defined(__need_max_align_t) && \
!defined(__need_offsetof) && !defined(__need_wint_t)
-#define __STDDEF_H
#define __need_ptrdiff_t
#define __need_size_t
/* ISO9899:2011 7.20 (C11 Annex K): Define rsize_t if __STDC_WANT_LIB_EXT1__ is
@@ -50,7 +48,24 @@
#define __need_rsize_t
#endif
#define __need_wchar_t
+#if !defined(__STDDEF_H) || __has_feature(modules)
+/*
+ * __stddef_null.h is special when building without modules: if __need_NULL is
+ * set, then it will unconditionally redefine NULL. To avoid stepping on client
+ * definitions of NULL, __need_NULL should only be set the first time this
+ * header is included, that is when __STDDEF_H is not defined. However, when
+ * building with modules, this header is a textual header and needs to
+ * unconditionally include __stdef_null.h to support multiple submodules
+ * exporting _Builtin_stddef.null. Take module SM with submodules A and B, whose
+ * headers both include stddef.h When SM.A builds, __STDDEF_H will be defined.
+ * When SM.B builds, the definition from SM.A will leak when building without
+ * local submodule visibility. stddef.h wouldn't include __stddef_null.h, and
+ * SM.B wouldn't import _Builtin_stddef.null, and SM.B's `export *` wouldn't
+ * export NULL as expected. When building with modules, always include
+ * __stddef_null.h so that everything works as expected.
+ */
#define __need_NULL
+#endif
#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
defined(__cplusplus)
#define __need_nullptr_t
@@ -66,6 +81,7 @@
/* wint_t is provided by <wchar.h> and not <stddef.h>. It's here
* for compatibility, but must be explicitly requested. Therefore
* __need_wint_t is intentionally not defined here. */
+#include <__stddef_header_macro.h>
#endif
#if defined(__need_ptrdiff_t)
@@ -120,4 +136,4 @@ __WINT_TYPE__ directly; accommodate both by requiring __need_wint_t */
#undef __need_wint_t
#endif /* __need_wint_t */
-#endif
+#endif /* __MVS__ */
diff --git a/contrib/llvm-project/clang/lib/Headers/stdint.h b/contrib/llvm-project/clang/lib/Headers/stdint.h
index b6699b6ca3d4..01feab7b1ee2 100644
--- a/contrib/llvm-project/clang/lib/Headers/stdint.h
+++ b/contrib/llvm-project/clang/lib/Headers/stdint.h
@@ -14,6 +14,10 @@
#define __CLANG_STDINT_H
#endif
+#if defined(__MVS__) && __has_include_next(<stdint.h>)
+#include_next <stdint.h>
+#else
+
/* If we're hosted, fall back to the system's stdint.h, which might have
* additional definitions.
*/
@@ -947,4 +951,5 @@ typedef __UINTMAX_TYPE__ uintmax_t;
#endif
#endif /* __STDC_HOSTED__ */
+#endif /* __MVS__ */
#endif /* __CLANG_STDINT_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/stdnoreturn.h b/contrib/llvm-project/clang/lib/Headers/stdnoreturn.h
index c90bf77e840e..6a9b209c7218 100644
--- a/contrib/llvm-project/clang/lib/Headers/stdnoreturn.h
+++ b/contrib/llvm-project/clang/lib/Headers/stdnoreturn.h
@@ -10,9 +10,15 @@
#ifndef __STDNORETURN_H
#define __STDNORETURN_H
+#if defined(__MVS__) && __has_include_next(<stdnoreturn.h>)
+#include_next <stdnoreturn.h>
+#else
+
#define noreturn _Noreturn
#define __noreturn_is_defined 1
+#endif /* __MVS__ */
+
#if (defined(__STDC_VERSION__) && __STDC_VERSION__ > 201710L) && \
!defined(_CLANG_DISABLE_CRT_DEPRECATION_WARNINGS)
/* The noreturn macro is deprecated in C23. We do not mark it as such because
diff --git a/contrib/llvm-project/clang/lib/Headers/tmmintrin.h b/contrib/llvm-project/clang/lib/Headers/tmmintrin.h
index 7d8dc46c57bf..bf8327b692d1 100644
--- a/contrib/llvm-project/clang/lib/Headers/tmmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/tmmintrin.h
@@ -271,10 +271,11 @@ _mm_hadd_pi32(__m64 __a, __m64 __b)
return (__m64)__builtin_ia32_phaddd((__v2si)__a, (__v2si)__b);
}
-/// Horizontally adds the adjacent pairs of values contained in 2 packed
-/// 128-bit vectors of [8 x i16]. Positive sums greater than 0x7FFF are
-/// saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to
-/// 0x8000.
+/// Horizontally adds, with saturation, the adjacent pairs of values contained
+/// in two packed 128-bit vectors of [8 x i16].
+///
+/// Positive sums greater than 0x7FFF are saturated to 0x7FFF. Negative sums
+/// less than 0x8000 are saturated to 0x8000.
///
/// \headerfile <x86intrin.h>
///
@@ -296,10 +297,11 @@ _mm_hadds_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_phaddsw128((__v8hi)__a, (__v8hi)__b);
}
-/// Horizontally adds the adjacent pairs of values contained in 2 packed
-/// 64-bit vectors of [4 x i16]. Positive sums greater than 0x7FFF are
-/// saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to
-/// 0x8000.
+/// Horizontally adds, with saturation, the adjacent pairs of values contained
+/// in two packed 64-bit vectors of [4 x i16].
+///
+/// Positive sums greater than 0x7FFF are saturated to 0x7FFF. Negative sums
+/// less than 0x8000 are saturated to 0x8000.
///
/// \headerfile <x86intrin.h>
///
@@ -413,10 +415,11 @@ _mm_hsub_pi32(__m64 __a, __m64 __b)
return (__m64)__builtin_ia32_phsubd((__v2si)__a, (__v2si)__b);
}
-/// Horizontally subtracts the adjacent pairs of values contained in 2
-/// packed 128-bit vectors of [8 x i16]. Positive differences greater than
-/// 0x7FFF are saturated to 0x7FFF. Negative differences less than 0x8000 are
-/// saturated to 0x8000.
+/// Horizontally subtracts, with saturation, the adjacent pairs of values
+/// contained in two packed 128-bit vectors of [8 x i16].
+///
+/// Positive differences greater than 0x7FFF are saturated to 0x7FFF.
+/// Negative differences less than 0x8000 are saturated to 0x8000.
///
/// \headerfile <x86intrin.h>
///
@@ -438,10 +441,11 @@ _mm_hsubs_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_phsubsw128((__v8hi)__a, (__v8hi)__b);
}
-/// Horizontally subtracts the adjacent pairs of values contained in 2
-/// packed 64-bit vectors of [4 x i16]. Positive differences greater than
-/// 0x7FFF are saturated to 0x7FFF. Negative differences less than 0x8000 are
-/// saturated to 0x8000.
+/// Horizontally subtracts, with saturation, the adjacent pairs of values
+/// contained in two packed 64-bit vectors of [4 x i16].
+///
+/// Positive differences greater than 0x7FFF are saturated to 0x7FFF.
+/// Negative differences less than 0x8000 are saturated to 0x8000.
///
/// \headerfile <x86intrin.h>
///
diff --git a/contrib/llvm-project/clang/lib/Headers/varargs.h b/contrib/llvm-project/clang/lib/Headers/varargs.h
index d241b7de3cb2..d33ddc5ae7f8 100644
--- a/contrib/llvm-project/clang/lib/Headers/varargs.h
+++ b/contrib/llvm-project/clang/lib/Headers/varargs.h
@@ -8,5 +8,9 @@
*/
#ifndef __VARARGS_H
#define __VARARGS_H
- #error "Please use <stdarg.h> instead of <varargs.h>"
+#if defined(__MVS__) && __has_include_next(<varargs.h>)
+#include_next <varargs.h>
+#else
+#error "Please use <stdarg.h> instead of <varargs.h>"
+#endif /* __MVS__ */
#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/vecintrin.h b/contrib/llvm-project/clang/lib/Headers/vecintrin.h
index 1f51e32c0d13..609c7cf0b7a6 100644
--- a/contrib/llvm-project/clang/lib/Headers/vecintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/vecintrin.h
@@ -8359,7 +8359,9 @@ vec_min(__vector double __a, __vector double __b) {
static inline __ATTRS_ai __vector unsigned char
vec_add_u128(__vector unsigned char __a, __vector unsigned char __b) {
- return (__vector unsigned char)((__int128)__a + (__int128)__b);
+ return (__vector unsigned char)
+ (unsigned __int128 __attribute__((__vector_size__(16))))
+ ((__int128)__a + (__int128)__b);
}
/*-- vec_addc ---------------------------------------------------------------*/
@@ -8389,6 +8391,7 @@ vec_addc(__vector unsigned long long __a, __vector unsigned long long __b) {
static inline __ATTRS_ai __vector unsigned char
vec_addc_u128(__vector unsigned char __a, __vector unsigned char __b) {
return (__vector unsigned char)
+ (unsigned __int128 __attribute__((__vector_size__(16))))
__builtin_s390_vaccq((unsigned __int128)__a, (unsigned __int128)__b);
}
@@ -8398,6 +8401,7 @@ static inline __ATTRS_ai __vector unsigned char
vec_adde_u128(__vector unsigned char __a, __vector unsigned char __b,
__vector unsigned char __c) {
return (__vector unsigned char)
+ (unsigned __int128 __attribute__((__vector_size__(16))))
__builtin_s390_vacq((unsigned __int128)__a, (unsigned __int128)__b,
(unsigned __int128)__c);
}
@@ -8408,6 +8412,7 @@ static inline __ATTRS_ai __vector unsigned char
vec_addec_u128(__vector unsigned char __a, __vector unsigned char __b,
__vector unsigned char __c) {
return (__vector unsigned char)
+ (unsigned __int128 __attribute__((__vector_size__(16))))
__builtin_s390_vacccq((unsigned __int128)__a, (unsigned __int128)__b,
(unsigned __int128)__c);
}
@@ -8483,7 +8488,9 @@ vec_gfmsum(__vector unsigned int __a, __vector unsigned int __b) {
static inline __ATTRS_o_ai __vector unsigned char
vec_gfmsum_128(__vector unsigned long long __a,
__vector unsigned long long __b) {
- return (__vector unsigned char)__builtin_s390_vgfmg(__a, __b);
+ return (__vector unsigned char)
+ (unsigned __int128 __attribute__((__vector_size__(16))))
+ __builtin_s390_vgfmg(__a, __b);
}
/*-- vec_gfmsum_accum -------------------------------------------------------*/
@@ -8513,6 +8520,7 @@ vec_gfmsum_accum_128(__vector unsigned long long __a,
__vector unsigned long long __b,
__vector unsigned char __c) {
return (__vector unsigned char)
+ (unsigned __int128 __attribute__((__vector_size__(16))))
__builtin_s390_vgfmag(__a, __b, (unsigned __int128)__c);
}
@@ -8810,6 +8818,7 @@ vec_msum_u128(__vector unsigned long long __a, __vector unsigned long long __b,
#define vec_msum_u128(X, Y, Z, W) \
((__typeof__((vec_msum_u128)((X), (Y), (Z), (W)))) \
+ (unsigned __int128 __attribute__((__vector_size__(16)))) \
__builtin_s390_vmslg((X), (Y), (unsigned __int128)(Z), (W)))
#endif
@@ -8817,7 +8826,9 @@ vec_msum_u128(__vector unsigned long long __a, __vector unsigned long long __b,
static inline __ATTRS_ai __vector unsigned char
vec_sub_u128(__vector unsigned char __a, __vector unsigned char __b) {
- return (__vector unsigned char)((__int128)__a - (__int128)__b);
+ return (__vector unsigned char)
+ (unsigned __int128 __attribute__((__vector_size__(16))))
+ ((__int128)__a - (__int128)__b);
}
/*-- vec_subc ---------------------------------------------------------------*/
@@ -8847,6 +8858,7 @@ vec_subc(__vector unsigned long long __a, __vector unsigned long long __b) {
static inline __ATTRS_ai __vector unsigned char
vec_subc_u128(__vector unsigned char __a, __vector unsigned char __b) {
return (__vector unsigned char)
+ (unsigned __int128 __attribute__((__vector_size__(16))))
__builtin_s390_vscbiq((unsigned __int128)__a, (unsigned __int128)__b);
}
@@ -8856,6 +8868,7 @@ static inline __ATTRS_ai __vector unsigned char
vec_sube_u128(__vector unsigned char __a, __vector unsigned char __b,
__vector unsigned char __c) {
return (__vector unsigned char)
+ (unsigned __int128 __attribute__((__vector_size__(16))))
__builtin_s390_vsbiq((unsigned __int128)__a, (unsigned __int128)__b,
(unsigned __int128)__c);
}
@@ -8866,6 +8879,7 @@ static inline __ATTRS_ai __vector unsigned char
vec_subec_u128(__vector unsigned char __a, __vector unsigned char __b,
__vector unsigned char __c) {
return (__vector unsigned char)
+ (unsigned __int128 __attribute__((__vector_size__(16))))
__builtin_s390_vsbcbiq((unsigned __int128)__a, (unsigned __int128)__b,
(unsigned __int128)__c);
}
@@ -8886,12 +8900,16 @@ vec_sum2(__vector unsigned int __a, __vector unsigned int __b) {
static inline __ATTRS_o_ai __vector unsigned char
vec_sum_u128(__vector unsigned int __a, __vector unsigned int __b) {
- return (__vector unsigned char)__builtin_s390_vsumqf(__a, __b);
+ return (__vector unsigned char)
+ (unsigned __int128 __attribute__((__vector_size__(16))))
+ __builtin_s390_vsumqf(__a, __b);
}
static inline __ATTRS_o_ai __vector unsigned char
vec_sum_u128(__vector unsigned long long __a, __vector unsigned long long __b) {
- return (__vector unsigned char)__builtin_s390_vsumqg(__a, __b);
+ return (__vector unsigned char)
+ (unsigned __int128 __attribute__((__vector_size__(16))))
+ __builtin_s390_vsumqg(__a, __b);
}
/*-- vec_sum4 ---------------------------------------------------------------*/
diff --git a/contrib/llvm-project/clang/lib/Headers/x86gprintrin.h b/contrib/llvm-project/clang/lib/Headers/x86gprintrin.h
index ed141879fbc7..3d5cc606d7e6 100644
--- a/contrib/llvm-project/clang/lib/Headers/x86gprintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/x86gprintrin.h
@@ -10,38 +10,31 @@
#ifndef __X86GPRINTRIN_H
#define __X86GPRINTRIN_H
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__HRESET__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__HRESET__)
#include <hresetintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__UINTR__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__UINTR__)
#include <uintrintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__USERMSR__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__USERMSR__)
#include <usermsrintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__CRC32__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__CRC32__)
#include <crc32intrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__PRFCHI__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__PRFCHI__)
#include <prfchiintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__RAOINT__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__RAOINT__)
#include <raointintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__CMPCCXADD__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__CMPCCXADD__)
#include <cmpccxaddintrin.h>
#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/x86intrin.h b/contrib/llvm-project/clang/lib/Headers/x86intrin.h
index 450fd008dab9..f42e9e580f88 100644
--- a/contrib/llvm-project/clang/lib/Headers/x86intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/x86intrin.h
@@ -14,53 +14,39 @@
#include <immintrin.h>
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__3dNOW__)
-#include <mm3dnow.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__PRFCHW__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__PRFCHW__)
#include <prfchwintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__SSE4A__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE4A__)
#include <ammintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__FMA4__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__FMA4__)
#include <fma4intrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__XOP__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__XOP__)
#include <xopintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__TBM__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__TBM__)
#include <tbmintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__LWP__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__LWP__)
#include <lwpintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__MWAITX__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__MWAITX__)
#include <mwaitxintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__CLZERO__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__CLZERO__)
#include <clzerointrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__RDPRU__)
+#if !defined(__SCE__) || __has_feature(modules) || defined(__RDPRU__)
#include <rdpruintrin.h>
#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/xmmintrin.h b/contrib/llvm-project/clang/lib/Headers/xmmintrin.h
index 47368f3c23d2..6fb27297af92 100644
--- a/contrib/llvm-project/clang/lib/Headers/xmmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/xmmintrin.h
@@ -316,6 +316,8 @@ _mm_rsqrt_ps(__m128 __a)
/// operands and returns the lesser value in the low-order bits of the
/// vector of [4 x float].
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMINSS / MINSS </c> instructions.
@@ -338,6 +340,8 @@ _mm_min_ss(__m128 __a, __m128 __b)
/// Compares two 128-bit vectors of [4 x float] and returns the lesser
/// of each pair of values.
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMINPS / MINPS </c> instructions.
@@ -358,6 +362,8 @@ _mm_min_ps(__m128 __a, __m128 __b)
/// operands and returns the greater value in the low-order bits of a 128-bit
/// vector of [4 x float].
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMAXSS / MAXSS </c> instructions.
@@ -380,6 +386,8 @@ _mm_max_ss(__m128 __a, __m128 __b)
/// Compares two 128-bit vectors of [4 x float] and returns the greater
/// of each pair of values.
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMAXPS / MAXPS </c> instructions.
@@ -474,8 +482,11 @@ _mm_xor_ps(__m128 __a, __m128 __b)
}
/// Compares two 32-bit float values in the low-order bits of both
-/// operands for equality and returns the result of the comparison in the
+/// operands for equality.
+///
+/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the
/// low-order bits of a vector [4 x float].
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -498,6 +509,9 @@ _mm_cmpeq_ss(__m128 __a, __m128 __b)
/// Compares each of the corresponding 32-bit float values of the
/// 128-bit vectors of [4 x float] for equality.
///
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCMPEQPS / CMPEQPS </c> instructions.
@@ -515,8 +529,11 @@ _mm_cmpeq_ps(__m128 __a, __m128 __b)
/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the value in the first operand is less than the
-/// corresponding value in the second operand and returns the result of the
-/// comparison in the low-order bits of a vector of [4 x float].
+/// corresponding value in the second operand.
+///
+/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the
+/// low-order bits of a vector of [4 x float].
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -540,6 +557,9 @@ _mm_cmplt_ss(__m128 __a, __m128 __b)
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are less than those in the second operand.
///
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCMPLTPS / CMPLTPS </c> instructions.
@@ -557,9 +577,11 @@ _mm_cmplt_ps(__m128 __a, __m128 __b)
/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the value in the first operand is less than or
-/// equal to the corresponding value in the second operand and returns the
-/// result of the comparison in the low-order bits of a vector of
-/// [4 x float].
+/// equal to the corresponding value in the second operand.
+///
+/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in
+/// the low-order bits of a vector of [4 x float].
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -583,6 +605,9 @@ _mm_cmple_ss(__m128 __a, __m128 __b)
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are less than or equal to those in the second operand.
///
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCMPLEPS / CMPLEPS </c> instructions.
@@ -600,8 +625,11 @@ _mm_cmple_ps(__m128 __a, __m128 __b)
/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the value in the first operand is greater than
-/// the corresponding value in the second operand and returns the result of
-/// the comparison in the low-order bits of a vector of [4 x float].
+/// the corresponding value in the second operand.
+///
+/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the
+/// low-order bits of a vector of [4 x float].
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -627,6 +655,9 @@ _mm_cmpgt_ss(__m128 __a, __m128 __b)
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are greater than those in the second operand.
///
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCMPLTPS / CMPLTPS </c> instructions.
@@ -644,9 +675,11 @@ _mm_cmpgt_ps(__m128 __a, __m128 __b)
/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the value in the first operand is greater than
-/// or equal to the corresponding value in the second operand and returns
-/// the result of the comparison in the low-order bits of a vector of
-/// [4 x float].
+/// or equal to the corresponding value in the second operand.
+///
+/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the
+/// low-order bits of a vector of [4 x float].
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -672,6 +705,9 @@ _mm_cmpge_ss(__m128 __a, __m128 __b)
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are greater than or equal to those in the second operand.
///
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCMPLEPS / CMPLEPS </c> instructions.
@@ -687,9 +723,12 @@ _mm_cmpge_ps(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpleps((__v4sf)__b, (__v4sf)__a);
}
-/// Compares two 32-bit float values in the low-order bits of both
-/// operands for inequality and returns the result of the comparison in the
+/// Compares two 32-bit float values in the low-order bits of both operands
+/// for inequality.
+///
+/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the
/// low-order bits of a vector of [4 x float].
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -713,6 +752,9 @@ _mm_cmpneq_ss(__m128 __a, __m128 __b)
/// Compares each of the corresponding 32-bit float values of the
/// 128-bit vectors of [4 x float] for inequality.
///
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCMPNEQPS / CMPNEQPS </c>
@@ -731,8 +773,11 @@ _mm_cmpneq_ps(__m128 __a, __m128 __b)
/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the value in the first operand is not less than
-/// the corresponding value in the second operand and returns the result of
-/// the comparison in the low-order bits of a vector of [4 x float].
+/// the corresponding value in the second operand.
+///
+/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the
+/// low-order bits of a vector of [4 x float].
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -757,6 +802,9 @@ _mm_cmpnlt_ss(__m128 __a, __m128 __b)
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are not less than those in the second operand.
///
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCMPNLTPS / CMPNLTPS </c>
@@ -775,9 +823,11 @@ _mm_cmpnlt_ps(__m128 __a, __m128 __b)
/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the value in the first operand is not less than
-/// or equal to the corresponding value in the second operand and returns
-/// the result of the comparison in the low-order bits of a vector of
-/// [4 x float].
+/// or equal to the corresponding value in the second operand.
+///
+/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the
+/// low-order bits of a vector of [4 x float].
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -802,6 +852,9 @@ _mm_cmpnle_ss(__m128 __a, __m128 __b)
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are not less than or equal to those in the second operand.
///
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCMPNLEPS / CMPNLEPS </c>
@@ -820,9 +873,11 @@ _mm_cmpnle_ps(__m128 __a, __m128 __b)
/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the value in the first operand is not greater
-/// than the corresponding value in the second operand and returns the
-/// result of the comparison in the low-order bits of a vector of
-/// [4 x float].
+/// than the corresponding value in the second operand.
+///
+/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the
+/// low-order bits of a vector of [4 x float].
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -849,6 +904,9 @@ _mm_cmpngt_ss(__m128 __a, __m128 __b)
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are not greater than those in the second operand.
///
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCMPNLTPS / CMPNLTPS </c>
@@ -867,9 +925,11 @@ _mm_cmpngt_ps(__m128 __a, __m128 __b)
/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the value in the first operand is not greater
-/// than or equal to the corresponding value in the second operand and
-/// returns the result of the comparison in the low-order bits of a vector
-/// of [4 x float].
+/// than or equal to the corresponding value in the second operand.
+///
+/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the
+/// low-order bits of a vector of [4 x float].
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -896,6 +956,9 @@ _mm_cmpnge_ss(__m128 __a, __m128 __b)
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are not greater than or equal to those in the second operand.
///
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCMPNLEPS / CMPNLEPS </c>
@@ -914,9 +977,11 @@ _mm_cmpnge_ps(__m128 __a, __m128 __b)
/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the value in the first operand is ordered with
-/// respect to the corresponding value in the second operand and returns the
-/// result of the comparison in the low-order bits of a vector of
-/// [4 x float].
+/// respect to the corresponding value in the second operand.
+///
+/// A pair of floating-point values are ordered with respect to each
+/// other if neither value is a NaN. Each comparison returns 0x0 for false,
+/// 0xFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -941,6 +1006,10 @@ _mm_cmpord_ss(__m128 __a, __m128 __b)
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are ordered with respect to those in the second operand.
///
+/// A pair of floating-point values are ordered with respect to each
+/// other if neither value is a NaN. Each comparison returns 0x0 for false,
+/// 0xFFFFFFFF for true.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCMPORDPS / CMPORDPS </c>
@@ -959,9 +1028,11 @@ _mm_cmpord_ps(__m128 __a, __m128 __b)
/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the value in the first operand is unordered
-/// with respect to the corresponding value in the second operand and
-/// returns the result of the comparison in the low-order bits of a vector
-/// of [4 x float].
+/// with respect to the corresponding value in the second operand.
+///
+/// A pair of floating-point values are unordered with respect to each
+/// other if one or both values are NaN. Each comparison returns 0x0 for
+/// false, 0xFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -986,6 +1057,10 @@ _mm_cmpunord_ss(__m128 __a, __m128 __b)
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are unordered with respect to those in the second operand.
///
+/// A pair of floating-point values are unordered with respect to each
+/// other if one or both values are NaN. Each comparison returns 0x0 for
+/// false, 0xFFFFFFFF for true.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCMPUNORDPS / CMPUNORDPS </c>
@@ -1003,9 +1078,10 @@ _mm_cmpunord_ps(__m128 __a, __m128 __b)
}
/// Compares two 32-bit float values in the low-order bits of both
-/// operands for equality and returns the result of the comparison.
+/// operands for equality.
///
-/// If either of the two lower 32-bit values is NaN, 0 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1018,8 +1094,7 @@ _mm_cmpunord_ps(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results. If either of the
-/// two lower 32-bit values is NaN, 0 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comieq_ss(__m128 __a, __m128 __b)
{
@@ -1028,9 +1103,10 @@ _mm_comieq_ss(__m128 __a, __m128 __b)
/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the first operand is less than the second
-/// operand and returns the result of the comparison.
+/// operand.
///
-/// If either of the two lower 32-bit values is NaN, 0 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1043,8 +1119,7 @@ _mm_comieq_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-/// lower 32-bit values is NaN, 0 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comilt_ss(__m128 __a, __m128 __b)
{
@@ -1053,9 +1128,10 @@ _mm_comilt_ss(__m128 __a, __m128 __b)
/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the first operand is less than or equal to the
-/// second operand and returns the result of the comparison.
+/// second operand.
///
-/// If either of the two lower 32-bit values is NaN, 0 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1067,8 +1143,7 @@ _mm_comilt_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-/// lower 32-bit values is NaN, 0 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comile_ss(__m128 __a, __m128 __b)
{
@@ -1077,9 +1152,10 @@ _mm_comile_ss(__m128 __a, __m128 __b)
/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the first operand is greater than the second
-/// operand and returns the result of the comparison.
+/// operand.
///
-/// If either of the two lower 32-bit values is NaN, 0 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1091,8 +1167,7 @@ _mm_comile_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results. If either of the
-/// two lower 32-bit values is NaN, 0 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comigt_ss(__m128 __a, __m128 __b)
{
@@ -1101,9 +1176,10 @@ _mm_comigt_ss(__m128 __a, __m128 __b)
/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the first operand is greater than or equal to
-/// the second operand and returns the result of the comparison.
+/// the second operand.
///
-/// If either of the two lower 32-bit values is NaN, 0 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1115,8 +1191,7 @@ _mm_comigt_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-/// lower 32-bit values is NaN, 0 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comige_ss(__m128 __a, __m128 __b)
{
@@ -1125,9 +1200,10 @@ _mm_comige_ss(__m128 __a, __m128 __b)
/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the first operand is not equal to the second
-/// operand and returns the result of the comparison.
+/// operand.
///
-/// If either of the two lower 32-bit values is NaN, 1 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 1.
///
/// \headerfile <x86intrin.h>
///
@@ -1139,8 +1215,7 @@ _mm_comige_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results. If either of the
-/// two lower 32-bit values is NaN, 1 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comineq_ss(__m128 __a, __m128 __b)
{
@@ -1148,10 +1223,10 @@ _mm_comineq_ss(__m128 __a, __m128 __b)
}
/// Performs an unordered comparison of two 32-bit float values using
-/// the low-order bits of both operands to determine equality and returns
-/// the result of the comparison.
+/// the low-order bits of both operands to determine equality.
///
-/// If either of the two lower 32-bit values is NaN, 0 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1163,8 +1238,7 @@ _mm_comineq_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-/// lower 32-bit values is NaN, 0 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomieq_ss(__m128 __a, __m128 __b)
{
@@ -1173,9 +1247,10 @@ _mm_ucomieq_ss(__m128 __a, __m128 __b)
/// Performs an unordered comparison of two 32-bit float values using
/// the low-order bits of both operands to determine if the first operand is
-/// less than the second operand and returns the result of the comparison.
+/// less than the second operand.
///
-/// If either of the two lower 32-bit values is NaN, 0 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1187,8 +1262,7 @@ _mm_ucomieq_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-/// lower 32-bit values is NaN, 0 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomilt_ss(__m128 __a, __m128 __b)
{
@@ -1197,10 +1271,10 @@ _mm_ucomilt_ss(__m128 __a, __m128 __b)
/// Performs an unordered comparison of two 32-bit float values using
/// the low-order bits of both operands to determine if the first operand is
-/// less than or equal to the second operand and returns the result of the
-/// comparison.
+/// less than or equal to the second operand.
///
-/// If either of the two lower 32-bit values is NaN, 0 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1212,8 +1286,7 @@ _mm_ucomilt_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-/// lower 32-bit values is NaN, 0 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomile_ss(__m128 __a, __m128 __b)
{
@@ -1222,10 +1295,10 @@ _mm_ucomile_ss(__m128 __a, __m128 __b)
/// Performs an unordered comparison of two 32-bit float values using
/// the low-order bits of both operands to determine if the first operand is
-/// greater than the second operand and returns the result of the
-/// comparison.
+/// greater than the second operand.
///
-/// If either of the two lower 32-bit values is NaN, 0 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1237,8 +1310,7 @@ _mm_ucomile_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-/// lower 32-bit values is NaN, 0 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomigt_ss(__m128 __a, __m128 __b)
{
@@ -1247,10 +1319,10 @@ _mm_ucomigt_ss(__m128 __a, __m128 __b)
/// Performs an unordered comparison of two 32-bit float values using
/// the low-order bits of both operands to determine if the first operand is
-/// greater than or equal to the second operand and returns the result of
-/// the comparison.
+/// greater than or equal to the second operand.
///
-/// If either of the two lower 32-bit values is NaN, 0 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1262,8 +1334,7 @@ _mm_ucomigt_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-/// lower 32-bit values is NaN, 0 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomige_ss(__m128 __a, __m128 __b)
{
@@ -1271,10 +1342,10 @@ _mm_ucomige_ss(__m128 __a, __m128 __b)
}
/// Performs an unordered comparison of two 32-bit float values using
-/// the low-order bits of both operands to determine inequality and returns
-/// the result of the comparison.
+/// the low-order bits of both operands to determine inequality.
///
-/// If either of the two lower 32-bit values is NaN, 1 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 1.
///
/// \headerfile <x86intrin.h>
///
@@ -1286,8 +1357,7 @@ _mm_ucomige_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-/// lower 32-bit values is NaN, 1 is returned.
+/// \returns An integer containing the comparison results.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomineq_ss(__m128 __a, __m128 __b)
{
@@ -1297,6 +1367,10 @@ _mm_ucomineq_ss(__m128 __a, __m128 __b)
/// Converts a float value contained in the lower 32 bits of a vector of
/// [4 x float] into a 32-bit integer.
///
+/// If the converted value does not fit in a 32-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTSS2SI / CVTSS2SI </c>
@@ -1315,6 +1389,10 @@ _mm_cvtss_si32(__m128 __a)
/// Converts a float value contained in the lower 32 bits of a vector of
/// [4 x float] into a 32-bit integer.
///
+/// If the converted value does not fit in a 64-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTSS2SI / CVTSS2SI </c>
@@ -1335,6 +1413,10 @@ _mm_cvt_ss2si(__m128 __a)
/// Converts a float value contained in the lower 32 bits of a vector of
/// [4 x float] into a 64-bit integer.
///
+/// If the converted value does not fit in a 32-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTSS2SI / CVTSS2SI </c>
@@ -1355,6 +1437,10 @@ _mm_cvtss_si64(__m128 __a)
/// Converts two low-order float values in a 128-bit vector of
/// [4 x float] into a 64-bit vector of [2 x i32].
///
+/// If a converted value does not fit in a 32-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> CVTPS2PI </c> instruction.
@@ -1371,6 +1457,10 @@ _mm_cvtps_pi32(__m128 __a)
/// Converts two low-order float values in a 128-bit vector of
/// [4 x float] into a 64-bit vector of [2 x i32].
///
+/// If a converted value does not fit in a 32-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> CVTPS2PI </c> instruction.
@@ -1384,9 +1474,12 @@ _mm_cvt_ps2pi(__m128 __a)
return _mm_cvtps_pi32(__a);
}
-/// Converts a float value contained in the lower 32 bits of a vector of
-/// [4 x float] into a 32-bit integer, truncating the result when it is
-/// inexact.
+/// Converts the lower (first) element of a vector of [4 x float] into a signed
+/// truncated (rounded toward zero) 32-bit integer.
+///
+/// If the converted value does not fit in a 32-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
///
/// \headerfile <x86intrin.h>
///
@@ -1403,9 +1496,12 @@ _mm_cvttss_si32(__m128 __a)
return __builtin_ia32_cvttss2si((__v4sf)__a);
}
-/// Converts a float value contained in the lower 32 bits of a vector of
-/// [4 x float] into a 32-bit integer, truncating the result when it is
-/// inexact.
+/// Converts the lower (first) element of a vector of [4 x float] into a signed
+/// truncated (rounded toward zero) 32-bit integer.
+///
+/// If the converted value does not fit in a 32-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
///
/// \headerfile <x86intrin.h>
///
@@ -1423,9 +1519,12 @@ _mm_cvtt_ss2si(__m128 __a)
}
#ifdef __x86_64__
-/// Converts a float value contained in the lower 32 bits of a vector of
-/// [4 x float] into a 64-bit integer, truncating the result when it is
-/// inexact.
+/// Converts the lower (first) element of a vector of [4 x float] into a signed
+/// truncated (rounded toward zero) 64-bit integer.
+///
+/// If the converted value does not fit in a 64-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
///
/// \headerfile <x86intrin.h>
///
@@ -1443,9 +1542,13 @@ _mm_cvttss_si64(__m128 __a)
}
#endif
-/// Converts two low-order float values in a 128-bit vector of
-/// [4 x float] into a 64-bit vector of [2 x i32], truncating the result
-/// when it is inexact.
+/// Converts the lower (first) two elements of a 128-bit vector of [4 x float]
+/// into two signed truncated (rounded toward zero) 32-bit integers,
+/// returned in a 64-bit vector of [2 x i32].
+///
+/// If a converted value does not fit in a 32-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
///
/// \headerfile <x86intrin.h>
///
@@ -1461,9 +1564,13 @@ _mm_cvttps_pi32(__m128 __a)
return (__m64)__builtin_ia32_cvttps2pi((__v4sf)__a);
}
-/// Converts two low-order float values in a 128-bit vector of [4 x
-/// float] into a 64-bit vector of [2 x i32], truncating the result when it
-/// is inexact.
+/// Converts the lower (first) two elements of a 128-bit vector of [4 x float]
+/// into two signed truncated (rounded toward zero) 32-bit integers,
+/// returned in a 64-bit vector of [2 x i32].
+///
+/// If a converted value does not fit in a 32-bit integer, raises a
+/// floating-point invalid exception. If the exception is masked, returns
+/// the most negative integer.
///
/// \headerfile <x86intrin.h>
///
@@ -1803,7 +1910,7 @@ _mm_undefined_ps(void)
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_set_ss(float __w)
{
- return __extension__ (__m128){ __w, 0, 0, 0 };
+ return __extension__ (__m128){ __w, 0.0f, 0.0f, 0.0f };
}
/// Constructs a 128-bit floating-point vector of [4 x float], with each
@@ -2940,6 +3047,85 @@ _mm_movemask_ps(__m128 __a)
return __builtin_ia32_movmskps((__v4sf)__a);
}
+/* Compare */
+#define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */
+#define _CMP_LT_OS 0x01 /* Less-than (ordered, signaling) */
+#define _CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */
+#define _CMP_UNORD_Q 0x03 /* Unordered (non-signaling) */
+#define _CMP_NEQ_UQ 0x04 /* Not-equal (unordered, non-signaling) */
+#define _CMP_NLT_US 0x05 /* Not-less-than (unordered, signaling) */
+#define _CMP_NLE_US 0x06 /* Not-less-than-or-equal (unordered, signaling) */
+#define _CMP_ORD_Q 0x07 /* Ordered (non-signaling) */
+
+/// Compares each of the corresponding values of two 128-bit vectors of
+/// [4 x float], using the operation specified by the immediate integer
+/// operand.
+///
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, comparisons that are ordered
+/// return false, and comparisons that are unordered return true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128 _mm_cmp_ps(__m128 a, __m128 b, const int c);
+/// \endcode
+///
+/// This intrinsic corresponds to the <c> (V)CMPPS </c> instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x float].
+/// \param b
+/// A 128-bit vector of [4 x float].
+/// \param c
+/// An immediate integer operand, with bits [4:0] specifying which comparison
+/// operation to use: \n
+/// 0x00: Equal (ordered, non-signaling) \n
+/// 0x01: Less-than (ordered, signaling) \n
+/// 0x02: Less-than-or-equal (ordered, signaling) \n
+/// 0x03: Unordered (non-signaling) \n
+/// 0x04: Not-equal (unordered, non-signaling) \n
+/// 0x05: Not-less-than (unordered, signaling) \n
+/// 0x06: Not-less-than-or-equal (unordered, signaling) \n
+/// 0x07: Ordered (non-signaling) \n
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
+#define _mm_cmp_ps(a, b, c) \
+ ((__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), (c)))
+
+/// Compares each of the corresponding scalar values of two 128-bit
+/// vectors of [4 x float], using the operation specified by the immediate
+/// integer operand.
+///
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, comparisons that are ordered
+/// return false, and comparisons that are unordered return true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128 _mm_cmp_ss(__m128 a, __m128 b, const int c);
+/// \endcode
+///
+/// This intrinsic corresponds to the <c> (V)CMPSS </c> instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x float].
+/// \param b
+/// A 128-bit vector of [4 x float].
+/// \param c
+/// An immediate integer operand, with bits [4:0] specifying which comparison
+/// operation to use: \n
+/// 0x00: Equal (ordered, non-signaling) \n
+/// 0x01: Less-than (ordered, signaling) \n
+/// 0x02: Less-than-or-equal (ordered, signaling) \n
+/// 0x03: Unordered (non-signaling) \n
+/// 0x04: Not-equal (unordered, non-signaling) \n
+/// 0x05: Not-less-than (unordered, signaling) \n
+/// 0x06: Not-less-than-or-equal (unordered, signaling) \n
+/// 0x07: Ordered (non-signaling) \n
+/// \returns A 128-bit vector of [4 x float] containing the comparison results.
+#define _mm_cmp_ss(a, b, c) \
+ ((__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), (c)))
#define _MM_ALIGN16 __attribute__((aligned(16)))
diff --git a/contrib/llvm-project/clang/lib/Headers/yvals_core.h b/contrib/llvm-project/clang/lib/Headers/yvals_core.h
new file mode 100644
index 000000000000..5ee194a3e5f5
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/yvals_core.h
@@ -0,0 +1,25 @@
+//===----- yvals_core.h - Internal MSVC STL core header -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Only include this if we are aiming for MSVC compatibility.
+#ifndef _MSC_VER
+#include_next <yvals_core.h>
+#else
+
+#ifndef __clang_yvals_core_h
+#define __clang_yvals_core_h
+
+#include_next <yvals_core.h>
+
+#ifdef _STL_INTRIN_HEADER
+#undef _STL_INTRIN_HEADER
+#define _STL_INTRIN_HEADER <intrin0.h>
+#endif
+
+#endif
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/zos_wrappers/builtins.h b/contrib/llvm-project/clang/lib/Headers/zos_wrappers/builtins.h
new file mode 100644
index 000000000000..1f0d0e27ecb3
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/zos_wrappers/builtins.h
@@ -0,0 +1,18 @@
+/*===---- builtins.h - z/Architecture Builtin Functions --------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ZOS_WRAPPERS_BUILTINS_H
+#define __ZOS_WRAPPERS_BUILTINS_H
+#if defined(__MVS__)
+#include_next <builtins.h>
+#if defined(__VEC__)
+#include <vecintrin.h>
+#endif
+#endif /* defined(__MVS__) */
+#endif /* __ZOS_WRAPPERS_BUILTINS_H */
diff --git a/contrib/llvm-project/clang/lib/Index/CommentToXML.cpp b/contrib/llvm-project/clang/lib/Index/CommentToXML.cpp
index 295f3f228ff7..cd7226e71171 100644
--- a/contrib/llvm-project/clang/lib/Index/CommentToXML.cpp
+++ b/contrib/llvm-project/clang/lib/Index/CommentToXML.cpp
@@ -12,6 +12,7 @@
#include "clang/AST/Comment.h"
#include "clang/AST/CommentVisitor.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
#include "clang/Index/USRGeneration.h"
@@ -545,7 +546,8 @@ public:
void visitParagraphComment(const ParagraphComment *C);
void appendParagraphCommentWithKind(const ParagraphComment *C,
- StringRef Kind);
+ StringRef ParagraphKind,
+ StringRef PrependBodyText);
void visitBlockCommandComment(const BlockCommandComment *C);
void visitParamCommandComment(const ParamCommandComment *C);
@@ -679,15 +681,15 @@ CommentASTToXMLConverter::visitHTMLEndTagComment(const HTMLEndTagComment *C) {
Result << ">&lt;/" << C->getTagName() << "&gt;</rawHTML>";
}
-void
-CommentASTToXMLConverter::visitParagraphComment(const ParagraphComment *C) {
- appendParagraphCommentWithKind(C, StringRef());
+void CommentASTToXMLConverter::visitParagraphComment(
+ const ParagraphComment *C) {
+ appendParagraphCommentWithKind(C, StringRef(), StringRef());
}
void CommentASTToXMLConverter::appendParagraphCommentWithKind(
- const ParagraphComment *C,
- StringRef ParagraphKind) {
- if (C->isWhitespace())
+ const ParagraphComment *C, StringRef ParagraphKind,
+ StringRef PrependBodyText) {
+ if (C->isWhitespace() && PrependBodyText.empty())
return;
if (ParagraphKind.empty())
@@ -695,8 +697,11 @@ void CommentASTToXMLConverter::appendParagraphCommentWithKind(
else
Result << "<Para kind=\"" << ParagraphKind << "\">";
- for (Comment::child_iterator I = C->child_begin(), E = C->child_end();
- I != E; ++I) {
+ if (!PrependBodyText.empty())
+ Result << PrependBodyText << " ";
+
+ for (Comment::child_iterator I = C->child_begin(), E = C->child_end(); I != E;
+ ++I) {
visit(*I);
}
Result << "</Para>";
@@ -705,8 +710,15 @@ void CommentASTToXMLConverter::appendParagraphCommentWithKind(
void CommentASTToXMLConverter::visitBlockCommandComment(
const BlockCommandComment *C) {
StringRef ParagraphKind;
+ StringRef ExceptionType;
- switch (C->getCommandID()) {
+ const unsigned CommandID = C->getCommandID();
+ const CommandInfo *Info = Traits.getCommandInfo(CommandID);
+ if (Info->IsThrowsCommand && C->getNumArgs() > 0) {
+ ExceptionType = C->getArgText(0);
+ }
+
+ switch (CommandID) {
case CommandTraits::KCI_attention:
case CommandTraits::KCI_author:
case CommandTraits::KCI_authors:
@@ -731,7 +743,8 @@ void CommentASTToXMLConverter::visitBlockCommandComment(
break;
}
- appendParagraphCommentWithKind(C->getParagraph(), ParagraphKind);
+ appendParagraphCommentWithKind(C->getParagraph(), ParagraphKind,
+ ExceptionType);
}
void CommentASTToXMLConverter::visitParamCommandComment(
@@ -1052,6 +1065,11 @@ void CommentASTToXMLConverter::visitFullComment(const FullComment *C) {
}
if (AA->getUnavailable())
Result << "<Unavailable/>";
+
+ IdentifierInfo *Environment = AA->getEnvironment();
+ if (Environment) {
+ Result << "<Environment>" << Environment->getName() << "</Environment>";
+ }
Result << "</Availability>";
}
}
diff --git a/contrib/llvm-project/clang/lib/Index/IndexBody.cpp b/contrib/llvm-project/clang/lib/Index/IndexBody.cpp
index 08136baa5d40..c18daf7faa74 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexBody.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexBody.cpp
@@ -268,7 +268,7 @@ public:
}
return true;
};
- bool IsPropCall = Containing && isa<PseudoObjectExpr>(Containing);
+ bool IsPropCall = isa_and_nonnull<PseudoObjectExpr>(Containing);
// Implicit property message sends are not 'implicit'.
if ((E->isImplicit() || IsPropCall) &&
!(IsPropCall &&
diff --git a/contrib/llvm-project/clang/lib/Index/IndexDecl.cpp b/contrib/llvm-project/clang/lib/Index/IndexDecl.cpp
index 1c04aa17d53f..a7fa6c5e6898 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexDecl.cpp
@@ -673,9 +673,12 @@ public:
IndexCtx.indexTagDecl(
D, SymbolRelation(SymbolRoleSet(SymbolRole::RelationSpecializationOf),
SpecializationOf));
- if (TypeSourceInfo *TSI = D->getTypeAsWritten())
- IndexCtx.indexTypeSourceInfo(TSI, /*Parent=*/nullptr,
- D->getLexicalDeclContext());
+ // Template specialization arguments.
+ if (const ASTTemplateArgumentListInfo *TemplateArgInfo =
+ D->getTemplateArgsAsWritten()) {
+ for (const auto &Arg : TemplateArgInfo->arguments())
+ handleTemplateArgumentLoc(Arg, D, D->getLexicalDeclContext());
+ }
return true;
}
@@ -700,14 +703,16 @@ public:
IndexCtx.handleDecl(TP);
if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(TP)) {
if (TTP->hasDefaultArgument())
- IndexCtx.indexTypeSourceInfo(TTP->getDefaultArgumentInfo(), Parent);
+ handleTemplateArgumentLoc(TTP->getDefaultArgument(), Parent,
+ TP->getLexicalDeclContext());
if (auto *C = TTP->getTypeConstraint())
IndexCtx.handleReference(C->getNamedConcept(), C->getConceptNameLoc(),
Parent, TTP->getLexicalDeclContext());
} else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(TP)) {
IndexCtx.indexTypeSourceInfo(NTTP->getTypeSourceInfo(), Parent);
if (NTTP->hasDefaultArgument())
- IndexCtx.indexBody(NTTP->getDefaultArgument(), Parent);
+ handleTemplateArgumentLoc(NTTP->getDefaultArgument(), Parent,
+ TP->getLexicalDeclContext());
} else if (const auto *TTPD = dyn_cast<TemplateTemplateParmDecl>(TP)) {
if (TTPD->hasDefaultArgument())
handleTemplateArgumentLoc(TTPD->getDefaultArgument(), Parent,
diff --git a/contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp b/contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp
index 0f79694d1faa..419ff79a5cba 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp
@@ -552,8 +552,7 @@ StringRef index::getSymbolSubKindString(SymbolSubKind K) {
case SymbolSubKind::AccessorSetter: return "acc-set";
case SymbolSubKind::UsingTypename: return "using-typename";
case SymbolSubKind::UsingValue: return "using-value";
- case SymbolSubKind::UsingEnum:
- return "using-enum";
+ case SymbolSubKind::UsingEnum: return "using-enum";
}
llvm_unreachable("invalid symbol subkind");
}
diff --git a/contrib/llvm-project/clang/lib/Index/IndexingAction.cpp b/contrib/llvm-project/clang/lib/Index/IndexingAction.cpp
index c9fcaad31128..81c46a0d08de 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexingAction.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexingAction.cpp
@@ -199,7 +199,7 @@ index::createIndexingAction(std::shared_ptr<IndexDataConsumer> DataConsumer,
}
static bool topLevelDeclVisitor(void *context, const Decl *D) {
- IndexingContext &IndexCtx = *static_cast<IndexingContext*>(context);
+ IndexingContext &IndexCtx = *static_cast<IndexingContext *>(context);
return IndexCtx.indexTopLevelDecl(D);
}
diff --git a/contrib/llvm-project/clang/lib/Index/USRGeneration.cpp b/contrib/llvm-project/clang/lib/Index/USRGeneration.cpp
index 5acc86191f8f..ad7870309c5d 100644
--- a/contrib/llvm-project/clang/lib/Index/USRGeneration.cpp
+++ b/contrib/llvm-project/clang/lib/Index/USRGeneration.cpp
@@ -257,20 +257,31 @@ void USRGenerator::VisitFunctionDecl(const FunctionDecl *D) {
!D->hasAttr<OverloadableAttr>())
return;
- if (const TemplateArgumentList *
- SpecArgs = D->getTemplateSpecializationArgs()) {
+ if (D->isFunctionTemplateSpecialization()) {
Out << '<';
- for (unsigned I = 0, N = SpecArgs->size(); I != N; ++I) {
- Out << '#';
- VisitTemplateArgument(SpecArgs->get(I));
+ if (const TemplateArgumentList *SpecArgs =
+ D->getTemplateSpecializationArgs()) {
+ for (const auto &Arg : SpecArgs->asArray()) {
+ Out << '#';
+ VisitTemplateArgument(Arg);
+ }
+ } else if (const ASTTemplateArgumentListInfo *SpecArgsWritten =
+ D->getTemplateSpecializationArgsAsWritten()) {
+ for (const auto &ArgLoc : SpecArgsWritten->arguments()) {
+ Out << '#';
+ VisitTemplateArgument(ArgLoc.getArgument());
+ }
}
Out << '>';
}
+ QualType CanonicalType = D->getType().getCanonicalType();
// Mangle in type information for the arguments.
- for (auto *PD : D->parameters()) {
- Out << '#';
- VisitType(PD->getType());
+ if (const auto *FPT = CanonicalType->getAs<FunctionProtoType>()) {
+ for (QualType PT : FPT->param_types()) {
+ Out << '#';
+ VisitType(PT);
+ }
}
if (D->isVariadic())
Out << '.';
@@ -769,6 +780,11 @@ void USRGenerator::VisitType(QualType T) {
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
+#define AMDGPU_TYPE(Name, Id, SingletonId) \
+ case BuiltinType::Id: \
+ Out << "@BT@" << #Name; \
+ break;
+#include "clang/Basic/AMDGPUTypes.def"
case BuiltinType::ShortAccum:
Out << "@BT@ShortAccum"; break;
case BuiltinType::Accum:
diff --git a/contrib/llvm-project/clang/lib/InstallAPI/DiagnosticBuilderWrappers.cpp b/contrib/llvm-project/clang/lib/InstallAPI/DiagnosticBuilderWrappers.cpp
new file mode 100644
index 000000000000..c8d07f229902
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/InstallAPI/DiagnosticBuilderWrappers.cpp
@@ -0,0 +1,110 @@
+//===- DiagnosticBuilderWrappers.cpp ----------------------------*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "DiagnosticBuilderWrappers.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TextAPI/Platform.h"
+
+using clang::DiagnosticBuilder;
+
+namespace llvm {
+namespace MachO {
+const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ const Architecture &Arch) {
+ DB.AddString(getArchitectureName(Arch));
+ return DB;
+}
+
+const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ const ArchitectureSet &ArchSet) {
+ DB.AddString(std::string(ArchSet));
+ return DB;
+}
+
+const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ const PlatformType &Platform) {
+ DB.AddString(getPlatformName(Platform));
+ return DB;
+}
+
+const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ const PlatformVersionSet &Platforms) {
+ std::string PlatformAsString;
+ raw_string_ostream Stream(PlatformAsString);
+
+ Stream << "[ ";
+ llvm::interleaveComma(
+ Platforms, Stream,
+ [&Stream](const std::pair<PlatformType, VersionTuple> &PV) {
+ Stream << getPlatformName(PV.first);
+ if (!PV.second.empty())
+ Stream << PV.second.getAsString();
+ });
+ Stream << " ]";
+ DB.AddString(PlatformAsString);
+ return DB;
+}
+
+const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ const FileType &Type) {
+ switch (Type) {
+ case FileType::MachO_Bundle:
+ DB.AddString("mach-o bundle");
+ return DB;
+ case FileType::MachO_DynamicLibrary:
+ DB.AddString("mach-o dynamic library");
+ return DB;
+ case FileType::MachO_DynamicLibrary_Stub:
+ DB.AddString("mach-o dynamic library stub");
+ return DB;
+ case FileType::TBD_V1:
+ DB.AddString("tbd-v1");
+ return DB;
+ case FileType::TBD_V2:
+ DB.AddString("tbd-v2");
+ return DB;
+ case FileType::TBD_V3:
+ DB.AddString("tbd-v3");
+ return DB;
+ case FileType::TBD_V4:
+ DB.AddString("tbd-v4");
+ return DB;
+ case FileType::TBD_V5:
+ DB.AddString("tbd-v5");
+ return DB;
+ case FileType::Invalid:
+ case FileType::All:
+ break;
+ }
+ llvm_unreachable("Unexpected file type for diagnostics.");
+}
+
+const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ const PackedVersion &Version) {
+ std::string VersionString;
+ raw_string_ostream OS(VersionString);
+ OS << Version;
+ DB.AddString(VersionString);
+ return DB;
+}
+
+const clang::DiagnosticBuilder &
+operator<<(const clang::DiagnosticBuilder &DB,
+ const StringMapEntry<ArchitectureSet> &LibAttr) {
+ std::string IFAsString;
+ raw_string_ostream OS(IFAsString);
+
+ OS << LibAttr.getKey() << " [ " << LibAttr.getValue() << " ]";
+ DB.AddString(IFAsString);
+ return DB;
+}
+
+} // namespace MachO
+} // namespace llvm
diff --git a/contrib/llvm-project/clang/lib/InstallAPI/DiagnosticBuilderWrappers.h b/contrib/llvm-project/clang/lib/InstallAPI/DiagnosticBuilderWrappers.h
new file mode 100644
index 000000000000..48cfefbf65e6
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/InstallAPI/DiagnosticBuilderWrappers.h
@@ -0,0 +1,49 @@
+//===- DiagnosticBuilderWrappers.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// Diagnostic wrappers for TextAPI types for error reporting.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INSTALLAPI_DIAGNOSTICBUILDER_WRAPPER_H
+#define LLVM_CLANG_INSTALLAPI_DIAGNOSTICBUILDER_WRAPPER_H
+
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/TextAPI/Architecture.h"
+#include "llvm/TextAPI/ArchitectureSet.h"
+#include "llvm/TextAPI/InterfaceFile.h"
+#include "llvm/TextAPI/Platform.h"
+
+namespace llvm {
+namespace MachO {
+
+const clang::DiagnosticBuilder &operator<<(const clang::DiagnosticBuilder &DB,
+ const PlatformType &Platform);
+
+const clang::DiagnosticBuilder &operator<<(const clang::DiagnosticBuilder &DB,
+ const PlatformVersionSet &Platforms);
+
+const clang::DiagnosticBuilder &operator<<(const clang::DiagnosticBuilder &DB,
+ const Architecture &Arch);
+
+const clang::DiagnosticBuilder &operator<<(const clang::DiagnosticBuilder &DB,
+ const ArchitectureSet &ArchSet);
+
+const clang::DiagnosticBuilder &operator<<(const clang::DiagnosticBuilder &DB,
+ const FileType &Type);
+
+const clang::DiagnosticBuilder &operator<<(const clang::DiagnosticBuilder &DB,
+ const PackedVersion &Version);
+
+const clang::DiagnosticBuilder &
+operator<<(const clang::DiagnosticBuilder &DB,
+ const StringMapEntry<ArchitectureSet> &LibAttr);
+
+} // namespace MachO
+} // namespace llvm
+#endif // LLVM_CLANG_INSTALLAPI_DIAGNOSTICBUILDER_WRAPPER_H
diff --git a/contrib/llvm-project/clang/lib/InstallAPI/DirectoryScanner.cpp b/contrib/llvm-project/clang/lib/InstallAPI/DirectoryScanner.cpp
new file mode 100644
index 000000000000..8984758e7b44
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/InstallAPI/DirectoryScanner.cpp
@@ -0,0 +1,300 @@
+//===- DirectoryScanner.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/InstallAPI/DirectoryScanner.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/TextAPI/DylibReader.h"
+
+using namespace llvm;
+using namespace llvm::MachO;
+
+namespace clang::installapi {
+
+HeaderSeq DirectoryScanner::getHeaders(ArrayRef<Library> Libraries) {
+ HeaderSeq Headers;
+ for (const Library &Lib : Libraries)
+ llvm::append_range(Headers, Lib.Headers);
+ return Headers;
+}
+
+llvm::Error DirectoryScanner::scan(StringRef Directory) {
+ if (Mode == ScanMode::ScanFrameworks)
+ return scanForFrameworks(Directory);
+
+ return scanForUnwrappedLibraries(Directory);
+}
+
+llvm::Error DirectoryScanner::scanForUnwrappedLibraries(StringRef Directory) {
+ // Check some known sub-directory locations.
+ auto GetDirectory = [&](const char *Sub) -> OptionalDirectoryEntryRef {
+ SmallString<PATH_MAX> Path(Directory);
+ sys::path::append(Path, Sub);
+ return FM.getOptionalDirectoryRef(Path);
+ };
+
+ auto DirPublic = GetDirectory("usr/include");
+ auto DirPrivate = GetDirectory("usr/local/include");
+ if (!DirPublic && !DirPrivate) {
+ std::error_code ec = std::make_error_code(std::errc::not_a_directory);
+ return createStringError(ec,
+ "cannot find any public (usr/include) or private "
+ "(usr/local/include) header directory");
+ }
+
+ Library &Lib = getOrCreateLibrary(Directory, Libraries);
+ Lib.IsUnwrappedDylib = true;
+
+ if (DirPublic)
+ if (Error Err = scanHeaders(DirPublic->getName(), Lib, HeaderType::Public,
+ Directory))
+ return Err;
+
+ if (DirPrivate)
+ if (Error Err = scanHeaders(DirPrivate->getName(), Lib, HeaderType::Private,
+ Directory))
+ return Err;
+
+ return Error::success();
+}
+
+static bool isFramework(StringRef Path) {
+ while (Path.back() == '/')
+ Path = Path.slice(0, Path.size() - 1);
+
+ return llvm::StringSwitch<bool>(llvm::sys::path::extension(Path))
+ .Case(".framework", true)
+ .Default(false);
+}
+
+Library &
+DirectoryScanner::getOrCreateLibrary(StringRef Path,
+ std::vector<Library> &Libs) const {
+ if (Path.consume_front(RootPath) && Path.empty())
+ Path = "/";
+
+ auto LibIt =
+ find_if(Libs, [Path](const Library &L) { return L.getPath() == Path; });
+ if (LibIt != Libs.end())
+ return *LibIt;
+
+ Libs.emplace_back(Path);
+ return Libs.back();
+}
+
+Error DirectoryScanner::scanHeaders(StringRef Path, Library &Lib,
+ HeaderType Type, StringRef BasePath,
+ StringRef ParentPath) const {
+ std::error_code ec;
+ auto &FS = FM.getVirtualFileSystem();
+ PathSeq SubDirectories;
+ for (vfs::directory_iterator i = FS.dir_begin(Path, ec), ie; i != ie;
+ i.increment(ec)) {
+ StringRef HeaderPath = i->path();
+ if (ec)
+ return createStringError(ec, "unable to read: " + HeaderPath);
+
+ if (sys::fs::is_symlink_file(HeaderPath))
+ continue;
+
+ // Ignore tmp files from unifdef.
+ const StringRef Filename = sys::path::filename(HeaderPath);
+ if (Filename.starts_with("."))
+ continue;
+
+ // If it is a directory, remember the subdirectory.
+ if (FM.getOptionalDirectoryRef(HeaderPath))
+ SubDirectories.push_back(HeaderPath.str());
+
+ if (!isHeaderFile(HeaderPath))
+ continue;
+
+ // Skip files that do not exist. This usually happens for broken symlinks.
+ if (FS.status(HeaderPath) == std::errc::no_such_file_or_directory)
+ continue;
+
+ auto IncludeName = createIncludeHeaderName(HeaderPath);
+ Lib.addHeaderFile(HeaderPath, Type,
+ IncludeName.has_value() ? IncludeName.value() : "");
+ }
+
+ // Go through the subdirectories.
+ // Sort the sub-directory first since different file systems might have
+ // different traverse order.
+ llvm::sort(SubDirectories);
+ if (ParentPath.empty())
+ ParentPath = Path;
+  for (const StringRef Dir : SubDirectories)
+    if (Error Err = scanHeaders(Dir, Lib, Type, BasePath, ParentPath)) return Err;
+
+ return Error::success();
+}
+
+llvm::Error
+DirectoryScanner::scanMultipleFrameworks(StringRef Directory,
+ std::vector<Library> &Libs) const {
+ std::error_code ec;
+ auto &FS = FM.getVirtualFileSystem();
+ for (vfs::directory_iterator i = FS.dir_begin(Directory, ec), ie; i != ie;
+ i.increment(ec)) {
+ StringRef Curr = i->path();
+
+ // Skip files that do not exist. This usually happens for broken symlinks.
+ if (ec == std::errc::no_such_file_or_directory) {
+ ec.clear();
+ continue;
+ }
+ if (ec)
+ return createStringError(ec, Curr);
+
+ if (sys::fs::is_symlink_file(Curr))
+ continue;
+
+ if (isFramework(Curr)) {
+ if (!FM.getOptionalDirectoryRef(Curr))
+ continue;
+ Library &Framework = getOrCreateLibrary(Curr, Libs);
+ if (Error Err = scanFrameworkDirectory(Curr, Framework))
+ return Err;
+ }
+ }
+
+ return Error::success();
+}
+
+llvm::Error
+DirectoryScanner::scanSubFrameworksDirectory(StringRef Directory,
+ std::vector<Library> &Libs) const {
+ if (FM.getOptionalDirectoryRef(Directory))
+ return scanMultipleFrameworks(Directory, Libs);
+
+ std::error_code ec = std::make_error_code(std::errc::not_a_directory);
+ return createStringError(ec, Directory);
+}
+
+/// FIXME: How to handle versions? For now scan them separately as independent
+/// frameworks.
+llvm::Error
+DirectoryScanner::scanFrameworkVersionsDirectory(StringRef Path,
+ Library &Lib) const {
+ std::error_code ec;
+ auto &FS = FM.getVirtualFileSystem();
+ for (vfs::directory_iterator i = FS.dir_begin(Path, ec), ie; i != ie;
+ i.increment(ec)) {
+ const StringRef Curr = i->path();
+
+ // Skip files that do not exist. This usually happens for broken symlinks.
+ if (ec == std::errc::no_such_file_or_directory) {
+ ec.clear();
+ continue;
+ }
+ if (ec)
+ return createStringError(ec, Curr);
+
+ if (sys::fs::is_symlink_file(Curr))
+ continue;
+
+ // Each version should be a framework directory.
+ if (!FM.getOptionalDirectoryRef(Curr))
+ continue;
+
+ Library &VersionedFramework =
+ getOrCreateLibrary(Curr, Lib.FrameworkVersions);
+ if (Error Err = scanFrameworkDirectory(Curr, VersionedFramework))
+ return Err;
+ }
+
+ return Error::success();
+}
+
+llvm::Error DirectoryScanner::scanFrameworkDirectory(StringRef Path,
+ Library &Framework) const {
+ // If the framework is inside Kernel or IOKit, scan headers in the different
+ // directories separately.
+ Framework.IsUnwrappedDylib =
+ Path.contains("Kernel.framework") || Path.contains("IOKit.framework");
+
+ // Unfortunately we cannot identify symlinks in the VFS. We assume that if
+ // there is a Versions directory, then we have symlinks and directly proceed
+ // to the Versions folder.
+ std::error_code ec;
+ auto &FS = FM.getVirtualFileSystem();
+
+ for (vfs::directory_iterator i = FS.dir_begin(Path, ec), ie; i != ie;
+ i.increment(ec)) {
+ StringRef Curr = i->path();
+ // Skip files that do not exist. This usually happens for broken symlinks.
+ if (ec == std::errc::no_such_file_or_directory) {
+ ec.clear();
+ continue;
+ }
+
+ if (ec)
+ return createStringError(ec, Curr);
+
+ if (sys::fs::is_symlink_file(Curr))
+ continue;
+
+ StringRef FileName = sys::path::filename(Curr);
+ // Scan all "public" headers.
+ if (FileName.contains("Headers")) {
+ if (Error Err = scanHeaders(Curr, Framework, HeaderType::Public, Curr))
+ return Err;
+ continue;
+ }
+ // Scan all "private" headers.
+ if (FileName.contains("PrivateHeaders")) {
+ if (Error Err = scanHeaders(Curr, Framework, HeaderType::Private, Curr))
+ return Err;
+ continue;
+ }
+ // Scan sub frameworks.
+ if (FileName.contains("Frameworks")) {
+ if (Error Err = scanSubFrameworksDirectory(Curr, Framework.SubFrameworks))
+ return Err;
+ continue;
+ }
+ // Check for versioned frameworks.
+ if (FileName.contains("Versions")) {
+ if (Error Err = scanFrameworkVersionsDirectory(Curr, Framework))
+ return Err;
+ continue;
+ }
+ }
+
+ return Error::success();
+}
+
+llvm::Error DirectoryScanner::scanForFrameworks(StringRef Directory) {
+ RootPath = "";
+
+ // Expect a certain directory structure and naming convention to find
+ // frameworks.
+ static const char *SubDirectories[] = {"System/Library/Frameworks/",
+ "System/Library/PrivateFrameworks/"};
+
+ // Check if the directory is already a framework.
+ if (isFramework(Directory)) {
+ Library &Framework = getOrCreateLibrary(Directory, Libraries);
+ if (Error Err = scanFrameworkDirectory(Directory, Framework))
+ return Err;
+ return Error::success();
+ }
+
+ // Check known sub-directory locations.
+ for (const auto *SubDir : SubDirectories) {
+ SmallString<PATH_MAX> Path(Directory);
+ sys::path::append(Path, SubDir);
+
+ if (Error Err = scanMultipleFrameworks(Path, Libraries))
+ return Err;
+ }
+
+ return Error::success();
+}
+} // namespace clang::installapi
diff --git a/contrib/llvm-project/clang/lib/InstallAPI/DylibVerifier.cpp b/contrib/llvm-project/clang/lib/InstallAPI/DylibVerifier.cpp
new file mode 100644
index 000000000000..d5d760767b41
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/InstallAPI/DylibVerifier.cpp
@@ -0,0 +1,1005 @@
+//===- DylibVerifier.cpp ----------------------------------------*- C++--*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/InstallAPI/DylibVerifier.h"
+#include "DiagnosticBuilderWrappers.h"
+#include "clang/InstallAPI/FrontendRecords.h"
+#include "clang/InstallAPI/InstallAPIDiagnostic.h"
+#include "llvm/Demangle/Demangle.h"
+#include "llvm/TextAPI/DylibReader.h"
+
+using namespace llvm::MachO;
+
+namespace clang {
+namespace installapi {
+
+/// Metadata stored about a mapping of a declaration to a symbol.
+/// Metadata stored about a mapping of a declaration to a symbol.
+struct DylibVerifier::SymbolContext {
+  // Name to use for all querying and verification
+  // purposes.
+  std::string SymbolName{""};
+
+  // Kind to map symbol type against record.
+  EncodeKind Kind = EncodeKind::GlobalSymbol;
+
+  // Frontend Attributes tied to the AST.
+  const FrontendAttrs *FA = nullptr;
+
+  // The ObjCInterface symbol type, if applicable. None indicates the symbol
+  // does not come from an @interface declaration.
+  ObjCIFSymbolKind ObjCIFKind = ObjCIFSymbolKind::None;
+
+  // Whether Decl is inlined.
+  bool Inlined = false;
+};
+
+/// Lazily-populated state for resolving dylib symbols back to source
+/// locations via a dSYM bundle.
+struct DylibVerifier::DWARFContext {
+  // Track whether DSYM parsing has already been attempted to avoid re-parsing.
+  bool ParsedDSYM{false};
+
+  // Lookup table for source locations by symbol name.
+  DylibReader::SymbolToSourceLocMap SourceLocs{};
+};
+
+// Returns true when Name looks like an Itanium-mangled C++ symbol, allowing
+// for the Darwin leading-underscore convention (one or two extra '_').
+static bool isCppMangled(StringRef Name) {
+  // InstallAPI currently only supports itanium manglings.
+  return (Name.starts_with("_Z") || Name.starts_with("__Z") ||
+          Name.starts_with("___Z"));
+}
+
+// Demangles an Itanium C++ symbol for display; returns the input unchanged
+// when it is not mangled or demangling fails.
+static std::string demangle(StringRef Name) {
+  // InstallAPI currently only supports itanium manglings.
+  if (!isCppMangled(Name))
+    return Name.str();
+  char *Result = llvm::itaniumDemangle(Name);
+  if (!Result)
+    return Name.str();
+
+  // itaniumDemangle returns a malloc'd buffer; copy it then release.
+  std::string Demangled(Result);
+  free(Result);
+  return Demangled;
+}
+
+// Builds the human-readable name used in diagnostics for a symbol: flag
+// annotations (weak-def/weak-ref/tlv), then either an ObjC-specific prefix or
+// a possibly-demangled, possibly-underscore-stripped symbol name.
+std::string DylibVerifier::getAnnotatedName(const Record *R,
+                                            SymbolContext &SymCtx,
+                                            bool ValidSourceLoc) {
+  assert(!SymCtx.SymbolName.empty() && "Expected symbol name");
+
+  const StringRef SymbolName = SymCtx.SymbolName;
+  std::string PrettyName =
+      (Demangle && (SymCtx.Kind == EncodeKind::GlobalSymbol))
+          ? demangle(SymbolName)
+          : SymbolName.str();
+
+  std::string Annotation;
+  if (R->isWeakDefined())
+    Annotation += "(weak-def) ";
+  if (R->isWeakReferenced())
+    Annotation += "(weak-ref) ";
+  if (R->isThreadLocalValue())
+    Annotation += "(tlv) ";
+
+  // Check if symbol represents only part of a @interface declaration.
+  switch (SymCtx.ObjCIFKind) {
+  default:
+    break;
+  case ObjCIFSymbolKind::EHType:
+    return Annotation + "Exception Type of " + PrettyName;
+  case ObjCIFSymbolKind::MetaClass:
+    return Annotation + "Metaclass of " + PrettyName;
+  case ObjCIFSymbolKind::Class:
+    return Annotation + "Class of " + PrettyName;
+  }
+
+  // Only print symbol type prefix or leading "_" if there is no source location
+  // tied to it. This can only ever happen when the location has to come from
+  // debug info.
+  if (ValidSourceLoc) {
+    StringRef PrettyNameRef(PrettyName);
+    if ((SymCtx.Kind == EncodeKind::GlobalSymbol) &&
+        !isCppMangled(SymbolName) && PrettyNameRef.starts_with("_"))
+      return Annotation + PrettyNameRef.drop_front(1).str();
+    return Annotation + PrettyName;
+  }
+
+  // No source location: spell out the symbol kind so the user can identify it.
+  switch (SymCtx.Kind) {
+  case EncodeKind::GlobalSymbol:
+    return Annotation + PrettyName;
+  case EncodeKind::ObjectiveCInstanceVariable:
+    return Annotation + "(ObjC IVar) " + PrettyName;
+  case EncodeKind::ObjectiveCClass:
+    return Annotation + "(ObjC Class) " + PrettyName;
+  case EncodeKind::ObjectiveCClassEHType:
+    return Annotation + "(ObjC Class EH) " + PrettyName;
+  }
+
+  llvm_unreachable("unexpected case for EncodeKind");
+}
+
+// Combines the running verification state with a new result. Invalid and
+// NoVerify are sticky, and Ignore never downgrades an already Valid state.
+static DylibVerifier::Result updateResult(const DylibVerifier::Result Prev,
+                                          const DylibVerifier::Result Curr) {
+  if (Prev == Curr)
+    return Prev;
+
+  // Never update from invalid or noverify state.
+  if ((Prev == DylibVerifier::Result::Invalid) ||
+      (Prev == DylibVerifier::Result::NoVerify))
+    return Prev;
+
+  // Don't let an ignored verification remove a valid one.
+  if (Prev == DylibVerifier::Result::Valid &&
+      Curr == DylibVerifier::Result::Ignore)
+    return Prev;
+
+  return Curr;
+}
+// __private_extern__ is a deprecated specifier that clang does not
+// respect in all contexts, it should just be considered hidden for InstallAPI.
+// Returns true when D carries SC_PrivateExtern storage class.
+static bool shouldIgnorePrivateExternAttr(const Decl *D) {
+  // Use dyn_cast, not cast: D may be neither a FunctionDecl nor a VarDecl,
+  // and cast<> asserts (UB in release builds) on a kind mismatch, which also
+  // made the VarDecl branch unreachable.
+  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+    return FD->getStorageClass() == StorageClass::SC_PrivateExtern;
+  if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+    return VD->getStorageClass() == StorageClass::SC_PrivateExtern;
+
+  return false;
+}
+
+// Looks up the dylib record matching Name for the given symbol kind.
+// Returns nullptr when the slice has no such record. Marked static for
+// internal linkage, consistent with the other file-local helpers here
+// (updateResult, isCppMangled, demangle, ...).
+static Record *findRecordFromSlice(const RecordsSlice *Slice, StringRef Name,
+                                   EncodeKind Kind) {
+  switch (Kind) {
+  case EncodeKind::GlobalSymbol:
+    return Slice->findGlobal(Name);
+  case EncodeKind::ObjectiveCInstanceVariable:
+    // A '.' indicates a scoped ("Class.ivar") name.
+    return Slice->findObjCIVar(Name.contains('.'), Name);
+  case EncodeKind::ObjectiveCClass:
+  case EncodeKind::ObjectiveCClassEHType:
+    return Slice->findObjCInterface(Name);
+  }
+  llvm_unreachable("unexpected end when finding record");
+}
+
+// Folds State into the frontend-wide verification state (see updateResult).
+void DylibVerifier::updateState(Result State) {
+  Ctx.FrontendState = updateResult(Ctx.FrontendState, State);
+}
+
+// Records a verified symbol into the final export set. When no explicit
+// targets are given, the currently-active target is used.
+void DylibVerifier::addSymbol(const Record *R, SymbolContext &SymCtx,
+                              TargetList &&Targets) {
+  if (Targets.empty())
+    Targets = {Ctx.Target};
+
+  Exports->addGlobal(SymCtx.Kind, SymCtx.SymbolName, R->getFlags(), Targets);
+}
+
+// Obsoleted declarations are skipped for verification. For zippered libraries
+// the location is remembered so verifyRemainingSymbols can still diagnose a
+// matching exported dylib symbol later.
+bool DylibVerifier::shouldIgnoreObsolete(const Record *R, SymbolContext &SymCtx,
+                                         const Record *DR) {
+  if (!SymCtx.FA->Avail.isObsoleted())
+    return false;
+
+  if (Zippered)
+    DeferredZipperedSymbols[SymCtx.SymbolName].emplace_back(ZipperedDeclSource{
+        SymCtx.FA, &Ctx.Diag->getSourceManager(), Ctx.Target});
+  return true;
+}
+
+// Returns true when the symbol is satisfied by a re-exported library for the
+// active target, in which case this library need not verify it itself.
+bool DylibVerifier::shouldIgnoreReexport(const Record *R,
+                                         SymbolContext &SymCtx) const {
+  StringRef SymName = SymCtx.SymbolName;
+  // Linker directive symbols can never be ignored.
+  if (SymName.starts_with("$ld$"))
+    return false;
+
+  if (Reexports.empty())
+    return false;
+
+  for (const InterfaceFile &Lib : Reexports) {
+    if (!Lib.hasTarget(Ctx.Target))
+      continue;
+    if (auto Sym = Lib.getSymbol(SymCtx.Kind, SymName, SymCtx.ObjCIFKind))
+      if ((*Sym)->hasTarget(Ctx.Target))
+        return true;
+  }
+  return false;
+}
+
+// For zippered libraries, a symbol hidden on one platform but already exported
+// (from the other platform's pass) is not a violation; ignore it.
+bool DylibVerifier::shouldIgnoreInternalZipperedSymbol(
+    const Record *R, const SymbolContext &SymCtx) const {
+  if (!Zippered)
+    return false;
+
+  return Exports->findSymbol(SymCtx.Kind, SymCtx.SymbolName,
+                             SymCtx.ObjCIFKind) != nullptr;
+}
+
+// Defers unavailable-declaration checking for zippered libraries until all
+// platforms have been parsed; returns true when the symbol was deferred.
+bool DylibVerifier::shouldIgnoreZipperedAvailability(const Record *R,
+                                                     SymbolContext &SymCtx) {
+  if (!(Zippered && SymCtx.FA->Avail.isUnavailable()))
+    return false;
+
+  // Collect source location in case there is an exported symbol to diagnose
+  // during `verifyRemainingSymbols`.
+  DeferredZipperedSymbols[SymCtx.SymbolName].emplace_back(
+      ZipperedDeclSource{SymCtx.FA, SourceManagers.back().get(), Ctx.Target});
+
+  return true;
+}
+
+// Reconciles the class/metaclass/ehtype symbol set declared for an
+// @interface against the symbols actually present in the dylib. Returns
+// false (and emits an error) only when the dylib is missing or hiding the
+// specific symbol the declaration requires.
+bool DylibVerifier::compareObjCInterfaceSymbols(const Record *R,
+                                                SymbolContext &SymCtx,
+                                                const ObjCInterfaceRecord *DR) {
+  const bool IsDeclVersionComplete =
+      ((SymCtx.ObjCIFKind & ObjCIFSymbolKind::Class) ==
+       ObjCIFSymbolKind::Class) &&
+      ((SymCtx.ObjCIFKind & ObjCIFSymbolKind::MetaClass) ==
+       ObjCIFSymbolKind::MetaClass);
+
+  const bool IsDylibVersionComplete = DR->isCompleteInterface();
+
+  // The common case, a complete ObjCInterface.
+  if (IsDeclVersionComplete && IsDylibVersionComplete)
+    return true;
+
+  // Unknown linkage means the symbol is absent from the dylib entirely;
+  // otherwise it exists but is hidden.
+  auto PrintDiagnostic = [&](auto SymLinkage, const Record *Record,
+                             StringRef SymName, bool PrintAsWarning = false) {
+    if (SymLinkage == RecordLinkage::Unknown)
+      Ctx.emitDiag([&]() {
+        Ctx.Diag->Report(SymCtx.FA->Loc, PrintAsWarning
+                                             ? diag::warn_library_missing_symbol
+                                             : diag::err_library_missing_symbol)
+            << SymName;
+      });
+    else
+      Ctx.emitDiag([&]() {
+        Ctx.Diag->Report(SymCtx.FA->Loc, PrintAsWarning
+                                             ? diag::warn_library_hidden_symbol
+                                             : diag::err_library_hidden_symbol)
+            << SymName;
+      });
+  };
+
+  if (IsDeclVersionComplete) {
+    // The decl represents a complete ObjCInterface, but the symbols in the
+    // dylib do not. Determine which symbol is missing. To keep older projects
+    // building, treat this as a warning.
+    if (!DR->isExportedSymbol(ObjCIFSymbolKind::Class)) {
+      SymCtx.ObjCIFKind = ObjCIFSymbolKind::Class;
+      PrintDiagnostic(DR->getLinkageForSymbol(ObjCIFSymbolKind::Class), R,
+                      getAnnotatedName(R, SymCtx),
+                      /*PrintAsWarning=*/true);
+    }
+    if (!DR->isExportedSymbol(ObjCIFSymbolKind::MetaClass)) {
+      SymCtx.ObjCIFKind = ObjCIFSymbolKind::MetaClass;
+      PrintDiagnostic(DR->getLinkageForSymbol(ObjCIFSymbolKind::MetaClass), R,
+                      getAnnotatedName(R, SymCtx),
+                      /*PrintAsWarning=*/true);
+    }
+    return true;
+  }
+
+  if (DR->isExportedSymbol(SymCtx.ObjCIFKind)) {
+    if (!IsDylibVersionComplete) {
+      // Both the declaration and dylib have a non-complete interface.
+      // Downgrade to a plain global so the raw symbol name is matched.
+      SymCtx.Kind = EncodeKind::GlobalSymbol;
+      SymCtx.SymbolName = R->getName();
+    }
+    return true;
+  }
+
+  // At this point that means there was not a matching class symbol
+  // to represent the one discovered as a declaration.
+  PrintDiagnostic(DR->getLinkageForSymbol(SymCtx.ObjCIFKind), R,
+                  SymCtx.SymbolName);
+  return false;
+}
+
+// Cross-checks the declared visibility of a symbol against the dylib.
+// Exported decls must exist and be exported in the dylib; hidden decls that
+// the dylib nonetheless exports are diagnosed per verification mode.
+DylibVerifier::Result DylibVerifier::compareVisibility(const Record *R,
+                                                       SymbolContext &SymCtx,
+                                                       const Record *DR) {
+
+  if (R->isExported()) {
+    if (!DR) {
+      Ctx.emitDiag([&]() {
+        Ctx.Diag->Report(SymCtx.FA->Loc, diag::err_library_missing_symbol)
+            << getAnnotatedName(R, SymCtx);
+      });
+      return Result::Invalid;
+    }
+    if (DR->isInternal()) {
+      Ctx.emitDiag([&]() {
+        Ctx.Diag->Report(SymCtx.FA->Loc, diag::err_library_hidden_symbol)
+            << getAnnotatedName(R, SymCtx);
+      });
+      return Result::Invalid;
+    }
+  }
+
+  // Emit a diagnostic for hidden declarations with external symbols, except
+  // when there's an inlined attribute.
+  if ((R->isInternal() && !SymCtx.Inlined) && DR && DR->isExported()) {
+
+    if (Mode == VerificationMode::ErrorsOnly)
+      return Result::Ignore;
+
+    if (shouldIgnorePrivateExternAttr(SymCtx.FA->D))
+      return Result::Ignore;
+
+    if (shouldIgnoreInternalZipperedSymbol(R, SymCtx))
+      return Result::Ignore;
+
+    // Severity depends on the verification mode.
+    unsigned ID;
+    Result Outcome;
+    if (Mode == VerificationMode::ErrorsAndWarnings) {
+      ID = diag::warn_header_hidden_symbol;
+      Outcome = Result::Ignore;
+    } else {
+      ID = diag::err_header_hidden_symbol;
+      Outcome = Result::Invalid;
+    }
+    Ctx.emitDiag([&]() {
+      Ctx.Diag->Report(SymCtx.FA->Loc, ID) << getAnnotatedName(R, SymCtx);
+    });
+    return Outcome;
+  }
+
+  if (R->isInternal())
+    return Result::Ignore;
+
+  return Result::Valid;
+}
+
+// Diagnoses declarations marked unavailable that nonetheless map to a dylib
+// symbol; severity depends on verification mode.
+DylibVerifier::Result DylibVerifier::compareAvailability(const Record *R,
+                                                         SymbolContext &SymCtx,
+                                                         const Record *DR) {
+  if (!SymCtx.FA->Avail.isUnavailable())
+    return Result::Valid;
+
+  if (shouldIgnoreZipperedAvailability(R, SymCtx))
+    return Result::Ignore;
+
+  // NOTE(review): always true here given the early return above; it feeds the
+  // diagnostic's %select arguments.
+  const bool IsDeclAvailable = SymCtx.FA->Avail.isUnavailable();
+
+  switch (Mode) {
+  case VerificationMode::ErrorsAndWarnings:
+    Ctx.emitDiag([&]() {
+      Ctx.Diag->Report(SymCtx.FA->Loc, diag::warn_header_availability_mismatch)
+          << getAnnotatedName(R, SymCtx) << IsDeclAvailable << IsDeclAvailable;
+    });
+    return Result::Ignore;
+  case VerificationMode::Pedantic:
+    Ctx.emitDiag([&]() {
+      Ctx.Diag->Report(SymCtx.FA->Loc, diag::err_header_availability_mismatch)
+          << getAnnotatedName(R, SymCtx) << IsDeclAvailable << IsDeclAvailable;
+    });
+    return Result::Invalid;
+  case VerificationMode::ErrorsOnly:
+    return Result::Ignore;
+  case VerificationMode::Invalid:
+    llvm_unreachable("Unexpected verification mode symbol verification");
+  }
+  llvm_unreachable("Unexpected verification mode symbol verification");
+}
+
+// Compares thread-local and weak-defined flags between the declaration (R)
+// and the dylib record (DR). "dylib" diagnostics report the flag the binary
+// has; "header" diagnostics report the flag the declaration has. Returns
+// true when the flags agree.
+bool DylibVerifier::compareSymbolFlags(const Record *R, SymbolContext &SymCtx,
+                                       const Record *DR) {
+  if (DR->isThreadLocalValue() && !R->isThreadLocalValue()) {
+    Ctx.emitDiag([&]() {
+      Ctx.Diag->Report(SymCtx.FA->Loc, diag::err_dylib_symbol_flags_mismatch)
+          << getAnnotatedName(DR, SymCtx) << DR->isThreadLocalValue();
+    });
+    return false;
+  }
+  if (!DR->isThreadLocalValue() && R->isThreadLocalValue()) {
+    Ctx.emitDiag([&]() {
+      Ctx.Diag->Report(SymCtx.FA->Loc, diag::err_header_symbol_flags_mismatch)
+          << getAnnotatedName(R, SymCtx) << R->isThreadLocalValue();
+    });
+    return false;
+  }
+
+  if (DR->isWeakDefined() && !R->isWeakDefined()) {
+    Ctx.emitDiag([&]() {
+      // Report the dylib's flag (as the tlv branch above does); streaming
+      // R->isWeakDefined() here would always print `false`.
+      Ctx.Diag->Report(SymCtx.FA->Loc, diag::err_dylib_symbol_flags_mismatch)
+          << getAnnotatedName(DR, SymCtx) << DR->isWeakDefined();
+    });
+    return false;
+  }
+  if (!DR->isWeakDefined() && R->isWeakDefined()) {
+    Ctx.emitDiag([&]() {
+      Ctx.Diag->Report(SymCtx.FA->Loc, diag::err_header_symbol_flags_mismatch)
+          << getAnnotatedName(R, SymCtx) << R->isWeakDefined();
+    });
+    return false;
+  }
+
+  return true;
+}
+
+// Core verification pipeline for a single declaration-backed record. Each
+// check either early-returns with an updated frontend state or falls through
+// to the next; the order of the checks is significant.
+DylibVerifier::Result DylibVerifier::verifyImpl(Record *R,
+                                                SymbolContext &SymCtx) {
+  R->setVerify();
+  if (!canVerify()) {
+    // Accumulate symbols when not in verifying against dylib.
+    if (R->isExported() && !SymCtx.FA->Avail.isUnavailable() &&
+        !SymCtx.FA->Avail.isObsoleted()) {
+      addSymbol(R, SymCtx);
+    }
+    return Ctx.FrontendState;
+  }
+
+  if (shouldIgnoreReexport(R, SymCtx)) {
+    updateState(Result::Ignore);
+    return Ctx.FrontendState;
+  }
+
+  Record *DR =
+      findRecordFromSlice(Ctx.DylibSlice, SymCtx.SymbolName, SymCtx.Kind);
+  if (DR)
+    DR->setVerify();
+
+  if (shouldIgnoreObsolete(R, SymCtx, DR)) {
+    updateState(Result::Ignore);
+    return Ctx.FrontendState;
+  }
+
+  // Unavailable declarations don't need matching symbols.
+  if (SymCtx.FA->Avail.isUnavailable() && (!DR || DR->isInternal())) {
+    updateState(Result::Valid);
+    return Ctx.FrontendState;
+  }
+
+  Result VisibilityCheck = compareVisibility(R, SymCtx, DR);
+  if (VisibilityCheck != Result::Valid) {
+    updateState(VisibilityCheck);
+    return Ctx.FrontendState;
+  }
+
+  // All missing symbol cases to diagnose have been handled now.
+  if (!DR) {
+    updateState(Result::Ignore);
+    return Ctx.FrontendState;
+  }
+
+  // Check for mismatching ObjC interfaces.
+  if (SymCtx.ObjCIFKind != ObjCIFSymbolKind::None) {
+    if (!compareObjCInterfaceSymbols(
+            R, SymCtx, Ctx.DylibSlice->findObjCInterface(DR->getName()))) {
+      updateState(Result::Invalid);
+      return Ctx.FrontendState;
+    }
+  }
+
+  Result AvailabilityCheck = compareAvailability(R, SymCtx, DR);
+  if (AvailabilityCheck != Result::Valid) {
+    updateState(AvailabilityCheck);
+    return Ctx.FrontendState;
+  }
+
+  if (!compareSymbolFlags(R, SymCtx, DR)) {
+    updateState(Result::Invalid);
+    return Ctx.FrontendState;
+  }
+
+  // Every check passed: record the symbol in the final export set.
+  addSymbol(R, SymCtx);
+  updateState(Result::Valid);
+  return Ctx.FrontendState;
+}
+
+// Verification is disabled (NoVerify) when no dylib was provided to compare
+// against; see setTarget().
+bool DylibVerifier::canVerify() {
+  return Ctx.FrontendState != Result::NoVerify;
+}
+
+// Selects the dylib slice matching target T as the one to verify against.
+void DylibVerifier::assignSlice(const Target &T) {
+  assert(T == Ctx.Target && "Active targets should match.");
+  if (Dylib.empty())
+    return;
+
+  // Note: there are no reexport slices with binaries, as opposed to TBD files,
+  // so it can be assumed that the target match is the active top-level library.
+  auto It = find_if(
+      Dylib, [&T](const auto &Slice) { return T == Slice->getTarget(); });
+
+  assert(It != Dylib.end() && "Target slice should always exist.");
+  Ctx.DylibSlice = It->get();
+}
+
+// Switches the active verification target, resetting per-target diagnostic
+// state. Without a dylib, verification is marked NoVerify.
+void DylibVerifier::setTarget(const Target &T) {
+  Ctx.Target = T;
+  Ctx.DiscoveredFirstError = false;
+  if (Dylib.empty()) {
+    updateState(Result::NoVerify);
+    return;
+  }
+  updateState(Result::Ignore);
+  assignSlice(T);
+}
+
+// Retains the SourceManager (one per parsed platform pass) and points the
+// diagnostics engine at it, so deferred diagnostics can resolve locations.
+void DylibVerifier::setSourceManager(
+    IntrusiveRefCntPtr<SourceManager> SourceMgr) {
+  if (!Ctx.Diag)
+    return;
+  SourceManagers.push_back(std::move(SourceMgr));
+  Ctx.Diag->setSourceManager(SourceManagers.back().get());
+}
+
+// Verifies an ObjC instance variable, keyed by its "SuperClass.ivar" scoped
+// symbol name.
+DylibVerifier::Result DylibVerifier::verify(ObjCIVarRecord *R,
+                                            const FrontendAttrs *FA,
+                                            const StringRef SuperClass) {
+  if (R->isVerified())
+    return getState();
+
+  std::string FullName =
+      ObjCIVarRecord::createScopedName(SuperClass, R->getName());
+  SymbolContext SymCtx{FullName, EncodeKind::ObjectiveCInstanceVariable, FA};
+  return verifyImpl(R, SymCtx);
+}
+
+// Builds the bitmask of which @interface-related symbols (class, metaclass,
+// ehtype) the record has any known linkage for.
+static ObjCIFSymbolKind assignObjCIFSymbolKind(const ObjCInterfaceRecord *R) {
+  ObjCIFSymbolKind Result = ObjCIFSymbolKind::None;
+  if (R->getLinkageForSymbol(ObjCIFSymbolKind::Class) != RecordLinkage::Unknown)
+    Result |= ObjCIFSymbolKind::Class;
+  if (R->getLinkageForSymbol(ObjCIFSymbolKind::MetaClass) !=
+      RecordLinkage::Unknown)
+    Result |= ObjCIFSymbolKind::MetaClass;
+  if (R->getLinkageForSymbol(ObjCIFSymbolKind::EHType) !=
+      RecordLinkage::Unknown)
+    Result |= ObjCIFSymbolKind::EHType;
+  return Result;
+}
+
+// Verifies an ObjC @interface declaration against the dylib.
+DylibVerifier::Result DylibVerifier::verify(ObjCInterfaceRecord *R,
+                                            const FrontendAttrs *FA) {
+  if (R->isVerified())
+    return getState();
+  SymbolContext SymCtx;
+  SymCtx.SymbolName = R->getName();
+  SymCtx.ObjCIFKind = assignObjCIFSymbolKind(R);
+
+  // Classes with the exception attribute are encoded as their EH type symbol.
+  SymCtx.Kind = R->hasExceptionAttribute() ? EncodeKind::ObjectiveCClassEHType
+                                           : EncodeKind::ObjectiveCClass;
+  SymCtx.FA = FA;
+
+  return verifyImpl(R, SymCtx);
+}
+
+// Verifies a global (function or variable) declaration against the dylib.
+DylibVerifier::Result DylibVerifier::verify(GlobalRecord *R,
+                                            const FrontendAttrs *FA) {
+  if (R->isVerified())
+    return getState();
+
+  // Global classifications could be obfuscated with `asm`, so re-derive the
+  // symbol name and kind from the raw name.
+  SimpleSymbol Sym = parseSymbol(R->getName());
+  SymbolContext SymCtx;
+  SymCtx.SymbolName = Sym.Name;
+  SymCtx.Kind = Sym.Kind;
+  SymCtx.FA = FA;
+  SymCtx.Inlined = R->isInlined();
+  return verifyImpl(R, SymCtx);
+}
+
+// Emits a diagnostic via Report, printing a one-time "target" banner before
+// the first diagnostic for the active target, and an optional file:line
+// prefix when a debug-info source location is available.
+void DylibVerifier::VerifierContext::emitDiag(llvm::function_ref<void()> Report,
+                                              RecordLoc *Loc) {
+  if (!DiscoveredFirstError) {
+    Diag->Report(diag::warn_target)
+        << (PrintArch ? getArchitectureName(Target.Arch)
+                      : getTargetTripleName(Target));
+    DiscoveredFirstError = true;
+  }
+  if (Loc && Loc->isValid())
+    llvm::errs() << Loc->File << ":" << Loc->Line << ":" << 0 << ": ";
+
+  Report();
+}
+
+// The existence of weak-defined RTTI can not always be inferred from the
+// header files because they can be generated as part of an implementation
+// file.
+// InstallAPI doesn't warn about weak-defined RTTI, because this doesn't affect
+// static linking and so can be ignored for text-api files.
+static bool shouldIgnoreCpp(StringRef Name, bool IsWeakDef) {
+  // __ZTI*/__ZTS* are Itanium typeinfo and typeinfo-name symbols.
+  return (IsWeakDef &&
+          (Name.starts_with("__ZTI") || Name.starts_with("__ZTS")));
+}
+// Second verification pass: walks symbols found in the dylib and diagnoses
+// exported symbols that had no matching declaration in the parsed headers,
+// plus deferred zippered-availability violations. Severity depends on the
+// verification mode; special $ld$ linker symbols always error.
+void DylibVerifier::visitSymbolInDylib(const Record &R, SymbolContext &SymCtx) {
+  // Undefined symbols should not be in InstallAPI generated text-api files.
+  if (R.isUndefined()) {
+    updateState(Result::Valid);
+    return;
+  }
+
+  // Internal symbols should not be in InstallAPI generated text-api files.
+  if (R.isInternal()) {
+    updateState(Result::Valid);
+    return;
+  }
+
+  // Allow zippered symbols with potentially mismatching availability
+  // between macOS and macCatalyst in the final text-api file.
+  const StringRef SymbolName(SymCtx.SymbolName);
+  if (const Symbol *Sym = Exports->findSymbol(SymCtx.Kind, SymCtx.SymbolName,
+                                              SymCtx.ObjCIFKind)) {
+    if (Sym->hasArchitecture(Ctx.Target.Arch)) {
+      updateState(Result::Ignore);
+      return;
+    }
+  }
+
+  const bool IsLinkerSymbol = SymbolName.starts_with("$ld$");
+
+  if (R.isVerified()) {
+    // Check for unavailable symbols.
+    // This should only occur in the zippered case where we ignored
+    // availability until all headers have been parsed.
+    auto It = DeferredZipperedSymbols.find(SymCtx.SymbolName);
+    if (It == DeferredZipperedSymbols.end()) {
+      updateState(Result::Valid);
+      return;
+    }
+
+    ZipperedDeclSources Locs;
+    for (const ZipperedDeclSource &ZSource : It->second) {
+      if (ZSource.FA->Avail.isObsoleted()) {
+        updateState(Result::Ignore);
+        return;
+      }
+      if (ZSource.T.Arch != Ctx.Target.Arch)
+        continue;
+      Locs.emplace_back(ZSource);
+    }
+    assert(Locs.size() == 2 && "Expected two decls for zippered symbol");
+
+    // Print violating declarations per platform.
+    for (const ZipperedDeclSource &ZSource : Locs) {
+      unsigned DiagID = 0;
+      if (Mode == VerificationMode::Pedantic || IsLinkerSymbol) {
+        updateState(Result::Invalid);
+        DiagID = diag::err_header_availability_mismatch;
+      } else if (Mode == VerificationMode::ErrorsAndWarnings) {
+        updateState(Result::Ignore);
+        DiagID = diag::warn_header_availability_mismatch;
+      } else {
+        updateState(Result::Ignore);
+        return;
+      }
+      // Bypass emitDiag banner and print the target every time.
+      Ctx.Diag->setSourceManager(ZSource.SrcMgr);
+      Ctx.Diag->Report(diag::warn_target) << getTargetTripleName(ZSource.T);
+      Ctx.Diag->Report(ZSource.FA->Loc, DiagID)
+          << getAnnotatedName(&R, SymCtx) << ZSource.FA->Avail.isUnavailable()
+          << ZSource.FA->Avail.isUnavailable();
+    }
+    return;
+  }
+
+  // Weak-defined RTTI is not diagnosable from headers; skip it.
+  if (shouldIgnoreCpp(SymbolName, R.isWeakDefined())) {
+    updateState(Result::Valid);
+    return;
+  }
+
+  // Symbols covered by an explicit alias list are accepted as-is.
+  if (Aliases.count({SymbolName.str(), SymCtx.Kind})) {
+    updateState(Result::Valid);
+    return;
+  }
+
+  // All checks at this point classify as some kind of violation.
+  // The different verification modes dictate whether they are reported to the
+  // user.
+  if (IsLinkerSymbol || (Mode > VerificationMode::ErrorsOnly))
+    accumulateSrcLocForDylibSymbols();
+  RecordLoc Loc = DWARFCtx->SourceLocs.lookup(SymCtx.SymbolName);
+
+  // Regardless of verification mode, error out on mismatched special linker
+  // symbols.
+  if (IsLinkerSymbol) {
+    Ctx.emitDiag(
+        [&]() {
+          Ctx.Diag->Report(diag::err_header_symbol_missing)
+              << getAnnotatedName(&R, SymCtx, Loc.isValid());
+        },
+        &Loc);
+    updateState(Result::Invalid);
+    return;
+  }
+
+  // Missing declarations for exported symbols are hard errors on Pedantic mode.
+  if (Mode == VerificationMode::Pedantic) {
+    Ctx.emitDiag(
+        [&]() {
+          Ctx.Diag->Report(diag::err_header_symbol_missing)
+              << getAnnotatedName(&R, SymCtx, Loc.isValid());
+        },
+        &Loc);
+    updateState(Result::Invalid);
+    return;
+  }
+
+  // Missing declarations for exported symbols are warnings on ErrorsAndWarnings
+  // mode.
+  if (Mode == VerificationMode::ErrorsAndWarnings) {
+    Ctx.emitDiag(
+        [&]() {
+          Ctx.Diag->Report(diag::warn_header_symbol_missing)
+              << getAnnotatedName(&R, SymCtx, Loc.isValid());
+        },
+        &Loc);
+    updateState(Result::Ignore);
+    return;
+  }
+
+  // Missing declarations are dropped for ErrorsOnly mode. It is the last
+  // remaining mode.
+  updateState(Result::Ignore);
+  return;
+}
+
+// RecordVisitor hook: verify a global symbol found in the dylib.
+void DylibVerifier::visitGlobal(const GlobalRecord &R) {
+  SymbolContext SymCtx;
+  SimpleSymbol Sym = parseSymbol(R.getName());
+  SymCtx.SymbolName = Sym.Name;
+  SymCtx.Kind = Sym.Kind;
+  visitSymbolInDylib(R, SymCtx);
+}
+
+// RecordVisitor hook: verify an ObjC ivar symbol found in the dylib, using
+// its "SuperClass.ivar" scoped name.
+void DylibVerifier::visitObjCIVar(const ObjCIVarRecord &R,
+                                  const StringRef Super) {
+  SymbolContext SymCtx;
+  SymCtx.SymbolName = ObjCIVarRecord::createScopedName(Super, R.getName());
+  SymCtx.Kind = EncodeKind::ObjectiveCInstanceVariable;
+  visitSymbolInDylib(R, SymCtx);
+}
+
+// Parses the dSYM (at most once per target, guarded by ParsedDSYM) to build
+// the symbol-to-source-location lookup used for dylib-only diagnostics.
+void DylibVerifier::accumulateSrcLocForDylibSymbols() {
+  if (DSYMPath.empty())
+    return;
+
+  assert(DWARFCtx != nullptr && "Expected an initialized DWARFContext");
+  if (DWARFCtx->ParsedDSYM)
+    return;
+  DWARFCtx->ParsedDSYM = true;
+  DWARFCtx->SourceLocs =
+      DylibReader::accumulateSourceLocFromDSYM(DSYMPath, Ctx.Target);
+}
+
+// RecordVisitor hook: verify an ObjC interface from the dylib. When the
+// record covers more than the EHType symbol, the class (and optional EH)
+// symbols are visited individually; then all of its ivars.
+void DylibVerifier::visitObjCInterface(const ObjCInterfaceRecord &R) {
+  SymbolContext SymCtx;
+  SymCtx.SymbolName = R.getName();
+  SymCtx.ObjCIFKind = assignObjCIFSymbolKind(&R);
+  if (SymCtx.ObjCIFKind > ObjCIFSymbolKind::EHType) {
+    if (R.hasExceptionAttribute()) {
+      SymCtx.Kind = EncodeKind::ObjectiveCClassEHType;
+      visitSymbolInDylib(R, SymCtx);
+    }
+    SymCtx.Kind = EncodeKind::ObjectiveCClass;
+    visitSymbolInDylib(R, SymCtx);
+  } else {
+    SymCtx.Kind = R.hasExceptionAttribute() ? EncodeKind::ObjectiveCClassEHType
+                                            : EncodeKind::ObjectiveCClass;
+    visitSymbolInDylib(R, SymCtx);
+  }
+
+  for (const ObjCIVarRecord *IV : R.getObjCIVars())
+    visitObjCIVar(*IV, R.getName());
+}
+
+// RecordVisitor hook: category symbols themselves are not verified, only the
+// ivars they add to their extended class.
+void DylibVerifier::visitObjCCategory(const ObjCCategoryRecord &R) {
+  for (const ObjCIVarRecord *IV : R.getObjCIVars())
+    visitObjCIVar(*IV, R.getSuperClassName());
+}
+
+// After all headers are processed, walk every dylib slice (one per
+// architecture) to diagnose exported dylib symbols that never matched a
+// declaration. Returns the final accumulated verification state.
+DylibVerifier::Result DylibVerifier::verifyRemainingSymbols() {
+  if (getState() == Result::NoVerify)
+    return Result::NoVerify;
+  assert(!Dylib.empty() && "No binary to verify against");
+
+  DWARFContext DWARFInfo;
+  DWARFCtx = &DWARFInfo;
+  // Start from a sentinel target so the first slice always triggers setup.
+  Ctx.Target = Target(Architecture::AK_unknown, PlatformType::PLATFORM_UNKNOWN);
+  for (std::shared_ptr<RecordsSlice> Slice : Dylib) {
+    // Only visit one slice per architecture.
+    if (Ctx.Target.Arch == Slice->getTarget().Arch)
+      continue;
+    Ctx.DiscoveredFirstError = false;
+    Ctx.PrintArch = true;
+    Ctx.Target = Slice->getTarget();
+    Ctx.DylibSlice = Slice.get();
+    Slice->visit(*this);
+  }
+  return getState();
+}
+
+// Compares load-command-level attributes (targets, versions, install name,
+// umbrella, reexports, allowable clients, rpaths) between the installAPI
+// invocation and what the dylib records. Returns true when all attributes
+// match or only warning-level differences were found.
+bool DylibVerifier::verifyBinaryAttrs(const ArrayRef<Target> ProvidedTargets,
+                                      const BinaryAttrs &ProvidedBA,
+                                      const LibAttrs &ProvidedReexports,
+                                      const LibAttrs &ProvidedClients,
+                                      const LibAttrs &ProvidedRPaths,
+                                      const FileType &FT) {
+  assert(!Dylib.empty() && "Need dylib to verify.");
+
+  // Pickup any load commands that can differ per slice to compare.
+  TargetList DylibTargets;
+  LibAttrs DylibReexports;
+  LibAttrs DylibClients;
+  LibAttrs DylibRPaths;
+  for (const std::shared_ptr<RecordsSlice> &RS : Dylib) {
+    DylibTargets.push_back(RS->getTarget());
+    const BinaryAttrs &BinInfo = RS->getBinaryAttrs();
+    for (const StringRef LibName : BinInfo.RexportedLibraries)
+      DylibReexports[LibName].set(DylibTargets.back().Arch);
+    for (const StringRef LibName : BinInfo.AllowableClients)
+      DylibClients[LibName].set(DylibTargets.back().Arch);
+    // Compare attributes that are only representable in >= TBD_V5.
+    if (FT >= FileType::TBD_V5)
+      for (const StringRef Name : BinInfo.RPaths)
+        DylibRPaths[Name].set(DylibTargets.back().Arch);
+  }
+
+  // Check targets first.
+  ArchitectureSet ProvidedArchs = mapToArchitectureSet(ProvidedTargets);
+  ArchitectureSet DylibArchs = mapToArchitectureSet(DylibTargets);
+  if (ProvidedArchs != DylibArchs) {
+    Ctx.Diag->Report(diag::err_architecture_mismatch)
+        << ProvidedArchs << DylibArchs;
+    return false;
+  }
+  auto ProvidedPlatforms = mapToPlatformVersionSet(ProvidedTargets);
+  auto DylibPlatforms = mapToPlatformVersionSet(DylibTargets);
+  if (ProvidedPlatforms != DylibPlatforms) {
+    // Same platform set but differing versions is only a warning; a
+    // different platform set entirely is an error.
+    const bool DiffMinOS =
+        mapToPlatformSet(ProvidedTargets) == mapToPlatformSet(DylibTargets);
+    if (DiffMinOS)
+      Ctx.Diag->Report(diag::warn_platform_mismatch)
+          << ProvidedPlatforms << DylibPlatforms;
+    else {
+      Ctx.Diag->Report(diag::err_platform_mismatch)
+          << ProvidedPlatforms << DylibPlatforms;
+      return false;
+    }
+  }
+
+  // Because InstallAPI requires certain attributes to match across architecture
+  // slices, take the first one to compare those with.
+  const BinaryAttrs &DylibBA = (*Dylib.begin())->getBinaryAttrs();
+
+  if (ProvidedBA.InstallName != DylibBA.InstallName) {
+    Ctx.Diag->Report(diag::err_install_name_mismatch)
+        << ProvidedBA.InstallName << DylibBA.InstallName;
+    return false;
+  }
+
+  if (ProvidedBA.CurrentVersion != DylibBA.CurrentVersion) {
+    Ctx.Diag->Report(diag::err_current_version_mismatch)
+        << ProvidedBA.CurrentVersion << DylibBA.CurrentVersion;
+    return false;
+  }
+
+  if (ProvidedBA.CompatVersion != DylibBA.CompatVersion) {
+    Ctx.Diag->Report(diag::err_compatibility_version_mismatch)
+        << ProvidedBA.CompatVersion << DylibBA.CompatVersion;
+    return false;
+  }
+
+  if (ProvidedBA.AppExtensionSafe != DylibBA.AppExtensionSafe) {
+    Ctx.Diag->Report(diag::err_appextension_safe_mismatch)
+        << (ProvidedBA.AppExtensionSafe ? "true" : "false")
+        << (DylibBA.AppExtensionSafe ? "true" : "false");
+    return false;
+  }
+
+  if (!DylibBA.TwoLevelNamespace) {
+    Ctx.Diag->Report(diag::err_no_twolevel_namespace);
+    return false;
+  }
+
+  if (ProvidedBA.OSLibNotForSharedCache != DylibBA.OSLibNotForSharedCache) {
+    Ctx.Diag->Report(diag::err_shared_cache_eligiblity_mismatch)
+        << (ProvidedBA.OSLibNotForSharedCache ? "true" : "false")
+        << (DylibBA.OSLibNotForSharedCache ? "true" : "false");
+    return false;
+  }
+
+  if (ProvidedBA.ParentUmbrella.empty() && !DylibBA.ParentUmbrella.empty()) {
+    Ctx.Diag->Report(diag::err_parent_umbrella_missing)
+        << "installAPI option" << DylibBA.ParentUmbrella;
+    return false;
+  }
+
+  if (!ProvidedBA.ParentUmbrella.empty() && DylibBA.ParentUmbrella.empty()) {
+    Ctx.Diag->Report(diag::err_parent_umbrella_missing)
+        << "binary file" << ProvidedBA.ParentUmbrella;
+    return false;
+  }
+
+  if ((!ProvidedBA.ParentUmbrella.empty()) &&
+      (ProvidedBA.ParentUmbrella != DylibBA.ParentUmbrella)) {
+    Ctx.Diag->Report(diag::err_parent_umbrella_mismatch)
+        << ProvidedBA.ParentUmbrella << DylibBA.ParentUmbrella;
+    return false;
+  }
+
+  // Compares per-library attribute maps (library name -> architectures).
+  // When Fatal is false, every difference is reported but the comparison
+  // still succeeds (used for warning-level rpath checks).
+  auto CompareLibraries = [&](const LibAttrs &Provided, const LibAttrs &Dylib,
+                              unsigned DiagID_missing, unsigned DiagID_mismatch,
+                              bool Fatal = true) {
+    if (Provided == Dylib)
+      return true;
+
+    for (const llvm::StringMapEntry<ArchitectureSet> &PAttr : Provided) {
+      const auto DAttrIt = Dylib.find(PAttr.getKey());
+      if (DAttrIt == Dylib.end()) {
+        Ctx.Diag->Report(DiagID_missing) << "binary file" << PAttr;
+        if (Fatal)
+          return false;
+        // There is no dylib-side entry to compare against; move on instead
+        // of dereferencing the end() iterator below.
+        continue;
+      }
+
+      if (PAttr.getValue() != DAttrIt->getValue()) {
+        Ctx.Diag->Report(DiagID_mismatch) << PAttr << *DAttrIt;
+        if (Fatal)
+          return false;
+      }
+    }
+
+    for (const llvm::StringMapEntry<ArchitectureSet> &DAttr : Dylib) {
+      const auto PAttrIt = Provided.find(DAttr.getKey());
+      if (PAttrIt == Provided.end()) {
+        Ctx.Diag->Report(DiagID_missing) << "installAPI option" << DAttr;
+        if (!Fatal)
+          continue;
+        return false;
+      }
+
+      if (PAttrIt->getValue() != DAttr.getValue()) {
+        if (Fatal)
+          llvm_unreachable("this case was already covered above.");
+      }
+    }
+    return true;
+  };
+
+  if (!CompareLibraries(ProvidedReexports, DylibReexports,
+                        diag::err_reexported_libraries_missing,
+                        diag::err_reexported_libraries_mismatch))
+    return false;
+
+  if (!CompareLibraries(ProvidedClients, DylibClients,
+                        diag::err_allowable_clients_missing,
+                        diag::err_allowable_clients_mismatch))
+    return false;
+
+  if (FT >= FileType::TBD_V5) {
+    // Ignore rpath differences if building an asan variant, since the
+    // compiler injects additional paths.
+    // FIXME: Building with sanitizers does not always change the install
+    // name, so this is not a foolproof solution.
+    if (!ProvidedBA.InstallName.ends_with("_asan")) {
+      // Non-fatal comparison: rpath differences only warn.
+      if (!CompareLibraries(ProvidedRPaths, DylibRPaths,
+                            diag::warn_rpaths_missing,
+                            diag::warn_rpaths_mismatch,
+                            /*Fatal=*/false))
+        return true;
+    }
+  }
+
+  return true;
+}
+
+// Finalizes the export set: materializes each alias as an exported symbol
+// (copying flags/targets from its base symbol when present), then releases
+// ownership of the set to the caller.
+std::unique_ptr<SymbolSet> DylibVerifier::takeExports() {
+  for (const auto &[Alias, Base] : Aliases) {
+    TargetList Targets;
+    SymbolFlags Flags = SymbolFlags::None;
+    if (const Symbol *Sym = Exports->findSymbol(Base.second, Base.first)) {
+      Flags = Sym->getFlags();
+      Targets = {Sym->targets().begin(), Sym->targets().end()};
+    }
+
+    Record R(Alias.first, RecordLinkage::Exported, Flags);
+    SymbolContext SymCtx;
+    SymCtx.SymbolName = Alias.first;
+    SymCtx.Kind = Alias.second;
+    addSymbol(&R, SymCtx, std::move(Targets));
+  }
+
+  return std::move(Exports);
+}
+
+} // namespace installapi
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/InstallAPI/FileList.cpp b/contrib/llvm-project/clang/lib/InstallAPI/FileList.cpp
new file mode 100644
index 000000000000..65610903840a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/InstallAPI/FileList.cpp
@@ -0,0 +1,192 @@
+//===- FileList.cpp ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/InstallAPI/FileList.h"
+#include "clang/Basic/DiagnosticFrontend.h"
+#include "clang/InstallAPI/FileList.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/JSON.h"
+#include "llvm/TextAPI/TextAPIError.h"
+#include <optional>
+
+// clang-format off
+/*
+InstallAPI JSON Input Format specification.
+
+{
+ "headers" : [ # Required: Key must exist.
+ { # Optional: May contain 0 or more header inputs.
+ "path" : "/usr/include/mach-o/dlfn.h", # Required: Path should point to destination
+ # location where applicable.
+ "type" : "public", # Required: Maps to HeaderType for header.
+ "language": "c++" # Optional: Language mode for header.
+ }
+ ],
+ "version" : "3" # Required: Version 3 supports language mode
+ & project header input.
+}
+*/
+// clang-format on
+
+using namespace llvm;
+using namespace llvm::json;
+using namespace llvm::MachO;
+using namespace clang::installapi;
+
+namespace {
+// Stateful parser for the InstallAPI JSON file list (format documented in the
+// banner above). Callers populate InputBuffer/FM, call parse(), then read the
+// collected headers out of HeaderList.
+class Implementation {
+private:
+  // Return the string stored under Key in Obj, or fail with the given Error.
+  Expected<StringRef> parseString(const Object *Obj, StringRef Key,
+                                  StringRef Error);
+  // Read the required "path" field of a header entry.
+  Expected<StringRef> parsePath(const Object *Obj);
+  // Read the required "type" field and map it onto HeaderType.
+  Expected<HeaderType> parseType(const Object *Obj);
+  // Read the optional "language" field; std::nullopt when absent.
+  std::optional<clang::Language> parseLanguage(const Object *Obj);
+  // Walk the "headers" array, appending each entry to HeaderList.
+  Error parseHeaders(Array &Headers);
+
+public:
+  std::unique_ptr<MemoryBuffer> InputBuffer; // Raw JSON input being parsed.
+  clang::FileManager *FM;  // May be null; when set, used to validate paths.
+  unsigned Version;        // Parsed "version" field; versions 1-3 supported.
+  HeaderSeq HeaderList;    // Accumulated results of a successful parse.
+
+  // Parse the whole JSON document held in Input.
+  Error parse(StringRef Input);
+};
+
+// Fetch a required string field; the caller supplies the diagnostic text used
+// when the key is missing or not a string.
+Expected<StringRef>
+Implementation::parseString(const Object *Obj, StringRef Key, StringRef Error) {
+  auto Str = Obj->getString(Key);
+  if (!Str)
+    return make_error<StringError>(Error, inconvertibleErrorCode());
+  return *Str;
+}
+
+// Map the "type" field to a HeaderType. "project" is only accepted for file
+// lists of version >= 2; anything else is an invalid-input error.
+Expected<HeaderType> Implementation::parseType(const Object *Obj) {
+  auto TypeStr =
+      parseString(Obj, "type", "required field 'type' not specified");
+  if (!TypeStr)
+    return TypeStr.takeError();
+
+  if (*TypeStr == "public")
+    return HeaderType::Public;
+  else if (*TypeStr == "private")
+    return HeaderType::Private;
+  else if (*TypeStr == "project" && Version >= 2)
+    return HeaderType::Project;
+
+  return make_error<TextAPIError>(TextAPIErrorCode::InvalidInputFormat,
+                                  "unsupported header type");
+}
+
+// Read the required "path" field of a header entry.
+Expected<StringRef> Implementation::parsePath(const Object *Obj) {
+  auto Path = parseString(Obj, "path", "required field 'path' not specified");
+  if (!Path)
+    return Path.takeError();
+
+  return *Path;
+}
+
+// Read the optional "language" field. Absent field -> nullopt; an
+// unrecognized language string maps to Language::Unknown rather than erroring.
+std::optional<clang::Language>
+Implementation::parseLanguage(const Object *Obj) {
+  auto Language = Obj->getString("language");
+  if (!Language)
+    return std::nullopt;
+
+  return StringSwitch<clang::Language>(*Language)
+      .Case("c", clang::Language::C)
+      .Case("c++", clang::Language::CXX)
+      .Case("objective-c", clang::Language::ObjC)
+      .Case("objective-c++", clang::Language::ObjCXX)
+      .Default(clang::Language::Unknown);
+}
+
+// Parse every entry of the "headers" array into HeaderList. Project headers
+// are recorded as-is (no include-name computation, no existence check); other
+// header types are validated against the FileManager when one is provided.
+Error Implementation::parseHeaders(Array &Headers) {
+  for (const auto &H : Headers) {
+    auto *Obj = H.getAsObject();
+    if (!Obj)
+      return make_error<StringError>("expect a JSON object",
+                                     inconvertibleErrorCode());
+    auto Type = parseType(Obj);
+    if (!Type)
+      return Type.takeError();
+    auto Path = parsePath(Obj);
+    if (!Path)
+      return Path.takeError();
+    auto Language = parseLanguage(Obj);
+
+    StringRef PathStr = *Path;
+    if (*Type == HeaderType::Project) {
+      // Project headers are not included via <Framework/Header.h> names.
+      HeaderList.emplace_back(
+          HeaderFile{PathStr, *Type, /*IncludeName=*/"", Language});
+      continue;
+    }
+
+    // Reject paths that do not resolve to an existing file.
+    if (FM)
+      if (!FM->getOptionalFileRef(PathStr))
+        return createFileError(
+            PathStr, make_error_code(std::errc::no_such_file_or_directory));
+
+    auto IncludeName = createIncludeHeaderName(PathStr);
+    HeaderList.emplace_back(PathStr, *Type,
+                            IncludeName.has_value() ? IncludeName.value() : "",
+                            Language);
+  }
+
+  return Error::success();
+}
+
+// Top-level entry: parse the JSON document, validate the required "version"
+// field (1-3), then process the optional "headers" array.
+Error Implementation::parse(StringRef Input) {
+  auto Val = json::parse(Input);
+  if (!Val)
+    return Val.takeError();
+
+  auto *Root = Val->getAsObject();
+  if (!Root)
+    return make_error<StringError>("not a JSON object",
+                                   inconvertibleErrorCode());
+
+  auto VersionStr = Root->getString("version");
+  if (!VersionStr)
+    return make_error<TextAPIError>(TextAPIErrorCode::InvalidInputFormat,
+                                    "required field 'version' not specified");
+  // StringRef::getAsInteger returns true on conversion failure.
+  if (VersionStr->getAsInteger(10, Version))
+    return make_error<TextAPIError>(TextAPIErrorCode::InvalidInputFormat,
+                                    "invalid version number");
+
+  if (Version < 1 || Version > 3)
+    return make_error<TextAPIError>(TextAPIErrorCode::InvalidInputFormat,
+                                    "unsupported version");
+
+  // Not specifying any header files should be atypical, but valid.
+  auto Headers = Root->getArray("headers");
+  if (!Headers)
+    return Error::success();
+
+  Error Err = parseHeaders(*Headers);
+  if (Err)
+    return Err;
+
+  return Error::success();
+}
+} // namespace
+
+// Public entry point: parse InputBuffer as an InstallAPI JSON file list and
+// append the resulting headers to Destination. FM may be null, in which case
+// header paths are not checked for existence.
+llvm::Error
+FileListReader::loadHeaders(std::unique_ptr<MemoryBuffer> InputBuffer,
+                            HeaderSeq &Destination, clang::FileManager *FM) {
+  Implementation Impl;
+  Impl.InputBuffer = std::move(InputBuffer);
+  Impl.FM = FM;
+
+  if (llvm::Error Err = Impl.parse(Impl.InputBuffer->getBuffer()))
+    return Err;
+
+  // Move the parsed headers into the caller's sequence in one pass.
+  Destination.reserve(Destination.size() + Impl.HeaderList.size());
+  llvm::move(Impl.HeaderList, std::back_inserter(Destination));
+
+  return Error::success();
+}
diff --git a/contrib/llvm-project/clang/lib/InstallAPI/Frontend.cpp b/contrib/llvm-project/clang/lib/InstallAPI/Frontend.cpp
new file mode 100644
index 000000000000..04d06f46d265
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/InstallAPI/Frontend.cpp
@@ -0,0 +1,220 @@
+//===- Frontend.cpp ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/InstallAPI/Frontend.h"
+#include "clang/AST/Availability.h"
+#include "clang/InstallAPI/FrontendRecords.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+
+using namespace llvm;
+using namespace llvm::MachO;
+
+namespace clang::installapi {
+// Record a global symbol in the underlying MachO slice and attach the
+// frontend-only attributes (availability, originating Decl + location, header
+// access level) needed later for verification/diagnostics.
+std::pair<GlobalRecord *, FrontendAttrs *> FrontendRecordsSlice::addGlobal(
+    StringRef Name, RecordLinkage Linkage, GlobalRecord::Kind GV,
+    const clang::AvailabilityInfo Avail, const Decl *D, const HeaderType Access,
+    SymbolFlags Flags, bool Inlined) {
+
+  GlobalRecord *GR =
+      llvm::MachO::RecordsSlice::addGlobal(Name, Linkage, GV, Flags, Inlined);
+  auto Result = FrontendRecords.insert(
+      {GR, FrontendAttrs{Avail, D, D->getLocation(), Access}});
+  return {GR, &(Result.first->second)};
+}
+
+// Record an ObjC interface. Class + metaclass symbols are always implied;
+// the EHType symbol is added only when the class carries (or inherits) the
+// objc_exception attribute.
+std::pair<ObjCInterfaceRecord *, FrontendAttrs *>
+FrontendRecordsSlice::addObjCInterface(StringRef Name, RecordLinkage Linkage,
+                                       const clang::AvailabilityInfo Avail,
+                                       const Decl *D, HeaderType Access,
+                                       bool IsEHType) {
+  ObjCIFSymbolKind SymType =
+      ObjCIFSymbolKind::Class | ObjCIFSymbolKind::MetaClass;
+  if (IsEHType)
+    SymType |= ObjCIFSymbolKind::EHType;
+
+  ObjCInterfaceRecord *ObjCR =
+      llvm::MachO::RecordsSlice::addObjCInterface(Name, Linkage, SymType);
+  auto Result = FrontendRecords.insert(
+      {ObjCR, FrontendAttrs{Avail, D, D->getLocation(), Access}});
+  return {ObjCR, &(Result.first->second)};
+}
+
+// Record an ObjC category extending ClassToExtend, with frontend attributes.
+std::pair<ObjCCategoryRecord *, FrontendAttrs *>
+FrontendRecordsSlice::addObjCCategory(StringRef ClassToExtend,
+                                      StringRef CategoryName,
+                                      const clang::AvailabilityInfo Avail,
+                                      const Decl *D, HeaderType Access) {
+  ObjCCategoryRecord *ObjCR =
+      llvm::MachO::RecordsSlice::addObjCCategory(ClassToExtend, CategoryName);
+  auto Result = FrontendRecords.insert(
+      {ObjCR, FrontendAttrs{Avail, D, D->getLocation(), Access}});
+  return {ObjCR, &(Result.first->second)};
+}
+
+// Record an ObjC instance variable inside Container.
+std::pair<ObjCIVarRecord *, FrontendAttrs *> FrontendRecordsSlice::addObjCIVar(
+    ObjCContainerRecord *Container, StringRef IvarName, RecordLinkage Linkage,
+    const clang::AvailabilityInfo Avail, const Decl *D, HeaderType Access,
+    const clang::ObjCIvarDecl::AccessControl AC) {
+  // If the decl otherwise would have been exported, check their access control.
+  // Ivar's linkage is also determined by this.
+  if ((Linkage == RecordLinkage::Exported) &&
+      ((AC == ObjCIvarDecl::Private) || (AC == ObjCIvarDecl::Package)))
+    Linkage = RecordLinkage::Internal;
+  ObjCIVarRecord *ObjCR =
+      llvm::MachO::RecordsSlice::addObjCIVar(Container, IvarName, Linkage);
+  auto Result = FrontendRecords.insert(
+      {ObjCR, FrontendAttrs{Avail, D, D->getLocation(), Access}});
+
+  return {ObjCR, &(Result.first->second)};
+}
+
+// Resolve which access level (public/private/project) a file entry belongs
+// to. Results are memoized in KnownFiles; a cached HeaderType::Unknown marker
+// means "looked up before, not an installapi input". Falls back to matching
+// by include name for headers found at a different location than the input.
+std::optional<HeaderType>
+InstallAPIContext::findAndRecordFile(const FileEntry *FE,
+                                     const Preprocessor &PP) {
+  if (!FE)
+    return std::nullopt;
+
+  // Check if header has been looked up already and whether it is something
+  // installapi should use.
+  auto It = KnownFiles.find(FE);
+  if (It != KnownFiles.end()) {
+    if (It->second != HeaderType::Unknown)
+      return It->second;
+    else
+      return std::nullopt;
+  }
+
+  // If file was not found, search by how the header was
+  // included. This is primarily to resolve headers found
+  // in a different location than what passed directly as input.
+  StringRef IncludeName = PP.getHeaderSearchInfo().getIncludeNameForHeader(FE);
+  auto BackupIt = KnownIncludes.find(IncludeName.str());
+  if (BackupIt != KnownIncludes.end()) {
+    // Cache the resolution so future lookups hit the fast path above.
+    KnownFiles[FE] = BackupIt->second;
+    return BackupIt->second;
+  }
+
+  // Record that the file was found to avoid future string searches for the
+  // same file.
+  KnownFiles.insert({FE, HeaderType::Unknown});
+  return std::nullopt;
+}
+
+// Register an input header in the lookup caches used by findAndRecordFile:
+// by FileEntry always, and additionally by include name when it has one.
+void InstallAPIContext::addKnownHeader(const HeaderFile &H) {
+  auto FE = FM->getFile(H.getPath());
+  if (!FE)
+    return; // File does not exist.
+  KnownFiles[*FE] = H.getType();
+
+  if (!H.useIncludeName())
+    return;
+
+  KnownIncludes[H.getIncludeName()] = H.getType();
+}
+
+// Map a language mode to the source-file extension used for the synthesized
+// umbrella input (see createInputBuffer). Only C-family modes are valid here.
+static StringRef getFileExtension(clang::Language Lang) {
+  switch (Lang) {
+  default:
+    llvm_unreachable("Unexpected language option.");
+  case clang::Language::C:
+    return ".c";
+  case clang::Language::CXX:
+    return ".cpp";
+  case clang::Language::ObjC:
+    return ".m";
+  case clang::Language::ObjCXX:
+    return ".mm";
+  }
+}
+
+// Synthesize an in-memory "umbrella" source file that #include/#imports every
+// non-excluded input header matching the context's current access level and
+// registers each one via addKnownHeader. Returns nullptr when no header of
+// that access level exists.
+std::unique_ptr<MemoryBuffer> createInputBuffer(InstallAPIContext &Ctx) {
+  assert(Ctx.Type != HeaderType::Unknown &&
+         "unexpected access level for parsing");
+  SmallString<4096> Contents;
+  raw_svector_ostream OS(Contents);
+  for (const HeaderFile &H : Ctx.InputHeaders) {
+    if (H.isExcluded())
+      continue;
+    if (H.getType() != Ctx.Type)
+      continue;
+    // C/C++ use #include; ObjC/ObjC++ modes use #import.
+    if (Ctx.LangMode == Language::C || Ctx.LangMode == Language::CXX)
+      OS << "#include ";
+    else
+      OS << "#import ";
+    // Prefer the angle-bracketed include name when one was computed.
+    if (H.useIncludeName())
+      OS << "<" << H.getIncludeName() << ">\n";
+    else
+      OS << "\"" << H.getPath() << "\"\n";
+
+    Ctx.addKnownHeader(H);
+  }
+  if (Contents.empty())
+    return nullptr;
+
+  // Buffer name encodes triple/access/language so diagnostics are traceable.
+  SmallString<64> BufferName(
+      {"installapi-includes-", Ctx.Slice->getTriple().str(), "-",
+       getName(Ctx.Type), getFileExtension(Ctx.LangMode)});
+  return llvm::MemoryBuffer::getMemBufferCopy(Contents, BufferName);
+}
+
+// Locate the library named by InstallName on disk, mimicking linker search
+// order: framework paths for frameworks, -L library paths for plain dylibs
+// (except dylibs embedded inside a framework), then the generic search paths.
+// A sibling .tbd stub is preferred over the binary when both exist. Returns
+// an empty string when nothing is found.
+std::string findLibrary(StringRef InstallName, FileManager &FM,
+                        ArrayRef<std::string> FrameworkSearchPaths,
+                        ArrayRef<std::string> LibrarySearchPaths,
+                        ArrayRef<std::string> SearchPaths) {
+  auto getLibrary =
+      [&](const StringRef FullPath) -> std::optional<std::string> {
+    // Prefer TextAPI files when possible.
+    SmallString<PATH_MAX> TextAPIFilePath = FullPath;
+    replace_extension(TextAPIFilePath, ".tbd");
+
+    if (FM.getOptionalFileRef(TextAPIFilePath))
+      return std::string(TextAPIFilePath);
+
+    if (FM.getOptionalFileRef(FullPath))
+      return std::string(FullPath);
+
+    return std::nullopt;
+  };
+
+  // Frameworks install as .../Name.framework/Name.
+  const StringRef Filename = sys::path::filename(InstallName);
+  const bool IsFramework = sys::path::parent_path(InstallName)
+                               .ends_with((Filename + ".framework").str());
+  if (IsFramework) {
+    for (const StringRef Path : FrameworkSearchPaths) {
+      SmallString<PATH_MAX> FullPath(Path);
+      sys::path::append(FullPath, Filename + StringRef(".framework"), Filename);
+      if (auto LibOrNull = getLibrary(FullPath))
+        return *LibOrNull;
+    }
+  } else {
+    // Copy Apple's linker behavior: If this is a .dylib inside a framework, do
+    // not search -L paths.
+    bool IsEmbeddedDylib = (sys::path::extension(InstallName) == ".dylib") &&
+                           InstallName.contains(".framework/");
+    if (!IsEmbeddedDylib) {
+      for (const StringRef Path : LibrarySearchPaths) {
+        SmallString<PATH_MAX> FullPath(Path);
+        sys::path::append(FullPath, Filename);
+        if (auto LibOrNull = getLibrary(FullPath))
+          return *LibOrNull;
+      }
+    }
+  }
+
+  // Fallback: resolve the full install name against the generic search paths.
+  for (const StringRef Path : SearchPaths) {
+    SmallString<PATH_MAX> FullPath(Path);
+    sys::path::append(FullPath, InstallName);
+    if (auto LibOrNull = getLibrary(FullPath))
+      return *LibOrNull;
+  }
+
+  return {};
+}
+
+} // namespace clang::installapi
diff --git a/contrib/llvm-project/clang/lib/InstallAPI/HeaderFile.cpp b/contrib/llvm-project/clang/lib/InstallAPI/HeaderFile.cpp
new file mode 100644
index 000000000000..0b7041ec8147
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/InstallAPI/HeaderFile.cpp
@@ -0,0 +1,88 @@
+//===- HeaderFile.cpp ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/InstallAPI/HeaderFile.h"
+#include "llvm/TextAPI/Utils.h"
+
+using namespace llvm;
+namespace clang::installapi {
+
+// Regex matching framework header paths of the form
+// .../<Name>.framework/(PrivateHeaders|Headers)/<relative header path>.
+llvm::Regex HeaderFile::getFrameworkIncludeRule() {
+  return llvm::Regex("/(.+)\\.framework/(.+)?Headers/(.+)");
+}
+
+// Derive the angle-bracket include name for a header path: everything after
+// the first "/include/" component, or "<Framework>/<header>" for framework
+// headers. Returns std::nullopt when neither pattern applies.
+std::optional<std::string> createIncludeHeaderName(const StringRef FullPath) {
+  // Headers in usr(/local)*/include.
+  std::string Pattern = "/include/";
+  auto PathPrefix = FullPath.find(Pattern);
+  if (PathPrefix != StringRef::npos) {
+    PathPrefix += Pattern.size();
+    return FullPath.drop_front(PathPrefix).str();
+  }
+
+  // Framework Headers.
+  SmallVector<StringRef, 4> Matches;
+  HeaderFile::getFrameworkIncludeRule().match(FullPath, &Matches);
+  // Returned matches are always in stable order.
+  if (Matches.size() != 4)
+    return std::nullopt;
+
+  // Matches[1] is the framework path prefix; keep only its last component.
+  return Matches[1].drop_front(Matches[1].rfind('/') + 1).str() + "/" +
+         Matches[3].str();
+}
+
+// Return true when Path has a recognized C/C++ header extension.
+bool isHeaderFile(StringRef Path) {
+  return StringSwitch<bool>(sys::path::extension(Path))
+      .Cases(".h", ".H", ".hh", ".hpp", ".hxx", true)
+      .Default(false);
+}
+
+// Recursively collect all header files under Directory using the
+// FileManager's (possibly virtual) filesystem. Broken symlinks are skipped;
+// any other traversal error aborts the walk.
+llvm::Expected<PathSeq> enumerateFiles(FileManager &FM, StringRef Directory) {
+  PathSeq Files;
+  std::error_code EC;
+  auto &FS = FM.getVirtualFileSystem();
+  for (llvm::vfs::recursive_directory_iterator i(FS, Directory, EC), ie;
+       i != ie; i.increment(EC)) {
+    if (EC)
+      return errorCodeToError(EC);
+
+    // Skip files that do not exist. This usually happens for broken symlinks.
+    if (FS.status(i->path()) == std::errc::no_such_file_or_directory)
+      continue;
+
+    StringRef Path = i->path();
+    if (isHeaderFile(Path))
+      Files.emplace_back(Path);
+  }
+
+  return Files;
+}
+
+HeaderGlob::HeaderGlob(StringRef GlobString, Regex &&Rule, HeaderType Type)
+    : GlobString(GlobString), Rule(std::move(Rule)), Type(Type) {}
+
+// Test a header against this glob: the access level must match first, then
+// the path is matched against the compiled regex. Records FoundMatch so
+// callers can later diagnose globs that never matched anything.
+bool HeaderGlob::match(const HeaderFile &Header) {
+  if (Header.getType() != Type)
+    return false;
+
+  bool Match = Rule.match(Header.getPath());
+  if (Match)
+    FoundMatch = true;
+  return Match;
+}
+
+// Factory: compile GlobString into a regex and wrap it in a HeaderGlob,
+// forwarding any compilation error from createRegexFromGlob.
+Expected<std::unique_ptr<HeaderGlob>> HeaderGlob::create(StringRef GlobString,
+                                                         HeaderType Type) {
+  auto Rule = MachO::createRegexFromGlob(GlobString);
+  if (!Rule)
+    return Rule.takeError();
+
+  return std::make_unique<HeaderGlob>(GlobString, std::move(*Rule), Type);
+}
+
+} // namespace clang::installapi
diff --git a/contrib/llvm-project/clang/lib/InstallAPI/Library.cpp b/contrib/llvm-project/clang/lib/InstallAPI/Library.cpp
new file mode 100644
index 000000000000..bdfa3535273e
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/InstallAPI/Library.cpp
@@ -0,0 +1,40 @@
+//===- Library.cpp --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/InstallAPI/Library.h"
+
+using namespace llvm;
+namespace clang::installapi {
+
+// Captures "<prefix>/<FrameworkName>.framework/" from an install name.
+// NOTE(review): namespace-scope, non-static definition — consider `static` or
+// an anonymous namespace to keep this TU-local; confirm no other TU links it.
+const Regex Rule("(.+)/(.+)\\.framework/");
+// Extract the framework name component from a framework install name, or ""
+// when the path does not match the expected .framework layout.
+StringRef Library::getFrameworkNameFromInstallName(StringRef InstallName) {
+  assert(InstallName.contains(".framework") && "expected a framework");
+  SmallVector<StringRef, 3> Match;
+  Rule.match(InstallName, &Match);
+  if (Match.empty())
+    return "";
+  // Match.back() is the last capture group: the framework's base name.
+  return Match.back();
+}
+
+// Return the framework's display name: the nearest ancestor path component
+// ending in ".framework", or the base directory's own name as a fallback.
+// Only valid for framework libraries (asserted).
+StringRef Library::getName() const {
+  assert(!IsUnwrappedDylib && "expected a framework");
+  StringRef Path = BaseDirectory;
+
+  // Return the framework name extracted from path.
+  while (!Path.empty()) {
+    if (Path.ends_with(".framework"))
+      return sys::path::filename(Path);
+    Path = sys::path::parent_path(Path);
+  }
+
+  // Otherwise, return the name of the BaseDirectory.
+  Path = BaseDirectory;
+  return sys::path::filename(Path.rtrim("/"));
+}
+
+} // namespace clang::installapi
diff --git a/contrib/llvm-project/clang/lib/InstallAPI/Visitor.cpp b/contrib/llvm-project/clang/lib/InstallAPI/Visitor.cpp
new file mode 100644
index 000000000000..a73ea0b0d124
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/InstallAPI/Visitor.cpp
@@ -0,0 +1,728 @@
+//===- Visitor.cpp ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/InstallAPI/Visitor.h"
+#include "clang/AST/Availability.h"
+#include "clang/AST/ParentMapContext.h"
+#include "clang/AST/VTableBuilder.h"
+#include "clang/Basic/Linkage.h"
+#include "clang/InstallAPI/DylibVerifier.h"
+#include "clang/InstallAPI/FrontendRecords.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Mangler.h"
+
+using namespace llvm;
+using namespace llvm::MachO;
+
+namespace {
+// Simplified vtable/RTTI linkage classification, mirroring the LLVM IR
+// linkage kinds relevant to symbol emission decisions below.
+enum class CXXLinkage {
+  ExternalLinkage,
+  LinkOnceODRLinkage,
+  WeakODRLinkage,
+  PrivateLinkage,
+};
+}
+
+namespace clang::installapi {
+
+// Exported NamedDecl needs to have external linkage and
+// default visibility from LinkageComputer.
+static bool isExported(const NamedDecl *D) {
+  auto LV = D->getLinkageAndVisibility();
+  return isExternallyVisible(LV.getLinkage()) &&
+         (LV.getVisibility() == DefaultVisibility);
+}
+
+// Determine whether a function counts as inline for symbol-emission purposes:
+// true when some redeclaration is inlined and no externally-visible inline
+// definition forces a real symbol to exist.
+static bool isInlined(const FunctionDecl *D) {
+  bool HasInlineAttribute = false;
+  // In C (non-Microsoft, no dllexport), the GNU inline semantics apply even
+  // without an explicit gnu_inline attribute.
+  bool NoCXXAttr =
+      (!D->getASTContext().getLangOpts().CPlusPlus &&
+       !D->getASTContext().getTargetInfo().getCXXABI().isMicrosoft() &&
+       !D->hasAttr<DLLExportAttr>());
+
+  // Check all redeclarations to find an inline attribute or keyword.
+  for (const auto *RD : D->redecls()) {
+    if (!RD->isInlined())
+      continue;
+    HasInlineAttribute = true;
+    if (!(NoCXXAttr || RD->hasAttr<GNUInlineAttr>()))
+      continue;
+    if (RD->doesThisDeclarationHaveABody() &&
+        RD->isInlineDefinitionExternallyVisible())
+      return false;
+  }
+
+  if (!HasInlineAttribute)
+    return false;
+
+  return true;
+}
+
+// Build the SymbolFlags bitmask from weak-definition / thread-local bits.
+static SymbolFlags getFlags(bool WeakDef, bool ThreadLocal = false) {
+  SymbolFlags Result = SymbolFlags::None;
+  if (WeakDef)
+    Result |= SymbolFlags::WeakDefined;
+  if (ThreadLocal)
+    Result |= SymbolFlags::ThreadLocalValue;
+
+  return Result;
+}
+
+// ASTConsumer entry point: traverse the whole TU unless compilation already
+// produced errors (no point collecting symbols from a broken AST).
+void InstallAPIVisitor::HandleTranslationUnit(ASTContext &ASTCtx) {
+  if (ASTCtx.getDiagnostics().hasErrorOccurred())
+    return;
+
+  auto *D = ASTCtx.getTranslationUnitDecl();
+  TraverseDecl(D);
+}
+
+// Compute the linker-level name for a declaration: frontend (Itanium/MS)
+// mangling when required, then the backend/global prefix (e.g. leading '_'
+// on Darwin) applied on top.
+std::string InstallAPIVisitor::getMangledName(const NamedDecl *D) const {
+  SmallString<256> Name;
+  if (MC->shouldMangleDeclName(D)) {
+    raw_svector_ostream NStream(Name);
+    MC->mangleName(D, NStream);
+  } else
+    Name += D->getNameAsString();
+
+  return getBackendMangledName(Name);
+}
+
+// Apply the target DataLayout's global prefix to an already-mangled name.
+std::string InstallAPIVisitor::getBackendMangledName(Twine Name) const {
+  SmallString<256> FinalName;
+  Mangler::getNameWithPrefix(FinalName, Name, DataLayout(Layout));
+  return std::string(FinalName);
+}
+
+// Determine the header access level (public/private/project) for the header
+// that declared D, via the context's file cache. Returns std::nullopt when
+// the declaration has no usable file location or the file is not an
+// installapi input.
+std::optional<HeaderType>
+InstallAPIVisitor::getAccessForDecl(const NamedDecl *D) const {
+  SourceLocation Loc = D->getLocation();
+  if (Loc.isInvalid())
+    return std::nullopt;
+
+  // If the loc refers to a macro expansion, InstallAPI needs to first get the
+  // file location of the expansion.
+  auto FileLoc = SrcMgr.getFileLoc(Loc);
+  FileID ID = SrcMgr.getFileID(FileLoc);
+  if (ID.isInvalid())
+    return std::nullopt;
+
+  const FileEntry *FE = SrcMgr.getFileEntryForID(ID);
+  if (!FE)
+    return std::nullopt;
+
+  auto Header = Ctx.findAndRecordFile(FE, PP);
+  if (!Header.has_value())
+    return std::nullopt;
+
+  HeaderType Access = Header.value();
+  assert(Access != HeaderType::Unknown && "unexpected access level for global");
+  return Access;
+}
+
+/// Check if the interface itself or any of its super classes have an
+/// exception attribute. InstallAPI needs to export an additional symbol
+/// ("OBJC_EHTYPE_$CLASS_NAME") if any of the classes have the exception
+/// attribute.
+static bool hasObjCExceptionAttribute(const ObjCInterfaceDecl *D) {
+  // Walk the superclass chain; the attribute is inherited for EH purposes.
+  for (; D != nullptr; D = D->getSuperClass())
+    if (D->hasAttr<ObjCExceptionAttr>())
+      return true;
+
+  return false;
+}
+// Record and verify every instance variable of an ObjC container. Ivar
+// linkage follows the container's (fragile runtime => Unknown); per-ivar
+// access control can further demote it inside addObjCIVar.
+void InstallAPIVisitor::recordObjCInstanceVariables(
+    const ASTContext &ASTCtx, ObjCContainerRecord *Record, StringRef SuperClass,
+    const llvm::iterator_range<
+        DeclContext::specific_decl_iterator<ObjCIvarDecl>>
+        Ivars) {
+  RecordLinkage Linkage = RecordLinkage::Exported;
+  const RecordLinkage ContainerLinkage = Record->getLinkage();
+  // If fragile, set to unknown.
+  if (ASTCtx.getLangOpts().ObjCRuntime.isFragile())
+    Linkage = RecordLinkage::Unknown;
+  // Linkage should be inherited from container.
+  else if (ContainerLinkage != RecordLinkage::Unknown)
+    Linkage = ContainerLinkage;
+  for (const auto *IV : Ivars) {
+    // Skip ivars whose declaring header could not be classified.
+    auto Access = getAccessForDecl(IV);
+    if (!Access)
+      continue;
+    StringRef Name = IV->getName();
+    const AvailabilityInfo Avail = AvailabilityInfo::createFromDecl(IV);
+    auto AC = IV->getCanonicalAccessControl();
+    auto [ObjCIVR, FA] =
+        Ctx.Slice->addObjCIVar(Record, Name, Linkage, Avail, IV, *Access, AC);
+    Ctx.Verifier->verify(ObjCIVR, FA, SuperClass);
+  }
+}
+
+// Record an ObjC class definition (plus its ivars) and verify it against the
+// dylib. Emits the extra EHType symbol for non-fragile classes carrying the
+// objc_exception attribute.
+bool InstallAPIVisitor::VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D) {
+  // Skip forward declaration for classes (@class)
+  if (!D->isThisDeclarationADefinition())
+    return true;
+
+  // Skip over declarations that access could not be collected for.
+  auto Access = getAccessForDecl(D);
+  if (!Access)
+    return true;
+
+  StringRef Name = D->getObjCRuntimeNameAsString();
+  const RecordLinkage Linkage =
+      isExported(D) ? RecordLinkage::Exported : RecordLinkage::Internal;
+  const AvailabilityInfo Avail = AvailabilityInfo::createFromDecl(D);
+  const bool IsEHType =
+      (!D->getASTContext().getLangOpts().ObjCRuntime.isFragile() &&
+       hasObjCExceptionAttribute(D));
+
+  auto [Class, FA] =
+      Ctx.Slice->addObjCInterface(Name, Linkage, Avail, D, *Access, IsEHType);
+  Ctx.Verifier->verify(Class, FA);
+
+  // Get base class.
+  // NOTE(review): SuperClassName is computed but never used below; the call
+  // passes Class->getName() instead. Confirm whether SuperClassName was
+  // intended here or the variable should be removed.
+  StringRef SuperClassName;
+  if (const auto *SuperClass = D->getSuperClass())
+    SuperClassName = SuperClass->getObjCRuntimeNameAsString();
+
+  recordObjCInstanceVariables(D->getASTContext(), Class, Class->getName(),
+                              D->ivars());
+  return true;
+}
+
+// Record an ObjC category and any instance variables it declares; ivars are
+// verified against the extended interface's name.
+bool InstallAPIVisitor::VisitObjCCategoryDecl(const ObjCCategoryDecl *D) {
+  StringRef CategoryName = D->getName();
+  // Skip over declarations that access could not be collected for.
+  auto Access = getAccessForDecl(D);
+  if (!Access)
+    return true;
+  const AvailabilityInfo Avail = AvailabilityInfo::createFromDecl(D);
+  const ObjCInterfaceDecl *InterfaceD = D->getClassInterface();
+  const StringRef InterfaceName = InterfaceD->getName();
+
+  ObjCCategoryRecord *CategoryRecord =
+      Ctx.Slice->addObjCCategory(InterfaceName, CategoryName, Avail, D, *Access)
+          .first;
+  recordObjCInstanceVariables(D->getASTContext(), CategoryRecord, InterfaceName,
+                              D->ivars());
+  return true;
+}
+
+// Record a file-scope variable as a global symbol. Parameters, record
+// members, function locals, and undeclared templates are all filtered out.
+bool InstallAPIVisitor::VisitVarDecl(const VarDecl *D) {
+  // Skip function parameters.
+  if (isa<ParmVarDecl>(D))
+    return true;
+
+  // Skip variables in records. They are handled separately for C++.
+  if (D->getDeclContext()->isRecord())
+    return true;
+
+  // Skip anything inside functions or methods.
+  if (!D->isDefinedOutsideFunctionOrMethod())
+    return true;
+
+  // If this is a template but not specialization or instantiation, skip.
+  if (D->getASTContext().getTemplateOrSpecializationInfo(D) &&
+      D->getTemplateSpecializationKind() == TSK_Undeclared)
+    return true;
+
+  // Skip over declarations that access could not collected for.
+  auto Access = getAccessForDecl(D);
+  if (!Access)
+    return true;
+
+  const RecordLinkage Linkage =
+      isExported(D) ? RecordLinkage::Exported : RecordLinkage::Internal;
+  const bool WeakDef = D->hasAttr<WeakAttr>();
+  const bool ThreadLocal = D->getTLSKind() != VarDecl::TLS_None;
+  const AvailabilityInfo Avail = AvailabilityInfo::createFromDecl(D);
+  auto [GR, FA] = Ctx.Slice->addGlobal(getMangledName(D), Linkage,
+                                       GlobalRecord::Kind::Variable, Avail, D,
+                                       *Access, getFlags(WeakDef, ThreadLocal));
+  Ctx.Verifier->verify(GR, FA);
+  return true;
+}
+
+// Record a free function (or eligible member function) as a global symbol.
+// C++ methods inside records, ctors/dtors, and uninstantiated templates are
+// skipped; inline functions are recorded with internal linkage.
+bool InstallAPIVisitor::VisitFunctionDecl(const FunctionDecl *D) {
+  if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(D)) {
+    // Skip member function in class templates.
+    if (M->getParent()->getDescribedClassTemplate() != nullptr)
+      return true;
+
+    // Skip methods in CXX RecordDecls.
+    for (const DynTypedNode &P : D->getASTContext().getParents(*M)) {
+      if (P.get<CXXRecordDecl>())
+        return true;
+    }
+
+    // Skip CXX ConstructorDecls and DestructorDecls.
+    if (isa<CXXConstructorDecl>(M) || isa<CXXDestructorDecl>(M))
+      return true;
+  }
+
+  // Skip templated functions.
+  switch (D->getTemplatedKind()) {
+  case FunctionDecl::TK_NonTemplate:
+  case FunctionDecl::TK_DependentNonTemplate:
+    break;
+  case FunctionDecl::TK_MemberSpecialization:
+  case FunctionDecl::TK_FunctionTemplateSpecialization:
+    if (auto *TempInfo = D->getTemplateSpecializationInfo()) {
+      if (!TempInfo->isExplicitInstantiationOrSpecialization())
+        return true;
+    }
+    break;
+  case FunctionDecl::TK_FunctionTemplate:
+  case FunctionDecl::TK_DependentFunctionTemplateSpecialization:
+    return true;
+  }
+
+  auto Access = getAccessForDecl(D);
+  if (!Access)
+    return true;
+  auto Name = getMangledName(D);
+  const AvailabilityInfo Avail = AvailabilityInfo::createFromDecl(D);
+  // Explicit instantiation declarations produce weak definitions.
+  const bool ExplicitInstantiation = D->getTemplateSpecializationKind() ==
+                                     TSK_ExplicitInstantiationDeclaration;
+  const bool WeakDef = ExplicitInstantiation || D->hasAttr<WeakAttr>();
+  const bool Inlined = isInlined(D);
+  const RecordLinkage Linkage = (Inlined || !isExported(D))
+                                    ? RecordLinkage::Internal
+                                    : RecordLinkage::Exported;
+  auto [GR, FA] =
+      Ctx.Slice->addGlobal(Name, Linkage, GlobalRecord::Kind::Function, Avail,
+                           D, *Access, getFlags(WeakDef), Inlined);
+  Ctx.Verifier->verify(GR, FA);
+  return true;
+}
+
+// Decide whether a dynamic class will actually have a vtable symbol emitted,
+// based on its key function (if any) and template specialization kind.
+static bool hasVTable(const CXXRecordDecl *D) {
+  // Check if vtable symbols should be emitted, only dynamic classes need
+  // vtables.
+  if (!D->hasDefinition() || !D->isDynamicClass())
+    return false;
+
+  assert(D->isExternallyVisible() && "Should be externally visible");
+  assert(D->isCompleteDefinition() && "Only works on complete definitions");
+
+  const CXXMethodDecl *KeyFunctionD =
+      D->getASTContext().getCurrentKeyFunction(D);
+  // If this class has a key function, then there is a vtable, possibly internal
+  // though.
+  if (KeyFunctionD) {
+    switch (KeyFunctionD->getTemplateSpecializationKind()) {
+    case TSK_Undeclared:
+    case TSK_ExplicitSpecialization:
+    case TSK_ImplicitInstantiation:
+    case TSK_ExplicitInstantiationDefinition:
+      return true;
+    case TSK_ExplicitInstantiationDeclaration:
+      llvm_unreachable(
+          "Unexpected TemplateSpecializationKind for key function");
+    }
+  } else if (D->isAbstract()) {
+    // If the class is abstract and it doesn't have a key function, it is a
+    // 'pure' virtual class. It doesn't need a vtable.
+    return false;
+  }
+
+  // No key function: only explicit instantiations guarantee a vtable symbol.
+  switch (D->getTemplateSpecializationKind()) {
+  case TSK_Undeclared:
+  case TSK_ExplicitSpecialization:
+  case TSK_ImplicitInstantiation:
+    return false;
+
+  case TSK_ExplicitInstantiationDeclaration:
+  case TSK_ExplicitInstantiationDefinition:
+    return true;
+  }
+
+  llvm_unreachable("Invalid TemplateSpecializationKind!");
+}
+
+// Classify the linkage of a dynamic class's vtable, mirroring how codegen
+// would emit it: driven by the key function's specialization kind when one
+// exists, otherwise by the class's own specialization kind.
+static CXXLinkage getVTableLinkage(const CXXRecordDecl *D) {
+  assert((D->hasDefinition() && D->isDynamicClass()) && "Record has no vtable");
+  assert(D->isExternallyVisible() && "Record should be externally visible");
+  if (D->getVisibility() == HiddenVisibility)
+    return CXXLinkage::PrivateLinkage;
+
+  const CXXMethodDecl *KeyFunctionD =
+      D->getASTContext().getCurrentKeyFunction(D);
+  if (KeyFunctionD) {
+    // If this class has a key function, use that to determine the
+    // linkage of the vtable.
+    switch (KeyFunctionD->getTemplateSpecializationKind()) {
+    case TSK_Undeclared:
+    case TSK_ExplicitSpecialization:
+      // An inline key function yields a linkonce_odr vtable.
+      if (isInlined(KeyFunctionD))
+        return CXXLinkage::LinkOnceODRLinkage;
+      return CXXLinkage::ExternalLinkage;
+    case TSK_ImplicitInstantiation:
+      llvm_unreachable("No external vtable for implicit instantiations");
+    case TSK_ExplicitInstantiationDefinition:
+      return CXXLinkage::WeakODRLinkage;
+    case TSK_ExplicitInstantiationDeclaration:
+      llvm_unreachable(
+          "Unexpected TemplateSpecializationKind for key function");
+    }
+  }
+
+  switch (D->getTemplateSpecializationKind()) {
+  case TSK_Undeclared:
+  case TSK_ExplicitSpecialization:
+  case TSK_ImplicitInstantiation:
+    return CXXLinkage::LinkOnceODRLinkage;
+  case TSK_ExplicitInstantiationDeclaration:
+  case TSK_ExplicitInstantiationDefinition:
+    return CXXLinkage::WeakODRLinkage;
+  }
+
+  llvm_unreachable("Invalid TemplateSpecializationKind!");
+}
+
+// A class's RTTI is weakly defined when the class itself is weak, when it is
+// abstract with no key function, or when its vtable is not externally linked.
+static bool isRTTIWeakDef(const CXXRecordDecl *D) {
+  if (D->hasAttr<WeakAttr>())
+    return true;
+
+  if (D->isAbstract() && D->getASTContext().getCurrentKeyFunction(D) == nullptr)
+    return true;
+
+  if (D->isDynamicClass())
+    return getVTableLinkage(D) != CXXLinkage::ExternalLinkage;
+
+  return false;
+}
+
+// Decide whether InstallAPI should emit RTTI symbols for this class: RTTI
+// must be enabled, the class must be a defined dynamic class, and the RTTI
+// must not be weakly defined (see rationale below).
+static bool hasRTTI(const CXXRecordDecl *D) {
+  if (!D->getASTContext().getLangOpts().RTTI)
+    return false;
+
+  if (!D->hasDefinition())
+    return false;
+
+  if (!D->isDynamicClass())
+    return false;
+
+  // Don't emit weak-def RTTI information. InstallAPI cannot reliably determine
+  // if the final binary will have those weak defined RTTI symbols. This depends
+  // on the optimization level and if the class has been instantiated and used.
+  //
+  // Luckily, the Apple static linker doesn't need those weak defined RTTI
+  // symbols for linking. They are only needed by the runtime linker. That means
+  // they can be safely dropped.
+  if (isRTTIWeakDef(D))
+    return false;
+
+  return true;
+}
+
+// Mangle the RTTI name symbol (_ZTS...) for a class, with backend prefix.
+std::string
+InstallAPIVisitor::getMangledCXXRTTIName(const CXXRecordDecl *D) const {
+  SmallString<256> Name;
+  raw_svector_ostream NameStream(Name);
+  MC->mangleCXXRTTIName(QualType(D->getTypeForDecl(), 0), NameStream);
+
+  return getBackendMangledName(Name);
+}
+
+// Mangle the RTTI object symbol (_ZTI...) for a class, with backend prefix.
+std::string InstallAPIVisitor::getMangledCXXRTTI(const CXXRecordDecl *D) const {
+  SmallString<256> Name;
+  raw_svector_ostream NameStream(Name);
+  MC->mangleCXXRTTI(QualType(D->getTypeForDecl(), 0), NameStream);
+
+  return getBackendMangledName(Name);
+}
+
+// Mangle the vtable symbol (_ZTV...) for a class, with backend prefix.
+std::string
+InstallAPIVisitor::getMangledCXXVTableName(const CXXRecordDecl *D) const {
+  SmallString<256> Name;
+  raw_svector_ostream NameStream(Name);
+  MC->mangleCXXVTable(D, NameStream);
+
+  return getBackendMangledName(Name);
+}
+
+// Mangle a thunk symbol for a (possibly destructor) method, with backend
+// prefix. Destructor thunks need the destructor variant from the GlobalDecl.
+std::string InstallAPIVisitor::getMangledCXXThunk(
+    const GlobalDecl &D, const ThunkInfo &Thunk, bool ElideOverrideInfo) const {
+  SmallString<256> Name;
+  raw_svector_ostream NameStream(Name);
+  const auto *Method = cast<CXXMethodDecl>(D.getDecl());
+  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(Method))
+    MC->mangleCXXDtorThunk(Dtor, D.getDtorType(), Thunk, ElideOverrideInfo,
+                           NameStream);
+  else
+    MC->mangleThunk(Method, Thunk, ElideOverrideInfo, NameStream);
+
+  return getBackendMangledName(Name);
+}
+
+// Mangle a constructor/destructor symbol for the given variant (Type is
+// interpreted as CXXCtorType or CXXDtorType depending on the decl kind).
+std::string InstallAPIVisitor::getMangledCtorDtor(const CXXMethodDecl *D,
+                                                  int Type) const {
+  SmallString<256> Name;
+  raw_svector_ostream NameStream(Name);
+  GlobalDecl GD;
+  if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(D))
+    GD = GlobalDecl(Ctor, CXXCtorType(Type));
+  else {
+    const auto *Dtor = cast<CXXDestructorDecl>(D);
+    GD = GlobalDecl(Dtor, CXXDtorType(Type));
+  }
+  MC->mangleName(GD, NameStream);
+  return getBackendMangledName(Name);
+}
+
+void InstallAPIVisitor::emitVTableSymbols(const CXXRecordDecl *D,
+ const AvailabilityInfo &Avail,
+ const HeaderType Access,
+ bool EmittedVTable) {
+ if (hasVTable(D)) {
+ EmittedVTable = true;
+ const CXXLinkage VTableLinkage = getVTableLinkage(D);
+ if (VTableLinkage == CXXLinkage::ExternalLinkage ||
+ VTableLinkage == CXXLinkage::WeakODRLinkage) {
+ const std::string Name = getMangledCXXVTableName(D);
+ const bool WeakDef = VTableLinkage == CXXLinkage::WeakODRLinkage;
+ auto [GR, FA] = Ctx.Slice->addGlobal(Name, RecordLinkage::Exported,
+ GlobalRecord::Kind::Variable, Avail,
+ D, Access, getFlags(WeakDef));
+ Ctx.Verifier->verify(GR, FA);
+ if (!D->getDescribedClassTemplate() && !D->isInvalidDecl()) {
+ VTableContextBase *VTable = D->getASTContext().getVTableContext();
+ auto AddThunk = [&](GlobalDecl GD) {
+ const ItaniumVTableContext::ThunkInfoVectorTy *Thunks =
+ VTable->getThunkInfo(GD);
+ if (!Thunks)
+ return;
+
+ for (const auto &Thunk : *Thunks) {
+ const std::string Name =
+ getMangledCXXThunk(GD, Thunk, /*ElideOverrideInfo=*/true);
+ auto [GR, FA] = Ctx.Slice->addGlobal(Name, RecordLinkage::Exported,
+ GlobalRecord::Kind::Function,
+ Avail, GD.getDecl(), Access);
+ Ctx.Verifier->verify(GR, FA);
+ }
+ };
+
+ for (const auto *Method : D->methods()) {
+ if (isa<CXXConstructorDecl>(Method) || !Method->isVirtual())
+ continue;
+
+ if (auto Dtor = dyn_cast<CXXDestructorDecl>(Method)) {
+ // Skip default destructor.
+ if (Dtor->isDefaulted())
+ continue;
+ AddThunk({Dtor, Dtor_Deleting});
+ AddThunk({Dtor, Dtor_Complete});
+ } else
+ AddThunk(Method);
+ }
+ }
+ }
+ }
+
+ if (!EmittedVTable)
+ return;
+
+ if (hasRTTI(D)) {
+ std::string Name = getMangledCXXRTTI(D);
+ auto [GR, FA] =
+ Ctx.Slice->addGlobal(Name, RecordLinkage::Exported,
+ GlobalRecord::Kind::Variable, Avail, D, Access);
+ Ctx.Verifier->verify(GR, FA);
+
+ Name = getMangledCXXRTTIName(D);
+ auto [NamedGR, NamedFA] =
+ Ctx.Slice->addGlobal(Name, RecordLinkage::Exported,
+ GlobalRecord::Kind::Variable, Avail, D, Access);
+ Ctx.Verifier->verify(NamedGR, NamedFA);
+ }
+
+ for (const auto &It : D->bases()) {
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(It.getType()->castAs<RecordType>()->getDecl());
+ const auto BaseAccess = getAccessForDecl(Base);
+ if (!BaseAccess)
+ continue;
+ const AvailabilityInfo BaseAvail = AvailabilityInfo::createFromDecl(Base);
+ emitVTableSymbols(Base, BaseAvail, *BaseAccess, /*EmittedVTable=*/true);
+ }
+}
+
+bool InstallAPIVisitor::VisitCXXRecordDecl(const CXXRecordDecl *D) {
+ if (!D->isCompleteDefinition())
+ return true;
+
+ // Skip templated classes.
+ if (D->getDescribedClassTemplate() != nullptr)
+ return true;
+
+ // Skip partial templated classes too.
+ if (isa<ClassTemplatePartialSpecializationDecl>(D))
+ return true;
+
+ auto Access = getAccessForDecl(D);
+ if (!Access)
+ return true;
+ const AvailabilityInfo Avail = AvailabilityInfo::createFromDecl(D);
+
+ // Check whether to emit the vtable/rtti symbols.
+ if (isExported(D))
+ emitVTableSymbols(D, Avail, *Access);
+
+ TemplateSpecializationKind ClassSK = TSK_Undeclared;
+ bool KeepInlineAsWeak = false;
+ if (auto *Templ = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
+ ClassSK = Templ->getTemplateSpecializationKind();
+ if (ClassSK == TSK_ExplicitInstantiationDeclaration)
+ KeepInlineAsWeak = true;
+ }
+
+ // Record the class methods.
+ for (const auto *M : D->methods()) {
+ // Inlined methods are usually not emitted, except when it comes from a
+ // specialized template.
+ bool WeakDef = false;
+ if (isInlined(M)) {
+ if (!KeepInlineAsWeak)
+ continue;
+
+ WeakDef = true;
+ }
+
+ if (!isExported(M))
+ continue;
+
+ switch (M->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ break;
+ case TSK_ImplicitInstantiation:
+ continue;
+ case TSK_ExplicitInstantiationDeclaration:
+ if (ClassSK == TSK_ExplicitInstantiationDeclaration)
+ WeakDef = true;
+ break;
+ case TSK_ExplicitInstantiationDefinition:
+ WeakDef = true;
+ break;
+ }
+
+ if (!M->isUserProvided())
+ continue;
+
+ // Methods that are deleted are not exported.
+ if (M->isDeleted())
+ continue;
+
+ const auto Access = getAccessForDecl(M);
+ if (!Access)
+ return true;
+ const AvailabilityInfo Avail = AvailabilityInfo::createFromDecl(M);
+
+ if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(M)) {
+ // Defaulted constructors are not exported.
+ if (Ctor->isDefaulted())
+ continue;
+
+ std::string Name = getMangledCtorDtor(M, Ctor_Base);
+ auto [GR, FA] = Ctx.Slice->addGlobal(Name, RecordLinkage::Exported,
+ GlobalRecord::Kind::Function, Avail,
+ D, *Access, getFlags(WeakDef));
+ Ctx.Verifier->verify(GR, FA);
+
+ if (!D->isAbstract()) {
+ std::string Name = getMangledCtorDtor(M, Ctor_Complete);
+ auto [GR, FA] = Ctx.Slice->addGlobal(
+ Name, RecordLinkage::Exported, GlobalRecord::Kind::Function, Avail,
+ D, *Access, getFlags(WeakDef));
+ Ctx.Verifier->verify(GR, FA);
+ }
+
+ continue;
+ }
+
+ if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(M)) {
+ // Defaulted destructors are not exported.
+ if (Dtor->isDefaulted())
+ continue;
+
+ std::string Name = getMangledCtorDtor(M, Dtor_Base);
+ auto [GR, FA] = Ctx.Slice->addGlobal(Name, RecordLinkage::Exported,
+ GlobalRecord::Kind::Function, Avail,
+ D, *Access, getFlags(WeakDef));
+ Ctx.Verifier->verify(GR, FA);
+
+ Name = getMangledCtorDtor(M, Dtor_Complete);
+ auto [CompleteGR, CompleteFA] = Ctx.Slice->addGlobal(
+ Name, RecordLinkage::Exported, GlobalRecord::Kind::Function, Avail, D,
+ *Access, getFlags(WeakDef));
+ Ctx.Verifier->verify(CompleteGR, CompleteFA);
+
+ if (Dtor->isVirtual()) {
+ Name = getMangledCtorDtor(M, Dtor_Deleting);
+ auto [VirtualGR, VirtualFA] = Ctx.Slice->addGlobal(
+ Name, RecordLinkage::Exported, GlobalRecord::Kind::Function, Avail,
+ D, *Access, getFlags(WeakDef));
+ Ctx.Verifier->verify(VirtualGR, VirtualFA);
+ }
+
+ continue;
+ }
+
+ // Though abstract methods can map to exports, this is generally unexpected.
+ // Except in the case of destructors. Only ignore pure virtuals after
+ // checking if the member function was a destructor.
+ if (M->isPureVirtual())
+ continue;
+
+ std::string Name = getMangledName(M);
+ auto [GR, FA] = Ctx.Slice->addGlobal(Name, RecordLinkage::Exported,
+ GlobalRecord::Kind::Function, Avail, M,
+ *Access, getFlags(WeakDef));
+ Ctx.Verifier->verify(GR, FA);
+ }
+
+ if (auto *Templ = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
+ if (!Templ->isExplicitInstantiationOrSpecialization())
+ return true;
+ }
+
+ using var_iter = CXXRecordDecl::specific_decl_iterator<VarDecl>;
+ using var_range = iterator_range<var_iter>;
+ for (const auto *Var : var_range(D->decls())) {
+ // Skip const static member variables.
+ // \code
+ // struct S {
+ // static const int x = 0;
+ // };
+ // \endcode
+ if (Var->isStaticDataMember() && Var->hasInit())
+ continue;
+
+ // Skip unexported var decls.
+ if (!isExported(Var))
+ continue;
+
+ const std::string Name = getMangledName(Var);
+ const auto Access = getAccessForDecl(Var);
+ if (!Access)
+ return true;
+ const AvailabilityInfo Avail = AvailabilityInfo::createFromDecl(Var);
+ const bool WeakDef = Var->hasAttr<WeakAttr>() || KeepInlineAsWeak;
+
+ auto [GR, FA] = Ctx.Slice->addGlobal(Name, RecordLinkage::Exported,
+ GlobalRecord::Kind::Variable, Avail, D,
+ *Access, getFlags(WeakDef));
+ Ctx.Verifier->verify(GR, FA);
+ }
+
+ return true;
+}
+
+} // namespace clang::installapi
diff --git a/contrib/llvm-project/clang/lib/Interpreter/CodeCompletion.cpp b/contrib/llvm-project/clang/lib/Interpreter/CodeCompletion.cpp
index 25183ae9eeb9..791426807cb9 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/CodeCompletion.cpp
+++ b/contrib/llvm-project/clang/lib/Interpreter/CodeCompletion.cpp
@@ -368,8 +368,7 @@ void ReplCodeCompleter::codeComplete(CompilerInstance *InterpCI,
llvm::SmallVector<const llvm::MemoryBuffer *, 1> tb = {};
InterpCI->getFrontendOpts().Inputs[0] = FrontendInputFile(
CodeCompletionFileName, Language::CXX, InputKind::Source);
- auto Act = std::unique_ptr<IncrementalSyntaxOnlyAction>(
- new IncrementalSyntaxOnlyAction(ParentCI));
+ auto Act = std::make_unique<IncrementalSyntaxOnlyAction>(ParentCI);
std::unique_ptr<llvm::MemoryBuffer> MB =
llvm::MemoryBuffer::getMemBufferCopy(Content, CodeCompletionFileName);
llvm::SmallVector<ASTUnit::RemappedFile, 4> RemappedFiles;
diff --git a/contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.cpp b/contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.cpp
index fb42964e4936..07c9e3005e5f 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.cpp
+++ b/contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.cpp
@@ -17,6 +17,7 @@
#include "clang/Frontend/CompilerInstance.h"
#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Module.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Target/TargetMachine.h"
diff --git a/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.cpp b/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.cpp
index 40bcef94797d..1824a5b4570a 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.cpp
+++ b/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.cpp
@@ -20,6 +20,7 @@
#include "llvm/ExecutionEngine/Orc/Debugging/DebuggerSupport.h"
#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
+#include "llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h"
#include "llvm/ExecutionEngine/Orc/LLJIT.h"
#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
#include "llvm/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.h"
@@ -35,27 +36,31 @@ LLVM_ATTRIBUTE_USED void linkComponents() {
}
namespace clang {
+IncrementalExecutor::IncrementalExecutor(llvm::orc::ThreadSafeContext &TSC)
+ : TSCtx(TSC) {}
+
+llvm::Expected<std::unique_ptr<llvm::orc::LLJITBuilder>>
+IncrementalExecutor::createDefaultJITBuilder(
+ llvm::orc::JITTargetMachineBuilder JTMB) {
+ auto JITBuilder = std::make_unique<llvm::orc::LLJITBuilder>();
+ JITBuilder->setJITTargetMachineBuilder(std::move(JTMB));
+ JITBuilder->setPrePlatformSetup([](llvm::orc::LLJIT &J) {
+ // Try to enable debugging of JIT'd code (only works with JITLink for
+ // ELF and MachO).
+ consumeError(llvm::orc::enableDebuggerSupport(J));
+ return llvm::Error::success();
+ });
+ return std::move(JITBuilder);
+}
IncrementalExecutor::IncrementalExecutor(llvm::orc::ThreadSafeContext &TSC,
- llvm::Error &Err,
- const clang::TargetInfo &TI)
+ llvm::orc::LLJITBuilder &JITBuilder,
+ llvm::Error &Err)
: TSCtx(TSC) {
using namespace llvm::orc;
llvm::ErrorAsOutParameter EAO(&Err);
- auto JTMB = JITTargetMachineBuilder(TI.getTriple());
- JTMB.addFeatures(TI.getTargetOpts().Features);
- LLJITBuilder Builder;
- Builder.setJITTargetMachineBuilder(JTMB);
- Builder.setPrePlatformSetup(
- [](LLJIT &J) {
- // Try to enable debugging of JIT'd code (only works with JITLink for
- // ELF and MachO).
- consumeError(enableDebuggerSupport(J));
- return llvm::Error::success();
- });
-
- if (auto JitOrErr = Builder.create())
+ if (auto JitOrErr = JITBuilder.create())
Jit = std::move(*JitOrErr);
else {
Err = JitOrErr.takeError();
diff --git a/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h b/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h
index dd0a210a0614..dbd61f0b8b1e 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h
+++ b/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h
@@ -23,7 +23,9 @@
namespace llvm {
class Error;
namespace orc {
+class JITTargetMachineBuilder;
class LLJIT;
+class LLJITBuilder;
class ThreadSafeContext;
} // namespace orc
} // namespace llvm
@@ -41,21 +43,27 @@ class IncrementalExecutor {
llvm::DenseMap<const PartialTranslationUnit *, llvm::orc::ResourceTrackerSP>
ResourceTrackers;
+protected:
+ IncrementalExecutor(llvm::orc::ThreadSafeContext &TSC);
+
public:
enum SymbolNameKind { IRName, LinkerName };
- IncrementalExecutor(llvm::orc::ThreadSafeContext &TSC, llvm::Error &Err,
- const clang::TargetInfo &TI);
- ~IncrementalExecutor();
+ IncrementalExecutor(llvm::orc::ThreadSafeContext &TSC,
+ llvm::orc::LLJITBuilder &JITBuilder, llvm::Error &Err);
+ virtual ~IncrementalExecutor();
- llvm::Error addModule(PartialTranslationUnit &PTU);
- llvm::Error removeModule(PartialTranslationUnit &PTU);
- llvm::Error runCtors() const;
- llvm::Error cleanUp();
+ virtual llvm::Error addModule(PartialTranslationUnit &PTU);
+ virtual llvm::Error removeModule(PartialTranslationUnit &PTU);
+ virtual llvm::Error runCtors() const;
+ virtual llvm::Error cleanUp();
llvm::Expected<llvm::orc::ExecutorAddr>
getSymbolAddress(llvm::StringRef Name, SymbolNameKind NameKind) const;
llvm::orc::LLJIT &GetExecutionEngine() { return *Jit; }
+
+ static llvm::Expected<std::unique_ptr<llvm::orc::LLJITBuilder>>
+ createDefaultJITBuilder(llvm::orc::JITTargetMachineBuilder JTMB);
};
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp b/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp
index 370bcbfee8b0..b7c809c45098 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp
+++ b/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp
@@ -79,7 +79,7 @@ public:
void CompleteTentativeDefinition(VarDecl *D) override final {
Consumer->CompleteTentativeDefinition(D);
}
- void CompleteExternalDeclaration(VarDecl *D) override final {
+ void CompleteExternalDeclaration(DeclaratorDecl *D) override final {
Consumer->CompleteExternalDeclaration(D);
}
void AssignInheritanceModel(CXXRecordDecl *RD) override final {
@@ -209,6 +209,10 @@ IncrementalParser::IncrementalParser(Interpreter &Interp,
if (Err)
return;
CI->ExecuteAction(*Act);
+
+ if (getCodeGen())
+ CachedInCodeGenModule = GenModule();
+
std::unique_ptr<ASTConsumer> IncrConsumer =
std::make_unique<IncrementalASTConsumer>(Interp, CI->takeASTConsumer());
CI->setASTConsumer(std::move(IncrConsumer));
@@ -224,11 +228,8 @@ IncrementalParser::IncrementalParser(Interpreter &Interp,
return; // PTU.takeError();
}
- if (CodeGenerator *CG = getCodeGen()) {
- std::unique_ptr<llvm::Module> M(CG->ReleaseModule());
- CG->StartModule("incr_module_" + std::to_string(PTUs.size()),
- M->getContext());
- PTU->TheModule = std::move(M);
+ if (getCodeGen()) {
+ PTU->TheModule = GenModule();
assert(PTU->TheModule && "Failed to create initial PTU");
}
}
@@ -364,6 +365,19 @@ IncrementalParser::Parse(llvm::StringRef input) {
std::unique_ptr<llvm::Module> IncrementalParser::GenModule() {
static unsigned ID = 0;
if (CodeGenerator *CG = getCodeGen()) {
+ // Clang's CodeGen is designed to work with a single llvm::Module. In many
+ // cases for convenience various CodeGen parts have a reference to the
+ // llvm::Module (TheModule or Module) which does not change when a new
+ // module is pushed. However, the execution engine wants to take ownership
+ // of the module which does not map well to CodeGen's design. To work this
+ // around we created an empty module to make CodeGen happy. We should make
+ // sure it always stays empty.
+ assert((!CachedInCodeGenModule ||
+ (CachedInCodeGenModule->empty() &&
+ CachedInCodeGenModule->global_empty() &&
+ CachedInCodeGenModule->alias_empty() &&
+ CachedInCodeGenModule->ifunc_empty())) &&
+ "CodeGen wrote to a readonly module");
std::unique_ptr<llvm::Module> M(CG->ReleaseModule());
CG->StartModule("incr_module_" + std::to_string(ID++), M->getContext());
return M;
@@ -373,20 +387,36 @@ std::unique_ptr<llvm::Module> IncrementalParser::GenModule() {
void IncrementalParser::CleanUpPTU(PartialTranslationUnit &PTU) {
TranslationUnitDecl *MostRecentTU = PTU.TUPart;
- TranslationUnitDecl *FirstTU = MostRecentTU->getFirstDecl();
- if (StoredDeclsMap *Map = FirstTU->getPrimaryContext()->getLookupPtr()) {
- for (auto I = Map->begin(); I != Map->end(); ++I) {
- StoredDeclsList &List = I->second;
+ if (StoredDeclsMap *Map = MostRecentTU->getPrimaryContext()->getLookupPtr()) {
+ for (auto &&[Key, List] : *Map) {
DeclContextLookupResult R = List.getLookupResult();
+ std::vector<NamedDecl *> NamedDeclsToRemove;
+ bool RemoveAll = true;
for (NamedDecl *D : R) {
- if (D->getTranslationUnitDecl() == MostRecentTU) {
+ if (D->getTranslationUnitDecl() == MostRecentTU)
+ NamedDeclsToRemove.push_back(D);
+ else
+ RemoveAll = false;
+ }
+ if (LLVM_LIKELY(RemoveAll)) {
+ Map->erase(Key);
+ } else {
+ for (NamedDecl *D : NamedDeclsToRemove)
List.remove(D);
- }
}
- if (List.isNull())
- Map->erase(I);
}
}
+
+ // FIXME: We should de-allocate MostRecentTU
+ for (Decl *D : MostRecentTU->decls()) {
+ auto *ND = dyn_cast<NamedDecl>(D);
+ if (!ND)
+ continue;
+ // Check if we need to clean up the IdResolver chain.
+ if (ND->getDeclName().getFETokenInfo() && !D->getLangOpts().ObjC &&
+ !D->getLangOpts().CPlusPlus)
+ getCI()->getSema().IdResolver.RemoveDecl(ND);
+ }
}
llvm::StringRef IncrementalParser::GetMangledName(GlobalDecl GD) const {
diff --git a/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h b/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h
index e13b74c7f659..f63bce50acd3 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h
+++ b/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h
@@ -24,6 +24,7 @@
#include <memory>
namespace llvm {
class LLVMContext;
+class Module;
} // namespace llvm
namespace clang {
@@ -57,6 +58,10 @@ protected:
/// of code.
std::list<PartialTranslationUnit> PTUs;
+ /// When CodeGen is created the first llvm::Module gets cached in many places
+ /// and we must keep it alive.
+ std::unique_ptr<llvm::Module> CachedInCodeGenModule;
+
IncrementalParser();
public:
diff --git a/contrib/llvm-project/clang/lib/Interpreter/Interpreter.cpp b/contrib/llvm-project/clang/lib/Interpreter/Interpreter.cpp
index 9f97a3c6b0be..985d0b7c0ef3 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/Interpreter.cpp
+++ b/contrib/llvm-project/clang/lib/Interpreter/Interpreter.cpp
@@ -15,6 +15,9 @@
#include "IncrementalExecutor.h"
#include "IncrementalParser.h"
#include "InterpreterUtils.h"
+#ifdef __EMSCRIPTEN__
+#include "Wasm.h"
+#endif // __EMSCRIPTEN__
#include "clang/AST/ASTContext.h"
#include "clang/AST/Mangle.h"
@@ -42,6 +45,9 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Host.h"
+
+#include <cstdarg>
+
using namespace clang;
// FIXME: Figure out how to unify with namespace init_convenience from
@@ -132,7 +138,8 @@ CreateCI(const llvm::opt::ArgStringList &Argv) {
} // anonymous namespace
llvm::Expected<std::unique_ptr<CompilerInstance>>
-IncrementalCompilerBuilder::create(std::vector<const char *> &ClangArgv) {
+IncrementalCompilerBuilder::create(std::string TT,
+ std::vector<const char *> &ClangArgv) {
// If we don't know ClangArgv0 or the address of main() at this point, try
// to guess it anyway (it's possible on some platforms).
@@ -162,8 +169,7 @@ IncrementalCompilerBuilder::create(std::vector<const char *> &ClangArgv) {
TextDiagnosticBuffer *DiagsBuffer = new TextDiagnosticBuffer;
DiagnosticsEngine Diags(DiagID, &*DiagOpts, DiagsBuffer);
- driver::Driver Driver(/*MainBinaryName=*/ClangArgv[0],
- llvm::sys::getProcessTriple(), Diags);
+ driver::Driver Driver(/*MainBinaryName=*/ClangArgv[0], TT, Diags);
Driver.setCheckInputsExist(false); // the input comes from mem buffers
llvm::ArrayRef<const char *> RF = llvm::ArrayRef(ClangArgv);
std::unique_ptr<driver::Compilation> Compilation(Driver.BuildCompilation(RF));
@@ -183,9 +189,16 @@ IncrementalCompilerBuilder::CreateCpp() {
std::vector<const char *> Argv;
Argv.reserve(5 + 1 + UserArgs.size());
Argv.push_back("-xc++");
+#ifdef __EMSCRIPTEN__
+ Argv.push_back("-target");
+ Argv.push_back("wasm32-unknown-emscripten");
+ Argv.push_back("-shared");
+ Argv.push_back("-fvisibility=default");
+#endif
Argv.insert(Argv.end(), UserArgs.begin(), UserArgs.end());
- return IncrementalCompilerBuilder::create(Argv);
+ std::string TT = TargetTriple ? *TargetTriple : llvm::sys::getProcessTriple();
+ return IncrementalCompilerBuilder::create(TT, Argv);
}
llvm::Expected<std::unique_ptr<CompilerInstance>>
@@ -213,7 +226,8 @@ IncrementalCompilerBuilder::createCuda(bool device) {
Argv.insert(Argv.end(), UserArgs.begin(), UserArgs.end());
- return IncrementalCompilerBuilder::create(Argv);
+ std::string TT = TargetTriple ? *TargetTriple : llvm::sys::getProcessTriple();
+ return IncrementalCompilerBuilder::create(TT, Argv);
}
llvm::Expected<std::unique_ptr<CompilerInstance>>
@@ -227,12 +241,32 @@ IncrementalCompilerBuilder::CreateCudaHost() {
}
Interpreter::Interpreter(std::unique_ptr<CompilerInstance> CI,
- llvm::Error &Err) {
- llvm::ErrorAsOutParameter EAO(&Err);
+ llvm::Error &ErrOut,
+ std::unique_ptr<llvm::orc::LLJITBuilder> JITBuilder)
+ : JITBuilder(std::move(JITBuilder)) {
+ llvm::ErrorAsOutParameter EAO(&ErrOut);
auto LLVMCtx = std::make_unique<llvm::LLVMContext>();
TSCtx = std::make_unique<llvm::orc::ThreadSafeContext>(std::move(LLVMCtx));
- IncrParser = std::make_unique<IncrementalParser>(*this, std::move(CI),
- *TSCtx->getContext(), Err);
+ IncrParser = std::make_unique<IncrementalParser>(
+ *this, std::move(CI), *TSCtx->getContext(), ErrOut);
+ if (ErrOut)
+ return;
+
+ // Not all frontends support code-generation, e.g. ast-dump actions don't
+ if (IncrParser->getCodeGen()) {
+ if (llvm::Error Err = CreateExecutor()) {
+ ErrOut = joinErrors(std::move(ErrOut), std::move(Err));
+ return;
+ }
+
+ // Process the PTUs that came from initialization. For example -include will
+ // give us a header that's processed at initialization of the preprocessor.
+ for (PartialTranslationUnit &PTU : IncrParser->getPTUs())
+ if (llvm::Error Err = Execute(PTU)) {
+ ErrOut = joinErrors(std::move(ErrOut), std::move(Err));
+ return;
+ }
+ }
}
Interpreter::~Interpreter() {
@@ -248,14 +282,10 @@ Interpreter::~Interpreter() {
// can't find the precise resource directory in unittests so we have to hard
// code them.
const char *const Runtimes = R"(
+ #define __CLANG_REPL__ 1
#ifdef __cplusplus
+ #define EXTERN_C extern "C"
void *__clang_Interpreter_SetValueWithAlloc(void*, void*, void*);
- void __clang_Interpreter_SetValueNoAlloc(void*, void*, void*);
- void __clang_Interpreter_SetValueNoAlloc(void*, void*, void*, void*);
- void __clang_Interpreter_SetValueNoAlloc(void*, void*, void*, float);
- void __clang_Interpreter_SetValueNoAlloc(void*, void*, void*, double);
- void __clang_Interpreter_SetValueNoAlloc(void*, void*, void*, long double);
- void __clang_Interpreter_SetValueNoAlloc(void*,void*,void*,unsigned long long);
struct __clang_Interpreter_NewTag{} __ci_newtag;
void* operator new(__SIZE_TYPE__, void* __p, __clang_Interpreter_NewTag) noexcept;
template <class T, class = T (*)() /*disable for arrays*/>
@@ -267,7 +297,11 @@ const char *const Runtimes = R"(
void __clang_Interpreter_SetValueCopyArr(const T (*Src)[N], void* Placement, unsigned long Size) {
__clang_Interpreter_SetValueCopyArr(Src[0], Placement, Size);
}
+#else
+ #define EXTERN_C extern
#endif // __cplusplus
+
+ EXTERN_C void __clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType, ...);
)";
llvm::Expected<std::unique_ptr<Interpreter>>
@@ -278,15 +312,14 @@ Interpreter::create(std::unique_ptr<CompilerInstance> CI) {
if (Err)
return std::move(Err);
+ // Add runtime code and set a marker to hide it from user code. Undo will not
+ // go through that.
auto PTU = Interp->Parse(Runtimes);
if (!PTU)
return PTU.takeError();
+ Interp->markUserCodeStart();
Interp->ValuePrintingInfo.resize(4);
- // FIXME: This is a ugly hack. Undo command checks its availability by looking
- // at the size of the PTU list. However we have parsed something in the
- // beginning of the REPL so we have to mark them as 'Irrevocable'.
- Interp->InitPTUSize = Interp->IncrParser->getPTUs().size();
return std::move(Interp);
}
@@ -343,6 +376,11 @@ const ASTContext &Interpreter::getASTContext() const {
return getCompilerInstance()->getASTContext();
}
+void Interpreter::markUserCodeStart() {
+ assert(!InitPTUSize && "We only do this once");
+ InitPTUSize = IncrParser->getPTUs().size();
+}
+
size_t Interpreter::getEffectivePTUSize() const {
std::list<PartialTranslationUnit> &PTUs = IncrParser->getPTUs();
assert(PTUs.size() >= InitPTUSize && "empty PTU list?");
@@ -366,17 +404,51 @@ Interpreter::Parse(llvm::StringRef Code) {
return IncrParser->Parse(Code);
}
+static llvm::Expected<llvm::orc::JITTargetMachineBuilder>
+createJITTargetMachineBuilder(const std::string &TT) {
+ if (TT == llvm::sys::getProcessTriple())
+ // This fails immediately if the target backend is not registered
+ return llvm::orc::JITTargetMachineBuilder::detectHost();
+
+ // If the target backend is not registered, LLJITBuilder::create() will fail
+ return llvm::orc::JITTargetMachineBuilder(llvm::Triple(TT));
+}
+
llvm::Error Interpreter::CreateExecutor() {
- const clang::TargetInfo &TI =
- getCompilerInstance()->getASTContext().getTargetInfo();
+ if (IncrExecutor)
+ return llvm::make_error<llvm::StringError>("Operation failed. "
+ "Execution engine exists",
+ std::error_code());
+ if (!IncrParser->getCodeGen())
+ return llvm::make_error<llvm::StringError>("Operation failed. "
+ "No code generator available",
+ std::error_code());
+ if (!JITBuilder) {
+ const std::string &TT = getCompilerInstance()->getTargetOpts().Triple;
+ auto JTMB = createJITTargetMachineBuilder(TT);
+ if (!JTMB)
+ return JTMB.takeError();
+ auto JB = IncrementalExecutor::createDefaultJITBuilder(std::move(*JTMB));
+ if (!JB)
+ return JB.takeError();
+ JITBuilder = std::move(*JB);
+ }
+
llvm::Error Err = llvm::Error::success();
- auto Executor = std::make_unique<IncrementalExecutor>(*TSCtx, Err, TI);
+#ifdef __EMSCRIPTEN__
+ auto Executor = std::make_unique<WasmIncrementalExecutor>(*TSCtx);
+#else
+ auto Executor =
+ std::make_unique<IncrementalExecutor>(*TSCtx, *JITBuilder, Err);
+#endif
if (!Err)
IncrExecutor = std::move(Executor);
return Err;
}
+void Interpreter::ResetExecutor() { IncrExecutor.reset(); }
+
llvm::Error Interpreter::Execute(PartialTranslationUnit &T) {
assert(T.TheModule);
if (!IncrExecutor) {
@@ -505,16 +577,21 @@ static constexpr llvm::StringRef MagicRuntimeInterface[] = {
"__clang_Interpreter_SetValueWithAlloc",
"__clang_Interpreter_SetValueCopyArr", "__ci_newtag"};
-bool Interpreter::FindRuntimeInterface() {
+static std::unique_ptr<RuntimeInterfaceBuilder>
+createInProcessRuntimeInterfaceBuilder(Interpreter &Interp, ASTContext &Ctx,
+ Sema &S);
+
+std::unique_ptr<RuntimeInterfaceBuilder> Interpreter::FindRuntimeInterface() {
if (llvm::all_of(ValuePrintingInfo, [](Expr *E) { return E != nullptr; }))
- return true;
+ return nullptr;
Sema &S = getCompilerInstance()->getSema();
ASTContext &Ctx = S.getASTContext();
auto LookupInterface = [&](Expr *&Interface, llvm::StringRef Name) {
LookupResult R(S, &Ctx.Idents.get(Name), SourceLocation(),
- Sema::LookupOrdinaryName, Sema::ForVisibleRedeclaration);
+ Sema::LookupOrdinaryName,
+ RedeclarationKind::ForVisibleRedeclaration);
S.LookupQualifiedName(R, Ctx.getTranslationUnitDecl());
if (R.empty())
return false;
@@ -526,120 +603,36 @@ bool Interpreter::FindRuntimeInterface() {
if (!LookupInterface(ValuePrintingInfo[NoAlloc],
MagicRuntimeInterface[NoAlloc]))
- return false;
- if (!LookupInterface(ValuePrintingInfo[WithAlloc],
- MagicRuntimeInterface[WithAlloc]))
- return false;
- if (!LookupInterface(ValuePrintingInfo[CopyArray],
- MagicRuntimeInterface[CopyArray]))
- return false;
- if (!LookupInterface(ValuePrintingInfo[NewTag],
- MagicRuntimeInterface[NewTag]))
- return false;
- return true;
+ return nullptr;
+ if (Ctx.getLangOpts().CPlusPlus) {
+ if (!LookupInterface(ValuePrintingInfo[WithAlloc],
+ MagicRuntimeInterface[WithAlloc]))
+ return nullptr;
+ if (!LookupInterface(ValuePrintingInfo[CopyArray],
+ MagicRuntimeInterface[CopyArray]))
+ return nullptr;
+ if (!LookupInterface(ValuePrintingInfo[NewTag],
+ MagicRuntimeInterface[NewTag]))
+ return nullptr;
+ }
+
+ return createInProcessRuntimeInterfaceBuilder(*this, Ctx, S);
}
namespace {
-class RuntimeInterfaceBuilder
- : public TypeVisitor<RuntimeInterfaceBuilder, Interpreter::InterfaceKind> {
- clang::Interpreter &Interp;
+class InterfaceKindVisitor
+ : public TypeVisitor<InterfaceKindVisitor, Interpreter::InterfaceKind> {
+ friend class InProcessRuntimeInterfaceBuilder;
+
ASTContext &Ctx;
Sema &S;
Expr *E;
llvm::SmallVector<Expr *, 3> Args;
public:
- RuntimeInterfaceBuilder(clang::Interpreter &In, ASTContext &C, Sema &SemaRef,
- Expr *VE, ArrayRef<Expr *> FixedArgs)
- : Interp(In), Ctx(C), S(SemaRef), E(VE) {
- // The Interpreter* parameter and the out parameter `OutVal`.
- for (Expr *E : FixedArgs)
- Args.push_back(E);
-
- // Get rid of ExprWithCleanups.
- if (auto *EWC = llvm::dyn_cast_if_present<ExprWithCleanups>(E))
- E = EWC->getSubExpr();
- }
-
- ExprResult getCall() {
- QualType Ty = E->getType();
- QualType DesugaredTy = Ty.getDesugaredType(Ctx);
-
- // For lvalue struct, we treat it as a reference.
- if (DesugaredTy->isRecordType() && E->isLValue()) {
- DesugaredTy = Ctx.getLValueReferenceType(DesugaredTy);
- Ty = Ctx.getLValueReferenceType(Ty);
- }
-
- Expr *TypeArg =
- CStyleCastPtrExpr(S, Ctx.VoidPtrTy, (uintptr_t)Ty.getAsOpaquePtr());
- // The QualType parameter `OpaqueType`, represented as `void*`.
- Args.push_back(TypeArg);
-
- // We push the last parameter based on the type of the Expr. Note we need
- // special care for rvalue struct.
- Interpreter::InterfaceKind Kind = Visit(&*DesugaredTy);
- switch (Kind) {
- case Interpreter::InterfaceKind::WithAlloc:
- case Interpreter::InterfaceKind::CopyArray: {
- // __clang_Interpreter_SetValueWithAlloc.
- ExprResult AllocCall = S.ActOnCallExpr(
- /*Scope=*/nullptr,
- Interp.getValuePrintingInfo()[Interpreter::InterfaceKind::WithAlloc],
- E->getBeginLoc(), Args, E->getEndLoc());
- assert(!AllocCall.isInvalid() && "Can't create runtime interface call!");
-
- TypeSourceInfo *TSI = Ctx.getTrivialTypeSourceInfo(Ty, SourceLocation());
-
- // Force CodeGen to emit destructor.
- if (auto *RD = Ty->getAsCXXRecordDecl()) {
- auto *Dtor = S.LookupDestructor(RD);
- Dtor->addAttr(UsedAttr::CreateImplicit(Ctx));
- Interp.getCompilerInstance()->getASTConsumer().HandleTopLevelDecl(
- DeclGroupRef(Dtor));
- }
-
- // __clang_Interpreter_SetValueCopyArr.
- if (Kind == Interpreter::InterfaceKind::CopyArray) {
- const auto *ConstantArrTy =
- cast<ConstantArrayType>(DesugaredTy.getTypePtr());
- size_t ArrSize = Ctx.getConstantArrayElementCount(ConstantArrTy);
- Expr *ArrSizeExpr = IntegerLiteralExpr(Ctx, ArrSize);
- Expr *Args[] = {E, AllocCall.get(), ArrSizeExpr};
- return S.ActOnCallExpr(
- /*Scope *=*/nullptr,
- Interp
- .getValuePrintingInfo()[Interpreter::InterfaceKind::CopyArray],
- SourceLocation(), Args, SourceLocation());
- }
- Expr *Args[] = {
- AllocCall.get(),
- Interp.getValuePrintingInfo()[Interpreter::InterfaceKind::NewTag]};
- ExprResult CXXNewCall = S.BuildCXXNew(
- E->getSourceRange(),
- /*UseGlobal=*/true, /*PlacementLParen=*/SourceLocation(), Args,
- /*PlacementRParen=*/SourceLocation(),
- /*TypeIdParens=*/SourceRange(), TSI->getType(), TSI, std::nullopt,
- E->getSourceRange(), E);
-
- assert(!CXXNewCall.isInvalid() &&
- "Can't create runtime placement new call!");
-
- return S.ActOnFinishFullExpr(CXXNewCall.get(),
- /*DiscardedValue=*/false);
- }
- // __clang_Interpreter_SetValueNoAlloc.
- case Interpreter::InterfaceKind::NoAlloc: {
- return S.ActOnCallExpr(
- /*Scope=*/nullptr,
- Interp.getValuePrintingInfo()[Interpreter::InterfaceKind::NoAlloc],
- E->getBeginLoc(), Args, E->getEndLoc());
- }
- default:
- llvm_unreachable("Unhandled Interpreter::InterfaceKind");
- }
- }
+ InterfaceKindVisitor(ASTContext &Ctx, Sema &S, Expr *E)
+ : Ctx(Ctx), S(S), E(E) {}
Interpreter::InterfaceKind VisitRecordType(const RecordType *Ty) {
return Interpreter::InterfaceKind::WithAlloc;
@@ -693,10 +686,12 @@ public:
}
private:
- // Force cast these types to uint64 to reduce the number of overloads of
- // `__clang_Interpreter_SetValueNoAlloc`.
+ // Force cast these types to the uint that fits the register size. That way we
+ // reduce the number of overloads of `__clang_Interpreter_SetValueNoAlloc`.
void HandleIntegralOrEnumType(const Type *Ty) {
- TypeSourceInfo *TSI = Ctx.getTrivialTypeSourceInfo(Ctx.UnsignedLongLongTy);
+ uint64_t PtrBits = Ctx.getTypeSize(Ctx.VoidPtrTy);
+ QualType UIntTy = Ctx.getBitIntType(/*Unsigned=*/true, PtrBits);
+ TypeSourceInfo *TSI = Ctx.getTrivialTypeSourceInfo(UIntTy);
ExprResult CastedExpr =
S.BuildCStyleCastExpr(SourceLocation(), TSI, SourceLocation(), E);
assert(!CastedExpr.isInvalid() && "Cannot create cstyle cast expr");
@@ -711,8 +706,124 @@ private:
Args.push_back(CastedExpr.get());
}
};
+
+class InProcessRuntimeInterfaceBuilder : public RuntimeInterfaceBuilder {
+ Interpreter &Interp;
+ ASTContext &Ctx;
+ Sema &S;
+
+public:
+ InProcessRuntimeInterfaceBuilder(Interpreter &Interp, ASTContext &C, Sema &S)
+ : Interp(Interp), Ctx(C), S(S) {}
+
+ TransformExprFunction *getPrintValueTransformer() override {
+ return &transformForValuePrinting;
+ }
+
+private:
+ static ExprResult transformForValuePrinting(RuntimeInterfaceBuilder *Builder,
+ Expr *E,
+ ArrayRef<Expr *> FixedArgs) {
+ auto *B = static_cast<InProcessRuntimeInterfaceBuilder *>(Builder);
+
+ // Get rid of ExprWithCleanups.
+ if (auto *EWC = llvm::dyn_cast_if_present<ExprWithCleanups>(E))
+ E = EWC->getSubExpr();
+
+ InterfaceKindVisitor Visitor(B->Ctx, B->S, E);
+
+ // The Interpreter* parameter and the out parameter `OutVal`.
+ for (Expr *E : FixedArgs)
+ Visitor.Args.push_back(E);
+
+ QualType Ty = E->getType();
+ QualType DesugaredTy = Ty.getDesugaredType(B->Ctx);
+
+ // For lvalue struct, we treat it as a reference.
+ if (DesugaredTy->isRecordType() && E->isLValue()) {
+ DesugaredTy = B->Ctx.getLValueReferenceType(DesugaredTy);
+ Ty = B->Ctx.getLValueReferenceType(Ty);
+ }
+
+ Expr *TypeArg = CStyleCastPtrExpr(B->S, B->Ctx.VoidPtrTy,
+ (uintptr_t)Ty.getAsOpaquePtr());
+ // The QualType parameter `OpaqueType`, represented as `void*`.
+ Visitor.Args.push_back(TypeArg);
+
+ // We push the last parameter based on the type of the Expr. Note we need
+ // special care for rvalue struct.
+ Interpreter::InterfaceKind Kind = Visitor.Visit(&*DesugaredTy);
+ switch (Kind) {
+ case Interpreter::InterfaceKind::WithAlloc:
+ case Interpreter::InterfaceKind::CopyArray: {
+ // __clang_Interpreter_SetValueWithAlloc.
+ ExprResult AllocCall = B->S.ActOnCallExpr(
+ /*Scope=*/nullptr,
+ B->Interp
+ .getValuePrintingInfo()[Interpreter::InterfaceKind::WithAlloc],
+ E->getBeginLoc(), Visitor.Args, E->getEndLoc());
+ assert(!AllocCall.isInvalid() && "Can't create runtime interface call!");
+
+ TypeSourceInfo *TSI =
+ B->Ctx.getTrivialTypeSourceInfo(Ty, SourceLocation());
+
+ // Force CodeGen to emit destructor.
+ if (auto *RD = Ty->getAsCXXRecordDecl()) {
+ auto *Dtor = B->S.LookupDestructor(RD);
+ Dtor->addAttr(UsedAttr::CreateImplicit(B->Ctx));
+ B->Interp.getCompilerInstance()->getASTConsumer().HandleTopLevelDecl(
+ DeclGroupRef(Dtor));
+ }
+
+ // __clang_Interpreter_SetValueCopyArr.
+ if (Kind == Interpreter::InterfaceKind::CopyArray) {
+ const auto *ConstantArrTy =
+ cast<ConstantArrayType>(DesugaredTy.getTypePtr());
+ size_t ArrSize = B->Ctx.getConstantArrayElementCount(ConstantArrTy);
+ Expr *ArrSizeExpr = IntegerLiteralExpr(B->Ctx, ArrSize);
+ Expr *Args[] = {E, AllocCall.get(), ArrSizeExpr};
+ return B->S.ActOnCallExpr(
+ /*Scope *=*/nullptr,
+ B->Interp
+ .getValuePrintingInfo()[Interpreter::InterfaceKind::CopyArray],
+ SourceLocation(), Args, SourceLocation());
+ }
+ Expr *Args[] = {
+ AllocCall.get(),
+ B->Interp.getValuePrintingInfo()[Interpreter::InterfaceKind::NewTag]};
+ ExprResult CXXNewCall = B->S.BuildCXXNew(
+ E->getSourceRange(),
+ /*UseGlobal=*/true, /*PlacementLParen=*/SourceLocation(), Args,
+ /*PlacementRParen=*/SourceLocation(),
+ /*TypeIdParens=*/SourceRange(), TSI->getType(), TSI, std::nullopt,
+ E->getSourceRange(), E);
+
+ assert(!CXXNewCall.isInvalid() &&
+ "Can't create runtime placement new call!");
+
+ return B->S.ActOnFinishFullExpr(CXXNewCall.get(),
+ /*DiscardedValue=*/false);
+ }
+ // __clang_Interpreter_SetValueNoAlloc.
+ case Interpreter::InterfaceKind::NoAlloc: {
+ return B->S.ActOnCallExpr(
+ /*Scope=*/nullptr,
+ B->Interp.getValuePrintingInfo()[Interpreter::InterfaceKind::NoAlloc],
+ E->getBeginLoc(), Visitor.Args, E->getEndLoc());
+ }
+ default:
+ llvm_unreachable("Unhandled Interpreter::InterfaceKind");
+ }
+ }
+};
} // namespace
+static std::unique_ptr<RuntimeInterfaceBuilder>
+createInProcessRuntimeInterfaceBuilder(Interpreter &Interp, ASTContext &Ctx,
+ Sema &S) {
+ return std::make_unique<InProcessRuntimeInterfaceBuilder>(Interp, Ctx, S);
+}
+
// This synthesizes a call expression to a speciall
// function that is responsible for generating the Value.
// In general, we transform:
@@ -731,8 +842,13 @@ Expr *Interpreter::SynthesizeExpr(Expr *E) {
Sema &S = getCompilerInstance()->getSema();
ASTContext &Ctx = S.getASTContext();
- if (!FindRuntimeInterface())
- llvm_unreachable("We can't find the runtime iterface for pretty print!");
+ if (!RuntimeIB) {
+ RuntimeIB = FindRuntimeInterface();
+ AddPrintValueCall = RuntimeIB->getPrintValueTransformer();
+ }
+
+ assert(AddPrintValueCall &&
+ "We don't have a runtime interface for pretty print!");
// Create parameter `ThisInterp`.
auto *ThisInterp = CStyleCastPtrExpr(S, Ctx.VoidPtrTy, (uintptr_t)this);
@@ -741,9 +857,9 @@ Expr *Interpreter::SynthesizeExpr(Expr *E) {
auto *OutValue = CStyleCastPtrExpr(S, Ctx.VoidPtrTy, (uintptr_t)&LastValue);
// Build `__clang_Interpreter_SetValue*` call.
- RuntimeInterfaceBuilder Builder(*this, Ctx, S, E, {ThisInterp, OutValue});
+ ExprResult Result =
+ AddPrintValueCall(RuntimeIB.get(), E, {ThisInterp, OutValue});
- ExprResult Result = Builder.getCall();
// It could fail, like printing an array type in C. (not supported)
if (Result.isInvalid())
return E;
@@ -759,69 +875,81 @@ __clang_Interpreter_SetValueWithAlloc(void *This, void *OutVal,
return VRef.getPtr();
}
-// Pointers, lvalue struct that can take as a reference.
-REPL_EXTERNAL_VISIBILITY void
-__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
- void *Val) {
- Value &VRef = *(Value *)OutVal;
- VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
- VRef.setPtr(Val);
-}
-
-REPL_EXTERNAL_VISIBILITY void
-__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal,
- void *OpaqueType) {
+extern "C" void REPL_EXTERNAL_VISIBILITY __clang_Interpreter_SetValueNoAlloc(
+ void *This, void *OutVal, void *OpaqueType, ...) {
Value &VRef = *(Value *)OutVal;
- VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
-}
-
-static void SetValueDataBasedOnQualType(Value &V, unsigned long long Data) {
- QualType QT = V.getType();
- if (const auto *ET = QT->getAs<EnumType>())
- QT = ET->getDecl()->getIntegerType();
-
- switch (QT->castAs<BuiltinType>()->getKind()) {
- default:
- llvm_unreachable("unknown type kind!");
-#define X(type, name) \
- case BuiltinType::name: \
- V.set##name(Data); \
- break;
- REPL_BUILTIN_TYPES
-#undef X
+ Interpreter *I = static_cast<Interpreter *>(This);
+ VRef = Value(I, OpaqueType);
+ if (VRef.isVoid())
+ return;
+
+ va_list args;
+ va_start(args, /*last named param*/ OpaqueType);
+
+ QualType QT = VRef.getType();
+ if (VRef.getKind() == Value::K_PtrOrObj) {
+ VRef.setPtr(va_arg(args, void *));
+ } else {
+ if (const auto *ET = QT->getAs<EnumType>())
+ QT = ET->getDecl()->getIntegerType();
+ switch (QT->castAs<BuiltinType>()->getKind()) {
+ default:
+ llvm_unreachable("unknown type kind!");
+ break;
+ // Types shorter than int are resolved as int, else va_arg has UB.
+ case BuiltinType::Bool:
+ VRef.setBool(va_arg(args, int));
+ break;
+ case BuiltinType::Char_S:
+ VRef.setChar_S(va_arg(args, int));
+ break;
+ case BuiltinType::SChar:
+ VRef.setSChar(va_arg(args, int));
+ break;
+ case BuiltinType::Char_U:
+ VRef.setChar_U(va_arg(args, unsigned));
+ break;
+ case BuiltinType::UChar:
+ VRef.setUChar(va_arg(args, unsigned));
+ break;
+ case BuiltinType::Short:
+ VRef.setShort(va_arg(args, int));
+ break;
+ case BuiltinType::UShort:
+ VRef.setUShort(va_arg(args, unsigned));
+ break;
+ case BuiltinType::Int:
+ VRef.setInt(va_arg(args, int));
+ break;
+ case BuiltinType::UInt:
+ VRef.setUInt(va_arg(args, unsigned));
+ break;
+ case BuiltinType::Long:
+ VRef.setLong(va_arg(args, long));
+ break;
+ case BuiltinType::ULong:
+ VRef.setULong(va_arg(args, unsigned long));
+ break;
+ case BuiltinType::LongLong:
+ VRef.setLongLong(va_arg(args, long long));
+ break;
+ case BuiltinType::ULongLong:
+ VRef.setULongLong(va_arg(args, unsigned long long));
+ break;
+ // Types shorter than double are resolved as double, else va_arg has UB.
+ case BuiltinType::Float:
+ VRef.setFloat(va_arg(args, double));
+ break;
+ case BuiltinType::Double:
+ VRef.setDouble(va_arg(args, double));
+ break;
+ case BuiltinType::LongDouble:
+ VRef.setLongDouble(va_arg(args, long double));
+ break;
+ // See REPL_BUILTIN_TYPES.
+ }
}
-}
-
-REPL_EXTERNAL_VISIBILITY void
-__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
- unsigned long long Val) {
- Value &VRef = *(Value *)OutVal;
- VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
- SetValueDataBasedOnQualType(VRef, Val);
-}
-
-REPL_EXTERNAL_VISIBILITY void
-__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
- float Val) {
- Value &VRef = *(Value *)OutVal;
- VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
- VRef.setFloat(Val);
-}
-
-REPL_EXTERNAL_VISIBILITY void
-__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
- double Val) {
- Value &VRef = *(Value *)OutVal;
- VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
- VRef.setDouble(Val);
-}
-
-REPL_EXTERNAL_VISIBILITY void
-__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
- long double Val) {
- Value &VRef = *(Value *)OutVal;
- VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
- VRef.setLongDouble(Val);
+ va_end(args);
}
// A trampoline to work around the fact that operator placement new cannot
diff --git a/contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.cpp b/contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.cpp
index c19cf6aa3156..45f6322b8461 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.cpp
+++ b/contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.cpp
@@ -72,7 +72,7 @@ NamedDecl *LookupNamed(Sema &S, llvm::StringRef Name,
const DeclContext *Within) {
DeclarationName DName = &S.Context.Idents.get(Name);
LookupResult R(S, DName, SourceLocation(), Sema::LookupOrdinaryName,
- Sema::ForVisibleRedeclaration);
+ RedeclarationKind::ForVisibleRedeclaration);
R.suppressDiagnostics();
diff --git a/contrib/llvm-project/clang/lib/Interpreter/Value.cpp b/contrib/llvm-project/clang/lib/Interpreter/Value.cpp
index 1d6b2da087e9..eb2ce9c9fd33 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/Value.cpp
+++ b/contrib/llvm-project/clang/lib/Interpreter/Value.cpp
@@ -1,4 +1,4 @@
-//===--- Interpreter.h - Incremental Compiation and Execution---*- C++ -*-===//
+//===------------ Value.cpp - Definition of interpreter value -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -22,8 +22,6 @@
#include <cstdint>
#include <utility>
-using namespace clang;
-
namespace {
// This is internal buffer maintained by Value, used to hold temporaries.
@@ -61,7 +59,7 @@ public:
void Release() {
assert(RefCnt > 0 && "Can't release if reference count is already zero");
if (--RefCnt == 0) {
- // We hace a non-trivial dtor.
+ // We have a non-trivial dtor.
if (Dtor && IsAlive()) {
assert(Elements && "We at least should have 1 element in Value");
size_t Stride = AllocSize / Elements;
@@ -97,6 +95,8 @@ private:
};
} // namespace
+namespace clang {
+
static Value::Kind ConvertQualTypeToKind(const ASTContext &Ctx, QualType QT) {
if (Ctx.hasSameType(QT, Ctx.VoidTy))
return Value::K_Void;
@@ -265,3 +265,5 @@ void Value::print(llvm::raw_ostream &Out) const {
assert(OpaqueType != nullptr && "Can't print default Value");
Out << "Not implement yet.\n";
}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Interpreter/Wasm.cpp b/contrib/llvm-project/clang/lib/Interpreter/Wasm.cpp
new file mode 100644
index 000000000000..aa10b160ccf8
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Interpreter/Wasm.cpp
@@ -0,0 +1,149 @@
+//===----------------- Wasm.cpp - Wasm Interpreter --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements interpreter support for code execution in WebAssembly.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Wasm.h"
+#include "IncrementalExecutor.h"
+
+#include <llvm/IR/LegacyPassManager.h>
+#include <llvm/IR/Module.h>
+#include <llvm/MC/TargetRegistry.h>
+#include <llvm/Target/TargetMachine.h>
+
+#include <clang/Interpreter/Interpreter.h>
+
+#include <string>
+
+namespace lld {
+enum Flavor {
+ Invalid,
+ Gnu, // -flavor gnu
+ MinGW, // -flavor gnu MinGW
+ WinLink, // -flavor link
+ Darwin, // -flavor darwin
+ Wasm, // -flavor wasm
+};
+
+using Driver = bool (*)(llvm::ArrayRef<const char *>, llvm::raw_ostream &,
+ llvm::raw_ostream &, bool, bool);
+
+struct DriverDef {
+ Flavor f;
+ Driver d;
+};
+
+struct Result {
+ int retCode;
+ bool canRunAgain;
+};
+
+Result lldMain(llvm::ArrayRef<const char *> args, llvm::raw_ostream &stdoutOS,
+ llvm::raw_ostream &stderrOS, llvm::ArrayRef<DriverDef> drivers);
+
+namespace wasm {
+bool link(llvm::ArrayRef<const char *> args, llvm::raw_ostream &stdoutOS,
+ llvm::raw_ostream &stderrOS, bool exitEarly, bool disableOutput);
+} // namespace wasm
+} // namespace lld
+
+#include <dlfcn.h>
+
+namespace clang {
+
+WasmIncrementalExecutor::WasmIncrementalExecutor(
+ llvm::orc::ThreadSafeContext &TSC)
+ : IncrementalExecutor(TSC) {}
+
+llvm::Error WasmIncrementalExecutor::addModule(PartialTranslationUnit &PTU) {
+ std::string ErrorString;
+
+ const llvm::Target *Target = llvm::TargetRegistry::lookupTarget(
+ PTU.TheModule->getTargetTriple(), ErrorString);
+ if (!Target) {
+ return llvm::make_error<llvm::StringError>("Failed to create Wasm Target: ",
+ llvm::inconvertibleErrorCode());
+ }
+
+ llvm::TargetOptions TO = llvm::TargetOptions();
+ llvm::TargetMachine *TargetMachine = Target->createTargetMachine(
+ PTU.TheModule->getTargetTriple(), "", "", TO, llvm::Reloc::Model::PIC_);
+ PTU.TheModule->setDataLayout(TargetMachine->createDataLayout());
+ std::string ObjectFileName = PTU.TheModule->getName().str() + ".o";
+ std::string BinaryFileName = PTU.TheModule->getName().str() + ".wasm";
+
+ std::error_code Error;
+ llvm::raw_fd_ostream ObjectFileOutput(llvm::StringRef(ObjectFileName), Error);
+
+ llvm::legacy::PassManager PM;
+ if (TargetMachine->addPassesToEmitFile(PM, ObjectFileOutput, nullptr,
+ llvm::CodeGenFileType::ObjectFile)) {
+ return llvm::make_error<llvm::StringError>(
+ "Wasm backend cannot produce object.", llvm::inconvertibleErrorCode());
+ }
+
+ if (!PM.run(*PTU.TheModule)) {
+
+ return llvm::make_error<llvm::StringError>("Failed to emit Wasm object.",
+ llvm::inconvertibleErrorCode());
+ }
+
+ ObjectFileOutput.close();
+
+ std::vector<const char *> LinkerArgs = {"wasm-ld",
+ "-shared",
+ "--import-memory",
+ "--experimental-pic",
+ "--stack-first",
+ "--allow-undefined",
+ ObjectFileName.c_str(),
+ "-o",
+ BinaryFileName.c_str()};
+
+ const lld::DriverDef WasmDriver = {lld::Flavor::Wasm, &lld::wasm::link};
+ std::vector<lld::DriverDef> WasmDriverArgs;
+ WasmDriverArgs.push_back(WasmDriver);
+ lld::Result Result =
+ lld::lldMain(LinkerArgs, llvm::outs(), llvm::errs(), WasmDriverArgs);
+
+ if (Result.retCode)
+ return llvm::make_error<llvm::StringError>(
+ "Failed to link incremental module", llvm::inconvertibleErrorCode());
+
+ void *LoadedLibModule =
+ dlopen(BinaryFileName.c_str(), RTLD_NOW | RTLD_GLOBAL);
+ if (LoadedLibModule == nullptr) {
+ llvm::errs() << dlerror() << '\n';
+ return llvm::make_error<llvm::StringError>(
+ "Failed to load incremental module", llvm::inconvertibleErrorCode());
+ }
+
+ return llvm::Error::success();
+}
+
+llvm::Error WasmIncrementalExecutor::removeModule(PartialTranslationUnit &PTU) {
+ return llvm::make_error<llvm::StringError>("Not implemented yet",
+ llvm::inconvertibleErrorCode());
+}
+
+llvm::Error WasmIncrementalExecutor::runCtors() const {
+ // This seems to be automatically done when using dlopen()
+ return llvm::Error::success();
+}
+
+llvm::Error WasmIncrementalExecutor::cleanUp() {
+ // Can't call cleanUp through IncrementalExecutor as it
+ // tries to deinitialize JIT which hasn't been initialized
+ return llvm::Error::success();
+}
+
+WasmIncrementalExecutor::~WasmIncrementalExecutor() = default;
+
+} // namespace clang \ No newline at end of file
diff --git a/contrib/llvm-project/clang/lib/Interpreter/Wasm.h b/contrib/llvm-project/clang/lib/Interpreter/Wasm.h
new file mode 100644
index 000000000000..4632613326d3
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Interpreter/Wasm.h
@@ -0,0 +1,38 @@
+//===------------------ Wasm.h - Wasm Interpreter ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements interpreter support for code execution in WebAssembly.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_INTERPRETER_WASM_H
+#define LLVM_CLANG_LIB_INTERPRETER_WASM_H
+
+#ifndef __EMSCRIPTEN__
+#error "This requires emscripten."
+#endif // __EMSCRIPTEN__
+
+#include "IncrementalExecutor.h"
+
+namespace clang {
+
+class WasmIncrementalExecutor : public IncrementalExecutor {
+public:
+ WasmIncrementalExecutor(llvm::orc::ThreadSafeContext &TSC);
+
+ llvm::Error addModule(PartialTranslationUnit &PTU) override;
+ llvm::Error removeModule(PartialTranslationUnit &PTU) override;
+ llvm::Error runCtors() const override;
+ llvm::Error cleanUp() override;
+
+ ~WasmIncrementalExecutor() override;
+};
+
+} // namespace clang
+
+#endif // LLVM_CLANG_LIB_INTERPRETER_WASM_H
diff --git a/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesScanner.cpp b/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesScanner.cpp
index 980f865cf24c..31a4c0f52b46 100644
--- a/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesScanner.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesScanner.cpp
@@ -73,8 +73,8 @@ struct Scanner {
// Set the lexer to use 'tok::at' for '@', instead of 'tok::unknown'.
LangOpts.ObjC = true;
LangOpts.LineComment = true;
- // FIXME: we do not enable C11 or C++11, so we are missing u/u8/U"" and
- // R"()" literals.
+ LangOpts.RawStringLiterals = true;
+ // FIXME: we do not enable C11 or C++11, so we are missing u/u8/U"".
return LangOpts;
}
@@ -88,8 +88,8 @@ private:
[[nodiscard]] dependency_directives_scan::Token &
lexToken(const char *&First, const char *const End);
- dependency_directives_scan::Token &lexIncludeFilename(const char *&First,
- const char *const End);
+ [[nodiscard]] dependency_directives_scan::Token &
+ lexIncludeFilename(const char *&First, const char *const End);
void skipLine(const char *&First, const char *const End);
void skipDirective(StringRef Name, const char *&First, const char *const End);
@@ -369,7 +369,7 @@ static void skipBlockComment(const char *&First, const char *const End) {
}
}
-/// \returns True if the current single quotation mark character is a C++ 14
+/// \returns True if the current single quotation mark character is a C++14
/// digit separator.
static bool isQuoteCppDigitSeparator(const char *const Start,
const char *const Cur,
@@ -544,7 +544,7 @@ Scanner::lexIncludeFilename(const char *&First, const char *const End) {
void Scanner::lexPPDirectiveBody(const char *&First, const char *const End) {
while (true) {
const dependency_directives_scan::Token &Tok = lexToken(First, End);
- if (Tok.is(tok::eod))
+ if (Tok.is(tok::eod) || Tok.is(tok::eof))
break;
}
}
@@ -660,7 +660,18 @@ bool Scanner::lexModule(const char *&First, const char *const End) {
// an import.
switch (*First) {
- case ':':
+ case ':': {
+ // `module :` is never the start of a valid module declaration.
+ if (Id == "module") {
+ skipLine(First, End);
+ return false;
+ }
+ // `import:(type)name` is a valid ObjC method decl, so check one more token.
+ (void)lexToken(First, End);
+ if (!tryLexIdentifierOrSkipLine(First, End))
+ return false;
+ break;
+ }
case '<':
case '"':
break;
@@ -901,7 +912,11 @@ bool Scanner::lexPPLine(const char *&First, const char *const End) {
case pp___include_macros:
case pp_include_next:
case pp_import:
- lexIncludeFilename(First, End);
+ // Ignore missing filenames in include or import directives.
+ if (lexIncludeFilename(First, End).is(tok::eod)) {
+ skipDirective(Id, First, End);
+ return true;
+ }
break;
default:
break;
diff --git a/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp b/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp
index dfa974e9a67e..c3b3064cfbf2 100644
--- a/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp
@@ -25,11 +25,11 @@
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Errc.h"
@@ -37,6 +37,7 @@
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/Support/xxhash.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
@@ -60,20 +61,21 @@ ALWAYS_ENABLED_STATISTIC(NumSubFrameworkLookups,
const IdentifierInfo *
HeaderFileInfo::getControllingMacro(ExternalPreprocessorSource *External) {
- if (ControllingMacro) {
- if (ControllingMacro->isOutOfDate()) {
- assert(External && "We must have an external source if we have a "
- "controlling macro that is out of date.");
- External->updateOutOfDateIdentifier(
- *const_cast<IdentifierInfo *>(ControllingMacro));
- }
- return ControllingMacro;
- }
+ if (LazyControllingMacro.isID()) {
+ if (!External)
+ return nullptr;
- if (!ControllingMacroID || !External)
- return nullptr;
+ LazyControllingMacro =
+ External->GetIdentifier(LazyControllingMacro.getID());
+ return LazyControllingMacro.getPtr();
+ }
- ControllingMacro = External->GetIdentifier(ControllingMacroID);
+ IdentifierInfo *ControllingMacro = LazyControllingMacro.getPtr();
+ if (ControllingMacro && ControllingMacro->isOutOfDate()) {
+ assert(External && "We must have an external source if we have a "
+ "controlling macro that is out of date.");
+ External->updateOutOfDateIdentifier(*ControllingMacro);
+ }
return ControllingMacro;
}
@@ -141,6 +143,28 @@ std::vector<bool> HeaderSearch::computeUserEntryUsage() const {
return UserEntryUsage;
}
+std::vector<bool> HeaderSearch::collectVFSUsageAndClear() const {
+ std::vector<bool> VFSUsage;
+ if (!getHeaderSearchOpts().ModulesIncludeVFSUsage)
+ return VFSUsage;
+
+ llvm::vfs::FileSystem &RootFS = FileMgr.getVirtualFileSystem();
+ // TODO: This only works if the `RedirectingFileSystem`s were all created by
+ // `createVFSFromOverlayFiles`.
+ RootFS.visit([&](llvm::vfs::FileSystem &FS) {
+ if (auto *RFS = dyn_cast<llvm::vfs::RedirectingFileSystem>(&FS)) {
+ VFSUsage.push_back(RFS->hasBeenUsed());
+ RFS->clearHasBeenUsed();
+ }
+ });
+ assert(VFSUsage.size() == getHeaderSearchOpts().VFSOverlayFiles.size() &&
+ "A different number of RedirectingFileSystem's were present than "
+ "-ivfsoverlay options passed to Clang!");
+ // VFS visit order is the opposite of VFSOverlayFiles order.
+ std::reverse(VFSUsage.begin(), VFSUsage.end());
+ return VFSUsage;
+}
+
/// CreateHeaderMap - This method returns a HeaderMap for the specified
/// FileEntry, uniquing them through the 'HeaderMaps' datastructure.
const HeaderMap *HeaderSearch::CreateHeaderMap(FileEntryRef FE) {
@@ -259,10 +283,10 @@ std::string HeaderSearch::getCachedModuleFileNameImpl(StringRef ModuleName,
if (getModuleMap().canonicalizeModuleMapPath(CanonicalPath))
return {};
- llvm::hash_code Hash = llvm::hash_combine(CanonicalPath.str().lower());
+ auto Hash = llvm::xxh3_64bits(CanonicalPath.str().lower());
SmallString<128> HashStr;
- llvm::APInt(64, size_t(Hash)).toStringUnsigned(HashStr, /*Radix*/36);
+ llvm::APInt(64, Hash).toStringUnsigned(HashStr, /*Radix*/36);
llvm::sys::path::append(Result, ModuleName + "-" + HashStr + ".pcm");
}
return Result.str().str();
@@ -925,9 +949,13 @@ OptionalFileEntryRef HeaderSearch::LookupFile(
// If we have no includer, that means we're processing a #include
// from a module build. We should treat this as a system header if we're
// building a [system] module.
- bool IncluderIsSystemHeader =
- Includer ? getFileInfo(*Includer).DirInfo != SrcMgr::C_User :
- BuildSystemModule;
+ bool IncluderIsSystemHeader = [&]() {
+ if (!Includer)
+ return BuildSystemModule;
+ const HeaderFileInfo *HFI = getExistingFileInfo(*Includer);
+ assert(HFI && "includer without file info");
+ return HFI->DirInfo != SrcMgr::C_User;
+ }();
if (OptionalFileEntryRef FE = getFileAndSuggestModule(
TmpDir, IncludeLoc, IncluderAndDir.second, IncluderIsSystemHeader,
RequestingModule, SuggestedModule)) {
@@ -942,10 +970,11 @@ OptionalFileEntryRef HeaderSearch::LookupFile(
// Note that we only use one of FromHFI/ToHFI at once, due to potential
// reallocation of the underlying vector potentially making the first
// reference binding dangling.
- HeaderFileInfo &FromHFI = getFileInfo(*Includer);
- unsigned DirInfo = FromHFI.DirInfo;
- bool IndexHeaderMapHeader = FromHFI.IndexHeaderMapHeader;
- StringRef Framework = FromHFI.Framework;
+ const HeaderFileInfo *FromHFI = getExistingFileInfo(*Includer);
+ assert(FromHFI && "includer without file info");
+ unsigned DirInfo = FromHFI->DirInfo;
+ bool IndexHeaderMapHeader = FromHFI->IndexHeaderMapHeader;
+ StringRef Framework = FromHFI->Framework;
HeaderFileInfo &ToHFI = getFileInfo(*FE);
ToHFI.DirInfo = DirInfo;
@@ -1132,10 +1161,12 @@ OptionalFileEntryRef HeaderSearch::LookupFile(
// "Foo" is the name of the framework in which the including header was found.
if (!Includers.empty() && Includers.front().first && !isAngled &&
!Filename.contains('/')) {
- HeaderFileInfo &IncludingHFI = getFileInfo(*Includers.front().first);
- if (IncludingHFI.IndexHeaderMapHeader) {
+ const HeaderFileInfo *IncludingHFI =
+ getExistingFileInfo(*Includers.front().first);
+ assert(IncludingHFI && "includer without file info");
+ if (IncludingHFI->IndexHeaderMapHeader) {
SmallString<128> ScratchFilename;
- ScratchFilename += IncludingHFI.Framework;
+ ScratchFilename += IncludingHFI->Framework;
ScratchFilename += '/';
ScratchFilename += Filename;
@@ -1265,11 +1296,11 @@ OptionalFileEntryRef HeaderSearch::LookupSubframeworkHeader(
}
// This file is a system header or C++ unfriendly if the old file is.
- //
- // Note that the temporary 'DirInfo' is required here, as either call to
- // getFileInfo could resize the vector and we don't want to rely on order
- // of evaluation.
- unsigned DirInfo = getFileInfo(ContextFileEnt).DirInfo;
+ const HeaderFileInfo *ContextHFI = getExistingFileInfo(ContextFileEnt);
+ assert(ContextHFI && "context file without file info");
+ // Note that the temporary 'DirInfo' is required here, as the call to
+ // getFileInfo could resize the vector and might invalidate 'ContextHFI'.
+ unsigned DirInfo = ContextHFI->DirInfo;
getFileInfo(*File).DirInfo = DirInfo;
FrameworkName.pop_back(); // remove the trailing '/'
@@ -1285,6 +1316,30 @@ OptionalFileEntryRef HeaderSearch::LookupSubframeworkHeader(
// File Info Management.
//===----------------------------------------------------------------------===//
+static bool moduleMembershipNeedsMerge(const HeaderFileInfo *HFI,
+ ModuleMap::ModuleHeaderRole Role) {
+ if (ModuleMap::isModular(Role))
+ return !HFI->isModuleHeader || HFI->isTextualModuleHeader;
+ if (!HFI->isModuleHeader && (Role & ModuleMap::TextualHeader))
+ return !HFI->isTextualModuleHeader;
+ return false;
+}
+
+static void mergeHeaderFileInfoModuleBits(HeaderFileInfo &HFI,
+ bool isModuleHeader,
+ bool isTextualModuleHeader) {
+ HFI.isModuleHeader |= isModuleHeader;
+ if (HFI.isModuleHeader)
+ HFI.isTextualModuleHeader = false;
+ else
+ HFI.isTextualModuleHeader |= isTextualModuleHeader;
+}
+
+void HeaderFileInfo::mergeModuleMembership(ModuleMap::ModuleHeaderRole Role) {
+ mergeHeaderFileInfoModuleBits(*this, ModuleMap::isModular(Role),
+ (Role & ModuleMap::TextualHeader));
+}
+
/// Merge the header file info provided by \p OtherHFI into the current
/// header file info (\p HFI)
static void mergeHeaderFileInfo(HeaderFileInfo &HFI,
@@ -1293,12 +1348,11 @@ static void mergeHeaderFileInfo(HeaderFileInfo &HFI,
HFI.isImport |= OtherHFI.isImport;
HFI.isPragmaOnce |= OtherHFI.isPragmaOnce;
- HFI.isModuleHeader |= OtherHFI.isModuleHeader;
+ mergeHeaderFileInfoModuleBits(HFI, OtherHFI.isModuleHeader,
+ OtherHFI.isTextualModuleHeader);
- if (!HFI.ControllingMacro && !HFI.ControllingMacroID) {
- HFI.ControllingMacro = OtherHFI.ControllingMacro;
- HFI.ControllingMacroID = OtherHFI.ControllingMacroID;
- }
+ if (!HFI.LazyControllingMacro.isValid())
+ HFI.LazyControllingMacro = OtherHFI.LazyControllingMacro;
HFI.DirInfo = OtherHFI.DirInfo;
HFI.External = (!HFI.IsValid || HFI.External);
@@ -1309,8 +1363,6 @@ static void mergeHeaderFileInfo(HeaderFileInfo &HFI,
HFI.Framework = OtherHFI.Framework;
}
-/// getFileInfo - Return the HeaderFileInfo structure for the specified
-/// FileEntry.
HeaderFileInfo &HeaderSearch::getFileInfo(FileEntryRef FE) {
if (FE.getUID() >= FileInfo.size())
FileInfo.resize(FE.getUID() + 1);
@@ -1327,27 +1379,20 @@ HeaderFileInfo &HeaderSearch::getFileInfo(FileEntryRef FE) {
}
HFI->IsValid = true;
- // We have local information about this header file, so it's no longer
- // strictly external.
+ // We assume the caller has local information about this header file, so it's
+ // no longer strictly external.
HFI->External = false;
return *HFI;
}
-const HeaderFileInfo *
-HeaderSearch::getExistingFileInfo(FileEntryRef FE, bool WantExternal) const {
- // If we have an external source, ensure we have the latest information.
- // FIXME: Use a generation count to check whether this is really up to date.
+const HeaderFileInfo *HeaderSearch::getExistingFileInfo(FileEntryRef FE) const {
HeaderFileInfo *HFI;
if (ExternalSource) {
- if (FE.getUID() >= FileInfo.size()) {
- if (!WantExternal)
- return nullptr;
+ if (FE.getUID() >= FileInfo.size())
FileInfo.resize(FE.getUID() + 1);
- }
HFI = &FileInfo[FE.getUID()];
- if (!WantExternal && (!HFI->IsValid || HFI->External))
- return nullptr;
+ // FIXME: Use a generation count to check whether this is really up to date.
if (!HFI->Resolved) {
auto ExternalHFI = ExternalSource->GetHeaderFileInfo(FE);
if (ExternalHFI.IsValid) {
@@ -1356,16 +1401,25 @@ HeaderSearch::getExistingFileInfo(FileEntryRef FE, bool WantExternal) const {
mergeHeaderFileInfo(*HFI, ExternalHFI);
}
}
- } else if (FE.getUID() >= FileInfo.size()) {
- return nullptr;
- } else {
+ } else if (FE.getUID() < FileInfo.size()) {
HFI = &FileInfo[FE.getUID()];
+ } else {
+ HFI = nullptr;
}
- if (!HFI->IsValid || (HFI->External && !WantExternal))
- return nullptr;
+ return (HFI && HFI->IsValid) ? HFI : nullptr;
+}
- return HFI;
+const HeaderFileInfo *
+HeaderSearch::getExistingLocalFileInfo(FileEntryRef FE) const {
+ HeaderFileInfo *HFI;
+ if (FE.getUID() < FileInfo.size()) {
+ HFI = &FileInfo[FE.getUID()];
+ } else {
+ HFI = nullptr;
+ }
+
+ return (HFI && HFI->IsValid && !HFI->External) ? HFI : nullptr;
}
bool HeaderSearch::isFileMultipleIncludeGuarded(FileEntryRef File) const {
@@ -1373,27 +1427,24 @@ bool HeaderSearch::isFileMultipleIncludeGuarded(FileEntryRef File) const {
// once. Note that we dor't check for #import, because that's not a property
// of the file itself.
if (auto *HFI = getExistingFileInfo(File))
- return HFI->isPragmaOnce || HFI->ControllingMacro ||
- HFI->ControllingMacroID;
+ return HFI->isPragmaOnce || HFI->LazyControllingMacro.isValid();
return false;
}
void HeaderSearch::MarkFileModuleHeader(FileEntryRef FE,
ModuleMap::ModuleHeaderRole Role,
bool isCompilingModuleHeader) {
- bool isModularHeader = ModuleMap::isModular(Role);
-
// Don't mark the file info as non-external if there's nothing to change.
if (!isCompilingModuleHeader) {
- if (!isModularHeader)
+ if ((Role & ModuleMap::ExcludedHeader))
return;
auto *HFI = getExistingFileInfo(FE);
- if (HFI && HFI->isModuleHeader)
+ if (HFI && !moduleMembershipNeedsMerge(HFI, Role))
return;
}
auto &HFI = getFileInfo(FE);
- HFI.isModuleHeader |= isModularHeader;
+ HFI.mergeModuleMembership(Role);
HFI.isCompilingModuleHeader |= isCompilingModuleHeader;
}
@@ -1401,74 +1452,128 @@ bool HeaderSearch::ShouldEnterIncludeFile(Preprocessor &PP,
FileEntryRef File, bool isImport,
bool ModulesEnabled, Module *M,
bool &IsFirstIncludeOfFile) {
- ++NumIncluded; // Count # of attempted #includes.
-
+ // An include file should be entered if either:
+ // 1. This is the first include of the file.
+ // 2. This file can be included multiple times, that is it's not an
+ // "include-once" file.
+ //
+ // Include-once is controlled by these preprocessor directives.
+ //
+ // #pragma once
+ // This directive is in the include file, and marks it as an include-once
+ // file.
+ //
+ // #import <file>
+ // This directive is in the includer, and indicates that the include file
+ // should only be entered if this is the first include.
+ ++NumIncluded;
IsFirstIncludeOfFile = false;
-
- // Get information about this file.
HeaderFileInfo &FileInfo = getFileInfo(File);
- // If this is a #import directive, check that we have not already imported
- // this header.
- if (isImport) {
- // If this has already been imported, don't import it again.
- FileInfo.isImport = true;
+ auto MaybeReenterImportedFile = [&]() -> bool {
+ // Modules add a wrinkle though: what's included isn't necessarily visible.
+ // Consider this module.
+ // module Example {
+ // module A { header "a.h" export * }
+ // module B { header "b.h" export * }
+ // }
+ // b.h includes c.h. The main file includes a.h, which will trigger a module
+ // build of Example, and c.h will be included. However, c.h isn't visible to
+ // the main file. Normally this is fine, the main file can just include c.h
+ // if it needs it. If c.h is in a module, the include will translate into a
+ // module import, this function will be skipped, and everything will work as
+ // expected. However, if c.h is not in a module (or is `textual`), then this
+ // function will run. If c.h is include-once, it will not be entered from
+ // the main file and it will still not be visible.
+
+ // If modules aren't enabled then there's no visibility issue. Always
+ // respect `#pragma once`.
+ if (!ModulesEnabled || FileInfo.isPragmaOnce)
+ return false;
- // FIXME: this is a workaround for the lack of proper modules-aware support
- // for #import / #pragma once
- auto TryEnterImported = [&]() -> bool {
- if (!ModulesEnabled)
- return false;
- // Ensure FileInfo bits are up to date.
- ModMap.resolveHeaderDirectives(File);
- // Modules with builtins are special; multiple modules use builtins as
- // modular headers, example:
- //
- // module stddef { header "stddef.h" export * }
- //
- // After module map parsing, this expands to:
- //
- // module stddef {
- // header "/path_to_builtin_dirs/stddef.h"
- // textual "stddef.h"
- // }
- //
- // It's common that libc++ and system modules will both define such
- // submodules. Make sure cached results for a builtin header won't
- // prevent other builtin modules from potentially entering the builtin
- // header. Note that builtins are header guarded and the decision to
- // actually enter them is postponed to the controlling macros logic below.
- bool TryEnterHdr = false;
- if (FileInfo.isCompilingModuleHeader && FileInfo.isModuleHeader)
- TryEnterHdr = ModMap.isBuiltinHeader(File);
-
- // Textual headers can be #imported from different modules. Since ObjC
- // headers find in the wild might rely only on #import and do not contain
- // controlling macros, be conservative and only try to enter textual
- // headers if such macro is present.
- if (!FileInfo.isModuleHeader &&
- FileInfo.getControllingMacro(ExternalLookup))
- TryEnterHdr = true;
- return TryEnterHdr;
- };
+ // Ensure FileInfo bits are up to date.
+ ModMap.resolveHeaderDirectives(File);
+
+ // This brings up a subtlety of #import - it's not a very good indicator of
+ // include-once. Developers are often unaware of the difference between
+ // #include and #import, and tend to use one or the other indiscrimiately.
+ // In order to support #include on include-once headers that lack macro
+ // guards and `#pragma once` (which is the vast majority of Objective-C
+ // headers), if a file is ever included with #import, it's marked as
+ // isImport in the HeaderFileInfo and treated as include-once. This allows
+ // #include to work in Objective-C.
+ // #include <Foundation/Foundation.h>
+ // #include <Foundation/NSString.h>
+ // Foundation.h has an #import of NSString.h, and so the second #include is
+ // skipped even though NSString.h has no `#pragma once` and no macro guard.
+ //
+ // However, this helpfulness causes problems with modules. If c.h is not an
+ // include-once file, but something included it with #import anyway (as is
+ // typical in Objective-C code), this include will be skipped and c.h will
+ // not be visible. Consider it not include-once if it is a `textual` header
+ // in a module.
+ if (FileInfo.isTextualModuleHeader)
+ return true;
- // Has this already been #import'ed or #include'd?
- if (PP.alreadyIncluded(File) && !TryEnterImported())
+ if (FileInfo.isCompilingModuleHeader) {
+ // It's safer to re-enter a file whose module is being built because its
+ // declarations will still be scoped to a single module.
+ if (FileInfo.isModuleHeader) {
+ // Headers marked as "builtin" are covered by the system module maps
+ // rather than the builtin ones. Some versions of the Darwin module fail
+ // to mark stdarg.h and stddef.h as textual. Attempt to re-enter these
+ // files while building their module to allow them to function properly.
+ if (ModMap.isBuiltinHeader(File))
+ return true;
+ } else {
+ // Files that are excluded from their module can potentially be
+ // re-entered from their own module. This might cause redeclaration
+ // errors if another module saw this file first, but there's a
+ // reasonable chance that its module will build first. However if
+ // there's no controlling macro, then trust the #import and assume this
+ // really is an include-once file.
+ if (FileInfo.getControllingMacro(ExternalLookup))
+ return true;
+ }
+ }
+ // If the include file has a macro guard, then it might still not be
+ // re-entered if the controlling macro is visibly defined. e.g. another
+ // header in the module being built included this file and local submodule
+ // visibility is not enabled.
+
+ // It might be tempting to re-enter the include-once file if it's not
+ // visible in an attempt to make it visible. However this will still cause
+ // redeclaration errors against the known-but-not-visible declarations. The
+ // include file not being visible will most likely cause "undefined x"
+ // errors, but at least there's a slim chance of compilation succeeding.
+ return false;
+ };
+
+ if (isImport) {
+ // As discussed above, record that this file was ever `#import`ed, and treat
+ // it as an include-once file from here out.
+ FileInfo.isImport = true;
+ if (PP.alreadyIncluded(File) && !MaybeReenterImportedFile())
return false;
} else {
- // Otherwise, if this is a #include of a file that was previously #import'd
- // or if this is the second #include of a #pragma once file, ignore it.
- if (FileInfo.isPragmaOnce || FileInfo.isImport)
+ // isPragmaOnce and isImport are only set after the file has been included
+ // at least once. If either are set then this is a repeat #include of an
+ // include-once file.
+ if (FileInfo.isPragmaOnce ||
+ (FileInfo.isImport && !MaybeReenterImportedFile()))
return false;
}
- // Next, check to see if the file is wrapped with #ifndef guards. If so, and
- // if the macro that guards it is defined, we know the #include has no effect.
- if (const IdentifierInfo *ControllingMacro
- = FileInfo.getControllingMacro(ExternalLookup)) {
+ // As a final optimization, check for a macro guard and skip entering the file
+ // if the controlling macro is defined. The macro guard will effectively erase
+ // the file's contents, and the include would have no effect other than to
+ // waste time opening and reading a file.
+ if (const IdentifierInfo *ControllingMacro =
+ FileInfo.getControllingMacro(ExternalLookup)) {
// If the header corresponds to a module, check whether the macro is already
- // defined in that module rather than checking in the current set of visible
- // modules.
+ // defined in that module rather than checking all visible modules. This is
+ // mainly to cover corner cases where the same controlling macro is used in
+ // different files in multiple modules.
if (M ? PP.isMacroDefinedInLocalModule(ControllingMacro, M)
: PP.isMacroDefined(ControllingMacro)) {
++NumMultiIncludeFileOptzn;
@@ -1476,8 +1581,8 @@ bool HeaderSearch::ShouldEnterIncludeFile(Preprocessor &PP,
}
}
+ FileInfo.IsLocallyIncluded = true;
IsFirstIncludeOfFile = PP.markIncluded(File);
-
return true;
}
@@ -1941,6 +2046,8 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(
using namespace llvm::sys;
llvm::SmallString<32> FilePath = File;
+ if (!WorkingDir.empty() && !path::is_absolute(FilePath))
+ fs::make_absolute(WorkingDir, FilePath);
// remove_dots switches to backslashes on windows as a side-effect!
// We always want to suggest forward slashes for includes.
// (not remove_dots(..., posix) as that misparses windows paths).
diff --git a/contrib/llvm-project/clang/lib/Lex/Lexer.cpp b/contrib/llvm-project/clang/lib/Lex/Lexer.cpp
index 50b56265f6e1..ef1e1f4bd9ae 100644
--- a/contrib/llvm-project/clang/lib/Lex/Lexer.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/Lexer.cpp
@@ -74,6 +74,51 @@ tok::ObjCKeywordKind Token::getObjCKeywordID() const {
return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword;
}
+/// Determine whether the token kind starts a simple-type-specifier.
+bool Token::isSimpleTypeSpecifier(const LangOptions &LangOpts) const {
+ switch (getKind()) {
+ case tok::annot_typename:
+ case tok::annot_decltype:
+ case tok::annot_pack_indexing_type:
+ return true;
+
+ case tok::kw_short:
+ case tok::kw_long:
+ case tok::kw___int64:
+ case tok::kw___int128:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw_void:
+ case tok::kw_char:
+ case tok::kw_int:
+ case tok::kw_half:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw___bf16:
+ case tok::kw__Float16:
+ case tok::kw___float128:
+ case tok::kw___ibm128:
+ case tok::kw_wchar_t:
+ case tok::kw_bool:
+ case tok::kw__Bool:
+ case tok::kw__Accum:
+ case tok::kw__Fract:
+ case tok::kw__Sat:
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) case tok::kw___##Trait:
+#include "clang/Basic/TransformTypeTraits.def"
+ case tok::kw___auto_type:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+ case tok::kw_typeof:
+ case tok::kw_decltype:
+ case tok::kw_char8_t:
+ return getIdentifierInfo()->isKeyword(LangOpts);
+
+ default:
+ return false;
+ }
+}
+
//===----------------------------------------------------------------------===//
// Lexer Class Implementation
//===----------------------------------------------------------------------===//
@@ -2216,8 +2261,17 @@ bool Lexer::LexRawStringLiteral(Token &Result, const char *CurPtr,
unsigned PrefixLen = 0;
- while (PrefixLen != 16 && isRawStringDelimBody(CurPtr[PrefixLen]))
+ while (PrefixLen != 16 && isRawStringDelimBody(CurPtr[PrefixLen])) {
+ if (!isLexingRawMode() &&
+ llvm::is_contained({'$', '@', '`'}, CurPtr[PrefixLen])) {
+ const char *Pos = &CurPtr[PrefixLen];
+ Diag(Pos, LangOpts.CPlusPlus26
+ ? diag::warn_cxx26_compat_raw_string_literal_character_set
+ : diag::ext_cxx26_raw_string_literal_character_set)
+ << StringRef(Pos, 1);
+ }
++PrefixLen;
+ }
// If the last character was not a '(', then we didn't lex a valid delimiter.
if (CurPtr[PrefixLen] != '(') {
@@ -2225,6 +2279,8 @@ bool Lexer::LexRawStringLiteral(Token &Result, const char *CurPtr,
const char *PrefixEnd = &CurPtr[PrefixLen];
if (PrefixLen == 16) {
Diag(PrefixEnd, diag::err_raw_delim_too_long);
+ } else if (*PrefixEnd == '\n') {
+ Diag(PrefixEnd, diag::err_invalid_newline_raw_delim);
} else {
Diag(PrefixEnd, diag::err_invalid_char_raw_delim)
<< StringRef(PrefixEnd, 1);
@@ -3820,7 +3876,7 @@ LexStart:
tok::utf16_char_constant);
// UTF-16 raw string literal
- if (Char == 'R' && LangOpts.CPlusPlus11 &&
+ if (Char == 'R' && LangOpts.RawStringLiterals &&
getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
return LexRawStringLiteral(Result,
ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
@@ -3842,7 +3898,7 @@ LexStart:
SizeTmp2, Result),
tok::utf8_char_constant);
- if (Char2 == 'R' && LangOpts.CPlusPlus11) {
+ if (Char2 == 'R' && LangOpts.RawStringLiterals) {
unsigned SizeTmp3;
char Char3 = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3);
// UTF-8 raw string literal
@@ -3878,7 +3934,7 @@ LexStart:
tok::utf32_char_constant);
// UTF-32 raw string literal
- if (Char == 'R' && LangOpts.CPlusPlus11 &&
+ if (Char == 'R' && LangOpts.RawStringLiterals &&
getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
return LexRawStringLiteral(Result,
ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
@@ -3893,7 +3949,7 @@ LexStart:
// Notify MIOpt that we read a non-whitespace/non-comment token.
MIOpt.ReadToken();
- if (LangOpts.CPlusPlus11) {
+ if (LangOpts.RawStringLiterals) {
Char = getCharAndSize(CurPtr, SizeTmp);
if (Char == '"')
@@ -3916,7 +3972,7 @@ LexStart:
tok::wide_string_literal);
// Wide raw string literal.
- if (LangOpts.CPlusPlus11 && Char == 'R' &&
+ if (LangOpts.RawStringLiterals && Char == 'R' &&
getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
return LexRawStringLiteral(Result,
ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
diff --git a/contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp b/contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp
index 0a78638f6805..9d2720af5dbd 100644
--- a/contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp
@@ -974,6 +974,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
bool isFixedPointConstant = isFixedPointLiteral();
bool isFPConstant = isFloatingLiteral();
bool HasSize = false;
+ bool DoubleUnderscore = false;
// Loop over all of the characters of the suffix. If we see something bad,
// we break out of the loop.
@@ -1117,6 +1118,32 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
if (isImaginary) break; // Cannot be repeated.
isImaginary = true;
continue; // Success.
+ case '_':
+ if (isFPConstant)
+ break; // Invalid for floats
+ if (HasSize)
+ break;
+ // There is currently no way to reach this with DoubleUnderscore set.
+ // If new double underscope literals are added handle it here as above.
+ assert(!DoubleUnderscore && "unhandled double underscore case");
+ if (LangOpts.CPlusPlus && s + 2 < ThisTokEnd &&
+ s[1] == '_') { // s + 2 < ThisTokEnd to ensure some character exists
+ // after __
+ DoubleUnderscore = true;
+ s += 2; // Skip both '_'
+ if (s + 1 < ThisTokEnd &&
+ (*s == 'u' || *s == 'U')) { // Ensure some character after 'u'/'U'
+ isUnsigned = true;
+ ++s;
+ }
+ if (s + 1 < ThisTokEnd &&
+ ((*s == 'w' && *(++s) == 'b') || (*s == 'W' && *(++s) == 'B'))) {
+ isBitInt = true;
+ HasSize = true;
+ continue;
+ }
+ }
+ break;
case 'w':
case 'W':
if (isFPConstant)
@@ -1127,9 +1154,9 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
// wb and WB are allowed, but a mixture of cases like Wb or wB is not. We
// explicitly do not support the suffix in C++ as an extension because a
// library-based UDL that resolves to a library type may be more
- // appropriate there.
- if (!LangOpts.CPlusPlus && ((s[0] == 'w' && s[1] == 'b') ||
- (s[0] == 'W' && s[1] == 'B'))) {
+ // appropriate there. The same rules apply for __wb/__WB.
+ if ((!LangOpts.CPlusPlus || DoubleUnderscore) && s + 1 < ThisTokEnd &&
+ ((s[0] == 'w' && s[1] == 'b') || (s[0] == 'W' && s[1] == 'B'))) {
isBitInt = true;
HasSize = true;
++s; // Skip both characters (2nd char skipped on continue).
@@ -1241,7 +1268,9 @@ bool NumericLiteralParser::isValidUDSuffix(const LangOptions &LangOpts,
return false;
// By C++11 [lex.ext]p10, ud-suffixes starting with an '_' are always valid.
- if (Suffix[0] == '_')
+ // Suffixes starting with '__' (double underscore) are for use by
+ // the implementation.
+ if (Suffix.starts_with("_") && !Suffix.starts_with("__"))
return true;
// In C++11, there are no library suffixes.
@@ -1358,11 +1387,17 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
// Handle simple binary numbers 0b01010
if ((c1 == 'b' || c1 == 'B') && (s[1] == '0' || s[1] == '1')) {
- // 0b101010 is a C++1y / GCC extension.
- Diags.Report(TokLoc, LangOpts.CPlusPlus14
- ? diag::warn_cxx11_compat_binary_literal
- : LangOpts.CPlusPlus ? diag::ext_binary_literal_cxx14
- : diag::ext_binary_literal);
+ // 0b101010 is a C++14 and C23 extension.
+ unsigned DiagId;
+ if (LangOpts.CPlusPlus14)
+ DiagId = diag::warn_cxx11_compat_binary_literal;
+ else if (LangOpts.C23)
+ DiagId = diag::warn_c23_compat_binary_literal;
+ else if (LangOpts.CPlusPlus)
+ DiagId = diag::ext_binary_literal_cxx14;
+ else
+ DiagId = diag::ext_binary_literal;
+ Diags.Report(TokLoc, DiagId);
++s;
assert(s < ThisTokEnd && "didn't maximally munch?");
radix = 2;
@@ -1486,7 +1521,8 @@ bool NumericLiteralParser::GetIntegerValue(llvm::APInt &Val) {
}
llvm::APFloat::opStatus
-NumericLiteralParser::GetFloatValue(llvm::APFloat &Result) {
+NumericLiteralParser::GetFloatValue(llvm::APFloat &Result,
+ llvm::RoundingMode RM) {
using llvm::APFloat;
unsigned n = std::min(SuffixBegin - ThisTokBegin, ThisTokEnd - ThisTokBegin);
@@ -1500,15 +1536,16 @@ NumericLiteralParser::GetFloatValue(llvm::APFloat &Result) {
Str = Buffer;
}
- auto StatusOrErr =
- Result.convertFromString(Str, APFloat::rmNearestTiesToEven);
+ auto StatusOrErr = Result.convertFromString(Str, RM);
assert(StatusOrErr && "Invalid floating point representation");
return !errorToBool(StatusOrErr.takeError()) ? *StatusOrErr
: APFloat::opInvalidOp;
}
-static inline bool IsExponentPart(char c) {
- return c == 'p' || c == 'P' || c == 'e' || c == 'E';
+static inline bool IsExponentPart(char c, bool isHex) {
+ if (isHex)
+ return c == 'p' || c == 'P';
+ return c == 'e' || c == 'E';
}
bool NumericLiteralParser::GetFixedPointValue(llvm::APInt &StoreVal, unsigned Scale) {
@@ -1527,7 +1564,8 @@ bool NumericLiteralParser::GetFixedPointValue(llvm::APInt &StoreVal, unsigned Sc
if (saw_exponent) {
const char *Ptr = DigitsBegin;
- while (!IsExponentPart(*Ptr)) ++Ptr;
+ while (!IsExponentPart(*Ptr, radix == 16))
+ ++Ptr;
ExponentBegin = Ptr;
++Ptr;
NegativeExponent = *Ptr == '-';
diff --git a/contrib/llvm-project/clang/lib/Lex/MacroInfo.cpp b/contrib/llvm-project/clang/lib/Lex/MacroInfo.cpp
index 39bb0f44eff2..dfdf463665f3 100644
--- a/contrib/llvm-project/clang/lib/Lex/MacroInfo.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/MacroInfo.cpp
@@ -257,7 +257,7 @@ LLVM_DUMP_METHOD void MacroDirective::dump() const {
}
ModuleMacro *ModuleMacro::create(Preprocessor &PP, Module *OwningModule,
- IdentifierInfo *II, MacroInfo *Macro,
+ const IdentifierInfo *II, MacroInfo *Macro,
ArrayRef<ModuleMacro *> Overrides) {
void *Mem = PP.getPreprocessorAllocator().Allocate(
sizeof(ModuleMacro) + sizeof(ModuleMacro *) * Overrides.size(),
diff --git a/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp b/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp
index 10c475f617d4..eed7eca2e735 100644
--- a/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp
@@ -648,8 +648,7 @@ ModuleMap::findOrCreateModuleForHeaderInUmbrellaDir(FileEntryRef File) {
UmbrellaModule = UmbrellaModule->Parent;
if (UmbrellaModule->InferSubmodules) {
- OptionalFileEntryRef UmbrellaModuleMap =
- getModuleMapFileForUniquing(UmbrellaModule);
+ FileID UmbrellaModuleMap = getModuleMapFileIDForUniquing(UmbrellaModule);
// Infer submodules for each of the directories we found between
// the directory of the umbrella header and the directory where
@@ -1021,7 +1020,7 @@ Module *ModuleMap::inferFrameworkModule(DirectoryEntryRef FrameworkDir,
// If the framework has a parent path from which we're allowed to infer
// a framework module, do so.
- OptionalFileEntryRef ModuleMapFile;
+ FileID ModuleMapFID;
if (!Parent) {
// Determine whether we're allowed to infer a module map.
bool canInfer = false;
@@ -1060,7 +1059,7 @@ Module *ModuleMap::inferFrameworkModule(DirectoryEntryRef FrameworkDir,
Attrs.IsExhaustive |= inferred->second.Attrs.IsExhaustive;
Attrs.NoUndeclaredIncludes |=
inferred->second.Attrs.NoUndeclaredIncludes;
- ModuleMapFile = inferred->second.ModuleMapFile;
+ ModuleMapFID = inferred->second.ModuleMapFID;
}
}
}
@@ -1069,7 +1068,7 @@ Module *ModuleMap::inferFrameworkModule(DirectoryEntryRef FrameworkDir,
if (!canInfer)
return nullptr;
} else {
- ModuleMapFile = getModuleMapFileForUniquing(Parent);
+ ModuleMapFID = getModuleMapFileIDForUniquing(Parent);
}
// Look for an umbrella header.
@@ -1086,7 +1085,7 @@ Module *ModuleMap::inferFrameworkModule(DirectoryEntryRef FrameworkDir,
Module *Result = new Module(ModuleName, SourceLocation(), Parent,
/*IsFramework=*/true, /*IsExplicit=*/false,
NumCreatedModules++);
- InferredModuleAllowedBy[Result] = ModuleMapFile;
+ InferredModuleAllowedBy[Result] = ModuleMapFID;
Result->IsInferred = true;
if (!Parent) {
if (LangOpts.CurrentModule == ModuleName)
@@ -1307,28 +1306,34 @@ void ModuleMap::addHeader(Module *Mod, Module::Header Header,
Cb->moduleMapAddHeader(Header.Entry.getName());
}
-OptionalFileEntryRef
-ModuleMap::getContainingModuleMapFile(const Module *Module) const {
+FileID ModuleMap::getContainingModuleMapFileID(const Module *Module) const {
if (Module->DefinitionLoc.isInvalid())
- return std::nullopt;
+ return {};
- return SourceMgr.getFileEntryRefForID(
- SourceMgr.getFileID(Module->DefinitionLoc));
+ return SourceMgr.getFileID(Module->DefinitionLoc);
}
OptionalFileEntryRef
-ModuleMap::getModuleMapFileForUniquing(const Module *M) const {
+ModuleMap::getContainingModuleMapFile(const Module *Module) const {
+ return SourceMgr.getFileEntryRefForID(getContainingModuleMapFileID(Module));
+}
+
+FileID ModuleMap::getModuleMapFileIDForUniquing(const Module *M) const {
if (M->IsInferred) {
assert(InferredModuleAllowedBy.count(M) && "missing inferred module map");
return InferredModuleAllowedBy.find(M)->second;
}
- return getContainingModuleMapFile(M);
+ return getContainingModuleMapFileID(M);
+}
+
+OptionalFileEntryRef
+ModuleMap::getModuleMapFileForUniquing(const Module *M) const {
+ return SourceMgr.getFileEntryRefForID(getModuleMapFileIDForUniquing(M));
}
-void ModuleMap::setInferredModuleAllowedBy(Module *M,
- OptionalFileEntryRef ModMap) {
+void ModuleMap::setInferredModuleAllowedBy(Module *M, FileID ModMapFID) {
assert(M->IsInferred && "module not inferred");
- InferredModuleAllowedBy[M] = ModMap;
+ InferredModuleAllowedBy[M] = ModMapFID;
}
std::error_code
@@ -1517,7 +1522,7 @@ namespace clang {
ModuleMap &Map;
/// The current module map file.
- FileEntryRef ModuleMapFile;
+ FileID ModuleMapFID;
/// Source location of most recent parsed module declaration
SourceLocation CurrModuleDeclLoc;
@@ -1585,13 +1590,12 @@ namespace clang {
bool parseOptionalAttributes(Attributes &Attrs);
public:
- explicit ModuleMapParser(Lexer &L, SourceManager &SourceMgr,
- const TargetInfo *Target, DiagnosticsEngine &Diags,
- ModuleMap &Map, FileEntryRef ModuleMapFile,
- DirectoryEntryRef Directory, bool IsSystem)
+ ModuleMapParser(Lexer &L, SourceManager &SourceMgr,
+ const TargetInfo *Target, DiagnosticsEngine &Diags,
+ ModuleMap &Map, FileID ModuleMapFID,
+ DirectoryEntryRef Directory, bool IsSystem)
: L(L), SourceMgr(SourceMgr), Target(Target), Diags(Diags), Map(Map),
- ModuleMapFile(ModuleMapFile), Directory(Directory),
- IsSystem(IsSystem) {
+ ModuleMapFID(ModuleMapFID), Directory(Directory), IsSystem(IsSystem) {
Tok.clear();
consumeToken();
}
@@ -2011,11 +2015,13 @@ void ModuleMapParser::parseModuleDecl() {
}
if (TopLevelModule &&
- ModuleMapFile != Map.getContainingModuleMapFile(TopLevelModule)) {
- assert(ModuleMapFile != Map.getModuleMapFileForUniquing(TopLevelModule) &&
+ ModuleMapFID != Map.getContainingModuleMapFileID(TopLevelModule)) {
+ assert(ModuleMapFID !=
+ Map.getModuleMapFileIDForUniquing(TopLevelModule) &&
"submodule defined in same file as 'module *' that allowed its "
"top-level module");
- Map.addAdditionalModuleMapFile(TopLevelModule, ModuleMapFile);
+ Map.addAdditionalModuleMapFile(
+ TopLevelModule, *SourceMgr.getFileEntryRefForID(ModuleMapFID));
}
}
@@ -2120,7 +2126,8 @@ void ModuleMapParser::parseModuleDecl() {
ActiveModule->NoUndeclaredIncludes = true;
ActiveModule->Directory = Directory;
- StringRef MapFileName(ModuleMapFile.getName());
+ StringRef MapFileName(
+ SourceMgr.getFileEntryRefForID(ModuleMapFID)->getName());
if (MapFileName.ends_with("module.private.modulemap") ||
MapFileName.ends_with("module_private.map")) {
ActiveModule->ModuleMapIsPrivate = true;
@@ -2906,7 +2913,7 @@ void ModuleMapParser::parseInferredModuleDecl(bool Framework, bool Explicit) {
// We'll be inferring framework modules for this directory.
Map.InferredDirectories[Directory].InferModules = true;
Map.InferredDirectories[Directory].Attrs = Attrs;
- Map.InferredDirectories[Directory].ModuleMapFile = ModuleMapFile;
+ Map.InferredDirectories[Directory].ModuleMapFID = ModuleMapFID;
// FIXME: Handle the 'framework' keyword.
}
@@ -3139,8 +3146,7 @@ bool ModuleMap::parseModuleMapFile(FileEntryRef File, bool IsSystem,
Buffer->getBufferStart() + (Offset ? *Offset : 0),
Buffer->getBufferEnd());
SourceLocation Start = L.getSourceLocation();
- ModuleMapParser Parser(L, SourceMgr, Target, Diags, *this, File, Dir,
- IsSystem);
+ ModuleMapParser Parser(L, SourceMgr, Target, Diags, *this, ID, Dir, IsSystem);
bool Result = Parser.parseModuleMapFile();
ParsedModuleMap[File] = Result;
diff --git a/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp b/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp
index a980f4bcbae1..4e77df9ec444 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp
@@ -19,6 +19,7 @@
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TokenKinds.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/HeaderSearch.h"
@@ -39,6 +40,7 @@
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/AlignOf.h"
@@ -82,8 +84,7 @@ Preprocessor::AllocateVisibilityMacroDirective(SourceLocation Loc,
/// Read and discard all tokens remaining on the current line until
/// the tok::eod token is found.
-SourceRange Preprocessor::DiscardUntilEndOfDirective() {
- Token Tmp;
+SourceRange Preprocessor::DiscardUntilEndOfDirective(Token &Tmp) {
SourceRange Res;
LexUnexpandedToken(Tmp);
@@ -183,7 +184,7 @@ static MacroDiag shouldWarnOnMacroDef(Preprocessor &PP, IdentifierInfo *II) {
return isFeatureTestMacro(Text) ? MD_NoWarn : MD_ReservedMacro;
if (II->isKeyword(Lang))
return MD_KeywordDef;
- if (Lang.CPlusPlus11 && (Text.equals("override") || Text.equals("final")))
+ if (Lang.CPlusPlus11 && (Text == "override" || Text == "final"))
return MD_KeywordDef;
return MD_NoWarn;
}
@@ -545,7 +546,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
if (!*SkipRangePtr) {
*SkipRangePtr = Hashptr - BeginPtr;
}
- assert(*SkipRangePtr == Hashptr - BeginPtr);
+ assert(*SkipRangePtr == unsigned(Hashptr - BeginPtr));
BeginPtr = nullptr;
SkipRangePtr = nullptr;
}
@@ -1073,6 +1074,76 @@ OptionalFileEntryRef Preprocessor::LookupFile(
return std::nullopt;
}
+OptionalFileEntryRef
+Preprocessor::LookupEmbedFile(StringRef Filename, bool isAngled, bool OpenFile,
+ const FileEntry *LookupFromFile) {
+ FileManager &FM = this->getFileManager();
+ if (llvm::sys::path::is_absolute(Filename)) {
+ // lookup path or immediately fail
+ llvm::Expected<FileEntryRef> ShouldBeEntry =
+ FM.getFileRef(Filename, OpenFile);
+ return llvm::expectedToOptional(std::move(ShouldBeEntry));
+ }
+
+ auto SeparateComponents = [](SmallVectorImpl<char> &LookupPath,
+ StringRef StartingFrom, StringRef FileName,
+ bool RemoveInitialFileComponentFromLookupPath) {
+ llvm::sys::path::native(StartingFrom, LookupPath);
+ if (RemoveInitialFileComponentFromLookupPath)
+ llvm::sys::path::remove_filename(LookupPath);
+ if (!LookupPath.empty() &&
+ !llvm::sys::path::is_separator(LookupPath.back())) {
+ LookupPath.push_back(llvm::sys::path::get_separator().front());
+ }
+ LookupPath.append(FileName.begin(), FileName.end());
+ };
+
+ // Otherwise, it's search time!
+ SmallString<512> LookupPath;
+ // Non-angled lookup
+ if (!isAngled) {
+ if (LookupFromFile) {
+ // Use file-based lookup.
+ StringRef FullFileDir = LookupFromFile->tryGetRealPathName();
+ if (!FullFileDir.empty()) {
+ SeparateComponents(LookupPath, FullFileDir, Filename, true);
+ llvm::Expected<FileEntryRef> ShouldBeEntry =
+ FM.getFileRef(LookupPath, OpenFile);
+ if (ShouldBeEntry)
+ return llvm::expectedToOptional(std::move(ShouldBeEntry));
+ llvm::consumeError(ShouldBeEntry.takeError());
+ }
+ }
+
+ // Otherwise, do working directory lookup.
+ LookupPath.clear();
+ auto MaybeWorkingDirEntry = FM.getDirectoryRef(".");
+ if (MaybeWorkingDirEntry) {
+ DirectoryEntryRef WorkingDirEntry = *MaybeWorkingDirEntry;
+ StringRef WorkingDir = WorkingDirEntry.getName();
+ if (!WorkingDir.empty()) {
+ SeparateComponents(LookupPath, WorkingDir, Filename, false);
+ llvm::Expected<FileEntryRef> ShouldBeEntry =
+ FM.getFileRef(LookupPath, OpenFile);
+ if (ShouldBeEntry)
+ return llvm::expectedToOptional(std::move(ShouldBeEntry));
+ llvm::consumeError(ShouldBeEntry.takeError());
+ }
+ }
+ }
+
+ for (const auto &Entry : PPOpts->EmbedEntries) {
+ LookupPath.clear();
+ SeparateComponents(LookupPath, Entry, Filename, false);
+ llvm::Expected<FileEntryRef> ShouldBeEntry =
+ FM.getFileRef(LookupPath, OpenFile);
+ if (ShouldBeEntry)
+ return llvm::expectedToOptional(std::move(ShouldBeEntry));
+ llvm::consumeError(ShouldBeEntry.takeError());
+ }
+ return std::nullopt;
+}
+
//===----------------------------------------------------------------------===//
// Preprocessor Directive Handling.
//===----------------------------------------------------------------------===//
@@ -1168,6 +1239,7 @@ void Preprocessor::HandleDirective(Token &Result) {
case tok::pp_include_next:
case tok::pp___include_macros:
case tok::pp_pragma:
+ case tok::pp_embed:
Diag(Result, diag::err_embedded_directive) << II->getName();
Diag(*ArgMacro, diag::note_macro_expansion_here)
<< ArgMacro->getIdentifierInfo();
@@ -1282,6 +1354,11 @@ void Preprocessor::HandleDirective(Token &Result) {
return HandleIdentSCCSDirective(Result);
case tok::pp_sccs:
return HandleIdentSCCSDirective(Result);
+ case tok::pp_embed:
+ return HandleEmbedDirective(SavedHash.getLocation(), Result,
+ getCurrentFileLexer()
+ ? *getCurrentFileLexer()->getFileEntry()
+ : static_cast<FileEntry *>(nullptr));
case tok::pp_assert:
//isExtension = true; // FIXME: implement #assert
break;
@@ -1918,7 +1995,8 @@ bool Preprocessor::checkModuleIsAvailable(const LangOptions &LangOpts,
// FIXME: Track the location at which the requirement was specified, and
// use it here.
Diags.Report(M.DefinitionLoc, diag::err_module_unavailable)
- << M.getFullModuleName() << Requirement.second << Requirement.first;
+ << M.getFullModuleName() << Requirement.RequiredState
+ << Requirement.FeatureName;
}
return true;
}
@@ -2253,26 +2331,27 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
// FIXME: We do not have a good way to disambiguate C++ clang modules from
// C++ standard modules (other than use/non-use of Header Units).
- Module *SM = SuggestedModule.getModule();
- bool MaybeTranslateInclude =
- Action == Enter && File && SM && !SM->isForBuilding(getLangOpts());
+ Module *ModuleToImport = SuggestedModule.getModule();
+
+ bool MaybeTranslateInclude = Action == Enter && File && ModuleToImport &&
+ !ModuleToImport->isForBuilding(getLangOpts());
// Maybe a usable Header Unit
bool UsableHeaderUnit = false;
- if (getLangOpts().CPlusPlusModules && SM && SM->isHeaderUnit()) {
+ if (getLangOpts().CPlusPlusModules && ModuleToImport &&
+ ModuleToImport->isHeaderUnit()) {
if (TrackGMFState.inGMF() || IsImportDecl)
UsableHeaderUnit = true;
else if (!IsImportDecl) {
// This is a Header Unit that we do not include-translate
- SuggestedModule = ModuleMap::KnownHeader();
- SM = nullptr;
+ ModuleToImport = nullptr;
}
}
// Maybe a usable clang header module.
bool UsableClangHeaderModule =
- (getLangOpts().CPlusPlusModules || getLangOpts().Modules) && SM &&
- !SM->isHeaderUnit();
+ (getLangOpts().CPlusPlusModules || getLangOpts().Modules) &&
+ ModuleToImport && !ModuleToImport->isHeaderUnit();
// Determine whether we should try to import the module for this #include, if
// there is one. Don't do so if precompiled module support is disabled or we
@@ -2282,12 +2361,11 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
// unavailable, diagnose the situation and bail out.
// FIXME: Remove this; loadModule does the same check (but produces
// slightly worse diagnostics).
- if (checkModuleIsAvailable(getLangOpts(), getTargetInfo(),
- *SuggestedModule.getModule(),
+ if (checkModuleIsAvailable(getLangOpts(), getTargetInfo(), *ModuleToImport,
getDiagnostics())) {
Diag(FilenameTok.getLocation(),
diag::note_implicit_top_level_module_import_here)
- << SuggestedModule.getModule()->getTopLevelModuleName();
+ << ModuleToImport->getTopLevelModuleName();
return {ImportAction::None};
}
@@ -2295,7 +2373,7 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
// FIXME: Should we have a second loadModule() overload to avoid this
// extra lookup step?
SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Path;
- for (Module *Mod = SM; Mod; Mod = Mod->Parent)
+ for (Module *Mod = ModuleToImport; Mod; Mod = Mod->Parent)
Path.push_back(std::make_pair(getIdentifierInfo(Mod->Name),
FilenameTok.getLocation()));
std::reverse(Path.begin(), Path.end());
@@ -2306,12 +2384,12 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
// Load the module to import its macros. We'll make the declarations
// visible when the parser gets here.
- // FIXME: Pass SuggestedModule in here rather than converting it to a path
+ // FIXME: Pass ModuleToImport in here rather than converting it to a path
// and making the module loader convert it back again.
ModuleLoadResult Imported = TheModuleLoader.loadModule(
IncludeTok.getLocation(), Path, Module::Hidden,
/*IsInclusionDirective=*/true);
- assert((Imported == nullptr || Imported == SuggestedModule.getModule()) &&
+ assert((Imported == nullptr || Imported == ModuleToImport) &&
"the imported module is different than the suggested one");
if (Imported) {
@@ -2323,8 +2401,7 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
// was in the directory of an umbrella header, for instance), but no
// actual module containing it exists (because the umbrella header is
// incomplete). Treat this as a textual inclusion.
- SuggestedModule = ModuleMap::KnownHeader();
- SM = nullptr;
+ ModuleToImport = nullptr;
} else if (Imported.isConfigMismatch()) {
// On a configuration mismatch, enter the header textually. We still know
// that it's part of the corresponding module.
@@ -2365,7 +2442,7 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
// this file will have no effect.
if (Action == Enter && File &&
!HeaderInfo.ShouldEnterIncludeFile(*this, *File, EnterOnce,
- getLangOpts().Modules, SM,
+ getLangOpts().Modules, ModuleToImport,
IsFirstIncludeOfFile)) {
// C++ standard modules:
// If we are not in the GMF, then we textually include only
@@ -2380,7 +2457,7 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
if (UsableHeaderUnit && !getLangOpts().CompilingPCH)
Action = TrackGMFState.inGMF() ? Import : Skip;
else
- Action = (SuggestedModule && !getLangOpts().CompilingPCH) ? Import : Skip;
+ Action = (ModuleToImport && !getLangOpts().CompilingPCH) ? Import : Skip;
}
// Check for circular inclusion of the main file.
@@ -2400,8 +2477,7 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
// FIXME: Use a different callback for a pp-import?
Callbacks->InclusionDirective(HashLoc, IncludeTok, LookupFilename, isAngled,
FilenameRange, File, SearchPath, RelativePath,
- Action == Import ? SuggestedModule.getModule()
- : nullptr,
+ SuggestedModule.getModule(), Action == Import,
FileCharacter);
if (Action == Skip && File)
Callbacks->FileSkipped(*File, FilenameTok, FileCharacter);
@@ -2412,7 +2488,7 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
// If this is a C++20 pp-import declaration, diagnose if we didn't find any
// module corresponding to the named header.
- if (IsImportDecl && !SuggestedModule) {
+ if (IsImportDecl && !ModuleToImport) {
Diag(FilenameTok, diag::err_header_import_not_header_unit)
<< OriginalFilename << File->getName();
return {ImportAction::None};
@@ -2517,8 +2593,8 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
switch (Action) {
case Skip:
// If we don't need to enter the file, stop now.
- if (SM)
- return {ImportAction::SkippedModuleImport, SM};
+ if (ModuleToImport)
+ return {ImportAction::SkippedModuleImport, ModuleToImport};
return {ImportAction::None};
case IncludeLimitReached:
@@ -2528,15 +2604,15 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
case Import: {
// If this is a module import, make it visible if needed.
- assert(SM && "no module to import");
+ assert(ModuleToImport && "no module to import");
- makeModuleVisible(SM, EndLoc);
+ makeModuleVisible(ModuleToImport, EndLoc);
if (IncludeTok.getIdentifierInfo()->getPPKeywordID() ==
tok::pp___include_macros)
return {ImportAction::None};
- return {ImportAction::ModuleImport, SM};
+ return {ImportAction::ModuleImport, ModuleToImport};
}
case Enter:
@@ -2573,13 +2649,14 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
// Determine if we're switching to building a new submodule, and which one.
// This does not apply for C++20 modules header units.
- if (SM && !SM->isHeaderUnit()) {
- if (SM->getTopLevelModule()->ShadowingModule) {
+ if (ModuleToImport && !ModuleToImport->isHeaderUnit()) {
+ if (ModuleToImport->getTopLevelModule()->ShadowingModule) {
// We are building a submodule that belongs to a shadowed module. This
// means we find header files in the shadowed module.
- Diag(SM->DefinitionLoc, diag::err_module_build_shadowed_submodule)
- << SM->getFullModuleName();
- Diag(SM->getTopLevelModule()->ShadowingModule->DefinitionLoc,
+ Diag(ModuleToImport->DefinitionLoc,
+ diag::err_module_build_shadowed_submodule)
+ << ModuleToImport->getFullModuleName();
+ Diag(ModuleToImport->getTopLevelModule()->ShadowingModule->DefinitionLoc,
diag::note_previous_definition);
return {ImportAction::None};
}
@@ -2591,21 +2668,22 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
// that behaves the same as the header would behave in a compilation using
// that PCH, which means we should enter the submodule. We need to teach
// the AST serialization layer to deal with the resulting AST.
- if (getLangOpts().CompilingPCH && SM->isForBuilding(getLangOpts()))
+ if (getLangOpts().CompilingPCH &&
+ ModuleToImport->isForBuilding(getLangOpts()))
return {ImportAction::None};
assert(!CurLexerSubmodule && "should not have marked this as a module yet");
- CurLexerSubmodule = SM;
+ CurLexerSubmodule = ModuleToImport;
// Let the macro handling code know that any future macros are within
// the new submodule.
- EnterSubmodule(SM, EndLoc, /*ForPragma*/ false);
+ EnterSubmodule(ModuleToImport, EndLoc, /*ForPragma*/ false);
// Let the parser know that any future declarations are within the new
// submodule.
// FIXME: There's no point doing this if we're handling a #__include_macros
// directive.
- return {ImportAction::ModuleBegin, SM};
+ return {ImportAction::ModuleBegin, ModuleToImport};
}
assert(!IsImportDecl && "failed to diagnose missing module for import decl");
@@ -2806,7 +2884,7 @@ static bool isConfigurationPattern(Token &MacroName, MacroInfo *MI,
if (TrimmedValue.ends_with("__"))
TrimmedValue = TrimmedValue.drop_back(2);
}
- return TrimmedValue.equals(MacroText);
+ return TrimmedValue == MacroText;
} else {
return false;
}
@@ -3542,3 +3620,395 @@ void Preprocessor::HandleElifFamilyDirective(Token &ElifToken,
HashToken.getLocation(), CI.IfLoc, /*Foundnonskip*/ true,
/*FoundElse*/ CI.FoundElse, ElifToken.getLocation());
}
+
+std::optional<LexEmbedParametersResult>
+Preprocessor::LexEmbedParameters(Token &CurTok, bool ForHasEmbed) {
+ LexEmbedParametersResult Result{};
+ SmallVector<Token, 2> ParameterTokens;
+ tok::TokenKind EndTokenKind = ForHasEmbed ? tok::r_paren : tok::eod;
+
+ auto DiagMismatchedBracesAndSkipToEOD =
+ [&](tok::TokenKind Expected,
+ std::pair<tok::TokenKind, SourceLocation> Matches) {
+ Diag(CurTok, diag::err_expected) << Expected;
+ Diag(Matches.second, diag::note_matching) << Matches.first;
+ if (CurTok.isNot(tok::eod))
+ DiscardUntilEndOfDirective(CurTok);
+ };
+
+ auto ExpectOrDiagAndSkipToEOD = [&](tok::TokenKind Kind) {
+ if (CurTok.isNot(Kind)) {
+ Diag(CurTok, diag::err_expected) << Kind;
+ if (CurTok.isNot(tok::eod))
+ DiscardUntilEndOfDirective(CurTok);
+ return false;
+ }
+ return true;
+ };
+
+ // C23 6.10:
+ // pp-parameter-name:
+ // pp-standard-parameter
+ // pp-prefixed-parameter
+ //
+ // pp-standard-parameter:
+ // identifier
+ //
+ // pp-prefixed-parameter:
+ // identifier :: identifier
+ auto LexPPParameterName = [&]() -> std::optional<std::string> {
+ // We expect the current token to be an identifier; if it's not, things
+ // have gone wrong.
+ if (!ExpectOrDiagAndSkipToEOD(tok::identifier))
+ return std::nullopt;
+
+ const IdentifierInfo *Prefix = CurTok.getIdentifierInfo();
+
+ // Lex another token; it is either a :: or we're done with the parameter
+ // name.
+ LexNonComment(CurTok);
+ if (CurTok.is(tok::coloncolon)) {
+ // We found a ::, so lex another identifier token.
+ LexNonComment(CurTok);
+ if (!ExpectOrDiagAndSkipToEOD(tok::identifier))
+ return std::nullopt;
+
+ const IdentifierInfo *Suffix = CurTok.getIdentifierInfo();
+
+ // Lex another token so we're past the name.
+ LexNonComment(CurTok);
+ return (llvm::Twine(Prefix->getName()) + "::" + Suffix->getName()).str();
+ }
+ return Prefix->getName().str();
+ };
+
+ // C23 6.10p5: In all aspects, a preprocessor standard parameter specified by
+ // this document as an identifier pp_param and an identifier of the form
+ // __pp_param__ shall behave the same when used as a preprocessor parameter,
+ // except for the spelling.
+ auto NormalizeParameterName = [](StringRef Name) {
+ if (Name.size() > 4 && Name.starts_with("__") && Name.ends_with("__"))
+ return Name.substr(2, Name.size() - 4);
+ return Name;
+ };
+
+ auto LexParenthesizedIntegerExpr = [&]() -> std::optional<size_t> {
+ // we have a limit parameter and its internals are processed using
+ // evaluation rules from #if.
+ if (!ExpectOrDiagAndSkipToEOD(tok::l_paren))
+ return std::nullopt;
+
+ // We do not consume the ( because EvaluateDirectiveExpression will lex
+ // the next token for us.
+ IdentifierInfo *ParameterIfNDef = nullptr;
+ bool EvaluatedDefined;
+ DirectiveEvalResult LimitEvalResult = EvaluateDirectiveExpression(
+ ParameterIfNDef, CurTok, EvaluatedDefined, /*CheckForEOD=*/false);
+
+ if (!LimitEvalResult.Value) {
+ // If there was an error evaluating the directive expression, we expect
+ // to be at the end of directive token.
+ assert(CurTok.is(tok::eod) && "expect to be at the end of directive");
+ return std::nullopt;
+ }
+
+ if (!ExpectOrDiagAndSkipToEOD(tok::r_paren))
+ return std::nullopt;
+
+ // Eat the ).
+ LexNonComment(CurTok);
+
+ // C23 6.10.3.2p2: The token defined shall not appear within the constant
+ // expression.
+ if (EvaluatedDefined) {
+ Diag(CurTok, diag::err_defined_in_pp_embed);
+ return std::nullopt;
+ }
+
+ if (LimitEvalResult.Value) {
+ const llvm::APSInt &Result = *LimitEvalResult.Value;
+ if (Result.isNegative()) {
+ Diag(CurTok, diag::err_requires_positive_value)
+ << toString(Result, 10) << /*positive*/ 0;
+ return std::nullopt;
+ }
+ return Result.getLimitedValue();
+ }
+ return std::nullopt;
+ };
+
+ auto GetMatchingCloseBracket = [](tok::TokenKind Kind) {
+ switch (Kind) {
+ case tok::l_paren:
+ return tok::r_paren;
+ case tok::l_brace:
+ return tok::r_brace;
+ case tok::l_square:
+ return tok::r_square;
+ default:
+ llvm_unreachable("should not get here");
+ }
+ };
+
+ auto LexParenthesizedBalancedTokenSoup =
+ [&](llvm::SmallVectorImpl<Token> &Tokens) {
+ std::vector<std::pair<tok::TokenKind, SourceLocation>> BracketStack;
+
+ // We expect the current token to be a left paren.
+ if (!ExpectOrDiagAndSkipToEOD(tok::l_paren))
+ return false;
+ LexNonComment(CurTok); // Eat the (
+
+ bool WaitingForInnerCloseParen = false;
+ while (CurTok.isNot(tok::eod) &&
+ (WaitingForInnerCloseParen || CurTok.isNot(tok::r_paren))) {
+ switch (CurTok.getKind()) {
+ default: // Shutting up diagnostics about not fully-covered switch.
+ break;
+ case tok::l_paren:
+ WaitingForInnerCloseParen = true;
+ [[fallthrough]];
+ case tok::l_brace:
+ case tok::l_square:
+ BracketStack.push_back({CurTok.getKind(), CurTok.getLocation()});
+ break;
+ case tok::r_paren:
+ WaitingForInnerCloseParen = false;
+ [[fallthrough]];
+ case tok::r_brace:
+ case tok::r_square: {
+ tok::TokenKind Matching =
+ GetMatchingCloseBracket(BracketStack.back().first);
+ if (BracketStack.empty() || CurTok.getKind() != Matching) {
+ DiagMismatchedBracesAndSkipToEOD(Matching, BracketStack.back());
+ return false;
+ }
+ BracketStack.pop_back();
+ } break;
+ }
+ Tokens.push_back(CurTok);
+ LexNonComment(CurTok);
+ }
+
+ // When we're done, we want to eat the closing paren.
+ if (!ExpectOrDiagAndSkipToEOD(tok::r_paren))
+ return false;
+
+ LexNonComment(CurTok); // Eat the )
+ return true;
+ };
+
+ LexNonComment(CurTok); // Prime the pump.
+ while (!CurTok.isOneOf(EndTokenKind, tok::eod)) {
+ SourceLocation ParamStartLoc = CurTok.getLocation();
+ std::optional<std::string> ParamName = LexPPParameterName();
+ if (!ParamName)
+ return std::nullopt;
+ StringRef Parameter = NormalizeParameterName(*ParamName);
+
+ // Lex the parameters (dependent on the parameter type we want!).
+ //
+ // C23 6.10.3.Xp1: The X standard embed parameter may appear zero times or
+ // one time in the embed parameter sequence.
+ if (Parameter == "limit") {
+ if (Result.MaybeLimitParam)
+ Diag(CurTok, diag::err_pp_embed_dup_params) << Parameter;
+
+ std::optional<size_t> Limit = LexParenthesizedIntegerExpr();
+ if (!Limit)
+ return std::nullopt;
+ Result.MaybeLimitParam =
+ PPEmbedParameterLimit{*Limit, {ParamStartLoc, CurTok.getLocation()}};
+ } else if (Parameter == "clang::offset") {
+ if (Result.MaybeOffsetParam)
+ Diag(CurTok, diag::err_pp_embed_dup_params) << Parameter;
+
+ std::optional<size_t> Offset = LexParenthesizedIntegerExpr();
+ if (!Offset)
+ return std::nullopt;
+ Result.MaybeOffsetParam = PPEmbedParameterOffset{
+ *Offset, {ParamStartLoc, CurTok.getLocation()}};
+ } else if (Parameter == "prefix") {
+ if (Result.MaybePrefixParam)
+ Diag(CurTok, diag::err_pp_embed_dup_params) << Parameter;
+
+ SmallVector<Token, 4> Soup;
+ if (!LexParenthesizedBalancedTokenSoup(Soup))
+ return std::nullopt;
+ Result.MaybePrefixParam = PPEmbedParameterPrefix{
+ std::move(Soup), {ParamStartLoc, CurTok.getLocation()}};
+ } else if (Parameter == "suffix") {
+ if (Result.MaybeSuffixParam)
+ Diag(CurTok, diag::err_pp_embed_dup_params) << Parameter;
+
+ SmallVector<Token, 4> Soup;
+ if (!LexParenthesizedBalancedTokenSoup(Soup))
+ return std::nullopt;
+ Result.MaybeSuffixParam = PPEmbedParameterSuffix{
+ std::move(Soup), {ParamStartLoc, CurTok.getLocation()}};
+ } else if (Parameter == "if_empty") {
+ if (Result.MaybeIfEmptyParam)
+ Diag(CurTok, diag::err_pp_embed_dup_params) << Parameter;
+
+ SmallVector<Token, 4> Soup;
+ if (!LexParenthesizedBalancedTokenSoup(Soup))
+ return std::nullopt;
+ Result.MaybeIfEmptyParam = PPEmbedParameterIfEmpty{
+ std::move(Soup), {ParamStartLoc, CurTok.getLocation()}};
+ } else {
+ ++Result.UnrecognizedParams;
+
+ // If there's a left paren, we need to parse a balanced token sequence
+ // and just eat those tokens.
+ if (CurTok.is(tok::l_paren)) {
+ SmallVector<Token, 4> Soup;
+ if (!LexParenthesizedBalancedTokenSoup(Soup))
+ return std::nullopt;
+ }
+ if (!ForHasEmbed) {
+ Diag(CurTok, diag::err_pp_unknown_parameter) << 1 << Parameter;
+ return std::nullopt;
+ }
+ }
+ }
+ return Result;
+}
+
+void Preprocessor::HandleEmbedDirectiveImpl(
+ SourceLocation HashLoc, const LexEmbedParametersResult &Params,
+ StringRef BinaryContents) {
+ if (BinaryContents.empty()) {
+ // If we have no binary contents, the only thing we need to emit are the
+ // if_empty tokens, if any.
+ // FIXME: this loses AST fidelity; nothing in the compiler will see that
+ // these tokens came from #embed. We have to hack around this when printing
+ // preprocessed output. The same is true for prefix and suffix tokens.
+ if (Params.MaybeIfEmptyParam) {
+ ArrayRef<Token> Toks = Params.MaybeIfEmptyParam->Tokens;
+ size_t TokCount = Toks.size();
+ auto NewToks = std::make_unique<Token[]>(TokCount);
+ llvm::copy(Toks, NewToks.get());
+ EnterTokenStream(std::move(NewToks), TokCount, true, true);
+ }
+ return;
+ }
+
+ size_t NumPrefixToks = Params.PrefixTokenCount(),
+ NumSuffixToks = Params.SuffixTokenCount();
+ size_t TotalNumToks = 1 + NumPrefixToks + NumSuffixToks;
+ size_t CurIdx = 0;
+ auto Toks = std::make_unique<Token[]>(TotalNumToks);
+
+ // Add the prefix tokens, if any.
+ if (Params.MaybePrefixParam) {
+ llvm::copy(Params.MaybePrefixParam->Tokens, &Toks[CurIdx]);
+ CurIdx += NumPrefixToks;
+ }
+
+ EmbedAnnotationData *Data = new (BP) EmbedAnnotationData;
+ Data->BinaryData = BinaryContents;
+
+ Toks[CurIdx].startToken();
+ Toks[CurIdx].setKind(tok::annot_embed);
+ Toks[CurIdx].setAnnotationRange(HashLoc);
+ Toks[CurIdx++].setAnnotationValue(Data);
+
+ // Now add the suffix tokens, if any.
+ if (Params.MaybeSuffixParam) {
+ llvm::copy(Params.MaybeSuffixParam->Tokens, &Toks[CurIdx]);
+ CurIdx += NumSuffixToks;
+ }
+
+ assert(CurIdx == TotalNumToks && "Calculated the incorrect number of tokens");
+ EnterTokenStream(std::move(Toks), TotalNumToks, true, true);
+}
+
+void Preprocessor::HandleEmbedDirective(SourceLocation HashLoc, Token &EmbedTok,
+ const FileEntry *LookupFromFile) {
+ // Give the usual extension/compatibility warnings.
+ if (LangOpts.C23)
+ Diag(EmbedTok, diag::warn_compat_pp_embed_directive);
+ else
+ Diag(EmbedTok, diag::ext_pp_embed_directive)
+ << (LangOpts.CPlusPlus ? /*Clang*/ 1 : /*C23*/ 0);
+
+ // Parse the filename header
+ Token FilenameTok;
+ if (LexHeaderName(FilenameTok))
+ return;
+
+ if (FilenameTok.isNot(tok::header_name)) {
+ Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
+ if (FilenameTok.isNot(tok::eod))
+ DiscardUntilEndOfDirective();
+ return;
+ }
+
+ // Parse the optional sequence of
+ // directive-parameters:
+ // identifier parameter-name-list[opt] directive-argument-list[opt]
+ // directive-argument-list:
+ // '(' balanced-token-sequence ')'
+ // parameter-name-list:
+ // '::' identifier parameter-name-list[opt]
+ Token CurTok;
+ std::optional<LexEmbedParametersResult> Params =
+ LexEmbedParameters(CurTok, /*ForHasEmbed=*/false);
+
+ assert((Params || CurTok.is(tok::eod)) &&
+ "expected success or to be at the end of the directive");
+ if (!Params)
+ return;
+
+ // Now, splat the data out!
+ SmallString<128> FilenameBuffer;
+ StringRef Filename = getSpelling(FilenameTok, FilenameBuffer);
+ StringRef OriginalFilename = Filename;
+ bool isAngled =
+ GetIncludeFilenameSpelling(FilenameTok.getLocation(), Filename);
+ // If GetIncludeFilenameSpelling set the start ptr to null, there was an
+ // error.
+ assert(!Filename.empty());
+ OptionalFileEntryRef MaybeFileRef =
+ this->LookupEmbedFile(Filename, isAngled, true, LookupFromFile);
+ if (!MaybeFileRef) {
+ // could not find file
+ if (Callbacks && Callbacks->EmbedFileNotFound(OriginalFilename)) {
+ return;
+ }
+ Diag(FilenameTok, diag::err_pp_file_not_found) << Filename;
+ return;
+ }
+ std::optional<llvm::MemoryBufferRef> MaybeFile =
+ getSourceManager().getMemoryBufferForFileOrNone(*MaybeFileRef);
+ if (!MaybeFile) {
+ // could not find file
+ Diag(FilenameTok, diag::err_cannot_open_file)
+ << Filename << "a buffer to the contents could not be created";
+ return;
+ }
+ StringRef BinaryContents = MaybeFile->getBuffer();
+
+ // The order is important between 'offset' and 'limit'; we want to offset
+ // first and then limit second; otherwise we may reduce the notional resource
+ // size to something too small to offset into.
+ if (Params->MaybeOffsetParam) {
+ // FIXME: just like with the limit() and if_empty() parameters, this loses
+ // source fidelity in the AST; it has no idea that there was an offset
+ // involved.
+ // offsets all the way to the end of the file make for an empty file.
+ BinaryContents = BinaryContents.substr(Params->MaybeOffsetParam->Offset);
+ }
+
+ if (Params->MaybeLimitParam) {
+ // FIXME: just like with the clang::offset() and if_empty() parameters,
+ // this loses source fidelity in the AST; it has no idea there was a limit
+ // involved.
+ BinaryContents = BinaryContents.substr(0, Params->MaybeLimitParam->Limit);
+ }
+
+ if (Callbacks)
+ Callbacks->EmbedDirective(HashLoc, Filename, isAngled, MaybeFileRef,
+ *Params);
+ HandleEmbedDirectiveImpl(HashLoc, *Params, BinaryContents);
+}
diff --git a/contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp b/contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp
index 8f25c67ec9df..8bb82bd22eb9 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp
@@ -333,11 +333,11 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
: diag::ext_cxx23_size_t_suffix
: diag::err_cxx23_size_t_suffix);
- // 'wb/uwb' literals are a C23 feature. We explicitly do not support the
- // suffix in C++ as an extension because a library-based UDL that resolves
- // to a library type may be more appropriate there.
+ // 'wb/uwb' literals are a C23 feature.
+ // '__wb/__uwb' are a C++ extension.
if (Literal.isBitInt)
- PP.Diag(PeekTok, PP.getLangOpts().C23
+ PP.Diag(PeekTok, PP.getLangOpts().CPlusPlus ? diag::ext_cxx_bitint_suffix
+ : PP.getLangOpts().C23
? diag::warn_c23_compat_bitint_suffix
: diag::ext_c23_bitint_suffix);
@@ -870,7 +870,9 @@ static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
/// may occur after a #if or #elif directive. If the expression is equivalent
/// to "!defined(X)" return X in IfNDefMacro.
Preprocessor::DirectiveEvalResult
-Preprocessor::EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro) {
+Preprocessor::EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro,
+ Token &Tok, bool &EvaluatedDefined,
+ bool CheckForEoD) {
SaveAndRestore PPDir(ParsingIfOrElifDirective, true);
// Save the current state of 'DisableMacroExpansion' and reset it to false. If
// 'DisableMacroExpansion' is true, then we must be in a macro argument list
@@ -882,7 +884,6 @@ Preprocessor::EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro) {
DisableMacroExpansion = false;
// Peek ahead one token.
- Token Tok;
LexNonComment(Tok);
// C99 6.10.1p3 - All expressions are evaluated as intmax_t or uintmax_t.
@@ -895,7 +896,7 @@ Preprocessor::EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro) {
// Parse error, skip the rest of the macro line.
SourceRange ConditionRange = ExprStartLoc;
if (Tok.isNot(tok::eod))
- ConditionRange = DiscardUntilEndOfDirective();
+ ConditionRange = DiscardUntilEndOfDirective(Tok);
// Restore 'DisableMacroExpansion'.
DisableMacroExpansion = DisableMacroExpansionAtStartOfDirective;
@@ -903,11 +904,14 @@ Preprocessor::EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro) {
// We cannot trust the source range from the value because there was a
// parse error. Track the range manually -- the end of the directive is the
// end of the condition range.
- return {false,
+ return {std::nullopt,
+ false,
DT.IncludedUndefinedIds,
{ExprStartLoc, ConditionRange.getEnd()}};
}
+ EvaluatedDefined = DT.State != DefinedTracker::Unknown;
+
// If we are at the end of the expression after just parsing a value, there
// must be no (unparenthesized) binary operators involved, so we can exit
// directly.
@@ -919,7 +923,10 @@ Preprocessor::EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro) {
// Restore 'DisableMacroExpansion'.
DisableMacroExpansion = DisableMacroExpansionAtStartOfDirective;
- return {ResVal.Val != 0, DT.IncludedUndefinedIds, ResVal.getRange()};
+ bool IsNonZero = ResVal.Val != 0;
+ SourceRange ValRange = ResVal.getRange();
+ return {std::move(ResVal.Val), IsNonZero, DT.IncludedUndefinedIds,
+ ValRange};
}
// Otherwise, we must have a binary operator (e.g. "#if 1 < 2"), so parse the
@@ -928,21 +935,37 @@ Preprocessor::EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro) {
Tok, true, DT.IncludedUndefinedIds, *this)) {
// Parse error, skip the rest of the macro line.
if (Tok.isNot(tok::eod))
- DiscardUntilEndOfDirective();
+ DiscardUntilEndOfDirective(Tok);
// Restore 'DisableMacroExpansion'.
DisableMacroExpansion = DisableMacroExpansionAtStartOfDirective;
- return {false, DT.IncludedUndefinedIds, ResVal.getRange()};
+ SourceRange ValRange = ResVal.getRange();
+ return {std::nullopt, false, DT.IncludedUndefinedIds, ValRange};
}
- // If we aren't at the tok::eod token, something bad happened, like an extra
- // ')' token.
- if (Tok.isNot(tok::eod)) {
- Diag(Tok, diag::err_pp_expected_eol);
- DiscardUntilEndOfDirective();
+ if (CheckForEoD) {
+ // If we aren't at the tok::eod token, something bad happened, like an extra
+ // ')' token.
+ if (Tok.isNot(tok::eod)) {
+ Diag(Tok, diag::err_pp_expected_eol);
+ DiscardUntilEndOfDirective(Tok);
+ }
}
+ EvaluatedDefined = EvaluatedDefined || DT.State != DefinedTracker::Unknown;
+
// Restore 'DisableMacroExpansion'.
DisableMacroExpansion = DisableMacroExpansionAtStartOfDirective;
- return {ResVal.Val != 0, DT.IncludedUndefinedIds, ResVal.getRange()};
+ bool IsNonZero = ResVal.Val != 0;
+ SourceRange ValRange = ResVal.getRange();
+ return {std::move(ResVal.Val), IsNonZero, DT.IncludedUndefinedIds, ValRange};
+}
+
+Preprocessor::DirectiveEvalResult
+Preprocessor::EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro,
+ bool CheckForEoD) {
+ Token Tok;
+ bool EvaluatedDefined;
+ return EvaluateDirectiveExpression(IfNDefMacro, Tok, EvaluatedDefined,
+ CheckForEoD);
}
diff --git a/contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp b/contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp
index 3b1b6df1dbae..8221db46e06a 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp
@@ -368,8 +368,7 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
// Okay, this has a controlling macro, remember in HeaderFileInfo.
if (OptionalFileEntryRef FE = CurPPLexer->getFileEntry()) {
HeaderInfo.SetFileControllingMacro(*FE, ControllingMacro);
- if (MacroInfo *MI =
- getMacroInfo(const_cast<IdentifierInfo*>(ControllingMacro)))
+ if (MacroInfo *MI = getMacroInfo(ControllingMacro))
MI->setUsedForHeaderGuard(true);
if (const IdentifierInfo *DefinedMacro =
CurPPLexer->MIOpt.GetDefinedMacro()) {
@@ -805,7 +804,7 @@ Module *Preprocessor::LeaveSubmodule(bool ForPragma) {
llvm::SmallPtrSet<const IdentifierInfo*, 8> VisitedMacros;
for (unsigned I = Info.OuterPendingModuleMacroNames;
I != PendingModuleMacroNames.size(); ++I) {
- auto *II = const_cast<IdentifierInfo*>(PendingModuleMacroNames[I]);
+ auto *II = PendingModuleMacroNames[I];
if (!VisitedMacros.insert(II).second)
continue;
@@ -855,8 +854,8 @@ Module *Preprocessor::LeaveSubmodule(bool ForPragma) {
// Don't bother creating a module macro if it would represent a #undef
// that doesn't override anything.
if (Def || !Macro.getOverriddenMacros().empty())
- addModuleMacro(LeavingMod, II, Def,
- Macro.getOverriddenMacros(), IsNew);
+ addModuleMacro(LeavingMod, II, Def, Macro.getOverriddenMacros(),
+ IsNew);
if (!getLangOpts().ModulesLocalVisibility) {
// This macro is exposed to the rest of this compilation as a
diff --git a/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp b/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
index ad02f31209b0..fb88ec2bf603 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
@@ -226,7 +226,7 @@ void Preprocessor::updateModuleMacroInfo(const IdentifierInfo *II,
bool IsSystemMacro = true;
bool IsAmbiguous = false;
if (auto *MD = Info.MD) {
- while (MD && isa<VisibilityMacroDirective>(MD))
+ while (isa_and_nonnull<VisibilityMacroDirective>(MD))
MD = MD->getPrevious();
if (auto *DMD = dyn_cast_or_null<DefMacroDirective>(MD)) {
MI = DMD->getInfo();
@@ -380,6 +380,7 @@ void Preprocessor::RegisterBuiltinMacros() {
Ident__has_c_attribute = nullptr;
Ident__has_declspec = RegisterBuiltinMacro(*this, "__has_declspec_attribute");
+ Ident__has_embed = RegisterBuiltinMacro(*this, "__has_embed");
Ident__has_include = RegisterBuiltinMacro(*this, "__has_include");
Ident__has_include_next = RegisterBuiltinMacro(*this, "__has_include_next");
Ident__has_warning = RegisterBuiltinMacro(*this, "__has_warning");
@@ -993,11 +994,20 @@ MacroArgs *Preprocessor::ReadMacroCallArgumentList(Token &MacroName,
// If the macro contains the comma pasting extension, the diagnostic
// is suppressed; we know we'll get another diagnostic later.
if (!MI->hasCommaPasting()) {
- // C++20 allows this construct, but standards before C++20 and all C
- // standards do not allow the construct (we allow it as an extension).
- Diag(Tok, getLangOpts().CPlusPlus20
- ? diag::warn_cxx17_compat_missing_varargs_arg
- : diag::ext_missing_varargs_arg);
+ // C++20 [cpp.replace]p15, C23 6.10.5p12
+ //
+ // C++20 and C23 allow this construct, but standards before that
+ // do not (we allow it as an extension).
+ unsigned ID;
+ if (getLangOpts().CPlusPlus20)
+ ID = diag::warn_cxx17_compat_missing_varargs_arg;
+ else if (getLangOpts().CPlusPlus)
+ ID = diag::ext_cxx_missing_varargs_arg;
+ else if (getLangOpts().C23)
+ ID = diag::warn_c17_compat_missing_varargs_arg;
+ else
+ ID = diag::ext_c_missing_varargs_arg;
+ Diag(Tok, ID);
Diag(MI->getDefinitionLoc(), diag::note_macro_here)
<< MacroName.getIdentifierInfo();
}
@@ -1270,6 +1280,105 @@ static bool EvaluateHasIncludeCommon(Token &Tok, IdentifierInfo *II,
return File.has_value();
}
+/// EvaluateHasEmbed - Process a '__has_embed("foo" params...)' expression.
+/// Returns a filled optional with the value if successful; otherwise, empty.
+EmbedResult Preprocessor::EvaluateHasEmbed(Token &Tok, IdentifierInfo *II) {
+ // These expressions are only allowed within a preprocessor directive.
+ if (!this->isParsingIfOrElifDirective()) {
+ Diag(Tok, diag::err_pp_directive_required) << II;
+ // Return a valid identifier token.
+ assert(Tok.is(tok::identifier));
+ Tok.setIdentifierInfo(II);
+ return EmbedResult::Invalid;
+ }
+
+ // Ensure we have a '('.
+ LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_pp_expected_after) << II << tok::l_paren;
+ // If the next token looks like a filename or the start of one,
+ // assume it is and process it as such.
+ return EmbedResult::Invalid;
+ }
+
+ // Save '(' location for possible missing ')' message and then lex the header
+ // name token for the embed resource.
+ SourceLocation LParenLoc = Tok.getLocation();
+ if (this->LexHeaderName(Tok))
+ return EmbedResult::Invalid;
+
+ if (Tok.isNot(tok::header_name)) {
+ Diag(Tok.getLocation(), diag::err_pp_expects_filename);
+ return EmbedResult::Invalid;
+ }
+
+ SourceLocation FilenameLoc = Tok.getLocation();
+ Token FilenameTok = Tok;
+
+ std::optional<LexEmbedParametersResult> Params =
+ this->LexEmbedParameters(Tok, /*ForHasEmbed=*/true);
+ assert((Params || Tok.is(tok::eod)) &&
+ "expected success or to be at the end of the directive");
+
+ if (!Params)
+ return EmbedResult::Invalid;
+
+ if (Params->UnrecognizedParams > 0)
+ return EmbedResult::NotFound;
+
+ if (!Tok.is(tok::r_paren)) {
+ Diag(this->getLocForEndOfToken(FilenameLoc), diag::err_pp_expected_after)
+ << II << tok::r_paren;
+ Diag(LParenLoc, diag::note_matching) << tok::l_paren;
+ if (Tok.isNot(tok::eod))
+ DiscardUntilEndOfDirective();
+ return EmbedResult::Invalid;
+ }
+
+ SmallString<128> FilenameBuffer;
+ StringRef Filename = this->getSpelling(FilenameTok, FilenameBuffer);
+ bool isAngled =
+ this->GetIncludeFilenameSpelling(FilenameTok.getLocation(), Filename);
+ // If GetIncludeFilenameSpelling set the start ptr to null, there was an
+ // error.
+ assert(!Filename.empty());
+ const FileEntry *LookupFromFile =
+ this->getCurrentFileLexer() ? *this->getCurrentFileLexer()->getFileEntry()
+ : static_cast<FileEntry *>(nullptr);
+ OptionalFileEntryRef MaybeFileEntry =
+ this->LookupEmbedFile(Filename, isAngled, false, LookupFromFile);
+ if (Callbacks) {
+ Callbacks->HasEmbed(LParenLoc, Filename, isAngled, MaybeFileEntry);
+ }
+ if (!MaybeFileEntry)
+ return EmbedResult::NotFound;
+
+ size_t FileSize = MaybeFileEntry->getSize();
+ // First, "offset" into the file (this reduces the amount of data we can read
+ // from the file).
+ if (Params->MaybeOffsetParam) {
+ if (Params->MaybeOffsetParam->Offset > FileSize)
+ FileSize = 0;
+ else
+ FileSize -= Params->MaybeOffsetParam->Offset;
+ }
+
+ // Second, limit the data from the file (this also reduces the amount of data
+ // we can read from the file).
+ if (Params->MaybeLimitParam) {
+ if (Params->MaybeLimitParam->Limit > FileSize)
+ FileSize = 0;
+ else
+ FileSize = Params->MaybeLimitParam->Limit;
+ }
+
+ // If we have no data left to read, the file is empty, otherwise we have the
+ // expected resource.
+ if (FileSize == 0)
+ return EmbedResult::Empty;
+ return EmbedResult::Found;
+}
+
bool Preprocessor::EvaluateHasInclude(Token &Tok, IdentifierInfo *II) {
return EvaluateHasIncludeCommon(Tok, II, *this, nullptr, nullptr);
}
@@ -1493,6 +1602,34 @@ static bool isTargetVariantEnvironment(const TargetInfo &TI,
return false;
}
+static bool IsBuiltinTrait(Token &Tok) {
+
+#define TYPE_TRAIT_1(Spelling, Name, Key) \
+ case tok::kw_##Spelling: \
+ return true;
+#define TYPE_TRAIT_2(Spelling, Name, Key) \
+ case tok::kw_##Spelling: \
+ return true;
+#define TYPE_TRAIT_N(Spelling, Name, Key) \
+ case tok::kw_##Spelling: \
+ return true;
+#define ARRAY_TYPE_TRAIT(Spelling, Name, Key) \
+ case tok::kw_##Spelling: \
+ return true;
+#define EXPRESSION_TRAIT(Spelling, Name, Key) \
+ case tok::kw_##Spelling: \
+ return true;
+#define TRANSFORM_TYPE_TRAIT_DEF(K, Spelling) \
+ case tok::kw___##Spelling: \
+ return true;
+
+ switch (Tok.getKind()) {
+ default:
+ return false;
+#include "clang/Basic/TokenKinds.def"
+ }
+}
+
/// ExpandBuiltinMacro - If an identifier token is read that is to be expanded
/// as a builtin macro, handle it and return the next token as 'Tok'.
void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
@@ -1672,6 +1809,12 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
return false;
else if (II->getBuiltinID() != 0) {
switch (II->getBuiltinID()) {
+ case Builtin::BI__builtin_cpu_is:
+ return getTargetInfo().supportsCpuIs();
+ case Builtin::BI__builtin_cpu_init:
+ return getTargetInfo().supportsCpuInit();
+ case Builtin::BI__builtin_cpu_supports:
+ return getTargetInfo().supportsCpuSupports();
case Builtin::BI__builtin_operator_new:
case Builtin::BI__builtin_operator_delete:
// denotes date of behavior change to support calling arbitrary
@@ -1683,27 +1826,11 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
getTargetInfo().getTargetOpts().FeatureMap);
}
return true;
- } else if (II->getTokenID() != tok::identifier ||
- II->hasRevertedTokenIDToIdentifier()) {
- // Treat all keywords that introduce a custom syntax of the form
- //
- // '__some_keyword' '(' [...] ')'
- //
- // as being "builtin functions", even if the syntax isn't a valid
- // function call (for example, because the builtin takes a type
- // argument).
- if (II->getName().starts_with("__builtin_") ||
- II->getName().starts_with("__is_") ||
- II->getName().starts_with("__has_"))
- return true;
- return llvm::StringSwitch<bool>(II->getName())
- .Case("__array_rank", true)
- .Case("__array_extent", true)
- .Case("__reference_binds_to_temporary", true)
- .Case("__reference_constructs_from_temporary", true)
-#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) .Case("__" #Trait, true)
-#include "clang/Basic/TransformTypeTraits.def"
- .Default(false);
+ } else if (IsBuiltinTrait(Tok)) {
+ return true;
+ } else if (II->getTokenID() != tok::identifier &&
+ II->getName().starts_with("__builtin_")) {
+ return true;
} else {
return llvm::StringSwitch<bool>(II->getName())
// Report builtin templates as being builtins.
@@ -1807,6 +1934,17 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
return;
OS << (int)Value;
Tok.setKind(tok::numeric_constant);
+ } else if (II == Ident__has_embed) {
+ // The argument to this builtin should be a parenthesized file name
+ // string literal using angle brackets (<>) or double-quotes (""),
+ // optionally followed by a series of arguments, similar in form to
+ // attribute arguments.
+ EmbedResult Value = EvaluateHasEmbed(Tok, II);
+ if (Value == EmbedResult::Invalid)
+ return;
+
+ Tok.setKind(tok::numeric_constant);
+ OS << static_cast<int>(Value);
} else if (II == Ident__has_warning) {
// The argument should be a parenthesized string literal.
EvaluateFeatureLikeBuiltinMacro(OS, Tok, II, *this, false,
diff --git a/contrib/llvm-project/clang/lib/Lex/Pragma.cpp b/contrib/llvm-project/clang/lib/Lex/Pragma.cpp
index 499813f8ab7d..10f0ab7180e6 100644
--- a/contrib/llvm-project/clang/lib/Lex/Pragma.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/Pragma.cpp
@@ -1444,7 +1444,8 @@ struct PragmaWarningHandler : public PragmaHandler {
.Case("once", PPCallbacks::PWS_Once)
.Case("suppress", PPCallbacks::PWS_Suppress)
.Default(-1);
- if ((SpecifierValid = SpecifierInt != -1))
+ SpecifierValid = SpecifierInt != -1;
+ if (SpecifierValid)
Specifier =
static_cast<PPCallbacks::PragmaWarningSpecifier>(SpecifierInt);
diff --git a/contrib/llvm-project/clang/lib/Lex/PreprocessingRecord.cpp b/contrib/llvm-project/clang/lib/Lex/PreprocessingRecord.cpp
index aab6a2bed89d..be5aac7ef31b 100644
--- a/contrib/llvm-project/clang/lib/Lex/PreprocessingRecord.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PreprocessingRecord.cpp
@@ -472,8 +472,8 @@ void PreprocessingRecord::MacroUndefined(const Token &Id,
void PreprocessingRecord::InclusionDirective(
SourceLocation HashLoc, const Token &IncludeTok, StringRef FileName,
bool IsAngled, CharSourceRange FilenameRange, OptionalFileEntryRef File,
- StringRef SearchPath, StringRef RelativePath, const Module *Imported,
- SrcMgr::CharacteristicKind FileType) {
+ StringRef SearchPath, StringRef RelativePath, const Module *SuggestedModule,
+ bool ModuleImported, SrcMgr::CharacteristicKind FileType) {
InclusionDirective::InclusionKind Kind = InclusionDirective::Include;
switch (IncludeTok.getIdentifierInfo()->getPPKeywordID()) {
@@ -506,10 +506,9 @@ void PreprocessingRecord::InclusionDirective(
EndLoc = EndLoc.getLocWithOffset(-1); // the InclusionDirective expects
// a token range.
}
- clang::InclusionDirective *ID =
- new (*this) clang::InclusionDirective(*this, Kind, FileName, !IsAngled,
- (bool)Imported, File,
- SourceRange(HashLoc, EndLoc));
+ clang::InclusionDirective *ID = new (*this) clang::InclusionDirective(
+ *this, Kind, FileName, !IsAngled, ModuleImported, File,
+ SourceRange(HashLoc, EndLoc));
addPreprocessedEntity(ID);
}
diff --git a/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp b/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
index 7fdb5d4c0d7b..63e27e62cffc 100644
--- a/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
@@ -58,6 +58,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -72,6 +73,9 @@
using namespace clang;
+/// Minimum distance between two check points, in tokens.
+static constexpr unsigned CheckPointStepSize = 1024;
+
LLVM_INSTANTIATE_REGISTRY(PragmaHandlerRegistry)
ExternalPreprocessorSource::~ExternalPreprocessorSource() = default;
@@ -756,7 +760,7 @@ void Preprocessor::HandlePoisonedIdentifier(Token & Identifier) {
Diag(Identifier,it->second) << Identifier.getIdentifierInfo();
}
-void Preprocessor::updateOutOfDateIdentifier(IdentifierInfo &II) const {
+void Preprocessor::updateOutOfDateIdentifier(const IdentifierInfo &II) const {
assert(II.isOutOfDate() && "not out of date");
getExternalSource()->updateOutOfDateIdentifier(II);
}
@@ -954,6 +958,11 @@ void Preprocessor::Lex(Token &Result) {
}
}
+ if (CurLexer && ++CheckPointCounter == CheckPointStepSize) {
+ CheckPoints[CurLexer->getFileID()].push_back(CurLexer->BufferPtr);
+ CheckPointCounter = 0;
+ }
+
LastTokenWasAt = Result.is(tok::at);
--LexLevel;
@@ -979,7 +988,7 @@ void Preprocessor::LexTokensUntilEOF(std::vector<Token> *Tokens) {
}
/// Lex a header-name token (including one formed from header-name-tokens if
-/// \p AllowConcatenation is \c true).
+/// \p AllowMacroExpansion is \c true).
///
/// \param FilenameTok Filled in with the next token. On success, this will
/// be either a header_name token. On failure, it will be whatever other
@@ -1475,26 +1484,56 @@ void Preprocessor::emitFinalMacroWarning(const Token &Identifier,
}
bool Preprocessor::isSafeBufferOptOut(const SourceManager &SourceMgr,
- const SourceLocation &Loc) const {
- // Try to find a region in `SafeBufferOptOutMap` where `Loc` is in:
- auto FirstRegionEndingAfterLoc = llvm::partition_point(
- SafeBufferOptOutMap,
- [&SourceMgr,
- &Loc](const std::pair<SourceLocation, SourceLocation> &Region) {
- return SourceMgr.isBeforeInTranslationUnit(Region.second, Loc);
- });
+ const SourceLocation &Loc) const {
+ // The lambda that tests if a `Loc` is in an opt-out region given one opt-out
+ // region map:
+ auto TestInMap = [&SourceMgr](const SafeBufferOptOutRegionsTy &Map,
+ const SourceLocation &Loc) -> bool {
+ // Try to find a region in `SafeBufferOptOutMap` where `Loc` is in:
+ auto FirstRegionEndingAfterLoc = llvm::partition_point(
+ Map, [&SourceMgr,
+ &Loc](const std::pair<SourceLocation, SourceLocation> &Region) {
+ return SourceMgr.isBeforeInTranslationUnit(Region.second, Loc);
+ });
+
+ if (FirstRegionEndingAfterLoc != Map.end()) {
+ // To test if the start location of the found region precedes `Loc`:
+ return SourceMgr.isBeforeInTranslationUnit(
+ FirstRegionEndingAfterLoc->first, Loc);
+ }
+ // If we do not find a region whose end location passes `Loc`, we want to
+ // check if the current region is still open:
+ if (!Map.empty() && Map.back().first == Map.back().second)
+ return SourceMgr.isBeforeInTranslationUnit(Map.back().first, Loc);
+ return false;
+ };
- if (FirstRegionEndingAfterLoc != SafeBufferOptOutMap.end()) {
- // To test if the start location of the found region precedes `Loc`:
- return SourceMgr.isBeforeInTranslationUnit(FirstRegionEndingAfterLoc->first,
- Loc);
- }
- // If we do not find a region whose end location passes `Loc`, we want to
- // check if the current region is still open:
- if (!SafeBufferOptOutMap.empty() &&
- SafeBufferOptOutMap.back().first == SafeBufferOptOutMap.back().second)
- return SourceMgr.isBeforeInTranslationUnit(SafeBufferOptOutMap.back().first,
- Loc);
+ // What the following does:
+ //
+ // If `Loc` belongs to the local TU, we just look up `SafeBufferOptOutMap`.
+ // Otherwise, `Loc` is from a loaded AST. We look up the
+ // `LoadedSafeBufferOptOutMap` first to get the opt-out region map of the
+ // loaded AST where `Loc` is at. Then we find if `Loc` is in an opt-out
+ // region w.r.t. the region map. If the region map is absent, it means there
+ // is no opt-out pragma in that loaded AST.
+ //
+ // Opt-out pragmas in the local TU or a loaded AST is not visible to another
+ // one of them. That means if you put the pragmas around a `#include
+ // "module.h"`, where module.h is a module, it is not actually suppressing
+ // warnings in module.h. This is fine because warnings in module.h will be
+ // reported when module.h is compiled in isolation and nothing in module.h
+ // will be analyzed ever again. So you will not see warnings from the file
+ // that imports module.h anyway. And you can't even do the same thing for PCHs
+ // because they can only be included from the command line.
+
+ if (SourceMgr.isLocalSourceLocation(Loc))
+ return TestInMap(SafeBufferOptOutMap, Loc);
+
+ const SafeBufferOptOutRegionsTy *LoadedRegions =
+ LoadedSafeBufferOptOutMap.lookupLoadedOptOutMap(Loc, SourceMgr);
+
+ if (LoadedRegions)
+ return TestInMap(*LoadedRegions, Loc);
return false;
}
@@ -1543,6 +1582,47 @@ bool Preprocessor::isPPInSafeBufferOptOutRegion(SourceLocation &StartLoc) {
return InSafeBufferOptOutRegion;
}
+SmallVector<SourceLocation, 64>
+Preprocessor::serializeSafeBufferOptOutMap() const {
+ assert(!InSafeBufferOptOutRegion &&
+ "Attempt to serialize safe buffer opt-out regions before file being "
+ "completely preprocessed");
+
+ SmallVector<SourceLocation, 64> SrcSeq;
+
+ for (const auto &[begin, end] : SafeBufferOptOutMap) {
+ SrcSeq.push_back(begin);
+ SrcSeq.push_back(end);
+ }
+ // Only `SafeBufferOptOutMap` gets serialized. No need to serialize
+ // `LoadedSafeBufferOptOutMap` because if this TU loads a pch/module, every
+ // pch/module in the pch-chain/module-DAG will be loaded one by one in order.
+ // It means that for each loading pch/module m, it just needs to load m's own
+ // `SafeBufferOptOutMap`.
+ return SrcSeq;
+}
+
+bool Preprocessor::setDeserializedSafeBufferOptOutMap(
+ const SmallVectorImpl<SourceLocation> &SourceLocations) {
+ if (SourceLocations.size() == 0)
+ return false;
+
+ assert(SourceLocations.size() % 2 == 0 &&
+ "ill-formed SourceLocation sequence");
+
+ auto It = SourceLocations.begin();
+ SafeBufferOptOutRegionsTy &Regions =
+ LoadedSafeBufferOptOutMap.findAndConsLoadedOptOutMap(*It, SourceMgr);
+
+ do {
+ SourceLocation Begin = *It++;
+ SourceLocation End = *It++;
+
+ Regions.emplace_back(Begin, End);
+ } while (It != SourceLocations.end());
+ return true;
+}
+
ModuleLoader::~ModuleLoader() = default;
CommentHandler::~CommentHandler() = default;
@@ -1558,3 +1638,19 @@ void Preprocessor::createPreprocessingRecord() {
Record = new PreprocessingRecord(getSourceManager());
addPPCallbacks(std::unique_ptr<PPCallbacks>(Record));
}
+
+const char *Preprocessor::getCheckPoint(FileID FID, const char *Start) const {
+ if (auto It = CheckPoints.find(FID); It != CheckPoints.end()) {
+ const SmallVector<const char *> &FileCheckPoints = It->second;
+ const char *Last = nullptr;
+ // FIXME: Do better than a linear search.
+ for (const char *P : FileCheckPoints) {
+ if (P > Start)
+ break;
+ Last = P;
+ }
+ return Last;
+ }
+
+ return nullptr;
+}
diff --git a/contrib/llvm-project/clang/lib/Lex/TokenConcatenation.cpp b/contrib/llvm-project/clang/lib/Lex/TokenConcatenation.cpp
index 1b3201bd805b..865879d18053 100644
--- a/contrib/llvm-project/clang/lib/Lex/TokenConcatenation.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/TokenConcatenation.cpp
@@ -193,9 +193,12 @@ bool TokenConcatenation::AvoidConcat(const Token &PrevPrevTok,
if (Tok.isAnnotation()) {
// Modules annotation can show up when generated automatically for includes.
assert(Tok.isOneOf(tok::annot_module_include, tok::annot_module_begin,
- tok::annot_module_end) &&
+ tok::annot_module_end, tok::annot_embed) &&
"unexpected annotation in AvoidConcat");
+
ConcatInfo = 0;
+ if (Tok.is(tok::annot_embed))
+ return true;
}
if (ConcatInfo == 0)
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseAST.cpp b/contrib/llvm-project/clang/lib/Parse/ParseAST.cpp
index 77ab3b556da5..e008cc0e38ce 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseAST.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseAST.cpp
@@ -152,7 +152,15 @@ void clang::ParseAST(Sema &S, bool PrintStats, bool SkipFunctionBodies) {
bool HaveLexer = S.getPreprocessor().getCurrentLexer();
if (HaveLexer) {
- llvm::TimeTraceScope TimeScope("Frontend");
+ llvm::TimeTraceScope TimeScope("Frontend", [&]() {
+ llvm::TimeTraceMetadata M;
+ if (llvm::isTimeTraceVerbose()) {
+ const SourceManager &SM = S.getSourceManager();
+ if (const auto *FE = SM.getFileEntryForID(SM.getMainFileID()))
+ M.File = FE->tryGetRealPathName();
+ }
+ return M;
+ });
P.Initialize();
Parser::DeclGroupPtrTy ADecl;
Sema::ModuleImportState ImportState;
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp b/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
index 573c90a36eea..9ccbbf9a7d5d 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
@@ -20,6 +20,49 @@
using namespace clang;
+/// Parse the optional ("message") part of a deleted-function-body.
+StringLiteral *Parser::ParseCXXDeletedFunctionMessage() {
+ if (!Tok.is(tok::l_paren))
+ return nullptr;
+ StringLiteral *Message = nullptr;
+ BalancedDelimiterTracker BT{*this, tok::l_paren};
+ BT.consumeOpen();
+
+ if (isTokenStringLiteral()) {
+ ExprResult Res = ParseUnevaluatedStringLiteralExpression();
+ if (Res.isUsable()) {
+ Message = Res.getAs<StringLiteral>();
+ Diag(Message->getBeginLoc(), getLangOpts().CPlusPlus26
+ ? diag::warn_cxx23_delete_with_message
+ : diag::ext_delete_with_message)
+ << Message->getSourceRange();
+ }
+ } else {
+ Diag(Tok.getLocation(), diag::err_expected_string_literal)
+ << /*Source='in'*/ 0 << "'delete'";
+ SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);
+ }
+
+ BT.consumeClose();
+ return Message;
+}
+
+/// If we've encountered '= delete' in a context where it is ill-formed, such
+/// as in the declaration of a non-function, also skip the ("message") part if
+/// it is present to avoid issuing further diagnostics.
+void Parser::SkipDeletedFunctionBody() {
+ if (!Tok.is(tok::l_paren))
+ return;
+
+ BalancedDelimiterTracker BT{*this, tok::l_paren};
+ BT.consumeOpen();
+
+ // Just skip to the end of the current declaration.
+ SkipUntil(tok::r_paren, tok::comma, StopAtSemi | StopBeforeMatch);
+ if (Tok.is(tok::r_paren))
+ BT.consumeClose();
+}
+
/// ParseCXXInlineMethodDef - We parsed and verified that the specified
/// Declarator is a well formed C++ inline method definition. Now lex its body
/// and store its tokens for parsing after the C++ class is complete.
@@ -70,7 +113,8 @@ NamedDecl *Parser::ParseCXXInlineMethodDef(
? diag::warn_cxx98_compat_defaulted_deleted_function
: diag::ext_defaulted_deleted_function)
<< 1 /* deleted */;
- Actions.SetDeclDeleted(FnD, KWLoc);
+ StringLiteral *Message = ParseCXXDeletedFunctionMessage();
+ Actions.SetDeclDeleted(FnD, KWLoc, Message);
Delete = true;
if (auto *DeclAsFunction = dyn_cast<FunctionDecl>(FnD)) {
DeclAsFunction->setRangeEnd(KWEndLoc);
@@ -422,14 +466,14 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
ConsumeAnyToken();
} else if (HasUnparsed) {
assert(Param->hasInheritedDefaultArg());
- const FunctionDecl *Old;
+ FunctionDecl *Old;
if (const auto *FunTmpl = dyn_cast<FunctionTemplateDecl>(LM.Method))
Old =
cast<FunctionDecl>(FunTmpl->getTemplatedDecl())->getPreviousDecl();
else
Old = cast<FunctionDecl>(LM.Method)->getPreviousDecl();
if (Old) {
- ParmVarDecl *OldParam = const_cast<ParmVarDecl*>(Old->getParamDecl(I));
+ ParmVarDecl *OldParam = Old->getParamDecl(I);
assert(!OldParam->hasUnparsedDefaultArg());
if (OldParam->hasUninstantiatedDefaultArg())
Param->setUninstantiatedDefaultArg(
@@ -467,11 +511,28 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
// and the end of the function-definition, member-declarator, or
// declarator.
CXXMethodDecl *Method;
+ FunctionDecl *FunctionToPush;
if (FunctionTemplateDecl *FunTmpl
= dyn_cast<FunctionTemplateDecl>(LM.Method))
- Method = dyn_cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl());
+ FunctionToPush = FunTmpl->getTemplatedDecl();
else
- Method = dyn_cast<CXXMethodDecl>(LM.Method);
+ FunctionToPush = cast<FunctionDecl>(LM.Method);
+ Method = dyn_cast<CXXMethodDecl>(FunctionToPush);
+
+ // Push a function scope so that tryCaptureVariable() can properly visit
+ // function scopes involving function parameters that are referenced inside
+ // the noexcept specifier e.g. through a lambda expression.
+ // Example:
+ // struct X {
+ // void ICE(int val) noexcept(noexcept([val]{}));
+ // };
+ // Setup the CurScope to match the function DeclContext - we have such
+ // assumption in IsInFnTryBlockHandler().
+ ParseScope FnScope(this, Scope::FnScope);
+ Sema::ContextRAII FnContext(Actions, FunctionToPush,
+ /*NewThisContext=*/false);
+ Sema::FunctionScopeRAII PopFnContext(Actions);
+ Actions.PushFunctionScope();
Sema::CXXThisScopeRAII ThisScope(
Actions, Method ? Method->getParent() : nullptr,
@@ -559,6 +620,8 @@ void Parser::ParseLexedMethodDef(LexedMethod &LM) {
// to be re-used for method bodies as well.
ParseScope FnScope(this, Scope::FnScope | Scope::DeclScope |
Scope::CompoundStmtScope);
+ Sema::FPFeaturesStateRAII SaveFPFeatures(Actions);
+
Actions.ActOnStartOfFunctionDef(getCurScope(), LM.D);
if (Tok.is(tok::kw_try)) {
@@ -978,6 +1041,19 @@ bool Parser::ConsumeAndStoreFunctionPrologue(CachedTokens &Toks) {
} else {
break;
}
+ // Pack indexing
+ if (Tok.is(tok::ellipsis) && NextToken().is(tok::l_square)) {
+ Toks.push_back(Tok);
+ SourceLocation OpenLoc = ConsumeToken();
+ Toks.push_back(Tok);
+ ConsumeBracket();
+ if (!ConsumeAndStoreUntil(tok::r_square, Toks, /*StopAtSemi=*/true)) {
+ Diag(Tok.getLocation(), diag::err_expected) << tok::r_square;
+ Diag(OpenLoc, diag::note_matching) << tok::l_square;
+ return true;
+ }
+ }
+
} while (Tok.is(tok::coloncolon));
if (Tok.is(tok::code_completion)) {
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp b/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
index 356e7851ec63..7ce9a9cea1c7 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
@@ -26,7 +26,11 @@
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaCUDA.h"
+#include "clang/Sema/SemaCodeCompletion.h"
#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaOpenMP.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
@@ -89,13 +93,23 @@ static StringRef normalizeAttrName(StringRef Name) {
return Name;
}
-/// isAttributeLateParsed - Return true if the attribute has arguments that
-/// require late parsing.
-static bool isAttributeLateParsed(const IdentifierInfo &II) {
+/// returns true iff attribute is annotated with `LateAttrParseExperimentalExt`
+/// in `Attr.td`.
+static bool IsAttributeLateParsedExperimentalExt(const IdentifierInfo &II) {
+#define CLANG_ATTR_LATE_PARSED_EXPERIMENTAL_EXT_LIST
+ return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
+#include "clang/Parse/AttrParserStringSwitches.inc"
+ .Default(false);
+#undef CLANG_ATTR_LATE_PARSED_EXPERIMENTAL_EXT_LIST
+}
+
+/// returns true iff attribute is annotated with `LateAttrParseStandard` in
+/// `Attr.td`.
+static bool IsAttributeLateParsedStandard(const IdentifierInfo &II) {
#define CLANG_ATTR_LATE_PARSED_LIST
- return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
+ return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
#include "clang/Parse/AttrParserStringSwitches.inc"
- .Default(false);
+ .Default(false);
#undef CLANG_ATTR_LATE_PARSED_LIST
}
@@ -205,7 +219,8 @@ void Parser::ParseGNUAttributes(ParsedAttributes &Attrs,
break;
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteAttribute(AttributeCommonInfo::Syntax::AS_GNU);
+ Actions.CodeCompletion().CodeCompleteAttribute(
+ AttributeCommonInfo::Syntax::AS_GNU);
break;
}
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
@@ -220,8 +235,26 @@ void Parser::ParseGNUAttributes(ParsedAttributes &Attrs,
continue;
}
+ bool LateParse = false;
+ if (!LateAttrs)
+ LateParse = false;
+ else if (LateAttrs->lateAttrParseExperimentalExtOnly()) {
+ // The caller requested that this attribute **only** be late
+ // parsed for `LateAttrParseExperimentalExt` attributes. This will
+ // only be late parsed if the experimental language option is enabled.
+ LateParse = getLangOpts().ExperimentalLateParseAttributes &&
+ IsAttributeLateParsedExperimentalExt(*AttrName);
+ } else {
+ // The caller did not restrict late parsing to only
+ // `LateAttrParseExperimentalExt` attributes so late parse
+ // both `LateAttrParseStandard` and `LateAttrParseExperimentalExt`
+ // attributes.
+ LateParse = IsAttributeLateParsedExperimentalExt(*AttrName) ||
+ IsAttributeLateParsedStandard(*AttrName);
+ }
+
// Handle "parameterized" attributes
- if (!LateAttrs || !isAttributeLateParsed(*AttrName)) {
+ if (!LateParse) {
ParseGNUAttributeArgs(AttrName, AttrNameLoc, Attrs, &EndLoc, nullptr,
SourceLocation(), ParsedAttr::Form::GNU(), D);
continue;
@@ -291,7 +324,7 @@ static bool attributeHasIdentifierArg(const IdentifierInfo &II) {
/// Determine whether the given attribute has an identifier argument.
static ParsedAttributeArgumentsProperties
-attributeStringLiteralListArg(const IdentifierInfo &II) {
+attributeStringLiteralListArg(const llvm::Triple &T, const IdentifierInfo &II) {
#define CLANG_ATTR_STRING_LITERAL_ARG_LIST
return llvm::StringSwitch<uint32_t>(normalizeAttrName(II.getName()))
#include "clang/Parse/AttrParserStringSwitches.inc"
@@ -335,6 +368,27 @@ static bool attributeIsTypeArgAttr(const IdentifierInfo &II) {
#undef CLANG_ATTR_TYPE_ARG_LIST
}
+/// Determine whether the given attribute takes identifier arguments.
+static bool attributeHasStrictIdentifierArgs(const IdentifierInfo &II) {
+#define CLANG_ATTR_STRICT_IDENTIFIER_ARG_AT_INDEX_LIST
+ return (llvm::StringSwitch<uint64_t>(normalizeAttrName(II.getName()))
+#include "clang/Parse/AttrParserStringSwitches.inc"
+ .Default(0)) != 0;
+#undef CLANG_ATTR_STRICT_IDENTIFIER_ARG_AT_INDEX_LIST
+}
+
+/// Determine whether the given attribute takes an identifier argument at a
+/// specific index
+static bool attributeHasStrictIdentifierArgAtIndex(const IdentifierInfo &II,
+ size_t argIndex) {
+#define CLANG_ATTR_STRICT_IDENTIFIER_ARG_AT_INDEX_LIST
+ return (llvm::StringSwitch<uint64_t>(normalizeAttrName(II.getName()))
+#include "clang/Parse/AttrParserStringSwitches.inc"
+ .Default(0)) &
+ (1ull << argIndex);
+#undef CLANG_ATTR_STRICT_IDENTIFIER_ARG_AT_INDEX_LIST
+}
+
/// Determine whether the given attribute requires parsing its arguments
/// in an unevaluated context or not.
static bool attributeParsedArgsUnevaluated(const IdentifierInfo &II) {
@@ -434,6 +488,11 @@ bool Parser::ParseAttributeArgumentList(
break;
}
+ if (Actions.DiagnoseUnexpandedParameterPack(Expr.get())) {
+ SawError = true;
+ break;
+ }
+
Exprs.push_back(Expr.get());
if (Tok.isNot(tok::comma))
@@ -508,7 +567,8 @@ unsigned Parser::ParseAttributeArgsCommon(
}
if (T.isUsable())
TheParsedType = T.get();
- } else if (AttributeHasVariadicIdentifierArg) {
+ } else if (AttributeHasVariadicIdentifierArg ||
+ attributeHasStrictIdentifierArgs(*AttrName)) {
// Parse variadic identifier arg. This can either consume identifiers or
// expressions. Variadic identifier args do not support parameter packs
// because those are typically used for attributes with enumeration
@@ -519,6 +579,12 @@ unsigned Parser::ParseAttributeArgsCommon(
if (ChangeKWThisToIdent && Tok.is(tok::kw_this))
Tok.setKind(tok::identifier);
+ if (Tok.is(tok::identifier) && attributeHasStrictIdentifierArgAtIndex(
+ *AttrName, ArgExprs.size())) {
+ ArgExprs.push_back(ParseIdentifierLoc());
+ continue;
+ }
+
ExprResult ArgExpr;
if (Tok.is(tok::identifier)) {
ArgExprs.push_back(ParseIdentifierLoc());
@@ -527,7 +593,9 @@ unsigned Parser::ParseAttributeArgsCommon(
EnterExpressionEvaluationContext Unevaluated(
Actions,
Uneval ? Sema::ExpressionEvaluationContext::Unevaluated
- : Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ : Sema::ExpressionEvaluationContext::ConstantEvaluated,
+ nullptr,
+ Sema::ExpressionEvaluationContextRecord::EK_AttrArgument);
ExprResult ArgExpr(
Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression()));
@@ -544,13 +612,16 @@ unsigned Parser::ParseAttributeArgsCommon(
// General case. Parse all available expressions.
bool Uneval = attributeParsedArgsUnevaluated(*AttrName);
EnterExpressionEvaluationContext Unevaluated(
- Actions, Uneval
- ? Sema::ExpressionEvaluationContext::Unevaluated
- : Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ Actions,
+ Uneval ? Sema::ExpressionEvaluationContext::Unevaluated
+ : Sema::ExpressionEvaluationContext::ConstantEvaluated,
+ nullptr,
+ Sema::ExpressionEvaluationContextRecord::ExpressionKind::
+ EK_AttrArgument);
ExprVector ParsedExprs;
ParsedAttributeArgumentsProperties ArgProperties =
- attributeStringLiteralListArg(*AttrName);
+ attributeStringLiteralListArg(getTargetInfo().getTriple(), *AttrName);
if (ParseAttributeArgumentList(*AttrName, ParsedExprs, ArgProperties)) {
SkipUntil(tok::r_paren, StopAtSemi);
return 0;
@@ -629,6 +700,16 @@ void Parser::ParseGNUAttributeArgs(
ParseAttributeWithTypeArg(*AttrName, AttrNameLoc, Attrs, ScopeName,
ScopeLoc, Form);
return;
+ } else if (AttrKind == ParsedAttr::AT_CountedBy ||
+ AttrKind == ParsedAttr::AT_CountedByOrNull ||
+ AttrKind == ParsedAttr::AT_SizedBy ||
+ AttrKind == ParsedAttr::AT_SizedByOrNull) {
+ ParseBoundsAttribute(*AttrName, AttrNameLoc, Attrs, ScopeName, ScopeLoc,
+ Form);
+ return;
+ } else if (AttrKind == ParsedAttr::AT_CXXAssume) {
+ ParseCXXAssumeAttributeArg(Attrs, AttrName, AttrNameLoc, EndLoc, Form);
+ return;
}
// These may refer to the function arguments, but need to be parsed early to
@@ -683,6 +764,10 @@ unsigned Parser::ParseClangAttributeArgs(
ParseTypeTagForDatatypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
ScopeName, ScopeLoc, Form);
break;
+
+ case ParsedAttr::AT_CXXAssume:
+ ParseCXXAssumeAttributeArg(Attrs, AttrName, AttrNameLoc, EndLoc, Form);
+ break;
}
return !Attrs.empty() ? Attrs.begin()->getNumArgs() : 0;
}
@@ -860,7 +945,8 @@ void Parser::ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteAttribute(AttributeCommonInfo::AS_Declspec);
+ Actions.CodeCompletion().CodeCompleteAttribute(
+ AttributeCommonInfo::AS_Declspec);
return;
}
@@ -1218,6 +1304,7 @@ void Parser::ParseAvailabilityAttribute(
enum { Introduced, Deprecated, Obsoleted, Unknown };
AvailabilityChange Changes[Unknown];
ExprResult MessageExpr, ReplacementExpr;
+ IdentifierLoc *EnvironmentLoc = nullptr;
// Opening '('.
BalancedDelimiterTracker T(*this, tok::l_paren);
@@ -1234,8 +1321,11 @@ void Parser::ParseAvailabilityAttribute(
}
IdentifierLoc *Platform = ParseIdentifierLoc();
if (const IdentifierInfo *const Ident = Platform->Ident) {
+ // Disallow xrOS for availability attributes.
+ if (Ident->getName().contains("xrOS") || Ident->getName().contains("xros"))
+ Diag(Platform->Loc, diag::warn_availability_unknown_platform) << Ident;
// Canonicalize platform name from "macosx" to "macos".
- if (Ident->getName() == "macosx")
+ else if (Ident->getName() == "macosx")
Platform->Ident = PP.getIdentifierInfo("macos");
// Canonicalize platform name from "macosx_app_extension" to
// "macos_app_extension".
@@ -1262,6 +1352,7 @@ void Parser::ParseAvailabilityAttribute(
Ident_message = PP.getIdentifierInfo("message");
Ident_strict = PP.getIdentifierInfo("strict");
Ident_replacement = PP.getIdentifierInfo("replacement");
+ Ident_environment = PP.getIdentifierInfo("environment");
}
// Parse the optional "strict", the optional "replacement" and the set of
@@ -1309,6 +1400,13 @@ void Parser::ParseAvailabilityAttribute(
continue;
}
+ if (Keyword == Ident_environment) {
+ if (EnvironmentLoc != nullptr) {
+ Diag(KeywordLoc, diag::err_availability_redundant)
+ << Keyword << SourceRange(EnvironmentLoc->Loc);
+ }
+ }
+
if (Tok.isNot(tok::equal)) {
Diag(Tok, diag::err_expected_after) << Keyword << tok::equal;
SkipUntil(tok::r_paren, StopAtSemi);
@@ -1330,6 +1428,15 @@ void Parser::ParseAvailabilityAttribute(
continue;
}
}
+ if (Keyword == Ident_environment) {
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_availability_expected_environment);
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+ EnvironmentLoc = ParseIdentifierLoc();
+ continue;
+ }
// Special handling of 'NA' only when applied to introduced or
// deprecated.
@@ -1411,7 +1518,7 @@ void Parser::ParseAvailabilityAttribute(
SourceRange(AvailabilityLoc, T.getCloseLocation()), ScopeName,
ScopeLoc, Platform, Changes[Introduced], Changes[Deprecated],
Changes[Obsoleted], UnavailableLoc, MessageExpr.get(), Form,
- StrictLoc, ReplacementExpr.get());
+ StrictLoc, ReplacementExpr.get(), EnvironmentLoc);
}
/// Parse the contents of the "external_source_symbol" attribute.
@@ -1864,9 +1971,8 @@ void Parser::DiagnoseCXX11AttributeExtension(ParsedAttributes &Attrs) {
// variable.
// This function moves attributes that should apply to the type off DS to Attrs.
void Parser::stripTypeAttributesOffDeclSpec(ParsedAttributes &Attrs,
- DeclSpec &DS,
- Sema::TagUseKind TUK) {
- if (TUK == Sema::TUK_Reference)
+ DeclSpec &DS, TagUseKind TUK) {
+ if (TUK == TagUseKind::Reference)
return;
llvm::SmallVector<ParsedAttr *, 1> ToBeMoved;
@@ -1916,9 +2022,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(DeclaratorContext Context,
case tok::kw_export:
ProhibitAttributes(DeclAttrs);
ProhibitAttributes(DeclSpecAttrs);
- SingleDecl =
- ParseDeclarationStartingWithTemplate(Context, DeclEnd, DeclAttrs);
- break;
+ return ParseDeclarationStartingWithTemplate(Context, DeclEnd, DeclAttrs);
case tok::kw_inline:
// Could be the start of an inline namespace. Allowed as an ext in C++03.
if (getLangOpts().CPlusPlus && NextToken().is(tok::kw_namespace)) {
@@ -1994,8 +2098,9 @@ Parser::DeclGroupPtrTy Parser::ParseSimpleDeclaration(
ParsingDeclSpec DS(*this);
DS.takeAttributesFrom(DeclSpecAttrs);
+ ParsedTemplateInfo TemplateInfo;
DeclSpecContext DSContext = getDeclSpecContextFromDeclaratorContext(Context);
- ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS_none, DSContext);
+ ParseDeclarationSpecifiers(DS, TemplateInfo, AS_none, DSContext);
// If we had a free-standing type definition with a missing semicolon, we
// may get this far before the problem becomes obvious.
@@ -2027,7 +2132,7 @@ Parser::DeclGroupPtrTy Parser::ParseSimpleDeclaration(
if (DeclSpecStart)
DS.SetRangeStart(*DeclSpecStart);
- return ParseDeclGroup(DS, Context, DeclAttrs, &DeclEnd, FRI);
+ return ParseDeclGroup(DS, Context, DeclAttrs, TemplateInfo, &DeclEnd, FRI);
}
/// Returns true if this might be the start of a declarator, or a common typo
@@ -2184,6 +2289,7 @@ void Parser::SkipMalformedDecl() {
Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
DeclaratorContext Context,
ParsedAttributes &Attrs,
+ ParsedTemplateInfo &TemplateInfo,
SourceLocation *DeclEnd,
ForRangeInit *FRI) {
// Parse the first declarator.
@@ -2193,8 +2299,19 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
ParsedAttributes LocalAttrs(AttrFactory);
LocalAttrs.takeAllFrom(Attrs);
ParsingDeclarator D(*this, DS, LocalAttrs, Context);
+ if (TemplateInfo.TemplateParams)
+ D.setTemplateParameterLists(*TemplateInfo.TemplateParams);
+
+ bool IsTemplateSpecOrInst =
+ (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation ||
+ TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization);
+ SuppressAccessChecks SAC(*this, IsTemplateSpecOrInst);
+
ParseDeclarator(D);
+ if (IsTemplateSpecOrInst)
+ SAC.done();
+
// Bail out if the first declarator didn't seem well-formed.
if (!D.hasName() && !D.mayOmitIdentifier()) {
SkipMalformedDecl();
@@ -2202,7 +2319,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
}
if (getLangOpts().HLSL)
- MaybeParseHLSLSemantics(D);
+ MaybeParseHLSLAnnotations(D);
if (Tok.is(tok::kw_requires))
ParseTrailingRequiresClause(D);
@@ -2237,7 +2354,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
// Check to see if we have a function *definition* which must have a body.
if (Tok.is(tok::equal) && NextToken().is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteAfterFunctionEquals(D);
+ Actions.CodeCompletion().CodeCompleteAfterFunctionEquals(D);
return nullptr;
}
// We're at the point where the parsing of function declarator is finished.
@@ -2262,15 +2379,54 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
// need to handle the file scope definition case.
if (Context == DeclaratorContext::File) {
if (isStartOfFunctionDefinition(D)) {
+ // C++23 [dcl.typedef] p1:
+ // The typedef specifier shall not be [...], and it shall not be
+ // used in the decl-specifier-seq of a parameter-declaration nor in
+ // the decl-specifier-seq of a function-definition.
if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
- Diag(Tok, diag::err_function_declared_typedef);
-
- // Recover by treating the 'typedef' as spurious.
+ // If the user intended to write 'typename', we should have already
+ // suggested adding it elsewhere. In any case, recover by ignoring
+ // 'typedef' and suggest removing it.
+ Diag(DS.getStorageClassSpecLoc(),
+ diag::err_function_declared_typedef)
+ << FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
DS.ClearStorageClassSpecs();
}
+ Decl *TheDecl = nullptr;
+
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation) {
+ if (D.getName().getKind() != UnqualifiedIdKind::IK_TemplateId) {
+ // If the declarator-id is not a template-id, issue a diagnostic
+ // and recover by ignoring the 'template' keyword.
+ Diag(Tok, diag::err_template_defn_explicit_instantiation) << 0;
+ TheDecl = ParseFunctionDefinition(D, ParsedTemplateInfo(),
+ &LateParsedAttrs);
+ } else {
+ SourceLocation LAngleLoc =
+ PP.getLocForEndOfToken(TemplateInfo.TemplateLoc);
+ Diag(D.getIdentifierLoc(),
+ diag::err_explicit_instantiation_with_definition)
+ << SourceRange(TemplateInfo.TemplateLoc)
+ << FixItHint::CreateInsertion(LAngleLoc, "<>");
+
+ // Recover as if it were an explicit specialization.
+ TemplateParameterLists FakedParamLists;
+ FakedParamLists.push_back(Actions.ActOnTemplateParameterList(
+ 0, SourceLocation(), TemplateInfo.TemplateLoc, LAngleLoc,
+ std::nullopt, LAngleLoc, nullptr));
+
+ TheDecl = ParseFunctionDefinition(
+ D,
+ ParsedTemplateInfo(&FakedParamLists,
+ /*isSpecialization=*/true,
+ /*lastParameterListWasEmpty=*/true),
+ &LateParsedAttrs);
+ }
+ } else {
+ TheDecl =
+ ParseFunctionDefinition(D, TemplateInfo, &LateParsedAttrs);
+ }
- Decl *TheDecl = ParseFunctionDefinition(D, ParsedTemplateInfo(),
- &LateParsedAttrs);
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
@@ -2312,12 +2468,34 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
bool IsForRangeLoop = false;
if (TryConsumeToken(tok::colon, FRI->ColonLoc)) {
IsForRangeLoop = true;
+ EnterExpressionEvaluationContext ForRangeInitContext(
+ Actions, Sema::ExpressionEvaluationContext::PotentiallyEvaluated,
+ /*LambdaContextDecl=*/nullptr,
+ Sema::ExpressionEvaluationContextRecord::EK_Other,
+ getLangOpts().CPlusPlus23);
+
+ // P2718R0 - Lifetime extension in range-based for loops.
+ if (getLangOpts().CPlusPlus23) {
+ auto &LastRecord = Actions.ExprEvalContexts.back();
+ LastRecord.InLifetimeExtendingContext = true;
+ }
+
if (getLangOpts().OpenMP)
- Actions.startOpenMPCXXRangeFor();
+ Actions.OpenMP().startOpenMPCXXRangeFor();
if (Tok.is(tok::l_brace))
FRI->RangeExpr = ParseBraceInitializer();
else
FRI->RangeExpr = ParseExpression();
+
+ // Before c++23, ForRangeLifetimeExtendTemps should be empty.
+ assert(
+ getLangOpts().CPlusPlus23 ||
+ Actions.ExprEvalContexts.back().ForRangeLifetimeExtendTemps.empty());
+
+ // Move the collected materialized temporaries into ForRangeInit before
+ // ForRangeInitContext exit.
+ FRI->LifetimeExtendTemps = std::move(
+ Actions.ExprEvalContexts.back().ForRangeLifetimeExtendTemps);
}
Decl *ThisDecl = Actions.ActOnDeclarator(getCurScope(), D);
@@ -2334,8 +2512,8 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
}
SmallVector<Decl *, 8> DeclsInGroup;
- Decl *FirstDecl = ParseDeclarationAfterDeclaratorAndAttributes(
- D, ParsedTemplateInfo(), FRI);
+ Decl *FirstDecl =
+ ParseDeclarationAfterDeclaratorAndAttributes(D, TemplateInfo, FRI);
if (LateParsedAttrs.size() > 0)
ParseLexedAttributeList(LateParsedAttrs, FirstDecl, true, false);
D.complete(FirstDecl);
@@ -2358,6 +2536,16 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
break;
}
+ // C++23 [temp.pre]p5:
+ // In a template-declaration, explicit specialization, or explicit
+ // instantiation the init-declarator-list in the declaration shall
+ // contain at most one declarator.
+ if (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate &&
+ D.isFirstDeclarator()) {
+ Diag(CommaLoc, diag::err_multiple_template_declarators)
+ << TemplateInfo.Kind;
+ }
+
// Parse the next declarator.
D.clear();
D.setCommaLoc(CommaLoc);
@@ -2378,7 +2566,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
ParseDeclarator(D);
if (getLangOpts().HLSL)
- MaybeParseHLSLSemantics(D);
+ MaybeParseHLSLAnnotations(D);
if (!D.isInvalidType()) {
// C++2a [dcl.decl]p1
@@ -2387,7 +2575,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
// declarator requires-clause
if (Tok.is(tok::kw_requires))
ParseTrailingRequiresClause(D);
- Decl *ThisDecl = ParseDeclarationAfterDeclarator(D);
+ Decl *ThisDecl = ParseDeclarationAfterDeclarator(D, TemplateInfo);
D.complete(ThisDecl);
if (ThisDecl)
DeclsInGroup.push_back(ThisDecl);
@@ -2468,25 +2656,30 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
Parser &P;
Declarator &D;
Decl *ThisDecl;
+ bool Entered;
InitializerScopeRAII(Parser &P, Declarator &D, Decl *ThisDecl)
- : P(P), D(D), ThisDecl(ThisDecl) {
+ : P(P), D(D), ThisDecl(ThisDecl), Entered(false) {
if (ThisDecl && P.getLangOpts().CPlusPlus) {
Scope *S = nullptr;
if (D.getCXXScopeSpec().isSet()) {
P.EnterScope(0);
S = P.getCurScope();
}
- P.Actions.ActOnCXXEnterDeclInitializer(S, ThisDecl);
+ if (ThisDecl && !ThisDecl->isInvalidDecl()) {
+ P.Actions.ActOnCXXEnterDeclInitializer(S, ThisDecl);
+ Entered = true;
+ }
}
}
- ~InitializerScopeRAII() { pop(); }
- void pop() {
+ ~InitializerScopeRAII() {
if (ThisDecl && P.getLangOpts().CPlusPlus) {
Scope *S = nullptr;
if (D.getCXXScopeSpec().isSet())
S = P.getCurScope();
- P.Actions.ActOnCXXExitDeclInitializer(S, ThisDecl);
+
+ if (Entered)
+ P.Actions.ActOnCXXExitDeclInitializer(S, ThisDecl);
if (S)
P.ExitScope();
}
@@ -2571,7 +2764,8 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
}
}
- Sema::CUDATargetContextRAII X(Actions, Sema::CTCK_InitGlobalVar, ThisDecl);
+ SemaCUDA::CUDATargetContextRAII X(Actions.CUDA(),
+ SemaCUDA::CTCK_InitGlobalVar, ThisDecl);
switch (TheInitKind) {
// Parse declarator '=' initializer.
case InitKind::Equal: {
@@ -2583,6 +2777,7 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
<< 1 /* delete */;
else
Diag(ConsumeToken(), diag::err_deleted_non_function);
+ SkipDeletedFunctionBody();
} else if (Tok.is(tok::kw_default)) {
if (D.isFunctionDeclarator())
Diag(ConsumeToken(), diag::err_default_delete_in_multiple_declaration)
@@ -2595,7 +2790,8 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteInitializer(getCurScope(), ThisDecl);
+ Actions.CodeCompletion().CodeCompleteInitializer(getCurScope(),
+ ThisDecl);
Actions.FinalizeDeclaration(ThisDecl);
return nullptr;
}
@@ -2615,8 +2811,6 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
FRI->RangeExpr = Init;
}
- InitScope.pop();
-
if (Init.isInvalid()) {
SmallVector<tok::TokenKind, 2> StopTokens;
StopTokens.push_back(tok::comma);
@@ -2642,10 +2836,11 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
auto ThisVarDecl = dyn_cast_or_null<VarDecl>(ThisDecl);
auto RunSignatureHelp = [&]() {
- QualType PreferredType = Actions.ProduceConstructorSignatureHelp(
- ThisVarDecl->getType()->getCanonicalTypeInternal(),
- ThisDecl->getLocation(), Exprs, T.getOpenLocation(),
- /*Braced=*/false);
+ QualType PreferredType =
+ Actions.CodeCompletion().ProduceConstructorSignatureHelp(
+ ThisVarDecl->getType()->getCanonicalTypeInternal(),
+ ThisDecl->getLocation(), Exprs, T.getOpenLocation(),
+ /*Braced=*/false);
CalledSignatureHelp = true;
return PreferredType;
};
@@ -2664,11 +2859,9 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
bool SawError = ParseExpressionList(Exprs, ExpressionStarts);
- InitScope.pop();
-
if (SawError) {
if (ThisVarDecl && PP.isCodeCompletionReached() && !CalledSignatureHelp) {
- Actions.ProduceConstructorSignatureHelp(
+ Actions.CodeCompletion().ProduceConstructorSignatureHelp(
ThisVarDecl->getType()->getCanonicalTypeInternal(),
ThisDecl->getLocation(), Exprs, T.getOpenLocation(),
/*Braced=*/false);
@@ -2697,8 +2890,6 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
PreferredType.enterVariableInit(Tok.getLocation(), ThisDecl);
ExprResult Init(ParseBraceInitializer());
- InitScope.pop();
-
if (Init.isInvalid()) {
Actions.ActOnInitializerError(ThisDecl);
} else
@@ -2724,10 +2915,11 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
void Parser::ParseSpecifierQualifierList(
DeclSpec &DS, ImplicitTypenameContext AllowImplicitTypename,
AccessSpecifier AS, DeclSpecContext DSC) {
+ ParsedTemplateInfo TemplateInfo;
/// specifier-qualifier-list is a subset of declaration-specifiers. Just
/// parse declaration-specifiers and complain about extra stuff.
/// TODO: diagnose attribute-specifiers and alignment-specifiers.
- ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS, DSC, nullptr,
+ ParseDeclarationSpecifiers(DS, TemplateInfo, AS, DSC, nullptr,
AllowImplicitTypename);
// Validate declspec for type-name.
@@ -2806,7 +2998,7 @@ static bool isValidAfterIdentifierInDeclarator(const Token &T) {
/// other pieces of declspec after it, it returns true.
///
bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
- const ParsedTemplateInfo &TemplateInfo,
+ ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributes &Attrs) {
assert(Tok.is(tok::identifier) && "should have identifier");
@@ -2905,7 +3097,7 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
<< TokenName << TagName << getLangOpts().CPlusPlus
<< FixItHint::CreateInsertion(Tok.getLocation(), FixitTagName);
- if (Actions.LookupParsedName(R, getCurScope(), SS)) {
+ if (Actions.LookupName(R, getCurScope())) {
for (LookupResult::iterator I = R.begin(), IEnd = R.end();
I != IEnd; ++I)
Diag((*I)->getLocation(), diag::note_decl_hiding_tag_type)
@@ -3161,6 +3353,67 @@ void Parser::ParseAlignmentSpecifier(ParsedAttributes &Attrs,
}
}
+void Parser::DistributeCLateParsedAttrs(Decl *Dcl,
+ LateParsedAttrList *LateAttrs) {
+ if (!LateAttrs)
+ return;
+
+ if (Dcl) {
+ for (auto *LateAttr : *LateAttrs) {
+ if (LateAttr->Decls.empty())
+ LateAttr->addDecl(Dcl);
+ }
+ }
+}
+
+/// Bounds attributes (e.g., counted_by):
+/// AttrName '(' expression ')'
+void Parser::ParseBoundsAttribute(IdentifierInfo &AttrName,
+ SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs,
+ IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc,
+ ParsedAttr::Form Form) {
+ assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('");
+
+ BalancedDelimiterTracker Parens(*this, tok::l_paren);
+ Parens.consumeOpen();
+
+ if (Tok.is(tok::r_paren)) {
+ Diag(Tok.getLocation(), diag::err_argument_required_after_attribute);
+ Parens.consumeClose();
+ return;
+ }
+
+ ArgsVector ArgExprs;
+ // Don't evaluate argument when the attribute is ignored.
+ using ExpressionKind =
+ Sema::ExpressionEvaluationContextRecord::ExpressionKind;
+ EnterExpressionEvaluationContext EC(
+ Actions, Sema::ExpressionEvaluationContext::PotentiallyEvaluated, nullptr,
+ ExpressionKind::EK_AttrArgument);
+
+ ExprResult ArgExpr(
+ Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression()));
+
+ if (ArgExpr.isInvalid()) {
+ Parens.skipToEnd();
+ return;
+ }
+
+ ArgExprs.push_back(ArgExpr.get());
+ Parens.consumeClose();
+
+ ASTContext &Ctx = Actions.getASTContext();
+
+ ArgExprs.push_back(IntegerLiteral::Create(
+ Ctx, llvm::APInt(Ctx.getTypeSize(Ctx.getSizeType()), 0),
+ Ctx.getSizeType(), SourceLocation()));
+
+ Attrs.addNew(&AttrName, SourceRange(AttrNameLoc, Parens.getCloseLocation()),
+ ScopeName, ScopeLoc, ArgExprs.data(), ArgExprs.size(), Form);
+}
+
ExprResult Parser::ParseExtIntegerArgument() {
assert(Tok.isOneOf(tok::kw__ExtInt, tok::kw__BitInt) &&
"Not an extended int type");
@@ -3320,7 +3573,7 @@ Parser::DiagnoseMissingSemiAfterTagDefinition(DeclSpec &DS, AccessSpecifier AS,
/// 'friend': [C++ dcl.friend]
/// 'constexpr': [C++0x dcl.constexpr]
void Parser::ParseDeclarationSpecifiers(
- DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS,
+ DeclSpec &DS, ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS,
DeclSpecContext DSContext, LateParsedAttrList *LateAttrs,
ImplicitTypenameContext AllowImplicitTypename) {
if (DS.getSourceRange().isInvalid()) {
@@ -3432,8 +3685,23 @@ void Parser::ParseDeclarationSpecifiers(
DS.Finish(Actions, Policy);
return;
- case tok::l_square:
+ // alignment-specifier
+ case tok::kw__Alignas:
+ diagnoseUseOfC11Keyword(Tok);
+ [[fallthrough]];
case tok::kw_alignas:
+ // _Alignas and alignas (C23, not C++) should parse the same way. The C++
+ // parsing for alignas happens through the usual attribute parsing. This
+ // ensures that an alignas specifier can appear in a type position in C
+ // despite that not being valid in C++.
+ if (getLangOpts().C23 || Tok.getKind() == tok::kw__Alignas) {
+ if (Tok.getKind() == tok::kw_alignas)
+ Diag(Tok, diag::warn_c23_compat_keyword) << Tok.getName();
+ ParseAlignmentSpecifier(DS.getAttributes());
+ continue;
+ }
+ [[fallthrough]];
+ case tok::l_square:
if (!isAllowedCXX11AttributeSpecifier())
goto DoneWithDeclSpec;
@@ -3450,7 +3718,8 @@ void Parser::ParseDeclarationSpecifiers(
continue;
case tok::code_completion: {
- Sema::ParserCompletionContext CCC = Sema::PCC_Namespace;
+ SemaCodeCompletion::ParserCompletionContext CCC =
+ SemaCodeCompletion::PCC_Namespace;
if (DS.hasTypeSpecifier()) {
bool AllowNonIdentifiers
= (getCurScope()->getFlags() & (Scope::ControlScope |
@@ -3463,25 +3732,25 @@ void Parser::ParseDeclarationSpecifiers(
(DSContext == DeclSpecContext::DSC_class && DS.isFriendSpecified());
cutOffParsing();
- Actions.CodeCompleteDeclSpec(getCurScope(), DS,
- AllowNonIdentifiers,
- AllowNestedNameSpecifiers);
+ Actions.CodeCompletion().CodeCompleteDeclSpec(
+ getCurScope(), DS, AllowNonIdentifiers, AllowNestedNameSpecifiers);
return;
}
// Class context can appear inside a function/block, so prioritise that.
if (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate)
- CCC = DSContext == DeclSpecContext::DSC_class ? Sema::PCC_MemberTemplate
- : Sema::PCC_Template;
+ CCC = DSContext == DeclSpecContext::DSC_class
+ ? SemaCodeCompletion::PCC_MemberTemplate
+ : SemaCodeCompletion::PCC_Template;
else if (DSContext == DeclSpecContext::DSC_class)
- CCC = Sema::PCC_Class;
+ CCC = SemaCodeCompletion::PCC_Class;
else if (getCurScope()->getFnParent() || getCurScope()->getBlockParent())
- CCC = Sema::PCC_LocalDeclarationSpecifiers;
+ CCC = SemaCodeCompletion::PCC_LocalDeclarationSpecifiers;
else if (CurParsedObjCImpl)
- CCC = Sema::PCC_ObjCImplementation;
+ CCC = SemaCodeCompletion::PCC_ObjCImplementation;
cutOffParsing();
- Actions.CodeCompleteOrdinaryName(getCurScope(), CCC);
+ Actions.CodeCompletion().CodeCompleteOrdinaryName(getCurScope(), CCC);
return;
}
@@ -3706,7 +3975,7 @@ void Parser::ParseDeclarationSpecifiers(
// parse errors if this really is a __declspec attribute. Attempt to
// recognize that scenario and recover gracefully.
if (!getLangOpts().DeclSpecKeyword && Tok.is(tok::identifier) &&
- Tok.getIdentifierInfo()->getName().equals("__declspec")) {
+ Tok.getIdentifierInfo()->getName() == "__declspec") {
Diag(Loc, diag::err_ms_attributes_not_enabled);
// The next token should be an open paren. If it is, eat the entire
@@ -3762,7 +4031,7 @@ void Parser::ParseDeclarationSpecifiers(
if (DSContext == DeclSpecContext::DSC_objc_method_result &&
isObjCInstancetype()) {
- ParsedType TypeRep = Actions.ActOnObjCInstanceType(Loc);
+ ParsedType TypeRep = Actions.ObjC().ActOnObjCInstanceType(Loc);
assert(TypeRep);
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
DiagID, TypeRep, Policy);
@@ -4082,8 +4351,7 @@ void Parser::ParseDeclarationSpecifiers(
isStorageClass = true;
break;
case tok::kw__Thread_local:
- if (!getLangOpts().C11)
- Diag(Tok, diag::ext_c11_feature) << Tok.getName();
+ diagnoseUseOfC11Keyword(Tok);
isInvalid = DS.SetStorageClassSpecThread(DeclSpec::TSCS__Thread_local,
Loc, PrevSpec, DiagID);
isStorageClass = true;
@@ -4143,23 +4411,18 @@ void Parser::ParseDeclarationSpecifiers(
break;
}
case tok::kw__Noreturn:
- if (!getLangOpts().C11)
- Diag(Tok, diag::ext_c11_feature) << Tok.getName();
+ diagnoseUseOfC11Keyword(Tok);
isInvalid = DS.setFunctionSpecNoreturn(Loc, PrevSpec, DiagID);
break;
- // alignment-specifier
- case tok::kw__Alignas:
- if (!getLangOpts().C11)
- Diag(Tok, diag::ext_c11_feature) << Tok.getName();
- ParseAlignmentSpecifier(DS.getAttributes());
- continue;
-
// friend
case tok::kw_friend:
- if (DSContext == DeclSpecContext::DSC_class)
+ if (DSContext == DeclSpecContext::DSC_class) {
isInvalid = DS.SetFriendSpec(Loc, PrevSpec, DiagID);
- else {
+ Scope *CurS = getCurScope();
+ if (!isInvalid && CurS)
+ CurS->setFlags(CurS->getFlags() | Scope::FriendScope);
+ } else {
PrevSpec = ""; // not actually used by the diagnostic
DiagID = diag::err_friend_invalid_in_context;
isInvalid = true;
@@ -4173,6 +4436,8 @@ void Parser::ParseDeclarationSpecifiers(
// constexpr, consteval, constinit specifiers
case tok::kw_constexpr:
+ if (getLangOpts().C23)
+ Diag(Tok, diag::warn_c23_compat_keyword) << Tok.getName();
isInvalid = DS.SetConstexprSpec(ConstexprSpecKind::Constexpr, Loc,
PrevSpec, DiagID);
break;
@@ -4447,6 +4712,10 @@ void Parser::ParseDeclarationSpecifiers(
ParseDecltypeSpecifier(DS);
continue;
+ case tok::annot_pack_indexing_type:
+ ParsePackIndexingType(DS);
+ continue;
+
case tok::annot_pragma_pack:
HandlePragmaPack();
continue;
@@ -4477,9 +4746,7 @@ void Parser::ParseDeclarationSpecifiers(
// If the _Atomic keyword is immediately followed by a left parenthesis,
// it is interpreted as a type specifier (with a type name), not as a
// type qualifier.
- if (!getLangOpts().C11)
- Diag(Tok, diag::ext_c11_feature) << Tok.getName();
-
+ diagnoseUseOfC11Keyword(Tok);
if (NextToken().is(tok::l_paren)) {
ParseAtomicSpecifier(DS);
continue;
@@ -4580,6 +4847,38 @@ void Parser::ParseDeclarationSpecifiers(
}
}
+static void DiagnoseCountAttributedTypeInUnnamedAnon(ParsingDeclSpec &DS,
+ Parser &P) {
+
+ if (DS.getTypeSpecType() != DeclSpec::TST_struct)
+ return;
+
+ auto *RD = dyn_cast<RecordDecl>(DS.getRepAsDecl());
+ // We're only interested in unnamed, non-anonymous struct
+ if (!RD || !RD->getName().empty() || RD->isAnonymousStructOrUnion())
+ return;
+
+ for (auto *I : RD->decls()) {
+ auto *VD = dyn_cast<ValueDecl>(I);
+ if (!VD)
+ continue;
+
+ auto *CAT = VD->getType()->getAs<CountAttributedType>();
+ if (!CAT)
+ continue;
+
+ for (const auto &DD : CAT->dependent_decls()) {
+ if (!RD->containsDecl(DD.getDecl())) {
+ P.Diag(VD->getBeginLoc(), diag::err_count_attr_param_not_in_same_struct)
+ << DD.getDecl() << CAT->getKind() << CAT->isArrayType();
+ P.Diag(DD.getDecl()->getBeginLoc(),
+ diag::note_flexible_array_counted_by_attr_field)
+ << DD.getDecl();
+ }
+ }
+ }
+}
+
/// ParseStructDeclaration - Parse a struct declaration without the terminating
/// semicolon.
///
@@ -4603,13 +4902,14 @@ void Parser::ParseDeclarationSpecifiers(
///
void Parser::ParseStructDeclaration(
ParsingDeclSpec &DS,
- llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback) {
+ llvm::function_ref<Decl *(ParsingFieldDeclarator &)> FieldsCallback,
+ LateParsedAttrList *LateFieldAttrs) {
if (Tok.is(tok::kw___extension__)) {
// __extension__ silences extension warnings in the subexpression.
ExtensionRAIIObject O(Diags); // Use RAII to do this.
ConsumeToken();
- return ParseStructDeclaration(DS, FieldsCallback);
+ return ParseStructDeclaration(DS, FieldsCallback, LateFieldAttrs);
}
// Parse leading attributes.
@@ -4660,6 +4960,11 @@ void Parser::ParseStructDeclaration(
} else
DeclaratorInfo.D.SetIdentifier(nullptr, Tok.getLocation());
+ // Here, we now know that the unnamed struct is not an anonymous struct.
+ // Report an error if a counted_by attribute refers to a field in a
+ // different named struct.
+ DiagnoseCountAttributedTypeInUnnamedAnon(DS, *this);
+
if (TryConsumeToken(tok::colon)) {
ExprResult Res(ParseConstantExpression());
if (Res.isInvalid())
@@ -4669,10 +4974,12 @@ void Parser::ParseStructDeclaration(
}
// If attributes exist after the declarator, parse them.
- MaybeParseGNUAttributes(DeclaratorInfo.D);
+ MaybeParseGNUAttributes(DeclaratorInfo.D, LateFieldAttrs);
// We're done with this declarator; invoke the callback.
- FieldsCallback(DeclaratorInfo);
+ Decl *Field = FieldsCallback(DeclaratorInfo);
+ if (Field)
+ DistributeCLateParsedAttrs(Field, LateFieldAttrs);
// If we don't have a comma, it is either the end of the list (a ';')
// or an error, bail out.
@@ -4683,6 +4990,73 @@ void Parser::ParseStructDeclaration(
}
}
+// TODO: All callers of this function should be moved to
+// `Parser::ParseLexedAttributeList`.
+void Parser::ParseLexedCAttributeList(LateParsedAttrList &LAs, bool EnterScope,
+ ParsedAttributes *OutAttrs) {
+ assert(LAs.parseSoon() &&
+ "Attribute list should be marked for immediate parsing.");
+ for (auto *LA : LAs) {
+ ParseLexedCAttribute(*LA, EnterScope, OutAttrs);
+ delete LA;
+ }
+ LAs.clear();
+}
+
+/// Finish parsing an attribute for which parsing was delayed.
+/// This will be called at the end of parsing a class declaration
+/// for each LateParsedAttribute. We consume the saved tokens and
+/// create an attribute with the arguments filled in. We add this
+/// to the Attribute list for the decl.
+void Parser::ParseLexedCAttribute(LateParsedAttribute &LA, bool EnterScope,
+ ParsedAttributes *OutAttrs) {
+ // Create a fake EOF so that attribute parsing won't go off the end of the
+ // attribute.
+ Token AttrEnd;
+ AttrEnd.startToken();
+ AttrEnd.setKind(tok::eof);
+ AttrEnd.setLocation(Tok.getLocation());
+ AttrEnd.setEofData(LA.Toks.data());
+ LA.Toks.push_back(AttrEnd);
+
+ // Append the current token at the end of the new token stream so that it
+ // doesn't get lost.
+ LA.Toks.push_back(Tok);
+ PP.EnterTokenStream(LA.Toks, /*DisableMacroExpansion=*/true,
+ /*IsReinject=*/true);
+ // Drop the current token and bring the first cached one. It's the same token
+ // as when we entered this function.
+ ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
+
+ // TODO: Use `EnterScope`
+ (void)EnterScope;
+
+ ParsedAttributes Attrs(AttrFactory);
+
+ assert(LA.Decls.size() <= 1 &&
+ "late field attribute expects to have at most one declaration.");
+
+ // Dispatch based on the attribute and parse it
+ ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, nullptr, nullptr,
+ SourceLocation(), ParsedAttr::Form::GNU(), nullptr);
+
+ for (auto *D : LA.Decls)
+ Actions.ActOnFinishDelayedAttribute(getCurScope(), D, Attrs);
+
+ // Due to a parsing error, we either went over the cached tokens or
+ // there are still cached tokens left, so we skip the leftover tokens.
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+
+ // Consume the fake EOF token if it's there
+ if (Tok.is(tok::eof) && Tok.getEofData() == AttrEnd.getEofData())
+ ConsumeAnyToken();
+
+ if (OutAttrs) {
+ OutAttrs->takeAllFrom(Attrs);
+ }
+}
+
/// ParseStructUnionBody
/// struct-contents:
/// struct-declaration-list
@@ -4706,6 +5080,11 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
ParseScope StructScope(this, Scope::ClassScope|Scope::DeclScope);
Actions.ActOnTagStartDefinition(getCurScope(), TagDecl);
+ // `LateAttrParseExperimentalExtOnly=true` requests that only attributes
+ // marked with `LateAttrParseExperimentalExt` are late parsed.
+ LateParsedAttrList LateFieldAttrs(/*PSoon=*/true,
+ /*LateAttrParseExperimentalExtOnly=*/true);
+
// While we still have something to read, read the declarations in the struct.
while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
Tok.isNot(tok::eof)) {
@@ -4756,18 +5135,19 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
}
if (!Tok.is(tok::at)) {
- auto CFieldCallback = [&](ParsingFieldDeclarator &FD) {
+ auto CFieldCallback = [&](ParsingFieldDeclarator &FD) -> Decl * {
// Install the declarator into the current TagDecl.
Decl *Field =
Actions.ActOnField(getCurScope(), TagDecl,
FD.D.getDeclSpec().getSourceRange().getBegin(),
FD.D, FD.BitfieldSize);
FD.complete(Field);
+ return Field;
};
// Parse all the comma separated declarators.
ParsingDeclSpec DS(*this);
- ParseStructDeclaration(DS, CFieldCallback);
+ ParseStructDeclaration(DS, CFieldCallback, &LateFieldAttrs);
} else { // Handle @defs
ConsumeToken();
if (!Tok.isObjCAtKeyword(tok::objc_defs)) {
@@ -4783,8 +5163,8 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
continue;
}
SmallVector<Decl *, 16> Fields;
- Actions.ActOnDefs(getCurScope(), TagDecl, Tok.getLocation(),
- Tok.getIdentifierInfo(), Fields);
+ Actions.ObjC().ActOnDefs(getCurScope(), TagDecl, Tok.getLocation(),
+ Tok.getIdentifierInfo(), Fields);
ConsumeToken();
ExpectAndConsume(tok::r_paren);
}
@@ -4808,7 +5188,10 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
ParsedAttributes attrs(AttrFactory);
// If attributes exist after struct contents, parse them.
- MaybeParseGNUAttributes(attrs);
+ MaybeParseGNUAttributes(attrs, &LateFieldAttrs);
+
+ // Late parse field attributes if necessary.
+ ParseLexedCAttributeList(LateFieldAttrs, /*EnterScope=*/false);
SmallVector<Decl *, 32> FieldDecls(TagDecl->fields());
@@ -4855,7 +5238,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
if (Tok.is(tok::code_completion)) {
// Code completion for an enum name.
cutOffParsing();
- Actions.CodeCompleteTag(getCurScope(), DeclSpec::TST_enum);
+ Actions.CodeCompletion().CodeCompleteTag(getCurScope(), DeclSpec::TST_enum);
DS.SetTypeSpecError(); // Needed by ActOnUsingDeclaration.
return;
}
@@ -5042,9 +5425,9 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// enum foo {..}; void bar() { enum foo; } <- new foo in bar.
// enum foo {..}; void bar() { enum foo x; } <- use of old foo.
//
- Sema::TagUseKind TUK;
+ TagUseKind TUK;
if (AllowEnumSpecifier == AllowDefiningTypeSpec::No)
- TUK = Sema::TUK_Reference;
+ TUK = TagUseKind::Reference;
else if (Tok.is(tok::l_brace)) {
if (DS.isFriendSpecified()) {
Diag(Tok.getLocation(), diag::err_friend_decl_defines_type)
@@ -5056,9 +5439,9 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
ScopedEnumKWLoc = SourceLocation();
IsScopedUsingClassTag = false;
BaseType = TypeResult();
- TUK = Sema::TUK_Friend;
+ TUK = TagUseKind::Friend;
} else {
- TUK = Sema::TUK_Definition;
+ TUK = TagUseKind::Definition;
}
} else if (!isTypeSpecifier(DSC) &&
(Tok.is(tok::semi) ||
@@ -5067,7 +5450,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// An opaque-enum-declaration is required to be standalone (no preceding or
// following tokens in the declaration). Sema enforces this separately by
// diagnosing anything else in the DeclSpec.
- TUK = DS.isFriendSpecified() ? Sema::TUK_Friend : Sema::TUK_Declaration;
+ TUK = DS.isFriendSpecified() ? TagUseKind::Friend : TagUseKind::Declaration;
if (Tok.isNot(tok::semi)) {
// A semicolon was missing after this declaration. Diagnose and recover.
ExpectAndConsume(tok::semi, diag::err_expected_after, "enum");
@@ -5075,21 +5458,21 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
Tok.setKind(tok::semi);
}
} else {
- TUK = Sema::TUK_Reference;
+ TUK = TagUseKind::Reference;
}
bool IsElaboratedTypeSpecifier =
- TUK == Sema::TUK_Reference || TUK == Sema::TUK_Friend;
+ TUK == TagUseKind::Reference || TUK == TagUseKind::Friend;
// If this is an elaborated type specifier nested in a larger declaration,
// and we delayed diagnostics before, just merge them into the current pool.
- if (TUK == Sema::TUK_Reference && shouldDelayDiagsInTag) {
+ if (TUK == TagUseKind::Reference && shouldDelayDiagsInTag) {
diagsFromTag.redelay();
}
MultiTemplateParamsArg TParams;
if (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate &&
- TUK != Sema::TUK_Reference) {
+ TUK != TagUseKind::Reference) {
if (!getLangOpts().CPlusPlus11 || !SS.isSet()) {
// Skip the rest of this declarator, up until the comma or semicolon.
Diag(Tok, diag::err_enum_template);
@@ -5110,7 +5493,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
SS.setTemplateParamLists(TParams);
}
- if (!Name && TUK != Sema::TUK_Definition) {
+ if (!Name && TUK != TagUseKind::Definition) {
Diag(Tok, diag::err_enumerator_unnamed_no_def);
DS.SetTypeSpecError();
@@ -5142,8 +5525,8 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
stripTypeAttributesOffDeclSpec(attrs, DS, TUK);
- Sema::SkipBodyInfo SkipBody;
- if (!Name && TUK == Sema::TUK_Definition && Tok.is(tok::l_brace) &&
+ SkipBodyInfo SkipBody;
+ if (!Name && TUK == TagUseKind::Definition && Tok.is(tok::l_brace) &&
NextToken().is(tok::identifier))
SkipBody = Actions.shouldSkipAnonEnumBody(getCurScope(),
NextToken().getIdentifierInfo(),
@@ -5164,7 +5547,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
OffsetOfState, &SkipBody).get();
if (SkipBody.ShouldSkip) {
- assert(TUK == Sema::TUK_Definition && "can only skip a definition");
+ assert(TUK == TagUseKind::Definition && "can only skip a definition");
BalancedDelimiterTracker T(*this, tok::l_brace);
T.consumeOpen();
@@ -5206,7 +5589,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
if (!TagDecl) {
// The action failed to produce an enumeration tag. If this is a
// definition, consume the entire definition.
- if (Tok.is(tok::l_brace) && TUK != Sema::TUK_Reference) {
+ if (Tok.is(tok::l_brace) && TUK != TagUseKind::Reference) {
ConsumeBrace();
SkipUntil(tok::r_brace, StopAtSemi);
}
@@ -5215,7 +5598,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
return;
}
- if (Tok.is(tok::l_brace) && TUK == Sema::TUK_Definition) {
+ if (Tok.is(tok::l_brace) && TUK == TagUseKind::Definition) {
Decl *D = SkipBody.CheckSameAsPrevious ? SkipBody.New : TagDecl;
ParseEnumBody(StartLoc, D);
if (SkipBody.CheckSameAsPrevious &&
@@ -5582,24 +5965,32 @@ Parser::DeclGroupPtrTy Parser::ParseTopLevelStmtDecl() {
// Parse a top-level-stmt.
Parser::StmtVector Stmts;
ParsedStmtContext SubStmtCtx = ParsedStmtContext();
- Actions.PushFunctionScope();
+ ParseScope FnScope(this, Scope::FnScope | Scope::DeclScope |
+ Scope::CompoundStmtScope);
+ TopLevelStmtDecl *TLSD = Actions.ActOnStartTopLevelStmtDecl(getCurScope());
StmtResult R = ParseStatementOrDeclaration(Stmts, SubStmtCtx);
- Actions.PopFunctionScopeInfo();
if (!R.isUsable())
return nullptr;
- SmallVector<Decl *, 2> DeclsInGroup;
- DeclsInGroup.push_back(Actions.ActOnTopLevelStmtDecl(R.get()));
+ Actions.ActOnFinishTopLevelStmtDecl(TLSD, R.get());
if (Tok.is(tok::annot_repl_input_end) &&
Tok.getAnnotationValue() != nullptr) {
ConsumeAnnotationToken();
- cast<TopLevelStmtDecl>(DeclsInGroup.back())->setSemiMissing();
+ TLSD->setSemiMissing();
}
- // Currently happens for things like -fms-extensions and use `__if_exists`.
- for (Stmt *S : Stmts)
- DeclsInGroup.push_back(Actions.ActOnTopLevelStmtDecl(S));
+ SmallVector<Decl *, 2> DeclsInGroup;
+ DeclsInGroup.push_back(TLSD);
+
+ // Currently happens for things like -fms-extensions and use `__if_exists`.
+ for (Stmt *S : Stmts) {
+ // Here we should be safe as `__if_exists` and friends are not introducing
+ // new variables which need to live outside file scope.
+ TopLevelStmtDecl *D = Actions.ActOnStartTopLevelStmtDecl(getCurScope());
+ Actions.ActOnFinishTopLevelStmtDecl(D, S);
+ DeclsInGroup.push_back(D);
+ }
return Actions.BuildDeclaratorGroup(DeclsInGroup);
}
@@ -5756,6 +6147,7 @@ bool Parser::isDeclarationSpecifier(
// C++11 decltype and constexpr.
case tok::annot_decltype:
+ case tok::annot_pack_indexing_type:
case tok::kw_constexpr:
// C++20 consteval and constinit.
@@ -5766,6 +6158,11 @@ bool Parser::isDeclarationSpecifier(
case tok::kw__Atomic:
return true;
+ case tok::kw_alignas:
+ // alignas is a type-specifier-qualifier in C23, which is a kind of
+ // declaration-specifier. Outside of C23 mode (including in C++), it is not.
+ return getLangOpts().C23;
+
// GNU ObjC bizarre protocol extension: <proto1,proto2> with implicit 'id'.
case tok::less:
return getLangOpts().ObjC;
@@ -6031,7 +6428,7 @@ void Parser::ParseTypeQualifierListOpt(
if (CodeCompletionHandler)
(*CodeCompletionHandler)();
else
- Actions.CodeCompleteTypeQualifiers(DS);
+ Actions.CodeCompletion().CodeCompleteTypeQualifiers(DS);
return;
case tok::kw_const:
@@ -6049,8 +6446,7 @@ void Parser::ParseTypeQualifierListOpt(
case tok::kw__Atomic:
if (!AtomicAllowed)
goto DoneWithTypeQuals;
- if (!getLangOpts().C11)
- Diag(Tok, diag::ext_c11_feature) << Tok.getName();
+ diagnoseUseOfC11Keyword(Tok);
isInvalid = DS.SetTypeQual(DeclSpec::TQ_atomic, Loc, PrevSpec, DiagID,
getLangOpts());
break;
@@ -6495,6 +6891,17 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
/*ObjectHasErrors=*/false, EnteringContext);
}
+ // C++23 [basic.scope.namespace]p1:
+ // For each non-friend redeclaration or specialization whose target scope
+ // is or is contained by the scope, the portion after the declarator-id,
+ // class-head-name, or enum-head-name is also included in the scope.
+ // C++23 [basic.scope.class]p1:
+ // For each non-friend redeclaration or specialization whose target scope
+ // is or is contained by the scope, the portion after the declarator-id,
+ // class-head-name, or enum-head-name is also included in the scope.
+ //
+ // FIXME: We should not be doing this for friend declarations; they have
+ // their own special lookup semantics specified by [basic.lookup.unqual]p6.
if (D.getCXXScopeSpec().isValid()) {
if (Actions.ShouldEnterDeclaratorScope(getCurScope(),
D.getCXXScopeSpec()))
@@ -6561,12 +6968,14 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
}
bool HadScope = D.getCXXScopeSpec().isValid();
+ SourceLocation TemplateKWLoc;
if (ParseUnqualifiedId(D.getCXXScopeSpec(),
/*ObjectType=*/nullptr,
/*ObjectHadErrors=*/false,
/*EnteringContext=*/true,
/*AllowDestructorName=*/true, AllowConstructorName,
- AllowDeductionGuide, nullptr, D.getName()) ||
+ AllowDeductionGuide, &TemplateKWLoc,
+ D.getName()) ||
// Once we're past the identifier, if the scope was bad, mark the
// whole declarator bad.
D.getCXXScopeSpec().isInvalid()) {
@@ -6823,18 +7232,23 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
void Parser::ParseDecompositionDeclarator(Declarator &D) {
assert(Tok.is(tok::l_square));
+ TentativeParsingAction PA(*this);
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+
+ if (isCXX11AttributeSpecifier())
+ DiagnoseAndSkipCXX11Attributes();
+
// If this doesn't look like a structured binding, maybe it's a misplaced
// array declarator.
- // FIXME: Consume the l_square first so we don't need extra lookahead for
- // this.
- if (!(NextToken().is(tok::identifier) &&
- GetLookAheadToken(2).isOneOf(tok::comma, tok::r_square)) &&
- !(NextToken().is(tok::r_square) &&
- GetLookAheadToken(2).isOneOf(tok::equal, tok::l_brace)))
+ if (!(Tok.is(tok::identifier) &&
+ NextToken().isOneOf(tok::comma, tok::r_square, tok::kw_alignas,
+ tok::l_square)) &&
+ !(Tok.is(tok::r_square) &&
+ NextToken().isOneOf(tok::equal, tok::l_brace))) {
+ PA.Revert();
return ParseMisplacedBracketDeclarator(D);
-
- BalancedDelimiterTracker T(*this, tok::l_square);
- T.consumeOpen();
+ }
SmallVector<DecompositionDeclarator::Binding, 32> Bindings;
while (Tok.isNot(tok::r_square)) {
@@ -6859,13 +7273,27 @@ void Parser::ParseDecompositionDeclarator(Declarator &D) {
}
}
+ if (isCXX11AttributeSpecifier())
+ DiagnoseAndSkipCXX11Attributes();
+
if (Tok.isNot(tok::identifier)) {
Diag(Tok, diag::err_expected) << tok::identifier;
break;
}
- Bindings.push_back({Tok.getIdentifierInfo(), Tok.getLocation()});
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ SourceLocation Loc = Tok.getLocation();
ConsumeToken();
+
+ ParsedAttributes Attrs(AttrFactory);
+ if (isCXX11AttributeSpecifier()) {
+ Diag(Tok, getLangOpts().CPlusPlus26
+ ? diag::warn_cxx23_compat_decl_attrs_on_binding
+ : diag::ext_decl_attrs_on_binding);
+ MaybeParseCXX11Attributes(Attrs);
+ }
+
+ Bindings.push_back({II, Loc, std::move(Attrs)});
}
if (Tok.isNot(tok::r_square))
@@ -6880,6 +7308,8 @@ void Parser::ParseDecompositionDeclarator(Declarator &D) {
T.consumeClose();
}
+ PA.Commit();
+
return D.setDecompositionBindings(T.getOpenLocation(), Bindings,
T.getCloseLocation());
}
@@ -7135,12 +7565,12 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
// with the pure-specifier in the same way.
// Parse cv-qualifier-seq[opt].
- ParseTypeQualifierListOpt(DS, AR_NoAttributesParsed,
- /*AtomicAllowed*/ false,
- /*IdentifierRequired=*/false,
- llvm::function_ref<void()>([&]() {
- Actions.CodeCompleteFunctionQualifiers(DS, D);
- }));
+ ParseTypeQualifierListOpt(
+ DS, AR_NoAttributesParsed,
+ /*AtomicAllowed*/ false,
+ /*IdentifierRequired=*/false, llvm::function_ref<void()>([&]() {
+ Actions.CodeCompletion().CodeCompleteFunctionQualifiers(DS, D);
+ }));
if (!DS.getSourceRange().getEnd().isInvalid()) {
EndLoc = DS.getSourceRange().getEnd();
}
@@ -7152,12 +7582,20 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
std::optional<Sema::CXXThisScopeRAII> ThisScope;
InitCXXThisScopeForDeclaratorIfRelevant(D, DS, ThisScope);
- // Parse exception-specification[opt].
- // FIXME: Per [class.mem]p6, all exception-specifications at class scope
- // should be delayed, including those for non-members (eg, friend
- // declarations). But only applying this to member declarations is
- // consistent with what other implementations do.
- bool Delayed = D.isFirstDeclarationOfMember() &&
+ // C++ [class.mem.general]p8:
+ // A complete-class context of a class (template) is a
+ // - function body,
+ // - default argument,
+ // - default template argument,
+ // - noexcept-specifier, or
+ // - default member initializer
+ // within the member-specification of the class or class template.
+ //
+ // Parse exception-specification[opt]. If we are in the
+ // member-specification of a class or class template, this is a
+ // complete-class context and parsing of the noexcept-specifier should be
+ // delayed (even if this is a friend declaration).
+ bool Delayed = D.getContext() == DeclaratorContext::Member &&
D.isFunctionDeclaratorAFunctionDeclaration();
if (Delayed && Actions.isLibstdcxxEagerExceptionSpecHack(D) &&
GetLookAheadToken(0).is(tok::kw_noexcept) &&
@@ -7445,11 +7883,25 @@ void Parser::ParseParameterDeclarationClause(
// Parse a C++23 Explicit Object Parameter
// We do that in all language modes to produce a better diagnostic.
SourceLocation ThisLoc;
- if (getLangOpts().CPlusPlus && Tok.is(tok::kw_this))
+ if (getLangOpts().CPlusPlus && Tok.is(tok::kw_this)) {
ThisLoc = ConsumeToken();
+ // C++23 [dcl.fct]p6:
+ // An explicit-object-parameter-declaration is a parameter-declaration
+ // with a this specifier. An explicit-object-parameter-declaration
+ // shall appear only as the first parameter-declaration of a
+ // parameter-declaration-list of either:
+ // - a member-declarator that declares a member function, or
+ // - a lambda-declarator.
+ //
+ // The parameter-declaration-list of a requires-expression is not such
+ // a context.
+ if (DeclaratorCtx == DeclaratorContext::RequiresExpr)
+ Diag(ThisLoc, diag::err_requires_expr_explicit_object_parameter);
+ }
- ParseDeclarationSpecifiers(DS, /*TemplateInfo=*/ParsedTemplateInfo(),
- AS_none, DeclSpecContext::DSC_normal,
+ ParsedTemplateInfo TemplateInfo;
+ ParseDeclarationSpecifiers(DS, TemplateInfo, AS_none,
+ DeclSpecContext::DSC_normal,
/*LateAttrs=*/nullptr, AllowImplicitTypename);
DS.takeAttributesFrom(ArgDeclSpecAttrs);
@@ -7471,7 +7923,7 @@ void Parser::ParseParameterDeclarationClause(
// Parse GNU attributes, if present.
MaybeParseGNUAttributes(ParmDeclarator);
if (getLangOpts().HLSL)
- MaybeParseHLSLSemantics(DS.getAttributes());
+ MaybeParseHLSLAnnotations(DS.getAttributes());
if (Tok.is(tok::kw_requires)) {
// User tried to define a requires clause in a parameter declaration,
@@ -7485,7 +7937,7 @@ void Parser::ParseParameterDeclarationClause(
}
// Remember this parsed parameter in ParamInfo.
- IdentifierInfo *ParmII = ParmDeclarator.getIdentifier();
+ const IdentifierInfo *ParmII = ParmDeclarator.getIdentifier();
// DefArgToks is used when the parsing of default arguments needs
// to be delayed.
@@ -7681,7 +8133,7 @@ void Parser::ParseBracketDeclarator(Declarator &D) {
return;
} else if (Tok.getKind() == tok::code_completion) {
cutOffParsing();
- Actions.CodeCompleteBracketDeclarator(getCurScope());
+ Actions.CodeCompletion().CodeCompleteBracketDeclarator(getCurScope());
return;
}
@@ -7816,7 +8268,7 @@ void Parser::ParseMisplacedBracketDeclarator(Declarator &D) {
// Adding back the bracket info to the end of the Declarator.
for (unsigned i = 0, e = TempDeclarator.getNumTypeObjects(); i < e; ++i) {
const DeclaratorChunk &Chunk = TempDeclarator.getTypeObject(i);
- D.AddTypeInfo(Chunk, SourceLocation());
+ D.AddTypeInfo(Chunk, TempDeclarator.getAttributePool(), SourceLocation());
}
// The missing identifier would have been diagnosed in ParseDirectDeclarator.
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp b/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
index c0d771dc93da..ce827c689beb 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
@@ -27,6 +27,7 @@
#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaCodeCompletion.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/TimeProfiler.h"
#include <optional>
@@ -69,7 +70,7 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteNamespaceDecl(getCurScope());
+ Actions.CodeCompletion().CodeCompleteNamespaceDecl(getCurScope());
return nullptr;
}
@@ -140,6 +141,14 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
SkipUntil(tok::semi);
return nullptr;
}
+ if (!ExtraNSs.empty()) {
+ Diag(ExtraNSs.front().NamespaceLoc,
+ diag::err_unexpected_qualified_namespace_alias)
+ << SourceRange(ExtraNSs.front().NamespaceLoc,
+ ExtraNSs.back().IdentLoc);
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
if (attrLoc.isValid())
Diag(attrLoc, diag::err_unexpected_namespace_attributes_alias);
if (InlineLoc.isValid())
@@ -301,7 +310,7 @@ Decl *Parser::ParseNamespaceAlias(SourceLocation NamespaceLoc,
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteNamespaceAliasDecl(getCurScope());
+ Actions.CodeCompletion().CodeCompleteNamespaceAliasDecl(getCurScope());
return nullptr;
}
@@ -436,6 +445,14 @@ Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context) {
/// 'export' declaration
/// 'export' '{' declaration-seq[opt] '}'
///
+/// HLSL: Parse export function declaration.
+///
+/// export-function-declaration:
+/// 'export' function-declaration
+///
+/// export-declaration-group:
+/// 'export' '{' function-declaration-seq[opt] '}'
+///
Decl *Parser::ParseExportDeclaration() {
assert(Tok.is(tok::kw_export));
SourceLocation ExportLoc = ConsumeToken();
@@ -484,7 +501,7 @@ Parser::DeclGroupPtrTy Parser::ParseUsingDirectiveOrDeclaration(
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteUsing(getCurScope());
+ Actions.CodeCompletion().CodeCompleteUsing(getCurScope());
return nullptr;
}
@@ -534,7 +551,7 @@ Decl *Parser::ParseUsingDirective(DeclaratorContext Context,
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteUsingDirective(getCurScope());
+ Actions.CodeCompletion().CodeCompleteUsingDirective(getCurScope());
return nullptr;
}
@@ -608,7 +625,7 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
}
// Parse nested-name-specifier.
- IdentifierInfo *LastII = nullptr;
+ const IdentifierInfo *LastII = nullptr;
if (ParseOptionalCXXScopeSpecifier(D.SS, /*ObjectType=*/nullptr,
/*ObjectHasErrors=*/false,
/*EnteringContext=*/false,
@@ -683,7 +700,7 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
/// using-enum-declaration: [C++20, dcl.enum]
/// 'using' elaborated-enum-specifier ;
/// The terminal name of the elaborated-enum-specifier undergoes
-/// ordinary lookup
+/// type-only lookup
///
/// elaborated-enum-specifier:
/// 'enum' nested-name-specifier[opt] identifier
@@ -715,7 +732,7 @@ Parser::DeclGroupPtrTy Parser::ParseUsingDeclaration(
/*ObectHasErrors=*/false,
/*EnteringConttext=*/false,
/*MayBePseudoDestructor=*/nullptr,
- /*IsTypename=*/false,
+ /*IsTypename=*/true,
/*IdentifierInfo=*/nullptr,
/*OnlyNamespace=*/false,
/*InUsingDeclaration=*/true)) {
@@ -725,20 +742,53 @@ Parser::DeclGroupPtrTy Parser::ParseUsingDeclaration(
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteUsing(getCurScope());
+ Actions.CodeCompletion().CodeCompleteUsing(getCurScope());
return nullptr;
}
- if (!Tok.is(tok::identifier)) {
+ Decl *UED = nullptr;
+
+ // FIXME: identifier and annot_template_id handling is very similar to
+ // ParseBaseTypeSpecifier. It should be factored out into a function.
+ if (Tok.is(tok::identifier)) {
+ IdentifierInfo *IdentInfo = Tok.getIdentifierInfo();
+ SourceLocation IdentLoc = ConsumeToken();
+
+ ParsedType Type = Actions.getTypeName(
+ *IdentInfo, IdentLoc, getCurScope(), &SS, /*isClassName=*/true,
+ /*HasTrailingDot=*/false,
+ /*ObjectType=*/nullptr, /*IsCtorOrDtorName=*/false,
+ /*WantNontrivialTypeSourceInfo=*/true);
+
+ UED = Actions.ActOnUsingEnumDeclaration(
+ getCurScope(), AS, UsingLoc, UELoc, IdentLoc, *IdentInfo, Type, &SS);
+ } else if (Tok.is(tok::annot_template_id)) {
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+
+ if (TemplateId->mightBeType()) {
+ AnnotateTemplateIdTokenAsType(SS, ImplicitTypenameContext::No,
+ /*IsClassName=*/true);
+
+ assert(Tok.is(tok::annot_typename) && "template-id -> type failed");
+ TypeResult Type = getTypeAnnotation(Tok);
+ SourceRange Loc = Tok.getAnnotationRange();
+ ConsumeAnnotationToken();
+
+ UED = Actions.ActOnUsingEnumDeclaration(getCurScope(), AS, UsingLoc,
+ UELoc, Loc, *TemplateId->Name,
+ Type.get(), &SS);
+ } else {
+ Diag(Tok.getLocation(), diag::err_using_enum_not_enum)
+ << TemplateId->Name->getName()
+ << SourceRange(TemplateId->TemplateNameLoc, TemplateId->RAngleLoc);
+ }
+ } else {
Diag(Tok.getLocation(), diag::err_using_enum_expect_identifier)
<< Tok.is(tok::kw_enum);
SkipUntil(tok::semi);
return nullptr;
}
- IdentifierInfo *IdentInfo = Tok.getIdentifierInfo();
- SourceLocation IdentLoc = ConsumeToken();
- Decl *UED = Actions.ActOnUsingEnumDeclaration(
- getCurScope(), AS, UsingLoc, UELoc, IdentLoc, *IdentInfo, &SS);
+
if (!UED) {
SkipUntil(tok::semi);
return nullptr;
@@ -791,6 +841,11 @@ Parser::DeclGroupPtrTy Parser::ParseUsingDeclaration(
ProhibitAttributes(PrefixAttrs);
Decl *DeclFromDeclSpec = nullptr;
+ Scope *CurScope = getCurScope();
+ if (CurScope)
+ CurScope->setFlags(Scope::ScopeFlags::TypeAliasScope |
+ CurScope->getFlags());
+
Decl *AD = ParseAliasDeclarationAfterDeclarator(
TemplateInfo, UsingLoc, D, DeclEnd, AS, Attrs, &DeclFromDeclSpec);
return Actions.ConvertDeclToDeclGroup(AD, DeclFromDeclSpec);
@@ -968,9 +1023,9 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd) {
// Save the token name used for static assertion.
const char *TokName = Tok.getName();
- if (Tok.is(tok::kw__Static_assert) && !getLangOpts().C11)
- Diag(Tok, diag::ext_c11_feature) << Tok.getName();
- if (Tok.is(tok::kw_static_assert)) {
+ if (Tok.is(tok::kw__Static_assert))
+ diagnoseUseOfC11Keyword(Tok);
+ else if (Tok.is(tok::kw_static_assert)) {
if (!getLangOpts().CPlusPlus) {
if (getLangOpts().C23)
Diag(Tok, diag::warn_c23_compat_keyword) << Tok.getName();
@@ -1196,6 +1251,93 @@ void Parser::AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
PP.AnnotateCachedTokens(Tok);
}
+SourceLocation Parser::ParsePackIndexingType(DeclSpec &DS) {
+ assert(Tok.isOneOf(tok::annot_pack_indexing_type, tok::identifier) &&
+ "Expected an identifier");
+
+ TypeResult Type;
+ SourceLocation StartLoc;
+ SourceLocation EllipsisLoc;
+ const char *PrevSpec;
+ unsigned DiagID;
+ const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
+
+ if (Tok.is(tok::annot_pack_indexing_type)) {
+ StartLoc = Tok.getLocation();
+ SourceLocation EndLoc;
+ Type = getTypeAnnotation(Tok);
+ EndLoc = Tok.getAnnotationEndLoc();
+ // Unfortunately, we don't know the LParen source location as the annotated
+ // token doesn't have it.
+ DS.setTypeArgumentRange(SourceRange(SourceLocation(), EndLoc));
+ ConsumeAnnotationToken();
+ if (Type.isInvalid()) {
+ DS.SetTypeSpecError();
+ return EndLoc;
+ }
+ DS.SetTypeSpecType(DeclSpec::TST_typename_pack_indexing, StartLoc, PrevSpec,
+ DiagID, Type, Policy);
+ return EndLoc;
+ }
+ if (!NextToken().is(tok::ellipsis) ||
+ !GetLookAheadToken(2).is(tok::l_square)) {
+ DS.SetTypeSpecError();
+ return Tok.getEndLoc();
+ }
+
+ ParsedType Ty = Actions.getTypeName(*Tok.getIdentifierInfo(),
+ Tok.getLocation(), getCurScope());
+ if (!Ty) {
+ DS.SetTypeSpecError();
+ return Tok.getEndLoc();
+ }
+ Type = Ty;
+
+ StartLoc = ConsumeToken();
+ EllipsisLoc = ConsumeToken();
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+ ExprResult IndexExpr = ParseConstantExpression();
+ T.consumeClose();
+
+ DS.SetRangeStart(StartLoc);
+ DS.SetRangeEnd(T.getCloseLocation());
+
+ if (!IndexExpr.isUsable()) {
+ ASTContext &C = Actions.getASTContext();
+ IndexExpr = IntegerLiteral::Create(C, C.MakeIntValue(0, C.getSizeType()),
+ C.getSizeType(), SourceLocation());
+ }
+
+ DS.SetTypeSpecType(DeclSpec::TST_typename, StartLoc, PrevSpec, DiagID, Type,
+ Policy);
+ DS.SetPackIndexingExpr(EllipsisLoc, IndexExpr.get());
+ return T.getCloseLocation();
+}
+
+void Parser::AnnotateExistingIndexedTypeNamePack(ParsedType T,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ // make sure we have a token we can turn into an annotation token
+ if (PP.isBacktrackEnabled()) {
+ PP.RevertCachedTokens(1);
+ if (!T) {
+ // We encountered an error in parsing 'decltype(...)' so lets annotate all
+ // the tokens in the backtracking cache - that we likely had to skip over
+ // to get to a token that allows us to resume parsing, such as a
+ // semi-colon.
+ EndLoc = PP.getLastCachedTokenLocation();
+ }
+ } else
+ PP.EnterToken(Tok, /*IsReinject*/ true);
+
+ Tok.setKind(tok::annot_pack_indexing_type);
+ setTypeAnnotation(Tok, T);
+ Tok.setAnnotationEndLoc(EndLoc);
+ Tok.setLocation(StartLoc);
+ PP.AnnotateCachedTokens(Tok);
+}
+
DeclSpec::TST Parser::TypeTransformTokToDeclSpec() {
switch (Tok.getKind()) {
#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) \
@@ -1293,7 +1435,17 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
return Actions.ActOnTypeName(DeclaratorInfo);
}
+ if (Tok.is(tok::annot_pack_indexing_type)) {
+ DeclSpec DS(AttrFactory);
+ ParsePackIndexingType(DS);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
+ return Actions.ActOnTypeName(DeclaratorInfo);
+ }
+
// Check whether we have a template-id that names a type.
+ // FIXME: identifier and annot_template_id handling in ParseUsingDeclaration
+ // work very similarly. It should be refactored into a separate function.
if (Tok.is(tok::annot_template_id)) {
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
if (TemplateId->mightBeType()) {
@@ -1399,6 +1551,15 @@ void Parser::ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs) {
}
}
+void Parser::ParseNullabilityClassAttributes(ParsedAttributes &attrs) {
+ while (Tok.is(tok::kw__Nullable)) {
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ auto Kind = Tok.getKind();
+ SourceLocation AttrNameLoc = ConsumeToken();
+ attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0, Kind);
+ }
+}
+
/// Determine whether the following tokens are valid after a type-specifier
/// which could be a standalone declaration. This will conservatively return
/// true if there's any doubt, and is appropriate for insert-';' fixits.
@@ -1544,7 +1705,7 @@ bool Parser::isValidAfterTypeSpecifier(bool CouldBeBitfield) {
/// 'union'
void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
SourceLocation StartLoc, DeclSpec &DS,
- const ParsedTemplateInfo &TemplateInfo,
+ ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributes &Attributes) {
@@ -1563,7 +1724,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
if (Tok.is(tok::code_completion)) {
// Code completion for a struct, class, or union name.
cutOffParsing();
- Actions.CodeCompleteTag(getCurScope(), TagType);
+ Actions.CodeCompletion().CodeCompleteTag(getCurScope(), TagType);
return;
}
@@ -1580,15 +1741,21 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
ParsedAttributes attrs(AttrFactory);
// If attributes exist after tag, parse them.
- MaybeParseAttributes(PAKM_CXX11 | PAKM_Declspec | PAKM_GNU, attrs);
-
- // Parse inheritance specifiers.
- if (Tok.isOneOf(tok::kw___single_inheritance, tok::kw___multiple_inheritance,
- tok::kw___virtual_inheritance))
- ParseMicrosoftInheritanceClassAttributes(attrs);
-
- // Allow attributes to precede or succeed the inheritance specifiers.
- MaybeParseAttributes(PAKM_CXX11 | PAKM_Declspec | PAKM_GNU, attrs);
+ for (;;) {
+ MaybeParseAttributes(PAKM_CXX11 | PAKM_Declspec | PAKM_GNU, attrs);
+ // Parse inheritance specifiers.
+ if (Tok.isOneOf(tok::kw___single_inheritance,
+ tok::kw___multiple_inheritance,
+ tok::kw___virtual_inheritance)) {
+ ParseMicrosoftInheritanceClassAttributes(attrs);
+ continue;
+ }
+ if (Tok.is(tok::kw__Nullable)) {
+ ParseNullabilityClassAttributes(attrs);
+ continue;
+ }
+ break;
+ }
// Source location used by FIXIT to insert misplaced
// C++11 attributes
@@ -1630,6 +1797,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
tok::kw___is_member_pointer,
tok::kw___is_nothrow_assignable,
tok::kw___is_nothrow_constructible,
+ tok::kw___is_nothrow_convertible,
tok::kw___is_nothrow_destructible,
tok::kw___is_nullptr,
tok::kw___is_object,
@@ -1655,9 +1823,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
tok::kw___is_union,
tok::kw___is_unsigned,
tok::kw___is_void,
- tok::kw___is_volatile,
- tok::kw___reference_binds_to_temporary,
- tok::kw___reference_constructs_from_temporary))
+ tok::kw___is_volatile
+ ))
// GNU libstdc++ 4.2 and libc++ use certain intrinsic names as the
// name of struct templates, but some are keywords in GCC >= 4.3
// and Clang. Therefore, when we see the token sequence "struct
@@ -1738,18 +1905,14 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TemplateParams->pop_back();
} else {
TemplateParams = nullptr;
- const_cast<ParsedTemplateInfo &>(TemplateInfo).Kind =
- ParsedTemplateInfo::NonTemplate;
+ TemplateInfo.Kind = ParsedTemplateInfo::NonTemplate;
}
} else if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation) {
// Pretend this is just a forward declaration.
TemplateParams = nullptr;
- const_cast<ParsedTemplateInfo &>(TemplateInfo).Kind =
- ParsedTemplateInfo::NonTemplate;
- const_cast<ParsedTemplateInfo &>(TemplateInfo).TemplateLoc =
- SourceLocation();
- const_cast<ParsedTemplateInfo &>(TemplateInfo).ExternLoc =
- SourceLocation();
+ TemplateInfo.Kind = ParsedTemplateInfo::NonTemplate;
+ TemplateInfo.TemplateLoc = SourceLocation();
+ TemplateInfo.ExternLoc = SourceLocation();
}
};
@@ -1760,6 +1923,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
if (Tok.is(tok::identifier)) {
Name = Tok.getIdentifierInfo();
NameLoc = ConsumeToken();
+ DS.SetRangeEnd(NameLoc);
if (Tok.is(tok::less) && getLangOpts().CPlusPlus) {
// The name was supposed to refer to a template, but didn't.
@@ -1840,11 +2004,11 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
MaybeParseCXX11Attributes(Attributes);
const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
- Sema::TagUseKind TUK;
+ TagUseKind TUK;
if (isDefiningTypeSpecifierContext(DSC, getLangOpts().CPlusPlus) ==
AllowDefiningTypeSpec::No ||
(getLangOpts().OpenMP && OpenMPDirectiveParsing))
- TUK = Sema::TUK_Reference;
+ TUK = TagUseKind::Reference;
else if (Tok.is(tok::l_brace) ||
(DSC != DeclSpecContext::DSC_association &&
getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
@@ -1859,10 +2023,10 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// Skip everything up to the semicolon, so that this looks like a proper
// friend class (or template thereof) declaration.
SkipUntil(tok::semi, StopBeforeMatch);
- TUK = Sema::TUK_Friend;
+ TUK = TagUseKind::Friend;
} else {
// Okay, this is a class definition.
- TUK = Sema::TUK_Definition;
+ TUK = TagUseKind::Definition;
}
} else if (isClassCompatibleKeyword() &&
(NextToken().is(tok::l_square) ||
@@ -1903,15 +2067,15 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
}
if (Tok.isOneOf(tok::l_brace, tok::colon))
- TUK = Sema::TUK_Definition;
+ TUK = TagUseKind::Definition;
else
- TUK = Sema::TUK_Reference;
+ TUK = TagUseKind::Reference;
PA.Revert();
} else if (!isTypeSpecifier(DSC) &&
(Tok.is(tok::semi) ||
(Tok.isAtStartOfLine() && !isValidAfterTypeSpecifier(false)))) {
- TUK = DS.isFriendSpecified() ? Sema::TUK_Friend : Sema::TUK_Declaration;
+ TUK = DS.isFriendSpecified() ? TagUseKind::Friend : TagUseKind::Declaration;
if (Tok.isNot(tok::semi)) {
const PrintingPolicy &PPol = Actions.getASTContext().getPrintingPolicy();
// A semicolon was missing after this declaration. Diagnose and recover.
@@ -1921,11 +2085,11 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
Tok.setKind(tok::semi);
}
} else
- TUK = Sema::TUK_Reference;
+ TUK = TagUseKind::Reference;
// Forbid misplaced attributes. In cases of a reference, we pass attributes
// to caller to handle.
- if (TUK != Sema::TUK_Reference) {
+ if (TUK != TagUseKind::Reference) {
// If this is not a reference, then the only possible
// valid place for C++11 attributes to appear here
// is between class-key and class-name. If there are
@@ -1951,7 +2115,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
if (!Name && !TemplateId &&
(DS.getTypeSpecType() == DeclSpec::TST_error ||
- TUK != Sema::TUK_Definition)) {
+ TUK != TagUseKind::Definition)) {
if (DS.getTypeSpecType() != DeclSpec::TST_error) {
// We have a declaration or reference to an anonymous class.
Diag(StartLoc, diag::err_anon_type_definition)
@@ -1961,7 +2125,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// If we are parsing a definition and stop at a base-clause, continue on
// until the semicolon. Continuing from the comma will just trick us into
// thinking we are seeing a variable declaration.
- if (TUK == Sema::TUK_Definition && Tok.is(tok::colon))
+ if (TUK == TagUseKind::Definition && Tok.is(tok::colon))
SkipUntil(tok::semi, StopBeforeMatch);
else
SkipUntil(tok::comma, StopAtSemi);
@@ -1973,7 +2137,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TypeResult TypeResult = true; // invalid
bool Owned = false;
- Sema::SkipBodyInfo SkipBody;
+ SkipBodyInfo SkipBody;
if (TemplateId) {
// Explicit specialization, class template partial specialization,
// or explicit instantiation.
@@ -1982,7 +2146,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
if (TemplateId->isInvalid()) {
// Can't build the declaration.
} else if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
- TUK == Sema::TUK_Declaration) {
+ TUK == TagUseKind::Declaration) {
// This is an explicit instantiation of a class template.
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
diag::err_keyword_not_allowed,
@@ -1998,8 +2162,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// they have template headers, in which case they're ill-formed
// (FIXME: "template <class T> friend class A<T>::B<int>;").
// We diagnose this error in ActOnClassTemplateSpecialization.
- } else if (TUK == Sema::TUK_Reference ||
- (TUK == Sema::TUK_Friend &&
+ } else if (TUK == TagUseKind::Reference ||
+ (TUK == TagUseKind::Friend &&
TemplateInfo.Kind == ParsedTemplateInfo::NonTemplate)) {
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
diag::err_keyword_not_allowed,
@@ -2024,10 +2188,10 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// It this is friend declaration however, since it cannot have a
// template header, it is most likely that the user meant to
// remove the 'template' keyword.
- assert((TUK == Sema::TUK_Definition || TUK == Sema::TUK_Friend) &&
+ assert((TUK == TagUseKind::Definition || TUK == TagUseKind::Friend) &&
"Expected a definition here");
- if (TUK == Sema::TUK_Friend) {
+ if (TUK == TagUseKind::Friend) {
Diag(DS.getFriendSpecLoc(), diag::err_friend_explicit_instantiation);
TemplateParams = nullptr;
} else {
@@ -2058,7 +2222,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
&SkipBody);
}
} else if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
- TUK == Sema::TUK_Declaration) {
+ TUK == TagUseKind::Declaration) {
// Explicit instantiation of a member of a class template
// specialization, e.g.,
//
@@ -2069,7 +2233,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TagOrTempResult = Actions.ActOnExplicitInstantiation(
getCurScope(), TemplateInfo.ExternLoc, TemplateInfo.TemplateLoc,
TagType, StartLoc, SS, Name, NameLoc, attrs);
- } else if (TUK == Sema::TUK_Friend &&
+ } else if (TUK == TagUseKind::Friend &&
TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate) {
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
diag::err_keyword_not_allowed,
@@ -2081,12 +2245,12 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
MultiTemplateParamsArg(TemplateParams ? &(*TemplateParams)[0] : nullptr,
TemplateParams ? TemplateParams->size() : 0));
} else {
- if (TUK != Sema::TUK_Declaration && TUK != Sema::TUK_Definition)
+ if (TUK != TagUseKind::Declaration && TUK != TagUseKind::Definition)
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
diag::err_keyword_not_allowed,
/* DiagnoseEmptyAttrs=*/true);
- if (TUK == Sema::TUK_Definition &&
+ if (TUK == TagUseKind::Definition &&
TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation) {
// If the declarator-id is not a template-id, issue a diagnostic and
// recover by ignoring the 'template' keyword.
@@ -2101,7 +2265,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// reference. For example, we don't need the template parameters here:
// template <class T> class A *makeA(T t);
MultiTemplateParamsArg TParams;
- if (TUK != Sema::TUK_Reference && TemplateParams)
+ if (TUK != TagUseKind::Reference && TemplateParams)
TParams =
MultiTemplateParamsArg(&(*TemplateParams)[0], TemplateParams->size());
@@ -2120,7 +2284,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// If ActOnTag said the type was dependent, try again with the
// less common call.
if (IsDependent) {
- assert(TUK == Sema::TUK_Reference || TUK == Sema::TUK_Friend);
+ assert(TUK == TagUseKind::Reference || TUK == TagUseKind::Friend);
TypeResult = Actions.ActOnDependentTag(getCurScope(), TagType, TUK, SS,
Name, StartLoc, NameLoc);
}
@@ -2131,13 +2295,13 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// just merge them into the current pool.
if (shouldDelayDiagsInTag) {
diagsFromTag.done();
- if (TUK == Sema::TUK_Reference &&
+ if (TUK == TagUseKind::Reference &&
TemplateInfo.Kind == ParsedTemplateInfo::Template)
diagsFromTag.redelay();
}
// If there is a body, parse it and inform the actions module.
- if (TUK == Sema::TUK_Definition) {
+ if (TUK == TagUseKind::Definition) {
assert(Tok.is(tok::l_brace) ||
(getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
isClassCompatibleKeyword());
@@ -2195,7 +2359,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
//
// After a type-specifier, we don't expect a semicolon. This only happens in
// C, since definitions are not permitted in this context in C++.
- if (TUK == Sema::TUK_Definition &&
+ if (TUK == TagUseKind::Definition &&
(getLangOpts().CPlusPlus || !isTypeSpecifier(DSC)) &&
(TemplateInfo.Kind || !isValidAfterTypeSpecifier(false))) {
if (Tok.isNot(tok::semi)) {
@@ -2362,7 +2526,7 @@ void Parser::HandleMemberFunctionDeclDelays(Declarator &DeclaratorInfo,
if (!NeedLateParse) {
// Look ahead to see if there are any default args
for (unsigned ParamIdx = 0; ParamIdx < FTI.NumParams; ++ParamIdx) {
- auto Param = cast<ParmVarDecl>(FTI.Params[ParamIdx].Param);
+ const auto *Param = cast<ParmVarDecl>(FTI.Params[ParamIdx].Param);
if (Param->hasUnparsedDefaultArg()) {
NeedLateParse = true;
break;
@@ -2404,7 +2568,7 @@ VirtSpecifiers::Specifier Parser::isCXX11VirtSpecifier(const Token &Tok) const {
if (!getLangOpts().CPlusPlus || Tok.isNot(tok::identifier))
return VirtSpecifiers::VS_None;
- IdentifierInfo *II = Tok.getIdentifierInfo();
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
// Initialize the contextual keywords.
if (!Ident_final) {
@@ -2525,6 +2689,10 @@ bool Parser::ParseCXXMemberDeclaratorBeforeInitializer(
else
DeclaratorInfo.SetIdentifier(nullptr, Tok.getLocation());
+ if (getLangOpts().HLSL)
+ MaybeParseHLSLAnnotations(DeclaratorInfo, nullptr,
+ /*CouldBeBitField*/ true);
+
if (!DeclaratorInfo.isFunctionDeclarator() && TryConsumeToken(tok::colon)) {
assert(DeclaratorInfo.isPastIdentifier() &&
"don't know where identifier would go yet?");
@@ -2599,7 +2767,7 @@ void Parser::MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(
ParseTypeQualifierListOpt(
DS, AR_NoAttributesParsed, false,
/*IdentifierRequired=*/false, llvm::function_ref<void()>([&]() {
- Actions.CodeCompleteFunctionQualifiers(DS, D, &VS);
+ Actions.CodeCompletion().CodeCompleteFunctionQualifiers(DS, D, &VS);
}));
D.ExtendWithDeclSpec(DS);
@@ -2680,11 +2848,9 @@ void Parser::MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(
/// constant-initializer:
/// '=' constant-expression
///
-Parser::DeclGroupPtrTy
-Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
- ParsedAttributes &AccessAttrs,
- const ParsedTemplateInfo &TemplateInfo,
- ParsingDeclRAIIObject *TemplateDiags) {
+Parser::DeclGroupPtrTy Parser::ParseCXXClassMemberDeclaration(
+ AccessSpecifier AS, ParsedAttributes &AccessAttrs,
+ ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject *TemplateDiags) {
assert(getLangOpts().CPlusPlus &&
"ParseCXXClassMemberDeclaration should only be called in C++ mode");
if (Tok.is(tok::at)) {
@@ -2760,7 +2926,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
}
// static_assert-declaration. A templated static_assert declaration is
- // diagnosed in Parser::ParseSingleDeclarationAfterTemplate.
+ // diagnosed in Parser::ParseDeclarationAfterTemplate.
if (!TemplateInfo.Kind &&
Tok.isOneOf(tok::kw_static_assert, tok::kw__Static_assert)) {
SourceLocation DeclEnd;
@@ -2773,9 +2939,8 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
"Nested template improperly parsed?");
ObjCDeclContextSwitch ObjCDC(*this);
SourceLocation DeclEnd;
- return DeclGroupPtrTy::make(
- DeclGroupRef(ParseTemplateDeclarationOrSpecialization(
- DeclaratorContext::Member, DeclEnd, AccessAttrs, AS)));
+ return ParseTemplateDeclarationOrSpecialization(DeclaratorContext::Member,
+ DeclEnd, AccessAttrs, AS);
}
// Handle: member-declaration ::= '__extension__' member-declaration
@@ -2963,7 +3128,8 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
DefinitionKind = FunctionDefinitionKind::Deleted;
else if (KW.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteAfterFunctionEquals(DeclaratorInfo);
+ Actions.CodeCompletion().CodeCompleteAfterFunctionEquals(
+ DeclaratorInfo);
return nullptr;
}
}
@@ -3043,7 +3209,8 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
DeclSpec::SCS_static &&
DeclaratorInfo.getDeclSpec().getStorageClassSpec() !=
DeclSpec::SCS_typedef &&
- !DS.isFriendSpecified()) {
+ !DS.isFriendSpecified() &&
+ TemplateInfo.Kind == ParsedTemplateInfo::NonTemplate) {
// It's a default member initializer.
if (BitfieldSize.get())
Diag(Tok, getLangOpts().CPlusPlus20
@@ -3142,7 +3309,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
} else if (ThisDecl)
Actions.AddInitializerToDecl(ThisDecl, Init.get(),
EqualLoc.isInvalid());
- } else if (ThisDecl && DS.getStorageClassSpec() == DeclSpec::SCS_static)
+ } else if (ThisDecl && DeclaratorInfo.isStaticMember())
// No initializer.
Actions.ActOnUninitializedDecl(ThisDecl);
@@ -3184,6 +3351,16 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
break;
}
+ // C++23 [temp.pre]p5:
+ // In a template-declaration, explicit specialization, or explicit
+ // instantiation the init-declarator-list in the declaration shall
+ // contain at most one declarator.
+ if (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate &&
+ DeclaratorInfo.isFirstDeclarator()) {
+ Diag(CommaLoc, diag::err_multiple_template_declarators)
+ << TemplateInfo.Kind;
+ }
+
// Parse the next declarator.
DeclaratorInfo.clear();
VS.clear();
@@ -3269,6 +3446,7 @@ ExprResult Parser::ParseCXXMemberInitializer(Decl *D, bool IsFunction,
<< 1 /* delete */;
else
Diag(ConsumeToken(), diag::err_deleted_non_function);
+ SkipDeletedFunctionBody();
return ExprError();
}
} else if (Tok.is(tok::kw_default)) {
@@ -3391,8 +3569,10 @@ Parser::DeclGroupPtrTy Parser::ParseCXXClassMemberDeclarationWithPragmas(
case tok::kw_private:
// FIXME: We don't accept GNU attributes on access specifiers in OpenCL mode
// yet.
- if (getLangOpts().OpenCL && !NextToken().is(tok::colon))
- return ParseCXXClassMemberDeclaration(AS, AccessAttrs);
+ if (getLangOpts().OpenCL && !NextToken().is(tok::colon)) {
+ ParsedTemplateInfo TemplateInfo;
+ return ParseCXXClassMemberDeclaration(AS, AccessAttrs, TemplateInfo);
+ }
[[fallthrough]];
case tok::kw_public:
case tok::kw_protected: {
@@ -3448,7 +3628,8 @@ Parser::DeclGroupPtrTy Parser::ParseCXXClassMemberDeclarationWithPragmas(
ConsumeAnnotationToken();
return nullptr;
}
- return ParseCXXClassMemberDeclaration(AS, AccessAttrs);
+ ParsedTemplateInfo TemplateInfo;
+ return ParseCXXClassMemberDeclaration(AS, AccessAttrs, TemplateInfo);
}
}
@@ -3761,8 +3942,8 @@ void Parser::ParseConstructorInitializer(Decl *ConstructorDecl) {
do {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteConstructorInitializer(ConstructorDecl,
- MemInitializers);
+ Actions.CodeCompletion().CodeCompleteConstructorInitializer(
+ ConstructorDecl, MemInitializers);
return;
}
@@ -3836,6 +4017,10 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
// ParseOptionalCXXScopeSpecifier at this point.
// FIXME: Can we get here with a scope specifier?
ParseDecltypeSpecifier(DS);
+ } else if (Tok.is(tok::annot_pack_indexing_type)) {
+ // Uses of T...[N] will already have been converted to
+ // annot_pack_indexing_type by ParseOptionalCXXScopeSpecifier at this point.
+ ParsePackIndexingType(DS);
} else {
TemplateIdAnnotation *TemplateId = Tok.is(tok::annot_template_id)
? takeTemplateIdAnnotation(Tok)
@@ -3878,9 +4063,10 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
auto RunSignatureHelp = [&] {
if (TemplateTypeTy.isInvalid())
return QualType();
- QualType PreferredType = Actions.ProduceCtorInitMemberSignatureHelp(
- ConstructorDecl, SS, TemplateTypeTy.get(), ArgExprs, II,
- T.getOpenLocation(), /*Braced=*/false);
+ QualType PreferredType =
+ Actions.CodeCompletion().ProduceCtorInitMemberSignatureHelp(
+ ConstructorDecl, SS, TemplateTypeTy.get(), ArgExprs, II,
+ T.getOpenLocation(), /*Braced=*/false);
CalledSignatureHelp = true;
return PreferredType;
};
@@ -4129,6 +4315,24 @@ void Parser::ParseTrailingRequiresClause(Declarator &D) {
SourceLocation RequiresKWLoc = ConsumeToken();
+ // C++23 [basic.scope.namespace]p1:
+ // For each non-friend redeclaration or specialization whose target scope
+ // is or is contained by the scope, the portion after the declarator-id,
+ // class-head-name, or enum-head-name is also included in the scope.
+ // C++23 [basic.scope.class]p1:
+ // For each non-friend redeclaration or specialization whose target scope
+ // is or is contained by the scope, the portion after the declarator-id,
+ // class-head-name, or enum-head-name is also included in the scope.
+ //
+ // FIXME: We should really be calling ParseTrailingRequiresClause in
+ // ParseDirectDeclarator, when we are already in the declarator scope.
+ // This would also correctly suppress access checks for specializations
+ // and explicit instantiations, which we currently do not do.
+ CXXScopeSpec &SS = D.getCXXScopeSpec();
+ DeclaratorScopeObj DeclScopeObj(*this, SS);
+ if (SS.isValid() && Actions.ShouldEnterDeclaratorScope(getCurScope(), SS))
+ DeclScopeObj.EnterDeclaratorScope();
+
ExprResult TrailingRequiresClause;
ParseScope ParamScope(this, Scope::DeclScope |
Scope::FunctionDeclarationScope |
@@ -4247,10 +4451,9 @@ void Parser::PopParsingClass(Sema::ParsingClassState state) {
/// If a keyword or an alternative token that satisfies the syntactic
/// requirements of an identifier is contained in an attribute-token,
/// it is considered an identifier.
-IdentifierInfo *
-Parser::TryParseCXX11AttributeIdentifier(SourceLocation &Loc,
- Sema::AttributeCompletion Completion,
- const IdentifierInfo *Scope) {
+IdentifierInfo *Parser::TryParseCXX11AttributeIdentifier(
+ SourceLocation &Loc, SemaCodeCompletion::AttributeCompletion Completion,
+ const IdentifierInfo *Scope) {
switch (Tok.getKind()) {
default:
// Identifiers and keywords have identifier info attached.
@@ -4264,9 +4467,9 @@ Parser::TryParseCXX11AttributeIdentifier(SourceLocation &Loc,
case tok::code_completion:
cutOffParsing();
- Actions.CodeCompleteAttribute(getLangOpts().CPlusPlus ? ParsedAttr::AS_CXX11
- : ParsedAttr::AS_C23,
- Completion, Scope);
+ Actions.CodeCompletion().CodeCompleteAttribute(
+ getLangOpts().CPlusPlus ? ParsedAttr::AS_CXX11 : ParsedAttr::AS_C23,
+ Completion, Scope);
return nullptr;
case tok::numeric_constant: {
@@ -4393,14 +4596,70 @@ static bool IsBuiltInOrStandardCXX11Attribute(IdentifierInfo *AttrName,
case ParsedAttr::AT_Unlikely:
return true;
case ParsedAttr::AT_WarnUnusedResult:
- return !ScopeName && AttrName->getName().equals("nodiscard");
+ return !ScopeName && AttrName->getName() == "nodiscard";
case ParsedAttr::AT_Unused:
- return !ScopeName && AttrName->getName().equals("maybe_unused");
+ return !ScopeName && AttrName->getName() == "maybe_unused";
default:
return false;
}
}
+/// Parse the argument to C++23's [[assume()]] attribute.
+bool Parser::ParseCXXAssumeAttributeArg(ParsedAttributes &Attrs,
+ IdentifierInfo *AttrName,
+ SourceLocation AttrNameLoc,
+ SourceLocation *EndLoc,
+ ParsedAttr::Form Form) {
+ assert(Tok.is(tok::l_paren) && "Not a C++11 attribute argument list");
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ // [dcl.attr.assume]: The expression is potentially evaluated.
+ EnterExpressionEvaluationContext Unevaluated(
+ Actions, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
+
+ TentativeParsingAction TPA(*this);
+ ExprResult Res(
+ Actions.CorrectDelayedTyposInExpr(ParseConditionalExpression()));
+ if (Res.isInvalid()) {
+ TPA.Commit();
+ SkipUntil(tok::r_paren, tok::r_square, StopAtSemi | StopBeforeMatch);
+ if (Tok.is(tok::r_paren))
+ T.consumeClose();
+ return true;
+ }
+
+ if (!Tok.isOneOf(tok::r_paren, tok::r_square)) {
+ // Emit a better diagnostic if this is an otherwise valid expression that
+ // is not allowed here.
+ TPA.Revert();
+ Res = ParseExpression();
+ if (!Res.isInvalid()) {
+ auto *E = Res.get();
+ Diag(E->getExprLoc(), diag::err_assume_attr_expects_cond_expr)
+ << AttrName << FixItHint::CreateInsertion(E->getBeginLoc(), "(")
+ << FixItHint::CreateInsertion(PP.getLocForEndOfToken(E->getEndLoc()),
+ ")")
+ << E->getSourceRange();
+ }
+
+ T.consumeClose();
+ return true;
+ }
+
+ TPA.Commit();
+ ArgsUnion Assumption = Res.get();
+ auto RParen = Tok.getLocation();
+ T.consumeClose();
+ Attrs.addNew(AttrName, SourceRange(AttrNameLoc, RParen), nullptr,
+ SourceLocation(), &Assumption, 1, Form);
+
+ if (EndLoc)
+ *EndLoc = RParen;
+
+ return false;
+}
+
/// ParseCXX11AttributeArgs -- Parse a C++11 attribute-argument-clause.
///
/// [C++11] attribute-argument-clause:
@@ -4452,7 +4711,9 @@ bool Parser::ParseCXX11AttributeArgs(
return true;
}
- if (ScopeName && ScopeName->isStr("omp")) {
+ // [[omp::directive]] and [[omp::sequence]] need special handling.
+ if (ScopeName && ScopeName->isStr("omp") &&
+ (AttrName->isStr("directive") || AttrName->isStr("sequence"))) {
Diag(AttrNameLoc, getLangOpts().OpenMP >= 51
? diag::warn_omp51_compat_attributes
: diag::ext_omp_attributes);
@@ -4469,7 +4730,12 @@ bool Parser::ParseCXX11AttributeArgs(
if (ScopeName && (ScopeName->isStr("clang") || ScopeName->isStr("_Clang")))
NumArgs = ParseClangAttributeArgs(AttrName, AttrNameLoc, Attrs, EndLoc,
ScopeName, ScopeLoc, Form);
- else
+ // So does C++23's assume() attribute.
+ else if (!ScopeName && AttrName->isStr("assume")) {
+ if (ParseCXXAssumeAttributeArg(Attrs, AttrName, AttrNameLoc, EndLoc, Form))
+ return true;
+ NumArgs = 1;
+ } else
NumArgs = ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, EndLoc,
ScopeName, ScopeLoc, Form);
@@ -4534,10 +4800,12 @@ void Parser::ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
CachedTokens &OpenMPTokens,
SourceLocation *EndLoc) {
if (Tok.is(tok::kw_alignas)) {
- if (getLangOpts().C23)
- Diag(Tok, diag::warn_c23_compat_keyword) << Tok.getName();
- else
- Diag(Tok.getLocation(), diag::warn_cxx98_compat_alignas);
+ // alignas is a valid token in C23 but it is not an attribute, it's a type-
+ // specifier-qualifier, which means it has different parsing behavior. We
+ // handle this in ParseDeclarationSpecifiers() instead of here in C. We
+ // should not get here for C any longer.
+ assert(getLangOpts().CPlusPlus && "'alignas' is not an attribute in C");
+ Diag(Tok.getLocation(), diag::warn_cxx98_compat_alignas);
ParseAlignmentSpecifier(Attrs, EndLoc);
return;
}
@@ -4585,7 +4853,7 @@ void Parser::ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
ConsumeToken();
CommonScopeName = TryParseCXX11AttributeIdentifier(
- CommonScopeLoc, Sema::AttributeCompletion::Scope);
+ CommonScopeLoc, SemaCodeCompletion::AttributeCompletion::Scope);
if (!CommonScopeName) {
Diag(Tok.getLocation(), diag::err_expected) << tok::identifier;
SkipUntil(tok::r_square, tok::colon, StopBeforeMatch);
@@ -4614,7 +4882,8 @@ void Parser::ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
IdentifierInfo *ScopeName = nullptr, *AttrName = nullptr;
AttrName = TryParseCXX11AttributeIdentifier(
- AttrLoc, Sema::AttributeCompletion::Attribute, CommonScopeName);
+ AttrLoc, SemaCodeCompletion::AttributeCompletion::Attribute,
+ CommonScopeName);
if (!AttrName)
// Break out to the "expected ']'" diagnostic.
break;
@@ -4625,7 +4894,8 @@ void Parser::ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
ScopeLoc = AttrLoc;
AttrName = TryParseCXX11AttributeIdentifier(
- AttrLoc, Sema::AttributeCompletion::Attribute, ScopeName);
+ AttrLoc, SemaCodeCompletion::AttributeCompletion::Attribute,
+ ScopeName);
if (!AttrName) {
Diag(Tok.getLocation(), diag::err_expected) << tok::identifier;
SkipUntil(tok::r_square, tok::comma, StopAtSemi | StopBeforeMatch);
@@ -4849,9 +5119,10 @@ void Parser::ParseMicrosoftAttributes(ParsedAttributes &Attrs) {
StopAtSemi | StopBeforeMatch | StopAtCodeCompletion);
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteAttribute(AttributeCommonInfo::AS_Microsoft,
- Sema::AttributeCompletion::Attribute,
- /*Scope=*/nullptr);
+ Actions.CodeCompletion().CodeCompleteAttribute(
+ AttributeCommonInfo::AS_Microsoft,
+ SemaCodeCompletion::AttributeCompletion::Attribute,
+ /*Scope=*/nullptr);
break;
}
if (Tok.isNot(tok::identifier)) // ']', but also eof
@@ -4947,8 +5218,9 @@ void Parser::ParseMicrosoftIfExistsClassDeclaration(
continue;
}
+ ParsedTemplateInfo TemplateInfo;
// Parse all the comma separated declarators.
- ParseCXXClassMemberDeclaration(CurAS, AccessAttrs);
+ ParseCXXClassMemberDeclaration(CurAS, AccessAttrs, TemplateInfo);
}
Braces.consumeClose();
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp b/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
index e862856a08ca..e82b56527283 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
@@ -30,6 +30,12 @@
#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaCUDA.h"
+#include "clang/Sema/SemaCodeCompletion.h"
+#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaOpenACC.h"
+#include "clang/Sema/SemaOpenMP.h"
+#include "clang/Sema/SemaSYCL.h"
#include "clang/Sema/TypoCorrection.h"
#include "llvm/ADT/SmallVector.h"
#include <optional>
@@ -163,8 +169,8 @@ Parser::ParseExpressionWithLeadingExtension(SourceLocation ExtLoc) {
ExprResult Parser::ParseAssignmentExpression(TypeCastState isTypeCast) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteExpression(getCurScope(),
- PreferredType.get(Tok.getLocation()));
+ Actions.CodeCompletion().CodeCompleteExpression(
+ getCurScope(), PreferredType.get(Tok.getLocation()));
return ExprError();
}
@@ -179,6 +185,19 @@ ExprResult Parser::ParseAssignmentExpression(TypeCastState isTypeCast) {
return ParseRHSOfBinaryExpression(LHS, prec::Assignment);
}
+ExprResult Parser::ParseConditionalExpression() {
+ if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
+ Actions.CodeCompletion().CodeCompleteExpression(
+ getCurScope(), PreferredType.get(Tok.getLocation()));
+ return ExprError();
+ }
+
+ ExprResult LHS = ParseCastExpression(
+ AnyCastExpr, /*isAddressOfOperand=*/false, NotTypeCast);
+ return ParseRHSOfBinaryExpression(LHS, prec::Conditional);
+}
+
/// Parse an assignment expression where part of an Objective-C message
/// send has already been parsed.
///
@@ -741,6 +760,109 @@ class CastExpressionIdValidator final : public CorrectionCandidateCallback {
};
}
+bool Parser::isRevertibleTypeTrait(const IdentifierInfo *II,
+ tok::TokenKind *Kind) {
+ if (RevertibleTypeTraits.empty()) {
+// Revertible type trait is a feature for backwards compatibility with older
+// standard libraries that declare their own structs with the same name as
+// the builtins listed below. New builtins should NOT be added to this list.
+#define RTT_JOIN(X, Y) X##Y
+#define REVERTIBLE_TYPE_TRAIT(Name) \
+ RevertibleTypeTraits[PP.getIdentifierInfo(#Name)] = RTT_JOIN(tok::kw_, Name)
+
+ REVERTIBLE_TYPE_TRAIT(__is_abstract);
+ REVERTIBLE_TYPE_TRAIT(__is_aggregate);
+ REVERTIBLE_TYPE_TRAIT(__is_arithmetic);
+ REVERTIBLE_TYPE_TRAIT(__is_array);
+ REVERTIBLE_TYPE_TRAIT(__is_assignable);
+ REVERTIBLE_TYPE_TRAIT(__is_base_of);
+ REVERTIBLE_TYPE_TRAIT(__is_bounded_array);
+ REVERTIBLE_TYPE_TRAIT(__is_class);
+ REVERTIBLE_TYPE_TRAIT(__is_complete_type);
+ REVERTIBLE_TYPE_TRAIT(__is_compound);
+ REVERTIBLE_TYPE_TRAIT(__is_const);
+ REVERTIBLE_TYPE_TRAIT(__is_constructible);
+ REVERTIBLE_TYPE_TRAIT(__is_convertible);
+ REVERTIBLE_TYPE_TRAIT(__is_convertible_to);
+ REVERTIBLE_TYPE_TRAIT(__is_destructible);
+ REVERTIBLE_TYPE_TRAIT(__is_empty);
+ REVERTIBLE_TYPE_TRAIT(__is_enum);
+ REVERTIBLE_TYPE_TRAIT(__is_floating_point);
+ REVERTIBLE_TYPE_TRAIT(__is_final);
+ REVERTIBLE_TYPE_TRAIT(__is_function);
+ REVERTIBLE_TYPE_TRAIT(__is_fundamental);
+ REVERTIBLE_TYPE_TRAIT(__is_integral);
+ REVERTIBLE_TYPE_TRAIT(__is_interface_class);
+ REVERTIBLE_TYPE_TRAIT(__is_literal);
+ REVERTIBLE_TYPE_TRAIT(__is_lvalue_expr);
+ REVERTIBLE_TYPE_TRAIT(__is_lvalue_reference);
+ REVERTIBLE_TYPE_TRAIT(__is_member_function_pointer);
+ REVERTIBLE_TYPE_TRAIT(__is_member_object_pointer);
+ REVERTIBLE_TYPE_TRAIT(__is_member_pointer);
+ REVERTIBLE_TYPE_TRAIT(__is_nothrow_assignable);
+ REVERTIBLE_TYPE_TRAIT(__is_nothrow_constructible);
+ REVERTIBLE_TYPE_TRAIT(__is_nothrow_destructible);
+ REVERTIBLE_TYPE_TRAIT(__is_nullptr);
+ REVERTIBLE_TYPE_TRAIT(__is_object);
+ REVERTIBLE_TYPE_TRAIT(__is_pod);
+ REVERTIBLE_TYPE_TRAIT(__is_pointer);
+ REVERTIBLE_TYPE_TRAIT(__is_polymorphic);
+ REVERTIBLE_TYPE_TRAIT(__is_reference);
+ REVERTIBLE_TYPE_TRAIT(__is_referenceable);
+ REVERTIBLE_TYPE_TRAIT(__is_rvalue_expr);
+ REVERTIBLE_TYPE_TRAIT(__is_rvalue_reference);
+ REVERTIBLE_TYPE_TRAIT(__is_same);
+ REVERTIBLE_TYPE_TRAIT(__is_scalar);
+ REVERTIBLE_TYPE_TRAIT(__is_scoped_enum);
+ REVERTIBLE_TYPE_TRAIT(__is_sealed);
+ REVERTIBLE_TYPE_TRAIT(__is_signed);
+ REVERTIBLE_TYPE_TRAIT(__is_standard_layout);
+ REVERTIBLE_TYPE_TRAIT(__is_trivial);
+ REVERTIBLE_TYPE_TRAIT(__is_trivially_assignable);
+ REVERTIBLE_TYPE_TRAIT(__is_trivially_constructible);
+ REVERTIBLE_TYPE_TRAIT(__is_trivially_copyable);
+ REVERTIBLE_TYPE_TRAIT(__is_unbounded_array);
+ REVERTIBLE_TYPE_TRAIT(__is_union);
+ REVERTIBLE_TYPE_TRAIT(__is_unsigned);
+ REVERTIBLE_TYPE_TRAIT(__is_void);
+ REVERTIBLE_TYPE_TRAIT(__is_volatile);
+ REVERTIBLE_TYPE_TRAIT(__reference_binds_to_temporary);
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) \
+ REVERTIBLE_TYPE_TRAIT(RTT_JOIN(__, Trait));
+#include "clang/Basic/TransformTypeTraits.def"
+#undef REVERTIBLE_TYPE_TRAIT
+#undef RTT_JOIN
+ }
+ llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind>::iterator Known =
+ RevertibleTypeTraits.find(II);
+ if (Known != RevertibleTypeTraits.end()) {
+ if (Kind)
+ *Kind = Known->second;
+ return true;
+ }
+ return false;
+}
+
+ExprResult Parser::ParseBuiltinPtrauthTypeDiscriminator() {
+ SourceLocation Loc = ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume())
+ return ExprError();
+
+ TypeResult Ty = ParseTypeName();
+ if (Ty.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+
+ SourceLocation EndLoc = Tok.getLocation();
+ T.consumeClose();
+ return Actions.ActOnUnaryExprOrTypeTraitExpr(
+ Loc, UETT_PtrAuthTypeDiscriminator,
+ /*isType=*/true, Ty.get().getAsOpaquePtr(), SourceRange(Loc, EndLoc));
+}
+
/// Parse a cast-expression, or, if \pisUnaryExpression is true, parse
/// a unary-expression.
///
@@ -999,6 +1121,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
// primary-expression
case tok::numeric_constant:
+ case tok::binary_data:
// constant: integer-constant
// constant: floating-constant
@@ -1047,6 +1170,12 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
break;
}
+ case tok::annot_embed: {
+ injectEmbedTokens();
+ return ParseCastExpression(ParseKind, isAddressOfOperand, isTypeCast,
+ isVectorLiteral, NotPrimaryExpression);
+ }
+
case tok::kw___super:
case tok::kw_decltype:
// Annotate the token and tail recurse.
@@ -1065,103 +1194,37 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
if (getLangOpts().CPlusPlus) {
// Avoid the unnecessary parse-time lookup in the common case
// where the syntax forbids a type.
- const Token &Next = NextToken();
+ Token Next = NextToken();
+
+ if (Next.is(tok::ellipsis) && Tok.is(tok::identifier) &&
+ GetLookAheadToken(2).is(tok::l_square)) {
+ // Annotate the token and tail recurse.
+ // If the token is not annotated, then it might be an expression pack
+ // indexing
+ if (!TryAnnotateTypeOrScopeToken() &&
+ Tok.is(tok::annot_pack_indexing_type))
+ return ParseCastExpression(ParseKind, isAddressOfOperand, isTypeCast,
+ isVectorLiteral, NotPrimaryExpression);
+ }
// If this identifier was reverted from a token ID, and the next token
// is a parenthesis, this is likely to be a use of a type trait. Check
// those tokens.
- if (Next.is(tok::l_paren) &&
- Tok.is(tok::identifier) &&
- Tok.getIdentifierInfo()->hasRevertedTokenIDToIdentifier()) {
+ else if (Next.is(tok::l_paren) && Tok.is(tok::identifier) &&
+ Tok.getIdentifierInfo()->hasRevertedTokenIDToIdentifier()) {
IdentifierInfo *II = Tok.getIdentifierInfo();
- // Build up the mapping of revertible type traits, for future use.
- if (RevertibleTypeTraits.empty()) {
-#define RTT_JOIN(X,Y) X##Y
-#define REVERTIBLE_TYPE_TRAIT(Name) \
- RevertibleTypeTraits[PP.getIdentifierInfo(#Name)] \
- = RTT_JOIN(tok::kw_,Name)
-
- REVERTIBLE_TYPE_TRAIT(__is_abstract);
- REVERTIBLE_TYPE_TRAIT(__is_aggregate);
- REVERTIBLE_TYPE_TRAIT(__is_arithmetic);
- REVERTIBLE_TYPE_TRAIT(__is_array);
- REVERTIBLE_TYPE_TRAIT(__is_assignable);
- REVERTIBLE_TYPE_TRAIT(__is_base_of);
- REVERTIBLE_TYPE_TRAIT(__is_bounded_array);
- REVERTIBLE_TYPE_TRAIT(__is_class);
- REVERTIBLE_TYPE_TRAIT(__is_complete_type);
- REVERTIBLE_TYPE_TRAIT(__is_compound);
- REVERTIBLE_TYPE_TRAIT(__is_const);
- REVERTIBLE_TYPE_TRAIT(__is_constructible);
- REVERTIBLE_TYPE_TRAIT(__is_convertible);
- REVERTIBLE_TYPE_TRAIT(__is_convertible_to);
- REVERTIBLE_TYPE_TRAIT(__is_destructible);
- REVERTIBLE_TYPE_TRAIT(__is_empty);
- REVERTIBLE_TYPE_TRAIT(__is_enum);
- REVERTIBLE_TYPE_TRAIT(__is_floating_point);
- REVERTIBLE_TYPE_TRAIT(__is_final);
- REVERTIBLE_TYPE_TRAIT(__is_function);
- REVERTIBLE_TYPE_TRAIT(__is_fundamental);
- REVERTIBLE_TYPE_TRAIT(__is_integral);
- REVERTIBLE_TYPE_TRAIT(__is_interface_class);
- REVERTIBLE_TYPE_TRAIT(__is_literal);
- REVERTIBLE_TYPE_TRAIT(__is_lvalue_expr);
- REVERTIBLE_TYPE_TRAIT(__is_lvalue_reference);
- REVERTIBLE_TYPE_TRAIT(__is_member_function_pointer);
- REVERTIBLE_TYPE_TRAIT(__is_member_object_pointer);
- REVERTIBLE_TYPE_TRAIT(__is_member_pointer);
- REVERTIBLE_TYPE_TRAIT(__is_nothrow_assignable);
- REVERTIBLE_TYPE_TRAIT(__is_nothrow_constructible);
- REVERTIBLE_TYPE_TRAIT(__is_nothrow_destructible);
- REVERTIBLE_TYPE_TRAIT(__is_nullptr);
- REVERTIBLE_TYPE_TRAIT(__is_object);
- REVERTIBLE_TYPE_TRAIT(__is_pod);
- REVERTIBLE_TYPE_TRAIT(__is_pointer);
- REVERTIBLE_TYPE_TRAIT(__is_polymorphic);
- REVERTIBLE_TYPE_TRAIT(__is_reference);
- REVERTIBLE_TYPE_TRAIT(__is_referenceable);
- REVERTIBLE_TYPE_TRAIT(__is_rvalue_expr);
- REVERTIBLE_TYPE_TRAIT(__is_rvalue_reference);
- REVERTIBLE_TYPE_TRAIT(__is_same);
- REVERTIBLE_TYPE_TRAIT(__is_scalar);
- REVERTIBLE_TYPE_TRAIT(__is_scoped_enum);
- REVERTIBLE_TYPE_TRAIT(__is_sealed);
- REVERTIBLE_TYPE_TRAIT(__is_signed);
- REVERTIBLE_TYPE_TRAIT(__is_standard_layout);
- REVERTIBLE_TYPE_TRAIT(__is_trivial);
- REVERTIBLE_TYPE_TRAIT(__is_trivially_assignable);
- REVERTIBLE_TYPE_TRAIT(__is_trivially_constructible);
- REVERTIBLE_TYPE_TRAIT(__is_trivially_copyable);
- REVERTIBLE_TYPE_TRAIT(__is_unbounded_array);
- REVERTIBLE_TYPE_TRAIT(__is_union);
- REVERTIBLE_TYPE_TRAIT(__is_unsigned);
- REVERTIBLE_TYPE_TRAIT(__is_void);
- REVERTIBLE_TYPE_TRAIT(__is_volatile);
- REVERTIBLE_TYPE_TRAIT(__reference_binds_to_temporary);
- REVERTIBLE_TYPE_TRAIT(__reference_constructs_from_temporary);
-#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) \
- REVERTIBLE_TYPE_TRAIT(RTT_JOIN(__, Trait));
-#include "clang/Basic/TransformTypeTraits.def"
-#undef REVERTIBLE_TYPE_TRAIT
-#undef RTT_JOIN
- }
-
- // If we find that this is in fact the name of a type trait,
- // update the token kind in place and parse again to treat it as
- // the appropriate kind of type trait.
- llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind>::iterator Known
- = RevertibleTypeTraits.find(II);
- if (Known != RevertibleTypeTraits.end()) {
- Tok.setKind(Known->second);
+ tok::TokenKind Kind;
+ if (isRevertibleTypeTrait(II, &Kind)) {
+ Tok.setKind(Kind);
return ParseCastExpression(ParseKind, isAddressOfOperand,
NotCastExpr, isTypeCast,
isVectorLiteral, NotPrimaryExpression);
}
}
- if ((!ColonIsSacred && Next.is(tok::colon)) ||
- Next.isOneOf(tok::coloncolon, tok::less, tok::l_paren,
- tok::l_brace)) {
+ else if ((!ColonIsSacred && Next.is(tok::colon)) ||
+ Next.isOneOf(tok::coloncolon, tok::less, tok::l_paren,
+ tok::l_brace)) {
// If TryAnnotateTypeOrScopeToken annotates the token, tail recurse.
if (TryAnnotateTypeOrScopeToken())
return ExprError();
@@ -1187,7 +1250,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
if (Tok.is(tok::code_completion) && &II != Ident_super) {
cutOffParsing();
- Actions.CodeCompleteObjCClassPropertyRefExpr(
+ Actions.CodeCompletion().CodeCompleteObjCClassPropertyRefExpr(
getCurScope(), II, ILoc, ExprStatementTokLoc == ILoc);
return ExprError();
}
@@ -1200,8 +1263,8 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
IdentifierInfo &PropertyName = *Tok.getIdentifierInfo();
SourceLocation PropertyLoc = ConsumeToken();
- Res = Actions.ActOnClassPropertyRefExpr(II, PropertyName,
- ILoc, PropertyLoc);
+ Res = Actions.ObjC().ActOnClassPropertyRefExpr(II, PropertyName, ILoc,
+ PropertyLoc);
break;
}
@@ -1289,6 +1352,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
/*isVectorLiteral=*/false,
NotPrimaryExpression);
}
+ Res = tryParseCXXPackIndexingExpression(Res);
if (!Res.isInvalid() && Tok.is(tok::less))
checkPotentialAngleBracket(Res);
break;
@@ -1451,8 +1515,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
return Res;
}
case tok::kw__Alignof: // unary-expression: '_Alignof' '(' type-name ')'
- if (!getLangOpts().C11)
- Diag(Tok, diag::ext_c11_feature) << Tok.getName();
+ diagnoseUseOfC11Keyword(Tok);
[[fallthrough]];
case tok::kw_alignof: // unary-expression: 'alignof' '(' type-id ')'
case tok::kw___alignof: // unary-expression: '__alignof' unary-expression
@@ -1549,6 +1612,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
[[fallthrough]];
case tok::annot_decltype:
+ case tok::annot_pack_indexing_type:
case tok::kw_char:
case tok::kw_wchar_t:
case tok::kw_char8_t:
@@ -1597,7 +1661,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
if (TryAnnotateTypeOrScopeToken())
return ExprError();
- if (!Actions.isSimpleTypeSpecifier(Tok.getKind()))
+ if (!Tok.isSimpleTypeSpecifier(getLangOpts()))
// We are trying to parse a simple-type-specifier but might not get such
// a token after error recovery.
return ExprError();
@@ -1764,6 +1828,9 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
Res = ParseArrayTypeTrait();
break;
+ case tok::kw___builtin_ptrauth_type_discriminator:
+ return ParseBuiltinPtrauthTypeDiscriminator();
+
case tok::kw___is_lvalue_expr:
case tok::kw___is_rvalue_expr:
if (NotPrimaryExpression)
@@ -1782,8 +1849,8 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
break;
case tok::code_completion: {
cutOffParsing();
- Actions.CodeCompleteExpression(getCurScope(),
- PreferredType.get(Tok.getLocation()));
+ Actions.CodeCompletion().CodeCompleteExpression(
+ getCurScope(), PreferredType.get(Tok.getLocation()));
return ExprError();
}
#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) case tok::kw___##Trait:
@@ -1798,7 +1865,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
}
goto ExpectedExpression;
case tok::l_square:
- if (getLangOpts().CPlusPlus11) {
+ if (getLangOpts().CPlusPlus) {
if (getLangOpts().ObjC) {
// C++11 lambda expressions and Objective-C message sends both start with a
// square bracket. There are three possibilities here:
@@ -1927,7 +1994,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
return LHS;
cutOffParsing();
- Actions.CodeCompletePostfixExpression(
+ Actions.CodeCompletion().CodeCompletePostfixExpression(
getCurScope(), LHS, PreferredType.get(Tok.getLocation()));
return ExprError();
@@ -2010,7 +2077,8 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
if (Tok.is(tok::colon)) {
// Consume ':'
ColonLocFirst = ConsumeToken();
- Length = Actions.CorrectDelayedTyposInExpr(ParseExpression());
+ if (Tok.isNot(tok::r_square))
+ Length = Actions.CorrectDelayedTyposInExpr(ParseExpression());
}
} else if (ArgExprs.size() <= 1 && getLangOpts().OpenMP) {
ColonProtectionRAIIObject RAII(*this);
@@ -2042,15 +2110,22 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
if (!LHS.isInvalid() && !HasError && !Length.isInvalid() &&
!Stride.isInvalid() && Tok.is(tok::r_square)) {
if (ColonLocFirst.isValid() || ColonLocSecond.isValid()) {
- // FIXME: OpenACC hasn't implemented Sema/Array section handling at a
- // semantic level yet. For now, just reuse the OpenMP implementation
- // as it gets the parsing/type management mostly right, and we can
- // replace this call to ActOnOpenACCArraySectionExpr in the future.
- // Eventually we'll genericize the OPenMPArraySectionExpr type as
- // well.
- LHS = Actions.ActOnOMPArraySectionExpr(
- LHS.get(), Loc, ArgExprs.empty() ? nullptr : ArgExprs[0],
- ColonLocFirst, ColonLocSecond, Length.get(), Stride.get(), RLoc);
+ // Like above, AllowOpenACCArraySections is 'more specific' and only
+ // enabled when actively parsing a 'var' in a 'var-list' during
+ // clause/'cache' construct parsing, so it is more specific. So we
+ // should do it first, so that the correct node gets created.
+ if (AllowOpenACCArraySections) {
+ assert(!Stride.isUsable() && !ColonLocSecond.isValid() &&
+ "Stride/second colon not allowed for OpenACC");
+ LHS = Actions.OpenACC().ActOnArraySectionExpr(
+ LHS.get(), Loc, ArgExprs.empty() ? nullptr : ArgExprs[0],
+ ColonLocFirst, Length.get(), RLoc);
+ } else {
+ LHS = Actions.OpenMP().ActOnOMPArraySectionExpr(
+ LHS.get(), Loc, ArgExprs.empty() ? nullptr : ArgExprs[0],
+ ColonLocFirst, ColonLocSecond, Length.get(), Stride.get(),
+ RLoc);
+ }
} else {
LHS = Actions.ActOnArraySubscriptExpr(getCurScope(), LHS.get(), Loc,
ArgExprs, RLoc);
@@ -2103,10 +2178,8 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
}
if (!LHS.isInvalid()) {
- ExprResult ECResult = Actions.ActOnCUDAExecConfigExpr(getCurScope(),
- OpenLoc,
- ExecConfigExprs,
- CloseLoc);
+ ExprResult ECResult = Actions.CUDA().ActOnExecConfigExpr(
+ getCurScope(), OpenLoc, ExecConfigExprs, CloseLoc);
if (ECResult.isInvalid())
LHS = ExprError();
else
@@ -2119,8 +2192,9 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
ExprVector ArgExprs;
auto RunSignatureHelp = [&]() -> QualType {
- QualType PreferredType = Actions.ProduceCallSignatureHelp(
- LHS.get(), ArgExprs, PT.getOpenLocation());
+ QualType PreferredType =
+ Actions.CodeCompletion().ProduceCallSignatureHelp(
+ LHS.get(), ArgExprs, PT.getOpenLocation());
CalledSignatureHelp = true;
return PreferredType;
};
@@ -2244,7 +2318,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
// Code completion for a member access expression.
cutOffParsing();
- Actions.CodeCompleteMemberReferenceExpr(
+ Actions.CodeCompletion().CodeCompleteMemberReferenceExpr(
getCurScope(), Base, CorrectedBase, OpLoc, OpKind == tok::arrow,
Base && ExprStatementTokLoc == Base->getBeginLoc(),
PreferredType.get(Tok.getLocation()));
@@ -2465,8 +2539,8 @@ ExprResult Parser::ParseSYCLUniqueStableNameExpression() {
if (T.consumeClose())
return ExprError();
- return Actions.ActOnSYCLUniqueStableNameExpr(OpLoc, T.getOpenLocation(),
- T.getCloseLocation(), Ty.get());
+ return Actions.SYCL().ActOnUniqueStableNameExpr(
+ OpLoc, T.getOpenLocation(), T.getCloseLocation(), Ty.get());
}
/// Parse a sizeof or alignof expression.
@@ -2966,7 +3040,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteExpression(
+ Actions.CodeCompletion().CodeCompleteExpression(
getCurScope(), PreferredType.get(Tok.getLocation()),
/*IsParenthesized=*/ExprType >= CompoundLiteral);
return ExprError();
@@ -3059,9 +3133,9 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
if (Ty.isInvalid() || SubExpr.isInvalid())
return ExprError();
- return Actions.ActOnObjCBridgedCast(getCurScope(), OpenLoc, Kind,
- BridgeKeywordLoc, Ty.get(),
- RParenLoc, SubExpr.get());
+ return Actions.ObjC().ActOnObjCBridgedCast(getCurScope(), OpenLoc, Kind,
+ BridgeKeywordLoc, Ty.get(),
+ RParenLoc, SubExpr.get());
} else if (ExprType >= CompoundLiteral &&
isTypeIdInParens(isAmbiguousTypeId)) {
@@ -3252,7 +3326,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
if (ErrorFound) {
Result = ExprError();
} else if (!Result.isInvalid()) {
- Result = Actions.ActOnOMPArrayShapingExpr(
+ Result = Actions.OpenMP().ActOnOMPArrayShapingExpr(
Result.get(), OpenLoc, RParenLoc, OMPDimensions, OMPBracketsRanges);
}
return Result;
@@ -3377,8 +3451,8 @@ ExprResult Parser::ParseStringLiteralExpression(bool AllowUserDefinedLiteral,
/// \endverbatim
ExprResult Parser::ParseGenericSelectionExpression() {
assert(Tok.is(tok::kw__Generic) && "_Generic keyword expected");
- if (!getLangOpts().C11)
- Diag(Tok, diag::ext_c11_feature) << Tok.getName();
+
+ diagnoseUseOfC11Keyword(Tok);
SourceLocation KeyLoc = ConsumeToken();
BalancedDelimiterTracker T(*this, tok::l_paren);
@@ -3397,7 +3471,8 @@ ExprResult Parser::ParseGenericSelectionExpression() {
}
const auto *LIT = cast<LocInfoType>(ControllingType.get().get());
SourceLocation Loc = LIT->getTypeSourceInfo()->getTypeLoc().getBeginLoc();
- Diag(Loc, diag::ext_generic_with_type_arg);
+ Diag(Loc, getLangOpts().C2y ? diag::warn_c2y_compat_generic_with_type_arg
+ : diag::ext_c2y_generic_with_type_arg);
} else {
// C11 6.5.1.1p3 "The controlling expression of a generic selection is
// not evaluated."
@@ -3526,6 +3601,31 @@ ExprResult Parser::ParseFoldExpression(ExprResult LHS,
T.getCloseLocation());
}
+void Parser::injectEmbedTokens() {
+ EmbedAnnotationData *Data =
+ reinterpret_cast<EmbedAnnotationData *>(Tok.getAnnotationValue());
+ MutableArrayRef<Token> Toks(PP.getPreprocessorAllocator().Allocate<Token>(
+ Data->BinaryData.size() * 2 - 1),
+ Data->BinaryData.size() * 2 - 1);
+ unsigned I = 0;
+ for (auto &Byte : Data->BinaryData) {
+ Toks[I].startToken();
+ Toks[I].setKind(tok::binary_data);
+ Toks[I].setLocation(Tok.getLocation());
+ Toks[I].setLength(1);
+ Toks[I].setLiteralData(&Byte);
+ if (I != ((Data->BinaryData.size() - 1) * 2)) {
+ Toks[I + 1].startToken();
+ Toks[I + 1].setKind(tok::comma);
+ Toks[I + 1].setLocation(Tok.getLocation());
+ }
+ I += 2;
+ }
+ PP.EnterTokenStream(std::move(Toks), /*DisableMacroExpansion=*/true,
+ /*IsReinject=*/true);
+ ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
+}
+
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
///
/// \verbatim
@@ -3643,7 +3743,8 @@ bool Parser::ParseSimpleExpressionList(SmallVectorImpl<Expr *> &Exprs) {
void Parser::ParseBlockId(SourceLocation CaretLoc) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Type);
+ Actions.CodeCompletion().CodeCompleteOrdinaryName(
+ getCurScope(), SemaCodeCompletion::PCC_Type);
return;
}
@@ -3777,7 +3878,7 @@ ExprResult Parser::ParseBlockLiteralExpression() {
/// '__objc_no'
ExprResult Parser::ParseObjCBoolLiteral() {
tok::TokenKind Kind = Tok.getKind();
- return Actions.ActOnObjCBoolLiteral(ConsumeToken(), Kind);
+ return Actions.ObjC().ActOnObjCBoolLiteral(ConsumeToken(), Kind);
}
/// Validate availability spec list, emitting diagnostics if necessary. Returns
@@ -3832,7 +3933,7 @@ std::optional<AvailabilitySpec> Parser::ParseAvailabilitySpec() {
// Parse the platform name.
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteAvailabilityPlatformName();
+ Actions.CodeCompletion().CodeCompleteAvailabilityPlatformName();
return std::nullopt;
}
if (Tok.isNot(tok::identifier)) {
@@ -3851,7 +3952,8 @@ std::optional<AvailabilitySpec> Parser::ParseAvailabilitySpec() {
StringRef Platform =
AvailabilityAttr::canonicalizePlatformName(GivenPlatform);
- if (AvailabilityAttr::getPrettyPlatformName(Platform).empty()) {
+ if (AvailabilityAttr::getPrettyPlatformName(Platform).empty() ||
+ (GivenPlatform.contains("xros") || GivenPlatform.contains("xrOS"))) {
Diag(PlatformIdentifier->Loc,
diag::err_avail_query_unrecognized_platform_name)
<< GivenPlatform;
@@ -3897,6 +3999,6 @@ ExprResult Parser::ParseAvailabilityCheckExpr(SourceLocation BeginLoc) {
if (Parens.consumeClose())
return ExprError();
- return Actions.ActOnObjCAvailabilityCheckExpr(AvailSpecs, BeginLoc,
- Parens.getCloseLocation());
+ return Actions.ObjC().ActOnObjCAvailabilityCheckExpr(
+ AvailSpecs, BeginLoc, Parens.getCloseLocation());
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp b/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
index d61f414406f0..1d364f77a814 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
@@ -14,6 +14,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TokenKinds.h"
#include "clang/Lex/LiteralSupport.h"
#include "clang/Parse/ParseDiagnostic.h"
@@ -23,6 +24,7 @@
#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaCodeCompletion.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <numeric>
@@ -157,7 +159,8 @@ void Parser::CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectType,
bool Parser::ParseOptionalCXXScopeSpecifier(
CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors,
bool EnteringContext, bool *MayBePseudoDestructor, bool IsTypename,
- IdentifierInfo **LastII, bool OnlyNamespace, bool InUsingDeclaration) {
+ const IdentifierInfo **LastII, bool OnlyNamespace,
+ bool InUsingDeclaration) {
assert(getLangOpts().CPlusPlus &&
"Call sites of this function should be guarded by checking for C++");
@@ -233,6 +236,34 @@ bool Parser::ParseOptionalCXXScopeSpecifier(
HasScopeSpecifier = true;
}
+ else if (!HasScopeSpecifier && Tok.is(tok::identifier) &&
+ GetLookAheadToken(1).is(tok::ellipsis) &&
+ GetLookAheadToken(2).is(tok::l_square)) {
+ SourceLocation Start = Tok.getLocation();
+ DeclSpec DS(AttrFactory);
+ SourceLocation CCLoc;
+ SourceLocation EndLoc = ParsePackIndexingType(DS);
+ if (DS.getTypeSpecType() == DeclSpec::TST_error)
+ return false;
+
+ QualType Type = Actions.ActOnPackIndexingType(
+ DS.getRepAsType().get(), DS.getPackIndexingExpr(), DS.getBeginLoc(),
+ DS.getEllipsisLoc());
+
+ if (Type.isNull())
+ return false;
+
+ if (!TryConsumeToken(tok::coloncolon, CCLoc)) {
+ AnnotateExistingIndexedTypeNamePack(ParsedType::make(Type), Start,
+ EndLoc);
+ return false;
+ }
+ if (Actions.ActOnCXXNestedNameSpecifierIndexedPack(SS, DS, CCLoc,
+ std::move(Type)))
+ SS.SetInvalid(SourceRange(Start, CCLoc));
+ HasScopeSpecifier = true;
+ }
+
// Preferred type might change when parsing qualifiers, we need the original.
auto SavedType = PreferredType;
while (true) {
@@ -241,9 +272,9 @@ bool Parser::ParseOptionalCXXScopeSpecifier(
cutOffParsing();
// Code completion for a nested-name-specifier, where the code
// completion token follows the '::'.
- Actions.CodeCompleteQualifiedId(getCurScope(), SS, EnteringContext,
- InUsingDeclaration, ObjectType.get(),
- SavedType.get(SS.getBeginLoc()));
+ Actions.CodeCompletion().CodeCompleteQualifiedId(
+ getCurScope(), SS, EnteringContext, InUsingDeclaration,
+ ObjectType.get(), SavedType.get(SS.getBeginLoc()));
// Include code completion token into the range of the scope otherwise
// when we try to annotate the scope tokens the dangling code completion
// token will cause assertion in
@@ -378,6 +409,20 @@ bool Parser::ParseOptionalCXXScopeSpecifier(
continue;
}
+ switch (Tok.getKind()) {
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) case tok::kw___##Trait:
+#include "clang/Basic/TransformTypeTraits.def"
+ if (!NextToken().is(tok::l_paren)) {
+ Tok.setKind(tok::identifier);
+ Diag(Tok, diag::ext_keyword_as_ident)
+ << Tok.getIdentifierInfo()->getName() << 0;
+ continue;
+ }
+ [[fallthrough]];
+ default:
+ break;
+ }
+
// The rest of the nested-name-specifier possibilities start with
// tok::identifier.
if (Tok.isNot(tok::identifier))
@@ -617,11 +662,38 @@ ExprResult Parser::tryParseCXXIdExpression(CXXScopeSpec &SS,
break;
}
+ // Might be a pack index expression!
+ E = tryParseCXXPackIndexingExpression(E);
+
if (!E.isInvalid() && !E.isUnset() && Tok.is(tok::less))
checkPotentialAngleBracket(E);
return E;
}
+ExprResult Parser::ParseCXXPackIndexingExpression(ExprResult PackIdExpression) {
+ assert(Tok.is(tok::ellipsis) && NextToken().is(tok::l_square) &&
+ "expected ...[");
+ SourceLocation EllipsisLoc = ConsumeToken();
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+ ExprResult IndexExpr = ParseConstantExpression();
+ if (T.consumeClose() || IndexExpr.isInvalid())
+ return ExprError();
+ return Actions.ActOnPackIndexingExpr(getCurScope(), PackIdExpression.get(),
+ EllipsisLoc, T.getOpenLocation(),
+ IndexExpr.get(), T.getCloseLocation());
+}
+
+ExprResult
+Parser::tryParseCXXPackIndexingExpression(ExprResult PackIdExpression) {
+ ExprResult E = PackIdExpression;
+ if (!PackIdExpression.isInvalid() && !PackIdExpression.isUnset() &&
+ Tok.is(tok::ellipsis) && NextToken().is(tok::l_square)) {
+ E = ParseCXXPackIndexingExpression(E);
+ }
+ return E;
+}
+
/// ParseCXXIdExpression - Handle id-expression.
///
/// id-expression:
@@ -751,9 +823,8 @@ ExprResult Parser::ParseLambdaExpression() {
///
/// If we are not looking at a lambda expression, returns ExprError().
ExprResult Parser::TryParseLambdaExpression() {
- assert(getLangOpts().CPlusPlus11
- && Tok.is(tok::l_square)
- && "Not at the start of a possible lambda expression.");
+ assert(getLangOpts().CPlusPlus && Tok.is(tok::l_square) &&
+ "Not at the start of a possible lambda expression.");
const Token Next = NextToken();
if (Next.is(tok::eof)) // Nothing else to lookup here...
@@ -885,8 +956,9 @@ bool Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
if (Tok.is(tok::code_completion) &&
!(getLangOpts().ObjC && Tentative)) {
cutOffParsing();
- Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
- /*AfterAmpersand=*/false);
+ Actions.CodeCompletion().CodeCompleteLambdaIntroducer(
+ getCurScope(), Intro,
+ /*AfterAmpersand=*/false);
break;
}
@@ -902,10 +974,11 @@ bool Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
// If we're in Objective-C++ and we have a bare '[', then this is more
// likely to be a message receiver.
if (getLangOpts().ObjC && Tentative && First)
- Actions.CodeCompleteObjCMessageReceiver(getCurScope());
+ Actions.CodeCompletion().CodeCompleteObjCMessageReceiver(getCurScope());
else
- Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
- /*AfterAmpersand=*/false);
+ Actions.CodeCompletion().CodeCompleteLambdaIntroducer(
+ getCurScope(), Intro,
+ /*AfterAmpersand=*/false);
break;
}
@@ -951,8 +1024,9 @@ bool Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
- /*AfterAmpersand=*/true);
+ Actions.CodeCompletion().CodeCompleteLambdaIntroducer(
+ getCurScope(), Intro,
+ /*AfterAmpersand=*/true);
break;
}
}
@@ -1271,7 +1345,9 @@ static void DiagnoseStaticSpecifierRestrictions(Parser &P,
ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
LambdaIntroducer &Intro) {
SourceLocation LambdaBeginLoc = Intro.Range.getBegin();
- Diag(LambdaBeginLoc, diag::warn_cxx98_compat_lambda);
+ Diag(LambdaBeginLoc, getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_lambda
+ : diag::ext_lambda);
PrettyStackTraceLoc CrashInfo(PP.getSourceManager(), LambdaBeginLoc,
"lambda expression parsing");
@@ -1330,6 +1406,16 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
Diag(RAngleLoc,
diag::err_lambda_template_parameter_list_empty);
} else {
+ // We increase the template depth before recursing into a requires-clause.
+ //
+ // This depth is used for setting up a LambdaScopeInfo (in
+ // Sema::RecordParsingTemplateParameterDepth), which is used later when
+ // inventing template parameters in InventTemplateParameter.
+ //
+ // This way, abbreviated generic lambdas could have different template
+ // depths, avoiding substitution into the wrong template parameters during
+ // constraint satisfaction check.
+ ++CurTemplateDepthTracker;
ExprResult RequiresClause;
if (TryConsumeToken(tok::kw_requires)) {
RequiresClause =
@@ -1341,7 +1427,6 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
Actions.ActOnLambdaExplicitTemplateParameterList(
Intro, LAngleLoc, TemplateParams, RAngleLoc, RequiresClause);
- ++CurTemplateDepthTracker;
}
}
@@ -1496,7 +1581,10 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
TrailingReturnTypeLoc, &DS),
std::move(Attributes), DeclEndLoc);
- Actions.ActOnLambdaClosureQualifiers(Intro, MutableLoc);
+ // We have called ActOnLambdaClosureQualifiers for parentheses-less cases
+ // above.
+ if (HasParentheses)
+ Actions.ActOnLambdaClosureQualifiers(Intro, MutableLoc);
if (HasParentheses && Tok.is(tok::kw_requires))
ParseTrailingRequiresClause(D);
@@ -1810,6 +1898,15 @@ Parser::ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
return ExprError();
}
+ // pack-index-specifier
+ if (GetLookAheadToken(1).is(tok::ellipsis) &&
+ GetLookAheadToken(2).is(tok::l_square)) {
+ DeclSpec DS(AttrFactory);
+ ParsePackIndexingType(DS);
+ return Actions.ActOnPseudoDestructorExpr(getCurScope(), Base, OpLoc, OpKind,
+ TildeLoc, DS);
+ }
+
// Parse the second type.
UnqualifiedId SecondTypeName;
IdentifierInfo *Name = Tok.getIdentifierInfo();
@@ -1942,9 +2039,10 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
auto RunSignatureHelp = [&]() {
QualType PreferredType;
if (TypeRep)
- PreferredType = Actions.ProduceConstructorSignatureHelp(
- TypeRep.get()->getCanonicalTypeInternal(), DS.getEndLoc(), Exprs,
- T.getOpenLocation(), /*Braced=*/false);
+ PreferredType =
+ Actions.CodeCompletion().ProduceConstructorSignatureHelp(
+ TypeRep.get()->getCanonicalTypeInternal(), DS.getEndLoc(),
+ Exprs, T.getOpenLocation(), /*Braced=*/false);
CalledSignatureHelp = true;
return PreferredType;
};
@@ -2051,7 +2149,8 @@ Parser::ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc,
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Condition);
+ Actions.CodeCompletion().CodeCompleteOrdinaryName(
+ getCurScope(), SemaCodeCompletion::PCC_Condition);
return Sema::ConditionError();
}
@@ -2253,7 +2352,6 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
getTypeAnnotation(Tok), Policy);
DS.SetRangeEnd(Tok.getAnnotationEndLoc());
ConsumeAnnotationToken();
-
DS.Finish(Actions, Policy);
return;
}
@@ -2364,6 +2462,10 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
DS.SetRangeEnd(ParseDecltypeSpecifier(DS));
return DS.Finish(Actions, Policy);
+ case tok::annot_pack_indexing_type:
+ DS.SetRangeEnd(ParsePackIndexingType(DS));
+ return DS.Finish(Actions, Policy);
+
// GNU typeof support.
case tok::kw_typeof:
ParseTypeofSpecifier(DS);
@@ -2549,7 +2651,7 @@ bool Parser::ParseUnqualifiedIdTemplateId(
// UnqualifiedId.
// FIXME: Store name for literal operator too.
- IdentifierInfo *TemplateII =
+ const IdentifierInfo *TemplateII =
Id.getKind() == UnqualifiedIdKind::IK_Identifier ? Id.Identifier
: nullptr;
OverloadedOperatorKind OpKind =
@@ -2700,7 +2802,7 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
// Don't try to parse any further.
cutOffParsing();
// Code completion for the operator name.
- Actions.CodeCompleteOperatorName(getCurScope());
+ Actions.CodeCompletion().CodeCompleteOperatorName(getCurScope());
return true;
}
@@ -2928,13 +3030,23 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
SS, ObjectType, ObjectHadErrors,
TemplateKWLoc ? *TemplateKWLoc : SourceLocation(), Id, IdLoc,
EnteringContext, Result, TemplateSpecified);
- else if (TemplateSpecified &&
- Actions.ActOnTemplateName(
- getCurScope(), SS, *TemplateKWLoc, Result, ObjectType,
- EnteringContext, Template,
- /*AllowInjectedClassName*/ true) == TNK_Non_template)
- return true;
+ if (TemplateSpecified) {
+ TemplateNameKind TNK =
+ Actions.ActOnTemplateName(getCurScope(), SS, *TemplateKWLoc, Result,
+ ObjectType, EnteringContext, Template,
+ /*AllowInjectedClassName=*/true);
+ if (TNK == TNK_Non_template)
+ return true;
+
+ // C++2c [tem.names]p6
+ // A name prefixed by the keyword template shall be followed by a template
+ // argument list or refer to a class template or an alias template.
+ if ((TNK == TNK_Function_template || TNK == TNK_Dependent_template_name ||
+ TNK == TNK_Var_template) &&
+ !Tok.is(tok::less))
+ Diag(IdLoc, diag::missing_template_arg_list_after_template_kw);
+ }
return false;
}
@@ -3258,10 +3370,12 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
// the passing DeclaratorInfo is valid, e.g. running SignatureHelp on
// `new decltype(invalid) (^)`.
if (TypeRep)
- PreferredType = Actions.ProduceConstructorSignatureHelp(
- TypeRep.get()->getCanonicalTypeInternal(),
- DeclaratorInfo.getEndLoc(), ConstructorArgs, ConstructorLParen,
- /*Braced=*/false);
+ PreferredType =
+ Actions.CodeCompletion().ProduceConstructorSignatureHelp(
+ TypeRep.get()->getCanonicalTypeInternal(),
+ DeclaratorInfo.getEndLoc(), ConstructorArgs,
+ ConstructorLParen,
+ /*Braced=*/false);
CalledSignatureHelp = true;
return PreferredType;
};
@@ -3832,7 +3946,10 @@ ExprResult Parser::ParseTypeTrait() {
SmallVector<ParsedType, 2> Args;
do {
// Parse the next type.
- TypeResult Ty = ParseTypeName();
+ TypeResult Ty = ParseTypeName(/*SourceRange=*/nullptr,
+ getLangOpts().CPlusPlus
+ ? DeclaratorContext::TemplateTypeArg
+ : DeclaratorContext::TypeName);
if (Ty.isInvalid()) {
Parens.skipToEnd();
return ExprError();
@@ -3874,7 +3991,8 @@ ExprResult Parser::ParseArrayTypeTrait() {
if (T.expectAndConsume())
return ExprError();
- TypeResult Ty = ParseTypeName();
+ TypeResult Ty = ParseTypeName(/*SourceRange=*/nullptr,
+ DeclaratorContext::TemplateTypeArg);
if (Ty.isInvalid()) {
SkipUntil(tok::comma, StopAtSemi);
SkipUntil(tok::r_paren, StopAtSemi);
@@ -3896,6 +4014,9 @@ ExprResult Parser::ParseArrayTypeTrait() {
ExprResult DimExpr = ParseExpression();
T.consumeClose();
+ if (DimExpr.isInvalid())
+ return ExprError();
+
return Actions.ActOnArrayTypeTrait(ATT, Loc, Ty.get(), DimExpr.get(),
T.getCloseLocation());
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseHLSL.cpp b/contrib/llvm-project/clang/lib/Parse/ParseHLSL.cpp
index 4fc6a2203cec..b36ea4012c26 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseHLSL.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseHLSL.cpp
@@ -15,6 +15,7 @@
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
+#include "clang/Sema/SemaHLSL.h"
using namespace clang;
@@ -62,7 +63,7 @@ Decl *Parser::ParseHLSLBuffer(SourceLocation &DeclEnd) {
SourceLocation IdentifierLoc = ConsumeToken();
ParsedAttributes Attrs(AttrFactory);
- MaybeParseHLSLSemantics(Attrs, nullptr);
+ MaybeParseHLSLAnnotations(Attrs, nullptr);
ParseScope BufferScope(this, Scope::DeclScope);
BalancedDelimiterTracker T(*this, tok::l_brace);
@@ -71,9 +72,9 @@ Decl *Parser::ParseHLSLBuffer(SourceLocation &DeclEnd) {
return nullptr;
}
- Decl *D = Actions.ActOnStartHLSLBuffer(getCurScope(), IsCBuffer, BufferLoc,
- Identifier, IdentifierLoc,
- T.getOpenLocation());
+ Decl *D = Actions.HLSL().ActOnStartBuffer(getCurScope(), IsCBuffer, BufferLoc,
+ Identifier, IdentifierLoc,
+ T.getOpenLocation());
while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
// FIXME: support attribute on constants inside cbuffer/tbuffer.
@@ -87,7 +88,7 @@ Decl *Parser::ParseHLSLBuffer(SourceLocation &DeclEnd) {
T.skipToEnd();
DeclEnd = T.getCloseLocation();
BufferScope.Exit();
- Actions.ActOnFinishHLSLBuffer(D, DeclEnd);
+ Actions.HLSL().ActOnFinishBuffer(D, DeclEnd);
return nullptr;
}
}
@@ -95,7 +96,7 @@ Decl *Parser::ParseHLSLBuffer(SourceLocation &DeclEnd) {
T.consumeClose();
DeclEnd = T.getCloseLocation();
BufferScope.Exit();
- Actions.ActOnFinishHLSLBuffer(D, DeclEnd);
+ Actions.HLSL().ActOnFinishBuffer(D, DeclEnd);
Actions.ProcessDeclAttributeList(Actions.CurScope, D, Attrs);
return D;
@@ -117,12 +118,12 @@ static void fixSeparateAttrArgAndNumber(StringRef ArgStr, SourceLocation ArgLoc,
Slot = IdentifierLoc::create(Ctx, ArgLoc, PP.getIdentifierInfo(FixedArg));
}
-void Parser::ParseHLSLSemantics(ParsedAttributes &Attrs,
- SourceLocation *EndLoc) {
- // FIXME: HLSLSemantic is shared for Semantic and resource binding which is
- // confusing. Need a better name to avoid misunderstanding. Issue
- // https://github.com/llvm/llvm-project/issues/57882
- assert(Tok.is(tok::colon) && "Not a HLSL Semantic");
+void Parser::ParseHLSLAnnotations(ParsedAttributes &Attrs,
+ SourceLocation *EndLoc,
+ bool CouldBeBitField) {
+
+ assert(Tok.is(tok::colon) && "Not a HLSL Annotation");
+ Token OldToken = Tok;
ConsumeToken();
IdentifierInfo *II = nullptr;
@@ -132,6 +133,10 @@ void Parser::ParseHLSLSemantics(ParsedAttributes &Attrs,
II = Tok.getIdentifierInfo();
if (!II) {
+ if (CouldBeBitField) {
+ UnconsumeToken(OldToken);
+ return;
+ }
Diag(Tok.getLocation(), diag::err_expected_semantic_identifier);
return;
}
@@ -140,7 +145,7 @@ void Parser::ParseHLSLSemantics(ParsedAttributes &Attrs,
if (EndLoc)
*EndLoc = Tok.getLocation();
ParsedAttr::Kind AttrKind =
- ParsedAttr::getParsedKind(II, nullptr, ParsedAttr::AS_HLSLSemantic);
+ ParsedAttr::getParsedKind(II, nullptr, ParsedAttr::AS_HLSLAnnotation);
ArgsVector ArgExprs;
switch (AttrKind) {
@@ -175,7 +180,7 @@ void Parser::ParseHLSLSemantics(ParsedAttributes &Attrs,
ArgExprs.push_back(ParseIdentifierLoc());
// Add numeric_constant for fix-it.
- if (SpaceStr.equals("space") && Tok.is(tok::numeric_constant))
+ if (SpaceStr == "space" && Tok.is(tok::numeric_constant))
fixSeparateAttrArgAndNumber(SpaceStr, SpaceLoc, Tok, ArgExprs, *this,
Actions.Context, PP);
}
@@ -184,6 +189,94 @@ void Parser::ParseHLSLSemantics(ParsedAttributes &Attrs,
return;
}
} break;
+ case ParsedAttr::AT_HLSLPackOffset: {
+ // Parse 'packoffset( c[Subcomponent][.component] )'.
+ // Check '('.
+ if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after)) {
+ SkipUntil(tok::r_paren, StopAtSemi); // skip through )
+ return;
+ }
+ // Check c[Subcomponent] as an identifier.
+ if (!Tok.is(tok::identifier)) {
+ Diag(Tok.getLocation(), diag::err_expected) << tok::identifier;
+ SkipUntil(tok::r_paren, StopAtSemi); // skip through )
+ return;
+ }
+ StringRef OffsetStr = Tok.getIdentifierInfo()->getName();
+ SourceLocation SubComponentLoc = Tok.getLocation();
+ if (OffsetStr[0] != 'c') {
+ Diag(Tok.getLocation(), diag::err_hlsl_packoffset_invalid_reg)
+ << OffsetStr;
+ SkipUntil(tok::r_paren, StopAtSemi); // skip through )
+ return;
+ }
+ OffsetStr = OffsetStr.substr(1);
+ unsigned SubComponent = 0;
+ if (!OffsetStr.empty()) {
+ // Make sure SubComponent is a number.
+ if (OffsetStr.getAsInteger(10, SubComponent)) {
+ Diag(SubComponentLoc.getLocWithOffset(1),
+ diag::err_hlsl_unsupported_register_number);
+ SkipUntil(tok::r_paren, StopAtSemi); // skip through )
+ return;
+ }
+ }
+ unsigned Component = 0;
+ ConsumeToken(); // consume identifier.
+ SourceLocation ComponentLoc;
+ if (Tok.is(tok::period)) {
+ ConsumeToken(); // consume period.
+ if (!Tok.is(tok::identifier)) {
+ Diag(Tok.getLocation(), diag::err_expected) << tok::identifier;
+ SkipUntil(tok::r_paren, StopAtSemi); // skip through )
+ return;
+ }
+ StringRef ComponentStr = Tok.getIdentifierInfo()->getName();
+ ComponentLoc = Tok.getLocation();
+ ConsumeToken(); // consume identifier.
+ // Make sure Component is a single character.
+ if (ComponentStr.size() != 1) {
+ Diag(ComponentLoc, diag::err_hlsl_unsupported_component)
+ << ComponentStr;
+ SkipUntil(tok::r_paren, StopAtSemi); // skip through )
+ return;
+ }
+ switch (ComponentStr[0]) {
+ case 'x':
+ case 'r':
+ Component = 0;
+ break;
+ case 'y':
+ case 'g':
+ Component = 1;
+ break;
+ case 'z':
+ case 'b':
+ Component = 2;
+ break;
+ case 'w':
+ case 'a':
+ Component = 3;
+ break;
+ default:
+ Diag(ComponentLoc, diag::err_hlsl_unsupported_component)
+ << ComponentStr;
+ SkipUntil(tok::r_paren, StopAtSemi); // skip through )
+ return;
+ }
+ }
+ ASTContext &Ctx = Actions.getASTContext();
+ QualType SizeTy = Ctx.getSizeType();
+ uint64_t SizeTySize = Ctx.getTypeSize(SizeTy);
+ ArgExprs.push_back(IntegerLiteral::Create(
+ Ctx, llvm::APInt(SizeTySize, SubComponent), SizeTy, SubComponentLoc));
+ ArgExprs.push_back(IntegerLiteral::Create(
+ Ctx, llvm::APInt(SizeTySize, Component), SizeTy, ComponentLoc));
+ if (ExpectAndConsume(tok::r_paren, diag::err_expected)) {
+ SkipUntil(tok::r_paren, StopAtSemi); // skip through )
+ return;
+ }
+ } break;
case ParsedAttr::UnknownAttribute:
Diag(Loc, diag::err_unknown_hlsl_semantic) << II;
return;
@@ -191,10 +284,10 @@ void Parser::ParseHLSLSemantics(ParsedAttributes &Attrs,
case ParsedAttr::AT_HLSLSV_DispatchThreadID:
break;
default:
- llvm_unreachable("invalid HLSL Semantic");
+ llvm_unreachable("invalid HLSL Annotation");
break;
}
Attrs.addNew(II, Loc, nullptr, SourceLocation(), ArgExprs.data(),
- ArgExprs.size(), ParsedAttr::Form::HLSLSemantic());
+ ArgExprs.size(), ParsedAttr::Form::HLSLAnnotation());
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseInit.cpp b/contrib/llvm-project/clang/lib/Parse/ParseInit.cpp
index 637f21176792..0a9a359cdaf9 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseInit.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseInit.cpp
@@ -18,6 +18,8 @@
#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaCodeCompletion.h"
+#include "clang/Sema/SemaObjC.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
using namespace clang;
@@ -35,7 +37,7 @@ bool Parser::MayBeDesignationStart() {
return true;
case tok::l_square: { // designator: array-designator
- if (!PP.getLangOpts().CPlusPlus11)
+ if (!PP.getLangOpts().CPlusPlus)
return true;
// C++11 lambda expressions and C99 designators can be ambiguous all the
@@ -203,8 +205,9 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator(
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteDesignator(DesignatorCompletion.PreferredBaseType,
- DesignatorCompletion.InitExprs, Desig);
+ Actions.CodeCompletion().CodeCompleteDesignator(
+ DesignatorCompletion.PreferredBaseType,
+ DesignatorCompletion.InitExprs, Desig);
return ExprError();
}
if (Tok.isNot(tok::identifier)) {
@@ -290,15 +293,15 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator(
// Three cases. This is a message send to a type: [type foo]
// This is a message send to super: [super foo]
// This is a message sent to an expr: [super.bar foo]
- switch (Actions.getObjCMessageKind(
+ switch (Actions.ObjC().getObjCMessageKind(
getCurScope(), II, IILoc, II == Ident_super,
NextToken().is(tok::period), ReceiverType)) {
- case Sema::ObjCSuperMessage:
+ case SemaObjC::ObjCSuperMessage:
CheckArrayDesignatorSyntax(*this, StartLoc, Desig);
return ParseAssignmentExprWithObjCMessageExprStart(
StartLoc, ConsumeToken(), nullptr, nullptr);
- case Sema::ObjCClassMessage:
+ case SemaObjC::ObjCClassMessage:
CheckArrayDesignatorSyntax(*this, StartLoc, Desig);
ConsumeToken(); // the identifier
if (!ReceiverType) {
@@ -326,7 +329,7 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator(
ReceiverType,
nullptr);
- case Sema::ObjCInstanceMessage:
+ case SemaObjC::ObjCInstanceMessage:
// Fall through; we'll just parse the expression and
// (possibly) treat this like an Objective-C message send
// later.
@@ -425,6 +428,34 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator(
return ExprError();
}
+ExprResult Parser::createEmbedExpr() {
+ assert(Tok.getKind() == tok::annot_embed);
+ EmbedAnnotationData *Data =
+ reinterpret_cast<EmbedAnnotationData *>(Tok.getAnnotationValue());
+ ExprResult Res;
+ ASTContext &Context = Actions.getASTContext();
+ SourceLocation StartLoc = ConsumeAnnotationToken();
+ if (Data->BinaryData.size() == 1) {
+ Res = IntegerLiteral::Create(Context,
+ llvm::APInt(CHAR_BIT, Data->BinaryData.back()),
+ Context.UnsignedCharTy, StartLoc);
+ } else {
+ auto CreateStringLiteralFromStringRef = [&](StringRef Str, QualType Ty) {
+ llvm::APSInt ArraySize =
+ Context.MakeIntValue(Str.size(), Context.getSizeType());
+ QualType ArrayTy = Context.getConstantArrayType(
+ Ty, ArraySize, nullptr, ArraySizeModifier::Normal, 0);
+ return StringLiteral::Create(Context, Str, StringLiteralKind::Ordinary,
+ false, ArrayTy, StartLoc);
+ };
+
+ StringLiteral *BinaryDataArg = CreateStringLiteralFromStringRef(
+ Data->BinaryData, Context.UnsignedCharTy);
+ Res = Actions.ActOnEmbedExpr(StartLoc, BinaryDataArg);
+ }
+ return Res;
+}
+
/// ParseBraceInitializer - Called when parsing an initializer that has a
/// leading open brace.
///
@@ -470,7 +501,7 @@ ExprResult Parser::ParseBraceInitializer() {
auto RunSignatureHelp = [&] {
QualType PreferredType;
if (!LikelyType.isNull())
- PreferredType = Actions.ProduceConstructorSignatureHelp(
+ PreferredType = Actions.CodeCompletion().ProduceConstructorSignatureHelp(
LikelyType->getCanonicalTypeInternal(), T.getOpenLocation(),
InitExprs, T.getOpenLocation(), /*Braced=*/true);
CalledSignatureHelp = true;
@@ -498,6 +529,8 @@ ExprResult Parser::ParseBraceInitializer() {
ExprResult SubElt;
if (MayBeDesignationStart())
SubElt = ParseInitializerWithPotentialDesignator(DesignatorCompletion);
+ else if (Tok.getKind() == tok::annot_embed)
+ SubElt = createEmbedExpr();
else
SubElt = ParseInitializer();
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp b/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp
index 849fd1ac95a4..6a2088a73c55 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp
@@ -20,6 +20,8 @@
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaCodeCompletion.h"
+#include "clang/Sema/SemaObjC.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
@@ -55,7 +57,7 @@ Parser::ParseObjCAtDirectives(ParsedAttributes &DeclAttrs,
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCAtDirective(getCurScope());
+ Actions.CodeCompletion().CodeCompleteObjCAtDirective(getCurScope());
return nullptr;
}
@@ -132,7 +134,7 @@ public:
void leave() {
if (Params)
- Actions.popObjCTypeParamList(S, Params);
+ Actions.ObjC().popObjCTypeParamList(S, Params);
Params = nullptr;
}
};
@@ -155,7 +157,7 @@ Parser::ParseObjCAtClassDeclaration(SourceLocation atLoc) {
MaybeSkipAttributes(tok::objc_class);
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCClassForwardDecl(getCurScope());
+ Actions.CodeCompletion().CodeCompleteObjCClassForwardDecl(getCurScope());
return Actions.ConvertDeclToDeclGroup(nullptr);
}
if (expectIdentifier()) {
@@ -179,23 +181,22 @@ Parser::ParseObjCAtClassDeclaration(SourceLocation atLoc) {
if (ExpectAndConsume(tok::semi, diag::err_expected_after, "@class"))
return Actions.ConvertDeclToDeclGroup(nullptr);
- return Actions.ActOnForwardClassDeclaration(atLoc, ClassNames.data(),
- ClassLocs.data(),
- ClassTypeParams,
- ClassNames.size());
+ return Actions.ObjC().ActOnForwardClassDeclaration(
+ atLoc, ClassNames.data(), ClassLocs.data(), ClassTypeParams,
+ ClassNames.size());
}
void Parser::CheckNestedObjCContexts(SourceLocation AtLoc)
{
- Sema::ObjCContainerKind ock = Actions.getObjCContainerKind();
- if (ock == Sema::OCK_None)
+ SemaObjC::ObjCContainerKind ock = Actions.ObjC().getObjCContainerKind();
+ if (ock == SemaObjC::OCK_None)
return;
- Decl *Decl = Actions.getObjCDeclContext();
+ Decl *Decl = Actions.ObjC().getObjCDeclContext();
if (CurParsedObjCImpl) {
CurParsedObjCImpl->finish(AtLoc);
} else {
- Actions.ActOnAtEnd(getCurScope(), AtLoc);
+ Actions.ObjC().ActOnAtEnd(getCurScope(), AtLoc);
}
Diag(AtLoc, diag::err_objc_missing_end)
<< FixItHint::CreateInsertion(AtLoc, "@end\n");
@@ -242,7 +243,7 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
// Code completion after '@interface'.
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCInterfaceDecl(getCurScope());
+ Actions.CodeCompletion().CodeCompleteObjCInterfaceDecl(getCurScope());
return nullptr;
}
@@ -276,7 +277,8 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
IdentifierInfo *categoryId = nullptr;
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCInterfaceCategory(getCurScope(), nameId, nameLoc);
+ Actions.CodeCompletion().CodeCompleteObjCInterfaceCategory(
+ getCurScope(), nameId, nameLoc);
return nullptr;
}
@@ -305,7 +307,7 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
/*consumeLastToken=*/true))
return nullptr;
- ObjCCategoryDecl *CategoryType = Actions.ActOnStartCategoryInterface(
+ ObjCCategoryDecl *CategoryType = Actions.ObjC().ActOnStartCategoryInterface(
AtLoc, nameId, nameLoc, typeParameterList, categoryId, categoryLoc,
ProtocolRefs.data(), ProtocolRefs.size(), ProtocolLocs.data(),
EndProtoLoc, attrs);
@@ -331,7 +333,8 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
// Code completion of superclass names.
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCSuperclass(getCurScope(), nameId, nameLoc);
+ Actions.CodeCompletion().CodeCompleteObjCSuperclass(getCurScope(), nameId,
+ nameLoc);
return nullptr;
}
@@ -360,9 +363,9 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
for (const auto &pair : ProtocolIdents) {
protocolLocs.push_back(pair.second);
}
- Actions.FindProtocolDeclaration(/*WarnOnDeclarations=*/true,
- /*ForObjCContainer=*/true,
- ProtocolIdents, protocols);
+ Actions.ObjC().FindProtocolDeclaration(/*WarnOnDeclarations=*/true,
+ /*ForObjCContainer=*/true,
+ ProtocolIdents, protocols);
}
} else if (protocols.empty() && Tok.is(tok::less) &&
ParseObjCProtocolReferences(protocols, protocolLocs, true, true,
@@ -372,11 +375,11 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
}
if (Tok.isNot(tok::less))
- Actions.ActOnTypedefedProtocols(protocols, protocolLocs,
- superClassId, superClassLoc);
+ Actions.ObjC().ActOnTypedefedProtocols(protocols, protocolLocs,
+ superClassId, superClassLoc);
- Sema::SkipBodyInfo SkipBody;
- ObjCInterfaceDecl *ClsType = Actions.ActOnStartClassInterface(
+ SkipBodyInfo SkipBody;
+ ObjCInterfaceDecl *ClsType = Actions.ObjC().ActOnStartClassInterface(
getCurScope(), AtLoc, nameId, nameLoc, typeParameterList, superClassId,
superClassLoc, typeArgs,
SourceRange(typeArgsLAngleLoc, typeArgsRAngleLoc), protocols.data(),
@@ -468,7 +471,7 @@ ObjCTypeParamList *Parser::parseObjCTypeParamListOrProtocolRefs(
auto makeProtocolIdentsIntoTypeParameters = [&]() {
unsigned index = 0;
for (const auto &pair : protocolIdents) {
- DeclResult typeParam = Actions.actOnObjCTypeParam(
+ DeclResult typeParam = Actions.ObjC().actOnObjCTypeParam(
getCurScope(), ObjCTypeParamVariance::Invariant, SourceLocation(),
index++, pair.first, pair.second, SourceLocation(), nullptr);
if (typeParam.isUsable())
@@ -508,7 +511,8 @@ ObjCTypeParamList *Parser::parseObjCTypeParamListOrProtocolRefs(
// FIXME: If these aren't protocol references, we'll need different
// completions.
cutOffParsing();
- Actions.CodeCompleteObjCProtocolReferences(protocolIdents);
+ Actions.CodeCompletion().CodeCompleteObjCProtocolReferences(
+ protocolIdents);
// FIXME: Better recovery here?.
return nullptr;
@@ -546,7 +550,7 @@ ObjCTypeParamList *Parser::parseObjCTypeParamListOrProtocolRefs(
}
// Create the type parameter.
- DeclResult typeParam = Actions.actOnObjCTypeParam(
+ DeclResult typeParam = Actions.ObjC().actOnObjCTypeParam(
getCurScope(), variance, varianceLoc, typeParams.size(), paramName,
paramLoc, colonLoc, boundType.isUsable() ? boundType.get() : nullptr);
if (typeParam.isUsable())
@@ -587,11 +591,8 @@ ObjCTypeParamList *Parser::parseObjCTypeParamListOrProtocolRefs(
}
// Form the type parameter list and enter its scope.
- ObjCTypeParamList *list = Actions.actOnObjCTypeParamList(
- getCurScope(),
- lAngleLoc,
- typeParams,
- rAngleLoc);
+ ObjCTypeParamList *list = Actions.ObjC().actOnObjCTypeParamList(
+ getCurScope(), lAngleLoc, typeParams, rAngleLoc);
Scope.enter(list);
// Clear out the angle locations; they're used by the caller to indicate
@@ -684,9 +685,10 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
// Code completion within an Objective-C interface.
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteOrdinaryName(getCurScope(),
- CurParsedObjCImpl? Sema::PCC_ObjCImplementation
- : Sema::PCC_ObjCInterface);
+ Actions.CodeCompletion().CodeCompleteOrdinaryName(
+ getCurScope(), CurParsedObjCImpl
+ ? SemaCodeCompletion::PCC_ObjCImplementation
+ : SemaCodeCompletion::PCC_ObjCInterface);
return;
}
@@ -723,7 +725,7 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
const auto &NextTok = NextToken();
if (NextTok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCAtDirective(getCurScope());
+ Actions.CodeCompletion().CodeCompleteObjCAtDirective(getCurScope());
return;
}
@@ -778,16 +780,16 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
}
bool addedToDeclSpec = false;
- auto ObjCPropertyCallback = [&](ParsingFieldDeclarator &FD) {
+ auto ObjCPropertyCallback = [&](ParsingFieldDeclarator &FD) -> Decl * {
if (FD.D.getIdentifier() == nullptr) {
Diag(AtLoc, diag::err_objc_property_requires_field_name)
<< FD.D.getSourceRange();
- return;
+ return nullptr;
}
if (FD.BitfieldSize) {
Diag(AtLoc, diag::err_objc_property_bitfield)
<< FD.D.getSourceRange();
- return;
+ return nullptr;
}
// Map a nullability property attribute to a context-sensitive keyword
@@ -799,11 +801,11 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
addedToDeclSpec);
// Install the property declarator into interfaceDecl.
- IdentifierInfo *SelName =
+ const IdentifierInfo *SelName =
OCDS.getGetterName() ? OCDS.getGetterName() : FD.D.getIdentifier();
Selector GetterSel = PP.getSelectorTable().getNullarySelector(SelName);
- IdentifierInfo *SetterName = OCDS.getSetterName();
+ const IdentifierInfo *SetterName = OCDS.getSetterName();
Selector SetterSel;
if (SetterName)
SetterSel = PP.getSelectorTable().getSelector(1, &SetterName);
@@ -811,11 +813,12 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
SetterSel = SelectorTable::constructSetterSelector(
PP.getIdentifierTable(), PP.getSelectorTable(),
FD.D.getIdentifier());
- Decl *Property = Actions.ActOnProperty(
+ Decl *Property = Actions.ObjC().ActOnProperty(
getCurScope(), AtLoc, LParenLoc, FD, OCDS, GetterSel, SetterSel,
MethodImplKind);
FD.complete(Property);
+ return Property;
};
// Parse all the comma separated declarators.
@@ -836,14 +839,14 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Diag(Tok, diag::err_objc_missing_end)
<< FixItHint::CreateInsertion(Tok.getLocation(), "\n@end\n");
Diag(CDecl->getBeginLoc(), diag::note_objc_container_start)
- << (int)Actions.getObjCContainerKind();
+ << (int)Actions.ObjC().getObjCContainerKind();
AtEnd.setBegin(Tok.getLocation());
AtEnd.setEnd(Tok.getLocation());
}
// Insert collected methods declarations into the @interface object.
// This passes in an invalid SourceLocation for AtEndLoc when EOF is hit.
- Actions.ActOnAtEnd(getCurScope(), AtEnd, allMethods, allTUVariables);
+ Actions.ObjC().ActOnAtEnd(getCurScope(), AtEnd, allMethods, allTUVariables);
}
/// Diagnose redundant or conflicting nullability information.
@@ -898,7 +901,7 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS) {
while (true) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCPropertyFlags(getCurScope(), DS);
+ Actions.CodeCompletion().CodeCompleteObjCPropertyFlags(getCurScope(), DS);
return;
}
const IdentifierInfo *II = Tok.getIdentifierInfo();
@@ -946,9 +949,11 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
if (IsSetter)
- Actions.CodeCompleteObjCPropertySetter(getCurScope());
+ Actions.CodeCompletion().CodeCompleteObjCPropertySetter(
+ getCurScope());
else
- Actions.CodeCompleteObjCPropertyGetter(getCurScope());
+ Actions.CodeCompletion().CodeCompleteObjCPropertyGetter(
+ getCurScope());
return;
}
@@ -1199,7 +1204,7 @@ void Parser::ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
while (true) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCPassingType(
+ Actions.CodeCompletion().CodeCompleteObjCPassingType(
getCurScope(), DS, Context == DeclaratorContext::ObjCParameter);
return;
}
@@ -1393,8 +1398,9 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCMethodDecl(getCurScope(), mType == tok::minus,
- /*ReturnType=*/nullptr);
+ Actions.CodeCompletion().CodeCompleteObjCMethodDecl(getCurScope(),
+ mType == tok::minus,
+ /*ReturnType=*/nullptr);
return nullptr;
}
@@ -1412,8 +1418,8 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCMethodDecl(getCurScope(), mType == tok::minus,
- ReturnType);
+ Actions.CodeCompletion().CodeCompleteObjCMethodDecl(
+ getCurScope(), mType == tok::minus, ReturnType);
return nullptr;
}
@@ -1437,7 +1443,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
methodAttrs);
Selector Sel = PP.getSelectorTable().getNullarySelector(SelIdent);
- Decl *Result = Actions.ActOnMethodDeclaration(
+ Decl *Result = Actions.ObjC().ActOnMethodDeclaration(
getCurScope(), mLoc, Tok.getLocation(), mType, DSRet, ReturnType,
selLoc, Sel, nullptr, CParamInfo.data(), CParamInfo.size(), methodAttrs,
MethodImplKind, false, MethodDefinition);
@@ -1445,16 +1451,16 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
return Result;
}
- SmallVector<IdentifierInfo *, 12> KeyIdents;
+ SmallVector<const IdentifierInfo *, 12> KeyIdents;
SmallVector<SourceLocation, 12> KeyLocs;
- SmallVector<Sema::ObjCArgInfo, 12> ArgInfos;
+ SmallVector<SemaObjC::ObjCArgInfo, 12> ArgInfos;
ParseScope PrototypeScope(this, Scope::FunctionPrototypeScope |
Scope::FunctionDeclarationScope | Scope::DeclScope);
AttributePool allParamAttrs(AttrFactory);
while (true) {
ParsedAttributes paramAttrs(AttrFactory);
- Sema::ObjCArgInfo ArgInfo;
+ SemaObjC::ObjCArgInfo ArgInfo;
// Each iteration parses a single keyword argument.
if (ExpectAndConsume(tok::colon))
@@ -1475,10 +1481,9 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
if (Tok.is(tok::code_completion)) {
cutOffParsing();
KeyIdents.push_back(SelIdent);
- Actions.CodeCompleteObjCMethodDeclSelector(getCurScope(),
- mType == tok::minus,
- /*AtParameterName=*/true,
- ReturnType, KeyIdents);
+ Actions.CodeCompletion().CodeCompleteObjCMethodDeclSelector(
+ getCurScope(), mType == tok::minus,
+ /*AtParameterName=*/true, ReturnType, KeyIdents);
return nullptr;
}
@@ -1499,10 +1504,9 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
// Code completion for the next piece of the selector.
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCMethodDeclSelector(getCurScope(),
- mType == tok::minus,
- /*AtParameterName=*/false,
- ReturnType, KeyIdents);
+ Actions.CodeCompletion().CodeCompleteObjCMethodDeclSelector(
+ getCurScope(), mType == tok::minus,
+ /*AtParameterName=*/false, ReturnType, KeyIdents);
return nullptr;
}
@@ -1536,12 +1540,13 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
cStyleParamWarned = true;
}
DeclSpec DS(AttrFactory);
- ParseDeclarationSpecifiers(DS);
+ ParsedTemplateInfo TemplateInfo;
+ ParseDeclarationSpecifiers(DS, TemplateInfo);
// Parse the declarator.
Declarator ParmDecl(DS, ParsedAttributesView::none(),
DeclaratorContext::Prototype);
ParseDeclarator(ParmDecl);
- IdentifierInfo *ParmII = ParmDecl.getIdentifier();
+ const IdentifierInfo *ParmII = ParmDecl.getIdentifier();
Decl *Param = Actions.ActOnParamDeclarator(getCurScope(), ParmDecl);
CParamInfo.push_back(DeclaratorChunk::ParamInfo(ParmII,
ParmDecl.getIdentifierLoc(),
@@ -1559,7 +1564,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
Selector Sel = PP.getSelectorTable().getSelector(KeyIdents.size(),
&KeyIdents[0]);
- Decl *Result = Actions.ActOnMethodDeclaration(
+ Decl *Result = Actions.ObjC().ActOnMethodDeclaration(
getCurScope(), mLoc, Tok.getLocation(), mType, DSRet, ReturnType, KeyLocs,
Sel, &ArgInfos[0], CParamInfo.data(), CParamInfo.size(), methodAttrs,
MethodImplKind, isVariadic, MethodDefinition);
@@ -1586,7 +1591,8 @@ ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &Protocols,
while (true) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCProtocolReferences(ProtocolIdents);
+ Actions.CodeCompletion().CodeCompleteObjCProtocolReferences(
+ ProtocolIdents);
return true;
}
@@ -1609,8 +1615,8 @@ ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &Protocols,
return true;
// Convert the list of protocols identifiers into a list of protocol decls.
- Actions.FindProtocolDeclaration(WarnOnDeclarations, ForObjCContainer,
- ProtocolIdents, Protocols);
+ Actions.ObjC().FindProtocolDeclaration(WarnOnDeclarations, ForObjCContainer,
+ ProtocolIdents, Protocols);
return false;
}
@@ -1624,10 +1630,8 @@ TypeResult Parser::parseObjCProtocolQualifierType(SourceLocation &rAngleLoc) {
(void)ParseObjCProtocolReferences(protocols, protocolLocs, false, false,
lAngleLoc, rAngleLoc,
/*consumeLastToken=*/true);
- TypeResult result = Actions.actOnObjCProtocolQualifierType(lAngleLoc,
- protocols,
- protocolLocs,
- rAngleLoc);
+ TypeResult result = Actions.ObjC().actOnObjCProtocolQualifierType(
+ lAngleLoc, protocols, protocolLocs, rAngleLoc);
if (result.isUsable()) {
Diag(lAngleLoc, diag::warn_objc_protocol_qualifier_missing_id)
<< FixItHint::CreateInsertion(lAngleLoc, "id")
@@ -1686,9 +1690,11 @@ void Parser::parseObjCTypeArgsOrProtocolQualifiers(
QualType BaseT = Actions.GetTypeFromParser(baseType);
cutOffParsing();
if (!BaseT.isNull() && BaseT->acceptsObjCTypeParams()) {
- Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Type);
+ Actions.CodeCompletion().CodeCompleteOrdinaryName(
+ getCurScope(), SemaCodeCompletion::PCC_Type);
} else {
- Actions.CodeCompleteObjCProtocolReferences(identifierLocPairs);
+ Actions.CodeCompletion().CodeCompleteObjCProtocolReferences(
+ identifierLocPairs);
}
return;
}
@@ -1706,19 +1712,11 @@ void Parser::parseObjCTypeArgsOrProtocolQualifiers(
/*ObjCGenericList=*/true);
// Let Sema figure out what we parsed.
- Actions.actOnObjCTypeArgsOrProtocolQualifiers(getCurScope(),
- baseType,
- lAngleLoc,
- identifiers,
- identifierLocs,
- rAngleLoc,
- typeArgsLAngleLoc,
- typeArgs,
- typeArgsRAngleLoc,
- protocolLAngleLoc,
- protocols,
- protocolRAngleLoc,
- warnOnIncompleteProtocols);
+ Actions.ObjC().actOnObjCTypeArgsOrProtocolQualifiers(
+ getCurScope(), baseType, lAngleLoc, identifiers, identifierLocs,
+ rAngleLoc, typeArgsLAngleLoc, typeArgs, typeArgsRAngleLoc,
+ protocolLAngleLoc, protocols, protocolRAngleLoc,
+ warnOnIncompleteProtocols);
return;
}
@@ -1761,7 +1759,7 @@ void Parser::parseObjCTypeArgsOrProtocolQualifiers(
}
} else {
invalid = true;
- if (!Actions.LookupProtocol(identifiers[i], identifierLocs[i])) {
+ if (!Actions.ObjC().LookupProtocol(identifiers[i], identifierLocs[i])) {
unknownTypeArgs.push_back(identifiers[i]);
unknownTypeArgsLoc.push_back(identifierLocs[i]);
} else if (!foundProtocolId) {
@@ -1796,9 +1794,9 @@ void Parser::parseObjCTypeArgsOrProtocolQualifiers(
// Diagnose the mix between type args and protocols.
if (foundProtocolId && foundValidTypeId)
- Actions.DiagnoseTypeArgsAndProtocols(foundProtocolId, foundProtocolSrcLoc,
- foundValidTypeId,
- foundValidTypeSrcLoc);
+ Actions.ObjC().DiagnoseTypeArgsAndProtocols(
+ foundProtocolId, foundProtocolSrcLoc, foundValidTypeId,
+ foundValidTypeSrcLoc);
// Diagnose unknown arg types.
ParsedType T;
@@ -1904,17 +1902,9 @@ TypeResult Parser::parseObjCTypeArgsAndProtocolQualifiers(
else
endLoc = Tok.getLocation();
- return Actions.actOnObjCTypeArgsAndProtocolQualifiers(
- getCurScope(),
- loc,
- type,
- typeArgsLAngleLoc,
- typeArgs,
- typeArgsRAngleLoc,
- protocolLAngleLoc,
- protocols,
- protocolLocs,
- protocolRAngleLoc);
+ return Actions.ObjC().actOnObjCTypeArgsAndProtocolQualifiers(
+ getCurScope(), loc, type, typeArgsLAngleLoc, typeArgs, typeArgsRAngleLoc,
+ protocolLAngleLoc, protocols, protocolLocs, protocolRAngleLoc);
}
void Parser::HelperActionsForIvarDeclarations(
@@ -1979,7 +1969,7 @@ void Parser::ParseObjCClassInstanceVariables(ObjCContainerDecl *interfaceDecl,
if (TryConsumeToken(tok::at)) { // parse objc-visibility-spec
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCAtVisibility(getCurScope());
+ Actions.CodeCompletion().CodeCompleteObjCAtVisibility(getCurScope());
return;
}
@@ -2010,8 +2000,8 @@ void Parser::ParseObjCClassInstanceVariables(ObjCContainerDecl *interfaceDecl,
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteOrdinaryName(getCurScope(),
- Sema::PCC_ObjCInstanceVariableList);
+ Actions.CodeCompletion().CodeCompleteOrdinaryName(
+ getCurScope(), SemaCodeCompletion::PCC_ObjCInstanceVariableList);
return;
}
@@ -2024,17 +2014,18 @@ void Parser::ParseObjCClassInstanceVariables(ObjCContainerDecl *interfaceDecl,
continue;
}
- auto ObjCIvarCallback = [&](ParsingFieldDeclarator &FD) {
+ auto ObjCIvarCallback = [&](ParsingFieldDeclarator &FD) -> Decl * {
assert(getObjCDeclContext() == interfaceDecl &&
"Ivar should have interfaceDecl as its decl context");
// Install the declarator into the interface decl.
FD.D.setObjCIvar(true);
- Decl *Field = Actions.ActOnIvar(
+ Decl *Field = Actions.ObjC().ActOnIvar(
getCurScope(), FD.D.getDeclSpec().getSourceRange().getBegin(), FD.D,
FD.BitfieldSize, visibility);
if (Field)
AllIvarDecls.push_back(Field);
FD.complete(Field);
+ return Field;
};
// Parse all the comma separated declarators.
@@ -2078,7 +2069,7 @@ Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCProtocolDecl(getCurScope());
+ Actions.CodeCompletion().CodeCompleteObjCProtocolDecl(getCurScope());
return nullptr;
}
@@ -2092,7 +2083,8 @@ Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
if (TryConsumeToken(tok::semi)) { // forward declaration of one protocol.
IdentifierLocPair ProtoInfo(protocolName, nameLoc);
- return Actions.ActOnForwardProtocolDeclaration(AtLoc, ProtoInfo, attrs);
+ return Actions.ObjC().ActOnForwardProtocolDeclaration(AtLoc, ProtoInfo,
+ attrs);
}
CheckNestedObjCContexts(AtLoc);
@@ -2119,7 +2111,8 @@ Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
if (ExpectAndConsume(tok::semi, diag::err_expected_after, "@protocol"))
return nullptr;
- return Actions.ActOnForwardProtocolDeclaration(AtLoc, ProtocolRefs, attrs);
+ return Actions.ObjC().ActOnForwardProtocolDeclaration(AtLoc, ProtocolRefs,
+ attrs);
}
// Last, and definitely not least, parse a protocol declaration.
@@ -2133,8 +2126,8 @@ Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
/*consumeLastToken=*/true))
return nullptr;
- Sema::SkipBodyInfo SkipBody;
- ObjCProtocolDecl *ProtoType = Actions.ActOnStartProtocolInterface(
+ SkipBodyInfo SkipBody;
+ ObjCProtocolDecl *ProtoType = Actions.ObjC().ActOnStartProtocolInterface(
AtLoc, protocolName, nameLoc, ProtocolRefs.data(), ProtocolRefs.size(),
ProtocolLocs.data(), EndProtoLoc, attrs, &SkipBody);
@@ -2174,7 +2167,7 @@ Parser::ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
// Code completion after '@implementation'.
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCImplementationDecl(getCurScope());
+ Actions.CodeCompletion().CodeCompleteObjCImplementationDecl(getCurScope());
return nullptr;
}
@@ -2212,7 +2205,8 @@ Parser::ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCImplementationCategory(getCurScope(), nameId, nameLoc);
+ Actions.CodeCompletion().CodeCompleteObjCImplementationCategory(
+ getCurScope(), nameId, nameLoc);
return nullptr;
}
@@ -2241,7 +2235,7 @@ Parser::ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
protocolLAngleLoc, protocolRAngleLoc,
/*consumeLastToken=*/true);
}
- ObjCImpDecl = Actions.ActOnStartCategoryImplementation(
+ ObjCImpDecl = Actions.ObjC().ActOnStartCategoryImplementation(
AtLoc, nameId, nameLoc, categoryId, categoryLoc, Attrs);
} else {
@@ -2255,7 +2249,7 @@ Parser::ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
superClassId = Tok.getIdentifierInfo();
superClassLoc = ConsumeToken(); // Consume super class name
}
- ObjCImpDecl = Actions.ActOnStartClassImplementation(
+ ObjCImpDecl = Actions.ObjC().ActOnStartClassImplementation(
AtLoc, nameId, nameLoc, superClassId, superClassLoc, Attrs);
if (Tok.is(tok::l_brace)) // we have ivars
@@ -2291,7 +2285,8 @@ Parser::ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
}
}
- return Actions.ActOnFinishObjCImplementation(ObjCImpDecl, DeclsInGroup);
+ return Actions.ObjC().ActOnFinishObjCImplementation(ObjCImpDecl,
+ DeclsInGroup);
}
Parser::DeclGroupPtrTy
@@ -2314,7 +2309,7 @@ Parser::ObjCImplParsingDataRAII::~ObjCImplParsingDataRAII() {
P.Diag(P.Tok, diag::err_objc_missing_end)
<< FixItHint::CreateInsertion(P.Tok.getLocation(), "\n@end\n");
P.Diag(Dcl->getBeginLoc(), diag::note_objc_container_start)
- << Sema::OCK_Implementation;
+ << SemaObjC::OCK_Implementation;
}
}
P.CurParsedObjCImpl = nullptr;
@@ -2323,12 +2318,13 @@ Parser::ObjCImplParsingDataRAII::~ObjCImplParsingDataRAII() {
void Parser::ObjCImplParsingDataRAII::finish(SourceRange AtEnd) {
assert(!Finished);
- P.Actions.DefaultSynthesizeProperties(P.getCurScope(), Dcl, AtEnd.getBegin());
+ P.Actions.ObjC().DefaultSynthesizeProperties(P.getCurScope(), Dcl,
+ AtEnd.getBegin());
for (size_t i = 0; i < LateParsedObjCMethods.size(); ++i)
P.ParseLexedObjCMethodDefs(*LateParsedObjCMethods[i],
true/*Methods*/);
- P.Actions.ActOnAtEnd(P.getCurScope(), AtEnd);
+ P.Actions.ObjC().ActOnAtEnd(P.getCurScope(), AtEnd);
if (HasCFunction)
for (size_t i = 0; i < LateParsedObjCMethods.size(); ++i)
@@ -2361,8 +2357,8 @@ Decl *Parser::ParseObjCAtAliasDeclaration(SourceLocation atLoc) {
IdentifierInfo *classId = Tok.getIdentifierInfo();
SourceLocation classLoc = ConsumeToken(); // consume class-name;
ExpectAndConsume(tok::semi, diag::err_expected_after, "@compatibility_alias");
- return Actions.ActOnCompatibilityAlias(atLoc, aliasId, aliasLoc,
- classId, classLoc);
+ return Actions.ObjC().ActOnCompatibilityAlias(atLoc, aliasId, aliasLoc,
+ classId, classLoc);
}
/// property-synthesis:
@@ -2384,7 +2380,8 @@ Decl *Parser::ParseObjCPropertySynthesize(SourceLocation atLoc) {
while (true) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCPropertyDefinition(getCurScope());
+ Actions.CodeCompletion().CodeCompleteObjCPropertyDefinition(
+ getCurScope());
return nullptr;
}
@@ -2402,7 +2399,8 @@ Decl *Parser::ParseObjCPropertySynthesize(SourceLocation atLoc) {
// property '=' ivar-name
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCPropertySynthesizeIvar(getCurScope(), propertyId);
+ Actions.CodeCompletion().CodeCompleteObjCPropertySynthesizeIvar(
+ getCurScope(), propertyId);
return nullptr;
}
@@ -2411,10 +2409,9 @@ Decl *Parser::ParseObjCPropertySynthesize(SourceLocation atLoc) {
propertyIvar = Tok.getIdentifierInfo();
propertyIvarLoc = ConsumeToken(); // consume ivar-name
}
- Actions.ActOnPropertyImplDecl(
- getCurScope(), atLoc, propertyLoc, true,
- propertyId, propertyIvar, propertyIvarLoc,
- ObjCPropertyQueryKind::OBJC_PR_query_unknown);
+ Actions.ObjC().ActOnPropertyImplDecl(
+ getCurScope(), atLoc, propertyLoc, true, propertyId, propertyIvar,
+ propertyIvarLoc, ObjCPropertyQueryKind::OBJC_PR_query_unknown);
if (Tok.isNot(tok::comma))
break;
ConsumeToken(); // consume ','
@@ -2462,7 +2459,8 @@ Decl *Parser::ParseObjCPropertyDynamic(SourceLocation atLoc) {
while (true) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCPropertyDefinition(getCurScope());
+ Actions.CodeCompletion().CodeCompleteObjCPropertyDefinition(
+ getCurScope());
return nullptr;
}
@@ -2473,11 +2471,11 @@ Decl *Parser::ParseObjCPropertyDynamic(SourceLocation atLoc) {
IdentifierInfo *propertyId = Tok.getIdentifierInfo();
SourceLocation propertyLoc = ConsumeToken(); // consume property name
- Actions.ActOnPropertyImplDecl(
- getCurScope(), atLoc, propertyLoc, false,
- propertyId, nullptr, SourceLocation(),
- isClassProperty ? ObjCPropertyQueryKind::OBJC_PR_query_class :
- ObjCPropertyQueryKind::OBJC_PR_query_unknown);
+ Actions.ObjC().ActOnPropertyImplDecl(
+ getCurScope(), atLoc, propertyLoc, false, propertyId, nullptr,
+ SourceLocation(),
+ isClassProperty ? ObjCPropertyQueryKind::OBJC_PR_query_class
+ : ObjCPropertyQueryKind::OBJC_PR_query_unknown);
if (Tok.isNot(tok::comma))
break;
@@ -2502,7 +2500,7 @@ StmtResult Parser::ParseObjCThrowStmt(SourceLocation atLoc) {
}
// consume ';'
ExpectAndConsume(tok::semi, diag::err_expected_after, "@throw");
- return Actions.ActOnObjCAtThrowStmt(atLoc, Res.get(), getCurScope());
+ return Actions.ObjC().ActOnObjCAtThrowStmt(atLoc, Res.get(), getCurScope());
}
/// objc-synchronized-statement:
@@ -2539,7 +2537,8 @@ Parser::ParseObjCSynchronizedStmt(SourceLocation atLoc) {
// Check the @synchronized operand now.
if (!operand.isInvalid())
- operand = Actions.ActOnObjCAtSynchronizedOperand(atLoc, operand.get());
+ operand =
+ Actions.ObjC().ActOnObjCAtSynchronizedOperand(atLoc, operand.get());
// Parse the compound statement within a new scope.
ParseScope bodyScope(this, Scope::DeclScope | Scope::CompoundStmtScope);
@@ -2554,7 +2553,8 @@ Parser::ParseObjCSynchronizedStmt(SourceLocation atLoc) {
if (body.isInvalid())
body = Actions.ActOnNullStmt(Tok.getLocation());
- return Actions.ActOnObjCAtSynchronizedStmt(atLoc, operand.get(), body.get());
+ return Actions.ObjC().ActOnObjCAtSynchronizedStmt(atLoc, operand.get(),
+ body.get());
}
/// objc-try-catch-statement:
@@ -2604,14 +2604,16 @@ StmtResult Parser::ParseObjCTryStmt(SourceLocation atLoc) {
Scope::AtCatchScope);
if (Tok.isNot(tok::ellipsis)) {
DeclSpec DS(AttrFactory);
- ParseDeclarationSpecifiers(DS);
+ ParsedTemplateInfo TemplateInfo;
+ ParseDeclarationSpecifiers(DS, TemplateInfo);
Declarator ParmDecl(DS, ParsedAttributesView::none(),
DeclaratorContext::ObjCCatch);
ParseDeclarator(ParmDecl);
// Inform the actions module about the declarator, so it
// gets added to the current scope.
- FirstPart = Actions.ActOnObjCExceptionDecl(getCurScope(), ParmDecl);
+ FirstPart =
+ Actions.ObjC().ActOnObjCExceptionDecl(getCurScope(), ParmDecl);
} else
ConsumeToken(); // consume '...'
@@ -2630,10 +2632,8 @@ StmtResult Parser::ParseObjCTryStmt(SourceLocation atLoc) {
if (CatchBody.isInvalid())
CatchBody = Actions.ActOnNullStmt(Tok.getLocation());
- StmtResult Catch = Actions.ActOnObjCAtCatchStmt(AtCatchFinallyLoc,
- RParenLoc,
- FirstPart,
- CatchBody.get());
+ StmtResult Catch = Actions.ObjC().ActOnObjCAtCatchStmt(
+ AtCatchFinallyLoc, RParenLoc, FirstPart, CatchBody.get());
if (!Catch.isInvalid())
CatchStmts.push_back(Catch.get());
@@ -2669,8 +2669,8 @@ StmtResult Parser::ParseObjCTryStmt(SourceLocation atLoc) {
FinallyBody = Actions.ActOnCapturedRegionEnd(FinallyBody.get());
}
- FinallyStmt = Actions.ActOnObjCAtFinallyStmt(AtCatchFinallyLoc,
- FinallyBody.get());
+ FinallyStmt = Actions.ObjC().ActOnObjCAtFinallyStmt(AtCatchFinallyLoc,
+ FinallyBody.get());
catch_or_finally_seen = true;
break;
}
@@ -2680,9 +2680,8 @@ StmtResult Parser::ParseObjCTryStmt(SourceLocation atLoc) {
return StmtError();
}
- return Actions.ActOnObjCAtTryStmt(atLoc, TryBody.get(),
- CatchStmts,
- FinallyStmt.get());
+ return Actions.ObjC().ActOnObjCAtTryStmt(atLoc, TryBody.get(), CatchStmts,
+ FinallyStmt.get());
}
/// objc-autoreleasepool-statement:
@@ -2704,8 +2703,8 @@ Parser::ParseObjCAutoreleasePoolStmt(SourceLocation atLoc) {
BodyScope.Exit();
if (AutoreleasePoolBody.isInvalid())
AutoreleasePoolBody = Actions.ActOnNullStmt(Tok.getLocation());
- return Actions.ActOnObjCAutoreleasePoolStmt(atLoc,
- AutoreleasePoolBody.get());
+ return Actions.ObjC().ActOnObjCAutoreleasePoolStmt(atLoc,
+ AutoreleasePoolBody.get());
}
/// StashAwayMethodOrFunctionBodyTokens - Consume the tokens and store them
@@ -2788,7 +2787,7 @@ Decl *Parser::ParseObjCMethodDefinition() {
}
// Allow the rest of sema to find private method decl implementations.
- Actions.AddAnyMethodToGlobalPool(MDecl);
+ Actions.ObjC().AddAnyMethodToGlobalPool(MDecl);
assert (CurParsedObjCImpl
&& "ParseObjCMethodDefinition - Method out of @implementation");
// Consume the tokens and store them for later parsing.
@@ -2800,7 +2799,7 @@ StmtResult Parser::ParseObjCAtStatement(SourceLocation AtLoc,
ParsedStmtContext StmtCtx) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCAtStatement(getCurScope());
+ Actions.CodeCompletion().CodeCompleteObjCAtStatement(getCurScope());
return StmtError();
}
@@ -2841,7 +2840,7 @@ ExprResult Parser::ParseObjCAtExpression(SourceLocation AtLoc) {
switch (Tok.getKind()) {
case tok::code_completion:
cutOffParsing();
- Actions.CodeCompleteObjCAtExpression(getCurScope());
+ Actions.CodeCompletion().CodeCompleteObjCAtExpression(getCurScope());
return ExprError();
case tok::minus:
@@ -2872,7 +2871,7 @@ ExprResult Parser::ParseObjCAtExpression(SourceLocation AtLoc) {
return Lit;
return ParsePostfixExpressionSuffix(
- Actions.BuildObjCNumericLiteral(AtLoc, Lit.get()));
+ Actions.ObjC().BuildObjCNumericLiteral(AtLoc, Lit.get()));
}
case tok::string_literal: // primary-expression: string-literal
@@ -2971,7 +2970,7 @@ bool Parser::ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr) {
tok::annot_cxxscope))
TryAnnotateTypeOrScopeToken();
- if (!Actions.isSimpleTypeSpecifier(Tok.getKind())) {
+ if (!Tok.isSimpleTypeSpecifier(getLangOpts())) {
// objc-receiver:
// expression
// Make sure any typos in the receiver are corrected or diagnosed, so that
@@ -3089,7 +3088,7 @@ ExprResult Parser::ParseObjCMessageExpression() {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCMessageReceiver(getCurScope());
+ Actions.CodeCompletion().CodeCompleteObjCMessageReceiver(getCurScope());
return ExprError();
}
@@ -3128,15 +3127,14 @@ ExprResult Parser::ParseObjCMessageExpression() {
IdentifierInfo *Name = Tok.getIdentifierInfo();
SourceLocation NameLoc = Tok.getLocation();
ParsedType ReceiverType;
- switch (Actions.getObjCMessageKind(getCurScope(), Name, NameLoc,
- Name == Ident_super,
- NextToken().is(tok::period),
- ReceiverType)) {
- case Sema::ObjCSuperMessage:
+ switch (Actions.ObjC().getObjCMessageKind(
+ getCurScope(), Name, NameLoc, Name == Ident_super,
+ NextToken().is(tok::period), ReceiverType)) {
+ case SemaObjC::ObjCSuperMessage:
return ParseObjCMessageExpressionBody(LBracLoc, ConsumeToken(), nullptr,
nullptr);
- case Sema::ObjCClassMessage:
+ case SemaObjC::ObjCClassMessage:
if (!ReceiverType) {
SkipUntil(tok::r_square, StopAtSemi);
return ExprError();
@@ -3162,7 +3160,7 @@ ExprResult Parser::ParseObjCMessageExpression() {
return ParseObjCMessageExpressionBody(LBracLoc, SourceLocation(),
ReceiverType, nullptr);
- case Sema::ObjCInstanceMessage:
+ case SemaObjC::ObjCInstanceMessage:
// Fall through to parse an expression.
break;
}
@@ -3227,14 +3225,14 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
if (Tok.is(tok::code_completion)) {
cutOffParsing();
if (SuperLoc.isValid())
- Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc,
- std::nullopt, false);
+ Actions.CodeCompletion().CodeCompleteObjCSuperMessage(
+ getCurScope(), SuperLoc, std::nullopt, false);
else if (ReceiverType)
- Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType,
- std::nullopt, false);
+ Actions.CodeCompletion().CodeCompleteObjCClassMessage(
+ getCurScope(), ReceiverType, std::nullopt, false);
else
- Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr,
- std::nullopt, false);
+ Actions.CodeCompletion().CodeCompleteObjCInstanceMessage(
+ getCurScope(), ReceiverExpr, std::nullopt, false);
return ExprError();
}
@@ -3242,7 +3240,7 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
SourceLocation Loc;
IdentifierInfo *selIdent = ParseObjCSelectorPiece(Loc);
- SmallVector<IdentifierInfo *, 12> KeyIdents;
+ SmallVector<const IdentifierInfo *, 12> KeyIdents;
SmallVector<SourceLocation, 12> KeyLocs;
ExprVector KeyExprs;
@@ -3265,17 +3263,17 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
if (Tok.is(tok::code_completion)) {
cutOffParsing();
if (SuperLoc.isValid())
- Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc,
- KeyIdents,
- /*AtArgumentExpression=*/true);
+ Actions.CodeCompletion().CodeCompleteObjCSuperMessage(
+ getCurScope(), SuperLoc, KeyIdents,
+ /*AtArgumentExpression=*/true);
else if (ReceiverType)
- Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType,
- KeyIdents,
- /*AtArgumentExpression=*/true);
+ Actions.CodeCompletion().CodeCompleteObjCClassMessage(
+ getCurScope(), ReceiverType, KeyIdents,
+ /*AtArgumentExpression=*/true);
else
- Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr,
- KeyIdents,
- /*AtArgumentExpression=*/true);
+ Actions.CodeCompletion().CodeCompleteObjCInstanceMessage(
+ getCurScope(), ReceiverExpr, KeyIdents,
+ /*AtArgumentExpression=*/true);
return ExprError();
}
@@ -3303,17 +3301,17 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
if (Tok.is(tok::code_completion)) {
cutOffParsing();
if (SuperLoc.isValid())
- Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc,
- KeyIdents,
- /*AtArgumentExpression=*/false);
+ Actions.CodeCompletion().CodeCompleteObjCSuperMessage(
+ getCurScope(), SuperLoc, KeyIdents,
+ /*AtArgumentExpression=*/false);
else if (ReceiverType)
- Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType,
- KeyIdents,
- /*AtArgumentExpression=*/false);
+ Actions.CodeCompletion().CodeCompleteObjCClassMessage(
+ getCurScope(), ReceiverType, KeyIdents,
+ /*AtArgumentExpression=*/false);
else
- Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr,
- KeyIdents,
- /*AtArgumentExpression=*/false);
+ Actions.CodeCompletion().CodeCompleteObjCInstanceMessage(
+ getCurScope(), ReceiverExpr, KeyIdents,
+ /*AtArgumentExpression=*/false);
return ExprError();
}
@@ -3375,13 +3373,14 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
Selector Sel = PP.getSelectorTable().getSelector(nKeys, &KeyIdents[0]);
if (SuperLoc.isValid())
- return Actions.ActOnSuperMessage(getCurScope(), SuperLoc, Sel,
- LBracLoc, KeyLocs, RBracLoc, KeyExprs);
+ return Actions.ObjC().ActOnSuperMessage(
+ getCurScope(), SuperLoc, Sel, LBracLoc, KeyLocs, RBracLoc, KeyExprs);
else if (ReceiverType)
- return Actions.ActOnClassMessage(getCurScope(), ReceiverType, Sel,
- LBracLoc, KeyLocs, RBracLoc, KeyExprs);
- return Actions.ActOnInstanceMessage(getCurScope(), ReceiverExpr, Sel,
- LBracLoc, KeyLocs, RBracLoc, KeyExprs);
+ return Actions.ObjC().ActOnClassMessage(getCurScope(), ReceiverType, Sel,
+ LBracLoc, KeyLocs, RBracLoc,
+ KeyExprs);
+ return Actions.ObjC().ActOnInstanceMessage(
+ getCurScope(), ReceiverExpr, Sel, LBracLoc, KeyLocs, RBracLoc, KeyExprs);
}
ExprResult Parser::ParseObjCStringLiteral(SourceLocation AtLoc) {
@@ -3410,7 +3409,7 @@ ExprResult Parser::ParseObjCStringLiteral(SourceLocation AtLoc) {
AtStrings.push_back(Lit.get());
}
- return Actions.ParseObjCStringLiteral(AtLocs.data(), AtStrings);
+ return Actions.ObjC().ParseObjCStringLiteral(AtLocs.data(), AtStrings);
}
/// ParseObjCBooleanLiteral -
@@ -3421,7 +3420,7 @@ ExprResult Parser::ParseObjCStringLiteral(SourceLocation AtLoc) {
ExprResult Parser::ParseObjCBooleanLiteral(SourceLocation AtLoc,
bool ArgValue) {
SourceLocation EndLoc = ConsumeToken(); // consume the keyword.
- return Actions.ActOnObjCBoolLiteral(AtLoc, EndLoc, ArgValue);
+ return Actions.ObjC().ActOnObjCBoolLiteral(AtLoc, EndLoc, ArgValue);
}
/// ParseObjCCharacterLiteral -
@@ -3433,7 +3432,7 @@ ExprResult Parser::ParseObjCCharacterLiteral(SourceLocation AtLoc) {
return Lit;
}
ConsumeToken(); // Consume the literal token.
- return Actions.BuildObjCNumericLiteral(AtLoc, Lit.get());
+ return Actions.ObjC().BuildObjCNumericLiteral(AtLoc, Lit.get());
}
/// ParseObjCNumericLiteral -
@@ -3447,7 +3446,7 @@ ExprResult Parser::ParseObjCNumericLiteral(SourceLocation AtLoc) {
return Lit;
}
ConsumeToken(); // Consume the literal token.
- return Actions.BuildObjCNumericLiteral(AtLoc, Lit.get());
+ return Actions.ObjC().BuildObjCNumericLiteral(AtLoc, Lit.get());
}
/// ParseObjCBoxedExpr -
@@ -3471,8 +3470,8 @@ Parser::ParseObjCBoxedExpr(SourceLocation AtLoc) {
// a boxed expression from a literal.
SourceLocation LPLoc = T.getOpenLocation(), RPLoc = T.getCloseLocation();
ValueExpr = Actions.ActOnParenExpr(LPLoc, RPLoc, ValueExpr.get());
- return Actions.BuildObjCBoxedExpr(SourceRange(AtLoc, RPLoc),
- ValueExpr.get());
+ return Actions.ObjC().BuildObjCBoxedExpr(SourceRange(AtLoc, RPLoc),
+ ValueExpr.get());
}
ExprResult Parser::ParseObjCArrayLiteral(SourceLocation AtLoc) {
@@ -3515,7 +3514,7 @@ ExprResult Parser::ParseObjCArrayLiteral(SourceLocation AtLoc) {
return ExprError();
MultiExprArg Args(ElementExprs);
- return Actions.BuildObjCArrayLiteral(SourceRange(AtLoc, EndLoc), Args);
+ return Actions.ObjC().BuildObjCArrayLiteral(SourceRange(AtLoc, EndLoc), Args);
}
ExprResult Parser::ParseObjCDictionaryLiteral(SourceLocation AtLoc) {
@@ -3580,8 +3579,8 @@ ExprResult Parser::ParseObjCDictionaryLiteral(SourceLocation AtLoc) {
return ExprError();
// Create the ObjCDictionaryLiteral.
- return Actions.BuildObjCDictionaryLiteral(SourceRange(AtLoc, EndLoc),
- Elements);
+ return Actions.ObjC().BuildObjCDictionaryLiteral(SourceRange(AtLoc, EndLoc),
+ Elements);
}
/// objc-encode-expression:
@@ -3605,8 +3604,8 @@ Parser::ParseObjCEncodeExpression(SourceLocation AtLoc) {
if (Ty.isInvalid())
return ExprError();
- return Actions.ParseObjCEncodeExpression(AtLoc, EncLoc, T.getOpenLocation(),
- Ty.get(), T.getCloseLocation());
+ return Actions.ObjC().ParseObjCEncodeExpression(
+ AtLoc, EncLoc, T.getOpenLocation(), Ty.get(), T.getCloseLocation());
}
/// objc-protocol-expression
@@ -3629,9 +3628,9 @@ Parser::ParseObjCProtocolExpression(SourceLocation AtLoc) {
T.consumeClose();
- return Actions.ParseObjCProtocolExpression(protocolId, AtLoc, ProtoLoc,
- T.getOpenLocation(), ProtoIdLoc,
- T.getCloseLocation());
+ return Actions.ObjC().ParseObjCProtocolExpression(
+ protocolId, AtLoc, ProtoLoc, T.getOpenLocation(), ProtoIdLoc,
+ T.getCloseLocation());
}
/// objc-selector-expression
@@ -3642,7 +3641,7 @@ ExprResult Parser::ParseObjCSelectorExpression(SourceLocation AtLoc) {
if (Tok.isNot(tok::l_paren))
return ExprError(Diag(Tok, diag::err_expected_lparen_after) << "@selector");
- SmallVector<IdentifierInfo *, 12> KeyIdents;
+ SmallVector<const IdentifierInfo *, 12> KeyIdents;
SourceLocation sLoc;
BalancedDelimiterTracker T(*this, tok::l_paren);
@@ -3653,7 +3652,7 @@ ExprResult Parser::ParseObjCSelectorExpression(SourceLocation AtLoc) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCSelector(getCurScope(), KeyIdents);
+ Actions.CodeCompletion().CodeCompleteObjCSelector(getCurScope(), KeyIdents);
return ExprError();
}
@@ -3679,7 +3678,8 @@ ExprResult Parser::ParseObjCSelectorExpression(SourceLocation AtLoc) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCSelector(getCurScope(), KeyIdents);
+ Actions.CodeCompletion().CodeCompleteObjCSelector(getCurScope(),
+ KeyIdents);
return ExprError();
}
@@ -3695,18 +3695,17 @@ ExprResult Parser::ParseObjCSelectorExpression(SourceLocation AtLoc) {
ConsumeParen(); // ')'
T.consumeClose();
Selector Sel = PP.getSelectorTable().getSelector(nColons, &KeyIdents[0]);
- return Actions.ParseObjCSelectorExpression(Sel, AtLoc, SelectorLoc,
- T.getOpenLocation(),
- T.getCloseLocation(),
- !HasOptionalParen);
+ return Actions.ObjC().ParseObjCSelectorExpression(
+ Sel, AtLoc, SelectorLoc, T.getOpenLocation(), T.getCloseLocation(),
+ !HasOptionalParen);
}
void Parser::ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod) {
// MCDecl might be null due to error in method or c-function prototype, etc.
Decl *MCDecl = LM.D;
- bool skip = MCDecl &&
- ((parseMethod && !Actions.isObjCMethodDecl(MCDecl)) ||
- (!parseMethod && Actions.isObjCMethodDecl(MCDecl)));
+ bool skip =
+ MCDecl && ((parseMethod && !Actions.ObjC().isObjCMethodDecl(MCDecl)) ||
+ (!parseMethod && Actions.ObjC().isObjCMethodDecl(MCDecl)));
if (skip)
return;
@@ -3736,11 +3735,12 @@ void Parser::ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod) {
ParseScope BodyScope(this, (parseMethod ? Scope::ObjCMethodScope : 0) |
Scope::FnScope | Scope::DeclScope |
Scope::CompoundStmtScope);
+ Sema::FPFeaturesStateRAII SaveFPFeatures(Actions);
// Tell the actions module that we have entered a method or c-function definition
// with the specified Declarator for the method/function.
if (parseMethod)
- Actions.ActOnStartOfObjCMethodDef(getCurScope(), MCDecl);
+ Actions.ObjC().ActOnStartOfObjCMethodDef(getCurScope(), MCDecl);
else
Actions.ActOnStartOfFunctionDef(getCurScope(), MCDecl);
if (Tok.is(tok::kw_try))
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseOpenACC.cpp b/contrib/llvm-project/clang/lib/Parse/ParseOpenACC.cpp
index 9f7e63ecdc95..0261e8ea3c9b 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseOpenACC.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseOpenACC.cpp
@@ -10,10 +10,12 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/OpenACCClause.h"
#include "clang/Basic/OpenACCKinds.h"
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
+#include "clang/Sema/SemaOpenACC.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
@@ -54,7 +56,7 @@ OpenACCDirectiveKindEx getOpenACCDirectiveKind(Token Tok) {
.Case("declare", OpenACCDirectiveKind::Declare)
.Case("init", OpenACCDirectiveKind::Init)
.Case("shutdown", OpenACCDirectiveKind::Shutdown)
- .Case("set", OpenACCDirectiveKind::Shutdown)
+ .Case("set", OpenACCDirectiveKind::Set)
.Case("update", OpenACCDirectiveKind::Update)
.Case("wait", OpenACCDirectiveKind::Wait)
.Default(OpenACCDirectiveKind::Invalid);
@@ -84,19 +86,32 @@ OpenACCClauseKind getOpenACCClauseKind(Token Tok) {
if (Tok.is(tok::kw_if))
return OpenACCClauseKind::If;
+ // 'private' is also a keyword, make sure we parse it correctly.
+ if (Tok.is(tok::kw_private))
+ return OpenACCClauseKind::Private;
+
if (!Tok.is(tok::identifier))
return OpenACCClauseKind::Invalid;
return llvm::StringSwitch<OpenACCClauseKind>(
Tok.getIdentifierInfo()->getName())
+ .Case("async", OpenACCClauseKind::Async)
.Case("attach", OpenACCClauseKind::Attach)
.Case("auto", OpenACCClauseKind::Auto)
.Case("bind", OpenACCClauseKind::Bind)
.Case("create", OpenACCClauseKind::Create)
+ .Case("pcreate", OpenACCClauseKind::PCreate)
+ .Case("present_or_create", OpenACCClauseKind::PresentOrCreate)
.Case("collapse", OpenACCClauseKind::Collapse)
.Case("copy", OpenACCClauseKind::Copy)
+ .Case("pcopy", OpenACCClauseKind::PCopy)
+ .Case("present_or_copy", OpenACCClauseKind::PresentOrCopy)
.Case("copyin", OpenACCClauseKind::CopyIn)
+ .Case("pcopyin", OpenACCClauseKind::PCopyIn)
+ .Case("present_or_copyin", OpenACCClauseKind::PresentOrCopyIn)
.Case("copyout", OpenACCClauseKind::CopyOut)
+ .Case("pcopyout", OpenACCClauseKind::PCopyOut)
+ .Case("present_or_copyout", OpenACCClauseKind::PresentOrCopyOut)
.Case("default", OpenACCClauseKind::Default)
.Case("default_async", OpenACCClauseKind::DefaultAsync)
.Case("delete", OpenACCClauseKind::Delete)
@@ -109,6 +124,7 @@ OpenACCClauseKind getOpenACCClauseKind(Token Tok) {
.Case("dtype", OpenACCClauseKind::DType)
.Case("finalize", OpenACCClauseKind::Finalize)
.Case("firstprivate", OpenACCClauseKind::FirstPrivate)
+ .Case("gang", OpenACCClauseKind::Gang)
.Case("host", OpenACCClauseKind::Host)
.Case("if", OpenACCClauseKind::If)
.Case("if_present", OpenACCClauseKind::IfPresent)
@@ -123,9 +139,11 @@ OpenACCClauseKind getOpenACCClauseKind(Token Tok) {
.Case("reduction", OpenACCClauseKind::Reduction)
.Case("self", OpenACCClauseKind::Self)
.Case("seq", OpenACCClauseKind::Seq)
+ .Case("tile", OpenACCClauseKind::Tile)
.Case("use_device", OpenACCClauseKind::UseDevice)
.Case("vector", OpenACCClauseKind::Vector)
.Case("vector_length", OpenACCClauseKind::VectorLength)
+ .Case("wait", OpenACCClauseKind::Wait)
.Case("worker", OpenACCClauseKind::Worker)
.Default(OpenACCClauseKind::Invalid);
}
@@ -163,9 +181,14 @@ enum class OpenACCSpecialTokenKind {
Force,
Num,
Length,
+ Dim,
+ Static,
};
bool isOpenACCSpecialToken(OpenACCSpecialTokenKind Kind, Token Tok) {
+ if (Tok.is(tok::kw_static) && Kind == OpenACCSpecialTokenKind::Static)
+ return true;
+
if (!Tok.is(tok::identifier))
return false;
@@ -184,6 +207,10 @@ bool isOpenACCSpecialToken(OpenACCSpecialTokenKind Kind, Token Tok) {
return Tok.getIdentifierInfo()->isStr("num");
case OpenACCSpecialTokenKind::Length:
return Tok.getIdentifierInfo()->isStr("length");
+ case OpenACCSpecialTokenKind::Dim:
+ return Tok.getIdentifierInfo()->isStr("dim");
+ case OpenACCSpecialTokenKind::Static:
+ return Tok.getIdentifierInfo()->isStr("static");
}
llvm_unreachable("Unknown 'Kind' Passed");
}
@@ -312,7 +339,7 @@ OpenACCReductionOperator ParseReductionOperator(Parser &P) {
return OpenACCReductionOperator::Max;
if (ReductionKindTok.getIdentifierInfo()->isStr("min"))
return OpenACCReductionOperator::Min;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
P.Diag(ReductionKindTok, diag::err_acc_invalid_reduction_operator);
return OpenACCReductionOperator::Invalid;
@@ -459,16 +486,27 @@ ClauseParensKind getClauseParensKind(OpenACCDirectiveKind DirKind,
case OpenACCClauseKind::Self:
return DirKind == OpenACCDirectiveKind::Update ? ClauseParensKind::Required
: ClauseParensKind::Optional;
+ case OpenACCClauseKind::Async:
case OpenACCClauseKind::Worker:
case OpenACCClauseKind::Vector:
+ case OpenACCClauseKind::Gang:
+ case OpenACCClauseKind::Wait:
return ClauseParensKind::Optional;
case OpenACCClauseKind::Default:
case OpenACCClauseKind::If:
case OpenACCClauseKind::Create:
+ case OpenACCClauseKind::PCreate:
+ case OpenACCClauseKind::PresentOrCreate:
case OpenACCClauseKind::Copy:
+ case OpenACCClauseKind::PCopy:
+ case OpenACCClauseKind::PresentOrCopy:
case OpenACCClauseKind::CopyIn:
+ case OpenACCClauseKind::PCopyIn:
+ case OpenACCClauseKind::PresentOrCopyIn:
case OpenACCClauseKind::CopyOut:
+ case OpenACCClauseKind::PCopyOut:
+ case OpenACCClauseKind::PresentOrCopyOut:
case OpenACCClauseKind::UseDevice:
case OpenACCClauseKind::NoCreate:
case OpenACCClauseKind::Present:
@@ -492,6 +530,7 @@ ClauseParensKind getClauseParensKind(OpenACCDirectiveKind DirKind,
case OpenACCClauseKind::DefaultAsync:
case OpenACCClauseKind::DeviceType:
case OpenACCClauseKind::DType:
+ case OpenACCClauseKind::Tile:
return ClauseParensKind::Required;
case OpenACCClauseKind::Auto:
@@ -516,14 +555,6 @@ bool ClauseHasRequiredParens(OpenACCDirectiveKind DirKind,
return getClauseParensKind(DirKind, Kind) == ClauseParensKind::Required;
}
-ExprResult ParseOpenACCConditionalExpr(Parser &P) {
- // FIXME: It isn't clear if the spec saying 'condition' means the same as
- // it does in an if/while/etc (See ParseCXXCondition), however as it was
- // written with Fortran/C in mind, we're going to assume it just means an
- // 'expression evaluating to boolean'.
- return P.getActions().CorrectDelayedTyposInExpr(P.ParseExpression());
-}
-
// Skip until we see the end of pragma token, but don't consume it. This is us
// just giving up on the rest of the pragma so we can continue executing. We
// have to do this because 'SkipUntil' considers paren balancing, which isn't
@@ -533,14 +564,75 @@ void SkipUntilEndOfDirective(Parser &P) {
P.ConsumeAnyToken();
}
+bool doesDirectiveHaveAssociatedStmt(OpenACCDirectiveKind DirKind) {
+ switch (DirKind) {
+ default:
+ return false;
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Kernels:
+ case OpenACCDirectiveKind::Loop:
+ return true;
+ }
+ llvm_unreachable("Unhandled directive->assoc stmt");
+}
+
+unsigned getOpenACCScopeFlags(OpenACCDirectiveKind DirKind) {
+ switch (DirKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Kernels:
+ // Mark this as a BreakScope/ContinueScope as well as a compute construct
+ // so that we can diagnose trying to 'break'/'continue' inside of one.
+ return Scope::BreakScope | Scope::ContinueScope |
+ Scope::OpenACCComputeConstructScope;
+ case OpenACCDirectiveKind::Invalid:
+ llvm_unreachable("Shouldn't be creating a scope for an invalid construct");
+ default:
+ break;
+ }
+ return 0;
+}
+
} // namespace
+Parser::OpenACCClauseParseResult Parser::OpenACCCanContinue() {
+ return {nullptr, OpenACCParseCanContinue::Can};
+}
+
+Parser::OpenACCClauseParseResult Parser::OpenACCCannotContinue() {
+ return {nullptr, OpenACCParseCanContinue::Cannot};
+}
+
+Parser::OpenACCClauseParseResult Parser::OpenACCSuccess(OpenACCClause *Clause) {
+ return {Clause, OpenACCParseCanContinue::Can};
+}
+
+ExprResult Parser::ParseOpenACCConditionExpr() {
+ // FIXME: It isn't clear if the spec saying 'condition' means the same as
+ // it does in an if/while/etc (See ParseCXXCondition), however as it was
+ // written with Fortran/C in mind, we're going to assume it just means an
+ // 'expression evaluating to boolean'.
+ ExprResult ER = getActions().CorrectDelayedTyposInExpr(ParseExpression());
+
+ if (!ER.isUsable())
+ return ER;
+
+ Sema::ConditionResult R =
+ getActions().ActOnCondition(getCurScope(), ER.get()->getExprLoc(),
+ ER.get(), Sema::ConditionKind::Boolean);
+
+ return R.isInvalid() ? ExprError() : R.get().second;
+}
+
// OpenACC 3.3, section 1.7:
// To simplify the specification and convey appropriate constraint information,
// a pqr-list is a comma-separated list of pqr items. The one exception is a
// clause-list, which is a list of one or more clauses optionally separated by
// commas.
-void Parser::ParseOpenACCClauseList(OpenACCDirectiveKind DirKind) {
+SmallVector<OpenACCClause *>
+Parser::ParseOpenACCClauseList(OpenACCDirectiveKind DirKind) {
+ SmallVector<OpenACCClause *> Clauses;
bool FirstClause = true;
while (getCurToken().isNot(tok::annot_pragma_openacc_end)) {
// Comma is optional in a clause-list.
@@ -548,39 +640,65 @@ void Parser::ParseOpenACCClauseList(OpenACCDirectiveKind DirKind) {
ConsumeToken();
FirstClause = false;
- // Recovering from a bad clause is really difficult, so we just give up on
- // error.
- if (ParseOpenACCClause(DirKind)) {
+ OpenACCClauseParseResult Result = ParseOpenACCClause(Clauses, DirKind);
+ if (OpenACCClause *Clause = Result.getPointer()) {
+ Clauses.push_back(Clause);
+ } else if (Result.getInt() == OpenACCParseCanContinue::Cannot) {
+ // Recovering from a bad clause is really difficult, so we just give up on
+ // error.
SkipUntilEndOfDirective(*this);
- return;
+ return Clauses;
}
}
+ return Clauses;
}
-ExprResult Parser::ParseOpenACCIntExpr() {
- // FIXME: this is required to be an integer expression (or dependent), so we
- // should ensure that is the case by passing this to SEMA here.
- return getActions().CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+Parser::OpenACCIntExprParseResult
+Parser::ParseOpenACCIntExpr(OpenACCDirectiveKind DK, OpenACCClauseKind CK,
+ SourceLocation Loc) {
+ ExprResult ER = ParseAssignmentExpression();
+
+ // If the actual parsing failed, we don't know the state of the parse, so
+ // don't try to continue.
+ if (!ER.isUsable())
+ return {ER, OpenACCParseCanContinue::Cannot};
+
+ // Parsing can continue after the initial assignment expression parsing, so
+ // even if there was a typo, we can continue.
+ ER = getActions().CorrectDelayedTyposInExpr(ER);
+ if (!ER.isUsable())
+ return {ER, OpenACCParseCanContinue::Can};
+
+ return {getActions().OpenACC().ActOnIntExpr(DK, CK, Loc, ER.get()),
+ OpenACCParseCanContinue::Can};
}
-bool Parser::ParseOpenACCClauseVarList(OpenACCClauseKind Kind) {
- // FIXME: Future clauses will require 'special word' parsing, check for one,
- // then parse it based on whether it is a clause that requires a 'special
- // word'.
- (void)Kind;
+bool Parser::ParseOpenACCIntExprList(OpenACCDirectiveKind DK,
+ OpenACCClauseKind CK, SourceLocation Loc,
+ llvm::SmallVectorImpl<Expr *> &IntExprs) {
+ OpenACCIntExprParseResult CurResult = ParseOpenACCIntExpr(DK, CK, Loc);
- // If the var parsing fails, skip until the end of the directive as this is
- // an expression and gets messy if we try to continue otherwise.
- if (ParseOpenACCVar())
+ if (!CurResult.first.isUsable() &&
+ CurResult.second == OpenACCParseCanContinue::Cannot) {
+ SkipUntil(tok::r_paren, tok::annot_pragma_openacc_end,
+ Parser::StopBeforeMatch);
return true;
+ }
+
+ IntExprs.push_back(CurResult.first.get());
while (!getCurToken().isOneOf(tok::r_paren, tok::annot_pragma_openacc_end)) {
ExpectAndConsume(tok::comma);
- // If the var parsing fails, skip until the end of the directive as this is
- // an expression and gets messy if we try to continue otherwise.
- if (ParseOpenACCVar())
+ CurResult = ParseOpenACCIntExpr(DK, CK, Loc);
+
+ if (!CurResult.first.isUsable() &&
+ CurResult.second == OpenACCParseCanContinue::Cannot) {
+ SkipUntil(tok::r_paren, tok::annot_pragma_openacc_end,
+ Parser::StopBeforeMatch);
return true;
+ }
+ IntExprs.push_back(CurResult.first.get());
}
return false;
}
@@ -594,14 +712,16 @@ bool Parser::ParseOpenACCClauseVarList(OpenACCClauseKind Kind) {
/// device_type( device-type-list )
///
/// The device_type clause may be abbreviated to dtype.
-bool Parser::ParseOpenACCDeviceTypeList() {
+bool Parser::ParseOpenACCDeviceTypeList(
+ llvm::SmallVector<std::pair<IdentifierInfo *, SourceLocation>> &Archs) {
if (expectIdentifierOrKeyword(*this)) {
SkipUntil(tok::r_paren, tok::annot_pragma_openacc_end,
Parser::StopBeforeMatch);
- return false;
+ return true;
}
- ConsumeToken();
+ IdentifierInfo *Ident = getCurToken().getIdentifierInfo();
+ Archs.emplace_back(Ident, ConsumeToken());
while (!getCurToken().isOneOf(tok::r_paren, tok::annot_pragma_openacc_end)) {
ExpectAndConsume(tok::comma);
@@ -609,9 +729,108 @@ bool Parser::ParseOpenACCDeviceTypeList() {
if (expectIdentifierOrKeyword(*this)) {
SkipUntil(tok::r_paren, tok::annot_pragma_openacc_end,
Parser::StopBeforeMatch);
+ return true;
+ }
+ Ident = getCurToken().getIdentifierInfo();
+ Archs.emplace_back(Ident, ConsumeToken());
+ }
+ return false;
+}
+
+/// OpenACC 3.3 Section 2.9:
+/// size-expr is one of:
+// *
+// int-expr
+// Note that this is specified under 'gang-arg-list', but also applies to 'tile'
+// via reference.
+bool Parser::ParseOpenACCSizeExpr() {
+ // FIXME: Ensure these are constant expressions.
+
+ // The size-expr ends up being ambiguous when only looking at the current
+ // token, as it could be a deref of a variable/expression.
+ if (getCurToken().is(tok::star) &&
+ NextToken().isOneOf(tok::comma, tok::r_paren,
+ tok::annot_pragma_openacc_end)) {
+ ConsumeToken();
+ return false;
+ }
+
+ return getActions()
+ .CorrectDelayedTyposInExpr(ParseAssignmentExpression())
+ .isInvalid();
+}
+
+bool Parser::ParseOpenACCSizeExprList() {
+ if (ParseOpenACCSizeExpr()) {
+ SkipUntil(tok::r_paren, tok::annot_pragma_openacc_end,
+ Parser::StopBeforeMatch);
+ return false;
+ }
+
+ while (!getCurToken().isOneOf(tok::r_paren, tok::annot_pragma_openacc_end)) {
+ ExpectAndConsume(tok::comma);
+
+ if (ParseOpenACCSizeExpr()) {
+ SkipUntil(tok::r_paren, tok::annot_pragma_openacc_end,
+ Parser::StopBeforeMatch);
return false;
}
+ }
+ return false;
+}
+
+/// OpenACC 3.3 Section 2.9:
+///
+/// where gang-arg is one of:
+/// [num:]int-expr
+/// dim:int-expr
+/// static:size-expr
+bool Parser::ParseOpenACCGangArg(SourceLocation GangLoc) {
+
+ if (isOpenACCSpecialToken(OpenACCSpecialTokenKind::Static, getCurToken()) &&
+ NextToken().is(tok::colon)) {
+ // 'static' just takes a size-expr, which is an int-expr or an asterisk.
ConsumeToken();
+ ConsumeToken();
+ return ParseOpenACCSizeExpr();
+ }
+
+ if (isOpenACCSpecialToken(OpenACCSpecialTokenKind::Dim, getCurToken()) &&
+ NextToken().is(tok::colon)) {
+ ConsumeToken();
+ ConsumeToken();
+ return ParseOpenACCIntExpr(OpenACCDirectiveKind::Invalid,
+ OpenACCClauseKind::Gang, GangLoc)
+ .first.isInvalid();
+ }
+
+ if (isOpenACCSpecialToken(OpenACCSpecialTokenKind::Num, getCurToken()) &&
+ NextToken().is(tok::colon)) {
+ ConsumeToken();
+ ConsumeToken();
+ // Fallthrough to the 'int-expr' handling for when 'num' is omitted.
+ }
+ // This is just the 'num' case where 'num' is optional.
+ return ParseOpenACCIntExpr(OpenACCDirectiveKind::Invalid,
+ OpenACCClauseKind::Gang, GangLoc)
+ .first.isInvalid();
+}
+
+bool Parser::ParseOpenACCGangArgList(SourceLocation GangLoc) {
+ if (ParseOpenACCGangArg(GangLoc)) {
+ SkipUntil(tok::r_paren, tok::annot_pragma_openacc_end,
+ Parser::StopBeforeMatch);
+ return false;
+ }
+
+ while (!getCurToken().isOneOf(tok::r_paren, tok::annot_pragma_openacc_end)) {
+ ExpectAndConsume(tok::comma);
+
+ if (ParseOpenACCGangArg(GangLoc)) {
+ SkipUntil(tok::r_paren, tok::annot_pragma_openacc_end,
+ Parser::StopBeforeMatch);
+ return false;
+ }
}
return false;
}
@@ -621,183 +840,325 @@ bool Parser::ParseOpenACCDeviceTypeList() {
// really have its own grammar and each individual one has its own definition.
// However, they all are named with a single-identifier (or auto/default!)
// token, followed in some cases by either braces or parens.
-bool Parser::ParseOpenACCClause(OpenACCDirectiveKind DirKind) {
+Parser::OpenACCClauseParseResult
+Parser::ParseOpenACCClause(ArrayRef<const OpenACCClause *> ExistingClauses,
+ OpenACCDirectiveKind DirKind) {
// A number of clause names are actually keywords, so accept a keyword that
// can be converted to a name.
if (expectIdentifierOrKeyword(*this))
- return true;
+ return OpenACCCannotContinue();
OpenACCClauseKind Kind = getOpenACCClauseKind(getCurToken());
- if (Kind == OpenACCClauseKind::Invalid)
- return Diag(getCurToken(), diag::err_acc_invalid_clause)
- << getCurToken().getIdentifierInfo();
+ if (Kind == OpenACCClauseKind::Invalid) {
+ Diag(getCurToken(), diag::err_acc_invalid_clause)
+ << getCurToken().getIdentifierInfo();
+ return OpenACCCannotContinue();
+ }
// Consume the clause name.
- ConsumeToken();
+ SourceLocation ClauseLoc = ConsumeToken();
- return ParseOpenACCClauseParams(DirKind, Kind);
+ return ParseOpenACCClauseParams(ExistingClauses, DirKind, Kind, ClauseLoc);
}
-bool Parser::ParseOpenACCClauseParams(OpenACCDirectiveKind DirKind,
- OpenACCClauseKind Kind) {
+Parser::OpenACCClauseParseResult Parser::ParseOpenACCClauseParams(
+ ArrayRef<const OpenACCClause *> ExistingClauses,
+ OpenACCDirectiveKind DirKind, OpenACCClauseKind ClauseKind,
+ SourceLocation ClauseLoc) {
BalancedDelimiterTracker Parens(*this, tok::l_paren,
tok::annot_pragma_openacc_end);
+ SemaOpenACC::OpenACCParsedClause ParsedClause(DirKind, ClauseKind, ClauseLoc);
- if (ClauseHasRequiredParens(DirKind, Kind)) {
+ if (ClauseHasRequiredParens(DirKind, ClauseKind)) {
if (Parens.expectAndConsume()) {
// We are missing a paren, so assume that the person just forgot the
// parameter. Return 'false' so we try to continue on and parse the next
// clause.
SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openacc_end,
Parser::StopBeforeMatch);
- return false;
+ return OpenACCCanContinue();
}
+ ParsedClause.setLParenLoc(Parens.getOpenLocation());
- switch (Kind) {
+ switch (ClauseKind) {
case OpenACCClauseKind::Default: {
Token DefKindTok = getCurToken();
- if (expectIdentifierOrKeyword(*this))
- break;
+ if (expectIdentifierOrKeyword(*this)) {
+ Parens.skipToEnd();
+ return OpenACCCanContinue();
+ }
ConsumeToken();
- if (getOpenACCDefaultClauseKind(DefKindTok) ==
- OpenACCDefaultClauseKind::Invalid)
+ OpenACCDefaultClauseKind DefKind =
+ getOpenACCDefaultClauseKind(DefKindTok);
+
+ if (DefKind == OpenACCDefaultClauseKind::Invalid) {
Diag(DefKindTok, diag::err_acc_invalid_default_clause_kind);
+ Parens.skipToEnd();
+ return OpenACCCanContinue();
+ }
+ ParsedClause.setDefaultDetails(DefKind);
break;
}
case OpenACCClauseKind::If: {
- ExprResult CondExpr = ParseOpenACCConditionalExpr(*this);
- // An invalid expression can be just about anything, so just give up on
- // this clause list.
- if (CondExpr.isInvalid())
- return true;
+ ExprResult CondExpr = ParseOpenACCConditionExpr();
+ ParsedClause.setConditionDetails(CondExpr.isUsable() ? CondExpr.get()
+ : nullptr);
+
+ if (CondExpr.isInvalid()) {
+ Parens.skipToEnd();
+ return OpenACCCanContinue();
+ }
+
break;
}
case OpenACCClauseKind::CopyIn:
- tryParseAndConsumeSpecialTokenKind(
- *this, OpenACCSpecialTokenKind::ReadOnly, Kind);
- if (ParseOpenACCClauseVarList(Kind))
- return true;
+ case OpenACCClauseKind::PCopyIn:
+ case OpenACCClauseKind::PresentOrCopyIn: {
+ bool IsReadOnly = tryParseAndConsumeSpecialTokenKind(
+ *this, OpenACCSpecialTokenKind::ReadOnly, ClauseKind);
+ ParsedClause.setVarListDetails(ParseOpenACCVarList(ClauseKind),
+ IsReadOnly,
+ /*IsZero=*/false);
break;
+ }
case OpenACCClauseKind::Create:
+ case OpenACCClauseKind::PCreate:
+ case OpenACCClauseKind::PresentOrCreate:
case OpenACCClauseKind::CopyOut:
- tryParseAndConsumeSpecialTokenKind(*this, OpenACCSpecialTokenKind::Zero,
- Kind);
- if (ParseOpenACCClauseVarList(Kind))
- return true;
+ case OpenACCClauseKind::PCopyOut:
+ case OpenACCClauseKind::PresentOrCopyOut: {
+ bool IsZero = tryParseAndConsumeSpecialTokenKind(
+ *this, OpenACCSpecialTokenKind::Zero, ClauseKind);
+ ParsedClause.setVarListDetails(ParseOpenACCVarList(ClauseKind),
+ /*IsReadOnly=*/false, IsZero);
break;
- case OpenACCClauseKind::Reduction:
+ }
+ case OpenACCClauseKind::Reduction: {
// If we're missing a clause-kind (or it is invalid), see if we can parse
// the var-list anyway.
- ParseReductionOperator(*this);
- if (ParseOpenACCClauseVarList(Kind))
- return true;
+ OpenACCReductionOperator Op = ParseReductionOperator(*this);
+ ParsedClause.setReductionDetails(Op, ParseOpenACCVarList(ClauseKind));
break;
+ }
case OpenACCClauseKind::Self:
// The 'self' clause is a var-list instead of a 'condition' in the case of
    // the 'update' clause, so we have to handle it here. Use an assert to
// make sure we get the right differentiator.
assert(DirKind == OpenACCDirectiveKind::Update);
- LLVM_FALLTHROUGH;
- case OpenACCClauseKind::Attach:
- case OpenACCClauseKind::Copy:
+ [[fallthrough]];
case OpenACCClauseKind::Delete:
case OpenACCClauseKind::Detach:
case OpenACCClauseKind::Device:
case OpenACCClauseKind::DeviceResident:
- case OpenACCClauseKind::DevicePtr:
- case OpenACCClauseKind::FirstPrivate:
case OpenACCClauseKind::Host:
case OpenACCClauseKind::Link:
+ case OpenACCClauseKind::UseDevice:
+ ParseOpenACCVarList(ClauseKind);
+ break;
+ case OpenACCClauseKind::Attach:
+ case OpenACCClauseKind::DevicePtr:
+ ParsedClause.setVarListDetails(ParseOpenACCVarList(ClauseKind),
+ /*IsReadOnly=*/false, /*IsZero=*/false);
+ break;
+ case OpenACCClauseKind::Copy:
+ case OpenACCClauseKind::PCopy:
+ case OpenACCClauseKind::PresentOrCopy:
+ case OpenACCClauseKind::FirstPrivate:
case OpenACCClauseKind::NoCreate:
case OpenACCClauseKind::Present:
case OpenACCClauseKind::Private:
- case OpenACCClauseKind::UseDevice:
- if (ParseOpenACCClauseVarList(Kind))
- return true;
+ ParsedClause.setVarListDetails(ParseOpenACCVarList(ClauseKind),
+ /*IsReadOnly=*/false, /*IsZero=*/false);
break;
case OpenACCClauseKind::Collapse: {
tryParseAndConsumeSpecialTokenKind(*this, OpenACCSpecialTokenKind::Force,
- Kind);
+ ClauseKind);
ExprResult NumLoops =
getActions().CorrectDelayedTyposInExpr(ParseConstantExpression());
- if (NumLoops.isInvalid())
- return true;
+ if (NumLoops.isInvalid()) {
+ Parens.skipToEnd();
+ return OpenACCCanContinue();
+ }
break;
}
case OpenACCClauseKind::Bind: {
ExprResult BindArg = ParseOpenACCBindClauseArgument();
- if (BindArg.isInvalid())
- return true;
+ if (BindArg.isInvalid()) {
+ Parens.skipToEnd();
+ return OpenACCCanContinue();
+ }
+ break;
+ }
+ case OpenACCClauseKind::NumGangs: {
+ llvm::SmallVector<Expr *> IntExprs;
+
+ if (ParseOpenACCIntExprList(OpenACCDirectiveKind::Invalid,
+ OpenACCClauseKind::NumGangs, ClauseLoc,
+ IntExprs)) {
+ Parens.skipToEnd();
+ return OpenACCCanContinue();
+ }
+ ParsedClause.setIntExprDetails(std::move(IntExprs));
break;
}
- case OpenACCClauseKind::NumGangs:
case OpenACCClauseKind::NumWorkers:
case OpenACCClauseKind::DeviceNum:
case OpenACCClauseKind::DefaultAsync:
case OpenACCClauseKind::VectorLength: {
- ExprResult IntExpr = ParseOpenACCIntExpr();
- if (IntExpr.isInvalid())
- return true;
+ ExprResult IntExpr = ParseOpenACCIntExpr(OpenACCDirectiveKind::Invalid,
+ ClauseKind, ClauseLoc)
+ .first;
+ if (IntExpr.isInvalid()) {
+ Parens.skipToEnd();
+ return OpenACCCanContinue();
+ }
+
+ // TODO OpenACC: as we implement the 'rest' of the above, this 'if' should
+ // be removed leaving just the 'setIntExprDetails'.
+ if (ClauseKind == OpenACCClauseKind::NumWorkers ||
+ ClauseKind == OpenACCClauseKind::VectorLength)
+ ParsedClause.setIntExprDetails(IntExpr.get());
+
break;
}
case OpenACCClauseKind::DType:
- case OpenACCClauseKind::DeviceType:
+ case OpenACCClauseKind::DeviceType: {
+ llvm::SmallVector<std::pair<IdentifierInfo *, SourceLocation>> Archs;
if (getCurToken().is(tok::star)) {
// FIXME: We want to mark that this is an 'everything else' type of
// device_type in Sema.
- ConsumeToken();
- } else if (ParseOpenACCDeviceTypeList()) {
- return true;
+ ParsedClause.setDeviceTypeDetails({{nullptr, ConsumeToken()}});
+ } else if (!ParseOpenACCDeviceTypeList(Archs)) {
+ ParsedClause.setDeviceTypeDetails(std::move(Archs));
+ } else {
+ Parens.skipToEnd();
+ return OpenACCCanContinue();
+ }
+ break;
+ }
+ case OpenACCClauseKind::Tile:
+ if (ParseOpenACCSizeExprList()) {
+ Parens.skipToEnd();
+ return OpenACCCanContinue();
}
break;
default:
llvm_unreachable("Not a required parens type?");
}
- return Parens.consumeClose();
- } else if (ClauseHasOptionalParens(DirKind, Kind)) {
+ ParsedClause.setEndLoc(getCurToken().getLocation());
+
+ if (Parens.consumeClose())
+ return OpenACCCannotContinue();
+
+ } else if (ClauseHasOptionalParens(DirKind, ClauseKind)) {
if (!Parens.consumeOpen()) {
- switch (Kind) {
+ ParsedClause.setLParenLoc(Parens.getOpenLocation());
+ switch (ClauseKind) {
case OpenACCClauseKind::Self: {
assert(DirKind != OpenACCDirectiveKind::Update);
- ExprResult CondExpr = ParseOpenACCConditionalExpr(*this);
- // An invalid expression can be just about anything, so just give up on
- // this clause list.
- if (CondExpr.isInvalid())
- return true;
+ ExprResult CondExpr = ParseOpenACCConditionExpr();
+ ParsedClause.setConditionDetails(CondExpr.isUsable() ? CondExpr.get()
+ : nullptr);
+
+ if (CondExpr.isInvalid()) {
+ Parens.skipToEnd();
+ return OpenACCCanContinue();
+ }
break;
}
case OpenACCClauseKind::Vector:
case OpenACCClauseKind::Worker: {
tryParseAndConsumeSpecialTokenKind(*this,
- Kind == OpenACCClauseKind::Vector
+ ClauseKind ==
+ OpenACCClauseKind::Vector
? OpenACCSpecialTokenKind::Length
: OpenACCSpecialTokenKind::Num,
- Kind);
- ExprResult IntExpr = ParseOpenACCIntExpr();
- if (IntExpr.isInvalid())
- return true;
+ ClauseKind);
+ ExprResult IntExpr = ParseOpenACCIntExpr(OpenACCDirectiveKind::Invalid,
+ ClauseKind, ClauseLoc)
+ .first;
+ if (IntExpr.isInvalid()) {
+ Parens.skipToEnd();
+ return OpenACCCanContinue();
+ }
+ break;
+ }
+ case OpenACCClauseKind::Async: {
+ ExprResult AsyncArg =
+ ParseOpenACCAsyncArgument(OpenACCDirectiveKind::Invalid,
+ OpenACCClauseKind::Async, ClauseLoc)
+ .first;
+ ParsedClause.setIntExprDetails(AsyncArg.isUsable() ? AsyncArg.get()
+ : nullptr);
+ if (AsyncArg.isInvalid()) {
+ Parens.skipToEnd();
+ return OpenACCCanContinue();
+ }
+ break;
+ }
+ case OpenACCClauseKind::Gang:
+ if (ParseOpenACCGangArgList(ClauseLoc)) {
+ Parens.skipToEnd();
+ return OpenACCCanContinue();
+ }
+ break;
+ case OpenACCClauseKind::Wait: {
+ OpenACCWaitParseInfo Info =
+ ParseOpenACCWaitArgument(ClauseLoc,
+ /*IsDirective=*/false);
+ if (Info.Failed) {
+ Parens.skipToEnd();
+ return OpenACCCanContinue();
+ }
+
+ ParsedClause.setWaitDetails(Info.DevNumExpr, Info.QueuesLoc,
+ std::move(Info.QueueIdExprs));
break;
}
default:
llvm_unreachable("Not an optional parens type?");
}
- Parens.consumeClose();
+ ParsedClause.setEndLoc(getCurToken().getLocation());
+ if (Parens.consumeClose())
+ return OpenACCCannotContinue();
+ } else {
+ // If we have optional parens, make sure we set the end-location to the
+ // clause, as we are a 'single token' clause.
+ ParsedClause.setEndLoc(ClauseLoc);
}
+ } else {
+ ParsedClause.setEndLoc(ClauseLoc);
}
- return false;
+ return OpenACCSuccess(
+ Actions.OpenACC().ActOnClause(ExistingClauses, ParsedClause));
+}
+
+/// OpenACC 3.3 section 2.16:
+/// In this section and throughout the specification, the term async-argument
+/// means a nonnegative scalar integer expression (int for C or C++, integer for
+/// Fortran), or one of the special values acc_async_noval or acc_async_sync, as
+/// defined in the C header file and the Fortran openacc module. The special
+/// values are negative values, so as not to conflict with a user-specified
+/// nonnegative async-argument.
+Parser::OpenACCIntExprParseResult
+Parser::ParseOpenACCAsyncArgument(OpenACCDirectiveKind DK, OpenACCClauseKind CK,
+ SourceLocation Loc) {
+ return ParseOpenACCIntExpr(DK, CK, Loc);
}
/// OpenACC 3.3, section 2.16:
/// In this section and throughout the specification, the term wait-argument
/// means:
/// [ devnum : int-expr : ] [ queues : ] async-argument-list
-bool Parser::ParseOpenACCWaitArgument() {
+Parser::OpenACCWaitParseInfo
+Parser::ParseOpenACCWaitArgument(SourceLocation Loc, bool IsDirective) {
+ OpenACCWaitParseInfo Result;
// [devnum : int-expr : ]
if (isOpenACCSpecialToken(OpenACCSpecialTokenKind::DevNum, Tok) &&
NextToken().is(tok::colon)) {
@@ -806,20 +1167,30 @@ bool Parser::ParseOpenACCWaitArgument() {
// Consume colon.
ConsumeToken();
- ExprResult IntExpr =
- getActions().CorrectDelayedTyposInExpr(ParseAssignmentExpression());
- if (IntExpr.isInvalid())
- return true;
+ OpenACCIntExprParseResult Res = ParseOpenACCIntExpr(
+ IsDirective ? OpenACCDirectiveKind::Wait
+ : OpenACCDirectiveKind::Invalid,
+ IsDirective ? OpenACCClauseKind::Invalid : OpenACCClauseKind::Wait,
+ Loc);
+ if (Res.first.isInvalid() &&
+ Res.second == OpenACCParseCanContinue::Cannot) {
+ Result.Failed = true;
+ return Result;
+ }
- if (ExpectAndConsume(tok::colon))
- return true;
+ if (ExpectAndConsume(tok::colon)) {
+ Result.Failed = true;
+ return Result;
+ }
+
+ Result.DevNumExpr = Res.first.get();
}
// [ queues : ]
if (isOpenACCSpecialToken(OpenACCSpecialTokenKind::Queues, Tok) &&
NextToken().is(tok::colon)) {
// Consume queues.
- ConsumeToken();
+ Result.QueuesLoc = ConsumeToken();
// Consume colon.
ConsumeToken();
}
@@ -828,30 +1199,32 @@ bool Parser::ParseOpenACCWaitArgument() {
// the term 'async-argument' means a nonnegative scalar integer expression, or
// one of the special values 'acc_async_noval' or 'acc_async_sync', as defined
  // in the C header file and the Fortran openacc module.
- //
- // We are parsing this simply as list of assignment expressions (to avoid
- // comma being troublesome), and will ensure it is an integral type. The
- // 'special' types are defined as macros, so we can't really check those
- // (other than perhaps as values at one point?), but the standard does say it
- // is implementation-defined to use any other negative value.
- //
- //
bool FirstArg = true;
while (!getCurToken().isOneOf(tok::r_paren, tok::annot_pragma_openacc_end)) {
if (!FirstArg) {
- if (ExpectAndConsume(tok::comma))
- return true;
+ if (ExpectAndConsume(tok::comma)) {
+ Result.Failed = true;
+ return Result;
+ }
}
FirstArg = false;
- ExprResult CurArg =
- getActions().CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+ OpenACCIntExprParseResult Res = ParseOpenACCAsyncArgument(
+ IsDirective ? OpenACCDirectiveKind::Wait
+ : OpenACCDirectiveKind::Invalid,
+ IsDirective ? OpenACCClauseKind::Invalid : OpenACCClauseKind::Wait,
+ Loc);
- if (CurArg.isInvalid())
- return true;
+ if (Res.first.isInvalid() &&
+ Res.second == OpenACCParseCanContinue::Cannot) {
+ Result.Failed = true;
+ return Result;
+ }
+
+ Result.QueueIdExprs.push_back(Res.first.get());
}
- return false;
+ return Result;
}
ExprResult Parser::ParseOpenACCIDExpression() {
@@ -905,16 +1278,51 @@ ExprResult Parser::ParseOpenACCBindClauseArgument() {
/// OpenACC 3.3, section 1.6:
/// In this spec, a 'var' (in italics) is one of the following:
-/// - a variable name (a scalar, array, or compisite variable name)
+/// - a variable name (a scalar, array, or composite variable name)
/// - a subarray specification with subscript ranges
/// - an array element
/// - a member of a composite variable
/// - a common block name between slashes (fortran only)
-bool Parser::ParseOpenACCVar() {
+Parser::OpenACCVarParseResult Parser::ParseOpenACCVar(OpenACCClauseKind CK) {
OpenACCArraySectionRAII ArraySections(*this);
- ExprResult Res =
- getActions().CorrectDelayedTyposInExpr(ParseAssignmentExpression());
- return Res.isInvalid();
+
+ ExprResult Res = ParseAssignmentExpression();
+ if (!Res.isUsable())
+ return {Res, OpenACCParseCanContinue::Cannot};
+
+ Res = getActions().CorrectDelayedTyposInExpr(Res.get());
+ if (!Res.isUsable())
+ return {Res, OpenACCParseCanContinue::Can};
+
+ Res = getActions().OpenACC().ActOnVar(CK, Res.get());
+
+ return {Res, OpenACCParseCanContinue::Can};
+}
+
+llvm::SmallVector<Expr *> Parser::ParseOpenACCVarList(OpenACCClauseKind CK) {
+ llvm::SmallVector<Expr *> Vars;
+
+ auto [Res, CanContinue] = ParseOpenACCVar(CK);
+ if (Res.isUsable()) {
+ Vars.push_back(Res.get());
+ } else if (CanContinue == OpenACCParseCanContinue::Cannot) {
+ SkipUntil(tok::r_paren, tok::annot_pragma_openacc_end, StopBeforeMatch);
+ return Vars;
+ }
+
+ while (!getCurToken().isOneOf(tok::r_paren, tok::annot_pragma_openacc_end)) {
+ ExpectAndConsume(tok::comma);
+
+ auto [Res, CanContinue] = ParseOpenACCVar(CK);
+
+ if (Res.isUsable()) {
+ Vars.push_back(Res.get());
+ } else if (CanContinue == OpenACCParseCanContinue::Cannot) {
+ SkipUntil(tok::r_paren, tok::annot_pragma_openacc_end, StopBeforeMatch);
+ return Vars;
+ }
+ }
+ return Vars;
}
/// OpenACC 3.3, section 2.10:
@@ -937,29 +1345,19 @@ void Parser::ParseOpenACCCacheVarList() {
// Sema/AST generation.
}
- bool FirstArray = true;
- while (!getCurToken().isOneOf(tok::r_paren, tok::annot_pragma_openacc_end)) {
- if (!FirstArray)
- ExpectAndConsume(tok::comma);
- FirstArray = false;
-
- // OpenACC 3.3, section 2.10:
- // A 'var' in a cache directive must be a single array element or a simple
- // subarray. In C and C++, a simple subarray is an array name followed by
- // an extended array range specification in brackets, with a start and
- // length such as:
- //
- // arr[lower:length]
- //
- if (ParseOpenACCVar())
- SkipUntil(tok::r_paren, tok::annot_pragma_openacc_end, tok::comma,
- StopBeforeMatch);
- }
+ // ParseOpenACCVarList should leave us before a r-paren, so no need to skip
+ // anything here.
+ ParseOpenACCVarList(OpenACCClauseKind::Invalid);
}
-void Parser::ParseOpenACCDirective() {
+Parser::OpenACCDirectiveParseInfo
+Parser::ParseOpenACCDirective() {
+ SourceLocation StartLoc = ConsumeAnnotationToken();
+ SourceLocation DirLoc = getCurToken().getLocation();
OpenACCDirectiveKind DirKind = ParseOpenACCDirectiveKind(*this);
+ getActions().OpenACC().ActOnConstruct(DirKind, DirLoc);
+
// Once we've parsed the construct/directive name, some have additional
// specifiers that need to be taken care of. Atomic has an 'atomic-clause'
// that needs to be parsed.
@@ -997,7 +1395,7 @@ void Parser::ParseOpenACCDirective() {
break;
case OpenACCDirectiveKind::Wait:
// OpenACC has an optional paren-wrapped 'wait-argument'.
- if (ParseOpenACCWaitArgument())
+ if (ParseOpenACCWaitArgument(DirLoc, /*IsDirective=*/true).Failed)
T.skipToEnd();
else
T.consumeClose();
@@ -1010,13 +1408,18 @@ void Parser::ParseOpenACCDirective() {
Diag(Tok, diag::err_expected) << tok::l_paren;
}
- // Parses the list of clauses, if present.
- ParseOpenACCClauseList(DirKind);
+ // Parses the list of clauses, if present, plus set up return value.
+ OpenACCDirectiveParseInfo ParseInfo{DirKind, StartLoc, DirLoc,
+ SourceLocation{},
+ ParseOpenACCClauseList(DirKind)};
- Diag(getCurToken(), diag::warn_pragma_acc_unimplemented);
assert(Tok.is(tok::annot_pragma_openacc_end) &&
"Didn't parse all OpenACC Clauses");
- ConsumeAnnotationToken();
+ ParseInfo.EndLoc = ConsumeAnnotationToken();
+ assert(ParseInfo.EndLoc.isValid() &&
+ "Terminating annotation token not present");
+
+ return ParseInfo;
}
// Parse OpenACC directive on a declaration.
@@ -1024,11 +1427,15 @@ Parser::DeclGroupPtrTy Parser::ParseOpenACCDirectiveDecl() {
assert(Tok.is(tok::annot_pragma_openacc) && "expected OpenACC Start Token");
ParsingOpenACCDirectiveRAII DirScope(*this);
- ConsumeAnnotationToken();
- ParseOpenACCDirective();
+ OpenACCDirectiveParseInfo DirInfo = ParseOpenACCDirective();
+
+ if (getActions().OpenACC().ActOnStartDeclDirective(DirInfo.DirKind,
+ DirInfo.StartLoc))
+ return nullptr;
- return nullptr;
+ // TODO OpenACC: Do whatever decl parsing is required here.
+ return DeclGroupPtrTy::make(getActions().OpenACC().ActOnEndDeclDirective());
}
// Parse OpenACC Directive on a Statement.
@@ -1036,9 +1443,24 @@ StmtResult Parser::ParseOpenACCDirectiveStmt() {
assert(Tok.is(tok::annot_pragma_openacc) && "expected OpenACC Start Token");
ParsingOpenACCDirectiveRAII DirScope(*this);
- ConsumeAnnotationToken();
- ParseOpenACCDirective();
+ OpenACCDirectiveParseInfo DirInfo = ParseOpenACCDirective();
+ if (getActions().OpenACC().ActOnStartStmtDirective(DirInfo.DirKind,
+ DirInfo.StartLoc))
+ return StmtError();
+
+ StmtResult AssocStmt;
+ SemaOpenACC::AssociatedStmtRAII AssocStmtRAII(getActions().OpenACC(),
+ DirInfo.DirKind);
+ if (doesDirectiveHaveAssociatedStmt(DirInfo.DirKind)) {
+ ParsingOpenACCDirectiveRAII DirScope(*this, /*Value=*/false);
+ ParseScope ACCScope(this, getOpenACCScopeFlags(DirInfo.DirKind));
+
+ AssocStmt = getActions().OpenACC().ActOnAssociatedStmt(
+ DirInfo.StartLoc, DirInfo.DirKind, ParseStatement());
+ }
- return StmtEmpty();
+ return getActions().OpenACC().ActOnEndStmtDirective(
+ DirInfo.DirKind, DirInfo.StartLoc, DirInfo.DirLoc, DirInfo.EndLoc,
+ DirInfo.Clauses, AssocStmt);
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp b/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
index da5f6605c6ff..f5b44d210680 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
@@ -21,9 +21,11 @@
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Scope.h"
-#include "llvm/ADT/PointerIntPair.h"
+#include "clang/Sema/SemaAMDGPU.h"
+#include "clang/Sema/SemaCodeCompletion.h"
+#include "clang/Sema/SemaOpenMP.h"
+#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/UniqueVector.h"
#include "llvm/Frontend/OpenMP/OMPAssume.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
#include <optional>
@@ -87,7 +89,7 @@ public:
DeclDirectiveListParserHelper(Parser *P, OpenMPDirectiveKind Kind)
: P(P), Kind(Kind) {}
void operator()(CXXScopeSpec &SS, DeclarationNameInfo NameInfo) {
- ExprResult Res = P->getActions().ActOnOpenMPIdExpression(
+ ExprResult Res = P->getActions().OpenMP().ActOnOpenMPIdExpression(
P->getCurScope(), SS, NameInfo, Kind);
if (Res.isUsable())
Identifiers.push_back(Res.get());
@@ -322,8 +324,8 @@ Parser::ParseOpenMPDeclareReductionDirective(AccessSpecifier AS) {
SourceRange Range;
TypeResult TR = ParseTypeName(&Range, DeclaratorContext::Prototype, AS);
if (TR.isUsable()) {
- QualType ReductionType =
- Actions.ActOnOpenMPDeclareReductionType(Range.getBegin(), TR);
+ QualType ReductionType = Actions.OpenMP().ActOnOpenMPDeclareReductionType(
+ Range.getBegin(), TR);
if (!ReductionType.isNull()) {
ReductionTypes.push_back(
std::make_pair(ReductionType, Range.getBegin()));
@@ -363,8 +365,10 @@ Parser::ParseOpenMPDeclareReductionDirective(AccessSpecifier AS) {
return DeclGroupPtrTy();
}
- DeclGroupPtrTy DRD = Actions.ActOnOpenMPDeclareReductionDirectiveStart(
- getCurScope(), Actions.getCurLexicalContext(), Name, ReductionTypes, AS);
+ DeclGroupPtrTy DRD =
+ Actions.OpenMP().ActOnOpenMPDeclareReductionDirectiveStart(
+ getCurScope(), Actions.getCurLexicalContext(), Name, ReductionTypes,
+ AS);
// Parse <combiner> expression and then parse initializer if any for each
// correct type.
@@ -375,10 +379,11 @@ Parser::ParseOpenMPDeclareReductionDirective(AccessSpecifier AS) {
Scope::CompoundStmtScope |
Scope::OpenMPDirectiveScope);
// Parse <combiner> expression.
- Actions.ActOnOpenMPDeclareReductionCombinerStart(getCurScope(), D);
+ Actions.OpenMP().ActOnOpenMPDeclareReductionCombinerStart(getCurScope(), D);
ExprResult CombinerResult = Actions.ActOnFinishFullExpr(
ParseExpression().get(), D->getLocation(), /*DiscardedValue*/ false);
- Actions.ActOnOpenMPDeclareReductionCombinerEnd(D, CombinerResult.get());
+ Actions.OpenMP().ActOnOpenMPDeclareReductionCombinerEnd(
+ D, CombinerResult.get());
if (CombinerResult.isInvalid() && Tok.isNot(tok::r_paren) &&
Tok.isNot(tok::annot_pragma_openmp_end)) {
@@ -411,8 +416,8 @@ Parser::ParseOpenMPDeclareReductionDirective(AccessSpecifier AS) {
Scope::OpenMPDirectiveScope);
// Parse expression.
VarDecl *OmpPrivParm =
- Actions.ActOnOpenMPDeclareReductionInitializerStart(getCurScope(),
- D);
+ Actions.OpenMP().ActOnOpenMPDeclareReductionInitializerStart(
+ getCurScope(), D);
// Check if initializer is omp_priv <init_expr> or something else.
if (Tok.is(tok::identifier) &&
Tok.getIdentifierInfo()->isStr("omp_priv")) {
@@ -423,7 +428,7 @@ Parser::ParseOpenMPDeclareReductionDirective(AccessSpecifier AS) {
ParseAssignmentExpression().get(), D->getLocation(),
/*DiscardedValue*/ false);
}
- Actions.ActOnOpenMPDeclareReductionInitializerEnd(
+ Actions.OpenMP().ActOnOpenMPDeclareReductionInitializerEnd(
D, InitializerResult.get(), OmpPrivParm);
if (InitializerResult.isInvalid() && Tok.isNot(tok::r_paren) &&
Tok.isNot(tok::annot_pragma_openmp_end)) {
@@ -444,8 +449,8 @@ Parser::ParseOpenMPDeclareReductionDirective(AccessSpecifier AS) {
else
TPA.Commit();
}
- return Actions.ActOnOpenMPDeclareReductionDirectiveEnd(getCurScope(), DRD,
- IsCorrect);
+ return Actions.OpenMP().ActOnOpenMPDeclareReductionDirectiveEnd(
+ getCurScope(), DRD, IsCorrect);
}
void Parser::ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm) {
@@ -456,7 +461,8 @@ void Parser::ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteInitializer(getCurScope(), OmpPrivParm);
+ Actions.CodeCompletion().CodeCompleteInitializer(getCurScope(),
+ OmpPrivParm);
Actions.FinalizeDeclaration(OmpPrivParm);
return;
}
@@ -480,9 +486,10 @@ void Parser::ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm) {
SourceLocation LParLoc = T.getOpenLocation();
auto RunSignatureHelp = [this, OmpPrivParm, LParLoc, &Exprs]() {
- QualType PreferredType = Actions.ProduceConstructorSignatureHelp(
- OmpPrivParm->getType()->getCanonicalTypeInternal(),
- OmpPrivParm->getLocation(), Exprs, LParLoc, /*Braced=*/false);
+ QualType PreferredType =
+ Actions.CodeCompletion().ProduceConstructorSignatureHelp(
+ OmpPrivParm->getType()->getCanonicalTypeInternal(),
+ OmpPrivParm->getLocation(), Exprs, LParLoc, /*Braced=*/false);
CalledSignatureHelp = true;
return PreferredType;
};
@@ -569,8 +576,8 @@ Parser::ParseOpenMPDeclareMapperDirective(AccessSpecifier AS) {
SourceRange Range;
TypeResult ParsedType = parseOpenMPDeclareMapperVarDecl(Range, VName, AS);
if (ParsedType.isUsable())
- MapperType =
- Actions.ActOnOpenMPDeclareMapperType(Range.getBegin(), ParsedType);
+ MapperType = Actions.OpenMP().ActOnOpenMPDeclareMapperType(Range.getBegin(),
+ ParsedType);
if (MapperType.isNull())
IsCorrect = false;
if (!IsCorrect) {
@@ -591,11 +598,13 @@ Parser::ParseOpenMPDeclareMapperDirective(AccessSpecifier AS) {
unsigned ScopeFlags = Scope::FnScope | Scope::DeclScope |
Scope::CompoundStmtScope | Scope::OpenMPDirectiveScope;
ParseScope OMPDirectiveScope(this, ScopeFlags);
- Actions.StartOpenMPDSABlock(OMPD_declare_mapper, DirName, getCurScope(), Loc);
+ Actions.OpenMP().StartOpenMPDSABlock(OMPD_declare_mapper, DirName,
+ getCurScope(), Loc);
// Add the mapper variable declaration.
- ExprResult MapperVarRef = Actions.ActOnOpenMPDeclareMapperDirectiveVarDecl(
- getCurScope(), MapperType, Range.getBegin(), VName);
+ ExprResult MapperVarRef =
+ Actions.OpenMP().ActOnOpenMPDeclareMapperDirectiveVarDecl(
+ getCurScope(), MapperType, Range.getBegin(), VName);
// Parse map clauses.
SmallVector<OMPClause *, 6> Clauses;
@@ -603,7 +612,7 @@ Parser::ParseOpenMPDeclareMapperDirective(AccessSpecifier AS) {
OpenMPClauseKind CKind = Tok.isAnnotation()
? OMPC_unknown
: getOpenMPClauseKind(PP.getSpelling(Tok));
- Actions.StartOpenMPClause(CKind);
+ Actions.OpenMP().StartOpenMPClause(CKind);
OMPClause *Clause =
ParseOpenMPClause(OMPD_declare_mapper, CKind, Clauses.empty());
if (Clause)
@@ -613,7 +622,7 @@ Parser::ParseOpenMPDeclareMapperDirective(AccessSpecifier AS) {
// Skip ',' if any.
if (Tok.is(tok::comma))
ConsumeToken();
- Actions.EndOpenMPClause();
+ Actions.OpenMP().EndOpenMPClause();
}
if (Clauses.empty()) {
Diag(Tok, diag::err_omp_expected_clause)
@@ -622,9 +631,9 @@ Parser::ParseOpenMPDeclareMapperDirective(AccessSpecifier AS) {
}
// Exit scope.
- Actions.EndOpenMPDSABlock(nullptr);
+ Actions.OpenMP().EndOpenMPDSABlock(nullptr);
OMPDirectiveScope.Exit();
- DeclGroupPtrTy DG = Actions.ActOnOpenMPDeclareMapperDirective(
+ DeclGroupPtrTy DG = Actions.OpenMP().ActOnOpenMPDeclareMapperDirective(
getCurScope(), Actions.getCurLexicalContext(), MapperId, MapperType,
Range.getBegin(), VName, AS, MapperVarRef.get(), Clauses);
if (!IsCorrect)
@@ -652,7 +661,8 @@ TypeResult Parser::parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
}
Name = Actions.GetNameForDeclarator(DeclaratorInfo).getName();
- return Actions.ActOnOpenMPDeclareMapperVarDecl(getCurScope(), DeclaratorInfo);
+ return Actions.OpenMP().ActOnOpenMPDeclareMapperVarDecl(getCurScope(),
+ DeclaratorInfo);
}
namespace {
@@ -733,7 +743,7 @@ static bool parseDeclareSimdClauses(
BS = Out;
BSRange = SourceRange(Tok.getLocation(), Tok.getEndLoc());
P.ConsumeToken();
- } else if (ClauseName.equals("simdlen")) {
+ } else if (ClauseName == "simdlen") {
if (SimdLen.isUsable()) {
P.Diag(Tok, diag::err_omp_more_one_clause)
<< getOpenMPDirectiveName(OMPD_declare_simd) << ClauseName << 0;
@@ -748,7 +758,7 @@ static bool parseDeclareSimdClauses(
OpenMPClauseKind CKind = getOpenMPClauseKind(ClauseName);
if (CKind == OMPC_uniform || CKind == OMPC_aligned ||
CKind == OMPC_linear) {
- Sema::OpenMPVarListDataTy Data;
+ SemaOpenMP::OpenMPVarListDataTy Data;
SmallVectorImpl<Expr *> *Vars = &Uniforms;
if (CKind == OMPC_aligned) {
Vars = &Aligneds;
@@ -768,7 +778,7 @@ static bool parseDeclareSimdClauses(
assert(0 <= Data.ExtraModifier &&
Data.ExtraModifier <= OMPC_LINEAR_unknown &&
"Unexpected linear modifier.");
- if (P.getActions().CheckOpenMPLinearModifier(
+ if (P.getActions().OpenMP().CheckOpenMPLinearModifier(
static_cast<OpenMPLinearClauseKind>(Data.ExtraModifier),
Data.ExtraModifierLoc))
Data.ExtraModifier = OMPC_LINEAR_val;
@@ -816,7 +826,7 @@ Parser::ParseOMPDeclareSimdClauses(Parser::DeclGroupPtrTy Ptr,
SourceLocation EndLoc = ConsumeAnnotationToken();
if (IsError)
return Ptr;
- return Actions.ActOnOpenMPDeclareSimdDirective(
+ return Actions.OpenMP().ActOnOpenMPDeclareSimdDirective(
Ptr, BS, Simdlen.get(), Uniforms, Aligneds, Alignments, Linears,
LinModifiers, Steps, SourceRange(Loc, EndLoc));
}
@@ -1099,7 +1109,7 @@ static ExprResult parseContextScore(Parser &P) {
llvm::SmallString<16> Buffer;
StringRef SelectorName =
P.getPreprocessor().getSpelling(P.getCurToken(), Buffer);
- if (!SelectorName.equals("score"))
+ if (SelectorName != "score")
return ScoreExpr;
(void)P.ConsumeToken();
SourceLocation RLoc;
@@ -1412,7 +1422,8 @@ void Parser::ParseOMPDeclareVariantClauses(Parser::DeclGroupPtrTy Ptr,
return;
}
- OMPTraitInfo *ParentTI = Actions.getOMPTraitInfoForSurroundingScope();
+ OMPTraitInfo *ParentTI =
+ Actions.OpenMP().getOMPTraitInfoForSurroundingScope();
ASTContext &ASTCtx = Actions.getASTContext();
OMPTraitInfo &TI = ASTCtx.getNewOMPTraitInfo();
SmallVector<Expr *, 6> AdjustNothing;
@@ -1445,7 +1456,7 @@ void Parser::ParseOMPDeclareVariantClauses(Parser::DeclGroupPtrTy Ptr,
case OMPC_adjust_args: {
AdjustArgsLoc = Tok.getLocation();
ConsumeToken();
- Sema::OpenMPVarListDataTy Data;
+ SemaOpenMP::OpenMPVarListDataTy Data;
SmallVector<Expr *> Vars;
IsError = ParseOpenMPVarList(OMPD_declare_variant, OMPC_adjust_args,
Vars, Data);
@@ -1486,12 +1497,12 @@ void Parser::ParseOMPDeclareVariantClauses(Parser::DeclGroupPtrTy Ptr,
}
std::optional<std::pair<FunctionDecl *, Expr *>> DeclVarData =
- Actions.checkOpenMPDeclareVariantFunction(
+ Actions.OpenMP().checkOpenMPDeclareVariantFunction(
Ptr, AssociatedFunction.get(), TI, AppendArgs.size(),
SourceRange(Loc, Tok.getLocation()));
if (DeclVarData && !TI.Sets.empty())
- Actions.ActOnOpenMPDeclareVariantDirective(
+ Actions.OpenMP().ActOnOpenMPDeclareVariantDirective(
DeclVarData->first, DeclVarData->second, TI, AdjustNothing,
AdjustNeedDevicePtr, AppendArgs, AdjustArgsLoc, AppendArgsLoc,
SourceRange(Loc, Tok.getLocation()));
@@ -1635,29 +1646,27 @@ bool Parser::parseOMPDeclareVariantMatchClause(SourceLocation Loc,
void Parser::ParseOpenMPClauses(OpenMPDirectiveKind DKind,
SmallVectorImpl<OMPClause *> &Clauses,
SourceLocation Loc) {
- SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>,
- llvm::omp::Clause_enumSize + 1>
- FirstClauses(llvm::omp::Clause_enumSize + 1);
+ std::bitset<llvm::omp::Clause_enumSize + 1> SeenClauses;
while (Tok.isNot(tok::annot_pragma_openmp_end)) {
OpenMPClauseKind CKind = Tok.isAnnotation()
? OMPC_unknown
: getOpenMPClauseKind(PP.getSpelling(Tok));
- Actions.StartOpenMPClause(CKind);
- OMPClause *Clause = ParseOpenMPClause(
- DKind, CKind, !FirstClauses[unsigned(CKind)].getInt());
+ Actions.OpenMP().StartOpenMPClause(CKind);
+ OMPClause *Clause =
+ ParseOpenMPClause(DKind, CKind, !SeenClauses[unsigned(CKind)]);
SkipUntil(tok::comma, tok::identifier, tok::annot_pragma_openmp_end,
StopBeforeMatch);
- FirstClauses[unsigned(CKind)].setInt(true);
+ SeenClauses[unsigned(CKind)] = true;
if (Clause != nullptr)
Clauses.push_back(Clause);
if (Tok.is(tok::annot_pragma_openmp_end)) {
- Actions.EndOpenMPClause();
+ Actions.OpenMP().EndOpenMPClause();
break;
}
// Skip ',' if any.
if (Tok.is(tok::comma))
ConsumeToken();
- Actions.EndOpenMPClause();
+ Actions.OpenMP().EndOpenMPClause();
}
}
@@ -1750,12 +1759,13 @@ void Parser::ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind,
Assumptions.push_back(Assumption);
}
- Actions.ActOnOpenMPAssumesDirective(Loc, DKind, Assumptions, SkippedClauses);
+ Actions.OpenMP().ActOnOpenMPAssumesDirective(Loc, DKind, Assumptions,
+ SkippedClauses);
}
void Parser::ParseOpenMPEndAssumesDirective(SourceLocation Loc) {
- if (Actions.isInOpenMPAssumeScope())
- Actions.ActOnOpenMPEndAssumesDirective();
+ if (Actions.OpenMP().isInOpenMPAssumeScope())
+ Actions.OpenMP().ActOnOpenMPEndAssumesDirective();
else
Diag(Loc, diag::err_expected_begin_assumes);
}
@@ -1811,7 +1821,7 @@ parseOpenMPSimpleClause(Parser &P, OpenMPClauseKind Kind) {
}
void Parser::ParseOMPDeclareTargetClauses(
- Sema::DeclareTargetContextInfo &DTCI) {
+ SemaOpenMP::DeclareTargetContextInfo &DTCI) {
SourceLocation DeviceTypeLoc;
bool RequiresToOrLinkOrIndirectClause = false;
bool HasToOrLinkOrIndirectClause = false;
@@ -1910,11 +1920,11 @@ void Parser::ParseOMPDeclareTargetClauses(
if (DTCI.Kind == OMPD_declare_target || HasIdentifier) {
auto &&Callback = [this, MT, &DTCI](CXXScopeSpec &SS,
DeclarationNameInfo NameInfo) {
- NamedDecl *ND =
- Actions.lookupOpenMPDeclareTargetName(getCurScope(), SS, NameInfo);
+ NamedDecl *ND = Actions.OpenMP().lookupOpenMPDeclareTargetName(
+ getCurScope(), SS, NameInfo);
if (!ND)
return;
- Sema::DeclareTargetContextInfo::MapInfo MI{MT, NameInfo.getLoc()};
+ SemaOpenMP::DeclareTargetContextInfo::MapInfo MI{MT, NameInfo.getLoc()};
bool FirstMapping = DTCI.ExplicitlyMapped.try_emplace(ND, MI).second;
if (!FirstMapping)
Diag(NameInfo.getLoc(), diag::err_omp_declare_target_multiple)
@@ -2090,8 +2100,8 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
skipUntilPragmaOpenMPEnd(DKind);
// Skip the last annot_pragma_openmp_end.
ConsumeAnnotationToken();
- return Actions.ActOnOpenMPThreadprivateDirective(Loc,
- Helper.getIdentifiers());
+ return Actions.OpenMP().ActOnOpenMPThreadprivateDirective(
+ Loc, Helper.getIdentifiers());
}
break;
}
@@ -2102,45 +2112,41 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
/*AllowScopeSpecifier=*/true)) {
SmallVector<OMPClause *, 1> Clauses;
if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>,
- llvm::omp::Clause_enumSize + 1>
- FirstClauses(llvm::omp::Clause_enumSize + 1);
+ std::bitset<llvm::omp::Clause_enumSize + 1> SeenClauses;
while (Tok.isNot(tok::annot_pragma_openmp_end)) {
OpenMPClauseKind CKind =
Tok.isAnnotation() ? OMPC_unknown
: getOpenMPClauseKind(PP.getSpelling(Tok));
- Actions.StartOpenMPClause(CKind);
- OMPClause *Clause = ParseOpenMPClause(
- OMPD_allocate, CKind, !FirstClauses[unsigned(CKind)].getInt());
+ Actions.OpenMP().StartOpenMPClause(CKind);
+ OMPClause *Clause = ParseOpenMPClause(OMPD_allocate, CKind,
+ !SeenClauses[unsigned(CKind)]);
SkipUntil(tok::comma, tok::identifier, tok::annot_pragma_openmp_end,
StopBeforeMatch);
- FirstClauses[unsigned(CKind)].setInt(true);
+ SeenClauses[unsigned(CKind)] = true;
if (Clause != nullptr)
Clauses.push_back(Clause);
if (Tok.is(tok::annot_pragma_openmp_end)) {
- Actions.EndOpenMPClause();
+ Actions.OpenMP().EndOpenMPClause();
break;
}
// Skip ',' if any.
if (Tok.is(tok::comma))
ConsumeToken();
- Actions.EndOpenMPClause();
+ Actions.OpenMP().EndOpenMPClause();
}
skipUntilPragmaOpenMPEnd(DKind);
}
// Skip the last annot_pragma_openmp_end.
ConsumeAnnotationToken();
- return Actions.ActOnOpenMPAllocateDirective(Loc, Helper.getIdentifiers(),
- Clauses);
+ return Actions.OpenMP().ActOnOpenMPAllocateDirective(
+ Loc, Helper.getIdentifiers(), Clauses);
}
break;
}
case OMPD_requires: {
SourceLocation StartLoc = ConsumeToken();
SmallVector<OMPClause *, 5> Clauses;
- SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>,
- llvm::omp::Clause_enumSize + 1>
- FirstClauses(llvm::omp::Clause_enumSize + 1);
+ llvm::SmallBitVector SeenClauses(llvm::omp::Clause_enumSize + 1);
if (Tok.is(tok::annot_pragma_openmp_end)) {
Diag(Tok, diag::err_omp_expected_clause)
<< getOpenMPDirectiveName(OMPD_requires);
@@ -2150,22 +2156,22 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
OpenMPClauseKind CKind = Tok.isAnnotation()
? OMPC_unknown
: getOpenMPClauseKind(PP.getSpelling(Tok));
- Actions.StartOpenMPClause(CKind);
- OMPClause *Clause = ParseOpenMPClause(
- OMPD_requires, CKind, !FirstClauses[unsigned(CKind)].getInt());
+ Actions.OpenMP().StartOpenMPClause(CKind);
+ OMPClause *Clause = ParseOpenMPClause(OMPD_requires, CKind,
+ !SeenClauses[unsigned(CKind)]);
SkipUntil(tok::comma, tok::identifier, tok::annot_pragma_openmp_end,
StopBeforeMatch);
- FirstClauses[unsigned(CKind)].setInt(true);
+ SeenClauses[unsigned(CKind)] = true;
if (Clause != nullptr)
Clauses.push_back(Clause);
if (Tok.is(tok::annot_pragma_openmp_end)) {
- Actions.EndOpenMPClause();
+ Actions.OpenMP().EndOpenMPClause();
break;
}
// Skip ',' if any.
if (Tok.is(tok::comma))
ConsumeToken();
- Actions.EndOpenMPClause();
+ Actions.OpenMP().EndOpenMPClause();
}
// Consume final annot_pragma_openmp_end
if (Clauses.empty()) {
@@ -2175,14 +2181,15 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
return nullptr;
}
ConsumeAnnotationToken();
- return Actions.ActOnOpenMPRequiresDirective(StartLoc, Clauses);
+ return Actions.OpenMP().ActOnOpenMPRequiresDirective(StartLoc, Clauses);
}
case OMPD_error: {
SmallVector<OMPClause *, 1> Clauses;
SourceLocation StartLoc = ConsumeToken();
ParseOpenMPClauses(DKind, Clauses, StartLoc);
- Actions.ActOnOpenMPErrorDirective(Clauses, StartLoc, SourceLocation(),
- /*InExContext = */ false);
+ Actions.OpenMP().ActOnOpenMPErrorDirective(Clauses, StartLoc,
+ SourceLocation(),
+ /*InExContext = */ false);
break;
}
case OMPD_assumes:
@@ -2217,7 +2224,8 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
// { #pragma omp end declare variant }
//
ConsumeToken();
- OMPTraitInfo *ParentTI = Actions.getOMPTraitInfoForSurroundingScope();
+ OMPTraitInfo *ParentTI =
+ Actions.OpenMP().getOMPTraitInfoForSurroundingScope();
ASTContext &ASTCtx = Actions.getASTContext();
OMPTraitInfo &TI = ASTCtx.getNewOMPTraitInfo();
if (parseOMPDeclareVariantMatchClause(Loc, TI, ParentTI)) {
@@ -2248,7 +2256,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
/* ConstructTraits */ ArrayRef<llvm::omp::TraitProperty>());
if (isVariantApplicableInContext(VMI, OMPCtx, /* DeviceSetOnly */ true)) {
- Actions.ActOnOpenMPBeginDeclareVariant(Loc, TI);
+ Actions.OpenMP().ActOnOpenMPBeginDeclareVariant(Loc, TI);
break;
}
@@ -2275,8 +2283,8 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
break;
}
case OMPD_end_declare_variant: {
- if (Actions.isInOpenMPDeclareVariantScope())
- Actions.ActOnOpenMPEndDeclareVariant();
+ if (Actions.OpenMP().isInOpenMPDeclareVariantScope())
+ Actions.OpenMP().ActOnOpenMPEndDeclareVariant();
else
Diag(Loc, diag::err_expected_begin_declare_variant);
ConsumeToken();
@@ -2331,7 +2339,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_declare_target: {
SourceLocation DTLoc = ConsumeAnyToken();
bool HasClauses = Tok.isNot(tok::annot_pragma_openmp_end);
- Sema::DeclareTargetContextInfo DTCI(DKind, DTLoc);
+ SemaOpenMP::DeclareTargetContextInfo DTCI(DKind, DTLoc);
if (HasClauses)
ParseOMPDeclareTargetClauses(DTCI);
bool HasImplicitMappings = DKind == OMPD_begin_declare_target ||
@@ -2342,105 +2350,43 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
ConsumeAnyToken();
if (HasImplicitMappings) {
- Actions.ActOnStartOpenMPDeclareTargetContext(DTCI);
+ Actions.OpenMP().ActOnStartOpenMPDeclareTargetContext(DTCI);
return nullptr;
}
- Actions.ActOnFinishedOpenMPDeclareTargetContext(DTCI);
+ Actions.OpenMP().ActOnFinishedOpenMPDeclareTargetContext(DTCI);
llvm::SmallVector<Decl *, 4> Decls;
for (auto &It : DTCI.ExplicitlyMapped)
Decls.push_back(It.first);
return Actions.BuildDeclaratorGroup(Decls);
}
case OMPD_end_declare_target: {
- if (!Actions.isInOpenMPDeclareTargetContext()) {
+ if (!Actions.OpenMP().isInOpenMPDeclareTargetContext()) {
Diag(Tok, diag::err_omp_unexpected_directive)
<< 1 << getOpenMPDirectiveName(DKind);
break;
}
- const Sema::DeclareTargetContextInfo &DTCI =
- Actions.ActOnOpenMPEndDeclareTargetDirective();
+ const SemaOpenMP::DeclareTargetContextInfo &DTCI =
+ Actions.OpenMP().ActOnOpenMPEndDeclareTargetDirective();
ParseOMPEndDeclareTargetDirective(DTCI.Kind, DKind, DTCI.Loc);
return nullptr;
}
case OMPD_unknown:
Diag(Tok, diag::err_omp_unknown_directive);
break;
- case OMPD_parallel:
- case OMPD_simd:
- case OMPD_tile:
- case OMPD_unroll:
- case OMPD_task:
- case OMPD_taskyield:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_taskgroup:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_for:
- case OMPD_for_simd:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_ordered:
- case OMPD_critical:
- case OMPD_parallel_for:
- case OMPD_parallel_for_simd:
- case OMPD_parallel_sections:
- case OMPD_parallel_master:
- case OMPD_parallel_masked:
- case OMPD_atomic:
- case OMPD_target:
- case OMPD_teams:
- case OMPD_cancellation_point:
- case OMPD_cancel:
- case OMPD_target_data:
- case OMPD_target_enter_data:
- case OMPD_target_exit_data:
- case OMPD_target_parallel:
- case OMPD_target_parallel_for:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_masked_taskloop:
- case OMPD_masked_taskloop_simd:
- case OMPD_parallel_masked_taskloop:
- case OMPD_parallel_masked_taskloop_simd:
- case OMPD_distribute:
- case OMPD_target_update:
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_distribute_simd:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_simd:
- case OMPD_scope:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_teams_distribute_parallel_for:
- case OMPD_target_teams:
- case OMPD_target_teams_distribute:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
- case OMPD_target_teams_distribute_simd:
- case OMPD_dispatch:
- case OMPD_masked:
- case OMPD_metadirective:
- case OMPD_loop:
- case OMPD_teams_loop:
- case OMPD_target_teams_loop:
- case OMPD_parallel_loop:
- case OMPD_target_parallel_loop:
- Diag(Tok, diag::err_omp_unexpected_directive)
- << 1 << getOpenMPDirectiveName(DKind);
- break;
default:
- break;
+ switch (getDirectiveCategory(DKind)) {
+ case Category::Executable:
+ case Category::Meta:
+ case Category::Subsidiary:
+ case Category::Utility:
+ Diag(Tok, diag::err_omp_unexpected_directive)
+ << 1 << getOpenMPDirectiveName(DKind);
+ break;
+ case Category::Declarative:
+ case Category::Informational:
+ break;
+ }
}
while (Tok.isNot(tok::annot_pragma_openmp_end))
ConsumeAnyToken();
@@ -2448,6 +2394,184 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
return nullptr;
}
+StmtResult Parser::ParseOpenMPExecutableDirective(
+ ParsedStmtContext StmtCtx, OpenMPDirectiveKind DKind, SourceLocation Loc,
+ bool ReadDirectiveWithinMetadirective) {
+ assert(isOpenMPExecutableDirective(DKind) && "Unexpected directive category");
+
+ bool HasAssociatedStatement = true;
+ Association Assoc = getDirectiveAssociation(DKind);
+
+ // OMPD_ordered has None as association, but it comes in two variants,
+ // the second of which is associated with a block.
+ // OMPD_scan and OMPD_section are both "separating", but section is treated
+ // as if it was associated with a statement, while scan is not.
+ if (DKind != OMPD_ordered && DKind != OMPD_section &&
+ (Assoc == Association::None || Assoc == Association::Separating)) {
+ if ((StmtCtx & ParsedStmtContext::AllowStandaloneOpenMPDirectives) ==
+ ParsedStmtContext()) {
+ Diag(Tok, diag::err_omp_immediate_directive)
+ << getOpenMPDirectiveName(DKind) << 0;
+ if (DKind == OMPD_error) {
+ SkipUntil(tok::annot_pragma_openmp_end);
+ return StmtError();
+ }
+ }
+ HasAssociatedStatement = false;
+ }
+
+ SourceLocation EndLoc;
+ SmallVector<OMPClause *, 5> Clauses;
+ llvm::SmallBitVector SeenClauses(llvm::omp::Clause_enumSize + 1);
+ DeclarationNameInfo DirName;
+ OpenMPDirectiveKind CancelRegion = OMPD_unknown;
+ unsigned ScopeFlags = Scope::FnScope | Scope::DeclScope |
+ Scope::CompoundStmtScope | Scope::OpenMPDirectiveScope;
+
+ // Special processing for flush and depobj clauses.
+ Token ImplicitTok;
+ bool ImplicitClauseAllowed = false;
+ if (DKind == OMPD_flush || DKind == OMPD_depobj) {
+ ImplicitTok = Tok;
+ ImplicitClauseAllowed = true;
+ }
+ ConsumeToken();
+ // Parse directive name of the 'critical' directive if any.
+ if (DKind == OMPD_critical) {
+ BalancedDelimiterTracker T(*this, tok::l_paren,
+ tok::annot_pragma_openmp_end);
+ if (!T.consumeOpen()) {
+ if (Tok.isAnyIdentifier()) {
+ DirName =
+ DeclarationNameInfo(Tok.getIdentifierInfo(), Tok.getLocation());
+ ConsumeAnyToken();
+ } else {
+ Diag(Tok, diag::err_omp_expected_identifier_for_critical);
+ }
+ T.consumeClose();
+ }
+ } else if (DKind == OMPD_cancellation_point || DKind == OMPD_cancel) {
+ CancelRegion = parseOpenMPDirectiveKind(*this);
+ if (Tok.isNot(tok::annot_pragma_openmp_end))
+ ConsumeToken();
+ }
+
+ if (isOpenMPLoopDirective(DKind))
+ ScopeFlags |= Scope::OpenMPLoopDirectiveScope;
+ if (isOpenMPSimdDirective(DKind))
+ ScopeFlags |= Scope::OpenMPSimdDirectiveScope;
+ ParseScope OMPDirectiveScope(this, ScopeFlags);
+ Actions.OpenMP().StartOpenMPDSABlock(DKind, DirName, Actions.getCurScope(),
+ Loc);
+
+ while (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ // If we are parsing for a directive within a metadirective, the directive
+ // ends with a ')'.
+ if (ReadDirectiveWithinMetadirective && Tok.is(tok::r_paren)) {
+ while (Tok.isNot(tok::annot_pragma_openmp_end))
+ ConsumeAnyToken();
+ break;
+ }
+ bool HasImplicitClause = false;
+ if (ImplicitClauseAllowed && Tok.is(tok::l_paren)) {
+ HasImplicitClause = true;
+ // Push copy of the current token back to stream to properly parse
+ // pseudo-clause OMPFlushClause or OMPDepobjClause.
+ PP.EnterToken(Tok, /*IsReinject*/ true);
+ PP.EnterToken(ImplicitTok, /*IsReinject*/ true);
+ ConsumeAnyToken();
+ }
+ OpenMPClauseKind CKind = Tok.isAnnotation()
+ ? OMPC_unknown
+ : getOpenMPClauseKind(PP.getSpelling(Tok));
+ if (HasImplicitClause) {
+ assert(CKind == OMPC_unknown && "Must be unknown implicit clause.");
+ if (DKind == OMPD_flush) {
+ CKind = OMPC_flush;
+ } else {
+ assert(DKind == OMPD_depobj && "Expected flush or depobj directives.");
+ CKind = OMPC_depobj;
+ }
+ }
+ // No more implicit clauses allowed.
+ ImplicitClauseAllowed = false;
+ Actions.OpenMP().StartOpenMPClause(CKind);
+ HasImplicitClause = false;
+ OMPClause *Clause =
+ ParseOpenMPClause(DKind, CKind, !SeenClauses[unsigned(CKind)]);
+ SeenClauses[unsigned(CKind)] = true;
+ if (Clause)
+ Clauses.push_back(Clause);
+
+ // Skip ',' if any.
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ Actions.OpenMP().EndOpenMPClause();
+ }
+ // End location of the directive.
+ EndLoc = Tok.getLocation();
+ // Consume final annot_pragma_openmp_end.
+ ConsumeAnnotationToken();
+
+ if (DKind == OMPD_ordered) {
+ // If the depend or doacross clause is specified, the ordered construct
+ // is a stand-alone directive.
+ for (auto CK : {OMPC_depend, OMPC_doacross}) {
+ if (SeenClauses[unsigned(CK)]) {
+ if ((StmtCtx & ParsedStmtContext::AllowStandaloneOpenMPDirectives) ==
+ ParsedStmtContext()) {
+ Diag(Loc, diag::err_omp_immediate_directive)
+ << getOpenMPDirectiveName(DKind) << 1 << getOpenMPClauseName(CK);
+ }
+ HasAssociatedStatement = false;
+ }
+ }
+ }
+
+ if (DKind == OMPD_tile && !SeenClauses[unsigned(OMPC_sizes)]) {
+ Diag(Loc, diag::err_omp_required_clause)
+ << getOpenMPDirectiveName(OMPD_tile) << "sizes";
+ }
+
+ StmtResult AssociatedStmt;
+ if (HasAssociatedStatement) {
+ // The body is a block scope like in Lambdas and Blocks.
+ Actions.OpenMP().ActOnOpenMPRegionStart(DKind, getCurScope());
+ // FIXME: We create a bogus CompoundStmt scope to hold the contents of
+ // the captured region. Code elsewhere assumes that any FunctionScopeInfo
+ // should have at least one compound statement scope within it.
+ ParsingOpenMPDirectiveRAII NormalScope(*this, /*Value=*/false);
+ {
+ Sema::CompoundScopeRAII Scope(Actions);
+ AssociatedStmt = ParseStatement();
+
+ if (AssociatedStmt.isUsable() && isOpenMPLoopDirective(DKind) &&
+ getLangOpts().OpenMPIRBuilder)
+ AssociatedStmt =
+ Actions.OpenMP().ActOnOpenMPLoopnest(AssociatedStmt.get());
+ }
+ AssociatedStmt =
+ Actions.OpenMP().ActOnOpenMPRegionEnd(AssociatedStmt, Clauses);
+ } else if (DKind == OMPD_target_update || DKind == OMPD_target_enter_data ||
+ DKind == OMPD_target_exit_data) {
+ Actions.OpenMP().ActOnOpenMPRegionStart(DKind, getCurScope());
+ AssociatedStmt = (Sema::CompoundScopeRAII(Actions),
+ Actions.ActOnCompoundStmt(Loc, Loc, std::nullopt,
+ /*isStmtExpr=*/false));
+ AssociatedStmt =
+ Actions.OpenMP().ActOnOpenMPRegionEnd(AssociatedStmt, Clauses);
+ }
+
+ StmtResult Directive = Actions.OpenMP().ActOnOpenMPExecutableDirective(
+ DKind, DirName, CancelRegion, Clauses, AssociatedStmt.get(), Loc, EndLoc);
+
+ // Exit scope.
+ Actions.OpenMP().EndOpenMPDSABlock(Directive.get());
+ OMPDirectiveScope.Exit();
+
+ return Directive;
+}
+
/// Parsing of declarative or executable OpenMP directives.
///
/// threadprivate-directive:
@@ -2495,26 +2619,30 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
"Not an OpenMP directive!");
ParsingOpenMPDirectiveRAII DirScope(*this);
ParenBraceBracketBalancer BalancerRAIIObj(*this);
- SmallVector<OMPClause *, 5> Clauses;
- SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>,
- llvm::omp::Clause_enumSize + 1>
- FirstClauses(llvm::omp::Clause_enumSize + 1);
- unsigned ScopeFlags = Scope::FnScope | Scope::DeclScope |
- Scope::CompoundStmtScope | Scope::OpenMPDirectiveScope;
SourceLocation Loc = ReadDirectiveWithinMetadirective
? Tok.getLocation()
- : ConsumeAnnotationToken(),
- EndLoc;
+ : ConsumeAnnotationToken();
OpenMPDirectiveKind DKind = parseOpenMPDirectiveKind(*this);
if (ReadDirectiveWithinMetadirective && DKind == OMPD_unknown) {
Diag(Tok, diag::err_omp_unknown_directive);
return StmtError();
}
- OpenMPDirectiveKind CancelRegion = OMPD_unknown;
- // Name of critical directive.
- DeclarationNameInfo DirName;
+
StmtResult Directive = StmtError();
- bool HasAssociatedStatement = true;
+
+ bool IsExecutable = [&]() {
+ if (DKind == OMPD_error) // OMPD_error is handled as executable
+ return true;
+ auto Res = getDirectiveCategory(DKind);
+ return Res == Category::Executable || Res == Category::Subsidiary;
+ }();
+
+ if (IsExecutable) {
+ Directive = ParseOpenMPExecutableDirective(
+ StmtCtx, DKind, Loc, ReadDirectiveWithinMetadirective);
+ assert(!Directive.isUnset() && "Executable directive remained unprocessed");
+ return Directive;
+ }
switch (DKind) {
case OMPD_nothing:
@@ -2683,7 +2811,7 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
if (!ParseOpenMPSimpleVarList(DKind, Helper,
/*AllowScopeSpecifier=*/false)) {
skipUntilPragmaOpenMPEnd(DKind);
- DeclGroupPtrTy Res = Actions.ActOnOpenMPThreadprivateDirective(
+ DeclGroupPtrTy Res = Actions.OpenMP().ActOnOpenMPThreadprivateDirective(
Loc, Helper.getIdentifiers());
Directive = Actions.ActOnDeclStmt(Res, Loc, Tok.getLocation());
}
@@ -2703,33 +2831,31 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
/*AllowScopeSpecifier=*/false)) {
SmallVector<OMPClause *, 1> Clauses;
if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>,
- llvm::omp::Clause_enumSize + 1>
- FirstClauses(llvm::omp::Clause_enumSize + 1);
+ llvm::SmallBitVector SeenClauses(llvm::omp::Clause_enumSize + 1);
while (Tok.isNot(tok::annot_pragma_openmp_end)) {
OpenMPClauseKind CKind =
Tok.isAnnotation() ? OMPC_unknown
: getOpenMPClauseKind(PP.getSpelling(Tok));
- Actions.StartOpenMPClause(CKind);
- OMPClause *Clause = ParseOpenMPClause(
- OMPD_allocate, CKind, !FirstClauses[unsigned(CKind)].getInt());
+ Actions.OpenMP().StartOpenMPClause(CKind);
+ OMPClause *Clause = ParseOpenMPClause(OMPD_allocate, CKind,
+ !SeenClauses[unsigned(CKind)]);
SkipUntil(tok::comma, tok::identifier, tok::annot_pragma_openmp_end,
StopBeforeMatch);
- FirstClauses[unsigned(CKind)].setInt(true);
+ SeenClauses[unsigned(CKind)] = true;
if (Clause != nullptr)
Clauses.push_back(Clause);
if (Tok.is(tok::annot_pragma_openmp_end)) {
- Actions.EndOpenMPClause();
+ Actions.OpenMP().EndOpenMPClause();
break;
}
// Skip ',' if any.
if (Tok.is(tok::comma))
ConsumeToken();
- Actions.EndOpenMPClause();
+ Actions.OpenMP().EndOpenMPClause();
}
skipUntilPragmaOpenMPEnd(DKind);
}
- DeclGroupPtrTy Res = Actions.ActOnOpenMPAllocateDirective(
+ DeclGroupPtrTy Res = Actions.OpenMP().ActOnOpenMPAllocateDirective(
Loc, Helper.getIdentifiers(), Clauses);
Directive = Actions.ActOnDeclStmt(Res, Loc, Tok.getLocation());
}
@@ -2759,233 +2885,31 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
}
break;
}
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_taskyield:
- case OMPD_error:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_cancellation_point:
- case OMPD_cancel:
- case OMPD_target_enter_data:
- case OMPD_target_exit_data:
- case OMPD_target_update:
- case OMPD_interop:
- if ((StmtCtx & ParsedStmtContext::AllowStandaloneOpenMPDirectives) ==
- ParsedStmtContext()) {
- Diag(Tok, diag::err_omp_immediate_directive)
- << getOpenMPDirectiveName(DKind) << 0;
- if (DKind == OMPD_error) {
- SkipUntil(tok::annot_pragma_openmp_end);
- break;
- }
- }
- HasAssociatedStatement = false;
- // Fall through for further analysis.
- [[fallthrough]];
- case OMPD_parallel:
- case OMPD_simd:
- case OMPD_tile:
- case OMPD_unroll:
- case OMPD_for:
- case OMPD_for_simd:
- case OMPD_sections:
- case OMPD_single:
- case OMPD_section:
- case OMPD_master:
- case OMPD_critical:
- case OMPD_parallel_for:
- case OMPD_parallel_for_simd:
- case OMPD_parallel_sections:
- case OMPD_parallel_master:
- case OMPD_parallel_masked:
- case OMPD_task:
- case OMPD_ordered:
- case OMPD_atomic:
- case OMPD_target:
- case OMPD_teams:
- case OMPD_taskgroup:
- case OMPD_target_data:
- case OMPD_target_parallel:
- case OMPD_target_parallel_for:
- case OMPD_loop:
- case OMPD_teams_loop:
- case OMPD_target_teams_loop:
- case OMPD_parallel_loop:
- case OMPD_target_parallel_loop:
- case OMPD_scope:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_masked_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_masked_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_masked_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_parallel_masked_taskloop_simd:
- case OMPD_distribute:
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_distribute_simd:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_simd:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_teams_distribute_parallel_for:
- case OMPD_target_teams:
- case OMPD_target_teams_distribute:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
- case OMPD_target_teams_distribute_simd:
- case OMPD_dispatch:
- case OMPD_masked: {
- // Special processing for flush and depobj clauses.
- Token ImplicitTok;
- bool ImplicitClauseAllowed = false;
- if (DKind == OMPD_flush || DKind == OMPD_depobj) {
- ImplicitTok = Tok;
- ImplicitClauseAllowed = true;
- }
- ConsumeToken();
- // Parse directive name of the 'critical' directive if any.
- if (DKind == OMPD_critical) {
- BalancedDelimiterTracker T(*this, tok::l_paren,
- tok::annot_pragma_openmp_end);
- if (!T.consumeOpen()) {
- if (Tok.isAnyIdentifier()) {
- DirName =
- DeclarationNameInfo(Tok.getIdentifierInfo(), Tok.getLocation());
- ConsumeAnyToken();
- } else {
- Diag(Tok, diag::err_omp_expected_identifier_for_critical);
- }
- T.consumeClose();
- }
- } else if (DKind == OMPD_cancellation_point || DKind == OMPD_cancel) {
- CancelRegion = parseOpenMPDirectiveKind(*this);
- if (Tok.isNot(tok::annot_pragma_openmp_end))
- ConsumeToken();
- }
-
- if (isOpenMPLoopDirective(DKind))
- ScopeFlags |= Scope::OpenMPLoopDirectiveScope;
- if (isOpenMPSimdDirective(DKind))
- ScopeFlags |= Scope::OpenMPSimdDirectiveScope;
- ParseScope OMPDirectiveScope(this, ScopeFlags);
- Actions.StartOpenMPDSABlock(DKind, DirName, Actions.getCurScope(), Loc);
-
- while (Tok.isNot(tok::annot_pragma_openmp_end)) {
- // If we are parsing for a directive within a metadirective, the directive
- // ends with a ')'.
- if (ReadDirectiveWithinMetadirective && Tok.is(tok::r_paren)) {
- while (Tok.isNot(tok::annot_pragma_openmp_end))
- ConsumeAnyToken();
- break;
- }
- bool HasImplicitClause = false;
- if (ImplicitClauseAllowed && Tok.is(tok::l_paren)) {
- HasImplicitClause = true;
- // Push copy of the current token back to stream to properly parse
- // pseudo-clause OMPFlushClause or OMPDepobjClause.
- PP.EnterToken(Tok, /*IsReinject*/ true);
- PP.EnterToken(ImplicitTok, /*IsReinject*/ true);
- ConsumeAnyToken();
- }
- OpenMPClauseKind CKind = Tok.isAnnotation()
- ? OMPC_unknown
- : getOpenMPClauseKind(PP.getSpelling(Tok));
- if (HasImplicitClause) {
- assert(CKind == OMPC_unknown && "Must be unknown implicit clause.");
- if (DKind == OMPD_flush) {
- CKind = OMPC_flush;
- } else {
- assert(DKind == OMPD_depobj &&
- "Expected flush or depobj directives.");
- CKind = OMPC_depobj;
- }
- }
- // No more implicit clauses allowed.
- ImplicitClauseAllowed = false;
- Actions.StartOpenMPClause(CKind);
- HasImplicitClause = false;
- OMPClause *Clause = ParseOpenMPClause(
- DKind, CKind, !FirstClauses[unsigned(CKind)].getInt());
- FirstClauses[unsigned(CKind)].setInt(true);
- if (Clause) {
- FirstClauses[unsigned(CKind)].setPointer(Clause);
- Clauses.push_back(Clause);
- }
-
- // Skip ',' if any.
- if (Tok.is(tok::comma))
- ConsumeToken();
- Actions.EndOpenMPClause();
- }
- // End location of the directive.
- EndLoc = Tok.getLocation();
- // Consume final annot_pragma_openmp_end.
- ConsumeAnnotationToken();
-
- if (DKind == OMPD_ordered) {
- // If the depend or doacross clause is specified, the ordered construct
- // is a stand-alone directive.
- for (auto CK : {OMPC_depend, OMPC_doacross}) {
- if (FirstClauses[unsigned(CK)].getInt()) {
- if ((StmtCtx & ParsedStmtContext::AllowStandaloneOpenMPDirectives) ==
- ParsedStmtContext()) {
- Diag(Loc, diag::err_omp_immediate_directive)
- << getOpenMPDirectiveName(DKind) << 1
- << getOpenMPClauseName(CK);
- }
- HasAssociatedStatement = false;
- }
- }
- }
+ case OMPD_reverse:
+ case OMPD_interchange:
+ case OMPD_declare_target: {
+ SourceLocation DTLoc = ConsumeAnyToken();
+ bool HasClauses = Tok.isNot(tok::annot_pragma_openmp_end);
+ SemaOpenMP::DeclareTargetContextInfo DTCI(DKind, DTLoc);
+ if (HasClauses)
+ ParseOMPDeclareTargetClauses(DTCI);
+ bool HasImplicitMappings =
+ !HasClauses || (DTCI.ExplicitlyMapped.empty() && DTCI.Indirect);
- if (DKind == OMPD_tile && !FirstClauses[unsigned(OMPC_sizes)].getInt()) {
- Diag(Loc, diag::err_omp_required_clause)
- << getOpenMPDirectiveName(OMPD_tile) << "sizes";
+ if (HasImplicitMappings) {
+ Diag(Tok, diag::err_omp_unexpected_directive)
+ << 1 << getOpenMPDirectiveName(DKind);
+ SkipUntil(tok::annot_pragma_openmp_end);
+ break;
}
- StmtResult AssociatedStmt;
- if (HasAssociatedStatement) {
- // The body is a block scope like in Lambdas and Blocks.
- Actions.ActOnOpenMPRegionStart(DKind, getCurScope());
- // FIXME: We create a bogus CompoundStmt scope to hold the contents of
- // the captured region. Code elsewhere assumes that any FunctionScopeInfo
- // should have at least one compound statement scope within it.
- ParsingOpenMPDirectiveRAII NormalScope(*this, /*Value=*/false);
- {
- Sema::CompoundScopeRAII Scope(Actions);
- AssociatedStmt = ParseStatement();
-
- if (AssociatedStmt.isUsable() && isOpenMPLoopDirective(DKind) &&
- getLangOpts().OpenMPIRBuilder)
- AssociatedStmt = Actions.ActOnOpenMPLoopnest(AssociatedStmt.get());
- }
- AssociatedStmt = Actions.ActOnOpenMPRegionEnd(AssociatedStmt, Clauses);
- } else if (DKind == OMPD_target_update || DKind == OMPD_target_enter_data ||
- DKind == OMPD_target_exit_data) {
- Actions.ActOnOpenMPRegionStart(DKind, getCurScope());
- AssociatedStmt = (Sema::CompoundScopeRAII(Actions),
- Actions.ActOnCompoundStmt(Loc, Loc, std::nullopt,
- /*isStmtExpr=*/false));
- AssociatedStmt = Actions.ActOnOpenMPRegionEnd(AssociatedStmt, Clauses);
- }
- Directive = Actions.ActOnOpenMPExecutableDirective(
- DKind, DirName, CancelRegion, Clauses, AssociatedStmt.get(), Loc,
- EndLoc);
+ // Skip the last annot_pragma_openmp_end.
+ ConsumeAnyToken();
- // Exit scope.
- Actions.EndOpenMPDSABlock(Directive.get());
- OMPDirectiveScope.Exit();
+ Actions.OpenMP().ActOnFinishedOpenMPDeclareTargetContext(DTCI);
break;
}
case OMPD_declare_simd:
- case OMPD_declare_target:
case OMPD_begin_declare_target:
case OMPD_end_declare_target:
case OMPD_requires:
@@ -3071,34 +2995,14 @@ bool Parser::ParseOpenMPSimpleVarList(
}
OMPClause *Parser::ParseOpenMPSizesClause() {
- SourceLocation ClauseNameLoc = ConsumeToken();
+ SourceLocation ClauseNameLoc, OpenLoc, CloseLoc;
SmallVector<Expr *, 4> ValExprs;
-
- BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
- if (T.consumeOpen()) {
- Diag(Tok, diag::err_expected) << tok::l_paren;
+ if (ParseOpenMPExprListClause(OMPC_sizes, ClauseNameLoc, OpenLoc, CloseLoc,
+ ValExprs))
return nullptr;
- }
-
- while (true) {
- ExprResult Val = ParseConstantExpression();
- if (!Val.isUsable()) {
- T.skipToEnd();
- return nullptr;
- }
-
- ValExprs.push_back(Val.get());
-
- if (Tok.is(tok::r_paren) || Tok.is(tok::annot_pragma_openmp_end))
- break;
- ExpectAndConsume(tok::comma);
- }
-
- T.consumeClose();
-
- return Actions.ActOnOpenMPSizesClause(
- ValExprs, ClauseNameLoc, T.getOpenLocation(), T.getCloseLocation());
+ return Actions.OpenMP().ActOnOpenMPSizesClause(ValExprs, ClauseNameLoc,
+ OpenLoc, CloseLoc);
}
OMPClause *Parser::ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind) {
@@ -3109,7 +3013,7 @@ OMPClause *Parser::ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind) {
BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
if (T.expectAndConsume(diag::err_expected_lparen_after, "uses_allocator"))
return nullptr;
- SmallVector<Sema::UsesAllocatorsData, 4> Data;
+ SmallVector<SemaOpenMP::UsesAllocatorsData, 4> Data;
do {
CXXScopeSpec SS;
Token Replacement;
@@ -3123,7 +3027,7 @@ OMPClause *Parser::ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind) {
StopBeforeMatch);
break;
}
- Sema::UsesAllocatorsData &D = Data.emplace_back();
+ SemaOpenMP::UsesAllocatorsData &D = Data.emplace_back();
D.Allocator = Allocator.get();
if (Tok.is(tok::l_paren)) {
BalancedDelimiterTracker T(*this, tok::l_paren,
@@ -3148,8 +3052,8 @@ OMPClause *Parser::ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind) {
ConsumeAnyToken();
} while (Tok.isNot(tok::r_paren) && Tok.isNot(tok::annot_pragma_openmp_end));
T.consumeClose();
- return Actions.ActOnOpenMPUsesAllocatorClause(Loc, T.getOpenLocation(),
- T.getCloseLocation(), Data);
+ return Actions.OpenMP().ActOnOpenMPUsesAllocatorClause(
+ Loc, T.getOpenLocation(), T.getCloseLocation(), Data);
}
/// Parsing of OpenMP clauses.
@@ -3314,6 +3218,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_acquire:
case OMPC_release:
case OMPC_relaxed:
+ case OMPC_weak:
case OMPC_threads:
case OMPC_simd:
case OMPC_nogroup:
@@ -3516,15 +3421,16 @@ OMPClause *Parser::ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
if (ParseOnly)
return nullptr;
- return Actions.ActOnOpenMPSingleExprClause(Kind, Val.get(), Loc, LLoc, RLoc);
+ return Actions.OpenMP().ActOnOpenMPSingleExprClause(Kind, Val.get(), Loc,
+ LLoc, RLoc);
}
/// Parse indirect clause for '#pragma omp declare target' directive.
/// 'indirect' '[' '(' invoked-by-fptr ')' ']'
/// where invoked-by-fptr is a constant boolean expression that evaluates to
/// true or false at compile time.
-bool Parser::ParseOpenMPIndirectClause(Sema::DeclareTargetContextInfo &DTCI,
- bool ParseOnly) {
+bool Parser::ParseOpenMPIndirectClause(
+ SemaOpenMP::DeclareTargetContextInfo &DTCI, bool ParseOnly) {
SourceLocation Loc = ConsumeToken();
SourceLocation RLoc;
@@ -3699,15 +3605,16 @@ OMPClause *Parser::ParseOpenMPInteropClause(OpenMPClauseKind Kind,
return nullptr;
if (Kind == OMPC_init)
- return Actions.ActOnOpenMPInitClause(InteropVarExpr.get(), InteropInfo, Loc,
- T.getOpenLocation(), VarLoc, RLoc);
+ return Actions.OpenMP().ActOnOpenMPInitClause(
+ InteropVarExpr.get(), InteropInfo, Loc, T.getOpenLocation(), VarLoc,
+ RLoc);
if (Kind == OMPC_use)
- return Actions.ActOnOpenMPUseClause(InteropVarExpr.get(), Loc,
- T.getOpenLocation(), VarLoc, RLoc);
+ return Actions.OpenMP().ActOnOpenMPUseClause(
+ InteropVarExpr.get(), Loc, T.getOpenLocation(), VarLoc, RLoc);
if (Kind == OMPC_destroy)
- return Actions.ActOnOpenMPDestroyClause(InteropVarExpr.get(), Loc,
- T.getOpenLocation(), VarLoc, RLoc);
+ return Actions.OpenMP().ActOnOpenMPDestroyClause(
+ InteropVarExpr.get(), Loc, T.getOpenLocation(), VarLoc, RLoc);
llvm_unreachable("Unexpected interop variable clause.");
}
@@ -3736,7 +3643,7 @@ OMPClause *Parser::ParseOpenMPOMPXAttributesClause(bool ParseOnly) {
case ParsedAttr::AT_AMDGPUFlatWorkGroupSize:
if (!PA.checkExactlyNumArgs(Actions, 2))
continue;
- if (auto *A = Actions.CreateAMDGPUFlatWorkGroupSizeAttr(
+ if (auto *A = Actions.AMDGPU().CreateAMDGPUFlatWorkGroupSizeAttr(
PA, PA.getArgAsExpr(0), PA.getArgAsExpr(1)))
Attrs.push_back(A);
continue;
@@ -3744,7 +3651,7 @@ OMPClause *Parser::ParseOpenMPOMPXAttributesClause(bool ParseOnly) {
if (!PA.checkAtLeastNumArgs(Actions, 1) ||
!PA.checkAtMostNumArgs(Actions, 2))
continue;
- if (auto *A = Actions.CreateAMDGPUWavesPerEUAttr(
+ if (auto *A = Actions.AMDGPU().CreateAMDGPUWavesPerEUAttr(
PA, PA.getArgAsExpr(0),
PA.getNumArgs() > 1 ? PA.getArgAsExpr(1) : nullptr))
Attrs.push_back(A);
@@ -3765,8 +3672,8 @@ OMPClause *Parser::ParseOpenMPOMPXAttributesClause(bool ParseOnly) {
};
}
- return Actions.ActOnOpenMPXAttributeClause(Attrs, Loc, T.getOpenLocation(),
- T.getCloseLocation());
+ return Actions.OpenMP().ActOnOpenMPXAttributeClause(
+ Attrs, Loc, T.getOpenLocation(), T.getCloseLocation());
}
/// Parsing of simple OpenMP clauses like 'default' or 'proc_bind'.
@@ -3801,9 +3708,8 @@ OMPClause *Parser::ParseOpenMPSimpleClause(OpenMPClauseKind Kind,
<< getOpenMPClauseName(OMPC_default) << "5.1";
return nullptr;
}
- return Actions.ActOnOpenMPSimpleClause(Kind, Val->Type,
- Val->TypeLoc, Val->LOpen,
- Val->Loc, Val->RLoc);
+ return Actions.OpenMP().ActOnOpenMPSimpleClause(
+ Kind, Val->Type, Val->TypeLoc, Val->LOpen, Val->Loc, Val->RLoc);
}
/// Parsing of OpenMP clauses like 'ordered'.
@@ -3838,7 +3744,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly) {
if (ParseOnly)
return nullptr;
- return Actions.ActOnOpenMPClause(Kind, Loc, Tok.getLocation());
+ return Actions.OpenMP().ActOnOpenMPClause(Kind, Loc, Tok.getLocation());
}
/// Parsing of OpenMP clauses with single expressions and some additional
@@ -4096,7 +4002,7 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
if (ParseOnly)
return nullptr;
- return Actions.ActOnOpenMPSingleExprWithArgClause(
+ return Actions.OpenMP().ActOnOpenMPSingleExprWithArgClause(
Kind, Arg, Val.get(), Loc, T.getOpenLocation(), KLoc, DelimLoc, RLoc);
}
@@ -4162,7 +4068,7 @@ static OpenMPMapModifierKind isMapModifier(Parser &P) {
}
/// Parse the mapper modifier in map, to, and from clauses.
-bool Parser::parseMapperModifier(Sema::OpenMPVarListDataTy &Data) {
+bool Parser::parseMapperModifier(SemaOpenMP::OpenMPVarListDataTy &Data) {
// Parse '('.
BalancedDelimiterTracker T(*this, tok::l_paren, tok::colon);
if (T.expectAndConsume(diag::err_expected_lparen_after, "mapper")) {
@@ -4190,13 +4096,20 @@ bool Parser::parseMapperModifier(Sema::OpenMPVarListDataTy &Data) {
return T.consumeClose();
}
+static OpenMPMapClauseKind isMapType(Parser &P);
+
/// Parse map-type-modifiers in map clause.
-/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
+/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] [map-type] : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier) |
/// present
-bool Parser::parseMapTypeModifiers(Sema::OpenMPVarListDataTy &Data) {
+/// where, map-type ::= alloc | delete | from | release | to | tofrom
+bool Parser::parseMapTypeModifiers(SemaOpenMP::OpenMPVarListDataTy &Data) {
+ bool HasMapType = false;
+ SourceLocation PreMapLoc = Tok.getLocation();
+ StringRef PreMapName = "";
while (getCurToken().isNot(tok::colon)) {
OpenMPMapModifierKind TypeModifier = isMapModifier(*this);
+ OpenMPMapClauseKind MapKind = isMapType(*this);
if (TypeModifier == OMPC_MAP_MODIFIER_always ||
TypeModifier == OMPC_MAP_MODIFIER_close ||
TypeModifier == OMPC_MAP_MODIFIER_present ||
@@ -4219,6 +4132,19 @@ bool Parser::parseMapTypeModifiers(Sema::OpenMPVarListDataTy &Data) {
Diag(Data.MapTypeModifiersLoc.back(), diag::err_omp_missing_comma)
<< "map type modifier";
+ } else if (getLangOpts().OpenMP >= 60 && MapKind != OMPC_MAP_unknown) {
+ if (!HasMapType) {
+ HasMapType = true;
+ Data.ExtraModifier = MapKind;
+ MapKind = OMPC_MAP_unknown;
+ PreMapLoc = Tok.getLocation();
+ PreMapName = Tok.getIdentifierInfo()->getName();
+ } else {
+ Diag(Tok, diag::err_omp_more_one_map_type);
+ Diag(PreMapLoc, diag::note_previous_map_type_specified_here)
+ << PreMapName;
+ }
+ ConsumeToken();
} else {
// For the case of unknown map-type-modifier or a map-type.
// Map-type is followed by a colon; the function returns when it
@@ -4229,8 +4155,14 @@ bool Parser::parseMapTypeModifiers(Sema::OpenMPVarListDataTy &Data) {
continue;
}
// Potential map-type token as it is followed by a colon.
- if (PP.LookAhead(0).is(tok::colon))
- return false;
+ if (PP.LookAhead(0).is(tok::colon)) {
+ if (getLangOpts().OpenMP >= 60) {
+ break;
+ } else {
+ return false;
+ }
+ }
+
Diag(Tok, diag::err_omp_unknown_map_type_modifier)
<< (getLangOpts().OpenMP >= 51 ? (getLangOpts().OpenMP >= 52 ? 2 : 1)
: 0)
@@ -4240,27 +4172,38 @@ bool Parser::parseMapTypeModifiers(Sema::OpenMPVarListDataTy &Data) {
if (getCurToken().is(tok::comma))
ConsumeToken();
}
+ if (getLangOpts().OpenMP >= 60 && !HasMapType) {
+ if (!Tok.is(tok::colon)) {
+ Diag(Tok, diag::err_omp_unknown_map_type);
+ ConsumeToken();
+ } else {
+ Data.ExtraModifier = OMPC_MAP_unknown;
+ }
+ }
return false;
}
/// Checks if the token is a valid map-type.
-/// FIXME: It will return an OpenMPMapModifierKind if that's what it parses.
+/// If it is not MapType kind, OMPC_MAP_unknown is returned.
static OpenMPMapClauseKind isMapType(Parser &P) {
Token Tok = P.getCurToken();
// The map-type token can be either an identifier or the C++ delete keyword.
if (!Tok.isOneOf(tok::identifier, tok::kw_delete))
return OMPC_MAP_unknown;
Preprocessor &PP = P.getPreprocessor();
- OpenMPMapClauseKind MapType =
- static_cast<OpenMPMapClauseKind>(getOpenMPSimpleClauseType(
- OMPC_map, PP.getSpelling(Tok), P.getLangOpts()));
- return MapType;
+ unsigned MapType =
+ getOpenMPSimpleClauseType(OMPC_map, PP.getSpelling(Tok), P.getLangOpts());
+ if (MapType == OMPC_MAP_to || MapType == OMPC_MAP_from ||
+ MapType == OMPC_MAP_tofrom || MapType == OMPC_MAP_alloc ||
+ MapType == OMPC_MAP_delete || MapType == OMPC_MAP_release)
+ return static_cast<OpenMPMapClauseKind>(MapType);
+ return OMPC_MAP_unknown;
}
/// Parse map-type in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type ::= to | from | tofrom | alloc | release | delete
-static void parseMapType(Parser &P, Sema::OpenMPVarListDataTy &Data) {
+static void parseMapType(Parser &P, SemaOpenMP::OpenMPVarListDataTy &Data) {
Token Tok = P.getCurToken();
if (Tok.is(tok::colon)) {
P.Diag(Tok, diag::err_omp_map_type_missing);
@@ -4284,7 +4227,7 @@ ExprResult Parser::ParseOpenMPIteratorsExpr() {
return ExprError();
SourceLocation LLoc = T.getOpenLocation();
- SmallVector<Sema::OMPIteratorData, 4> Data;
+ SmallVector<SemaOpenMP::OMPIteratorData, 4> Data;
while (Tok.isNot(tok::r_paren) && Tok.isNot(tok::annot_pragma_openmp_end)) {
// Check if the type parsing is required.
ParsedType IteratorType;
@@ -4358,7 +4301,7 @@ ExprResult Parser::ParseOpenMPIteratorsExpr() {
if (Tok.is(tok::comma))
ConsumeToken();
- Sema::OMPIteratorData &D = Data.emplace_back();
+ SemaOpenMP::OMPIteratorData &D = Data.emplace_back();
D.DeclIdent = II;
D.DeclIdentLoc = IdLoc;
D.Type = IteratorType;
@@ -4375,12 +4318,12 @@ ExprResult Parser::ParseOpenMPIteratorsExpr() {
if (!T.consumeClose())
RLoc = T.getCloseLocation();
- return Actions.ActOnOMPIteratorExpr(getCurScope(), IteratorKwLoc, LLoc, RLoc,
- Data);
+ return Actions.OpenMP().ActOnOMPIteratorExpr(getCurScope(), IteratorKwLoc,
+ LLoc, RLoc, Data);
}
bool Parser::ParseOpenMPReservedLocator(OpenMPClauseKind Kind,
- Sema::OpenMPVarListDataTy &Data,
+ SemaOpenMP::OpenMPVarListDataTy &Data,
const LangOptions &LangOpts) {
// Currently the only reserved locator is 'omp_all_memory' which is only
// allowed on a depend clause.
@@ -4408,7 +4351,7 @@ bool Parser::ParseOpenMPReservedLocator(OpenMPClauseKind Kind,
/// Parse step size expression. Returns true if parsing is successfull,
/// otherwise returns false.
-static bool parseStepSize(Parser &P, Sema::OpenMPVarListDataTy &Data,
+static bool parseStepSize(Parser &P, SemaOpenMP::OpenMPVarListDataTy &Data,
OpenMPClauseKind CKind, SourceLocation ELoc) {
ExprResult Tail = P.ParseAssignmentExpression();
Sema &Actions = P.getActions();
@@ -4429,7 +4372,7 @@ static bool parseStepSize(Parser &P, Sema::OpenMPVarListDataTy &Data,
bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
- Sema::OpenMPVarListDataTy &Data) {
+ SemaOpenMP::OpenMPVarListDataTy &Data) {
UnqualifiedId UnqualifiedReductionId;
bool InvalidReductionId = false;
bool IsInvalidMapperModifier = false;
@@ -4637,8 +4580,10 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
// Only parse map-type-modifier[s] and map-type if a colon is present in
// the map clause.
if (ColonPresent) {
+ if (getLangOpts().OpenMP >= 60 && getCurToken().is(tok::colon))
+ Diag(Tok, diag::err_omp_map_modifier_specification_list);
IsInvalidMapperModifier = parseMapTypeModifiers(Data);
- if (!IsInvalidMapperModifier)
+ if (getLangOpts().OpenMP < 60 && !IsInvalidMapperModifier)
parseMapType(*this, Data);
else
SkipUntil(tok::colon, tok::annot_pragma_openmp_end, StopBeforeMatch);
@@ -4737,8 +4682,8 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
getLangOpts());
Data.ExtraModifierLoc = Tok.getLocation();
if (Data.ExtraModifier == OMPC_ADJUST_ARGS_unknown) {
- SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
- StopBeforeMatch);
+ Diag(Tok, diag::err_omp_unknown_adjust_args_op);
+ SkipUntil(tok::r_paren, tok::annot_pragma_openmp_end, StopBeforeMatch);
} else {
ConsumeToken();
if (Tok.is(tok::colon))
@@ -4751,7 +4696,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
bool IsComma =
(Kind != OMPC_reduction && Kind != OMPC_task_reduction &&
Kind != OMPC_in_reduction && Kind != OMPC_depend &&
- Kind != OMPC_doacross && Kind != OMPC_map) ||
+ Kind != OMPC_doacross && Kind != OMPC_map && Kind != OMPC_adjust_args) ||
(Kind == OMPC_reduction && !InvalidReductionId) ||
(Kind == OMPC_map && Data.ExtraModifier != OMPC_MAP_unknown) ||
(Kind == OMPC_depend && Data.ExtraModifier != OMPC_DEPEND_unknown) ||
@@ -4939,7 +4884,7 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
SourceLocation Loc = Tok.getLocation();
SourceLocation LOpen = ConsumeToken();
SmallVector<Expr *, 4> Vars;
- Sema::OpenMPVarListDataTy Data;
+ SemaOpenMP::OpenMPVarListDataTy Data;
if (ParseOpenMPVarList(DKind, Kind, Vars, Data))
return nullptr;
@@ -4947,5 +4892,40 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
if (ParseOnly)
return nullptr;
OMPVarListLocTy Locs(Loc, LOpen, Data.RLoc);
- return Actions.ActOnOpenMPVarListClause(Kind, Vars, Locs, Data);
+ return Actions.OpenMP().ActOnOpenMPVarListClause(Kind, Vars, Locs, Data);
+}
+
+bool Parser::ParseOpenMPExprListClause(OpenMPClauseKind Kind,
+ SourceLocation &ClauseNameLoc,
+ SourceLocation &OpenLoc,
+ SourceLocation &CloseLoc,
+ SmallVectorImpl<Expr *> &Exprs,
+ bool ReqIntConst) {
+ assert(getOpenMPClauseName(Kind) == PP.getSpelling(Tok) &&
+ "Expected parsing to start at clause name");
+ ClauseNameLoc = ConsumeToken();
+
+ // Parse inside of '(' and ')'.
+ BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
+ if (T.consumeOpen()) {
+ Diag(Tok, diag::err_expected) << tok::l_paren;
+ return true;
+ }
+
+ // Parse the list with interleaved commas.
+ do {
+ ExprResult Val =
+ ReqIntConst ? ParseConstantExpression() : ParseAssignmentExpression();
+ if (!Val.isUsable()) {
+ // Encountered something other than an expression; abort to ')'.
+ T.skipToEnd();
+ return true;
+ }
+ Exprs.push_back(Val.get());
+ } while (TryConsumeToken(tok::comma));
+
+ bool Result = T.consumeClose();
+ OpenLoc = T.getOpenLocation();
+ CloseLoc = T.getCloseLocation();
+ return Result;
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp b/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp
index 730ac1a0fee5..cc6f18b5b319 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp
@@ -21,6 +21,9 @@
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaCUDA.h"
+#include "clang/Sema/SemaCodeCompletion.h"
+#include "clang/Sema/SemaRISCV.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSwitch.h"
#include <optional>
@@ -844,6 +847,11 @@ void Parser::HandlePragmaFPContract() {
FPC = LangOptions::FPM_Off;
break;
case tok::OOS_DEFAULT:
+ // According to ISO C99 standard chapter 7.3.4, the default value
+ // for the pragma is ``off'. '-fcomplex-arithmetic=basic',
+ // '-fcx-limited-range', '-fcx-fortran-rules' and
+ // '-fcomplex-arithmetic=improved' control the default value of these
+ // pragmas.
FPC = getLangOpts().getDefaultFPContractMode();
break;
}
@@ -909,15 +917,15 @@ void Parser::HandlePragmaCXLimitedRange() {
LangOptions::ComplexRangeKind Range;
switch (OOS) {
case tok::OOS_ON:
- Range = LangOptions::CX_Limited;
+ Range = LangOptions::CX_Basic;
break;
case tok::OOS_OFF:
Range = LangOptions::CX_Full;
break;
case tok::OOS_DEFAULT:
// According to ISO C99 standard chapter 7.3.4, the default value
- // for the pragma is ``off'. -fcx-limited-range and -fcx-fortran-rules
- // control the default value of these pragmas.
+ // for the pragma is ``off'. -fcomplex-arithmetic controls the default value
+ // of these pragmas.
Range = getLangOpts().getComplexRange();
break;
}
@@ -1563,7 +1571,8 @@ bool Parser::HandlePragmaLoopHint(LoopHint &Hint) {
ConsumeToken(); // Consume the constant expression eof terminator.
if (Arg2Error || R.isInvalid() ||
- Actions.CheckLoopHintExpr(R.get(), Toks[0].getLocation()))
+ Actions.CheckLoopHintExpr(R.get(), Toks[0].getLocation(),
+ /*AllowZero=*/false))
return false;
// Argument is a constant expression with an integer type.
@@ -1588,7 +1597,8 @@ bool Parser::HandlePragmaLoopHint(LoopHint &Hint) {
ConsumeToken(); // Consume the constant expression eof terminator.
if (R.isInvalid() ||
- Actions.CheckLoopHintExpr(R.get(), Toks[0].getLocation()))
+ Actions.CheckLoopHintExpr(R.get(), Toks[0].getLocation(),
+ /*AllowZero=*/true))
return false;
// Argument is a constant expression with an integer type.
@@ -1916,7 +1926,8 @@ void Parser::HandlePragmaAttribute() {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
// FIXME: suppress completion of unsupported attributes?
- Actions.CodeCompleteAttribute(AttributeCommonInfo::Syntax::AS_GNU);
+ Actions.CodeCompletion().CodeCompleteAttribute(
+ AttributeCommonInfo::Syntax::AS_GNU);
return SkipToEnd();
}
@@ -3895,8 +3906,8 @@ void PragmaForceCUDAHostDeviceHandler::HandlePragma(
}
if (Info->isStr("begin"))
- Actions.PushForceCUDAHostDevice();
- else if (!Actions.PopForceCUDAHostDevice())
+ Actions.CUDA().PushForceHostDevice();
+ else if (!Actions.CUDA().PopForceHostDevice())
PP.Diag(FirstTok.getLocation(),
diag::err_pragma_cannot_end_force_cuda_host_device);
@@ -4144,7 +4155,7 @@ void PragmaRISCVHandler::HandlePragma(Preprocessor &PP,
}
if (II->isStr("vector"))
- Actions.DeclareRISCVVBuiltins = true;
+ Actions.RISCV().DeclareRVVBuiltins = true;
else if (II->isStr("sifive_vector"))
- Actions.DeclareRISCVSiFiveVectorBuiltins = true;
+ Actions.RISCV().DeclareSiFiveVectorBuiltins = true;
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp b/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp
index d0ff33bd1379..3ac1f0fa27f8 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp
@@ -22,6 +22,9 @@
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaCodeCompletion.h"
+#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaOpenMP.h"
#include "clang/Sema/TypoCorrection.h"
#include "llvm/ADT/STLExtras.h"
#include <optional>
@@ -111,18 +114,21 @@ Parser::ParseStatementOrDeclaration(StmtVector &Stmts,
// here because we don't want to allow arbitrary orderings.
ParsedAttributes CXX11Attrs(AttrFactory);
MaybeParseCXX11Attributes(CXX11Attrs, /*MightBeObjCMessageSend*/ true);
- ParsedAttributes GNUAttrs(AttrFactory);
+ ParsedAttributes GNUOrMSAttrs(AttrFactory);
if (getLangOpts().OpenCL)
- MaybeParseGNUAttributes(GNUAttrs);
+ MaybeParseGNUAttributes(GNUOrMSAttrs);
+
+ if (getLangOpts().HLSL)
+ MaybeParseMicrosoftAttributes(GNUOrMSAttrs);
StmtResult Res = ParseStatementOrDeclarationAfterAttributes(
- Stmts, StmtCtx, TrailingElseLoc, CXX11Attrs, GNUAttrs);
+ Stmts, StmtCtx, TrailingElseLoc, CXX11Attrs, GNUOrMSAttrs);
MaybeDestroyTemplateIds();
// Attributes that are left should all go on the statement, so concatenate the
// two lists.
ParsedAttributes Attrs(AttrFactory);
- takeAndConcatenateAttrs(CXX11Attrs, GNUAttrs, Attrs);
+ takeAndConcatenateAttrs(CXX11Attrs, GNUOrMSAttrs, Attrs);
assert((Attrs.empty() || Res.isInvalid() || Res.isUsable()) &&
"attributes on empty statement");
@@ -189,7 +195,8 @@ Retry:
case tok::code_completion:
cutOffParsing();
- Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Statement);
+ Actions.CodeCompletion().CodeCompleteOrdinaryName(
+ getCurScope(), SemaCodeCompletion::PCC_Statement);
return StmtError();
case tok::identifier:
@@ -235,7 +242,15 @@ Retry:
auto IsStmtAttr = [](ParsedAttr &Attr) { return Attr.isStmtAttr(); };
bool AllAttrsAreStmtAttrs = llvm::all_of(CXX11Attrs, IsStmtAttr) &&
llvm::all_of(GNUAttrs, IsStmtAttr);
- if (((GNUAttributeLoc.isValid() && !(HaveAttrs && AllAttrsAreStmtAttrs)) ||
+ // In C, the grammar production for statement (C23 6.8.1p1) does not allow
+ // for declarations, which is different from C++ (C++23 [stmt.pre]p1). So
+ // in C++, we always allow a declaration, but in C we need to check whether
+ // we're in a statement context that allows declarations. e.g., in C, the
+ // following is invalid: if (1) int x;
+ if ((getLangOpts().CPlusPlus || getLangOpts().MicrosoftExt ||
+ (StmtCtx & ParsedStmtContext::AllowDeclarationsInC) !=
+ ParsedStmtContext()) &&
+ ((GNUAttributeLoc.isValid() && !(HaveAttrs && AllAttrsAreStmtAttrs)) ||
isDeclarationStatement())) {
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
DeclGroupPtrTy Decl;
@@ -559,11 +574,8 @@ StmtResult Parser::ParseExprStatement(ParsedStmtContext StmtCtx) {
}
Token *CurTok = nullptr;
- // If the semicolon is missing at the end of REPL input, consider if
- // we want to do value printing. Note this is only enabled in C++ mode
- // since part of the implementation requires C++ language features.
// Note we shouldn't eat the token since the callback needs it.
- if (Tok.is(tok::annot_repl_input_end) && Actions.getLangOpts().CPlusPlus)
+ if (Tok.is(tok::annot_repl_input_end))
CurTok = &Tok;
else
// Otherwise, eat the semicolon.
@@ -841,7 +853,7 @@ StmtResult Parser::ParseCaseStatement(ParsedStmtContext StmtCtx,
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteCase(getCurScope());
+ Actions.CodeCompletion().CodeCompleteCase(getCurScope());
return StmtError();
}
@@ -1496,10 +1508,13 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
SourceLocation ConstevalLoc;
if (Tok.is(tok::kw_constexpr)) {
- Diag(Tok, getLangOpts().CPlusPlus17 ? diag::warn_cxx14_compat_constexpr_if
- : diag::ext_constexpr_if);
- IsConstexpr = true;
- ConsumeToken();
+ // C23 supports constexpr keyword, but only for object definitions.
+ if (getLangOpts().CPlusPlus) {
+ Diag(Tok, getLangOpts().CPlusPlus17 ? diag::warn_cxx14_compat_constexpr_if
+ : diag::ext_constexpr_if);
+ IsConstexpr = true;
+ ConsumeToken();
+ }
} else {
if (Tok.is(tok::exclaim)) {
NotLocation = ConsumeToken();
@@ -1647,7 +1662,7 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
InnerScope.Exit();
} else if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteAfterIf(getCurScope(), IsBracedThen);
+ Actions.CodeCompletion().CodeCompleteAfterIf(getCurScope(), IsBracedThen);
return StmtError();
} else if (InnerStatementTrailingElseLoc.isValid()) {
Diag(InnerStatementTrailingElseLoc, diag::warn_dangling_else);
@@ -2038,9 +2053,9 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteOrdinaryName(getCurScope(),
- C99orCXXorObjC? Sema::PCC_ForInit
- : Sema::PCC_Expression);
+ Actions.CodeCompletion().CodeCompleteOrdinaryName(
+ getCurScope(), C99orCXXorObjC ? SemaCodeCompletion::PCC_ForInit
+ : SemaCodeCompletion::PCC_Expression);
return StmtError();
}
@@ -2115,7 +2130,8 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCForCollection(getCurScope(), DG);
+ Actions.CodeCompletion().CodeCompleteObjCForCollection(getCurScope(),
+ DG);
return StmtError();
}
Collection = ParseExpression();
@@ -2152,7 +2168,8 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteObjCForCollection(getCurScope(), nullptr);
+ Actions.CodeCompletion().CodeCompleteObjCForCollection(getCurScope(),
+ nullptr);
return StmtError();
}
Collection = ParseExpression();
@@ -2288,20 +2305,18 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
ForRangeStmt = Actions.ActOnCXXForRangeStmt(
getCurScope(), ForLoc, CoawaitLoc, FirstPart.get(),
ForRangeInfo.LoopVar.get(), ForRangeInfo.ColonLoc, CorrectedRange.get(),
- T.getCloseLocation(), Sema::BFRK_Build);
-
- // Similarly, we need to do the semantic analysis for a for-range
- // statement immediately in order to close over temporaries correctly.
+ T.getCloseLocation(), Sema::BFRK_Build,
+ ForRangeInfo.LifetimeExtendTemps);
} else if (ForEach) {
- ForEachStmt = Actions.ActOnObjCForCollectionStmt(ForLoc,
- FirstPart.get(),
- Collection.get(),
- T.getCloseLocation());
+ // Similarly, we need to do the semantic analysis for a for-range
+ // statement immediately in order to close over temporaries correctly.
+ ForEachStmt = Actions.ObjC().ActOnObjCForCollectionStmt(
+ ForLoc, FirstPart.get(), Collection.get(), T.getCloseLocation());
} else {
// In OpenMP loop region loop control variable must be captured and be
// private. Perform analysis of first part (if any).
if (getLangOpts().OpenMP && FirstPart.isUsable()) {
- Actions.ActOnOpenMPLoopInitialization(ForLoc, FirstPart.get());
+ Actions.OpenMP().ActOnOpenMPLoopInitialization(ForLoc, FirstPart.get());
}
}
@@ -2344,8 +2359,8 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
return StmtError();
if (ForEach)
- return Actions.FinishObjCForCollectionStmt(ForEachStmt.get(),
- Body.get());
+ return Actions.ObjC().FinishObjCForCollectionStmt(ForEachStmt.get(),
+ Body.get());
if (ForRangeInfo.ParsedForRangeDecl())
return Actions.FinishCXXForRangeStmt(ForRangeStmt.get(), Body.get());
@@ -2431,8 +2446,8 @@ StmtResult Parser::ParseReturnStatement() {
// FIXME: Code completion for co_return.
if (Tok.is(tok::code_completion) && !IsCoreturn) {
cutOffParsing();
- Actions.CodeCompleteExpression(getCurScope(),
- PreferredType.get(Tok.getLocation()));
+ Actions.CodeCompletion().CodeCompleteExpression(
+ getCurScope(), PreferredType.get(Tok.getLocation()));
return StmtError();
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp b/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp
index 64fe4d50bba2..a5130f56600e 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp
@@ -36,17 +36,19 @@ unsigned Parser::ReenterTemplateScopes(MultiParseScope &S, Decl *D) {
/// Parse a template declaration, explicit instantiation, or
/// explicit specialization.
-Decl *Parser::ParseDeclarationStartingWithTemplate(
- DeclaratorContext Context, SourceLocation &DeclEnd,
- ParsedAttributes &AccessAttrs, AccessSpecifier AS) {
+Parser::DeclGroupPtrTy
+Parser::ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
+ SourceLocation &DeclEnd,
+ ParsedAttributes &AccessAttrs) {
ObjCDeclContextSwitch ObjCDC(*this);
if (Tok.is(tok::kw_template) && NextToken().isNot(tok::less)) {
return ParseExplicitInstantiation(Context, SourceLocation(), ConsumeToken(),
- DeclEnd, AccessAttrs, AS);
+ DeclEnd, AccessAttrs,
+ AccessSpecifier::AS_none);
}
return ParseTemplateDeclarationOrSpecialization(Context, DeclEnd, AccessAttrs,
- AS);
+ AccessSpecifier::AS_none);
}
/// Parse a template declaration or an explicit specialization.
@@ -73,7 +75,7 @@ Decl *Parser::ParseDeclarationStartingWithTemplate(
///
/// explicit-specialization: [ C++ temp.expl.spec]
/// 'template' '<' '>' declaration
-Decl *Parser::ParseTemplateDeclarationOrSpecialization(
+Parser::DeclGroupPtrTy Parser::ParseTemplateDeclarationOrSpecialization(
DeclaratorContext Context, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS) {
assert(Tok.isOneOf(tok::kw_export, tok::kw_template) &&
@@ -161,17 +163,20 @@ Decl *Parser::ParseTemplateDeclarationOrSpecialization(
TemplateParams, RAngleLoc, OptionalRequiresClauseConstraintER.get()));
} while (Tok.isOneOf(tok::kw_export, tok::kw_template));
+ ParsedTemplateInfo TemplateInfo(&ParamLists, isSpecialization,
+ LastParamListWasEmpty);
+
// Parse the actual template declaration.
- if (Tok.is(tok::kw_concept))
- return ParseConceptDefinition(
- ParsedTemplateInfo(&ParamLists, isSpecialization,
- LastParamListWasEmpty),
- DeclEnd);
-
- return ParseSingleDeclarationAfterTemplate(
- Context,
- ParsedTemplateInfo(&ParamLists, isSpecialization, LastParamListWasEmpty),
- ParsingTemplateParams, DeclEnd, AccessAttrs, AS);
+ if (Tok.is(tok::kw_concept)) {
+ Decl *ConceptDecl = ParseConceptDefinition(TemplateInfo, DeclEnd);
+ // We need to explicitly pass ConceptDecl to ParsingDeclRAIIObject, so that
+ // delayed diagnostics (e.g. warn_deprecated) have a Decl to work with.
+ ParsingTemplateParams.complete(ConceptDecl);
+ return Actions.ConvertDeclToDeclGroup(ConceptDecl);
+ }
+
+ return ParseDeclarationAfterTemplate(
+ Context, TemplateInfo, ParsingTemplateParams, DeclEnd, AccessAttrs, AS);
}
/// Parse a single declaration that declares a template,
@@ -184,8 +189,8 @@ Decl *Parser::ParseTemplateDeclarationOrSpecialization(
/// declaration. Will be AS_none for namespace-scope declarations.
///
/// \returns the new declaration.
-Decl *Parser::ParseSingleDeclarationAfterTemplate(
- DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
+Parser::DeclGroupPtrTy Parser::ParseDeclarationAfterTemplate(
+ DeclaratorContext Context, ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromTParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS) {
assert(TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate &&
@@ -196,37 +201,29 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
Diag(Tok.getLocation(), diag::err_templated_invalid_declaration)
<< TemplateInfo.getSourceRange();
// Parse the static_assert declaration to improve error recovery.
- return ParseStaticAssertDeclaration(DeclEnd);
+ return Actions.ConvertDeclToDeclGroup(
+ ParseStaticAssertDeclaration(DeclEnd));
}
- if (Context == DeclaratorContext::Member) {
- // We are parsing a member template.
- DeclGroupPtrTy D = ParseCXXClassMemberDeclaration(
- AS, AccessAttrs, TemplateInfo, &DiagsFromTParams);
+ // We are parsing a member template.
+ if (Context == DeclaratorContext::Member)
+ return ParseCXXClassMemberDeclaration(AS, AccessAttrs, TemplateInfo,
+ &DiagsFromTParams);
- if (!D || !D.get().isSingleDecl())
- return nullptr;
- return D.get().getSingleDecl();
- }
-
- ParsedAttributes prefixAttrs(AttrFactory);
+ ParsedAttributes DeclAttrs(AttrFactory);
ParsedAttributes DeclSpecAttrs(AttrFactory);
// GNU attributes are applied to the declaration specification while the
// standard attributes are applied to the declaration. We parse the two
// attribute sets into different containters so we can apply them during
// the regular parsing process.
- while (MaybeParseCXX11Attributes(prefixAttrs) ||
+ while (MaybeParseCXX11Attributes(DeclAttrs) ||
MaybeParseGNUAttributes(DeclSpecAttrs))
;
- if (Tok.is(tok::kw_using)) {
- auto usingDeclPtr = ParseUsingDirectiveOrDeclaration(Context, TemplateInfo, DeclEnd,
- prefixAttrs);
- if (!usingDeclPtr || !usingDeclPtr.get().isSingleDecl())
- return nullptr;
- return usingDeclPtr.get().getSingleDecl();
- }
+ if (Tok.is(tok::kw_using))
+ return ParseUsingDirectiveOrDeclaration(Context, TemplateInfo, DeclEnd,
+ DeclAttrs);
// Parse the declaration specifiers, stealing any diagnostics from
// the template parameters.
@@ -239,7 +236,7 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
getDeclSpecContextFromDeclaratorContext(Context));
if (Tok.is(tok::semi)) {
- ProhibitAttributes(prefixAttrs);
+ ProhibitAttributes(DeclAttrs);
DeclEnd = ConsumeToken();
RecordDecl *AnonRecord = nullptr;
Decl *Decl = Actions.ParsedFreeStandingDeclSpec(
@@ -252,7 +249,7 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
assert(!AnonRecord &&
"Anonymous unions/structs should not be valid with template");
DS.complete(Decl);
- return Decl;
+ return Actions.ConvertDeclToDeclGroup(Decl);
}
if (DS.hasTagDefinition())
@@ -260,125 +257,9 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
// Move the attributes from the prefix into the DS.
if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation)
- ProhibitAttributes(prefixAttrs);
-
- // Parse the declarator.
- ParsingDeclarator DeclaratorInfo(*this, DS, prefixAttrs,
- (DeclaratorContext)Context);
- if (TemplateInfo.TemplateParams)
- DeclaratorInfo.setTemplateParameterLists(*TemplateInfo.TemplateParams);
-
- // Turn off usual access checking for template specializations and
- // instantiations.
- // C++20 [temp.spec] 13.9/6.
- // This disables the access checking rules for function template explicit
- // instantiation and explicit specialization:
- // - parameter-list;
- // - template-argument-list;
- // - noexcept-specifier;
- // - dynamic-exception-specifications (deprecated in C++11, removed since
- // C++17).
- bool IsTemplateSpecOrInst =
- (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation ||
- TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization);
- SuppressAccessChecks SAC(*this, IsTemplateSpecOrInst);
-
- ParseDeclarator(DeclaratorInfo);
-
- if (IsTemplateSpecOrInst)
- SAC.done();
-
- // Error parsing the declarator?
- if (!DeclaratorInfo.hasName()) {
- SkipMalformedDecl();
- return nullptr;
- }
-
- LateParsedAttrList LateParsedAttrs(true);
- if (DeclaratorInfo.isFunctionDeclarator()) {
- if (Tok.is(tok::kw_requires)) {
- CXXScopeSpec &ScopeSpec = DeclaratorInfo.getCXXScopeSpec();
- DeclaratorScopeObj DeclScopeObj(*this, ScopeSpec);
- if (ScopeSpec.isValid() &&
- Actions.ShouldEnterDeclaratorScope(getCurScope(), ScopeSpec))
- DeclScopeObj.EnterDeclaratorScope();
- ParseTrailingRequiresClause(DeclaratorInfo);
- }
+ ProhibitAttributes(DeclAttrs);
- MaybeParseGNUAttributes(DeclaratorInfo, &LateParsedAttrs);
- }
-
- if (DeclaratorInfo.isFunctionDeclarator() &&
- isStartOfFunctionDefinition(DeclaratorInfo)) {
-
- // Function definitions are only allowed at file scope and in C++ classes.
- // The C++ inline method definition case is handled elsewhere, so we only
- // need to handle the file scope definition case.
- if (Context != DeclaratorContext::File) {
- Diag(Tok, diag::err_function_definition_not_allowed);
- SkipMalformedDecl();
- return nullptr;
- }
-
- if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
- // Recover by ignoring the 'typedef'. This was probably supposed to be
- // the 'typename' keyword, which we should have already suggested adding
- // if it's appropriate.
- Diag(DS.getStorageClassSpecLoc(), diag::err_function_declared_typedef)
- << FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
- DS.ClearStorageClassSpecs();
- }
-
- if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation) {
- if (DeclaratorInfo.getName().getKind() !=
- UnqualifiedIdKind::IK_TemplateId) {
- // If the declarator-id is not a template-id, issue a diagnostic and
- // recover by ignoring the 'template' keyword.
- Diag(Tok, diag::err_template_defn_explicit_instantiation) << 0;
- return ParseFunctionDefinition(DeclaratorInfo, ParsedTemplateInfo(),
- &LateParsedAttrs);
- } else {
- SourceLocation LAngleLoc
- = PP.getLocForEndOfToken(TemplateInfo.TemplateLoc);
- Diag(DeclaratorInfo.getIdentifierLoc(),
- diag::err_explicit_instantiation_with_definition)
- << SourceRange(TemplateInfo.TemplateLoc)
- << FixItHint::CreateInsertion(LAngleLoc, "<>");
-
- // Recover as if it were an explicit specialization.
- TemplateParameterLists FakedParamLists;
- FakedParamLists.push_back(Actions.ActOnTemplateParameterList(
- 0, SourceLocation(), TemplateInfo.TemplateLoc, LAngleLoc,
- std::nullopt, LAngleLoc, nullptr));
-
- return ParseFunctionDefinition(
- DeclaratorInfo, ParsedTemplateInfo(&FakedParamLists,
- /*isSpecialization=*/true,
- /*lastParameterListWasEmpty=*/true),
- &LateParsedAttrs);
- }
- }
- return ParseFunctionDefinition(DeclaratorInfo, TemplateInfo,
- &LateParsedAttrs);
- }
-
- // Parse this declaration.
- Decl *ThisDecl = ParseDeclarationAfterDeclarator(DeclaratorInfo,
- TemplateInfo);
-
- if (Tok.is(tok::comma)) {
- Diag(Tok, diag::err_multiple_template_declarators)
- << (int)TemplateInfo.Kind;
- SkipUntil(tok::semi);
- return ThisDecl;
- }
-
- // Eat the semi colon after the declaration.
- ExpectAndConsumeSemi(diag::err_expected_semi_declaration);
- if (LateParsedAttrs.size() > 0)
- ParseLexedAttributeList(LateParsedAttrs, ThisDecl, true, false);
- DeclaratorInfo.complete(ThisDecl);
- return ThisDecl;
+ return ParseDeclGroup(DS, Context, DeclAttrs, TemplateInfo, &DeclEnd);
}
/// \brief Parse a single declaration that declares a concept.
@@ -436,10 +317,11 @@ Parser::ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
return nullptr;
}
- IdentifierInfo *Id = Result.Identifier;
+ const IdentifierInfo *Id = Result.Identifier;
SourceLocation IdLoc = Result.getBeginLoc();
- DiagnoseAndSkipCXX11Attributes();
+ ParsedAttributes Attrs(AttrFactory);
+ MaybeParseAttributes(PAKM_GNU | PAKM_CXX11, Attrs);
if (!TryConsumeToken(tok::equal)) {
Diag(Tok.getLocation(), diag::err_expected) << tok::equal;
@@ -458,8 +340,8 @@ Parser::ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
ExpectAndConsumeSemi(diag::err_expected_semi_declaration);
Expr *ConstraintExpr = ConstraintExprResult.get();
return Actions.ActOnConceptDefinition(getCurScope(),
- *TemplateInfo.TemplateParams,
- Id, IdLoc, ConstraintExpr);
+ *TemplateInfo.TemplateParams, Id, IdLoc,
+ ConstraintExpr, Attrs);
}
/// ParseTemplateParameters - Parses a template-parameter-list enclosed in
@@ -856,7 +738,12 @@ NamedDecl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
// we introduce the type parameter into the local scope.
SourceLocation EqualLoc;
ParsedType DefaultArg;
+ std::optional<DelayTemplateIdDestructionRAII> DontDestructTemplateIds;
if (TryConsumeToken(tok::equal, EqualLoc)) {
+ // The default argument might contain a lambda declaration; avoid destroying
+ // parsed template ids at the end of that declaration because they can be
+ // used in a type constraint later.
+ DontDestructTemplateIds.emplace(*this, /*DelayTemplateIdDestruction=*/true);
// The default argument may declare template parameters, notably
// if it contains a generic lambda, so we need to increase
// the template depth as these parameters would not be instantiated
@@ -928,10 +815,12 @@ NamedDecl *Parser::ParseTemplateTemplateParameter(unsigned Depth,
// identifier, comma, or greater. Provide a fixit if the identifier, comma,
// or greater appear immediately or after 'struct'. In the latter case,
// replace the keyword with 'class'.
+ bool TypenameKeyword = false;
if (!TryConsumeToken(tok::kw_class)) {
bool Replace = Tok.isOneOf(tok::kw_typename, tok::kw_struct);
const Token &Next = Tok.is(tok::kw_struct) ? NextToken() : Tok;
if (Tok.is(tok::kw_typename)) {
+ TypenameKeyword = true;
Diag(Tok.getLocation(),
getLangOpts().CPlusPlus17
? diag::warn_cxx14_compat_template_template_param_typename
@@ -1001,10 +890,9 @@ NamedDecl *Parser::ParseTemplateTemplateParameter(unsigned Depth,
}
}
- return Actions.ActOnTemplateTemplateParameter(getCurScope(), TemplateLoc,
- ParamList, EllipsisLoc,
- ParamName, NameLoc, Depth,
- Position, EqualLoc, DefaultArg);
+ return Actions.ActOnTemplateTemplateParameter(
+ getCurScope(), TemplateLoc, ParamList, TypenameKeyword, EllipsisLoc,
+ ParamName, NameLoc, Depth, Position, EqualLoc, DefaultArg);
}
/// ParseNonTypeTemplateParameter - Handle the parsing of non-type
@@ -1019,7 +907,8 @@ Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
// FIXME: The type should probably be restricted in some way... Not all
// declarators (parts of declarators?) are accepted for parameters.
DeclSpec DS(AttrFactory);
- ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS_none,
+ ParsedTemplateInfo TemplateInfo;
+ ParseDeclarationSpecifiers(DS, TemplateInfo, AS_none,
DeclSpecContext::DSC_template_param);
// Parse this as a typename.
@@ -1412,7 +1301,7 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
// later.
Tok.setKind(tok::annot_template_id);
- IdentifierInfo *TemplateII =
+ const IdentifierInfo *TemplateII =
TemplateName.getKind() == UnqualifiedIdKind::IK_Identifier
? TemplateName.Identifier
: nullptr;
@@ -1652,8 +1541,8 @@ bool Parser::ParseTemplateArgumentList(TemplateArgList &TemplateArgs,
if (!Template)
return QualType();
CalledSignatureHelp = true;
- return Actions.ProduceTemplateArgumentSignatureHelp(Template, TemplateArgs,
- OpenLoc);
+ return Actions.CodeCompletion().ProduceTemplateArgumentSignatureHelp(
+ Template, TemplateArgs, OpenLoc);
};
do {
@@ -1686,19 +1575,16 @@ bool Parser::ParseTemplateArgumentList(TemplateArgList &TemplateArgs,
/// 'extern' [opt] 'template' declaration
///
/// Note that the 'extern' is a GNU extension and C++11 feature.
-Decl *Parser::ParseExplicitInstantiation(DeclaratorContext Context,
- SourceLocation ExternLoc,
- SourceLocation TemplateLoc,
- SourceLocation &DeclEnd,
- ParsedAttributes &AccessAttrs,
- AccessSpecifier AS) {
+Parser::DeclGroupPtrTy Parser::ParseExplicitInstantiation(
+ DeclaratorContext Context, SourceLocation ExternLoc,
+ SourceLocation TemplateLoc, SourceLocation &DeclEnd,
+ ParsedAttributes &AccessAttrs, AccessSpecifier AS) {
// This isn't really required here.
ParsingDeclRAIIObject
ParsingTemplateParams(*this, ParsingDeclRAIIObject::NoParent);
-
- return ParseSingleDeclarationAfterTemplate(
- Context, ParsedTemplateInfo(ExternLoc, TemplateLoc),
- ParsingTemplateParams, DeclEnd, AccessAttrs, AS);
+ ParsedTemplateInfo TemplateInfo(ExternLoc, TemplateLoc);
+ return ParseDeclarationAfterTemplate(
+ Context, TemplateInfo, ParsingTemplateParams, DeclEnd, AccessAttrs, AS);
}
SourceRange Parser::ParsedTemplateInfo::getSourceRange() const {
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp b/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp
index 5bfabf55f50c..0142271b8e6d 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp
@@ -79,9 +79,9 @@ bool Parser::isCXXDeclarationStatement(
getCurScope(), *II, Tok.getLocation(), SS, /*Template=*/nullptr);
if (Actions.isCurrentClassName(*II, getCurScope(), &SS) ||
isDeductionGuide) {
- if (isConstructorDeclarator(/*Unqualified=*/SS.isEmpty(),
- isDeductionGuide,
- DeclSpec::FriendSpecified::No))
+ if (isConstructorDeclarator(
+ /*Unqualified=*/SS.isEmpty(), isDeductionGuide,
+ /*IsFriend=*/DeclSpec::FriendSpecified::No))
return true;
} else if (SS.isNotEmpty()) {
// If the scope is not empty, it could alternatively be something like
@@ -737,7 +737,8 @@ bool Parser::isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous) {
Parser::CXX11AttributeKind
Parser::isCXX11AttributeSpecifier(bool Disambiguate,
bool OuterMightBeMessageSend) {
- if (Tok.is(tok::kw_alignas))
+ // alignas is an attribute specifier in C++ but not in C23.
+ if (Tok.is(tok::kw_alignas) && !getLangOpts().C23)
return CAK_AttributeSpecifier;
if (Tok.isRegularKeywordAttribute())
@@ -1363,6 +1364,17 @@ Parser::isCXXDeclarationSpecifier(ImplicitTypenameContext AllowImplicitTypename,
};
switch (Tok.getKind()) {
case tok::identifier: {
+ if (GetLookAheadToken(1).is(tok::ellipsis) &&
+ GetLookAheadToken(2).is(tok::l_square)) {
+
+ if (TryAnnotateTypeOrScopeToken())
+ return TPResult::Error;
+ if (Tok.is(tok::identifier))
+ return TPResult::False;
+ return isCXXDeclarationSpecifier(ImplicitTypenameContext::No,
+ BracedCastResult, InvalidAsDeclSpec);
+ }
+
// Check for need to substitute AltiVec __vector keyword
// for "vector" identifier.
if (TryAltiVecVectorToken())
@@ -1373,6 +1385,15 @@ Parser::isCXXDeclarationSpecifier(ImplicitTypenameContext AllowImplicitTypename,
if (!getLangOpts().ObjC && Next.is(tok::identifier))
return TPResult::True;
+ // If this identifier was reverted from a token ID, and the next token
+ // is a '(', we assume it to be a use of a type trait, so this
+ // can never be a type name.
+ if (Next.is(tok::l_paren) &&
+ Tok.getIdentifierInfo()->hasRevertedTokenIDToIdentifier() &&
+ isRevertibleTypeTrait(Tok.getIdentifierInfo())) {
+ return TPResult::False;
+ }
+
if (Next.isNot(tok::coloncolon) && Next.isNot(tok::less)) {
// Determine whether this is a valid expression. If not, we will hit
// a parse error one way or another. In that case, tell the caller that
@@ -1755,6 +1776,7 @@ Parser::isCXXDeclarationSpecifier(ImplicitTypenameContext AllowImplicitTypename,
return TPResult::True;
}
+
[[fallthrough]];
case tok::kw_char:
@@ -1782,6 +1804,7 @@ Parser::isCXXDeclarationSpecifier(ImplicitTypenameContext AllowImplicitTypename,
case tok::kw__Accum:
case tok::kw__Fract:
case tok::kw__Sat:
+ case tok::annot_pack_indexing_type:
#define GENERIC_IMAGE_TYPE(ImgType, Id) case tok::kw_##ImgType##_t:
#include "clang/Basic/OpenCLImageTypes.def"
if (NextToken().is(tok::l_paren))
@@ -1828,6 +1851,9 @@ Parser::isCXXDeclarationSpecifier(ImplicitTypenameContext AllowImplicitTypename,
#include "clang/Basic/TransformTypeTraits.def"
return TPResult::True;
+ // C11 _Alignas
+ case tok::kw__Alignas:
+ return TPResult::True;
// C11 _Atomic
case tok::kw__Atomic:
return TPResult::True;
@@ -1860,6 +1886,7 @@ bool Parser::isCXXDeclarationSpecifierAType() {
switch (Tok.getKind()) {
// typename-specifier
case tok::annot_decltype:
+ case tok::annot_pack_indexing_type:
case tok::annot_template_id:
case tok::annot_typename:
case tok::kw_typeof:
diff --git a/contrib/llvm-project/clang/lib/Parse/Parser.cpp b/contrib/llvm-project/clang/lib/Parse/Parser.cpp
index 0b092181bca7..5ebe71e496a2 100644
--- a/contrib/llvm-project/clang/lib/Parse/Parser.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/Parser.cpp
@@ -21,6 +21,7 @@
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaCodeCompletion.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TimeProfiler.h"
using namespace clang;
@@ -685,7 +686,7 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result,
// FIXME: We need a better way to disambiguate C++ clang modules and
// standard C++ modules.
if (!getLangOpts().CPlusPlusModules || !Mod->isHeaderUnit())
- Actions.ActOnModuleInclude(Loc, Mod);
+ Actions.ActOnAnnotModuleInclude(Loc, Mod);
else {
DeclResult Import =
Actions.ActOnModuleImport(Loc, SourceLocation(), Loc, Mod);
@@ -697,15 +698,17 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result,
}
case tok::annot_module_begin:
- Actions.ActOnModuleBegin(Tok.getLocation(), reinterpret_cast<Module *>(
- Tok.getAnnotationValue()));
+ Actions.ActOnAnnotModuleBegin(
+ Tok.getLocation(),
+ reinterpret_cast<Module *>(Tok.getAnnotationValue()));
ConsumeAnnotationToken();
ImportState = Sema::ModuleImportState::NotACXX20Module;
return false;
case tok::annot_module_end:
- Actions.ActOnModuleEnd(Tok.getLocation(), reinterpret_cast<Module *>(
- Tok.getAnnotationValue()));
+ Actions.ActOnAnnotModuleEnd(
+ Tok.getLocation(),
+ reinterpret_cast<Module *>(Tok.getAnnotationValue()));
ConsumeAnnotationToken();
ImportState = Sema::ModuleImportState::NotACXX20Module;
return false;
@@ -942,20 +945,21 @@ Parser::ParseExternalDeclaration(ParsedAttributes &Attrs,
cutOffParsing();
if (CurParsedObjCImpl) {
// Code-complete Objective-C methods even without leading '-'/'+' prefix.
- Actions.CodeCompleteObjCMethodDecl(getCurScope(),
- /*IsInstanceMethod=*/std::nullopt,
- /*ReturnType=*/nullptr);
+ Actions.CodeCompletion().CodeCompleteObjCMethodDecl(
+ getCurScope(),
+ /*IsInstanceMethod=*/std::nullopt,
+ /*ReturnType=*/nullptr);
}
- Sema::ParserCompletionContext PCC;
+ SemaCodeCompletion::ParserCompletionContext PCC;
if (CurParsedObjCImpl) {
- PCC = Sema::PCC_ObjCImplementation;
+ PCC = SemaCodeCompletion::PCC_ObjCImplementation;
} else if (PP.isIncrementalProcessingEnabled()) {
- PCC = Sema::PCC_TopLevelOrExpression;
+ PCC = SemaCodeCompletion::PCC_TopLevelOrExpression;
} else {
- PCC = Sema::PCC_Namespace;
+ PCC = SemaCodeCompletion::PCC_Namespace;
};
- Actions.CodeCompleteOrdinaryName(getCurScope(), PCC);
+ Actions.CodeCompletion().CodeCompleteOrdinaryName(getCurScope(), PCC);
return nullptr;
case tok::kw_import: {
Sema::ModuleImportState IS = Sema::ModuleImportState::NotACXX20Module;
@@ -966,7 +970,7 @@ Parser::ParseExternalDeclaration(ParsedAttributes &Attrs,
SingleDecl = ParseModuleImport(SourceLocation(), IS);
} break;
case tok::kw_export:
- if (getLangOpts().CPlusPlusModules) {
+ if (getLangOpts().CPlusPlusModules || getLangOpts().HLSL) {
ProhibitAttributes(Attrs);
SingleDecl = ParseExportDeclaration();
break;
@@ -1040,8 +1044,8 @@ Parser::ParseExternalDeclaration(ParsedAttributes &Attrs,
diag::warn_cxx98_compat_extern_template :
diag::ext_extern_template) << SourceRange(ExternLoc, TemplateLoc);
SourceLocation DeclEnd;
- return Actions.ConvertDeclToDeclGroup(ParseExplicitInstantiation(
- DeclaratorContext::File, ExternLoc, TemplateLoc, DeclEnd, Attrs));
+ return ParseExplicitInstantiation(DeclaratorContext::File, ExternLoc,
+ TemplateLoc, DeclEnd, Attrs);
}
goto dont_know;
@@ -1143,9 +1147,10 @@ Parser::DeclGroupPtrTy Parser::ParseDeclOrFunctionDefInternal(
DS.SetRangeEnd(DeclSpecAttrs.Range.getEnd());
DS.takeAttributesFrom(DeclSpecAttrs);
+ ParsedTemplateInfo TemplateInfo;
MaybeParseMicrosoftAttributes(DS.getAttributes());
// Parse the common declaration-specifiers piece.
- ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS,
+ ParseDeclarationSpecifiers(DS, TemplateInfo, AS,
DeclSpecContext::DSC_top_level);
// If we had a free-standing type definition with a missing semicolon, we
@@ -1241,7 +1246,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclOrFunctionDefInternal(
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
- return ParseDeclGroup(DS, DeclaratorContext::File, Attrs);
+ return ParseDeclGroup(DS, DeclaratorContext::File, Attrs, TemplateInfo);
}
Parser::DeclGroupPtrTy Parser::ParseDeclarationOrFunctionDefinition(
@@ -1401,6 +1406,7 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
// Parse function body eagerly if it is either '= delete;' or '= default;' as
// ActOnStartOfFunctionDef needs to know whether the function is deleted.
+ StringLiteral *DeletedMessage = nullptr;
Sema::FnBodyKind BodyKind = Sema::FnBodyKind::Other;
SourceLocation KWLoc;
if (TryConsumeToken(tok::equal)) {
@@ -1412,6 +1418,7 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
: diag::ext_defaulted_deleted_function)
<< 1 /* deleted */;
BodyKind = Sema::FnBodyKind::Delete;
+ DeletedMessage = ParseCXXDeletedFunctionMessage();
} else if (TryConsumeToken(tok::kw_default, KWLoc)) {
Diag(KWLoc, getLangOpts().CPlusPlus11
? diag::warn_cxx98_compat_defaulted_deleted_function
@@ -1434,9 +1441,11 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
}
}
+ Sema::FPFeaturesStateRAII SaveFPFeatures(Actions);
+
// Tell the actions module that we have entered a function definition with the
// specified Declarator for the function.
- Sema::SkipBodyInfo SkipBody;
+ SkipBodyInfo SkipBody;
Decl *Res = Actions.ActOnStartOfFunctionDef(getCurScope(), D,
TemplateInfo.TemplateParams
? *TemplateInfo.TemplateParams
@@ -1470,7 +1479,7 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
D.getMutableDeclSpec().abort();
if (BodyKind != Sema::FnBodyKind::Other) {
- Actions.SetFunctionBodyKind(Res, KWLoc, BodyKind);
+ Actions.SetFunctionBodyKind(Res, KWLoc, BodyKind, DeletedMessage);
Stmt *GeneratedBody = Res ? Res->getBody() : nullptr;
Actions.ActOnFinishFunctionBody(Res, GeneratedBody, false);
return Res;
@@ -1555,7 +1564,8 @@ void Parser::ParseKNRParamDeclarations(Declarator &D) {
// Parse the common declaration-specifiers piece.
DeclSpec DS(AttrFactory);
- ParseDeclarationSpecifiers(DS);
+ ParsedTemplateInfo TemplateInfo;
+ ParseDeclarationSpecifiers(DS, TemplateInfo);
// C99 6.9.1p6: 'each declaration in the declaration list shall have at
// least one declarator'.
@@ -1992,7 +2002,8 @@ bool Parser::TryAnnotateTypeOrScopeToken(
assert((Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
Tok.is(tok::kw_typename) || Tok.is(tok::annot_cxxscope) ||
Tok.is(tok::kw_decltype) || Tok.is(tok::annot_template_id) ||
- Tok.is(tok::kw___super) || Tok.is(tok::kw_auto)) &&
+ Tok.is(tok::kw___super) || Tok.is(tok::kw_auto) ||
+ Tok.is(tok::annot_pack_indexing_type)) &&
"Cannot be a type or scope token!");
if (Tok.is(tok::kw_typename)) {
@@ -2049,9 +2060,19 @@ bool Parser::TryAnnotateTypeOrScopeToken(
return true;
}
+ bool TemplateKWPresent = false;
+ if (Tok.is(tok::kw_template)) {
+ ConsumeToken();
+ TemplateKWPresent = true;
+ }
+
TypeResult Ty;
if (Tok.is(tok::identifier)) {
- // FIXME: check whether the next token is '<', first!
+ if (TemplateKWPresent && NextToken().isNot(tok::less)) {
+ Diag(Tok.getLocation(),
+ diag::missing_template_arg_list_after_template_kw);
+ return true;
+ }
Ty = Actions.ActOnTypenameType(getCurScope(), TypenameLoc, SS,
*Tok.getIdentifierInfo(),
Tok.getLocation());
@@ -2272,54 +2293,57 @@ SourceLocation Parser::handleUnexpectedCodeCompletionToken() {
for (Scope *S = getCurScope(); S; S = S->getParent()) {
if (S->isFunctionScope()) {
cutOffParsing();
- Actions.CodeCompleteOrdinaryName(getCurScope(),
- Sema::PCC_RecoveryInFunction);
+ Actions.CodeCompletion().CodeCompleteOrdinaryName(
+ getCurScope(), SemaCodeCompletion::PCC_RecoveryInFunction);
return PrevTokLocation;
}
if (S->isClassScope()) {
cutOffParsing();
- Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Class);
+ Actions.CodeCompletion().CodeCompleteOrdinaryName(
+ getCurScope(), SemaCodeCompletion::PCC_Class);
return PrevTokLocation;
}
}
cutOffParsing();
- Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Namespace);
+ Actions.CodeCompletion().CodeCompleteOrdinaryName(
+ getCurScope(), SemaCodeCompletion::PCC_Namespace);
return PrevTokLocation;
}
// Code-completion pass-through functions
void Parser::CodeCompleteDirective(bool InConditional) {
- Actions.CodeCompletePreprocessorDirective(InConditional);
+ Actions.CodeCompletion().CodeCompletePreprocessorDirective(InConditional);
}
void Parser::CodeCompleteInConditionalExclusion() {
- Actions.CodeCompleteInPreprocessorConditionalExclusion(getCurScope());
+ Actions.CodeCompletion().CodeCompleteInPreprocessorConditionalExclusion(
+ getCurScope());
}
void Parser::CodeCompleteMacroName(bool IsDefinition) {
- Actions.CodeCompletePreprocessorMacroName(IsDefinition);
+ Actions.CodeCompletion().CodeCompletePreprocessorMacroName(IsDefinition);
}
void Parser::CodeCompletePreprocessorExpression() {
- Actions.CodeCompletePreprocessorExpression();
+ Actions.CodeCompletion().CodeCompletePreprocessorExpression();
}
void Parser::CodeCompleteMacroArgument(IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned ArgumentIndex) {
- Actions.CodeCompletePreprocessorMacroArgument(getCurScope(), Macro, MacroInfo,
- ArgumentIndex);
+ Actions.CodeCompletion().CodeCompletePreprocessorMacroArgument(
+ getCurScope(), Macro, MacroInfo, ArgumentIndex);
}
void Parser::CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) {
- Actions.CodeCompleteIncludedFile(Dir, IsAngled);
+ Actions.CodeCompletion().CodeCompleteIncludedFile(Dir, IsAngled);
}
void Parser::CodeCompleteNaturalLanguage() {
- Actions.CodeCompleteNaturalLanguage();
+ Actions.CodeCompletion().CodeCompleteNaturalLanguage();
}
bool Parser::ParseMicrosoftIfExistsCondition(IfExistsCondition& Result) {
@@ -2673,7 +2697,7 @@ bool Parser::ParseModuleName(
if (!Tok.is(tok::identifier)) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
- Actions.CodeCompleteModuleImport(UseLoc, Path);
+ Actions.CodeCompletion().CodeCompleteModuleImport(UseLoc, Path);
return true;
}
@@ -2706,9 +2730,9 @@ bool Parser::parseMisplacedModuleImport() {
// happens.
if (MisplacedModuleBeginCount) {
--MisplacedModuleBeginCount;
- Actions.ActOnModuleEnd(Tok.getLocation(),
- reinterpret_cast<Module *>(
- Tok.getAnnotationValue()));
+ Actions.ActOnAnnotModuleEnd(
+ Tok.getLocation(),
+ reinterpret_cast<Module *>(Tok.getAnnotationValue()));
ConsumeAnnotationToken();
continue;
}
@@ -2718,18 +2742,18 @@ bool Parser::parseMisplacedModuleImport() {
return true;
case tok::annot_module_begin:
// Recover by entering the module (Sema will diagnose).
- Actions.ActOnModuleBegin(Tok.getLocation(),
- reinterpret_cast<Module *>(
- Tok.getAnnotationValue()));
+ Actions.ActOnAnnotModuleBegin(
+ Tok.getLocation(),
+ reinterpret_cast<Module *>(Tok.getAnnotationValue()));
ConsumeAnnotationToken();
++MisplacedModuleBeginCount;
continue;
case tok::annot_module_include:
// Module import found where it should not be, for instance, inside a
// namespace. Recover by importing the module.
- Actions.ActOnModuleInclude(Tok.getLocation(),
- reinterpret_cast<Module *>(
- Tok.getAnnotationValue()));
+ Actions.ActOnAnnotModuleInclude(
+ Tok.getLocation(),
+ reinterpret_cast<Module *>(Tok.getAnnotationValue()));
ConsumeAnnotationToken();
// If there is another module import, process it.
continue;
@@ -2740,6 +2764,15 @@ bool Parser::parseMisplacedModuleImport() {
return false;
}
+void Parser::diagnoseUseOfC11Keyword(const Token &Tok) {
+ // Warn that this is a C11 extension if in an older mode or if in C++.
+ // Otherwise, warn that it is incompatible with standards before C11 if in
+ // C11 or later.
+ Diag(Tok, getLangOpts().C11 ? diag::warn_c11_compat_keyword
+ : diag::ext_c11_feature)
+ << Tok.getName();
+}
+
bool BalancedDelimiterTracker::diagnoseOverflow() {
P.Diag(P.Tok, diag::err_bracket_depth_exceeded)
<< P.getLangOpts().BracketDepth;
diff --git a/contrib/llvm-project/clang/lib/Rewrite/HTMLRewrite.cpp b/contrib/llvm-project/clang/lib/Rewrite/HTMLRewrite.cpp
index 083a9c09297e..a96ca0764ae7 100644
--- a/contrib/llvm-project/clang/lib/Rewrite/HTMLRewrite.cpp
+++ b/contrib/llvm-project/clang/lib/Rewrite/HTMLRewrite.cpp
@@ -21,8 +21,10 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
-using namespace clang;
+using namespace clang;
+using namespace llvm;
+using namespace html;
/// HighlightRange - Highlight a range in the source code with the specified
/// start/end tags. B/E must be in the same file. This ensures that
@@ -104,6 +106,32 @@ void html::HighlightRange(RewriteBuffer &RB, unsigned B, unsigned E,
}
}
+namespace clang::html {
+struct RelexRewriteCache {
+ // These structs mimic input arguments of HighlightRange().
+ struct Highlight {
+ SourceLocation B, E;
+ std::string StartTag, EndTag;
+ bool IsTokenRange;
+ };
+ struct RawHighlight {
+ unsigned B, E;
+ std::string StartTag, EndTag;
+ };
+
+ // SmallVector isn't appropriate because these vectors are almost never small.
+ using HighlightList = std::vector<Highlight>;
+ using RawHighlightList = std::vector<RawHighlight>;
+
+ DenseMap<FileID, RawHighlightList> SyntaxHighlights;
+ DenseMap<FileID, HighlightList> MacroHighlights;
+};
+} // namespace clang::html
+
+html::RelexRewriteCacheRef html::instantiateRelexRewriteCache() {
+ return std::make_shared<RelexRewriteCache>();
+}
+
void html::EscapeText(Rewriter &R, FileID FID,
bool EscapeSpaces, bool ReplaceTabs) {
@@ -442,13 +470,18 @@ input.spoilerhider:checked + label + .spoiler{
/// information about keywords, macro expansions etc. This uses the macro
/// table state from the end of the file, so it won't be perfectly perfect,
/// but it will be reasonably close.
-void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP) {
- RewriteBuffer &RB = R.getEditBuffer(FID);
+static void SyntaxHighlightImpl(
+ Rewriter &R, FileID FID, const Preprocessor &PP,
+ llvm::function_ref<void(RewriteBuffer &, unsigned, unsigned, const char *,
+ const char *, const char *)>
+ HighlightRangeCallback) {
+ RewriteBuffer &RB = R.getEditBuffer(FID);
const SourceManager &SM = PP.getSourceManager();
llvm::MemoryBufferRef FromFile = SM.getBufferOrFake(FID);
+ const char *BufferStart = FromFile.getBuffer().data();
+
Lexer L(FID, FromFile, SM, PP.getLangOpts());
- const char *BufferStart = L.getBuffer().data();
// Inform the preprocessor that we want to retain comments as tokens, so we
// can highlight them.
@@ -475,13 +508,13 @@ void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP) {
// If this is a pp-identifier, for a keyword, highlight it as such.
if (Tok.isNot(tok::identifier))
- HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
- "<span class='keyword'>", "</span>");
+ HighlightRangeCallback(RB, TokOffs, TokOffs + TokLen, BufferStart,
+ "<span class='keyword'>", "</span>");
break;
}
case tok::comment:
- HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
- "<span class='comment'>", "</span>");
+ HighlightRangeCallback(RB, TokOffs, TokOffs + TokLen, BufferStart,
+ "<span class='comment'>", "</span>");
break;
case tok::utf8_string_literal:
// Chop off the u part of u8 prefix
@@ -498,8 +531,8 @@ void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP) {
[[fallthrough]];
case tok::string_literal:
// FIXME: Exclude the optional ud-suffix from the highlighted range.
- HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
- "<span class='string_literal'>", "</span>");
+ HighlightRangeCallback(RB, TokOffs, TokOffs + TokLen, BufferStart,
+ "<span class='string_literal'>", "</span>");
break;
case tok::hash: {
// If this is a preprocessor directive, all tokens to end of line are too.
@@ -516,8 +549,8 @@ void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP) {
}
// Find end of line. This is a hack.
- HighlightRange(RB, TokOffs, TokEnd, BufferStart,
- "<span class='directive'>", "</span>");
+ HighlightRangeCallback(RB, TokOffs, TokEnd, BufferStart,
+ "<span class='directive'>", "</span>");
// Don't skip the next token.
continue;
@@ -527,12 +560,43 @@ void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP) {
L.LexFromRawLexer(Tok);
}
}
+void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP,
+ RelexRewriteCacheRef Cache) {
+ RewriteBuffer &RB = R.getEditBuffer(FID);
+ const SourceManager &SM = PP.getSourceManager();
+ llvm::MemoryBufferRef FromFile = SM.getBufferOrFake(FID);
+ const char *BufferStart = FromFile.getBuffer().data();
+
+ if (Cache) {
+ auto CacheIt = Cache->SyntaxHighlights.find(FID);
+ if (CacheIt != Cache->SyntaxHighlights.end()) {
+ for (const RelexRewriteCache::RawHighlight &H : CacheIt->second) {
+ HighlightRange(RB, H.B, H.E, BufferStart, H.StartTag.data(),
+ H.EndTag.data());
+ }
+ return;
+ }
+ }
+
+ // "Every time you would call HighlightRange, cache the inputs as well."
+ auto HighlightRangeCallback = [&](RewriteBuffer &RB, unsigned B, unsigned E,
+ const char *BufferStart,
+ const char *StartTag, const char *EndTag) {
+ HighlightRange(RB, B, E, BufferStart, StartTag, EndTag);
+
+ if (Cache)
+ Cache->SyntaxHighlights[FID].push_back({B, E, StartTag, EndTag});
+ };
+
+ SyntaxHighlightImpl(R, FID, PP, HighlightRangeCallback);
+}
+
+static void HighlightMacrosImpl(
+ Rewriter &R, FileID FID, const Preprocessor &PP,
+ llvm::function_ref<void(Rewriter &, SourceLocation, SourceLocation,
+ const char *, const char *, bool)>
+ HighlightRangeCallback) {
-/// HighlightMacros - This uses the macro table state from the end of the
-/// file, to re-expand macros and insert (into the HTML) information about the
-/// macro expansions. This won't be perfectly perfect, but it will be
-/// reasonably close.
-void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
// Re-lex the raw token stream into a token buffer.
const SourceManager &SM = PP.getSourceManager();
std::vector<Token> TokenStream;
@@ -659,11 +723,44 @@ void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
// get highlighted.
Expansion = "<span class='macro_popup'>" + Expansion + "</span></span>";
- HighlightRange(R, LLoc.getBegin(), LLoc.getEnd(), "<span class='macro'>",
- Expansion.c_str(), LLoc.isTokenRange());
+ HighlightRangeCallback(R, LLoc.getBegin(), LLoc.getEnd(),
+ "<span class='macro'>", Expansion.c_str(),
+ LLoc.isTokenRange());
}
// Restore the preprocessor's old state.
TmpPP.setDiagnostics(*OldDiags);
TmpPP.setPragmasEnabled(PragmasPreviouslyEnabled);
}
+
+/// HighlightMacros - This uses the macro table state from the end of the
+/// file, to re-expand macros and insert (into the HTML) information about the
+/// macro expansions. This won't be perfectly perfect, but it will be
+/// reasonably close.
+void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor &PP,
+ RelexRewriteCacheRef Cache) {
+ if (Cache) {
+ auto CacheIt = Cache->MacroHighlights.find(FID);
+ if (CacheIt != Cache->MacroHighlights.end()) {
+ for (const RelexRewriteCache::Highlight &H : CacheIt->second) {
+ HighlightRange(R, H.B, H.E, H.StartTag.data(), H.EndTag.data(),
+ H.IsTokenRange);
+ }
+ return;
+ }
+ }
+
+ // "Every time you would call HighlightRange, cache the inputs as well."
+ auto HighlightRangeCallback = [&](Rewriter &R, SourceLocation B,
+ SourceLocation E, const char *StartTag,
+ const char *EndTag, bool isTokenRange) {
+ HighlightRange(R, B, E, StartTag, EndTag, isTokenRange);
+
+ if (Cache) {
+ Cache->MacroHighlights[FID].push_back(
+ {B, E, StartTag, EndTag, isTokenRange});
+ }
+ };
+
+ HighlightMacrosImpl(R, FID, PP, HighlightRangeCallback);
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp b/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp
index 9e9294572df9..0f604c61fa3a 100644
--- a/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -1,4 +1,4 @@
-//=- AnalysisBasedWarnings.cpp - Sema warnings based on libAnalysis -*- C++ -*-=//
+//=== AnalysisBasedWarnings.cpp - Sema warnings based on libAnalysis ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -26,7 +26,6 @@
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtVisitor.h"
-#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
#include "clang/Analysis/Analyses/CalledOnceCheck.h"
@@ -39,6 +38,7 @@
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Preprocessor.h"
@@ -442,7 +442,7 @@ static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
if (!live[B->getBlockID()]) {
if (B->pred_begin() == B->pred_end()) {
const Stmt *Term = B->getTerminatorStmt();
- if (Term && isa<CXXTryStmt>(Term))
+ if (isa_and_nonnull<CXXTryStmt>(Term))
// When not adding EH edges from calls, catch clauses
// can otherwise seem dead. Avoid noting them as dead.
count += reachable_code::ScanReachableFromBlock(B, live);
@@ -1100,7 +1100,7 @@ namespace {
// issue a warn_fallthrough_attr_unreachable for them.
for (const auto *B : *Cfg) {
const Stmt *L = B->getLabel();
- if (L && isa<SwitchCase>(L) && ReachableBlocks.insert(B).second)
+ if (isa_and_nonnull<SwitchCase>(L) && ReachableBlocks.insert(B).second)
BlockQueue.push_back(B);
}
@@ -1128,7 +1128,7 @@ namespace {
if (!P) continue;
const Stmt *Term = P->getTerminatorStmt();
- if (Term && isa<SwitchStmt>(Term))
+ if (isa_and_nonnull<SwitchStmt>(Term))
continue; // Switch statement, good.
const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(P->getLabel());
@@ -1327,7 +1327,7 @@ static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
B = *B->succ_begin();
Term = B->getTerminatorStmt();
}
- if (!(B->empty() && Term && isa<BreakStmt>(Term))) {
+ if (!(B->empty() && isa_and_nonnull<BreakStmt>(Term))) {
Preprocessor &PP = S.getPreprocessor();
StringRef AnnotationSpelling = getFallthroughAttrSpelling(PP, L);
SmallString<64> TextToInsert(AnnotationSpelling);
@@ -2257,7 +2257,7 @@ public:
MsgParam = 1;
}
} else {
- if (isa<CallExpr>(Operation)) {
+ if (isa<CallExpr>(Operation) || isa<CXXConstructExpr>(Operation)) {
// note_unsafe_buffer_operation doesn't have this mode yet.
assert(!IsRelatedToDecl && "Not implemented yet!");
MsgParam = 3;
@@ -2292,9 +2292,30 @@ public:
}
}
+ void handleUnsafeOperationInContainer(const Stmt *Operation,
+ bool IsRelatedToDecl,
+ ASTContext &Ctx) override {
+ SourceLocation Loc;
+ SourceRange Range;
+ unsigned MsgParam = 0;
+
+ // This function only handles SpanTwoParamConstructorGadget so far, which
+ // always gives a CXXConstructExpr.
+ const auto *CtorExpr = cast<CXXConstructExpr>(Operation);
+ Loc = CtorExpr->getLocation();
+
+ S.Diag(Loc, diag::warn_unsafe_buffer_usage_in_container);
+ if (IsRelatedToDecl) {
+ assert(!SuggestSuggestions &&
+ "Variables blamed for unsafe buffer usage without suggestions!");
+ S.Diag(Loc, diag::note_unsafe_buffer_operation) << MsgParam << Range;
+ }
+ }
+
void handleUnsafeVariableGroup(const VarDecl *Variable,
const VariableGroupsManager &VarGrpMgr,
- FixItList &&Fixes, const Decl *D) override {
+ FixItList &&Fixes, const Decl *D,
+ const FixitStrategy &VarTargetTypes) override {
assert(!SuggestSuggestions &&
"Unsafe buffer usage fixits displayed without suggestions!");
S.Diag(Variable->getLocation(), diag::warn_unsafe_buffer_variable)
@@ -2309,7 +2330,18 @@ public:
// NOT explain how the variables are grouped as the reason is non-trivial
// and irrelevant to users' experience:
const auto VarGroupForVD = VarGrpMgr.getGroupOfVar(Variable, &BriefMsg);
- unsigned FixItStrategy = 0; // For now we only have 'std::span' strategy
+ unsigned FixItStrategy = 0;
+ switch (VarTargetTypes.lookup(Variable)) {
+ case clang::FixitStrategy::Kind::Span:
+ FixItStrategy = 0;
+ break;
+ case clang::FixitStrategy::Kind::Array:
+ FixItStrategy = 1;
+ break;
+ default:
+ assert(false && "We support only std::span and std::array");
+ };
+
const auto &FD =
S.Diag(Variable->getLocation(),
BriefMsg ? diag::note_unsafe_buffer_variable_fixit_together
@@ -2334,6 +2366,10 @@ public:
return S.PP.isSafeBufferOptOut(S.getSourceManager(), Loc);
}
+ bool ignoreUnsafeBufferInContainer(const SourceLocation &Loc) const override {
+ return S.Diags.isIgnored(diag::warn_unsafe_buffer_usage_in_container, Loc);
+ }
+
// Returns the text representation of clang::unsafe_buffer_usage attribute.
// `WSSuffix` holds customized "white-space"s, e.g., newline or whitespace
// characters.
@@ -2498,6 +2534,8 @@ void clang::sema::AnalysisBasedWarnings::IssueWarnings(
if (!Diags.isIgnored(diag::warn_unsafe_buffer_operation,
Node->getBeginLoc()) ||
!Diags.isIgnored(diag::warn_unsafe_buffer_variable,
+ Node->getBeginLoc()) ||
+ !Diags.isIgnored(diag::warn_unsafe_buffer_usage_in_container,
Node->getBeginLoc())) {
clang::checkUnsafeBufferUsage(Node, R,
UnsafeBufferUsageShouldEmitSuggestions);
@@ -2508,7 +2546,9 @@ void clang::sema::AnalysisBasedWarnings::IssueWarnings(
// Emit per-function analysis-based warnings that require the whole-TU
// reasoning. Check if any of them is enabled at all before scanning the AST:
if (!Diags.isIgnored(diag::warn_unsafe_buffer_operation, SourceLocation()) ||
- !Diags.isIgnored(diag::warn_unsafe_buffer_variable, SourceLocation())) {
+ !Diags.isIgnored(diag::warn_unsafe_buffer_variable, SourceLocation()) ||
+ !Diags.isIgnored(diag::warn_unsafe_buffer_usage_in_container,
+ SourceLocation())) {
CallableVisitor(CallAnalyzers).TraverseTranslationUnitDecl(TU);
}
}
diff --git a/contrib/llvm-project/clang/lib/Sema/CheckExprLifetime.cpp b/contrib/llvm-project/clang/lib/Sema/CheckExprLifetime.cpp
new file mode 100644
index 000000000000..112cf3d08182
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/CheckExprLifetime.cpp
@@ -0,0 +1,1330 @@
+//===--- CheckExprLifetime.cpp --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CheckExprLifetime.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/ADT/PointerIntPair.h"
+
+namespace clang::sema {
+namespace {
+enum LifetimeKind {
+ /// The lifetime of a temporary bound to this entity ends at the end of the
+ /// full-expression, and that's (probably) fine.
+ LK_FullExpression,
+
+ /// The lifetime of a temporary bound to this entity is extended to the
+ /// lifetime of the entity itself.
+ LK_Extended,
+
+ /// The lifetime of a temporary bound to this entity probably ends too soon,
+ /// because the entity is allocated in a new-expression.
+ LK_New,
+
+ /// The lifetime of a temporary bound to this entity ends too soon, because
+ /// the entity is a return object.
+ LK_Return,
+
+ /// The lifetime of a temporary bound to this entity ends too soon, because
+ /// the entity is the result of a statement expression.
+ LK_StmtExprResult,
+
+ /// This is a mem-initializer: if it would extend a temporary (other than via
+ /// a default member initializer), the program is ill-formed.
+ LK_MemInitializer,
+
+ /// The lifetime of a temporary bound to this entity probably ends too soon,
+ /// because the entity is a pointer and we assign the address of a temporary
+ /// object to it.
+ LK_Assignment,
+};
+using LifetimeResult =
+ llvm::PointerIntPair<const InitializedEntity *, 3, LifetimeKind>;
+} // namespace
+
+/// Determine the declaration which an initialized entity ultimately refers to,
+/// for the purpose of lifetime-extending a temporary bound to a reference in
+/// the initialization of \p Entity.
+static LifetimeResult
+getEntityLifetime(const InitializedEntity *Entity,
+ const InitializedEntity *InitField = nullptr) {
+ // C++11 [class.temporary]p5:
+ switch (Entity->getKind()) {
+ case InitializedEntity::EK_Variable:
+ // The temporary [...] persists for the lifetime of the reference
+ return {Entity, LK_Extended};
+
+ case InitializedEntity::EK_Member:
+ // For subobjects, we look at the complete object.
+ if (Entity->getParent())
+ return getEntityLifetime(Entity->getParent(), Entity);
+
+ // except:
+ // C++17 [class.base.init]p8:
+ // A temporary expression bound to a reference member in a
+ // mem-initializer is ill-formed.
+ // C++17 [class.base.init]p11:
+ // A temporary expression bound to a reference member from a
+ // default member initializer is ill-formed.
+ //
+ // The context of p11 and its example suggest that it's only the use of a
+ // default member initializer from a constructor that makes the program
+ // ill-formed, not its mere existence, and that it can even be used by
+ // aggregate initialization.
+ return {Entity, Entity->isDefaultMemberInitializer() ? LK_Extended
+ : LK_MemInitializer};
+
+ case InitializedEntity::EK_Binding:
+ // Per [dcl.decomp]p3, the binding is treated as a variable of reference
+ // type.
+ return {Entity, LK_Extended};
+
+ case InitializedEntity::EK_Parameter:
+ case InitializedEntity::EK_Parameter_CF_Audited:
+ // -- A temporary bound to a reference parameter in a function call
+ // persists until the completion of the full-expression containing
+ // the call.
+ return {nullptr, LK_FullExpression};
+
+ case InitializedEntity::EK_TemplateParameter:
+ // FIXME: This will always be ill-formed; should we eagerly diagnose it
+ // here?
+ return {nullptr, LK_FullExpression};
+
+ case InitializedEntity::EK_Result:
+ // -- The lifetime of a temporary bound to the returned value in a
+ // function return statement is not extended; the temporary is
+ // destroyed at the end of the full-expression in the return statement.
+ return {nullptr, LK_Return};
+
+ case InitializedEntity::EK_StmtExprResult:
+ // FIXME: Should we lifetime-extend through the result of a statement
+ // expression?
+ return {nullptr, LK_StmtExprResult};
+
+ case InitializedEntity::EK_New:
+ // -- A temporary bound to a reference in a new-initializer persists
+ // until the completion of the full-expression containing the
+ // new-initializer.
+ return {nullptr, LK_New};
+
+ case InitializedEntity::EK_Temporary:
+ case InitializedEntity::EK_CompoundLiteralInit:
+ case InitializedEntity::EK_RelatedResult:
+ // We don't yet know the storage duration of the surrounding temporary.
+ // Assume it's got full-expression duration for now, it will patch up our
+ // storage duration if that's not correct.
+ return {nullptr, LK_FullExpression};
+
+ case InitializedEntity::EK_ArrayElement:
+ // For subobjects, we look at the complete object.
+ return getEntityLifetime(Entity->getParent(), InitField);
+
+ case InitializedEntity::EK_Base:
+ // For subobjects, we look at the complete object.
+ if (Entity->getParent())
+ return getEntityLifetime(Entity->getParent(), InitField);
+ return {InitField, LK_MemInitializer};
+
+ case InitializedEntity::EK_Delegating:
+ // We can reach this case for aggregate initialization in a constructor:
+ // struct A { int &&r; };
+ // struct B : A { B() : A{0} {} };
+ // In this case, use the outermost field decl as the context.
+ return {InitField, LK_MemInitializer};
+
+ case InitializedEntity::EK_BlockElement:
+ case InitializedEntity::EK_LambdaToBlockConversionBlockElement:
+ case InitializedEntity::EK_LambdaCapture:
+ case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_ComplexElement:
+ return {nullptr, LK_FullExpression};
+
+ case InitializedEntity::EK_Exception:
+ // FIXME: Can we diagnose lifetime problems with exceptions?
+ return {nullptr, LK_FullExpression};
+
+ case InitializedEntity::EK_ParenAggInitMember:
+ // -- A temporary object bound to a reference element of an aggregate of
+ // class type initialized from a parenthesized expression-list
+ // [dcl.init, 9.3] persists until the completion of the full-expression
+ // containing the expression-list.
+ return {nullptr, LK_FullExpression};
+ }
+
+ llvm_unreachable("unknown entity kind");
+}
+
+namespace {
+enum ReferenceKind {
+ /// Lifetime would be extended by a reference binding to a temporary.
+ RK_ReferenceBinding,
+ /// Lifetime would be extended by a std::initializer_list object binding to
+ /// its backing array.
+ RK_StdInitializerList,
+};
+
+/// A temporary or local variable. This will be one of:
+/// * A MaterializeTemporaryExpr.
+/// * A DeclRefExpr whose declaration is a local.
+/// * An AddrLabelExpr.
+/// * A BlockExpr for a block with captures.
+using Local = Expr *;
+
+/// Expressions we stepped over when looking for the local state. Any steps
+/// that would inhibit lifetime extension or take us out of subexpressions of
+/// the initializer are included.
+struct IndirectLocalPathEntry {
+ enum EntryKind {
+ DefaultInit,
+ AddressOf,
+ VarInit,
+ LValToRVal,
+ LifetimeBoundCall,
+ TemporaryCopy,
+ LambdaCaptureInit,
+ GslReferenceInit,
+ GslPointerInit,
+ GslPointerAssignment,
+ } Kind;
+ Expr *E;
+ union {
+ const Decl *D = nullptr;
+ const LambdaCapture *Capture;
+ };
+ IndirectLocalPathEntry() {}
+ IndirectLocalPathEntry(EntryKind K, Expr *E) : Kind(K), E(E) {}
+ IndirectLocalPathEntry(EntryKind K, Expr *E, const Decl *D)
+ : Kind(K), E(E), D(D) {}
+ IndirectLocalPathEntry(EntryKind K, Expr *E, const LambdaCapture *Capture)
+ : Kind(K), E(E), Capture(Capture) {}
+};
+
+using IndirectLocalPath = llvm::SmallVectorImpl<IndirectLocalPathEntry>;
+
+struct RevertToOldSizeRAII {
+ IndirectLocalPath &Path;
+ unsigned OldSize = Path.size();
+ RevertToOldSizeRAII(IndirectLocalPath &Path) : Path(Path) {}
+ ~RevertToOldSizeRAII() { Path.resize(OldSize); }
+};
+
+using LocalVisitor = llvm::function_ref<bool(IndirectLocalPath &Path, Local L,
+ ReferenceKind RK)>;
+} // namespace
+
+static bool isVarOnPath(IndirectLocalPath &Path, VarDecl *VD) {
+ for (auto E : Path)
+ if (E.Kind == IndirectLocalPathEntry::VarInit && E.D == VD)
+ return true;
+ return false;
+}
+
+static bool pathContainsInit(IndirectLocalPath &Path) {
+ return llvm::any_of(Path, [=](IndirectLocalPathEntry E) {
+ return E.Kind == IndirectLocalPathEntry::DefaultInit ||
+ E.Kind == IndirectLocalPathEntry::VarInit;
+ });
+}
+
+static void visitLocalsRetainedByInitializer(IndirectLocalPath &Path,
+ Expr *Init, LocalVisitor Visit,
+ bool RevisitSubinits,
+ bool EnableLifetimeWarnings);
+
+static void visitLocalsRetainedByReferenceBinding(IndirectLocalPath &Path,
+ Expr *Init, ReferenceKind RK,
+ LocalVisitor Visit,
+ bool EnableLifetimeWarnings);
+
+template <typename T> static bool isRecordWithAttr(QualType Type) {
+ if (auto *RD = Type->getAsCXXRecordDecl())
+ return RD->hasAttr<T>();
+ return false;
+}
+
+// Decl::isInStdNamespace will return false for iterators in some STL
+// implementations due to them being defined in a namespace outside of the std
+// namespace.
+static bool isInStlNamespace(const Decl *D) {
+ const DeclContext *DC = D->getDeclContext();
+ if (!DC)
+ return false;
+ if (const auto *ND = dyn_cast<NamespaceDecl>(DC))
+ if (const IdentifierInfo *II = ND->getIdentifier()) {
+ StringRef Name = II->getName();
+ if (Name.size() >= 2 && Name.front() == '_' &&
+ (Name[1] == '_' || isUppercase(Name[1])))
+ return true;
+ }
+
+ return DC->isStdNamespace();
+}
+
+static bool shouldTrackImplicitObjectArg(const CXXMethodDecl *Callee) {
+ if (auto *Conv = dyn_cast_or_null<CXXConversionDecl>(Callee))
+ if (isRecordWithAttr<PointerAttr>(Conv->getConversionType()))
+ return true;
+ if (!isInStlNamespace(Callee->getParent()))
+ return false;
+ if (!isRecordWithAttr<PointerAttr>(
+ Callee->getFunctionObjectParameterType()) &&
+ !isRecordWithAttr<OwnerAttr>(Callee->getFunctionObjectParameterType()))
+ return false;
+ if (Callee->getReturnType()->isPointerType() ||
+ isRecordWithAttr<PointerAttr>(Callee->getReturnType())) {
+ if (!Callee->getIdentifier())
+ return false;
+ return llvm::StringSwitch<bool>(Callee->getName())
+ .Cases("begin", "rbegin", "cbegin", "crbegin", true)
+ .Cases("end", "rend", "cend", "crend", true)
+ .Cases("c_str", "data", "get", true)
+ // Map and set types.
+ .Cases("find", "equal_range", "lower_bound", "upper_bound", true)
+ .Default(false);
+ } else if (Callee->getReturnType()->isReferenceType()) {
+ if (!Callee->getIdentifier()) {
+ auto OO = Callee->getOverloadedOperator();
+ return OO == OverloadedOperatorKind::OO_Subscript ||
+ OO == OverloadedOperatorKind::OO_Star;
+ }
+ return llvm::StringSwitch<bool>(Callee->getName())
+ .Cases("front", "back", "at", "top", "value", true)
+ .Default(false);
+ }
+ return false;
+}
+
+static bool shouldTrackFirstArgument(const FunctionDecl *FD) {
+ if (!FD->getIdentifier() || FD->getNumParams() != 1)
+ return false;
+ const auto *RD = FD->getParamDecl(0)->getType()->getPointeeCXXRecordDecl();
+ if (!FD->isInStdNamespace() || !RD || !RD->isInStdNamespace())
+ return false;
+ if (!RD->hasAttr<PointerAttr>() && !RD->hasAttr<OwnerAttr>())
+ return false;
+ if (FD->getReturnType()->isPointerType() ||
+ isRecordWithAttr<PointerAttr>(FD->getReturnType())) {
+ return llvm::StringSwitch<bool>(FD->getName())
+ .Cases("begin", "rbegin", "cbegin", "crbegin", true)
+ .Cases("end", "rend", "cend", "crend", true)
+ .Case("data", true)
+ .Default(false);
+ } else if (FD->getReturnType()->isReferenceType()) {
+ return llvm::StringSwitch<bool>(FD->getName())
+ .Cases("get", "any_cast", true)
+ .Default(false);
+ }
+ return false;
+}
+
+static void handleGslAnnotatedTypes(IndirectLocalPath &Path, Expr *Call,
+ LocalVisitor Visit) {
+ auto VisitPointerArg = [&](const Decl *D, Expr *Arg, bool Value) {
+ // We are not interested in the temporary base objects of gsl Pointers:
+ // Temp().ptr; // Here ptr might not dangle.
+ if (isa<MemberExpr>(Arg->IgnoreImpCasts()))
+ return;
+ // Once we initialized a value with a reference, it can no longer dangle.
+ if (!Value) {
+ for (const IndirectLocalPathEntry &PE : llvm::reverse(Path)) {
+ if (PE.Kind == IndirectLocalPathEntry::GslReferenceInit)
+ continue;
+ if (PE.Kind == IndirectLocalPathEntry::GslPointerInit ||
+ PE.Kind == IndirectLocalPathEntry::GslPointerAssignment)
+ return;
+ break;
+ }
+ }
+ Path.push_back({Value ? IndirectLocalPathEntry::GslPointerInit
+ : IndirectLocalPathEntry::GslReferenceInit,
+ Arg, D});
+ if (Arg->isGLValue())
+ visitLocalsRetainedByReferenceBinding(Path, Arg, RK_ReferenceBinding,
+ Visit,
+ /*EnableLifetimeWarnings=*/true);
+ else
+ visitLocalsRetainedByInitializer(Path, Arg, Visit, true,
+ /*EnableLifetimeWarnings=*/true);
+ Path.pop_back();
+ };
+
+ if (auto *MCE = dyn_cast<CXXMemberCallExpr>(Call)) {
+ const auto *MD = cast_or_null<CXXMethodDecl>(MCE->getDirectCallee());
+ if (MD && shouldTrackImplicitObjectArg(MD))
+ VisitPointerArg(MD, MCE->getImplicitObjectArgument(),
+ !MD->getReturnType()->isReferenceType());
+ return;
+ } else if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(Call)) {
+ FunctionDecl *Callee = OCE->getDirectCallee();
+ if (Callee && Callee->isCXXInstanceMember() &&
+ shouldTrackImplicitObjectArg(cast<CXXMethodDecl>(Callee)))
+ VisitPointerArg(Callee, OCE->getArg(0),
+ !Callee->getReturnType()->isReferenceType());
+ return;
+ } else if (auto *CE = dyn_cast<CallExpr>(Call)) {
+ FunctionDecl *Callee = CE->getDirectCallee();
+ if (Callee && shouldTrackFirstArgument(Callee))
+ VisitPointerArg(Callee, CE->getArg(0),
+ !Callee->getReturnType()->isReferenceType());
+ return;
+ }
+
+ if (auto *CCE = dyn_cast<CXXConstructExpr>(Call)) {
+ const auto *Ctor = CCE->getConstructor();
+ const CXXRecordDecl *RD = Ctor->getParent();
+ if (CCE->getNumArgs() > 0 && RD->hasAttr<PointerAttr>())
+ VisitPointerArg(Ctor->getParamDecl(0), CCE->getArgs()[0], true);
+ }
+}
+
+static bool implicitObjectParamIsLifetimeBound(const FunctionDecl *FD) {
+ const TypeSourceInfo *TSI = FD->getTypeSourceInfo();
+ if (!TSI)
+ return false;
+ // Don't declare this variable in the second operand of the for-statement;
+ // GCC miscompiles that by ending its lifetime before evaluating the
+ // third operand. See gcc.gnu.org/PR86769.
+ AttributedTypeLoc ATL;
+ for (TypeLoc TL = TSI->getTypeLoc();
+ (ATL = TL.getAsAdjusted<AttributedTypeLoc>());
+ TL = ATL.getModifiedLoc()) {
+ if (ATL.getAttrAs<LifetimeBoundAttr>())
+ return true;
+ }
+
+ // Assume that all assignment operators with a "normal" return type return
+ // *this, that is, an lvalue reference that is the same type as the implicit
+ // object parameter (or the LHS for a non-member operator$=).
+ OverloadedOperatorKind OO = FD->getDeclName().getCXXOverloadedOperator();
+ if (OO == OO_Equal || isCompoundAssignmentOperator(OO)) {
+ QualType RetT = FD->getReturnType();
+ if (RetT->isLValueReferenceType()) {
+ ASTContext &Ctx = FD->getASTContext();
+ QualType LHST;
+ auto *MD = dyn_cast<CXXMethodDecl>(FD);
+ if (MD && MD->isCXXInstanceMember())
+ LHST = Ctx.getLValueReferenceType(MD->getFunctionObjectParameterType());
+ else
+ LHST = FD->getParamDecl(0)->getType(); // MD may be null (non-member operator@=)
+ if (Ctx.hasSameType(RetT, LHST))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void visitLifetimeBoundArguments(IndirectLocalPath &Path, Expr *Call,
+ LocalVisitor Visit) {
+ const FunctionDecl *Callee;
+ ArrayRef<Expr *> Args;
+
+ if (auto *CE = dyn_cast<CallExpr>(Call)) {
+ Callee = CE->getDirectCallee();
+ Args = llvm::ArrayRef(CE->getArgs(), CE->getNumArgs());
+ } else {
+ auto *CCE = cast<CXXConstructExpr>(Call);
+ Callee = CCE->getConstructor();
+ Args = llvm::ArrayRef(CCE->getArgs(), CCE->getNumArgs());
+ }
+ if (!Callee)
+ return;
+
+ Expr *ObjectArg = nullptr;
+ if (isa<CXXOperatorCallExpr>(Call) && Callee->isCXXInstanceMember()) {
+ ObjectArg = Args[0];
+ Args = Args.slice(1);
+ } else if (auto *MCE = dyn_cast<CXXMemberCallExpr>(Call)) {
+ ObjectArg = MCE->getImplicitObjectArgument();
+ }
+
+ auto VisitLifetimeBoundArg = [&](const Decl *D, Expr *Arg) {
+ Path.push_back({IndirectLocalPathEntry::LifetimeBoundCall, Arg, D});
+ if (Arg->isGLValue())
+ visitLocalsRetainedByReferenceBinding(Path, Arg, RK_ReferenceBinding,
+ Visit,
+ /*EnableLifetimeWarnings=*/false);
+ else
+ visitLocalsRetainedByInitializer(Path, Arg, Visit, true,
+ /*EnableLifetimeWarnings=*/false);
+ Path.pop_back();
+ };
+
+ bool CheckCoroCall = false;
+ if (const auto *RD = Callee->getReturnType()->getAsRecordDecl()) {
+ CheckCoroCall = RD->hasAttr<CoroLifetimeBoundAttr>() &&
+ RD->hasAttr<CoroReturnTypeAttr>() &&
+ !Callee->hasAttr<CoroDisableLifetimeBoundAttr>();
+ }
+
+ if (ObjectArg) {
+ bool CheckCoroObjArg = CheckCoroCall;
+ // Coroutine lambda objects with empty capture list are not lifetimebound.
+ if (auto *LE = dyn_cast<LambdaExpr>(ObjectArg->IgnoreImplicit());
+ LE && LE->captures().empty())
+ CheckCoroObjArg = false;
+ // Allow `get_return_object()` as the object param (__promise) is not
+ // lifetimebound.
+ if (Sema::CanBeGetReturnObject(Callee))
+ CheckCoroObjArg = false;
+ if (implicitObjectParamIsLifetimeBound(Callee) || CheckCoroObjArg)
+ VisitLifetimeBoundArg(Callee, ObjectArg);
+ }
+
+ for (unsigned I = 0,
+ N = std::min<unsigned>(Callee->getNumParams(), Args.size());
+ I != N; ++I) {
+ if (CheckCoroCall || Callee->getParamDecl(I)->hasAttr<LifetimeBoundAttr>())
+ VisitLifetimeBoundArg(Callee->getParamDecl(I), Args[I]);
+ }
+}
+
+/// Visit the locals that would be reachable through a reference bound to the
+/// glvalue expression \c Init.
+static void visitLocalsRetainedByReferenceBinding(IndirectLocalPath &Path,
+ Expr *Init, ReferenceKind RK,
+ LocalVisitor Visit,
+ bool EnableLifetimeWarnings) {
+ RevertToOldSizeRAII RAII(Path);
+
+ // Walk past any constructs which we can lifetime-extend across.
+ Expr *Old;
+ do {
+ Old = Init;
+
+ if (auto *FE = dyn_cast<FullExpr>(Init))
+ Init = FE->getSubExpr();
+
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
+ // If this is just redundant braces around an initializer, step over it.
+ if (ILE->isTransparent())
+ Init = ILE->getInit(0);
+ }
+
+ // Step over any subobject adjustments; we may have a materialized
+ // temporary inside them.
+ Init = const_cast<Expr *>(Init->skipRValueSubobjectAdjustments());
+
+ // Per current approach for DR1376, look through casts to reference type
+ // when performing lifetime extension.
+ if (CastExpr *CE = dyn_cast<CastExpr>(Init))
+ if (CE->getSubExpr()->isGLValue())
+ Init = CE->getSubExpr();
+
+ // Per the current approach for DR1299, look through array element access
+ // on array glvalues when performing lifetime extension.
+ if (auto *ASE = dyn_cast<ArraySubscriptExpr>(Init)) {
+ Init = ASE->getBase();
+ auto *ICE = dyn_cast<ImplicitCastExpr>(Init);
+ if (ICE && ICE->getCastKind() == CK_ArrayToPointerDecay)
+ Init = ICE->getSubExpr();
+ else
+ // We can't lifetime extend through this but we might still find some
+ // retained temporaries.
+ return visitLocalsRetainedByInitializer(Path, Init, Visit, true,
+ EnableLifetimeWarnings);
+ }
+
+ // Step into CXXDefaultInitExprs so we can diagnose cases where a
+ // constructor inherits one as an implicit mem-initializer.
+ if (auto *DIE = dyn_cast<CXXDefaultInitExpr>(Init)) {
+ Path.push_back(
+ {IndirectLocalPathEntry::DefaultInit, DIE, DIE->getField()});
+ Init = DIE->getExpr();
+ }
+ } while (Init != Old);
+
+ if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(Init)) {
+ if (Visit(Path, Local(MTE), RK))
+ visitLocalsRetainedByInitializer(Path, MTE->getSubExpr(), Visit, true,
+ EnableLifetimeWarnings);
+ }
+
+ if (auto *M = dyn_cast<MemberExpr>(Init)) {
+ // Lifetime of a non-reference type field is same as base object.
+ if (auto *F = dyn_cast<FieldDecl>(M->getMemberDecl());
+ F && !F->getType()->isReferenceType())
+ visitLocalsRetainedByInitializer(Path, M->getBase(), Visit, true,
+ EnableLifetimeWarnings);
+ }
+
+ if (isa<CallExpr>(Init)) {
+ if (EnableLifetimeWarnings)
+ handleGslAnnotatedTypes(Path, Init, Visit);
+ return visitLifetimeBoundArguments(Path, Init, Visit);
+ }
+
+ switch (Init->getStmtClass()) {
+ case Stmt::DeclRefExprClass: {
+ // If we find the name of a local non-reference parameter, we could have a
+ // lifetime problem.
+ auto *DRE = cast<DeclRefExpr>(Init);
+ auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
+ if (VD && VD->hasLocalStorage() &&
+ !DRE->refersToEnclosingVariableOrCapture()) {
+ if (!VD->getType()->isReferenceType()) {
+ Visit(Path, Local(DRE), RK);
+ } else if (isa<ParmVarDecl>(DRE->getDecl())) {
+ // The lifetime of a reference parameter is unknown; assume it's OK
+ // for now.
+ break;
+ } else if (VD->getInit() && !isVarOnPath(Path, VD)) {
+ Path.push_back({IndirectLocalPathEntry::VarInit, DRE, VD});
+ visitLocalsRetainedByReferenceBinding(Path, VD->getInit(),
+ RK_ReferenceBinding, Visit,
+ EnableLifetimeWarnings);
+ }
+ }
+ break;
+ }
+
+ case Stmt::UnaryOperatorClass: {
+ // The only unary operator that make sense to handle here
+ // is Deref. All others don't resolve to a "name." This includes
+ // handling all sorts of rvalues passed to a unary operator.
+ const UnaryOperator *U = cast<UnaryOperator>(Init);
+ if (U->getOpcode() == UO_Deref)
+ visitLocalsRetainedByInitializer(Path, U->getSubExpr(), Visit, true,
+ EnableLifetimeWarnings);
+ break;
+ }
+
+ case Stmt::ArraySectionExprClass: {
+ visitLocalsRetainedByInitializer(Path,
+ cast<ArraySectionExpr>(Init)->getBase(),
+ Visit, true, EnableLifetimeWarnings);
+ break;
+ }
+
+ case Stmt::ConditionalOperatorClass:
+ case Stmt::BinaryConditionalOperatorClass: {
+ auto *C = cast<AbstractConditionalOperator>(Init);
+ if (!C->getTrueExpr()->getType()->isVoidType())
+ visitLocalsRetainedByReferenceBinding(Path, C->getTrueExpr(), RK, Visit,
+ EnableLifetimeWarnings);
+ if (!C->getFalseExpr()->getType()->isVoidType())
+ visitLocalsRetainedByReferenceBinding(Path, C->getFalseExpr(), RK, Visit,
+ EnableLifetimeWarnings);
+ break;
+ }
+
+ case Stmt::CompoundLiteralExprClass: {
+ if (auto *CLE = dyn_cast<CompoundLiteralExpr>(Init)) {
+ if (!CLE->isFileScope())
+ Visit(Path, Local(CLE), RK);
+ }
+ break;
+ }
+
+ // FIXME: Visit the left-hand side of an -> or ->*.
+
+ default:
+ break;
+ }
+}
+
+/// Visit the locals that would be reachable through an object initialized by
+/// the prvalue expression \c Init.
+static void visitLocalsRetainedByInitializer(IndirectLocalPath &Path,
+ Expr *Init, LocalVisitor Visit,
+ bool RevisitSubinits,
+ bool EnableLifetimeWarnings) {
+ RevertToOldSizeRAII RAII(Path);
+
+ Expr *Old;
+ do {
+ Old = Init;
+
+ // Step into CXXDefaultInitExprs so we can diagnose cases where a
+ // constructor inherits one as an implicit mem-initializer.
+ if (auto *DIE = dyn_cast<CXXDefaultInitExpr>(Init)) {
+ Path.push_back(
+ {IndirectLocalPathEntry::DefaultInit, DIE, DIE->getField()});
+ Init = DIE->getExpr();
+ }
+
+ if (auto *FE = dyn_cast<FullExpr>(Init))
+ Init = FE->getSubExpr();
+
+ // Dig out the expression which constructs the extended temporary.
+ Init = const_cast<Expr *>(Init->skipRValueSubobjectAdjustments());
+
+ if (CXXBindTemporaryExpr *BTE = dyn_cast<CXXBindTemporaryExpr>(Init))
+ Init = BTE->getSubExpr();
+
+ Init = Init->IgnoreParens();
+
+ // Step over value-preserving rvalue casts.
+ if (auto *CE = dyn_cast<CastExpr>(Init)) {
+ switch (CE->getCastKind()) {
+ case CK_LValueToRValue:
+ // If we can match the lvalue to a const object, we can look at its
+ // initializer.
+ Path.push_back({IndirectLocalPathEntry::LValToRVal, CE});
+ return visitLocalsRetainedByReferenceBinding(
+ Path, Init, RK_ReferenceBinding,
+ [&](IndirectLocalPath &Path, Local L, ReferenceKind RK) -> bool {
+ if (auto *DRE = dyn_cast<DeclRefExpr>(L)) {
+ auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
+ if (VD && VD->getType().isConstQualified() && VD->getInit() &&
+ !isVarOnPath(Path, VD)) {
+ Path.push_back({IndirectLocalPathEntry::VarInit, DRE, VD});
+ visitLocalsRetainedByInitializer(
+ Path, VD->getInit(), Visit, true, EnableLifetimeWarnings);
+ }
+ } else if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(L)) {
+ if (MTE->getType().isConstQualified())
+ visitLocalsRetainedByInitializer(Path, MTE->getSubExpr(),
+ Visit, true,
+ EnableLifetimeWarnings);
+ }
+ return false;
+ },
+ EnableLifetimeWarnings);
+
+ // We assume that objects can be retained by pointers cast to integers,
+ // but not if the integer is cast to floating-point type or to _Complex.
+ // We assume that casts to 'bool' do not preserve enough information to
+ // retain a local object.
+ case CK_NoOp:
+ case CK_BitCast:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_UserDefinedConversion:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_AddressSpaceConversion:
+ break;
+
+ case CK_ArrayToPointerDecay:
+ // Model array-to-pointer decay as taking the address of the array
+ // lvalue.
+ Path.push_back({IndirectLocalPathEntry::AddressOf, CE});
+ return visitLocalsRetainedByReferenceBinding(Path, CE->getSubExpr(),
+ RK_ReferenceBinding, Visit,
+ EnableLifetimeWarnings);
+
+ default:
+ return;
+ }
+
+ Init = CE->getSubExpr();
+ }
+ } while (Old != Init);
+
+ // C++17 [dcl.init.list]p6:
+ // initializing an initializer_list object from the array extends the
+ // lifetime of the array exactly like binding a reference to a temporary.
+ if (auto *ILE = dyn_cast<CXXStdInitializerListExpr>(Init))
+ return visitLocalsRetainedByReferenceBinding(Path, ILE->getSubExpr(),
+ RK_StdInitializerList, Visit,
+ EnableLifetimeWarnings);
+
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
+ // We already visited the elements of this initializer list while
+ // performing the initialization. Don't visit them again unless we've
+ // changed the lifetime of the initialized entity.
+ if (!RevisitSubinits)
+ return;
+
+ if (ILE->isTransparent())
+ return visitLocalsRetainedByInitializer(Path, ILE->getInit(0), Visit,
+ RevisitSubinits,
+ EnableLifetimeWarnings);
+
+ if (ILE->getType()->isArrayType()) {
+ for (unsigned I = 0, N = ILE->getNumInits(); I != N; ++I)
+ visitLocalsRetainedByInitializer(Path, ILE->getInit(I), Visit,
+ RevisitSubinits,
+ EnableLifetimeWarnings);
+ return;
+ }
+
+ if (CXXRecordDecl *RD = ILE->getType()->getAsCXXRecordDecl()) {
+ assert(RD->isAggregate() && "aggregate init on non-aggregate");
+
+ // If we lifetime-extend a braced initializer which is initializing an
+ // aggregate, and that aggregate contains reference members which are
+ // bound to temporaries, those temporaries are also lifetime-extended.
+ if (RD->isUnion() && ILE->getInitializedFieldInUnion() &&
+ ILE->getInitializedFieldInUnion()->getType()->isReferenceType())
+ visitLocalsRetainedByReferenceBinding(Path, ILE->getInit(0),
+ RK_ReferenceBinding, Visit,
+ EnableLifetimeWarnings);
+ else {
+ unsigned Index = 0;
+ for (; Index < RD->getNumBases() && Index < ILE->getNumInits(); ++Index)
+ visitLocalsRetainedByInitializer(Path, ILE->getInit(Index), Visit,
+ RevisitSubinits,
+ EnableLifetimeWarnings);
+ for (const auto *I : RD->fields()) {
+ if (Index >= ILE->getNumInits())
+ break;
+ if (I->isUnnamedBitField())
+ continue;
+ Expr *SubInit = ILE->getInit(Index);
+ if (I->getType()->isReferenceType())
+ visitLocalsRetainedByReferenceBinding(Path, SubInit,
+ RK_ReferenceBinding, Visit,
+ EnableLifetimeWarnings);
+ else
+ // This might be either aggregate-initialization of a member or
+ // initialization of a std::initializer_list object. Regardless,
+ // we should recursively lifetime-extend that initializer.
+ visitLocalsRetainedByInitializer(
+ Path, SubInit, Visit, RevisitSubinits, EnableLifetimeWarnings);
+ ++Index;
+ }
+ }
+ }
+ return;
+ }
+
+ // The lifetime of an init-capture is that of the closure object constructed
+ // by a lambda-expression.
+ if (auto *LE = dyn_cast<LambdaExpr>(Init)) {
+ LambdaExpr::capture_iterator CapI = LE->capture_begin();
+ for (Expr *E : LE->capture_inits()) {
+ assert(CapI != LE->capture_end());
+ const LambdaCapture &Cap = *CapI++;
+ if (!E)
+ continue;
+ if (Cap.capturesVariable())
+ Path.push_back({IndirectLocalPathEntry::LambdaCaptureInit, E, &Cap});
+ if (E->isGLValue())
+ visitLocalsRetainedByReferenceBinding(Path, E, RK_ReferenceBinding,
+ Visit, EnableLifetimeWarnings);
+ else
+ visitLocalsRetainedByInitializer(Path, E, Visit, true,
+ EnableLifetimeWarnings);
+ if (Cap.capturesVariable())
+ Path.pop_back();
+ }
+ }
+
+ // Assume that a copy or move from a temporary references the same objects
+ // that the temporary does.
+ if (auto *CCE = dyn_cast<CXXConstructExpr>(Init)) {
+ if (CCE->getConstructor()->isCopyOrMoveConstructor()) {
+ if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(CCE->getArg(0))) {
+ // assert(false && "hit temporary copy path");
+ Expr *Arg = MTE->getSubExpr();
+ Path.push_back({IndirectLocalPathEntry::TemporaryCopy, Arg,
+ CCE->getConstructor()});
+ visitLocalsRetainedByInitializer(Path, Arg, Visit, true,
+ /*EnableLifetimeWarnings*/ false);
+ Path.pop_back();
+ }
+ }
+ }
+
+ if (isa<CallExpr>(Init) || isa<CXXConstructExpr>(Init)) {
+ if (EnableLifetimeWarnings)
+ handleGslAnnotatedTypes(Path, Init, Visit);
+ return visitLifetimeBoundArguments(Path, Init, Visit);
+ }
+
+ switch (Init->getStmtClass()) {
+ case Stmt::UnaryOperatorClass: {
+ auto *UO = cast<UnaryOperator>(Init);
+ // If the initializer is the address of a local, we could have a lifetime
+ // problem.
+ if (UO->getOpcode() == UO_AddrOf) {
+ // If this is &rvalue, then it's ill-formed and we have already diagnosed
+ // it. Don't produce a redundant warning about the lifetime of the
+ // temporary.
+ if (isa<MaterializeTemporaryExpr>(UO->getSubExpr()))
+ return;
+
+ Path.push_back({IndirectLocalPathEntry::AddressOf, UO});
+ visitLocalsRetainedByReferenceBinding(Path, UO->getSubExpr(),
+ RK_ReferenceBinding, Visit,
+ EnableLifetimeWarnings);
+ }
+ break;
+ }
+
+ case Stmt::BinaryOperatorClass: {
+ // Handle pointer arithmetic.
+ auto *BO = cast<BinaryOperator>(Init);
+ BinaryOperatorKind BOK = BO->getOpcode();
+ if (!BO->getType()->isPointerType() || (BOK != BO_Add && BOK != BO_Sub))
+ break;
+
+ if (BO->getLHS()->getType()->isPointerType())
+ visitLocalsRetainedByInitializer(Path, BO->getLHS(), Visit, true,
+ EnableLifetimeWarnings);
+ else if (BO->getRHS()->getType()->isPointerType())
+ visitLocalsRetainedByInitializer(Path, BO->getRHS(), Visit, true,
+ EnableLifetimeWarnings);
+ break;
+ }
+
+ case Stmt::ConditionalOperatorClass:
+ case Stmt::BinaryConditionalOperatorClass: {
+ auto *C = cast<AbstractConditionalOperator>(Init);
+ // In C++, we can have a throw-expression operand, which has 'void' type
+ // and isn't interesting from a lifetime perspective.
+ if (!C->getTrueExpr()->getType()->isVoidType())
+ visitLocalsRetainedByInitializer(Path, C->getTrueExpr(), Visit, true,
+ EnableLifetimeWarnings);
+ if (!C->getFalseExpr()->getType()->isVoidType())
+ visitLocalsRetainedByInitializer(Path, C->getFalseExpr(), Visit, true,
+ EnableLifetimeWarnings);
+ break;
+ }
+
+ case Stmt::BlockExprClass:
+ if (cast<BlockExpr>(Init)->getBlockDecl()->hasCaptures()) {
+ // This is a local block, whose lifetime is that of the function.
+ Visit(Path, Local(cast<BlockExpr>(Init)), RK_ReferenceBinding);
+ }
+ break;
+
+ case Stmt::AddrLabelExprClass:
+ // We want to warn if the address of a label would escape the function.
+ Visit(Path, Local(cast<AddrLabelExpr>(Init)), RK_ReferenceBinding);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/// Whether a path to an object supports lifetime extension.
+enum PathLifetimeKind {
+ /// Lifetime-extend along this path.
+ Extend,
+ /// We should lifetime-extend, but we don't because (due to technical
+ /// limitations) we can't. This happens for default member initializers,
+ /// which we don't clone for every use, so we don't have a unique
+ /// MaterializeTemporaryExpr to update.
+ ShouldExtend,
+ /// Do not lifetime extend along this path.
+ NoExtend
+};
+
+/// Determine whether this is an indirect path to a temporary that we are
+/// supposed to lifetime-extend along.
+static PathLifetimeKind
+shouldLifetimeExtendThroughPath(const IndirectLocalPath &Path) {
+ PathLifetimeKind Kind = PathLifetimeKind::Extend;
+ for (auto Elem : Path) {
+ if (Elem.Kind == IndirectLocalPathEntry::DefaultInit)
+ Kind = PathLifetimeKind::ShouldExtend;
+ else if (Elem.Kind != IndirectLocalPathEntry::LambdaCaptureInit)
+ return PathLifetimeKind::NoExtend;
+ }
+ return Kind;
+}
+
+/// Find the range for the first interesting entry in the path at or after I.
+static SourceRange nextPathEntryRange(const IndirectLocalPath &Path, unsigned I,
+ Expr *E) {
+ for (unsigned N = Path.size(); I != N; ++I) {
+ switch (Path[I].Kind) {
+ case IndirectLocalPathEntry::AddressOf:
+ case IndirectLocalPathEntry::LValToRVal:
+ case IndirectLocalPathEntry::LifetimeBoundCall:
+ case IndirectLocalPathEntry::TemporaryCopy:
+ case IndirectLocalPathEntry::GslReferenceInit:
+ case IndirectLocalPathEntry::GslPointerInit:
+ case IndirectLocalPathEntry::GslPointerAssignment:
+ // These exist primarily to mark the path as not permitting or
+ // supporting lifetime extension.
+ break;
+
+ case IndirectLocalPathEntry::VarInit:
+ if (cast<VarDecl>(Path[I].D)->isImplicit())
+ return SourceRange();
+ [[fallthrough]];
+ case IndirectLocalPathEntry::DefaultInit:
+ return Path[I].E->getSourceRange();
+
+ case IndirectLocalPathEntry::LambdaCaptureInit:
+ if (!Path[I].Capture->capturesVariable())
+ continue;
+ return Path[I].E->getSourceRange();
+ }
+ }
+ return E->getSourceRange();
+}
+
+static bool pathOnlyHandlesGslPointer(IndirectLocalPath &Path) {
+ for (const auto &It : llvm::reverse(Path)) {
+ switch (It.Kind) {
+ case IndirectLocalPathEntry::VarInit:
+ case IndirectLocalPathEntry::AddressOf:
+ case IndirectLocalPathEntry::LifetimeBoundCall:
+ continue;
+ case IndirectLocalPathEntry::GslPointerInit:
+ case IndirectLocalPathEntry::GslReferenceInit:
+ case IndirectLocalPathEntry::GslPointerAssignment:
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
+static void checkExprLifetimeImpl(Sema &SemaRef,
+ const InitializedEntity *InitEntity,
+ const InitializedEntity *ExtendingEntity,
+ LifetimeKind LK,
+ const AssignedEntity *AEntity, Expr *Init,
+ bool EnableLifetimeWarnings) {
+ assert((AEntity && LK == LK_Assignment) ||
+ (InitEntity && LK != LK_Assignment));
+ // If this entity doesn't have an interesting lifetime, don't bother looking
+ // for temporaries within its initializer.
+ if (LK == LK_FullExpression)
+ return;
+
+ // FIXME: consider moving the TemporaryVisitor and visitLocalsRetained*
+ // functions to a dedicated class.
+ auto TemporaryVisitor = [&](IndirectLocalPath &Path, Local L,
+ ReferenceKind RK) -> bool {
+ SourceRange DiagRange = nextPathEntryRange(Path, 0, L);
+ SourceLocation DiagLoc = DiagRange.getBegin();
+
+ auto *MTE = dyn_cast<MaterializeTemporaryExpr>(L);
+
+ bool IsGslPtrValueFromGslTempOwner = false;
+ bool IsLocalGslOwner = false;
+ if (pathOnlyHandlesGslPointer(Path)) {
+ if (isa<DeclRefExpr>(L)) {
+ // We do not want to follow the references when returning a pointer
+ // originating from a local owner to avoid the following false positive:
+ // int &p = *localUniquePtr;
+ // someContainer.add(std::move(localUniquePtr));
+ // return p;
+ IsLocalGslOwner = isRecordWithAttr<OwnerAttr>(L->getType());
+ if (pathContainsInit(Path) || !IsLocalGslOwner)
+ return false;
+ } else {
+ IsGslPtrValueFromGslTempOwner =
+ MTE && !MTE->getExtendingDecl() &&
+ isRecordWithAttr<OwnerAttr>(MTE->getType());
+ // Skipping a chain of initializing gsl::Pointer annotated objects.
+ // We are looking only for the final source to find out if it was
+ // a local or temporary owner or the address of a local variable/param.
+ if (!IsGslPtrValueFromGslTempOwner)
+ return true;
+ }
+ }
+
+ switch (LK) {
+ case LK_FullExpression:
+ llvm_unreachable("already handled this");
+
+ case LK_Extended: {
+ if (!MTE) {
+ // The initialized entity has lifetime beyond the full-expression,
+ // and the local entity does too, so don't warn.
+ //
+ // FIXME: We should consider warning if a static / thread storage
+ // duration variable retains an automatic storage duration local.
+ return false;
+ }
+
+ if (IsGslPtrValueFromGslTempOwner && DiagLoc.isValid()) {
+ SemaRef.Diag(DiagLoc, diag::warn_dangling_lifetime_pointer)
+ << DiagRange;
+ return false;
+ }
+
+ switch (shouldLifetimeExtendThroughPath(Path)) {
+ case PathLifetimeKind::Extend:
+ // Update the storage duration of the materialized temporary.
+ // FIXME: Rebuild the expression instead of mutating it.
+ MTE->setExtendingDecl(ExtendingEntity->getDecl(),
+ ExtendingEntity->allocateManglingNumber());
+ // Also visit the temporaries lifetime-extended by this initializer.
+ return true;
+
+ case PathLifetimeKind::ShouldExtend:
+ // We're supposed to lifetime-extend the temporary along this path (per
+ // the resolution of DR1815), but we don't support that yet.
+ //
+ // FIXME: Properly handle this situation. Perhaps the easiest approach
+ // would be to clone the initializer expression on each use that would
+ // lifetime extend its temporaries.
+ SemaRef.Diag(DiagLoc, diag::warn_unsupported_lifetime_extension)
+ << RK << DiagRange;
+ break;
+
+ case PathLifetimeKind::NoExtend:
+ // If the path goes through the initialization of a variable or field,
+ // it can't possibly reach a temporary created in this full-expression.
+ // We will have already diagnosed any problems with the initializer.
+ if (pathContainsInit(Path))
+ return false;
+
+ SemaRef.Diag(DiagLoc, diag::warn_dangling_variable)
+ << RK << !InitEntity->getParent()
+ << ExtendingEntity->getDecl()->isImplicit()
+ << ExtendingEntity->getDecl() << Init->isGLValue() << DiagRange;
+ break;
+ }
+ break;
+ }
+
+ case LK_Assignment: {
+ if (!MTE || pathContainsInit(Path))
+ return false;
+ assert(shouldLifetimeExtendThroughPath(Path) ==
+ PathLifetimeKind::NoExtend &&
+ "No lifetime extension for assignments");
+ SemaRef.Diag(DiagLoc,
+ IsGslPtrValueFromGslTempOwner
+ ? diag::warn_dangling_lifetime_pointer_assignment
+ : diag::warn_dangling_pointer_assignment)
+ << AEntity->LHS << DiagRange;
+ return false;
+ }
+ case LK_MemInitializer: {
+ if (MTE) {
+ // Under C++ DR1696, if a mem-initializer (or a default member
+ // initializer used by the absence of one) would lifetime-extend a
+ // temporary, the program is ill-formed.
+ if (auto *ExtendingDecl =
+ ExtendingEntity ? ExtendingEntity->getDecl() : nullptr) {
+ if (IsGslPtrValueFromGslTempOwner) {
+ SemaRef.Diag(DiagLoc, diag::warn_dangling_lifetime_pointer_member)
+ << ExtendingDecl << DiagRange;
+ SemaRef.Diag(ExtendingDecl->getLocation(),
+ diag::note_ref_or_ptr_member_declared_here)
+ << true;
+ return false;
+ }
+ bool IsSubobjectMember = ExtendingEntity != InitEntity;
+ SemaRef.Diag(DiagLoc, shouldLifetimeExtendThroughPath(Path) !=
+ PathLifetimeKind::NoExtend
+ ? diag::err_dangling_member
+ : diag::warn_dangling_member)
+ << ExtendingDecl << IsSubobjectMember << RK << DiagRange;
+ // Don't bother adding a note pointing to the field if we're inside
+ // its default member initializer; our primary diagnostic points to
+ // the same place in that case.
+ if (Path.empty() ||
+ Path.back().Kind != IndirectLocalPathEntry::DefaultInit) {
+ SemaRef.Diag(ExtendingDecl->getLocation(),
+ diag::note_lifetime_extending_member_declared_here)
+ << RK << IsSubobjectMember;
+ }
+ } else {
+ // We have a mem-initializer but no particular field within it; this
+ // is either a base class or a delegating initializer directly
+ // initializing the base-class from something that doesn't live long
+ // enough.
+ //
+ // FIXME: Warn on this.
+ return false;
+ }
+ } else {
+ // Paths via a default initializer can only occur during error recovery
+ // (there's no other way that a default initializer can refer to a
+ // local). Don't produce a bogus warning on those cases.
+ if (pathContainsInit(Path))
+ return false;
+
+ // Suppress false positives for code like the one below:
+ // Ctor(unique_ptr<T> up) : member(*up), member2(move(up)) {}
+ if (IsLocalGslOwner && pathOnlyHandlesGslPointer(Path))
+ return false;
+
+ auto *DRE = dyn_cast<DeclRefExpr>(L);
+ auto *VD = DRE ? dyn_cast<VarDecl>(DRE->getDecl()) : nullptr;
+ if (!VD) {
+ // A member was initialized to a local block.
+ // FIXME: Warn on this.
+ return false;
+ }
+
+ if (auto *Member =
+ ExtendingEntity ? ExtendingEntity->getDecl() : nullptr) {
+ bool IsPointer = !Member->getType()->isReferenceType();
+ SemaRef.Diag(DiagLoc,
+ IsPointer ? diag::warn_init_ptr_member_to_parameter_addr
+ : diag::warn_bind_ref_member_to_parameter)
+ << Member << VD << isa<ParmVarDecl>(VD) << DiagRange;
+ SemaRef.Diag(Member->getLocation(),
+ diag::note_ref_or_ptr_member_declared_here)
+ << (unsigned)IsPointer;
+ }
+ }
+ break;
+ }
+
+ case LK_New:
+ if (isa<MaterializeTemporaryExpr>(L)) {
+ if (IsGslPtrValueFromGslTempOwner)
+ SemaRef.Diag(DiagLoc, diag::warn_dangling_lifetime_pointer)
+ << DiagRange;
+ else
+ SemaRef.Diag(DiagLoc, RK == RK_ReferenceBinding
+ ? diag::warn_new_dangling_reference
+ : diag::warn_new_dangling_initializer_list)
+ << !InitEntity->getParent() << DiagRange;
+ } else {
+ // We can't determine if the allocation outlives the local declaration.
+ return false;
+ }
+ break;
+
+ case LK_Return:
+ case LK_StmtExprResult:
+ if (auto *DRE = dyn_cast<DeclRefExpr>(L)) {
+ // We can't determine if the local variable outlives the statement
+ // expression.
+ if (LK == LK_StmtExprResult)
+ return false;
+ SemaRef.Diag(DiagLoc, diag::warn_ret_stack_addr_ref)
+ << InitEntity->getType()->isReferenceType() << DRE->getDecl()
+ << isa<ParmVarDecl>(DRE->getDecl()) << DiagRange;
+ } else if (isa<BlockExpr>(L)) {
+ SemaRef.Diag(DiagLoc, diag::err_ret_local_block) << DiagRange;
+ } else if (isa<AddrLabelExpr>(L)) {
+ // Don't warn when returning a label from a statement expression.
+ // Leaving the scope doesn't end its lifetime.
+ if (LK == LK_StmtExprResult)
+ return false;
+ SemaRef.Diag(DiagLoc, diag::warn_ret_addr_label) << DiagRange;
+ } else if (auto *CLE = dyn_cast<CompoundLiteralExpr>(L)) {
+ SemaRef.Diag(DiagLoc, diag::warn_ret_stack_addr_ref)
+ << InitEntity->getType()->isReferenceType() << CLE->getInitializer()
+ << 2 << DiagRange;
+ } else {
+ // P2748R5: Disallow Binding a Returned Glvalue to a Temporary.
+ // [stmt.return]/p6: In a function whose return type is a reference,
+ // other than an invented function for std::is_convertible ([meta.rel]),
+ // a return statement that binds the returned reference to a temporary
+ // expression ([class.temporary]) is ill-formed.
+ if (SemaRef.getLangOpts().CPlusPlus26 &&
+ InitEntity->getType()->isReferenceType())
+ SemaRef.Diag(DiagLoc, diag::err_ret_local_temp_ref)
+ << InitEntity->getType()->isReferenceType() << DiagRange;
+ else
+ SemaRef.Diag(DiagLoc, diag::warn_ret_local_temp_addr_ref)
+ << InitEntity->getType()->isReferenceType() << DiagRange;
+ }
+ break;
+ }
+
+ for (unsigned I = 0; I != Path.size(); ++I) {
+ auto Elem = Path[I];
+
+ switch (Elem.Kind) {
+ case IndirectLocalPathEntry::AddressOf:
+ case IndirectLocalPathEntry::LValToRVal:
+ // These exist primarily to mark the path as not permitting or
+ // supporting lifetime extension.
+ break;
+
+ case IndirectLocalPathEntry::LifetimeBoundCall:
+ case IndirectLocalPathEntry::TemporaryCopy:
+ case IndirectLocalPathEntry::GslPointerInit:
+ case IndirectLocalPathEntry::GslReferenceInit:
+ case IndirectLocalPathEntry::GslPointerAssignment:
+ // FIXME: Consider adding a note for these.
+ break;
+
+ case IndirectLocalPathEntry::DefaultInit: {
+ auto *FD = cast<FieldDecl>(Elem.D);
+ SemaRef.Diag(FD->getLocation(),
+ diag::note_init_with_default_member_initializer)
+ << FD << nextPathEntryRange(Path, I + 1, L);
+ break;
+ }
+
+ case IndirectLocalPathEntry::VarInit: {
+ const VarDecl *VD = cast<VarDecl>(Elem.D);
+ SemaRef.Diag(VD->getLocation(), diag::note_local_var_initializer)
+ << VD->getType()->isReferenceType() << VD->isImplicit()
+ << VD->getDeclName() << nextPathEntryRange(Path, I + 1, L);
+ break;
+ }
+
+ case IndirectLocalPathEntry::LambdaCaptureInit:
+ if (!Elem.Capture->capturesVariable())
+ break;
+ // FIXME: We can't easily tell apart an init-capture from a nested
+ // capture of an init-capture.
+ const ValueDecl *VD = Elem.Capture->getCapturedVar();
+ SemaRef.Diag(Elem.Capture->getLocation(),
+ diag::note_lambda_capture_initializer)
+ << VD << VD->isInitCapture() << Elem.Capture->isExplicit()
+ << (Elem.Capture->getCaptureKind() == LCK_ByRef) << VD
+ << nextPathEntryRange(Path, I + 1, L);
+ break;
+ }
+ }
+
+ // We didn't lifetime-extend, so don't go any further; we don't need more
+ // warnings or errors on inner temporaries within this one's initializer.
+ return false;
+ };
+
+ llvm::SmallVector<IndirectLocalPathEntry, 8> Path;
+ if (EnableLifetimeWarnings && LK == LK_Assignment &&
+ isRecordWithAttr<PointerAttr>(AEntity->LHS->getType()))
+ Path.push_back({IndirectLocalPathEntry::GslPointerAssignment, Init});
+
+ if (Init->isGLValue())
+ visitLocalsRetainedByReferenceBinding(Path, Init, RK_ReferenceBinding,
+ TemporaryVisitor,
+ EnableLifetimeWarnings);
+ else
+ visitLocalsRetainedByInitializer(
+ Path, Init, TemporaryVisitor,
+ // Don't revisit the sub inits for the initialization case.
+ /*RevisitSubinits=*/!InitEntity, EnableLifetimeWarnings);
+}
+
+void checkExprLifetime(Sema &SemaRef, const InitializedEntity &Entity,
+ Expr *Init) {
+ auto LTResult = getEntityLifetime(&Entity);
+ LifetimeKind LK = LTResult.getInt();
+ const InitializedEntity *ExtendingEntity = LTResult.getPointer();
+ bool EnableLifetimeWarnings = !SemaRef.getDiagnostics().isIgnored(
+ diag::warn_dangling_lifetime_pointer, SourceLocation());
+ checkExprLifetimeImpl(SemaRef, &Entity, ExtendingEntity, LK,
+ /*AEntity*/ nullptr, Init, EnableLifetimeWarnings);
+}
+
+void checkExprLifetime(Sema &SemaRef, const AssignedEntity &Entity,
+ Expr *Init) {
+ bool EnableLifetimeWarnings = !SemaRef.getDiagnostics().isIgnored(
+ diag::warn_dangling_lifetime_pointer, SourceLocation());
+ bool RunAnalysis = Entity.LHS->getType()->isPointerType() ||
+ (EnableLifetimeWarnings &&
+ isRecordWithAttr<PointerAttr>(Entity.LHS->getType()));
+
+ if (!RunAnalysis)
+ return;
+
+ checkExprLifetimeImpl(SemaRef, /*InitEntity=*/nullptr,
+ /*ExtendingEntity=*/nullptr, LK_Assignment, &Entity,
+ Init, EnableLifetimeWarnings);
+}
+
+} // namespace clang::sema
diff --git a/contrib/llvm-project/clang/lib/Sema/CheckExprLifetime.h b/contrib/llvm-project/clang/lib/Sema/CheckExprLifetime.h
new file mode 100644
index 000000000000..af381fb96c4d
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/CheckExprLifetime.h
@@ -0,0 +1,39 @@
+//===- CheckExprLifetime.h ----------------------------------- -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//===----------------------------------------------------------------------===//
+//
+// This files implements a statement-local lifetime analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_CHECK_EXPR_LIFETIME_H
+#define LLVM_CLANG_SEMA_CHECK_EXPR_LIFETIME_H
+
+#include "clang/AST/Expr.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Sema.h"
+
+namespace clang::sema {
+
+/// Describes an entity that is being assigned.
+struct AssignedEntity {
+ // The left-hand side expression of the assignment.
+ Expr *LHS = nullptr;
+};
+
+/// Check that the lifetime of the given expr (and its subobjects) is
+/// sufficient for initializing the entity, and perform lifetime extension
+/// (when permitted) if not.
+void checkExprLifetime(Sema &SemaRef, const InitializedEntity &Entity,
+ Expr *Init);
+
+/// Check that the lifetime of the given expr (and its subobjects) is
+/// sufficient for assigning to the entity.
+void checkExprLifetime(Sema &SemaRef, const AssignedEntity &Entity, Expr *Init);
+
+} // namespace clang::sema
+
+#endif // LLVM_CLANG_SEMA_CHECK_EXPR_LIFETIME_H
diff --git a/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp b/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
index 350bd78b5710..91713d71786e 100644
--- a/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
@@ -854,7 +854,8 @@ StringRef CodeCompletionResult::getOrderedName(std::string &Saved) const {
if (IdentifierInfo *Id = Name.getAsIdentifierInfo())
return Id->getName();
if (Name.isObjCZeroArgSelector())
- if (IdentifierInfo *Id = Name.getObjCSelector().getIdentifierInfoForSlot(0))
+ if (const IdentifierInfo *Id =
+ Name.getObjCSelector().getIdentifierInfoForSlot(0))
return Id->getName();
Saved = Name.getAsString();
diff --git a/contrib/llvm-project/clang/lib/Sema/DeclSpec.cpp b/contrib/llvm-project/clang/lib/Sema/DeclSpec.cpp
index 781f24cb71ae..9a4d52d4b6b7 100644
--- a/contrib/llvm-project/clang/lib/Sema/DeclSpec.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/DeclSpec.cpp
@@ -293,7 +293,7 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
void Declarator::setDecompositionBindings(
SourceLocation LSquareLoc,
- ArrayRef<DecompositionDeclarator::Binding> Bindings,
+ MutableArrayRef<DecompositionDeclarator::Binding> Bindings,
SourceLocation RSquareLoc) {
assert(!hasName() && "declarator given multiple names!");
@@ -317,7 +317,7 @@ void Declarator::setDecompositionBindings(
new DecompositionDeclarator::Binding[Bindings.size()];
BindingGroup.DeleteBindings = true;
}
- std::uninitialized_copy(Bindings.begin(), Bindings.end(),
+ std::uninitialized_move(Bindings.begin(), Bindings.end(),
BindingGroup.Bindings);
}
}
@@ -374,6 +374,7 @@ bool Declarator::isDeclarationOfFunction() const {
case TST_void:
case TST_wchar:
case TST_BFloat16:
+ case TST_typename_pack_indexing:
#define GENERIC_IMAGE_TYPE(ImgType, Id) case TST_##ImgType##_t:
#include "clang/Basic/OpenCLImageTypes.def"
return false;
@@ -415,6 +416,7 @@ bool Declarator::isDeclarationOfFunction() const {
bool Declarator::isStaticMember() {
assert(getContext() == DeclaratorContext::Member);
return getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static ||
+ (!isDeclarationOfFunction() && !getTemplateParameterLists().empty()) ||
(getName().getKind() == UnqualifiedIdKind::IK_OperatorFunctionId &&
CXXMethodDecl::isStaticOverloadedOperator(
getName().OperatorFunctionId.Operator));
@@ -585,6 +587,8 @@ const char *DeclSpec::getSpecifierName(DeclSpec::TST T,
case DeclSpec::TST_struct: return "struct";
case DeclSpec::TST_interface: return "__interface";
case DeclSpec::TST_typename: return "type-name";
+ case DeclSpec::TST_typename_pack_indexing:
+ return "type-name-pack-indexing";
case DeclSpec::TST_typeofType:
case DeclSpec::TST_typeofExpr: return "typeof";
case DeclSpec::TST_typeof_unqualType:
@@ -775,6 +779,15 @@ bool DeclSpec::SetTypeSpecType(TST T, SourceLocation TagKwLoc,
TSTLoc = TagKwLoc;
TSTNameLoc = TagNameLoc;
TypeSpecOwned = false;
+
+ if (T == TST_typename_pack_indexing) {
+ // we got there from a an annotation. Reconstruct the type
+ // Ugly...
+ QualType QT = Rep.get();
+ const PackIndexingType *LIT = cast<PackIndexingType>(QT);
+ TypeRep = ParsedType::make(LIT->getPattern());
+ PackIndexingExpr = LIT->getIndexExpr();
+ }
return false;
}
@@ -973,6 +986,15 @@ bool DeclSpec::SetBitIntType(SourceLocation KWLoc, Expr *BitsExpr,
return false;
}
+void DeclSpec::SetPackIndexingExpr(SourceLocation EllipsisLoc,
+ Expr *IndexingExpr) {
+ assert(TypeSpecType == TST_typename &&
+ "pack indexing can only be applied to typename");
+ TypeSpecType = TST_typename_pack_indexing;
+ PackIndexingExpr = IndexingExpr;
+ this->EllipsisLoc = EllipsisLoc;
+}
+
bool DeclSpec::SetTypeQual(TQ T, SourceLocation Loc, const char *&PrevSpec,
unsigned &DiagID, const LangOptions &Lang) {
// Duplicates are permitted in C99 onwards, but are not permitted in C89 or
@@ -1081,18 +1103,13 @@ bool DeclSpec::setFunctionSpecNoreturn(SourceLocation Loc,
bool DeclSpec::SetFriendSpec(SourceLocation Loc, const char *&PrevSpec,
unsigned &DiagID) {
- if (Friend_specified) {
+ if (isFriendSpecified()) {
PrevSpec = "friend";
- // Keep the later location, so that we can later diagnose ill-formed
- // declarations like 'friend class X friend;'. Per [class.friend]p3,
- // 'friend' must be the first token in a friend declaration that is
- // not a function declaration.
- FriendLoc = Loc;
DiagID = diag::warn_duplicate_declspec;
return true;
}
- Friend_specified = true;
+ FriendSpecifiedFirst = isEmpty();
FriendLoc = Loc;
return false;
}
@@ -1129,7 +1146,7 @@ void DeclSpec::SaveWrittenBuiltinSpecs() {
}
/// Finish - This does final analysis of the declspec, rejecting things like
-/// "_Imaginary" (lacking an FP type). After calling this method, DeclSpec is
+/// "_Complex" (lacking an FP type). After calling this method, DeclSpec is
/// guaranteed to be self-consistent, even if an error occurred.
void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
// Before possibly changing their values, save specs as written.
@@ -1186,7 +1203,10 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
!S.Context.getTargetInfo().hasFeature("power8-vector"))
S.Diag(TSTLoc, diag::err_invalid_vector_int128_decl_spec);
- if (TypeAltiVecBool) {
+ // Complex vector types are not supported.
+ if (TypeSpecComplex != TSC_unspecified)
+ S.Diag(TSCLoc, diag::err_invalid_vector_complex_decl_spec);
+ else if (TypeAltiVecBool) {
// Sign specifiers are not allowed with vector bool. (PIM 2.1)
if (getTypeSpecSign() != TypeSpecifierSign::Unspecified) {
S.Diag(TSSLoc, diag::err_invalid_vector_bool_decl_spec)
@@ -1311,8 +1331,8 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
break;
}
- // TODO: if the implementation does not implement _Complex or _Imaginary,
- // disallow their use. Need information about the backend.
+ // TODO: if the implementation does not implement _Complex, disallow their
+ // use. Need information about the backend.
if (TypeSpecComplex != TSC_unspecified) {
if (TypeSpecType == TST_unspecified) {
S.Diag(TSCLoc, diag::ext_plain_complex)
@@ -1361,6 +1381,20 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
ThreadStorageClassSpec = TSCS_unspecified;
ThreadStorageClassSpecLoc = SourceLocation();
}
+ if (S.getLangOpts().C23 &&
+ getConstexprSpecifier() == ConstexprSpecKind::Constexpr) {
+ S.Diag(ConstexprLoc, diag::err_invalid_decl_spec_combination)
+ << DeclSpec::getSpecifierName(getThreadStorageClassSpec())
+ << SourceRange(getThreadStorageClassSpecLoc());
+ }
+ }
+
+ if (S.getLangOpts().C23 &&
+ getConstexprSpecifier() == ConstexprSpecKind::Constexpr &&
+ StorageClassSpec == SCS_extern) {
+ S.Diag(ConstexprLoc, diag::err_invalid_decl_spec_combination)
+ << DeclSpec::getSpecifierName(getStorageClassSpec())
+ << SourceRange(getStorageClassSpecLoc());
}
// If no type specifier was provided and we're parsing a language where
diff --git a/contrib/llvm-project/clang/lib/Sema/HLSLExternalSemaSource.cpp b/contrib/llvm-project/clang/lib/Sema/HLSLExternalSemaSource.cpp
index 1a1febf7a352..ca88d138aef5 100644
--- a/contrib/llvm-project/clang/lib/Sema/HLSLExternalSemaSource.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/HLSLExternalSemaSource.cpp
@@ -115,23 +115,28 @@ struct BuiltinTypeDeclBuilder {
return addMemberVariable("h", Ty, Access);
}
- BuiltinTypeDeclBuilder &annotateResourceClass(ResourceClass RC,
- ResourceKind RK, bool IsROV) {
+ BuiltinTypeDeclBuilder &annotateHLSLResource(ResourceClass RC,
+ ResourceKind RK, bool IsROV) {
if (Record->isCompleteDefinition())
return *this;
- Record->addAttr(HLSLResourceAttr::CreateImplicit(Record->getASTContext(),
- RC, RK, IsROV));
+ Record->addAttr(
+ HLSLResourceClassAttr::CreateImplicit(Record->getASTContext(), RC));
+ Record->addAttr(
+ HLSLResourceAttr::CreateImplicit(Record->getASTContext(), RK, IsROV));
return *this;
}
static DeclRefExpr *lookupBuiltinFunction(ASTContext &AST, Sema &S,
StringRef Name) {
- CXXScopeSpec SS;
IdentifierInfo &II = AST.Idents.get(Name, tok::TokenKind::identifier);
DeclarationNameInfo NameInfo =
DeclarationNameInfo(DeclarationName(&II), SourceLocation());
LookupResult R(S, NameInfo, Sema::LookupOrdinaryName);
- S.LookupParsedName(R, S.getCurScope(), &SS, false);
+ // AllowBuiltinCreation is false but LookupDirect will create
+ // the builtin when searching the global scope anyways...
+ S.LookupName(R, S.getCurScope());
+ // FIXME: If the builtin function was user-declared in global scope,
+ // this assert *will* fail. Should this call LookupBuiltin instead?
assert(R.isSingleResult() &&
"Since this is a builtin it should always resolve!");
auto *VD = cast<ValueDecl>(R.getFoundDecl());
@@ -168,7 +173,6 @@ struct BuiltinTypeDeclBuilder {
DeclRefExpr *Fn =
lookupBuiltinFunction(AST, S, "__builtin_hlsl_create_handle");
-
Expr *RCExpr = emitResourceClassExpr(AST, RC);
Expr *Call = CallExpr::Create(AST, Fn, {RCExpr}, AST.VoidPtrTy, VK_PRValue,
SourceLocation(), FPOptionsOverride());
@@ -305,17 +309,18 @@ struct BuiltinTypeDeclBuilder {
return *this;
}
- TemplateParameterListBuilder addTemplateArgumentList();
- BuiltinTypeDeclBuilder &addSimpleTemplateParams(ArrayRef<StringRef> Names);
+ TemplateParameterListBuilder addTemplateArgumentList(Sema &S);
+ BuiltinTypeDeclBuilder &addSimpleTemplateParams(Sema &S,
+ ArrayRef<StringRef> Names);
};
struct TemplateParameterListBuilder {
BuiltinTypeDeclBuilder &Builder;
- ASTContext &AST;
+ Sema &S;
llvm::SmallVector<NamedDecl *> Params;
- TemplateParameterListBuilder(BuiltinTypeDeclBuilder &RB)
- : Builder(RB), AST(RB.Record->getASTContext()) {}
+ TemplateParameterListBuilder(Sema &S, BuiltinTypeDeclBuilder &RB)
+ : Builder(RB), S(S) {}
~TemplateParameterListBuilder() { finalizeTemplateArgs(); }
@@ -325,12 +330,15 @@ struct TemplateParameterListBuilder {
return *this;
unsigned Position = static_cast<unsigned>(Params.size());
auto *Decl = TemplateTypeParmDecl::Create(
- AST, Builder.Record->getDeclContext(), SourceLocation(),
+ S.Context, Builder.Record->getDeclContext(), SourceLocation(),
SourceLocation(), /* TemplateDepth */ 0, Position,
- &AST.Idents.get(Name, tok::TokenKind::identifier), /* Typename */ false,
+ &S.Context.Idents.get(Name, tok::TokenKind::identifier),
+ /* Typename */ false,
/* ParameterPack */ false);
if (!DefaultValue.isNull())
- Decl->setDefaultArgument(AST.getTrivialTypeSourceInfo(DefaultValue));
+ Decl->setDefaultArgument(
+ S.Context, S.getTrivialTemplateArgumentLoc(DefaultValue, QualType(),
+ SourceLocation()));
Params.emplace_back(Decl);
return *this;
@@ -339,11 +347,11 @@ struct TemplateParameterListBuilder {
BuiltinTypeDeclBuilder &finalizeTemplateArgs() {
if (Params.empty())
return Builder;
- auto *ParamList =
- TemplateParameterList::Create(AST, SourceLocation(), SourceLocation(),
- Params, SourceLocation(), nullptr);
+ auto *ParamList = TemplateParameterList::Create(S.Context, SourceLocation(),
+ SourceLocation(), Params,
+ SourceLocation(), nullptr);
Builder.Template = ClassTemplateDecl::Create(
- AST, Builder.Record->getDeclContext(), SourceLocation(),
+ S.Context, Builder.Record->getDeclContext(), SourceLocation(),
DeclarationName(Builder.Record->getIdentifier()), ParamList,
Builder.Record);
Builder.Record->setDescribedClassTemplate(Builder.Template);
@@ -356,20 +364,22 @@ struct TemplateParameterListBuilder {
Params.clear();
QualType T = Builder.Template->getInjectedClassNameSpecialization();
- T = AST.getInjectedClassNameType(Builder.Record, T);
+ T = S.Context.getInjectedClassNameType(Builder.Record, T);
return Builder;
}
};
} // namespace
-TemplateParameterListBuilder BuiltinTypeDeclBuilder::addTemplateArgumentList() {
- return TemplateParameterListBuilder(*this);
+TemplateParameterListBuilder
+BuiltinTypeDeclBuilder::addTemplateArgumentList(Sema &S) {
+ return TemplateParameterListBuilder(S, *this);
}
BuiltinTypeDeclBuilder &
-BuiltinTypeDeclBuilder::addSimpleTemplateParams(ArrayRef<StringRef> Names) {
- TemplateParameterListBuilder Builder = this->addTemplateArgumentList();
+BuiltinTypeDeclBuilder::addSimpleTemplateParams(Sema &S,
+ ArrayRef<StringRef> Names) {
+ TemplateParameterListBuilder Builder = this->addTemplateArgumentList(S);
for (StringRef Name : Names)
Builder.addTypeParameter(Name);
return Builder.finalizeTemplateArgs();
@@ -423,7 +433,9 @@ void HLSLExternalSemaSource::defineHLSLVectorAlias() {
auto *TypeParam = TemplateTypeParmDecl::Create(
AST, HLSLNamespace, SourceLocation(), SourceLocation(), 0, 0,
&AST.Idents.get("element", tok::TokenKind::identifier), false, false);
- TypeParam->setDefaultArgument(AST.getTrivialTypeSourceInfo(AST.FloatTy));
+ TypeParam->setDefaultArgument(
+ AST, SemaPtr->getTrivialTemplateArgumentLoc(
+ TemplateArgument(AST.FloatTy), QualType(), SourceLocation()));
TemplateParams.emplace_back(TypeParam);
@@ -431,10 +443,12 @@ void HLSLExternalSemaSource::defineHLSLVectorAlias() {
AST, HLSLNamespace, SourceLocation(), SourceLocation(), 0, 1,
&AST.Idents.get("element_count", tok::TokenKind::identifier), AST.IntTy,
false, AST.getTrivialTypeSourceInfo(AST.IntTy));
- Expr *LiteralExpr =
- IntegerLiteral::Create(AST, llvm::APInt(AST.getIntWidth(AST.IntTy), 4),
- AST.IntTy, SourceLocation());
- SizeParam->setDefaultArgument(LiteralExpr);
+ llvm::APInt Val(AST.getIntWidth(AST.IntTy), 4);
+ TemplateArgument Default(AST, llvm::APSInt(std::move(Val)), AST.IntTy,
+ /*IsDefaulted=*/true);
+ SizeParam->setDefaultArgument(
+ AST, SemaPtr->getTrivialTemplateArgumentLoc(Default, AST.IntTy,
+ SourceLocation(), SizeParam));
TemplateParams.emplace_back(SizeParam);
auto *ParamList =
@@ -468,12 +482,6 @@ void HLSLExternalSemaSource::defineHLSLVectorAlias() {
void HLSLExternalSemaSource::defineTrivialHLSLTypes() {
defineHLSLVectorAlias();
-
- ResourceDecl = BuiltinTypeDeclBuilder(*SemaPtr, HLSLNamespace, "Resource")
- .startDefinition()
- .addHandleMember(AccessSpecifier::AS_public)
- .completeDefinition()
- .Record;
}
/// Set up common members and attributes for buffer types
@@ -483,13 +491,13 @@ static BuiltinTypeDeclBuilder setupBufferType(CXXRecordDecl *Decl, Sema &S,
return BuiltinTypeDeclBuilder(Decl)
.addHandleMember()
.addDefaultHandleConstructor(S, RC)
- .annotateResourceClass(RC, RK, IsROV);
+ .annotateHLSLResource(RC, RK, IsROV);
}
void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
CXXRecordDecl *Decl;
Decl = BuiltinTypeDeclBuilder(*SemaPtr, HLSLNamespace, "RWBuffer")
- .addSimpleTemplateParams({"element_type"})
+ .addSimpleTemplateParams(*SemaPtr, {"element_type"})
.Record;
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV,
@@ -500,7 +508,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
Decl =
BuiltinTypeDeclBuilder(*SemaPtr, HLSLNamespace, "RasterizerOrderedBuffer")
- .addSimpleTemplateParams({"element_type"})
+ .addSimpleTemplateParams(*SemaPtr, {"element_type"})
.Record;
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV,
diff --git a/contrib/llvm-project/clang/lib/Sema/JumpDiagnostics.cpp b/contrib/llvm-project/clang/lib/Sema/JumpDiagnostics.cpp
index 45ff36d5fe23..8af36d5c24e3 100644
--- a/contrib/llvm-project/clang/lib/Sema/JumpDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/JumpDiagnostics.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtOpenACC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Sema/SemaInternal.h"
@@ -179,7 +180,8 @@ static ScopePair GetDiagForGotoScopeDecl(Sema &S, const Decl *D) {
}
const Expr *Init = VD->getInit();
- if (S.Context.getLangOpts().CPlusPlus && VD->hasLocalStorage() && Init) {
+ if (S.Context.getLangOpts().CPlusPlus && VD->hasLocalStorage() && Init &&
+ !Init->containsErrors()) {
// C++11 [stmt.dcl]p3:
// A program that jumps from a point where a variable with automatic
// storage duration is not in scope to a point where it is in scope
@@ -577,11 +579,8 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
// automatic storage duration.
MaterializeTemporaryExpr *MTE = cast<MaterializeTemporaryExpr>(S);
if (MTE->getStorageDuration() == SD_Automatic) {
- SmallVector<const Expr *, 4> CommaLHS;
- SmallVector<SubobjectAdjustment, 4> Adjustments;
const Expr *ExtendedObject =
- MTE->getSubExpr()->skipRValueSubobjectAdjustments(CommaLHS,
- Adjustments);
+ MTE->getSubExpr()->skipRValueSubobjectAdjustments();
if (ExtendedObject->getType().isDestructedType()) {
Scopes.push_back(GotoScope(ParentScope, 0,
diag::note_exits_temporary_dtor,
@@ -607,6 +606,16 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
break;
}
+ case Stmt::OpenACCComputeConstructClass: {
+ unsigned NewParentScope = Scopes.size();
+ OpenACCComputeConstruct *CC = cast<OpenACCComputeConstruct>(S);
+ Scopes.push_back(GotoScope(
+ ParentScope, diag::note_acc_branch_into_compute_construct,
+ diag::note_acc_branch_out_of_compute_construct, CC->getBeginLoc()));
+ BuildScopeInformation(CC->getStructuredBlock(), NewParentScope);
+ return;
+ }
+
default:
if (auto *ED = dyn_cast<OMPExecutableDirective>(S)) {
if (!ED->isStandaloneDirective()) {
@@ -939,11 +948,16 @@ void JumpScopeChecker::CheckJump(Stmt *From, Stmt *To, SourceLocation DiagLoc,
if (Scopes[I].InDiag == diag::note_protected_by_seh_finally) {
S.Diag(From->getBeginLoc(), diag::warn_jump_out_of_seh_finally);
break;
- }
- if (Scopes[I].InDiag == diag::note_omp_protected_structured_block) {
+ } else if (Scopes[I].InDiag ==
+ diag::note_omp_protected_structured_block) {
S.Diag(From->getBeginLoc(), diag::err_goto_into_protected_scope);
S.Diag(To->getBeginLoc(), diag::note_omp_exits_structured_block);
break;
+ } else if (Scopes[I].InDiag ==
+ diag::note_acc_branch_into_compute_construct) {
+ S.Diag(From->getBeginLoc(), diag::err_goto_into_protected_scope);
+ S.Diag(Scopes[I].Loc, diag::note_acc_branch_out_of_compute_construct);
+ return;
}
}
}
diff --git a/contrib/llvm-project/clang/lib/Sema/MultiplexExternalSemaSource.cpp b/contrib/llvm-project/clang/lib/Sema/MultiplexExternalSemaSource.cpp
index 058e22cb2b81..79e656eb4b7e 100644
--- a/contrib/llvm-project/clang/lib/Sema/MultiplexExternalSemaSource.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/MultiplexExternalSemaSource.cpp
@@ -46,7 +46,7 @@ void MultiplexExternalSemaSource::AddSource(ExternalSemaSource *Source) {
// ExternalASTSource.
//===----------------------------------------------------------------------===//
-Decl *MultiplexExternalSemaSource::GetExternalDecl(uint32_t ID) {
+Decl *MultiplexExternalSemaSource::GetExternalDecl(GlobalDeclID ID) {
for(size_t i = 0; i < Sources.size(); ++i)
if (Decl *Result = Sources[i]->GetExternalDecl(ID))
return Result;
diff --git a/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td b/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td
index 0cceba090bd8..4da61429fcce 100644
--- a/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td
+++ b/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td
@@ -936,7 +936,7 @@ def : Builtin<"read_mem_fence", [Void, MemFenceFlags]>;
def : Builtin<"write_mem_fence", [Void, MemFenceFlags]>;
// OpenCL v3.0 s6.15.10 - Address Space Qualifier Functions.
-// to_global, to_local, to_private are declared in Builtins.def.
+// to_global, to_local, to_private are declared in Builtins.td.
let Extension = FuncExtOpenCLCGenericAddressSpace in {
// The OpenCL 3.0 specification defines these with a "gentype" argument indicating any builtin
@@ -1448,25 +1448,25 @@ let Extension = FuncExtOpenCLCWGCollectiveFunctions in {
//--------------------------------------------------------------------
// OpenCL2.0 : 6.13.16 : Pipe Functions
// --- Table 27 ---
-// Defined in Builtins.def
+// Defined in Builtins.td
// --- Table 28 ---
-// Builtins taking pipe arguments are defined in Builtins.def
+// Builtins taking pipe arguments are defined in Builtins.td
let Extension = FuncExtOpenCLCPipes in {
def : Builtin<"is_valid_reserve_id", [Bool, ReserveId]>;
}
// --- Table 29 ---
-// Defined in Builtins.def
+// Defined in Builtins.td
//--------------------------------------------------------------------
// OpenCL2.0 : 6.13.17 : Enqueuing Kernels
// --- Table 30 ---
-// Defined in Builtins.def
+// Defined in Builtins.td
// --- Table 32 ---
-// Defined in Builtins.def
+// Defined in Builtins.td
// --- Table 33 ---
let Extension = FuncExtOpenCLCDeviceEnqueue in {
@@ -1852,6 +1852,20 @@ let Extension = FunctionExtension<"cl_khr_subgroup_rotate"> in {
def : Builtin<"sub_group_clustered_rotate", [AGenType1, AGenType1, Int, UInt], Attr.Convergent>;
}
+// cl_khr_kernel_clock
+let Extension = FunctionExtension<"cl_khr_kernel_clock __opencl_c_kernel_clock_scope_device"> in {
+ def : Builtin<"clock_read_device", [ULong]>;
+ def : Builtin<"clock_read_hilo_device", [VectorType<UInt, 2>]>;
+}
+let Extension = FunctionExtension<"cl_khr_kernel_clock __opencl_c_kernel_clock_scope_work_group"> in {
+ def : Builtin<"clock_read_work_group", [ULong]>;
+ def : Builtin<"clock_read_hilo_work_group", [VectorType<UInt, 2>]>;
+}
+let Extension = FunctionExtension<"cl_khr_kernel_clock __opencl_c_kernel_clock_scope_sub_group"> in {
+ def : Builtin<"clock_read_sub_group", [ULong]>;
+ def : Builtin<"clock_read_hilo_sub_group", [VectorType<UInt, 2>]>;
+}
+
//--------------------------------------------------------------------
// Arm extensions.
let Extension = ArmIntegerDotProductInt8 in {
diff --git a/contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp b/contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp
index 06c213267c7e..2109494aa588 100644
--- a/contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp
@@ -100,6 +100,12 @@ void AttributePool::takePool(AttributePool &pool) {
pool.Attrs.clear();
}
+void AttributePool::takeFrom(ParsedAttributesView &List, AttributePool &Pool) {
+ assert(&Pool != this && "AttributePool can't take attributes from itself");
+ llvm::for_each(List.AttrList, [&Pool](ParsedAttr *A) { Pool.remove(A); });
+ Attrs.insert(Attrs.end(), List.AttrList.begin(), List.AttrList.end());
+}
+
namespace {
#include "clang/Sema/AttrParsedAttrImpl.inc"
@@ -219,7 +225,7 @@ bool ParsedAttr::slidesFromDeclToDeclSpecLegacyBehavior() const {
// atributes.
return false;
- assert(isStandardAttributeSyntax());
+ assert(isStandardAttributeSyntax() || isAlignas());
// We have historically allowed some type attributes with standard attribute
// syntax to slide to the decl-specifier-seq, so we have to keep supporting
diff --git a/contrib/llvm-project/clang/lib/Sema/Scope.cpp b/contrib/llvm-project/clang/lib/Sema/Scope.cpp
index 4570d8c615fe..5bc7e79a6818 100644
--- a/contrib/llvm-project/clang/lib/Sema/Scope.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/Scope.cpp
@@ -37,6 +37,7 @@ void Scope::setFlags(Scope *parent, unsigned flags) {
FnParent = parent->FnParent;
BlockParent = parent->BlockParent;
TemplateParamParent = parent->TemplateParamParent;
+ DeclParent = parent->DeclParent;
MSLastManglingParent = parent->MSLastManglingParent;
MSCurManglingNumber = getMSLastManglingNumber();
if ((Flags & (FnScope | ClassScope | BlockScope | TemplateParamScope |
@@ -52,6 +53,7 @@ void Scope::setFlags(Scope *parent, unsigned flags) {
PrototypeIndex = 0;
MSLastManglingParent = FnParent = BlockParent = nullptr;
TemplateParamParent = nullptr;
+ DeclParent = nullptr;
MSLastManglingNumber = 1;
MSCurManglingNumber = 1;
}
@@ -76,6 +78,7 @@ void Scope::setFlags(Scope *parent, unsigned flags) {
PrototypeDepth++;
if (flags & DeclScope) {
+ DeclParent = this;
if (flags & FunctionPrototypeScope)
; // Prototype scopes are uninteresting.
else if ((flags & ClassScope) && getParent()->isClassScope())
@@ -225,6 +228,12 @@ void Scope::dumpImpl(raw_ostream &OS) const {
{CompoundStmtScope, "CompoundStmtScope"},
{ClassInheritanceScope, "ClassInheritanceScope"},
{CatchScope, "CatchScope"},
+ {ConditionVarScope, "ConditionVarScope"},
+ {OpenMPOrderClauseScope, "OpenMPOrderClauseScope"},
+ {LambdaScope, "LambdaScope"},
+ {OpenACCComputeConstructScope, "OpenACCComputeConstructScope"},
+ {TypeAliasScope, "TypeAliasScope"},
+ {FriendScope, "FriendScope"},
};
for (auto Info : FlagInfo) {
diff --git a/contrib/llvm-project/clang/lib/Sema/ScopeInfo.cpp b/contrib/llvm-project/clang/lib/Sema/ScopeInfo.cpp
index ce90451f2613..12fb70607272 100644
--- a/contrib/llvm-project/clang/lib/Sema/ScopeInfo.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/ScopeInfo.cpp
@@ -39,6 +39,7 @@ void FunctionScopeInfo::Clear() {
FirstReturnLoc = SourceLocation();
FirstCXXOrObjCTryLoc = SourceLocation();
FirstSEHTryLoc = SourceLocation();
+ FirstVLALoc = SourceLocation();
FoundImmediateEscalatingExpression = false;
// Coroutine state
diff --git a/contrib/llvm-project/clang/lib/Sema/Sema.cpp b/contrib/llvm-project/clang/lib/Sema/Sema.cpp
index 2d4e6d1d058c..2e989f0ba6fe 100644
--- a/contrib/llvm-project/clang/lib/Sema/Sema.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/Sema.cpp
@@ -41,8 +41,33 @@
#include "clang/Sema/RISCVIntrinsicManager.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaAMDGPU.h"
+#include "clang/Sema/SemaARM.h"
+#include "clang/Sema/SemaAVR.h"
+#include "clang/Sema/SemaBPF.h"
+#include "clang/Sema/SemaCUDA.h"
+#include "clang/Sema/SemaCodeCompletion.h"
#include "clang/Sema/SemaConsumer.h"
+#include "clang/Sema/SemaHLSL.h"
+#include "clang/Sema/SemaHexagon.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaLoongArch.h"
+#include "clang/Sema/SemaM68k.h"
+#include "clang/Sema/SemaMIPS.h"
+#include "clang/Sema/SemaMSP430.h"
+#include "clang/Sema/SemaNVPTX.h"
+#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaOpenACC.h"
+#include "clang/Sema/SemaOpenCL.h"
+#include "clang/Sema/SemaOpenMP.h"
+#include "clang/Sema/SemaPPC.h"
+#include "clang/Sema/SemaPseudoObject.h"
+#include "clang/Sema/SemaRISCV.h"
+#include "clang/Sema/SemaSYCL.h"
+#include "clang/Sema/SemaSwift.h"
+#include "clang/Sema/SemaSystemZ.h"
+#include "clang/Sema/SemaWasm.h"
+#include "clang/Sema/SemaX86.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TemplateInstCallback.h"
#include "clang/Sema/TypoCorrection.h"
@@ -89,9 +114,8 @@ DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() {
return nullptr;
}
-IdentifierInfo *
-Sema::InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
- unsigned int Index) {
+IdentifierInfo *Sema::InventAbbreviatedTemplateParameterTypeName(
+ const IdentifierInfo *ParamName, unsigned int Index) {
std::string InventedName;
llvm::raw_string_ostream OS(InventedName);
@@ -135,6 +159,7 @@ namespace sema {
class SemaPPCallbacks : public PPCallbacks {
Sema *S = nullptr;
llvm::SmallVector<SourceLocation, 8> IncludeStack;
+ llvm::SmallVector<llvm::TimeTraceProfilerEntry *, 8> ProfilerStack;
public:
void set(Sema &S) { this->S = &S; }
@@ -153,8 +178,8 @@ public:
if (IncludeLoc.isValid()) {
if (llvm::timeTraceProfilerEnabled()) {
OptionalFileEntryRef FE = SM.getFileEntryRefForID(SM.getFileID(Loc));
- llvm::timeTraceProfilerBegin("Source", FE ? FE->getName()
- : StringRef("<unknown>"));
+ ProfilerStack.push_back(llvm::timeTraceAsyncProfilerBegin(
+ "Source", FE ? FE->getName() : StringRef("<unknown>")));
}
IncludeStack.push_back(IncludeLoc);
@@ -167,7 +192,7 @@ public:
case ExitFile:
if (!IncludeStack.empty()) {
if (llvm::timeTraceProfilerEnabled())
- llvm::timeTraceProfilerEnd();
+ llvm::timeTraceProfilerEnd(ProfilerStack.pop_back_val());
S->DiagnoseNonDefaultPragmaAlignPack(
Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit,
@@ -188,47 +213,67 @@ const uint64_t Sema::MaximumAlignment;
Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter)
- : ExternalSource(nullptr), CurFPFeatures(pp.getLangOpts()),
- LangOpts(pp.getLangOpts()), PP(pp), Context(ctxt), Consumer(consumer),
- Diags(PP.getDiagnostics()), SourceMgr(PP.getSourceManager()),
- APINotes(SourceMgr, LangOpts), CollectStats(false),
- CodeCompleter(CodeCompleter), CurContext(nullptr),
- OriginalLexicalContext(nullptr), MSStructPragmaOn(false),
+ : SemaBase(*this), CollectStats(false), TUKind(TUKind),
+ CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp),
+ Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()),
+ SourceMgr(PP.getSourceManager()), APINotes(SourceMgr, LangOpts),
+ AnalysisWarnings(*this), ThreadSafetyDeclCache(nullptr),
+ LateTemplateParser(nullptr), LateTemplateParserCleanup(nullptr),
+ OpaqueParser(nullptr), CurContext(nullptr), ExternalSource(nullptr),
+ CurScope(nullptr), Ident_super(nullptr),
+ AMDGPUPtr(std::make_unique<SemaAMDGPU>(*this)),
+ ARMPtr(std::make_unique<SemaARM>(*this)),
+ AVRPtr(std::make_unique<SemaAVR>(*this)),
+ BPFPtr(std::make_unique<SemaBPF>(*this)),
+ CodeCompletionPtr(
+ std::make_unique<SemaCodeCompletion>(*this, CodeCompleter)),
+ CUDAPtr(std::make_unique<SemaCUDA>(*this)),
+ HLSLPtr(std::make_unique<SemaHLSL>(*this)),
+ HexagonPtr(std::make_unique<SemaHexagon>(*this)),
+ LoongArchPtr(std::make_unique<SemaLoongArch>(*this)),
+ M68kPtr(std::make_unique<SemaM68k>(*this)),
+ MIPSPtr(std::make_unique<SemaMIPS>(*this)),
+ MSP430Ptr(std::make_unique<SemaMSP430>(*this)),
+ NVPTXPtr(std::make_unique<SemaNVPTX>(*this)),
+ ObjCPtr(std::make_unique<SemaObjC>(*this)),
+ OpenACCPtr(std::make_unique<SemaOpenACC>(*this)),
+ OpenCLPtr(std::make_unique<SemaOpenCL>(*this)),
+ OpenMPPtr(std::make_unique<SemaOpenMP>(*this)),
+ PPCPtr(std::make_unique<SemaPPC>(*this)),
+ PseudoObjectPtr(std::make_unique<SemaPseudoObject>(*this)),
+ RISCVPtr(std::make_unique<SemaRISCV>(*this)),
+ SYCLPtr(std::make_unique<SemaSYCL>(*this)),
+ SwiftPtr(std::make_unique<SemaSwift>(*this)),
+ SystemZPtr(std::make_unique<SemaSystemZ>(*this)),
+ WasmPtr(std::make_unique<SemaWasm>(*this)),
+ X86Ptr(std::make_unique<SemaX86>(*this)),
MSPointerToMemberRepresentationMethod(
LangOpts.getMSPointerToMemberRepresentationMethod()),
- VtorDispStack(LangOpts.getVtorDispMode()),
+ MSStructPragmaOn(false), VtorDispStack(LangOpts.getVtorDispMode()),
AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)),
DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
CodeSegStack(nullptr), StrictGuardStackCheckStack(false),
FpPragmaStack(FPOptionsOverride()), CurInitSeg(nullptr),
VisContext(nullptr), PragmaAttributeCurrentTargetDecl(nullptr),
- IsBuildingRecoveryCallExpr(false), LateTemplateParser(nullptr),
- LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp),
- StdInitializerList(nullptr), StdCoroutineTraitsCache(nullptr),
- CXXTypeInfoDecl(nullptr), StdSourceLocationImplDecl(nullptr),
- NSNumberDecl(nullptr), NSValueDecl(nullptr), NSStringDecl(nullptr),
- StringWithUTF8StringMethod(nullptr),
- ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr),
- ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr),
- DictionaryWithObjectsMethod(nullptr), GlobalNewDeleteDeclared(false),
- TUKind(TUKind), NumSFINAEErrors(0),
+ StdCoroutineTraitsCache(nullptr), IdResolver(pp),
+ OriginalLexicalContext(nullptr), StdInitializerList(nullptr),
FullyCheckedComparisonCategories(
static_cast<unsigned>(ComparisonCategoryType::Last) + 1),
- SatisfactionCache(Context), AccessCheckingSFINAE(false),
+ StdSourceLocationImplDecl(nullptr), CXXTypeInfoDecl(nullptr),
+ GlobalNewDeleteDeclared(false), DisableTypoCorrection(false),
+ TyposCorrected(0), IsBuildingRecoveryCallExpr(false), NumSFINAEErrors(0),
+ AccessCheckingSFINAE(false), CurrentInstantiationScope(nullptr),
InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0),
- ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr),
- DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this),
- ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr),
- CurScope(nullptr), Ident_super(nullptr) {
+ ArgumentPackSubstitutionIndex(-1), SatisfactionCache(Context) {
assert(pp.TUKind == TUKind);
TUScope = nullptr;
LoadedExternalKnownNamespaces = false;
for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
- NSNumberLiteralMethods[I] = nullptr;
+ ObjC().NSNumberLiteralMethods[I] = nullptr;
if (getLangOpts().ObjC)
- NSAPIObj.reset(new NSAPI(Context));
+ ObjC().NSAPIObj.reset(new NSAPI(Context));
if (getLangOpts().CPlusPlus)
FieldCollector.reset(new CXXFieldCollector());
@@ -244,7 +289,7 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
nullptr, ExpressionEvaluationContextRecord::EK_Other);
// Initialization of data sharing attributes stack for OpenMP
- InitDataSharingAttributesStack();
+ OpenMP().InitDataSharingAttributesStack();
std::unique_ptr<sema::SemaPPCallbacks> Callbacks =
std::make_unique<sema::SemaPPCallbacks>();
@@ -424,7 +469,9 @@ void Sema::Initialize() {
#include "clang/Basic/OpenCLExtensionTypes.def"
}
- if (Context.getTargetInfo().hasAArch64SVETypes()) {
+ if (Context.getTargetInfo().hasAArch64SVETypes() ||
+ (Context.getAuxTargetInfo() &&
+ Context.getAuxTargetInfo()->hasAArch64SVETypes())) {
#define SVE_TYPE(Name, Id, SingletonId) \
addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/AArch64SVEACLETypes.def"
@@ -452,6 +499,14 @@ void Sema::Initialize() {
#include "clang/Basic/WebAssemblyReferenceTypes.def"
}
+ if (Context.getTargetInfo().getTriple().isAMDGPU() ||
+ (Context.getAuxTargetInfo() &&
+ Context.getAuxTargetInfo()->getTriple().isAMDGPU())) {
+#define AMDGPU_TYPE(Name, Id, SingletonId) \
+ addImplicitTypedef(Name, Context.SingletonId);
+#include "clang/Basic/AMDGPUTypes.def"
+ }
+
if (Context.getTargetInfo().hasBuiltinMSVaList()) {
DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list");
if (IdResolver.begin(MSVaList) == IdResolver.end())
@@ -493,7 +548,7 @@ Sema::~Sema() {
threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache);
// Destroys data sharing attributes stack for OpenMP
- DestroyDataSharingAttributesStack();
+ OpenMP().DestroyDataSharingAttributesStack();
// Detach from the PP callback handler which outlives Sema since it's owned
// by the preprocessor.
@@ -513,10 +568,6 @@ void Sema::runWithSufficientStackSpace(SourceLocation Loc,
clang::runWithSufficientStackSpace([&] { warnStackExhausted(Loc); }, Fn);
}
-/// makeUnavailableInSystemHeader - There is an error in the current
-/// context. If we're still in a system header, and we can plausibly
-/// make the relevant declaration unavailable instead of erroring, do
-/// so and return true.
bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason) {
// If we're not in a function, it's an error.
@@ -542,11 +593,6 @@ ASTMutationListener *Sema::getASTMutationListener() const {
return getASTConsumer().GetASTMutationListener();
}
-///Registers an external source. If an external source already exists,
-/// creates a multiplex external source and appends to it.
-///
-///\param[in] E - A non-null external sema source.
-///
void Sema::addExternalSource(ExternalSemaSource *E) {
assert(E && "Cannot use with NULL ptr");
@@ -561,7 +607,6 @@ void Sema::addExternalSource(ExternalSemaSource *E) {
ExternalSource = new MultiplexExternalSemaSource(ExternalSource.get(), E);
}
-/// Print out statistics about the semantic analysis.
void Sema::PrintStats() const {
llvm::errs() << "\n*** Semantic Analysis Stats:\n";
llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n";
@@ -585,6 +630,19 @@ void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType;
}
+// Generate diagnostics when adding or removing effects in a type conversion.
+void Sema::diagnoseFunctionEffectConversion(QualType DstType, QualType SrcType,
+ SourceLocation Loc) {
+ const auto SrcFX = FunctionEffectsRef::get(SrcType);
+ const auto DstFX = FunctionEffectsRef::get(DstType);
+ if (SrcFX != DstFX) {
+ for (const auto &Diff : FunctionEffectDifferences(SrcFX, DstFX)) {
+ if (Diff.shouldDiagnoseConversion(SrcType, SrcFX, DstType, DstFX))
+ Diag(Loc, diag::warn_invalid_add_func_effects) << Diff.effectName();
+ }
+ }
+}
+
void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E) {
// nullptr only exists from C++11 on, so don't warn on its absence earlier.
if (!getLangOpts().CPlusPlus11)
@@ -652,6 +710,7 @@ ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
case CK_FunctionToPointerDecay:
case CK_ToVoid:
case CK_NonAtomicToAtomic:
+ case CK_HLSLArrayRValue:
break;
}
}
@@ -661,6 +720,9 @@ ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc());
diagnoseZeroToNullptrConversion(Kind, E);
+ if (Context.hasAnyFunctionEffects() && !isCast(CCK) &&
+ Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
+ diagnoseFunctionEffectConversion(Ty, E->getType(), E->getBeginLoc());
QualType ExprTy = Context.getCanonicalType(E->getType());
QualType TypeTy = Context.getCanonicalType(Ty);
@@ -712,8 +774,6 @@ ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
CurFPFeatureOverrides());
}
-/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
-/// to the conversion from scalar type ScalarTy to the Boolean type.
CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
switch (ScalarTy->getScalarTypeKind()) {
case Type::STK_Bool: return CK_NoOp;
@@ -1030,9 +1090,6 @@ void Sema::emitAndClearUnusedLocalTypedefWarnings() {
UnusedLocalTypedefNameCandidates.clear();
}
-/// This is called before the very first declaration in the translation unit
-/// is parsed. Note that the ASTContext may have already injected some
-/// declarations.
void Sema::ActOnStartOfTranslationUnit() {
if (getLangOpts().CPlusPlusModules &&
getLangOpts().getCompilingModule() == LangOptions::CMK_HeaderUnit)
@@ -1104,9 +1161,6 @@ void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
DelayedTypos.clear();
}
-/// ActOnEndOfTranslationUnit - This is called at the very end of the
-/// translation unit when EOF is reached and all but the top-level scope is
-/// popped.
void Sema::ActOnEndOfTranslationUnit() {
assert(DelayedDiagnostics.getCurrentPool() == nullptr
&& "reached end of translation unit with a pool attached?");
@@ -1119,7 +1173,7 @@ void Sema::ActOnEndOfTranslationUnit() {
// Complete translation units and modules define vtables and perform implicit
// instantiations. PCH files do not.
if (TUKind != TU_Prefix) {
- DiagnoseUseOfUnimplementedSelectors();
+ ObjC().DiagnoseUseOfUnimplementedSelectors();
ActOnEndOfTranslationUnitFragment(
!ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
@@ -1150,7 +1204,7 @@ void Sema::ActOnEndOfTranslationUnit() {
DiagnoseUnterminatedPragmaAlignPack();
DiagnoseUnterminatedPragmaAttribute();
- DiagnoseUnterminatedOpenMPDeclareTarget();
+ OpenMP().DiagnoseUnterminatedOpenMPDeclareTarget();
// All delayed member exception specs should be checked or we end up accepting
// incompatible declarations.
@@ -1207,26 +1261,35 @@ void Sema::ActOnEndOfTranslationUnit() {
}
// A global-module-fragment is only permitted within a module unit.
- bool DiagnosedMissingModuleDeclaration = false;
if (!ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
Module::ExplicitGlobalModuleFragment) {
Diag(ModuleScopes.back().BeginLoc,
diag::err_module_declaration_missing_after_global_module_introducer);
- DiagnosedMissingModuleDeclaration = true;
- }
-
- if (TUKind == TU_Module) {
- // If we are building a module interface unit, we need to have seen the
- // module declaration by now.
- if (getLangOpts().getCompilingModule() ==
- LangOptions::CMK_ModuleInterface &&
- !isCurrentModulePurview() && !DiagnosedMissingModuleDeclaration) {
- // FIXME: Make a better guess as to where to put the module declaration.
- Diag(getSourceManager().getLocForStartOfFile(
- getSourceManager().getMainFileID()),
- diag::err_module_declaration_missing);
- }
+ }
+
+ // Now we can decide whether the modules we're building need an initializer.
+ if (Module *CurrentModule = getCurrentModule();
+ CurrentModule && CurrentModule->isInterfaceOrPartition()) {
+ auto DoesModNeedInit = [this](Module *M) {
+ if (!getASTContext().getModuleInitializers(M).empty())
+ return true;
+ for (auto [Exported, _] : M->Exports)
+ if (Exported->isNamedModuleInterfaceHasInit())
+ return true;
+ for (Module *I : M->Imports)
+ if (I->isNamedModuleInterfaceHasInit())
+ return true;
+ return false;
+ };
+
+ CurrentModule->NamedModuleHasInit =
+ DoesModNeedInit(CurrentModule) ||
+ llvm::any_of(CurrentModule->submodules(),
+ [&](auto *SubM) { return DoesModNeedInit(SubM); });
+ }
+
+ if (TUKind == TU_ClangModule) {
// If we are building a module, resolve all of the exported declarations
// now.
if (Module *CurrentModule = PP.getCurrentModule()) {
@@ -1251,28 +1314,6 @@ void Sema::ActOnEndOfTranslationUnit() {
}
}
- // Now we can decide whether the modules we're building need an initializer.
- if (Module *CurrentModule = getCurrentModule();
- CurrentModule && CurrentModule->isInterfaceOrPartition()) {
- auto DoesModNeedInit = [this](Module *M) {
- if (!getASTContext().getModuleInitializers(M).empty())
- return true;
- for (auto [Exported, _] : M->Exports)
- if (Exported->isNamedModuleInterfaceHasInit())
- return true;
- for (Module *I : M->Imports)
- if (I->isNamedModuleInterfaceHasInit())
- return true;
-
- return false;
- };
-
- CurrentModule->NamedModuleHasInit =
- DoesModNeedInit(CurrentModule) ||
- llvm::any_of(CurrentModule->submodules(),
- [&](auto *SubM) { return DoesModNeedInit(SubM); });
- }
-
// Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for
// modules when they are built, not every time they are used.
emitAndClearUnusedLocalTypedefWarnings();
@@ -1354,11 +1395,15 @@ void Sema::ActOnEndOfTranslationUnit() {
Consumer.CompleteExternalDeclaration(D);
}
+ if (LangOpts.HLSL)
+ HLSL().DiagnoseAvailabilityViolations(
+ getASTContext().getTranslationUnitDecl());
+
// If there were errors, disable 'unused' warnings since they will mostly be
// noise. Don't warn for a use from a module: either we should warn on all
// file-scope declarations in modules or not at all, but whether the
// declaration is used is immaterial.
- if (!Diags.hasErrorOccurred() && TUKind != TU_Module) {
+ if (!Diags.hasErrorOccurred() && TUKind != TU_ClangModule) {
// Output warning for unused file scoped decls.
for (UnusedFileScopedDeclsType::iterator
I = UnusedFileScopedDecls.begin(ExternalSource.get()),
@@ -1393,7 +1438,8 @@ void Sema::ActOnEndOfTranslationUnit() {
Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
<< /*function=*/0 << DiagD << DiagRange;
}
- } else {
+ } else if (!FD->isTargetMultiVersion() ||
+ FD->isTargetMultiVersionDefault()) {
if (FD->getDescribedFunctionTemplate())
Diag(DiagD->getLocation(), diag::warn_unused_template)
<< /*function=*/0 << DiagD << DiagRange;
@@ -1410,7 +1456,7 @@ void Sema::ActOnEndOfTranslationUnit() {
SourceRange DiagRange = DiagD->getLocation();
if (const auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(DiagD)) {
if (const ASTTemplateArgumentListInfo *ASTTAL =
- VTSD->getTemplateArgsInfo())
+ VTSD->getTemplateArgsAsWritten())
DiagRange.setEnd(ASTTAL->RAngleLoc);
}
if (DiagD->isReferenced()) {
@@ -1621,11 +1667,6 @@ void Sema::EmitCurrentDiagnostic(unsigned DiagID) {
PrintContextStack();
}
-Sema::SemaDiagnosticBuilder
-Sema::Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint) {
- return Diag(Loc, PD.getDiagID(), DeferHint) << PD;
-}
-
bool Sema::hasUncompilableErrorOccurred() const {
if (getDiagnostics().hasUncompilableErrorOccurred())
return true;
@@ -1645,15 +1686,15 @@ bool Sema::hasUncompilableErrorOccurred() const {
// Print notes showing how we can reach FD starting from an a priori
// known-callable function.
static void emitCallStackNotes(Sema &S, const FunctionDecl *FD) {
- auto FnIt = S.DeviceKnownEmittedFns.find(FD);
- while (FnIt != S.DeviceKnownEmittedFns.end()) {
+ auto FnIt = S.CUDA().DeviceKnownEmittedFns.find(FD);
+ while (FnIt != S.CUDA().DeviceKnownEmittedFns.end()) {
// Respect error limit.
if (S.Diags.hasFatalErrorOccurred())
return;
DiagnosticBuilder Builder(
S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
Builder << FnIt->second.FD;
- FnIt = S.DeviceKnownEmittedFns.find(FnIt->second.FD);
+ FnIt = S.CUDA().DeviceKnownEmittedFns.find(FnIt->second.FD);
}
}
@@ -1755,9 +1796,9 @@ public:
// Finalize analysis of OpenMP-specific constructs.
if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 &&
(ShouldEmitRootNode || InOMPDeviceContext))
- S.finalizeOpenMPDelayedAnalysis(Caller, FD, Loc);
+ S.OpenMP().finalizeOpenMPDelayedAnalysis(Caller, FD, Loc);
if (Caller)
- S.DeviceKnownEmittedFns[FD] = {Caller, Loc};
+ S.CUDA().DeviceKnownEmittedFns[FD] = {Caller, Loc};
// Always emit deferred diagnostics for the direct users. This does not
// lead to explosion of diagnostics since each user is visited at most
// twice.
@@ -1846,8 +1887,8 @@ void Sema::emitDeferredDiags() {
// which other not-known-emitted functions.
//
// When we see something which is illegal if the current function is emitted
-// (usually by way of CUDADiagIfDeviceCode, CUDADiagIfHostCode, or
-// CheckCUDACall), we first check if the current function is known-emitted. If
+// (usually by way of DiagIfDeviceCode, DiagIfHostCode, or
+// CheckCall), we first check if the current function is known-emitted. If
// so, we immediately output the diagnostic.
//
// Otherwise, we "defer" the diagnostic. It sits in Sema::DeviceDeferredDiags
@@ -1907,42 +1948,19 @@ Sema::targetDiag(SourceLocation Loc, unsigned DiagID, const FunctionDecl *FD) {
FD = FD ? FD : getCurFunctionDecl();
if (LangOpts.OpenMP)
return LangOpts.OpenMPIsTargetDevice
- ? diagIfOpenMPDeviceCode(Loc, DiagID, FD)
- : diagIfOpenMPHostCode(Loc, DiagID, FD);
+ ? OpenMP().diagIfOpenMPDeviceCode(Loc, DiagID, FD)
+ : OpenMP().diagIfOpenMPHostCode(Loc, DiagID, FD);
if (getLangOpts().CUDA)
- return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID)
- : CUDADiagIfHostCode(Loc, DiagID);
+ return getLangOpts().CUDAIsDevice ? CUDA().DiagIfDeviceCode(Loc, DiagID)
+ : CUDA().DiagIfHostCode(Loc, DiagID);
if (getLangOpts().SYCLIsDevice)
- return SYCLDiagIfDeviceCode(Loc, DiagID);
+ return SYCL().DiagIfDeviceCode(Loc, DiagID);
return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
FD, *this);
}
-Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID,
- bool DeferHint) {
- bool IsError = Diags.getDiagnosticIDs()->isDefaultMappingAsError(DiagID);
- bool ShouldDefer = getLangOpts().CUDA && LangOpts.GPUDeferDiag &&
- DiagnosticIDs::isDeferrable(DiagID) &&
- (DeferHint || DeferDiags || !IsError);
- auto SetIsLastErrorImmediate = [&](bool Flag) {
- if (IsError)
- IsLastErrorImmediate = Flag;
- };
- if (!ShouldDefer) {
- SetIsLastErrorImmediate(true);
- return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc,
- DiagID, getCurFunctionDecl(), *this);
- }
-
- SemaDiagnosticBuilder DB = getLangOpts().CUDAIsDevice
- ? CUDADiagIfDeviceCode(Loc, DiagID)
- : CUDADiagIfHostCode(Loc, DiagID);
- SetIsLastErrorImmediate(DB.isImmediate());
- return DB;
-}
-
void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
if (isUnevaluatedContext() || Ty.isNull())
return;
@@ -1953,7 +1971,7 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
// constant byte size like zero length arrays. So, do a deep check for SYCL.
if (D && LangOpts.SYCLIsDevice) {
llvm::DenseSet<QualType> Visited;
- deepTypeCheckForSYCLDevice(Loc, Visited, D);
+ SYCL().deepTypeCheckForDevice(Loc, Visited, D);
}
Decl *C = cast<Decl>(getCurLexicalContext());
@@ -2076,16 +2094,24 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
}
- if (TI.hasRISCVVTypes() && Ty->isRVVSizelessBuiltinType())
- checkRVVTypeSupport(Ty, Loc, D);
+ if (TI.hasRISCVVTypes() && Ty->isRVVSizelessBuiltinType() && FD) {
+ llvm::StringMap<bool> CallerFeatureMap;
+ Context.getFunctionFeatureMap(CallerFeatureMap, FD);
+ RISCV().checkRVVTypeSupport(Ty, Loc, D, CallerFeatureMap);
+ }
// Don't allow SVE types in functions without a SVE target.
- if (Ty->isSVESizelessBuiltinType() && FD && FD->hasBody()) {
+ if (Ty->isSVESizelessBuiltinType() && FD) {
llvm::StringMap<bool> CallerFeatureMap;
Context.getFunctionFeatureMap(CallerFeatureMap, FD);
- if (!Builtin::evaluateRequiredTargetFeatures("sve", CallerFeatureMap) &&
- !Builtin::evaluateRequiredTargetFeatures("sme", CallerFeatureMap))
- Diag(D->getLocation(), diag::err_sve_vector_in_non_sve_target) << Ty;
+ if (!Builtin::evaluateRequiredTargetFeatures("sve", CallerFeatureMap)) {
+ if (!Builtin::evaluateRequiredTargetFeatures("sme", CallerFeatureMap))
+ Diag(Loc, diag::err_sve_vector_in_non_sve_target) << Ty;
+ else if (!IsArmStreamingFunction(FD,
+ /*IncludeLocallyStreaming=*/true)) {
+ Diag(Loc, diag::err_sve_vector_in_non_streaming_function) << Ty;
+ }
+ }
}
};
@@ -2099,10 +2125,6 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
CheckType(FNPTy->getReturnType(), /*IsRetTy=*/true);
}
-/// Looks through the macro-expansion chain for the given
-/// location, looking for a macro expansion with the given name.
-/// If one is found, returns true and sets the location to that
-/// expansion loc.
bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
SourceLocation loc = locref;
if (!loc.isMacroID()) return false;
@@ -2120,17 +2142,6 @@ bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
return false;
}
-/// Determines the active Scope associated with the given declaration
-/// context.
-///
-/// This routine maps a declaration context to the active Scope object that
-/// represents that declaration context in the parser. It is typically used
-/// from "scope-less" code (e.g., template instantiation, lazy creation of
-/// declarations) that injects a name for name-lookup purposes and, therefore,
-/// must update the Scope.
-///
-/// \returns The scope corresponding to the given declaraion context, or NULL
-/// if no such scope is open.
Scope *Sema::getScopeForContext(DeclContext *Ctx) {
if (!Ctx)
@@ -2159,7 +2170,7 @@ void Sema::PushFunctionScope() {
FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics()));
}
if (LangOpts.OpenMP)
- pushOpenMPFunctionRegion();
+ OpenMP().pushOpenMPFunctionRegion();
}
void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
@@ -2218,7 +2229,7 @@ static void checkEscapingByref(VarDecl *VD, Sema &S) {
// block copy/destroy functions. Resolve it here.
if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
if (CXXDestructorDecl *DD = RD->getDestructor()) {
- auto *FPT = DD->getType()->getAs<FunctionProtoType>();
+ auto *FPT = DD->getType()->castAs<FunctionProtoType>();
S.ResolveExceptionSpec(Loc, FPT);
}
}
@@ -2261,13 +2272,6 @@ static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
}
}
-/// Pop a function (or block or lambda or captured region) scope from the stack.
-///
-/// \param WP The warning policy to use for CFG-based warnings, or null if such
-/// warnings should not be produced.
-/// \param D The declaration corresponding to this function scope, if producing
-/// CFG-based warnings.
-/// \param BlockType The type of the block expression, if D is a BlockDecl.
Sema::PoppedFunctionScopePtr
Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
const Decl *D, QualType BlockType) {
@@ -2279,7 +2283,7 @@ Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
PoppedFunctionScopeDeleter(this));
if (LangOpts.OpenMP)
- popOpenMPFunctionRegion(Scope.get());
+ OpenMP().popOpenMPFunctionRegion(Scope.get());
// Issue any analysis-based warnings.
if (WP && D)
@@ -2314,8 +2318,6 @@ void Sema::PopCompoundScope() {
CurFunction->CompoundScopes.pop_back();
}
-/// Determine whether any errors occurred within this function/method/
-/// block.
bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
return getCurFunction()->hasUnrecoverableErrorOccurred();
}
@@ -2466,17 +2468,6 @@ void ExternalSemaSource::ReadUndefinedButUsed(
void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector<
FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {}
-/// Figure out if an expression could be turned into a call.
-///
-/// Use this when trying to recover from an error where the programmer may have
-/// written just the name of a function instead of actually calling it.
-///
-/// \param E - The expression to examine.
-/// \param ZeroArgCallReturnTy - If the expression can be turned into a call
-/// with no arguments, this parameter is set to the type returned by such a
-/// call; otherwise, it is set to an empty QualType.
-/// \param OverloadSet - If the expression is an overloaded function
-/// name, this parameter is populated with the decls of the various overloads.
bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &OverloadSet) {
ZeroArgCallReturnTy = QualType();
@@ -2485,7 +2476,7 @@ bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
const OverloadExpr *Overloads = nullptr;
bool IsMemExpr = false;
if (E.getType() == Context.OverloadTy) {
- OverloadExpr::FindResult FR = OverloadExpr::find(const_cast<Expr*>(&E));
+ OverloadExpr::FindResult FR = OverloadExpr::find(&E);
// Ignore overloads that are pointer-to-member constants.
if (FR.HasFormOfMemberPointer)
@@ -2715,7 +2706,9 @@ void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
unsigned OpenMPCaptureLevel) {
auto *CSI = new CapturedRegionScopeInfo(
getDiagnostics(), S, CD, RD, CD->getContextParam(), K,
- (getLangOpts().OpenMP && K == CR_OpenMP) ? getOpenMPNestingLevel() : 0,
+ (getLangOpts().OpenMP && K == CR_OpenMP)
+ ? OpenMP().getOpenMPNestingLevel()
+ : 0,
OpenMPCaptureLevel);
CSI->ReturnType = Context.VoidTy;
FunctionScopes.push_back(CSI);
@@ -2768,3 +2761,153 @@ bool Sema::isDeclaratorFunctionLike(Declarator &D) {
});
return Result;
}
+
+FunctionEffectDifferences::FunctionEffectDifferences(
+ const FunctionEffectsRef &Old, const FunctionEffectsRef &New) {
+
+ FunctionEffectsRef::iterator POld = Old.begin();
+ FunctionEffectsRef::iterator OldEnd = Old.end();
+ FunctionEffectsRef::iterator PNew = New.begin();
+ FunctionEffectsRef::iterator NewEnd = New.end();
+
+ while (true) {
+ int cmp = 0;
+ if (POld == OldEnd) {
+ if (PNew == NewEnd)
+ break;
+ cmp = 1;
+ } else if (PNew == NewEnd)
+ cmp = -1;
+ else {
+ FunctionEffectWithCondition Old = *POld;
+ FunctionEffectWithCondition New = *PNew;
+ if (Old.Effect.kind() < New.Effect.kind())
+ cmp = -1;
+ else if (New.Effect.kind() < Old.Effect.kind())
+ cmp = 1;
+ else {
+ cmp = 0;
+ if (Old.Cond.getCondition() != New.Cond.getCondition()) {
+ // FIXME: Cases where the expressions are equivalent but
+ // don't have the same identity.
+ push_back(FunctionEffectDiff{
+ Old.Effect.kind(), FunctionEffectDiff::Kind::ConditionMismatch,
+ Old, New});
+ }
+ }
+ }
+
+ if (cmp < 0) {
+ // removal
+ FunctionEffectWithCondition Old = *POld;
+ push_back(FunctionEffectDiff{
+ Old.Effect.kind(), FunctionEffectDiff::Kind::Removed, Old, {}});
+ ++POld;
+ } else if (cmp > 0) {
+ // addition
+ FunctionEffectWithCondition New = *PNew;
+ push_back(FunctionEffectDiff{
+ New.Effect.kind(), FunctionEffectDiff::Kind::Added, {}, New});
+ ++PNew;
+ } else {
+ ++POld;
+ ++PNew;
+ }
+ }
+}
+
+bool FunctionEffectDiff::shouldDiagnoseConversion(
+ QualType SrcType, const FunctionEffectsRef &SrcFX, QualType DstType,
+ const FunctionEffectsRef &DstFX) const {
+
+ switch (EffectKind) {
+ case FunctionEffect::Kind::NonAllocating:
+ // nonallocating can't be added (spoofed) during a conversion, unless we
+ // have nonblocking.
+ if (DiffKind == Kind::Added) {
+ for (const auto &CFE : SrcFX) {
+ if (CFE.Effect.kind() == FunctionEffect::Kind::NonBlocking)
+ return false;
+ }
+ }
+ [[fallthrough]];
+ case FunctionEffect::Kind::NonBlocking:
+ // nonblocking can't be added (spoofed) during a conversion.
+ switch (DiffKind) {
+ case Kind::Added:
+ return true;
+ case Kind::Removed:
+ return false;
+ case Kind::ConditionMismatch:
+ // FIXME: Condition mismatches are too coarse right now -- expressions
+ // which are equivalent but don't have the same identity are detected as
+ // mismatches. We're going to diagnose those anyhow until expression
+ // matching is better.
+ return true;
+ }
+ case FunctionEffect::Kind::Blocking:
+ case FunctionEffect::Kind::Allocating:
+ return false;
+ case FunctionEffect::Kind::None:
+ break;
+ }
+ llvm_unreachable("unknown effect kind");
+}
+
+bool FunctionEffectDiff::shouldDiagnoseRedeclaration(
+ const FunctionDecl &OldFunction, const FunctionEffectsRef &OldFX,
+ const FunctionDecl &NewFunction, const FunctionEffectsRef &NewFX) const {
+ switch (EffectKind) {
+ case FunctionEffect::Kind::NonAllocating:
+ case FunctionEffect::Kind::NonBlocking:
+ // nonblocking/nonallocating can't be removed in a redeclaration.
+ switch (DiffKind) {
+ case Kind::Added:
+ return false; // No diagnostic.
+ case Kind::Removed:
+ return true; // Issue diagnostic.
+ case Kind::ConditionMismatch:
+ // All these forms of mismatches are diagnosed.
+ return true;
+ }
+ case FunctionEffect::Kind::Blocking:
+ case FunctionEffect::Kind::Allocating:
+ return false;
+ case FunctionEffect::Kind::None:
+ break;
+ }
+ llvm_unreachable("unknown effect kind");
+}
+
+FunctionEffectDiff::OverrideResult
+FunctionEffectDiff::shouldDiagnoseMethodOverride(
+ const CXXMethodDecl &OldMethod, const FunctionEffectsRef &OldFX,
+ const CXXMethodDecl &NewMethod, const FunctionEffectsRef &NewFX) const {
+ switch (EffectKind) {
+ case FunctionEffect::Kind::NonAllocating:
+ case FunctionEffect::Kind::NonBlocking:
+ switch (DiffKind) {
+
+ // If added on an override, that's fine and not diagnosed.
+ case Kind::Added:
+ return OverrideResult::NoAction;
+
+ // If missing from an override (removed), propagate from base to derived.
+ case Kind::Removed:
+ return OverrideResult::Merge;
+
+ // If there's a mismatch involving the effect's polarity or condition,
+ // issue a warning.
+ case Kind::ConditionMismatch:
+ return OverrideResult::Warn;
+ }
+
+ case FunctionEffect::Kind::Blocking:
+ case FunctionEffect::Kind::Allocating:
+ return OverrideResult::NoAction;
+
+ case FunctionEffect::Kind::None:
+ break;
+ }
+ llvm_unreachable("unknown effect kind");
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaAMDGPU.cpp b/contrib/llvm-project/clang/lib/Sema/SemaAMDGPU.cpp
new file mode 100644
index 000000000000..d11bc9eec330
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaAMDGPU.cpp
@@ -0,0 +1,311 @@
+//===------ SemaAMDGPU.cpp ------- AMDGPU target-specific routines --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to AMDGPU.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaAMDGPU.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Ownership.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include <cstdint>
+
+namespace clang {
+
+SemaAMDGPU::SemaAMDGPU(Sema &S) : SemaBase(S) {}
+
+bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
+ CallExpr *TheCall) {
+ // position of memory order and scope arguments in the builtin
+ unsigned OrderIndex, ScopeIndex;
+ switch (BuiltinID) {
+ case AMDGPU::BI__builtin_amdgcn_global_load_lds: {
+ constexpr const int SizeIdx = 2;
+ llvm::APSInt Size;
+ Expr *ArgExpr = TheCall->getArg(SizeIdx);
+ [[maybe_unused]] ExprResult R =
+ SemaRef.VerifyIntegerConstantExpression(ArgExpr, &Size);
+ assert(!R.isInvalid());
+ switch (Size.getSExtValue()) {
+ case 1:
+ case 2:
+ case 4:
+ return false;
+ default:
+ Diag(ArgExpr->getExprLoc(),
+ diag::err_amdgcn_global_load_lds_size_invalid_value)
+ << ArgExpr->getSourceRange();
+ Diag(ArgExpr->getExprLoc(),
+ diag::note_amdgcn_global_load_lds_size_valid_value)
+ << ArgExpr->getSourceRange();
+ return true;
+ }
+ }
+ case AMDGPU::BI__builtin_amdgcn_get_fpenv:
+ case AMDGPU::BI__builtin_amdgcn_set_fpenv:
+ return false;
+ case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
+ case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
+ case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
+ case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
+ OrderIndex = 2;
+ ScopeIndex = 3;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_fence:
+ OrderIndex = 0;
+ ScopeIndex = 1;
+ break;
+ default:
+ return false;
+ }
+
+ ExprResult Arg = TheCall->getArg(OrderIndex);
+ auto ArgExpr = Arg.get();
+ Expr::EvalResult ArgResult;
+
+ if (!ArgExpr->EvaluateAsInt(ArgResult, getASTContext()))
+ return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
+ << ArgExpr->getType();
+ auto Ord = ArgResult.Val.getInt().getZExtValue();
+
+ // Check validity of memory ordering as per C11 / C++11's memody model.
+ // Only fence needs check. Atomic dec/inc allow all memory orders.
+ if (!llvm::isValidAtomicOrderingCABI(Ord))
+ return Diag(ArgExpr->getBeginLoc(),
+ diag::warn_atomic_op_has_invalid_memory_order)
+ << 0 << ArgExpr->getSourceRange();
+ switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
+ case llvm::AtomicOrderingCABI::relaxed:
+ case llvm::AtomicOrderingCABI::consume:
+ if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
+ return Diag(ArgExpr->getBeginLoc(),
+ diag::warn_atomic_op_has_invalid_memory_order)
+ << 0 << ArgExpr->getSourceRange();
+ break;
+ case llvm::AtomicOrderingCABI::acquire:
+ case llvm::AtomicOrderingCABI::release:
+ case llvm::AtomicOrderingCABI::acq_rel:
+ case llvm::AtomicOrderingCABI::seq_cst:
+ break;
+ }
+
+ Arg = TheCall->getArg(ScopeIndex);
+ ArgExpr = Arg.get();
+ Expr::EvalResult ArgResult1;
+ // Check that sync scope is a constant literal
+ if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, getASTContext()))
+ return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
+ << ArgExpr->getType();
+
+ return false;
+}
+
+static bool
+checkAMDGPUFlatWorkGroupSizeArguments(Sema &S, Expr *MinExpr, Expr *MaxExpr,
+ const AMDGPUFlatWorkGroupSizeAttr &Attr) {
+ // Accept template arguments for now as they depend on something else.
+ // We'll get to check them when they eventually get instantiated.
+ if (MinExpr->isValueDependent() || MaxExpr->isValueDependent())
+ return false;
+
+ uint32_t Min = 0;
+ if (!S.checkUInt32Argument(Attr, MinExpr, Min, 0))
+ return true;
+
+ uint32_t Max = 0;
+ if (!S.checkUInt32Argument(Attr, MaxExpr, Max, 1))
+ return true;
+
+ if (Min == 0 && Max != 0) {
+ S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
+ << &Attr << 0;
+ return true;
+ }
+ if (Min > Max) {
+ S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
+ << &Attr << 1;
+ return true;
+ }
+
+ return false;
+}
+
+AMDGPUFlatWorkGroupSizeAttr *
+SemaAMDGPU::CreateAMDGPUFlatWorkGroupSizeAttr(const AttributeCommonInfo &CI,
+ Expr *MinExpr, Expr *MaxExpr) {
+ ASTContext &Context = getASTContext();
+ AMDGPUFlatWorkGroupSizeAttr TmpAttr(Context, CI, MinExpr, MaxExpr);
+
+ if (checkAMDGPUFlatWorkGroupSizeArguments(SemaRef, MinExpr, MaxExpr, TmpAttr))
+ return nullptr;
+ return ::new (Context)
+ AMDGPUFlatWorkGroupSizeAttr(Context, CI, MinExpr, MaxExpr);
+}
+
+void SemaAMDGPU::addAMDGPUFlatWorkGroupSizeAttr(Decl *D,
+ const AttributeCommonInfo &CI,
+ Expr *MinExpr, Expr *MaxExpr) {
+ if (auto *Attr = CreateAMDGPUFlatWorkGroupSizeAttr(CI, MinExpr, MaxExpr))
+ D->addAttr(Attr);
+}
+
+void SemaAMDGPU::handleAMDGPUFlatWorkGroupSizeAttr(Decl *D,
+ const ParsedAttr &AL) {
+ Expr *MinExpr = AL.getArgAsExpr(0);
+ Expr *MaxExpr = AL.getArgAsExpr(1);
+
+ addAMDGPUFlatWorkGroupSizeAttr(D, AL, MinExpr, MaxExpr);
+}
+
+static bool checkAMDGPUWavesPerEUArguments(Sema &S, Expr *MinExpr,
+ Expr *MaxExpr,
+ const AMDGPUWavesPerEUAttr &Attr) {
+ if (S.DiagnoseUnexpandedParameterPack(MinExpr) ||
+ (MaxExpr && S.DiagnoseUnexpandedParameterPack(MaxExpr)))
+ return true;
+
+ // Accept template arguments for now as they depend on something else.
+ // We'll get to check them when they eventually get instantiated.
+ if (MinExpr->isValueDependent() || (MaxExpr && MaxExpr->isValueDependent()))
+ return false;
+
+ uint32_t Min = 0;
+ if (!S.checkUInt32Argument(Attr, MinExpr, Min, 0))
+ return true;
+
+ uint32_t Max = 0;
+ if (MaxExpr && !S.checkUInt32Argument(Attr, MaxExpr, Max, 1))
+ return true;
+
+ if (Min == 0 && Max != 0) {
+ S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
+ << &Attr << 0;
+ return true;
+ }
+ if (Max != 0 && Min > Max) {
+ S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
+ << &Attr << 1;
+ return true;
+ }
+
+ return false;
+}
+
+AMDGPUWavesPerEUAttr *
+SemaAMDGPU::CreateAMDGPUWavesPerEUAttr(const AttributeCommonInfo &CI,
+ Expr *MinExpr, Expr *MaxExpr) {
+ ASTContext &Context = getASTContext();
+ AMDGPUWavesPerEUAttr TmpAttr(Context, CI, MinExpr, MaxExpr);
+
+ if (checkAMDGPUWavesPerEUArguments(SemaRef, MinExpr, MaxExpr, TmpAttr))
+ return nullptr;
+
+ return ::new (Context) AMDGPUWavesPerEUAttr(Context, CI, MinExpr, MaxExpr);
+}
+
+void SemaAMDGPU::addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
+ Expr *MinExpr, Expr *MaxExpr) {
+ if (auto *Attr = CreateAMDGPUWavesPerEUAttr(CI, MinExpr, MaxExpr))
+ D->addAttr(Attr);
+}
+
+void SemaAMDGPU::handleAMDGPUWavesPerEUAttr(Decl *D, const ParsedAttr &AL) {
+ if (!AL.checkAtLeastNumArgs(SemaRef, 1) || !AL.checkAtMostNumArgs(SemaRef, 2))
+ return;
+
+ Expr *MinExpr = AL.getArgAsExpr(0);
+ Expr *MaxExpr = (AL.getNumArgs() > 1) ? AL.getArgAsExpr(1) : nullptr;
+
+ addAMDGPUWavesPerEUAttr(D, AL, MinExpr, MaxExpr);
+}
+
+void SemaAMDGPU::handleAMDGPUNumSGPRAttr(Decl *D, const ParsedAttr &AL) {
+ uint32_t NumSGPR = 0;
+ Expr *NumSGPRExpr = AL.getArgAsExpr(0);
+ if (!SemaRef.checkUInt32Argument(AL, NumSGPRExpr, NumSGPR))
+ return;
+
+ D->addAttr(::new (getASTContext())
+ AMDGPUNumSGPRAttr(getASTContext(), AL, NumSGPR));
+}
+
+void SemaAMDGPU::handleAMDGPUNumVGPRAttr(Decl *D, const ParsedAttr &AL) {
+ uint32_t NumVGPR = 0;
+ Expr *NumVGPRExpr = AL.getArgAsExpr(0);
+ if (!SemaRef.checkUInt32Argument(AL, NumVGPRExpr, NumVGPR))
+ return;
+
+ D->addAttr(::new (getASTContext())
+ AMDGPUNumVGPRAttr(getASTContext(), AL, NumVGPR));
+}
+
+static bool
+checkAMDGPUMaxNumWorkGroupsArguments(Sema &S, Expr *XExpr, Expr *YExpr,
+ Expr *ZExpr,
+ const AMDGPUMaxNumWorkGroupsAttr &Attr) {
+ if (S.DiagnoseUnexpandedParameterPack(XExpr) ||
+ (YExpr && S.DiagnoseUnexpandedParameterPack(YExpr)) ||
+ (ZExpr && S.DiagnoseUnexpandedParameterPack(ZExpr)))
+ return true;
+
+ // Accept template arguments for now as they depend on something else.
+ // We'll get to check them when they eventually get instantiated.
+ if (XExpr->isValueDependent() || (YExpr && YExpr->isValueDependent()) ||
+ (ZExpr && ZExpr->isValueDependent()))
+ return false;
+
+ uint32_t NumWG = 0;
+ Expr *Exprs[3] = {XExpr, YExpr, ZExpr};
+ for (int i = 0; i < 3; i++) {
+ if (Exprs[i]) {
+ if (!S.checkUInt32Argument(Attr, Exprs[i], NumWG, i,
+ /*StrictlyUnsigned=*/true))
+ return true;
+ if (NumWG == 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_is_zero)
+ << &Attr << Exprs[i]->getSourceRange();
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+AMDGPUMaxNumWorkGroupsAttr *SemaAMDGPU::CreateAMDGPUMaxNumWorkGroupsAttr(
+ const AttributeCommonInfo &CI, Expr *XExpr, Expr *YExpr, Expr *ZExpr) {
+ ASTContext &Context = getASTContext();
+ AMDGPUMaxNumWorkGroupsAttr TmpAttr(Context, CI, XExpr, YExpr, ZExpr);
+
+ if (checkAMDGPUMaxNumWorkGroupsArguments(SemaRef, XExpr, YExpr, ZExpr,
+ TmpAttr))
+ return nullptr;
+
+ return ::new (Context)
+ AMDGPUMaxNumWorkGroupsAttr(Context, CI, XExpr, YExpr, ZExpr);
+}
+
+void SemaAMDGPU::addAMDGPUMaxNumWorkGroupsAttr(Decl *D,
+ const AttributeCommonInfo &CI,
+ Expr *XExpr, Expr *YExpr,
+ Expr *ZExpr) {
+ if (auto *Attr = CreateAMDGPUMaxNumWorkGroupsAttr(CI, XExpr, YExpr, ZExpr))
+ D->addAttr(Attr);
+}
+
+void SemaAMDGPU::handleAMDGPUMaxNumWorkGroupsAttr(Decl *D,
+ const ParsedAttr &AL) {
+ Expr *YExpr = (AL.getNumArgs() > 1) ? AL.getArgAsExpr(1) : nullptr;
+ Expr *ZExpr = (AL.getNumArgs() > 2) ? AL.getArgAsExpr(2) : nullptr;
+ addAMDGPUMaxNumWorkGroupsAttr(D, AL, AL.getArgAsExpr(0), YExpr, ZExpr);
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaAPINotes.cpp b/contrib/llvm-project/clang/lib/Sema/SemaAPINotes.cpp
new file mode 100644
index 000000000000..055e66a0c348
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaAPINotes.cpp
@@ -0,0 +1,1036 @@
+//===--- SemaAPINotes.cpp - API Notes Handling ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the mapping from API notes to declaration attributes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/APINotes/APINotesReader.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaSwift.h"
+#include <stack>
+
+using namespace clang;
+
+namespace {
+enum class IsActive_t : bool { Inactive, Active };
+enum class IsSubstitution_t : bool { Original, Replacement };
+
+struct VersionedInfoMetadata {
+ /// An empty version refers to unversioned metadata.
+ VersionTuple Version;
+ unsigned IsActive : 1;
+ unsigned IsReplacement : 1;
+
+ VersionedInfoMetadata(VersionTuple Version, IsActive_t Active,
+ IsSubstitution_t Replacement)
+ : Version(Version), IsActive(Active == IsActive_t::Active),
+ IsReplacement(Replacement == IsSubstitution_t::Replacement) {}
+};
+} // end anonymous namespace
+
+/// Determine whether this is a multi-level pointer type.
+static bool isIndirectPointerType(QualType Type) {
+ QualType Pointee = Type->getPointeeType();
+ if (Pointee.isNull())
+ return false;
+
+ return Pointee->isAnyPointerType() || Pointee->isObjCObjectPointerType() ||
+ Pointee->isMemberPointerType();
+}
+
+/// Apply nullability to the given declaration.
+static void applyNullability(Sema &S, Decl *D, NullabilityKind Nullability,
+ VersionedInfoMetadata Metadata) {
+ if (!Metadata.IsActive)
+ return;
+
+ auto GetModified =
+ [&](Decl *D, QualType QT,
+ NullabilityKind Nullability) -> std::optional<QualType> {
+ QualType Original = QT;
+ S.CheckImplicitNullabilityTypeSpecifier(QT, Nullability, D->getLocation(),
+ isa<ParmVarDecl>(D),
+ /*OverrideExisting=*/true);
+ return (QT.getTypePtr() != Original.getTypePtr()) ? std::optional(QT)
+ : std::nullopt;
+ };
+
+ if (auto Function = dyn_cast<FunctionDecl>(D)) {
+ if (auto Modified =
+ GetModified(D, Function->getReturnType(), Nullability)) {
+ const FunctionType *FnType = Function->getType()->castAs<FunctionType>();
+ if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(FnType))
+ Function->setType(S.Context.getFunctionType(
+ *Modified, proto->getParamTypes(), proto->getExtProtoInfo()));
+ else
+ Function->setType(
+ S.Context.getFunctionNoProtoType(*Modified, FnType->getExtInfo()));
+ }
+ } else if (auto Method = dyn_cast<ObjCMethodDecl>(D)) {
+ if (auto Modified = GetModified(D, Method->getReturnType(), Nullability)) {
+ Method->setReturnType(*Modified);
+
+ // Make it a context-sensitive keyword if we can.
+ if (!isIndirectPointerType(*Modified))
+ Method->setObjCDeclQualifier(Decl::ObjCDeclQualifier(
+ Method->getObjCDeclQualifier() | Decl::OBJC_TQ_CSNullability));
+ }
+ } else if (auto Value = dyn_cast<ValueDecl>(D)) {
+ if (auto Modified = GetModified(D, Value->getType(), Nullability)) {
+ Value->setType(*Modified);
+
+ // Make it a context-sensitive keyword if we can.
+ if (auto Parm = dyn_cast<ParmVarDecl>(D)) {
+ if (Parm->isObjCMethodParameter() && !isIndirectPointerType(*Modified))
+ Parm->setObjCDeclQualifier(Decl::ObjCDeclQualifier(
+ Parm->getObjCDeclQualifier() | Decl::OBJC_TQ_CSNullability));
+ }
+ }
+ } else if (auto Property = dyn_cast<ObjCPropertyDecl>(D)) {
+ if (auto Modified = GetModified(D, Property->getType(), Nullability)) {
+ Property->setType(*Modified, Property->getTypeSourceInfo());
+
+ // Make it a property attribute if we can.
+ if (!isIndirectPointerType(*Modified))
+ Property->setPropertyAttributes(
+ ObjCPropertyAttribute::kind_null_resettable);
+ }
+ }
+}
+
+/// Copy a string into ASTContext-allocated memory.
+static StringRef ASTAllocateString(ASTContext &Ctx, StringRef String) {
+ void *mem = Ctx.Allocate(String.size(), alignof(char *));
+ memcpy(mem, String.data(), String.size());
+ return StringRef(static_cast<char *>(mem), String.size());
+}
+
+static AttributeCommonInfo getPlaceholderAttrInfo() {
+ return AttributeCommonInfo(SourceRange(),
+ AttributeCommonInfo::UnknownAttribute,
+ {AttributeCommonInfo::AS_GNU,
+ /*Spelling*/ 0, /*IsAlignas*/ false,
+ /*IsRegularKeywordAttribute*/ false});
+}
+
+namespace {
+template <typename A> struct AttrKindFor {};
+
+#define ATTR(X) \
+ template <> struct AttrKindFor<X##Attr> { \
+ static const attr::Kind value = attr::X; \
+ };
+#include "clang/Basic/AttrList.inc"
+
+/// Handle an attribute introduced by API notes.
+///
+/// \param IsAddition Whether we should add a new attribute
+/// (otherwise, we might remove an existing attribute).
+/// \param CreateAttr Create the new attribute to be added.
+template <typename A>
+void handleAPINotedAttribute(
+ Sema &S, Decl *D, bool IsAddition, VersionedInfoMetadata Metadata,
+ llvm::function_ref<A *()> CreateAttr,
+ llvm::function_ref<Decl::attr_iterator(const Decl *)> GetExistingAttr) {
+ if (Metadata.IsActive) {
+ auto Existing = GetExistingAttr(D);
+ if (Existing != D->attr_end()) {
+ // Remove the existing attribute, and treat it as a superseded
+ // non-versioned attribute.
+ auto *Versioned = SwiftVersionedAdditionAttr::CreateImplicit(
+ S.Context, Metadata.Version, *Existing, /*IsReplacedByActive*/ true);
+
+ D->getAttrs().erase(Existing);
+ D->addAttr(Versioned);
+ }
+
+ // If we're supposed to add a new attribute, do so.
+ if (IsAddition) {
+ if (auto Attr = CreateAttr())
+ D->addAttr(Attr);
+ }
+
+ return;
+ }
+ if (IsAddition) {
+ if (auto Attr = CreateAttr()) {
+ auto *Versioned = SwiftVersionedAdditionAttr::CreateImplicit(
+ S.Context, Metadata.Version, Attr,
+ /*IsReplacedByActive*/ Metadata.IsReplacement);
+ D->addAttr(Versioned);
+ }
+ } else {
+ // FIXME: This isn't preserving enough information for things like
+ // availability, where we're trying to remove a /specific/ kind of
+ // attribute.
+ auto *Versioned = SwiftVersionedRemovalAttr::CreateImplicit(
+ S.Context, Metadata.Version, AttrKindFor<A>::value,
+ /*IsReplacedByActive*/ Metadata.IsReplacement);
+ D->addAttr(Versioned);
+ }
+}
+
+template <typename A>
+void handleAPINotedAttribute(Sema &S, Decl *D, bool ShouldAddAttribute,
+ VersionedInfoMetadata Metadata,
+ llvm::function_ref<A *()> CreateAttr) {
+ handleAPINotedAttribute<A>(
+ S, D, ShouldAddAttribute, Metadata, CreateAttr, [](const Decl *D) {
+ return llvm::find_if(D->attrs(),
+ [](const Attr *Next) { return isa<A>(Next); });
+ });
+}
+} // namespace
+
+template <typename A>
+static void handleAPINotedRetainCountAttribute(Sema &S, Decl *D,
+ bool ShouldAddAttribute,
+ VersionedInfoMetadata Metadata) {
+ // The template argument has a default to make the "removal" case more
+ // concise; it doesn't matter /which/ attribute is being removed.
+ handleAPINotedAttribute<A>(
+ S, D, ShouldAddAttribute, Metadata,
+ [&] { return new (S.Context) A(S.Context, getPlaceholderAttrInfo()); },
+ [](const Decl *D) -> Decl::attr_iterator {
+ return llvm::find_if(D->attrs(), [](const Attr *Next) -> bool {
+ return isa<CFReturnsRetainedAttr>(Next) ||
+ isa<CFReturnsNotRetainedAttr>(Next) ||
+ isa<NSReturnsRetainedAttr>(Next) ||
+ isa<NSReturnsNotRetainedAttr>(Next) ||
+ isa<CFAuditedTransferAttr>(Next);
+ });
+ });
+}
+
+static void handleAPINotedRetainCountConvention(
+ Sema &S, Decl *D, VersionedInfoMetadata Metadata,
+ std::optional<api_notes::RetainCountConventionKind> Convention) {
+ if (!Convention)
+ return;
+ switch (*Convention) {
+ case api_notes::RetainCountConventionKind::None:
+ if (isa<FunctionDecl>(D)) {
+ handleAPINotedRetainCountAttribute<CFUnknownTransferAttr>(
+ S, D, /*shouldAddAttribute*/ true, Metadata);
+ } else {
+ handleAPINotedRetainCountAttribute<CFReturnsRetainedAttr>(
+ S, D, /*shouldAddAttribute*/ false, Metadata);
+ }
+ break;
+ case api_notes::RetainCountConventionKind::CFReturnsRetained:
+ handleAPINotedRetainCountAttribute<CFReturnsRetainedAttr>(
+ S, D, /*shouldAddAttribute*/ true, Metadata);
+ break;
+ case api_notes::RetainCountConventionKind::CFReturnsNotRetained:
+ handleAPINotedRetainCountAttribute<CFReturnsNotRetainedAttr>(
+ S, D, /*shouldAddAttribute*/ true, Metadata);
+ break;
+ case api_notes::RetainCountConventionKind::NSReturnsRetained:
+ handleAPINotedRetainCountAttribute<NSReturnsRetainedAttr>(
+ S, D, /*shouldAddAttribute*/ true, Metadata);
+ break;
+ case api_notes::RetainCountConventionKind::NSReturnsNotRetained:
+ handleAPINotedRetainCountAttribute<NSReturnsNotRetainedAttr>(
+ S, D, /*shouldAddAttribute*/ true, Metadata);
+ break;
+ }
+}
+
+static void ProcessAPINotes(Sema &S, Decl *D,
+ const api_notes::CommonEntityInfo &Info,
+ VersionedInfoMetadata Metadata) {
+ // Availability
+ if (Info.Unavailable) {
+ handleAPINotedAttribute<UnavailableAttr>(S, D, true, Metadata, [&] {
+ return new (S.Context)
+ UnavailableAttr(S.Context, getPlaceholderAttrInfo(),
+ ASTAllocateString(S.Context, Info.UnavailableMsg));
+ });
+ }
+
+ if (Info.UnavailableInSwift) {
+ handleAPINotedAttribute<AvailabilityAttr>(
+ S, D, true, Metadata,
+ [&] {
+ return new (S.Context) AvailabilityAttr(
+ S.Context, getPlaceholderAttrInfo(),
+ &S.Context.Idents.get("swift"), VersionTuple(), VersionTuple(),
+ VersionTuple(),
+ /*Unavailable=*/true,
+ ASTAllocateString(S.Context, Info.UnavailableMsg),
+ /*Strict=*/false,
+ /*Replacement=*/StringRef(),
+ /*Priority=*/Sema::AP_Explicit,
+ /*Environment=*/nullptr);
+ },
+ [](const Decl *D) {
+ return llvm::find_if(D->attrs(), [](const Attr *next) -> bool {
+ if (const auto *AA = dyn_cast<AvailabilityAttr>(next))
+ if (const auto *II = AA->getPlatform())
+ return II->isStr("swift");
+ return false;
+ });
+ });
+ }
+
+ // swift_private
+ if (auto SwiftPrivate = Info.isSwiftPrivate()) {
+ handleAPINotedAttribute<SwiftPrivateAttr>(
+ S, D, *SwiftPrivate, Metadata, [&] {
+ return new (S.Context)
+ SwiftPrivateAttr(S.Context, getPlaceholderAttrInfo());
+ });
+ }
+
+ // swift_name
+ if (!Info.SwiftName.empty()) {
+ handleAPINotedAttribute<SwiftNameAttr>(
+ S, D, true, Metadata, [&]() -> SwiftNameAttr * {
+ AttributeFactory AF{};
+ AttributePool AP{AF};
+ auto &C = S.getASTContext();
+ ParsedAttr *SNA =
+ AP.create(&C.Idents.get("swift_name"), SourceRange(), nullptr,
+ SourceLocation(), nullptr, nullptr, nullptr,
+ ParsedAttr::Form::GNU());
+
+ if (!S.Swift().DiagnoseName(D, Info.SwiftName, D->getLocation(), *SNA,
+ /*IsAsync=*/false))
+ return nullptr;
+
+ return new (S.Context)
+ SwiftNameAttr(S.Context, getPlaceholderAttrInfo(),
+ ASTAllocateString(S.Context, Info.SwiftName));
+ });
+ }
+}
+
+static void ProcessAPINotes(Sema &S, Decl *D,
+ const api_notes::CommonTypeInfo &Info,
+ VersionedInfoMetadata Metadata) {
+ // swift_bridge
+ if (auto SwiftBridge = Info.getSwiftBridge()) {
+ handleAPINotedAttribute<SwiftBridgeAttr>(
+ S, D, !SwiftBridge->empty(), Metadata, [&] {
+ return new (S.Context)
+ SwiftBridgeAttr(S.Context, getPlaceholderAttrInfo(),
+ ASTAllocateString(S.Context, *SwiftBridge));
+ });
+ }
+
+ // ns_error_domain
+ if (auto NSErrorDomain = Info.getNSErrorDomain()) {
+ handleAPINotedAttribute<NSErrorDomainAttr>(
+ S, D, !NSErrorDomain->empty(), Metadata, [&] {
+ return new (S.Context)
+ NSErrorDomainAttr(S.Context, getPlaceholderAttrInfo(),
+ &S.Context.Idents.get(*NSErrorDomain));
+ });
+ }
+
+ ProcessAPINotes(S, D, static_cast<const api_notes::CommonEntityInfo &>(Info),
+ Metadata);
+}
+
+/// Check that the replacement type provided by API notes is reasonable.
+///
+/// This is a very weak form of ABI check.
+static bool checkAPINotesReplacementType(Sema &S, SourceLocation Loc,
+ QualType OrigType,
+ QualType ReplacementType) {
+ if (S.Context.getTypeSize(OrigType) !=
+ S.Context.getTypeSize(ReplacementType)) {
+ S.Diag(Loc, diag::err_incompatible_replacement_type)
+ << ReplacementType << OrigType;
+ return true;
+ }
+
+ return false;
+}
+
+/// Process API notes for a variable or property.
+static void ProcessAPINotes(Sema &S, Decl *D,
+ const api_notes::VariableInfo &Info,
+ VersionedInfoMetadata Metadata) {
+ // Type override.
+ if (Metadata.IsActive && !Info.getType().empty() &&
+ S.ParseTypeFromStringCallback) {
+ auto ParsedType = S.ParseTypeFromStringCallback(
+ Info.getType(), "<API Notes>", D->getLocation());
+ if (ParsedType.isUsable()) {
+ QualType Type = Sema::GetTypeFromParser(ParsedType.get());
+ auto TypeInfo =
+ S.Context.getTrivialTypeSourceInfo(Type, D->getLocation());
+
+ if (auto Var = dyn_cast<VarDecl>(D)) {
+ // Make adjustments to parameter types.
+ if (isa<ParmVarDecl>(Var)) {
+ Type = S.ObjC().AdjustParameterTypeForObjCAutoRefCount(
+ Type, D->getLocation(), TypeInfo);
+ Type = S.Context.getAdjustedParameterType(Type);
+ }
+
+ if (!checkAPINotesReplacementType(S, Var->getLocation(), Var->getType(),
+ Type)) {
+ Var->setType(Type);
+ Var->setTypeSourceInfo(TypeInfo);
+ }
+ } else if (auto Property = dyn_cast<ObjCPropertyDecl>(D)) {
+ if (!checkAPINotesReplacementType(S, Property->getLocation(),
+ Property->getType(), Type))
+ Property->setType(Type, TypeInfo);
+
+ } else
+ llvm_unreachable("API notes allowed a type on an unknown declaration");
+ }
+ }
+
+ // Nullability.
+ if (auto Nullability = Info.getNullability())
+ applyNullability(S, D, *Nullability, Metadata);
+
+ // Handle common entity information.
+ ProcessAPINotes(S, D, static_cast<const api_notes::CommonEntityInfo &>(Info),
+ Metadata);
+}
+
+/// Process API notes for a parameter.
+static void ProcessAPINotes(Sema &S, ParmVarDecl *D,
+ const api_notes::ParamInfo &Info,
+ VersionedInfoMetadata Metadata) {
+ // noescape
+ if (auto NoEscape = Info.isNoEscape())
+ handleAPINotedAttribute<NoEscapeAttr>(S, D, *NoEscape, Metadata, [&] {
+ return new (S.Context) NoEscapeAttr(S.Context, getPlaceholderAttrInfo());
+ });
+
+ // Retain count convention
+ handleAPINotedRetainCountConvention(S, D, Metadata,
+ Info.getRetainCountConvention());
+
+ // Handle common entity information.
+ ProcessAPINotes(S, D, static_cast<const api_notes::VariableInfo &>(Info),
+ Metadata);
+}
+
+/// Process API notes for a global variable.
+static void ProcessAPINotes(Sema &S, VarDecl *D,
+ const api_notes::GlobalVariableInfo &Info,
+ VersionedInfoMetadata metadata) {
+ // Handle common entity information.
+ ProcessAPINotes(S, D, static_cast<const api_notes::VariableInfo &>(Info),
+ metadata);
+}
+
+/// Process API notes for an Objective-C property.
+static void ProcessAPINotes(Sema &S, ObjCPropertyDecl *D,
+ const api_notes::ObjCPropertyInfo &Info,
+ VersionedInfoMetadata Metadata) {
+ // Handle common entity information.
+ ProcessAPINotes(S, D, static_cast<const api_notes::VariableInfo &>(Info),
+ Metadata);
+
+ if (auto AsAccessors = Info.getSwiftImportAsAccessors()) {
+ handleAPINotedAttribute<SwiftImportPropertyAsAccessorsAttr>(
+ S, D, *AsAccessors, Metadata, [&] {
+ return new (S.Context) SwiftImportPropertyAsAccessorsAttr(
+ S.Context, getPlaceholderAttrInfo());
+ });
+ }
+}
+
+namespace {
+typedef llvm::PointerUnion<FunctionDecl *, ObjCMethodDecl *> FunctionOrMethod;
+}
+
+/// Process API notes for a function or method.
+static void ProcessAPINotes(Sema &S, FunctionOrMethod AnyFunc,
+ const api_notes::FunctionInfo &Info,
+ VersionedInfoMetadata Metadata) {
+ // Find the declaration itself.
+ FunctionDecl *FD = AnyFunc.dyn_cast<FunctionDecl *>();
+ Decl *D = FD;
+ ObjCMethodDecl *MD = nullptr;
+ if (!D) {
+ MD = AnyFunc.get<ObjCMethodDecl *>();
+ D = MD;
+ }
+
+ assert((FD || MD) && "Expecting Function or ObjCMethod");
+
+ // Nullability of return type.
+ if (Info.NullabilityAudited)
+ applyNullability(S, D, Info.getReturnTypeInfo(), Metadata);
+
+ // Parameters.
+ unsigned NumParams = FD ? FD->getNumParams() : MD->param_size();
+
+ bool AnyTypeChanged = false;
+ for (unsigned I = 0; I != NumParams; ++I) {
+ ParmVarDecl *Param = FD ? FD->getParamDecl(I) : MD->param_begin()[I];
+ QualType ParamTypeBefore = Param->getType();
+
+ if (I < Info.Params.size())
+ ProcessAPINotes(S, Param, Info.Params[I], Metadata);
+
+ // Nullability.
+ if (Info.NullabilityAudited)
+ applyNullability(S, Param, Info.getParamTypeInfo(I), Metadata);
+
+ if (ParamTypeBefore.getAsOpaquePtr() != Param->getType().getAsOpaquePtr())
+ AnyTypeChanged = true;
+ }
+
+ // Result type override.
+ QualType OverriddenResultType;
+ if (Metadata.IsActive && !Info.ResultType.empty() &&
+ S.ParseTypeFromStringCallback) {
+ auto ParsedType = S.ParseTypeFromStringCallback(
+ Info.ResultType, "<API Notes>", D->getLocation());
+ if (ParsedType.isUsable()) {
+ QualType ResultType = Sema::GetTypeFromParser(ParsedType.get());
+
+ if (MD) {
+ if (!checkAPINotesReplacementType(S, D->getLocation(),
+ MD->getReturnType(), ResultType)) {
+ auto ResultTypeInfo =
+ S.Context.getTrivialTypeSourceInfo(ResultType, D->getLocation());
+ MD->setReturnType(ResultType);
+ MD->setReturnTypeSourceInfo(ResultTypeInfo);
+ }
+ } else if (!checkAPINotesReplacementType(
+ S, FD->getLocation(), FD->getReturnType(), ResultType)) {
+ OverriddenResultType = ResultType;
+ AnyTypeChanged = true;
+ }
+ }
+ }
+
+ // If the result type or any of the parameter types changed for a function
+ // declaration, we have to rebuild the type.
+ if (FD && AnyTypeChanged) {
+ if (const auto *fnProtoType = FD->getType()->getAs<FunctionProtoType>()) {
+ if (OverriddenResultType.isNull())
+ OverriddenResultType = fnProtoType->getReturnType();
+
+ SmallVector<QualType, 4> ParamTypes;
+ for (auto Param : FD->parameters())
+ ParamTypes.push_back(Param->getType());
+
+ FD->setType(S.Context.getFunctionType(OverriddenResultType, ParamTypes,
+ fnProtoType->getExtProtoInfo()));
+ } else if (!OverriddenResultType.isNull()) {
+ const auto *FnNoProtoType = FD->getType()->castAs<FunctionNoProtoType>();
+ FD->setType(S.Context.getFunctionNoProtoType(
+ OverriddenResultType, FnNoProtoType->getExtInfo()));
+ }
+ }
+
+ // Retain count convention
+ handleAPINotedRetainCountConvention(S, D, Metadata,
+ Info.getRetainCountConvention());
+
+ // Handle common entity information.
+ ProcessAPINotes(S, D, static_cast<const api_notes::CommonEntityInfo &>(Info),
+ Metadata);
+}
+
+/// Process API notes for a C++ method.
+static void ProcessAPINotes(Sema &S, CXXMethodDecl *Method,
+ const api_notes::CXXMethodInfo &Info,
+ VersionedInfoMetadata Metadata) {
+ ProcessAPINotes(S, (FunctionOrMethod)Method, Info, Metadata);
+}
+
+/// Process API notes for a global function.
+static void ProcessAPINotes(Sema &S, FunctionDecl *D,
+ const api_notes::GlobalFunctionInfo &Info,
+ VersionedInfoMetadata Metadata) {
+ // Handle common function information.
+ ProcessAPINotes(S, FunctionOrMethod(D),
+ static_cast<const api_notes::FunctionInfo &>(Info), Metadata);
+}
+
+/// Process API notes for an enumerator.
+static void ProcessAPINotes(Sema &S, EnumConstantDecl *D,
+ const api_notes::EnumConstantInfo &Info,
+ VersionedInfoMetadata Metadata) {
+ // Handle common information.
+ ProcessAPINotes(S, D, static_cast<const api_notes::CommonEntityInfo &>(Info),
+ Metadata);
+}
+
+/// Process API notes for an Objective-C method.
+static void ProcessAPINotes(Sema &S, ObjCMethodDecl *D,
+ const api_notes::ObjCMethodInfo &Info,
+ VersionedInfoMetadata Metadata) {
+ // Designated initializers.
+ if (Info.DesignatedInit) {
+ handleAPINotedAttribute<ObjCDesignatedInitializerAttr>(
+ S, D, true, Metadata, [&] {
+ if (ObjCInterfaceDecl *IFace = D->getClassInterface())
+ IFace->setHasDesignatedInitializers();
+
+ return new (S.Context) ObjCDesignatedInitializerAttr(
+ S.Context, getPlaceholderAttrInfo());
+ });
+ }
+
+ // Handle common function information.
+ ProcessAPINotes(S, FunctionOrMethod(D),
+ static_cast<const api_notes::FunctionInfo &>(Info), Metadata);
+}
+
+/// Process API notes for a tag.
+static void ProcessAPINotes(Sema &S, TagDecl *D, const api_notes::TagInfo &Info,
+ VersionedInfoMetadata Metadata) {
+ if (auto ImportAs = Info.SwiftImportAs)
+ D->addAttr(SwiftAttrAttr::Create(S.Context, "import_" + ImportAs.value()));
+
+ if (auto RetainOp = Info.SwiftRetainOp)
+ D->addAttr(SwiftAttrAttr::Create(S.Context, "retain:" + RetainOp.value()));
+
+ if (auto ReleaseOp = Info.SwiftReleaseOp)
+ D->addAttr(
+ SwiftAttrAttr::Create(S.Context, "release:" + ReleaseOp.value()));
+
+ if (auto Copyable = Info.isSwiftCopyable()) {
+ if (!*Copyable)
+ D->addAttr(SwiftAttrAttr::Create(S.Context, "~Copyable"));
+ }
+
+ if (auto Extensibility = Info.EnumExtensibility) {
+ using api_notes::EnumExtensibilityKind;
+ bool ShouldAddAttribute = (*Extensibility != EnumExtensibilityKind::None);
+ handleAPINotedAttribute<EnumExtensibilityAttr>(
+ S, D, ShouldAddAttribute, Metadata, [&] {
+ EnumExtensibilityAttr::Kind kind;
+ switch (*Extensibility) {
+ case EnumExtensibilityKind::None:
+ llvm_unreachable("remove only");
+ case EnumExtensibilityKind::Open:
+ kind = EnumExtensibilityAttr::Open;
+ break;
+ case EnumExtensibilityKind::Closed:
+ kind = EnumExtensibilityAttr::Closed;
+ break;
+ }
+ return new (S.Context)
+ EnumExtensibilityAttr(S.Context, getPlaceholderAttrInfo(), kind);
+ });
+ }
+
+ if (auto FlagEnum = Info.isFlagEnum()) {
+ handleAPINotedAttribute<FlagEnumAttr>(S, D, *FlagEnum, Metadata, [&] {
+ return new (S.Context) FlagEnumAttr(S.Context, getPlaceholderAttrInfo());
+ });
+ }
+
+ // Handle common type information.
+ ProcessAPINotes(S, D, static_cast<const api_notes::CommonTypeInfo &>(Info),
+ Metadata);
+}
+
+/// Process API notes for a typedef.
+static void ProcessAPINotes(Sema &S, TypedefNameDecl *D,
+ const api_notes::TypedefInfo &Info,
+ VersionedInfoMetadata Metadata) {
+ // swift_wrapper
+ using SwiftWrapperKind = api_notes::SwiftNewTypeKind;
+
+ if (auto SwiftWrapper = Info.SwiftWrapper) {
+ handleAPINotedAttribute<SwiftNewTypeAttr>(
+ S, D, *SwiftWrapper != SwiftWrapperKind::None, Metadata, [&] {
+ SwiftNewTypeAttr::NewtypeKind Kind;
+ switch (*SwiftWrapper) {
+ case SwiftWrapperKind::None:
+ llvm_unreachable("Shouldn't build an attribute");
+
+ case SwiftWrapperKind::Struct:
+ Kind = SwiftNewTypeAttr::NK_Struct;
+ break;
+
+ case SwiftWrapperKind::Enum:
+ Kind = SwiftNewTypeAttr::NK_Enum;
+ break;
+ }
+ AttributeCommonInfo SyntaxInfo{
+ SourceRange(),
+ AttributeCommonInfo::AT_SwiftNewType,
+ {AttributeCommonInfo::AS_GNU, SwiftNewTypeAttr::GNU_swift_wrapper,
+ /*IsAlignas*/ false, /*IsRegularKeywordAttribute*/ false}};
+ return new (S.Context) SwiftNewTypeAttr(S.Context, SyntaxInfo, Kind);
+ });
+ }
+
+ // Handle common type information.
+ ProcessAPINotes(S, D, static_cast<const api_notes::CommonTypeInfo &>(Info),
+ Metadata);
+}
+
+/// Process API notes for an Objective-C class or protocol.
+static void ProcessAPINotes(Sema &S, ObjCContainerDecl *D,
+ const api_notes::ContextInfo &Info,
+ VersionedInfoMetadata Metadata) {
+ // Handle common type information.
+ ProcessAPINotes(S, D, static_cast<const api_notes::CommonTypeInfo &>(Info),
+ Metadata);
+}
+
+/// Process API notes for an Objective-C class.
+static void ProcessAPINotes(Sema &S, ObjCInterfaceDecl *D,
+ const api_notes::ContextInfo &Info,
+ VersionedInfoMetadata Metadata) {
+ if (auto AsNonGeneric = Info.getSwiftImportAsNonGeneric()) {
+ handleAPINotedAttribute<SwiftImportAsNonGenericAttr>(
+ S, D, *AsNonGeneric, Metadata, [&] {
+ return new (S.Context)
+ SwiftImportAsNonGenericAttr(S.Context, getPlaceholderAttrInfo());
+ });
+ }
+
+ if (auto ObjcMembers = Info.getSwiftObjCMembers()) {
+ handleAPINotedAttribute<SwiftObjCMembersAttr>(
+ S, D, *ObjcMembers, Metadata, [&] {
+ return new (S.Context)
+ SwiftObjCMembersAttr(S.Context, getPlaceholderAttrInfo());
+ });
+ }
+
+ // Handle information common to Objective-C classes and protocols.
+ ProcessAPINotes(S, static_cast<clang::ObjCContainerDecl *>(D), Info,
+ Metadata);
+}
+
+/// If we're applying API notes with an active, non-default version, and the
+/// versioned API notes have a SwiftName but the declaration normally wouldn't
+/// have one, add a removal attribute to make it clear that the new SwiftName
+/// attribute only applies to the active version of \p D, not to all versions.
+///
+/// This must be run \em before processing API notes for \p D, because otherwise
+/// any existing SwiftName attribute will have been packaged up in a
+/// SwiftVersionedAdditionAttr.
+template <typename SpecificInfo>
+static void maybeAttachUnversionedSwiftName(
+ Sema &S, Decl *D,
+ const api_notes::APINotesReader::VersionedInfo<SpecificInfo> Info) {
+ if (D->hasAttr<SwiftNameAttr>())
+ return;
+ if (!Info.getSelected())
+ return;
+
+ // Is the active slice versioned, and does it set a Swift name?
+ VersionTuple SelectedVersion;
+ SpecificInfo SelectedInfoSlice;
+ std::tie(SelectedVersion, SelectedInfoSlice) = Info[*Info.getSelected()];
+ if (SelectedVersion.empty())
+ return;
+ if (SelectedInfoSlice.SwiftName.empty())
+ return;
+
+ // Does the unversioned slice /not/ set a Swift name?
+ for (const auto &VersionAndInfoSlice : Info) {
+ if (!VersionAndInfoSlice.first.empty())
+ continue;
+ if (!VersionAndInfoSlice.second.SwiftName.empty())
+ return;
+ }
+
+ // Then explicitly call that out with a removal attribute.
+ VersionedInfoMetadata DummyFutureMetadata(
+ SelectedVersion, IsActive_t::Inactive, IsSubstitution_t::Replacement);
+ handleAPINotedAttribute<SwiftNameAttr>(
+ S, D, /*add*/ false, DummyFutureMetadata, []() -> SwiftNameAttr * {
+ llvm_unreachable("should not try to add an attribute here");
+ });
+}
+
+/// Processes all versions of versioned API notes.
+///
+/// Just dispatches to the various ProcessAPINotes functions in this file.
+template <typename SpecificDecl, typename SpecificInfo>
+static void ProcessVersionedAPINotes(
+ Sema &S, SpecificDecl *D,
+ const api_notes::APINotesReader::VersionedInfo<SpecificInfo> Info) {
+
+ maybeAttachUnversionedSwiftName(S, D, Info);
+
+ unsigned Selected = Info.getSelected().value_or(Info.size());
+
+ VersionTuple Version;
+ SpecificInfo InfoSlice;
+ for (unsigned i = 0, e = Info.size(); i != e; ++i) {
+ std::tie(Version, InfoSlice) = Info[i];
+ auto Active = (i == Selected) ? IsActive_t::Active : IsActive_t::Inactive;
+ auto Replacement = IsSubstitution_t::Original;
+ if (Active == IsActive_t::Inactive && Version.empty()) {
+ Replacement = IsSubstitution_t::Replacement;
+ Version = Info[Selected].first;
+ }
+ ProcessAPINotes(S, D, InfoSlice,
+ VersionedInfoMetadata(Version, Active, Replacement));
+ }
+}
+
+/// Process API notes that are associated with this declaration, mapping them
+/// to attributes as appropriate.
+///
+/// Dispatches on the declaration kind and its context (file scope, enum,
+/// Objective-C container, C++ record), looks the entity up in every API notes
+/// reader covering the declaration's location, and applies all versioned
+/// results via ProcessVersionedAPINotes.
+void Sema::ProcessAPINotes(Decl *D) {
+  if (!D)
+    return;
+
+  // Resolves the API-notes context for a (possibly nested) C++ namespace by
+  // looking up each enclosing non-inline namespace name, outermost first.
+  auto GetNamespaceContext =
+      [&](DeclContext *DC) -> std::optional<api_notes::Context> {
+    if (auto NamespaceContext = dyn_cast<NamespaceDecl>(DC)) {
+      for (auto Reader :
+           APINotes.findAPINotes(NamespaceContext->getLocation())) {
+        // Retrieve the context ID for the parent namespace of the decl.
+        std::stack<NamespaceDecl *> NamespaceStack;
+        {
+          // Walk from the innermost namespace outward; the stack reverses the
+          // order so lookups run outermost-first below.
+          for (auto CurrentNamespace = NamespaceContext; CurrentNamespace;
+               CurrentNamespace =
+                   dyn_cast<NamespaceDecl>(CurrentNamespace->getParent())) {
+            // Inline namespaces are transparent for API-notes naming.
+            if (!CurrentNamespace->isInlineNamespace())
+              NamespaceStack.push(CurrentNamespace);
+          }
+        }
+        std::optional<api_notes::ContextID> NamespaceID;
+        while (!NamespaceStack.empty()) {
+          auto CurrentNamespace = NamespaceStack.top();
+          NamespaceStack.pop();
+          // Each lookup is scoped to the previously-resolved parent namespace.
+          NamespaceID = Reader->lookupNamespaceID(CurrentNamespace->getName(),
+                                                  NamespaceID);
+          if (!NamespaceID)
+            break;
+        }
+        if (NamespaceID)
+          return api_notes::Context(*NamespaceID,
+                                    api_notes::ContextKind::Namespace);
+      }
+    }
+    return std::nullopt;
+  };
+
+  // Globals.
+  if (D->getDeclContext()->isFileContext() ||
+      D->getDeclContext()->isNamespace() ||
+      D->getDeclContext()->isExternCContext() ||
+      D->getDeclContext()->isExternCXXContext()) {
+    std::optional<api_notes::Context> APINotesContext =
+        GetNamespaceContext(D->getDeclContext());
+    // Global variables.
+    if (auto VD = dyn_cast<VarDecl>(D)) {
+      for (auto Reader : APINotes.findAPINotes(D->getLocation())) {
+        auto Info =
+            Reader->lookupGlobalVariable(VD->getName(), APINotesContext);
+        ProcessVersionedAPINotes(*this, VD, Info);
+      }
+
+      return;
+    }
+
+    // Global functions.
+    if (auto FD = dyn_cast<FunctionDecl>(D)) {
+      // Only plain-identifier functions can be named in API notes (not
+      // operators, constructors, etc.).
+      if (FD->getDeclName().isIdentifier()) {
+        for (auto Reader : APINotes.findAPINotes(D->getLocation())) {
+          auto Info =
+              Reader->lookupGlobalFunction(FD->getName(), APINotesContext);
+          ProcessVersionedAPINotes(*this, FD, Info);
+        }
+      }
+
+      return;
+    }
+
+    // Objective-C classes.
+    if (auto Class = dyn_cast<ObjCInterfaceDecl>(D)) {
+      for (auto Reader : APINotes.findAPINotes(D->getLocation())) {
+        auto Info = Reader->lookupObjCClassInfo(Class->getName());
+        ProcessVersionedAPINotes(*this, Class, Info);
+      }
+
+      return;
+    }
+
+    // Objective-C protocols.
+    if (auto Protocol = dyn_cast<ObjCProtocolDecl>(D)) {
+      for (auto Reader : APINotes.findAPINotes(D->getLocation())) {
+        auto Info = Reader->lookupObjCProtocolInfo(Protocol->getName());
+        ProcessVersionedAPINotes(*this, Protocol, Info);
+      }
+
+      return;
+    }
+
+    // Tags
+    if (auto Tag = dyn_cast<TagDecl>(D)) {
+      std::string LookupName = Tag->getName().str();
+
+      // Use the source location to discern if this Tag is an OPTIONS macro.
+      // For now we would like to limit this trick of looking up the APINote tag
+      // using the EnumDecl's QualType in the case where the enum is anonymous.
+      // This is only being used to support APINotes lookup for C++
+      // NS/CF_OPTIONS when C++-Interop is enabled.
+      std::string MacroName =
+          LookupName.empty() && Tag->getOuterLocStart().isMacroID()
+              ? clang::Lexer::getImmediateMacroName(
+                    Tag->getOuterLocStart(),
+                    Tag->getASTContext().getSourceManager(), LangOpts)
+                    .str()
+              : "";
+
+      if (LookupName.empty() && isa<clang::EnumDecl>(Tag) &&
+          (MacroName == "CF_OPTIONS" || MacroName == "NS_OPTIONS" ||
+           MacroName == "OBJC_OPTIONS" || MacroName == "SWIFT_OPTIONS")) {
+
+        // For an anonymous OPTIONS enum, fall back to looking it up by the
+        // printed name of its underlying integer type.
+        clang::QualType T = llvm::cast<clang::EnumDecl>(Tag)->getIntegerType();
+        LookupName = clang::QualType::getAsString(
+            T.split(), getASTContext().getPrintingPolicy());
+      }
+
+      for (auto Reader : APINotes.findAPINotes(D->getLocation())) {
+        auto Info = Reader->lookupTag(LookupName, APINotesContext);
+        ProcessVersionedAPINotes(*this, Tag, Info);
+      }
+
+      return;
+    }
+
+    // Typedefs
+    if (auto Typedef = dyn_cast<TypedefNameDecl>(D)) {
+      for (auto Reader : APINotes.findAPINotes(D->getLocation())) {
+        auto Info = Reader->lookupTypedef(Typedef->getName(), APINotesContext);
+        ProcessVersionedAPINotes(*this, Typedef, Info);
+      }
+
+      return;
+    }
+  }
+
+  // Enumerators.
+  if (D->getDeclContext()->getRedeclContext()->isFileContext() ||
+      D->getDeclContext()->getRedeclContext()->isExternCContext()) {
+    if (auto EnumConstant = dyn_cast<EnumConstantDecl>(D)) {
+      for (auto Reader : APINotes.findAPINotes(D->getLocation())) {
+        auto Info = Reader->lookupEnumConstant(EnumConstant->getName());
+        ProcessVersionedAPINotes(*this, EnumConstant, Info);
+      }
+
+      return;
+    }
+  }
+
+  if (auto ObjCContainer = dyn_cast<ObjCContainerDecl>(D->getDeclContext())) {
+    // Location function that looks up an Objective-C context.
+    auto GetContext = [&](api_notes::APINotesReader *Reader)
+        -> std::optional<api_notes::ContextID> {
+      if (auto Protocol = dyn_cast<ObjCProtocolDecl>(ObjCContainer)) {
+        if (auto Found = Reader->lookupObjCProtocolID(Protocol->getName()))
+          return *Found;
+
+        return std::nullopt;
+      }
+
+      // Categories and implementations resolve through the class interface
+      // they extend or implement.
+      if (auto Impl = dyn_cast<ObjCCategoryImplDecl>(ObjCContainer)) {
+        if (auto Cat = Impl->getCategoryDecl())
+          ObjCContainer = Cat->getClassInterface();
+        else
+          return std::nullopt;
+      }
+
+      if (auto Category = dyn_cast<ObjCCategoryDecl>(ObjCContainer)) {
+        if (Category->getClassInterface())
+          ObjCContainer = Category->getClassInterface();
+        else
+          return std::nullopt;
+      }
+
+      if (auto Impl = dyn_cast<ObjCImplDecl>(ObjCContainer)) {
+        if (Impl->getClassInterface())
+          ObjCContainer = Impl->getClassInterface();
+        else
+          return std::nullopt;
+      }
+
+      if (auto Class = dyn_cast<ObjCInterfaceDecl>(ObjCContainer)) {
+        if (auto Found = Reader->lookupObjCClassID(Class->getName()))
+          return *Found;
+
+        return std::nullopt;
+      }
+
+      return std::nullopt;
+    };
+
+    // Objective-C methods.
+    if (auto Method = dyn_cast<ObjCMethodDecl>(D)) {
+      for (auto Reader : APINotes.findAPINotes(D->getLocation())) {
+        if (auto Context = GetContext(Reader)) {
+          // Map the selector.
+          Selector Sel = Method->getSelector();
+          SmallVector<StringRef, 2> SelPieces;
+          if (Sel.isUnarySelector()) {
+            SelPieces.push_back(Sel.getNameForSlot(0));
+          } else {
+            for (unsigned i = 0, n = Sel.getNumArgs(); i != n; ++i)
+              SelPieces.push_back(Sel.getNameForSlot(i));
+          }
+
+          api_notes::ObjCSelectorRef SelectorRef;
+          SelectorRef.NumArgs = Sel.getNumArgs();
+          SelectorRef.Identifiers = SelPieces;
+
+          auto Info = Reader->lookupObjCMethod(*Context, SelectorRef,
+                                               Method->isInstanceMethod());
+          ProcessVersionedAPINotes(*this, Method, Info);
+        }
+      }
+    }
+
+    // Objective-C properties.
+    if (auto Property = dyn_cast<ObjCPropertyDecl>(D)) {
+      for (auto Reader : APINotes.findAPINotes(D->getLocation())) {
+        if (auto Context = GetContext(Reader)) {
+          // A property without the written 'class' attribute is an instance
+          // property.
+          bool isInstanceProperty =
+              (Property->getPropertyAttributesAsWritten() &
+               ObjCPropertyAttribute::kind_class) == 0;
+          auto Info = Reader->lookupObjCProperty(*Context, Property->getName(),
+                                                 isInstanceProperty);
+          ProcessVersionedAPINotes(*this, Property, Info);
+        }
+      }
+
+      return;
+    }
+  }
+
+  if (auto CXXRecord = dyn_cast<CXXRecordDecl>(D->getDeclContext())) {
+    // Looks up the API-notes context of the enclosing C++ record (tag), using
+    // its enclosing namespace as the parent context.
+    auto GetRecordContext = [&](api_notes::APINotesReader *Reader)
+        -> std::optional<api_notes::ContextID> {
+      auto ParentContext = GetNamespaceContext(CXXRecord->getDeclContext());
+      if (auto Found = Reader->lookupTagID(CXXRecord->getName(), ParentContext))
+        return *Found;
+
+      return std::nullopt;
+    };
+
+    if (auto CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
+      for (auto Reader : APINotes.findAPINotes(D->getLocation())) {
+        if (auto Context = GetRecordContext(Reader)) {
+          auto Info = Reader->lookupCXXMethod(*Context, CXXMethod->getName());
+          ProcessVersionedAPINotes(*this, CXXMethod, Info);
+        }
+      }
+    }
+  }
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaARM.cpp b/contrib/llvm-project/clang/lib/Sema/SemaARM.cpp
new file mode 100644
index 000000000000..d8dd4fe16e3a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaARM.cpp
@@ -0,0 +1,1340 @@
+//===------ SemaARM.cpp ---------- ARM target-specific routines -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to ARM.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaARM.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/ParsedAttr.h"
+#include "clang/Sema/Sema.h"
+
+namespace clang {
+
+// Construct the ARM-specific Sema component; all shared state lives in the
+// SemaBase it wraps.
+SemaARM::SemaARM(Sema &S) : SemaBase(S) {}
+
+/// BuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
+///
+/// Type-checks the arguments of the AArch64 MTE builtins (__builtin_arm_irg,
+/// addg, gmi, ldg, stg, subp) and sets each call's result type.
+///
+/// \param BuiltinID the AArch64 builtin being checked.
+/// \param TheCall the call expression; its args and type may be rewritten.
+/// \returns true on error (a diagnostic has been emitted), false otherwise.
+bool SemaARM::BuiltinARMMemoryTaggingCall(unsigned BuiltinID,
+                                          CallExpr *TheCall) {
+  ASTContext &Context = getASTContext();
+
+  // irg(pointer, mask): first arg must be a pointer, second an integer.
+  if (BuiltinID == AArch64::BI__builtin_arm_irg) {
+    if (SemaRef.checkArgCount(TheCall, 2))
+      return true;
+    Expr *Arg0 = TheCall->getArg(0);
+    Expr *Arg1 = TheCall->getArg(1);
+
+    ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(Arg0);
+    if (FirstArg.isInvalid())
+      return true;
+    QualType FirstArgType = FirstArg.get()->getType();
+    if (!FirstArgType->isAnyPointerType())
+      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
+             << "first" << FirstArgType << Arg0->getSourceRange();
+    TheCall->setArg(0, FirstArg.get());
+
+    ExprResult SecArg = SemaRef.DefaultLvalueConversion(Arg1);
+    if (SecArg.isInvalid())
+      return true;
+    QualType SecArgType = SecArg.get()->getType();
+    if (!SecArgType->isIntegerType())
+      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
+             << "second" << SecArgType << Arg1->getSourceRange();
+
+    // Derive the return type from the pointer argument.
+    TheCall->setType(FirstArgType);
+    return false;
+  }
+
+  // addg(pointer, imm): pointer plus a small constant tag offset.
+  if (BuiltinID == AArch64::BI__builtin_arm_addg) {
+    if (SemaRef.checkArgCount(TheCall, 2))
+      return true;
+
+    Expr *Arg0 = TheCall->getArg(0);
+    ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(Arg0);
+    if (FirstArg.isInvalid())
+      return true;
+    QualType FirstArgType = FirstArg.get()->getType();
+    if (!FirstArgType->isAnyPointerType())
+      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
+             << "first" << FirstArgType << Arg0->getSourceRange();
+    TheCall->setArg(0, FirstArg.get());
+
+    // Derive the return type from the pointer argument.
+    TheCall->setType(FirstArgType);
+
+    // Second arg must be an constant in range [0,15]
+    return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15);
+  }
+
+  // gmi(pointer, mask): returns an int tag-exclusion mask.
+  if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
+    if (SemaRef.checkArgCount(TheCall, 2))
+      return true;
+    Expr *Arg0 = TheCall->getArg(0);
+    Expr *Arg1 = TheCall->getArg(1);
+
+    ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(Arg0);
+    if (FirstArg.isInvalid())
+      return true;
+    QualType FirstArgType = FirstArg.get()->getType();
+    if (!FirstArgType->isAnyPointerType())
+      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
+             << "first" << FirstArgType << Arg0->getSourceRange();
+
+    QualType SecArgType = Arg1->getType();
+    if (!SecArgType->isIntegerType())
+      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
+             << "second" << SecArgType << Arg1->getSourceRange();
+    TheCall->setType(Context.IntTy);
+    return false;
+  }
+
+  // ldg/stg(pointer): load or store the tag for a granule.
+  if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
+      BuiltinID == AArch64::BI__builtin_arm_stg) {
+    if (SemaRef.checkArgCount(TheCall, 1))
+      return true;
+    Expr *Arg0 = TheCall->getArg(0);
+    ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(Arg0);
+    if (FirstArg.isInvalid())
+      return true;
+
+    QualType FirstArgType = FirstArg.get()->getType();
+    if (!FirstArgType->isAnyPointerType())
+      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
+             << "first" << FirstArgType << Arg0->getSourceRange();
+    TheCall->setArg(0, FirstArg.get());
+
+    // Derive the return type from the pointer argument.
+    // (stg keeps its declared type; only ldg produces the tagged pointer.)
+    if (BuiltinID == AArch64::BI__builtin_arm_ldg)
+      TheCall->setType(FirstArgType);
+    return false;
+  }
+
+  // subp(a, b): tag-ignoring pointer difference; either side may be null.
+  if (BuiltinID == AArch64::BI__builtin_arm_subp) {
+    Expr *ArgA = TheCall->getArg(0);
+    Expr *ArgB = TheCall->getArg(1);
+
+    ExprResult ArgExprA = SemaRef.DefaultFunctionArrayLvalueConversion(ArgA);
+    ExprResult ArgExprB = SemaRef.DefaultFunctionArrayLvalueConversion(ArgB);
+
+    if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
+      return true;
+
+    QualType ArgTypeA = ArgExprA.get()->getType();
+    QualType ArgTypeB = ArgExprB.get()->getType();
+
+    auto isNull = [&](Expr *E) -> bool {
+      return E->isNullPointerConstant(Context,
+                                      Expr::NPC_ValueDependentIsNotNull);
+    };
+
+    // argument should be either a pointer or null
+    if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
+      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
+             << "first" << ArgTypeA << ArgA->getSourceRange();
+
+    if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
+      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
+             << "second" << ArgTypeB << ArgB->getSourceRange();
+
+    // Ensure Pointee types are compatible
+    if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
+        ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
+      // Compare canonical, unqualified pointee types so that qualifiers do
+      // not affect compatibility.
+      QualType pointeeA = ArgTypeA->getPointeeType();
+      QualType pointeeB = ArgTypeB->getPointeeType();
+      if (!Context.typesAreCompatible(
+              Context.getCanonicalType(pointeeA).getUnqualifiedType(),
+              Context.getCanonicalType(pointeeB).getUnqualifiedType())) {
+        return Diag(TheCall->getBeginLoc(),
+                    diag::err_typecheck_sub_ptr_compatible)
+               << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
+               << ArgB->getSourceRange();
+      }
+    }
+
+    // at least one argument should be pointer type
+    if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
+      return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
+             << ArgTypeA << ArgTypeB << ArgA->getSourceRange();
+
+    if (isNull(ArgA)) // adopt type of the other pointer
+      ArgExprA =
+          SemaRef.ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer);
+
+    if (isNull(ArgB))
+      ArgExprB =
+          SemaRef.ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer);
+
+    TheCall->setArg(0, ArgExprA.get());
+    TheCall->setArg(1, ArgExprB.get());
+    TheCall->setType(Context.LongLongTy);
+    return false;
+  }
+  assert(false && "Unhandled ARM MTE intrinsic");
+  return true;
+}
+
+/// BuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
+/// TheCall is an ARM/AArch64 special register string literal.
+///
+/// \param BuiltinID one of the __builtin_arm_rsr*/wsr* builtins.
+/// \param TheCall the call being checked.
+/// \param ArgNum index of the register-name string argument.
+/// \param ExpectedFieldNum number of colon-separated fields for the encoded
+///        register form (e.g. "cp:op1:cN:cM:op2").
+/// \param AllowName whether a single bare register name is also accepted.
+/// \returns true on error (diagnostic emitted), false otherwise.
+bool SemaARM::BuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
+                                   int ArgNum, unsigned ExpectedFieldNum,
+                                   bool AllowName) {
+  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
+                      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
+                      BuiltinID == ARM::BI__builtin_arm_rsr ||
+                      BuiltinID == ARM::BI__builtin_arm_rsrp ||
+                      BuiltinID == ARM::BI__builtin_arm_wsr ||
+                      BuiltinID == ARM::BI__builtin_arm_wsrp;
+  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
+                          BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
+                          BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
+                          BuiltinID == AArch64::BI__builtin_arm_wsr128 ||
+                          BuiltinID == AArch64::BI__builtin_arm_rsr ||
+                          BuiltinID == AArch64::BI__builtin_arm_rsrp ||
+                          BuiltinID == AArch64::BI__builtin_arm_wsr ||
+                          BuiltinID == AArch64::BI__builtin_arm_wsrp;
+  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");
+
+  // We can't check the value of a dependent argument.
+  Expr *Arg = TheCall->getArg(ArgNum);
+  if (Arg->isTypeDependent() || Arg->isValueDependent())
+    return false;
+
+  // Check if the argument is a string literal.
+  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
+    return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
+           << Arg->getSourceRange();
+
+  // Check the type of special register given.
+  StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
+  SmallVector<StringRef, 6> Fields;
+  Reg.split(Fields, ":");
+
+  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
+    return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
+           << Arg->getSourceRange();
+
+  // If the string is the name of a register then we cannot check that it is
+  // valid here but if the string is of one the forms described in ACLE then we
+  // can check that the supplied fields are integers and within the valid
+  // ranges.
+  if (Fields.size() > 1) {
+    bool FiveFields = Fields.size() == 5;
+
+    bool ValidString = true;
+    if (IsARMBuiltin) {
+      // On 32-bit ARM, strip the "cp"/"p" coprocessor prefix and the "c"
+      // prefixes on the CRn/CRm fields before the numeric range checks.
+      ValidString &= Fields[0].starts_with_insensitive("cp") ||
+                     Fields[0].starts_with_insensitive("p");
+      if (ValidString)
+        Fields[0] = Fields[0].drop_front(
+            Fields[0].starts_with_insensitive("cp") ? 2 : 1);
+
+      ValidString &= Fields[2].starts_with_insensitive("c");
+      if (ValidString)
+        Fields[2] = Fields[2].drop_front(1);
+
+      if (FiveFields) {
+        ValidString &= Fields[3].starts_with_insensitive("c");
+        if (ValidString)
+          Fields[3] = Fields[3].drop_front(1);
+      }
+    }
+
+    // Per-field upper bounds; lower bound is always 0.
+    SmallVector<int, 5> Ranges;
+    if (FiveFields)
+      Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
+    else
+      Ranges.append({15, 7, 15});
+
+    for (unsigned i = 0; i < Fields.size(); ++i) {
+      int IntField;
+      ValidString &= !Fields[i].getAsInteger(10, IntField);
+      ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
+    }
+
+    if (!ValidString)
+      return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
+             << Arg->getSourceRange();
+  } else if (IsAArch64Builtin && Fields.size() == 1) {
+    // This code validates writes to PSTATE registers.
+
+    // Not a write.
+    if (TheCall->getNumArgs() != 2)
+      return false;
+
+    // The 128-bit system register accesses do not touch PSTATE.
+    if (BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
+        BuiltinID == AArch64::BI__builtin_arm_wsr128)
+      return false;
+
+    // These are the named PSTATE accesses using "MSR (immediate)" instructions,
+    // along with the upper limit on the immediates allowed.
+    auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg)
+                        .CaseLower("spsel", 15)
+                        .CaseLower("daifclr", 15)
+                        .CaseLower("daifset", 15)
+                        .CaseLower("pan", 15)
+                        .CaseLower("uao", 15)
+                        .CaseLower("dit", 15)
+                        .CaseLower("ssbs", 15)
+                        .CaseLower("tco", 15)
+                        .CaseLower("allint", 1)
+                        .CaseLower("pm", 1)
+                        .Default(std::nullopt);
+
+    // If this is not a named PSTATE, just continue without validating, as this
+    // will be lowered to an "MSR (register)" instruction directly
+    if (!MaxLimit)
+      return false;
+
+    // Here we only allow constants in the range for that pstate, as required by
+    // the ACLE.
+    //
+    // While clang also accepts the names of system registers in its ACLE
+    // intrinsics, we prevent this with the PSTATE names used in MSR (immediate)
+    // as the value written via a register is different to the value used as an
+    // immediate to have the same effect. e.g., for the instruction `msr tco,
+    // x0`, it is bit 25 of register x0 that is written into PSTATE.TCO, but
+    // with `msr tco, #imm`, it is bit 0 of xN that is written into PSTATE.TCO.
+    //
+    // If a programmer wants to codegen the MSR (register) form of `msr tco,
+    // xN`, they can still do so by specifying the register using five
+    // colon-separated numbers in a string.
+    return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, *MaxLimit);
+  }
+
+  return false;
+}
+
+// Get the valid immediate range for the specified NEON type code.
+//
+// Returns the maximum lane index (or maximum shift amount when `shift` is
+// true) for the element type encoded in `t`. Quad (128-bit) vectors hold
+// twice as many lanes, hence the `(N << IsQuad) - 1` pattern below;
+// `ForceQuad` treats the type as quad regardless of its own flag.
+static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
+  NeonTypeFlags Type(t);
+  int IsQuad = ForceQuad ? true : Type.isQuad();
+  switch (Type.getEltType()) {
+  case NeonTypeFlags::Int8:
+  case NeonTypeFlags::Poly8:
+    return shift ? 7 : (8 << IsQuad) - 1;
+  case NeonTypeFlags::Int16:
+  case NeonTypeFlags::Poly16:
+    return shift ? 15 : (4 << IsQuad) - 1;
+  case NeonTypeFlags::Int32:
+    return shift ? 31 : (2 << IsQuad) - 1;
+  case NeonTypeFlags::Int64:
+  case NeonTypeFlags::Poly64:
+    return shift ? 63 : (1 << IsQuad) - 1;
+  case NeonTypeFlags::Poly128:
+    return shift ? 127 : (1 << IsQuad) - 1;
+  case NeonTypeFlags::Float16:
+    assert(!shift && "cannot shift float types!");
+    return (4 << IsQuad) - 1;
+  case NeonTypeFlags::Float32:
+    assert(!shift && "cannot shift float types!");
+    return (2 << IsQuad) - 1;
+  case NeonTypeFlags::Float64:
+    assert(!shift && "cannot shift float types!");
+    return (1 << IsQuad) - 1;
+  case NeonTypeFlags::BFloat16:
+    assert(!shift && "cannot shift float types!");
+    return (4 << IsQuad) - 1;
+  }
+  llvm_unreachable("Invalid NeonTypeFlag!");
+}
+
+/// getNeonEltType - Return the QualType corresponding to the elements of
+/// the vector type specified by the NeonTypeFlags. This is used to check
+/// the pointer arguments for Neon load/store intrinsics.
+///
+/// \param IsPolyUnsigned whether poly types map to unsigned integer types
+///        (true on AArch64 targets at the call site).
+/// \param IsInt64Long whether the target's int64_t is `long` rather than
+///        `long long`, which decides the C type used for 64-bit elements.
+static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
+                               bool IsPolyUnsigned, bool IsInt64Long) {
+  switch (Flags.getEltType()) {
+  case NeonTypeFlags::Int8:
+    return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
+  case NeonTypeFlags::Int16:
+    return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
+  case NeonTypeFlags::Int32:
+    return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
+  case NeonTypeFlags::Int64:
+    if (IsInt64Long)
+      return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
+    else
+      return Flags.isUnsigned() ? Context.UnsignedLongLongTy
+                                : Context.LongLongTy;
+  case NeonTypeFlags::Poly8:
+    return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
+  case NeonTypeFlags::Poly16:
+    return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
+  case NeonTypeFlags::Poly64:
+    if (IsInt64Long)
+      return Context.UnsignedLongTy;
+    else
+      return Context.UnsignedLongLongTy;
+  case NeonTypeFlags::Poly128:
+    // No corresponding scalar element type; falls through to unreachable.
+    break;
+  case NeonTypeFlags::Float16:
+    return Context.HalfTy;
+  case NeonTypeFlags::Float32:
+    return Context.FloatTy;
+  case NeonTypeFlags::Float64:
+    return Context.DoubleTy;
+  case NeonTypeFlags::BFloat16:
+    return Context.BFloat16Ty;
+  }
+  llvm_unreachable("Invalid NeonTypeFlag!");
+}
+
+/// Bitmask describing how an SME builtin uses the ZA and ZT0 architectural
+/// state: two 2-bit in/out fields, ZA in bits [1:0] and ZT0 in bits [3:2].
+enum ArmSMEState : unsigned {
+  ArmNoState = 0,
+
+  // ZA usage (bits 0-1).
+  ArmInZA = 0b01,
+  ArmOutZA = 0b10,
+  ArmInOutZA = 0b11,
+  ArmZAMask = 0b11,
+
+  // ZT0 usage (bits 2-3).
+  ArmInZT0 = 0b01 << 2,
+  ArmOutZT0 = 0b10 << 2,
+  ArmInOutZT0 = 0b11 << 2,
+  ArmZT0Mask = 0b11 << 2
+};
+
+/// Validate the immediate arguments of an SVE/SME builtin call.
+///
+/// \param TheCall the builtin call whose immediates are checked.
+/// \param ImmChecks tuples of (argument index, check kind, element size in
+///        bits) generated from tablegen for this builtin.
+/// \returns true if any immediate failed its check (diagnostics emitted);
+///          all checks are still run so every bad argument is reported.
+bool SemaARM::ParseSVEImmChecks(
+    CallExpr *TheCall, SmallVector<std::tuple<int, int, int>, 3> &ImmChecks) {
+  // Perform all the immediate checks for this builtin call.
+  bool HasError = false;
+  for (auto &I : ImmChecks) {
+    int ArgNum, CheckTy, ElementSizeInBits;
+    std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;
+
+    typedef bool (*OptionSetCheckFnTy)(int64_t Value);
+
+    // Function that checks whether the operand (ArgNum) is an immediate
+    // that is one of the predefined values.
+    auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
+                                   int ErrDiag) -> bool {
+      // We can't check the value of a dependent argument.
+      Expr *Arg = TheCall->getArg(ArgNum);
+      if (Arg->isTypeDependent() || Arg->isValueDependent())
+        return false;
+
+      // Check constant-ness first.
+      llvm::APSInt Imm;
+      if (SemaRef.BuiltinConstantArg(TheCall, ArgNum, Imm))
+        return true;
+
+      if (!CheckImm(Imm.getSExtValue()))
+        return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
+      return false;
+    };
+
+    switch ((SVETypeFlags::ImmCheckType)CheckTy) {
+    case SVETypeFlags::ImmCheck0_31:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheck0_13:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheck1_16:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheck0_7:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheck1_1:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1, 1))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheck1_3:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1, 3))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheck1_7:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1, 7))
+        HasError = true;
+      break;
+    // The following bounds are derived from the 2048-bit maximum SVE vector
+    // length and the element size, per the SVE ACLE.
+    case SVETypeFlags::ImmCheckExtract:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0,
+                                          (2048 / ElementSizeInBits) - 1))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheckShiftRight:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1,
+                                          ElementSizeInBits))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheckShiftRightNarrow:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1,
+                                          ElementSizeInBits / 2))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheckShiftLeft:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0,
+                                          ElementSizeInBits - 1))
+        HasError = true;
+      break;
+    // Lane indices are limited by the 128-bit vector segment; dot/rotate
+    // variants index groups of 4 or 2 elements respectively.
+    case SVETypeFlags::ImmCheckLaneIndex:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0,
+                                          (128 / (1 * ElementSizeInBits)) - 1))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheckLaneIndexCompRotate:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0,
+                                          (128 / (2 * ElementSizeInBits)) - 1))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheckLaneIndexDot:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0,
+                                          (128 / (4 * ElementSizeInBits)) - 1))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheckComplexRot90_270:
+      if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
+                              diag::err_rotation_argument_to_cadd))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheckComplexRotAll90:
+      if (CheckImmediateInSet(
+              [](int64_t V) {
+                return V == 0 || V == 90 || V == 180 || V == 270;
+              },
+              diag::err_rotation_argument_to_cmla))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheck0_1:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheck0_2:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheck0_3:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheck0_0:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 0))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheck0_15:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 15))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheck0_255:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 255))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheck2_4_Mul2:
+      // Must be an even value in [2, 4], i.e. exactly 2 or 4.
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 2, 4) ||
+          SemaRef.BuiltinConstantArgMultiple(TheCall, ArgNum, 2))
+        HasError = true;
+      break;
+    }
+  }
+
+  return HasError;
+}
+
+/// Determine the SME streaming mode of function \p FD from its attributes
+/// and prototype: locally-streaming or streaming-enabled functions are
+/// ArmStreaming, streaming-compatible ones ArmStreamingCompatible, and
+/// everything else ArmNonStreaming.
+SemaARM::ArmStreamingType getArmStreamingFnType(const FunctionDecl *FD) {
+  // __arm_locally_streaming takes precedence over the prototype bits.
+  if (FD->hasAttr<ArmLocallyStreamingAttr>())
+    return SemaARM::ArmStreaming;
+  if (const Type *Ty = FD->getType().getTypePtrOrNull()) {
+    if (const auto *FPT = Ty->getAs<FunctionProtoType>()) {
+      if (FPT->getAArch64SMEAttributes() &
+          FunctionType::SME_PStateSMEnabledMask)
+        return SemaARM::ArmStreaming;
+      if (FPT->getAArch64SMEAttributes() &
+          FunctionType::SME_PStateSMCompatibleMask)
+        return SemaARM::ArmStreamingCompatible;
+    }
+  }
+  return SemaARM::ArmNonStreaming;
+}
+
+/// Diagnose a builtin call whose required streaming mode is incompatible with
+/// the calling function's streaming mode.
+///
+/// \param BuiltinType the mode the builtin requires; VerifyRuntimeMode means
+///        the requirement must be derived from the builtin's target guard.
+/// \returns true if a diagnostic was emitted, false otherwise.
+static bool checkArmStreamingBuiltin(Sema &S, CallExpr *TheCall,
+                                     const FunctionDecl *FD,
+                                     SemaARM::ArmStreamingType BuiltinType,
+                                     unsigned BuiltinID) {
+  SemaARM::ArmStreamingType FnType = getArmStreamingFnType(FD);
+
+  // Check if the intrinsic is available in the right mode, i.e.
+  // * When compiling for SME only, the caller must be in streaming mode.
+  // * When compiling for SVE only, the caller must be in non-streaming mode.
+  // * When compiling for both SVE and SME, the caller can be in either mode.
+  if (BuiltinType == SemaARM::VerifyRuntimeMode) {
+    // Clears every feature whose name starts with S (e.g. "sve", "sve2p1").
+    auto DisableFeatures = [](llvm::StringMap<bool> &Map, StringRef S) {
+      for (StringRef K : Map.keys())
+        if (K.starts_with(S))
+          Map[K] = false;
+    };
+
+    llvm::StringMap<bool> CallerFeatureMapWithoutSVE;
+    S.Context.getFunctionFeatureMap(CallerFeatureMapWithoutSVE, FD);
+    DisableFeatures(CallerFeatureMapWithoutSVE, "sve");
+
+    // Avoid emitting diagnostics for a function that can never compile.
+    if (FnType == SemaARM::ArmStreaming && !CallerFeatureMapWithoutSVE["sme"])
+      return false;
+
+    llvm::StringMap<bool> CallerFeatureMapWithoutSME;
+    S.Context.getFunctionFeatureMap(CallerFeatureMapWithoutSME, FD);
+    DisableFeatures(CallerFeatureMapWithoutSME, "sme");
+
+    // We know the builtin requires either some combination of SVE flags, or
+    // some combination of SME flags, but we need to figure out which part
+    // of the required features is satisfied by the target features.
+    //
+    // For a builtin with target guard 'sve2p1|sme2', if we compile with
+    // '+sve2p1,+sme', then we know that it satisfies the 'sve2p1' part if we
+    // evaluate the features for '+sve2p1,+sme,+nosme'.
+    //
+    // Similarly, if we compile with '+sve2,+sme2', then we know it satisfies
+    // the 'sme2' part if we evaluate the features for '+sve2,+sme2,+nosve'.
+    StringRef BuiltinTargetGuards(
+        S.Context.BuiltinInfo.getRequiredFeatures(BuiltinID));
+    bool SatisfiesSVE = Builtin::evaluateRequiredTargetFeatures(
+        BuiltinTargetGuards, CallerFeatureMapWithoutSME);
+    bool SatisfiesSME = Builtin::evaluateRequiredTargetFeatures(
+        BuiltinTargetGuards, CallerFeatureMapWithoutSVE);
+
+    if ((SatisfiesSVE && SatisfiesSME) ||
+        (SatisfiesSVE && FnType == SemaARM::ArmStreamingCompatible))
+      return false;
+    else if (SatisfiesSVE)
+      BuiltinType = SemaARM::ArmNonStreaming;
+    else if (SatisfiesSME)
+      BuiltinType = SemaARM::ArmStreaming;
+    else
+      // This should be diagnosed by CodeGen
+      return false;
+  }
+
+  // At this point BuiltinType is a concrete requirement; diagnose the two
+  // incompatible combinations.
+  if (FnType != SemaARM::ArmNonStreaming &&
+      BuiltinType == SemaARM::ArmNonStreaming)
+    S.Diag(TheCall->getBeginLoc(), diag::err_attribute_arm_sm_incompat_builtin)
+        << TheCall->getSourceRange() << "non-streaming";
+  else if (FnType != SemaARM::ArmStreaming &&
+           BuiltinType == SemaARM::ArmStreaming)
+    S.Diag(TheCall->getBeginLoc(), diag::err_attribute_arm_sm_incompat_builtin)
+        << TheCall->getSourceRange() << "streaming";
+  else
+    return false;
+
+  return true;
+}
+
+/// Returns true if \p FD has ZA state, either shared via its prototype's SME
+/// attributes or newly created via __arm_new("za").
+static bool hasArmZAState(const FunctionDecl *FD) {
+  const auto *T = FD->getType()->getAs<FunctionProtoType>();
+  return (T && FunctionType::getArmZAState(T->getAArch64SMEAttributes()) !=
+                   FunctionType::ARM_None) ||
+         (FD->hasAttr<ArmNewAttr>() && FD->getAttr<ArmNewAttr>()->isNewZA());
+}
+
+/// Returns true if \p FD has ZT0 state, either shared via its prototype's SME
+/// attributes or newly created via __arm_new("zt0").
+static bool hasArmZT0State(const FunctionDecl *FD) {
+  const auto *T = FD->getType()->getAs<FunctionProtoType>();
+  return (T && FunctionType::getArmZT0State(T->getAArch64SMEAttributes()) !=
+                   FunctionType::ARM_None) ||
+         (FD->hasAttr<ArmNewAttr>() && FD->getAttr<ArmNewAttr>()->isNewZT0());
+}
+
+/// Returns the ZA/ZT0 usage mask for the given SME builtin; the cases are
+/// generated from tablegen. Builtins not in the table use no SME state.
+static ArmSMEState getSMEState(unsigned BuiltinID) {
+  switch (BuiltinID) {
+  default:
+    return ArmNoState;
+#define GET_SME_BUILTIN_GET_STATE
+#include "clang/Basic/arm_sme_builtins_za_state.inc"
+#undef GET_SME_BUILTIN_GET_STATE
+  }
+}
+
+/// Perform SME-specific checks on a builtin call: streaming-mode
+/// compatibility, ZA/ZT0 state availability (warnings only), and immediate
+/// argument ranges. Returns true on error.
+bool SemaARM::CheckSMEBuiltinFunctionCall(unsigned BuiltinID,
+                                          CallExpr *TheCall) {
+  if (const FunctionDecl *FD = SemaRef.getCurFunctionDecl()) {
+    std::optional<ArmStreamingType> BuiltinType;
+
+    // The generated cases assign this builtin's required streaming mode to
+    // BuiltinType; unlisted builtins leave it unset.
+    switch (BuiltinID) {
+#define GET_SME_STREAMING_ATTRS
+#include "clang/Basic/arm_sme_streaming_attrs.inc"
+#undef GET_SME_STREAMING_ATTRS
+    }
+
+    if (BuiltinType &&
+        checkArmStreamingBuiltin(SemaRef, TheCall, FD, *BuiltinType, BuiltinID))
+      return true;
+
+    // Using ZA/ZT0 without the corresponding state only warns.
+    if ((getSMEState(BuiltinID) & ArmZAMask) && !hasArmZAState(FD))
+      Diag(TheCall->getBeginLoc(),
+           diag::warn_attribute_arm_za_builtin_no_za_state)
+          << TheCall->getSourceRange();
+
+    if ((getSMEState(BuiltinID) & ArmZT0Mask) && !hasArmZT0State(FD))
+      Diag(TheCall->getBeginLoc(),
+           diag::warn_attribute_arm_zt0_builtin_no_zt0_state)
+          << TheCall->getSourceRange();
+  }
+
+  // Range check SME intrinsics that take immediate values.
+  SmallVector<std::tuple<int, int, int>, 3> ImmChecks;
+
+  // The generated cases populate ImmChecks for builtins with immediates.
+  switch (BuiltinID) {
+  default:
+    return false;
+#define GET_SME_IMMEDIATE_CHECK
+#include "clang/Basic/arm_sme_sema_rangechecks.inc"
+#undef GET_SME_IMMEDIATE_CHECK
+  }
+
+  return ParseSVEImmChecks(TheCall, ImmChecks);
+}
+
+/// Perform SVE-specific checks on a builtin call: streaming-mode
+/// compatibility and immediate argument ranges. Returns true on error.
+bool SemaARM::CheckSVEBuiltinFunctionCall(unsigned BuiltinID,
+                                          CallExpr *TheCall) {
+  if (const FunctionDecl *FD = SemaRef.getCurFunctionDecl()) {
+    std::optional<ArmStreamingType> BuiltinType;
+
+    // The generated cases assign this builtin's required streaming mode to
+    // BuiltinType; unlisted builtins leave it unset.
+    switch (BuiltinID) {
+#define GET_SVE_STREAMING_ATTRS
+#include "clang/Basic/arm_sve_streaming_attrs.inc"
+#undef GET_SVE_STREAMING_ATTRS
+    }
+    if (BuiltinType &&
+        checkArmStreamingBuiltin(SemaRef, TheCall, FD, *BuiltinType, BuiltinID))
+      return true;
+  }
+  // Range check SVE intrinsics that take immediate values.
+  SmallVector<std::tuple<int, int, int>, 3> ImmChecks;
+
+  // The generated cases populate ImmChecks for builtins with immediates.
+  switch (BuiltinID) {
+  default:
+    return false;
+#define GET_SVE_IMMEDIATE_CHECK
+#include "clang/Basic/arm_sve_sema_rangechecks.inc"
+#undef GET_SVE_IMMEDIATE_CHECK
+  }
+
+  return ParseSVEImmChecks(TheCall, ImmChecks);
+}
+
+bool SemaARM::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ if (const FunctionDecl *FD = SemaRef.getCurFunctionDecl()) {
+
+ switch (BuiltinID) {
+ default:
+ break;
+#define GET_NEON_BUILTINS
+#define TARGET_BUILTIN(id, ...) case NEON::BI##id:
+#define BUILTIN(id, ...) case NEON::BI##id:
+#include "clang/Basic/arm_neon.inc"
+ if (checkArmStreamingBuiltin(SemaRef, TheCall, FD, ArmNonStreaming,
+ BuiltinID))
+ return true;
+ break;
+#undef TARGET_BUILTIN
+#undef BUILTIN
+#undef GET_NEON_BUILTINS
+ }
+ }
+
+ llvm::APSInt Result;
+ uint64_t mask = 0;
+ unsigned TV = 0;
+ int PtrArgNum = -1;
+ bool HasConstPtr = false;
+ switch (BuiltinID) {
+#define GET_NEON_OVERLOAD_CHECK
+#include "clang/Basic/arm_fp16.inc"
+#include "clang/Basic/arm_neon.inc"
+#undef GET_NEON_OVERLOAD_CHECK
+ }
+
+ // For NEON intrinsics which are overloaded on vector element type, validate
+ // the immediate which specifies which variant to emit.
+ unsigned ImmArg = TheCall->getNumArgs() - 1;
+ if (mask) {
+ if (SemaRef.BuiltinConstantArg(TheCall, ImmArg, Result))
+ return true;
+
+ TV = Result.getLimitedValue(64);
+ if ((TV > 63) || (mask & (1ULL << TV)) == 0)
+ return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
+ << TheCall->getArg(ImmArg)->getSourceRange();
+ }
+
+ if (PtrArgNum >= 0) {
+ // Check that pointer arguments have the specified type.
+ Expr *Arg = TheCall->getArg(PtrArgNum);
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
+ Arg = ICE->getSubExpr();
+ ExprResult RHS = SemaRef.DefaultFunctionArrayLvalueConversion(Arg);
+ QualType RHSTy = RHS.get()->getType();
+
+ llvm::Triple::ArchType Arch = TI.getTriple().getArch();
+ bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
+ Arch == llvm::Triple::aarch64_32 ||
+ Arch == llvm::Triple::aarch64_be;
+ bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
+ QualType EltTy = getNeonEltType(NeonTypeFlags(TV), getASTContext(),
+ IsPolyUnsigned, IsInt64Long);
+ if (HasConstPtr)
+ EltTy = EltTy.withConst();
+ QualType LHSTy = getASTContext().getPointerType(EltTy);
+ Sema::AssignConvertType ConvTy;
+ ConvTy = SemaRef.CheckSingleAssignmentConstraints(LHSTy, RHS);
+ if (RHS.isInvalid())
+ return true;
+ if (SemaRef.DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy,
+ RHSTy, RHS.get(), Sema::AA_Assigning))
+ return true;
+ }
+
+ // For NEON intrinsics which take an immediate value as part of the
+ // instruction, range check them here.
+ unsigned i = 0, l = 0, u = 0;
+ switch (BuiltinID) {
+ default:
+ return false;
+#define GET_NEON_IMMEDIATE_CHECK
+#include "clang/Basic/arm_fp16.inc"
+#include "clang/Basic/arm_neon.inc"
+#undef GET_NEON_IMMEDIATE_CHECK
+ }
+
+ return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u + l);
+}
+
+bool SemaARM::CheckMVEBuiltinFunctionCall(unsigned BuiltinID,
+ CallExpr *TheCall) {
+ switch (BuiltinID) {
+ default:
+ return false;
+#include "clang/Basic/arm_mve_builtin_sema.inc"
+ }
+}
+
+bool SemaARM::CheckCDEBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ bool Err = false;
+ switch (BuiltinID) {
+ default:
+ return false;
+#include "clang/Basic/arm_cde_builtin_sema.inc"
+ }
+
+ if (Err)
+ return true;
+
+ return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
+}
+
+bool SemaARM::CheckARMCoprocessorImmediate(const TargetInfo &TI,
+ const Expr *CoprocArg,
+ bool WantCDE) {
+ ASTContext &Context = getASTContext();
+ if (SemaRef.isConstantEvaluatedContext())
+ return false;
+
+ // We can't check the value of a dependent argument.
+ if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
+ return false;
+
+ llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
+ int64_t CoprocNo = CoprocNoAP.getExtValue();
+ assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");
+
+ uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
+ bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));
+
+ if (IsCDECoproc != WantCDE)
+ return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
+ << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();
+
+ return false;
+}
+
+bool SemaARM::CheckARMBuiltinExclusiveCall(unsigned BuiltinID,
+ CallExpr *TheCall,
+ unsigned MaxWidth) {
+ assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
+ BuiltinID == ARM::BI__builtin_arm_ldaex ||
+ BuiltinID == ARM::BI__builtin_arm_strex ||
+ BuiltinID == ARM::BI__builtin_arm_stlex ||
+ BuiltinID == AArch64::BI__builtin_arm_ldrex ||
+ BuiltinID == AArch64::BI__builtin_arm_ldaex ||
+ BuiltinID == AArch64::BI__builtin_arm_strex ||
+ BuiltinID == AArch64::BI__builtin_arm_stlex) &&
+ "unexpected ARM builtin");
+ bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
+ BuiltinID == ARM::BI__builtin_arm_ldaex ||
+ BuiltinID == AArch64::BI__builtin_arm_ldrex ||
+ BuiltinID == AArch64::BI__builtin_arm_ldaex;
+
+ ASTContext &Context = getASTContext();
+ DeclRefExpr *DRE =
+ cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+
+ // Ensure that we have the proper number of arguments.
+ if (SemaRef.checkArgCount(TheCall, IsLdrex ? 1 : 2))
+ return true;
+
+ // Inspect the pointer argument of the atomic builtin. This should always be
+ // a pointer type, whose element is an integral scalar or pointer type.
+ // Because it is a pointer type, we don't have to worry about any implicit
+ // casts here.
+ Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
+ ExprResult PointerArgRes =
+ SemaRef.DefaultFunctionArrayLvalueConversion(PointerArg);
+ if (PointerArgRes.isInvalid())
+ return true;
+ PointerArg = PointerArgRes.get();
+
+ const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
+ if (!pointerType) {
+ Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
+ << PointerArg->getType() << 0 << PointerArg->getSourceRange();
+ return true;
+ }
+
+ // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
+ // task is to insert the appropriate casts into the AST. First work out just
+ // what the appropriate type is.
+ QualType ValType = pointerType->getPointeeType();
+ QualType AddrType = ValType.getUnqualifiedType().withVolatile();
+ if (IsLdrex)
+ AddrType.addConst();
+
+ // Issue a warning if the cast is dodgy.
+ CastKind CastNeeded = CK_NoOp;
+ if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
+ CastNeeded = CK_BitCast;
+ Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
+ << PointerArg->getType() << Context.getPointerType(AddrType)
+ << Sema::AA_Passing << PointerArg->getSourceRange();
+ }
+
+ // Finally, do the cast and replace the argument with the corrected version.
+ AddrType = Context.getPointerType(AddrType);
+ PointerArgRes = SemaRef.ImpCastExprToType(PointerArg, AddrType, CastNeeded);
+ if (PointerArgRes.isInvalid())
+ return true;
+ PointerArg = PointerArgRes.get();
+
+ TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);
+
+ // In general, we allow ints, floats and pointers to be loaded and stored.
+ if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
+ !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
+ Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
+ << PointerArg->getType() << 0 << PointerArg->getSourceRange();
+ return true;
+ }
+
+ // But ARM doesn't have instructions to deal with 128-bit versions.
+ if (Context.getTypeSize(ValType) > MaxWidth) {
+ assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
+ Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return true;
+ }
+
+ switch (ValType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ // okay
+ break;
+
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Autoreleasing:
+ Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
+ << ValType << PointerArg->getSourceRange();
+ return true;
+ }
+
+ if (IsLdrex) {
+ TheCall->setType(ValType);
+ return false;
+ }
+
+ // Initialize the argument to be stored.
+ ExprResult ValArg = TheCall->getArg(0);
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(
+ Context, ValType, /*consume*/ false);
+ ValArg = SemaRef.PerformCopyInitialization(Entity, SourceLocation(), ValArg);
+ if (ValArg.isInvalid())
+ return true;
+ TheCall->setArg(0, ValArg.get());
+
+ // __builtin_arm_strex always returns an int. It's marked as such in the .def,
+ // but the custom checker bypasses all default analysis.
+ TheCall->setType(Context.IntTy);
+ return false;
+}
+
+bool SemaARM::CheckARMBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
+ BuiltinID == ARM::BI__builtin_arm_ldaex ||
+ BuiltinID == ARM::BI__builtin_arm_strex ||
+ BuiltinID == ARM::BI__builtin_arm_stlex) {
+ return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 1);
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
+ BuiltinID == ARM::BI__builtin_arm_wsr64)
+ return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);
+
+ if (BuiltinID == ARM::BI__builtin_arm_rsr ||
+ BuiltinID == ARM::BI__builtin_arm_rsrp ||
+ BuiltinID == ARM::BI__builtin_arm_wsr ||
+ BuiltinID == ARM::BI__builtin_arm_wsrp)
+ return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
+
+ if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
+ return true;
+ if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
+ return true;
+ if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
+ return true;
+
+ // For intrinsics which take an immediate value as part of the instruction,
+ // range check them here.
+ // FIXME: VFP Intrinsics should error if VFP not present.
+ switch (BuiltinID) {
+ default:
+ return false;
+ case ARM::BI__builtin_arm_ssat:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 1, 32);
+ case ARM::BI__builtin_arm_usat:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case ARM::BI__builtin_arm_ssat16:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 1, 16);
+ case ARM::BI__builtin_arm_usat16:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15);
+ case ARM::BI__builtin_arm_vcvtr_f:
+ case ARM::BI__builtin_arm_vcvtr_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1);
+ case ARM::BI__builtin_arm_dmb:
+ case ARM::BI__builtin_arm_dsb:
+ case ARM::BI__builtin_arm_isb:
+ case ARM::BI__builtin_arm_dbg:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 15);
+ case ARM::BI__builtin_arm_cdp:
+ case ARM::BI__builtin_arm_cdp2:
+ case ARM::BI__builtin_arm_mcr:
+ case ARM::BI__builtin_arm_mcr2:
+ case ARM::BI__builtin_arm_mrc:
+ case ARM::BI__builtin_arm_mrc2:
+ case ARM::BI__builtin_arm_mcrr:
+ case ARM::BI__builtin_arm_mcrr2:
+ case ARM::BI__builtin_arm_mrrc:
+ case ARM::BI__builtin_arm_mrrc2:
+ case ARM::BI__builtin_arm_ldc:
+ case ARM::BI__builtin_arm_ldcl:
+ case ARM::BI__builtin_arm_ldc2:
+ case ARM::BI__builtin_arm_ldc2l:
+ case ARM::BI__builtin_arm_stc:
+ case ARM::BI__builtin_arm_stcl:
+ case ARM::BI__builtin_arm_stc2:
+ case ARM::BI__builtin_arm_stc2l:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 15) ||
+ CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
+ /*WantCDE*/ false);
+ }
+}
+
+bool SemaARM::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
+ BuiltinID == AArch64::BI__builtin_arm_ldaex ||
+ BuiltinID == AArch64::BI__builtin_arm_strex ||
+ BuiltinID == AArch64::BI__builtin_arm_stlex) {
+ return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 1) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 4, 0, 1);
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
+ BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
+ BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
+ BuiltinID == AArch64::BI__builtin_arm_wsr128)
+ return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
+
+ // Memory Tagging Extensions (MTE) Intrinsics
+ if (BuiltinID == AArch64::BI__builtin_arm_irg ||
+ BuiltinID == AArch64::BI__builtin_arm_addg ||
+ BuiltinID == AArch64::BI__builtin_arm_gmi ||
+ BuiltinID == AArch64::BI__builtin_arm_ldg ||
+ BuiltinID == AArch64::BI__builtin_arm_stg ||
+ BuiltinID == AArch64::BI__builtin_arm_subp) {
+ return BuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
+ BuiltinID == AArch64::BI__builtin_arm_rsrp ||
+ BuiltinID == AArch64::BI__builtin_arm_wsr ||
+ BuiltinID == AArch64::BI__builtin_arm_wsrp)
+ return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
+
+ // Only check the valid encoding range. Any constant in this range would be
+ // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
+ // an exception for incorrect registers. This matches MSVC behavior.
+ if (BuiltinID == AArch64::BI_ReadStatusReg ||
+ BuiltinID == AArch64::BI_WriteStatusReg)
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);
+
+ if (BuiltinID == AArch64::BI__getReg)
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 31);
+
+ if (BuiltinID == AArch64::BI__break)
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 0xffff);
+
+ if (BuiltinID == AArch64::BI__hlt)
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 0xffff);
+
+ if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
+ return true;
+
+ if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
+ return true;
+
+ if (CheckSMEBuiltinFunctionCall(BuiltinID, TheCall))
+ return true;
+
+ // For intrinsics which take an immediate value as part of the instruction,
+ // range check them here.
+ unsigned i = 0, l = 0, u = 0;
+ switch (BuiltinID) {
+ default: return false;
+ case AArch64::BI__builtin_arm_dmb:
+ case AArch64::BI__builtin_arm_dsb:
+ case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
+ case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
+ }
+
+ return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u + l);
+}
+
+namespace {
+struct IntrinToName {
+ uint32_t Id;
+ int32_t FullName;
+ int32_t ShortName;
+};
+} // unnamed namespace
+
+static bool BuiltinAliasValid(unsigned BuiltinID, StringRef AliasName,
+ ArrayRef<IntrinToName> Map,
+ const char *IntrinNames) {
+ AliasName.consume_front("__arm_");
+ const IntrinToName *It =
+ llvm::lower_bound(Map, BuiltinID, [](const IntrinToName &L, unsigned Id) {
+ return L.Id < Id;
+ });
+ if (It == Map.end() || It->Id != BuiltinID)
+ return false;
+ StringRef FullName(&IntrinNames[It->FullName]);
+ if (AliasName == FullName)
+ return true;
+ if (It->ShortName == -1)
+ return false;
+ StringRef ShortName(&IntrinNames[It->ShortName]);
+ return AliasName == ShortName;
+}
+
+bool SemaARM::MveAliasValid(unsigned BuiltinID, StringRef AliasName) {
+#include "clang/Basic/arm_mve_builtin_aliases.inc"
+ // The included file defines:
+ // - ArrayRef<IntrinToName> Map
+ // - const char IntrinNames[]
+ return BuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
+}
+
+bool SemaARM::CdeAliasValid(unsigned BuiltinID, StringRef AliasName) {
+#include "clang/Basic/arm_cde_builtin_aliases.inc"
+ return BuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
+}
+
+bool SemaARM::SveAliasValid(unsigned BuiltinID, StringRef AliasName) {
+ if (getASTContext().BuiltinInfo.isAuxBuiltinID(BuiltinID))
+ BuiltinID = getASTContext().BuiltinInfo.getAuxBuiltinID(BuiltinID);
+ return BuiltinID >= AArch64::FirstSVEBuiltin &&
+ BuiltinID <= AArch64::LastSVEBuiltin;
+}
+
+bool SemaARM::SmeAliasValid(unsigned BuiltinID, StringRef AliasName) {
+ if (getASTContext().BuiltinInfo.isAuxBuiltinID(BuiltinID))
+ BuiltinID = getASTContext().BuiltinInfo.getAuxBuiltinID(BuiltinID);
+ return BuiltinID >= AArch64::FirstSMEBuiltin &&
+ BuiltinID <= AArch64::LastSMEBuiltin;
+}
+
+void SemaARM::handleBuiltinAliasAttr(Decl *D, const ParsedAttr &AL) {
+ ASTContext &Context = getASTContext();
+ if (!AL.isArgIdent(0)) {
+ Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
+ << AL << 1 << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ IdentifierInfo *Ident = AL.getArgAsIdent(0)->Ident;
+ unsigned BuiltinID = Ident->getBuiltinID();
+ StringRef AliasName = cast<FunctionDecl>(D)->getIdentifier()->getName();
+
+ bool IsAArch64 = Context.getTargetInfo().getTriple().isAArch64();
+ if ((IsAArch64 && !SveAliasValid(BuiltinID, AliasName) &&
+ !SmeAliasValid(BuiltinID, AliasName)) ||
+ (!IsAArch64 && !MveAliasValid(BuiltinID, AliasName) &&
+ !CdeAliasValid(BuiltinID, AliasName))) {
+ Diag(AL.getLoc(), diag::err_attribute_arm_builtin_alias);
+ return;
+ }
+
+ D->addAttr(::new (Context) ArmBuiltinAliasAttr(Context, AL, Ident));
+}
+
+static bool checkNewAttrMutualExclusion(
+ Sema &S, const ParsedAttr &AL, const FunctionProtoType *FPT,
+ FunctionType::ArmStateValue CurrentState, StringRef StateName) {
+ auto CheckForIncompatibleAttr =
+ [&](FunctionType::ArmStateValue IncompatibleState,
+ StringRef IncompatibleStateName) {
+ if (CurrentState == IncompatibleState) {
+ S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)
+ << (std::string("'__arm_new(\"") + StateName.str() + "\")'")
+ << (std::string("'") + IncompatibleStateName.str() + "(\"" +
+ StateName.str() + "\")'")
+ << true;
+ AL.setInvalid();
+ }
+ };
+
+ CheckForIncompatibleAttr(FunctionType::ARM_In, "__arm_in");
+ CheckForIncompatibleAttr(FunctionType::ARM_Out, "__arm_out");
+ CheckForIncompatibleAttr(FunctionType::ARM_InOut, "__arm_inout");
+ CheckForIncompatibleAttr(FunctionType::ARM_Preserves, "__arm_preserves");
+ return AL.isInvalid();
+}
+
+void SemaARM::handleNewAttr(Decl *D, const ParsedAttr &AL) {
+ if (!AL.getNumArgs()) {
+ Diag(AL.getLoc(), diag::err_missing_arm_state) << AL;
+ AL.setInvalid();
+ return;
+ }
+
+ std::vector<StringRef> NewState;
+ if (const auto *ExistingAttr = D->getAttr<ArmNewAttr>()) {
+ for (StringRef S : ExistingAttr->newArgs())
+ NewState.push_back(S);
+ }
+
+ bool HasZA = false;
+ bool HasZT0 = false;
+ for (unsigned I = 0, E = AL.getNumArgs(); I != E; ++I) {
+ StringRef StateName;
+ SourceLocation LiteralLoc;
+ if (!SemaRef.checkStringLiteralArgumentAttr(AL, I, StateName, &LiteralLoc))
+ return;
+
+ if (StateName == "za")
+ HasZA = true;
+ else if (StateName == "zt0")
+ HasZT0 = true;
+ else {
+ Diag(LiteralLoc, diag::err_unknown_arm_state) << StateName;
+ AL.setInvalid();
+ return;
+ }
+
+ if (!llvm::is_contained(NewState, StateName)) // Avoid adding duplicates.
+ NewState.push_back(StateName);
+ }
+
+ if (auto *FPT = dyn_cast<FunctionProtoType>(D->getFunctionType())) {
+ FunctionType::ArmStateValue ZAState =
+ FunctionType::getArmZAState(FPT->getAArch64SMEAttributes());
+ if (HasZA && ZAState != FunctionType::ARM_None &&
+ checkNewAttrMutualExclusion(SemaRef, AL, FPT, ZAState, "za"))
+ return;
+ FunctionType::ArmStateValue ZT0State =
+ FunctionType::getArmZT0State(FPT->getAArch64SMEAttributes());
+ if (HasZT0 && ZT0State != FunctionType::ARM_None &&
+ checkNewAttrMutualExclusion(SemaRef, AL, FPT, ZT0State, "zt0"))
+ return;
+ }
+
+ D->dropAttr<ArmNewAttr>();
+ D->addAttr(::new (getASTContext()) ArmNewAttr(
+ getASTContext(), AL, NewState.data(), NewState.size()));
+}
+
+void SemaARM::handleCmseNSEntryAttr(Decl *D, const ParsedAttr &AL) {
+ if (getLangOpts().CPlusPlus && !D->getDeclContext()->isExternCContext()) {
+ Diag(AL.getLoc(), diag::err_attribute_not_clinkage) << AL;
+ return;
+ }
+
+ const auto *FD = cast<FunctionDecl>(D);
+ if (!FD->isExternallyVisible()) {
+ Diag(AL.getLoc(), diag::warn_attribute_cmse_entry_static);
+ return;
+ }
+
+ D->addAttr(::new (getASTContext()) CmseNSEntryAttr(getASTContext(), AL));
+}
+
+void SemaARM::handleInterruptAttr(Decl *D, const ParsedAttr &AL) {
+ // Check the attribute arguments.
+ if (AL.getNumArgs() > 1) {
+ Diag(AL.getLoc(), diag::err_attribute_too_many_arguments) << AL << 1;
+ return;
+ }
+
+ StringRef Str;
+ SourceLocation ArgLoc;
+
+ if (AL.getNumArgs() == 0)
+ Str = "";
+ else if (!SemaRef.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
+ return;
+
+ ARMInterruptAttr::InterruptType Kind;
+ if (!ARMInterruptAttr::ConvertStrToInterruptType(Str, Kind)) {
+ Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
+ << AL << Str << ArgLoc;
+ return;
+ }
+
+ const TargetInfo &TI = getASTContext().getTargetInfo();
+ if (TI.hasFeature("vfp"))
+ Diag(D->getLocation(), diag::warn_arm_interrupt_vfp_clobber);
+
+ D->addAttr(::new (getASTContext())
+ ARMInterruptAttr(getASTContext(), AL, Kind));
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaAVR.cpp b/contrib/llvm-project/clang/lib/Sema/SemaAVR.cpp
new file mode 100644
index 000000000000..47368780b620
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaAVR.cpp
@@ -0,0 +1,49 @@
+//===------ SemaAVR.cpp ---------- AVR target-specific routines -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to AVR.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaAVR.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Sema/Attr.h"
+#include "clang/Sema/ParsedAttr.h"
+#include "clang/Sema/Sema.h"
+
+namespace clang {
+SemaAVR::SemaAVR(Sema &S) : SemaBase(S) {}
+
+void SemaAVR::handleInterruptAttr(Decl *D, const ParsedAttr &AL) {
+ if (!isFuncOrMethodForAttrSubject(D)) {
+ Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
+ return;
+ }
+
+ if (!AL.checkExactlyNumArgs(SemaRef, 0))
+ return;
+
+ handleSimpleAttribute<AVRInterruptAttr>(*this, D, AL);
+}
+
+void SemaAVR::handleSignalAttr(Decl *D, const ParsedAttr &AL) {
+ if (!isFuncOrMethodForAttrSubject(D)) {
+ Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
+ return;
+ }
+
+ if (!AL.checkExactlyNumArgs(SemaRef, 0))
+ return;
+
+ handleSimpleAttribute<AVRSignalAttr>(*this, D, AL);
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaAccess.cpp b/contrib/llvm-project/clang/lib/Sema/SemaAccess.cpp
index 4af3c0f30a8e..df6edb21a50d 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaAccess.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaAccess.cpp
@@ -10,8 +10,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Basic/Specifiers.h"
-#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
@@ -19,9 +17,12 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DependentDiagnostic.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/Basic/Specifiers.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/SemaInternal.h"
+#include "llvm/ADT/STLForwardCompat.h"
using namespace clang;
using namespace sema;
@@ -33,9 +34,6 @@ enum AccessResult {
AR_dependent
};
-/// SetMemberAccessSpecifier - Set the access specifier of a member.
-/// Returns true on error (when the previous member decl access specifier
-/// is different from the new member decl access specifier).
bool Sema::SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS) {
@@ -1472,12 +1470,32 @@ static Sema::AccessResult CheckAccess(Sema &S, SourceLocation Loc,
// specifier, like this:
// A::private_type A::foo() { ... }
//
- // Or we might be parsing something that will turn out to be a friend:
- // void foo(A::private_type);
- // void B::foo(A::private_type);
+ // friend declaration should not be delayed because it may lead to incorrect
+ // redeclaration chain, such as:
+ // class D {
+ // class E{
+ // class F{};
+ // friend void foo(D::E::F& q);
+ // };
+ // friend void foo(D::E::F& q);
+ // };
if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
- S.DelayedDiagnostics.add(DelayedDiagnostic::makeAccess(Loc, Entity));
- return Sema::AR_delayed;
+ // [class.friend]p9:
+ // A member nominated by a friend declaration shall be accessible in the
+ // class containing the friend declaration. The meaning of the friend
+ // declaration is the same whether the friend declaration appears in the
+ // private, protected, or public ([class.mem]) portion of the class
+ // member-specification.
+ Scope *TS = S.getCurScope();
+ bool IsFriendDeclaration = false;
+ while (TS && !IsFriendDeclaration) {
+ IsFriendDeclaration = TS->isFriendScope();
+ TS = TS->getParent();
+ }
+ if (!IsFriendDeclaration) {
+ S.DelayedDiagnostics.add(DelayedDiagnostic::makeAccess(Loc, Entity));
+ return Sema::AR_delayed;
+ }
}
EffectiveContext EC(S.CurContext);
@@ -1569,8 +1587,6 @@ Sema::AccessResult Sema::CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
return CheckAccess(*this, E->getNameLoc(), Entity);
}
-/// Perform access-control checking on a previously-unresolved member
-/// access which has now been resolved to a member.
Sema::AccessResult Sema::CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair Found) {
if (!getLangOpts().AccessControl ||
@@ -1588,8 +1604,6 @@ Sema::AccessResult Sema::CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
return CheckAccess(*this, E->getMemberLoc(), Entity);
}
-/// Is the given member accessible for the purposes of deciding whether to
-/// define a special member function as deleted?
bool Sema::isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType,
@@ -1637,7 +1651,6 @@ Sema::AccessResult Sema::CheckDestructorAccess(SourceLocation Loc,
return CheckAccess(*this, Loc, Entity);
}
-/// Checks access to a constructor.
Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc,
CXXConstructorDecl *Constructor,
DeclAccessPair Found,
@@ -1658,21 +1671,24 @@ Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc,
case InitializedEntity::EK_Base:
PD = PDiag(diag::err_access_base_ctor);
PD << Entity.isInheritedVirtualBase()
- << Entity.getBaseSpecifier()->getType() << getSpecialMember(Constructor);
+ << Entity.getBaseSpecifier()->getType()
+ << llvm::to_underlying(getSpecialMember(Constructor));
break;
case InitializedEntity::EK_Member:
case InitializedEntity::EK_ParenAggInitMember: {
const FieldDecl *Field = cast<FieldDecl>(Entity.getDecl());
PD = PDiag(diag::err_access_field_ctor);
- PD << Field->getType() << getSpecialMember(Constructor);
+ PD << Field->getType()
+ << llvm::to_underlying(getSpecialMember(Constructor));
break;
}
case InitializedEntity::EK_LambdaCapture: {
StringRef VarName = Entity.getCapturedVarName();
PD = PDiag(diag::err_access_lambda_capture);
- PD << VarName << Entity.getType() << getSpecialMember(Constructor);
+ PD << VarName << Entity.getType()
+ << llvm::to_underlying(getSpecialMember(Constructor));
break;
}
@@ -1681,7 +1697,6 @@ Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc,
return CheckConstructorAccess(UseLoc, Constructor, Found, Entity, PD);
}
-/// Checks access to a constructor.
Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc,
CXXConstructorDecl *Constructor,
DeclAccessPair Found,
@@ -1723,7 +1738,6 @@ Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc,
return CheckAccess(*this, UseLoc, AccessEntity);
}
-/// Checks access to an overloaded operator new or delete.
Sema::AccessResult Sema::CheckAllocationAccess(SourceLocation OpLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
@@ -1743,7 +1757,6 @@ Sema::AccessResult Sema::CheckAllocationAccess(SourceLocation OpLoc,
return CheckAccess(*this, OpLoc, Entity);
}
-/// Checks access to a member.
Sema::AccessResult Sema::CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found) {
@@ -1758,7 +1771,6 @@ Sema::AccessResult Sema::CheckMemberAccess(SourceLocation UseLoc,
return CheckAccess(*this, UseLoc, Entity);
}
-/// Checks implicit access to a member in a structured binding.
Sema::AccessResult
Sema::CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
@@ -1791,8 +1803,6 @@ Sema::AccessResult Sema::CheckMemberOperatorAccess(SourceLocation OpLoc,
return CheckAccess(*this, OpLoc, Entity);
}
-/// Checks access to an overloaded member operator, including
-/// conversion operators.
Sema::AccessResult Sema::CheckMemberOperatorAccess(SourceLocation OpLoc,
Expr *ObjectExpr,
Expr *ArgExpr,
@@ -1815,7 +1825,6 @@ Sema::AccessResult Sema::CheckMemberOperatorAccess(SourceLocation OpLoc,
return CheckMemberOperatorAccess(OpLoc, ObjectExpr, R, FoundDecl);
}
-/// Checks access to the target of a friend declaration.
Sema::AccessResult Sema::CheckFriendAccess(NamedDecl *target) {
assert(isa<CXXMethodDecl>(target->getAsFunction()));
@@ -1865,12 +1874,6 @@ Sema::AccessResult Sema::CheckAddressOfMemberAccess(Expr *OvlExpr,
return CheckAccess(*this, Ovl->getNameLoc(), Entity);
}
-/// Checks access for a hierarchy conversion.
-///
-/// \param ForceCheck true if this check should be performed even if access
-/// control is disabled; some things rely on this for semantics
-/// \param ForceUnprivileged true if this check should proceed as if the
-/// context had no special privileges
Sema::AccessResult Sema::CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base,
QualType Derived,
@@ -1905,7 +1908,6 @@ Sema::AccessResult Sema::CheckBaseClassAccess(SourceLocation AccessLoc,
return CheckAccess(*this, AccessLoc, Entity);
}
-/// Checks access to all the declarations in the given result set.
void Sema::CheckLookupAccess(const LookupResult &R) {
assert(getLangOpts().AccessControl
&& "performing access check without access control");
@@ -1922,23 +1924,6 @@ void Sema::CheckLookupAccess(const LookupResult &R) {
}
}
-/// Checks access to Target from the given class. The check will take access
-/// specifiers into account, but no member access expressions and such.
-///
-/// \param Target the declaration to check if it can be accessed
-/// \param NamingClass the class in which the lookup was started.
-/// \param BaseType type of the left side of member access expression.
-/// \p BaseType and \p NamingClass are used for C++ access control.
-/// Depending on the lookup case, they should be set to the following:
-/// - lhs.target (member access without a qualifier):
-/// \p BaseType and \p NamingClass are both the type of 'lhs'.
-/// - lhs.X::target (member access with a qualifier):
-/// BaseType is the type of 'lhs', NamingClass is 'X'
-/// - X::target (qualified lookup without member access):
-/// BaseType is null, NamingClass is 'X'.
-/// - target (unqualified lookup).
-/// BaseType is null, NamingClass is the parent class of 'target'.
-/// \return true if the Target is accessible from the Class, false otherwise.
bool Sema::IsSimplyAccessible(NamedDecl *Target, CXXRecordDecl *NamingClass,
QualType BaseType) {
// Perform the C++ accessibility checks first.
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
index 0dcf42e48997..b0c239678d0b 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
@@ -117,7 +117,7 @@ void Sema::inferGslPointerAttribute(NamedDecl *ND,
if (!Parent)
return;
- static llvm::StringSet<> Containers{
+ static const llvm::StringSet<> Containers{
"array",
"basic_string",
"deque",
@@ -137,9 +137,9 @@ void Sema::inferGslPointerAttribute(NamedDecl *ND,
"unordered_multimap",
};
- static llvm::StringSet<> Iterators{"iterator", "const_iterator",
- "reverse_iterator",
- "const_reverse_iterator"};
+ static const llvm::StringSet<> Iterators{"iterator", "const_iterator",
+ "reverse_iterator",
+ "const_reverse_iterator"};
if (Parent->isInStdNamespace() && Iterators.count(ND->getName()) &&
Containers.count(Parent->getName()))
@@ -165,7 +165,7 @@ void Sema::inferGslPointerAttribute(TypedefNameDecl *TD) {
}
void Sema::inferGslOwnerPointerAttribute(CXXRecordDecl *Record) {
- static llvm::StringSet<> StdOwners{
+ static const llvm::StringSet<> StdOwners{
"any",
"array",
"basic_regex",
@@ -189,10 +189,11 @@ void Sema::inferGslOwnerPointerAttribute(CXXRecordDecl *Record) {
"unordered_multimap",
"variant",
};
- static llvm::StringSet<> StdPointers{
+ static const llvm::StringSet<> StdPointers{
"basic_string_view",
"reference_wrapper",
"regex_iterator",
+ "span",
};
if (!Record->getIdentifier())
@@ -215,6 +216,18 @@ void Sema::inferGslOwnerPointerAttribute(CXXRecordDecl *Record) {
inferGslPointerAttribute(Record, Record);
}
+void Sema::inferNullableClassAttribute(CXXRecordDecl *CRD) {
+ static const llvm::StringSet<> Nullable{
+ "auto_ptr", "shared_ptr", "unique_ptr", "exception_ptr",
+ "coroutine_handle", "function", "move_only_function",
+ };
+
+ if (CRD->isInStdNamespace() && Nullable.count(CRD->getName()) &&
+ !CRD->hasAttr<TypeNullableAttr>())
+ for (Decl *Redecl : CRD->redecls())
+ Redecl->addAttr(TypeNullableAttr::CreateImplicit(Context));
+}
+
void Sema::ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc) {
PragmaMsStackAction Action = Sema::PSK_Reset;
@@ -825,7 +838,7 @@ void Sema::ActOnPragmaUnused(const Token &IdTok, Scope *curScope,
IdentifierInfo *Name = IdTok.getIdentifierInfo();
LookupResult Lookup(*this, Name, IdTok.getLocation(), LookupOrdinaryName);
- LookupParsedName(Lookup, curScope, nullptr, true);
+ LookupName(Lookup, curScope, /*AllowBuiltinCreation=*/true);
if (Lookup.empty()) {
Diag(PragmaLoc, diag::warn_pragma_unused_undeclared_var)
@@ -848,22 +861,6 @@ void Sema::ActOnPragmaUnused(const Token &IdTok, Scope *curScope,
UnusedAttr::GNU_unused));
}
-void Sema::AddCFAuditedAttribute(Decl *D) {
- IdentifierInfo *Ident;
- SourceLocation Loc;
- std::tie(Ident, Loc) = PP.getPragmaARCCFCodeAuditedInfo();
- if (!Loc.isValid()) return;
-
- // Don't add a redundant or conflicting attribute.
- if (D->hasAttr<CFAuditedTransferAttr>() ||
- D->hasAttr<CFUnknownTransferAttr>())
- return;
-
- AttributeCommonInfo Info(Ident, SourceRange(Loc),
- AttributeCommonInfo::Form::Pragma());
- D->addAttr(CFAuditedTransferAttr::CreateImplicit(Context, Info));
-}
-
namespace {
std::optional<attr::SubjectMatchRule>
@@ -1235,7 +1232,6 @@ void Sema::AddPushedVisibilityAttribute(Decl *D) {
D->addAttr(VisibilityAttr::CreateImplicit(Context, type, loc));
}
-/// FreeVisContext - Deallocate and null out VisContext.
void Sema::FreeVisContext() {
delete static_cast<VisStack*>(VisContext);
VisContext = nullptr;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaAvailability.cpp b/contrib/llvm-project/clang/lib/Sema/SemaAvailability.cpp
index 846a31a79673..17566c226ec8 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaAvailability.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaAvailability.cpp
@@ -12,21 +12,45 @@
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaObjC.h"
+#include "llvm/ADT/StringRef.h"
#include <optional>
using namespace clang;
using namespace sema;
+static bool hasMatchingEnvironmentOrNone(const ASTContext &Context,
+ const AvailabilityAttr *AA) {
+ IdentifierInfo *IIEnvironment = AA->getEnvironment();
+ auto Environment = Context.getTargetInfo().getTriple().getEnvironment();
+ if (!IIEnvironment || Environment == llvm::Triple::UnknownEnvironment)
+ return true;
+
+ llvm::Triple::EnvironmentType ET =
+ AvailabilityAttr::getEnvironmentType(IIEnvironment->getName());
+ return Environment == ET;
+}
+
static const AvailabilityAttr *getAttrForPlatform(ASTContext &Context,
const Decl *D) {
+ AvailabilityAttr const *PartialMatch = nullptr;
// Check each AvailabilityAttr to find the one for this platform.
+ // For multiple attributes with the same platform try to find one for this
+ // environment.
+ // The attribute is always on the FunctionDecl, not on the
+ // FunctionTemplateDecl.
+ if (const auto *FTD = dyn_cast<FunctionTemplateDecl>(D))
+ D = FTD->getTemplatedDecl();
for (const auto *A : D->attrs()) {
if (const auto *Avail = dyn_cast<AvailabilityAttr>(A)) {
// FIXME: this is copied from CheckAvailability. We should try to
@@ -45,11 +69,15 @@ static const AvailabilityAttr *getAttrForPlatform(ASTContext &Context,
StringRef TargetPlatform = Context.getTargetInfo().getPlatformName();
// Match the platform name.
- if (RealizedPlatform == TargetPlatform)
- return Avail;
+ if (RealizedPlatform == TargetPlatform) {
+ // Find the best matching attribute for this environment
+ if (hasMatchingEnvironmentOrNone(Context, Avail))
+ return Avail;
+ PartialMatch = Avail;
+ }
}
}
- return nullptr;
+ return PartialMatch;
}
/// The diagnostic we should emit for \c D, and the declaration that
@@ -79,6 +107,12 @@ ShouldDiagnoseAvailabilityOfDecl(Sema &S, const NamedDecl *D,
break;
}
+ // For alias templates, get the underlying declaration.
+ if (const auto *ADecl = dyn_cast<TypeAliasTemplateDecl>(D)) {
+ D = ADecl->getTemplatedDecl();
+ Result = D->getAvailability(Message);
+ }
+
// Forward class declarations get their attributes from their definition.
if (const auto *IDecl = dyn_cast<ObjCInterfaceDecl>(D)) {
if (IDecl->getDefinition()) {
@@ -98,11 +132,11 @@ ShouldDiagnoseAvailabilityOfDecl(Sema &S, const NamedDecl *D,
// For +new, infer availability from -init.
if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
- if (S.NSAPIObj && ClassReceiver) {
+ if (S.ObjC().NSAPIObj && ClassReceiver) {
ObjCMethodDecl *Init = ClassReceiver->lookupInstanceMethod(
- S.NSAPIObj->getInitSelector());
+ S.ObjC().NSAPIObj->getInitSelector());
if (Init && Result == AR_Available && MD->isClassMethod() &&
- MD->getSelector() == S.NSAPIObj->getNewSelector() &&
+ MD->getSelector() == S.ObjC().NSAPIObj->getNewSelector() &&
MD->definedInNSObject(S.getASTContext())) {
Result = Init->getAvailability(Message);
D = Init;
@@ -117,10 +151,9 @@ ShouldDiagnoseAvailabilityOfDecl(Sema &S, const NamedDecl *D,
/// whether we should emit a diagnostic for \c K and \c DeclVersion in
/// the context of \c Ctx. For example, we should emit an unavailable diagnostic
/// in a deprecated context, but not the other way around.
-static bool
-ShouldDiagnoseAvailabilityInContext(Sema &S, AvailabilityResult K,
- VersionTuple DeclVersion, Decl *Ctx,
- const NamedDecl *OffendingDecl) {
+static bool ShouldDiagnoseAvailabilityInContext(
+ Sema &S, AvailabilityResult K, VersionTuple DeclVersion,
+ const IdentifierInfo *DeclEnv, Decl *Ctx, const NamedDecl *OffendingDecl) {
assert(K != AR_Available && "Expected an unavailable declaration here!");
// If this was defined using CF_OPTIONS, etc. then ignore the diagnostic.
@@ -135,11 +168,26 @@ ShouldDiagnoseAvailabilityInContext(Sema &S, AvailabilityResult K,
}
}
+ // In HLSL, skip emitting diagnostic if the diagnostic mode is not set to
+ // strict (-fhlsl-strict-availability), or if the target is library and the
+ // availability is restricted to a specific environment/shader stage.
+ // For libraries the availability will be checked later in
+ // DiagnoseHLSLAvailability class once where the specific environment/shader
+ // stage of the caller is known.
+ if (S.getLangOpts().HLSL) {
+ if (!S.getLangOpts().HLSLStrictAvailability ||
+ (DeclEnv != nullptr &&
+ S.getASTContext().getTargetInfo().getTriple().getEnvironment() ==
+ llvm::Triple::EnvironmentType::Library))
+ return false;
+ }
+
// Checks if we should emit the availability diagnostic in the context of C.
auto CheckContext = [&](const Decl *C) {
if (K == AR_NotYetIntroduced) {
if (const AvailabilityAttr *AA = getAttrForPlatform(S.Context, C))
- if (AA->getIntroduced() >= DeclVersion)
+ if (AA->getIntroduced() >= DeclVersion &&
+ AA->getEnvironment() == DeclEnv)
return true;
} else if (K == AR_Deprecated) {
if (C->isDeprecated())
@@ -187,13 +235,16 @@ ShouldDiagnoseAvailabilityInContext(Sema &S, AvailabilityResult K,
return true;
}
-static bool
-shouldDiagnoseAvailabilityByDefault(const ASTContext &Context,
- const VersionTuple &DeploymentVersion,
- const VersionTuple &DeclVersion) {
+static unsigned getAvailabilityDiagnosticKind(
+ const ASTContext &Context, const VersionTuple &DeploymentVersion,
+ const VersionTuple &DeclVersion, bool HasMatchingEnv) {
const auto &Triple = Context.getTargetInfo().getTriple();
VersionTuple ForceAvailabilityFromVersion;
switch (Triple.getOS()) {
+ // For iOS, emit the diagnostic even if -Wunguarded-availability is
+ // not specified for deployment targets >= to iOS 11 or equivalent or
+ // for declarations that were introduced in iOS 11 (macOS 10.13, ...) or
+ // later.
case llvm::Triple::IOS:
case llvm::Triple::TvOS:
ForceAvailabilityFromVersion = VersionTuple(/*Major=*/11);
@@ -205,15 +256,26 @@ shouldDiagnoseAvailabilityByDefault(const ASTContext &Context,
case llvm::Triple::MacOSX:
ForceAvailabilityFromVersion = VersionTuple(/*Major=*/10, /*Minor=*/13);
break;
+ // For HLSL, use diagnostic from HLSLAvailability group which
+ // are reported as errors by default and in strict diagnostic mode
+ // (-fhlsl-strict-availability) and as warnings in relaxed diagnostic
+ // mode (-Wno-error=hlsl-availability)
case llvm::Triple::ShaderModel:
- // Always enable availability diagnostics for shader models.
- return true;
+ return HasMatchingEnv ? diag::warn_hlsl_availability
+ : diag::warn_hlsl_availability_unavailable;
default:
- // New targets should always warn about availability.
- return Triple.getVendor() == llvm::Triple::Apple;
+ // New Apple targets should always warn about availability.
+ ForceAvailabilityFromVersion =
+ (Triple.getVendor() == llvm::Triple::Apple)
+ ? VersionTuple(/*Major=*/0, 0)
+ : VersionTuple(/*Major=*/(unsigned)-1, (unsigned)-1);
}
- return DeploymentVersion >= ForceAvailabilityFromVersion ||
- DeclVersion >= ForceAvailabilityFromVersion;
+ if (DeploymentVersion >= ForceAvailabilityFromVersion ||
+ DeclVersion >= ForceAvailabilityFromVersion)
+ return HasMatchingEnv ? diag::warn_unguarded_availability_new
+ : diag::warn_unguarded_availability_unavailable_new;
+ return HasMatchingEnv ? diag::warn_unguarded_availability
+ : diag::warn_unguarded_availability_unavailable;
}
static NamedDecl *findEnclosingDeclToAnnotate(Decl *OrigCtx) {
@@ -343,10 +405,14 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
unsigned available_here_select_kind;
VersionTuple DeclVersion;
- if (const AvailabilityAttr *AA = getAttrForPlatform(S.Context, OffendingDecl))
+ const AvailabilityAttr *AA = getAttrForPlatform(S.Context, OffendingDecl);
+ const IdentifierInfo *IIEnv = nullptr;
+ if (AA) {
DeclVersion = AA->getIntroduced();
+ IIEnv = AA->getEnvironment();
+ }
- if (!ShouldDiagnoseAvailabilityInContext(S, K, DeclVersion, Ctx,
+ if (!ShouldDiagnoseAvailabilityInContext(S, K, DeclVersion, IIEnv, Ctx,
OffendingDecl))
return;
@@ -354,8 +420,7 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
// The declaration can have multiple availability attributes, we are looking
// at one of them.
- const AvailabilityAttr *A = getAttrForPlatform(S.Context, OffendingDecl);
- if (A && A->isInherited()) {
+ if (AA && AA->isInherited()) {
for (const Decl *Redecl = OffendingDecl->getMostRecentDecl(); Redecl;
Redecl = Redecl->getPreviousDecl()) {
const AvailabilityAttr *AForRedecl =
@@ -375,26 +440,38 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
// not specified for deployment targets >= to iOS 11 or equivalent or
// for declarations that were introduced in iOS 11 (macOS 10.13, ...) or
// later.
- const AvailabilityAttr *AA =
- getAttrForPlatform(S.getASTContext(), OffendingDecl);
+ assert(AA != nullptr && "expecting valid availability attribute");
VersionTuple Introduced = AA->getIntroduced();
-
- bool UseNewWarning = shouldDiagnoseAvailabilityByDefault(
+ bool EnvironmentMatchesOrNone =
+ hasMatchingEnvironmentOrNone(S.getASTContext(), AA);
+
+ const TargetInfo &TI = S.getASTContext().getTargetInfo();
+ std::string PlatformName(
+ AvailabilityAttr::getPrettyPlatformName(TI.getPlatformName()));
+ llvm::StringRef TargetEnvironment(
+ llvm::Triple::getEnvironmentTypeName(TI.getTriple().getEnvironment()));
+ llvm::StringRef AttrEnvironment =
+ AA->getEnvironment() ? AA->getEnvironment()->getName() : "";
+ bool UseEnvironment =
+ (!AttrEnvironment.empty() && !TargetEnvironment.empty());
+
+ unsigned DiagKind = getAvailabilityDiagnosticKind(
S.Context, S.Context.getTargetInfo().getPlatformMinVersion(),
- Introduced);
- unsigned Warning = UseNewWarning ? diag::warn_unguarded_availability_new
- : diag::warn_unguarded_availability;
-
- std::string PlatformName(AvailabilityAttr::getPrettyPlatformName(
- S.getASTContext().getTargetInfo().getPlatformName()));
+ Introduced, EnvironmentMatchesOrNone);
- S.Diag(Loc, Warning) << OffendingDecl << PlatformName
- << Introduced.getAsString();
+ S.Diag(Loc, DiagKind) << OffendingDecl << PlatformName
+ << Introduced.getAsString() << UseEnvironment
+ << TargetEnvironment;
S.Diag(OffendingDecl->getLocation(),
diag::note_partial_availability_specified_here)
<< OffendingDecl << PlatformName << Introduced.getAsString()
- << S.Context.getTargetInfo().getPlatformMinVersion().getAsString();
+ << S.Context.getTargetInfo().getPlatformMinVersion().getAsString()
+ << UseEnvironment << AttrEnvironment << TargetEnvironment;
+
+ // Do not offer to silence the warning or fixits for HLSL
+ if (S.getLangOpts().HLSL)
+ return;
if (const auto *Enclosing = findEnclosingDeclToAnnotate(Ctx)) {
if (const auto *TD = dyn_cast<TagDecl>(Enclosing))
@@ -771,40 +848,48 @@ void DiagnoseUnguardedAvailability::DiagnoseDeclAvailability(
const AvailabilityAttr *AA =
getAttrForPlatform(SemaRef.getASTContext(), OffendingDecl);
+ assert(AA != nullptr && "expecting valid availability attribute");
+ bool EnvironmentMatchesOrNone =
+ hasMatchingEnvironmentOrNone(SemaRef.getASTContext(), AA);
VersionTuple Introduced = AA->getIntroduced();
- if (AvailabilityStack.back() >= Introduced)
+ if (EnvironmentMatchesOrNone && AvailabilityStack.back() >= Introduced)
return;
// If the context of this function is less available than D, we should not
// emit a diagnostic.
- if (!ShouldDiagnoseAvailabilityInContext(SemaRef, Result, Introduced, Ctx,
+ if (!ShouldDiagnoseAvailabilityInContext(SemaRef, Result, Introduced,
+ AA->getEnvironment(), Ctx,
OffendingDecl))
return;
- // We would like to emit the diagnostic even if -Wunguarded-availability is
- // not specified for deployment targets >= to iOS 11 or equivalent or
- // for declarations that were introduced in iOS 11 (macOS 10.13, ...) or
- // later.
- unsigned DiagKind =
- shouldDiagnoseAvailabilityByDefault(
- SemaRef.Context,
- SemaRef.Context.getTargetInfo().getPlatformMinVersion(), Introduced)
- ? diag::warn_unguarded_availability_new
- : diag::warn_unguarded_availability;
+ const TargetInfo &TI = SemaRef.getASTContext().getTargetInfo();
+ std::string PlatformName(
+ AvailabilityAttr::getPrettyPlatformName(TI.getPlatformName()));
+ llvm::StringRef TargetEnvironment(TI.getTriple().getEnvironmentName());
+ llvm::StringRef AttrEnvironment =
+ AA->getEnvironment() ? AA->getEnvironment()->getName() : "";
+ bool UseEnvironment =
+ (!AttrEnvironment.empty() && !TargetEnvironment.empty());
- std::string PlatformName(AvailabilityAttr::getPrettyPlatformName(
- SemaRef.getASTContext().getTargetInfo().getPlatformName()));
+ unsigned DiagKind = getAvailabilityDiagnosticKind(
+ SemaRef.Context,
+ SemaRef.Context.getTargetInfo().getPlatformMinVersion(), Introduced,
+ EnvironmentMatchesOrNone);
SemaRef.Diag(Range.getBegin(), DiagKind)
- << Range << D << PlatformName << Introduced.getAsString();
+ << Range << D << PlatformName << Introduced.getAsString()
+ << UseEnvironment << TargetEnvironment;
SemaRef.Diag(OffendingDecl->getLocation(),
diag::note_partial_availability_specified_here)
<< OffendingDecl << PlatformName << Introduced.getAsString()
- << SemaRef.Context.getTargetInfo()
- .getPlatformMinVersion()
- .getAsString();
+ << SemaRef.Context.getTargetInfo().getPlatformMinVersion().getAsString()
+ << UseEnvironment << AttrEnvironment << TargetEnvironment;
+
+ // Do not offer to silence the warning or fixits for HLSL
+ if (SemaRef.getLangOpts().HLSL)
+ return;
auto FixitDiag =
SemaRef.Diag(Range.getBegin(), diag::note_unguarded_available_silence)
@@ -928,11 +1013,6 @@ void Sema::DiagnoseUnguardedAvailabilityViolations(Decl *D) {
Stmt *Body = nullptr;
if (auto *FD = D->getAsFunction()) {
- // FIXME: We only examine the pattern decl for availability violations now,
- // but we should also examine instantiated templates.
- if (FD->isTemplateInstantiation())
- return;
-
Body = FD->getBody();
if (auto *CD = dyn_cast<CXXConstructorDecl>(FD))
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaBPF.cpp b/contrib/llvm-project/clang/lib/Sema/SemaBPF.cpp
new file mode 100644
index 000000000000..7c00084d62dd
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaBPF.cpp
@@ -0,0 +1,194 @@
+//===------ SemaBPF.cpp ---------- BPF target-specific routines -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to BPF.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaBPF.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/ParsedAttr.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/ADT/APSInt.h"
+#include <optional>
+
+namespace clang {
+
+SemaBPF::SemaBPF(Sema &S) : SemaBase(S) {}
+
+static bool isValidPreserveFieldInfoArg(Expr *Arg) {
+ if (Arg->getType()->getAsPlaceholderType())
+ return false;
+
+ // The first argument needs to be a record field access.
+ // If it is an array element access, we delay decision
+ // to BPF backend to check whether the access is a
+ // field access or not.
+ return (Arg->IgnoreParens()->getObjectKind() == OK_BitField ||
+ isa<MemberExpr>(Arg->IgnoreParens()) ||
+ isa<ArraySubscriptExpr>(Arg->IgnoreParens()));
+}
+
+static bool isValidPreserveTypeInfoArg(Expr *Arg) {
+ QualType ArgType = Arg->getType();
+ if (ArgType->getAsPlaceholderType())
+ return false;
+
+ // for TYPE_EXISTENCE/TYPE_MATCH/TYPE_SIZEOF reloc type
+ // format:
+ // 1. __builtin_preserve_type_info(*(<type> *)0, flag);
+ // 2. <type> var;
+ // __builtin_preserve_type_info(var, flag);
+ if (!isa<DeclRefExpr>(Arg->IgnoreParens()) &&
+ !isa<UnaryOperator>(Arg->IgnoreParens()))
+ return false;
+
+ // Typedef type.
+ if (ArgType->getAs<TypedefType>())
+ return true;
+
+ // Record type or Enum type.
+ const Type *Ty = ArgType->getUnqualifiedDesugaredType();
+ if (const auto *RT = Ty->getAs<RecordType>()) {
+ if (!RT->getDecl()->getDeclName().isEmpty())
+ return true;
+ } else if (const auto *ET = Ty->getAs<EnumType>()) {
+ if (!ET->getDecl()->getDeclName().isEmpty())
+ return true;
+ }
+
+ return false;
+}
+
+static bool isValidPreserveEnumValueArg(Expr *Arg) {
+ QualType ArgType = Arg->getType();
+ if (ArgType->getAsPlaceholderType())
+ return false;
+
+ // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type
+ // format:
+ // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>,
+ // flag);
+ const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens());
+ if (!UO)
+ return false;
+
+ const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr());
+ if (!CE)
+ return false;
+ if (CE->getCastKind() != CK_IntegralToPointer &&
+ CE->getCastKind() != CK_NullToPointer)
+ return false;
+
+ // The integer must be from an EnumConstantDecl.
+ const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr());
+ if (!DR)
+ return false;
+
+ const EnumConstantDecl *Enumerator =
+ dyn_cast<EnumConstantDecl>(DR->getDecl());
+ if (!Enumerator)
+ return false;
+
+ // The type must be EnumType.
+ const Type *Ty = ArgType->getUnqualifiedDesugaredType();
+ const auto *ET = Ty->getAs<EnumType>();
+ if (!ET)
+ return false;
+
+ // The enum value must be supported.
+ return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator);
+}
+
+bool SemaBPF::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
+ CallExpr *TheCall) {
+ assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
+ BuiltinID == BPF::BI__builtin_btf_type_id ||
+ BuiltinID == BPF::BI__builtin_preserve_type_info ||
+ BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
+ "unexpected BPF builtin");
+ ASTContext &Context = getASTContext();
+ if (SemaRef.checkArgCount(TheCall, 2))
+ return true;
+
+ // The second argument needs to be a constant int
+ Expr *Arg = TheCall->getArg(1);
+ std::optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context);
+ diag::kind kind;
+ if (!Value) {
+ if (BuiltinID == BPF::BI__builtin_preserve_field_info)
+ kind = diag::err_preserve_field_info_not_const;
+ else if (BuiltinID == BPF::BI__builtin_btf_type_id)
+ kind = diag::err_btf_type_id_not_const;
+ else if (BuiltinID == BPF::BI__builtin_preserve_type_info)
+ kind = diag::err_preserve_type_info_not_const;
+ else
+ kind = diag::err_preserve_enum_value_not_const;
+ Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange();
+ return true;
+ }
+
+ // The first argument
+ Arg = TheCall->getArg(0);
+ bool InvalidArg = false;
+ bool ReturnUnsignedInt = true;
+ if (BuiltinID == BPF::BI__builtin_preserve_field_info) {
+ if (!isValidPreserveFieldInfoArg(Arg)) {
+ InvalidArg = true;
+ kind = diag::err_preserve_field_info_not_field;
+ }
+ } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) {
+ if (!isValidPreserveTypeInfoArg(Arg)) {
+ InvalidArg = true;
+ kind = diag::err_preserve_type_info_invalid;
+ }
+ } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) {
+ if (!isValidPreserveEnumValueArg(Arg)) {
+ InvalidArg = true;
+ kind = diag::err_preserve_enum_value_invalid;
+ }
+ ReturnUnsignedInt = false;
+ } else if (BuiltinID == BPF::BI__builtin_btf_type_id) {
+ ReturnUnsignedInt = false;
+ }
+
+ if (InvalidArg) {
+ Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange();
+ return true;
+ }
+
+ if (ReturnUnsignedInt)
+ TheCall->setType(Context.UnsignedIntTy);
+ else
+ TheCall->setType(Context.UnsignedLongTy);
+ return false;
+}
+
+void SemaBPF::handlePreserveAIRecord(RecordDecl *RD) {
+ // Add preserve_access_index attribute to all fields and inner records.
+ for (auto *D : RD->decls()) {
+ if (D->hasAttr<BPFPreserveAccessIndexAttr>())
+ continue;
+
+ D->addAttr(BPFPreserveAccessIndexAttr::CreateImplicit(getASTContext()));
+ if (auto *Rec = dyn_cast<RecordDecl>(D))
+ handlePreserveAIRecord(Rec);
+ }
+}
+
+void SemaBPF::handlePreserveAccessIndexAttr(Decl *D, const ParsedAttr &AL) {
+ auto *Rec = cast<RecordDecl>(D);
+ handlePreserveAIRecord(Rec);
+ Rec->addAttr(::new (getASTContext())
+ BPFPreserveAccessIndexAttr(getASTContext(), AL));
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaBase.cpp b/contrib/llvm-project/clang/lib/Sema/SemaBase.cpp
new file mode 100644
index 000000000000..a2f12d622e8c
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaBase.cpp
@@ -0,0 +1,90 @@
+#include "clang/Sema/SemaBase.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaCUDA.h"
+
+namespace clang {
+
+SemaBase::SemaBase(Sema &S) : SemaRef(S) {}
+
+ASTContext &SemaBase::getASTContext() const { return SemaRef.Context; }
+DiagnosticsEngine &SemaBase::getDiagnostics() const { return SemaRef.Diags; }
+const LangOptions &SemaBase::getLangOpts() const { return SemaRef.LangOpts; }
+
+SemaBase::ImmediateDiagBuilder::~ImmediateDiagBuilder() {
+ // If we aren't active, there is nothing to do.
+ if (!isActive())
+ return;
+
+ // Otherwise, we need to emit the diagnostic. First clear the diagnostic
+ // builder itself so it won't emit the diagnostic in its own destructor.
+ //
+ // This seems wasteful, in that as written the DiagnosticBuilder dtor will
+ // do its own needless checks to see if the diagnostic needs to be
+ // emitted. However, because we take care to ensure that the builder
+ // objects never escape, a sufficiently smart compiler will be able to
+ // eliminate that code.
+ Clear();
+
+ // Dispatch to Sema to emit the diagnostic.
+ SemaRef.EmitCurrentDiagnostic(DiagID);
+}
+
+PartialDiagnostic SemaBase::PDiag(unsigned DiagID) {
+ return PartialDiagnostic(DiagID, SemaRef.Context.getDiagAllocator());
+}
+
+const SemaBase::SemaDiagnosticBuilder &
+operator<<(const SemaBase::SemaDiagnosticBuilder &Diag,
+ const PartialDiagnostic &PD) {
+ if (Diag.ImmediateDiag)
+ PD.Emit(*Diag.ImmediateDiag);
+ else if (Diag.PartialDiagId)
+ Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD;
+ return Diag;
+}
+
+void SemaBase::SemaDiagnosticBuilder::AddFixItHint(
+ const FixItHint &Hint) const {
+ if (ImmediateDiag)
+ ImmediateDiag->AddFixItHint(Hint);
+ else if (PartialDiagId)
+ S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint);
+}
+
+llvm::DenseMap<CanonicalDeclPtr<const FunctionDecl>,
+ std::vector<PartialDiagnosticAt>> &
+SemaBase::SemaDiagnosticBuilder::getDeviceDeferredDiags() const {
+ return S.DeviceDeferredDiags;
+}
+
+Sema::SemaDiagnosticBuilder SemaBase::Diag(SourceLocation Loc, unsigned DiagID,
+ bool DeferHint) {
+ bool IsError =
+ getDiagnostics().getDiagnosticIDs()->isDefaultMappingAsError(DiagID);
+ bool ShouldDefer = getLangOpts().CUDA && getLangOpts().GPUDeferDiag &&
+ DiagnosticIDs::isDeferrable(DiagID) &&
+ (DeferHint || SemaRef.DeferDiags || !IsError);
+ auto SetIsLastErrorImmediate = [&](bool Flag) {
+ if (IsError)
+ SemaRef.IsLastErrorImmediate = Flag;
+ };
+ if (!ShouldDefer) {
+ SetIsLastErrorImmediate(true);
+ return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc,
+ DiagID, SemaRef.getCurFunctionDecl(), SemaRef);
+ }
+
+ SemaDiagnosticBuilder DB = getLangOpts().CUDAIsDevice
+ ? SemaRef.CUDA().DiagIfDeviceCode(Loc, DiagID)
+ : SemaRef.CUDA().DiagIfHostCode(Loc, DiagID);
+ SetIsLastErrorImmediate(DB.isImmediate());
+ return DB;
+}
+
+Sema::SemaDiagnosticBuilder SemaBase::Diag(SourceLocation Loc,
+ const PartialDiagnostic &PD,
+ bool DeferHint) {
+ return Diag(Loc, PD.getDiagID(), DeferHint) << PD;
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaBoundsSafety.cpp b/contrib/llvm-project/clang/lib/Sema/SemaBoundsSafety.cpp
new file mode 100644
index 000000000000..290c82093889
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaBoundsSafety.cpp
@@ -0,0 +1,193 @@
+//===-- SemaBoundsSafety.cpp - Bounds Safety specific routines-*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares semantic analysis functions specific to `-fbounds-safety`
+/// (Bounds Safety) and also its attributes when used without `-fbounds-safety`
+/// (e.g. `counted_by`)
+///
+//===----------------------------------------------------------------------===//
+#include "clang/Sema/Sema.h"
+
+namespace clang {
+
+static CountAttributedType::DynamicCountPointerKind
+getCountAttrKind(bool CountInBytes, bool OrNull) {
+ if (CountInBytes)
+ return OrNull ? CountAttributedType::SizedByOrNull
+ : CountAttributedType::SizedBy;
+ return OrNull ? CountAttributedType::CountedByOrNull
+ : CountAttributedType::CountedBy;
+}
+
+static const RecordDecl *GetEnclosingNamedOrTopAnonRecord(const FieldDecl *FD) {
+ const auto *RD = FD->getParent();
+ // An unnamed struct is anonymous struct only if it's not instantiated.
+ // However, the struct may not be fully processed yet to determine
+ // whether it's anonymous or not. In that case, this function treats it as
+ // an anonymous struct and tries to find a named parent.
+ while (RD && (RD->isAnonymousStructOrUnion() ||
+ (!RD->isCompleteDefinition() && RD->getName().empty()))) {
+ const auto *Parent = dyn_cast<RecordDecl>(RD->getParent());
+ if (!Parent)
+ break;
+ RD = Parent;
+ }
+ return RD;
+}
+
+enum class CountedByInvalidPointeeTypeKind {
+ INCOMPLETE,
+ SIZELESS,
+ FUNCTION,
+ FLEXIBLE_ARRAY_MEMBER,
+ VALID,
+};
+
+bool Sema::CheckCountedByAttrOnField(
+ FieldDecl *FD, Expr *E,
+ llvm::SmallVectorImpl<TypeCoupledDeclRefInfo> &Decls, bool CountInBytes,
+ bool OrNull) {
+ // Check the context the attribute is used in
+
+ unsigned Kind = getCountAttrKind(CountInBytes, OrNull);
+
+ if (FD->getParent()->isUnion()) {
+ Diag(FD->getBeginLoc(), diag::err_count_attr_in_union)
+ << Kind << FD->getSourceRange();
+ return true;
+ }
+
+ const auto FieldTy = FD->getType();
+ if (FieldTy->isArrayType() && (CountInBytes || OrNull)) {
+ Diag(FD->getBeginLoc(),
+ diag::err_count_attr_not_on_ptr_or_flexible_array_member)
+ << Kind << FD->getLocation() << /* suggest counted_by */ 1;
+ return true;
+ }
+ if (!FieldTy->isArrayType() && !FieldTy->isPointerType()) {
+ Diag(FD->getBeginLoc(),
+ diag::err_count_attr_not_on_ptr_or_flexible_array_member)
+ << Kind << FD->getLocation() << /* do not suggest counted_by */ 0;
+ return true;
+ }
+
+ LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
+ LangOptions::StrictFlexArraysLevelKind::IncompleteOnly;
+ if (FieldTy->isArrayType() &&
+ !Decl::isFlexibleArrayMemberLike(getASTContext(), FD, FieldTy,
+ StrictFlexArraysLevel, true)) {
+ Diag(FD->getBeginLoc(),
+ diag::err_counted_by_attr_on_array_not_flexible_array_member)
+ << Kind << FD->getLocation();
+ return true;
+ }
+
+ CountedByInvalidPointeeTypeKind InvalidTypeKind =
+ CountedByInvalidPointeeTypeKind::VALID;
+ QualType PointeeTy;
+ int SelectPtrOrArr = 0;
+ if (FieldTy->isPointerType()) {
+ PointeeTy = FieldTy->getPointeeType();
+ SelectPtrOrArr = 0;
+ } else {
+ assert(FieldTy->isArrayType());
+ const ArrayType *AT = getASTContext().getAsArrayType(FieldTy);
+ PointeeTy = AT->getElementType();
+ SelectPtrOrArr = 1;
+ }
+ // Note: The `Decl::isFlexibleArrayMemberLike` check earlier on means
+ // only `PointeeTy->isStructureTypeWithFlexibleArrayMember()` is reachable
+ // when `FieldTy->isArrayType()`.
+ bool ShouldWarn = false;
+ if (PointeeTy->isIncompleteType() && !CountInBytes) {
+ InvalidTypeKind = CountedByInvalidPointeeTypeKind::INCOMPLETE;
+ } else if (PointeeTy->isSizelessType()) {
+ InvalidTypeKind = CountedByInvalidPointeeTypeKind::SIZELESS;
+ } else if (PointeeTy->isFunctionType()) {
+ InvalidTypeKind = CountedByInvalidPointeeTypeKind::FUNCTION;
+ } else if (PointeeTy->isStructureTypeWithFlexibleArrayMember()) {
+ if (FieldTy->isArrayType() && !getLangOpts().BoundsSafety) {
+ // This is a workaround for the Linux kernel that has already adopted
+ // `counted_by` on a FAM where the pointee is a struct with a FAM. This
+ // should be an error because computing the bounds of the array cannot be
+ // done correctly without manually traversing every struct object in the
+ // array at runtime. To allow the code to be built this error is
+ // downgraded to a warning.
+ ShouldWarn = true;
+ }
+ InvalidTypeKind = CountedByInvalidPointeeTypeKind::FLEXIBLE_ARRAY_MEMBER;
+ }
+
+ if (InvalidTypeKind != CountedByInvalidPointeeTypeKind::VALID) {
+ unsigned DiagID = ShouldWarn
+ ? diag::warn_counted_by_attr_elt_type_unknown_size
+ : diag::err_counted_by_attr_pointee_unknown_size;
+ Diag(FD->getBeginLoc(), DiagID)
+ << SelectPtrOrArr << PointeeTy << (int)InvalidTypeKind
+ << (ShouldWarn ? 1 : 0) << Kind << FD->getSourceRange();
+ return true;
+ }
+
+ // Check the expression
+
+ if (!E->getType()->isIntegerType() || E->getType()->isBooleanType()) {
+ Diag(E->getBeginLoc(), diag::err_count_attr_argument_not_integer)
+ << Kind << E->getSourceRange();
+ return true;
+ }
+
+ auto *DRE = dyn_cast<DeclRefExpr>(E);
+ if (!DRE) {
+ Diag(E->getBeginLoc(),
+ diag::err_count_attr_only_support_simple_decl_reference)
+ << Kind << E->getSourceRange();
+ return true;
+ }
+
+ auto *CountDecl = DRE->getDecl();
+ FieldDecl *CountFD = dyn_cast<FieldDecl>(CountDecl);
+ if (auto *IFD = dyn_cast<IndirectFieldDecl>(CountDecl)) {
+ CountFD = IFD->getAnonField();
+ }
+ if (!CountFD) {
+ Diag(E->getBeginLoc(), diag::err_count_attr_must_be_in_structure)
+ << CountDecl << Kind << E->getSourceRange();
+
+ Diag(CountDecl->getBeginLoc(),
+ diag::note_flexible_array_counted_by_attr_field)
+ << CountDecl << CountDecl->getSourceRange();
+ return true;
+ }
+
+ if (FD->getParent() != CountFD->getParent()) {
+ if (CountFD->getParent()->isUnion()) {
+ Diag(CountFD->getBeginLoc(), diag::err_count_attr_refer_to_union)
+ << Kind << CountFD->getSourceRange();
+ return true;
+ }
+ // Whether CountRD is an anonymous struct is not determined at this
+ // point. Thus, an additional diagnostic in case it's not anonymous struct
+ // is done later in `Parser::ParseStructDeclaration`.
+ auto *RD = GetEnclosingNamedOrTopAnonRecord(FD);
+ auto *CountRD = GetEnclosingNamedOrTopAnonRecord(CountFD);
+
+ if (RD != CountRD) {
+ Diag(E->getBeginLoc(), diag::err_count_attr_param_not_in_same_struct)
+ << CountFD << Kind << FieldTy->isArrayType() << E->getSourceRange();
+ Diag(CountFD->getBeginLoc(),
+ diag::note_flexible_array_counted_by_attr_field)
+ << CountFD << CountFD->getSourceRange();
+ return true;
+ }
+ }
+
+ Decls.push_back(TypeCoupledDeclRefInfo(CountFD, /*IsDref*/ false));
+ return false;
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp
index 6a66ecf6f94c..580b9872c6a1 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp
@@ -10,6 +10,7 @@
///
//===----------------------------------------------------------------------===//
+#include "clang/Sema/SemaCUDA.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/ExprCXX.h"
@@ -22,10 +23,13 @@
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
+#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/SmallVector.h"
#include <optional>
using namespace clang;
+SemaCUDA::SemaCUDA(Sema &S) : SemaBase(S) {}
+
template <typename AttrT> static bool hasExplicitAttr(const VarDecl *D) {
if (!D)
return false;
@@ -34,38 +38,37 @@ template <typename AttrT> static bool hasExplicitAttr(const VarDecl *D) {
return false;
}
-void Sema::PushForceCUDAHostDevice() {
+void SemaCUDA::PushForceHostDevice() {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
- ForceCUDAHostDeviceDepth++;
+ ForceHostDeviceDepth++;
}
-bool Sema::PopForceCUDAHostDevice() {
+bool SemaCUDA::PopForceHostDevice() {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
- if (ForceCUDAHostDeviceDepth == 0)
+ if (ForceHostDeviceDepth == 0)
return false;
- ForceCUDAHostDeviceDepth--;
+ ForceHostDeviceDepth--;
return true;
}
-ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
+ExprResult SemaCUDA::ActOnExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc) {
- FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl();
+ FunctionDecl *ConfigDecl = getASTContext().getcudaConfigureCallDecl();
if (!ConfigDecl)
return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
- << getCudaConfigureFuncName());
+ << getConfigureFuncName());
QualType ConfigQTy = ConfigDecl->getType();
- DeclRefExpr *ConfigDR = new (Context)
- DeclRefExpr(Context, ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc);
- MarkFunctionReferenced(LLLLoc, ConfigDecl);
+ DeclRefExpr *ConfigDR = new (getASTContext()) DeclRefExpr(
+ getASTContext(), ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc);
+ SemaRef.MarkFunctionReferenced(LLLLoc, ConfigDecl);
- return BuildCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr,
- /*IsExecConfig=*/true);
+ return SemaRef.BuildCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr,
+ /*IsExecConfig=*/true);
}
-Sema::CUDAFunctionTarget
-Sema::IdentifyCUDATarget(const ParsedAttributesView &Attrs) {
+CUDAFunctionTarget SemaCUDA::IdentifyTarget(const ParsedAttributesView &Attrs) {
bool HasHostAttr = false;
bool HasDeviceAttr = false;
bool HasGlobalAttr = false;
@@ -90,18 +93,18 @@ Sema::IdentifyCUDATarget(const ParsedAttributesView &Attrs) {
}
if (HasInvalidTargetAttr)
- return CFT_InvalidTarget;
+ return CUDAFunctionTarget::InvalidTarget;
if (HasGlobalAttr)
- return CFT_Global;
+ return CUDAFunctionTarget::Global;
if (HasHostAttr && HasDeviceAttr)
- return CFT_HostDevice;
+ return CUDAFunctionTarget::HostDevice;
if (HasDeviceAttr)
- return CFT_Device;
+ return CUDAFunctionTarget::Device;
- return CFT_Host;
+ return CUDAFunctionTarget::Host;
}
template <typename A>
@@ -112,55 +115,54 @@ static bool hasAttr(const Decl *D, bool IgnoreImplicitAttr) {
});
}
-Sema::CUDATargetContextRAII::CUDATargetContextRAII(Sema &S_,
- CUDATargetContextKind K,
- Decl *D)
+SemaCUDA::CUDATargetContextRAII::CUDATargetContextRAII(
+ SemaCUDA &S_, SemaCUDA::CUDATargetContextKind K, Decl *D)
: S(S_) {
SavedCtx = S.CurCUDATargetCtx;
- assert(K == CTCK_InitGlobalVar);
+ assert(K == SemaCUDA::CTCK_InitGlobalVar);
auto *VD = dyn_cast_or_null<VarDecl>(D);
if (VD && VD->hasGlobalStorage() && !VD->isStaticLocal()) {
- auto Target = CFT_Host;
+ auto Target = CUDAFunctionTarget::Host;
if ((hasAttr<CUDADeviceAttr>(VD, /*IgnoreImplicit=*/true) &&
!hasAttr<CUDAHostAttr>(VD, /*IgnoreImplicit=*/true)) ||
hasAttr<CUDASharedAttr>(VD, /*IgnoreImplicit=*/true) ||
hasAttr<CUDAConstantAttr>(VD, /*IgnoreImplicit=*/true))
- Target = CFT_Device;
+ Target = CUDAFunctionTarget::Device;
S.CurCUDATargetCtx = {Target, K, VD};
}
}
-/// IdentifyCUDATarget - Determine the CUDA compilation target for this function
-Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D,
- bool IgnoreImplicitHDAttr) {
+/// IdentifyTarget - Determine the CUDA compilation target for this function
+CUDAFunctionTarget SemaCUDA::IdentifyTarget(const FunctionDecl *D,
+ bool IgnoreImplicitHDAttr) {
// Code that lives outside a function gets the target from CurCUDATargetCtx.
if (D == nullptr)
return CurCUDATargetCtx.Target;
if (D->hasAttr<CUDAInvalidTargetAttr>())
- return CFT_InvalidTarget;
+ return CUDAFunctionTarget::InvalidTarget;
if (D->hasAttr<CUDAGlobalAttr>())
- return CFT_Global;
+ return CUDAFunctionTarget::Global;
if (hasAttr<CUDADeviceAttr>(D, IgnoreImplicitHDAttr)) {
if (hasAttr<CUDAHostAttr>(D, IgnoreImplicitHDAttr))
- return CFT_HostDevice;
- return CFT_Device;
+ return CUDAFunctionTarget::HostDevice;
+ return CUDAFunctionTarget::Device;
} else if (hasAttr<CUDAHostAttr>(D, IgnoreImplicitHDAttr)) {
- return CFT_Host;
+ return CUDAFunctionTarget::Host;
} else if ((D->isImplicit() || !D->isUserProvided()) &&
!IgnoreImplicitHDAttr) {
// Some implicit declarations (like intrinsic functions) are not marked.
// Set the most lenient target on them for maximal flexibility.
- return CFT_HostDevice;
+ return CUDAFunctionTarget::HostDevice;
}
- return CFT_Host;
+ return CUDAFunctionTarget::Host;
}
/// IdentifyTarget - Determine the CUDA compilation target for this variable.
-Sema::CUDAVariableTarget Sema::IdentifyCUDATarget(const VarDecl *Var) {
+SemaCUDA::CUDAVariableTarget SemaCUDA::IdentifyTarget(const VarDecl *Var) {
if (Var->hasAttr<HIPManagedAttr>())
return CVT_Unified;
// Only constexpr and const variabless with implicit constant attribute
@@ -180,11 +182,11 @@ Sema::CUDAVariableTarget Sema::IdentifyCUDATarget(const VarDecl *Var) {
// - on both sides in host device functions
// - on device side in device or global functions
if (auto *FD = dyn_cast<FunctionDecl>(Var->getDeclContext())) {
- switch (IdentifyCUDATarget(FD)) {
- case CFT_HostDevice:
+ switch (IdentifyTarget(FD)) {
+ case CUDAFunctionTarget::HostDevice:
return CVT_Both;
- case CFT_Device:
- case CFT_Global:
+ case CUDAFunctionTarget::Device:
+ case CUDAFunctionTarget::Global:
return CVT_Device;
default:
return CVT_Host;
@@ -221,58 +223,65 @@ Sema::CUDAVariableTarget Sema::IdentifyCUDATarget(const VarDecl *Var) {
// | hd | h | SS | WS | (d) |
// | hd | hd | HD | HD | (b) |
-Sema::CUDAFunctionPreference
-Sema::IdentifyCUDAPreference(const FunctionDecl *Caller,
+SemaCUDA::CUDAFunctionPreference
+SemaCUDA::IdentifyPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
assert(Callee && "Callee must be valid.");
// Treat ctor/dtor as host device function in device var initializer to allow
// trivial ctor/dtor without device attr to be used. Non-trivial ctor/dtor
- // will be diagnosed by checkAllowedCUDAInitializer.
+ // will be diagnosed by checkAllowedInitializer.
if (Caller == nullptr && CurCUDATargetCtx.Kind == CTCK_InitGlobalVar &&
- CurCUDATargetCtx.Target == CFT_Device &&
+ CurCUDATargetCtx.Target == CUDAFunctionTarget::Device &&
(isa<CXXConstructorDecl>(Callee) || isa<CXXDestructorDecl>(Callee)))
return CFP_HostDevice;
- CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller);
- CUDAFunctionTarget CalleeTarget = IdentifyCUDATarget(Callee);
+ CUDAFunctionTarget CallerTarget = IdentifyTarget(Caller);
+ CUDAFunctionTarget CalleeTarget = IdentifyTarget(Callee);
// If one of the targets is invalid, the check always fails, no matter what
// the other target is.
- if (CallerTarget == CFT_InvalidTarget || CalleeTarget == CFT_InvalidTarget)
+ if (CallerTarget == CUDAFunctionTarget::InvalidTarget ||
+ CalleeTarget == CUDAFunctionTarget::InvalidTarget)
return CFP_Never;
// (a) Can't call global from some contexts until we support CUDA's
// dynamic parallelism.
- if (CalleeTarget == CFT_Global &&
- (CallerTarget == CFT_Global || CallerTarget == CFT_Device))
+ if (CalleeTarget == CUDAFunctionTarget::Global &&
+ (CallerTarget == CUDAFunctionTarget::Global ||
+ CallerTarget == CUDAFunctionTarget::Device))
return CFP_Never;
// (b) Calling HostDevice is OK for everyone.
- if (CalleeTarget == CFT_HostDevice)
+ if (CalleeTarget == CUDAFunctionTarget::HostDevice)
return CFP_HostDevice;
// (c) Best case scenarios
if (CalleeTarget == CallerTarget ||
- (CallerTarget == CFT_Host && CalleeTarget == CFT_Global) ||
- (CallerTarget == CFT_Global && CalleeTarget == CFT_Device))
+ (CallerTarget == CUDAFunctionTarget::Host &&
+ CalleeTarget == CUDAFunctionTarget::Global) ||
+ (CallerTarget == CUDAFunctionTarget::Global &&
+ CalleeTarget == CUDAFunctionTarget::Device))
return CFP_Native;
// HipStdPar mode is special, in that assessing whether a device side call to
// a host target is deferred to a subsequent pass, and cannot unambiguously be
// adjudicated in the AST, hence we optimistically allow them to pass here.
if (getLangOpts().HIPStdPar &&
- (CallerTarget == CFT_Global || CallerTarget == CFT_Device ||
- CallerTarget == CFT_HostDevice) &&
- CalleeTarget == CFT_Host)
+ (CallerTarget == CUDAFunctionTarget::Global ||
+ CallerTarget == CUDAFunctionTarget::Device ||
+ CallerTarget == CUDAFunctionTarget::HostDevice) &&
+ CalleeTarget == CUDAFunctionTarget::Host)
return CFP_HostDevice;
// (d) HostDevice behavior depends on compilation mode.
- if (CallerTarget == CFT_HostDevice) {
+ if (CallerTarget == CUDAFunctionTarget::HostDevice) {
// It's OK to call a compilation-mode matching function from an HD one.
- if ((getLangOpts().CUDAIsDevice && CalleeTarget == CFT_Device) ||
+ if ((getLangOpts().CUDAIsDevice &&
+ CalleeTarget == CUDAFunctionTarget::Device) ||
(!getLangOpts().CUDAIsDevice &&
- (CalleeTarget == CFT_Host || CalleeTarget == CFT_Global)))
+ (CalleeTarget == CUDAFunctionTarget::Host ||
+ CalleeTarget == CUDAFunctionTarget::Global)))
return CFP_SameSide;
// Calls from HD to non-mode-matching functions (i.e., to host functions
@@ -283,9 +292,12 @@ Sema::IdentifyCUDAPreference(const FunctionDecl *Caller,
}
// (e) Calling across device/host boundary is not something you should do.
- if ((CallerTarget == CFT_Host && CalleeTarget == CFT_Device) ||
- (CallerTarget == CFT_Device && CalleeTarget == CFT_Host) ||
- (CallerTarget == CFT_Global && CalleeTarget == CFT_Host))
+ if ((CallerTarget == CUDAFunctionTarget::Host &&
+ CalleeTarget == CUDAFunctionTarget::Device) ||
+ (CallerTarget == CUDAFunctionTarget::Device &&
+ CalleeTarget == CUDAFunctionTarget::Host) ||
+ (CallerTarget == CUDAFunctionTarget::Global &&
+ CalleeTarget == CUDAFunctionTarget::Host))
return CFP_Never;
llvm_unreachable("All cases should've been handled by now.");
@@ -299,13 +311,13 @@ template <typename AttrT> static bool hasImplicitAttr(const FunctionDecl *D) {
return D->isImplicit();
}
-bool Sema::isCUDAImplicitHostDeviceFunction(const FunctionDecl *D) {
+bool SemaCUDA::isImplicitHostDeviceFunction(const FunctionDecl *D) {
bool IsImplicitDevAttr = hasImplicitAttr<CUDADeviceAttr>(D);
bool IsImplicitHostAttr = hasImplicitAttr<CUDAHostAttr>(D);
return IsImplicitDevAttr && IsImplicitHostAttr;
}
-void Sema::EraseUnwantedCUDAMatches(
+void SemaCUDA::EraseUnwantedMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches) {
if (Matches.size() <= 1)
@@ -315,7 +327,7 @@ void Sema::EraseUnwantedCUDAMatches(
// Gets the CUDA function preference for a call from Caller to Match.
auto GetCFP = [&](const Pair &Match) {
- return IdentifyCUDAPreference(Caller, Match.second);
+ return IdentifyPreference(Caller, Match.second);
};
// Find the best call preference among the functions in Matches.
@@ -337,16 +349,16 @@ void Sema::EraseUnwantedCUDAMatches(
/// \param ResolvedTarget with a target that resolves for both calls.
/// \return true if there's a conflict, false otherwise.
static bool
-resolveCalleeCUDATargetConflict(Sema::CUDAFunctionTarget Target1,
- Sema::CUDAFunctionTarget Target2,
- Sema::CUDAFunctionTarget *ResolvedTarget) {
+resolveCalleeCUDATargetConflict(CUDAFunctionTarget Target1,
+ CUDAFunctionTarget Target2,
+ CUDAFunctionTarget *ResolvedTarget) {
// Only free functions and static member functions may be global.
- assert(Target1 != Sema::CFT_Global);
- assert(Target2 != Sema::CFT_Global);
+ assert(Target1 != CUDAFunctionTarget::Global);
+ assert(Target2 != CUDAFunctionTarget::Global);
- if (Target1 == Sema::CFT_HostDevice) {
+ if (Target1 == CUDAFunctionTarget::HostDevice) {
*ResolvedTarget = Target2;
- } else if (Target2 == Sema::CFT_HostDevice) {
+ } else if (Target2 == CUDAFunctionTarget::HostDevice) {
*ResolvedTarget = Target1;
} else if (Target1 != Target2) {
return true;
@@ -357,8 +369,8 @@ resolveCalleeCUDATargetConflict(Sema::CUDAFunctionTarget Target1,
return false;
}
-bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
- CXXSpecialMember CSM,
+bool SemaCUDA::inferTargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
+ CXXSpecialMemberKind CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose) {
@@ -378,7 +390,7 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
// We're going to invoke special member lookup; mark that these special
// members are called from this one, and not from its caller.
- ContextRAII MethodContext(*this, MemberDecl);
+ Sema::ContextRAII MethodContext(SemaRef, MemberDecl);
// Look for special members in base classes that should be invoked from here.
// Infer the target of this member base on the ones it should call.
@@ -402,17 +414,17 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
Sema::SpecialMemberOverloadResult SMOR =
- LookupSpecialMember(BaseClassDecl, CSM,
- /* ConstArg */ ConstRHS,
- /* VolatileArg */ false,
- /* RValueThis */ false,
- /* ConstThis */ false,
- /* VolatileThis */ false);
+ SemaRef.LookupSpecialMember(BaseClassDecl, CSM,
+ /* ConstArg */ ConstRHS,
+ /* VolatileArg */ false,
+ /* RValueThis */ false,
+ /* ConstThis */ false,
+ /* VolatileThis */ false);
if (!SMOR.getMethod())
continue;
- CUDAFunctionTarget BaseMethodTarget = IdentifyCUDATarget(SMOR.getMethod());
+ CUDAFunctionTarget BaseMethodTarget = IdentifyTarget(SMOR.getMethod());
if (!InferredTarget) {
InferredTarget = BaseMethodTarget;
} else {
@@ -422,9 +434,11 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
if (Diagnose) {
Diag(ClassDecl->getLocation(),
diag::note_implicit_member_target_infer_collision)
- << (unsigned)CSM << *InferredTarget << BaseMethodTarget;
+ << (unsigned)CSM << llvm::to_underlying(*InferredTarget)
+ << llvm::to_underlying(BaseMethodTarget);
}
- MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
+ MemberDecl->addAttr(
+ CUDAInvalidTargetAttr::CreateImplicit(getASTContext()));
return true;
}
}
@@ -437,25 +451,24 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
}
const RecordType *FieldType =
- Context.getBaseElementType(F->getType())->getAs<RecordType>();
+ getASTContext().getBaseElementType(F->getType())->getAs<RecordType>();
if (!FieldType) {
continue;
}
CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(FieldType->getDecl());
Sema::SpecialMemberOverloadResult SMOR =
- LookupSpecialMember(FieldRecDecl, CSM,
- /* ConstArg */ ConstRHS && !F->isMutable(),
- /* VolatileArg */ false,
- /* RValueThis */ false,
- /* ConstThis */ false,
- /* VolatileThis */ false);
+ SemaRef.LookupSpecialMember(FieldRecDecl, CSM,
+ /* ConstArg */ ConstRHS && !F->isMutable(),
+ /* VolatileArg */ false,
+ /* RValueThis */ false,
+ /* ConstThis */ false,
+ /* VolatileThis */ false);
if (!SMOR.getMethod())
continue;
- CUDAFunctionTarget FieldMethodTarget =
- IdentifyCUDATarget(SMOR.getMethod());
+ CUDAFunctionTarget FieldMethodTarget = IdentifyTarget(SMOR.getMethod());
if (!InferredTarget) {
InferredTarget = FieldMethodTarget;
} else {
@@ -465,9 +478,11 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
if (Diagnose) {
Diag(ClassDecl->getLocation(),
diag::note_implicit_member_target_infer_collision)
- << (unsigned)CSM << *InferredTarget << FieldMethodTarget;
+ << (unsigned)CSM << llvm::to_underlying(*InferredTarget)
+ << llvm::to_underlying(FieldMethodTarget);
}
- MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
+ MemberDecl->addAttr(
+ CUDAInvalidTargetAttr::CreateImplicit(getASTContext()));
return true;
}
}
@@ -478,25 +493,25 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
// it's the least restrictive option that can be invoked from any target.
bool NeedsH = true, NeedsD = true;
if (InferredTarget) {
- if (*InferredTarget == CFT_Device)
+ if (*InferredTarget == CUDAFunctionTarget::Device)
NeedsH = false;
- else if (*InferredTarget == CFT_Host)
+ else if (*InferredTarget == CUDAFunctionTarget::Host)
NeedsD = false;
}
// We either setting attributes first time, or the inferred ones must match
// previously set ones.
if (NeedsD && !HasD)
- MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
+ MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(getASTContext()));
if (NeedsH && !HasH)
- MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
+ MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(getASTContext()));
return false;
}
-bool Sema::isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD) {
+bool SemaCUDA::isEmptyConstructor(SourceLocation Loc, CXXConstructorDecl *CD) {
if (!CD->isDefined() && CD->isTemplateInstantiation())
- InstantiateFunctionDefinition(Loc, CD->getFirstDecl());
+ SemaRef.InstantiateFunctionDefinition(Loc, CD->getFirstDecl());
// (E.2.3.1, CUDA 7.5) A constructor for a class type is considered
// empty at a point in the translation unit, if it is either a
@@ -524,7 +539,7 @@ bool Sema::isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD) {
if (!llvm::all_of(CD->inits(), [&](const CXXCtorInitializer *CI) {
if (const CXXConstructExpr *CE =
dyn_cast<CXXConstructExpr>(CI->getInit()))
- return isEmptyCudaConstructor(Loc, CE->getConstructor());
+ return isEmptyConstructor(Loc, CE->getConstructor());
return false;
}))
return false;
@@ -532,13 +547,13 @@ bool Sema::isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD) {
return true;
}
-bool Sema::isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *DD) {
+bool SemaCUDA::isEmptyDestructor(SourceLocation Loc, CXXDestructorDecl *DD) {
// No destructor -> no problem.
if (!DD)
return true;
if (!DD->isDefined() && DD->isTemplateInstantiation())
- InstantiateFunctionDefinition(Loc, DD->getFirstDecl());
+ SemaRef.InstantiateFunctionDefinition(Loc, DD->getFirstDecl());
// (E.2.3.1, CUDA 7.5) A destructor for a class type is considered
// empty at a point in the translation unit, if it is either a
@@ -567,7 +582,7 @@ bool Sema::isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *DD) {
// destructors for all base classes...
if (!llvm::all_of(ClassDecl->bases(), [&](const CXXBaseSpecifier &BS) {
if (CXXRecordDecl *RD = BS.getType()->getAsCXXRecordDecl())
- return isEmptyCudaDestructor(Loc, RD->getDestructor());
+ return isEmptyDestructor(Loc, RD->getDestructor());
return true;
}))
return false;
@@ -577,7 +592,7 @@ bool Sema::isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *DD) {
if (CXXRecordDecl *RD = Field->getType()
->getBaseElementTypeUnsafe()
->getAsCXXRecordDecl())
- return isEmptyCudaDestructor(Loc, RD->getDestructor());
+ return isEmptyDestructor(Loc, RD->getDestructor());
return true;
}))
return false;
@@ -608,7 +623,7 @@ bool IsDependentVar(VarDecl *VD) {
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
-bool HasAllowedCUDADeviceStaticInitializer(Sema &S, VarDecl *VD,
+bool HasAllowedCUDADeviceStaticInitializer(SemaCUDA &S, VarDecl *VD,
CUDAInitializerCheckKind CheckKind) {
assert(!VD->isInvalidDecl() && VD->hasGlobalStorage());
assert(!IsDependentVar(VD) && "do not check dependent var");
@@ -617,30 +632,30 @@ bool HasAllowedCUDADeviceStaticInitializer(Sema &S, VarDecl *VD,
if (!Init)
return true;
if (const auto *CE = dyn_cast<CXXConstructExpr>(Init)) {
- return S.isEmptyCudaConstructor(VD->getLocation(), CE->getConstructor());
+ return S.isEmptyConstructor(VD->getLocation(), CE->getConstructor());
}
return false;
};
auto IsConstantInit = [&](const Expr *Init) {
assert(Init);
- ASTContext::CUDAConstantEvalContextRAII EvalCtx(S.Context,
+ ASTContext::CUDAConstantEvalContextRAII EvalCtx(S.getASTContext(),
/*NoWronSidedVars=*/true);
- return Init->isConstantInitializer(S.Context,
+ return Init->isConstantInitializer(S.getASTContext(),
VD->getType()->isReferenceType());
};
auto HasEmptyDtor = [&](VarDecl *VD) {
if (const auto *RD = VD->getType()->getAsCXXRecordDecl())
- return S.isEmptyCudaDestructor(VD->getLocation(), RD->getDestructor());
+ return S.isEmptyDestructor(VD->getLocation(), RD->getDestructor());
return true;
};
if (CheckKind == CICK_Shared)
return IsEmptyInit(Init) && HasEmptyDtor(VD);
- return S.LangOpts.GPUAllowDeviceInit ||
+ return S.getLangOpts().GPUAllowDeviceInit ||
((IsEmptyInit(Init) || IsConstantInit(Init)) && HasEmptyDtor(VD));
}
} // namespace
-void Sema::checkAllowedCUDAInitializer(VarDecl *VD) {
+void SemaCUDA::checkAllowedInitializer(VarDecl *VD) {
// Return early if VD is inside a non-instantiated template function since
// the implicit constructor is not defined yet.
if (const FunctionDecl *FD =
@@ -676,10 +691,11 @@ void Sema::checkAllowedCUDAInitializer(VarDecl *VD) {
InitFn = CE->getDirectCallee();
}
if (InitFn) {
- CUDAFunctionTarget InitFnTarget = IdentifyCUDATarget(InitFn);
- if (InitFnTarget != CFT_Host && InitFnTarget != CFT_HostDevice) {
+ CUDAFunctionTarget InitFnTarget = IdentifyTarget(InitFn);
+ if (InitFnTarget != CUDAFunctionTarget::Host &&
+ InitFnTarget != CUDAFunctionTarget::HostDevice) {
Diag(VD->getLocation(), diag::err_ref_bad_target_global_initializer)
- << InitFnTarget << InitFn;
+ << llvm::to_underlying(InitFnTarget) << InitFn;
Diag(InitFn->getLocation(), diag::note_previous_decl) << InitFn;
VD->setInvalidDecl();
}
@@ -687,21 +703,22 @@ void Sema::checkAllowedCUDAInitializer(VarDecl *VD) {
}
}
-void Sema::CUDARecordImplicitHostDeviceFuncUsedByDevice(
+void SemaCUDA::RecordImplicitHostDeviceFuncUsedByDevice(
const FunctionDecl *Callee) {
- FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
+ FunctionDecl *Caller = SemaRef.getCurFunctionDecl(/*AllowLambda=*/true);
if (!Caller)
return;
- if (!isCUDAImplicitHostDeviceFunction(Callee))
+ if (!isImplicitHostDeviceFunction(Callee))
return;
- CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller);
+ CUDAFunctionTarget CallerTarget = IdentifyTarget(Caller);
// Record whether an implicit host device function is used on device side.
- if (CallerTarget != CFT_Device && CallerTarget != CFT_Global &&
- (CallerTarget != CFT_HostDevice ||
- (isCUDAImplicitHostDeviceFunction(Caller) &&
+ if (CallerTarget != CUDAFunctionTarget::Device &&
+ CallerTarget != CUDAFunctionTarget::Global &&
+ (CallerTarget != CUDAFunctionTarget::HostDevice ||
+ (isImplicitHostDeviceFunction(Caller) &&
!getASTContext().CUDAImplicitHostDeviceFunUsedByDevice.count(Caller))))
return;
@@ -717,18 +734,18 @@ void Sema::CUDARecordImplicitHostDeviceFuncUsedByDevice(
// system header, in which case we leave the constexpr function unattributed.
//
// In addition, all function decls are treated as __host__ __device__ when
-// ForceCUDAHostDeviceDepth > 0 (corresponding to code within a
+// ForceHostDeviceDepth > 0 (corresponding to code within a
// #pragma clang force_cuda_host_device_begin/end
// pair).
-void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD,
+void SemaCUDA::maybeAddHostDeviceAttrs(FunctionDecl *NewD,
const LookupResult &Previous) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
- if (ForceCUDAHostDeviceDepth > 0) {
+ if (ForceHostDeviceDepth > 0) {
if (!NewD->hasAttr<CUDAHostAttr>())
- NewD->addAttr(CUDAHostAttr::CreateImplicit(Context));
+ NewD->addAttr(CUDAHostAttr::CreateImplicit(getASTContext()));
if (!NewD->hasAttr<CUDADeviceAttr>())
- NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
+ NewD->addAttr(CUDADeviceAttr::CreateImplicit(getASTContext()));
return;
}
@@ -739,8 +756,8 @@ void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD,
!NewD->hasAttr<CUDAGlobalAttr>() &&
(NewD->getDescribedFunctionTemplate() ||
NewD->isFunctionTemplateSpecialization())) {
- NewD->addAttr(CUDAHostAttr::CreateImplicit(Context));
- NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
+ NewD->addAttr(CUDAHostAttr::CreateImplicit(getASTContext()));
+ NewD->addAttr(CUDADeviceAttr::CreateImplicit(getASTContext()));
return;
}
@@ -757,8 +774,9 @@ void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD,
FunctionDecl *OldD = D->getAsFunction();
return OldD && OldD->hasAttr<CUDADeviceAttr>() &&
!OldD->hasAttr<CUDAHostAttr>() &&
- !IsOverload(NewD, OldD, /* UseMemberUsingDeclRules = */ false,
- /* ConsiderCudaAttrs = */ false);
+ !SemaRef.IsOverload(NewD, OldD,
+ /* UseMemberUsingDeclRules = */ false,
+ /* ConsiderCudaAttrs = */ false);
};
auto It = llvm::find_if(Previous, IsMatchingDeviceFn);
if (It != Previous.end()) {
@@ -767,7 +785,7 @@ void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD,
// in a system header, in which case we simply return without making NewD
// host+device.
NamedDecl *Match = *It;
- if (!getSourceManager().isInSystemHeader(Match->getLocation())) {
+ if (!SemaRef.getSourceManager().isInSystemHeader(Match->getLocation())) {
Diag(NewD->getLocation(),
diag::err_cuda_unattributed_constexpr_cannot_overload_device)
<< NewD;
@@ -777,14 +795,14 @@ void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD,
return;
}
- NewD->addAttr(CUDAHostAttr::CreateImplicit(Context));
- NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
+ NewD->addAttr(CUDAHostAttr::CreateImplicit(getASTContext()));
+ NewD->addAttr(CUDADeviceAttr::CreateImplicit(getASTContext()));
}
// TODO: `__constant__` memory may be a limited resource for certain targets.
// A safeguard may be needed at the end of compilation pipeline if
// `__constant__` memory usage goes beyond limit.
-void Sema::MaybeAddCUDAConstantAttr(VarDecl *VD) {
+void SemaCUDA::MaybeAddConstantAttr(VarDecl *VD) {
// Do not promote dependent variables since the cotr/dtor/initializer are
// not determined. Do it after instantiation.
if (getLangOpts().CUDAIsDevice && !VD->hasAttr<CUDAConstantAttr>() &&
@@ -798,86 +816,90 @@ void Sema::MaybeAddCUDAConstantAttr(VarDecl *VD) {
}
}
-Sema::SemaDiagnosticBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
- unsigned DiagID) {
+SemaBase::SemaDiagnosticBuilder SemaCUDA::DiagIfDeviceCode(SourceLocation Loc,
+ unsigned DiagID) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
- FunctionDecl *CurFunContext = getCurFunctionDecl(/*AllowLambda=*/true);
+ FunctionDecl *CurFunContext =
+ SemaRef.getCurFunctionDecl(/*AllowLambda=*/true);
SemaDiagnosticBuilder::Kind DiagKind = [&] {
if (!CurFunContext)
return SemaDiagnosticBuilder::K_Nop;
- switch (CurrentCUDATarget()) {
- case CFT_Global:
- case CFT_Device:
+ switch (CurrentTarget()) {
+ case CUDAFunctionTarget::Global:
+ case CUDAFunctionTarget::Device:
return SemaDiagnosticBuilder::K_Immediate;
- case CFT_HostDevice:
+ case CUDAFunctionTarget::HostDevice:
// An HD function counts as host code if we're compiling for host, and
// device code if we're compiling for device. Defer any errors in device
// mode until the function is known-emitted.
if (!getLangOpts().CUDAIsDevice)
return SemaDiagnosticBuilder::K_Nop;
- if (IsLastErrorImmediate && Diags.getDiagnosticIDs()->isBuiltinNote(DiagID))
+ if (SemaRef.IsLastErrorImmediate &&
+ getDiagnostics().getDiagnosticIDs()->isBuiltinNote(DiagID))
return SemaDiagnosticBuilder::K_Immediate;
- return (getEmissionStatus(CurFunContext) ==
- FunctionEmissionStatus::Emitted)
+ return (SemaRef.getEmissionStatus(CurFunContext) ==
+ Sema::FunctionEmissionStatus::Emitted)
? SemaDiagnosticBuilder::K_ImmediateWithCallStack
: SemaDiagnosticBuilder::K_Deferred;
default:
return SemaDiagnosticBuilder::K_Nop;
}
}();
- return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, CurFunContext, *this);
+ return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, CurFunContext, SemaRef);
}
-Sema::SemaDiagnosticBuilder Sema::CUDADiagIfHostCode(SourceLocation Loc,
+Sema::SemaDiagnosticBuilder SemaCUDA::DiagIfHostCode(SourceLocation Loc,
unsigned DiagID) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
- FunctionDecl *CurFunContext = getCurFunctionDecl(/*AllowLambda=*/true);
+ FunctionDecl *CurFunContext =
+ SemaRef.getCurFunctionDecl(/*AllowLambda=*/true);
SemaDiagnosticBuilder::Kind DiagKind = [&] {
if (!CurFunContext)
return SemaDiagnosticBuilder::K_Nop;
- switch (CurrentCUDATarget()) {
- case CFT_Host:
+ switch (CurrentTarget()) {
+ case CUDAFunctionTarget::Host:
return SemaDiagnosticBuilder::K_Immediate;
- case CFT_HostDevice:
+ case CUDAFunctionTarget::HostDevice:
// An HD function counts as host code if we're compiling for host, and
// device code if we're compiling for device. Defer any errors in device
// mode until the function is known-emitted.
if (getLangOpts().CUDAIsDevice)
return SemaDiagnosticBuilder::K_Nop;
- if (IsLastErrorImmediate && Diags.getDiagnosticIDs()->isBuiltinNote(DiagID))
+ if (SemaRef.IsLastErrorImmediate &&
+ getDiagnostics().getDiagnosticIDs()->isBuiltinNote(DiagID))
return SemaDiagnosticBuilder::K_Immediate;
- return (getEmissionStatus(CurFunContext) ==
- FunctionEmissionStatus::Emitted)
+ return (SemaRef.getEmissionStatus(CurFunContext) ==
+ Sema::FunctionEmissionStatus::Emitted)
? SemaDiagnosticBuilder::K_ImmediateWithCallStack
: SemaDiagnosticBuilder::K_Deferred;
default:
return SemaDiagnosticBuilder::K_Nop;
}
}();
- return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, CurFunContext, *this);
+ return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, CurFunContext, SemaRef);
}
-bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
+bool SemaCUDA::CheckCall(SourceLocation Loc, FunctionDecl *Callee) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
assert(Callee && "Callee may not be null.");
- const auto &ExprEvalCtx = currentEvaluationContext();
+ const auto &ExprEvalCtx = SemaRef.currentEvaluationContext();
if (ExprEvalCtx.isUnevaluated() || ExprEvalCtx.isConstantEvaluated())
return true;
// FIXME: Is bailing out early correct here? Should we instead assume that
// the caller is a global initializer?
- FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
+ FunctionDecl *Caller = SemaRef.getCurFunctionDecl(/*AllowLambda=*/true);
if (!Caller)
return true;
// If the caller is known-emitted, mark the callee as known-emitted.
// Otherwise, mark the call in our call graph so we can traverse it later.
- bool CallerKnownEmitted =
- getEmissionStatus(Caller) == FunctionEmissionStatus::Emitted;
+ bool CallerKnownEmitted = SemaRef.getEmissionStatus(Caller) ==
+ Sema::FunctionEmissionStatus::Emitted;
SemaDiagnosticBuilder::Kind DiagKind = [this, Caller, Callee,
CallerKnownEmitted] {
- switch (IdentifyCUDAPreference(Caller, Callee)) {
+ switch (IdentifyPreference(Caller, Callee)) {
case CFP_Never:
case CFP_WrongSide:
assert(Caller && "Never/wrongSide calls require a non-null caller");
@@ -894,8 +916,11 @@ bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
if (DiagKind == SemaDiagnosticBuilder::K_Nop) {
// For -fgpu-rdc, keep track of external kernels used by host functions.
- if (LangOpts.CUDAIsDevice && LangOpts.GPURelocatableDeviceCode &&
- Callee->hasAttr<CUDAGlobalAttr>() && !Callee->isDefined())
+ if (getLangOpts().CUDAIsDevice && getLangOpts().GPURelocatableDeviceCode &&
+ Callee->hasAttr<CUDAGlobalAttr>() && !Callee->isDefined() &&
+ (!Caller || (!Caller->getDescribedFunctionTemplate() &&
+ getASTContext().GetGVALinkageForFunction(Caller) ==
+ GVA_StrongExternal)))
getASTContext().CUDAExternalDeviceDeclODRUsedByHost.insert(Callee);
return true;
}
@@ -907,12 +932,13 @@ bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
if (!LocsWithCUDACallDiags.insert({Caller, Loc}).second)
return true;
- SemaDiagnosticBuilder(DiagKind, Loc, diag::err_ref_bad_target, Caller, *this)
- << IdentifyCUDATarget(Callee) << /*function*/ 0 << Callee
- << IdentifyCUDATarget(Caller);
+ SemaDiagnosticBuilder(DiagKind, Loc, diag::err_ref_bad_target, Caller,
+ SemaRef)
+ << llvm::to_underlying(IdentifyTarget(Callee)) << /*function*/ 0 << Callee
+ << llvm::to_underlying(IdentifyTarget(Caller));
if (!Callee->getBuiltinID())
SemaDiagnosticBuilder(DiagKind, Callee->getLocation(),
- diag::note_previous_decl, Caller, *this)
+ diag::note_previous_decl, Caller, SemaRef)
<< Callee;
return DiagKind != SemaDiagnosticBuilder::K_Immediate &&
DiagKind != SemaDiagnosticBuilder::K_ImmediateWithCallStack;
@@ -923,7 +949,7 @@ bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
// defined and uses the capture by reference when the lambda is called. When
// the capture and use happen on different sides, the capture is invalid and
// should be diagnosed.
-void Sema::CUDACheckLambdaCapture(CXXMethodDecl *Callee,
+void SemaCUDA::CheckLambdaCapture(CXXMethodDecl *Callee,
const sema::Capture &Capture) {
// In host compilation we only need to check lambda functions emitted on host
// side. In such lambda functions, a reference capture is invalid only
@@ -933,12 +959,12 @@ void Sema::CUDACheckLambdaCapture(CXXMethodDecl *Callee,
// kernel cannot pass a lambda back to a host function since we cannot
// define a kernel argument type which can hold the lambda before the lambda
// itself is defined.
- if (!LangOpts.CUDAIsDevice)
+ if (!getLangOpts().CUDAIsDevice)
return;
// File-scope lambda can only do init captures for global variables, which
// results in passing by value for these global variables.
- FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
+ FunctionDecl *Caller = SemaRef.getCurFunctionDecl(/*AllowLambda=*/true);
if (!Caller)
return;
@@ -955,7 +981,7 @@ void Sema::CUDACheckLambdaCapture(CXXMethodDecl *Callee,
auto DiagKind = SemaDiagnosticBuilder::K_Deferred;
if (Capture.isVariableCapture() && !getLangOpts().HIPStdPar) {
SemaDiagnosticBuilder(DiagKind, Capture.getLocation(),
- diag::err_capture_bad_target, Callee, *this)
+ diag::err_capture_bad_target, Callee, SemaRef)
<< Capture.getVariable();
} else if (Capture.isThisCapture()) {
// Capture of this pointer is allowed since this pointer may be pointing to
@@ -964,50 +990,61 @@ void Sema::CUDACheckLambdaCapture(CXXMethodDecl *Callee,
// accessible on device side.
SemaDiagnosticBuilder(DiagKind, Capture.getLocation(),
diag::warn_maybe_capture_bad_target_this_ptr, Callee,
- *this);
+ SemaRef);
}
}
-void Sema::CUDASetLambdaAttrs(CXXMethodDecl *Method) {
+void SemaCUDA::SetLambdaAttrs(CXXMethodDecl *Method) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
if (Method->hasAttr<CUDAHostAttr>() || Method->hasAttr<CUDADeviceAttr>())
return;
- Method->addAttr(CUDADeviceAttr::CreateImplicit(Context));
- Method->addAttr(CUDAHostAttr::CreateImplicit(Context));
+ Method->addAttr(CUDADeviceAttr::CreateImplicit(getASTContext()));
+ Method->addAttr(CUDAHostAttr::CreateImplicit(getASTContext()));
}
-void Sema::checkCUDATargetOverload(FunctionDecl *NewFD,
+void SemaCUDA::checkTargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
- CUDAFunctionTarget NewTarget = IdentifyCUDATarget(NewFD);
+ CUDAFunctionTarget NewTarget = IdentifyTarget(NewFD);
for (NamedDecl *OldND : Previous) {
FunctionDecl *OldFD = OldND->getAsFunction();
if (!OldFD)
continue;
- CUDAFunctionTarget OldTarget = IdentifyCUDATarget(OldFD);
+ CUDAFunctionTarget OldTarget = IdentifyTarget(OldFD);
// Don't allow HD and global functions to overload other functions with the
// same signature. We allow overloading based on CUDA attributes so that
// functions can have different implementations on the host and device, but
// HD/global functions "exist" in some sense on both the host and device, so
// should have the same implementation on both sides.
if (NewTarget != OldTarget &&
- ((NewTarget == CFT_HostDevice &&
- !(LangOpts.OffloadImplicitHostDeviceTemplates &&
- isCUDAImplicitHostDeviceFunction(NewFD) &&
- OldTarget == CFT_Device)) ||
- (OldTarget == CFT_HostDevice &&
- !(LangOpts.OffloadImplicitHostDeviceTemplates &&
- isCUDAImplicitHostDeviceFunction(OldFD) &&
- NewTarget == CFT_Device)) ||
- (NewTarget == CFT_Global) || (OldTarget == CFT_Global)) &&
- !IsOverload(NewFD, OldFD, /* UseMemberUsingDeclRules = */ false,
- /* ConsiderCudaAttrs = */ false)) {
- Diag(NewFD->getLocation(), diag::err_cuda_ovl_target)
- << NewTarget << NewFD->getDeclName() << OldTarget << OldFD;
- Diag(OldFD->getLocation(), diag::note_previous_declaration);
- NewFD->setInvalidDecl();
- break;
+ !SemaRef.IsOverload(NewFD, OldFD, /* UseMemberUsingDeclRules = */ false,
+ /* ConsiderCudaAttrs = */ false)) {
+ if ((NewTarget == CUDAFunctionTarget::HostDevice &&
+ !(getLangOpts().OffloadImplicitHostDeviceTemplates &&
+ isImplicitHostDeviceFunction(NewFD) &&
+ OldTarget == CUDAFunctionTarget::Device)) ||
+ (OldTarget == CUDAFunctionTarget::HostDevice &&
+ !(getLangOpts().OffloadImplicitHostDeviceTemplates &&
+ isImplicitHostDeviceFunction(OldFD) &&
+ NewTarget == CUDAFunctionTarget::Device)) ||
+ (NewTarget == CUDAFunctionTarget::Global) ||
+ (OldTarget == CUDAFunctionTarget::Global)) {
+ Diag(NewFD->getLocation(), diag::err_cuda_ovl_target)
+ << llvm::to_underlying(NewTarget) << NewFD->getDeclName()
+ << llvm::to_underlying(OldTarget) << OldFD;
+ Diag(OldFD->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ break;
+ }
+ if ((NewTarget == CUDAFunctionTarget::Host &&
+ OldTarget == CUDAFunctionTarget::Device) ||
+ (NewTarget == CUDAFunctionTarget::Device &&
+ OldTarget == CUDAFunctionTarget::Host)) {
+ Diag(NewFD->getLocation(), diag::warn_offload_incompatible_redeclare)
+ << llvm::to_underlying(NewTarget) << llvm::to_underlying(OldTarget);
+ Diag(OldFD->getLocation(), diag::note_previous_declaration);
+ }
}
}
}
@@ -1022,21 +1059,21 @@ static void copyAttrIfPresent(Sema &S, FunctionDecl *FD,
}
}
-void Sema::inheritCUDATargetAttrs(FunctionDecl *FD,
+void SemaCUDA::inheritTargetAttrs(FunctionDecl *FD,
const FunctionTemplateDecl &TD) {
const FunctionDecl &TemplateFD = *TD.getTemplatedDecl();
- copyAttrIfPresent<CUDAGlobalAttr>(*this, FD, TemplateFD);
- copyAttrIfPresent<CUDAHostAttr>(*this, FD, TemplateFD);
- copyAttrIfPresent<CUDADeviceAttr>(*this, FD, TemplateFD);
+ copyAttrIfPresent<CUDAGlobalAttr>(SemaRef, FD, TemplateFD);
+ copyAttrIfPresent<CUDAHostAttr>(SemaRef, FD, TemplateFD);
+ copyAttrIfPresent<CUDADeviceAttr>(SemaRef, FD, TemplateFD);
}
-std::string Sema::getCudaConfigureFuncName() const {
+std::string SemaCUDA::getConfigureFuncName() const {
if (getLangOpts().HIP)
return getLangOpts().HIPUseNewLaunchAPI ? "__hipPushCallConfiguration"
: "hipConfigureCall";
// New CUDA kernel launch sequence.
- if (CudaFeatureEnabled(Context.getTargetInfo().getSDKVersion(),
+ if (CudaFeatureEnabled(getASTContext().getTargetInfo().getSDKVersion(),
CudaFeature::CUDA_USES_NEW_LAUNCH))
return "__cudaPushCallConfiguration";
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp
index 44a40215b90d..5b2d65247e72 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp
@@ -43,13 +43,6 @@ static CXXRecordDecl *getCurrentInstantiationOf(QualType T,
return nullptr;
}
-/// Compute the DeclContext that is associated with the given type.
-///
-/// \param T the type for which we are attempting to find a DeclContext.
-///
-/// \returns the declaration context represented by the type T,
-/// or NULL if the declaration context cannot be computed (e.g., because it is
-/// dependent and not the current instantiation).
DeclContext *Sema::computeDeclContext(QualType T) {
if (!T->isDependentType())
if (const TagType *Tag = T->getAs<TagType>())
@@ -58,19 +51,6 @@ DeclContext *Sema::computeDeclContext(QualType T) {
return ::getCurrentInstantiationOf(T, CurContext);
}
-/// Compute the DeclContext that is associated with the given
-/// scope specifier.
-///
-/// \param SS the C++ scope specifier as it appears in the source
-///
-/// \param EnteringContext when true, we will be entering the context of
-/// this scope specifier, so we can retrieve the declaration context of a
-/// class template or class template partial specialization even if it is
-/// not the current instantiation.
-///
-/// \returns the declaration context represented by the scope specifier @p SS,
-/// or NULL if the declaration context cannot be computed (e.g., because it is
-/// dependent and not the current instantiation).
DeclContext *Sema::computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext) {
if (!SS.isSet() || SS.isInvalid())
@@ -190,11 +170,6 @@ bool Sema::isDependentScopeSpecifier(const CXXScopeSpec &SS) {
return SS.getScopeRep()->isDependent();
}
-/// If the given nested name specifier refers to the current
-/// instantiation, return the declaration that corresponds to that
-/// current instantiation (C++0x [temp.dep.type]p1).
-///
-/// \param NNS a dependent nested name specifier.
CXXRecordDecl *Sema::getCurrentInstantiationOf(NestedNameSpecifier *NNS) {
assert(getLangOpts().CPlusPlus && "Only callable in C++");
assert(NNS->isDependent() && "Only dependent nested-name-specifier allowed");
@@ -341,11 +316,6 @@ bool Sema::ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
return false;
}
-/// Determines whether the given declaration is an valid acceptable
-/// result for name lookup of a nested-name-specifier.
-/// \param SD Declaration checked for nested-name-specifier.
-/// \param IsExtension If not null and the declaration is accepted as an
-/// extension, the pointed variable is assigned true.
bool Sema::isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *IsExtension) {
if (!SD)
@@ -386,10 +356,6 @@ bool Sema::isAcceptableNestedNameSpecifier(const NamedDecl *SD,
return false;
}
-/// If the given nested-name-specifier begins with a bare identifier
-/// (e.g., Base::), perform name lookup for that identifier as a
-/// nested-name-specifier within the given scope, and return the result of that
-/// name lookup.
NamedDecl *Sema::FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS) {
if (!S || !NNS)
return nullptr;
@@ -439,37 +405,6 @@ public:
}
-/// Build a new nested-name-specifier for "identifier::", as described
-/// by ActOnCXXNestedNameSpecifier.
-///
-/// \param S Scope in which the nested-name-specifier occurs.
-/// \param IdInfo Parser information about an identifier in the
-/// nested-name-spec.
-/// \param EnteringContext If true, enter the context specified by the
-/// nested-name-specifier.
-/// \param SS Optional nested name specifier preceding the identifier.
-/// \param ScopeLookupResult Provides the result of name lookup within the
-/// scope of the nested-name-specifier that was computed at template
-/// definition time.
-/// \param ErrorRecoveryLookup Specifies if the method is called to improve
-/// error recovery and what kind of recovery is performed.
-/// \param IsCorrectedToColon If not null, suggestion of replace '::' -> ':'
-/// are allowed. The bool value pointed by this parameter is set to
-/// 'true' if the identifier is treated as if it was followed by ':',
-/// not '::'.
-/// \param OnlyNamespace If true, only considers namespaces in lookup.
-///
-/// This routine differs only slightly from ActOnCXXNestedNameSpecifier, in
-/// that it contains an extra parameter \p ScopeLookupResult, which provides
-/// the result of name lookup within the scope of the nested-name-specifier
-/// that was computed at template definition time.
-///
-/// If ErrorRecoveryLookup is true, then this call is used to improve error
-/// recovery. This means that it should not emit diagnostics, it should
-/// just return true on failure. It also means it should only return a valid
-/// scope if it *knows* that the result is correct. It should not return in a
-/// dependent context, for example. Nor will it extend \p SS with the scope
-/// specifier.
bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
bool EnteringContext, CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
@@ -796,6 +731,14 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
Diag(IdInfo.IdentifierLoc,
diag::ext_undeclared_unqual_id_with_dependent_base)
<< IdInfo.Identifier << ContainingClass;
+ // Fake up a nested-name-specifier that starts with the
+ // injected-class-name of the enclosing class.
+ QualType T = Context.getTypeDeclType(ContainingClass);
+ TypeLocBuilder TLB;
+ TLB.pushTrivial(Context, T, IdInfo.IdentifierLoc);
+ SS.Extend(Context, /*TemplateKWLoc=*/SourceLocation(),
+ TLB.getTypeLocInContext(Context, T), IdInfo.IdentifierLoc);
+ // Add the identifier to form a dependent name.
SS.Extend(Context, IdInfo.Identifier, IdInfo.IdentifierLoc,
IdInfo.CCLoc);
return false;
@@ -867,12 +810,29 @@ bool Sema::ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
return false;
}
-/// IsInvalidUnlessNestedName - This method is used for error recovery
-/// purposes to determine whether the specified identifier is only valid as
-/// a nested name specifier, for example a namespace name. It is
-/// conservatively correct to always return false from this method.
-///
-/// The arguments are the same as those passed to ActOnCXXNestedNameSpecifier.
+bool Sema::ActOnCXXNestedNameSpecifierIndexedPack(CXXScopeSpec &SS,
+ const DeclSpec &DS,
+ SourceLocation ColonColonLoc,
+ QualType Type) {
+ if (SS.isInvalid() || DS.getTypeSpecType() == DeclSpec::TST_error)
+ return true;
+
+ assert(DS.getTypeSpecType() == DeclSpec::TST_typename_pack_indexing);
+
+ if (Type.isNull())
+ return true;
+
+ TypeLocBuilder TLB;
+ TLB.pushTrivial(getASTContext(),
+ cast<PackIndexingType>(Type.getTypePtr())->getPattern(),
+ DS.getBeginLoc());
+ PackIndexingTypeLoc PIT = TLB.push<PackIndexingTypeLoc>(Type);
+ PIT.setEllipsisLoc(DS.getEllipsisLoc());
+ SS.Extend(Context, SourceLocation(), TLB.getTypeLocInContext(Context, Type),
+ ColonColonLoc);
+ return false;
+}
+
bool Sema::IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext) {
@@ -943,7 +903,7 @@ bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
R.setBegin(SS.getRange().getBegin());
Diag(CCLoc, diag::err_non_type_template_in_nested_name_specifier)
- << (TD && isa<VarTemplateDecl>(TD)) << Template << R;
+ << isa_and_nonnull<VarTemplateDecl>(TD) << Template << R;
NoteAllFoundTemplates(Template);
return true;
}
@@ -1057,12 +1017,6 @@ bool Sema::ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS) {
llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}
-/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
-/// scope or nested-name-specifier) is parsed, part of a declarator-id.
-/// After this method is called, according to [C++ 3.4.3p3], names should be
-/// looked up in the declarator-id's scope, until the declarator is parsed and
-/// ActOnCXXExitDeclaratorScope is called.
-/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool Sema::ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS) {
assert(SS.isSet() && "Parser passed invalid CXXScopeSpec.");
@@ -1085,11 +1039,6 @@ bool Sema::ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS) {
return false;
}
-/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
-/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
-/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
-/// Used to indicate that names should revert to being looked up in the
-/// defining scope.
void Sema::ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS) {
assert(SS.isSet() && "Parser passed invalid CXXScopeSpec.");
if (SS.isInvalid())
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCast.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCast.cpp
index 9d85568d97b2..eca8363ee960 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCast.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCast.cpp
@@ -24,6 +24,8 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaRISCV.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include <set>
@@ -155,12 +157,12 @@ namespace {
Self.CheckCastAlign(SrcExpr.get(), DestType, OpRange);
}
- void checkObjCConversion(Sema::CheckedConversionKind CCK) {
+ void checkObjCConversion(CheckedConversionKind CCK) {
assert(Self.getLangOpts().allowsNonTrivialObjCLifetimeQualifiers());
Expr *src = SrcExpr.get();
- if (Self.CheckObjCConversion(OpRange, DestType, src, CCK) ==
- Sema::ACR_unbridged)
+ if (Self.ObjC().CheckObjCConversion(OpRange, DestType, src, CCK) ==
+ SemaObjC::ACR_unbridged)
IsARCUnbridgedCast = true;
SrcExpr = src;
}
@@ -248,18 +250,14 @@ static TryCastResult TryStaticMemberPointerUpcast(Sema &Self, ExprResult &SrcExp
CastKind &Kind,
CXXCastPath &BasePath);
-static TryCastResult TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr,
- QualType DestType,
- Sema::CheckedConversionKind CCK,
- SourceRange OpRange,
- unsigned &msg, CastKind &Kind,
- bool ListInitialization);
+static TryCastResult
+TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
+ CheckedConversionKind CCK, SourceRange OpRange,
+ unsigned &msg, CastKind &Kind, bool ListInitialization);
static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
- QualType DestType,
- Sema::CheckedConversionKind CCK,
- SourceRange OpRange,
- unsigned &msg, CastKind &Kind,
- CXXCastPath &BasePath,
+ QualType DestType, CheckedConversionKind CCK,
+ SourceRange OpRange, unsigned &msg,
+ CastKind &Kind, CXXCastPath &BasePath,
bool ListInitialization);
static TryCastResult TryConstCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType, bool CStyle,
@@ -272,8 +270,6 @@ static TryCastResult TryAddressSpaceCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType, bool CStyle,
unsigned &msg, CastKind &Kind);
-/// ActOnCXXNamedCast - Parse
-/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult
Sema::ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
SourceLocation LAngleBracketLoc, Declarator &D,
@@ -498,10 +494,22 @@ static bool tryDiagnoseOverloadedCast(Sema &S, CastType CT,
howManyCandidates = OCD_AmbiguousCandidates;
break;
- case OR_Deleted:
- msg = diag::err_ovl_deleted_conversion_in_cast;
- howManyCandidates = OCD_ViableCandidates;
- break;
+ case OR_Deleted: {
+ OverloadCandidateSet::iterator Best;
+ [[maybe_unused]] OverloadingResult Res =
+ candidates.BestViableFunction(S, range.getBegin(), Best);
+ assert(Res == OR_Deleted && "Inconsistent overload resolution");
+
+ StringLiteral *Msg = Best->Function->getDeletedMessage();
+ candidates.NoteCandidates(
+ PartialDiagnosticAt(range.getBegin(),
+ S.PDiag(diag::err_ovl_deleted_conversion_in_cast)
+ << CT << srcType << destType << (Msg != nullptr)
+ << (Msg ? Msg->getString() : StringRef())
+ << range << src->getSourceRange()),
+ S, OCD_ViableCandidates, src);
+ return true;
+ }
}
candidates.NoteCandidates(
@@ -1083,9 +1091,10 @@ static bool argTypeIsABIEquivalent(QualType SrcType, QualType DestType,
return true;
// Allow integral type mismatch if their size are equal.
- if (SrcType->isIntegralType(Context) && DestType->isIntegralType(Context))
- if (Context.getTypeInfoInChars(SrcType).Width ==
- Context.getTypeInfoInChars(DestType).Width)
+ if ((SrcType->isIntegralType(Context) || SrcType->isEnumeralType()) &&
+ (DestType->isIntegralType(Context) || DestType->isEnumeralType()))
+ if (Context.getTypeSizeInChars(SrcType) ==
+ Context.getTypeSizeInChars(DestType))
return true;
return Context.hasSameUnqualifiedType(SrcType, DestType);
@@ -1211,7 +1220,7 @@ void CastOperation::CheckReinterpretCast() {
if (isValidCast(tcr)) {
if (Self.getLangOpts().allowsNonTrivialObjCLifetimeQualifiers())
- checkObjCConversion(Sema::CCK_OtherCast);
+ checkObjCConversion(CheckedConversionKind::OtherCast);
DiagnoseReinterpretUpDownCast(Self, SrcExpr.get(), DestType, OpRange);
if (unsigned DiagID = checkCastFunctionType(Self, SrcExpr, DestType))
@@ -1262,9 +1271,9 @@ void CastOperation::CheckStaticCast() {
}
unsigned msg = diag::err_bad_cxx_cast_generic;
- TryCastResult tcr
- = TryStaticCast(Self, SrcExpr, DestType, Sema::CCK_OtherCast, OpRange, msg,
- Kind, BasePath, /*ListInitialization=*/false);
+ TryCastResult tcr =
+ TryStaticCast(Self, SrcExpr, DestType, CheckedConversionKind::OtherCast,
+ OpRange, msg, Kind, BasePath, /*ListInitialization=*/false);
if (tcr != TC_Success && msg != 0) {
if (SrcExpr.isInvalid())
return;
@@ -1284,7 +1293,7 @@ void CastOperation::CheckStaticCast() {
if (Kind == CK_BitCast)
checkCastAlign();
if (Self.getLangOpts().allowsNonTrivialObjCLifetimeQualifiers())
- checkObjCConversion(Sema::CCK_OtherCast);
+ checkObjCConversion(CheckedConversionKind::OtherCast);
} else {
SrcExpr = ExprError();
}
@@ -1305,14 +1314,13 @@ static bool IsAddressSpaceConversion(QualType SrcType, QualType DestType) {
/// possible. If @p CStyle, ignore access restrictions on hierarchy casting
/// and casting away constness.
static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
- QualType DestType,
- Sema::CheckedConversionKind CCK,
+ QualType DestType, CheckedConversionKind CCK,
SourceRange OpRange, unsigned &msg,
CastKind &Kind, CXXCastPath &BasePath,
bool ListInitialization) {
// Determine whether we have the semantics of a C-style cast.
- bool CStyle
- = (CCK == Sema::CCK_CStyleCast || CCK == Sema::CCK_FunctionalCast);
+ bool CStyle = (CCK == CheckedConversionKind::CStyleCast ||
+ CCK == CheckedConversionKind::FunctionalCast);
// The order the tests is not entirely arbitrary. There is one conversion
// that can be handled in two different ways. Given:
@@ -1492,7 +1500,7 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
// Allow ns-pointer to cf-pointer conversion in either direction
// with static casts.
if (!CStyle &&
- Self.CheckTollFreeBridgeStaticCast(DestType, SrcExpr.get(), Kind))
+ Self.ObjC().CheckTollFreeBridgeStaticCast(DestType, SrcExpr.get(), Kind))
return TC_Success;
// See if it looks like the user is trying to convert between
@@ -1872,11 +1880,11 @@ TryStaticMemberPointerUpcast(Sema &Self, ExprResult &SrcExpr, QualType SrcType,
///
/// An expression e can be explicitly converted to a type T using a
/// @c static_cast if the declaration "T t(e);" is well-formed [...].
-TryCastResult
-TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
- Sema::CheckedConversionKind CCK,
- SourceRange OpRange, unsigned &msg,
- CastKind &Kind, bool ListInitialization) {
+TryCastResult TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr,
+ QualType DestType,
+ CheckedConversionKind CCK,
+ SourceRange OpRange, unsigned &msg,
+ CastKind &Kind, bool ListInitialization) {
if (DestType->isRecordType()) {
if (Self.RequireCompleteType(OpRange.getBegin(), DestType,
diag::err_bad_cast_incomplete) ||
@@ -1888,13 +1896,14 @@ TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
}
InitializedEntity Entity = InitializedEntity::InitializeTemporary(DestType);
- InitializationKind InitKind
- = (CCK == Sema::CCK_CStyleCast)
- ? InitializationKind::CreateCStyleCast(OpRange.getBegin(), OpRange,
- ListInitialization)
- : (CCK == Sema::CCK_FunctionalCast)
- ? InitializationKind::CreateFunctionalCast(OpRange, ListInitialization)
- : InitializationKind::CreateCast(OpRange);
+ InitializationKind InitKind =
+ (CCK == CheckedConversionKind::CStyleCast)
+ ? InitializationKind::CreateCStyleCast(OpRange.getBegin(), OpRange,
+ ListInitialization)
+ : (CCK == CheckedConversionKind::FunctionalCast)
+ ? InitializationKind::CreateFunctionalCast(OpRange,
+ ListInitialization)
+ : InitializationKind::CreateCast(OpRange);
Expr *SrcExprRaw = SrcExpr.get();
// FIXME: Per DR242, we should check for an implicit conversion sequence
// or for a constructor that could be invoked by direct-initialization
@@ -1906,8 +1915,8 @@ TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
// There is no other way that works.
// On the other hand, if we're checking a C-style cast, we've still got
// the reinterpret_cast way.
- bool CStyle
- = (CCK == Sema::CCK_CStyleCast || CCK == Sema::CCK_FunctionalCast);
+ bool CStyle = (CCK == CheckedConversionKind::CStyleCast ||
+ CCK == CheckedConversionKind::FunctionalCast);
if (InitSeq.Failed() && (CStyle || !DestType->isReferenceType()))
return TC_NotApplicable;
@@ -2382,7 +2391,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
}
// Allow bitcasting between SVE VLATs and VLSTs, and vice-versa.
- if (Self.isValidRVVBitcast(SrcType, DestType)) {
+ if (Self.RISCV().isValidRVVBitcast(SrcType, DestType)) {
Kind = CK_BitCast;
return TC_Success;
}
@@ -2516,7 +2525,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
} else if (IsLValueCast) {
Kind = CK_LValueBitCast;
} else if (DestType->isObjCObjectPointerType()) {
- Kind = Self.PrepareCastToObjCObjectPointer(SrcExpr);
+ Kind = Self.ObjC().PrepareCastToObjCObjectPointer(SrcExpr);
} else if (DestType->isBlockPointerType()) {
if (!SrcType->isBlockPointerType()) {
Kind = CK_AnyPointerToBlockPointerCast;
@@ -2802,8 +2811,9 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
if (isValidCast(tcr))
Kind = CK_NoOp;
- Sema::CheckedConversionKind CCK =
- FunctionalStyle ? Sema::CCK_FunctionalCast : Sema::CCK_CStyleCast;
+ CheckedConversionKind CCK = FunctionalStyle
+ ? CheckedConversionKind::FunctionalCast
+ : CheckedConversionKind::CStyleCast;
if (tcr == TC_NotApplicable) {
tcr = TryAddressSpaceCast(Self, SrcExpr, DestType, /*CStyle*/ true, msg,
Kind);
@@ -2992,7 +3002,7 @@ void CastOperation::CheckCStyleCast() {
// Allow bitcasting between compatible RVV vector types.
if ((SrcType->isVectorType() || DestType->isVectorType()) &&
- Self.isValidRVVBitcast(SrcType, DestType)) {
+ Self.RISCV().isValidRVVBitcast(SrcType, DestType)) {
Kind = CK_BitCast;
return;
}
@@ -3189,7 +3199,7 @@ void CastOperation::CheckCStyleCast() {
// ARC imposes extra restrictions on casts.
if (Self.getLangOpts().allowsNonTrivialObjCLifetimeQualifiers()) {
- checkObjCConversion(Sema::CCK_CStyleCast);
+ checkObjCConversion(CheckedConversionKind::CStyleCast);
if (SrcExpr.isInvalid())
return;
@@ -3208,8 +3218,8 @@ void CastOperation::CheckCStyleCast() {
return;
}
}
- }
- else if (!Self.CheckObjCARCUnavailableWeakConversion(DestType, SrcType)) {
+ } else if (!Self.ObjC().CheckObjCARCUnavailableWeakConversion(DestType,
+ SrcType)) {
Self.Diag(SrcExpr.get()->getBeginLoc(),
diag::err_arc_convesion_of_weak_unavailable)
<< 1 << SrcType << DestType << SrcExpr.get()->getSourceRange();
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp b/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
index 09b7e1c62fbd..9088b5e285bf 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
@@ -27,6 +27,7 @@
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
+#include "clang/AST/IgnoreExpr.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
@@ -60,7 +61,22 @@
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaAMDGPU.h"
+#include "clang/Sema/SemaARM.h"
+#include "clang/Sema/SemaBPF.h"
+#include "clang/Sema/SemaHLSL.h"
+#include "clang/Sema/SemaHexagon.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaLoongArch.h"
+#include "clang/Sema/SemaMIPS.h"
+#include "clang/Sema/SemaNVPTX.h"
+#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaOpenCL.h"
+#include "clang/Sema/SemaPPC.h"
+#include "clang/Sema/SemaRISCV.h"
+#include "clang/Sema/SemaSystemZ.h"
+#include "clang/Sema/SemaWasm.h"
+#include "clang/Sema/SemaX86.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
@@ -115,50 +131,37 @@ static constexpr unsigned short combineFAPK(Sema::FormatArgumentPassingKind A,
return (A << 8) | B;
}
-/// Checks that a call expression's argument count is at least the desired
-/// number. This is useful when doing custom type-checking on a variadic
-/// function. Returns true on error.
-static bool checkArgCountAtLeast(Sema &S, CallExpr *Call,
- unsigned MinArgCount) {
+bool Sema::checkArgCountAtLeast(CallExpr *Call, unsigned MinArgCount) {
unsigned ArgCount = Call->getNumArgs();
if (ArgCount >= MinArgCount)
return false;
- return S.Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args)
+ return Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args)
<< 0 /*function call*/ << MinArgCount << ArgCount
<< /*is non object*/ 0 << Call->getSourceRange();
}
-/// Checks that a call expression's argument count is at most the desired
-/// number. This is useful when doing custom type-checking on a variadic
-/// function. Returns true on error.
-static bool checkArgCountAtMost(Sema &S, CallExpr *Call, unsigned MaxArgCount) {
+bool Sema::checkArgCountAtMost(CallExpr *Call, unsigned MaxArgCount) {
unsigned ArgCount = Call->getNumArgs();
if (ArgCount <= MaxArgCount)
return false;
- return S.Diag(Call->getEndLoc(),
- diag::err_typecheck_call_too_many_args_at_most)
+ return Diag(Call->getEndLoc(), diag::err_typecheck_call_too_many_args_at_most)
<< 0 /*function call*/ << MaxArgCount << ArgCount
<< /*is non object*/ 0 << Call->getSourceRange();
}
-/// Checks that a call expression's argument count is in the desired range. This
-/// is useful when doing custom type-checking on a variadic function. Returns
-/// true on error.
-static bool checkArgCountRange(Sema &S, CallExpr *Call, unsigned MinArgCount,
- unsigned MaxArgCount) {
- return checkArgCountAtLeast(S, Call, MinArgCount) ||
- checkArgCountAtMost(S, Call, MaxArgCount);
+bool Sema::checkArgCountRange(CallExpr *Call, unsigned MinArgCount,
+ unsigned MaxArgCount) {
+ return checkArgCountAtLeast(Call, MinArgCount) ||
+ checkArgCountAtMost(Call, MaxArgCount);
}
-/// Checks that a call expression's argument count is the desired number.
-/// This is useful when doing custom type-checking. Returns true on error.
-static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) {
+bool Sema::checkArgCount(CallExpr *Call, unsigned DesiredArgCount) {
unsigned ArgCount = Call->getNumArgs();
if (ArgCount == DesiredArgCount)
return false;
- if (checkArgCountAtLeast(S, Call, DesiredArgCount))
+ if (checkArgCountAtLeast(Call, DesiredArgCount))
return true;
assert(ArgCount > DesiredArgCount && "should have diagnosed this");
@@ -166,11 +169,38 @@ static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) {
SourceRange Range(Call->getArg(DesiredArgCount)->getBeginLoc(),
Call->getArg(ArgCount - 1)->getEndLoc());
- return S.Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args)
+ return Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args)
<< 0 /*function call*/ << DesiredArgCount << ArgCount
<< /*is non object*/ 0 << Call->getArg(1)->getSourceRange();
}
+static bool checkBuiltinVerboseTrap(CallExpr *Call, Sema &S) {
+ bool HasError = false;
+
+ for (unsigned I = 0; I < Call->getNumArgs(); ++I) {
+ Expr *Arg = Call->getArg(I);
+
+ if (Arg->isValueDependent())
+ continue;
+
+ std::optional<std::string> ArgString = Arg->tryEvaluateString(S.Context);
+ int DiagMsgKind = -1;
+ // Arguments must be pointers to constant strings and cannot use '$'.
+ if (!ArgString.has_value())
+ DiagMsgKind = 0;
+ else if (ArgString->find('$') != std::string::npos)
+ DiagMsgKind = 1;
+
+ if (DiagMsgKind >= 0) {
+ S.Diag(Arg->getBeginLoc(), diag::err_builtin_verbose_trap_arg)
+ << DiagMsgKind << Arg->getSourceRange();
+ HasError = true;
+ }
+ }
+
+ return !HasError;
+}
+
static bool convertArgumentToType(Sema &S, Expr *&Value, QualType Ty) {
if (Value->isTypeDependent())
return false;
@@ -187,8 +217,8 @@ static bool convertArgumentToType(Sema &S, Expr *&Value, QualType Ty) {
/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
-static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 2))
+static bool BuiltinAnnotation(Sema &S, CallExpr *TheCall) {
+ if (S.checkArgCount(TheCall, 2))
return true;
// First argument should be an integer.
@@ -213,7 +243,7 @@ static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
return false;
}
-static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
+static bool BuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
// We need at least one argument.
if (TheCall->getNumArgs() < 1) {
S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
@@ -237,8 +267,8 @@ static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
-static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 1))
+static bool BuiltinAddressof(Sema &S, CallExpr *TheCall) {
+ if (S.checkArgCount(TheCall, 1))
return true;
ExprResult Arg(TheCall->getArg(0));
@@ -252,8 +282,8 @@ static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
}
/// Check that the argument to __builtin_function_start is a function.
-static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 1))
+static bool BuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
+ if (S.checkArgCount(TheCall, 1))
return true;
ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
@@ -276,8 +306,8 @@ static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
/// Check the number of arguments and set the result type to
/// the argument type.
-static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 1))
+static bool BuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
+ if (S.checkArgCount(TheCall, 1))
return true;
TheCall->setType(TheCall->getArg(0)->getType());
@@ -287,8 +317,8 @@ static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
/// Check that the value argument for __builtin_is_aligned(value, alignment) and
/// __builtin_aligned_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
-static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
- if (checkArgCount(S, TheCall, 2))
+static bool BuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
+ if (S.checkArgCount(TheCall, 2))
return true;
clang::Expr *Source = TheCall->getArg(0);
@@ -365,9 +395,8 @@ static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
return false;
}
-static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
- unsigned BuiltinID) {
- if (checkArgCount(S, TheCall, 3))
+static bool BuiltinOverflow(Sema &S, CallExpr *TheCall, unsigned BuiltinID) {
+ if (S.checkArgCount(TheCall, 3))
return true;
std::pair<unsigned, const char *> Builtins[] = {
@@ -622,7 +651,7 @@ struct BuiltinDumpStructGenerator {
for (auto *D : RD->decls()) {
auto *IFD = dyn_cast<IndirectFieldDecl>(D);
auto *FD = IFD ? IFD->getAnonField() : dyn_cast<FieldDecl>(D);
- if (!FD || FD->isUnnamedBitfield() || FD->isAnonymousStructOrUnion())
+ if (!FD || FD->isUnnamedBitField() || FD->isAnonymousStructOrUnion())
continue;
llvm::SmallString<20> Format = llvm::StringRef("%s%s %s ");
@@ -694,8 +723,8 @@ struct BuiltinDumpStructGenerator {
};
} // namespace
-static ExprResult SemaBuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
- if (checkArgCountAtLeast(S, TheCall, 2))
+static ExprResult BuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
+ if (S.checkArgCountAtLeast(TheCall, 2))
return ExprError();
ExprResult PtrArgResult = S.DefaultLvalueConversion(TheCall->getArg(0));
@@ -760,8 +789,8 @@ static ExprResult SemaBuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
return Generator.buildWrapper();
}
-static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
- if (checkArgCount(S, BuiltinCall, 2))
+static bool BuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
+ if (S.checkArgCount(BuiltinCall, 2))
return true;
SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
@@ -1098,7 +1127,7 @@ static bool ProcessFormatStringLiteral(const Expr *FormatExpr,
const ConstantArrayType *T =
Context.getAsConstantArrayType(Format->getType());
assert(T && "String literal not of constant array type!");
- size_t TypeSize = T->getSize().getZExtValue();
+ size_t TypeSize = T->getZExtSize();
// In case there's a null byte somewhere.
StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
return true;
@@ -1426,9 +1455,9 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
<< FunctionName << DestinationStr << SourceStr);
}
-static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
- Scope::ScopeFlags NeededScopeFlags,
- unsigned DiagID) {
+static bool BuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
+ Scope::ScopeFlags NeededScopeFlags,
+ unsigned DiagID) {
// Scopes aren't available during instantiation. Fortunately, builtin
// functions cannot be template args so they cannot be formed through template
// instantiation. Therefore checking once during the parse is sufficient.
@@ -1448,531 +1477,303 @@ static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
return false;
}
-static inline bool isBlockPointer(Expr *Arg) {
- return Arg->getType()->isBlockPointerType();
-}
-
-/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
-/// void*, which is a requirement of device side enqueue.
-static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
- const BlockPointerType *BPT =
- cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
- ArrayRef<QualType> Params =
- BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
- unsigned ArgCounter = 0;
- bool IllegalParams = false;
- // Iterate through the block parameters until either one is found that is not
- // a local void*, or the block is valid.
- for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
- I != E; ++I, ++ArgCounter) {
- if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
- (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
- LangAS::opencl_local) {
- // Get the location of the error. If a block literal has been passed
- // (BlockExpr) then we can point straight to the offending argument,
- // else we just point to the variable reference.
- SourceLocation ErrorLoc;
- if (isa<BlockExpr>(BlockArg)) {
- BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
- ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
- } else if (isa<DeclRefExpr>(BlockArg)) {
- ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
- }
- S.Diag(ErrorLoc,
- diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
- IllegalParams = true;
- }
- }
+namespace {
+enum PointerAuthOpKind {
+ PAO_Strip,
+ PAO_Sign,
+ PAO_Auth,
+ PAO_SignGeneric,
+ PAO_Discriminator,
+ PAO_BlendPointer,
+ PAO_BlendInteger
+};
+}
+
+bool Sema::checkPointerAuthEnabled(SourceLocation Loc, SourceRange Range) {
+ if (getLangOpts().PointerAuthIntrinsics)
+ return false;
- return IllegalParams;
+ Diag(Loc, diag::err_ptrauth_disabled) << Range;
+ return true;
}
-static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
- // OpenCL device can support extension but not the feature as extension
- // requires subgroup independent forward progress, but subgroup independent
- // forward progress is optional in OpenCL C 3.0 __opencl_c_subgroups feature.
- if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts()) &&
- !S.getOpenCLOptions().isSupported("__opencl_c_subgroups",
- S.getLangOpts())) {
- S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
- << 1 << Call->getDirectCallee()
- << "cl_khr_subgroups or __opencl_c_subgroups";
- return true;
- }
- return false;
+static bool checkPointerAuthEnabled(Sema &S, Expr *E) {
+ return S.checkPointerAuthEnabled(E->getExprLoc(), E->getSourceRange());
}
-static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 2))
+static bool checkPointerAuthKey(Sema &S, Expr *&Arg) {
+ // Convert it to type 'int'.
+ if (convertArgumentToType(S, Arg, S.Context.IntTy))
return true;
- if (checkOpenCLSubgroupExt(S, TheCall))
- return true;
+ // Value-dependent expressions are okay; wait for template instantiation.
+ if (Arg->isValueDependent())
+ return false;
+
+ unsigned KeyValue;
+ return S.checkConstantPointerAuthKey(Arg, KeyValue);
+}
- // First argument is an ndrange_t type.
- Expr *NDRangeArg = TheCall->getArg(0);
- if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
- S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
- << TheCall->getDirectCallee() << "'ndrange_t'";
+bool Sema::checkConstantPointerAuthKey(Expr *Arg, unsigned &Result) {
+ // Attempt to constant-evaluate the expression.
+ std::optional<llvm::APSInt> KeyValue = Arg->getIntegerConstantExpr(Context);
+ if (!KeyValue) {
+ Diag(Arg->getExprLoc(), diag::err_expr_not_ice)
+ << 0 << Arg->getSourceRange();
return true;
}
- Expr *BlockArg = TheCall->getArg(1);
- if (!isBlockPointer(BlockArg)) {
- S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
- << TheCall->getDirectCallee() << "block";
+ // Ask the target to validate the key parameter.
+ if (!Context.getTargetInfo().validatePointerAuthKey(*KeyValue)) {
+ llvm::SmallString<32> Value;
+ {
+ llvm::raw_svector_ostream Str(Value);
+ Str << *KeyValue;
+ }
+
+ Diag(Arg->getExprLoc(), diag::err_ptrauth_invalid_key)
+ << Value << Arg->getSourceRange();
return true;
}
- return checkOpenCLBlockArgs(S, BlockArg);
+
+ Result = KeyValue->getZExtValue();
+ return false;
}
-/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
-/// get_kernel_work_group_size
-/// and get_kernel_preferred_work_group_size_multiple builtin functions.
-static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 1))
- return true;
+static std::pair<const ValueDecl *, CharUnits>
+findConstantBaseAndOffset(Sema &S, Expr *E) {
+ // Must evaluate as a pointer.
+ Expr::EvalResult Result;
+ if (!E->EvaluateAsRValue(Result, S.Context) || !Result.Val.isLValue())
+ return {nullptr, CharUnits()};
- Expr *BlockArg = TheCall->getArg(0);
- if (!isBlockPointer(BlockArg)) {
- S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
- << TheCall->getDirectCallee() << "block";
- return true;
- }
- return checkOpenCLBlockArgs(S, BlockArg);
-}
-
-/// Diagnose integer type and any valid implicit conversion to it.
-static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
- const QualType &IntType);
-
-static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
- unsigned Start, unsigned End) {
- bool IllegalParams = false;
- for (unsigned I = Start; I <= End; ++I)
- IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
- S.Context.getSizeType());
- return IllegalParams;
-}
-
-/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
-/// 'local void*' parameter of passed block.
-static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
- Expr *BlockArg,
- unsigned NumNonVarArgs) {
- const BlockPointerType *BPT =
- cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
- unsigned NumBlockParams =
- BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
- unsigned TotalNumArgs = TheCall->getNumArgs();
-
- // For each argument passed to the block, a corresponding uint needs to
- // be passed to describe the size of the local memory.
- if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
- S.Diag(TheCall->getBeginLoc(),
- diag::err_opencl_enqueue_kernel_local_size_args);
- return true;
- }
+ const auto *BaseDecl =
+ Result.Val.getLValueBase().dyn_cast<const ValueDecl *>();
+ if (!BaseDecl)
+ return {nullptr, CharUnits()};
- // Check that the sizes of the local memory are specified by integers.
- return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
- TotalNumArgs - 1);
-}
-
-/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
-/// overload formats specified in Table 6.13.17.1.
-/// int enqueue_kernel(queue_t queue,
-/// kernel_enqueue_flags_t flags,
-/// const ndrange_t ndrange,
-/// void (^block)(void))
-/// int enqueue_kernel(queue_t queue,
-/// kernel_enqueue_flags_t flags,
-/// const ndrange_t ndrange,
-/// uint num_events_in_wait_list,
-/// clk_event_t *event_wait_list,
-/// clk_event_t *event_ret,
-/// void (^block)(void))
-/// int enqueue_kernel(queue_t queue,
-/// kernel_enqueue_flags_t flags,
-/// const ndrange_t ndrange,
-/// void (^block)(local void*, ...),
-/// uint size0, ...)
-/// int enqueue_kernel(queue_t queue,
-/// kernel_enqueue_flags_t flags,
-/// const ndrange_t ndrange,
-/// uint num_events_in_wait_list,
-/// clk_event_t *event_wait_list,
-/// clk_event_t *event_ret,
-/// void (^block)(local void*, ...),
-/// uint size0, ...)
-static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
- unsigned NumArgs = TheCall->getNumArgs();
+ return {BaseDecl, Result.Val.getLValueOffset()};
+}
- if (NumArgs < 4) {
- S.Diag(TheCall->getBeginLoc(),
- diag::err_typecheck_call_too_few_args_at_least)
- << 0 << 4 << NumArgs << /*is non object*/ 0;
- return true;
+static bool checkPointerAuthValue(Sema &S, Expr *&Arg, PointerAuthOpKind OpKind,
+ bool RequireConstant = false) {
+ if (Arg->hasPlaceholderType()) {
+ ExprResult R = S.CheckPlaceholderExpr(Arg);
+ if (R.isInvalid())
+ return true;
+ Arg = R.get();
}
- Expr *Arg0 = TheCall->getArg(0);
- Expr *Arg1 = TheCall->getArg(1);
- Expr *Arg2 = TheCall->getArg(2);
- Expr *Arg3 = TheCall->getArg(3);
+ auto AllowsPointer = [](PointerAuthOpKind OpKind) {
+ return OpKind != PAO_BlendInteger;
+ };
+ auto AllowsInteger = [](PointerAuthOpKind OpKind) {
+ return OpKind == PAO_Discriminator || OpKind == PAO_BlendInteger ||
+ OpKind == PAO_SignGeneric;
+ };
- // First argument always needs to be a queue_t type.
- if (!Arg0->getType()->isQueueT()) {
- S.Diag(TheCall->getArg(0)->getBeginLoc(),
- diag::err_opencl_builtin_expected_type)
- << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
- return true;
- }
+ // Require the value to have the right range of type.
+ QualType ExpectedTy;
+ if (AllowsPointer(OpKind) && Arg->getType()->isPointerType()) {
+ ExpectedTy = Arg->getType().getUnqualifiedType();
+ } else if (AllowsPointer(OpKind) && Arg->getType()->isNullPtrType()) {
+ ExpectedTy = S.Context.VoidPtrTy;
+ } else if (AllowsInteger(OpKind) &&
+ Arg->getType()->isIntegralOrUnscopedEnumerationType()) {
+ ExpectedTy = S.Context.getUIntPtrType();
- // Second argument always needs to be a kernel_enqueue_flags_t enum value.
- if (!Arg1->getType()->isIntegerType()) {
- S.Diag(TheCall->getArg(1)->getBeginLoc(),
- diag::err_opencl_builtin_expected_type)
- << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
+ } else {
+ // Diagnose the failures.
+ S.Diag(Arg->getExprLoc(), diag::err_ptrauth_value_bad_type)
+ << unsigned(OpKind == PAO_Discriminator ? 1
+ : OpKind == PAO_BlendPointer ? 2
+ : OpKind == PAO_BlendInteger ? 3
+ : 0)
+ << unsigned(AllowsInteger(OpKind) ? (AllowsPointer(OpKind) ? 2 : 1) : 0)
+ << Arg->getType() << Arg->getSourceRange();
return true;
}
- // Third argument is always an ndrange_t type.
- if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
- S.Diag(TheCall->getArg(2)->getBeginLoc(),
- diag::err_opencl_builtin_expected_type)
- << TheCall->getDirectCallee() << "'ndrange_t'";
+ // Convert to that type. This should just be an lvalue-to-rvalue
+ // conversion.
+ if (convertArgumentToType(S, Arg, ExpectedTy))
return true;
- }
- // With four arguments, there is only one form that the function could be
- // called in: no events and no variable arguments.
- if (NumArgs == 4) {
- // check that the last argument is the right block type.
- if (!isBlockPointer(Arg3)) {
- S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
- << TheCall->getDirectCallee() << "block";
- return true;
- }
- // we have a block type, check the prototype
- const BlockPointerType *BPT =
- cast<BlockPointerType>(Arg3->getType().getCanonicalType());
- if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
- S.Diag(Arg3->getBeginLoc(),
- diag::err_opencl_enqueue_kernel_blocks_no_args);
- return true;
+ if (!RequireConstant) {
+ // Warn about null pointers for non-generic sign and auth operations.
+ if ((OpKind == PAO_Sign || OpKind == PAO_Auth) &&
+ Arg->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNull)) {
+ S.Diag(Arg->getExprLoc(), OpKind == PAO_Sign
+ ? diag::warn_ptrauth_sign_null_pointer
+ : diag::warn_ptrauth_auth_null_pointer)
+ << Arg->getSourceRange();
}
+
return false;
}
- // we can have block + varargs.
- if (isBlockPointer(Arg3))
- return (checkOpenCLBlockArgs(S, Arg3) ||
- checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
- // last two cases with either exactly 7 args or 7 args and varargs.
- if (NumArgs >= 7) {
- // check common block argument.
- Expr *Arg6 = TheCall->getArg(6);
- if (!isBlockPointer(Arg6)) {
- S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
- << TheCall->getDirectCallee() << "block";
- return true;
- }
- if (checkOpenCLBlockArgs(S, Arg6))
- return true;
- // Forth argument has to be any integer type.
- if (!Arg3->getType()->isIntegerType()) {
- S.Diag(TheCall->getArg(3)->getBeginLoc(),
- diag::err_opencl_builtin_expected_type)
- << TheCall->getDirectCallee() << "integer";
- return true;
- }
- // check remaining common arguments.
- Expr *Arg4 = TheCall->getArg(4);
- Expr *Arg5 = TheCall->getArg(5);
-
- // Fifth argument is always passed as a pointer to clk_event_t.
- if (!Arg4->isNullPointerConstant(S.Context,
- Expr::NPC_ValueDependentIsNotNull) &&
- !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
- S.Diag(TheCall->getArg(4)->getBeginLoc(),
- diag::err_opencl_builtin_expected_type)
- << TheCall->getDirectCallee()
- << S.Context.getPointerType(S.Context.OCLClkEventTy);
- return true;
- }
+ // Perform special checking on the arguments to ptrauth_sign_constant.
- // Sixth argument is always passed as a pointer to clk_event_t.
- if (!Arg5->isNullPointerConstant(S.Context,
- Expr::NPC_ValueDependentIsNotNull) &&
- !(Arg5->getType()->isPointerType() &&
- Arg5->getType()->getPointeeType()->isClkEventT())) {
- S.Diag(TheCall->getArg(5)->getBeginLoc(),
- diag::err_opencl_builtin_expected_type)
- << TheCall->getDirectCallee()
- << S.Context.getPointerType(S.Context.OCLClkEventTy);
- return true;
- }
+ // The main argument.
+ if (OpKind == PAO_Sign) {
+ // Require the value we're signing to have a special form.
+ auto [BaseDecl, Offset] = findConstantBaseAndOffset(S, Arg);
+ bool Invalid;
- if (NumArgs == 7)
- return false;
+ // Must be rooted in a declaration reference.
+ if (!BaseDecl)
+ Invalid = true;
- return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
- }
+ // If it's a function declaration, we can't have an offset.
+ else if (isa<FunctionDecl>(BaseDecl))
+ Invalid = !Offset.isZero();
- // None of the specific case has been detected, give generic error
- S.Diag(TheCall->getBeginLoc(),
- diag::err_opencl_enqueue_kernel_incorrect_args);
- return true;
-}
-
-/// Returns OpenCL access qual.
-static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
- return D->getAttr<OpenCLAccessAttr>();
-}
+ // Otherwise we're fine.
+ else
+ Invalid = false;
-/// Returns true if pipe element type is different from the pointer.
-static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
- const Expr *Arg0 = Call->getArg(0);
- // First argument type should always be pipe.
- if (!Arg0->getType()->isPipeType()) {
- S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
- << Call->getDirectCallee() << Arg0->getSourceRange();
- return true;
+ if (Invalid)
+ S.Diag(Arg->getExprLoc(), diag::err_ptrauth_bad_constant_pointer);
+ return Invalid;
}
- OpenCLAccessAttr *AccessQual =
- getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
- // Validates the access qualifier is compatible with the call.
- // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
- // read_only and write_only, and assumed to be read_only if no qualifier is
- // specified.
- switch (Call->getDirectCallee()->getBuiltinID()) {
- case Builtin::BIread_pipe:
- case Builtin::BIreserve_read_pipe:
- case Builtin::BIcommit_read_pipe:
- case Builtin::BIwork_group_reserve_read_pipe:
- case Builtin::BIsub_group_reserve_read_pipe:
- case Builtin::BIwork_group_commit_read_pipe:
- case Builtin::BIsub_group_commit_read_pipe:
- if (!(!AccessQual || AccessQual->isReadOnly())) {
- S.Diag(Arg0->getBeginLoc(),
- diag::err_opencl_builtin_pipe_invalid_access_modifier)
- << "read_only" << Arg0->getSourceRange();
- return true;
- }
- break;
- case Builtin::BIwrite_pipe:
- case Builtin::BIreserve_write_pipe:
- case Builtin::BIcommit_write_pipe:
- case Builtin::BIwork_group_reserve_write_pipe:
- case Builtin::BIsub_group_reserve_write_pipe:
- case Builtin::BIwork_group_commit_write_pipe:
- case Builtin::BIsub_group_commit_write_pipe:
- if (!(AccessQual && AccessQual->isWriteOnly())) {
- S.Diag(Arg0->getBeginLoc(),
- diag::err_opencl_builtin_pipe_invalid_access_modifier)
- << "write_only" << Arg0->getSourceRange();
- return true;
+
+ // The discriminator argument.
+ assert(OpKind == PAO_Discriminator);
+
+ // Must be a pointer or integer or blend thereof.
+ Expr *Pointer = nullptr;
+ Expr *Integer = nullptr;
+ if (auto *Call = dyn_cast<CallExpr>(Arg->IgnoreParens())) {
+ if (Call->getBuiltinCallee() ==
+ Builtin::BI__builtin_ptrauth_blend_discriminator) {
+ Pointer = Call->getArg(0);
+ Integer = Call->getArg(1);
}
- break;
- default:
- break;
}
- return false;
-}
-
-/// Returns true if pipe element type is different from the pointer.
-static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
- const Expr *Arg0 = Call->getArg(0);
- const Expr *ArgIdx = Call->getArg(Idx);
- const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
- const QualType EltTy = PipeTy->getElementType();
- const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
- // The Idx argument should be a pointer and the type of the pointer and
- // the type of pipe element should also be the same.
- if (!ArgTy ||
- !S.Context.hasSameType(
- EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
- S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
- << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
- << ArgIdx->getType() << ArgIdx->getSourceRange();
- return true;
+ if (!Pointer && !Integer) {
+ if (Arg->getType()->isPointerType())
+ Pointer = Arg;
+ else
+ Integer = Arg;
}
- return false;
-}
-// Performs semantic analysis for the read/write_pipe call.
-// \param S Reference to the semantic analyzer.
-// \param Call A pointer to the builtin call.
-// \return True if a semantic error has been found, false otherwise.
-static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
- // OpenCL v2.0 s6.13.16.2 - The built-in read/write
- // functions have two forms.
- switch (Call->getNumArgs()) {
- case 2:
- if (checkOpenCLPipeArg(S, Call))
- return true;
- // The call with 2 arguments should be
- // read/write_pipe(pipe T, T*).
- // Check packet type T.
- if (checkOpenCLPipePacketType(S, Call, 1))
- return true;
- break;
+ // Check the pointer.
+ bool Invalid = false;
+ if (Pointer) {
+ assert(Pointer->getType()->isPointerType());
- case 4: {
- if (checkOpenCLPipeArg(S, Call))
- return true;
- // The call with 4 arguments should be
- // read/write_pipe(pipe T, reserve_id_t, uint, T*).
- // Check reserve_id_t.
- if (!Call->getArg(1)->getType()->isReserveIDT()) {
- S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
- << Call->getDirectCallee() << S.Context.OCLReserveIDTy
- << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
- return true;
- }
-
- // Check the index.
- const Expr *Arg2 = Call->getArg(2);
- if (!Arg2->getType()->isIntegerType() &&
- !Arg2->getType()->isUnsignedIntegerType()) {
- S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
- << Call->getDirectCallee() << S.Context.UnsignedIntTy
- << Arg2->getType() << Arg2->getSourceRange();
- return true;
- }
+ // TODO: if we're initializing a global, check that the address is
+ // somehow related to what we're initializing. This probably will
+ // never really be feasible and we'll have to catch it at link-time.
+ auto [BaseDecl, Offset] = findConstantBaseAndOffset(S, Pointer);
+ if (!BaseDecl || !isa<VarDecl>(BaseDecl))
+ Invalid = true;
+ }
- // Check packet type T.
- if (checkOpenCLPipePacketType(S, Call, 3))
- return true;
- } break;
- default:
- S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
- << Call->getDirectCallee() << Call->getSourceRange();
- return true;
+ // Check the integer.
+ if (Integer) {
+ assert(Integer->getType()->isIntegerType());
+ if (!Integer->isEvaluatable(S.Context))
+ Invalid = true;
}
- return false;
+ if (Invalid)
+ S.Diag(Arg->getExprLoc(), diag::err_ptrauth_bad_constant_discriminator);
+ return Invalid;
}
-// Performs a semantic analysis on the {work_group_/sub_group_
-// /_}reserve_{read/write}_pipe
-// \param S Reference to the semantic analyzer.
-// \param Call The call to the builtin function to be analyzed.
-// \return True if a semantic error was found, false otherwise.
-static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
- if (checkArgCount(S, Call, 2))
- return true;
-
- if (checkOpenCLPipeArg(S, Call))
- return true;
+static ExprResult PointerAuthStrip(Sema &S, CallExpr *Call) {
+ if (S.checkArgCount(Call, 2))
+ return ExprError();
+ if (checkPointerAuthEnabled(S, Call))
+ return ExprError();
+ if (checkPointerAuthValue(S, Call->getArgs()[0], PAO_Strip) ||
+ checkPointerAuthKey(S, Call->getArgs()[1]))
+ return ExprError();
- // Check the reserve size.
- if (!Call->getArg(1)->getType()->isIntegerType() &&
- !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
- S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
- << Call->getDirectCallee() << S.Context.UnsignedIntTy
- << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
- return true;
- }
+ Call->setType(Call->getArgs()[0]->getType());
+ return Call;
+}
- // Since return type of reserve_read/write_pipe built-in function is
- // reserve_id_t, which is not defined in the builtin def file , we used int
- // as return type and need to override the return type of these functions.
- Call->setType(S.Context.OCLReserveIDTy);
+static ExprResult PointerAuthBlendDiscriminator(Sema &S, CallExpr *Call) {
+ if (S.checkArgCount(Call, 2))
+ return ExprError();
+ if (checkPointerAuthEnabled(S, Call))
+ return ExprError();
+ if (checkPointerAuthValue(S, Call->getArgs()[0], PAO_BlendPointer) ||
+ checkPointerAuthValue(S, Call->getArgs()[1], PAO_BlendInteger))
+ return ExprError();
- return false;
+ Call->setType(S.Context.getUIntPtrType());
+ return Call;
}
-// Performs a semantic analysis on {work_group_/sub_group_
-// /_}commit_{read/write}_pipe
-// \param S Reference to the semantic analyzer.
-// \param Call The call to the builtin function to be analyzed.
-// \return True if a semantic error was found, false otherwise.
-static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
- if (checkArgCount(S, Call, 2))
- return true;
+static ExprResult PointerAuthSignGenericData(Sema &S, CallExpr *Call) {
+ if (S.checkArgCount(Call, 2))
+ return ExprError();
+ if (checkPointerAuthEnabled(S, Call))
+ return ExprError();
+ if (checkPointerAuthValue(S, Call->getArgs()[0], PAO_SignGeneric) ||
+ checkPointerAuthValue(S, Call->getArgs()[1], PAO_Discriminator))
+ return ExprError();
- if (checkOpenCLPipeArg(S, Call))
- return true;
+ Call->setType(S.Context.getUIntPtrType());
+ return Call;
+}
- // Check reserve_id_t.
- if (!Call->getArg(1)->getType()->isReserveIDT()) {
- S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
- << Call->getDirectCallee() << S.Context.OCLReserveIDTy
- << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
- return true;
- }
+static ExprResult PointerAuthSignOrAuth(Sema &S, CallExpr *Call,
+ PointerAuthOpKind OpKind,
+ bool RequireConstant) {
+ if (S.checkArgCount(Call, 3))
+ return ExprError();
+ if (checkPointerAuthEnabled(S, Call))
+ return ExprError();
+ if (checkPointerAuthValue(S, Call->getArgs()[0], OpKind, RequireConstant) ||
+ checkPointerAuthKey(S, Call->getArgs()[1]) ||
+ checkPointerAuthValue(S, Call->getArgs()[2], PAO_Discriminator,
+ RequireConstant))
+ return ExprError();
- return false;
+ Call->setType(Call->getArgs()[0]->getType());
+ return Call;
}
-// Performs a semantic analysis on the call to built-in Pipe
-// Query Functions.
-// \param S Reference to the semantic analyzer.
-// \param Call The call to the builtin function to be analyzed.
-// \return True if a semantic error was found, false otherwise.
-static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
- if (checkArgCount(S, Call, 1))
- return true;
-
- if (!Call->getArg(0)->getType()->isPipeType()) {
- S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
- << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
- return true;
- }
+static ExprResult PointerAuthAuthAndResign(Sema &S, CallExpr *Call) {
+ if (S.checkArgCount(Call, 5))
+ return ExprError();
+ if (checkPointerAuthEnabled(S, Call))
+ return ExprError();
+ if (checkPointerAuthValue(S, Call->getArgs()[0], PAO_Auth) ||
+ checkPointerAuthKey(S, Call->getArgs()[1]) ||
+ checkPointerAuthValue(S, Call->getArgs()[2], PAO_Discriminator) ||
+ checkPointerAuthKey(S, Call->getArgs()[3]) ||
+ checkPointerAuthValue(S, Call->getArgs()[4], PAO_Discriminator))
+ return ExprError();
- return false;
+ Call->setType(Call->getArgs()[0]->getType());
+ return Call;
}
-// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
-// Performs semantic analysis for the to_global/local/private call.
-// \param S Reference to the semantic analyzer.
-// \param BuiltinID ID of the builtin function.
-// \param Call A pointer to the builtin call.
-// \return True if a semantic error has been found, false otherwise.
-static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
- CallExpr *Call) {
- if (checkArgCount(S, Call, 1))
- return true;
-
- auto RT = Call->getArg(0)->getType();
- if (!RT->isPointerType() || RT->getPointeeType()
- .getAddressSpace() == LangAS::opencl_constant) {
- S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
- << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
- return true;
- }
+static ExprResult PointerAuthStringDiscriminator(Sema &S, CallExpr *Call) {
+ if (checkPointerAuthEnabled(S, Call))
+ return ExprError();
- if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
- S.Diag(Call->getArg(0)->getBeginLoc(),
- diag::warn_opencl_generic_address_space_arg)
- << Call->getDirectCallee()->getNameInfo().getAsString()
- << Call->getArg(0)->getSourceRange();
- }
+ // We've already performed normal call type-checking.
+ const Expr *Arg = Call->getArg(0)->IgnoreParenImpCasts();
- RT = RT->getPointeeType();
- auto Qual = RT.getQualifiers();
- switch (BuiltinID) {
- case Builtin::BIto_global:
- Qual.setAddressSpace(LangAS::opencl_global);
- break;
- case Builtin::BIto_local:
- Qual.setAddressSpace(LangAS::opencl_local);
- break;
- case Builtin::BIto_private:
- Qual.setAddressSpace(LangAS::opencl_private);
- break;
- default:
- llvm_unreachable("Invalid builtin function");
+ // Operand must be an ordinary or UTF-8 string literal.
+ const auto *Literal = dyn_cast<StringLiteral>(Arg);
+ if (!Literal || Literal->getCharByteWidth() != 1) {
+ S.Diag(Arg->getExprLoc(), diag::err_ptrauth_string_not_literal)
+ << (Literal ? 1 : 0) << Arg->getSourceRange();
+ return ExprError();
}
- Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
- RT.getUnqualifiedType(), Qual)));
- return false;
+ return Call;
}
-static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 1))
+static ExprResult BuiltinLaunder(Sema &S, CallExpr *TheCall) {
+ if (S.checkArgCount(TheCall, 1))
return ExprError();
// Compute __builtin_launder's parameter type from the argument.
@@ -2074,45 +1875,46 @@ bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
- return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ return ARM().CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_32:
case llvm::Triple::aarch64_be:
- return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
+ return ARM().CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::bpfeb:
case llvm::Triple::bpfel:
- return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
+ return BPF().CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
case llvm::Triple::hexagon:
- return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
+ return Hexagon().CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
case llvm::Triple::mips:
case llvm::Triple::mipsel:
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
- return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ return MIPS().CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::systemz:
- return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
+ return SystemZ().CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
case llvm::Triple::x86:
case llvm::Triple::x86_64:
- return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
+ return X86().CheckBuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::ppc:
case llvm::Triple::ppcle:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
- return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ return PPC().CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::amdgcn:
- return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
+ return AMDGPU().CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
- return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ return RISCV().CheckBuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::loongarch32:
case llvm::Triple::loongarch64:
- return CheckLoongArchBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ return LoongArch().CheckLoongArchBuiltinFunctionCall(TI, BuiltinID,
+ TheCall);
case llvm::Triple::wasm32:
case llvm::Triple::wasm64:
- return CheckWebAssemblyBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ return Wasm().CheckWebAssemblyBuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::nvptx:
case llvm::Triple::nvptx64:
- return CheckNVPTXBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ return NVPTX().CheckNVPTXBuiltinFunctionCall(TI, BuiltinID, TheCall);
}
}
@@ -2120,10 +1922,11 @@ bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
// not a valid type, emit an error message and return true. Otherwise return
// false.
static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc,
- QualType Ty) {
- if (!Ty->getAs<VectorType>() && !ConstantMatrixType::isValidElementType(Ty)) {
+ QualType ArgTy, int ArgIndex) {
+ if (!ArgTy->getAs<VectorType>() &&
+ !ConstantMatrixType::isValidElementType(ArgTy)) {
return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
- << 1 << /* vector, integer or float ty*/ 0 << Ty;
+ << ArgIndex << /* vector, integer or float ty*/ 0 << ArgTy;
}
return false;
@@ -2143,6 +1946,116 @@ static bool checkFPMathBuiltinElementType(Sema &S, SourceLocation Loc,
return false;
}
+/// BuiltinCpu{Supports|Is} - Handle __builtin_cpu_{supports|is}(char *).
+/// This checks that the target supports the builtin and that the string
+/// argument is constant and valid.
+static bool BuiltinCpu(Sema &S, const TargetInfo &TI, CallExpr *TheCall,
+ const TargetInfo *AuxTI, unsigned BuiltinID) {
+ assert((BuiltinID == Builtin::BI__builtin_cpu_supports ||
+ BuiltinID == Builtin::BI__builtin_cpu_is) &&
+ "Expecting __builtin_cpu_...");
+
+ bool IsCPUSupports = BuiltinID == Builtin::BI__builtin_cpu_supports;
+ const TargetInfo *TheTI = &TI;
+ auto SupportsBI = [=](const TargetInfo *TInfo) {
+ return TInfo && ((IsCPUSupports && TInfo->supportsCpuSupports()) ||
+ (!IsCPUSupports && TInfo->supportsCpuIs()));
+ };
+ if (!SupportsBI(&TI) && SupportsBI(AuxTI))
+ TheTI = AuxTI;
+
+ if ((!IsCPUSupports && !TheTI->supportsCpuIs()) ||
+ (IsCPUSupports && !TheTI->supportsCpuSupports()))
+ return S.Diag(TheCall->getBeginLoc(),
+ TI.getTriple().isOSAIX()
+ ? diag::err_builtin_aix_os_unsupported
+ : diag::err_builtin_target_unsupported)
+ << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
+
+ Expr *Arg = TheCall->getArg(0)->IgnoreParenImpCasts();
+ // Check if the argument is a string literal.
+ if (!isa<StringLiteral>(Arg))
+ return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
+ << Arg->getSourceRange();
+
+ // Check the contents of the string.
+ StringRef Feature = cast<StringLiteral>(Arg)->getString();
+ if (IsCPUSupports && !TheTI->validateCpuSupports(Feature)) {
+ S.Diag(TheCall->getBeginLoc(), diag::warn_invalid_cpu_supports)
+ << Arg->getSourceRange();
+ return false;
+ }
+ if (!IsCPUSupports && !TheTI->validateCpuIs(Feature))
+ return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
+ << Arg->getSourceRange();
+ return false;
+}
+
+/// Checks that __builtin_popcountg was called with a single argument, which is
+/// an unsigned integer.
+static bool BuiltinPopcountg(Sema &S, CallExpr *TheCall) {
+ if (S.checkArgCount(TheCall, 1))
+ return true;
+
+ ExprResult ArgRes = S.DefaultLvalueConversion(TheCall->getArg(0));
+ if (ArgRes.isInvalid())
+ return true;
+
+ Expr *Arg = ArgRes.get();
+ TheCall->setArg(0, Arg);
+
+ QualType ArgTy = Arg->getType();
+
+ if (!ArgTy->isUnsignedIntegerType()) {
+ S.Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
+ << 1 << /*unsigned integer ty*/ 7 << ArgTy;
+ return true;
+ }
+ return false;
+}
+
+/// Checks that __builtin_{clzg,ctzg} was called with a first argument, which is
+/// an unsigned integer, and an optional second argument, which is promoted to
+/// an 'int'.
+static bool BuiltinCountZeroBitsGeneric(Sema &S, CallExpr *TheCall) {
+ if (S.checkArgCountRange(TheCall, 1, 2))
+ return true;
+
+ ExprResult Arg0Res = S.DefaultLvalueConversion(TheCall->getArg(0));
+ if (Arg0Res.isInvalid())
+ return true;
+
+ Expr *Arg0 = Arg0Res.get();
+ TheCall->setArg(0, Arg0);
+
+ QualType Arg0Ty = Arg0->getType();
+
+ if (!Arg0Ty->isUnsignedIntegerType()) {
+ S.Diag(Arg0->getBeginLoc(), diag::err_builtin_invalid_arg_type)
+ << 1 << /*unsigned integer ty*/ 7 << Arg0Ty;
+ return true;
+ }
+
+ if (TheCall->getNumArgs() > 1) {
+ ExprResult Arg1Res = S.UsualUnaryConversions(TheCall->getArg(1));
+ if (Arg1Res.isInvalid())
+ return true;
+
+ Expr *Arg1 = Arg1Res.get();
+ TheCall->setArg(1, Arg1);
+
+ QualType Arg1Ty = Arg1->getType();
+
+ if (!Arg1Ty->isSpecificBuiltinType(BuiltinType::Int)) {
+ S.Diag(Arg1->getBeginLoc(), diag::err_builtin_invalid_arg_type)
+ << 2 << /*'int' ty*/ 8 << Arg1Ty;
+ return true;
+ }
+ }
+
+ return false;
+}
+
ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
CallExpr *TheCall) {
@@ -2164,13 +2077,26 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
// If we don't have enough arguments, continue so we can issue better
// diagnostic in checkArgCount(...)
if (ArgNo < TheCall->getNumArgs() &&
- SemaBuiltinConstantArg(TheCall, ArgNo, Result))
+ BuiltinConstantArg(TheCall, ArgNo, Result))
return true;
ICEArguments &= ~(1 << ArgNo);
}
FPOptions FPO;
switch (BuiltinID) {
+ case Builtin::BI__builtin_cpu_supports:
+ case Builtin::BI__builtin_cpu_is:
+ if (BuiltinCpu(*this, Context.getTargetInfo(), TheCall,
+ Context.getAuxTargetInfo(), BuiltinID))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_cpu_init:
+ if (!Context.getTargetInfo().supportsCpuInit()) {
+ Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
+ << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
+ return ExprError();
+ }
+ break;
case Builtin::BI__builtin___CFStringMakeConstantString:
// CFStringMakeConstantString is currently not implemented for GOFF (i.e.,
// on z/OS) and for XCOFF (i.e., on AIX). Emit unsupported
@@ -2180,13 +2106,13 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return ExprError();
assert(TheCall->getNumArgs() == 1 &&
"Wrong # arguments to builtin CFStringMakeConstantString");
- if (CheckObjCString(TheCall->getArg(0)))
+ if (ObjC().CheckObjCString(TheCall->getArg(0)))
return ExprError();
break;
case Builtin::BI__builtin_ms_va_start:
case Builtin::BI__builtin_stdarg_start:
case Builtin::BI__builtin_va_start:
- if (SemaBuiltinVAStart(BuiltinID, TheCall))
+ if (BuiltinVAStart(BuiltinID, TheCall))
return ExprError();
break;
case Builtin::BI__va_start: {
@@ -2194,11 +2120,11 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case llvm::Triple::aarch64:
case llvm::Triple::arm:
case llvm::Triple::thumb:
- if (SemaBuiltinVAStartARMMicrosoft(TheCall))
+ if (BuiltinVAStartARMMicrosoft(TheCall))
return ExprError();
break;
default:
- if (SemaBuiltinVAStart(BuiltinID, TheCall))
+ if (BuiltinVAStart(BuiltinID, TheCall))
return ExprError();
break;
}
@@ -2225,18 +2151,18 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BI_bittestandset64:
case Builtin::BI_interlockedbittestandreset64:
case Builtin::BI_interlockedbittestandset64:
- if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall,
- {llvm::Triple::x86_64, llvm::Triple::arm,
- llvm::Triple::thumb,
- llvm::Triple::aarch64}))
+ if (CheckBuiltinTargetInSupported(
+ *this, BuiltinID, TheCall,
+ {llvm::Triple::x86_64, llvm::Triple::arm, llvm::Triple::thumb,
+ llvm::Triple::aarch64, llvm::Triple::amdgcn}))
return ExprError();
break;
case Builtin::BI__builtin_set_flt_rounds:
- if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall,
- {llvm::Triple::x86, llvm::Triple::x86_64,
- llvm::Triple::arm, llvm::Triple::thumb,
- llvm::Triple::aarch64}))
+ if (CheckBuiltinTargetInSupported(
+ *this, BuiltinID, TheCall,
+ {llvm::Triple::x86, llvm::Triple::x86_64, llvm::Triple::arm,
+ llvm::Triple::thumb, llvm::Triple::aarch64, llvm::Triple::amdgcn}))
return ExprError();
break;
@@ -2246,15 +2172,15 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BI__builtin_islessequal:
case Builtin::BI__builtin_islessgreater:
case Builtin::BI__builtin_isunordered:
- if (SemaBuiltinUnorderedCompare(TheCall, BuiltinID))
+ if (BuiltinUnorderedCompare(TheCall, BuiltinID))
return ExprError();
break;
case Builtin::BI__builtin_fpclassify:
- if (SemaBuiltinFPClassification(TheCall, 6, BuiltinID))
+ if (BuiltinFPClassification(TheCall, 6, BuiltinID))
return ExprError();
break;
case Builtin::BI__builtin_isfpclass:
- if (SemaBuiltinFPClassification(TheCall, 2, BuiltinID))
+ if (BuiltinFPClassification(TheCall, 2, BuiltinID))
return ExprError();
break;
case Builtin::BI__builtin_isfinite:
@@ -2268,20 +2194,20 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BI__builtin_signbit:
case Builtin::BI__builtin_signbitf:
case Builtin::BI__builtin_signbitl:
- if (SemaBuiltinFPClassification(TheCall, 1, BuiltinID))
+ if (BuiltinFPClassification(TheCall, 1, BuiltinID))
return ExprError();
break;
case Builtin::BI__builtin_shufflevector:
- return SemaBuiltinShuffleVector(TheCall);
+ return BuiltinShuffleVector(TheCall);
// TheCall will be freed by the smart pointer here, but that's fine, since
- // SemaBuiltinShuffleVector guts it, but then doesn't release it.
+ // BuiltinShuffleVector guts it, but then doesn't release it.
case Builtin::BI__builtin_prefetch:
- if (SemaBuiltinPrefetch(TheCall))
+ if (BuiltinPrefetch(TheCall))
return ExprError();
break;
case Builtin::BI__builtin_alloca_with_align:
case Builtin::BI__builtin_alloca_with_align_uninitialized:
- if (SemaBuiltinAllocaWithAlign(TheCall))
+ if (BuiltinAllocaWithAlign(TheCall))
return ExprError();
[[fallthrough]];
case Builtin::BI__builtin_alloca:
@@ -2290,41 +2216,43 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
<< TheCall->getDirectCallee();
break;
case Builtin::BI__arithmetic_fence:
- if (SemaBuiltinArithmeticFence(TheCall))
+ if (BuiltinArithmeticFence(TheCall))
return ExprError();
break;
case Builtin::BI__assume:
case Builtin::BI__builtin_assume:
- if (SemaBuiltinAssume(TheCall))
+ if (BuiltinAssume(TheCall))
return ExprError();
break;
case Builtin::BI__builtin_assume_aligned:
- if (SemaBuiltinAssumeAligned(TheCall))
+ if (BuiltinAssumeAligned(TheCall))
return ExprError();
break;
case Builtin::BI__builtin_dynamic_object_size:
case Builtin::BI__builtin_object_size:
- if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
+ if (BuiltinConstantArgRange(TheCall, 1, 0, 3))
return ExprError();
break;
case Builtin::BI__builtin_longjmp:
- if (SemaBuiltinLongjmp(TheCall))
+ if (BuiltinLongjmp(TheCall))
return ExprError();
break;
case Builtin::BI__builtin_setjmp:
- if (SemaBuiltinSetjmp(TheCall))
+ if (BuiltinSetjmp(TheCall))
return ExprError();
break;
case Builtin::BI__builtin_classify_type:
- if (checkArgCount(*this, TheCall, 1)) return true;
+ if (checkArgCount(TheCall, 1))
+ return true;
TheCall->setType(Context.IntTy);
break;
case Builtin::BI__builtin_complex:
- if (SemaBuiltinComplex(TheCall))
+ if (BuiltinComplex(TheCall))
return ExprError();
break;
case Builtin::BI__builtin_constant_p: {
- if (checkArgCount(*this, TheCall, 1)) return true;
+ if (checkArgCount(TheCall, 1))
+ return true;
ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
if (Arg.isInvalid()) return true;
TheCall->setArg(0, Arg.get());
@@ -2332,7 +2260,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
break;
}
case Builtin::BI__builtin_launder:
- return SemaBuiltinLaunder(*this, TheCall);
+ return BuiltinLaunder(*this, TheCall);
case Builtin::BI__sync_fetch_and_add:
case Builtin::BI__sync_fetch_and_add_1:
case Builtin::BI__sync_fetch_and_add_2:
@@ -2435,14 +2363,14 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BI__sync_swap_4:
case Builtin::BI__sync_swap_8:
case Builtin::BI__sync_swap_16:
- return SemaBuiltinAtomicOverloaded(TheCallResult);
+ return BuiltinAtomicOverloaded(TheCallResult);
case Builtin::BI__sync_synchronize:
Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
<< TheCall->getCallee()->getSourceRange();
break;
case Builtin::BI__builtin_nontemporal_load:
case Builtin::BI__builtin_nontemporal_store:
- return SemaBuiltinNontemporalOverloaded(TheCallResult);
+ return BuiltinNontemporalOverloaded(TheCallResult);
case Builtin::BI__builtin_memcpy_inline: {
clang::Expr *SizeOp = TheCall->getArg(2);
// We warn about copying to or from `nullptr` pointers when `size` is
@@ -2468,52 +2396,52 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
break;
}
#define BUILTIN(ID, TYPE, ATTRS)
-#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
- case Builtin::BI##ID: \
- return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
-#include "clang/Basic/Builtins.def"
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
+ case Builtin::BI##ID: \
+ return AtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
+#include "clang/Basic/Builtins.inc"
case Builtin::BI__annotation:
- if (SemaBuiltinMSVCAnnotation(*this, TheCall))
+ if (BuiltinMSVCAnnotation(*this, TheCall))
return ExprError();
break;
case Builtin::BI__builtin_annotation:
- if (SemaBuiltinAnnotation(*this, TheCall))
+ if (BuiltinAnnotation(*this, TheCall))
return ExprError();
break;
case Builtin::BI__builtin_addressof:
- if (SemaBuiltinAddressof(*this, TheCall))
+ if (BuiltinAddressof(*this, TheCall))
return ExprError();
break;
case Builtin::BI__builtin_function_start:
- if (SemaBuiltinFunctionStart(*this, TheCall))
+ if (BuiltinFunctionStart(*this, TheCall))
return ExprError();
break;
case Builtin::BI__builtin_is_aligned:
case Builtin::BI__builtin_align_up:
case Builtin::BI__builtin_align_down:
- if (SemaBuiltinAlignment(*this, TheCall, BuiltinID))
+ if (BuiltinAlignment(*this, TheCall, BuiltinID))
return ExprError();
break;
case Builtin::BI__builtin_add_overflow:
case Builtin::BI__builtin_sub_overflow:
case Builtin::BI__builtin_mul_overflow:
- if (SemaBuiltinOverflow(*this, TheCall, BuiltinID))
+ if (BuiltinOverflow(*this, TheCall, BuiltinID))
return ExprError();
break;
case Builtin::BI__builtin_operator_new:
case Builtin::BI__builtin_operator_delete: {
bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
ExprResult Res =
- SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
+ BuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
if (Res.isInvalid())
CorrectDelayedTyposInExpr(TheCallResult.get());
return Res;
}
case Builtin::BI__builtin_dump_struct:
- return SemaBuiltinDumpStruct(*this, TheCall);
+ return BuiltinDumpStruct(*this, TheCall);
case Builtin::BI__builtin_expect_with_probability: {
// We first want to ensure we are called with 3 arguments
- if (checkArgCount(*this, TheCall, 3))
+ if (checkArgCount(TheCall, 3))
return ExprError();
// then check probability is constant float in range [0.0, 1.0]
const Expr *ProbArg = TheCall->getArg(2);
@@ -2541,27 +2469,27 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
break;
}
case Builtin::BI__builtin_preserve_access_index:
- if (SemaBuiltinPreserveAI(*this, TheCall))
+ if (BuiltinPreserveAI(*this, TheCall))
return ExprError();
break;
case Builtin::BI__builtin_call_with_static_chain:
- if (SemaBuiltinCallWithStaticChain(*this, TheCall))
+ if (BuiltinCallWithStaticChain(*this, TheCall))
return ExprError();
break;
case Builtin::BI__exception_code:
case Builtin::BI_exception_code:
- if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
- diag::err_seh___except_block))
+ if (BuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
+ diag::err_seh___except_block))
return ExprError();
break;
case Builtin::BI__exception_info:
case Builtin::BI_exception_info:
- if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
- diag::err_seh___except_filter))
+ if (BuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
+ diag::err_seh___except_filter))
return ExprError();
break;
case Builtin::BI__GetExceptionInfo:
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return ExprError();
if (CheckCXXThrowOperand(
@@ -2582,7 +2510,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
// These are all expected to be of the form
// T &/&&/* f(U &/&&)
// where T and U only differ in qualification.
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return ExprError();
QualType Param = FDecl->getParamDecl(0)->getType();
QualType Result = FDecl->getReturnType();
@@ -2599,76 +2527,95 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
}
break;
}
+ case Builtin::BI__builtin_ptrauth_strip:
+ return PointerAuthStrip(*this, TheCall);
+ case Builtin::BI__builtin_ptrauth_blend_discriminator:
+ return PointerAuthBlendDiscriminator(*this, TheCall);
+ case Builtin::BI__builtin_ptrauth_sign_constant:
+ return PointerAuthSignOrAuth(*this, TheCall, PAO_Sign,
+ /*RequireConstant=*/true);
+ case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
+ return PointerAuthSignOrAuth(*this, TheCall, PAO_Sign,
+ /*RequireConstant=*/false);
+ case Builtin::BI__builtin_ptrauth_auth:
+ return PointerAuthSignOrAuth(*this, TheCall, PAO_Auth,
+ /*RequireConstant=*/false);
+ case Builtin::BI__builtin_ptrauth_sign_generic_data:
+ return PointerAuthSignGenericData(*this, TheCall);
+ case Builtin::BI__builtin_ptrauth_auth_and_resign:
+ return PointerAuthAuthAndResign(*this, TheCall);
+ case Builtin::BI__builtin_ptrauth_string_discriminator:
+ return PointerAuthStringDiscriminator(*this, TheCall);
// OpenCL v2.0, s6.13.16 - Pipe functions
case Builtin::BIread_pipe:
case Builtin::BIwrite_pipe:
// Since those two functions are declared with var args, we need a semantic
// check for the argument.
- if (SemaBuiltinRWPipe(*this, TheCall))
+ if (OpenCL().checkBuiltinRWPipe(TheCall))
return ExprError();
break;
case Builtin::BIreserve_read_pipe:
case Builtin::BIreserve_write_pipe:
case Builtin::BIwork_group_reserve_read_pipe:
case Builtin::BIwork_group_reserve_write_pipe:
- if (SemaBuiltinReserveRWPipe(*this, TheCall))
+ if (OpenCL().checkBuiltinReserveRWPipe(TheCall))
return ExprError();
break;
case Builtin::BIsub_group_reserve_read_pipe:
case Builtin::BIsub_group_reserve_write_pipe:
- if (checkOpenCLSubgroupExt(*this, TheCall) ||
- SemaBuiltinReserveRWPipe(*this, TheCall))
+ if (OpenCL().checkSubgroupExt(TheCall) ||
+ OpenCL().checkBuiltinReserveRWPipe(TheCall))
return ExprError();
break;
case Builtin::BIcommit_read_pipe:
case Builtin::BIcommit_write_pipe:
case Builtin::BIwork_group_commit_read_pipe:
case Builtin::BIwork_group_commit_write_pipe:
- if (SemaBuiltinCommitRWPipe(*this, TheCall))
+ if (OpenCL().checkBuiltinCommitRWPipe(TheCall))
return ExprError();
break;
case Builtin::BIsub_group_commit_read_pipe:
case Builtin::BIsub_group_commit_write_pipe:
- if (checkOpenCLSubgroupExt(*this, TheCall) ||
- SemaBuiltinCommitRWPipe(*this, TheCall))
+ if (OpenCL().checkSubgroupExt(TheCall) ||
+ OpenCL().checkBuiltinCommitRWPipe(TheCall))
return ExprError();
break;
case Builtin::BIget_pipe_num_packets:
case Builtin::BIget_pipe_max_packets:
- if (SemaBuiltinPipePackets(*this, TheCall))
+ if (OpenCL().checkBuiltinPipePackets(TheCall))
return ExprError();
break;
case Builtin::BIto_global:
case Builtin::BIto_local:
case Builtin::BIto_private:
- if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
+ if (OpenCL().checkBuiltinToAddr(BuiltinID, TheCall))
return ExprError();
break;
// OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
case Builtin::BIenqueue_kernel:
- if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
+ if (OpenCL().checkBuiltinEnqueueKernel(TheCall))
return ExprError();
break;
case Builtin::BIget_kernel_work_group_size:
case Builtin::BIget_kernel_preferred_work_group_size_multiple:
- if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
+ if (OpenCL().checkBuiltinKernelWorkGroupSize(TheCall))
return ExprError();
break;
case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
case Builtin::BIget_kernel_sub_group_count_for_ndrange:
- if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
+ if (OpenCL().checkBuiltinNDRangeAndBlock(TheCall))
return ExprError();
break;
case Builtin::BI__builtin_os_log_format:
Cleanup.setExprNeedsCleanups(true);
[[fallthrough]];
case Builtin::BI__builtin_os_log_format_buffer_size:
- if (SemaBuiltinOSLogFormat(TheCall))
+ if (BuiltinOSLogFormat(TheCall))
return ExprError();
break;
case Builtin::BI__builtin_frame_address:
case Builtin::BI__builtin_return_address: {
- if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF))
+ if (BuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF))
return ExprError();
// -Wframe-address warning if non-zero passed to builtin
@@ -2686,7 +2633,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
}
case Builtin::BI__builtin_nondeterministic_value: {
- if (SemaBuiltinNonDeterministicValue(TheCall))
+ if (BuiltinNonDeterministicValue(TheCall))
return ExprError();
break;
}
@@ -2713,8 +2660,12 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
// These builtins restrict the element type to floating point
// types only.
+ case Builtin::BI__builtin_elementwise_acos:
+ case Builtin::BI__builtin_elementwise_asin:
+ case Builtin::BI__builtin_elementwise_atan:
case Builtin::BI__builtin_elementwise_ceil:
case Builtin::BI__builtin_elementwise_cos:
+ case Builtin::BI__builtin_elementwise_cosh:
case Builtin::BI__builtin_elementwise_exp:
case Builtin::BI__builtin_elementwise_exp2:
case Builtin::BI__builtin_elementwise_floor:
@@ -2726,7 +2677,10 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BI__builtin_elementwise_rint:
case Builtin::BI__builtin_elementwise_nearbyint:
case Builtin::BI__builtin_elementwise_sin:
+ case Builtin::BI__builtin_elementwise_sinh:
case Builtin::BI__builtin_elementwise_sqrt:
+ case Builtin::BI__builtin_elementwise_tan:
+ case Builtin::BI__builtin_elementwise_tanh:
case Builtin::BI__builtin_elementwise_trunc:
case Builtin::BI__builtin_elementwise_canonicalize: {
if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
@@ -2739,7 +2693,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
break;
}
case Builtin::BI__builtin_elementwise_fma: {
- if (SemaBuiltinElementwiseTernaryMath(TheCall))
+ if (BuiltinElementwiseTernaryMath(TheCall))
return ExprError();
break;
}
@@ -2747,7 +2701,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
// These builtins restrict the element type to floating point
// types only, and take in two arguments.
case Builtin::BI__builtin_elementwise_pow: {
- if (SemaBuiltinElementwiseMath(TheCall))
+ if (BuiltinElementwiseMath(TheCall))
return ExprError();
QualType ArgTy = TheCall->getArg(0)->getType();
@@ -2763,7 +2717,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
// types only.
case Builtin::BI__builtin_elementwise_add_sat:
case Builtin::BI__builtin_elementwise_sub_sat: {
- if (SemaBuiltinElementwiseMath(TheCall))
+ if (BuiltinElementwiseMath(TheCall))
return ExprError();
const Expr *Arg = TheCall->getArg(0);
@@ -2783,7 +2737,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BI__builtin_elementwise_min:
case Builtin::BI__builtin_elementwise_max:
- if (SemaBuiltinElementwiseMath(TheCall))
+ if (BuiltinElementwiseMath(TheCall))
return ExprError();
break;
@@ -2807,7 +2761,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
}
case Builtin::BI__builtin_elementwise_copysign: {
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return ExprError();
ExprResult Magnitude = UsualUnaryConversions(TheCall->getArg(0));
@@ -2842,13 +2796,20 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
const Expr *Arg = TheCall->getArg(0);
const auto *TyA = Arg->getType()->getAs<VectorType>();
- if (!TyA) {
+
+ QualType ElTy;
+ if (TyA)
+ ElTy = TyA->getElementType();
+ else if (Arg->getType()->isSizelessVectorType())
+ ElTy = Arg->getType()->getSizelessVectorEltType(Context);
+
+ if (ElTy.isNull()) {
Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
<< 1 << /* vector ty*/ 4 << Arg->getType();
return ExprError();
}
- TheCall->setType(TyA->getElementType());
+ TheCall->setType(ElTy);
break;
}
@@ -2864,23 +2825,36 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
const Expr *Arg = TheCall->getArg(0);
const auto *TyA = Arg->getType()->getAs<VectorType>();
- if (!TyA || !TyA->getElementType()->isIntegerType()) {
+
+ QualType ElTy;
+ if (TyA)
+ ElTy = TyA->getElementType();
+ else if (Arg->getType()->isSizelessVectorType())
+ ElTy = Arg->getType()->getSizelessVectorEltType(Context);
+
+ if (ElTy.isNull() || !ElTy->isIntegerType()) {
Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
<< 1 << /* vector of integers */ 6 << Arg->getType();
return ExprError();
}
- TheCall->setType(TyA->getElementType());
+
+ TheCall->setType(ElTy);
break;
}
case Builtin::BI__builtin_matrix_transpose:
- return SemaBuiltinMatrixTranspose(TheCall, TheCallResult);
+ return BuiltinMatrixTranspose(TheCall, TheCallResult);
case Builtin::BI__builtin_matrix_column_major_load:
- return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult);
+ return BuiltinMatrixColumnMajorLoad(TheCall, TheCallResult);
case Builtin::BI__builtin_matrix_column_major_store:
- return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult);
+ return BuiltinMatrixColumnMajorStore(TheCall, TheCallResult);
+
+ case Builtin::BI__builtin_verbose_trap:
+ if (!checkBuiltinVerboseTrap(TheCall, *this))
+ return ExprError();
+ break;
case Builtin::BI__builtin_get_device_side_mangled_name: {
auto Check = [](CallExpr *TheCall) {
@@ -2900,9 +2874,33 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
diag::err_hip_invalid_args_builtin_mangled_name);
return ExprError();
}
+ break;
+ }
+ case Builtin::BI__builtin_popcountg:
+ if (BuiltinPopcountg(*this, TheCall))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_clzg:
+ case Builtin::BI__builtin_ctzg:
+ if (BuiltinCountZeroBitsGeneric(*this, TheCall))
+ return ExprError();
+ break;
+
+ case Builtin::BI__builtin_allow_runtime_check: {
+ Expr *Arg = TheCall->getArg(0);
+ // Check if the argument is a string literal.
+ if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) {
+ Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
+ << Arg->getSourceRange();
+ return ExprError();
+ }
+ break;
}
}
+ if (getLangOpts().HLSL && HLSL().CheckBuiltinFunctionCall(BuiltinID, TheCall))
+ return ExprError();
+
// Since the target specific builtins for each arch overlap, only check those
// of the arch we are compiling for.
if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
@@ -2924,1992 +2922,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return TheCallResult;
}
-// Get the valid immediate range for the specified NEON type code.
-static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
- NeonTypeFlags Type(t);
- int IsQuad = ForceQuad ? true : Type.isQuad();
- switch (Type.getEltType()) {
- case NeonTypeFlags::Int8:
- case NeonTypeFlags::Poly8:
- return shift ? 7 : (8 << IsQuad) - 1;
- case NeonTypeFlags::Int16:
- case NeonTypeFlags::Poly16:
- return shift ? 15 : (4 << IsQuad) - 1;
- case NeonTypeFlags::Int32:
- return shift ? 31 : (2 << IsQuad) - 1;
- case NeonTypeFlags::Int64:
- case NeonTypeFlags::Poly64:
- return shift ? 63 : (1 << IsQuad) - 1;
- case NeonTypeFlags::Poly128:
- return shift ? 127 : (1 << IsQuad) - 1;
- case NeonTypeFlags::Float16:
- assert(!shift && "cannot shift float types!");
- return (4 << IsQuad) - 1;
- case NeonTypeFlags::Float32:
- assert(!shift && "cannot shift float types!");
- return (2 << IsQuad) - 1;
- case NeonTypeFlags::Float64:
- assert(!shift && "cannot shift float types!");
- return (1 << IsQuad) - 1;
- case NeonTypeFlags::BFloat16:
- assert(!shift && "cannot shift float types!");
- return (4 << IsQuad) - 1;
- }
- llvm_unreachable("Invalid NeonTypeFlag!");
-}
-
-/// getNeonEltType - Return the QualType corresponding to the elements of
-/// the vector type specified by the NeonTypeFlags. This is used to check
-/// the pointer arguments for Neon load/store intrinsics.
-static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
- bool IsPolyUnsigned, bool IsInt64Long) {
- switch (Flags.getEltType()) {
- case NeonTypeFlags::Int8:
- return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
- case NeonTypeFlags::Int16:
- return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
- case NeonTypeFlags::Int32:
- return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
- case NeonTypeFlags::Int64:
- if (IsInt64Long)
- return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
- else
- return Flags.isUnsigned() ? Context.UnsignedLongLongTy
- : Context.LongLongTy;
- case NeonTypeFlags::Poly8:
- return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
- case NeonTypeFlags::Poly16:
- return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
- case NeonTypeFlags::Poly64:
- if (IsInt64Long)
- return Context.UnsignedLongTy;
- else
- return Context.UnsignedLongLongTy;
- case NeonTypeFlags::Poly128:
- break;
- case NeonTypeFlags::Float16:
- return Context.HalfTy;
- case NeonTypeFlags::Float32:
- return Context.FloatTy;
- case NeonTypeFlags::Float64:
- return Context.DoubleTy;
- case NeonTypeFlags::BFloat16:
- return Context.BFloat16Ty;
- }
- llvm_unreachable("Invalid NeonTypeFlag!");
-}
-
-enum ArmStreamingType {
- ArmNonStreaming,
- ArmStreaming,
- ArmStreamingCompatible,
- ArmStreamingOrSVE2p1
-};
-
-enum ArmSMEState : unsigned {
- ArmNoState = 0,
-
- ArmInZA = 0b01,
- ArmOutZA = 0b10,
- ArmInOutZA = 0b11,
- ArmZAMask = 0b11,
-
- ArmInZT0 = 0b01 << 2,
- ArmOutZT0 = 0b10 << 2,
- ArmInOutZT0 = 0b11 << 2,
- ArmZT0Mask = 0b11 << 2
-};
-
-bool Sema::ParseSVEImmChecks(
- CallExpr *TheCall, SmallVector<std::tuple<int, int, int>, 3> &ImmChecks) {
- // Perform all the immediate checks for this builtin call.
- bool HasError = false;
- for (auto &I : ImmChecks) {
- int ArgNum, CheckTy, ElementSizeInBits;
- std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;
-
- typedef bool (*OptionSetCheckFnTy)(int64_t Value);
-
- // Function that checks whether the operand (ArgNum) is an immediate
- // that is one of the predefined values.
- auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
- int ErrDiag) -> bool {
- // We can't check the value of a dependent argument.
- Expr *Arg = TheCall->getArg(ArgNum);
- if (Arg->isTypeDependent() || Arg->isValueDependent())
- return false;
-
- // Check constant-ness first.
- llvm::APSInt Imm;
- if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm))
- return true;
-
- if (!CheckImm(Imm.getSExtValue()))
- return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
- return false;
- };
-
- switch ((SVETypeFlags::ImmCheckType)CheckTy) {
- case SVETypeFlags::ImmCheck0_31:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck0_13:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck1_16:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck0_7:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck1_1:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 1))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck1_3:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 3))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck1_7:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 7))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheckExtract:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
- (2048 / ElementSizeInBits) - 1))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheckShiftRight:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheckShiftRightNarrow:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1,
- ElementSizeInBits / 2))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheckShiftLeft:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
- ElementSizeInBits - 1))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheckLaneIndex:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
- (128 / (1 * ElementSizeInBits)) - 1))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheckLaneIndexCompRotate:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
- (128 / (2 * ElementSizeInBits)) - 1))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheckLaneIndexDot:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
- (128 / (4 * ElementSizeInBits)) - 1))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheckComplexRot90_270:
- if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
- diag::err_rotation_argument_to_cadd))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheckComplexRotAll90:
- if (CheckImmediateInSet(
- [](int64_t V) {
- return V == 0 || V == 90 || V == 180 || V == 270;
- },
- diag::err_rotation_argument_to_cmla))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck0_1:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck0_2:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck0_3:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck0_0:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 0))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck0_15:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 15))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck0_255:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 255))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck2_4_Mul2:
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 2, 4) ||
- SemaBuiltinConstantArgMultiple(TheCall, ArgNum, 2))
- HasError = true;
- break;
- }
- }
-
- return HasError;
-}
-
-static ArmStreamingType getArmStreamingFnType(const FunctionDecl *FD) {
- if (FD->hasAttr<ArmLocallyStreamingAttr>())
- return ArmStreaming;
- if (const auto *T = FD->getType()->getAs<FunctionProtoType>()) {
- if (T->getAArch64SMEAttributes() & FunctionType::SME_PStateSMEnabledMask)
- return ArmStreaming;
- if (T->getAArch64SMEAttributes() & FunctionType::SME_PStateSMCompatibleMask)
- return ArmStreamingCompatible;
- }
- return ArmNonStreaming;
-}
-
-static void checkArmStreamingBuiltin(Sema &S, CallExpr *TheCall,
- const FunctionDecl *FD,
- ArmStreamingType BuiltinType) {
- ArmStreamingType FnType = getArmStreamingFnType(FD);
- if (BuiltinType == ArmStreamingOrSVE2p1) {
- // Check intrinsics that are available in [sve2p1 or sme/sme2].
- llvm::StringMap<bool> CallerFeatureMap;
- S.Context.getFunctionFeatureMap(CallerFeatureMap, FD);
- if (Builtin::evaluateRequiredTargetFeatures("sve2p1", CallerFeatureMap))
- BuiltinType = ArmStreamingCompatible;
- else
- BuiltinType = ArmStreaming;
- }
-
- if (FnType == ArmStreaming && BuiltinType == ArmNonStreaming) {
- S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
- << TheCall->getSourceRange() << "streaming";
- }
-
- if (FnType == ArmStreamingCompatible &&
- BuiltinType != ArmStreamingCompatible) {
- S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
- << TheCall->getSourceRange() << "streaming compatible";
- return;
- }
-
- if (FnType == ArmNonStreaming && BuiltinType == ArmStreaming) {
- S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
- << TheCall->getSourceRange() << "non-streaming";
- }
-}
-
-static bool hasArmZAState(const FunctionDecl *FD) {
- const auto *T = FD->getType()->getAs<FunctionProtoType>();
- return (T && FunctionType::getArmZAState(T->getAArch64SMEAttributes()) !=
- FunctionType::ARM_None) ||
- (FD->hasAttr<ArmNewAttr>() && FD->getAttr<ArmNewAttr>()->isNewZA());
-}
-
-static bool hasArmZT0State(const FunctionDecl *FD) {
- const auto *T = FD->getType()->getAs<FunctionProtoType>();
- return (T && FunctionType::getArmZT0State(T->getAArch64SMEAttributes()) !=
- FunctionType::ARM_None) ||
- (FD->hasAttr<ArmNewAttr>() && FD->getAttr<ArmNewAttr>()->isNewZT0());
-}
-
-static ArmSMEState getSMEState(unsigned BuiltinID) {
- switch (BuiltinID) {
- default:
- return ArmNoState;
-#define GET_SME_BUILTIN_GET_STATE
-#include "clang/Basic/arm_sme_builtins_za_state.inc"
-#undef GET_SME_BUILTIN_GET_STATE
- }
-}
-
-bool Sema::CheckSMEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
- if (const FunctionDecl *FD = getCurFunctionDecl()) {
- std::optional<ArmStreamingType> BuiltinType;
-
- switch (BuiltinID) {
-#define GET_SME_STREAMING_ATTRS
-#include "clang/Basic/arm_sme_streaming_attrs.inc"
-#undef GET_SME_STREAMING_ATTRS
- }
-
- if (BuiltinType)
- checkArmStreamingBuiltin(*this, TheCall, FD, *BuiltinType);
-
- if ((getSMEState(BuiltinID) & ArmZAMask) && !hasArmZAState(FD))
- Diag(TheCall->getBeginLoc(),
- diag::warn_attribute_arm_za_builtin_no_za_state)
- << TheCall->getSourceRange();
-
- if ((getSMEState(BuiltinID) & ArmZT0Mask) && !hasArmZT0State(FD))
- Diag(TheCall->getBeginLoc(),
- diag::warn_attribute_arm_zt0_builtin_no_zt0_state)
- << TheCall->getSourceRange();
- }
-
- // Range check SME intrinsics that take immediate values.
- SmallVector<std::tuple<int, int, int>, 3> ImmChecks;
-
- switch (BuiltinID) {
- default:
- return false;
-#define GET_SME_IMMEDIATE_CHECK
-#include "clang/Basic/arm_sme_sema_rangechecks.inc"
-#undef GET_SME_IMMEDIATE_CHECK
- }
-
- return ParseSVEImmChecks(TheCall, ImmChecks);
-}
-
-bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
- if (const FunctionDecl *FD = getCurFunctionDecl()) {
- std::optional<ArmStreamingType> BuiltinType;
-
- switch (BuiltinID) {
-#define GET_SVE_STREAMING_ATTRS
-#include "clang/Basic/arm_sve_streaming_attrs.inc"
-#undef GET_SVE_STREAMING_ATTRS
- }
- if (BuiltinType)
- checkArmStreamingBuiltin(*this, TheCall, FD, *BuiltinType);
- }
- // Range check SVE intrinsics that take immediate values.
- SmallVector<std::tuple<int, int, int>, 3> ImmChecks;
-
- switch (BuiltinID) {
- default:
- return false;
-#define GET_SVE_IMMEDIATE_CHECK
-#include "clang/Basic/arm_sve_sema_rangechecks.inc"
-#undef GET_SVE_IMMEDIATE_CHECK
- }
-
- return ParseSVEImmChecks(TheCall, ImmChecks);
-}
-
-bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID, CallExpr *TheCall) {
- if (const FunctionDecl *FD = getCurFunctionDecl()) {
-
- switch (BuiltinID) {
- default:
- break;
-#define GET_NEON_BUILTINS
-#define TARGET_BUILTIN(id, ...) case NEON::BI##id:
-#define BUILTIN(id, ...) case NEON::BI##id:
-#include "clang/Basic/arm_neon.inc"
- checkArmStreamingBuiltin(*this, TheCall, FD, ArmNonStreaming);
- break;
-#undef TARGET_BUILTIN
-#undef BUILTIN
-#undef GET_NEON_BUILTINS
- }
- }
-
- llvm::APSInt Result;
- uint64_t mask = 0;
- unsigned TV = 0;
- int PtrArgNum = -1;
- bool HasConstPtr = false;
- switch (BuiltinID) {
-#define GET_NEON_OVERLOAD_CHECK
-#include "clang/Basic/arm_neon.inc"
-#include "clang/Basic/arm_fp16.inc"
-#undef GET_NEON_OVERLOAD_CHECK
- }
-
- // For NEON intrinsics which are overloaded on vector element type, validate
- // the immediate which specifies which variant to emit.
- unsigned ImmArg = TheCall->getNumArgs()-1;
- if (mask) {
- if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
- return true;
-
- TV = Result.getLimitedValue(64);
- if ((TV > 63) || (mask & (1ULL << TV)) == 0)
- return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
- << TheCall->getArg(ImmArg)->getSourceRange();
- }
-
- if (PtrArgNum >= 0) {
- // Check that pointer arguments have the specified type.
- Expr *Arg = TheCall->getArg(PtrArgNum);
- if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
- Arg = ICE->getSubExpr();
- ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
- QualType RHSTy = RHS.get()->getType();
-
- llvm::Triple::ArchType Arch = TI.getTriple().getArch();
- bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
- Arch == llvm::Triple::aarch64_32 ||
- Arch == llvm::Triple::aarch64_be;
- bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
- QualType EltTy =
- getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
- if (HasConstPtr)
- EltTy = EltTy.withConst();
- QualType LHSTy = Context.getPointerType(EltTy);
- AssignConvertType ConvTy;
- ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
- if (RHS.isInvalid())
- return true;
- if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
- RHS.get(), AA_Assigning))
- return true;
- }
-
- // For NEON intrinsics which take an immediate value as part of the
- // instruction, range check them here.
- unsigned i = 0, l = 0, u = 0;
- switch (BuiltinID) {
- default:
- return false;
- #define GET_NEON_IMMEDIATE_CHECK
- #include "clang/Basic/arm_neon.inc"
- #include "clang/Basic/arm_fp16.inc"
- #undef GET_NEON_IMMEDIATE_CHECK
- }
-
- return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
-}
-
-bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
- switch (BuiltinID) {
- default:
- return false;
- #include "clang/Basic/arm_mve_builtin_sema.inc"
- }
-}
-
-bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall) {
- bool Err = false;
- switch (BuiltinID) {
- default:
- return false;
-#include "clang/Basic/arm_cde_builtin_sema.inc"
- }
-
- if (Err)
- return true;
-
- return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
-}
-
-bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI,
- const Expr *CoprocArg, bool WantCDE) {
- if (isConstantEvaluatedContext())
- return false;
-
- // We can't check the value of a dependent argument.
- if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
- return false;
-
- llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
- int64_t CoprocNo = CoprocNoAP.getExtValue();
- assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");
-
- uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
- bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));
-
- if (IsCDECoproc != WantCDE)
- return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
- << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();
-
- return false;
-}
-
-bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
- unsigned MaxWidth) {
- assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
- BuiltinID == ARM::BI__builtin_arm_ldaex ||
- BuiltinID == ARM::BI__builtin_arm_strex ||
- BuiltinID == ARM::BI__builtin_arm_stlex ||
- BuiltinID == AArch64::BI__builtin_arm_ldrex ||
- BuiltinID == AArch64::BI__builtin_arm_ldaex ||
- BuiltinID == AArch64::BI__builtin_arm_strex ||
- BuiltinID == AArch64::BI__builtin_arm_stlex) &&
- "unexpected ARM builtin");
- bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
- BuiltinID == ARM::BI__builtin_arm_ldaex ||
- BuiltinID == AArch64::BI__builtin_arm_ldrex ||
- BuiltinID == AArch64::BI__builtin_arm_ldaex;
-
- DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
-
- // Ensure that we have the proper number of arguments.
- if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
- return true;
-
- // Inspect the pointer argument of the atomic builtin. This should always be
- // a pointer type, whose element is an integral scalar or pointer type.
- // Because it is a pointer type, we don't have to worry about any implicit
- // casts here.
- Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
- ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
- if (PointerArgRes.isInvalid())
- return true;
- PointerArg = PointerArgRes.get();
-
- const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
- if (!pointerType) {
- Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
- << PointerArg->getType() << PointerArg->getSourceRange();
- return true;
- }
-
- // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
- // task is to insert the appropriate casts into the AST. First work out just
- // what the appropriate type is.
- QualType ValType = pointerType->getPointeeType();
- QualType AddrType = ValType.getUnqualifiedType().withVolatile();
- if (IsLdrex)
- AddrType.addConst();
-
- // Issue a warning if the cast is dodgy.
- CastKind CastNeeded = CK_NoOp;
- if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
- CastNeeded = CK_BitCast;
- Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
- << PointerArg->getType() << Context.getPointerType(AddrType)
- << AA_Passing << PointerArg->getSourceRange();
- }
-
- // Finally, do the cast and replace the argument with the corrected version.
- AddrType = Context.getPointerType(AddrType);
- PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
- if (PointerArgRes.isInvalid())
- return true;
- PointerArg = PointerArgRes.get();
-
- TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);
-
- // In general, we allow ints, floats and pointers to be loaded and stored.
- if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
- !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
- Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
- << PointerArg->getType() << PointerArg->getSourceRange();
- return true;
- }
-
- // But ARM doesn't have instructions to deal with 128-bit versions.
- if (Context.getTypeSize(ValType) > MaxWidth) {
- assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
- Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
- << PointerArg->getType() << PointerArg->getSourceRange();
- return true;
- }
-
- switch (ValType.getObjCLifetime()) {
- case Qualifiers::OCL_None:
- case Qualifiers::OCL_ExplicitNone:
- // okay
- break;
-
- case Qualifiers::OCL_Weak:
- case Qualifiers::OCL_Strong:
- case Qualifiers::OCL_Autoreleasing:
- Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
- << ValType << PointerArg->getSourceRange();
- return true;
- }
-
- if (IsLdrex) {
- TheCall->setType(ValType);
- return false;
- }
-
- // Initialize the argument to be stored.
- ExprResult ValArg = TheCall->getArg(0);
- InitializedEntity Entity = InitializedEntity::InitializeParameter(
- Context, ValType, /*consume*/ false);
- ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
- if (ValArg.isInvalid())
- return true;
- TheCall->setArg(0, ValArg.get());
-
- // __builtin_arm_strex always returns an int. It's marked as such in the .def,
- // but the custom checker bypasses all default analysis.
- TheCall->setType(Context.IntTy);
- return false;
-}
-
-bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall) {
- if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
- BuiltinID == ARM::BI__builtin_arm_ldaex ||
- BuiltinID == ARM::BI__builtin_arm_strex ||
- BuiltinID == ARM::BI__builtin_arm_stlex) {
- return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
- }
-
- if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
- SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
- }
-
- if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
- BuiltinID == ARM::BI__builtin_arm_wsr64)
- return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);
-
- if (BuiltinID == ARM::BI__builtin_arm_rsr ||
- BuiltinID == ARM::BI__builtin_arm_rsrp ||
- BuiltinID == ARM::BI__builtin_arm_wsr ||
- BuiltinID == ARM::BI__builtin_arm_wsrp)
- return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
-
- if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
- return true;
- if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
- return true;
- if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
- return true;
-
- // For intrinsics which take an immediate value as part of the instruction,
- // range check them here.
- // FIXME: VFP Intrinsics should error if VFP not present.
- switch (BuiltinID) {
- default: return false;
- case ARM::BI__builtin_arm_ssat:
- return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
- case ARM::BI__builtin_arm_usat:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
- case ARM::BI__builtin_arm_ssat16:
- return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
- case ARM::BI__builtin_arm_usat16:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
- case ARM::BI__builtin_arm_vcvtr_f:
- case ARM::BI__builtin_arm_vcvtr_d:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
- case ARM::BI__builtin_arm_dmb:
- case ARM::BI__builtin_arm_dsb:
- case ARM::BI__builtin_arm_isb:
- case ARM::BI__builtin_arm_dbg:
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
- case ARM::BI__builtin_arm_cdp:
- case ARM::BI__builtin_arm_cdp2:
- case ARM::BI__builtin_arm_mcr:
- case ARM::BI__builtin_arm_mcr2:
- case ARM::BI__builtin_arm_mrc:
- case ARM::BI__builtin_arm_mrc2:
- case ARM::BI__builtin_arm_mcrr:
- case ARM::BI__builtin_arm_mcrr2:
- case ARM::BI__builtin_arm_mrrc:
- case ARM::BI__builtin_arm_mrrc2:
- case ARM::BI__builtin_arm_ldc:
- case ARM::BI__builtin_arm_ldcl:
- case ARM::BI__builtin_arm_ldc2:
- case ARM::BI__builtin_arm_ldc2l:
- case ARM::BI__builtin_arm_stc:
- case ARM::BI__builtin_arm_stcl:
- case ARM::BI__builtin_arm_stc2:
- case ARM::BI__builtin_arm_stc2l:
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) ||
- CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
- /*WantCDE*/ false);
- }
-}
-
-bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID,
- CallExpr *TheCall) {
- if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
- BuiltinID == AArch64::BI__builtin_arm_ldaex ||
- BuiltinID == AArch64::BI__builtin_arm_strex ||
- BuiltinID == AArch64::BI__builtin_arm_stlex) {
- return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
- }
-
- if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
- SemaBuiltinConstantArgRange(TheCall, 2, 0, 3) ||
- SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
- SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
- }
-
- if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
- BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
- BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
- BuiltinID == AArch64::BI__builtin_arm_wsr128)
- return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
-
- // Memory Tagging Extensions (MTE) Intrinsics
- if (BuiltinID == AArch64::BI__builtin_arm_irg ||
- BuiltinID == AArch64::BI__builtin_arm_addg ||
- BuiltinID == AArch64::BI__builtin_arm_gmi ||
- BuiltinID == AArch64::BI__builtin_arm_ldg ||
- BuiltinID == AArch64::BI__builtin_arm_stg ||
- BuiltinID == AArch64::BI__builtin_arm_subp) {
- return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
- }
-
- if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
- BuiltinID == AArch64::BI__builtin_arm_rsrp ||
- BuiltinID == AArch64::BI__builtin_arm_wsr ||
- BuiltinID == AArch64::BI__builtin_arm_wsrp)
- return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
-
- // Only check the valid encoding range. Any constant in this range would be
- // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
- // an exception for incorrect registers. This matches MSVC behavior.
- if (BuiltinID == AArch64::BI_ReadStatusReg ||
- BuiltinID == AArch64::BI_WriteStatusReg)
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);
-
- if (BuiltinID == AArch64::BI__getReg)
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
-
- if (BuiltinID == AArch64::BI__break)
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xffff);
-
- if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
- return true;
-
- if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
- return true;
-
- if (CheckSMEBuiltinFunctionCall(BuiltinID, TheCall))
- return true;
-
- // For intrinsics which take an immediate value as part of the instruction,
- // range check them here.
- unsigned i = 0, l = 0, u = 0;
- switch (BuiltinID) {
- default: return false;
- case AArch64::BI__builtin_arm_dmb:
- case AArch64::BI__builtin_arm_dsb:
- case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
- case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
- }
-
- return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
-}
-
-static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) {
- if (Arg->getType()->getAsPlaceholderType())
- return false;
-
- // The first argument needs to be a record field access.
- // If it is an array element access, we delay decision
- // to BPF backend to check whether the access is a
- // field access or not.
- return (Arg->IgnoreParens()->getObjectKind() == OK_BitField ||
- isa<MemberExpr>(Arg->IgnoreParens()) ||
- isa<ArraySubscriptExpr>(Arg->IgnoreParens()));
-}
-
-static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) {
- QualType ArgType = Arg->getType();
- if (ArgType->getAsPlaceholderType())
- return false;
-
- // for TYPE_EXISTENCE/TYPE_MATCH/TYPE_SIZEOF reloc type
- // format:
- // 1. __builtin_preserve_type_info(*(<type> *)0, flag);
- // 2. <type> var;
- // __builtin_preserve_type_info(var, flag);
- if (!isa<DeclRefExpr>(Arg->IgnoreParens()) &&
- !isa<UnaryOperator>(Arg->IgnoreParens()))
- return false;
-
- // Typedef type.
- if (ArgType->getAs<TypedefType>())
- return true;
-
- // Record type or Enum type.
- const Type *Ty = ArgType->getUnqualifiedDesugaredType();
- if (const auto *RT = Ty->getAs<RecordType>()) {
- if (!RT->getDecl()->getDeclName().isEmpty())
- return true;
- } else if (const auto *ET = Ty->getAs<EnumType>()) {
- if (!ET->getDecl()->getDeclName().isEmpty())
- return true;
- }
-
- return false;
-}
-
-static bool isValidBPFPreserveEnumValueArg(Expr *Arg) {
- QualType ArgType = Arg->getType();
- if (ArgType->getAsPlaceholderType())
- return false;
-
- // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type
- // format:
- // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>,
- // flag);
- const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens());
- if (!UO)
- return false;
-
- const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr());
- if (!CE)
- return false;
- if (CE->getCastKind() != CK_IntegralToPointer &&
- CE->getCastKind() != CK_NullToPointer)
- return false;
-
- // The integer must be from an EnumConstantDecl.
- const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr());
- if (!DR)
- return false;
-
- const EnumConstantDecl *Enumerator =
- dyn_cast<EnumConstantDecl>(DR->getDecl());
- if (!Enumerator)
- return false;
-
- // The type must be EnumType.
- const Type *Ty = ArgType->getUnqualifiedDesugaredType();
- const auto *ET = Ty->getAs<EnumType>();
- if (!ET)
- return false;
-
- // The enum value must be supported.
- return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator);
-}
-
-bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
- CallExpr *TheCall) {
- assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
- BuiltinID == BPF::BI__builtin_btf_type_id ||
- BuiltinID == BPF::BI__builtin_preserve_type_info ||
- BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
- "unexpected BPF builtin");
-
- if (checkArgCount(*this, TheCall, 2))
- return true;
-
- // The second argument needs to be a constant int
- Expr *Arg = TheCall->getArg(1);
- std::optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context);
- diag::kind kind;
- if (!Value) {
- if (BuiltinID == BPF::BI__builtin_preserve_field_info)
- kind = diag::err_preserve_field_info_not_const;
- else if (BuiltinID == BPF::BI__builtin_btf_type_id)
- kind = diag::err_btf_type_id_not_const;
- else if (BuiltinID == BPF::BI__builtin_preserve_type_info)
- kind = diag::err_preserve_type_info_not_const;
- else
- kind = diag::err_preserve_enum_value_not_const;
- Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange();
- return true;
- }
-
- // The first argument
- Arg = TheCall->getArg(0);
- bool InvalidArg = false;
- bool ReturnUnsignedInt = true;
- if (BuiltinID == BPF::BI__builtin_preserve_field_info) {
- if (!isValidBPFPreserveFieldInfoArg(Arg)) {
- InvalidArg = true;
- kind = diag::err_preserve_field_info_not_field;
- }
- } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) {
- if (!isValidBPFPreserveTypeInfoArg(Arg)) {
- InvalidArg = true;
- kind = diag::err_preserve_type_info_invalid;
- }
- } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) {
- if (!isValidBPFPreserveEnumValueArg(Arg)) {
- InvalidArg = true;
- kind = diag::err_preserve_enum_value_invalid;
- }
- ReturnUnsignedInt = false;
- } else if (BuiltinID == BPF::BI__builtin_btf_type_id) {
- ReturnUnsignedInt = false;
- }
-
- if (InvalidArg) {
- Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange();
- return true;
- }
-
- if (ReturnUnsignedInt)
- TheCall->setType(Context.UnsignedIntTy);
- else
- TheCall->setType(Context.UnsignedLongTy);
- return false;
-}
-
-bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
- struct ArgInfo {
- uint8_t OpNum;
- bool IsSigned;
- uint8_t BitWidth;
- uint8_t Align;
- };
- struct BuiltinInfo {
- unsigned BuiltinID;
- ArgInfo Infos[2];
- };
-
- static BuiltinInfo Infos[] = {
- { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} },
- { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} },
- { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} },
- { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} },
- { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} },
- { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} },
- { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} },
- { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} },
- { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} },
- { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} },
- { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} },
-
- { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} },
- { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} },
- { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} },
- { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} },
-
- { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,
- {{ 1, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,
- {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 },
- { 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 },
- { 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 },
- { 3, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 },
- { 3, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax,
- {{ 2, false, 4, 0 },
- { 3, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax,
- {{ 2, false, 4, 0 },
- { 3, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax,
- {{ 2, false, 4, 0 },
- { 3, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax,
- {{ 2, false, 4, 0 },
- { 3, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 },
- { 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 },
- { 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,
- {{ 1, false, 4, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,
- {{ 1, false, 4, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
- {{ 3, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
- {{ 3, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
- {{ 3, false, 1, 0 }} },
-
- { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10, {{ 2, false, 2, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_128B,
- {{ 2, false, 2, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx,
- {{ 3, false, 2, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx_128B,
- {{ 3, false, 2, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10, {{ 2, false, 2, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_128B,
- {{ 2, false, 2, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx,
- {{ 3, false, 2, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B,
- {{ 3, false, 2, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, {{ 3, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B,
- {{ 3, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, {{ 3, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B,
- {{ 3, false, 3, 0 }} },
- };
-
- // Use a dynamically initialized static to sort the table exactly once on
- // first run.
- static const bool SortOnce =
- (llvm::sort(Infos,
- [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
- return LHS.BuiltinID < RHS.BuiltinID;
- }),
- true);
- (void)SortOnce;
-
- const BuiltinInfo *F = llvm::partition_point(
- Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; });
- if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
- return false;
-
- bool Error = false;
-
- for (const ArgInfo &A : F->Infos) {
- // Ignore empty ArgInfo elements.
- if (A.BitWidth == 0)
- continue;
-
- int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0;
- int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1;
- if (!A.Align) {
- Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
- } else {
- unsigned M = 1 << A.Align;
- Min *= M;
- Max *= M;
- Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
- Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
- }
- }
- return Error;
-}
-
-bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
- CallExpr *TheCall) {
- return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
-}
-
-bool Sema::CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID,
- CallExpr *TheCall) {
- switch (BuiltinID) {
- default:
- break;
- // Basic intrinsics.
- case LoongArch::BI__builtin_loongarch_cacop_d:
- case LoongArch::BI__builtin_loongarch_cacop_w: {
- SemaBuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(5));
- SemaBuiltinConstantArgRange(TheCall, 2, llvm::minIntN(12),
- llvm::maxIntN(12));
- break;
- }
- case LoongArch::BI__builtin_loongarch_break:
- case LoongArch::BI__builtin_loongarch_dbar:
- case LoongArch::BI__builtin_loongarch_ibar:
- case LoongArch::BI__builtin_loongarch_syscall:
- // Check if immediate is in [0, 32767].
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 32767);
- case LoongArch::BI__builtin_loongarch_csrrd_w:
- case LoongArch::BI__builtin_loongarch_csrrd_d:
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 16383);
- case LoongArch::BI__builtin_loongarch_csrwr_w:
- case LoongArch::BI__builtin_loongarch_csrwr_d:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 16383);
- case LoongArch::BI__builtin_loongarch_csrxchg_w:
- case LoongArch::BI__builtin_loongarch_csrxchg_d:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 16383);
- case LoongArch::BI__builtin_loongarch_lddir_d:
- case LoongArch::BI__builtin_loongarch_ldpte_d:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
- case LoongArch::BI__builtin_loongarch_movfcsr2gr:
- case LoongArch::BI__builtin_loongarch_movgr2fcsr:
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(2));
-
- // LSX intrinsics.
- case LoongArch::BI__builtin_lsx_vbitclri_b:
- case LoongArch::BI__builtin_lsx_vbitrevi_b:
- case LoongArch::BI__builtin_lsx_vbitseti_b:
- case LoongArch::BI__builtin_lsx_vsat_b:
- case LoongArch::BI__builtin_lsx_vsat_bu:
- case LoongArch::BI__builtin_lsx_vslli_b:
- case LoongArch::BI__builtin_lsx_vsrai_b:
- case LoongArch::BI__builtin_lsx_vsrari_b:
- case LoongArch::BI__builtin_lsx_vsrli_b:
- case LoongArch::BI__builtin_lsx_vsllwil_h_b:
- case LoongArch::BI__builtin_lsx_vsllwil_hu_bu:
- case LoongArch::BI__builtin_lsx_vrotri_b:
- case LoongArch::BI__builtin_lsx_vsrlri_b:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
- case LoongArch::BI__builtin_lsx_vbitclri_h:
- case LoongArch::BI__builtin_lsx_vbitrevi_h:
- case LoongArch::BI__builtin_lsx_vbitseti_h:
- case LoongArch::BI__builtin_lsx_vsat_h:
- case LoongArch::BI__builtin_lsx_vsat_hu:
- case LoongArch::BI__builtin_lsx_vslli_h:
- case LoongArch::BI__builtin_lsx_vsrai_h:
- case LoongArch::BI__builtin_lsx_vsrari_h:
- case LoongArch::BI__builtin_lsx_vsrli_h:
- case LoongArch::BI__builtin_lsx_vsllwil_w_h:
- case LoongArch::BI__builtin_lsx_vsllwil_wu_hu:
- case LoongArch::BI__builtin_lsx_vrotri_h:
- case LoongArch::BI__builtin_lsx_vsrlri_h:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
- case LoongArch::BI__builtin_lsx_vssrarni_b_h:
- case LoongArch::BI__builtin_lsx_vssrarni_bu_h:
- case LoongArch::BI__builtin_lsx_vssrani_b_h:
- case LoongArch::BI__builtin_lsx_vssrani_bu_h:
- case LoongArch::BI__builtin_lsx_vsrarni_b_h:
- case LoongArch::BI__builtin_lsx_vsrlni_b_h:
- case LoongArch::BI__builtin_lsx_vsrlrni_b_h:
- case LoongArch::BI__builtin_lsx_vssrlni_b_h:
- case LoongArch::BI__builtin_lsx_vssrlni_bu_h:
- case LoongArch::BI__builtin_lsx_vssrlrni_b_h:
- case LoongArch::BI__builtin_lsx_vssrlrni_bu_h:
- case LoongArch::BI__builtin_lsx_vsrani_b_h:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
- case LoongArch::BI__builtin_lsx_vslei_bu:
- case LoongArch::BI__builtin_lsx_vslei_hu:
- case LoongArch::BI__builtin_lsx_vslei_wu:
- case LoongArch::BI__builtin_lsx_vslei_du:
- case LoongArch::BI__builtin_lsx_vslti_bu:
- case LoongArch::BI__builtin_lsx_vslti_hu:
- case LoongArch::BI__builtin_lsx_vslti_wu:
- case LoongArch::BI__builtin_lsx_vslti_du:
- case LoongArch::BI__builtin_lsx_vmaxi_bu:
- case LoongArch::BI__builtin_lsx_vmaxi_hu:
- case LoongArch::BI__builtin_lsx_vmaxi_wu:
- case LoongArch::BI__builtin_lsx_vmaxi_du:
- case LoongArch::BI__builtin_lsx_vmini_bu:
- case LoongArch::BI__builtin_lsx_vmini_hu:
- case LoongArch::BI__builtin_lsx_vmini_wu:
- case LoongArch::BI__builtin_lsx_vmini_du:
- case LoongArch::BI__builtin_lsx_vaddi_bu:
- case LoongArch::BI__builtin_lsx_vaddi_hu:
- case LoongArch::BI__builtin_lsx_vaddi_wu:
- case LoongArch::BI__builtin_lsx_vaddi_du:
- case LoongArch::BI__builtin_lsx_vbitclri_w:
- case LoongArch::BI__builtin_lsx_vbitrevi_w:
- case LoongArch::BI__builtin_lsx_vbitseti_w:
- case LoongArch::BI__builtin_lsx_vsat_w:
- case LoongArch::BI__builtin_lsx_vsat_wu:
- case LoongArch::BI__builtin_lsx_vslli_w:
- case LoongArch::BI__builtin_lsx_vsrai_w:
- case LoongArch::BI__builtin_lsx_vsrari_w:
- case LoongArch::BI__builtin_lsx_vsrli_w:
- case LoongArch::BI__builtin_lsx_vsllwil_d_w:
- case LoongArch::BI__builtin_lsx_vsllwil_du_wu:
- case LoongArch::BI__builtin_lsx_vsrlri_w:
- case LoongArch::BI__builtin_lsx_vrotri_w:
- case LoongArch::BI__builtin_lsx_vsubi_bu:
- case LoongArch::BI__builtin_lsx_vsubi_hu:
- case LoongArch::BI__builtin_lsx_vbsrl_v:
- case LoongArch::BI__builtin_lsx_vbsll_v:
- case LoongArch::BI__builtin_lsx_vsubi_wu:
- case LoongArch::BI__builtin_lsx_vsubi_du:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
- case LoongArch::BI__builtin_lsx_vssrarni_h_w:
- case LoongArch::BI__builtin_lsx_vssrarni_hu_w:
- case LoongArch::BI__builtin_lsx_vssrani_h_w:
- case LoongArch::BI__builtin_lsx_vssrani_hu_w:
- case LoongArch::BI__builtin_lsx_vsrarni_h_w:
- case LoongArch::BI__builtin_lsx_vsrani_h_w:
- case LoongArch::BI__builtin_lsx_vfrstpi_b:
- case LoongArch::BI__builtin_lsx_vfrstpi_h:
- case LoongArch::BI__builtin_lsx_vsrlni_h_w:
- case LoongArch::BI__builtin_lsx_vsrlrni_h_w:
- case LoongArch::BI__builtin_lsx_vssrlni_h_w:
- case LoongArch::BI__builtin_lsx_vssrlni_hu_w:
- case LoongArch::BI__builtin_lsx_vssrlrni_h_w:
- case LoongArch::BI__builtin_lsx_vssrlrni_hu_w:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
- case LoongArch::BI__builtin_lsx_vbitclri_d:
- case LoongArch::BI__builtin_lsx_vbitrevi_d:
- case LoongArch::BI__builtin_lsx_vbitseti_d:
- case LoongArch::BI__builtin_lsx_vsat_d:
- case LoongArch::BI__builtin_lsx_vsat_du:
- case LoongArch::BI__builtin_lsx_vslli_d:
- case LoongArch::BI__builtin_lsx_vsrai_d:
- case LoongArch::BI__builtin_lsx_vsrli_d:
- case LoongArch::BI__builtin_lsx_vsrari_d:
- case LoongArch::BI__builtin_lsx_vrotri_d:
- case LoongArch::BI__builtin_lsx_vsrlri_d:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 63);
- case LoongArch::BI__builtin_lsx_vssrarni_w_d:
- case LoongArch::BI__builtin_lsx_vssrarni_wu_d:
- case LoongArch::BI__builtin_lsx_vssrani_w_d:
- case LoongArch::BI__builtin_lsx_vssrani_wu_d:
- case LoongArch::BI__builtin_lsx_vsrarni_w_d:
- case LoongArch::BI__builtin_lsx_vsrlni_w_d:
- case LoongArch::BI__builtin_lsx_vsrlrni_w_d:
- case LoongArch::BI__builtin_lsx_vssrlni_w_d:
- case LoongArch::BI__builtin_lsx_vssrlni_wu_d:
- case LoongArch::BI__builtin_lsx_vssrlrni_w_d:
- case LoongArch::BI__builtin_lsx_vssrlrni_wu_d:
- case LoongArch::BI__builtin_lsx_vsrani_w_d:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 63);
- case LoongArch::BI__builtin_lsx_vssrarni_d_q:
- case LoongArch::BI__builtin_lsx_vssrarni_du_q:
- case LoongArch::BI__builtin_lsx_vssrani_d_q:
- case LoongArch::BI__builtin_lsx_vssrani_du_q:
- case LoongArch::BI__builtin_lsx_vsrarni_d_q:
- case LoongArch::BI__builtin_lsx_vssrlni_d_q:
- case LoongArch::BI__builtin_lsx_vssrlni_du_q:
- case LoongArch::BI__builtin_lsx_vssrlrni_d_q:
- case LoongArch::BI__builtin_lsx_vssrlrni_du_q:
- case LoongArch::BI__builtin_lsx_vsrani_d_q:
- case LoongArch::BI__builtin_lsx_vsrlrni_d_q:
- case LoongArch::BI__builtin_lsx_vsrlni_d_q:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 127);
- case LoongArch::BI__builtin_lsx_vseqi_b:
- case LoongArch::BI__builtin_lsx_vseqi_h:
- case LoongArch::BI__builtin_lsx_vseqi_w:
- case LoongArch::BI__builtin_lsx_vseqi_d:
- case LoongArch::BI__builtin_lsx_vslti_b:
- case LoongArch::BI__builtin_lsx_vslti_h:
- case LoongArch::BI__builtin_lsx_vslti_w:
- case LoongArch::BI__builtin_lsx_vslti_d:
- case LoongArch::BI__builtin_lsx_vslei_b:
- case LoongArch::BI__builtin_lsx_vslei_h:
- case LoongArch::BI__builtin_lsx_vslei_w:
- case LoongArch::BI__builtin_lsx_vslei_d:
- case LoongArch::BI__builtin_lsx_vmaxi_b:
- case LoongArch::BI__builtin_lsx_vmaxi_h:
- case LoongArch::BI__builtin_lsx_vmaxi_w:
- case LoongArch::BI__builtin_lsx_vmaxi_d:
- case LoongArch::BI__builtin_lsx_vmini_b:
- case LoongArch::BI__builtin_lsx_vmini_h:
- case LoongArch::BI__builtin_lsx_vmini_w:
- case LoongArch::BI__builtin_lsx_vmini_d:
- return SemaBuiltinConstantArgRange(TheCall, 1, -16, 15);
- case LoongArch::BI__builtin_lsx_vandi_b:
- case LoongArch::BI__builtin_lsx_vnori_b:
- case LoongArch::BI__builtin_lsx_vori_b:
- case LoongArch::BI__builtin_lsx_vshuf4i_b:
- case LoongArch::BI__builtin_lsx_vshuf4i_h:
- case LoongArch::BI__builtin_lsx_vshuf4i_w:
- case LoongArch::BI__builtin_lsx_vxori_b:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 255);
- case LoongArch::BI__builtin_lsx_vbitseli_b:
- case LoongArch::BI__builtin_lsx_vshuf4i_d:
- case LoongArch::BI__builtin_lsx_vextrins_b:
- case LoongArch::BI__builtin_lsx_vextrins_h:
- case LoongArch::BI__builtin_lsx_vextrins_w:
- case LoongArch::BI__builtin_lsx_vextrins_d:
- case LoongArch::BI__builtin_lsx_vpermi_w:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 255);
- case LoongArch::BI__builtin_lsx_vpickve2gr_b:
- case LoongArch::BI__builtin_lsx_vpickve2gr_bu:
- case LoongArch::BI__builtin_lsx_vreplvei_b:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
- case LoongArch::BI__builtin_lsx_vinsgr2vr_b:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
- case LoongArch::BI__builtin_lsx_vpickve2gr_h:
- case LoongArch::BI__builtin_lsx_vpickve2gr_hu:
- case LoongArch::BI__builtin_lsx_vreplvei_h:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
- case LoongArch::BI__builtin_lsx_vinsgr2vr_h:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
- case LoongArch::BI__builtin_lsx_vpickve2gr_w:
- case LoongArch::BI__builtin_lsx_vpickve2gr_wu:
- case LoongArch::BI__builtin_lsx_vreplvei_w:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
- case LoongArch::BI__builtin_lsx_vinsgr2vr_w:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
- case LoongArch::BI__builtin_lsx_vpickve2gr_d:
- case LoongArch::BI__builtin_lsx_vpickve2gr_du:
- case LoongArch::BI__builtin_lsx_vreplvei_d:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
- case LoongArch::BI__builtin_lsx_vinsgr2vr_d:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
- case LoongArch::BI__builtin_lsx_vstelm_b:
- return SemaBuiltinConstantArgRange(TheCall, 2, -128, 127) ||
- SemaBuiltinConstantArgRange(TheCall, 3, 0, 15);
- case LoongArch::BI__builtin_lsx_vstelm_h:
- return SemaBuiltinConstantArgRange(TheCall, 2, -256, 254) ||
- SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
- case LoongArch::BI__builtin_lsx_vstelm_w:
- return SemaBuiltinConstantArgRange(TheCall, 2, -512, 508) ||
- SemaBuiltinConstantArgRange(TheCall, 3, 0, 3);
- case LoongArch::BI__builtin_lsx_vstelm_d:
- return SemaBuiltinConstantArgRange(TheCall, 2, -1024, 1016) ||
- SemaBuiltinConstantArgRange(TheCall, 3, 0, 1);
- case LoongArch::BI__builtin_lsx_vldrepl_b:
- case LoongArch::BI__builtin_lsx_vld:
- return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2047);
- case LoongArch::BI__builtin_lsx_vldrepl_h:
- return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2046);
- case LoongArch::BI__builtin_lsx_vldrepl_w:
- return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2044);
- case LoongArch::BI__builtin_lsx_vldrepl_d:
- return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2040);
- case LoongArch::BI__builtin_lsx_vst:
- return SemaBuiltinConstantArgRange(TheCall, 2, -2048, 2047);
- case LoongArch::BI__builtin_lsx_vldi:
- return SemaBuiltinConstantArgRange(TheCall, 0, -4096, 4095);
- case LoongArch::BI__builtin_lsx_vrepli_b:
- case LoongArch::BI__builtin_lsx_vrepli_h:
- case LoongArch::BI__builtin_lsx_vrepli_w:
- case LoongArch::BI__builtin_lsx_vrepli_d:
- return SemaBuiltinConstantArgRange(TheCall, 0, -512, 511);
-
- // LASX intrinsics.
- case LoongArch::BI__builtin_lasx_xvbitclri_b:
- case LoongArch::BI__builtin_lasx_xvbitrevi_b:
- case LoongArch::BI__builtin_lasx_xvbitseti_b:
- case LoongArch::BI__builtin_lasx_xvsat_b:
- case LoongArch::BI__builtin_lasx_xvsat_bu:
- case LoongArch::BI__builtin_lasx_xvslli_b:
- case LoongArch::BI__builtin_lasx_xvsrai_b:
- case LoongArch::BI__builtin_lasx_xvsrari_b:
- case LoongArch::BI__builtin_lasx_xvsrli_b:
- case LoongArch::BI__builtin_lasx_xvsllwil_h_b:
- case LoongArch::BI__builtin_lasx_xvsllwil_hu_bu:
- case LoongArch::BI__builtin_lasx_xvrotri_b:
- case LoongArch::BI__builtin_lasx_xvsrlri_b:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
- case LoongArch::BI__builtin_lasx_xvbitclri_h:
- case LoongArch::BI__builtin_lasx_xvbitrevi_h:
- case LoongArch::BI__builtin_lasx_xvbitseti_h:
- case LoongArch::BI__builtin_lasx_xvsat_h:
- case LoongArch::BI__builtin_lasx_xvsat_hu:
- case LoongArch::BI__builtin_lasx_xvslli_h:
- case LoongArch::BI__builtin_lasx_xvsrai_h:
- case LoongArch::BI__builtin_lasx_xvsrari_h:
- case LoongArch::BI__builtin_lasx_xvsrli_h:
- case LoongArch::BI__builtin_lasx_xvsllwil_w_h:
- case LoongArch::BI__builtin_lasx_xvsllwil_wu_hu:
- case LoongArch::BI__builtin_lasx_xvrotri_h:
- case LoongArch::BI__builtin_lasx_xvsrlri_h:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
- case LoongArch::BI__builtin_lasx_xvssrarni_b_h:
- case LoongArch::BI__builtin_lasx_xvssrarni_bu_h:
- case LoongArch::BI__builtin_lasx_xvssrani_b_h:
- case LoongArch::BI__builtin_lasx_xvssrani_bu_h:
- case LoongArch::BI__builtin_lasx_xvsrarni_b_h:
- case LoongArch::BI__builtin_lasx_xvsrlni_b_h:
- case LoongArch::BI__builtin_lasx_xvsrlrni_b_h:
- case LoongArch::BI__builtin_lasx_xvssrlni_b_h:
- case LoongArch::BI__builtin_lasx_xvssrlni_bu_h:
- case LoongArch::BI__builtin_lasx_xvssrlrni_b_h:
- case LoongArch::BI__builtin_lasx_xvssrlrni_bu_h:
- case LoongArch::BI__builtin_lasx_xvsrani_b_h:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
- case LoongArch::BI__builtin_lasx_xvslei_bu:
- case LoongArch::BI__builtin_lasx_xvslei_hu:
- case LoongArch::BI__builtin_lasx_xvslei_wu:
- case LoongArch::BI__builtin_lasx_xvslei_du:
- case LoongArch::BI__builtin_lasx_xvslti_bu:
- case LoongArch::BI__builtin_lasx_xvslti_hu:
- case LoongArch::BI__builtin_lasx_xvslti_wu:
- case LoongArch::BI__builtin_lasx_xvslti_du:
- case LoongArch::BI__builtin_lasx_xvmaxi_bu:
- case LoongArch::BI__builtin_lasx_xvmaxi_hu:
- case LoongArch::BI__builtin_lasx_xvmaxi_wu:
- case LoongArch::BI__builtin_lasx_xvmaxi_du:
- case LoongArch::BI__builtin_lasx_xvmini_bu:
- case LoongArch::BI__builtin_lasx_xvmini_hu:
- case LoongArch::BI__builtin_lasx_xvmini_wu:
- case LoongArch::BI__builtin_lasx_xvmini_du:
- case LoongArch::BI__builtin_lasx_xvaddi_bu:
- case LoongArch::BI__builtin_lasx_xvaddi_hu:
- case LoongArch::BI__builtin_lasx_xvaddi_wu:
- case LoongArch::BI__builtin_lasx_xvaddi_du:
- case LoongArch::BI__builtin_lasx_xvbitclri_w:
- case LoongArch::BI__builtin_lasx_xvbitrevi_w:
- case LoongArch::BI__builtin_lasx_xvbitseti_w:
- case LoongArch::BI__builtin_lasx_xvsat_w:
- case LoongArch::BI__builtin_lasx_xvsat_wu:
- case LoongArch::BI__builtin_lasx_xvslli_w:
- case LoongArch::BI__builtin_lasx_xvsrai_w:
- case LoongArch::BI__builtin_lasx_xvsrari_w:
- case LoongArch::BI__builtin_lasx_xvsrli_w:
- case LoongArch::BI__builtin_lasx_xvsllwil_d_w:
- case LoongArch::BI__builtin_lasx_xvsllwil_du_wu:
- case LoongArch::BI__builtin_lasx_xvsrlri_w:
- case LoongArch::BI__builtin_lasx_xvrotri_w:
- case LoongArch::BI__builtin_lasx_xvsubi_bu:
- case LoongArch::BI__builtin_lasx_xvsubi_hu:
- case LoongArch::BI__builtin_lasx_xvsubi_wu:
- case LoongArch::BI__builtin_lasx_xvsubi_du:
- case LoongArch::BI__builtin_lasx_xvbsrl_v:
- case LoongArch::BI__builtin_lasx_xvbsll_v:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
- case LoongArch::BI__builtin_lasx_xvssrarni_h_w:
- case LoongArch::BI__builtin_lasx_xvssrarni_hu_w:
- case LoongArch::BI__builtin_lasx_xvssrani_h_w:
- case LoongArch::BI__builtin_lasx_xvssrani_hu_w:
- case LoongArch::BI__builtin_lasx_xvsrarni_h_w:
- case LoongArch::BI__builtin_lasx_xvsrani_h_w:
- case LoongArch::BI__builtin_lasx_xvfrstpi_b:
- case LoongArch::BI__builtin_lasx_xvfrstpi_h:
- case LoongArch::BI__builtin_lasx_xvsrlni_h_w:
- case LoongArch::BI__builtin_lasx_xvsrlrni_h_w:
- case LoongArch::BI__builtin_lasx_xvssrlni_h_w:
- case LoongArch::BI__builtin_lasx_xvssrlni_hu_w:
- case LoongArch::BI__builtin_lasx_xvssrlrni_h_w:
- case LoongArch::BI__builtin_lasx_xvssrlrni_hu_w:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
- case LoongArch::BI__builtin_lasx_xvbitclri_d:
- case LoongArch::BI__builtin_lasx_xvbitrevi_d:
- case LoongArch::BI__builtin_lasx_xvbitseti_d:
- case LoongArch::BI__builtin_lasx_xvsat_d:
- case LoongArch::BI__builtin_lasx_xvsat_du:
- case LoongArch::BI__builtin_lasx_xvslli_d:
- case LoongArch::BI__builtin_lasx_xvsrai_d:
- case LoongArch::BI__builtin_lasx_xvsrli_d:
- case LoongArch::BI__builtin_lasx_xvsrari_d:
- case LoongArch::BI__builtin_lasx_xvrotri_d:
- case LoongArch::BI__builtin_lasx_xvsrlri_d:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 63);
- case LoongArch::BI__builtin_lasx_xvssrarni_w_d:
- case LoongArch::BI__builtin_lasx_xvssrarni_wu_d:
- case LoongArch::BI__builtin_lasx_xvssrani_w_d:
- case LoongArch::BI__builtin_lasx_xvssrani_wu_d:
- case LoongArch::BI__builtin_lasx_xvsrarni_w_d:
- case LoongArch::BI__builtin_lasx_xvsrlni_w_d:
- case LoongArch::BI__builtin_lasx_xvsrlrni_w_d:
- case LoongArch::BI__builtin_lasx_xvssrlni_w_d:
- case LoongArch::BI__builtin_lasx_xvssrlni_wu_d:
- case LoongArch::BI__builtin_lasx_xvssrlrni_w_d:
- case LoongArch::BI__builtin_lasx_xvssrlrni_wu_d:
- case LoongArch::BI__builtin_lasx_xvsrani_w_d:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 63);
- case LoongArch::BI__builtin_lasx_xvssrarni_d_q:
- case LoongArch::BI__builtin_lasx_xvssrarni_du_q:
- case LoongArch::BI__builtin_lasx_xvssrani_d_q:
- case LoongArch::BI__builtin_lasx_xvssrani_du_q:
- case LoongArch::BI__builtin_lasx_xvsrarni_d_q:
- case LoongArch::BI__builtin_lasx_xvssrlni_d_q:
- case LoongArch::BI__builtin_lasx_xvssrlni_du_q:
- case LoongArch::BI__builtin_lasx_xvssrlrni_d_q:
- case LoongArch::BI__builtin_lasx_xvssrlrni_du_q:
- case LoongArch::BI__builtin_lasx_xvsrani_d_q:
- case LoongArch::BI__builtin_lasx_xvsrlni_d_q:
- case LoongArch::BI__builtin_lasx_xvsrlrni_d_q:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 127);
- case LoongArch::BI__builtin_lasx_xvseqi_b:
- case LoongArch::BI__builtin_lasx_xvseqi_h:
- case LoongArch::BI__builtin_lasx_xvseqi_w:
- case LoongArch::BI__builtin_lasx_xvseqi_d:
- case LoongArch::BI__builtin_lasx_xvslti_b:
- case LoongArch::BI__builtin_lasx_xvslti_h:
- case LoongArch::BI__builtin_lasx_xvslti_w:
- case LoongArch::BI__builtin_lasx_xvslti_d:
- case LoongArch::BI__builtin_lasx_xvslei_b:
- case LoongArch::BI__builtin_lasx_xvslei_h:
- case LoongArch::BI__builtin_lasx_xvslei_w:
- case LoongArch::BI__builtin_lasx_xvslei_d:
- case LoongArch::BI__builtin_lasx_xvmaxi_b:
- case LoongArch::BI__builtin_lasx_xvmaxi_h:
- case LoongArch::BI__builtin_lasx_xvmaxi_w:
- case LoongArch::BI__builtin_lasx_xvmaxi_d:
- case LoongArch::BI__builtin_lasx_xvmini_b:
- case LoongArch::BI__builtin_lasx_xvmini_h:
- case LoongArch::BI__builtin_lasx_xvmini_w:
- case LoongArch::BI__builtin_lasx_xvmini_d:
- return SemaBuiltinConstantArgRange(TheCall, 1, -16, 15);
- case LoongArch::BI__builtin_lasx_xvandi_b:
- case LoongArch::BI__builtin_lasx_xvnori_b:
- case LoongArch::BI__builtin_lasx_xvori_b:
- case LoongArch::BI__builtin_lasx_xvshuf4i_b:
- case LoongArch::BI__builtin_lasx_xvshuf4i_h:
- case LoongArch::BI__builtin_lasx_xvshuf4i_w:
- case LoongArch::BI__builtin_lasx_xvxori_b:
- case LoongArch::BI__builtin_lasx_xvpermi_d:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 255);
- case LoongArch::BI__builtin_lasx_xvbitseli_b:
- case LoongArch::BI__builtin_lasx_xvshuf4i_d:
- case LoongArch::BI__builtin_lasx_xvextrins_b:
- case LoongArch::BI__builtin_lasx_xvextrins_h:
- case LoongArch::BI__builtin_lasx_xvextrins_w:
- case LoongArch::BI__builtin_lasx_xvextrins_d:
- case LoongArch::BI__builtin_lasx_xvpermi_q:
- case LoongArch::BI__builtin_lasx_xvpermi_w:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 255);
- case LoongArch::BI__builtin_lasx_xvrepl128vei_b:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
- case LoongArch::BI__builtin_lasx_xvrepl128vei_h:
- case LoongArch::BI__builtin_lasx_xvpickve2gr_w:
- case LoongArch::BI__builtin_lasx_xvpickve2gr_wu:
- case LoongArch::BI__builtin_lasx_xvpickve_w_f:
- case LoongArch::BI__builtin_lasx_xvpickve_w:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
- case LoongArch::BI__builtin_lasx_xvinsgr2vr_w:
- case LoongArch::BI__builtin_lasx_xvinsve0_w:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
- case LoongArch::BI__builtin_lasx_xvrepl128vei_w:
- case LoongArch::BI__builtin_lasx_xvpickve2gr_d:
- case LoongArch::BI__builtin_lasx_xvpickve2gr_du:
- case LoongArch::BI__builtin_lasx_xvpickve_d_f:
- case LoongArch::BI__builtin_lasx_xvpickve_d:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
- case LoongArch::BI__builtin_lasx_xvinsve0_d:
- case LoongArch::BI__builtin_lasx_xvinsgr2vr_d:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
- case LoongArch::BI__builtin_lasx_xvstelm_b:
- return SemaBuiltinConstantArgRange(TheCall, 2, -128, 127) ||
- SemaBuiltinConstantArgRange(TheCall, 3, 0, 31);
- case LoongArch::BI__builtin_lasx_xvstelm_h:
- return SemaBuiltinConstantArgRange(TheCall, 2, -256, 254) ||
- SemaBuiltinConstantArgRange(TheCall, 3, 0, 15);
- case LoongArch::BI__builtin_lasx_xvstelm_w:
- return SemaBuiltinConstantArgRange(TheCall, 2, -512, 508) ||
- SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
- case LoongArch::BI__builtin_lasx_xvstelm_d:
- return SemaBuiltinConstantArgRange(TheCall, 2, -1024, 1016) ||
- SemaBuiltinConstantArgRange(TheCall, 3, 0, 3);
- case LoongArch::BI__builtin_lasx_xvrepl128vei_d:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
- case LoongArch::BI__builtin_lasx_xvldrepl_b:
- case LoongArch::BI__builtin_lasx_xvld:
- return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2047);
- case LoongArch::BI__builtin_lasx_xvldrepl_h:
- return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2046);
- case LoongArch::BI__builtin_lasx_xvldrepl_w:
- return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2044);
- case LoongArch::BI__builtin_lasx_xvldrepl_d:
- return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2040);
- case LoongArch::BI__builtin_lasx_xvst:
- return SemaBuiltinConstantArgRange(TheCall, 2, -2048, 2047);
- case LoongArch::BI__builtin_lasx_xvldi:
- return SemaBuiltinConstantArgRange(TheCall, 0, -4096, 4095);
- case LoongArch::BI__builtin_lasx_xvrepli_b:
- case LoongArch::BI__builtin_lasx_xvrepli_h:
- case LoongArch::BI__builtin_lasx_xvrepli_w:
- case LoongArch::BI__builtin_lasx_xvrepli_d:
- return SemaBuiltinConstantArgRange(TheCall, 0, -512, 511);
- }
- return false;
-}
-
-bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID, CallExpr *TheCall) {
- return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
- CheckMipsBuiltinArgument(BuiltinID, TheCall);
-}
-
-bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall) {
-
- if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
- BuiltinID <= Mips::BI__builtin_mips_lwx) {
- if (!TI.hasFeature("dsp"))
- return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
- }
-
- if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
- BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
- if (!TI.hasFeature("dspr2"))
- return Diag(TheCall->getBeginLoc(),
- diag::err_mips_builtin_requires_dspr2);
- }
-
- if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
- BuiltinID <= Mips::BI__builtin_msa_xori_b) {
- if (!TI.hasFeature("msa"))
- return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
- }
-
- return false;
-}
-
-// CheckMipsBuiltinArgument - Checks the constant value passed to the
-// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
-// ordering for DSP is unspecified. MSA is ordered by the data format used
-// by the underlying instruction i.e., df/m, df/n and then by size.
-//
-// FIXME: The size tests here should instead be tablegen'd along with the
-// definitions from include/clang/Basic/BuiltinsMips.def.
-// FIXME: GCC is strict on signedness for some of these intrinsics, we should
-// be too.
-bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
- unsigned i = 0, l = 0, u = 0, m = 0;
- switch (BuiltinID) {
- default: return false;
- case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
- case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
- case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
- case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
- case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
- case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
- case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
- // MSA intrinsics. Instructions (which the intrinsics maps to) which use the
- // df/m field.
- // These intrinsics take an unsigned 3 bit immediate.
- case Mips::BI__builtin_msa_bclri_b:
- case Mips::BI__builtin_msa_bnegi_b:
- case Mips::BI__builtin_msa_bseti_b:
- case Mips::BI__builtin_msa_sat_s_b:
- case Mips::BI__builtin_msa_sat_u_b:
- case Mips::BI__builtin_msa_slli_b:
- case Mips::BI__builtin_msa_srai_b:
- case Mips::BI__builtin_msa_srari_b:
- case Mips::BI__builtin_msa_srli_b:
- case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
- case Mips::BI__builtin_msa_binsli_b:
- case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
- // These intrinsics take an unsigned 4 bit immediate.
- case Mips::BI__builtin_msa_bclri_h:
- case Mips::BI__builtin_msa_bnegi_h:
- case Mips::BI__builtin_msa_bseti_h:
- case Mips::BI__builtin_msa_sat_s_h:
- case Mips::BI__builtin_msa_sat_u_h:
- case Mips::BI__builtin_msa_slli_h:
- case Mips::BI__builtin_msa_srai_h:
- case Mips::BI__builtin_msa_srari_h:
- case Mips::BI__builtin_msa_srli_h:
- case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
- case Mips::BI__builtin_msa_binsli_h:
- case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
- // These intrinsics take an unsigned 5 bit immediate.
- // The first block of intrinsics actually have an unsigned 5 bit field,
- // not a df/n field.
- case Mips::BI__builtin_msa_cfcmsa:
- case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
- case Mips::BI__builtin_msa_clei_u_b:
- case Mips::BI__builtin_msa_clei_u_h:
- case Mips::BI__builtin_msa_clei_u_w:
- case Mips::BI__builtin_msa_clei_u_d:
- case Mips::BI__builtin_msa_clti_u_b:
- case Mips::BI__builtin_msa_clti_u_h:
- case Mips::BI__builtin_msa_clti_u_w:
- case Mips::BI__builtin_msa_clti_u_d:
- case Mips::BI__builtin_msa_maxi_u_b:
- case Mips::BI__builtin_msa_maxi_u_h:
- case Mips::BI__builtin_msa_maxi_u_w:
- case Mips::BI__builtin_msa_maxi_u_d:
- case Mips::BI__builtin_msa_mini_u_b:
- case Mips::BI__builtin_msa_mini_u_h:
- case Mips::BI__builtin_msa_mini_u_w:
- case Mips::BI__builtin_msa_mini_u_d:
- case Mips::BI__builtin_msa_addvi_b:
- case Mips::BI__builtin_msa_addvi_h:
- case Mips::BI__builtin_msa_addvi_w:
- case Mips::BI__builtin_msa_addvi_d:
- case Mips::BI__builtin_msa_bclri_w:
- case Mips::BI__builtin_msa_bnegi_w:
- case Mips::BI__builtin_msa_bseti_w:
- case Mips::BI__builtin_msa_sat_s_w:
- case Mips::BI__builtin_msa_sat_u_w:
- case Mips::BI__builtin_msa_slli_w:
- case Mips::BI__builtin_msa_srai_w:
- case Mips::BI__builtin_msa_srari_w:
- case Mips::BI__builtin_msa_srli_w:
- case Mips::BI__builtin_msa_srlri_w:
- case Mips::BI__builtin_msa_subvi_b:
- case Mips::BI__builtin_msa_subvi_h:
- case Mips::BI__builtin_msa_subvi_w:
- case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
- case Mips::BI__builtin_msa_binsli_w:
- case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
- // These intrinsics take an unsigned 6 bit immediate.
- case Mips::BI__builtin_msa_bclri_d:
- case Mips::BI__builtin_msa_bnegi_d:
- case Mips::BI__builtin_msa_bseti_d:
- case Mips::BI__builtin_msa_sat_s_d:
- case Mips::BI__builtin_msa_sat_u_d:
- case Mips::BI__builtin_msa_slli_d:
- case Mips::BI__builtin_msa_srai_d:
- case Mips::BI__builtin_msa_srari_d:
- case Mips::BI__builtin_msa_srli_d:
- case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
- case Mips::BI__builtin_msa_binsli_d:
- case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
- // These intrinsics take a signed 5 bit immediate.
- case Mips::BI__builtin_msa_ceqi_b:
- case Mips::BI__builtin_msa_ceqi_h:
- case Mips::BI__builtin_msa_ceqi_w:
- case Mips::BI__builtin_msa_ceqi_d:
- case Mips::BI__builtin_msa_clti_s_b:
- case Mips::BI__builtin_msa_clti_s_h:
- case Mips::BI__builtin_msa_clti_s_w:
- case Mips::BI__builtin_msa_clti_s_d:
- case Mips::BI__builtin_msa_clei_s_b:
- case Mips::BI__builtin_msa_clei_s_h:
- case Mips::BI__builtin_msa_clei_s_w:
- case Mips::BI__builtin_msa_clei_s_d:
- case Mips::BI__builtin_msa_maxi_s_b:
- case Mips::BI__builtin_msa_maxi_s_h:
- case Mips::BI__builtin_msa_maxi_s_w:
- case Mips::BI__builtin_msa_maxi_s_d:
- case Mips::BI__builtin_msa_mini_s_b:
- case Mips::BI__builtin_msa_mini_s_h:
- case Mips::BI__builtin_msa_mini_s_w:
- case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
- // These intrinsics take an unsigned 8 bit immediate.
- case Mips::BI__builtin_msa_andi_b:
- case Mips::BI__builtin_msa_nori_b:
- case Mips::BI__builtin_msa_ori_b:
- case Mips::BI__builtin_msa_shf_b:
- case Mips::BI__builtin_msa_shf_h:
- case Mips::BI__builtin_msa_shf_w:
- case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
- case Mips::BI__builtin_msa_bseli_b:
- case Mips::BI__builtin_msa_bmnzi_b:
- case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
- // df/n format
- // These intrinsics take an unsigned 4 bit immediate.
- case Mips::BI__builtin_msa_copy_s_b:
- case Mips::BI__builtin_msa_copy_u_b:
- case Mips::BI__builtin_msa_insve_b:
- case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
- case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
- // These intrinsics take an unsigned 3 bit immediate.
- case Mips::BI__builtin_msa_copy_s_h:
- case Mips::BI__builtin_msa_copy_u_h:
- case Mips::BI__builtin_msa_insve_h:
- case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
- case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
- // These intrinsics take an unsigned 2 bit immediate.
- case Mips::BI__builtin_msa_copy_s_w:
- case Mips::BI__builtin_msa_copy_u_w:
- case Mips::BI__builtin_msa_insve_w:
- case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
- case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
- // These intrinsics take an unsigned 1 bit immediate.
- case Mips::BI__builtin_msa_copy_s_d:
- case Mips::BI__builtin_msa_copy_u_d:
- case Mips::BI__builtin_msa_insve_d:
- case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
- case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
- // Memory offsets and immediate loads.
- // These intrinsics take a signed 10 bit immediate.
- case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
- case Mips::BI__builtin_msa_ldi_h:
- case Mips::BI__builtin_msa_ldi_w:
- case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
- case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
- case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
- case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
- case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
- case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
- case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
- case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
- case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
- case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
- case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
- case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
- case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
- }
-
- if (!m)
- return SemaBuiltinConstantArgRange(TheCall, i, l, u);
-
- return SemaBuiltinConstantArgRange(TheCall, i, l, u) ||
- SemaBuiltinConstantArgMultiple(TheCall, i, m);
-}
-
-/// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str,
-/// advancing the pointer over the consumed characters. The decoded type is
-/// returned. If the decoded type represents a constant integer with a
-/// constraint on its value then Mask is set to that value. The type descriptors
-/// used in Str are specific to PPC MMA builtins and are documented in the file
-/// defining the PPC builtins.
-static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
- unsigned &Mask) {
- bool RequireICE = false;
- ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
- switch (*Str++) {
- case 'V':
- return Context.getVectorType(Context.UnsignedCharTy, 16,
- VectorKind::AltiVecVector);
- case 'i': {
- char *End;
- unsigned size = strtoul(Str, &End, 10);
- assert(End != Str && "Missing constant parameter constraint");
- Str = End;
- Mask = size;
- return Context.IntTy;
- }
- case 'W': {
- char *End;
- unsigned size = strtoul(Str, &End, 10);
- assert(End != Str && "Missing PowerPC MMA type size");
- Str = End;
- QualType Type;
- switch (size) {
- #define PPC_VECTOR_TYPE(typeName, Id, size) \
- case size: Type = Context.Id##Ty; break;
- #include "clang/Basic/PPCTypes.def"
- default: llvm_unreachable("Invalid PowerPC MMA vector type");
- }
- bool CheckVectorArgs = false;
- while (!CheckVectorArgs) {
- switch (*Str++) {
- case '*':
- Type = Context.getPointerType(Type);
- break;
- case 'C':
- Type = Type.withConst();
- break;
- default:
- CheckVectorArgs = true;
- --Str;
- break;
- }
- }
- return Type;
- }
- default:
- return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true);
- }
-}
-
-static bool isPPC_64Builtin(unsigned BuiltinID) {
- // These builtins only work on PPC 64bit targets.
- switch (BuiltinID) {
- case PPC::BI__builtin_divde:
- case PPC::BI__builtin_divdeu:
- case PPC::BI__builtin_bpermd:
- case PPC::BI__builtin_pdepd:
- case PPC::BI__builtin_pextd:
- case PPC::BI__builtin_ppc_ldarx:
- case PPC::BI__builtin_ppc_stdcx:
- case PPC::BI__builtin_ppc_tdw:
- case PPC::BI__builtin_ppc_trapd:
- case PPC::BI__builtin_ppc_cmpeqb:
- case PPC::BI__builtin_ppc_setb:
- case PPC::BI__builtin_ppc_mulhd:
- case PPC::BI__builtin_ppc_mulhdu:
- case PPC::BI__builtin_ppc_maddhd:
- case PPC::BI__builtin_ppc_maddhdu:
- case PPC::BI__builtin_ppc_maddld:
- case PPC::BI__builtin_ppc_load8r:
- case PPC::BI__builtin_ppc_store8r:
- case PPC::BI__builtin_ppc_insert_exp:
- case PPC::BI__builtin_ppc_extract_sig:
- case PPC::BI__builtin_ppc_addex:
- case PPC::BI__builtin_darn:
- case PPC::BI__builtin_darn_raw:
- case PPC::BI__builtin_ppc_compare_and_swaplp:
- case PPC::BI__builtin_ppc_fetch_and_addlp:
- case PPC::BI__builtin_ppc_fetch_and_andlp:
- case PPC::BI__builtin_ppc_fetch_and_orlp:
- case PPC::BI__builtin_ppc_fetch_and_swaplp:
- return true;
- }
- return false;
-}
-
-/// Returns true if the argument consists of one contiguous run of 1s with any
-/// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so
-/// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not,
-/// since all 1s are not contiguous.
-bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
+bool Sema::ValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
llvm::APSInt Result;
// We can't check the value of a dependent argument.
Expr *Arg = TheCall->getArg(ArgNum);
@@ -4917,7 +2930,7 @@ bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
return false;
// Check constant-ness first.
- if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
+ if (BuiltinConstantArg(TheCall, ArgNum, Result))
return true;
// Check contiguous run of 1s, 0xFF0000FF is also a run of 1s.
@@ -4929,2234 +2942,6 @@ bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
<< ArgNum << Arg->getSourceRange();
}
-bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall) {
- unsigned i = 0, l = 0, u = 0;
- bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
- llvm::APSInt Result;
-
- if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit)
- return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
- << TheCall->getSourceRange();
-
- switch (BuiltinID) {
- default: return false;
- case PPC::BI__builtin_altivec_crypto_vshasigmaw:
- case PPC::BI__builtin_altivec_crypto_vshasigmad:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
- SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
- case PPC::BI__builtin_altivec_dss:
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
- case PPC::BI__builtin_tbegin:
- case PPC::BI__builtin_tend:
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
- case PPC::BI__builtin_tsr:
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7);
- case PPC::BI__builtin_tabortwc:
- case PPC::BI__builtin_tabortdc:
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
- case PPC::BI__builtin_tabortwci:
- case PPC::BI__builtin_tabortdci:
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
- SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
- // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05',
- // __builtin_(un)pack_longdouble are available only if long double uses IBM
- // extended double representation.
- case PPC::BI__builtin_unpack_longdouble:
- if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 1))
- return true;
- [[fallthrough]];
- case PPC::BI__builtin_pack_longdouble:
- if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble())
- return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi)
- << "ibmlongdouble";
- return false;
- case PPC::BI__builtin_altivec_dst:
- case PPC::BI__builtin_altivec_dstt:
- case PPC::BI__builtin_altivec_dstst:
- case PPC::BI__builtin_altivec_dststt:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
- case PPC::BI__builtin_vsx_xxpermdi:
- case PPC::BI__builtin_vsx_xxsldwi:
- return SemaBuiltinVSX(TheCall);
- case PPC::BI__builtin_unpack_vector_int128:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
- case PPC::BI__builtin_altivec_vgnb:
- return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
- case PPC::BI__builtin_vsx_xxeval:
- return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
- case PPC::BI__builtin_altivec_vsldbi:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
- case PPC::BI__builtin_altivec_vsrdbi:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
- case PPC::BI__builtin_vsx_xxpermx:
- return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
- case PPC::BI__builtin_ppc_tw:
- case PPC::BI__builtin_ppc_tdw:
- return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31);
- case PPC::BI__builtin_ppc_cmprb:
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
- // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must
- // be a constant that represents a contiguous bit field.
- case PPC::BI__builtin_ppc_rlwnm:
- return SemaValueIsRunOfOnes(TheCall, 2);
- case PPC::BI__builtin_ppc_rlwimi:
- case PPC::BI__builtin_ppc_rldimi:
- return SemaBuiltinConstantArg(TheCall, 2, Result) ||
- SemaValueIsRunOfOnes(TheCall, 3);
- case PPC::BI__builtin_ppc_addex: {
- if (SemaBuiltinConstantArgRange(TheCall, 2, 0, 3))
- return true;
- // Output warning for reserved values 1 to 3.
- int ArgValue =
- TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue();
- if (ArgValue != 0)
- Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour)
- << ArgValue;
- return false;
- }
- case PPC::BI__builtin_ppc_mtfsb0:
- case PPC::BI__builtin_ppc_mtfsb1:
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
- case PPC::BI__builtin_ppc_mtfsf:
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255);
- case PPC::BI__builtin_ppc_mtfsfi:
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) ||
- SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
- case PPC::BI__builtin_ppc_alignx:
- return SemaBuiltinConstantArgPower2(TheCall, 0);
- case PPC::BI__builtin_ppc_rdlam:
- return SemaValueIsRunOfOnes(TheCall, 2);
- case PPC::BI__builtin_vsx_ldrmb:
- case PPC::BI__builtin_vsx_strmb:
- return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
- case PPC::BI__builtin_altivec_vcntmbb:
- case PPC::BI__builtin_altivec_vcntmbh:
- case PPC::BI__builtin_altivec_vcntmbw:
- case PPC::BI__builtin_altivec_vcntmbd:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
- case PPC::BI__builtin_vsx_xxgenpcvbm:
- case PPC::BI__builtin_vsx_xxgenpcvhm:
- case PPC::BI__builtin_vsx_xxgenpcvwm:
- case PPC::BI__builtin_vsx_xxgenpcvdm:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
- case PPC::BI__builtin_ppc_test_data_class: {
- // Check if the first argument of the __builtin_ppc_test_data_class call is
- // valid. The argument must be 'float' or 'double' or '__float128'.
- QualType ArgType = TheCall->getArg(0)->getType();
- if (ArgType != QualType(Context.FloatTy) &&
- ArgType != QualType(Context.DoubleTy) &&
- ArgType != QualType(Context.Float128Ty))
- return Diag(TheCall->getBeginLoc(),
- diag::err_ppc_invalid_test_data_class_type);
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 127);
- }
- case PPC::BI__builtin_ppc_maxfe:
- case PPC::BI__builtin_ppc_minfe:
- case PPC::BI__builtin_ppc_maxfl:
- case PPC::BI__builtin_ppc_minfl:
- case PPC::BI__builtin_ppc_maxfs:
- case PPC::BI__builtin_ppc_minfs: {
- if (Context.getTargetInfo().getTriple().isOSAIX() &&
- (BuiltinID == PPC::BI__builtin_ppc_maxfe ||
- BuiltinID == PPC::BI__builtin_ppc_minfe))
- return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type)
- << "builtin" << true << 128 << QualType(Context.LongDoubleTy)
- << false << Context.getTargetInfo().getTriple().str();
- // Argument type should be exact.
- QualType ArgType = QualType(Context.LongDoubleTy);
- if (BuiltinID == PPC::BI__builtin_ppc_maxfl ||
- BuiltinID == PPC::BI__builtin_ppc_minfl)
- ArgType = QualType(Context.DoubleTy);
- else if (BuiltinID == PPC::BI__builtin_ppc_maxfs ||
- BuiltinID == PPC::BI__builtin_ppc_minfs)
- ArgType = QualType(Context.FloatTy);
- for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I)
- if (TheCall->getArg(I)->getType() != ArgType)
- return Diag(TheCall->getBeginLoc(),
- diag::err_typecheck_convert_incompatible)
- << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0;
- return false;
- }
-#define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \
- case PPC::BI__builtin_##Name: \
- return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types);
-#include "clang/Basic/BuiltinsPPC.def"
- }
- return SemaBuiltinConstantArgRange(TheCall, i, l, u);
-}
-
-// Check if the given type is a non-pointer PPC MMA type. This function is used
-// in Sema to prevent invalid uses of restricted PPC MMA types.
-bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
- if (Type->isPointerType() || Type->isArrayType())
- return false;
-
- QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
-#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
- if (false
-#include "clang/Basic/PPCTypes.def"
- ) {
- Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
- return true;
- }
- return false;
-}
-
-bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
- CallExpr *TheCall) {
- // position of memory order and scope arguments in the builtin
- unsigned OrderIndex, ScopeIndex;
- switch (BuiltinID) {
- case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
- case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
- case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
- case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
- OrderIndex = 2;
- ScopeIndex = 3;
- break;
- case AMDGPU::BI__builtin_amdgcn_fence:
- OrderIndex = 0;
- ScopeIndex = 1;
- break;
- default:
- return false;
- }
-
- ExprResult Arg = TheCall->getArg(OrderIndex);
- auto ArgExpr = Arg.get();
- Expr::EvalResult ArgResult;
-
- if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
- return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
- << ArgExpr->getType();
- auto Ord = ArgResult.Val.getInt().getZExtValue();
-
- // Check validity of memory ordering as per C11 / C++11's memody model.
- // Only fence needs check. Atomic dec/inc allow all memory orders.
- if (!llvm::isValidAtomicOrderingCABI(Ord))
- return Diag(ArgExpr->getBeginLoc(),
- diag::warn_atomic_op_has_invalid_memory_order)
- << 0 << ArgExpr->getSourceRange();
- switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
- case llvm::AtomicOrderingCABI::relaxed:
- case llvm::AtomicOrderingCABI::consume:
- if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
- return Diag(ArgExpr->getBeginLoc(),
- diag::warn_atomic_op_has_invalid_memory_order)
- << 0 << ArgExpr->getSourceRange();
- break;
- case llvm::AtomicOrderingCABI::acquire:
- case llvm::AtomicOrderingCABI::release:
- case llvm::AtomicOrderingCABI::acq_rel:
- case llvm::AtomicOrderingCABI::seq_cst:
- break;
- }
-
- Arg = TheCall->getArg(ScopeIndex);
- ArgExpr = Arg.get();
- Expr::EvalResult ArgResult1;
- // Check that sync scope is a constant literal
- if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context))
- return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
- << ArgExpr->getType();
-
- return false;
-}
-
-bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) {
- llvm::APSInt Result;
-
- // We can't check the value of a dependent argument.
- Expr *Arg = TheCall->getArg(ArgNum);
- if (Arg->isTypeDependent() || Arg->isValueDependent())
- return false;
-
- // Check constant-ness first.
- if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
- return true;
-
- int64_t Val = Result.getSExtValue();
- if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
- return false;
-
- return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
- << Arg->getSourceRange();
-}
-
-static bool CheckInvalidVLENandLMUL(const TargetInfo &TI, CallExpr *TheCall,
- Sema &S, QualType Type, int EGW) {
- assert((EGW == 128 || EGW == 256) && "EGW can only be 128 or 256 bits");
-
- // LMUL * VLEN >= EGW
- ASTContext::BuiltinVectorTypeInfo Info =
- S.Context.getBuiltinVectorTypeInfo(Type->castAs<BuiltinType>());
- unsigned ElemSize = S.Context.getTypeSize(Info.ElementType);
- unsigned MinElemCount = Info.EC.getKnownMinValue();
-
- unsigned EGS = EGW / ElemSize;
- // If EGS is less than or equal to the minimum number of elements, then the
- // type is valid.
- if (EGS <= MinElemCount)
- return false;
-
- // Otherwise, we need vscale to be at least EGS / MinElemCont.
- assert(EGS % MinElemCount == 0);
- unsigned VScaleFactor = EGS / MinElemCount;
- // Vscale is VLEN/RVVBitsPerBlock.
- unsigned MinRequiredVLEN = VScaleFactor * llvm::RISCV::RVVBitsPerBlock;
- std::string RequiredExt = "zvl" + std::to_string(MinRequiredVLEN) + "b";
- if (!TI.hasFeature(RequiredExt))
- return S.Diag(TheCall->getBeginLoc(),
- diag::err_riscv_type_requires_extension) << Type << RequiredExt;
-
- return false;
-}
-
-bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID,
- CallExpr *TheCall) {
- // CodeGenFunction can also detect this, but this gives a better error
- // message.
- bool FeatureMissing = false;
- SmallVector<StringRef> ReqFeatures;
- StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID);
- Features.split(ReqFeatures, ',', -1, false);
-
- // Check if each required feature is included
- for (StringRef F : ReqFeatures) {
- SmallVector<StringRef> ReqOpFeatures;
- F.split(ReqOpFeatures, '|');
-
- if (llvm::none_of(ReqOpFeatures,
- [&TI](StringRef OF) { return TI.hasFeature(OF); })) {
- std::string FeatureStrs;
- bool IsExtension = true;
- for (StringRef OF : ReqOpFeatures) {
- // If the feature is 64bit, alter the string so it will print better in
- // the diagnostic.
- if (OF == "64bit") {
- assert(ReqOpFeatures.size() == 1 && "Expected '64bit' to be alone");
- OF = "RV64";
- IsExtension = false;
- }
- if (OF == "32bit") {
- assert(ReqOpFeatures.size() == 1 && "Expected '32bit' to be alone");
- OF = "RV32";
- IsExtension = false;
- }
-
- // Convert features like "zbr" and "experimental-zbr" to "Zbr".
- OF.consume_front("experimental-");
- std::string FeatureStr = OF.str();
- FeatureStr[0] = std::toupper(FeatureStr[0]);
- // Combine strings.
- FeatureStrs += FeatureStrs.empty() ? "" : ", ";
- FeatureStrs += "'";
- FeatureStrs += FeatureStr;
- FeatureStrs += "'";
- }
- // Error message
- FeatureMissing = true;
- Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension)
- << IsExtension
- << TheCall->getSourceRange() << StringRef(FeatureStrs);
- }
- }
-
- if (FeatureMissing)
- return true;
-
- // vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx,
- // vsmul.vv, vsmul.vx are not included for EEW=64 in Zve64*.
- switch (BuiltinID) {
- default:
- break;
- case RISCVVector::BI__builtin_rvv_vmulhsu_vv:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vx:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tu:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tu:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vv_m:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vx_m:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vv_mu:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vx_mu:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tum:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tum:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vmulhu_vv:
- case RISCVVector::BI__builtin_rvv_vmulhu_vx:
- case RISCVVector::BI__builtin_rvv_vmulhu_vv_tu:
- case RISCVVector::BI__builtin_rvv_vmulhu_vx_tu:
- case RISCVVector::BI__builtin_rvv_vmulhu_vv_m:
- case RISCVVector::BI__builtin_rvv_vmulhu_vx_m:
- case RISCVVector::BI__builtin_rvv_vmulhu_vv_mu:
- case RISCVVector::BI__builtin_rvv_vmulhu_vx_mu:
- case RISCVVector::BI__builtin_rvv_vmulhu_vv_tum:
- case RISCVVector::BI__builtin_rvv_vmulhu_vx_tum:
- case RISCVVector::BI__builtin_rvv_vmulhu_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vmulhu_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vmulh_vv:
- case RISCVVector::BI__builtin_rvv_vmulh_vx:
- case RISCVVector::BI__builtin_rvv_vmulh_vv_tu:
- case RISCVVector::BI__builtin_rvv_vmulh_vx_tu:
- case RISCVVector::BI__builtin_rvv_vmulh_vv_m:
- case RISCVVector::BI__builtin_rvv_vmulh_vx_m:
- case RISCVVector::BI__builtin_rvv_vmulh_vv_mu:
- case RISCVVector::BI__builtin_rvv_vmulh_vx_mu:
- case RISCVVector::BI__builtin_rvv_vmulh_vv_tum:
- case RISCVVector::BI__builtin_rvv_vmulh_vx_tum:
- case RISCVVector::BI__builtin_rvv_vmulh_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vmulh_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vsmul_vv:
- case RISCVVector::BI__builtin_rvv_vsmul_vx:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu: {
- ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(
- TheCall->getType()->castAs<BuiltinType>());
-
- if (Context.getTypeSize(Info.ElementType) == 64 && !TI.hasFeature("v"))
- return Diag(TheCall->getBeginLoc(),
- diag::err_riscv_builtin_requires_extension)
- << /* IsExtension */ true << TheCall->getSourceRange() << "v";
-
- break;
- }
- }
-
- switch (BuiltinID) {
- case RISCVVector::BI__builtin_rvv_vsetvli:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) ||
- CheckRISCVLMUL(TheCall, 2);
- case RISCVVector::BI__builtin_rvv_vsetvlimax:
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- CheckRISCVLMUL(TheCall, 1);
- case RISCVVector::BI__builtin_rvv_vget_v: {
- ASTContext::BuiltinVectorTypeInfo ResVecInfo =
- Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
- TheCall->getType().getCanonicalType().getTypePtr()));
- ASTContext::BuiltinVectorTypeInfo VecInfo =
- Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
- TheCall->getArg(0)->getType().getCanonicalType().getTypePtr()));
- unsigned MaxIndex;
- if (VecInfo.NumVectors != 1) // vget for tuple type
- MaxIndex = VecInfo.NumVectors;
- else // vget for non-tuple type
- MaxIndex = (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) /
- (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors);
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
- }
- case RISCVVector::BI__builtin_rvv_vset_v: {
- ASTContext::BuiltinVectorTypeInfo ResVecInfo =
- Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
- TheCall->getType().getCanonicalType().getTypePtr()));
- ASTContext::BuiltinVectorTypeInfo VecInfo =
- Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
- TheCall->getArg(2)->getType().getCanonicalType().getTypePtr()));
- unsigned MaxIndex;
- if (ResVecInfo.NumVectors != 1) // vset for tuple type
- MaxIndex = ResVecInfo.NumVectors;
- else // vset fo non-tuple type
- MaxIndex = (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) /
- (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors);
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
- }
- // Vector Crypto
- case RISCVVector::BI__builtin_rvv_vaeskf1_vi_tu:
- case RISCVVector::BI__builtin_rvv_vaeskf2_vi_tu:
- case RISCVVector::BI__builtin_rvv_vaeskf2_vi:
- case RISCVVector::BI__builtin_rvv_vsm4k_vi_tu: {
- QualType Op1Type = TheCall->getArg(0)->getType();
- QualType Op2Type = TheCall->getArg(1)->getType();
- return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) ||
- CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, 128) ||
- SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
- }
- case RISCVVector::BI__builtin_rvv_vsm3c_vi_tu:
- case RISCVVector::BI__builtin_rvv_vsm3c_vi: {
- QualType Op1Type = TheCall->getArg(0)->getType();
- return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 256) ||
- SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
- }
- case RISCVVector::BI__builtin_rvv_vaeskf1_vi:
- case RISCVVector::BI__builtin_rvv_vsm4k_vi: {
- QualType Op1Type = TheCall->getArg(0)->getType();
- return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) ||
- SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
- }
- case RISCVVector::BI__builtin_rvv_vaesdf_vv:
- case RISCVVector::BI__builtin_rvv_vaesdf_vs:
- case RISCVVector::BI__builtin_rvv_vaesdm_vv:
- case RISCVVector::BI__builtin_rvv_vaesdm_vs:
- case RISCVVector::BI__builtin_rvv_vaesef_vv:
- case RISCVVector::BI__builtin_rvv_vaesef_vs:
- case RISCVVector::BI__builtin_rvv_vaesem_vv:
- case RISCVVector::BI__builtin_rvv_vaesem_vs:
- case RISCVVector::BI__builtin_rvv_vaesz_vs:
- case RISCVVector::BI__builtin_rvv_vsm4r_vv:
- case RISCVVector::BI__builtin_rvv_vsm4r_vs:
- case RISCVVector::BI__builtin_rvv_vaesdf_vv_tu:
- case RISCVVector::BI__builtin_rvv_vaesdf_vs_tu:
- case RISCVVector::BI__builtin_rvv_vaesdm_vv_tu:
- case RISCVVector::BI__builtin_rvv_vaesdm_vs_tu:
- case RISCVVector::BI__builtin_rvv_vaesef_vv_tu:
- case RISCVVector::BI__builtin_rvv_vaesef_vs_tu:
- case RISCVVector::BI__builtin_rvv_vaesem_vv_tu:
- case RISCVVector::BI__builtin_rvv_vaesem_vs_tu:
- case RISCVVector::BI__builtin_rvv_vaesz_vs_tu:
- case RISCVVector::BI__builtin_rvv_vsm4r_vv_tu:
- case RISCVVector::BI__builtin_rvv_vsm4r_vs_tu: {
- QualType Op1Type = TheCall->getArg(0)->getType();
- QualType Op2Type = TheCall->getArg(1)->getType();
- return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) ||
- CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, 128);
- }
- case RISCVVector::BI__builtin_rvv_vsha2ch_vv:
- case RISCVVector::BI__builtin_rvv_vsha2cl_vv:
- case RISCVVector::BI__builtin_rvv_vsha2ms_vv:
- case RISCVVector::BI__builtin_rvv_vsha2ch_vv_tu:
- case RISCVVector::BI__builtin_rvv_vsha2cl_vv_tu:
- case RISCVVector::BI__builtin_rvv_vsha2ms_vv_tu: {
- QualType Op1Type = TheCall->getArg(0)->getType();
- QualType Op2Type = TheCall->getArg(1)->getType();
- QualType Op3Type = TheCall->getArg(2)->getType();
- ASTContext::BuiltinVectorTypeInfo Info =
- Context.getBuiltinVectorTypeInfo(Op1Type->castAs<BuiltinType>());
- uint64_t ElemSize = Context.getTypeSize(Info.ElementType);
- if (ElemSize == 64 && !TI.hasFeature("zvknhb"))
- return Diag(TheCall->getBeginLoc(),
- diag::err_riscv_builtin_requires_extension)
- << /* IsExtension */ true << TheCall->getSourceRange() << "zvknb";
-
- return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, ElemSize * 4) ||
- CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, ElemSize * 4) ||
- CheckInvalidVLENandLMUL(TI, TheCall, *this, Op3Type, ElemSize * 4);
- }
-
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf8:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf4:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf2:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m1:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m2:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m4:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m8:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16mf4:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16mf2:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m1:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m2:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m4:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m8:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32mf2:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m1:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m2:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m4:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m8:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m1:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m2:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m4:
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m8:
- // bit_27_26, bit_24_20, bit_11_7, simm5
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
- SemaBuiltinConstantArgRange(TheCall, 2, 0, 31) ||
- SemaBuiltinConstantArgRange(TheCall, 3, -16, 15);
- case RISCVVector::BI__builtin_rvv_sf_vc_iv_se:
- // bit_27_26, bit_11_7, vs2, simm5
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
- SemaBuiltinConstantArgRange(TheCall, 3, -16, 15);
- case RISCVVector::BI__builtin_rvv_sf_vc_v_i:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_i_se:
- // bit_27_26, bit_24_20, simm5
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
- SemaBuiltinConstantArgRange(TheCall, 2, -16, 15);
- case RISCVVector::BI__builtin_rvv_sf_vc_v_iv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_iv_se:
- // bit_27_26, vs2, simm5
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- SemaBuiltinConstantArgRange(TheCall, 2, -16, 15);
- case RISCVVector::BI__builtin_rvv_sf_vc_ivv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_ivw_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw_se:
- // bit_27_26, vd, vs2, simm5
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- SemaBuiltinConstantArgRange(TheCall, 3, -16, 15);
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf8:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf4:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf2:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m1:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m2:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m4:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m8:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16mf4:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16mf2:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m1:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m2:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m4:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m8:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32mf2:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m1:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m2:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m4:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m8:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m1:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m2:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m4:
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m8:
- // bit_27_26, bit_24_20, bit_11_7, xs1
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
- SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
- case RISCVVector::BI__builtin_rvv_sf_vc_xv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_vv_se:
- // bit_27_26, bit_11_7, vs2, xs1/vs1
- case RISCVVector::BI__builtin_rvv_sf_vc_v_x:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_x_se:
- // bit_27_26, bit_24-20, xs1
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
- case RISCVVector::BI__builtin_rvv_sf_vc_vvv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_xvv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_vvw_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_xvw_se:
- // bit_27_26, vd, vs2, xs1
- case RISCVVector::BI__builtin_rvv_sf_vc_v_xv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_vv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_xv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_vv_se:
- // bit_27_26, vs2, xs1/vs1
- case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw_se:
- // bit_27_26, vd, vs2, xs1/vs1
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
- case RISCVVector::BI__builtin_rvv_sf_vc_fv_se:
- // bit_26, bit_11_7, vs2, fs1
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) ||
- SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
- case RISCVVector::BI__builtin_rvv_sf_vc_fvv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_fvw_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw_se:
- // bit_26, vd, vs2, fs1
- case RISCVVector::BI__builtin_rvv_sf_vc_v_fv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_fv_se:
- // bit_26, vs2, fs1
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
- // Check if byteselect is in [0, 3]
- case RISCV::BI__builtin_riscv_aes32dsi:
- case RISCV::BI__builtin_riscv_aes32dsmi:
- case RISCV::BI__builtin_riscv_aes32esi:
- case RISCV::BI__builtin_riscv_aes32esmi:
- case RISCV::BI__builtin_riscv_sm4ks:
- case RISCV::BI__builtin_riscv_sm4ed:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
- // Check if rnum is in [0, 10]
- case RISCV::BI__builtin_riscv_aes64ks1i:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 10);
- // Check if value range for vxrm is in [0, 3]
- case RISCVVector::BI__builtin_rvv_vaaddu_vv:
- case RISCVVector::BI__builtin_rvv_vaaddu_vx:
- case RISCVVector::BI__builtin_rvv_vaadd_vv:
- case RISCVVector::BI__builtin_rvv_vaadd_vx:
- case RISCVVector::BI__builtin_rvv_vasubu_vv:
- case RISCVVector::BI__builtin_rvv_vasubu_vx:
- case RISCVVector::BI__builtin_rvv_vasub_vv:
- case RISCVVector::BI__builtin_rvv_vasub_vx:
- case RISCVVector::BI__builtin_rvv_vsmul_vv:
- case RISCVVector::BI__builtin_rvv_vsmul_vx:
- case RISCVVector::BI__builtin_rvv_vssra_vv:
- case RISCVVector::BI__builtin_rvv_vssra_vx:
- case RISCVVector::BI__builtin_rvv_vssrl_vv:
- case RISCVVector::BI__builtin_rvv_vssrl_vx:
- case RISCVVector::BI__builtin_rvv_vnclip_wv:
- case RISCVVector::BI__builtin_rvv_vnclip_wx:
- case RISCVVector::BI__builtin_rvv_vnclipu_wv:
- case RISCVVector::BI__builtin_rvv_vnclipu_wx:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
- case RISCVVector::BI__builtin_rvv_vaaddu_vv_tu:
- case RISCVVector::BI__builtin_rvv_vaaddu_vx_tu:
- case RISCVVector::BI__builtin_rvv_vaadd_vv_tu:
- case RISCVVector::BI__builtin_rvv_vaadd_vx_tu:
- case RISCVVector::BI__builtin_rvv_vasubu_vv_tu:
- case RISCVVector::BI__builtin_rvv_vasubu_vx_tu:
- case RISCVVector::BI__builtin_rvv_vasub_vv_tu:
- case RISCVVector::BI__builtin_rvv_vasub_vx_tu:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
- case RISCVVector::BI__builtin_rvv_vssra_vv_tu:
- case RISCVVector::BI__builtin_rvv_vssra_vx_tu:
- case RISCVVector::BI__builtin_rvv_vssrl_vv_tu:
- case RISCVVector::BI__builtin_rvv_vssrl_vx_tu:
- case RISCVVector::BI__builtin_rvv_vnclip_wv_tu:
- case RISCVVector::BI__builtin_rvv_vnclip_wx_tu:
- case RISCVVector::BI__builtin_rvv_vnclipu_wv_tu:
- case RISCVVector::BI__builtin_rvv_vnclipu_wx_tu:
- case RISCVVector::BI__builtin_rvv_vaaddu_vv_m:
- case RISCVVector::BI__builtin_rvv_vaaddu_vx_m:
- case RISCVVector::BI__builtin_rvv_vaadd_vv_m:
- case RISCVVector::BI__builtin_rvv_vaadd_vx_m:
- case RISCVVector::BI__builtin_rvv_vasubu_vv_m:
- case RISCVVector::BI__builtin_rvv_vasubu_vx_m:
- case RISCVVector::BI__builtin_rvv_vasub_vv_m:
- case RISCVVector::BI__builtin_rvv_vasub_vx_m:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
- case RISCVVector::BI__builtin_rvv_vssra_vv_m:
- case RISCVVector::BI__builtin_rvv_vssra_vx_m:
- case RISCVVector::BI__builtin_rvv_vssrl_vv_m:
- case RISCVVector::BI__builtin_rvv_vssrl_vx_m:
- case RISCVVector::BI__builtin_rvv_vnclip_wv_m:
- case RISCVVector::BI__builtin_rvv_vnclip_wx_m:
- case RISCVVector::BI__builtin_rvv_vnclipu_wv_m:
- case RISCVVector::BI__builtin_rvv_vnclipu_wx_m:
- return SemaBuiltinConstantArgRange(TheCall, 3, 0, 3);
- case RISCVVector::BI__builtin_rvv_vaaddu_vv_tum:
- case RISCVVector::BI__builtin_rvv_vaaddu_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vaaddu_vv_mu:
- case RISCVVector::BI__builtin_rvv_vaaddu_vx_tum:
- case RISCVVector::BI__builtin_rvv_vaaddu_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vaaddu_vx_mu:
- case RISCVVector::BI__builtin_rvv_vaadd_vv_tum:
- case RISCVVector::BI__builtin_rvv_vaadd_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vaadd_vv_mu:
- case RISCVVector::BI__builtin_rvv_vaadd_vx_tum:
- case RISCVVector::BI__builtin_rvv_vaadd_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vaadd_vx_mu:
- case RISCVVector::BI__builtin_rvv_vasubu_vv_tum:
- case RISCVVector::BI__builtin_rvv_vasubu_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vasubu_vv_mu:
- case RISCVVector::BI__builtin_rvv_vasubu_vx_tum:
- case RISCVVector::BI__builtin_rvv_vasubu_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vasubu_vx_mu:
- case RISCVVector::BI__builtin_rvv_vasub_vv_tum:
- case RISCVVector::BI__builtin_rvv_vasub_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vasub_vv_mu:
- case RISCVVector::BI__builtin_rvv_vasub_vx_tum:
- case RISCVVector::BI__builtin_rvv_vasub_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vasub_vx_mu:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
- case RISCVVector::BI__builtin_rvv_vssra_vv_mu:
- case RISCVVector::BI__builtin_rvv_vssra_vx_mu:
- case RISCVVector::BI__builtin_rvv_vssrl_vv_mu:
- case RISCVVector::BI__builtin_rvv_vssrl_vx_mu:
- case RISCVVector::BI__builtin_rvv_vnclip_wv_mu:
- case RISCVVector::BI__builtin_rvv_vnclip_wx_mu:
- case RISCVVector::BI__builtin_rvv_vnclipu_wv_mu:
- case RISCVVector::BI__builtin_rvv_vnclipu_wx_mu:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
- case RISCVVector::BI__builtin_rvv_vssra_vv_tum:
- case RISCVVector::BI__builtin_rvv_vssra_vx_tum:
- case RISCVVector::BI__builtin_rvv_vssrl_vv_tum:
- case RISCVVector::BI__builtin_rvv_vssrl_vx_tum:
- case RISCVVector::BI__builtin_rvv_vnclip_wv_tum:
- case RISCVVector::BI__builtin_rvv_vnclip_wx_tum:
- case RISCVVector::BI__builtin_rvv_vnclipu_wv_tum:
- case RISCVVector::BI__builtin_rvv_vnclipu_wx_tum:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vssra_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vssra_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vssrl_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vssrl_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vnclip_wv_tumu:
- case RISCVVector::BI__builtin_rvv_vnclip_wx_tumu:
- case RISCVVector::BI__builtin_rvv_vnclipu_wv_tumu:
- case RISCVVector::BI__builtin_rvv_vnclipu_wx_tumu:
- return SemaBuiltinConstantArgRange(TheCall, 4, 0, 3);
- case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm:
- case RISCVVector::BI__builtin_rvv_vfrec7_v_rm:
- case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm:
- case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm:
- case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm:
- case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm:
- case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm:
- case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 4);
- case RISCVVector::BI__builtin_rvv_vfadd_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfadd_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfsub_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfsub_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm:
- case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm:
- case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm:
- case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm:
- case RISCVVector::BI__builtin_rvv_vfmul_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfmul_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm:
- case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm:
- case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm:
- case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm:
- case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_m:
- case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_m:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_m:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_m:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_m:
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, 4);
- case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_m:
- case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_m:
- case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_mu:
- return SemaBuiltinConstantArgRange(TheCall, 3, 0, 4);
- case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_mu:
- return SemaBuiltinConstantArgRange(TheCall, 4, 0, 4);
- case RISCV::BI__builtin_riscv_ntl_load:
- case RISCV::BI__builtin_riscv_ntl_store:
- DeclRefExpr *DRE =
- cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
- assert((BuiltinID == RISCV::BI__builtin_riscv_ntl_store ||
- BuiltinID == RISCV::BI__builtin_riscv_ntl_load) &&
- "Unexpected RISC-V nontemporal load/store builtin!");
- bool IsStore = BuiltinID == RISCV::BI__builtin_riscv_ntl_store;
- unsigned NumArgs = IsStore ? 3 : 2;
-
- if (checkArgCountAtLeast(*this, TheCall, NumArgs - 1))
- return true;
-
- if (checkArgCountAtMost(*this, TheCall, NumArgs))
- return true;
-
- // Domain value should be compile-time constant.
- // 2 <= domain <= 5
- if (TheCall->getNumArgs() == NumArgs &&
- SemaBuiltinConstantArgRange(TheCall, NumArgs - 1, 2, 5))
- return true;
-
- Expr *PointerArg = TheCall->getArg(0);
- ExprResult PointerArgResult =
- DefaultFunctionArrayLvalueConversion(PointerArg);
-
- if (PointerArgResult.isInvalid())
- return true;
- PointerArg = PointerArgResult.get();
-
- const PointerType *PtrType = PointerArg->getType()->getAs<PointerType>();
- if (!PtrType) {
- Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
- << PointerArg->getType() << PointerArg->getSourceRange();
- return true;
- }
-
- QualType ValType = PtrType->getPointeeType();
- ValType = ValType.getUnqualifiedType();
- if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
- !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
- !ValType->isVectorType() && !ValType->isRVVSizelessBuiltinType()) {
- Diag(DRE->getBeginLoc(),
- diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
- << PointerArg->getType() << PointerArg->getSourceRange();
- return true;
- }
-
- if (!IsStore) {
- TheCall->setType(ValType);
- return false;
- }
-
- ExprResult ValArg = TheCall->getArg(1);
- InitializedEntity Entity = InitializedEntity::InitializeParameter(
- Context, ValType, /*consume*/ false);
- ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
- if (ValArg.isInvalid())
- return true;
-
- TheCall->setArg(1, ValArg.get());
- TheCall->setType(Context.VoidTy);
- return false;
- }
-
- return false;
-}
-
-bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
- CallExpr *TheCall) {
- if (BuiltinID == SystemZ::BI__builtin_tabort) {
- Expr *Arg = TheCall->getArg(0);
- if (std::optional<llvm::APSInt> AbortCode =
- Arg->getIntegerConstantExpr(Context))
- if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256)
- return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
- << Arg->getSourceRange();
- }
-
- // For intrinsics which take an immediate value as part of the instruction,
- // range check them here.
- unsigned i = 0, l = 0, u = 0;
- switch (BuiltinID) {
- default: return false;
- case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
- case SystemZ::BI__builtin_s390_verimb:
- case SystemZ::BI__builtin_s390_verimh:
- case SystemZ::BI__builtin_s390_verimf:
- case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
- case SystemZ::BI__builtin_s390_vfaeb:
- case SystemZ::BI__builtin_s390_vfaeh:
- case SystemZ::BI__builtin_s390_vfaef:
- case SystemZ::BI__builtin_s390_vfaebs:
- case SystemZ::BI__builtin_s390_vfaehs:
- case SystemZ::BI__builtin_s390_vfaefs:
- case SystemZ::BI__builtin_s390_vfaezb:
- case SystemZ::BI__builtin_s390_vfaezh:
- case SystemZ::BI__builtin_s390_vfaezf:
- case SystemZ::BI__builtin_s390_vfaezbs:
- case SystemZ::BI__builtin_s390_vfaezhs:
- case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
- case SystemZ::BI__builtin_s390_vfisb:
- case SystemZ::BI__builtin_s390_vfidb:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) ||
- SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
- case SystemZ::BI__builtin_s390_vftcisb:
- case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
- case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
- case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
- case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
- case SystemZ::BI__builtin_s390_vstrcb:
- case SystemZ::BI__builtin_s390_vstrch:
- case SystemZ::BI__builtin_s390_vstrcf:
- case SystemZ::BI__builtin_s390_vstrczb:
- case SystemZ::BI__builtin_s390_vstrczh:
- case SystemZ::BI__builtin_s390_vstrczf:
- case SystemZ::BI__builtin_s390_vstrcbs:
- case SystemZ::BI__builtin_s390_vstrchs:
- case SystemZ::BI__builtin_s390_vstrcfs:
- case SystemZ::BI__builtin_s390_vstrczbs:
- case SystemZ::BI__builtin_s390_vstrczhs:
- case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
- case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
- case SystemZ::BI__builtin_s390_vfminsb:
- case SystemZ::BI__builtin_s390_vfmaxsb:
- case SystemZ::BI__builtin_s390_vfmindb:
- case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
- case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
- case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
- case SystemZ::BI__builtin_s390_vclfnhs:
- case SystemZ::BI__builtin_s390_vclfnls:
- case SystemZ::BI__builtin_s390_vcfn:
- case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break;
- case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break;
- }
- return SemaBuiltinConstantArgRange(TheCall, i, l, u);
-}
-
-bool Sema::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID,
- CallExpr *TheCall) {
- switch (BuiltinID) {
- case WebAssembly::BI__builtin_wasm_ref_null_extern:
- return BuiltinWasmRefNullExtern(TheCall);
- case WebAssembly::BI__builtin_wasm_ref_null_func:
- return BuiltinWasmRefNullFunc(TheCall);
- case WebAssembly::BI__builtin_wasm_table_get:
- return BuiltinWasmTableGet(TheCall);
- case WebAssembly::BI__builtin_wasm_table_set:
- return BuiltinWasmTableSet(TheCall);
- case WebAssembly::BI__builtin_wasm_table_size:
- return BuiltinWasmTableSize(TheCall);
- case WebAssembly::BI__builtin_wasm_table_grow:
- return BuiltinWasmTableGrow(TheCall);
- case WebAssembly::BI__builtin_wasm_table_fill:
- return BuiltinWasmTableFill(TheCall);
- case WebAssembly::BI__builtin_wasm_table_copy:
- return BuiltinWasmTableCopy(TheCall);
- }
-
- return false;
-}
-
-void Sema::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D) {
- const TargetInfo &TI = Context.getTargetInfo();
-
- ASTContext::BuiltinVectorTypeInfo Info =
- Context.getBuiltinVectorTypeInfo(Ty->castAs<BuiltinType>());
- unsigned EltSize = Context.getTypeSize(Info.ElementType);
- unsigned MinElts = Info.EC.getKnownMinValue();
-
- // (ELEN, LMUL) pairs of (8, mf8), (16, mf4), (32, mf2), (64, m1) requires at
- // least zve64x
- if (((EltSize == 64 && Info.ElementType->isIntegerType()) || MinElts == 1) &&
- !TI.hasFeature("zve64x"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64x";
- else if (Info.ElementType->isFloat16Type() && !TI.hasFeature("zvfh") &&
- !TI.hasFeature("zvfhmin"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D)
- << Ty << "zvfh or zvfhmin";
- else if (Info.ElementType->isBFloat16Type() &&
- !TI.hasFeature("experimental-zvfbfmin"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zvfbfmin";
- else if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Float) &&
- !TI.hasFeature("zve32f"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32f";
- else if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Double) &&
- !TI.hasFeature("zve64d"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64d";
- // Given that caller already checked isRVVType() before calling this function,
- // if we don't have at least zve32x supported, then we need to emit error.
- else if (!TI.hasFeature("zve32x"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32x";
-}
-
-bool Sema::CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID,
- CallExpr *TheCall) {
- switch (BuiltinID) {
- case NVPTX::BI__nvvm_cp_async_ca_shared_global_4:
- case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
- case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
- case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
- return checkArgCountAtMost(*this, TheCall, 3);
- }
-
- return false;
-}
-
-/// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
-/// This checks that the target supports __builtin_cpu_supports and
-/// that the string argument is constant and valid.
-static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI,
- CallExpr *TheCall) {
- Expr *Arg = TheCall->getArg(0);
-
- // Check if the argument is a string literal.
- if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
- return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
- << Arg->getSourceRange();
-
- // Check the contents of the string.
- StringRef Feature =
- cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
- if (!TI.validateCpuSupports(Feature))
- return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports)
- << Arg->getSourceRange();
- return false;
-}
-
-/// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *).
-/// This checks that the target supports __builtin_cpu_is and
-/// that the string argument is constant and valid.
-static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) {
- Expr *Arg = TheCall->getArg(0);
-
- // Check if the argument is a string literal.
- if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
- return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
- << Arg->getSourceRange();
-
- // Check the contents of the string.
- StringRef Feature =
- cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
- if (!TI.validateCpuIs(Feature))
- return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
- << Arg->getSourceRange();
- return false;
-}
-
-// Check if the rounding mode is legal.
-bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
- // Indicates if this instruction has rounding control or just SAE.
- bool HasRC = false;
-
- unsigned ArgNum = 0;
- switch (BuiltinID) {
- default:
- return false;
- case X86::BI__builtin_ia32_vcvttsd2si32:
- case X86::BI__builtin_ia32_vcvttsd2si64:
- case X86::BI__builtin_ia32_vcvttsd2usi32:
- case X86::BI__builtin_ia32_vcvttsd2usi64:
- case X86::BI__builtin_ia32_vcvttss2si32:
- case X86::BI__builtin_ia32_vcvttss2si64:
- case X86::BI__builtin_ia32_vcvttss2usi32:
- case X86::BI__builtin_ia32_vcvttss2usi64:
- case X86::BI__builtin_ia32_vcvttsh2si32:
- case X86::BI__builtin_ia32_vcvttsh2si64:
- case X86::BI__builtin_ia32_vcvttsh2usi32:
- case X86::BI__builtin_ia32_vcvttsh2usi64:
- ArgNum = 1;
- break;
- case X86::BI__builtin_ia32_maxpd512:
- case X86::BI__builtin_ia32_maxps512:
- case X86::BI__builtin_ia32_minpd512:
- case X86::BI__builtin_ia32_minps512:
- case X86::BI__builtin_ia32_maxph512:
- case X86::BI__builtin_ia32_minph512:
- ArgNum = 2;
- break;
- case X86::BI__builtin_ia32_vcvtph2pd512_mask:
- case X86::BI__builtin_ia32_vcvtph2psx512_mask:
- case X86::BI__builtin_ia32_cvtps2pd512_mask:
- case X86::BI__builtin_ia32_cvttpd2dq512_mask:
- case X86::BI__builtin_ia32_cvttpd2qq512_mask:
- case X86::BI__builtin_ia32_cvttpd2udq512_mask:
- case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
- case X86::BI__builtin_ia32_cvttps2dq512_mask:
- case X86::BI__builtin_ia32_cvttps2qq512_mask:
- case X86::BI__builtin_ia32_cvttps2udq512_mask:
- case X86::BI__builtin_ia32_cvttps2uqq512_mask:
- case X86::BI__builtin_ia32_vcvttph2w512_mask:
- case X86::BI__builtin_ia32_vcvttph2uw512_mask:
- case X86::BI__builtin_ia32_vcvttph2dq512_mask:
- case X86::BI__builtin_ia32_vcvttph2udq512_mask:
- case X86::BI__builtin_ia32_vcvttph2qq512_mask:
- case X86::BI__builtin_ia32_vcvttph2uqq512_mask:
- case X86::BI__builtin_ia32_exp2pd_mask:
- case X86::BI__builtin_ia32_exp2ps_mask:
- case X86::BI__builtin_ia32_getexppd512_mask:
- case X86::BI__builtin_ia32_getexpps512_mask:
- case X86::BI__builtin_ia32_getexpph512_mask:
- case X86::BI__builtin_ia32_rcp28pd_mask:
- case X86::BI__builtin_ia32_rcp28ps_mask:
- case X86::BI__builtin_ia32_rsqrt28pd_mask:
- case X86::BI__builtin_ia32_rsqrt28ps_mask:
- case X86::BI__builtin_ia32_vcomisd:
- case X86::BI__builtin_ia32_vcomiss:
- case X86::BI__builtin_ia32_vcomish:
- case X86::BI__builtin_ia32_vcvtph2ps512_mask:
- ArgNum = 3;
- break;
- case X86::BI__builtin_ia32_cmppd512_mask:
- case X86::BI__builtin_ia32_cmpps512_mask:
- case X86::BI__builtin_ia32_cmpsd_mask:
- case X86::BI__builtin_ia32_cmpss_mask:
- case X86::BI__builtin_ia32_cmpsh_mask:
- case X86::BI__builtin_ia32_vcvtsh2sd_round_mask:
- case X86::BI__builtin_ia32_vcvtsh2ss_round_mask:
- case X86::BI__builtin_ia32_cvtss2sd_round_mask:
- case X86::BI__builtin_ia32_getexpsd128_round_mask:
- case X86::BI__builtin_ia32_getexpss128_round_mask:
- case X86::BI__builtin_ia32_getexpsh128_round_mask:
- case X86::BI__builtin_ia32_getmantpd512_mask:
- case X86::BI__builtin_ia32_getmantps512_mask:
- case X86::BI__builtin_ia32_getmantph512_mask:
- case X86::BI__builtin_ia32_maxsd_round_mask:
- case X86::BI__builtin_ia32_maxss_round_mask:
- case X86::BI__builtin_ia32_maxsh_round_mask:
- case X86::BI__builtin_ia32_minsd_round_mask:
- case X86::BI__builtin_ia32_minss_round_mask:
- case X86::BI__builtin_ia32_minsh_round_mask:
- case X86::BI__builtin_ia32_rcp28sd_round_mask:
- case X86::BI__builtin_ia32_rcp28ss_round_mask:
- case X86::BI__builtin_ia32_reducepd512_mask:
- case X86::BI__builtin_ia32_reduceps512_mask:
- case X86::BI__builtin_ia32_reduceph512_mask:
- case X86::BI__builtin_ia32_rndscalepd_mask:
- case X86::BI__builtin_ia32_rndscaleps_mask:
- case X86::BI__builtin_ia32_rndscaleph_mask:
- case X86::BI__builtin_ia32_rsqrt28sd_round_mask:
- case X86::BI__builtin_ia32_rsqrt28ss_round_mask:
- ArgNum = 4;
- break;
- case X86::BI__builtin_ia32_fixupimmpd512_mask:
- case X86::BI__builtin_ia32_fixupimmpd512_maskz:
- case X86::BI__builtin_ia32_fixupimmps512_mask:
- case X86::BI__builtin_ia32_fixupimmps512_maskz:
- case X86::BI__builtin_ia32_fixupimmsd_mask:
- case X86::BI__builtin_ia32_fixupimmsd_maskz:
- case X86::BI__builtin_ia32_fixupimmss_mask:
- case X86::BI__builtin_ia32_fixupimmss_maskz:
- case X86::BI__builtin_ia32_getmantsd_round_mask:
- case X86::BI__builtin_ia32_getmantss_round_mask:
- case X86::BI__builtin_ia32_getmantsh_round_mask:
- case X86::BI__builtin_ia32_rangepd512_mask:
- case X86::BI__builtin_ia32_rangeps512_mask:
- case X86::BI__builtin_ia32_rangesd128_round_mask:
- case X86::BI__builtin_ia32_rangess128_round_mask:
- case X86::BI__builtin_ia32_reducesd_mask:
- case X86::BI__builtin_ia32_reducess_mask:
- case X86::BI__builtin_ia32_reducesh_mask:
- case X86::BI__builtin_ia32_rndscalesd_round_mask:
- case X86::BI__builtin_ia32_rndscaless_round_mask:
- case X86::BI__builtin_ia32_rndscalesh_round_mask:
- ArgNum = 5;
- break;
- case X86::BI__builtin_ia32_vcvtsd2si64:
- case X86::BI__builtin_ia32_vcvtsd2si32:
- case X86::BI__builtin_ia32_vcvtsd2usi32:
- case X86::BI__builtin_ia32_vcvtsd2usi64:
- case X86::BI__builtin_ia32_vcvtss2si32:
- case X86::BI__builtin_ia32_vcvtss2si64:
- case X86::BI__builtin_ia32_vcvtss2usi32:
- case X86::BI__builtin_ia32_vcvtss2usi64:
- case X86::BI__builtin_ia32_vcvtsh2si32:
- case X86::BI__builtin_ia32_vcvtsh2si64:
- case X86::BI__builtin_ia32_vcvtsh2usi32:
- case X86::BI__builtin_ia32_vcvtsh2usi64:
- case X86::BI__builtin_ia32_sqrtpd512:
- case X86::BI__builtin_ia32_sqrtps512:
- case X86::BI__builtin_ia32_sqrtph512:
- ArgNum = 1;
- HasRC = true;
- break;
- case X86::BI__builtin_ia32_addph512:
- case X86::BI__builtin_ia32_divph512:
- case X86::BI__builtin_ia32_mulph512:
- case X86::BI__builtin_ia32_subph512:
- case X86::BI__builtin_ia32_addpd512:
- case X86::BI__builtin_ia32_addps512:
- case X86::BI__builtin_ia32_divpd512:
- case X86::BI__builtin_ia32_divps512:
- case X86::BI__builtin_ia32_mulpd512:
- case X86::BI__builtin_ia32_mulps512:
- case X86::BI__builtin_ia32_subpd512:
- case X86::BI__builtin_ia32_subps512:
- case X86::BI__builtin_ia32_cvtsi2sd64:
- case X86::BI__builtin_ia32_cvtsi2ss32:
- case X86::BI__builtin_ia32_cvtsi2ss64:
- case X86::BI__builtin_ia32_cvtusi2sd64:
- case X86::BI__builtin_ia32_cvtusi2ss32:
- case X86::BI__builtin_ia32_cvtusi2ss64:
- case X86::BI__builtin_ia32_vcvtusi2sh:
- case X86::BI__builtin_ia32_vcvtusi642sh:
- case X86::BI__builtin_ia32_vcvtsi2sh:
- case X86::BI__builtin_ia32_vcvtsi642sh:
- ArgNum = 2;
- HasRC = true;
- break;
- case X86::BI__builtin_ia32_cvtdq2ps512_mask:
- case X86::BI__builtin_ia32_cvtudq2ps512_mask:
- case X86::BI__builtin_ia32_vcvtpd2ph512_mask:
- case X86::BI__builtin_ia32_vcvtps2phx512_mask:
- case X86::BI__builtin_ia32_cvtpd2ps512_mask:
- case X86::BI__builtin_ia32_cvtpd2dq512_mask:
- case X86::BI__builtin_ia32_cvtpd2qq512_mask:
- case X86::BI__builtin_ia32_cvtpd2udq512_mask:
- case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
- case X86::BI__builtin_ia32_cvtps2dq512_mask:
- case X86::BI__builtin_ia32_cvtps2qq512_mask:
- case X86::BI__builtin_ia32_cvtps2udq512_mask:
- case X86::BI__builtin_ia32_cvtps2uqq512_mask:
- case X86::BI__builtin_ia32_cvtqq2pd512_mask:
- case X86::BI__builtin_ia32_cvtqq2ps512_mask:
- case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
- case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
- case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
- case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
- case X86::BI__builtin_ia32_vcvtw2ph512_mask:
- case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
- case X86::BI__builtin_ia32_vcvtph2w512_mask:
- case X86::BI__builtin_ia32_vcvtph2uw512_mask:
- case X86::BI__builtin_ia32_vcvtph2dq512_mask:
- case X86::BI__builtin_ia32_vcvtph2udq512_mask:
- case X86::BI__builtin_ia32_vcvtph2qq512_mask:
- case X86::BI__builtin_ia32_vcvtph2uqq512_mask:
- case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
- case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
- ArgNum = 3;
- HasRC = true;
- break;
- case X86::BI__builtin_ia32_addsh_round_mask:
- case X86::BI__builtin_ia32_addss_round_mask:
- case X86::BI__builtin_ia32_addsd_round_mask:
- case X86::BI__builtin_ia32_divsh_round_mask:
- case X86::BI__builtin_ia32_divss_round_mask:
- case X86::BI__builtin_ia32_divsd_round_mask:
- case X86::BI__builtin_ia32_mulsh_round_mask:
- case X86::BI__builtin_ia32_mulss_round_mask:
- case X86::BI__builtin_ia32_mulsd_round_mask:
- case X86::BI__builtin_ia32_subsh_round_mask:
- case X86::BI__builtin_ia32_subss_round_mask:
- case X86::BI__builtin_ia32_subsd_round_mask:
- case X86::BI__builtin_ia32_scalefph512_mask:
- case X86::BI__builtin_ia32_scalefpd512_mask:
- case X86::BI__builtin_ia32_scalefps512_mask:
- case X86::BI__builtin_ia32_scalefsd_round_mask:
- case X86::BI__builtin_ia32_scalefss_round_mask:
- case X86::BI__builtin_ia32_scalefsh_round_mask:
- case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
- case X86::BI__builtin_ia32_vcvtss2sh_round_mask:
- case X86::BI__builtin_ia32_vcvtsd2sh_round_mask:
- case X86::BI__builtin_ia32_sqrtsd_round_mask:
- case X86::BI__builtin_ia32_sqrtss_round_mask:
- case X86::BI__builtin_ia32_sqrtsh_round_mask:
- case X86::BI__builtin_ia32_vfmaddsd3_mask:
- case X86::BI__builtin_ia32_vfmaddsd3_maskz:
- case X86::BI__builtin_ia32_vfmaddsd3_mask3:
- case X86::BI__builtin_ia32_vfmaddss3_mask:
- case X86::BI__builtin_ia32_vfmaddss3_maskz:
- case X86::BI__builtin_ia32_vfmaddss3_mask3:
- case X86::BI__builtin_ia32_vfmaddsh3_mask:
- case X86::BI__builtin_ia32_vfmaddsh3_maskz:
- case X86::BI__builtin_ia32_vfmaddsh3_mask3:
- case X86::BI__builtin_ia32_vfmaddpd512_mask:
- case X86::BI__builtin_ia32_vfmaddpd512_maskz:
- case X86::BI__builtin_ia32_vfmaddpd512_mask3:
- case X86::BI__builtin_ia32_vfmsubpd512_mask3:
- case X86::BI__builtin_ia32_vfmaddps512_mask:
- case X86::BI__builtin_ia32_vfmaddps512_maskz:
- case X86::BI__builtin_ia32_vfmaddps512_mask3:
- case X86::BI__builtin_ia32_vfmsubps512_mask3:
- case X86::BI__builtin_ia32_vfmaddph512_mask:
- case X86::BI__builtin_ia32_vfmaddph512_maskz:
- case X86::BI__builtin_ia32_vfmaddph512_mask3:
- case X86::BI__builtin_ia32_vfmsubph512_mask3:
- case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
- case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
- case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
- case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
- case X86::BI__builtin_ia32_vfmaddsubps512_mask:
- case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
- case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
- case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
- case X86::BI__builtin_ia32_vfmaddsubph512_mask:
- case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
- case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
- case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
- case X86::BI__builtin_ia32_vfmaddcsh_mask:
- case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
- case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
- case X86::BI__builtin_ia32_vfmaddcph512_mask:
- case X86::BI__builtin_ia32_vfmaddcph512_maskz:
- case X86::BI__builtin_ia32_vfmaddcph512_mask3:
- case X86::BI__builtin_ia32_vfcmaddcsh_mask:
- case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
- case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
- case X86::BI__builtin_ia32_vfcmaddcph512_mask:
- case X86::BI__builtin_ia32_vfcmaddcph512_maskz:
- case X86::BI__builtin_ia32_vfcmaddcph512_mask3:
- case X86::BI__builtin_ia32_vfmulcsh_mask:
- case X86::BI__builtin_ia32_vfmulcph512_mask:
- case X86::BI__builtin_ia32_vfcmulcsh_mask:
- case X86::BI__builtin_ia32_vfcmulcph512_mask:
- ArgNum = 4;
- HasRC = true;
- break;
- }
-
- llvm::APSInt Result;
-
- // We can't check the value of a dependent argument.
- Expr *Arg = TheCall->getArg(ArgNum);
- if (Arg->isTypeDependent() || Arg->isValueDependent())
- return false;
-
- // Check constant-ness first.
- if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
- return true;
-
- // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit
- // is set. If the intrinsic has rounding control(bits 1:0), make sure its only
- // combined with ROUND_NO_EXC. If the intrinsic does not have rounding
- // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
- if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
- Result == 8/*ROUND_NO_EXC*/ ||
- (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
- (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
- return false;
-
- return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
- << Arg->getSourceRange();
-}
-
-// Check if the gather/scatter scale is legal.
-bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
- CallExpr *TheCall) {
- unsigned ArgNum = 0;
- switch (BuiltinID) {
- default:
- return false;
- case X86::BI__builtin_ia32_gatherpfdpd:
- case X86::BI__builtin_ia32_gatherpfdps:
- case X86::BI__builtin_ia32_gatherpfqpd:
- case X86::BI__builtin_ia32_gatherpfqps:
- case X86::BI__builtin_ia32_scatterpfdpd:
- case X86::BI__builtin_ia32_scatterpfdps:
- case X86::BI__builtin_ia32_scatterpfqpd:
- case X86::BI__builtin_ia32_scatterpfqps:
- ArgNum = 3;
- break;
- case X86::BI__builtin_ia32_gatherd_pd:
- case X86::BI__builtin_ia32_gatherd_pd256:
- case X86::BI__builtin_ia32_gatherq_pd:
- case X86::BI__builtin_ia32_gatherq_pd256:
- case X86::BI__builtin_ia32_gatherd_ps:
- case X86::BI__builtin_ia32_gatherd_ps256:
- case X86::BI__builtin_ia32_gatherq_ps:
- case X86::BI__builtin_ia32_gatherq_ps256:
- case X86::BI__builtin_ia32_gatherd_q:
- case X86::BI__builtin_ia32_gatherd_q256:
- case X86::BI__builtin_ia32_gatherq_q:
- case X86::BI__builtin_ia32_gatherq_q256:
- case X86::BI__builtin_ia32_gatherd_d:
- case X86::BI__builtin_ia32_gatherd_d256:
- case X86::BI__builtin_ia32_gatherq_d:
- case X86::BI__builtin_ia32_gatherq_d256:
- case X86::BI__builtin_ia32_gather3div2df:
- case X86::BI__builtin_ia32_gather3div2di:
- case X86::BI__builtin_ia32_gather3div4df:
- case X86::BI__builtin_ia32_gather3div4di:
- case X86::BI__builtin_ia32_gather3div4sf:
- case X86::BI__builtin_ia32_gather3div4si:
- case X86::BI__builtin_ia32_gather3div8sf:
- case X86::BI__builtin_ia32_gather3div8si:
- case X86::BI__builtin_ia32_gather3siv2df:
- case X86::BI__builtin_ia32_gather3siv2di:
- case X86::BI__builtin_ia32_gather3siv4df:
- case X86::BI__builtin_ia32_gather3siv4di:
- case X86::BI__builtin_ia32_gather3siv4sf:
- case X86::BI__builtin_ia32_gather3siv4si:
- case X86::BI__builtin_ia32_gather3siv8sf:
- case X86::BI__builtin_ia32_gather3siv8si:
- case X86::BI__builtin_ia32_gathersiv8df:
- case X86::BI__builtin_ia32_gathersiv16sf:
- case X86::BI__builtin_ia32_gatherdiv8df:
- case X86::BI__builtin_ia32_gatherdiv16sf:
- case X86::BI__builtin_ia32_gathersiv8di:
- case X86::BI__builtin_ia32_gathersiv16si:
- case X86::BI__builtin_ia32_gatherdiv8di:
- case X86::BI__builtin_ia32_gatherdiv16si:
- case X86::BI__builtin_ia32_scatterdiv2df:
- case X86::BI__builtin_ia32_scatterdiv2di:
- case X86::BI__builtin_ia32_scatterdiv4df:
- case X86::BI__builtin_ia32_scatterdiv4di:
- case X86::BI__builtin_ia32_scatterdiv4sf:
- case X86::BI__builtin_ia32_scatterdiv4si:
- case X86::BI__builtin_ia32_scatterdiv8sf:
- case X86::BI__builtin_ia32_scatterdiv8si:
- case X86::BI__builtin_ia32_scattersiv2df:
- case X86::BI__builtin_ia32_scattersiv2di:
- case X86::BI__builtin_ia32_scattersiv4df:
- case X86::BI__builtin_ia32_scattersiv4di:
- case X86::BI__builtin_ia32_scattersiv4sf:
- case X86::BI__builtin_ia32_scattersiv4si:
- case X86::BI__builtin_ia32_scattersiv8sf:
- case X86::BI__builtin_ia32_scattersiv8si:
- case X86::BI__builtin_ia32_scattersiv8df:
- case X86::BI__builtin_ia32_scattersiv16sf:
- case X86::BI__builtin_ia32_scatterdiv8df:
- case X86::BI__builtin_ia32_scatterdiv16sf:
- case X86::BI__builtin_ia32_scattersiv8di:
- case X86::BI__builtin_ia32_scattersiv16si:
- case X86::BI__builtin_ia32_scatterdiv8di:
- case X86::BI__builtin_ia32_scatterdiv16si:
- ArgNum = 4;
- break;
- }
-
- llvm::APSInt Result;
-
- // We can't check the value of a dependent argument.
- Expr *Arg = TheCall->getArg(ArgNum);
- if (Arg->isTypeDependent() || Arg->isValueDependent())
- return false;
-
- // Check constant-ness first.
- if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
- return true;
-
- if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
- return false;
-
- return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
- << Arg->getSourceRange();
-}
-
-enum { TileRegLow = 0, TileRegHigh = 7 };
-
-bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
- ArrayRef<int> ArgNums) {
- for (int ArgNum : ArgNums) {
- if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
- return true;
- }
- return false;
-}
-
-bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
- ArrayRef<int> ArgNums) {
- // Because the max number of tile register is TileRegHigh + 1, so here we use
- // each bit to represent the usage of them in bitset.
- std::bitset<TileRegHigh + 1> ArgValues;
- for (int ArgNum : ArgNums) {
- Expr *Arg = TheCall->getArg(ArgNum);
- if (Arg->isTypeDependent() || Arg->isValueDependent())
- continue;
-
- llvm::APSInt Result;
- if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
- return true;
- int ArgExtValue = Result.getExtValue();
- assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
- "Incorrect tile register num.");
- if (ArgValues.test(ArgExtValue))
- return Diag(TheCall->getBeginLoc(),
- diag::err_x86_builtin_tile_arg_duplicate)
- << TheCall->getArg(ArgNum)->getSourceRange();
- ArgValues.set(ArgExtValue);
- }
- return false;
-}
-
-bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
- ArrayRef<int> ArgNums) {
- return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
- CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
-}
-
-bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
- switch (BuiltinID) {
- default:
- return false;
- case X86::BI__builtin_ia32_tileloadd64:
- case X86::BI__builtin_ia32_tileloaddt164:
- case X86::BI__builtin_ia32_tilestored64:
- case X86::BI__builtin_ia32_tilezero:
- return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
- case X86::BI__builtin_ia32_tdpbssd:
- case X86::BI__builtin_ia32_tdpbsud:
- case X86::BI__builtin_ia32_tdpbusd:
- case X86::BI__builtin_ia32_tdpbuud:
- case X86::BI__builtin_ia32_tdpbf16ps:
- case X86::BI__builtin_ia32_tdpfp16ps:
- case X86::BI__builtin_ia32_tcmmimfp16ps:
- case X86::BI__builtin_ia32_tcmmrlfp16ps:
- return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
- }
-}
-static bool isX86_32Builtin(unsigned BuiltinID) {
- // These builtins only work on x86-32 targets.
- switch (BuiltinID) {
- case X86::BI__builtin_ia32_readeflags_u32:
- case X86::BI__builtin_ia32_writeeflags_u32:
- return true;
- }
-
- return false;
-}
-
-bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall) {
- if (BuiltinID == X86::BI__builtin_cpu_supports)
- return SemaBuiltinCpuSupports(*this, TI, TheCall);
-
- if (BuiltinID == X86::BI__builtin_cpu_is)
- return SemaBuiltinCpuIs(*this, TI, TheCall);
-
- // Check for 32-bit only builtins on a 64-bit target.
- const llvm::Triple &TT = TI.getTriple();
- if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
- return Diag(TheCall->getCallee()->getBeginLoc(),
- diag::err_32_bit_builtin_64_bit_tgt);
-
- // If the intrinsic has rounding or SAE make sure its valid.
- if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
- return true;
-
- // If the intrinsic has a gather/scatter scale immediate make sure its valid.
- if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
- return true;
-
- // If the intrinsic has a tile arguments, make sure they are valid.
- if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
- return true;
-
- // For intrinsics which take an immediate value as part of the instruction,
- // range check them here.
- int i = 0, l = 0, u = 0;
- switch (BuiltinID) {
- default:
- return false;
- case X86::BI__builtin_ia32_vec_ext_v2si:
- case X86::BI__builtin_ia32_vec_ext_v2di:
- case X86::BI__builtin_ia32_vextractf128_pd256:
- case X86::BI__builtin_ia32_vextractf128_ps256:
- case X86::BI__builtin_ia32_vextractf128_si256:
- case X86::BI__builtin_ia32_extract128i256:
- case X86::BI__builtin_ia32_extractf64x4_mask:
- case X86::BI__builtin_ia32_extracti64x4_mask:
- case X86::BI__builtin_ia32_extractf32x8_mask:
- case X86::BI__builtin_ia32_extracti32x8_mask:
- case X86::BI__builtin_ia32_extractf64x2_256_mask:
- case X86::BI__builtin_ia32_extracti64x2_256_mask:
- case X86::BI__builtin_ia32_extractf32x4_256_mask:
- case X86::BI__builtin_ia32_extracti32x4_256_mask:
- i = 1; l = 0; u = 1;
- break;
- case X86::BI__builtin_ia32_vec_set_v2di:
- case X86::BI__builtin_ia32_vinsertf128_pd256:
- case X86::BI__builtin_ia32_vinsertf128_ps256:
- case X86::BI__builtin_ia32_vinsertf128_si256:
- case X86::BI__builtin_ia32_insert128i256:
- case X86::BI__builtin_ia32_insertf32x8:
- case X86::BI__builtin_ia32_inserti32x8:
- case X86::BI__builtin_ia32_insertf64x4:
- case X86::BI__builtin_ia32_inserti64x4:
- case X86::BI__builtin_ia32_insertf64x2_256:
- case X86::BI__builtin_ia32_inserti64x2_256:
- case X86::BI__builtin_ia32_insertf32x4_256:
- case X86::BI__builtin_ia32_inserti32x4_256:
- i = 2; l = 0; u = 1;
- break;
- case X86::BI__builtin_ia32_vpermilpd:
- case X86::BI__builtin_ia32_vec_ext_v4hi:
- case X86::BI__builtin_ia32_vec_ext_v4si:
- case X86::BI__builtin_ia32_vec_ext_v4sf:
- case X86::BI__builtin_ia32_vec_ext_v4di:
- case X86::BI__builtin_ia32_extractf32x4_mask:
- case X86::BI__builtin_ia32_extracti32x4_mask:
- case X86::BI__builtin_ia32_extractf64x2_512_mask:
- case X86::BI__builtin_ia32_extracti64x2_512_mask:
- i = 1; l = 0; u = 3;
- break;
- case X86::BI_mm_prefetch:
- case X86::BI__builtin_ia32_vec_ext_v8hi:
- case X86::BI__builtin_ia32_vec_ext_v8si:
- i = 1; l = 0; u = 7;
- break;
- case X86::BI__builtin_ia32_sha1rnds4:
- case X86::BI__builtin_ia32_blendpd:
- case X86::BI__builtin_ia32_shufpd:
- case X86::BI__builtin_ia32_vec_set_v4hi:
- case X86::BI__builtin_ia32_vec_set_v4si:
- case X86::BI__builtin_ia32_vec_set_v4di:
- case X86::BI__builtin_ia32_shuf_f32x4_256:
- case X86::BI__builtin_ia32_shuf_f64x2_256:
- case X86::BI__builtin_ia32_shuf_i32x4_256:
- case X86::BI__builtin_ia32_shuf_i64x2_256:
- case X86::BI__builtin_ia32_insertf64x2_512:
- case X86::BI__builtin_ia32_inserti64x2_512:
- case X86::BI__builtin_ia32_insertf32x4:
- case X86::BI__builtin_ia32_inserti32x4:
- i = 2; l = 0; u = 3;
- break;
- case X86::BI__builtin_ia32_vpermil2pd:
- case X86::BI__builtin_ia32_vpermil2pd256:
- case X86::BI__builtin_ia32_vpermil2ps:
- case X86::BI__builtin_ia32_vpermil2ps256:
- i = 3; l = 0; u = 3;
- break;
- case X86::BI__builtin_ia32_cmpb128_mask:
- case X86::BI__builtin_ia32_cmpw128_mask:
- case X86::BI__builtin_ia32_cmpd128_mask:
- case X86::BI__builtin_ia32_cmpq128_mask:
- case X86::BI__builtin_ia32_cmpb256_mask:
- case X86::BI__builtin_ia32_cmpw256_mask:
- case X86::BI__builtin_ia32_cmpd256_mask:
- case X86::BI__builtin_ia32_cmpq256_mask:
- case X86::BI__builtin_ia32_cmpb512_mask:
- case X86::BI__builtin_ia32_cmpw512_mask:
- case X86::BI__builtin_ia32_cmpd512_mask:
- case X86::BI__builtin_ia32_cmpq512_mask:
- case X86::BI__builtin_ia32_ucmpb128_mask:
- case X86::BI__builtin_ia32_ucmpw128_mask:
- case X86::BI__builtin_ia32_ucmpd128_mask:
- case X86::BI__builtin_ia32_ucmpq128_mask:
- case X86::BI__builtin_ia32_ucmpb256_mask:
- case X86::BI__builtin_ia32_ucmpw256_mask:
- case X86::BI__builtin_ia32_ucmpd256_mask:
- case X86::BI__builtin_ia32_ucmpq256_mask:
- case X86::BI__builtin_ia32_ucmpb512_mask:
- case X86::BI__builtin_ia32_ucmpw512_mask:
- case X86::BI__builtin_ia32_ucmpd512_mask:
- case X86::BI__builtin_ia32_ucmpq512_mask:
- case X86::BI__builtin_ia32_vpcomub:
- case X86::BI__builtin_ia32_vpcomuw:
- case X86::BI__builtin_ia32_vpcomud:
- case X86::BI__builtin_ia32_vpcomuq:
- case X86::BI__builtin_ia32_vpcomb:
- case X86::BI__builtin_ia32_vpcomw:
- case X86::BI__builtin_ia32_vpcomd:
- case X86::BI__builtin_ia32_vpcomq:
- case X86::BI__builtin_ia32_vec_set_v8hi:
- case X86::BI__builtin_ia32_vec_set_v8si:
- i = 2; l = 0; u = 7;
- break;
- case X86::BI__builtin_ia32_vpermilpd256:
- case X86::BI__builtin_ia32_roundps:
- case X86::BI__builtin_ia32_roundpd:
- case X86::BI__builtin_ia32_roundps256:
- case X86::BI__builtin_ia32_roundpd256:
- case X86::BI__builtin_ia32_getmantpd128_mask:
- case X86::BI__builtin_ia32_getmantpd256_mask:
- case X86::BI__builtin_ia32_getmantps128_mask:
- case X86::BI__builtin_ia32_getmantps256_mask:
- case X86::BI__builtin_ia32_getmantpd512_mask:
- case X86::BI__builtin_ia32_getmantps512_mask:
- case X86::BI__builtin_ia32_getmantph128_mask:
- case X86::BI__builtin_ia32_getmantph256_mask:
- case X86::BI__builtin_ia32_getmantph512_mask:
- case X86::BI__builtin_ia32_vec_ext_v16qi:
- case X86::BI__builtin_ia32_vec_ext_v16hi:
- i = 1; l = 0; u = 15;
- break;
- case X86::BI__builtin_ia32_pblendd128:
- case X86::BI__builtin_ia32_blendps:
- case X86::BI__builtin_ia32_blendpd256:
- case X86::BI__builtin_ia32_shufpd256:
- case X86::BI__builtin_ia32_roundss:
- case X86::BI__builtin_ia32_roundsd:
- case X86::BI__builtin_ia32_rangepd128_mask:
- case X86::BI__builtin_ia32_rangepd256_mask:
- case X86::BI__builtin_ia32_rangepd512_mask:
- case X86::BI__builtin_ia32_rangeps128_mask:
- case X86::BI__builtin_ia32_rangeps256_mask:
- case X86::BI__builtin_ia32_rangeps512_mask:
- case X86::BI__builtin_ia32_getmantsd_round_mask:
- case X86::BI__builtin_ia32_getmantss_round_mask:
- case X86::BI__builtin_ia32_getmantsh_round_mask:
- case X86::BI__builtin_ia32_vec_set_v16qi:
- case X86::BI__builtin_ia32_vec_set_v16hi:
- i = 2; l = 0; u = 15;
- break;
- case X86::BI__builtin_ia32_vec_ext_v32qi:
- i = 1; l = 0; u = 31;
- break;
- case X86::BI__builtin_ia32_cmpps:
- case X86::BI__builtin_ia32_cmpss:
- case X86::BI__builtin_ia32_cmppd:
- case X86::BI__builtin_ia32_cmpsd:
- case X86::BI__builtin_ia32_cmpps256:
- case X86::BI__builtin_ia32_cmppd256:
- case X86::BI__builtin_ia32_cmpps128_mask:
- case X86::BI__builtin_ia32_cmppd128_mask:
- case X86::BI__builtin_ia32_cmpps256_mask:
- case X86::BI__builtin_ia32_cmppd256_mask:
- case X86::BI__builtin_ia32_cmpps512_mask:
- case X86::BI__builtin_ia32_cmppd512_mask:
- case X86::BI__builtin_ia32_cmpsd_mask:
- case X86::BI__builtin_ia32_cmpss_mask:
- case X86::BI__builtin_ia32_vec_set_v32qi:
- i = 2; l = 0; u = 31;
- break;
- case X86::BI__builtin_ia32_permdf256:
- case X86::BI__builtin_ia32_permdi256:
- case X86::BI__builtin_ia32_permdf512:
- case X86::BI__builtin_ia32_permdi512:
- case X86::BI__builtin_ia32_vpermilps:
- case X86::BI__builtin_ia32_vpermilps256:
- case X86::BI__builtin_ia32_vpermilpd512:
- case X86::BI__builtin_ia32_vpermilps512:
- case X86::BI__builtin_ia32_pshufd:
- case X86::BI__builtin_ia32_pshufd256:
- case X86::BI__builtin_ia32_pshufd512:
- case X86::BI__builtin_ia32_pshufhw:
- case X86::BI__builtin_ia32_pshufhw256:
- case X86::BI__builtin_ia32_pshufhw512:
- case X86::BI__builtin_ia32_pshuflw:
- case X86::BI__builtin_ia32_pshuflw256:
- case X86::BI__builtin_ia32_pshuflw512:
- case X86::BI__builtin_ia32_vcvtps2ph:
- case X86::BI__builtin_ia32_vcvtps2ph_mask:
- case X86::BI__builtin_ia32_vcvtps2ph256:
- case X86::BI__builtin_ia32_vcvtps2ph256_mask:
- case X86::BI__builtin_ia32_vcvtps2ph512_mask:
- case X86::BI__builtin_ia32_rndscaleps_128_mask:
- case X86::BI__builtin_ia32_rndscalepd_128_mask:
- case X86::BI__builtin_ia32_rndscaleps_256_mask:
- case X86::BI__builtin_ia32_rndscalepd_256_mask:
- case X86::BI__builtin_ia32_rndscaleps_mask:
- case X86::BI__builtin_ia32_rndscalepd_mask:
- case X86::BI__builtin_ia32_rndscaleph_mask:
- case X86::BI__builtin_ia32_reducepd128_mask:
- case X86::BI__builtin_ia32_reducepd256_mask:
- case X86::BI__builtin_ia32_reducepd512_mask:
- case X86::BI__builtin_ia32_reduceps128_mask:
- case X86::BI__builtin_ia32_reduceps256_mask:
- case X86::BI__builtin_ia32_reduceps512_mask:
- case X86::BI__builtin_ia32_reduceph128_mask:
- case X86::BI__builtin_ia32_reduceph256_mask:
- case X86::BI__builtin_ia32_reduceph512_mask:
- case X86::BI__builtin_ia32_prold512:
- case X86::BI__builtin_ia32_prolq512:
- case X86::BI__builtin_ia32_prold128:
- case X86::BI__builtin_ia32_prold256:
- case X86::BI__builtin_ia32_prolq128:
- case X86::BI__builtin_ia32_prolq256:
- case X86::BI__builtin_ia32_prord512:
- case X86::BI__builtin_ia32_prorq512:
- case X86::BI__builtin_ia32_prord128:
- case X86::BI__builtin_ia32_prord256:
- case X86::BI__builtin_ia32_prorq128:
- case X86::BI__builtin_ia32_prorq256:
- case X86::BI__builtin_ia32_fpclasspd128_mask:
- case X86::BI__builtin_ia32_fpclasspd256_mask:
- case X86::BI__builtin_ia32_fpclassps128_mask:
- case X86::BI__builtin_ia32_fpclassps256_mask:
- case X86::BI__builtin_ia32_fpclassps512_mask:
- case X86::BI__builtin_ia32_fpclasspd512_mask:
- case X86::BI__builtin_ia32_fpclassph128_mask:
- case X86::BI__builtin_ia32_fpclassph256_mask:
- case X86::BI__builtin_ia32_fpclassph512_mask:
- case X86::BI__builtin_ia32_fpclasssd_mask:
- case X86::BI__builtin_ia32_fpclassss_mask:
- case X86::BI__builtin_ia32_fpclasssh_mask:
- case X86::BI__builtin_ia32_pslldqi128_byteshift:
- case X86::BI__builtin_ia32_pslldqi256_byteshift:
- case X86::BI__builtin_ia32_pslldqi512_byteshift:
- case X86::BI__builtin_ia32_psrldqi128_byteshift:
- case X86::BI__builtin_ia32_psrldqi256_byteshift:
- case X86::BI__builtin_ia32_psrldqi512_byteshift:
- case X86::BI__builtin_ia32_kshiftliqi:
- case X86::BI__builtin_ia32_kshiftlihi:
- case X86::BI__builtin_ia32_kshiftlisi:
- case X86::BI__builtin_ia32_kshiftlidi:
- case X86::BI__builtin_ia32_kshiftriqi:
- case X86::BI__builtin_ia32_kshiftrihi:
- case X86::BI__builtin_ia32_kshiftrisi:
- case X86::BI__builtin_ia32_kshiftridi:
- i = 1; l = 0; u = 255;
- break;
- case X86::BI__builtin_ia32_vperm2f128_pd256:
- case X86::BI__builtin_ia32_vperm2f128_ps256:
- case X86::BI__builtin_ia32_vperm2f128_si256:
- case X86::BI__builtin_ia32_permti256:
- case X86::BI__builtin_ia32_pblendw128:
- case X86::BI__builtin_ia32_pblendw256:
- case X86::BI__builtin_ia32_blendps256:
- case X86::BI__builtin_ia32_pblendd256:
- case X86::BI__builtin_ia32_palignr128:
- case X86::BI__builtin_ia32_palignr256:
- case X86::BI__builtin_ia32_palignr512:
- case X86::BI__builtin_ia32_alignq512:
- case X86::BI__builtin_ia32_alignd512:
- case X86::BI__builtin_ia32_alignd128:
- case X86::BI__builtin_ia32_alignd256:
- case X86::BI__builtin_ia32_alignq128:
- case X86::BI__builtin_ia32_alignq256:
- case X86::BI__builtin_ia32_vcomisd:
- case X86::BI__builtin_ia32_vcomiss:
- case X86::BI__builtin_ia32_shuf_f32x4:
- case X86::BI__builtin_ia32_shuf_f64x2:
- case X86::BI__builtin_ia32_shuf_i32x4:
- case X86::BI__builtin_ia32_shuf_i64x2:
- case X86::BI__builtin_ia32_shufpd512:
- case X86::BI__builtin_ia32_shufps:
- case X86::BI__builtin_ia32_shufps256:
- case X86::BI__builtin_ia32_shufps512:
- case X86::BI__builtin_ia32_dbpsadbw128:
- case X86::BI__builtin_ia32_dbpsadbw256:
- case X86::BI__builtin_ia32_dbpsadbw512:
- case X86::BI__builtin_ia32_vpshldd128:
- case X86::BI__builtin_ia32_vpshldd256:
- case X86::BI__builtin_ia32_vpshldd512:
- case X86::BI__builtin_ia32_vpshldq128:
- case X86::BI__builtin_ia32_vpshldq256:
- case X86::BI__builtin_ia32_vpshldq512:
- case X86::BI__builtin_ia32_vpshldw128:
- case X86::BI__builtin_ia32_vpshldw256:
- case X86::BI__builtin_ia32_vpshldw512:
- case X86::BI__builtin_ia32_vpshrdd128:
- case X86::BI__builtin_ia32_vpshrdd256:
- case X86::BI__builtin_ia32_vpshrdd512:
- case X86::BI__builtin_ia32_vpshrdq128:
- case X86::BI__builtin_ia32_vpshrdq256:
- case X86::BI__builtin_ia32_vpshrdq512:
- case X86::BI__builtin_ia32_vpshrdw128:
- case X86::BI__builtin_ia32_vpshrdw256:
- case X86::BI__builtin_ia32_vpshrdw512:
- i = 2; l = 0; u = 255;
- break;
- case X86::BI__builtin_ia32_fixupimmpd512_mask:
- case X86::BI__builtin_ia32_fixupimmpd512_maskz:
- case X86::BI__builtin_ia32_fixupimmps512_mask:
- case X86::BI__builtin_ia32_fixupimmps512_maskz:
- case X86::BI__builtin_ia32_fixupimmsd_mask:
- case X86::BI__builtin_ia32_fixupimmsd_maskz:
- case X86::BI__builtin_ia32_fixupimmss_mask:
- case X86::BI__builtin_ia32_fixupimmss_maskz:
- case X86::BI__builtin_ia32_fixupimmpd128_mask:
- case X86::BI__builtin_ia32_fixupimmpd128_maskz:
- case X86::BI__builtin_ia32_fixupimmpd256_mask:
- case X86::BI__builtin_ia32_fixupimmpd256_maskz:
- case X86::BI__builtin_ia32_fixupimmps128_mask:
- case X86::BI__builtin_ia32_fixupimmps128_maskz:
- case X86::BI__builtin_ia32_fixupimmps256_mask:
- case X86::BI__builtin_ia32_fixupimmps256_maskz:
- case X86::BI__builtin_ia32_pternlogd512_mask:
- case X86::BI__builtin_ia32_pternlogd512_maskz:
- case X86::BI__builtin_ia32_pternlogq512_mask:
- case X86::BI__builtin_ia32_pternlogq512_maskz:
- case X86::BI__builtin_ia32_pternlogd128_mask:
- case X86::BI__builtin_ia32_pternlogd128_maskz:
- case X86::BI__builtin_ia32_pternlogd256_mask:
- case X86::BI__builtin_ia32_pternlogd256_maskz:
- case X86::BI__builtin_ia32_pternlogq128_mask:
- case X86::BI__builtin_ia32_pternlogq128_maskz:
- case X86::BI__builtin_ia32_pternlogq256_mask:
- case X86::BI__builtin_ia32_pternlogq256_maskz:
- case X86::BI__builtin_ia32_vsm3rnds2:
- i = 3; l = 0; u = 255;
- break;
- case X86::BI__builtin_ia32_gatherpfdpd:
- case X86::BI__builtin_ia32_gatherpfdps:
- case X86::BI__builtin_ia32_gatherpfqpd:
- case X86::BI__builtin_ia32_gatherpfqps:
- case X86::BI__builtin_ia32_scatterpfdpd:
- case X86::BI__builtin_ia32_scatterpfdps:
- case X86::BI__builtin_ia32_scatterpfqpd:
- case X86::BI__builtin_ia32_scatterpfqps:
- i = 4; l = 2; u = 3;
- break;
- case X86::BI__builtin_ia32_reducesd_mask:
- case X86::BI__builtin_ia32_reducess_mask:
- case X86::BI__builtin_ia32_rndscalesd_round_mask:
- case X86::BI__builtin_ia32_rndscaless_round_mask:
- case X86::BI__builtin_ia32_rndscalesh_round_mask:
- case X86::BI__builtin_ia32_reducesh_mask:
- i = 4; l = 0; u = 255;
- break;
- case X86::BI__builtin_ia32_cmpccxadd32:
- case X86::BI__builtin_ia32_cmpccxadd64:
- i = 3; l = 0; u = 15;
- break;
- }
-
- // Note that we don't force a hard error on the range check here, allowing
- // template-generated or macro-generated dead code to potentially have out-of-
- // range values. These need to code generate, but don't need to necessarily
- // make any sense. We use a warning that defaults to an error.
- return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
-}
-
-/// Given a FunctionDecl's FormatAttr, attempts to populate the FomatStringInfo
-/// parameter with the FormatAttr's correct format_idx and firstDataArg.
-/// Returns true when the format fits the function and the FormatStringInfo has
-/// been populated.
bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
bool IsVariadic, FormatStringInfo *FSI) {
if (Format->getFirstArg() == 0)
@@ -7186,6 +2971,14 @@ bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
///
/// Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
+ // Treat (smart) pointers constructed from nullptr as null, whether we can
+ // const-evaluate them or not.
+ // This must happen first: the smart pointer expr might have _Nonnull type!
+ if (isa<CXXNullPtrLiteralExpr>(
+ IgnoreExprNodes(Expr, IgnoreImplicitAsWrittenSingleStep,
+ IgnoreElidableImplicitConstructorSingleStep)))
+ return true;
+
// If the expression has non-null type, it doesn't evaluate to null.
if (auto nullability = Expr->IgnoreImplicit()->getType()->getNullability()) {
if (*nullability == NullabilityKind::NonNull)
@@ -7194,13 +2987,11 @@ static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
// As a special case, transparent unions initialized with zero are
// considered null for the purposes of the nonnull attribute.
- if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
- if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
- if (const CompoundLiteralExpr *CLE =
- dyn_cast<CompoundLiteralExpr>(Expr))
- if (const InitListExpr *ILE =
- dyn_cast<InitListExpr>(CLE->getInitializer()))
- Expr = ILE->getInit(0);
+ if (const RecordType *UT = Expr->getType()->getAsUnionType();
+ UT && UT->getDecl()->hasAttr<TransparentUnionAttr>()) {
+ if (const auto *CLE = dyn_cast<CompoundLiteralExpr>(Expr))
+ if (const auto *ILE = dyn_cast<InitListExpr>(CLE->getInitializer()))
+ Expr = ILE->getInit(0);
}
bool Result;
@@ -7218,58 +3009,6 @@ static void CheckNonNullArgument(Sema &S,
<< ArgExpr->getSourceRange());
}
-bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
- FormatStringInfo FSI;
- if ((GetFormatStringType(Format) == FST_NSString) &&
- getFormatStringInfo(Format, false, true, &FSI)) {
- Idx = FSI.FormatIdx;
- return true;
- }
- return false;
-}
-
-/// Diagnose use of %s directive in an NSString which is being passed
-/// as formatting string to formatting method.
-static void
-DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
- const NamedDecl *FDecl,
- Expr **Args,
- unsigned NumArgs) {
- unsigned Idx = 0;
- bool Format = false;
- ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
- if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
- Idx = 2;
- Format = true;
- }
- else
- for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
- if (S.GetFormatNSStringIdx(I, Idx)) {
- Format = true;
- break;
- }
- }
- if (!Format || NumArgs <= Idx)
- return;
- const Expr *FormatExpr = Args[Idx];
- if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
- FormatExpr = CSCE->getSubExpr();
- const StringLiteral *FormatString;
- if (const ObjCStringLiteral *OSL =
- dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
- FormatString = OSL->getString();
- else
- FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
- if (!FormatString)
- return;
- if (S.FormatStringHasSArg(FormatString)) {
- S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
- << "%s" << 1 << 1;
- S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
- << FDecl->getDeclName();
- }
-}
-
/// Determine whether the given type has a non-null nullability annotation.
static bool isNonNullType(QualType type) {
if (auto nullability = type->getNullability())
@@ -7374,44 +3113,6 @@ static void CheckNonNullArguments(Sema &S,
}
}
-// 16 byte ByVal alignment not due to a vector member is not honoured by XL
-// on AIX. Emit a warning here that users are generating binary incompatible
-// code to be safe.
-// Here we try to get information about the alignment of the struct member
-// from the struct passed to the caller function. We only warn when the struct
-// is passed byval, hence the series of checks and early returns if we are a not
-// passing a struct byval.
-void Sema::checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg) {
- const auto *ICE = dyn_cast<ImplicitCastExpr>(Arg->IgnoreParens());
- if (!ICE)
- return;
-
- const auto *DR = dyn_cast<DeclRefExpr>(ICE->getSubExpr());
- if (!DR)
- return;
-
- const auto *PD = dyn_cast<ParmVarDecl>(DR->getDecl());
- if (!PD || !PD->getType()->isRecordType())
- return;
-
- QualType ArgType = Arg->getType();
- for (const FieldDecl *FD :
- ArgType->castAs<RecordType>()->getDecl()->fields()) {
- if (const auto *AA = FD->getAttr<AlignedAttr>()) {
- CharUnits Alignment =
- Context.toCharUnitsFromBits(AA->getAlignment(Context));
- if (Alignment.getQuantity() == 16) {
- Diag(FD->getLocation(), diag::warn_not_xl_compatible) << FD;
- Diag(Loc, diag::note_misaligned_member_used_here) << PD;
- }
- }
- }
-}
-
-/// Warn if a pointer or reference argument passed to a function points to an
-/// object that is less aligned than the parameter. This can happen when
-/// creating a typedef with a lower alignment than the original type and then
-/// calling functions defined in terms of the original type.
void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
StringRef ParamName, QualType ArgTy,
QualType ParamTy) {
@@ -7447,9 +3148,6 @@ void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
<< ParamName << (FDecl != nullptr) << FDecl;
}
-/// Handles the checks for format strings, non-POD arguments to vararg
-/// functions, NULL arguments passed to non-NULL parameters, diagnose_if
-/// attributes and AArch64 SME attributes.
void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc,
@@ -7476,11 +3174,11 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
if (CallType != VariadicDoesNotApply &&
(!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
unsigned NumParams = Proto ? Proto->getNumParams()
- : FDecl && isa<FunctionDecl>(FDecl)
- ? cast<FunctionDecl>(FDecl)->getNumParams()
- : FDecl && isa<ObjCMethodDecl>(FDecl)
- ? cast<ObjCMethodDecl>(FDecl)->param_size()
- : 0;
+ : isa_and_nonnull<FunctionDecl>(FDecl)
+ ? cast<FunctionDecl>(FDecl)->getNumParams()
+ : isa_and_nonnull<ObjCMethodDecl>(FDecl)
+ ? cast<ObjCMethodDecl>(FDecl)->param_size()
+ : 0;
for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
// Args[ArgIdx] can be null in malformed code.
@@ -7512,6 +3210,8 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
// For variadic functions, we may have more args than parameters.
// For some K&R functions, we may have less args than parameters.
const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size());
+ bool IsScalableRet = Proto->getReturnType()->isSizelessVectorType();
+ bool IsScalableArg = false;
for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) {
// Args[ArgIdx] can be null in malformed code.
if (const Expr *Arg = Args[ArgIdx]) {
@@ -7522,9 +3222,11 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
FDecl->hasLinkage() &&
FDecl->getFormalLinkage() != Linkage::Internal &&
CallType == VariadicDoesNotApply)
- checkAIXMemberAlignment((Arg->getExprLoc()), Arg);
+ PPC().checkAIXMemberAlignment((Arg->getExprLoc()), Arg);
QualType ParamTy = Proto->getParamType(ArgIdx);
+ if (ParamTy->isSizelessVectorType())
+ IsScalableArg = true;
QualType ArgTy = Arg->getType();
CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1),
ArgTy, ParamTy);
@@ -7545,6 +3247,30 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
}
}
+ // If the call requires a streaming-mode change and has scalable vector
+ // arguments or return values, then warn the user that the streaming and
+ // non-streaming vector lengths may be different.
+ const auto *CallerFD = dyn_cast<FunctionDecl>(CurContext);
+ if (CallerFD && (!FD || !FD->getBuiltinID()) &&
+ (IsScalableArg || IsScalableRet)) {
+ bool IsCalleeStreaming =
+ ExtInfo.AArch64SMEAttributes & FunctionType::SME_PStateSMEnabledMask;
+ bool IsCalleeStreamingCompatible =
+ ExtInfo.AArch64SMEAttributes &
+ FunctionType::SME_PStateSMCompatibleMask;
+ SemaARM::ArmStreamingType CallerFnType = getArmStreamingFnType(CallerFD);
+ if (!IsCalleeStreamingCompatible &&
+ (CallerFnType == SemaARM::ArmStreamingCompatible ||
+ ((CallerFnType == SemaARM::ArmStreaming) ^ IsCalleeStreaming))) {
+ if (IsScalableArg)
+ Diag(Loc, diag::warn_sme_streaming_pass_return_vl_to_non_streaming)
+ << /*IsArg=*/true;
+ if (IsScalableRet)
+ Diag(Loc, diag::warn_sme_streaming_pass_return_vl_to_non_streaming)
+ << /*IsArg=*/false;
+ }
+ }
+
FunctionType::ArmStateValue CalleeArmZAState =
FunctionType::getArmZAState(ExtInfo.AArch64SMEAttributes);
FunctionType::ArmStateValue CalleeArmZT0State =
@@ -7553,7 +3279,7 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
CalleeArmZT0State != FunctionType::ARM_None) {
bool CallerHasZAState = false;
bool CallerHasZT0State = false;
- if (const auto *CallerFD = dyn_cast<FunctionDecl>(CurContext)) {
+ if (CallerFD) {
auto *Attr = CallerFD->getAttr<ArmNewAttr>();
if (Attr && Attr->isNewZA())
CallerHasZAState = true;
@@ -7607,8 +3333,12 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
}
-/// CheckConstructorCall - Check a constructor call for correctness and safety
-/// properties not enforced by the C type system.
+void Sema::CheckConstrainedAuto(const AutoType *AutoT, SourceLocation Loc) {
+ if (ConceptDecl *Decl = AutoT->getTypeConstraintConcept()) {
+ DiagnoseUseOfDecl(Decl, Loc);
+ }
+}
+
void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
@@ -7625,8 +3355,6 @@ void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
Loc, SourceRange(), CallType);
}
-/// CheckFunctionCall - Check a direct function call for various correctness
-/// and safety properties not strictly enforced by the C type system.
bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto) {
bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
@@ -7686,7 +3414,7 @@ bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
CheckInfNaNFunction(TheCall, FDecl);
if (getLangOpts().ObjC)
- DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);
+ ObjC().DiagnoseCStringFormatDirectiveInCFAPI(FDecl, Args, NumArgs);
unsigned CMId = FDecl->getMemoryFunctionKind();
@@ -7711,20 +3439,6 @@ bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
return false;
}
-bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
- ArrayRef<const Expr *> Args) {
- VariadicCallType CallType =
- Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply;
-
- checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args,
- /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
- CallType);
-
- CheckTCBEnforcement(lbrac, Method);
-
- return false;
-}
-
bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto) {
QualType Ty;
@@ -7756,8 +3470,6 @@ bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
return false;
}
-/// Checks function calls when a FunctionDecl or a NamedDecl is not available,
-/// such as function pointers returned from functions.
bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
TheCall->getCallee());
@@ -7805,8 +3517,8 @@ static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
}
}
-ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
- AtomicExpr::AtomicOp Op) {
+ExprResult Sema::AtomicOpsOverloaded(ExprResult TheCallResult,
+ AtomicExpr::AtomicOp Op) {
CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()};
@@ -7864,18 +3576,18 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm
&& sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
"need to update code for modified forms");
- static_assert(AtomicExpr::AO__c11_atomic_init == 0 &&
- AtomicExpr::AO__c11_atomic_fetch_min + 1 ==
- AtomicExpr::AO__atomic_load,
+ static_assert(AtomicExpr::AO__atomic_add_fetch == 0 &&
+ AtomicExpr::AO__atomic_xor_fetch + 1 ==
+ AtomicExpr::AO__c11_atomic_compare_exchange_strong,
"need to update code for modified C11 atomics");
- bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init &&
- Op <= AtomicExpr::AO__opencl_atomic_fetch_max;
- bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_load &&
- Op <= AtomicExpr::AO__hip_atomic_fetch_max;
- bool IsScoped = Op >= AtomicExpr::AO__scoped_atomic_load &&
- Op <= AtomicExpr::AO__scoped_atomic_fetch_max;
- bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init &&
- Op <= AtomicExpr::AO__c11_atomic_fetch_min) ||
+ bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_compare_exchange_strong &&
+ Op <= AtomicExpr::AO__opencl_atomic_store;
+ bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_compare_exchange_strong &&
+ Op <= AtomicExpr::AO__hip_atomic_store;
+ bool IsScoped = Op >= AtomicExpr::AO__scoped_atomic_add_fetch &&
+ Op <= AtomicExpr::AO__scoped_atomic_xor_fetch;
+ bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_compare_exchange_strong &&
+ Op <= AtomicExpr::AO__c11_atomic_store) ||
IsOpenCL;
bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
Op == AtomicExpr::AO__atomic_store_n ||
@@ -8044,7 +3756,7 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
if (!pointerType) {
Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer)
- << Ptr->getType() << Ptr->getSourceRange();
+ << Ptr->getType() << 0 << Ptr->getSourceRange();
return ExprError();
}
@@ -8073,6 +3785,16 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
}
}
+ // Pointer to object of size zero is not allowed.
+ if (RequireCompleteType(Ptr->getBeginLoc(), AtomTy,
+ diag::err_incomplete_type))
+ return ExprError();
+ if (Context.getTypeInfoInChars(AtomTy).Width.isZero()) {
+ Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer)
+ << Ptr->getType() << 1 << Ptr->getSourceRange();
+ return ExprError();
+ }
+
// For an arithmetic operation, the implied arithmetic must be well-formed.
if (Form == Arithmetic) {
// GCC does not enforce these rules for GNU atomics, but we do to help catch
@@ -8399,44 +4121,7 @@ static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
return false;
}
-bool Sema::BuiltinWasmRefNullExtern(CallExpr *TheCall) {
- if (TheCall->getNumArgs() != 0)
- return true;
-
- TheCall->setType(Context.getWebAssemblyExternrefType());
-
- return false;
-}
-
-bool Sema::BuiltinWasmRefNullFunc(CallExpr *TheCall) {
- if (TheCall->getNumArgs() != 0) {
- Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_many_args)
- << 0 /*function call*/ << /*expected*/ 0 << TheCall->getNumArgs()
- << /*is non object*/ 0;
- return true;
- }
-
- // This custom type checking code ensures that the nodes are as expected
- // in order to later on generate the necessary builtin.
- QualType Pointee = Context.getFunctionType(Context.VoidTy, {}, {});
- QualType Type = Context.getPointerType(Pointee);
- Pointee = Context.getAddrSpaceQualType(Pointee, LangAS::wasm_funcref);
- Type = Context.getAttributedType(attr::WebAssemblyFuncref, Type,
- Context.getPointerType(Pointee));
- TheCall->setType(Type);
-
- return false;
-}
-
-/// We have a call to a function like __sync_fetch_and_add, which is an
-/// overloaded function based on the pointer type of its first argument.
-/// The main BuildCallExpr routines have already promoted the types of
-/// arguments because all of these calls are prototyped as void(...).
-///
-/// This function goes through and does final semantic checking for these
-/// builtins, as well as generating any warnings.
-ExprResult
-Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
+ExprResult Sema::BuiltinAtomicOverloaded(ExprResult TheCallResult) {
CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get());
Expr *Callee = TheCall->getCallee();
DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts());
@@ -8465,7 +4150,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
if (!pointerType) {
Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
- << FirstArg->getType() << FirstArg->getSourceRange();
+ << FirstArg->getType() << 0 << FirstArg->getSourceRange();
return ExprError();
}
@@ -8473,7 +4158,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
!ValType->isBlockPointerType()) {
Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr)
- << FirstArg->getType() << FirstArg->getSourceRange();
+ << FirstArg->getType() << 0 << FirstArg->getSourceRange();
return ExprError();
}
@@ -8807,13 +4492,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
return TheCallResult;
}
-/// SemaBuiltinNontemporalOverloaded - We have a call to
-/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
-/// overloaded function based on the pointer type of its last argument.
-///
-/// This function goes through and does final semantic checking for these
-/// builtins.
-ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
+ExprResult Sema::BuiltinNontemporalOverloaded(ExprResult TheCallResult) {
CallExpr *TheCall = (CallExpr *)TheCallResult.get();
DeclRefExpr *DRE =
cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
@@ -8826,7 +4505,7 @@ ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
unsigned numArgs = isStore ? 2 : 1;
// Ensure that we have the proper number of arguments.
- if (checkArgCount(*this, TheCall, numArgs))
+ if (checkArgCount(TheCall, numArgs))
return ExprError();
// Inspect the last argument of the nontemporal builtin. This should always
@@ -8879,38 +4558,6 @@ ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
return TheCallResult;
}
-/// CheckObjCString - Checks that the argument to the builtin
-/// CFString constructor is correct
-/// Note: It might also make sense to do the UTF-16 conversion here (would
-/// simplify the backend).
-bool Sema::CheckObjCString(Expr *Arg) {
- Arg = Arg->IgnoreParenCasts();
- StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
-
- if (!Literal || !Literal->isOrdinary()) {
- Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
- << Arg->getSourceRange();
- return true;
- }
-
- if (Literal->containsNonAsciiOrNull()) {
- StringRef String = Literal->getString();
- unsigned NumBytes = String.size();
- SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
- const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
- llvm::UTF16 *ToPtr = &ToBuf[0];
-
- llvm::ConversionResult Result =
- llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
- ToPtr + NumBytes, llvm::strictConversion);
- // Check for conversion failure.
- if (Result != llvm::conversionOK)
- Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated)
- << Arg->getSourceRange();
- }
- return false;
-}
-
/// CheckObjCString - Checks that the format string argument to the os_log()
/// and os_trace() functions is correct, and converts it to const char *.
ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
@@ -9011,10 +4658,7 @@ static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn,
return false;
}
-/// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
-/// for validity. Emit an error and return true on failure; return false
-/// on success.
-bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
+bool Sema::BuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
Expr *Fn = TheCall->getCallee();
if (checkVAStartABI(*this, BuiltinID, Fn))
@@ -9023,7 +4667,7 @@ bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
// In C23 mode, va_start only needs one argument. However, the builtin still
// requires two arguments (which matches the behavior of the GCC builtin),
// <stdarg.h> passes `0` as the second argument in C23 mode.
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
// Type-check the first argument normally.
@@ -9088,7 +4732,7 @@ bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
return false;
}
-bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
+bool Sema::BuiltinVAStartARMMicrosoft(CallExpr *Call) {
auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool {
const LangOptions &LO = getLangOpts();
@@ -9151,10 +4795,8 @@ bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
return false;
}
-/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
-/// friends. This is declared to take (...), so we have to check everything.
-bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall, unsigned BuiltinID) {
- if (checkArgCount(*this, TheCall, 2))
+bool Sema::BuiltinUnorderedCompare(CallExpr *TheCall, unsigned BuiltinID) {
+ if (checkArgCount(TheCall, 2))
return true;
if (BuiltinID == Builtin::BI__builtin_isunordered &&
@@ -9193,12 +4835,9 @@ bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall, unsigned BuiltinID) {
return false;
}
-/// SemaBuiltinSemaBuiltinFPClassification - Handle functions like
-/// __builtin_isnan and friends. This is declared to take (...), so we have
-/// to check everything.
-bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs,
- unsigned BuiltinID) {
- if (checkArgCount(*this, TheCall, NumArgs))
+bool Sema::BuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs,
+ unsigned BuiltinID) {
+ if (checkArgCount(TheCall, NumArgs))
return true;
FPOptions FPO = TheCall->getFPFeaturesInEffect(getLangOpts());
@@ -9253,7 +4892,7 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs,
// vector argument can be supported in all of them.
if (ElementTy->isVectorType() && IsFPClass) {
VectorResultTy = GetSignedVectorType(ElementTy);
- ElementTy = ElementTy->getAs<VectorType>()->getElementType();
+ ElementTy = ElementTy->castAs<VectorType>()->getElementType();
}
// This operation requires a non-_Complex floating-point number.
@@ -9265,7 +4904,7 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs,
// __builtin_isfpclass has integer parameter that specify test mask. It is
// passed in (...), so it should be analyzed completely here.
if (IsFPClass)
- if (SemaBuiltinConstantArgRange(TheCall, 1, 0, llvm::fcAllFlags))
+ if (BuiltinConstantArgRange(TheCall, 1, 0, llvm::fcAllFlags))
return true;
// TODO: enable this code to all classification functions.
@@ -9281,9 +4920,8 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs,
return false;
}
-/// Perform semantic analysis for a call to __builtin_complex.
-bool Sema::SemaBuiltinComplex(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 2))
+bool Sema::BuiltinComplex(CallExpr *TheCall) {
+ if (checkArgCount(TheCall, 2))
return true;
bool Dependent = false;
@@ -9336,58 +4974,9 @@ bool Sema::SemaBuiltinComplex(CallExpr *TheCall) {
return false;
}
-// Customized Sema Checking for VSX builtins that have the following signature:
-// vector [...] builtinName(vector [...], vector [...], const int);
-// Which takes the same type of vectors (any legal vector type) for the first
-// two arguments and takes compile time constant for the third argument.
-// Example builtins are :
-// vector double vec_xxpermdi(vector double, vector double, int);
-// vector short vec_xxsldwi(vector short, vector short, int);
-bool Sema::SemaBuiltinVSX(CallExpr *TheCall) {
- unsigned ExpectedNumArgs = 3;
- if (checkArgCount(*this, TheCall, ExpectedNumArgs))
- return true;
-
- // Check the third argument is a compile time constant
- if (!TheCall->getArg(2)->isIntegerConstantExpr(Context))
- return Diag(TheCall->getBeginLoc(),
- diag::err_vsx_builtin_nonconstant_argument)
- << 3 /* argument index */ << TheCall->getDirectCallee()
- << SourceRange(TheCall->getArg(2)->getBeginLoc(),
- TheCall->getArg(2)->getEndLoc());
-
- QualType Arg1Ty = TheCall->getArg(0)->getType();
- QualType Arg2Ty = TheCall->getArg(1)->getType();
-
- // Check the type of argument 1 and argument 2 are vectors.
- SourceLocation BuiltinLoc = TheCall->getBeginLoc();
- if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) ||
- (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) {
- return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
- << TheCall->getDirectCallee()
- << SourceRange(TheCall->getArg(0)->getBeginLoc(),
- TheCall->getArg(1)->getEndLoc());
- }
-
- // Check the first two arguments are the same type.
- if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) {
- return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
- << TheCall->getDirectCallee()
- << SourceRange(TheCall->getArg(0)->getBeginLoc(),
- TheCall->getArg(1)->getEndLoc());
- }
-
- // When default clang type checking is turned off and the customized type
- // checking is used, the returning type of the function must be explicitly
- // set. Otherwise it is _Bool by default.
- TheCall->setType(Arg1Ty);
-
- return false;
-}
-
-/// SemaBuiltinShuffleVector - Handle __builtin_shufflevector.
+/// BuiltinShuffleVector - Handle __builtin_shufflevector.
// This is declared to take (...), so we have to check everything.
-ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
+ExprResult Sema::BuiltinShuffleVector(CallExpr *TheCall) {
if (TheCall->getNumArgs() < 2)
return ExprError(Diag(TheCall->getEndLoc(),
diag::err_typecheck_call_too_few_args_at_least)
@@ -9408,7 +4997,7 @@ ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
if (!LHSType->isVectorType() || !RHSType->isVectorType())
return ExprError(
Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector)
- << TheCall->getDirectCallee()
+ << TheCall->getDirectCallee() << /*isMorethantwoArgs*/ false
<< SourceRange(TheCall->getArg(0)->getBeginLoc(),
TheCall->getArg(1)->getEndLoc()));
@@ -9424,12 +5013,14 @@ ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
return ExprError(Diag(TheCall->getBeginLoc(),
diag::err_vec_builtin_incompatible_vector)
<< TheCall->getDirectCallee()
+ << /*isMorethantwoArgs*/ false
<< SourceRange(TheCall->getArg(1)->getBeginLoc(),
TheCall->getArg(1)->getEndLoc()));
} else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
return ExprError(Diag(TheCall->getBeginLoc(),
diag::err_vec_builtin_incompatible_vector)
<< TheCall->getDirectCallee()
+ << /*isMorethantwoArgs*/ false
<< SourceRange(TheCall->getArg(0)->getBeginLoc(),
TheCall->getArg(1)->getEndLoc()));
} else if (numElements != numResElements) {
@@ -9473,10 +5064,9 @@ ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
TheCall->getRParenLoc());
}
-/// SemaConvertVectorExpr - Handle __builtin_convertvector
-ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
- SourceLocation BuiltinLoc,
- SourceLocation RParenLoc) {
+ExprResult Sema::ConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
+ SourceLocation BuiltinLoc,
+ SourceLocation RParenLoc) {
ExprValueKind VK = VK_PRValue;
ExprObjectKind OK = OK_Ordinary;
QualType DstTy = TInfo->getType();
@@ -9500,14 +5090,11 @@ ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
<< E->getSourceRange());
}
- return new (Context)
- ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc);
+ return new (Context) class ConvertVectorExpr(E, TInfo, DstTy, VK, OK,
+ BuiltinLoc, RParenLoc);
}
-/// SemaBuiltinPrefetch - Handle __builtin_prefetch.
-// This is declared to take (const void*, ...) and can take two
-// optional constant int args.
-bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
+bool Sema::BuiltinPrefetch(CallExpr *TheCall) {
unsigned NumArgs = TheCall->getNumArgs();
if (NumArgs > 3)
@@ -9519,18 +5106,17 @@ bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
// Argument 0 is checked for us and the remaining arguments must be
// constant integers.
for (unsigned i = 1; i != NumArgs; ++i)
- if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3))
+ if (BuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3))
return true;
return false;
}
-/// SemaBuiltinArithmeticFence - Handle __arithmetic_fence.
-bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) {
+bool Sema::BuiltinArithmeticFence(CallExpr *TheCall) {
if (!Context.getTargetInfo().checkArithmeticFenceSupported())
return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
<< SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return true;
Expr *Arg = TheCall->getArg(0);
if (Arg->isInstantiationDependent())
@@ -9548,10 +5134,7 @@ bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) {
return false;
}
-/// SemaBuiltinAssume - Handle __assume (MS Extension).
-// __assume does not evaluate its arguments, and should warn if its argument
-// has side effects.
-bool Sema::SemaBuiltinAssume(CallExpr *TheCall) {
+bool Sema::BuiltinAssume(CallExpr *TheCall) {
Expr *Arg = TheCall->getArg(0);
if (Arg->isInstantiationDependent()) return false;
@@ -9563,10 +5146,7 @@ bool Sema::SemaBuiltinAssume(CallExpr *TheCall) {
return false;
}
-/// Handle __builtin_alloca_with_align. This is declared
-/// as (size_t, size_t) where the second size_t must be a power of 2 greater
-/// than 8.
-bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) {
+bool Sema::BuiltinAllocaWithAlign(CallExpr *TheCall) {
// The alignment must be a constant integer.
Expr *Arg = TheCall->getArg(1);
@@ -9597,10 +5177,8 @@ bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) {
return false;
}
-/// Handle __builtin_assume_aligned. This is declared
-/// as (const void*, size_t, ...) and can take one optional constant int arg.
-bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
- if (checkArgCountRange(*this, TheCall, 2, 3))
+bool Sema::BuiltinAssumeAligned(CallExpr *TheCall) {
+ if (checkArgCountRange(TheCall, 2, 3))
return true;
unsigned NumArgs = TheCall->getNumArgs();
@@ -9621,7 +5199,7 @@ bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
// We can't check the value of a dependent argument.
if (!SecondArg->isValueDependent()) {
llvm::APSInt Result;
- if (SemaBuiltinConstantArg(TheCall, 1, Result))
+ if (BuiltinConstantArg(TheCall, 1, Result))
return true;
if (!Result.isPowerOf2())
@@ -9643,7 +5221,7 @@ bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
return false;
}
-bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
+bool Sema::BuiltinOSLogFormat(CallExpr *TheCall) {
unsigned BuiltinID =
cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID();
bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size;
@@ -9723,10 +5301,8 @@ bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
return false;
}
-/// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
-/// TheCall is a constant expression.
-bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
- llvm::APSInt &Result) {
+bool Sema::BuiltinConstantArg(CallExpr *TheCall, int ArgNum,
+ llvm::APSInt &Result) {
Expr *Arg = TheCall->getArg(ArgNum);
DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
@@ -9741,10 +5317,8 @@ bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
return false;
}
-/// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
-/// TheCall is a constant expression in the range [Low, High].
-bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
- int Low, int High, bool RangeIsError) {
+bool Sema::BuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
+ int High, bool RangeIsError) {
if (isConstantEvaluatedContext())
return false;
llvm::APSInt Result;
@@ -9755,7 +5329,7 @@ bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
return false;
// Check constant-ness first.
- if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
+ if (BuiltinConstantArg(TheCall, ArgNum, Result))
return true;
if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
@@ -9774,10 +5348,8 @@ bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
return false;
}
-/// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr
-/// TheCall is a constant expression is a multiple of Num..
-bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
- unsigned Num) {
+bool Sema::BuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
+ unsigned Num) {
llvm::APSInt Result;
// We can't check the value of a dependent argument.
@@ -9786,7 +5358,7 @@ bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
return false;
// Check constant-ness first.
- if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
+ if (BuiltinConstantArg(TheCall, ArgNum, Result))
return true;
if (Result.getSExtValue() % Num != 0)
@@ -9796,9 +5368,7 @@ bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
return false;
}
-/// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a
-/// constant expression representing a power of 2.
-bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) {
+bool Sema::BuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) {
llvm::APSInt Result;
// We can't check the value of a dependent argument.
@@ -9807,7 +5377,7 @@ bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) {
return false;
// Check constant-ness first.
- if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
+ if (BuiltinConstantArg(TheCall, ArgNum, Result))
return true;
// Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if
@@ -9841,11 +5411,8 @@ static bool IsShiftedByte(llvm::APSInt Value) {
}
}
-/// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is
-/// a constant expression representing an arbitrary byte value shifted left by
-/// a multiple of 8 bits.
-bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
- unsigned ArgBits) {
+bool Sema::BuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
+ unsigned ArgBits) {
llvm::APSInt Result;
// We can't check the value of a dependent argument.
@@ -9854,7 +5421,7 @@ bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
return false;
// Check constant-ness first.
- if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
+ if (BuiltinConstantArg(TheCall, ArgNum, Result))
return true;
// Truncate to the given size.
@@ -9868,14 +5435,8 @@ bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
<< Arg->getSourceRange();
}
-/// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of
-/// TheCall is a constant expression representing either a shifted byte value,
-/// or a value of the form 0x??FF (i.e. a member of the arithmetic progression
-/// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some
-/// Arm MVE intrinsics.
-bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall,
- int ArgNum,
- unsigned ArgBits) {
+bool Sema::BuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
+ unsigned ArgBits) {
llvm::APSInt Result;
// We can't check the value of a dependent argument.
@@ -9884,7 +5445,7 @@ bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall,
return false;
// Check constant-ness first.
- if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
+ if (BuiltinConstantArg(TheCall, ArgNum, Result))
return true;
// Truncate to the given size.
@@ -9901,365 +5462,7 @@ bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall,
<< Arg->getSourceRange();
}
-/// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
-bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
- if (BuiltinID == AArch64::BI__builtin_arm_irg) {
- if (checkArgCount(*this, TheCall, 2))
- return true;
- Expr *Arg0 = TheCall->getArg(0);
- Expr *Arg1 = TheCall->getArg(1);
-
- ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
- if (FirstArg.isInvalid())
- return true;
- QualType FirstArgType = FirstArg.get()->getType();
- if (!FirstArgType->isAnyPointerType())
- return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
- << "first" << FirstArgType << Arg0->getSourceRange();
- TheCall->setArg(0, FirstArg.get());
-
- ExprResult SecArg = DefaultLvalueConversion(Arg1);
- if (SecArg.isInvalid())
- return true;
- QualType SecArgType = SecArg.get()->getType();
- if (!SecArgType->isIntegerType())
- return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
- << "second" << SecArgType << Arg1->getSourceRange();
-
- // Derive the return type from the pointer argument.
- TheCall->setType(FirstArgType);
- return false;
- }
-
- if (BuiltinID == AArch64::BI__builtin_arm_addg) {
- if (checkArgCount(*this, TheCall, 2))
- return true;
-
- Expr *Arg0 = TheCall->getArg(0);
- ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
- if (FirstArg.isInvalid())
- return true;
- QualType FirstArgType = FirstArg.get()->getType();
- if (!FirstArgType->isAnyPointerType())
- return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
- << "first" << FirstArgType << Arg0->getSourceRange();
- TheCall->setArg(0, FirstArg.get());
-
- // Derive the return type from the pointer argument.
- TheCall->setType(FirstArgType);
-
- // Second arg must be an constant in range [0,15]
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
- }
-
- if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
- if (checkArgCount(*this, TheCall, 2))
- return true;
- Expr *Arg0 = TheCall->getArg(0);
- Expr *Arg1 = TheCall->getArg(1);
-
- ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
- if (FirstArg.isInvalid())
- return true;
- QualType FirstArgType = FirstArg.get()->getType();
- if (!FirstArgType->isAnyPointerType())
- return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
- << "first" << FirstArgType << Arg0->getSourceRange();
-
- QualType SecArgType = Arg1->getType();
- if (!SecArgType->isIntegerType())
- return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
- << "second" << SecArgType << Arg1->getSourceRange();
- TheCall->setType(Context.IntTy);
- return false;
- }
-
- if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
- BuiltinID == AArch64::BI__builtin_arm_stg) {
- if (checkArgCount(*this, TheCall, 1))
- return true;
- Expr *Arg0 = TheCall->getArg(0);
- ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
- if (FirstArg.isInvalid())
- return true;
-
- QualType FirstArgType = FirstArg.get()->getType();
- if (!FirstArgType->isAnyPointerType())
- return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
- << "first" << FirstArgType << Arg0->getSourceRange();
- TheCall->setArg(0, FirstArg.get());
-
- // Derive the return type from the pointer argument.
- if (BuiltinID == AArch64::BI__builtin_arm_ldg)
- TheCall->setType(FirstArgType);
- return false;
- }
-
- if (BuiltinID == AArch64::BI__builtin_arm_subp) {
- Expr *ArgA = TheCall->getArg(0);
- Expr *ArgB = TheCall->getArg(1);
-
- ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA);
- ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB);
-
- if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
- return true;
-
- QualType ArgTypeA = ArgExprA.get()->getType();
- QualType ArgTypeB = ArgExprB.get()->getType();
-
- auto isNull = [&] (Expr *E) -> bool {
- return E->isNullPointerConstant(
- Context, Expr::NPC_ValueDependentIsNotNull); };
-
- // argument should be either a pointer or null
- if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
- return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
- << "first" << ArgTypeA << ArgA->getSourceRange();
-
- if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
- return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
- << "second" << ArgTypeB << ArgB->getSourceRange();
-
- // Ensure Pointee types are compatible
- if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
- ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
- QualType pointeeA = ArgTypeA->getPointeeType();
- QualType pointeeB = ArgTypeB->getPointeeType();
- if (!Context.typesAreCompatible(
- Context.getCanonicalType(pointeeA).getUnqualifiedType(),
- Context.getCanonicalType(pointeeB).getUnqualifiedType())) {
- return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible)
- << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
- << ArgB->getSourceRange();
- }
- }
-
- // at least one argument should be pointer type
- if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
- return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
- << ArgTypeA << ArgTypeB << ArgA->getSourceRange();
-
- if (isNull(ArgA)) // adopt type of the other pointer
- ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer);
-
- if (isNull(ArgB))
- ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer);
-
- TheCall->setArg(0, ArgExprA.get());
- TheCall->setArg(1, ArgExprB.get());
- TheCall->setType(Context.LongLongTy);
- return false;
- }
- assert(false && "Unhandled ARM MTE intrinsic");
- return true;
-}
-
-/// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
-/// TheCall is an ARM/AArch64 special register string literal.
-bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
- int ArgNum, unsigned ExpectedFieldNum,
- bool AllowName) {
- bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
- BuiltinID == ARM::BI__builtin_arm_wsr64 ||
- BuiltinID == ARM::BI__builtin_arm_rsr ||
- BuiltinID == ARM::BI__builtin_arm_rsrp ||
- BuiltinID == ARM::BI__builtin_arm_wsr ||
- BuiltinID == ARM::BI__builtin_arm_wsrp;
- bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
- BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
- BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
- BuiltinID == AArch64::BI__builtin_arm_wsr128 ||
- BuiltinID == AArch64::BI__builtin_arm_rsr ||
- BuiltinID == AArch64::BI__builtin_arm_rsrp ||
- BuiltinID == AArch64::BI__builtin_arm_wsr ||
- BuiltinID == AArch64::BI__builtin_arm_wsrp;
- assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");
-
- // We can't check the value of a dependent argument.
- Expr *Arg = TheCall->getArg(ArgNum);
- if (Arg->isTypeDependent() || Arg->isValueDependent())
- return false;
-
- // Check if the argument is a string literal.
- if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
- return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
- << Arg->getSourceRange();
-
- // Check the type of special register given.
- StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
- SmallVector<StringRef, 6> Fields;
- Reg.split(Fields, ":");
-
- if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
- return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
- << Arg->getSourceRange();
-
- // If the string is the name of a register then we cannot check that it is
- // valid here but if the string is of one the forms described in ACLE then we
- // can check that the supplied fields are integers and within the valid
- // ranges.
- if (Fields.size() > 1) {
- bool FiveFields = Fields.size() == 5;
-
- bool ValidString = true;
- if (IsARMBuiltin) {
- ValidString &= Fields[0].starts_with_insensitive("cp") ||
- Fields[0].starts_with_insensitive("p");
- if (ValidString)
- Fields[0] = Fields[0].drop_front(
- Fields[0].starts_with_insensitive("cp") ? 2 : 1);
-
- ValidString &= Fields[2].starts_with_insensitive("c");
- if (ValidString)
- Fields[2] = Fields[2].drop_front(1);
-
- if (FiveFields) {
- ValidString &= Fields[3].starts_with_insensitive("c");
- if (ValidString)
- Fields[3] = Fields[3].drop_front(1);
- }
- }
-
- SmallVector<int, 5> Ranges;
- if (FiveFields)
- Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
- else
- Ranges.append({15, 7, 15});
-
- for (unsigned i=0; i<Fields.size(); ++i) {
- int IntField;
- ValidString &= !Fields[i].getAsInteger(10, IntField);
- ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
- }
-
- if (!ValidString)
- return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
- << Arg->getSourceRange();
- } else if (IsAArch64Builtin && Fields.size() == 1) {
- // This code validates writes to PSTATE registers.
-
- // Not a write.
- if (TheCall->getNumArgs() != 2)
- return false;
-
- // The 128-bit system register accesses do not touch PSTATE.
- if (BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
- BuiltinID == AArch64::BI__builtin_arm_wsr128)
- return false;
-
- // These are the named PSTATE accesses using "MSR (immediate)" instructions,
- // along with the upper limit on the immediates allowed.
- auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg)
- .CaseLower("spsel", 15)
- .CaseLower("daifclr", 15)
- .CaseLower("daifset", 15)
- .CaseLower("pan", 15)
- .CaseLower("uao", 15)
- .CaseLower("dit", 15)
- .CaseLower("ssbs", 15)
- .CaseLower("tco", 15)
- .CaseLower("allint", 1)
- .CaseLower("pm", 1)
- .Default(std::nullopt);
-
- // If this is not a named PSTATE, just continue without validating, as this
- // will be lowered to an "MSR (register)" instruction directly
- if (!MaxLimit)
- return false;
-
- // Here we only allow constants in the range for that pstate, as required by
- // the ACLE.
- //
- // While clang also accepts the names of system registers in its ACLE
- // intrinsics, we prevent this with the PSTATE names used in MSR (immediate)
- // as the value written via a register is different to the value used as an
- // immediate to have the same effect. e.g., for the instruction `msr tco,
- // x0`, it is bit 25 of register x0 that is written into PSTATE.TCO, but
- // with `msr tco, #imm`, it is bit 0 of xN that is written into PSTATE.TCO.
- //
- // If a programmer wants to codegen the MSR (register) form of `msr tco,
- // xN`, they can still do so by specifying the register using five
- // colon-separated numbers in a string.
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, *MaxLimit);
- }
-
- return false;
-}
-
-/// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity.
-/// Emit an error and return true on failure; return false on success.
-/// TypeStr is a string containing the type descriptor of the value returned by
-/// the builtin and the descriptors of the expected type of the arguments.
-bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
- const char *TypeStr) {
-
- assert((TypeStr[0] != '\0') &&
- "Invalid types in PPC MMA builtin declaration");
-
- unsigned Mask = 0;
- unsigned ArgNum = 0;
-
- // The first type in TypeStr is the type of the value returned by the
- // builtin. So we first read that type and change the type of TheCall.
- QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
- TheCall->setType(type);
-
- while (*TypeStr != '\0') {
- Mask = 0;
- QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
- if (ArgNum >= TheCall->getNumArgs()) {
- ArgNum++;
- break;
- }
-
- Expr *Arg = TheCall->getArg(ArgNum);
- QualType PassedType = Arg->getType();
- QualType StrippedRVType = PassedType.getCanonicalType();
-
- // Strip Restrict/Volatile qualifiers.
- if (StrippedRVType.isRestrictQualified() ||
- StrippedRVType.isVolatileQualified())
- StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType();
-
- // The only case where the argument type and expected type are allowed to
- // mismatch is if the argument type is a non-void pointer (or array) and
- // expected type is a void pointer.
- if (StrippedRVType != ExpectedType)
- if (!(ExpectedType->isVoidPointerType() &&
- (StrippedRVType->isPointerType() || StrippedRVType->isArrayType())))
- return Diag(Arg->getBeginLoc(),
- diag::err_typecheck_convert_incompatible)
- << PassedType << ExpectedType << 1 << 0 << 0;
-
- // If the value of the Mask is not 0, we have a constraint in the size of
- // the integer argument so here we ensure the argument is a constant that
- // is in the valid range.
- if (Mask != 0 &&
- SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true))
- return true;
-
- ArgNum++;
- }
-
- // In case we exited early from the previous loop, there are other types to
- // read from TypeStr. So we need to read them all to ensure we have the right
- // number of arguments in TheCall and if it is not the case, to display a
- // better error message.
- while (*TypeStr != '\0') {
- (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
- ArgNum++;
- }
- if (checkArgCount(*this, TheCall, ArgNum))
- return true;
-
- return false;
-}
-
-/// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
-/// This checks that the target supports __builtin_longjmp and
-/// that val is a constant 1.
-bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
+bool Sema::BuiltinLongjmp(CallExpr *TheCall) {
if (!Context.getTargetInfo().hasSjLjLowering())
return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported)
<< SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
@@ -10268,7 +5471,7 @@ bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
llvm::APSInt Result;
// TODO: This is less than ideal. Overload this to take a value.
- if (SemaBuiltinConstantArg(TheCall, 1, Result))
+ if (BuiltinConstantArg(TheCall, 1, Result))
return true;
if (Result != 1)
@@ -10278,9 +5481,7 @@ bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
return false;
}
-/// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]).
-/// This checks that the target supports __builtin_setjmp.
-bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) {
+bool Sema::BuiltinSetjmp(CallExpr *TheCall) {
if (!Context.getTargetInfo().hasSjLjLowering())
return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported)
<< SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
@@ -10840,9 +6041,6 @@ Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
.Default(FST_Unknown);
}
-/// CheckFormatArguments - Check calls to printf and scanf (and similar
-/// functions) for correct use of format strings.
-/// Returns true if a format string has been fully checked.
bool Sema::CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args, bool IsCXXMember,
VariadicCallType CallType, SourceLocation Loc,
@@ -11397,7 +6595,7 @@ void CheckFormatHandler::EmitFormatDiagnostic(
}
}
-//===--- CHECK: Printf format string checking ------------------------------===//
+//===--- CHECK: Printf format string checking -----------------------------===//
namespace {
@@ -12022,6 +7220,19 @@ isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) {
S.Context.getFloatingTypeOrder(From, To) < 0;
}
+static analyze_format_string::ArgType::MatchKind
+handleFormatSignedness(analyze_format_string::ArgType::MatchKind Match,
+ DiagnosticsEngine &Diags, SourceLocation Loc) {
+ if (Match == analyze_format_string::ArgType::NoMatchSignedness) {
+ Match =
+ Diags.isIgnored(
+ diag::warn_format_conversion_argument_type_mismatch_signedness, Loc)
+ ? analyze_format_string::ArgType::Match
+ : analyze_format_string::ArgType::NoMatch;
+ }
+ return Match;
+}
+
bool
CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
const char *StartSpecifier,
@@ -12063,8 +7274,22 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
return true;
}
+ // Diagnose attempts to use '%P' with ObjC object types, which will result in
+ // dumping raw class data (like is-a pointer), not actual data.
+ if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::PArg &&
+ ExprTy->isObjCObjectPointerType()) {
+ const CharSourceRange &CSR =
+ getSpecifierRange(StartSpecifier, SpecifierLen);
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_with_objc_pointer),
+ E->getExprLoc(), false, CSR);
+ return true;
+ }
+
ArgType::MatchKind ImplicitMatch = ArgType::NoMatch;
ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy);
+ ArgType::MatchKind OrigMatch = Match;
+
+ Match = handleFormatSignedness(Match, S.getDiagnostics(), E->getExprLoc());
if (Match == ArgType::Match)
return true;
@@ -12088,6 +7313,14 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
ICE->getType() == S.Context.UnsignedIntTy) {
// All further checking is done on the subexpression
ImplicitMatch = AT.matchesType(S.Context, ExprTy);
+ if (OrigMatch == ArgType::NoMatchSignedness &&
+ ImplicitMatch != ArgType::NoMatchSignedness)
+ // If the original match was a signedness match this match on the
+ // implicit cast type also need to be signedness match otherwise we
+ // might introduce new unexpected warnings from -Wformat-signedness.
+ return true;
+ ImplicitMatch = handleFormatSignedness(
+ ImplicitMatch, S.getDiagnostics(), E->getExprLoc());
if (ImplicitMatch == ArgType::Match)
return true;
}
@@ -12209,6 +7442,7 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
case ArgType::Match:
case ArgType::MatchPromotion:
case ArgType::NoMatchPromotionTypeConfusion:
+ case ArgType::NoMatchSignedness:
llvm_unreachable("expected non-matching");
case ArgType::NoMatchPedantic:
Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
@@ -12244,8 +7478,10 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
CastFix << (S.LangOpts.CPlusPlus ? ">" : ")");
SmallVector<FixItHint,4> Hints;
- if (AT.matchesType(S.Context, IntendedTy) != ArgType::Match ||
- ShouldNotPrintDirectly)
+ ArgType::MatchKind IntendedMatch = AT.matchesType(S.Context, IntendedTy);
+ IntendedMatch = handleFormatSignedness(IntendedMatch, S.getDiagnostics(),
+ E->getExprLoc());
+ if ((IntendedMatch != ArgType::Match) || ShouldNotPrintDirectly)
Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str()));
if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) {
@@ -12292,10 +7528,15 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
// In this case, the expression could be printed using a different
// specifier, but we've decided that the specifier is probably correct
// and we should cast instead. Just use the normal warning message.
+
+ unsigned Diag =
+ IsScopedEnum
+ ? diag::warn_format_conversion_argument_type_mismatch_pedantic
+ : diag::warn_format_conversion_argument_type_mismatch;
+
EmitFormatDiagnostic(
- S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
- << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum
- << E->getSourceRange(),
+ S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy
+ << IsEnum << E->getSourceRange(),
E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints);
}
}
@@ -12314,6 +7555,7 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
case ArgType::Match:
case ArgType::MatchPromotion:
case ArgType::NoMatchPromotionTypeConfusion:
+ case ArgType::NoMatchSignedness:
llvm_unreachable("expected non-matching");
case ArgType::NoMatchPedantic:
Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
@@ -12525,6 +7767,7 @@ bool CheckScanfHandler::HandleScanfSpecifier(
analyze_format_string::ArgType::MatchKind Match =
AT.matchesType(S.Context, Ex->getType());
+ Match = handleFormatSignedness(Match, S.getDiagnostics(), Ex->getExprLoc());
bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic;
if (Match == analyze_format_string::ArgType::Match)
return true;
@@ -12586,7 +7829,7 @@ static void CheckFormatString(
const ConstantArrayType *T =
S.Context.getAsConstantArrayType(FExpr->getType());
assert(T && "String literal not of constant array type!");
- size_t TypeSize = T->getSize().getZExtValue();
+ size_t TypeSize = T->getZExtSize();
size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
const unsigned numDataArgs = Args.size() - firstDataArg;
@@ -12646,7 +7889,7 @@ bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
// Account for cases where the string literal is truncated in a declaration.
const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
assert(T && "String literal not of constant array type!");
- size_t TypeSize = T->getSize().getZExtValue();
+ size_t TypeSize = T->getZExtSize();
size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
getLangOpts(),
@@ -12975,7 +8218,6 @@ void Sema::CheckInfNaNFunction(const CallExpr *Call,
<< 0 << 0 << Call->getSourceRange();
}
-// Warn when using the wrong abs() function.
void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl) {
if (Call->getNumArgs() != 1)
@@ -13384,13 +8626,6 @@ static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) {
}
}
-/// Check for dangerous or invalid arguments to memset().
-///
-/// This issues warnings on known problematic, dangerous or unspecified
-/// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp'
-/// function calls.
-///
-/// \param Call The call expression to diagnose.
void Sema::CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName) {
@@ -13609,7 +8844,7 @@ static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty,
// Only handle constant-sized or VLAs, but not flexible members.
if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) {
// Only issue the FIXIT for arrays of size > 1.
- if (CAT->getSize().getSExtValue() <= 1)
+ if (CAT->getZExtSize() <= 1)
return false;
} else if (!Ty->isVariableArrayType()) {
return false;
@@ -13617,8 +8852,6 @@ static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty,
return true;
}
-// Warn if the user has made the 'size' argument to strlcpy or strlcat
-// be the size of the source, instead of the destination.
void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName) {
@@ -13704,9 +8937,6 @@ static const Expr *getStrlenExprArg(const Expr *E) {
return nullptr;
}
-// Warn on anti-patterns as the 'size' argument to strncat.
-// The correct size argument should look like following:
-// strncat(dst, src, sizeof(dst) - strlen(dest) - 1);
void Sema::CheckStrncatArguments(const CallExpr *CE,
IdentifierInfo *FnName) {
// Don't crash if the user has the wrong number of arguments.
@@ -13865,7 +9095,6 @@ void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName,
}
} // namespace
-/// Alerts the user that they are attempting to free a non-malloc'd object.
void Sema::CheckFreeArguments(const CallExpr *E) {
const std::string CalleeName =
cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString();
@@ -13940,11 +9169,9 @@ Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
// PPC MMA non-pointer types are not allowed as return type. Checking the type
// here prevent the user from using a PPC MMA type as trailing return type.
if (Context.getTargetInfo().getTriple().isPPC64())
- CheckPPCMMAType(RetValExp->getType(), ReturnLoc);
+ PPC().CheckPPCMMAType(RetValExp->getType(), ReturnLoc);
}
-/// Check for comparisons of floating-point values using == and !=. Issue a
-/// warning if the comparison is not likely to do what the programmer intended.
void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS,
BinaryOperatorKind Opcode) {
if (!BinaryOperator::isEqualityOp(Opcode))
@@ -14751,7 +9978,7 @@ static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
// Special case for ObjC BOOL on targets where its a typedef for a signed char
// (Namely, macOS). FIXME: IntRange::forValueOfType should do this.
bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
- S.NSAPIObj->isObjCBOOLType(OtherT) &&
+ S.ObjC().NSAPIObj->isObjCBOOLType(OtherT) &&
OtherT->isSpecificBuiltinType(BuiltinType::SChar);
// Whether we're treating Other as being a bool because of the form of
@@ -15179,26 +10406,6 @@ static void DiagnoseImpCast(Sema &S, Expr *E, QualType T,
DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow);
}
-static bool isObjCSignedCharBool(Sema &S, QualType Ty) {
- return Ty->isSpecificBuiltinType(BuiltinType::SChar) &&
- S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty);
-}
-
-static void adornObjCBoolConversionDiagWithTernaryFixit(
- Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) {
- Expr *Ignored = SourceExpr->IgnoreImplicit();
- if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored))
- Ignored = OVE->getSourceExpr();
- bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) ||
- isa<BinaryOperator>(Ignored) ||
- isa<CXXOperatorCallExpr>(Ignored);
- SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc());
- if (NeedsParens)
- Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(")
- << FixItHint::CreateInsertion(EndLoc, ")");
- Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO");
-}
-
/// Diagnose an implicit cast from a floating point value to an integer value.
static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
SourceLocation CContext) {
@@ -15218,11 +10425,10 @@ static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
bool IsConstant =
E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects);
if (!IsConstant) {
- if (isObjCSignedCharBool(S, T)) {
- return adornObjCBoolConversionDiagWithTernaryFixit(
- S, E,
- S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool)
- << E->getType());
+ if (S.ObjC().isSignedCharBool(T)) {
+ return S.ObjC().adornBoolConversionDiagWithTernaryFixit(
+ E, S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool)
+ << E->getType());
}
return DiagnoseImpCast(S, E, T, CContext,
@@ -15246,11 +10452,10 @@ static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
precision = (precision * 59 + 195) / 196;
Value.toString(PrettySourceValue, precision);
- if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) {
- return adornObjCBoolConversionDiagWithTernaryFixit(
- S, E,
- S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool)
- << PrettySourceValue);
+ if (S.ObjC().isSignedCharBool(T) && IntegerValue != 0 && IntegerValue != 1) {
+ return S.ObjC().adornBoolConversionDiagWithTernaryFixit(
+ E, S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool)
+ << PrettySourceValue);
}
if (Result == llvm::APFloat::opOK && isExact) {
@@ -15451,102 +10656,6 @@ static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
S.getFixItZeroLiteralForType(T, Loc));
}
-static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
- ObjCArrayLiteral *ArrayLiteral);
-
-static void
-checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
- ObjCDictionaryLiteral *DictionaryLiteral);
-
-/// Check a single element within a collection literal against the
-/// target element type.
-static void checkObjCCollectionLiteralElement(Sema &S,
- QualType TargetElementType,
- Expr *Element,
- unsigned ElementKind) {
- // Skip a bitcast to 'id' or qualified 'id'.
- if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) {
- if (ICE->getCastKind() == CK_BitCast &&
- ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>())
- Element = ICE->getSubExpr();
- }
-
- QualType ElementType = Element->getType();
- ExprResult ElementResult(Element);
- if (ElementType->getAs<ObjCObjectPointerType>() &&
- S.CheckSingleAssignmentConstraints(TargetElementType,
- ElementResult,
- false, false)
- != Sema::Compatible) {
- S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element)
- << ElementType << ElementKind << TargetElementType
- << Element->getSourceRange();
- }
-
- if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element))
- checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral);
- else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element))
- checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral);
-}
-
-/// Check an Objective-C array literal being converted to the given
-/// target type.
-static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
- ObjCArrayLiteral *ArrayLiteral) {
- if (!S.NSArrayDecl)
- return;
-
- const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
- if (!TargetObjCPtr)
- return;
-
- if (TargetObjCPtr->isUnspecialized() ||
- TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
- != S.NSArrayDecl->getCanonicalDecl())
- return;
-
- auto TypeArgs = TargetObjCPtr->getTypeArgs();
- if (TypeArgs.size() != 1)
- return;
-
- QualType TargetElementType = TypeArgs[0];
- for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) {
- checkObjCCollectionLiteralElement(S, TargetElementType,
- ArrayLiteral->getElement(I),
- 0);
- }
-}
-
-/// Check an Objective-C dictionary literal being converted to the given
-/// target type.
-static void
-checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
- ObjCDictionaryLiteral *DictionaryLiteral) {
- if (!S.NSDictionaryDecl)
- return;
-
- const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
- if (!TargetObjCPtr)
- return;
-
- if (TargetObjCPtr->isUnspecialized() ||
- TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
- != S.NSDictionaryDecl->getCanonicalDecl())
- return;
-
- auto TypeArgs = TargetObjCPtr->getTypeArgs();
- if (TypeArgs.size() != 2)
- return;
-
- QualType TargetKeyType = TypeArgs[0];
- QualType TargetObjectType = TypeArgs[1];
- for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) {
- auto Element = DictionaryLiteral->getKeyValueElement(I);
- checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1);
- checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2);
- }
-}
-
// Helper function to filter out cases for constant width constant conversion.
// Don't warn on char array initialization or for non-decimal values.
static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
@@ -15622,14 +10731,12 @@ static void DiagnoseIntInBoolContext(Sema &S, Expr *E) {
}
}
-static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
- SourceLocation CC,
- bool *ICContext = nullptr,
- bool IsListInit = false) {
+void Sema::CheckImplicitConversion(Expr *E, QualType T, SourceLocation CC,
+ bool *ICContext, bool IsListInit) {
if (E->isTypeDependent() || E->isValueDependent()) return;
- const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
- const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
+ const Type *Source = Context.getCanonicalType(E->getType()).getTypePtr();
+ const Type *Target = Context.getCanonicalType(T).getTypePtr();
if (Source == Target) return;
if (Target->isDependentType()) return;
@@ -15642,7 +10749,7 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
return;
if (Source->isAtomicType())
- S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);
+ Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);
// Diagnose implicit casts to bool.
if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
@@ -15650,34 +10757,32 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
// Warn on string literal to bool. Checks for string literals in logical
// and expressions, for instance, assert(0 && "error here"), are
// prevented by a check in AnalyzeImplicitConversions().
- return DiagnoseImpCast(S, E, T, CC,
+ return DiagnoseImpCast(*this, E, T, CC,
diag::warn_impcast_string_literal_to_bool);
if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) ||
isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) {
// This covers the literal expressions that evaluate to Objective-C
// objects.
- return DiagnoseImpCast(S, E, T, CC,
+ return DiagnoseImpCast(*this, E, T, CC,
diag::warn_impcast_objective_c_literal_to_bool);
}
if (Source->isPointerType() || Source->canDecayToPointerType()) {
// Warn on pointer to bool conversion that is always true.
- S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
- SourceRange(CC));
+ DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
+ SourceRange(CC));
}
}
// If the we're converting a constant to an ObjC BOOL on a platform where BOOL
// is a typedef for signed char (macOS), then that constant value has to be 1
// or 0.
- if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) {
+ if (ObjC().isSignedCharBool(T) && Source->isIntegralType(Context)) {
Expr::EvalResult Result;
- if (E->EvaluateAsInt(Result, S.getASTContext(),
- Expr::SE_AllowSideEffects)) {
+ if (E->EvaluateAsInt(Result, getASTContext(), Expr::SE_AllowSideEffects)) {
if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) {
- adornObjCBoolConversionDiagWithTernaryFixit(
- S, E,
- S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool)
- << toString(Result.Val.getInt(), 10));
+ ObjC().adornBoolConversionDiagWithTernaryFixit(
+ E, Diag(CC, diag::warn_impcast_constant_value_to_objc_bool)
+ << toString(Result.Val.getInt(), 10));
}
return;
}
@@ -15686,35 +10791,43 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
// Check implicit casts from Objective-C collection literals to specialized
// collection types, e.g., NSArray<NSString *> *.
if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E))
- checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral);
+ ObjC().checkArrayLiteral(QualType(Target, 0), ArrayLiteral);
else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E))
- checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral);
+ ObjC().checkDictionaryLiteral(QualType(Target, 0), DictionaryLiteral);
// Strip vector types.
if (isa<VectorType>(Source)) {
if (Target->isSveVLSBuiltinType() &&
- (S.Context.areCompatibleSveTypes(QualType(Target, 0),
- QualType(Source, 0)) ||
- S.Context.areLaxCompatibleSveTypes(QualType(Target, 0),
- QualType(Source, 0))))
+ (Context.areCompatibleSveTypes(QualType(Target, 0),
+ QualType(Source, 0)) ||
+ Context.areLaxCompatibleSveTypes(QualType(Target, 0),
+ QualType(Source, 0))))
return;
if (Target->isRVVVLSBuiltinType() &&
- (S.Context.areCompatibleRVVTypes(QualType(Target, 0),
- QualType(Source, 0)) ||
- S.Context.areLaxCompatibleRVVTypes(QualType(Target, 0),
- QualType(Source, 0))))
+ (Context.areCompatibleRVVTypes(QualType(Target, 0),
+ QualType(Source, 0)) ||
+ Context.areLaxCompatibleRVVTypes(QualType(Target, 0),
+ QualType(Source, 0))))
return;
if (!isa<VectorType>(Target)) {
- if (S.SourceMgr.isInSystemMacro(CC))
+ if (SourceMgr.isInSystemMacro(CC))
return;
- return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar);
+ return DiagnoseImpCast(*this, E, T, CC, diag::warn_impcast_vector_scalar);
+ } else if (getLangOpts().HLSL &&
+ Target->castAs<VectorType>()->getNumElements() <
+ Source->castAs<VectorType>()->getNumElements()) {
+ // Diagnose vector truncation but don't return. We may also want to
+ // diagnose an element conversion.
+ DiagnoseImpCast(*this, E, T, CC,
+ diag::warn_hlsl_impcast_vector_truncation);
}
// If the vector cast is cast between two vectors of the same size, it is
- // a bitcast, not a conversion.
- if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target))
+ // a bitcast, not a conversion, except under HLSL where it is a conversion.
+ if (!getLangOpts().HLSL &&
+ Context.getTypeSize(Source) == Context.getTypeSize(Target))
return;
Source = cast<VectorType>(Source)->getElementType().getTypePtr();
@@ -15726,11 +10839,11 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
// Strip complex types.
if (isa<ComplexType>(Source)) {
if (!isa<ComplexType>(Target)) {
- if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType())
+ if (SourceMgr.isInSystemMacro(CC) || Target->isBooleanType())
return;
- return DiagnoseImpCast(S, E, T, CC,
- S.getLangOpts().CPlusPlus
+ return DiagnoseImpCast(*this, E, T, CC,
+ getLangOpts().CPlusPlus
? diag::err_impcast_complex_scalar
: diag::warn_impcast_complex_scalar);
}
@@ -15745,25 +10858,25 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
// Strip SVE vector types
if (SourceBT && SourceBT->isSveVLSBuiltinType()) {
// Need the original target type for vector type checks
- const Type *OriginalTarget = S.Context.getCanonicalType(T).getTypePtr();
+ const Type *OriginalTarget = Context.getCanonicalType(T).getTypePtr();
// Handle conversion from scalable to fixed when msve-vector-bits is
// specified
- if (S.Context.areCompatibleSveTypes(QualType(OriginalTarget, 0),
- QualType(Source, 0)) ||
- S.Context.areLaxCompatibleSveTypes(QualType(OriginalTarget, 0),
- QualType(Source, 0)))
+ if (Context.areCompatibleSveTypes(QualType(OriginalTarget, 0),
+ QualType(Source, 0)) ||
+ Context.areLaxCompatibleSveTypes(QualType(OriginalTarget, 0),
+ QualType(Source, 0)))
return;
// If the vector cast is cast between two vectors of the same size, it is
// a bitcast, not a conversion.
- if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target))
+ if (Context.getTypeSize(Source) == Context.getTypeSize(Target))
return;
- Source = SourceBT->getSveEltType(S.Context).getTypePtr();
+ Source = SourceBT->getSveEltType(Context).getTypePtr();
}
if (TargetBT && TargetBT->isSveVLSBuiltinType())
- Target = TargetBT->getSveEltType(S.Context).getTypePtr();
+ Target = TargetBT->getSveEltType(Context).getTypePtr();
// If the source is floating point...
if (SourceBT && SourceBT->isFloatingPoint()) {
@@ -15771,41 +10884,42 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
if (TargetBT && TargetBT->isFloatingPoint()) {
// ...then warn if we're dropping FP rank.
- int Order = S.getASTContext().getFloatingTypeSemanticOrder(
+ int Order = getASTContext().getFloatingTypeSemanticOrder(
QualType(SourceBT, 0), QualType(TargetBT, 0));
if (Order > 0) {
// Don't warn about float constants that are precisely
// representable in the target type.
Expr::EvalResult result;
- if (E->EvaluateAsRValue(result, S.Context)) {
+ if (E->EvaluateAsRValue(result, Context)) {
// Value might be a float, a float vector, or a float complex.
- if (IsSameFloatAfterCast(result.Val,
- S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)),
- S.Context.getFloatTypeSemantics(QualType(SourceBT, 0))))
+ if (IsSameFloatAfterCast(
+ result.Val,
+ Context.getFloatTypeSemantics(QualType(TargetBT, 0)),
+ Context.getFloatTypeSemantics(QualType(SourceBT, 0))))
return;
}
- if (S.SourceMgr.isInSystemMacro(CC))
+ if (SourceMgr.isInSystemMacro(CC))
return;
- DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
+ DiagnoseImpCast(*this, E, T, CC, diag::warn_impcast_float_precision);
}
// ... or possibly if we're increasing rank, too
else if (Order < 0) {
- if (S.SourceMgr.isInSystemMacro(CC))
+ if (SourceMgr.isInSystemMacro(CC))
return;
- DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion);
+ DiagnoseImpCast(*this, E, T, CC, diag::warn_impcast_double_promotion);
}
return;
}
// If the target is integral, always warn.
if (TargetBT && TargetBT->isInteger()) {
- if (S.SourceMgr.isInSystemMacro(CC))
+ if (SourceMgr.isInSystemMacro(CC))
return;
- DiagnoseFloatingImpCast(S, E, T, CC);
+ DiagnoseFloatingImpCast(*this, E, T, CC);
}
// Detect the case where a call result is converted from floating-point to
@@ -15827,7 +10941,7 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
if (isa<ImplicitCastExpr>(LastA) &&
InnerE->getType()->isBooleanType()) {
// Warn on this floating-point to bool conversion
- DiagnoseImpCast(S, E, T, CC,
+ DiagnoseImpCast(*this, E, T, CC,
diag::warn_impcast_floating_point_to_bool);
}
}
@@ -15839,38 +10953,37 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
if (Source->isFixedPointType()) {
if (Target->isUnsaturatedFixedPointType()) {
Expr::EvalResult Result;
- if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects,
- S.isConstantEvaluatedContext())) {
+ if (E->EvaluateAsFixedPoint(Result, Context, Expr::SE_AllowSideEffects,
+ isConstantEvaluatedContext())) {
llvm::APFixedPoint Value = Result.Val.getFixedPoint();
- llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T);
- llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T);
+ llvm::APFixedPoint MaxVal = Context.getFixedPointMax(T);
+ llvm::APFixedPoint MinVal = Context.getFixedPointMin(T);
if (Value > MaxVal || Value < MinVal) {
- S.DiagRuntimeBehavior(E->getExprLoc(), E,
- S.PDiag(diag::warn_impcast_fixed_point_range)
- << Value.toString() << T
- << E->getSourceRange()
- << clang::SourceRange(CC));
+ DiagRuntimeBehavior(E->getExprLoc(), E,
+ PDiag(diag::warn_impcast_fixed_point_range)
+ << Value.toString() << T
+ << E->getSourceRange()
+ << clang::SourceRange(CC));
return;
}
}
} else if (Target->isIntegerType()) {
Expr::EvalResult Result;
- if (!S.isConstantEvaluatedContext() &&
- E->EvaluateAsFixedPoint(Result, S.Context,
- Expr::SE_AllowSideEffects)) {
+ if (!isConstantEvaluatedContext() &&
+ E->EvaluateAsFixedPoint(Result, Context, Expr::SE_AllowSideEffects)) {
llvm::APFixedPoint FXResult = Result.Val.getFixedPoint();
bool Overflowed;
llvm::APSInt IntResult = FXResult.convertToInt(
- S.Context.getIntWidth(T),
- Target->isSignedIntegerOrEnumerationType(), &Overflowed);
+ Context.getIntWidth(T), Target->isSignedIntegerOrEnumerationType(),
+ &Overflowed);
if (Overflowed) {
- S.DiagRuntimeBehavior(E->getExprLoc(), E,
- S.PDiag(diag::warn_impcast_fixed_point_range)
- << FXResult.toString() << T
- << E->getSourceRange()
- << clang::SourceRange(CC));
+ DiagRuntimeBehavior(E->getExprLoc(), E,
+ PDiag(diag::warn_impcast_fixed_point_range)
+ << FXResult.toString() << T
+ << E->getSourceRange()
+ << clang::SourceRange(CC));
return;
}
}
@@ -15878,20 +10991,20 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
} else if (Target->isUnsaturatedFixedPointType()) {
if (Source->isIntegerType()) {
Expr::EvalResult Result;
- if (!S.isConstantEvaluatedContext() &&
- E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) {
+ if (!isConstantEvaluatedContext() &&
+ E->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) {
llvm::APSInt Value = Result.Val.getInt();
bool Overflowed;
llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue(
- Value, S.Context.getFixedPointSemantics(T), &Overflowed);
+ Value, Context.getFixedPointSemantics(T), &Overflowed);
if (Overflowed) {
- S.DiagRuntimeBehavior(E->getExprLoc(), E,
- S.PDiag(diag::warn_impcast_fixed_point_range)
- << toString(Value, /*Radix=*/10) << T
- << E->getSourceRange()
- << clang::SourceRange(CC));
+ DiagRuntimeBehavior(E->getExprLoc(), E,
+ PDiag(diag::warn_impcast_fixed_point_range)
+ << toString(Value, /*Radix=*/10) << T
+ << E->getSourceRange()
+ << clang::SourceRange(CC));
return;
}
}
@@ -15905,25 +11018,25 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
TargetBT->isFloatingType() && !IsListInit) {
// Determine the number of precision bits in the source integer type.
IntRange SourceRange =
- GetExprRange(S.Context, E, S.isConstantEvaluatedContext(),
+ GetExprRange(Context, E, isConstantEvaluatedContext(),
/*Approximate=*/true);
unsigned int SourcePrecision = SourceRange.Width;
// Determine the number of precision bits in the
// target floating point type.
unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision(
- S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)));
+ Context.getFloatTypeSemantics(QualType(TargetBT, 0)));
if (SourcePrecision > 0 && TargetPrecision > 0 &&
SourcePrecision > TargetPrecision) {
if (std::optional<llvm::APSInt> SourceInt =
- E->getIntegerConstantExpr(S.Context)) {
+ E->getIntegerConstantExpr(Context)) {
// If the source integer is a constant, convert it to the target
// floating point type. Issue a warning if the value changes
// during the whole conversion.
llvm::APFloat TargetFloatValue(
- S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)));
+ Context.getFloatTypeSemantics(QualType(TargetBT, 0)));
llvm::APFloat::opStatus ConversionStatus =
TargetFloatValue.convertFromAPInt(
*SourceInt, SourceBT->isSignedInteger(),
@@ -15935,26 +11048,26 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
SmallString<32> PrettyTargetValue;
TargetFloatValue.toString(PrettyTargetValue, TargetPrecision);
- S.DiagRuntimeBehavior(
+ DiagRuntimeBehavior(
E->getExprLoc(), E,
- S.PDiag(diag::warn_impcast_integer_float_precision_constant)
+ PDiag(diag::warn_impcast_integer_float_precision_constant)
<< PrettySourceValue << PrettyTargetValue << E->getType() << T
<< E->getSourceRange() << clang::SourceRange(CC));
}
} else {
// Otherwise, the implicit conversion may lose precision.
- DiagnoseImpCast(S, E, T, CC,
+ DiagnoseImpCast(*this, E, T, CC,
diag::warn_impcast_integer_float_precision);
}
}
}
- DiagnoseNullConversion(S, E, T, CC);
+ DiagnoseNullConversion(*this, E, T, CC);
- S.DiscardMisalignedMemberAddress(Target, E);
+ DiscardMisalignedMemberAddress(Target, E);
if (Target->isBooleanType())
- DiagnoseIntInBoolContext(S, E);
+ DiagnoseIntInBoolContext(*this, E);
if (!Source->isIntegerType() || !Target->isIntegerType())
return;
@@ -15964,51 +11077,51 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
if (Target->isSpecificBuiltinType(BuiltinType::Bool))
return;
- if (isObjCSignedCharBool(S, T) && !Source->isCharType() &&
+ if (ObjC().isSignedCharBool(T) && !Source->isCharType() &&
!E->isKnownToHaveBooleanValue(/*Semantic=*/false)) {
- return adornObjCBoolConversionDiagWithTernaryFixit(
- S, E,
- S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool)
- << E->getType());
+ return ObjC().adornBoolConversionDiagWithTernaryFixit(
+ E, Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool)
+ << E->getType());
}
IntRange SourceTypeRange =
- IntRange::forTargetOfCanonicalType(S.Context, Source);
+ IntRange::forTargetOfCanonicalType(Context, Source);
IntRange LikelySourceRange = GetExprRange(
- S.Context, E, S.isConstantEvaluatedContext(), /*Approximate=*/true);
- IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target);
+ Context, E, isConstantEvaluatedContext(), /*Approximate=*/true);
+ IntRange TargetRange = IntRange::forTargetOfCanonicalType(Context, Target);
if (LikelySourceRange.Width > TargetRange.Width) {
// If the source is a constant, use a default-on diagnostic.
// TODO: this should happen for bitfield stores, too.
Expr::EvalResult Result;
- if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects,
- S.isConstantEvaluatedContext())) {
+ if (E->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects,
+ isConstantEvaluatedContext())) {
llvm::APSInt Value(32);
Value = Result.Val.getInt();
- if (S.SourceMgr.isInSystemMacro(CC))
+ if (SourceMgr.isInSystemMacro(CC))
return;
std::string PrettySourceValue = toString(Value, 10);
std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
- S.DiagRuntimeBehavior(
- E->getExprLoc(), E,
- S.PDiag(diag::warn_impcast_integer_precision_constant)
- << PrettySourceValue << PrettyTargetValue << E->getType() << T
- << E->getSourceRange() << SourceRange(CC));
+ DiagRuntimeBehavior(E->getExprLoc(), E,
+ PDiag(diag::warn_impcast_integer_precision_constant)
+ << PrettySourceValue << PrettyTargetValue
+ << E->getType() << T << E->getSourceRange()
+ << SourceRange(CC));
return;
}
// People want to build with -Wshorten-64-to-32 and not -Wconversion.
- if (S.SourceMgr.isInSystemMacro(CC))
+ if (SourceMgr.isInSystemMacro(CC))
return;
- if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
- return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
+ if (TargetRange.Width == 32 && Context.getIntWidth(E->getType()) == 64)
+ return DiagnoseImpCast(*this, E, T, CC, diag::warn_impcast_integer_64_32,
/* pruneControlFlow */ true);
- return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
+ return DiagnoseImpCast(*this, E, T, CC,
+ diag::warn_impcast_integer_precision);
}
if (TargetRange.Width > SourceTypeRange.Width) {
@@ -16016,10 +11129,10 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
if (UO->getOpcode() == UO_Minus)
if (Source->isUnsignedIntegerType()) {
if (Target->isUnsignedIntegerType())
- return DiagnoseImpCast(S, E, T, CC,
+ return DiagnoseImpCast(*this, E, T, CC,
diag::warn_impcast_high_order_zero_bits);
if (Target->isSignedIntegerType())
- return DiagnoseImpCast(S, E, T, CC,
+ return DiagnoseImpCast(*this, E, T, CC,
diag::warn_impcast_nonnegative_result);
}
}
@@ -16032,18 +11145,17 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
// cause a negative value to be stored.
Expr::EvalResult Result;
- if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) &&
- !S.SourceMgr.isInSystemMacro(CC)) {
+ if (E->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects) &&
+ !SourceMgr.isInSystemMacro(CC)) {
llvm::APSInt Value = Result.Val.getInt();
- if (isSameWidthConstantConversion(S, E, T, CC)) {
+ if (isSameWidthConstantConversion(*this, E, T, CC)) {
std::string PrettySourceValue = toString(Value, 10);
std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
- S.DiagRuntimeBehavior(
- E->getExprLoc(), E,
- S.PDiag(diag::warn_impcast_integer_precision_constant)
- << PrettySourceValue << PrettyTargetValue << E->getType() << T
- << E->getSourceRange() << SourceRange(CC));
+ Diag(E->getExprLoc(),
+ PDiag(diag::warn_impcast_integer_precision_constant)
+ << PrettySourceValue << PrettyTargetValue << E->getType() << T
+ << E->getSourceRange() << SourceRange(CC));
return;
}
}
@@ -16055,7 +11167,7 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
((TargetRange.NonNegative && !LikelySourceRange.NonNegative) ||
(!TargetRange.NonNegative && LikelySourceRange.NonNegative &&
LikelySourceRange.Width == TargetRange.Width))) {
- if (S.SourceMgr.isInSystemMacro(CC))
+ if (SourceMgr.isInSystemMacro(CC))
return;
if (SourceBT && SourceBT->isInteger() && TargetBT &&
@@ -16076,31 +11188,24 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
*ICContext = true;
}
- return DiagnoseImpCast(S, E, T, CC, DiagID);
+ return DiagnoseImpCast(*this, E, T, CC, DiagID);
}
// Diagnose conversions between different enumeration types.
// In C, we pretend that the type of an EnumConstantDecl is its enumeration
// type, to give us better diagnostics.
- QualType SourceType = E->getType();
- if (!S.getLangOpts().CPlusPlus) {
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
- if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
- EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext());
- SourceType = S.Context.getTypeDeclType(Enum);
- Source = S.Context.getCanonicalType(SourceType).getTypePtr();
- }
- }
+ QualType SourceType = E->getEnumCoercedType(Context);
+ Source = Context.getCanonicalType(SourceType).getTypePtr();
if (const EnumType *SourceEnum = Source->getAs<EnumType>())
if (const EnumType *TargetEnum = Target->getAs<EnumType>())
if (SourceEnum->getDecl()->hasNameForLinkage() &&
TargetEnum->getDecl()->hasNameForLinkage() &&
SourceEnum != TargetEnum) {
- if (S.SourceMgr.isInSystemMacro(CC))
+ if (SourceMgr.isInSystemMacro(CC))
return;
- return DiagnoseImpCast(S, E, SourceType, T, CC,
+ return DiagnoseImpCast(*this, E, SourceType, T, CC,
diag::warn_impcast_different_enum_types);
}
}
@@ -16120,7 +11225,7 @@ static void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
AnalyzeImplicitConversions(S, E, CC);
if (E->getType() != T)
- return CheckImplicitConversion(S, E, T, CC, &ICContext);
+ return S.CheckImplicitConversion(E, T, CC, &ICContext);
}
static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
@@ -16151,21 +11256,24 @@ static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
if (E->getType() == T) return;
Suspicious = false;
- CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(),
- E->getType(), CC, &Suspicious);
+ S.CheckImplicitConversion(TrueExpr->IgnoreParenImpCasts(), E->getType(), CC,
+ &Suspicious);
if (!Suspicious)
- CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(),
- E->getType(), CC, &Suspicious);
+ S.CheckImplicitConversion(E->getFalseExpr()->IgnoreParenImpCasts(),
+ E->getType(), CC, &Suspicious);
}
/// Check conversion of given expression to boolean.
/// Input argument E is a logical expression.
static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) {
- if (S.getLangOpts().Bool)
+ // Run the bool-like conversion checks only for C since there bools are
+ // still not used as the return type from "boolean" operators or as the input
+ // type for conditional operators.
+ if (S.getLangOpts().CPlusPlus)
return;
if (E->IgnoreParenImpCasts()->getType()->isAtomicType())
return;
- CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
+ S.CheckImplicitConversion(E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
}
namespace {
@@ -16218,12 +11326,26 @@ static void AnalyzeImplicitConversions(
BO->getRHS()->isKnownToHaveBooleanValue() &&
BO->getLHS()->HasSideEffects(S.Context) &&
BO->getRHS()->HasSideEffects(S.Context)) {
- S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical)
- << (BO->getOpcode() == BO_And ? "&" : "|") << OrigE->getSourceRange()
- << FixItHint::CreateReplacement(
- BO->getOperatorLoc(),
- (BO->getOpcode() == BO_And ? "&&" : "||"));
- S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int);
+ SourceManager &SM = S.getSourceManager();
+ const LangOptions &LO = S.getLangOpts();
+ SourceLocation BLoc = BO->getOperatorLoc();
+ SourceLocation ELoc = Lexer::getLocForEndOfToken(BLoc, 0, SM, LO);
+ StringRef SR = clang::Lexer::getSourceText(
+ clang::CharSourceRange::getTokenRange(BLoc, ELoc), SM, LO);
+ // To reduce false positives, only issue the diagnostic if the operator
+ // is explicitly spelled as a punctuator. This suppresses the diagnostic
+ // when using 'bitand' or 'bitor' either as keywords in C++ or as macros
+ // in C, along with other macro spellings the user might invent.
+ if (SR.str() == "&" || SR.str() == "|") {
+
+ S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical)
+ << (BO->getOpcode() == BO_And ? "&" : "|")
+ << OrigE->getSourceRange()
+ << FixItHint::CreateReplacement(
+ BO->getOperatorLoc(),
+ (BO->getOpcode() == BO_And ? "&&" : "||"));
+ S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int);
+ }
}
// For conditional operators, we analyze the arguments as if they
@@ -16241,7 +11363,7 @@ static void AnalyzeImplicitConversions(
// The non-canonical typecheck is just an optimization;
// CheckImplicitConversion will filter out dead implicit conversions.
if (SourceExpr->getType() != T)
- CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit);
+ S.CheckImplicitConversion(SourceExpr, T, CC, nullptr, IsListInit);
// Now continue drilling into this expression.
@@ -16341,21 +11463,6 @@ static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
}
-/// Diagnose integer type and any valid implicit conversion to it.
-static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
- // Taking into account implicit conversions,
- // allow any integer.
- if (!E->getType()->isIntegerType()) {
- S.Diag(E->getBeginLoc(),
- diag::err_opencl_enqueue_kernel_invalid_local_size_type);
- return true;
- }
- // Potentially emit standard warnings for implicit conversions if enabled
- // using -Wconversion.
- CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
- return false;
-}
-
// Helper function for Sema::DiagnoseAlwaysNonNullPointer.
// Returns true when emitting a warning about taking the address of a reference.
static bool CheckForReference(Sema &SemaRef, const Expr *E,
@@ -16404,12 +11511,6 @@ static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
return false;
}
-/// Diagnose pointers that are always non-null.
-/// \param E the expression containing the pointer
-/// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
-/// compared to a null pointer
-/// \param IsEqual True when the comparison is equal to a null pointer
-/// \param Range Extra SourceRange to highlight in the diagnostic
void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullKind,
bool IsEqual, SourceRange Range) {
@@ -16476,6 +11577,20 @@ void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
}
}
+ // Complain if we are converting a lambda expression to a boolean value
+ // outside of instantiation.
+ if (!inTemplateInstantiation()) {
+ if (const auto *MCallExpr = dyn_cast<CXXMemberCallExpr>(E)) {
+ if (const auto *MRecordDecl = MCallExpr->getRecordDecl();
+ MRecordDecl && MRecordDecl->isLambda()) {
+ Diag(E->getExprLoc(), diag::warn_impcast_pointer_to_bool)
+ << /*LambdaPointerConversionOperatorType=*/3
+ << MRecordDecl->getSourceRange() << Range << IsEqual;
+ return;
+ }
+ }
+ }
+
// Expect to find a single Decl. Skip anything more complicated.
ValueDecl *D = nullptr;
if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) {
@@ -16595,13 +11710,6 @@ void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
<< FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
}
-/// Diagnoses "dangerous" implicit conversions within the given
-/// expression (which is a full expression). Implements -Wconversion
-/// and -Wsign-compare.
-///
-/// \param CC the "context" location of the implicit conversion, i.e.
-/// the most location of the syntactic entity requiring the implicit
-/// conversion
void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
// Don't diagnose in unevaluated contexts.
if (isUnevaluatedContext())
@@ -16620,14 +11728,10 @@ void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
AnalyzeImplicitConversions(*this, E, CC);
}
-/// CheckBoolLikeConversion - Check conversion of given expression to boolean.
-/// Input argument E is a logical expression.
void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
::CheckBoolLikeConversion(*this, E, CC);
}
-/// Diagnose when expression is an integer constant expression and its evaluation
-/// results in integer overflow
void Sema::CheckForIntOverflow (const Expr *E) {
// Use a work list to deal with nested struct initializers.
SmallVector<const Expr *, 2> Exprs(1, E);
@@ -16681,6 +11785,7 @@ class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> {
struct Value {
explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {}
unsigned Parent : 31;
+ LLVM_PREFERRED_TYPE(bool)
unsigned Merged : 1;
};
SmallVector<Value, 8> Values;
@@ -17212,7 +12317,7 @@ public:
// evaluates to true.
bool EvalResult = false;
bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
- bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult);
+ bool ShouldVisitRHS = !EvalOK || !EvalResult;
if (ShouldVisitRHS) {
Region = RHSRegion;
Visit(BO->getRHS());
@@ -17244,7 +12349,7 @@ public:
// [...] the second operand is not evaluated if the first operand is false.
bool EvalResult = false;
bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
- bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
+ bool ShouldVisitRHS = !EvalOK || EvalResult;
if (ShouldVisitRHS) {
Region = RHSRegion;
Visit(BO->getRHS());
@@ -17295,8 +12400,8 @@ public:
// evaluated. [...]
bool EvalResult = false;
bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
- bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
- bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
+ bool ShouldVisitTrueExpr = !EvalOK || EvalResult;
+ bool ShouldVisitFalseExpr = !EvalOK || !EvalResult;
if (ShouldVisitTrueExpr) {
Region = TrueRegion;
Visit(CO->getTrueExpr());
@@ -17494,20 +12599,8 @@ public:
return VisitExpr(CCE);
// In C++11, list initializations are sequenced.
- SmallVector<SequenceTree::Seq, 32> Elts;
- SequenceTree::Seq Parent = Region;
- for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(),
- E = CCE->arg_end();
- I != E; ++I) {
- Region = Tree.allocate(Parent);
- Elts.push_back(Region);
- Visit(*I);
- }
-
- // Forget that the initializers are sequenced.
- Region = Parent;
- for (unsigned I = 0; I < Elts.size(); ++I)
- Tree.merge(Elts[I]);
+ SequenceExpressionsInOrder(
+ llvm::ArrayRef(CCE->getArgs(), CCE->getNumArgs()));
}
void VisitInitListExpr(const InitListExpr *ILE) {
@@ -17515,10 +12608,20 @@ public:
return VisitExpr(ILE);
// In C++11, list initializations are sequenced.
+ SequenceExpressionsInOrder(ILE->inits());
+ }
+
+ void VisitCXXParenListInitExpr(const CXXParenListInitExpr *PLIE) {
+ // C++20 parenthesized list initializations are sequenced. See C++20
+ // [decl.init.general]p16.5 and [decl.init.general]p16.6.2.2.
+ SequenceExpressionsInOrder(PLIE->getInitExprs());
+ }
+
+private:
+ void SequenceExpressionsInOrder(ArrayRef<const Expr *> ExpressionList) {
SmallVector<SequenceTree::Seq, 32> Elts;
SequenceTree::Seq Parent = Region;
- for (unsigned I = 0; I < ILE->getNumInits(); ++I) {
- const Expr *E = ILE->getInit(I);
+ for (const Expr *E : ExpressionList) {
if (!E)
continue;
Region = Tree.allocate(Parent);
@@ -17593,11 +12696,6 @@ static void diagnoseArrayStarInParamType(Sema &S, QualType PType,
S.Diag(Loc, diag::err_array_star_in_function_definition);
}
-/// CheckParmsForFunctionDef - Check that the parameters of the given
-/// function are appropriate for the definition of a function. This
-/// takes care of any checks that cannot be performed on the
-/// declaration itself, e.g., that the types of each of the function
-/// parameters are complete.
bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames) {
bool HasInvalidParm = false;
@@ -17929,8 +13027,6 @@ static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) {
return S.Context.getTypeAlignInChars(E->getType()->getPointeeType());
}
-/// CheckCastAlign - Implements -Wcast-align, which warns when a
-/// pointer cast increases the alignment requirements.
void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
// This is actually a lot of work to potentially be doing on every
// cast; don't do it if we're ignoring -Wcast_align (as is the default).
@@ -18192,8 +13288,10 @@ void Sema::CheckArrayAccess(const Expr *expr) {
expr = cast<MemberExpr>(expr)->getBase();
break;
}
- case Stmt::OMPArraySectionExprClass: {
- const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr);
+ case Stmt::ArraySectionExprClass: {
+ const ArraySectionExpr *ASE = cast<ArraySectionExpr>(expr);
+ // FIXME: We should probably be checking all of the elements to the
+ // 'length' here as well.
if (ASE->getLowerBound())
CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(),
/*ASE=*/nullptr, AllowOnePastEnd > 0);
@@ -18235,465 +13333,6 @@ void Sema::CheckArrayAccess(const Expr *expr) {
}
}
-//===--- CHECK: Objective-C retain cycles ----------------------------------//
-
-namespace {
-
-struct RetainCycleOwner {
- VarDecl *Variable = nullptr;
- SourceRange Range;
- SourceLocation Loc;
- bool Indirect = false;
-
- RetainCycleOwner() = default;
-
- void setLocsFrom(Expr *e) {
- Loc = e->getExprLoc();
- Range = e->getSourceRange();
- }
-};
-
-} // namespace
-
-/// Consider whether capturing the given variable can possibly lead to
-/// a retain cycle.
-static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) {
- // In ARC, it's captured strongly iff the variable has __strong
- // lifetime. In MRR, it's captured strongly if the variable is
- // __block and has an appropriate type.
- if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
- return false;
-
- owner.Variable = var;
- if (ref)
- owner.setLocsFrom(ref);
- return true;
-}
-
-static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
- while (true) {
- e = e->IgnoreParens();
- if (CastExpr *cast = dyn_cast<CastExpr>(e)) {
- switch (cast->getCastKind()) {
- case CK_BitCast:
- case CK_LValueBitCast:
- case CK_LValueToRValue:
- case CK_ARCReclaimReturnedObject:
- e = cast->getSubExpr();
- continue;
-
- default:
- return false;
- }
- }
-
- if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) {
- ObjCIvarDecl *ivar = ref->getDecl();
- if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
- return false;
-
- // Try to find a retain cycle in the base.
- if (!findRetainCycleOwner(S, ref->getBase(), owner))
- return false;
-
- if (ref->isFreeIvar()) owner.setLocsFrom(ref);
- owner.Indirect = true;
- return true;
- }
-
- if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) {
- VarDecl *var = dyn_cast<VarDecl>(ref->getDecl());
- if (!var) return false;
- return considerVariable(var, ref, owner);
- }
-
- if (MemberExpr *member = dyn_cast<MemberExpr>(e)) {
- if (member->isArrow()) return false;
-
- // Don't count this as an indirect ownership.
- e = member->getBase();
- continue;
- }
-
- if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
- // Only pay attention to pseudo-objects on property references.
- ObjCPropertyRefExpr *pre
- = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm()
- ->IgnoreParens());
- if (!pre) return false;
- if (pre->isImplicitProperty()) return false;
- ObjCPropertyDecl *property = pre->getExplicitProperty();
- if (!property->isRetaining() &&
- !(property->getPropertyIvarDecl() &&
- property->getPropertyIvarDecl()->getType()
- .getObjCLifetime() == Qualifiers::OCL_Strong))
- return false;
-
- owner.Indirect = true;
- if (pre->isSuperReceiver()) {
- owner.Variable = S.getCurMethodDecl()->getSelfDecl();
- if (!owner.Variable)
- return false;
- owner.Loc = pre->getLocation();
- owner.Range = pre->getSourceRange();
- return true;
- }
- e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase())
- ->getSourceExpr());
- continue;
- }
-
- // Array ivars?
-
- return false;
- }
-}
-
-namespace {
-
- struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
- VarDecl *Variable;
- Expr *Capturer = nullptr;
- bool VarWillBeReased = false;
-
- FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
- : EvaluatedExprVisitor<FindCaptureVisitor>(Context),
- Variable(variable) {}
-
- void VisitDeclRefExpr(DeclRefExpr *ref) {
- if (ref->getDecl() == Variable && !Capturer)
- Capturer = ref;
- }
-
- void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) {
- if (Capturer) return;
- Visit(ref->getBase());
- if (Capturer && ref->isFreeIvar())
- Capturer = ref;
- }
-
- void VisitBlockExpr(BlockExpr *block) {
- // Look inside nested blocks
- if (block->getBlockDecl()->capturesVariable(Variable))
- Visit(block->getBlockDecl()->getBody());
- }
-
- void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) {
- if (Capturer) return;
- if (OVE->getSourceExpr())
- Visit(OVE->getSourceExpr());
- }
-
- void VisitBinaryOperator(BinaryOperator *BinOp) {
- if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign)
- return;
- Expr *LHS = BinOp->getLHS();
- if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) {
- if (DRE->getDecl() != Variable)
- return;
- if (Expr *RHS = BinOp->getRHS()) {
- RHS = RHS->IgnoreParenCasts();
- std::optional<llvm::APSInt> Value;
- VarWillBeReased =
- (RHS && (Value = RHS->getIntegerConstantExpr(Context)) &&
- *Value == 0);
- }
- }
- }
- };
-
-} // namespace
-
-/// Check whether the given argument is a block which captures a
-/// variable.
-static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) {
- assert(owner.Variable && owner.Loc.isValid());
-
- e = e->IgnoreParenCasts();
-
- // Look through [^{...} copy] and Block_copy(^{...}).
- if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) {
- Selector Cmd = ME->getSelector();
- if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") {
- e = ME->getInstanceReceiver();
- if (!e)
- return nullptr;
- e = e->IgnoreParenCasts();
- }
- } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) {
- if (CE->getNumArgs() == 1) {
- FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl());
- if (Fn) {
- const IdentifierInfo *FnI = Fn->getIdentifier();
- if (FnI && FnI->isStr("_Block_copy")) {
- e = CE->getArg(0)->IgnoreParenCasts();
- }
- }
- }
- }
-
- BlockExpr *block = dyn_cast<BlockExpr>(e);
- if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable))
- return nullptr;
-
- FindCaptureVisitor visitor(S.Context, owner.Variable);
- visitor.Visit(block->getBlockDecl()->getBody());
- return visitor.VarWillBeReased ? nullptr : visitor.Capturer;
-}
-
-static void diagnoseRetainCycle(Sema &S, Expr *capturer,
- RetainCycleOwner &owner) {
- assert(capturer);
- assert(owner.Variable && owner.Loc.isValid());
-
- S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle)
- << owner.Variable << capturer->getSourceRange();
- S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner)
- << owner.Indirect << owner.Range;
-}
-
-/// Check for a keyword selector that starts with the word 'add' or
-/// 'set'.
-static bool isSetterLikeSelector(Selector sel) {
- if (sel.isUnarySelector()) return false;
-
- StringRef str = sel.getNameForSlot(0);
- str = str.ltrim('_');
- if (str.starts_with("set"))
- str = str.substr(3);
- else if (str.starts_with("add")) {
- // Specially allow 'addOperationWithBlock:'.
- if (sel.getNumArgs() == 1 && str.starts_with("addOperationWithBlock"))
- return false;
- str = str.substr(3);
- } else
- return false;
-
- if (str.empty()) return true;
- return !isLowercase(str.front());
-}
-
-static std::optional<int>
-GetNSMutableArrayArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
- bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass(
- Message->getReceiverInterface(),
- NSAPI::ClassId_NSMutableArray);
- if (!IsMutableArray) {
- return std::nullopt;
- }
-
- Selector Sel = Message->getSelector();
-
- std::optional<NSAPI::NSArrayMethodKind> MKOpt =
- S.NSAPIObj->getNSArrayMethodKind(Sel);
- if (!MKOpt) {
- return std::nullopt;
- }
-
- NSAPI::NSArrayMethodKind MK = *MKOpt;
-
- switch (MK) {
- case NSAPI::NSMutableArr_addObject:
- case NSAPI::NSMutableArr_insertObjectAtIndex:
- case NSAPI::NSMutableArr_setObjectAtIndexedSubscript:
- return 0;
- case NSAPI::NSMutableArr_replaceObjectAtIndex:
- return 1;
-
- default:
- return std::nullopt;
- }
-
- return std::nullopt;
-}
-
-static std::optional<int>
-GetNSMutableDictionaryArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
- bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass(
- Message->getReceiverInterface(),
- NSAPI::ClassId_NSMutableDictionary);
- if (!IsMutableDictionary) {
- return std::nullopt;
- }
-
- Selector Sel = Message->getSelector();
-
- std::optional<NSAPI::NSDictionaryMethodKind> MKOpt =
- S.NSAPIObj->getNSDictionaryMethodKind(Sel);
- if (!MKOpt) {
- return std::nullopt;
- }
-
- NSAPI::NSDictionaryMethodKind MK = *MKOpt;
-
- switch (MK) {
- case NSAPI::NSMutableDict_setObjectForKey:
- case NSAPI::NSMutableDict_setValueForKey:
- case NSAPI::NSMutableDict_setObjectForKeyedSubscript:
- return 0;
-
- default:
- return std::nullopt;
- }
-
- return std::nullopt;
-}
-
-static std::optional<int> GetNSSetArgumentIndex(Sema &S,
- ObjCMessageExpr *Message) {
- bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass(
- Message->getReceiverInterface(),
- NSAPI::ClassId_NSMutableSet);
-
- bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass(
- Message->getReceiverInterface(),
- NSAPI::ClassId_NSMutableOrderedSet);
- if (!IsMutableSet && !IsMutableOrderedSet) {
- return std::nullopt;
- }
-
- Selector Sel = Message->getSelector();
-
- std::optional<NSAPI::NSSetMethodKind> MKOpt =
- S.NSAPIObj->getNSSetMethodKind(Sel);
- if (!MKOpt) {
- return std::nullopt;
- }
-
- NSAPI::NSSetMethodKind MK = *MKOpt;
-
- switch (MK) {
- case NSAPI::NSMutableSet_addObject:
- case NSAPI::NSOrderedSet_setObjectAtIndex:
- case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript:
- case NSAPI::NSOrderedSet_insertObjectAtIndex:
- return 0;
- case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject:
- return 1;
- }
-
- return std::nullopt;
-}
-
-void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
- if (!Message->isInstanceMessage()) {
- return;
- }
-
- std::optional<int> ArgOpt;
-
- if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) &&
- !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) &&
- !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) {
- return;
- }
-
- int ArgIndex = *ArgOpt;
-
- Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts();
- if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) {
- Arg = OE->getSourceExpr()->IgnoreImpCasts();
- }
-
- if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
- if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
- if (ArgRE->isObjCSelfExpr()) {
- Diag(Message->getSourceRange().getBegin(),
- diag::warn_objc_circular_container)
- << ArgRE->getDecl() << StringRef("'super'");
- }
- }
- } else {
- Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts();
-
- if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) {
- Receiver = OE->getSourceExpr()->IgnoreImpCasts();
- }
-
- if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) {
- if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
- if (ReceiverRE->getDecl() == ArgRE->getDecl()) {
- ValueDecl *Decl = ReceiverRE->getDecl();
- Diag(Message->getSourceRange().getBegin(),
- diag::warn_objc_circular_container)
- << Decl << Decl;
- if (!ArgRE->isObjCSelfExpr()) {
- Diag(Decl->getLocation(),
- diag::note_objc_circular_container_declared_here)
- << Decl;
- }
- }
- }
- } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) {
- if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) {
- if (IvarRE->getDecl() == IvarArgRE->getDecl()) {
- ObjCIvarDecl *Decl = IvarRE->getDecl();
- Diag(Message->getSourceRange().getBegin(),
- diag::warn_objc_circular_container)
- << Decl << Decl;
- Diag(Decl->getLocation(),
- diag::note_objc_circular_container_declared_here)
- << Decl;
- }
- }
- }
- }
-}
-
-/// Check a message send to see if it's likely to cause a retain cycle.
-void Sema::checkRetainCycles(ObjCMessageExpr *msg) {
- // Only check instance methods whose selector looks like a setter.
- if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector()))
- return;
-
- // Try to find a variable that the receiver is strongly owned by.
- RetainCycleOwner owner;
- if (msg->getReceiverKind() == ObjCMessageExpr::Instance) {
- if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner))
- return;
- } else {
- assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
- owner.Variable = getCurMethodDecl()->getSelfDecl();
- owner.Loc = msg->getSuperLoc();
- owner.Range = msg->getSuperLoc();
- }
-
- // Check whether the receiver is captured by any of the arguments.
- const ObjCMethodDecl *MD = msg->getMethodDecl();
- for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) {
- if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) {
- // noescape blocks should not be retained by the method.
- if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>())
- continue;
- return diagnoseRetainCycle(*this, capturer, owner);
- }
- }
-}
-
-/// Check a property assign to see if it's likely to cause a retain cycle.
-void Sema::checkRetainCycles(Expr *receiver, Expr *argument) {
- RetainCycleOwner owner;
- if (!findRetainCycleOwner(*this, receiver, owner))
- return;
-
- if (Expr *capturer = findCapturingExpr(*this, argument, owner))
- diagnoseRetainCycle(*this, capturer, owner);
-}
-
-void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) {
- RetainCycleOwner Owner;
- if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner))
- return;
-
- // Because we don't have an expression for the variable, we have to set the
- // location explicitly here.
- Owner.Loc = Var->getLocation();
- Owner.Range = Var->getSourceRange();
-
- if (Expr *Capturer = findCapturingExpr(*this, Init, Owner))
- diagnoseRetainCycle(*this, Capturer, Owner);
-}
-
static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc,
Expr *RHS, bool isProperty) {
// Check if RHS is an Objective-C object literal, which also can get
@@ -18703,8 +13342,8 @@ static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc,
// This enum needs to match with the 'select' in
// warn_objc_arc_literal_assign (off-by-1).
- Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS);
- if (Kind == Sema::LK_String || Kind == Sema::LK_None)
+ SemaObjC::ObjCLiteralKind Kind = S.ObjC().CheckLiteralKind(RHS);
+ if (Kind == SemaObjC::LK_String || Kind == SemaObjC::LK_None)
return false;
S.Diag(Loc, diag::warn_arc_literal_assign)
@@ -18934,7 +13573,6 @@ void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
//===--- CHECK: Warn on self move with std::move. -------------------------===//
-/// DiagnoseSelfMove - Emits a warning if a value is moved to itself.
void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc) {
if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc))
@@ -18947,18 +13585,17 @@ void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
LHSExpr = LHSExpr->IgnoreParenImpCasts();
RHSExpr = RHSExpr->IgnoreParenImpCasts();
- // Check for a call expression
- const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
- if (!CE || CE->getNumArgs() != 1)
- return;
-
- // Check for a call to std::move
- if (!CE->isCallToStdMove())
+ // Check for a call to std::move or for a static_cast<T&&>(..) to an xvalue
+ // which we can treat as an inlined std::move
+ if (const auto *CE = dyn_cast<CallExpr>(RHSExpr);
+ CE && CE->getNumArgs() == 1 && CE->isCallToStdMove())
+ RHSExpr = CE->getArg(0);
+ else if (const auto *CXXSCE = dyn_cast<CXXStaticCastExpr>(RHSExpr);
+ CXXSCE && CXXSCE->isXValue())
+ RHSExpr = CXXSCE->getSubExpr();
+ else
return;
- // Get argument from std::move
- RHSExpr = CE->getArg(0);
-
const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);
@@ -19027,10 +13664,11 @@ void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
//===--- Layout compatibility ----------------------------------------------//
-static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);
+static bool isLayoutCompatible(const ASTContext &C, QualType T1, QualType T2);
/// Check if two enumeration types are layout-compatible.
-static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
+static bool isLayoutCompatible(const ASTContext &C, const EnumDecl *ED1,
+ const EnumDecl *ED2) {
// C++11 [dcl.enum] p8:
// Two enumeration types are layout-compatible if they have the same
// underlying type.
@@ -19039,8 +13677,24 @@ static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
}
/// Check if two fields are layout-compatible.
-static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1,
- FieldDecl *Field2) {
+/// Can be used on union members, which are exempt from alignment requirement
+/// of common initial sequence.
+static bool isLayoutCompatible(const ASTContext &C, const FieldDecl *Field1,
+ const FieldDecl *Field2,
+ bool AreUnionMembers = false) {
+ [[maybe_unused]] const Type *Field1Parent =
+ Field1->getParent()->getTypeForDecl();
+ [[maybe_unused]] const Type *Field2Parent =
+ Field2->getParent()->getTypeForDecl();
+ assert(((Field1Parent->isStructureOrClassType() &&
+ Field2Parent->isStructureOrClassType()) ||
+ (Field1Parent->isUnionType() && Field2Parent->isUnionType())) &&
+ "Can't evaluate layout compatibility between a struct field and a "
+ "union field.");
+ assert(((!AreUnionMembers && Field1Parent->isStructureOrClassType()) ||
+ (AreUnionMembers && Field1Parent->isUnionType())) &&
+ "AreUnionMembers should be 'true' for union fields (only).");
+
if (!isLayoutCompatible(C, Field1->getType(), Field2->getType()))
return false;
@@ -19056,68 +13710,49 @@ static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1,
return false;
}
+ if (Field1->hasAttr<clang::NoUniqueAddressAttr>() ||
+ Field2->hasAttr<clang::NoUniqueAddressAttr>())
+ return false;
+
+ if (!AreUnionMembers &&
+ Field1->getMaxAlignment() != Field2->getMaxAlignment())
+ return false;
+
return true;
}
/// Check if two standard-layout structs are layout-compatible.
/// (C++11 [class.mem] p17)
-static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1,
- RecordDecl *RD2) {
- // If both records are C++ classes, check that base classes match.
- if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) {
- // If one of records is a CXXRecordDecl we are in C++ mode,
- // thus the other one is a CXXRecordDecl, too.
- const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2);
- // Check number of base classes.
- if (D1CXX->getNumBases() != D2CXX->getNumBases())
- return false;
+static bool isLayoutCompatibleStruct(const ASTContext &C, const RecordDecl *RD1,
+ const RecordDecl *RD2) {
+ // Get to the class where the fields are declared
+ if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1))
+ RD1 = D1CXX->getStandardLayoutBaseWithFields();
- // Check the base classes.
- for (CXXRecordDecl::base_class_const_iterator
- Base1 = D1CXX->bases_begin(),
- BaseEnd1 = D1CXX->bases_end(),
- Base2 = D2CXX->bases_begin();
- Base1 != BaseEnd1;
- ++Base1, ++Base2) {
- if (!isLayoutCompatible(C, Base1->getType(), Base2->getType()))
- return false;
- }
- } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) {
- // If only RD2 is a C++ class, it should have zero base classes.
- if (D2CXX->getNumBases() > 0)
- return false;
- }
+ if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2))
+ RD2 = D2CXX->getStandardLayoutBaseWithFields();
// Check the fields.
- RecordDecl::field_iterator Field2 = RD2->field_begin(),
- Field2End = RD2->field_end(),
- Field1 = RD1->field_begin(),
- Field1End = RD1->field_end();
- for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) {
- if (!isLayoutCompatible(C, *Field1, *Field2))
- return false;
- }
- if (Field1 != Field1End || Field2 != Field2End)
- return false;
-
- return true;
+ return llvm::equal(RD1->fields(), RD2->fields(),
+ [&C](const FieldDecl *F1, const FieldDecl *F2) -> bool {
+ return isLayoutCompatible(C, F1, F2);
+ });
}
/// Check if two standard-layout unions are layout-compatible.
/// (C++11 [class.mem] p18)
-static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1,
- RecordDecl *RD2) {
- llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields;
+static bool isLayoutCompatibleUnion(const ASTContext &C, const RecordDecl *RD1,
+ const RecordDecl *RD2) {
+ llvm::SmallPtrSet<const FieldDecl *, 8> UnmatchedFields;
for (auto *Field2 : RD2->fields())
UnmatchedFields.insert(Field2);
for (auto *Field1 : RD1->fields()) {
- llvm::SmallPtrSet<FieldDecl *, 8>::iterator
- I = UnmatchedFields.begin(),
- E = UnmatchedFields.end();
+ auto I = UnmatchedFields.begin();
+ auto E = UnmatchedFields.end();
for ( ; I != E; ++I) {
- if (isLayoutCompatible(C, Field1, *I)) {
+ if (isLayoutCompatible(C, Field1, *I, /*IsUnionMember=*/true)) {
bool Result = UnmatchedFields.erase(*I);
(void) Result;
assert(Result);
@@ -19131,8 +13766,8 @@ static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1,
return UnmatchedFields.empty();
}
-static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1,
- RecordDecl *RD2) {
+static bool isLayoutCompatible(const ASTContext &C, const RecordDecl *RD1,
+ const RecordDecl *RD2) {
if (RD1->isUnion() != RD2->isUnion())
return false;
@@ -19143,19 +13778,20 @@ static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1,
}
/// Check if two types are layout-compatible in C++11 sense.
-static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
+static bool isLayoutCompatible(const ASTContext &C, QualType T1, QualType T2) {
if (T1.isNull() || T2.isNull())
return false;
- // C++11 [basic.types] p11:
- // If two types T1 and T2 are the same type, then T1 and T2 are
- // layout-compatible types.
- if (C.hasSameType(T1, T2))
- return true;
-
+ // C++20 [basic.types] p11:
+ // Two types cv1 T1 and cv2 T2 are layout-compatible types
+ // if T1 and T2 are the same type, layout-compatible enumerations (9.7.1),
+ // or layout-compatible standard-layout class types (11.4).
T1 = T1.getCanonicalType().getUnqualifiedType();
T2 = T2.getCanonicalType().getUnqualifiedType();
+ if (C.hasSameType(T1, T2))
+ return true;
+
const Type::TypeClass TC1 = T1->getTypeClass();
const Type::TypeClass TC2 = T2->getTypeClass();
@@ -19178,6 +13814,31 @@ static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
return false;
}
+bool Sema::IsLayoutCompatible(QualType T1, QualType T2) const {
+ return isLayoutCompatible(getASTContext(), T1, T2);
+}
+
+//===-------------- Pointer interconvertibility ----------------------------//
+
+bool Sema::IsPointerInterconvertibleBaseOf(const TypeSourceInfo *Base,
+ const TypeSourceInfo *Derived) {
+ QualType BaseT = Base->getType()->getCanonicalTypeUnqualified();
+ QualType DerivedT = Derived->getType()->getCanonicalTypeUnqualified();
+
+ if (BaseT->isStructureOrClassType() && DerivedT->isStructureOrClassType() &&
+ getASTContext().hasSameType(BaseT, DerivedT))
+ return true;
+
+ if (!IsDerivedFrom(Derived->getTypeLoc().getBeginLoc(), DerivedT, BaseT))
+ return false;
+
+ // Per [basic.compound]/4.3, containing object has to be standard-layout.
+ if (DerivedT->getAsCXXRecordDecl()->isStandardLayout())
+ return true;
+
+ return false;
+}
+
//===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//
/// Given a type tag expression find the type tag itself.
@@ -19596,7 +14257,7 @@ void Sema::CheckAddressOfPackedMember(Expr *rhs) {
}
bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return true;
ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
@@ -19606,23 +14267,43 @@ bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) {
TheCall->setArg(0, A.get());
QualType TyA = A.get()->getType();
- if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
+ if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA, 1))
return true;
TheCall->setType(TyA);
return false;
}
-bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 2))
+bool Sema::BuiltinElementwiseMath(CallExpr *TheCall) {
+ QualType Res;
+ if (BuiltinVectorMath(TheCall, Res))
+ return true;
+ TheCall->setType(Res);
+ return false;
+}
+
+bool Sema::BuiltinVectorToScalarMath(CallExpr *TheCall) {
+ QualType Res;
+ if (BuiltinVectorMath(TheCall, Res))
+ return true;
+
+ if (auto *VecTy0 = Res->getAs<VectorType>())
+ TheCall->setType(VecTy0->getElementType());
+ else
+ TheCall->setType(Res);
+
+ return false;
+}
+
+bool Sema::BuiltinVectorMath(CallExpr *TheCall, QualType &Res) {
+ if (checkArgCount(TheCall, 2))
return true;
ExprResult A = TheCall->getArg(0);
ExprResult B = TheCall->getArg(1);
// Do standard promotions between the two arguments, returning their common
// type.
- QualType Res =
- UsualArithmeticConversions(A, B, TheCall->getExprLoc(), ACK_Comparison);
+ Res = UsualArithmeticConversions(A, B, TheCall->getExprLoc(), ACK_Comparison);
if (A.isInvalid() || B.isInvalid())
return true;
@@ -19634,17 +14315,17 @@ bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) {
diag::err_typecheck_call_different_arg_types)
<< TyA << TyB;
- if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
+ if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA, 1))
return true;
TheCall->setArg(0, A.get());
TheCall->setArg(1, B.get());
- TheCall->setType(Res);
return false;
}
-bool Sema::SemaBuiltinElementwiseTernaryMath(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 3))
+bool Sema::BuiltinElementwiseTernaryMath(CallExpr *TheCall,
+ bool CheckForFloatArgs) {
+ if (checkArgCount(TheCall, 3))
return true;
Expr *Args[3];
@@ -19655,11 +14336,20 @@ bool Sema::SemaBuiltinElementwiseTernaryMath(CallExpr *TheCall) {
Args[I] = Converted.get();
}
- int ArgOrdinal = 1;
- for (Expr *Arg : Args) {
- if (checkFPMathBuiltinElementType(*this, Arg->getBeginLoc(), Arg->getType(),
+ if (CheckForFloatArgs) {
+ int ArgOrdinal = 1;
+ for (Expr *Arg : Args) {
+ if (checkFPMathBuiltinElementType(*this, Arg->getBeginLoc(),
+ Arg->getType(), ArgOrdinal++))
+ return true;
+ }
+ } else {
+ int ArgOrdinal = 1;
+ for (Expr *Arg : Args) {
+ if (checkMathBuiltinElementType(*this, Arg->getBeginLoc(), Arg->getType(),
ArgOrdinal++))
- return true;
+ return true;
+ }
}
for (int I = 1; I < 3; ++I) {
@@ -19678,7 +14368,7 @@ bool Sema::SemaBuiltinElementwiseTernaryMath(CallExpr *TheCall) {
}
bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return true;
ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
@@ -19689,8 +14379,8 @@ bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) {
return false;
}
-bool Sema::SemaBuiltinNonDeterministicValue(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 1))
+bool Sema::BuiltinNonDeterministicValue(CallExpr *TheCall) {
+ if (checkArgCount(TheCall, 1))
return true;
ExprResult Arg = TheCall->getArg(0);
@@ -19704,9 +14394,9 @@ bool Sema::SemaBuiltinNonDeterministicValue(CallExpr *TheCall) {
return false;
}
-ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall,
- ExprResult CallResult) {
- if (checkArgCount(*this, TheCall, 1))
+ExprResult Sema::BuiltinMatrixTranspose(CallExpr *TheCall,
+ ExprResult CallResult) {
+ if (checkArgCount(TheCall, 1))
return ExprError();
ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0));
@@ -19754,14 +14444,14 @@ getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) {
return Dim;
}
-ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
- ExprResult CallResult) {
+ExprResult Sema::BuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
+ ExprResult CallResult) {
if (!getLangOpts().MatrixTypes) {
Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled);
return ExprError();
}
- if (checkArgCount(*this, TheCall, 4))
+ if (checkArgCount(TheCall, 4))
return ExprError();
unsigned PtrArgIdx = 0;
@@ -19870,9 +14560,9 @@ ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
return CallResult;
}
-ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
- ExprResult CallResult) {
- if (checkArgCount(*this, TheCall, 3))
+ExprResult Sema::BuiltinMatrixColumnMajorStore(CallExpr *TheCall,
+ ExprResult CallResult) {
+ if (checkArgCount(TheCall, 3))
return ExprError();
unsigned PtrArgIdx = 1;
@@ -19968,172 +14658,6 @@ ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
return CallResult;
}
-/// Checks the argument at the given index is a WebAssembly table and if it
-/// is, sets ElTy to the element type.
-static bool CheckWasmBuiltinArgIsTable(Sema &S, CallExpr *E, unsigned ArgIndex,
- QualType &ElTy) {
- Expr *ArgExpr = E->getArg(ArgIndex);
- const auto *ATy = dyn_cast<ArrayType>(ArgExpr->getType());
- if (!ATy || !ATy->getElementType().isWebAssemblyReferenceType()) {
- return S.Diag(ArgExpr->getBeginLoc(),
- diag::err_wasm_builtin_arg_must_be_table_type)
- << ArgIndex + 1 << ArgExpr->getSourceRange();
- }
- ElTy = ATy->getElementType();
- return false;
-}
-
-/// Checks the argument at the given index is an integer.
-static bool CheckWasmBuiltinArgIsInteger(Sema &S, CallExpr *E,
- unsigned ArgIndex) {
- Expr *ArgExpr = E->getArg(ArgIndex);
- if (!ArgExpr->getType()->isIntegerType()) {
- return S.Diag(ArgExpr->getBeginLoc(),
- diag::err_wasm_builtin_arg_must_be_integer_type)
- << ArgIndex + 1 << ArgExpr->getSourceRange();
- }
- return false;
-}
-
-/// Check that the first argument is a WebAssembly table, and the second
-/// is an index to use as index into the table.
-bool Sema::BuiltinWasmTableGet(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 2))
- return true;
-
- QualType ElTy;
- if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
- return true;
-
- if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
- return true;
-
- // If all is well, we set the type of TheCall to be the type of the
- // element of the table.
- // i.e. a table.get on an externref table has type externref,
- // or whatever the type of the table element is.
- TheCall->setType(ElTy);
-
- return false;
-}
-
-/// Check that the first argumnet is a WebAssembly table, the second is
-/// an index to use as index into the table and the third is the reference
-/// type to set into the table.
-bool Sema::BuiltinWasmTableSet(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 3))
- return true;
-
- QualType ElTy;
- if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
- return true;
-
- if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
- return true;
-
- if (!Context.hasSameType(ElTy, TheCall->getArg(2)->getType()))
- return true;
-
- return false;
-}
-
-/// Check that the argument is a WebAssembly table.
-bool Sema::BuiltinWasmTableSize(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 1))
- return true;
-
- QualType ElTy;
- if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
- return true;
-
- return false;
-}
-
-/// Check that the first argument is a WebAssembly table, the second is the
-/// value to use for new elements (of a type matching the table type), the
-/// third value is an integer.
-bool Sema::BuiltinWasmTableGrow(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 3))
- return true;
-
- QualType ElTy;
- if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
- return true;
-
- Expr *NewElemArg = TheCall->getArg(1);
- if (!Context.hasSameType(ElTy, NewElemArg->getType())) {
- return Diag(NewElemArg->getBeginLoc(),
- diag::err_wasm_builtin_arg_must_match_table_element_type)
- << 2 << 1 << NewElemArg->getSourceRange();
- }
-
- if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 2))
- return true;
-
- return false;
-}
-
-/// Check that the first argument is a WebAssembly table, the second is an
-/// integer, the third is the value to use to fill the table (of a type
-/// matching the table type), and the fourth is an integer.
-bool Sema::BuiltinWasmTableFill(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 4))
- return true;
-
- QualType ElTy;
- if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
- return true;
-
- if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
- return true;
-
- Expr *NewElemArg = TheCall->getArg(2);
- if (!Context.hasSameType(ElTy, NewElemArg->getType())) {
- return Diag(NewElemArg->getBeginLoc(),
- diag::err_wasm_builtin_arg_must_match_table_element_type)
- << 3 << 1 << NewElemArg->getSourceRange();
- }
-
- if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 3))
- return true;
-
- return false;
-}
-
-/// Check that the first argument is a WebAssembly table, the second is also a
-/// WebAssembly table (of the same element type), and the third to fifth
-/// arguments are integers.
-bool Sema::BuiltinWasmTableCopy(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 5))
- return true;
-
- QualType XElTy;
- if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, XElTy))
- return true;
-
- QualType YElTy;
- if (CheckWasmBuiltinArgIsTable(*this, TheCall, 1, YElTy))
- return true;
-
- Expr *TableYArg = TheCall->getArg(1);
- if (!Context.hasSameType(XElTy, YElTy)) {
- return Diag(TableYArg->getBeginLoc(),
- diag::err_wasm_builtin_arg_must_match_table_element_type)
- << 2 << 1 << TableYArg->getSourceRange();
- }
-
- for (int I = 2; I <= 4; I++) {
- if (CheckWasmBuiltinArgIsInteger(*this, TheCall, I))
- return true;
- }
-
- return false;
-}
-
-/// \brief Enforce the bounds of a TCB
-/// CheckTCBEnforcement - Enforces that every function in a named TCB only
-/// directly calls other functions in the same TCB as marked by the enforce_tcb
-/// and enforce_tcb_leaf attributes.
void Sema::CheckTCBEnforcement(const SourceLocation CallExprLoc,
const NamedDecl *Callee) {
// This warning does not make sense in code that has no runtime behavior.
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
index c44be0df9b0a..88d4732c7d5c 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/OperationKinds.h"
#include "clang/AST/QualTypeNames.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/Type.h"
@@ -40,7 +41,9 @@
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaCodeCompletion.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaObjC.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallBitVector.h"
@@ -244,8 +247,8 @@ public:
/// Whether we should include code patterns in the completion
/// results.
bool includeCodePatterns() const {
- return SemaRef.CodeCompleter &&
- SemaRef.CodeCompleter->includeCodePatterns();
+ return SemaRef.CodeCompletion().CodeCompleter &&
+ SemaRef.CodeCompletion().CodeCompleter->includeCodePatterns();
}
/// Set the filter used for code-completion results.
@@ -764,6 +767,10 @@ getRequiredQualification(ASTContext &Context, const DeclContext *CurContext,
// Filter out names reserved for the implementation if they come from a
// system header.
static bool shouldIgnoreDueToReservedName(const NamedDecl *ND, Sema &SemaRef) {
+ // Debuggers want access to all identifiers, including reserved ones.
+ if (SemaRef.getLangOpts().DebuggerSupport)
+ return false;
+
ReservedIdentifierStatus Status = ND->isReserved(SemaRef.getLangOpts());
// Ignore reserved names for compiler provided decls.
if (isReservedInAllContexts(Status) && ND->getLocation().isInvalid())
@@ -1798,7 +1805,8 @@ static void AddTypeSpecifierResults(const LangOptions &LangOpts,
if (LangOpts.C99) {
// C99-specific
Results.AddResult(Result("_Complex", CCP_Type));
- Results.AddResult(Result("_Imaginary", CCP_Type));
+ if (!LangOpts.C2y)
+ Results.AddResult(Result("_Imaginary", CCP_Type));
Results.AddResult(Result("_Bool", CCP_Type));
Results.AddResult(Result("restrict", CCP_Type));
}
@@ -1857,9 +1865,9 @@ static void AddTypeSpecifierResults(const LangOptions &LangOpts,
Results.AddResult(Result("_Nullable", CCP_Type));
}
-static void AddStorageSpecifiers(Sema::ParserCompletionContext CCC,
- const LangOptions &LangOpts,
- ResultBuilder &Results) {
+static void
+AddStorageSpecifiers(SemaCodeCompletion::ParserCompletionContext CCC,
+ const LangOptions &LangOpts, ResultBuilder &Results) {
typedef CodeCompletionResult Result;
// Note: we don't suggest either "auto" or "register", because both
// are pointless as storage specifiers. Elsewhere, we suggest "auto"
@@ -1883,13 +1891,13 @@ static void AddStorageSpecifiers(Sema::ParserCompletionContext CCC,
}
}
-static void AddFunctionSpecifiers(Sema::ParserCompletionContext CCC,
- const LangOptions &LangOpts,
- ResultBuilder &Results) {
+static void
+AddFunctionSpecifiers(SemaCodeCompletion::ParserCompletionContext CCC,
+ const LangOptions &LangOpts, ResultBuilder &Results) {
typedef CodeCompletionResult Result;
switch (CCC) {
- case Sema::PCC_Class:
- case Sema::PCC_MemberTemplate:
+ case SemaCodeCompletion::PCC_Class:
+ case SemaCodeCompletion::PCC_MemberTemplate:
if (LangOpts.CPlusPlus) {
Results.AddResult(Result("explicit"));
Results.AddResult(Result("friend"));
@@ -1898,24 +1906,24 @@ static void AddFunctionSpecifiers(Sema::ParserCompletionContext CCC,
}
[[fallthrough]];
- case Sema::PCC_ObjCInterface:
- case Sema::PCC_ObjCImplementation:
- case Sema::PCC_Namespace:
- case Sema::PCC_Template:
+ case SemaCodeCompletion::PCC_ObjCInterface:
+ case SemaCodeCompletion::PCC_ObjCImplementation:
+ case SemaCodeCompletion::PCC_Namespace:
+ case SemaCodeCompletion::PCC_Template:
if (LangOpts.CPlusPlus || LangOpts.C99)
Results.AddResult(Result("inline"));
break;
- case Sema::PCC_ObjCInstanceVariableList:
- case Sema::PCC_Expression:
- case Sema::PCC_Statement:
- case Sema::PCC_TopLevelOrExpression:
- case Sema::PCC_ForInit:
- case Sema::PCC_Condition:
- case Sema::PCC_RecoveryInFunction:
- case Sema::PCC_Type:
- case Sema::PCC_ParenthesizedExpression:
- case Sema::PCC_LocalDeclarationSpecifiers:
+ case SemaCodeCompletion::PCC_ObjCInstanceVariableList:
+ case SemaCodeCompletion::PCC_Expression:
+ case SemaCodeCompletion::PCC_Statement:
+ case SemaCodeCompletion::PCC_TopLevelOrExpression:
+ case SemaCodeCompletion::PCC_ForInit:
+ case SemaCodeCompletion::PCC_Condition:
+ case SemaCodeCompletion::PCC_RecoveryInFunction:
+ case SemaCodeCompletion::PCC_Type:
+ case SemaCodeCompletion::PCC_ParenthesizedExpression:
+ case SemaCodeCompletion::PCC_LocalDeclarationSpecifiers:
break;
}
}
@@ -1954,31 +1962,31 @@ static void AddUsingAliasResult(CodeCompletionBuilder &Builder,
Results.AddResult(CodeCompletionResult(Builder.TakeString()));
}
-static bool WantTypesInContext(Sema::ParserCompletionContext CCC,
+static bool WantTypesInContext(SemaCodeCompletion::ParserCompletionContext CCC,
const LangOptions &LangOpts) {
switch (CCC) {
- case Sema::PCC_Namespace:
- case Sema::PCC_Class:
- case Sema::PCC_ObjCInstanceVariableList:
- case Sema::PCC_Template:
- case Sema::PCC_MemberTemplate:
- case Sema::PCC_Statement:
- case Sema::PCC_RecoveryInFunction:
- case Sema::PCC_Type:
- case Sema::PCC_ParenthesizedExpression:
- case Sema::PCC_LocalDeclarationSpecifiers:
- case Sema::PCC_TopLevelOrExpression:
+ case SemaCodeCompletion::PCC_Namespace:
+ case SemaCodeCompletion::PCC_Class:
+ case SemaCodeCompletion::PCC_ObjCInstanceVariableList:
+ case SemaCodeCompletion::PCC_Template:
+ case SemaCodeCompletion::PCC_MemberTemplate:
+ case SemaCodeCompletion::PCC_Statement:
+ case SemaCodeCompletion::PCC_RecoveryInFunction:
+ case SemaCodeCompletion::PCC_Type:
+ case SemaCodeCompletion::PCC_ParenthesizedExpression:
+ case SemaCodeCompletion::PCC_LocalDeclarationSpecifiers:
+ case SemaCodeCompletion::PCC_TopLevelOrExpression:
return true;
- case Sema::PCC_Expression:
- case Sema::PCC_Condition:
+ case SemaCodeCompletion::PCC_Expression:
+ case SemaCodeCompletion::PCC_Condition:
return LangOpts.CPlusPlus;
- case Sema::PCC_ObjCInterface:
- case Sema::PCC_ObjCImplementation:
+ case SemaCodeCompletion::PCC_ObjCInterface:
+ case SemaCodeCompletion::PCC_ObjCImplementation:
return false;
- case Sema::PCC_ForInit:
+ case SemaCodeCompletion::PCC_ForInit:
return LangOpts.CPlusPlus || LangOpts.ObjC || LangOpts.C99;
}
@@ -2110,8 +2118,6 @@ static void AddOverrideResults(ResultBuilder &Results,
// Generates a new CodeCompletionResult by taking this function and
// converting it into an override declaration with only one chunk in the
// final CodeCompletionString as a TypedTextChunk.
- std::string OverrideSignature;
- llvm::raw_string_ostream OS(OverrideSignature);
CodeCompletionResult CCR(Method, 0);
PrintingPolicy Policy =
getCompletionPrintingPolicy(S.getASTContext(), S.getPreprocessor());
@@ -2125,14 +2131,15 @@ static void AddOverrideResults(ResultBuilder &Results,
}
/// Add language constructs that show up for "ordinary" names.
-static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
- Sema &SemaRef, ResultBuilder &Results) {
+static void
+AddOrdinaryNameResults(SemaCodeCompletion::ParserCompletionContext CCC,
+ Scope *S, Sema &SemaRef, ResultBuilder &Results) {
CodeCompletionAllocator &Allocator = Results.getAllocator();
CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
typedef CodeCompletionResult Result;
switch (CCC) {
- case Sema::PCC_Namespace:
+ case SemaCodeCompletion::PCC_Namespace:
if (SemaRef.getLangOpts().CPlusPlus) {
if (Results.includeCodePatterns()) {
// namespace <identifier> { declarations }
@@ -2188,7 +2195,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
AddTypedefResult(Results);
[[fallthrough]];
- case Sema::PCC_Class:
+ case SemaCodeCompletion::PCC_Class:
if (SemaRef.getLangOpts().CPlusPlus) {
// Using declaration
Builder.AddTypedTextChunk("using");
@@ -2215,7 +2222,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
AddStaticAssertResult(Builder, Results, SemaRef.getLangOpts());
- if (CCC == Sema::PCC_Class) {
+ if (CCC == SemaCodeCompletion::PCC_Class) {
AddTypedefResult(Results);
bool IsNotInheritanceScope = !S->isClassInheritanceScope();
@@ -2246,8 +2253,8 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
}
[[fallthrough]];
- case Sema::PCC_Template:
- case Sema::PCC_MemberTemplate:
+ case SemaCodeCompletion::PCC_Template:
+ case SemaCodeCompletion::PCC_MemberTemplate:
if (SemaRef.getLangOpts().CPlusPlus && Results.includeCodePatterns()) {
// template < parameters >
Builder.AddTypedTextChunk("template");
@@ -2263,25 +2270,25 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
AddFunctionSpecifiers(CCC, SemaRef.getLangOpts(), Results);
break;
- case Sema::PCC_ObjCInterface:
+ case SemaCodeCompletion::PCC_ObjCInterface:
AddObjCInterfaceResults(SemaRef.getLangOpts(), Results, true);
AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
AddFunctionSpecifiers(CCC, SemaRef.getLangOpts(), Results);
break;
- case Sema::PCC_ObjCImplementation:
+ case SemaCodeCompletion::PCC_ObjCImplementation:
AddObjCImplementationResults(SemaRef.getLangOpts(), Results, true);
AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
AddFunctionSpecifiers(CCC, SemaRef.getLangOpts(), Results);
break;
- case Sema::PCC_ObjCInstanceVariableList:
+ case SemaCodeCompletion::PCC_ObjCInstanceVariableList:
AddObjCVisibilityResults(SemaRef.getLangOpts(), Results, true);
break;
- case Sema::PCC_RecoveryInFunction:
- case Sema::PCC_TopLevelOrExpression:
- case Sema::PCC_Statement: {
+ case SemaCodeCompletion::PCC_RecoveryInFunction:
+ case SemaCodeCompletion::PCC_TopLevelOrExpression:
+ case SemaCodeCompletion::PCC_Statement: {
if (SemaRef.getLangOpts().CPlusPlus11)
AddUsingAliasResult(Builder, Results);
@@ -2518,15 +2525,15 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
[[fallthrough]];
// Fall through (for statement expressions).
- case Sema::PCC_ForInit:
- case Sema::PCC_Condition:
+ case SemaCodeCompletion::PCC_ForInit:
+ case SemaCodeCompletion::PCC_Condition:
AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
// Fall through: conditions and statements can have expressions.
[[fallthrough]];
- case Sema::PCC_ParenthesizedExpression:
+ case SemaCodeCompletion::PCC_ParenthesizedExpression:
if (SemaRef.getLangOpts().ObjCAutoRefCount &&
- CCC == Sema::PCC_ParenthesizedExpression) {
+ CCC == SemaCodeCompletion::PCC_ParenthesizedExpression) {
// (__bridge <type>)<expression>
Builder.AddTypedTextChunk("__bridge");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -2554,7 +2561,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
// Fall through
[[fallthrough]];
- case Sema::PCC_Expression: {
+ case SemaCodeCompletion::PCC_Expression: {
if (SemaRef.getLangOpts().CPlusPlus) {
// 'this', if we're in a non-static member function.
addThisCompletion(SemaRef, Results);
@@ -2752,15 +2759,15 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
break;
}
- case Sema::PCC_Type:
- case Sema::PCC_LocalDeclarationSpecifiers:
+ case SemaCodeCompletion::PCC_Type:
+ case SemaCodeCompletion::PCC_LocalDeclarationSpecifiers:
break;
}
if (WantTypesInContext(CCC, SemaRef.getLangOpts()))
AddTypeSpecifierResults(SemaRef.getLangOpts(), Results);
- if (SemaRef.getLangOpts().CPlusPlus && CCC != Sema::PCC_Type)
+ if (SemaRef.getLangOpts().CPlusPlus && CCC != SemaCodeCompletion::PCC_Type)
Results.AddResult(Result("operator"));
}
@@ -3178,7 +3185,6 @@ static void AddTemplateParameterChunks(
else if (const auto *TC = TTP->getTypeConstraint()) {
llvm::raw_string_ostream OS(PlaceholderStr);
TC->print(OS, Policy);
- OS.flush();
} else
PlaceholderStr = "class";
@@ -3686,7 +3692,7 @@ CodeCompletionString *CodeCompletionResult::createCodeCompletionStringForDecl(
std::string Keyword;
if (Idx > StartParameter)
Result.AddChunk(CodeCompletionString::CK_HorizontalSpace);
- if (IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Idx))
+ if (const IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Idx))
Keyword += II->getName();
Keyword += ":";
if (Idx < StartParameter || AllParametersAreInformative)
@@ -3715,7 +3721,7 @@ CodeCompletionString *CodeCompletionResult::createCodeCompletionStringForDecl(
Arg = "(" + formatObjCParamQualifiers((*P)->getObjCDeclQualifier(),
ParamType);
Arg += ParamType.getAsString(Policy) + ")";
- if (IdentifierInfo *II = (*P)->getIdentifier())
+ if (const IdentifierInfo *II = (*P)->getIdentifier())
if (DeclaringEntity || AllParametersAreInformative)
Arg += II->getName();
}
@@ -4017,7 +4023,7 @@ CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
std::string Name;
llvm::raw_string_ostream OS(Name);
FDecl->getDeclName().print(OS, Policy);
- Result.AddTextChunk(Result.getAllocator().CopyString(OS.str()));
+ Result.AddTextChunk(Result.getAllocator().CopyString(Name));
} else {
// Function without a declaration. Just give the return type.
Result.AddResultTypeChunk(Result.getAllocator().CopyString(
@@ -4044,18 +4050,17 @@ unsigned clang::getMacroUsagePriority(StringRef MacroName,
unsigned Priority = CCP_Macro;
// Treat the "nil", "Nil" and "NULL" macros as null pointer constants.
- if (MacroName.equals("nil") || MacroName.equals("NULL") ||
- MacroName.equals("Nil")) {
+ if (MacroName == "nil" || MacroName == "NULL" || MacroName == "Nil") {
Priority = CCP_Constant;
if (PreferredTypeIsPointer)
Priority = Priority / CCF_SimilarTypeMatch;
}
// Treat "YES", "NO", "true", and "false" as constants.
- else if (MacroName.equals("YES") || MacroName.equals("NO") ||
- MacroName.equals("true") || MacroName.equals("false"))
+ else if (MacroName == "YES" || MacroName == "NO" || MacroName == "true" ||
+ MacroName == "false")
Priority = CCP_Constant;
// Treat "bool" as a type.
- else if (MacroName.equals("bool"))
+ else if (MacroName == "bool")
Priority = CCP_Type + (LangOpts.ObjC ? CCD_bool_in_ObjC : 0);
return Priority;
@@ -4236,59 +4241,60 @@ static void HandleCodeCompleteResults(Sema *S,
}
static CodeCompletionContext
-mapCodeCompletionContext(Sema &S, Sema::ParserCompletionContext PCC) {
+mapCodeCompletionContext(Sema &S,
+ SemaCodeCompletion::ParserCompletionContext PCC) {
switch (PCC) {
- case Sema::PCC_Namespace:
+ case SemaCodeCompletion::PCC_Namespace:
return CodeCompletionContext::CCC_TopLevel;
- case Sema::PCC_Class:
+ case SemaCodeCompletion::PCC_Class:
return CodeCompletionContext::CCC_ClassStructUnion;
- case Sema::PCC_ObjCInterface:
+ case SemaCodeCompletion::PCC_ObjCInterface:
return CodeCompletionContext::CCC_ObjCInterface;
- case Sema::PCC_ObjCImplementation:
+ case SemaCodeCompletion::PCC_ObjCImplementation:
return CodeCompletionContext::CCC_ObjCImplementation;
- case Sema::PCC_ObjCInstanceVariableList:
+ case SemaCodeCompletion::PCC_ObjCInstanceVariableList:
return CodeCompletionContext::CCC_ObjCIvarList;
- case Sema::PCC_Template:
- case Sema::PCC_MemberTemplate:
+ case SemaCodeCompletion::PCC_Template:
+ case SemaCodeCompletion::PCC_MemberTemplate:
if (S.CurContext->isFileContext())
return CodeCompletionContext::CCC_TopLevel;
if (S.CurContext->isRecord())
return CodeCompletionContext::CCC_ClassStructUnion;
return CodeCompletionContext::CCC_Other;
- case Sema::PCC_RecoveryInFunction:
+ case SemaCodeCompletion::PCC_RecoveryInFunction:
return CodeCompletionContext::CCC_Recovery;
- case Sema::PCC_ForInit:
+ case SemaCodeCompletion::PCC_ForInit:
if (S.getLangOpts().CPlusPlus || S.getLangOpts().C99 ||
S.getLangOpts().ObjC)
return CodeCompletionContext::CCC_ParenthesizedExpression;
else
return CodeCompletionContext::CCC_Expression;
- case Sema::PCC_Expression:
+ case SemaCodeCompletion::PCC_Expression:
return CodeCompletionContext::CCC_Expression;
- case Sema::PCC_Condition:
+ case SemaCodeCompletion::PCC_Condition:
return CodeCompletionContext(CodeCompletionContext::CCC_Expression,
S.getASTContext().BoolTy);
- case Sema::PCC_Statement:
+ case SemaCodeCompletion::PCC_Statement:
return CodeCompletionContext::CCC_Statement;
- case Sema::PCC_Type:
+ case SemaCodeCompletion::PCC_Type:
return CodeCompletionContext::CCC_Type;
- case Sema::PCC_ParenthesizedExpression:
+ case SemaCodeCompletion::PCC_ParenthesizedExpression:
return CodeCompletionContext::CCC_ParenthesizedExpression;
- case Sema::PCC_LocalDeclarationSpecifiers:
+ case SemaCodeCompletion::PCC_LocalDeclarationSpecifiers:
return CodeCompletionContext::CCC_Type;
- case Sema::PCC_TopLevelOrExpression:
+ case SemaCodeCompletion::PCC_TopLevelOrExpression:
return CodeCompletionContext::CCC_TopLevelOrExpression;
}
@@ -4335,7 +4341,7 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
std::string Str;
llvm::raw_string_ostream OS(Str);
NNS->print(OS, Policy);
- Builder.AddTextChunk(Results.getAllocator().CopyString(OS.str()));
+ Builder.AddTextChunk(Results.getAllocator().CopyString(Str));
}
} else if (!InContext->Equals(Overridden->getDeclContext()))
continue;
@@ -4361,10 +4367,10 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
}
}
-void Sema::CodeCompleteModuleImport(SourceLocation ImportLoc,
- ModuleIdPath Path) {
+void SemaCodeCompletion::CodeCompleteModuleImport(SourceLocation ImportLoc,
+ ModuleIdPath Path) {
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
@@ -4375,7 +4381,7 @@ void Sema::CodeCompleteModuleImport(SourceLocation ImportLoc,
if (Path.empty()) {
// Enumerate all top-level modules.
SmallVector<Module *, 8> Modules;
- PP.getHeaderSearchInfo().collectAllModules(Modules);
+ SemaRef.PP.getHeaderSearchInfo().collectAllModules(Modules);
for (unsigned I = 0, N = Modules.size(); I != N; ++I) {
Builder.AddTypedTextChunk(
Builder.getAllocator().CopyString(Modules[I]->Name));
@@ -4386,9 +4392,9 @@ void Sema::CodeCompleteModuleImport(SourceLocation ImportLoc,
}
} else if (getLangOpts().Modules) {
// Load the named module.
- Module *Mod =
- PP.getModuleLoader().loadModule(ImportLoc, Path, Module::AllVisible,
- /*IsInclusionDirective=*/false);
+ Module *Mod = SemaRef.PP.getModuleLoader().loadModule(
+ ImportLoc, Path, Module::AllVisible,
+ /*IsInclusionDirective=*/false);
// Enumerate submodules.
if (Mod) {
for (auto *Submodule : Mod->submodules()) {
@@ -4402,15 +4408,16 @@ void Sema::CodeCompleteModuleImport(SourceLocation ImportLoc,
}
}
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteOrdinaryName(Scope *S,
- ParserCompletionContext CompletionContext) {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+void SemaCodeCompletion::CodeCompleteOrdinaryName(
+ Scope *S, SemaCodeCompletion::ParserCompletionContext CompletionContext) {
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
- mapCodeCompletionContext(*this, CompletionContext));
+ mapCodeCompletionContext(SemaRef, CompletionContext));
Results.EnterNewScope();
// Determine how to filter results, e.g., so that the names of
@@ -4441,7 +4448,7 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
Results.setFilter(&ResultBuilder::IsOrdinaryNonTypeName);
if (getLangOpts().CPlusPlus)
- MaybeAddOverrideCalls(*this, /*InContext=*/nullptr, Results);
+ MaybeAddOverrideCalls(SemaRef, /*InContext=*/nullptr, Results);
break;
case PCC_RecoveryInFunction:
@@ -4451,17 +4458,17 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
// If we are in a C++ non-static member function, check the qualifiers on
// the member function to filter/prioritize the results list.
- auto ThisType = getCurrentThisType();
+ auto ThisType = SemaRef.getCurrentThisType();
if (!ThisType.isNull())
Results.setObjectTypeQualifiers(ThisType->getPointeeType().getQualifiers(),
VK_LValue);
- CodeCompletionDeclConsumer Consumer(Results, CurContext);
- LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
- CodeCompleter->includeGlobals(),
- CodeCompleter->loadExternal());
+ CodeCompletionDeclConsumer Consumer(Results, SemaRef.CurContext);
+ SemaRef.LookupVisibleDecls(S, SemaRef.LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
- AddOrdinaryNameResults(CompletionContext, S, *this, Results);
+ AddOrdinaryNameResults(CompletionContext, S, SemaRef, Results);
Results.ExitScope();
switch (CompletionContext) {
@@ -4489,24 +4496,25 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
}
if (CodeCompleter->includeMacros())
- AddMacroResults(PP, Results, CodeCompleter->loadExternal(), false);
+ AddMacroResults(SemaRef.PP, Results, CodeCompleter->loadExternal(), false);
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
- ParsedType Receiver,
- ArrayRef<IdentifierInfo *> SelIdents,
- bool AtArgumentExpression, bool IsSuper,
- ResultBuilder &Results);
-
-void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
- bool AllowNonIdentifiers,
- bool AllowNestedNameSpecifiers) {
+static void
+AddClassMessageCompletions(Sema &SemaRef, Scope *S, ParsedType Receiver,
+ ArrayRef<const IdentifierInfo *> SelIdents,
+ bool AtArgumentExpression, bool IsSuper,
+ ResultBuilder &Results);
+
+void SemaCodeCompletion::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
+ bool AllowNonIdentifiers,
+ bool AllowNestedNameSpecifiers) {
typedef CodeCompletionResult Result;
ResultBuilder Results(
- *this, CodeCompleter->getAllocator(),
+ SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
AllowNestedNameSpecifiers
// FIXME: Try to separate codepath leading here to deduce whether we
@@ -4535,10 +4543,10 @@ void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
if (AllowNestedNameSpecifiers) {
Results.allowNestedNameSpecifiers();
Results.setFilter(&ResultBuilder::IsImpossibleToSatisfy);
- CodeCompletionDeclConsumer Consumer(Results, CurContext);
- LookupVisibleDecls(S, LookupNestedNameSpecifierName, Consumer,
- CodeCompleter->includeGlobals(),
- CodeCompleter->loadExternal());
+ CodeCompletionDeclConsumer Consumer(Results, SemaRef.CurContext);
+ SemaRef.LookupVisibleDecls(S, Sema::LookupNestedNameSpecifierName,
+ Consumer, CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
Results.setFilter(nullptr);
}
}
@@ -4560,15 +4568,16 @@ void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
0) {
ParsedType T = DS.getRepAsType();
if (!T.get().isNull() && T.get()->isObjCObjectOrInterfaceType())
- AddClassMessageCompletions(*this, S, T, std::nullopt, false, false,
+ AddClassMessageCompletions(SemaRef, S, T, std::nullopt, false, false,
Results);
}
// Note that we intentionally suppress macro results here, since we do not
// encourage using macros to produce the names of entities.
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
static const char *underscoreAttrScope(llvm::StringRef Scope) {
@@ -4587,12 +4596,12 @@ static const char *noUnderscoreAttrScope(llvm::StringRef Scope) {
return nullptr;
}
-void Sema::CodeCompleteAttribute(AttributeCommonInfo::Syntax Syntax,
- AttributeCompletion Completion,
- const IdentifierInfo *InScope) {
+void SemaCodeCompletion::CodeCompleteAttribute(
+ AttributeCommonInfo::Syntax Syntax, AttributeCompletion Completion,
+ const IdentifierInfo *InScope) {
if (Completion == AttributeCompletion::None)
return;
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Attribute);
@@ -4621,7 +4630,8 @@ void Sema::CodeCompleteAttribute(AttributeCommonInfo::Syntax Syntax,
llvm::DenseSet<llvm::StringRef> FoundScopes;
auto AddCompletions = [&](const ParsedAttrInfo &A) {
- if (A.IsTargetSpecific && !A.existsInTarget(Context.getTargetInfo()))
+ if (A.IsTargetSpecific &&
+ !A.existsInTarget(getASTContext().getTargetInfo()))
return;
if (!A.acceptsLangOpts(getLangOpts()))
return;
@@ -4719,11 +4729,12 @@ void Sema::CodeCompleteAttribute(AttributeCommonInfo::Syntax Syntax,
for (const auto &Entry : ParsedAttrInfoRegistry::entries())
AddCompletions(*Entry.instantiate());
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-struct Sema::CodeCompleteExpressionData {
+struct SemaCodeCompletion::CodeCompleteExpressionData {
CodeCompleteExpressionData(QualType PreferredType = QualType(),
bool IsParenthesized = false)
: PreferredType(PreferredType), IntegralConstantExpression(false),
@@ -4836,10 +4847,10 @@ static void AddLambdaCompletion(ResultBuilder &Results,
/// Perform code-completion in an expression context when we know what
/// type we're looking for.
-void Sema::CodeCompleteExpression(Scope *S,
- const CodeCompleteExpressionData &Data) {
+void SemaCodeCompletion::CodeCompleteExpression(
+ Scope *S, const CodeCompleteExpressionData &Data) {
ResultBuilder Results(
- *this, CodeCompleter->getAllocator(),
+ SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext(
Data.IsParenthesized
@@ -4864,13 +4875,13 @@ void Sema::CodeCompleteExpression(Scope *S,
for (unsigned I = 0, N = Data.IgnoreDecls.size(); I != N; ++I)
Results.Ignore(Data.IgnoreDecls[I]);
- CodeCompletionDeclConsumer Consumer(Results, CurContext);
- LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
- CodeCompleter->includeGlobals(),
- CodeCompleter->loadExternal());
+ CodeCompletionDeclConsumer Consumer(Results, SemaRef.CurContext);
+ SemaRef.LookupVisibleDecls(S, Sema::LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
Results.EnterNewScope();
- AddOrdinaryNameResults(PCC, S, *this, Results);
+ AddOrdinaryNameResults(PCC, S, SemaRef, Results);
Results.ExitScope();
bool PreferredTypeIsPointer = false;
@@ -4884,7 +4895,8 @@ void Sema::CodeCompleteExpression(Scope *S,
Enum = Def;
// FIXME: collect covered enumerators in cases like:
// if (x == my_enum::one) { ... } else if (x == ^) {}
- AddEnumerators(Results, Context, Enum, CurContext, CoveredEnumerators());
+ AddEnumerators(Results, getASTContext(), Enum, SemaRef.CurContext,
+ CoveredEnumerators());
}
}
@@ -4893,7 +4905,7 @@ void Sema::CodeCompleteExpression(Scope *S,
AddPrettyFunctionResults(getLangOpts(), Results);
if (CodeCompleter->includeMacros())
- AddMacroResults(PP, Results, CodeCompleter->loadExternal(), false,
+ AddMacroResults(SemaRef.PP, Results, CodeCompleter->loadExternal(), false,
PreferredTypeIsPointer);
// Complete a lambda expression when preferred type is a function.
@@ -4903,18 +4915,20 @@ void Sema::CodeCompleteExpression(Scope *S,
AddLambdaCompletion(Results, F->getParamTypes(), getLangOpts());
}
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteExpression(Scope *S, QualType PreferredType,
- bool IsParenthesized) {
+void SemaCodeCompletion::CodeCompleteExpression(Scope *S,
+ QualType PreferredType,
+ bool IsParenthesized) {
return CodeCompleteExpression(
S, CodeCompleteExpressionData(PreferredType, IsParenthesized));
}
-void Sema::CodeCompletePostfixExpression(Scope *S, ExprResult E,
- QualType PreferredType) {
+void SemaCodeCompletion::CodeCompletePostfixExpression(Scope *S, ExprResult E,
+ QualType PreferredType) {
if (E.isInvalid())
CodeCompleteExpression(S, PreferredType);
else if (getLangOpts().ObjC)
@@ -4923,7 +4937,7 @@ void Sema::CodeCompletePostfixExpression(Scope *S, ExprResult E,
/// The set of properties that have already been added, referenced by
/// property name.
-typedef llvm::SmallPtrSet<IdentifierInfo *, 16> AddedPropertiesSet;
+typedef llvm::SmallPtrSet<const IdentifierInfo *, 16> AddedPropertiesSet;
/// Retrieve the container definition, if any?
static ObjCContainerDecl *getContainerDef(ObjCContainerDecl *Container) {
@@ -5085,7 +5099,7 @@ AddObjCProperties(const CodeCompletionContext &CCContext,
PrintingPolicy Policy = getCompletionPrintingPolicy(Results.getSema());
// Adds a method result
const auto AddMethod = [&](const ObjCMethodDecl *M) {
- IdentifierInfo *Name = M->getSelector().getIdentifierInfoForSlot(0);
+ const IdentifierInfo *Name = M->getSelector().getIdentifierInfoForSlot(0);
if (!Name)
return;
if (!AddedProperties.insert(Name).second)
@@ -5177,10 +5191,11 @@ AddRecordMembersCompletionResults(Sema &SemaRef, ResultBuilder &Results,
if (AccessOpFixIt)
FixIts.emplace_back(*AccessOpFixIt);
CodeCompletionDeclConsumer Consumer(Results, RD, BaseType, std::move(FixIts));
- SemaRef.LookupVisibleDecls(RD, Sema::LookupMemberName, Consumer,
- SemaRef.CodeCompleter->includeGlobals(),
- /*IncludeDependentBases=*/true,
- SemaRef.CodeCompleter->loadExternal());
+ SemaRef.LookupVisibleDecls(
+ RD, Sema::LookupMemberName, Consumer,
+ SemaRef.CodeCompletion().CodeCompleter->includeGlobals(),
+ /*IncludeDependentBases=*/true,
+ SemaRef.CodeCompletion().CodeCompleter->loadExternal());
if (SemaRef.getLangOpts().CPlusPlus) {
if (!Results.empty()) {
@@ -5674,6 +5689,17 @@ QualType getApproximateType(const Expr *E) {
return getApproximateType(VD->getInit());
}
}
+ if (const auto *UO = llvm::dyn_cast<UnaryOperator>(E)) {
+ if (UO->getOpcode() == UnaryOperatorKind::UO_Deref) {
+ // We recurse into the subexpression because it could be of dependent
+ // type.
+ if (auto Pointee = getApproximateType(UO->getSubExpr())->getPointeeType();
+ !Pointee.isNull())
+ return Pointee;
+ // Our caller expects a non-null result, even though the SubType is
+ // supposed to have a pointee. Fall through to Unresolved anyway.
+ }
+ }
return Unresolved;
}
@@ -5692,17 +5718,16 @@ Expr *unwrapParenList(Expr *Base) {
} // namespace
-void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
- Expr *OtherOpBase,
- SourceLocation OpLoc, bool IsArrow,
- bool IsBaseExprStatement,
- QualType PreferredType) {
+void SemaCodeCompletion::CodeCompleteMemberReferenceExpr(
+ Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow,
+ bool IsBaseExprStatement, QualType PreferredType) {
Base = unwrapParenList(Base);
OtherOpBase = unwrapParenList(OtherOpBase);
if (!Base || !CodeCompleter)
return;
- ExprResult ConvertedBase = PerformMemberExprBaseConversion(Base, IsArrow);
+ ExprResult ConvertedBase =
+ SemaRef.PerformMemberExprBaseConversion(Base, IsArrow);
if (ConvertedBase.isInvalid())
return;
QualType ConvertedBaseType = getApproximateType(ConvertedBase.get());
@@ -5727,7 +5752,7 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
CodeCompletionContext CCContext(contextKind, ConvertedBaseType);
CCContext.setPreferredType(PreferredType);
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(), CCContext,
&ResultBuilder::IsMember);
@@ -5736,7 +5761,8 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
if (!Base)
return false;
- ExprResult ConvertedBase = PerformMemberExprBaseConversion(Base, IsArrow);
+ ExprResult ConvertedBase =
+ SemaRef.PerformMemberExprBaseConversion(Base, IsArrow);
if (ConvertedBase.isInvalid())
return false;
Base = ConvertedBase.get();
@@ -5759,7 +5785,7 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
}
if (RecordDecl *RD = getAsRecordDecl(BaseType)) {
- AddRecordMembersCompletionResults(*this, Results, S, BaseType, BaseKind,
+ AddRecordMembersCompletionResults(SemaRef, Results, S, BaseType, BaseKind,
RD, std::move(AccessOpFixIt));
} else if (const auto *TTPT =
dyn_cast<TemplateTypeParmType>(BaseType.getTypePtr())) {
@@ -5769,7 +5795,7 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
if (R.Operator != Operator)
continue;
CodeCompletionResult Result(
- R.render(*this, CodeCompleter->getAllocator(),
+ R.render(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo()));
if (AccessOpFixIt)
Result.FixIts.push_back(*AccessOpFixIt);
@@ -5789,14 +5815,14 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
// Add property results based on our interface.
assert(ObjCPtr && "Non-NULL pointer guaranteed above!");
AddObjCProperties(CCContext, ObjCPtr->getInterfaceDecl(), true,
- /*AllowNullaryMethods=*/true, CurContext,
+ /*AllowNullaryMethods=*/true, SemaRef.CurContext,
AddedProperties, Results, IsBaseExprStatement);
}
// Add properties from the protocols in a qualified interface.
for (auto *I : BaseType->castAs<ObjCObjectPointerType>()->quals())
AddObjCProperties(CCContext, I, true, /*AllowNullaryMethods=*/true,
- CurContext, AddedProperties, Results,
+ SemaRef.CurContext, AddedProperties, Results,
IsBaseExprStatement, /*IsClassProperty*/ false,
/*InOriginalClass*/ false);
} else if ((IsArrow && BaseType->isObjCObjectPointerType()) ||
@@ -5818,9 +5844,10 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
if (Class) {
CodeCompletionDeclConsumer Consumer(Results, Class, BaseType);
Results.setFilter(&ResultBuilder::IsObjCIvar);
- LookupVisibleDecls(
- Class, LookupMemberName, Consumer, CodeCompleter->includeGlobals(),
- /*IncludeDependentBases=*/false, CodeCompleter->loadExternal());
+ SemaRef.LookupVisibleDecls(Class, Sema::LookupMemberName, Consumer,
+ CodeCompleter->includeGlobals(),
+ /*IncludeDependentBases=*/false,
+ CodeCompleter->loadExternal());
}
}
@@ -5845,35 +5872,37 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
return;
// Hand off the results found for code completion.
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteObjCClassPropertyRefExpr(Scope *S,
- IdentifierInfo &ClassName,
- SourceLocation ClassNameLoc,
- bool IsBaseExprStatement) {
- IdentifierInfo *ClassNamePtr = &ClassName;
- ObjCInterfaceDecl *IFace = getObjCInterfaceDecl(ClassNamePtr, ClassNameLoc);
+void SemaCodeCompletion::CodeCompleteObjCClassPropertyRefExpr(
+ Scope *S, const IdentifierInfo &ClassName, SourceLocation ClassNameLoc,
+ bool IsBaseExprStatement) {
+ const IdentifierInfo *ClassNamePtr = &ClassName;
+ ObjCInterfaceDecl *IFace =
+ SemaRef.ObjC().getObjCInterfaceDecl(ClassNamePtr, ClassNameLoc);
if (!IFace)
return;
CodeCompletionContext CCContext(
CodeCompletionContext::CCC_ObjCPropertyAccess);
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(), CCContext,
&ResultBuilder::IsMember);
Results.EnterNewScope();
AddedPropertiesSet AddedProperties;
AddObjCProperties(CCContext, IFace, true,
- /*AllowNullaryMethods=*/true, CurContext, AddedProperties,
- Results, IsBaseExprStatement,
+ /*AllowNullaryMethods=*/true, SemaRef.CurContext,
+ AddedProperties, Results, IsBaseExprStatement,
/*IsClassProperty=*/true);
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteTag(Scope *S, unsigned TagSpec) {
+void SemaCodeCompletion::CodeCompleteTag(Scope *S, unsigned TagSpec) {
if (!CodeCompleter)
return;
@@ -5902,26 +5931,27 @@ void Sema::CodeCompleteTag(Scope *S, unsigned TagSpec) {
llvm_unreachable("Unknown type specifier kind in CodeCompleteTag");
}
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(), ContextKind);
- CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ CodeCompletionDeclConsumer Consumer(Results, SemaRef.CurContext);
// First pass: look for tags.
Results.setFilter(Filter);
- LookupVisibleDecls(S, LookupTagName, Consumer,
- CodeCompleter->includeGlobals(),
- CodeCompleter->loadExternal());
+ SemaRef.LookupVisibleDecls(S, Sema::LookupTagName, Consumer,
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
if (CodeCompleter->includeGlobals()) {
// Second pass: look for nested name specifiers.
Results.setFilter(&ResultBuilder::IsNestedNameSpecifier);
- LookupVisibleDecls(S, LookupNestedNameSpecifierName, Consumer,
- CodeCompleter->includeGlobals(),
- CodeCompleter->loadExternal());
+ SemaRef.LookupVisibleDecls(S, Sema::LookupNestedNameSpecifierName, Consumer,
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
}
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
static void AddTypeQualifierResults(DeclSpec &DS, ResultBuilder &Results,
@@ -5938,25 +5968,26 @@ static void AddTypeQualifierResults(DeclSpec &DS, ResultBuilder &Results,
Results.AddResult("__unaligned");
}
-void Sema::CodeCompleteTypeQualifiers(DeclSpec &DS) {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+void SemaCodeCompletion::CodeCompleteTypeQualifiers(DeclSpec &DS) {
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_TypeQualifiers);
Results.EnterNewScope();
- AddTypeQualifierResults(DS, Results, LangOpts);
+ AddTypeQualifierResults(DS, Results, getLangOpts());
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
- const VirtSpecifiers *VS) {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+void SemaCodeCompletion::CodeCompleteFunctionQualifiers(
+ DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS) {
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_TypeQualifiers);
Results.EnterNewScope();
- AddTypeQualifierResults(DS, Results, LangOpts);
- if (LangOpts.CPlusPlus11) {
+ AddTypeQualifierResults(DS, Results, getLangOpts());
+ if (getLangOpts().CPlusPlus11) {
Results.AddResult("noexcept");
if (D.getContext() == DeclaratorContext::Member && !D.isCtorOrDtor() &&
!D.isStaticMember()) {
@@ -5967,19 +5998,21 @@ void Sema::CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
}
}
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteBracketDeclarator(Scope *S) {
+void SemaCodeCompletion::CodeCompleteBracketDeclarator(Scope *S) {
CodeCompleteExpression(S, QualType(getASTContext().getSizeType()));
}
-void Sema::CodeCompleteCase(Scope *S) {
- if (getCurFunction()->SwitchStack.empty() || !CodeCompleter)
+void SemaCodeCompletion::CodeCompleteCase(Scope *S) {
+ if (SemaRef.getCurFunction()->SwitchStack.empty() || !CodeCompleter)
return;
- SwitchStmt *Switch = getCurFunction()->SwitchStack.back().getPointer();
+ SwitchStmt *Switch =
+ SemaRef.getCurFunction()->SwitchStack.back().getPointer();
// Condition expression might be invalid, do not continue in this case.
if (!Switch->getCond())
return;
@@ -6036,16 +6069,18 @@ void Sema::CodeCompleteCase(Scope *S) {
}
// Add any enumerators that have not yet been mentioned.
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Expression);
- AddEnumerators(Results, Context, Enum, CurContext, Enumerators);
+ AddEnumerators(Results, getASTContext(), Enum, SemaRef.CurContext,
+ Enumerators);
if (CodeCompleter->includeMacros()) {
- AddMacroResults(PP, Results, CodeCompleter->loadExternal(), false);
+ AddMacroResults(SemaRef.PP, Results, CodeCompleter->loadExternal(), false);
}
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
static bool anyNullArguments(ArrayRef<Expr *> Args) {
@@ -6126,7 +6161,7 @@ ProduceSignatureHelp(Sema &SemaRef, MutableArrayRef<ResultCandidate> Candidates,
if (Candidates.empty())
return QualType();
if (SemaRef.getPreprocessor().isCodeCompletionReached())
- SemaRef.CodeCompleter->ProcessOverloadCandidates(
+ SemaRef.CodeCompletion().CodeCompleter->ProcessOverloadCandidates(
SemaRef, CurrentArg, Candidates.data(), Candidates.size(), OpenParLoc,
Braced);
return getParamType(SemaRef, Candidates, CurrentArg);
@@ -6137,6 +6172,7 @@ ProduceSignatureHelp(Sema &SemaRef, MutableArrayRef<ResultCandidate> Candidates,
// so that we can recover argument names from it.
static FunctionProtoTypeLoc GetPrototypeLoc(Expr *Fn) {
TypeLoc Target;
+
if (const auto *T = Fn->getType().getTypePtr()->getAs<TypedefType>()) {
Target = T->getDecl()->getTypeSourceInfo()->getTypeLoc();
@@ -6145,6 +6181,11 @@ static FunctionProtoTypeLoc GetPrototypeLoc(Expr *Fn) {
if (const auto *const VD = dyn_cast<VarDecl>(D)) {
Target = VD->getTypeSourceInfo()->getTypeLoc();
}
+ } else if (const auto *ME = dyn_cast<MemberExpr>(Fn)) {
+ const auto *MD = ME->getMemberDecl();
+ if (const auto *FD = dyn_cast<FieldDecl>(MD)) {
+ Target = FD->getTypeSourceInfo()->getTypeLoc();
+ }
}
if (!Target)
@@ -6174,8 +6215,9 @@ static FunctionProtoTypeLoc GetPrototypeLoc(Expr *Fn) {
return {};
}
-QualType Sema::ProduceCallSignatureHelp(Expr *Fn, ArrayRef<Expr *> Args,
- SourceLocation OpenParLoc) {
+QualType
+SemaCodeCompletion::ProduceCallSignatureHelp(Expr *Fn, ArrayRef<Expr *> Args,
+ SourceLocation OpenParLoc) {
Fn = unwrapParenList(Fn);
if (!CodeCompleter || !Fn)
return QualType();
@@ -6198,8 +6240,9 @@ QualType Sema::ProduceCallSignatureHelp(Expr *Fn, ArrayRef<Expr *> Args,
OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
if (auto ULE = dyn_cast<UnresolvedLookupExpr>(NakedFn)) {
- AddOverloadedCallCandidates(ULE, ArgsWithoutDependentTypes, CandidateSet,
- /*PartialOverloading=*/true);
+ SemaRef.AddOverloadedCallCandidates(ULE, ArgsWithoutDependentTypes,
+ CandidateSet,
+ /*PartialOverloading=*/true);
} else if (auto UME = dyn_cast<UnresolvedMemberExpr>(NakedFn)) {
TemplateArgumentListInfo TemplateArgsBuffer, *TemplateArgs = nullptr;
if (UME->hasExplicitTemplateArgs()) {
@@ -6215,9 +6258,10 @@ QualType Sema::ProduceCallSignatureHelp(Expr *Fn, ArrayRef<Expr *> Args,
UnresolvedSet<8> Decls;
Decls.append(UME->decls_begin(), UME->decls_end());
const bool FirstArgumentIsBase = !UME->isImplicitAccess() && UME->getBase();
- AddFunctionCandidates(Decls, ArgExprs, CandidateSet, TemplateArgs,
- /*SuppressUserConversions=*/false,
- /*PartialOverloading=*/true, FirstArgumentIsBase);
+ SemaRef.AddFunctionCandidates(Decls, ArgExprs, CandidateSet, TemplateArgs,
+ /*SuppressUserConversions=*/false,
+ /*PartialOverloading=*/true,
+ FirstArgumentIsBase);
} else {
FunctionDecl *FD = nullptr;
if (auto *MCE = dyn_cast<MemberExpr>(NakedFn))
@@ -6229,28 +6273,30 @@ QualType Sema::ProduceCallSignatureHelp(Expr *Fn, ArrayRef<Expr *> Args,
!FD->getType()->getAs<FunctionProtoType>())
Results.push_back(ResultCandidate(FD));
else
- AddOverloadCandidate(FD, DeclAccessPair::make(FD, FD->getAccess()),
- ArgsWithoutDependentTypes, CandidateSet,
- /*SuppressUserConversions=*/false,
- /*PartialOverloading=*/true);
+ SemaRef.AddOverloadCandidate(FD,
+ DeclAccessPair::make(FD, FD->getAccess()),
+ ArgsWithoutDependentTypes, CandidateSet,
+ /*SuppressUserConversions=*/false,
+ /*PartialOverloading=*/true);
} else if (auto DC = NakedFn->getType()->getAsCXXRecordDecl()) {
// If expression's type is CXXRecordDecl, it may overload the function
// call operator, so we check if it does and add them as candidates.
// A complete type is needed to lookup for member function call operators.
- if (isCompleteType(Loc, NakedFn->getType())) {
+ if (SemaRef.isCompleteType(Loc, NakedFn->getType())) {
DeclarationName OpName =
- Context.DeclarationNames.getCXXOperatorName(OO_Call);
- LookupResult R(*this, OpName, Loc, LookupOrdinaryName);
- LookupQualifiedName(R, DC);
+ getASTContext().DeclarationNames.getCXXOperatorName(OO_Call);
+ LookupResult R(SemaRef, OpName, Loc, Sema::LookupOrdinaryName);
+ SemaRef.LookupQualifiedName(R, DC);
R.suppressDiagnostics();
SmallVector<Expr *, 12> ArgExprs(1, NakedFn);
ArgExprs.append(ArgsWithoutDependentTypes.begin(),
ArgsWithoutDependentTypes.end());
- AddFunctionCandidates(R.asUnresolvedSet(), ArgExprs, CandidateSet,
- /*ExplicitArgs=*/nullptr,
- /*SuppressUserConversions=*/false,
- /*PartialOverloading=*/true);
+ SemaRef.AddFunctionCandidates(R.asUnresolvedSet(), ArgExprs,
+ CandidateSet,
+ /*ExplicitArgs=*/nullptr,
+ /*SuppressUserConversions=*/false,
+ /*PartialOverloading=*/true);
}
} else {
// Lastly we check whether expression's type is function pointer or
@@ -6262,9 +6308,9 @@ QualType Sema::ProduceCallSignatureHelp(Expr *Fn, ArrayRef<Expr *> Args,
T = T->getPointeeType();
if (auto FP = T->getAs<FunctionProtoType>()) {
- if (!TooManyArguments(FP->getNumParams(),
- ArgsWithoutDependentTypes.size(),
- /*PartialOverloading=*/true) ||
+ if (!SemaRef.TooManyArguments(FP->getNumParams(),
+ ArgsWithoutDependentTypes.size(),
+ /*PartialOverloading=*/true) ||
FP->isVariadic()) {
if (P) {
Results.push_back(ResultCandidate(P));
@@ -6277,8 +6323,8 @@ QualType Sema::ProduceCallSignatureHelp(Expr *Fn, ArrayRef<Expr *> Args,
Results.push_back(ResultCandidate(FT));
}
}
- mergeCandidatesWithResults(*this, Results, CandidateSet, Loc, Args.size());
- QualType ParamType = ProduceSignatureHelp(*this, Results, Args.size(),
+ mergeCandidatesWithResults(SemaRef, Results, CandidateSet, Loc, Args.size());
+ QualType ParamType = ProduceSignatureHelp(SemaRef, Results, Args.size(),
OpenParLoc, /*Braced=*/false);
return !CandidateSet.empty() ? ParamType : QualType();
}
@@ -6350,18 +6396,16 @@ getNextAggregateIndexAfterDesignatedInit(const ResultCandidate &Aggregate,
return DesignatedIndex + ArgsAfterDesignator + 1;
}
-QualType Sema::ProduceConstructorSignatureHelp(QualType Type,
- SourceLocation Loc,
- ArrayRef<Expr *> Args,
- SourceLocation OpenParLoc,
- bool Braced) {
+QualType SemaCodeCompletion::ProduceConstructorSignatureHelp(
+ QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args,
+ SourceLocation OpenParLoc, bool Braced) {
if (!CodeCompleter)
return QualType();
SmallVector<ResultCandidate, 8> Results;
// A complete type is needed to lookup for constructors.
RecordDecl *RD =
- isCompleteType(Loc, Type) ? Type->getAsRecordDecl() : nullptr;
+ SemaRef.isCompleteType(Loc, Type) ? Type->getAsRecordDecl() : nullptr;
if (!RD)
return Type;
CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD);
@@ -6373,7 +6417,7 @@ QualType Sema::ProduceConstructorSignatureHelp(QualType Type,
// FIXME: it would be nice to support "unwrapping" aggregates that contain
// a single subaggregate, like std::array<T, N> -> T __elements[N].
if (Braced && !RD->isUnion() &&
- (!LangOpts.CPlusPlus || (CRD && CRD->isAggregate()))) {
+ (!getLangOpts().CPlusPlus || (CRD && CRD->isAggregate()))) {
ResultCandidate AggregateSig(RD);
unsigned AggregateSize = AggregateSig.getNumParams();
@@ -6383,7 +6427,7 @@ QualType Sema::ProduceConstructorSignatureHelp(QualType Type,
if (*NextIndex >= AggregateSize)
return Type;
Results.push_back(AggregateSig);
- return ProduceSignatureHelp(*this, Results, *NextIndex, OpenParLoc,
+ return ProduceSignatureHelp(SemaRef, Results, *NextIndex, OpenParLoc,
Braced);
}
@@ -6397,36 +6441,39 @@ QualType Sema::ProduceConstructorSignatureHelp(QualType Type,
if (CRD) {
OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
- for (NamedDecl *C : LookupConstructors(CRD)) {
+ for (NamedDecl *C : SemaRef.LookupConstructors(CRD)) {
if (auto *FD = dyn_cast<FunctionDecl>(C)) {
// FIXME: we can't yet provide correct signature help for initializer
// list constructors, so skip them entirely.
- if (Braced && LangOpts.CPlusPlus && isInitListConstructor(FD))
+ if (Braced && getLangOpts().CPlusPlus &&
+ SemaRef.isInitListConstructor(FD))
continue;
- AddOverloadCandidate(FD, DeclAccessPair::make(FD, C->getAccess()), Args,
- CandidateSet,
- /*SuppressUserConversions=*/false,
- /*PartialOverloading=*/true,
- /*AllowExplicit*/ true);
+ SemaRef.AddOverloadCandidate(
+ FD, DeclAccessPair::make(FD, C->getAccess()), Args, CandidateSet,
+ /*SuppressUserConversions=*/false,
+ /*PartialOverloading=*/true,
+ /*AllowExplicit*/ true);
} else if (auto *FTD = dyn_cast<FunctionTemplateDecl>(C)) {
- if (Braced && LangOpts.CPlusPlus &&
- isInitListConstructor(FTD->getTemplatedDecl()))
+ if (Braced && getLangOpts().CPlusPlus &&
+ SemaRef.isInitListConstructor(FTD->getTemplatedDecl()))
continue;
- AddTemplateOverloadCandidate(
+ SemaRef.AddTemplateOverloadCandidate(
FTD, DeclAccessPair::make(FTD, C->getAccess()),
/*ExplicitTemplateArgs=*/nullptr, Args, CandidateSet,
/*SuppressUserConversions=*/false,
/*PartialOverloading=*/true);
}
}
- mergeCandidatesWithResults(*this, Results, CandidateSet, Loc, Args.size());
+ mergeCandidatesWithResults(SemaRef, Results, CandidateSet, Loc,
+ Args.size());
}
- return ProduceSignatureHelp(*this, Results, Args.size(), OpenParLoc, Braced);
+ return ProduceSignatureHelp(SemaRef, Results, Args.size(), OpenParLoc,
+ Braced);
}
-QualType Sema::ProduceCtorInitMemberSignatureHelp(
+QualType SemaCodeCompletion::ProduceCtorInitMemberSignatureHelp(
Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc,
bool Braced) {
@@ -6438,7 +6485,7 @@ QualType Sema::ProduceCtorInitMemberSignatureHelp(
if (!Constructor)
return QualType();
// FIXME: Add support for Base class constructors as well.
- if (ValueDecl *MemberDecl = tryLookupCtorInitMemberDecl(
+ if (ValueDecl *MemberDecl = SemaRef.tryLookupCtorInitMemberDecl(
Constructor->getParent(), SS, TemplateTypeTy, II))
return ProduceConstructorSignatureHelp(MemberDecl->getType(),
MemberDecl->getLocation(), ArgExprs,
@@ -6468,7 +6515,7 @@ static bool argMatchesTemplateParams(const ParsedTemplateArgument &Arg,
llvm_unreachable("Unhandled switch case");
}
-QualType Sema::ProduceTemplateArgumentSignatureHelp(
+QualType SemaCodeCompletion::ProduceTemplateArgumentSignatureHelp(
TemplateTy ParsedTemplate, ArrayRef<ParsedTemplateArgument> Args,
SourceLocation LAngleLoc) {
if (!CodeCompleter || !ParsedTemplate)
@@ -6496,7 +6543,7 @@ QualType Sema::ProduceTemplateArgumentSignatureHelp(
if (const auto *TD = llvm::dyn_cast<TemplateDecl>(ND))
Consider(TD);
}
- return ProduceSignatureHelp(*this, Results, Args.size(), LAngleLoc,
+ return ProduceSignatureHelp(SemaRef, Results, Args.size(), LAngleLoc,
/*Braced=*/false);
}
@@ -6525,9 +6572,8 @@ static QualType getDesignatedType(QualType BaseType, const Designation &Desig) {
return BaseType;
}
-void Sema::CodeCompleteDesignator(QualType BaseType,
- llvm::ArrayRef<Expr *> InitExprs,
- const Designation &D) {
+void SemaCodeCompletion::CodeCompleteDesignator(
+ QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D) {
BaseType = getDesignatedType(BaseType, D);
if (BaseType.isNull())
return;
@@ -6537,7 +6583,7 @@ void Sema::CodeCompleteDesignator(QualType BaseType,
CodeCompletionContext CCC(CodeCompletionContext::CCC_DotMemberAccess,
BaseType);
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(), CCC);
Results.EnterNewScope();
@@ -6553,14 +6599,15 @@ void Sema::CodeCompleteDesignator(QualType BaseType,
// FIXME: Make use of previous designators to mark any fields before those
// inaccessible, and also compute the next initializer priority.
ResultBuilder::Result Result(FD, Results.getBasePriority(FD));
- Results.AddResult(Result, CurContext, /*Hiding=*/nullptr);
+ Results.AddResult(Result, SemaRef.CurContext, /*Hiding=*/nullptr);
}
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteInitializer(Scope *S, Decl *D) {
+void SemaCodeCompletion::CodeCompleteInitializer(Scope *S, Decl *D) {
ValueDecl *VD = dyn_cast_or_null<ValueDecl>(D);
if (!VD) {
CodeCompleteOrdinaryName(S, PCC_Expression);
@@ -6575,19 +6622,19 @@ void Sema::CodeCompleteInitializer(Scope *S, Decl *D) {
CodeCompleteExpression(S, Data);
}
-void Sema::CodeCompleteAfterIf(Scope *S, bool IsBracedThen) {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+void SemaCodeCompletion::CodeCompleteAfterIf(Scope *S, bool IsBracedThen) {
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
- mapCodeCompletionContext(*this, PCC_Statement));
+ mapCodeCompletionContext(SemaRef, PCC_Statement));
Results.setFilter(&ResultBuilder::IsOrdinaryName);
Results.EnterNewScope();
- CodeCompletionDeclConsumer Consumer(Results, CurContext);
- LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
- CodeCompleter->includeGlobals(),
- CodeCompleter->loadExternal());
+ CodeCompletionDeclConsumer Consumer(Results, SemaRef.CurContext);
+ SemaRef.LookupVisibleDecls(S, Sema::LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
- AddOrdinaryNameResults(PCC_Statement, S, *this, Results);
+ AddOrdinaryNameResults(PCC_Statement, S, SemaRef, Results);
// "else" block
CodeCompletionBuilder Builder(Results.getAllocator(),
@@ -6633,16 +6680,18 @@ void Sema::CodeCompleteAfterIf(Scope *S, bool IsBracedThen) {
AddPrettyFunctionResults(getLangOpts(), Results);
if (CodeCompleter->includeMacros())
- AddMacroResults(PP, Results, CodeCompleter->loadExternal(), false);
+ AddMacroResults(SemaRef.PP, Results, CodeCompleter->loadExternal(), false);
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
- bool EnteringContext,
- bool IsUsingDeclaration, QualType BaseType,
- QualType PreferredType) {
+void SemaCodeCompletion::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
+ bool EnteringContext,
+ bool IsUsingDeclaration,
+ QualType BaseType,
+ QualType PreferredType) {
if (SS.isEmpty() || !CodeCompleter)
return;
@@ -6658,34 +6707,34 @@ void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
// As SS is invalid, we try to collect accessible contexts from the current
// scope with a dummy lookup so that the completion consumer can try to
// guess what the specified scope is.
- ResultBuilder DummyResults(*this, CodeCompleter->getAllocator(),
+ ResultBuilder DummyResults(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(), CC);
if (!PreferredType.isNull())
DummyResults.setPreferredType(PreferredType);
if (S->getEntity()) {
CodeCompletionDeclConsumer Consumer(DummyResults, S->getEntity(),
BaseType);
- LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
- /*IncludeGlobalScope=*/false,
- /*LoadExternal=*/false);
+ SemaRef.LookupVisibleDecls(S, Sema::LookupOrdinaryName, Consumer,
+ /*IncludeGlobalScope=*/false,
+ /*LoadExternal=*/false);
}
- HandleCodeCompleteResults(this, CodeCompleter,
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
DummyResults.getCompletionContext(), nullptr, 0);
return;
}
// Always pretend to enter a context to ensure that a dependent type
// resolves to a dependent record.
- DeclContext *Ctx = computeDeclContext(SS, /*EnteringContext=*/true);
+ DeclContext *Ctx = SemaRef.computeDeclContext(SS, /*EnteringContext=*/true);
// Try to instantiate any non-dependent declaration contexts before
// we look in them. Bail out if we fail.
NestedNameSpecifier *NNS = SS.getScopeRep();
if (NNS != nullptr && SS.isValid() && !NNS->isDependent()) {
- if (Ctx == nullptr || RequireCompleteDeclContext(SS, Ctx))
+ if (Ctx == nullptr || SemaRef.RequireCompleteDeclContext(SS, Ctx))
return;
}
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(), CC);
if (!PreferredType.isNull())
Results.setPreferredType(PreferredType);
@@ -6699,14 +6748,16 @@ void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
// If the scope is a concept-constrained type parameter, infer nested
// members based on the constraints.
- if (const auto *TTPT =
- dyn_cast_or_null<TemplateTypeParmType>(NNS->getAsType())) {
- for (const auto &R : ConceptInfo(*TTPT, S).members()) {
- if (R.Operator != ConceptInfo::Member::Colons)
- continue;
- Results.AddResult(CodeCompletionResult(
- R.render(*this, CodeCompleter->getAllocator(),
- CodeCompleter->getCodeCompletionTUInfo())));
+ if (NNS) {
+ if (const auto *TTPT =
+ dyn_cast_or_null<TemplateTypeParmType>(NNS->getAsType())) {
+ for (const auto &R : ConceptInfo(*TTPT, S).members()) {
+ if (R.Operator != ConceptInfo::Member::Colons)
+ continue;
+ Results.AddResult(CodeCompletionResult(
+ R.render(SemaRef, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo())));
+ }
}
}
@@ -6716,23 +6767,24 @@ void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
// in a context that permits expressions. This is a general issue with
// qualified-id completions.
if (Ctx && !EnteringContext)
- MaybeAddOverrideCalls(*this, Ctx, Results);
+ MaybeAddOverrideCalls(SemaRef, Ctx, Results);
Results.ExitScope();
if (Ctx &&
(CodeCompleter->includeNamespaceLevelDecls() || !Ctx->isFileContext())) {
CodeCompletionDeclConsumer Consumer(Results, Ctx, BaseType);
- LookupVisibleDecls(Ctx, LookupOrdinaryName, Consumer,
- /*IncludeGlobalScope=*/true,
- /*IncludeDependentBases=*/true,
- CodeCompleter->loadExternal());
+ SemaRef.LookupVisibleDecls(Ctx, Sema::LookupOrdinaryName, Consumer,
+ /*IncludeGlobalScope=*/true,
+ /*IncludeDependentBases=*/true,
+ CodeCompleter->loadExternal());
}
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteUsing(Scope *S) {
+void SemaCodeCompletion::CodeCompleteUsing(Scope *S) {
if (!CodeCompleter)
return;
@@ -6741,7 +6793,7 @@ void Sema::CodeCompleteUsing(Scope *S) {
CodeCompletionContext Context(CodeCompletionContext::CCC_SymbolOrNewName);
Context.setIsUsingDeclaration(true);
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(), Context,
&ResultBuilder::IsNestedNameSpecifier);
Results.EnterNewScope();
@@ -6752,48 +6804,50 @@ void Sema::CodeCompleteUsing(Scope *S) {
// After "using", we can see anything that would start a
// nested-name-specifier.
- CodeCompletionDeclConsumer Consumer(Results, CurContext);
- LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
- CodeCompleter->includeGlobals(),
- CodeCompleter->loadExternal());
+ CodeCompletionDeclConsumer Consumer(Results, SemaRef.CurContext);
+ SemaRef.LookupVisibleDecls(S, Sema::LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteUsingDirective(Scope *S) {
+void SemaCodeCompletion::CodeCompleteUsingDirective(Scope *S) {
if (!CodeCompleter)
return;
// After "using namespace", we expect to see a namespace name or namespace
// alias.
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Namespace,
&ResultBuilder::IsNamespaceOrAlias);
Results.EnterNewScope();
- CodeCompletionDeclConsumer Consumer(Results, CurContext);
- LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
- CodeCompleter->includeGlobals(),
- CodeCompleter->loadExternal());
+ CodeCompletionDeclConsumer Consumer(Results, SemaRef.CurContext);
+ SemaRef.LookupVisibleDecls(S, Sema::LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteNamespaceDecl(Scope *S) {
+void SemaCodeCompletion::CodeCompleteNamespaceDecl(Scope *S) {
if (!CodeCompleter)
return;
DeclContext *Ctx = S->getEntity();
if (!S->getParent())
- Ctx = Context.getTranslationUnitDecl();
+ Ctx = getASTContext().getTranslationUnitDecl();
bool SuppressedGlobalResults =
Ctx && !CodeCompleter->includeGlobals() && isa<TranslationUnitDecl>(Ctx);
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
SuppressedGlobalResults
? CodeCompletionContext::CCC_Namespace
@@ -6810,7 +6864,7 @@ void Sema::CodeCompleteNamespaceDecl(Scope *S) {
NS(Ctx->decls_begin()),
NSEnd(Ctx->decls_end());
NS != NSEnd; ++NS)
- OrigToLatest[NS->getOriginalNamespace()] = *NS;
+ OrigToLatest[NS->getFirstDecl()] = *NS;
// Add the most recent definition (or extended definition) of each
// namespace to the list of results.
@@ -6822,37 +6876,39 @@ void Sema::CodeCompleteNamespaceDecl(Scope *S) {
Results.AddResult(
CodeCompletionResult(NS->second, Results.getBasePriority(NS->second),
nullptr),
- CurContext, nullptr, false);
+ SemaRef.CurContext, nullptr, false);
Results.ExitScope();
}
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteNamespaceAliasDecl(Scope *S) {
+void SemaCodeCompletion::CodeCompleteNamespaceAliasDecl(Scope *S) {
if (!CodeCompleter)
return;
// After "namespace", we expect to see a namespace or alias.
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Namespace,
&ResultBuilder::IsNamespaceOrAlias);
- CodeCompletionDeclConsumer Consumer(Results, CurContext);
- LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
- CodeCompleter->includeGlobals(),
- CodeCompleter->loadExternal());
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ CodeCompletionDeclConsumer Consumer(Results, SemaRef.CurContext);
+ SemaRef.LookupVisibleDecls(S, Sema::LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteOperatorName(Scope *S) {
+void SemaCodeCompletion::CodeCompleteOperatorName(Scope *S) {
if (!CodeCompleter)
return;
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Type,
&ResultBuilder::IsType);
@@ -6867,31 +6923,32 @@ void Sema::CodeCompleteOperatorName(Scope *S) {
// Add any type names visible from the current scope
Results.allowNestedNameSpecifiers();
- CodeCompletionDeclConsumer Consumer(Results, CurContext);
- LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
- CodeCompleter->includeGlobals(),
- CodeCompleter->loadExternal());
+ CodeCompletionDeclConsumer Consumer(Results, SemaRef.CurContext);
+ SemaRef.LookupVisibleDecls(S, Sema::LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
// Add any type specifiers
AddTypeSpecifierResults(getLangOpts(), Results);
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteConstructorInitializer(
+void SemaCodeCompletion::CodeCompleteConstructorInitializer(
Decl *ConstructorD, ArrayRef<CXXCtorInitializer *> Initializers) {
if (!ConstructorD)
return;
- AdjustDeclIfTemplate(ConstructorD);
+ SemaRef.AdjustDeclIfTemplate(ConstructorD);
auto *Constructor = dyn_cast<CXXConstructorDecl>(ConstructorD);
if (!Constructor)
return;
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Symbol);
Results.EnterNewScope();
@@ -6901,7 +6958,7 @@ void Sema::CodeCompleteConstructorInitializer(
llvm::SmallPtrSet<CanQualType, 4> InitializedBases;
for (unsigned I = 0, E = Initializers.size(); I != E; ++I) {
if (Initializers[I]->isBaseInitializer())
- InitializedBases.insert(Context.getCanonicalType(
+ InitializedBases.insert(getASTContext().getCanonicalType(
QualType(Initializers[I]->getBaseClass(), 0)));
else
InitializedFields.insert(
@@ -6909,7 +6966,7 @@ void Sema::CodeCompleteConstructorInitializer(
}
// Add completions for base classes.
- PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
+ PrintingPolicy Policy = getCompletionPrintingPolicy(SemaRef);
bool SawLastInitializer = Initializers.empty();
CXXRecordDecl *ClassDecl = Constructor->getParent();
@@ -6919,10 +6976,10 @@ void Sema::CodeCompleteConstructorInitializer(
Builder.AddTypedTextChunk(Name);
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
if (const auto *Function = dyn_cast<FunctionDecl>(ND))
- AddFunctionParameterChunks(PP, Policy, Function, Builder);
+ AddFunctionParameterChunks(SemaRef.PP, Policy, Function, Builder);
else if (const auto *FunTemplDecl = dyn_cast<FunctionTemplateDecl>(ND))
- AddFunctionParameterChunks(PP, Policy, FunTemplDecl->getTemplatedDecl(),
- Builder);
+ AddFunctionParameterChunks(SemaRef.PP, Policy,
+ FunTemplDecl->getTemplatedDecl(), Builder);
Builder.AddChunk(CodeCompletionString::CK_RightParen);
return Builder.TakeString();
};
@@ -6954,7 +7011,7 @@ void Sema::CodeCompleteConstructorInitializer(
FD->getType().getAsString(Policy))
: Name,
FD);
- auto Ctors = getConstructors(Context, RD);
+ auto Ctors = getConstructors(getASTContext(), RD);
if (Ctors.begin() == Ctors.end())
return AddDefaultCtorInit(Name, Name, RD);
for (const NamedDecl *Ctor : Ctors) {
@@ -6981,11 +7038,12 @@ void Sema::CodeCompleteConstructorInitializer(
};
for (const auto &Base : ClassDecl->bases()) {
- if (!InitializedBases.insert(Context.getCanonicalType(Base.getType()))
+ if (!InitializedBases
+ .insert(getASTContext().getCanonicalType(Base.getType()))
.second) {
SawLastInitializer =
!Initializers.empty() && Initializers.back()->isBaseInitializer() &&
- Context.hasSameUnqualifiedType(
+ getASTContext().hasSameUnqualifiedType(
Base.getType(), QualType(Initializers.back()->getBaseClass(), 0));
continue;
}
@@ -6996,11 +7054,12 @@ void Sema::CodeCompleteConstructorInitializer(
// Add completions for virtual base classes.
for (const auto &Base : ClassDecl->vbases()) {
- if (!InitializedBases.insert(Context.getCanonicalType(Base.getType()))
+ if (!InitializedBases
+ .insert(getASTContext().getCanonicalType(Base.getType()))
.second) {
SawLastInitializer =
!Initializers.empty() && Initializers.back()->isBaseInitializer() &&
- Context.hasSameUnqualifiedType(
+ getASTContext().hasSameUnqualifiedType(
Base.getType(), QualType(Initializers.back()->getBaseClass(), 0));
continue;
}
@@ -7027,8 +7086,9 @@ void Sema::CodeCompleteConstructorInitializer(
}
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
/// Determine whether this scope denotes a namespace.
@@ -7040,9 +7100,10 @@ static bool isNamespaceScope(Scope *S) {
return DC->isFileContext();
}
-void Sema::CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
- bool AfterAmpersand) {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+void SemaCodeCompletion::CodeCompleteLambdaIntroducer(Scope *S,
+ LambdaIntroducer &Intro,
+ bool AfterAmpersand) {
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
@@ -7068,24 +7129,25 @@ void Sema::CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
if (Known.insert(Var->getIdentifier()).second)
Results.AddResult(CodeCompletionResult(Var, CCP_LocalDeclaration),
- CurContext, nullptr, false);
+ SemaRef.CurContext, nullptr, false);
}
}
// Add 'this', if it would be valid.
if (!IncludedThis && !AfterAmpersand && Intro.Default != LCD_ByCopy)
- addThisCompletion(*this, Results);
+ addThisCompletion(SemaRef, Results);
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteAfterFunctionEquals(Declarator &D) {
- if (!LangOpts.CPlusPlus11)
+void SemaCodeCompletion::CodeCompleteAfterFunctionEquals(Declarator &D) {
+ if (!getLangOpts().CPlusPlus11)
return;
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
auto ShouldAddDefault = [&D, this]() {
@@ -7105,7 +7167,7 @@ void Sema::CodeCompleteAfterFunctionEquals(Declarator &D) {
// verify that it is the copy or move assignment?
if (Op == OverloadedOperatorKind::OO_Equal)
return true;
- if (LangOpts.CPlusPlus20 &&
+ if (getLangOpts().CPlusPlus20 &&
(Op == OverloadedOperatorKind::OO_EqualEqual ||
Op == OverloadedOperatorKind::OO_ExclaimEqual ||
Op == OverloadedOperatorKind::OO_Less ||
@@ -7125,8 +7187,9 @@ void Sema::CodeCompleteAfterFunctionEquals(Declarator &D) {
// first function declaration.
Results.AddResult("delete");
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
/// Macro that optionally prepends an "@" to the string literal passed in via
@@ -7226,20 +7289,21 @@ static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt) {
}
}
-void Sema::CodeCompleteObjCAtDirective(Scope *S) {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+void SemaCodeCompletion::CodeCompleteObjCAtDirective(Scope *S) {
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
- if (isa<ObjCImplDecl>(CurContext))
+ if (isa<ObjCImplDecl>(SemaRef.CurContext))
AddObjCImplementationResults(getLangOpts(), Results, false);
- else if (CurContext->isObjCContainer())
+ else if (SemaRef.CurContext->isObjCContainer())
AddObjCInterfaceResults(getLangOpts(), Results, false);
else
AddObjCTopLevelResults(Results, false);
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
@@ -7363,38 +7427,41 @@ static void AddObjCVisibilityResults(const LangOptions &LangOpts,
Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt, "package")));
}
-void Sema::CodeCompleteObjCAtVisibility(Scope *S) {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+void SemaCodeCompletion::CodeCompleteObjCAtVisibility(Scope *S) {
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
AddObjCVisibilityResults(getLangOpts(), Results, false);
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteObjCAtStatement(Scope *S) {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+void SemaCodeCompletion::CodeCompleteObjCAtStatement(Scope *S) {
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
AddObjCStatementResults(Results, false);
AddObjCExpressionResults(Results, false);
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteObjCAtExpression(Scope *S) {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+void SemaCodeCompletion::CodeCompleteObjCAtExpression(Scope *S) {
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
AddObjCExpressionResults(Results, false);
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
/// Determine whether the addition of the given flag to an Objective-C
@@ -7430,13 +7497,14 @@ static bool ObjCPropertyFlagConflicts(unsigned Attributes, unsigned NewFlag) {
return false;
}
-void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
+void SemaCodeCompletion::CodeCompleteObjCPropertyFlags(Scope *S,
+ ObjCDeclSpec &ODS) {
if (!CodeCompleter)
return;
unsigned Attributes = ODS.getPropertyAttributes();
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
@@ -7499,8 +7567,9 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
Results.AddResult(CodeCompletionResult("null_resettable"));
}
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
/// Describes the kind of Objective-C method that we want to find
@@ -7512,7 +7581,7 @@ enum ObjCMethodKind {
};
static bool isAcceptableObjCSelector(Selector Sel, ObjCMethodKind WantKind,
- ArrayRef<IdentifierInfo *> SelIdents,
+ ArrayRef<const IdentifierInfo *> SelIdents,
bool AllowSameLength = true) {
unsigned NumSelIdents = SelIdents.size();
if (NumSelIdents > Sel.getNumArgs())
@@ -7539,7 +7608,7 @@ static bool isAcceptableObjCSelector(Selector Sel, ObjCMethodKind WantKind,
static bool isAcceptableObjCMethod(ObjCMethodDecl *Method,
ObjCMethodKind WantKind,
- ArrayRef<IdentifierInfo *> SelIdents,
+ ArrayRef<const IdentifierInfo *> SelIdents,
bool AllowSameLength = true) {
return isAcceptableObjCSelector(Method->getSelector(), WantKind, SelIdents,
AllowSameLength);
@@ -7571,7 +7640,7 @@ typedef llvm::SmallPtrSet<Selector, 16> VisitedSelectorSet;
/// \param Results the structure into which we'll add results.
static void AddObjCMethods(ObjCContainerDecl *Container,
bool WantInstanceMethods, ObjCMethodKind WantKind,
- ArrayRef<IdentifierInfo *> SelIdents,
+ ArrayRef<const IdentifierInfo *> SelIdents,
DeclContext *CurContext,
VisitedSelectorSet &Selectors, bool AllowSameLength,
ResultBuilder &Results, bool InOriginalClass = true,
@@ -7659,12 +7728,13 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
IsRootClass);
}
-void Sema::CodeCompleteObjCPropertyGetter(Scope *S) {
+void SemaCodeCompletion::CodeCompleteObjCPropertyGetter(Scope *S) {
// Try to find the interface where getters might live.
- ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurContext);
+ ObjCInterfaceDecl *Class =
+ dyn_cast_or_null<ObjCInterfaceDecl>(SemaRef.CurContext);
if (!Class) {
if (ObjCCategoryDecl *Category =
- dyn_cast_or_null<ObjCCategoryDecl>(CurContext))
+ dyn_cast_or_null<ObjCCategoryDecl>(SemaRef.CurContext))
Class = Category->getClassInterface();
if (!Class)
@@ -7672,26 +7742,28 @@ void Sema::CodeCompleteObjCPropertyGetter(Scope *S) {
}
// Find all of the potential getters.
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
VisitedSelectorSet Selectors;
- AddObjCMethods(Class, true, MK_ZeroArgSelector, std::nullopt, CurContext,
- Selectors,
+ AddObjCMethods(Class, true, MK_ZeroArgSelector, std::nullopt,
+ SemaRef.CurContext, Selectors,
/*AllowSameLength=*/true, Results);
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteObjCPropertySetter(Scope *S) {
+void SemaCodeCompletion::CodeCompleteObjCPropertySetter(Scope *S) {
// Try to find the interface where setters might live.
- ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurContext);
+ ObjCInterfaceDecl *Class =
+ dyn_cast_or_null<ObjCInterfaceDecl>(SemaRef.CurContext);
if (!Class) {
if (ObjCCategoryDecl *Category =
- dyn_cast_or_null<ObjCCategoryDecl>(CurContext))
+ dyn_cast_or_null<ObjCCategoryDecl>(SemaRef.CurContext))
Class = Category->getClassInterface();
if (!Class)
@@ -7699,24 +7771,25 @@ void Sema::CodeCompleteObjCPropertySetter(Scope *S) {
}
// Find all of the potential getters.
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
VisitedSelectorSet Selectors;
- AddObjCMethods(Class, true, MK_OneArgSelector, std::nullopt, CurContext,
- Selectors,
+ AddObjCMethods(Class, true, MK_OneArgSelector, std::nullopt,
+ SemaRef.CurContext, Selectors,
/*AllowSameLength=*/true, Results);
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
- bool IsParameter) {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+void SemaCodeCompletion::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
+ bool IsParameter) {
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Type);
Results.EnterNewScope();
@@ -7753,7 +7826,7 @@ void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
// an action, e.g.,
// IBAction)<#selector#>:(id)sender
if (DS.getObjCDeclQualifier() == 0 && !IsParameter &&
- PP.isMacroDefined("IBAction")) {
+ SemaRef.PP.isMacroDefined("IBAction")) {
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo(),
CCP_CodePattern, CXAvailability_Available);
@@ -7774,21 +7847,22 @@ void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
}
// Add various builtin type names and specifiers.
- AddOrdinaryNameResults(PCC_Type, S, *this, Results);
+ AddOrdinaryNameResults(PCC_Type, S, SemaRef, Results);
Results.ExitScope();
// Add the various type names
Results.setFilter(&ResultBuilder::IsOrdinaryNonValueName);
- CodeCompletionDeclConsumer Consumer(Results, CurContext);
- LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
- CodeCompleter->includeGlobals(),
- CodeCompleter->loadExternal());
+ CodeCompletionDeclConsumer Consumer(Results, SemaRef.CurContext);
+ SemaRef.LookupVisibleDecls(S, Sema::LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
if (CodeCompleter->includeMacros())
- AddMacroResults(PP, Results, CodeCompleter->loadExternal(), false);
+ AddMacroResults(SemaRef.PP, Results, CodeCompleter->loadExternal(), false);
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
/// When we have an expression with type "id", we may assume
@@ -7804,7 +7878,7 @@ static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) {
if (Sel.isNull())
return nullptr;
- IdentifierInfo *Id = Sel.getIdentifierInfoForSlot(0);
+ const IdentifierInfo *Id = Sel.getIdentifierInfoForSlot(0);
if (!Id)
return nullptr;
@@ -7880,7 +7954,7 @@ static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) {
/// this "super" completion. If NULL, no completion was added.
static ObjCMethodDecl *
AddSuperSendCompletion(Sema &S, bool NeedSuperKeyword,
- ArrayRef<IdentifierInfo *> SelIdents,
+ ArrayRef<const IdentifierInfo *> SelIdents,
ResultBuilder &Results) {
ObjCMethodDecl *CurMethod = S.getCurMethodDecl();
if (!CurMethod)
@@ -7979,49 +8053,50 @@ AddSuperSendCompletion(Sema &S, bool NeedSuperKeyword,
return SuperMethod;
}
-void Sema::CodeCompleteObjCMessageReceiver(Scope *S) {
+void SemaCodeCompletion::CodeCompleteObjCMessageReceiver(Scope *S) {
typedef CodeCompletionResult Result;
ResultBuilder Results(
- *this, CodeCompleter->getAllocator(),
+ SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCMessageReceiver,
getLangOpts().CPlusPlus11
? &ResultBuilder::IsObjCMessageReceiverOrLambdaCapture
: &ResultBuilder::IsObjCMessageReceiver);
- CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ CodeCompletionDeclConsumer Consumer(Results, SemaRef.CurContext);
Results.EnterNewScope();
- LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
- CodeCompleter->includeGlobals(),
- CodeCompleter->loadExternal());
+ SemaRef.LookupVisibleDecls(S, Sema::LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
// If we are in an Objective-C method inside a class that has a superclass,
// add "super" as an option.
- if (ObjCMethodDecl *Method = getCurMethodDecl())
+ if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl())
if (ObjCInterfaceDecl *Iface = Method->getClassInterface())
if (Iface->getSuperClass()) {
Results.AddResult(Result("super"));
- AddSuperSendCompletion(*this, /*NeedSuperKeyword=*/true, std::nullopt,
+ AddSuperSendCompletion(SemaRef, /*NeedSuperKeyword=*/true, std::nullopt,
Results);
}
if (getLangOpts().CPlusPlus11)
- addThisCompletion(*this, Results);
+ addThisCompletion(SemaRef, Results);
Results.ExitScope();
if (CodeCompleter->includeMacros())
- AddMacroResults(PP, Results, CodeCompleter->loadExternal(), false);
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ AddMacroResults(SemaRef.PP, Results, CodeCompleter->loadExternal(), false);
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
- ArrayRef<IdentifierInfo *> SelIdents,
- bool AtArgumentExpression) {
+void SemaCodeCompletion::CodeCompleteObjCSuperMessage(
+ Scope *S, SourceLocation SuperLoc,
+ ArrayRef<const IdentifierInfo *> SelIdents, bool AtArgumentExpression) {
ObjCInterfaceDecl *CDecl = nullptr;
- if (ObjCMethodDecl *CurMethod = getCurMethodDecl()) {
+ if (ObjCMethodDecl *CurMethod = SemaRef.getCurMethodDecl()) {
// Figure out which interface we're in.
CDecl = CurMethod->getClassInterface();
if (!CDecl)
@@ -8044,13 +8119,14 @@ void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
} else {
// "super" may be the name of a type or variable. Figure out which
// it is.
- IdentifierInfo *Super = getSuperIdentifier();
- NamedDecl *ND = LookupSingleName(S, Super, SuperLoc, LookupOrdinaryName);
+ const IdentifierInfo *Super = SemaRef.getSuperIdentifier();
+ NamedDecl *ND =
+ SemaRef.LookupSingleName(S, Super, SuperLoc, Sema::LookupOrdinaryName);
if ((CDecl = dyn_cast_or_null<ObjCInterfaceDecl>(ND))) {
// "super" names an interface. Use it.
} else if (TypeDecl *TD = dyn_cast_or_null<TypeDecl>(ND)) {
if (const ObjCObjectType *Iface =
- Context.getTypeDeclType(TD)->getAs<ObjCObjectType>())
+ getASTContext().getTypeDeclType(TD)->getAs<ObjCObjectType>())
CDecl = Iface->getInterface();
} else if (ND && isa<UnresolvedUsingTypenameDecl>(ND)) {
// "super" names an unresolved type; we can't be more specific.
@@ -8060,9 +8136,10 @@ void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
SourceLocation TemplateKWLoc;
UnqualifiedId id;
id.setIdentifier(Super, SuperLoc);
- ExprResult SuperExpr = ActOnIdExpression(S, SS, TemplateKWLoc, id,
- /*HasTrailingLParen=*/false,
- /*IsAddressOfOperand=*/false);
+ ExprResult SuperExpr =
+ SemaRef.ActOnIdExpression(S, SS, TemplateKWLoc, id,
+ /*HasTrailingLParen=*/false,
+ /*IsAddressOfOperand=*/false);
return CodeCompleteObjCInstanceMessage(S, (Expr *)SuperExpr.get(),
SelIdents, AtArgumentExpression);
}
@@ -8072,7 +8149,7 @@ void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ParsedType Receiver;
if (CDecl)
- Receiver = ParsedType::make(Context.getObjCInterfaceType(CDecl));
+ Receiver = ParsedType::make(getASTContext().getObjCInterfaceType(CDecl));
return CodeCompleteObjCClassMessage(S, Receiver, SelIdents,
AtArgumentExpression,
/*IsSuper=*/true);
@@ -8112,11 +8189,11 @@ static QualType getPreferredArgumentTypeForMessageSend(ResultBuilder &Results,
return PreferredType;
}
-static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
- ParsedType Receiver,
- ArrayRef<IdentifierInfo *> SelIdents,
- bool AtArgumentExpression, bool IsSuper,
- ResultBuilder &Results) {
+static void
+AddClassMessageCompletions(Sema &SemaRef, Scope *S, ParsedType Receiver,
+ ArrayRef<const IdentifierInfo *> SelIdents,
+ bool AtArgumentExpression, bool IsSuper,
+ ResultBuilder &Results) {
typedef CodeCompletionResult Result;
ObjCInterfaceDecl *CDecl = nullptr;
@@ -8160,15 +8237,16 @@ static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
N = SemaRef.getExternalSource()->GetNumExternalSelectors();
I != N; ++I) {
Selector Sel = SemaRef.getExternalSource()->GetExternalSelector(I);
- if (Sel.isNull() || SemaRef.MethodPool.count(Sel))
+ if (Sel.isNull() || SemaRef.ObjC().MethodPool.count(Sel))
continue;
- SemaRef.ReadMethodPool(Sel);
+ SemaRef.ObjC().ReadMethodPool(Sel);
}
}
- for (Sema::GlobalMethodPool::iterator M = SemaRef.MethodPool.begin(),
- MEnd = SemaRef.MethodPool.end();
+ for (SemaObjC::GlobalMethodPool::iterator
+ M = SemaRef.ObjC().MethodPool.begin(),
+ MEnd = SemaRef.ObjC().MethodPool.end();
M != MEnd; ++M) {
for (ObjCMethodList *MethList = &M->second.second;
MethList && MethList->getMethod(); MethList = MethList->getNext()) {
@@ -8187,20 +8265,19 @@ static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
Results.ExitScope();
}
-void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
- ArrayRef<IdentifierInfo *> SelIdents,
- bool AtArgumentExpression,
- bool IsSuper) {
+void SemaCodeCompletion::CodeCompleteObjCClassMessage(
+ Scope *S, ParsedType Receiver, ArrayRef<const IdentifierInfo *> SelIdents,
+ bool AtArgumentExpression, bool IsSuper) {
- QualType T = this->GetTypeFromParser(Receiver);
+ QualType T = SemaRef.GetTypeFromParser(Receiver);
ResultBuilder Results(
- *this, CodeCompleter->getAllocator(),
+ SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext(CodeCompletionContext::CCC_ObjCClassMessage, T,
SelIdents));
- AddClassMessageCompletions(*this, S, Receiver, SelIdents,
+ AddClassMessageCompletions(SemaRef, S, Receiver, SelIdents,
AtArgumentExpression, IsSuper, Results);
// If we're actually at the argument expression (rather than prior to the
@@ -8218,22 +8295,23 @@ void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
return;
}
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
- ArrayRef<IdentifierInfo *> SelIdents,
- bool AtArgumentExpression,
- ObjCInterfaceDecl *Super) {
+void SemaCodeCompletion::CodeCompleteObjCInstanceMessage(
+ Scope *S, Expr *Receiver, ArrayRef<const IdentifierInfo *> SelIdents,
+ bool AtArgumentExpression, ObjCInterfaceDecl *Super) {
typedef CodeCompletionResult Result;
+ ASTContext &Context = getASTContext();
Expr *RecExpr = static_cast<Expr *>(Receiver);
// If necessary, apply function/array conversion to the receiver.
// C99 6.7.5.3p[7,8].
if (RecExpr) {
- ExprResult Conv = DefaultFunctionArrayLvalueConversion(RecExpr);
+ ExprResult Conv = SemaRef.DefaultFunctionArrayLvalueConversion(RecExpr);
if (Conv.isInvalid()) // conversion failed. bail.
return;
RecExpr = Conv.get();
@@ -8258,7 +8336,7 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
Context.getObjCObjectPointerType(Context.getObjCInterfaceType(IFace));
}
} else if (RecExpr && getLangOpts().CPlusPlus) {
- ExprResult Conv = PerformContextuallyConvertToObjCPointer(RecExpr);
+ ExprResult Conv = SemaRef.PerformContextuallyConvertToObjCPointer(RecExpr);
if (Conv.isUsable()) {
RecExpr = Conv.get();
ReceiverType = RecExpr->getType();
@@ -8267,7 +8345,7 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
// Build the set of methods we can see.
ResultBuilder Results(
- *this, CodeCompleter->getAllocator(),
+ SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext(CodeCompletionContext::CCC_ObjCInstanceMessage,
ReceiverType, SelIdents));
@@ -8278,13 +8356,13 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
// completion.
if (Super) {
if (ObjCMethodDecl *SuperMethod =
- AddSuperSendCompletion(*this, false, SelIdents, Results))
+ AddSuperSendCompletion(SemaRef, false, SelIdents, Results))
Results.Ignore(SuperMethod);
}
// If we're inside an Objective-C method definition, prefer its selector to
// others.
- if (ObjCMethodDecl *CurMethod = getCurMethodDecl())
+ if (ObjCMethodDecl *CurMethod = SemaRef.getCurMethodDecl())
Results.setPreferredSelector(CurMethod->getSelector());
// Keep track of the selectors we've already added.
@@ -8295,9 +8373,9 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
// class method.
if (ReceiverType->isObjCClassType() ||
ReceiverType->isObjCQualifiedClassType()) {
- if (ObjCMethodDecl *CurMethod = getCurMethodDecl()) {
+ if (ObjCMethodDecl *CurMethod = SemaRef.getCurMethodDecl()) {
if (ObjCInterfaceDecl *ClassDecl = CurMethod->getClassInterface())
- AddObjCMethods(ClassDecl, false, MK_Any, SelIdents, CurContext,
+ AddObjCMethods(ClassDecl, false, MK_Any, SelIdents, SemaRef.CurContext,
Selectors, AtArgumentExpression, Results);
}
}
@@ -8306,7 +8384,7 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ReceiverType->getAsObjCQualifiedIdType()) {
// Search protocols for instance methods.
for (auto *I : QualID->quals())
- AddObjCMethods(I, true, MK_Any, SelIdents, CurContext, Selectors,
+ AddObjCMethods(I, true, MK_Any, SelIdents, SemaRef.CurContext, Selectors,
AtArgumentExpression, Results);
}
// Handle messages to a pointer to interface type.
@@ -8314,11 +8392,12 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ReceiverType->getAsObjCInterfacePointerType()) {
// Search the class, its superclasses, etc., for instance methods.
AddObjCMethods(IFacePtr->getInterfaceDecl(), true, MK_Any, SelIdents,
- CurContext, Selectors, AtArgumentExpression, Results);
+ SemaRef.CurContext, Selectors, AtArgumentExpression,
+ Results);
// Search protocols for instance methods.
for (auto *I : IFacePtr->quals())
- AddObjCMethods(I, true, MK_Any, SelIdents, CurContext, Selectors,
+ AddObjCMethods(I, true, MK_Any, SelIdents, SemaRef.CurContext, Selectors,
AtArgumentExpression, Results);
}
// Handle messages to "id".
@@ -8328,19 +8407,21 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
// If we have an external source, load the entire class method
// pool from the AST file.
- if (ExternalSource) {
- for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors();
+ if (SemaRef.ExternalSource) {
+ for (uint32_t I = 0,
+ N = SemaRef.ExternalSource->GetNumExternalSelectors();
I != N; ++I) {
- Selector Sel = ExternalSource->GetExternalSelector(I);
- if (Sel.isNull() || MethodPool.count(Sel))
+ Selector Sel = SemaRef.ExternalSource->GetExternalSelector(I);
+ if (Sel.isNull() || SemaRef.ObjC().MethodPool.count(Sel))
continue;
- ReadMethodPool(Sel);
+ SemaRef.ObjC().ReadMethodPool(Sel);
}
}
- for (GlobalMethodPool::iterator M = MethodPool.begin(),
- MEnd = MethodPool.end();
+ for (SemaObjC::GlobalMethodPool::iterator
+ M = SemaRef.ObjC().MethodPool.begin(),
+ MEnd = SemaRef.ObjC().MethodPool.end();
M != MEnd; ++M) {
for (ObjCMethodList *MethList = &M->second.first;
MethList && MethList->getMethod(); MethList = MethList->getNext()) {
@@ -8354,7 +8435,7 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
Results.getBasePriority(MethList->getMethod()), nullptr);
R.StartParameter = SelIdents.size();
R.AllParametersAreInformative = false;
- Results.MaybeAddResult(R, CurContext);
+ Results.MaybeAddResult(R, SemaRef.CurContext);
}
}
}
@@ -8375,12 +8456,13 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
return;
}
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteObjCForCollection(Scope *S,
- DeclGroupPtrTy IterationVar) {
+void SemaCodeCompletion::CodeCompleteObjCForCollection(
+ Scope *S, DeclGroupPtrTy IterationVar) {
CodeCompleteExpressionData Data;
Data.ObjCCollection = true;
@@ -8395,27 +8477,28 @@ void Sema::CodeCompleteObjCForCollection(Scope *S,
CodeCompleteExpression(S, Data);
}
-void Sema::CodeCompleteObjCSelector(Scope *S,
- ArrayRef<IdentifierInfo *> SelIdents) {
+void SemaCodeCompletion::CodeCompleteObjCSelector(
+ Scope *S, ArrayRef<const IdentifierInfo *> SelIdents) {
// If we have an external source, load the entire class method
// pool from the AST file.
- if (ExternalSource) {
- for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors(); I != N;
- ++I) {
- Selector Sel = ExternalSource->GetExternalSelector(I);
- if (Sel.isNull() || MethodPool.count(Sel))
+ if (SemaRef.ExternalSource) {
+ for (uint32_t I = 0, N = SemaRef.ExternalSource->GetNumExternalSelectors();
+ I != N; ++I) {
+ Selector Sel = SemaRef.ExternalSource->GetExternalSelector(I);
+ if (Sel.isNull() || SemaRef.ObjC().MethodPool.count(Sel))
continue;
- ReadMethodPool(Sel);
+ SemaRef.ObjC().ReadMethodPool(Sel);
}
}
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_SelectorName);
Results.EnterNewScope();
- for (GlobalMethodPool::iterator M = MethodPool.begin(),
- MEnd = MethodPool.end();
+ for (SemaObjC::GlobalMethodPool::iterator
+ M = SemaRef.ObjC().MethodPool.begin(),
+ MEnd = SemaRef.ObjC().MethodPool.end();
M != MEnd; ++M) {
Selector Sel = M->first;
@@ -8449,8 +8532,9 @@ void Sema::CodeCompleteObjCSelector(Scope *S,
}
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
/// Add all of the protocol declarations that we find in the given
@@ -8470,9 +8554,9 @@ static void AddProtocolResults(DeclContext *Ctx, DeclContext *CurContext,
}
}
-void Sema::CodeCompleteObjCProtocolReferences(
+void SemaCodeCompletion::CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols) {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCProtocolName);
@@ -8483,22 +8567,24 @@ void Sema::CodeCompleteObjCProtocolReferences(
// already seen.
// FIXME: This doesn't work when caching code-completion results.
for (const IdentifierLocPair &Pair : Protocols)
- if (ObjCProtocolDecl *Protocol = LookupProtocol(Pair.first, Pair.second))
+ if (ObjCProtocolDecl *Protocol =
+ SemaRef.ObjC().LookupProtocol(Pair.first, Pair.second))
Results.Ignore(Protocol);
// Add all protocols.
- AddProtocolResults(Context.getTranslationUnitDecl(), CurContext, false,
- Results);
+ AddProtocolResults(getASTContext().getTranslationUnitDecl(),
+ SemaRef.CurContext, false, Results);
Results.ExitScope();
}
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteObjCProtocolDecl(Scope *) {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+void SemaCodeCompletion::CodeCompleteObjCProtocolDecl(Scope *) {
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCProtocolName);
@@ -8506,14 +8592,15 @@ void Sema::CodeCompleteObjCProtocolDecl(Scope *) {
Results.EnterNewScope();
// Add all protocols.
- AddProtocolResults(Context.getTranslationUnitDecl(), CurContext, true,
- Results);
+ AddProtocolResults(getASTContext().getTranslationUnitDecl(),
+ SemaRef.CurContext, true, Results);
Results.ExitScope();
}
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
/// Add all of the Objective-C interface declarations that we find in
@@ -8535,99 +8622,102 @@ static void AddInterfaceResults(DeclContext *Ctx, DeclContext *CurContext,
}
}
-void Sema::CodeCompleteObjCInterfaceDecl(Scope *S) {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+void SemaCodeCompletion::CodeCompleteObjCInterfaceDecl(Scope *S) {
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCInterfaceName);
Results.EnterNewScope();
if (CodeCompleter->includeGlobals()) {
// Add all classes.
- AddInterfaceResults(Context.getTranslationUnitDecl(), CurContext, false,
- false, Results);
+ AddInterfaceResults(getASTContext().getTranslationUnitDecl(),
+ SemaRef.CurContext, false, false, Results);
}
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteObjCClassForwardDecl(Scope *S) {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+void SemaCodeCompletion::CodeCompleteObjCClassForwardDecl(Scope *S) {
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCClassForwardDecl);
Results.EnterNewScope();
if (CodeCompleter->includeGlobals()) {
// Add all classes.
- AddInterfaceResults(Context.getTranslationUnitDecl(), CurContext, false,
- false, Results);
+ AddInterfaceResults(getASTContext().getTranslationUnitDecl(),
+ SemaRef.CurContext, false, false, Results);
}
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
- SourceLocation ClassNameLoc) {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+void SemaCodeCompletion::CodeCompleteObjCSuperclass(
+ Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc) {
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCInterfaceName);
Results.EnterNewScope();
// Make sure that we ignore the class we're currently defining.
- NamedDecl *CurClass =
- LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
+ NamedDecl *CurClass = SemaRef.LookupSingleName(
+ SemaRef.TUScope, ClassName, ClassNameLoc, Sema::LookupOrdinaryName);
if (CurClass && isa<ObjCInterfaceDecl>(CurClass))
Results.Ignore(CurClass);
if (CodeCompleter->includeGlobals()) {
// Add all classes.
- AddInterfaceResults(Context.getTranslationUnitDecl(), CurContext, false,
- false, Results);
+ AddInterfaceResults(getASTContext().getTranslationUnitDecl(),
+ SemaRef.CurContext, false, false, Results);
}
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteObjCImplementationDecl(Scope *S) {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+void SemaCodeCompletion::CodeCompleteObjCImplementationDecl(Scope *S) {
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCImplementation);
Results.EnterNewScope();
if (CodeCompleter->includeGlobals()) {
// Add all unimplemented classes.
- AddInterfaceResults(Context.getTranslationUnitDecl(), CurContext, false,
- true, Results);
+ AddInterfaceResults(getASTContext().getTranslationUnitDecl(),
+ SemaRef.CurContext, false, true, Results);
}
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteObjCInterfaceCategory(Scope *S,
- IdentifierInfo *ClassName,
- SourceLocation ClassNameLoc) {
+void SemaCodeCompletion::CodeCompleteObjCInterfaceCategory(
+ Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc) {
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCCategoryName);
// Ignore any categories we find that have already been implemented by this
// interface.
llvm::SmallPtrSet<IdentifierInfo *, 16> CategoryNames;
- NamedDecl *CurClass =
- LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
+ NamedDecl *CurClass = SemaRef.LookupSingleName(
+ SemaRef.TUScope, ClassName, ClassNameLoc, Sema::LookupOrdinaryName);
if (ObjCInterfaceDecl *Class =
dyn_cast_or_null<ObjCInterfaceDecl>(CurClass)) {
for (const auto *Cat : Class->visible_categories())
@@ -8636,34 +8726,34 @@ void Sema::CodeCompleteObjCInterfaceCategory(Scope *S,
// Add all of the categories we know about.
Results.EnterNewScope();
- TranslationUnitDecl *TU = Context.getTranslationUnitDecl();
+ TranslationUnitDecl *TU = getASTContext().getTranslationUnitDecl();
for (const auto *D : TU->decls())
if (const auto *Category = dyn_cast<ObjCCategoryDecl>(D))
if (CategoryNames.insert(Category->getIdentifier()).second)
Results.AddResult(
Result(Category, Results.getBasePriority(Category), nullptr),
- CurContext, nullptr, false);
+ SemaRef.CurContext, nullptr, false);
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
- IdentifierInfo *ClassName,
- SourceLocation ClassNameLoc) {
+void SemaCodeCompletion::CodeCompleteObjCImplementationCategory(
+ Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc) {
typedef CodeCompletionResult Result;
// Find the corresponding interface. If we couldn't find the interface, the
// program itself is ill-formed. However, we'll try to be helpful still by
// providing the list of all of the categories we know about.
- NamedDecl *CurClass =
- LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
+ NamedDecl *CurClass = SemaRef.LookupSingleName(
+ SemaRef.TUScope, ClassName, ClassNameLoc, Sema::LookupOrdinaryName);
ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurClass);
if (!Class)
return CodeCompleteObjCInterfaceCategory(S, ClassName, ClassNameLoc);
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCCategoryName);
@@ -8678,7 +8768,7 @@ void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
if ((!IgnoreImplemented || !Cat->getImplementation()) &&
CategoryNames.insert(Cat->getIdentifier()).second)
Results.AddResult(Result(Cat, Results.getBasePriority(Cat), nullptr),
- CurContext, nullptr, false);
+ SemaRef.CurContext, nullptr, false);
}
Class = Class->getSuperClass();
@@ -8686,18 +8776,19 @@ void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
}
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteObjCPropertyDefinition(Scope *S) {
+void SemaCodeCompletion::CodeCompleteObjCPropertyDefinition(Scope *S) {
CodeCompletionContext CCContext(CodeCompletionContext::CCC_Other);
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(), CCContext);
// Figure out where this @synthesize lives.
ObjCContainerDecl *Container =
- dyn_cast_or_null<ObjCContainerDecl>(CurContext);
+ dyn_cast_or_null<ObjCContainerDecl>(SemaRef.CurContext);
if (!Container || (!isa<ObjCImplementationDecl>(Container) &&
!isa<ObjCCategoryImplDecl>(Container)))
return;
@@ -8714,29 +8805,30 @@ void Sema::CodeCompleteObjCPropertyDefinition(Scope *S) {
if (ObjCImplementationDecl *ClassImpl =
dyn_cast<ObjCImplementationDecl>(Container))
AddObjCProperties(CCContext, ClassImpl->getClassInterface(), false,
- /*AllowNullaryMethods=*/false, CurContext,
+ /*AllowNullaryMethods=*/false, SemaRef.CurContext,
AddedProperties, Results);
else
AddObjCProperties(CCContext,
cast<ObjCCategoryImplDecl>(Container)->getCategoryDecl(),
- false, /*AllowNullaryMethods=*/false, CurContext,
+ false, /*AllowNullaryMethods=*/false, SemaRef.CurContext,
AddedProperties, Results);
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteObjCPropertySynthesizeIvar(
+void SemaCodeCompletion::CodeCompleteObjCPropertySynthesizeIvar(
Scope *S, IdentifierInfo *PropertyName) {
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
// Figure out where this @synthesize lives.
ObjCContainerDecl *Container =
- dyn_cast_or_null<ObjCContainerDecl>(CurContext);
+ dyn_cast_or_null<ObjCContainerDecl>(SemaRef.CurContext);
if (!Container || (!isa<ObjCImplementationDecl>(Container) &&
!isa<ObjCCategoryImplDecl>(Container)))
return;
@@ -8752,7 +8844,7 @@ void Sema::CodeCompleteObjCPropertySynthesizeIvar(
->getClassInterface();
// Determine the type of the property we're synthesizing.
- QualType PropertyType = Context.getObjCIdType();
+ QualType PropertyType = getASTContext().getObjCIdType();
if (Class) {
if (ObjCPropertyDecl *Property = Class->FindPropertyDeclaration(
PropertyName, ObjCPropertyQueryKind::OBJC_PR_query_instance)) {
@@ -8776,7 +8868,7 @@ void Sema::CodeCompleteObjCPropertySynthesizeIvar(
for (ObjCIvarDecl *Ivar = Class->all_declared_ivar_begin(); Ivar;
Ivar = Ivar->getNextIvar()) {
Results.AddResult(Result(Ivar, Results.getBasePriority(Ivar), nullptr),
- CurContext, nullptr, false);
+ SemaRef.CurContext, nullptr, false);
// Determine whether we've seen an ivar with a name similar to the
// property.
@@ -8805,9 +8897,9 @@ void Sema::CodeCompleteObjCPropertySynthesizeIvar(
CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo(),
Priority, CXAvailability_Available);
- PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
- Builder.AddResultTypeChunk(
- GetCompletionTypeString(PropertyType, Context, Policy, Allocator));
+ PrintingPolicy Policy = getCompletionPrintingPolicy(SemaRef);
+ Builder.AddResultTypeChunk(GetCompletionTypeString(
+ PropertyType, getASTContext(), Policy, Allocator));
Builder.AddTypedTextChunk(Allocator.CopyString(NameWithPrefix));
Results.AddResult(
Result(Builder.TakeString(), Priority, CXCursor_ObjCIvarDecl));
@@ -8815,8 +8907,9 @@ void Sema::CodeCompleteObjCPropertySynthesizeIvar(
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
// Mapping from selectors to the methods that implement that selector, along
@@ -9151,8 +9244,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// Add -(void)getKey:(type **)buffer range:(NSRange)inRange
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("get") + UpperKey).str();
- IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName),
- &Context.Idents.get("range")};
+ const IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName),
+ &Context.Idents.get("range")};
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
@@ -9183,8 +9276,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// - (void)insertObject:(type *)object inKeyAtIndex:(NSUInteger)index
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("in") + UpperKey + "AtIndex").str();
- IdentifierInfo *SelectorIds[2] = {&Context.Idents.get("insertObject"),
- &Context.Idents.get(SelectorName)};
+ const IdentifierInfo *SelectorIds[2] = {&Context.Idents.get("insertObject"),
+ &Context.Idents.get(SelectorName)};
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
@@ -9213,8 +9306,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// - (void)insertKey:(NSArray *)array atIndexes:(NSIndexSet *)indexes
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("insert") + UpperKey).str();
- IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName),
- &Context.Idents.get("atIndexes")};
+ const IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName),
+ &Context.Idents.get("atIndexes")};
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
@@ -9243,7 +9336,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName =
(Twine("removeObjectFrom") + UpperKey + "AtIndex").str();
- IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
@@ -9264,7 +9357,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// -(void)removeKeyAtIndexes:(NSIndexSet *)indexes
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("remove") + UpperKey + "AtIndexes").str();
- IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
@@ -9286,8 +9379,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName =
(Twine("replaceObjectIn") + UpperKey + "AtIndex").str();
- IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName),
- &Context.Idents.get("withObject")};
+ const IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName),
+ &Context.Idents.get("withObject")};
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
@@ -9317,8 +9410,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
std::string SelectorName1 =
(Twine("replace") + UpperKey + "AtIndexes").str();
std::string SelectorName2 = (Twine("with") + UpperKey).str();
- IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName1),
- &Context.Idents.get(SelectorName2)};
+ const IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName1),
+ &Context.Idents.get(SelectorName2)};
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
@@ -9353,7 +9446,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
->getInterfaceDecl()
->getName() == "NSEnumerator"))) {
std::string SelectorName = (Twine("enumeratorOf") + UpperKey).str();
- IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
.second) {
if (ReturnType.isNull()) {
@@ -9372,7 +9465,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
if (IsInstanceMethod &&
(ReturnType.isNull() || ReturnType->isObjCObjectPointerType())) {
std::string SelectorName = (Twine("memberOf") + UpperKey).str();
- IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
@@ -9402,7 +9495,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName =
(Twine("add") + UpperKey + Twine("Object")).str();
- IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
@@ -9424,7 +9517,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// - (void)addKey:(NSSet *)objects
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("add") + UpperKey).str();
- IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
@@ -9446,7 +9539,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName =
(Twine("remove") + UpperKey + Twine("Object")).str();
- IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
@@ -9468,7 +9561,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// - (void)removeKey:(NSSet *)objects
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("remove") + UpperKey).str();
- IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
@@ -9489,7 +9582,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// - (void)intersectKey:(NSSet *)objects
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("intersect") + UpperKey).str();
- IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
@@ -9518,7 +9611,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
->getName() == "NSSet"))) {
std::string SelectorName =
(Twine("keyPathsForValuesAffecting") + UpperKey).str();
- IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
.second) {
if (ReturnType.isNull()) {
@@ -9539,7 +9632,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
ReturnType->isBooleanType())) {
std::string SelectorName =
(Twine("automaticallyNotifiesObserversOf") + UpperKey).str();
- IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
.second) {
if (ReturnType.isNull()) {
@@ -9555,15 +9648,15 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
}
}
-void Sema::CodeCompleteObjCMethodDecl(Scope *S,
- std::optional<bool> IsInstanceMethod,
- ParsedType ReturnTy) {
+void SemaCodeCompletion::CodeCompleteObjCMethodDecl(
+ Scope *S, std::optional<bool> IsInstanceMethod, ParsedType ReturnTy) {
+ ASTContext &Context = getASTContext();
// Determine the return type of the method we're declaring, if
// provided.
- QualType ReturnType = GetTypeFromParser(ReturnTy);
+ QualType ReturnType = SemaRef.GetTypeFromParser(ReturnTy);
Decl *IDecl = nullptr;
- if (CurContext->isObjCContainer()) {
- ObjCContainerDecl *OCD = dyn_cast<ObjCContainerDecl>(CurContext);
+ if (SemaRef.CurContext->isObjCContainer()) {
+ ObjCContainerDecl *OCD = dyn_cast<ObjCContainerDecl>(SemaRef.CurContext);
IDecl = OCD;
}
// Determine where we should start searching for methods.
@@ -9587,7 +9680,7 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S,
}
if (!SearchDecl) {
- HandleCodeCompleteResults(this, CodeCompleter,
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
CodeCompletionContext::CCC_Other, nullptr, 0);
return;
}
@@ -9599,11 +9692,11 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S,
// Add declarations or definitions for each of the known methods.
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
- PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
+ PrintingPolicy Policy = getCompletionPrintingPolicy(SemaRef);
for (KnownMethodsMap::iterator M = KnownMethods.begin(),
MEnd = KnownMethods.end();
M != MEnd; ++M) {
@@ -9728,38 +9821,41 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S,
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteObjCMethodDeclSelector(
+void SemaCodeCompletion::CodeCompleteObjCMethodDeclSelector(
Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnTy,
- ArrayRef<IdentifierInfo *> SelIdents) {
+ ArrayRef<const IdentifierInfo *> SelIdents) {
// If we have an external source, load the entire class method
// pool from the AST file.
- if (ExternalSource) {
- for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors(); I != N;
- ++I) {
- Selector Sel = ExternalSource->GetExternalSelector(I);
- if (Sel.isNull() || MethodPool.count(Sel))
+ if (SemaRef.ExternalSource) {
+ for (uint32_t I = 0, N = SemaRef.ExternalSource->GetNumExternalSelectors();
+ I != N; ++I) {
+ Selector Sel = SemaRef.ExternalSource->GetExternalSelector(I);
+ if (Sel.isNull() || SemaRef.ObjC().MethodPool.count(Sel))
continue;
- ReadMethodPool(Sel);
+ SemaRef.ObjC().ReadMethodPool(Sel);
}
}
// Build the set of methods we can see.
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
if (ReturnTy)
- Results.setPreferredType(GetTypeFromParser(ReturnTy).getNonReferenceType());
+ Results.setPreferredType(
+ SemaRef.GetTypeFromParser(ReturnTy).getNonReferenceType());
Results.EnterNewScope();
- for (GlobalMethodPool::iterator M = MethodPool.begin(),
- MEnd = MethodPool.end();
+ for (SemaObjC::GlobalMethodPool::iterator
+ M = SemaRef.ObjC().MethodPool.begin(),
+ MEnd = SemaRef.ObjC().MethodPool.end();
M != MEnd; ++M) {
for (ObjCMethodList *MethList = IsInstanceMethod ? &M->second.first
: &M->second.second;
@@ -9791,7 +9887,7 @@ void Sema::CodeCompleteObjCMethodDeclSelector(
R.StartParameter = SelIdents.size();
R.AllParametersAreInformative = false;
R.DeclaringEntity = true;
- Results.MaybeAddResult(R, CurContext);
+ Results.MaybeAddResult(R, SemaRef.CurContext);
}
}
@@ -9799,7 +9895,7 @@ void Sema::CodeCompleteObjCMethodDeclSelector(
if (!AtParameterName && !SelIdents.empty() &&
SelIdents.front()->getName().starts_with("init")) {
- for (const auto &M : PP.macros()) {
+ for (const auto &M : SemaRef.PP.macros()) {
if (M.first->getName() != "NS_DESIGNATED_INITIALIZER")
continue;
Results.EnterNewScope();
@@ -9813,12 +9909,13 @@ void Sema::CodeCompleteObjCMethodDeclSelector(
}
}
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+void SemaCodeCompletion::CodeCompletePreprocessorDirective(bool InConditional) {
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_PreprocessorDirective);
Results.EnterNewScope();
@@ -9983,17 +10080,20 @@ void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
// FIXME: we don't support #assert or #unassert, so don't suggest them.
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteInPreprocessorConditionalExclusion(Scope *S) {
- CodeCompleteOrdinaryName(S, S->getFnParent() ? Sema::PCC_RecoveryInFunction
- : Sema::PCC_Namespace);
+void SemaCodeCompletion::CodeCompleteInPreprocessorConditionalExclusion(
+ Scope *S) {
+ CodeCompleteOrdinaryName(S, S->getFnParent()
+ ? SemaCodeCompletion::PCC_RecoveryInFunction
+ : SemaCodeCompletion::PCC_Namespace);
}
-void Sema::CodeCompletePreprocessorMacroName(bool IsDefinition) {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+void SemaCodeCompletion::CodeCompletePreprocessorMacroName(bool IsDefinition) {
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
IsDefinition ? CodeCompletionContext::CCC_MacroName
: CodeCompletionContext::CCC_MacroNameUse);
@@ -10002,8 +10102,8 @@ void Sema::CodeCompletePreprocessorMacroName(bool IsDefinition) {
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
Results.EnterNewScope();
- for (Preprocessor::macro_iterator M = PP.macro_begin(),
- MEnd = PP.macro_end();
+ for (Preprocessor::macro_iterator M = SemaRef.PP.macro_begin(),
+ MEnd = SemaRef.PP.macro_end();
M != MEnd; ++M) {
Builder.AddTypedTextChunk(
Builder.getAllocator().CopyString(M->first->getName()));
@@ -10015,17 +10115,18 @@ void Sema::CodeCompletePreprocessorMacroName(bool IsDefinition) {
// FIXME: Can we detect when the user just wrote an include guard above?
}
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompletePreprocessorExpression() {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+void SemaCodeCompletion::CodeCompletePreprocessorExpression() {
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_PreprocessorExpression);
if (CodeCompleter->includeMacros())
- AddMacroResults(PP, Results, CodeCompleter->loadExternal(), true);
+ AddMacroResults(SemaRef.PP, Results, CodeCompleter->loadExternal(), true);
// defined (<macro>)
Results.EnterNewScope();
@@ -10039,14 +10140,13 @@ void Sema::CodeCompletePreprocessorExpression() {
Results.AddResult(Builder.TakeString());
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompletePreprocessorMacroArgument(Scope *S,
- IdentifierInfo *Macro,
- MacroInfo *MacroInfo,
- unsigned Argument) {
+void SemaCodeCompletion::CodeCompletePreprocessorMacroArgument(
+ Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument) {
// FIXME: In the future, we could provide "overload" results, much like we
// do for function calls.
@@ -10057,7 +10157,8 @@ void Sema::CodeCompletePreprocessorMacroArgument(Scope *S,
// This handles completion inside an #include filename, e.g. #include <foo/ba
// We look for the directory "foo" under each directory on the include path,
// list its files, and reassemble the appropriate #include.
-void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
+void SemaCodeCompletion::CodeCompleteIncludedFile(llvm::StringRef Dir,
+ bool Angled) {
// RelDir should use /, but unescaped \ is possible on windows!
// Our completions will normalize to / for simplicity, this case is rare.
std::string RelDir = llvm::sys::path::convert_to_slash(Dir);
@@ -10065,9 +10166,9 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
SmallString<128> NativeRelDir = StringRef(RelDir);
llvm::sys::path::native(NativeRelDir);
llvm::vfs::FileSystem &FS =
- getSourceManager().getFileManager().getVirtualFileSystem();
+ SemaRef.getSourceManager().getFileManager().getVirtualFileSystem();
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_IncludedFile);
llvm::DenseSet<StringRef> SeenResults; // To deduplicate results.
@@ -10179,11 +10280,11 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
// Finally with all our helpers, we can scan the include path.
// Do this in standard order so deduplication keeps the right file.
// (In case we decide to add more details to the results later).
- const auto &S = PP.getHeaderSearchInfo();
+ const auto &S = SemaRef.PP.getHeaderSearchInfo();
using llvm::make_range;
if (!Angled) {
// The current directory is on the include path for "quoted" includes.
- if (auto CurFile = PP.getCurrentFileLexer()->getFileEntry())
+ if (auto CurFile = SemaRef.PP.getCurrentFileLexer()->getFileEntry())
AddFilesFromIncludeDir(CurFile->getDir().getName(), false,
DirectoryLookup::LT_NormalDir);
for (const auto &D : make_range(S.quoted_dir_begin(), S.quoted_dir_end()))
@@ -10194,18 +10295,19 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
for (const auto &D : make_range(S.system_dir_begin(), S.system_dir_end()))
AddFilesFromDirLookup(D, true);
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::CodeCompleteNaturalLanguage() {
- HandleCodeCompleteResults(this, CodeCompleter,
+void SemaCodeCompletion::CodeCompleteNaturalLanguage() {
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
CodeCompletionContext::CCC_NaturalLanguage, nullptr,
0);
}
-void Sema::CodeCompleteAvailabilityPlatformName() {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+void SemaCodeCompletion::CodeCompleteAvailabilityPlatformName() {
+ ResultBuilder Results(SemaRef, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
@@ -10216,28 +10318,33 @@ void Sema::CodeCompleteAvailabilityPlatformName() {
Twine(Platform) + "ApplicationExtension")));
}
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ HandleCodeCompleteResults(&SemaRef, CodeCompleter,
+ Results.getCompletionContext(), Results.data(),
+ Results.size());
}
-void Sema::GatherGlobalCodeCompletions(
+void SemaCodeCompletion::GatherGlobalCodeCompletions(
CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results) {
- ResultBuilder Builder(*this, Allocator, CCTUInfo,
+ ResultBuilder Builder(SemaRef, Allocator, CCTUInfo,
CodeCompletionContext::CCC_Recovery);
if (!CodeCompleter || CodeCompleter->includeGlobals()) {
- CodeCompletionDeclConsumer Consumer(Builder,
- Context.getTranslationUnitDecl());
- LookupVisibleDecls(Context.getTranslationUnitDecl(), LookupAnyName,
- Consumer,
- !CodeCompleter || CodeCompleter->loadExternal());
+ CodeCompletionDeclConsumer Consumer(
+ Builder, getASTContext().getTranslationUnitDecl());
+ SemaRef.LookupVisibleDecls(getASTContext().getTranslationUnitDecl(),
+ Sema::LookupAnyName, Consumer,
+ !CodeCompleter || CodeCompleter->loadExternal());
}
if (!CodeCompleter || CodeCompleter->includeMacros())
- AddMacroResults(PP, Builder,
+ AddMacroResults(SemaRef.PP, Builder,
!CodeCompleter || CodeCompleter->loadExternal(), true);
Results.clear();
Results.insert(Results.end(), Builder.data(),
Builder.data() + Builder.size());
}
+
+SemaCodeCompletion::SemaCodeCompletion(Sema &S,
+ CodeCompleteConsumer *CompletionConsumer)
+ : SemaBase(S), CodeCompleter(CompletionConsumer) {}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp b/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
index 88fc846c89e4..c45443d76e6b 100755
--- a/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
@@ -65,6 +65,7 @@ public:
const Expr *getLHS() const { return LHS; }
const Expr *getRHS() const { return RHS; }
+ OverloadedOperatorKind getOp() const { return Op; }
ExprResult recreateBinOp(Sema &SemaRef, ExprResult LHS) const {
return recreateBinOp(SemaRef, LHS, const_cast<Expr *>(getRHS()));
@@ -177,77 +178,177 @@ struct SatisfactionStackRAII {
};
} // namespace
-template <typename AtomicEvaluator>
+template <typename ConstraintEvaluator>
static ExprResult
calculateConstraintSatisfaction(Sema &S, const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction,
- AtomicEvaluator &&Evaluator) {
- ConstraintExpr = ConstraintExpr->IgnoreParenImpCasts();
+ const ConstraintEvaluator &Evaluator);
- if (LogicalBinOp BO = ConstraintExpr) {
- size_t EffectiveDetailEndIndex = Satisfaction.Details.size();
- ExprResult LHSRes = calculateConstraintSatisfaction(
- S, BO.getLHS(), Satisfaction, Evaluator);
+template <typename ConstraintEvaluator>
+static ExprResult
+calculateConstraintSatisfaction(Sema &S, const Expr *LHS,
+ OverloadedOperatorKind Op, const Expr *RHS,
+ ConstraintSatisfaction &Satisfaction,
+ const ConstraintEvaluator &Evaluator) {
+ size_t EffectiveDetailEndIndex = Satisfaction.Details.size();
- if (LHSRes.isInvalid())
- return ExprError();
+ ExprResult LHSRes =
+ calculateConstraintSatisfaction(S, LHS, Satisfaction, Evaluator);
- bool IsLHSSatisfied = Satisfaction.IsSatisfied;
+ if (LHSRes.isInvalid())
+ return ExprError();
- if (BO.isOr() && IsLHSSatisfied)
- // [temp.constr.op] p3
- // A disjunction is a constraint taking two operands. To determine if
- // a disjunction is satisfied, the satisfaction of the first operand
- // is checked. If that is satisfied, the disjunction is satisfied.
- // Otherwise, the disjunction is satisfied if and only if the second
- // operand is satisfied.
- // LHS is instantiated while RHS is not. Skip creating invalid BinaryOp.
- return LHSRes;
+ bool IsLHSSatisfied = Satisfaction.IsSatisfied;
+
+ if (Op == clang::OO_PipePipe && IsLHSSatisfied)
+ // [temp.constr.op] p3
+ // A disjunction is a constraint taking two operands. To determine if
+ // a disjunction is satisfied, the satisfaction of the first operand
+ // is checked. If that is satisfied, the disjunction is satisfied.
+ // Otherwise, the disjunction is satisfied if and only if the second
+ // operand is satisfied.
+ // LHS is instantiated while RHS is not. Skip creating invalid BinaryOp.
+ return LHSRes;
+
+ if (Op == clang::OO_AmpAmp && !IsLHSSatisfied)
+ // [temp.constr.op] p2
+ // A conjunction is a constraint taking two operands. To determine if
+ // a conjunction is satisfied, the satisfaction of the first operand
+ // is checked. If that is not satisfied, the conjunction is not
+ // satisfied. Otherwise, the conjunction is satisfied if and only if
+ // the second operand is satisfied.
+ // LHS is instantiated while RHS is not. Skip creating invalid BinaryOp.
+ return LHSRes;
+
+ ExprResult RHSRes =
+ calculateConstraintSatisfaction(S, RHS, Satisfaction, Evaluator);
+ if (RHSRes.isInvalid())
+ return ExprError();
- if (BO.isAnd() && !IsLHSSatisfied)
- // [temp.constr.op] p2
- // A conjunction is a constraint taking two operands. To determine if
- // a conjunction is satisfied, the satisfaction of the first operand
- // is checked. If that is not satisfied, the conjunction is not
- // satisfied. Otherwise, the conjunction is satisfied if and only if
- // the second operand is satisfied.
- // LHS is instantiated while RHS is not. Skip creating invalid BinaryOp.
- return LHSRes;
-
- ExprResult RHSRes = calculateConstraintSatisfaction(
- S, BO.getRHS(), Satisfaction, std::forward<AtomicEvaluator>(Evaluator));
- if (RHSRes.isInvalid())
+ bool IsRHSSatisfied = Satisfaction.IsSatisfied;
+ // Current implementation adds diagnostic information about the falsity
+ // of each false atomic constraint expression when it evaluates them.
+ // When the evaluation results to `false || true`, the information
+ // generated during the evaluation of left-hand side is meaningless
+ // because the whole expression evaluates to true.
+ // The following code removes the irrelevant diagnostic information.
+ // FIXME: We should probably delay the addition of diagnostic information
+ // until we know the entire expression is false.
+ if (Op == clang::OO_PipePipe && IsRHSSatisfied) {
+ auto EffectiveDetailEnd = Satisfaction.Details.begin();
+ std::advance(EffectiveDetailEnd, EffectiveDetailEndIndex);
+ Satisfaction.Details.erase(EffectiveDetailEnd, Satisfaction.Details.end());
+ }
+
+ if (!LHSRes.isUsable() || !RHSRes.isUsable())
+ return ExprEmpty();
+
+ return BinaryOperator::Create(S.Context, LHSRes.get(), RHSRes.get(),
+ BinaryOperator::getOverloadedOpcode(Op),
+ S.Context.BoolTy, VK_PRValue, OK_Ordinary,
+ LHS->getBeginLoc(), FPOptionsOverride{});
+}
+
+template <typename ConstraintEvaluator>
+static ExprResult
+calculateConstraintSatisfaction(Sema &S, const CXXFoldExpr *FE,
+ ConstraintSatisfaction &Satisfaction,
+ const ConstraintEvaluator &Evaluator) {
+ bool Conjunction = FE->getOperator() == BinaryOperatorKind::BO_LAnd;
+ size_t EffectiveDetailEndIndex = Satisfaction.Details.size();
+
+ ExprResult Out;
+ if (FE->isLeftFold() && FE->getInit()) {
+ Out = calculateConstraintSatisfaction(S, FE->getInit(), Satisfaction,
+ Evaluator);
+ if (Out.isInvalid())
return ExprError();
+ // If the first clause of a conjunction is not satisfied,
+ // or if the first clause of a disjection is satisfied,
+ // we have established satisfaction of the whole constraint
+ // and we should not continue further.
+ if (Conjunction != Satisfaction.IsSatisfied)
+ return Out;
+ }
+ std::optional<unsigned> NumExpansions =
+ Evaluator.EvaluateFoldExpandedConstraintSize(FE);
+ if (!NumExpansions)
+ return ExprError();
+ for (unsigned I = 0; I < *NumExpansions; I++) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, I);
+ ExprResult Res = calculateConstraintSatisfaction(S, FE->getPattern(),
+ Satisfaction, Evaluator);
+ if (Res.isInvalid())
+ return ExprError();
bool IsRHSSatisfied = Satisfaction.IsSatisfied;
- // Current implementation adds diagnostic information about the falsity
- // of each false atomic constraint expression when it evaluates them.
- // When the evaluation results to `false || true`, the information
- // generated during the evaluation of left-hand side is meaningless
- // because the whole expression evaluates to true.
- // The following code removes the irrelevant diagnostic information.
- // FIXME: We should probably delay the addition of diagnostic information
- // until we know the entire expression is false.
- if (BO.isOr() && IsRHSSatisfied) {
+ if (!Conjunction && IsRHSSatisfied) {
auto EffectiveDetailEnd = Satisfaction.Details.begin();
std::advance(EffectiveDetailEnd, EffectiveDetailEndIndex);
Satisfaction.Details.erase(EffectiveDetailEnd,
Satisfaction.Details.end());
}
+ if (Out.isUnset())
+ Out = Res;
+ else if (!Res.isUnset()) {
+ Out = BinaryOperator::Create(
+ S.Context, Out.get(), Res.get(), FE->getOperator(), S.Context.BoolTy,
+ VK_PRValue, OK_Ordinary, FE->getBeginLoc(), FPOptionsOverride{});
+ }
+ if (Conjunction != IsRHSSatisfied)
+ return Out;
+ }
+
+ if (FE->isRightFold() && FE->getInit()) {
+ ExprResult Res = calculateConstraintSatisfaction(S, FE->getInit(),
+ Satisfaction, Evaluator);
+ if (Out.isInvalid())
+ return ExprError();
- return BO.recreateBinOp(S, LHSRes, RHSRes);
+ if (Out.isUnset())
+ Out = Res;
+ else if (!Res.isUnset()) {
+ Out = BinaryOperator::Create(
+ S.Context, Out.get(), Res.get(), FE->getOperator(), S.Context.BoolTy,
+ VK_PRValue, OK_Ordinary, FE->getBeginLoc(), FPOptionsOverride{});
+ }
+ }
+
+ if (Out.isUnset()) {
+ Satisfaction.IsSatisfied = Conjunction;
+ Out = S.BuildEmptyCXXFoldExpr(FE->getBeginLoc(), FE->getOperator());
}
+ return Out;
+}
+
+template <typename ConstraintEvaluator>
+static ExprResult
+calculateConstraintSatisfaction(Sema &S, const Expr *ConstraintExpr,
+ ConstraintSatisfaction &Satisfaction,
+ const ConstraintEvaluator &Evaluator) {
+ ConstraintExpr = ConstraintExpr->IgnoreParenImpCasts();
+
+ if (LogicalBinOp BO = ConstraintExpr)
+ return calculateConstraintSatisfaction(
+ S, BO.getLHS(), BO.getOp(), BO.getRHS(), Satisfaction, Evaluator);
if (auto *C = dyn_cast<ExprWithCleanups>(ConstraintExpr)) {
// These aren't evaluated, so we don't care about cleanups, so we can just
// evaluate these as if the cleanups didn't exist.
- return calculateConstraintSatisfaction(
- S, C->getSubExpr(), Satisfaction,
- std::forward<AtomicEvaluator>(Evaluator));
+ return calculateConstraintSatisfaction(S, C->getSubExpr(), Satisfaction,
+ Evaluator);
+ }
+
+ if (auto *FE = dyn_cast<CXXFoldExpr>(ConstraintExpr);
+ FE && S.getLangOpts().CPlusPlus26 &&
+ (FE->getOperator() == BinaryOperatorKind::BO_LAnd ||
+ FE->getOperator() == BinaryOperatorKind::BO_LOr)) {
+ return calculateConstraintSatisfaction(S, FE, Satisfaction, Evaluator);
}
// An atomic constraint expression
- ExprResult SubstitutedAtomicExpr = Evaluator(ConstraintExpr);
+ ExprResult SubstitutedAtomicExpr =
+ Evaluator.EvaluateAtomicConstraint(ConstraintExpr);
if (SubstitutedAtomicExpr.isInvalid())
return ExprError();
@@ -273,7 +374,6 @@ calculateConstraintSatisfaction(Sema &S, const Expr *ConstraintExpr,
char *Mem = new (S.Context) char[MessageSize];
memcpy(Mem, DiagString.c_str(), MessageSize);
Satisfaction.Details.emplace_back(
- ConstraintExpr,
new (S.Context) ConstraintSatisfaction::SubstitutionDiagnostic{
SubstitutedAtomicExpr.get()->getBeginLoc(),
StringRef(Mem, MessageSize)});
@@ -302,8 +402,7 @@ calculateConstraintSatisfaction(Sema &S, const Expr *ConstraintExpr,
"evaluating bool expression didn't produce int");
Satisfaction.IsSatisfied = EvalResult.Val.getInt().getBoolValue();
if (!Satisfaction.IsSatisfied)
- Satisfaction.Details.emplace_back(ConstraintExpr,
- SubstitutedAtomicExpr.get());
+ Satisfaction.Details.emplace_back(SubstitutedAtomicExpr.get());
return SubstitutedAtomicExpr;
}
@@ -336,92 +435,136 @@ static ExprResult calculateConstraintSatisfaction(
Sema &S, const NamedDecl *Template, SourceLocation TemplateNameLoc,
const MultiLevelTemplateArgumentList &MLTAL, const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction) {
- return calculateConstraintSatisfaction(
- S, ConstraintExpr, Satisfaction, [&](const Expr *AtomicExpr) {
- EnterExpressionEvaluationContext ConstantEvaluated(
- S, Sema::ExpressionEvaluationContext::ConstantEvaluated,
- Sema::ReuseLambdaContextDecl);
-
- // Atomic constraint - substitute arguments and check satisfaction.
- ExprResult SubstitutedExpression;
- {
- TemplateDeductionInfo Info(TemplateNameLoc);
- Sema::InstantiatingTemplate Inst(S, AtomicExpr->getBeginLoc(),
- Sema::InstantiatingTemplate::ConstraintSubstitution{},
- const_cast<NamedDecl *>(Template), Info,
- AtomicExpr->getSourceRange());
- if (Inst.isInvalid())
+
+ struct ConstraintEvaluator {
+ Sema &S;
+ const NamedDecl *Template;
+ SourceLocation TemplateNameLoc;
+ const MultiLevelTemplateArgumentList &MLTAL;
+ ConstraintSatisfaction &Satisfaction;
+
+ ExprResult EvaluateAtomicConstraint(const Expr *AtomicExpr) const {
+ EnterExpressionEvaluationContext ConstantEvaluated(
+ S, Sema::ExpressionEvaluationContext::ConstantEvaluated,
+ Sema::ReuseLambdaContextDecl);
+
+ // Atomic constraint - substitute arguments and check satisfaction.
+ ExprResult SubstitutedExpression;
+ {
+ TemplateDeductionInfo Info(TemplateNameLoc);
+ Sema::InstantiatingTemplate Inst(
+ S, AtomicExpr->getBeginLoc(),
+ Sema::InstantiatingTemplate::ConstraintSubstitution{},
+ const_cast<NamedDecl *>(Template), Info,
+ AtomicExpr->getSourceRange());
+ if (Inst.isInvalid())
+ return ExprError();
+
+ llvm::FoldingSetNodeID ID;
+ if (Template &&
+ DiagRecursiveConstraintEval(S, ID, Template, AtomicExpr, MLTAL)) {
+ Satisfaction.IsSatisfied = false;
+ Satisfaction.ContainsErrors = true;
+ return ExprEmpty();
+ }
+
+ SatisfactionStackRAII StackRAII(S, Template, ID);
+
+ // We do not want error diagnostics escaping here.
+ Sema::SFINAETrap Trap(S);
+ SubstitutedExpression =
+ S.SubstConstraintExpr(const_cast<Expr *>(AtomicExpr), MLTAL);
+
+ if (SubstitutedExpression.isInvalid() || Trap.hasErrorOccurred()) {
+ // C++2a [temp.constr.atomic]p1
+ // ...If substitution results in an invalid type or expression, the
+ // constraint is not satisfied.
+ if (!Trap.hasErrorOccurred())
+ // A non-SFINAE error has occurred as a result of this
+ // substitution.
return ExprError();
- llvm::FoldingSetNodeID ID;
- if (Template &&
- DiagRecursiveConstraintEval(S, ID, Template, AtomicExpr, MLTAL)) {
- Satisfaction.IsSatisfied = false;
- Satisfaction.ContainsErrors = true;
- return ExprEmpty();
- }
-
- SatisfactionStackRAII StackRAII(S, Template, ID);
-
- // We do not want error diagnostics escaping here.
- Sema::SFINAETrap Trap(S);
- SubstitutedExpression =
- S.SubstConstraintExpr(const_cast<Expr *>(AtomicExpr), MLTAL);
-
- if (SubstitutedExpression.isInvalid() || Trap.hasErrorOccurred()) {
- // C++2a [temp.constr.atomic]p1
- // ...If substitution results in an invalid type or expression, the
- // constraint is not satisfied.
- if (!Trap.hasErrorOccurred())
- // A non-SFINAE error has occurred as a result of this
- // substitution.
- return ExprError();
-
- PartialDiagnosticAt SubstDiag{SourceLocation(),
- PartialDiagnostic::NullDiagnostic()};
- Info.takeSFINAEDiagnostic(SubstDiag);
- // FIXME: Concepts: This is an unfortunate consequence of there
- // being no serialization code for PartialDiagnostics and the fact
- // that serializing them would likely take a lot more storage than
- // just storing them as strings. We would still like, in the
- // future, to serialize the proper PartialDiagnostic as serializing
- // it as a string defeats the purpose of the diagnostic mechanism.
- SmallString<128> DiagString;
- DiagString = ": ";
- SubstDiag.second.EmitToString(S.getDiagnostics(), DiagString);
- unsigned MessageSize = DiagString.size();
- char *Mem = new (S.Context) char[MessageSize];
- memcpy(Mem, DiagString.c_str(), MessageSize);
- Satisfaction.Details.emplace_back(
- AtomicExpr,
- new (S.Context) ConstraintSatisfaction::SubstitutionDiagnostic{
- SubstDiag.first, StringRef(Mem, MessageSize)});
- Satisfaction.IsSatisfied = false;
- return ExprEmpty();
- }
+ PartialDiagnosticAt SubstDiag{SourceLocation(),
+ PartialDiagnostic::NullDiagnostic()};
+ Info.takeSFINAEDiagnostic(SubstDiag);
+ // FIXME: Concepts: This is an unfortunate consequence of there
+ // being no serialization code for PartialDiagnostics and the fact
+ // that serializing them would likely take a lot more storage than
+ // just storing them as strings. We would still like, in the
+ // future, to serialize the proper PartialDiagnostic as serializing
+ // it as a string defeats the purpose of the diagnostic mechanism.
+ SmallString<128> DiagString;
+ DiagString = ": ";
+ SubstDiag.second.EmitToString(S.getDiagnostics(), DiagString);
+ unsigned MessageSize = DiagString.size();
+ char *Mem = new (S.Context) char[MessageSize];
+ memcpy(Mem, DiagString.c_str(), MessageSize);
+ Satisfaction.Details.emplace_back(
+ new (S.Context) ConstraintSatisfaction::SubstitutionDiagnostic{
+ SubstDiag.first, StringRef(Mem, MessageSize)});
+ Satisfaction.IsSatisfied = false;
+ return ExprEmpty();
}
+ }
- if (!S.CheckConstraintExpression(SubstitutedExpression.get()))
- return ExprError();
+ if (!S.CheckConstraintExpression(SubstitutedExpression.get()))
+ return ExprError();
+
+ // [temp.constr.atomic]p3: To determine if an atomic constraint is
+ // satisfied, the parameter mapping and template arguments are first
+ // substituted into its expression. If substitution results in an
+ // invalid type or expression, the constraint is not satisfied.
+ // Otherwise, the lvalue-to-rvalue conversion is performed if necessary,
+ // and E shall be a constant expression of type bool.
+ //
+ // Perform the L to R Value conversion if necessary. We do so for all
+ // non-PRValue categories, else we fail to extend the lifetime of
+ // temporaries, and that fails the constant expression check.
+ if (!SubstitutedExpression.get()->isPRValue())
+ SubstitutedExpression = ImplicitCastExpr::Create(
+ S.Context, SubstitutedExpression.get()->getType(),
+ CK_LValueToRValue, SubstitutedExpression.get(),
+ /*BasePath=*/nullptr, VK_PRValue, FPOptionsOverride());
+
+ return SubstitutedExpression;
+ }
- // [temp.constr.atomic]p3: To determine if an atomic constraint is
- // satisfied, the parameter mapping and template arguments are first
- // substituted into its expression. If substitution results in an
- // invalid type or expression, the constraint is not satisfied.
- // Otherwise, the lvalue-to-rvalue conversion is performed if necessary,
- // and E shall be a constant expression of type bool.
- //
- // Perform the L to R Value conversion if necessary. We do so for all
- // non-PRValue categories, else we fail to extend the lifetime of
- // temporaries, and that fails the constant expression check.
- if (!SubstitutedExpression.get()->isPRValue())
- SubstitutedExpression = ImplicitCastExpr::Create(
- S.Context, SubstitutedExpression.get()->getType(),
- CK_LValueToRValue, SubstitutedExpression.get(),
- /*BasePath=*/nullptr, VK_PRValue, FPOptionsOverride());
-
- return SubstitutedExpression;
- });
+ std::optional<unsigned>
+ EvaluateFoldExpandedConstraintSize(const CXXFoldExpr *FE) const {
+
+ // We should ignore errors in the presence of packs of different size.
+ Sema::SFINAETrap Trap(S);
+
+ Expr *Pattern = FE->getPattern();
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ S.collectUnexpandedParameterPacks(Pattern, Unexpanded);
+ assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
+ bool Expand = true;
+ bool RetainExpansion = false;
+ std::optional<unsigned> OrigNumExpansions = FE->getNumExpansions(),
+ NumExpansions = OrigNumExpansions;
+ if (S.CheckParameterPacksForExpansion(
+ FE->getEllipsisLoc(), Pattern->getSourceRange(), Unexpanded,
+ MLTAL, Expand, RetainExpansion, NumExpansions) ||
+ !Expand || RetainExpansion)
+ return std::nullopt;
+
+ if (NumExpansions && S.getLangOpts().BracketDepth < NumExpansions) {
+ S.Diag(FE->getEllipsisLoc(),
+ clang::diag::err_fold_expression_limit_exceeded)
+ << *NumExpansions << S.getLangOpts().BracketDepth
+ << FE->getSourceRange();
+ S.Diag(FE->getEllipsisLoc(), diag::note_bracket_depth);
+ return std::nullopt;
+ }
+ return NumExpansions;
+ }
+ };
+
+ return calculateConstraintSatisfaction(
+ S, ConstraintExpr, Satisfaction,
+ ConstraintEvaluator{S, Template, TemplateNameLoc, MLTAL, Satisfaction});
}
static bool CheckConstraintSatisfaction(
@@ -486,6 +629,12 @@ bool Sema::CheckConstraintSatisfaction(
*this, nullptr, ConstraintExprs, ConvertedConstraints,
TemplateArgsLists, TemplateIDRange, OutSatisfaction);
}
+ // Invalid templates could make their way here. Substituting them could result
+ // in dependent expressions.
+ if (Template->isInvalidDecl()) {
+ OutSatisfaction.IsSatisfied = false;
+ return true;
+ }
// A list of the template argument list flattened in a predictible manner for
// the purposes of caching. The ConstraintSatisfaction type is in AST so it
@@ -537,13 +686,21 @@ bool Sema::CheckConstraintSatisfaction(
bool Sema::CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction) {
- return calculateConstraintSatisfaction(
- *this, ConstraintExpr, Satisfaction,
- [this](const Expr *AtomicExpr) -> ExprResult {
- // We only do this to immitate lvalue-to-rvalue conversion.
- return PerformContextuallyConvertToBool(
- const_cast<Expr *>(AtomicExpr));
- })
+
+ struct ConstraintEvaluator {
+ Sema &S;
+ ExprResult EvaluateAtomicConstraint(const Expr *AtomicExpr) const {
+ return S.PerformContextuallyConvertToBool(const_cast<Expr *>(AtomicExpr));
+ }
+
+ std::optional<unsigned>
+ EvaluateFoldExpandedConstraintSize(const CXXFoldExpr *FE) const {
+ return 0;
+ }
+ };
+
+ return calculateConstraintSatisfaction(*this, ConstraintExpr, Satisfaction,
+ ConstraintEvaluator{*this})
.isInvalid();
}
@@ -586,7 +743,8 @@ bool Sema::addInstantiatedCapturesToScope(
bool Sema::SetupConstraintScope(
FunctionDecl *FD, std::optional<ArrayRef<TemplateArgument>> TemplateArgs,
- MultiLevelTemplateArgumentList MLTAL, LocalInstantiationScope &Scope) {
+ const MultiLevelTemplateArgumentList &MLTAL,
+ LocalInstantiationScope &Scope) {
if (FD->isTemplateInstantiation() && FD->getPrimaryTemplate()) {
FunctionTemplateDecl *PrimaryTemplate = FD->getPrimaryTemplate();
InstantiatingTemplate Inst(
@@ -614,10 +772,12 @@ bool Sema::SetupConstraintScope(
// reference the original primary template.
// We walk up the instantiated template chain so that nested lambdas get
// handled properly.
- for (FunctionTemplateDecl *FromMemTempl =
- PrimaryTemplate->getInstantiatedFromMemberTemplate();
- FromMemTempl;
- FromMemTempl = FromMemTempl->getInstantiatedFromMemberTemplate()) {
+ // We should only collect instantiated parameters from the primary template.
+ // Otherwise, we may have mismatched template parameter depth!
+ if (FunctionTemplateDecl *FromMemTempl =
+ PrimaryTemplate->getInstantiatedFromMemberTemplate()) {
+ while (FromMemTempl->getInstantiatedFromMemberTemplate())
+ FromMemTempl = FromMemTempl->getInstantiatedFromMemberTemplate();
if (addInstantiatedParametersToScope(FD, FromMemTempl->getTemplatedDecl(),
Scope, MLTAL))
return true;
@@ -661,11 +821,12 @@ Sema::SetupConstraintCheckingTemplateArgumentsAndScope(
// Collect the list of template arguments relative to the 'primary' template.
// We need the entire list, since the constraint is completely uninstantiated
// at this point.
- MLTAL = getTemplateInstantiationArgs(FD, FD->getLexicalDeclContext(),
- /*Final=*/false, /*Innermost=*/nullptr,
- /*RelativeToPrimary=*/true,
- /*Pattern=*/nullptr,
- /*ForConstraintInstantiation=*/true);
+ MLTAL =
+ getTemplateInstantiationArgs(FD, FD->getLexicalDeclContext(),
+ /*Final=*/false, /*Innermost=*/std::nullopt,
+ /*RelativeToPrimary=*/true,
+ /*Pattern=*/nullptr,
+ /*ForConstraintInstantiation=*/true);
if (SetupConstraintScope(FD, TemplateArgs, MLTAL, Scope))
return std::nullopt;
@@ -690,11 +851,15 @@ bool Sema::CheckFunctionConstraints(const FunctionDecl *FD,
// A lambda conversion operator has the same constraints as the call operator
// and constraints checking relies on whether we are in a lambda call operator
// (and may refer to its parameters), so check the call operator instead.
+ // Note that the declarations outside of the lambda should also be
+ // considered. Turning on the 'ForOverloadResolution' flag results in the
+ // LocalInstantiationScope not looking into its parents, but we can still
+ // access Decls from the parents while building a lambda RAII scope later.
if (const auto *MD = dyn_cast<CXXConversionDecl>(FD);
MD && isLambdaConversionOperator(const_cast<CXXConversionDecl *>(MD)))
return CheckFunctionConstraints(MD->getParent()->getLambdaCallOperator(),
Satisfaction, UsageLoc,
- ForOverloadResolution);
+ /*ShouldAddDeclsFromParentScope=*/true);
DeclContext *CtxToSave = const_cast<FunctionDecl *>(FD);
@@ -740,7 +905,8 @@ static unsigned
CalculateTemplateDepthForConstraints(Sema &S, const NamedDecl *ND,
bool SkipForSpecialization = false) {
MultiLevelTemplateArgumentList MLTAL = S.getTemplateInstantiationArgs(
- ND, ND->getLexicalDeclContext(), /*Final=*/false, /*Innermost=*/nullptr,
+ ND, ND->getLexicalDeclContext(), /*Final=*/false,
+ /*Innermost=*/std::nullopt,
/*RelativeToPrimary=*/true,
/*Pattern=*/nullptr,
/*ForConstraintInstantiation=*/true, SkipForSpecialization);
@@ -780,7 +946,7 @@ static const Expr *SubstituteConstraintExpressionWithoutSatisfaction(
const Expr *ConstrExpr) {
MultiLevelTemplateArgumentList MLTAL = S.getTemplateInstantiationArgs(
DeclInfo.getDecl(), DeclInfo.getLexicalDeclContext(), /*Final=*/false,
- /*Innermost=*/nullptr,
+ /*Innermost=*/std::nullopt,
/*RelativeToPrimary=*/true,
/*Pattern=*/nullptr, /*ForConstraintInstantiation=*/true,
/*SkipForSpecialization*/ false);
@@ -797,9 +963,52 @@ static const Expr *SubstituteConstraintExpressionWithoutSatisfaction(
if (Inst.isInvalid())
return nullptr;
+ // Set up a dummy 'instantiation' scope in the case of reference to function
+ // parameters that the surrounding function hasn't been instantiated yet. Note
+ // this may happen while we're comparing two templates' constraint
+ // equivalence.
+ LocalInstantiationScope ScopeForParameters(S, /*CombineWithOuterScope=*/true);
+ if (auto *FD = DeclInfo.getDecl()->getAsFunction())
+ for (auto *PVD : FD->parameters()) {
+ if (!PVD->isParameterPack()) {
+ ScopeForParameters.InstantiatedLocal(PVD, PVD);
+ continue;
+ }
+ // This is hacky: we're mapping the parameter pack to a size-of-1 argument
+ // to avoid building SubstTemplateTypeParmPackTypes for
+ // PackExpansionTypes. The SubstTemplateTypeParmPackType node would
+ // otherwise reference the AssociatedDecl of the template arguments, which
+ // is, in this case, the template declaration.
+ //
+ // However, as we are in the process of comparing potential
+ // re-declarations, the canonical declaration is the declaration itself at
+ // this point. So if we didn't expand these packs, we would end up with an
+ // incorrect profile difference because we will be profiling the
+ // canonical types!
+ //
+ // FIXME: Improve the "no-transform" machinery in FindInstantiatedDecl so
+ // that we can eliminate the Scope in the cases where the declarations are
+ // not necessarily instantiated. It would also benefit the noexcept
+ // specifier comparison.
+ ScopeForParameters.MakeInstantiatedLocalArgPack(PVD);
+ ScopeForParameters.InstantiatedLocalPackArg(PVD, PVD);
+ }
+
std::optional<Sema::CXXThisScopeRAII> ThisScope;
- if (auto *RD = dyn_cast<CXXRecordDecl>(DeclInfo.getDeclContext()))
+
+ // See TreeTransform::RebuildTemplateSpecializationType. A context scope is
+ // essential for having an injected class as the canonical type for a template
+ // specialization type at the rebuilding stage. This guarantees that, for
+ // out-of-line definitions, injected class name types and their equivalent
+ // template specializations can be profiled to the same value, which makes it
+ // possible that e.g. constraints involving C<Class<T>> and C<Class> are
+ // perceived identical.
+ std::optional<Sema::ContextRAII> ContextScope;
+ if (auto *RD = dyn_cast<CXXRecordDecl>(DeclInfo.getDeclContext())) {
ThisScope.emplace(S, const_cast<CXXRecordDecl *>(RD), Qualifiers());
+ ContextScope.emplace(S, const_cast<DeclContext *>(cast<DeclContext>(RD)),
+ /*NewThisContext=*/false);
+ }
ExprResult SubstConstr = S.SubstConstraintExprWithoutSatisfaction(
const_cast<clang::Expr *>(ConstrExpr), MLTAL);
if (SFINAE.hasErrorOccurred() || !SubstConstr.isUsable())
@@ -1026,13 +1235,14 @@ static void diagnoseUnsatisfiedRequirement(Sema &S,
concepts::NestedRequirement *Req,
bool First) {
using SubstitutionDiagnostic = std::pair<SourceLocation, StringRef>;
- for (auto &Pair : Req->getConstraintSatisfaction()) {
- if (auto *SubstDiag = Pair.second.dyn_cast<SubstitutionDiagnostic *>())
+ for (auto &Record : Req->getConstraintSatisfaction()) {
+ if (auto *SubstDiag = Record.dyn_cast<SubstitutionDiagnostic *>())
S.Diag(SubstDiag->first, diag::note_nested_requirement_substitution_error)
- << (int)First << Req->getInvalidConstraintEntity() << SubstDiag->second;
+ << (int)First << Req->getInvalidConstraintEntity()
+ << SubstDiag->second;
else
- diagnoseWellFormedUnsatisfiedConstraintExpr(
- S, Pair.second.dyn_cast<Expr *>(), First);
+ diagnoseWellFormedUnsatisfiedConstraintExpr(S, Record.dyn_cast<Expr *>(),
+ First);
First = false;
}
}
@@ -1132,6 +1342,13 @@ static void diagnoseWellFormedUnsatisfiedConstraintExpr(Sema &S,
break;
}
return;
+ } else if (auto *TTE = dyn_cast<TypeTraitExpr>(SubstExpr);
+ TTE && TTE->getTrait() == clang::TypeTrait::BTT_IsDeducible) {
+ assert(TTE->getNumArgs() == 2);
+ S.Diag(SubstExpr->getSourceRange().getBegin(),
+ diag::note_is_deducible_constraint_evaluated_to_false)
+ << TTE->getArg(0)->getType() << TTE->getArg(1)->getType();
+ return;
}
S.Diag(SubstExpr->getSourceRange().getBegin(),
@@ -1139,12 +1356,11 @@ static void diagnoseWellFormedUnsatisfiedConstraintExpr(Sema &S,
<< (int)First << SubstExpr;
}
-template<typename SubstitutionDiagnostic>
+template <typename SubstitutionDiagnostic>
static void diagnoseUnsatisfiedConstraintExpr(
- Sema &S, const Expr *E,
- const llvm::PointerUnion<Expr *, SubstitutionDiagnostic *> &Record,
+ Sema &S, const llvm::PointerUnion<Expr *, SubstitutionDiagnostic *> &Record,
bool First = true) {
- if (auto *Diag = Record.template dyn_cast<SubstitutionDiagnostic *>()){
+ if (auto *Diag = Record.template dyn_cast<SubstitutionDiagnostic *>()) {
S.Diag(Diag->first, diag::note_substituted_constraint_expr_is_ill_formed)
<< Diag->second;
return;
@@ -1159,8 +1375,8 @@ Sema::DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction& Satisfaction,
bool First) {
assert(!Satisfaction.IsSatisfied &&
"Attempted to diagnose a satisfied constraint");
- for (auto &Pair : Satisfaction.Details) {
- diagnoseUnsatisfiedConstraintExpr(*this, Pair.first, Pair.second, First);
+ for (auto &Record : Satisfaction.Details) {
+ diagnoseUnsatisfiedConstraintExpr(*this, Record, First);
First = false;
}
}
@@ -1170,8 +1386,8 @@ void Sema::DiagnoseUnsatisfiedConstraint(
bool First) {
assert(!Satisfaction.IsSatisfied &&
"Attempted to diagnose a satisfied constraint");
- for (auto &Pair : Satisfaction) {
- diagnoseUnsatisfiedConstraintExpr(*this, Pair.first, Pair.second, First);
+ for (auto &Record : Satisfaction) {
+ diagnoseUnsatisfiedConstraintExpr(*this, Record, First);
First = false;
}
}
@@ -1201,18 +1417,34 @@ Sema::getNormalizedAssociatedConstraints(
return CacheEntry->second;
}
+const NormalizedConstraint *clang::getNormalizedAssociatedConstraints(
+ Sema &S, NamedDecl *ConstrainedDecl,
+ ArrayRef<const Expr *> AssociatedConstraints) {
+ return S.getNormalizedAssociatedConstraints(ConstrainedDecl,
+ AssociatedConstraints);
+}
+
static bool
substituteParameterMappings(Sema &S, NormalizedConstraint &N,
ConceptDecl *Concept,
const MultiLevelTemplateArgumentList &MLTAL,
const ASTTemplateArgumentListInfo *ArgsAsWritten) {
- if (!N.isAtomic()) {
+
+ if (N.isCompound()) {
if (substituteParameterMappings(S, N.getLHS(), Concept, MLTAL,
ArgsAsWritten))
return true;
return substituteParameterMappings(S, N.getRHS(), Concept, MLTAL,
ArgsAsWritten);
}
+
+ if (N.isFoldExpanded()) {
+ Sema::ArgumentPackSubstitutionIndexRAII _(S, -1);
+ return substituteParameterMappings(
+ S, N.getFoldExpandedConstraint()->Constraint, Concept, MLTAL,
+ ArgsAsWritten);
+ }
+
TemplateParameterList *TemplateParams = Concept->getTemplateParameters();
AtomicConstraint &Atomic = *N.getAtomicConstraint();
@@ -1241,10 +1473,20 @@ substituteParameterMappings(Sema &S, NormalizedConstraint &N,
: SourceLocation()));
Atomic.ParameterMapping.emplace(TempArgs, OccurringIndices.count());
}
+ SourceLocation InstLocBegin =
+ ArgsAsWritten->arguments().empty()
+ ? ArgsAsWritten->getLAngleLoc()
+ : ArgsAsWritten->arguments().front().getSourceRange().getBegin();
+ SourceLocation InstLocEnd =
+ ArgsAsWritten->arguments().empty()
+ ? ArgsAsWritten->getRAngleLoc()
+ : ArgsAsWritten->arguments().front().getSourceRange().getEnd();
Sema::InstantiatingTemplate Inst(
- S, ArgsAsWritten->arguments().front().getSourceRange().getBegin(),
+ S, InstLocBegin,
Sema::InstantiatingTemplate::ParameterMappingSubstitution{}, Concept,
- ArgsAsWritten->arguments().front().getSourceRange());
+ {InstLocBegin, InstLocEnd});
+ if (Inst.isInvalid())
+ return true;
if (S.SubstTemplateArguments(*Atomic.ParameterMapping, MLTAL, SubstArgs))
return true;
@@ -1258,11 +1500,9 @@ substituteParameterMappings(Sema &S, NormalizedConstraint &N,
static bool substituteParameterMappings(Sema &S, NormalizedConstraint &N,
const ConceptSpecializationExpr *CSE) {
- TemplateArgumentList TAL{TemplateArgumentList::OnStack,
- CSE->getTemplateArguments()};
MultiLevelTemplateArgumentList MLTAL = S.getTemplateInstantiationArgs(
CSE->getNamedConcept(), CSE->getNamedConcept()->getLexicalDeclContext(),
- /*Final=*/false, &TAL,
+ /*Final=*/false, CSE->getTemplateArguments(),
/*RelativeToPrimary=*/true,
/*Pattern=*/nullptr,
/*ForConstraintInstantiation=*/true);
@@ -1271,6 +1511,42 @@ static bool substituteParameterMappings(Sema &S, NormalizedConstraint &N,
CSE->getTemplateArgsAsWritten());
}
+NormalizedConstraint::NormalizedConstraint(ASTContext &C,
+ NormalizedConstraint LHS,
+ NormalizedConstraint RHS,
+ CompoundConstraintKind Kind)
+ : Constraint{CompoundConstraint{
+ new(C) NormalizedConstraintPair{std::move(LHS), std::move(RHS)},
+ Kind}} {}
+
+NormalizedConstraint::NormalizedConstraint(ASTContext &C,
+ const NormalizedConstraint &Other) {
+ if (Other.isAtomic()) {
+ Constraint = new (C) AtomicConstraint(*Other.getAtomicConstraint());
+ } else if (Other.isFoldExpanded()) {
+ Constraint = new (C) FoldExpandedConstraint(
+ Other.getFoldExpandedConstraint()->Kind,
+ NormalizedConstraint(C, Other.getFoldExpandedConstraint()->Constraint),
+ Other.getFoldExpandedConstraint()->Pattern);
+ } else {
+ Constraint = CompoundConstraint(
+ new (C)
+ NormalizedConstraintPair{NormalizedConstraint(C, Other.getLHS()),
+ NormalizedConstraint(C, Other.getRHS())},
+ Other.getCompoundKind());
+ }
+}
+
+NormalizedConstraint &NormalizedConstraint::getLHS() const {
+ assert(isCompound() && "getLHS called on a non-compound constraint.");
+ return Constraint.get<CompoundConstraint>().getPointer()->LHS;
+}
+
+NormalizedConstraint &NormalizedConstraint::getRHS() const {
+ assert(isCompound() && "getRHS called on a non-compound constraint.");
+ return Constraint.get<CompoundConstraint>().getPointer()->RHS;
+}
+
std::optional<NormalizedConstraint>
NormalizedConstraint::fromConstraintExprs(Sema &S, NamedDecl *D,
ArrayRef<const Expr *> E) {
@@ -1320,6 +1596,8 @@ NormalizedConstraint::fromConstraintExpr(Sema &S, NamedDecl *D, const Expr *E) {
S, CSE->getExprLoc(),
Sema::InstantiatingTemplate::ConstraintNormalization{}, D,
CSE->getSourceRange());
+ if (Inst.isInvalid())
+ return std::nullopt;
// C++ [temp.constr.normal]p1.1
// [...]
// The normal form of an id-expression of the form C<A1, A2, ..., AN>,
@@ -1343,17 +1621,75 @@ NormalizedConstraint::fromConstraintExpr(Sema &S, NamedDecl *D, const Expr *E) {
return std::nullopt;
return New;
+ } else if (auto *FE = dyn_cast<const CXXFoldExpr>(E);
+ FE && S.getLangOpts().CPlusPlus26 &&
+ (FE->getOperator() == BinaryOperatorKind::BO_LAnd ||
+ FE->getOperator() == BinaryOperatorKind::BO_LOr)) {
+
+ // Normalize fold expressions in C++26.
+
+ FoldExpandedConstraint::FoldOperatorKind Kind =
+ FE->getOperator() == BinaryOperatorKind::BO_LAnd
+ ? FoldExpandedConstraint::FoldOperatorKind::And
+ : FoldExpandedConstraint::FoldOperatorKind::Or;
+
+ if (FE->getInit()) {
+ auto LHS = fromConstraintExpr(S, D, FE->getLHS());
+ auto RHS = fromConstraintExpr(S, D, FE->getRHS());
+ if (!LHS || !RHS)
+ return std::nullopt;
+
+ if (FE->isRightFold())
+ RHS = NormalizedConstraint{new (S.Context) FoldExpandedConstraint{
+ Kind, std::move(*RHS), FE->getPattern()}};
+ else
+ LHS = NormalizedConstraint{new (S.Context) FoldExpandedConstraint{
+ Kind, std::move(*LHS), FE->getPattern()}};
+
+ return NormalizedConstraint(
+ S.Context, std::move(*LHS), std::move(*RHS),
+ FE->getOperator() == BinaryOperatorKind::BO_LAnd ? CCK_Conjunction
+ : CCK_Disjunction);
+ }
+ auto Sub = fromConstraintExpr(S, D, FE->getPattern());
+ if (!Sub)
+ return std::nullopt;
+ return NormalizedConstraint{new (S.Context) FoldExpandedConstraint{
+ Kind, std::move(*Sub), FE->getPattern()}};
}
+
return NormalizedConstraint{new (S.Context) AtomicConstraint(S, E)};
}
-using NormalForm =
- llvm::SmallVector<llvm::SmallVector<AtomicConstraint *, 2>, 4>;
+bool FoldExpandedConstraint::AreCompatibleForSubsumption(
+ const FoldExpandedConstraint &A, const FoldExpandedConstraint &B) {
+
+ // [C++26] [temp.constr.fold]
+ // Two fold expanded constraints are compatible for subsumption
+ // if their respective constraints both contain an equivalent unexpanded pack.
-static NormalForm makeCNF(const NormalizedConstraint &Normalized) {
+ llvm::SmallVector<UnexpandedParameterPack> APacks, BPacks;
+ Sema::collectUnexpandedParameterPacks(const_cast<Expr *>(A.Pattern), APacks);
+ Sema::collectUnexpandedParameterPacks(const_cast<Expr *>(B.Pattern), BPacks);
+
+ for (const UnexpandedParameterPack &APack : APacks) {
+ std::pair<unsigned, unsigned> DepthAndIndex = getDepthAndIndex(APack);
+ auto it = llvm::find_if(BPacks, [&](const UnexpandedParameterPack &BPack) {
+ return getDepthAndIndex(BPack) == DepthAndIndex;
+ });
+ if (it != BPacks.end())
+ return true;
+ }
+ return false;
+}
+
+NormalForm clang::makeCNF(const NormalizedConstraint &Normalized) {
if (Normalized.isAtomic())
return {{Normalized.getAtomicConstraint()}};
+ else if (Normalized.isFoldExpanded())
+ return {{Normalized.getFoldExpandedConstraint()}};
+
NormalForm LCNF = makeCNF(Normalized.getLHS());
NormalForm RCNF = makeCNF(Normalized.getRHS());
if (Normalized.getCompoundKind() == NormalizedConstraint::CCK_Conjunction) {
@@ -1379,10 +1715,13 @@ static NormalForm makeCNF(const NormalizedConstraint &Normalized) {
return Res;
}
-static NormalForm makeDNF(const NormalizedConstraint &Normalized) {
+NormalForm clang::makeDNF(const NormalizedConstraint &Normalized) {
if (Normalized.isAtomic())
return {{Normalized.getAtomicConstraint()}};
+ else if (Normalized.isFoldExpanded())
+ return {{Normalized.getFoldExpandedConstraint()}};
+
NormalForm LDNF = makeDNF(Normalized.getLHS());
NormalForm RDNF = makeDNF(Normalized.getRHS());
if (Normalized.getCompoundKind() == NormalizedConstraint::CCK_Disjunction) {
@@ -1409,60 +1748,6 @@ static NormalForm makeDNF(const NormalizedConstraint &Normalized) {
return Res;
}
-template<typename AtomicSubsumptionEvaluator>
-static bool subsumes(const NormalForm &PDNF, const NormalForm &QCNF,
- AtomicSubsumptionEvaluator E) {
- // C++ [temp.constr.order] p2
- // Then, P subsumes Q if and only if, for every disjunctive clause Pi in the
- // disjunctive normal form of P, Pi subsumes every conjunctive clause Qj in
- // the conjuctive normal form of Q, where [...]
- for (const auto &Pi : PDNF) {
- for (const auto &Qj : QCNF) {
- // C++ [temp.constr.order] p2
- // - [...] a disjunctive clause Pi subsumes a conjunctive clause Qj if
- // and only if there exists an atomic constraint Pia in Pi for which
- // there exists an atomic constraint, Qjb, in Qj such that Pia
- // subsumes Qjb.
- bool Found = false;
- for (const AtomicConstraint *Pia : Pi) {
- for (const AtomicConstraint *Qjb : Qj) {
- if (E(*Pia, *Qjb)) {
- Found = true;
- break;
- }
- }
- if (Found)
- break;
- }
- if (!Found)
- return false;
- }
- }
- return true;
-}
-
-template<typename AtomicSubsumptionEvaluator>
-static bool subsumes(Sema &S, NamedDecl *DP, ArrayRef<const Expr *> P,
- NamedDecl *DQ, ArrayRef<const Expr *> Q, bool &Subsumes,
- AtomicSubsumptionEvaluator E) {
- // C++ [temp.constr.order] p2
- // In order to determine if a constraint P subsumes a constraint Q, P is
- // transformed into disjunctive normal form, and Q is transformed into
- // conjunctive normal form. [...]
- auto *PNormalized = S.getNormalizedAssociatedConstraints(DP, P);
- if (!PNormalized)
- return true;
- const NormalForm PDNF = makeDNF(*PNormalized);
-
- auto *QNormalized = S.getNormalizedAssociatedConstraints(DQ, Q);
- if (!QNormalized)
- return true;
- const NormalForm QCNF = makeCNF(*QNormalized);
-
- Subsumes = subsumes(PDNF, QCNF, E);
- return false;
-}
-
bool Sema::IsAtLeastAsConstrained(NamedDecl *D1,
MutableArrayRef<const Expr *> AC1,
NamedDecl *D2,
@@ -1515,10 +1800,11 @@ bool Sema::IsAtLeastAsConstrained(NamedDecl *D1,
}
}
- if (subsumes(*this, D1, AC1, D2, AC2, Result,
- [this] (const AtomicConstraint &A, const AtomicConstraint &B) {
- return A.subsumes(Context, B);
- }))
+ if (clang::subsumes(
+ *this, D1, AC1, D2, AC2, Result,
+ [this](const AtomicConstraint &A, const AtomicConstraint &B) {
+ return A.subsumes(Context, B);
+ }))
return true;
SubsumptionCache.try_emplace(Key, Result);
return false;
@@ -1575,10 +1861,12 @@ bool Sema::MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
const NormalForm DNF2 = makeDNF(*Normalized2);
const NormalForm CNF2 = makeCNF(*Normalized2);
- bool Is1AtLeastAs2Normally = subsumes(DNF1, CNF2, NormalExprEvaluator);
- bool Is2AtLeastAs1Normally = subsumes(DNF2, CNF1, NormalExprEvaluator);
- bool Is1AtLeastAs2 = subsumes(DNF1, CNF2, IdenticalExprEvaluator);
- bool Is2AtLeastAs1 = subsumes(DNF2, CNF1, IdenticalExprEvaluator);
+ bool Is1AtLeastAs2Normally =
+ clang::subsumes(DNF1, CNF2, NormalExprEvaluator);
+ bool Is2AtLeastAs1Normally =
+ clang::subsumes(DNF2, CNF1, NormalExprEvaluator);
+ bool Is1AtLeastAs2 = clang::subsumes(DNF1, CNF2, IdenticalExprEvaluator);
+ bool Is2AtLeastAs1 = clang::subsumes(DNF2, CNF1, IdenticalExprEvaluator);
if (Is1AtLeastAs2 == Is1AtLeastAs2Normally &&
Is2AtLeastAs1 == Is2AtLeastAs1Normally)
// Same result - no ambiguity was caused by identical atomic expressions.
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
index 4e600fd29ee7..4e180d648cd8 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
@@ -347,99 +347,15 @@ static Expr *maybeTailCall(Sema &S, QualType RetType, Expr *E,
Expr *JustAddress = AddressExpr.get();
- // FIXME: Without optimizations, the temporary result from `await_suspend()`
- // may be put on the coroutine frame since the coroutine frame constructor
- // will think the temporary variable will escape from the
- // `coroutine_handle<>::address()` call. This is problematic since the
- // coroutine should be considered to be suspended after it enters
- // `await_suspend` so it shouldn't access/update the coroutine frame after
- // that.
- //
- // See https://github.com/llvm/llvm-project/issues/65054 for the report.
- //
- // The long term solution may wrap the whole logic about `await-suspend`
- // into a standalone function. This is similar to the proposed solution
- // in tryMarkAwaitSuspendNoInline. See the comments there for details.
- //
- // The short term solution here is to mark `coroutine_handle<>::address()`
- // function as always-inline so that the coroutine frame constructor won't
- // think the temporary result is escaped incorrectly.
- if (auto *FD = cast<CallExpr>(JustAddress)->getDirectCallee())
- if (!FD->hasAttr<AlwaysInlineAttr>() && !FD->hasAttr<NoInlineAttr>())
- FD->addAttr(AlwaysInlineAttr::CreateImplicit(S.getASTContext(),
- FD->getLocation()));
-
// Check that the type of AddressExpr is void*
if (!JustAddress->getType().getTypePtr()->isVoidPointerType())
S.Diag(cast<CallExpr>(JustAddress)->getCalleeDecl()->getLocation(),
diag::warn_coroutine_handle_address_invalid_return_type)
<< JustAddress->getType();
- // Clean up temporary objects so that they don't live across suspension points
- // unnecessarily. We choose to clean up before the call to
- // __builtin_coro_resume so that the cleanup code are not inserted in-between
- // the resume call and return instruction, which would interfere with the
- // musttail call contract.
- JustAddress = S.MaybeCreateExprWithCleanups(JustAddress);
- return S.BuildBuiltinCallExpr(Loc, Builtin::BI__builtin_coro_resume,
- JustAddress);
-}
-
-/// The await_suspend call performed by co_await is essentially asynchronous
-/// to the execution of the coroutine. Inlining it normally into an unsplit
-/// coroutine can cause miscompilation because the coroutine CFG misrepresents
-/// the true control flow of the program: things that happen in the
-/// await_suspend are not guaranteed to happen prior to the resumption of the
-/// coroutine, and things that happen after the resumption of the coroutine
-/// (including its exit and the potential deallocation of the coroutine frame)
-/// are not guaranteed to happen only after the end of await_suspend.
-///
-/// See https://github.com/llvm/llvm-project/issues/56301 and
-/// https://reviews.llvm.org/D157070 for the example and the full discussion.
-///
-/// The short-term solution to this problem is to mark the call as uninlinable.
-/// But we don't want to do this if the call is known to be trivial, which is
-/// very common.
-///
-/// The long-term solution may introduce patterns like:
-///
-/// call @llvm.coro.await_suspend(ptr %awaiter, ptr %handle,
-/// ptr @awaitSuspendFn)
-///
-/// Then it is much easier to perform the safety analysis in the middle end.
-/// If it is safe to inline the call to awaitSuspend, we can replace it in the
-/// CoroEarly pass. Otherwise we could replace it in the CoroSplit pass.
-static void tryMarkAwaitSuspendNoInline(Sema &S, OpaqueValueExpr *Awaiter,
- CallExpr *AwaitSuspend) {
- // The method here to extract the awaiter decl is not precise.
- // This is intentional. Since it is hard to perform the analysis in the
- // frontend due to the complexity of C++'s type systems.
- // And we prefer to perform such analysis in the middle end since it is
- // easier to implement and more powerful.
- CXXRecordDecl *AwaiterDecl =
- Awaiter->getType().getNonReferenceType()->getAsCXXRecordDecl();
-
- if (AwaiterDecl && AwaiterDecl->field_empty())
- return;
-
- FunctionDecl *FD = AwaitSuspend->getDirectCallee();
-
- assert(FD);
-
- // If the `await_suspend()` function is marked as `always_inline` explicitly,
- // we should give the user the right to control the codegen.
- if (FD->hasAttr<NoInlineAttr>() || FD->hasAttr<AlwaysInlineAttr>())
- return;
-
- // This is problematic if the user calls the await_suspend standalone. But on
- // the on hand, it is not incorrect semantically since inlining is not part
- // of the standard. On the other hand, it is relatively rare to call
- // the await_suspend function standalone.
- //
- // And given we've already had the long-term plan, the current workaround
- // looks relatively tolerant.
- FD->addAttr(
- NoInlineAttr::CreateImplicit(S.getASTContext(), FD->getLocation()));
+ // Clean up temporary objects, because the resulting expression
+ // will become the body of await_suspend wrapper.
+ return S.MaybeCreateExprWithCleanups(JustAddress);
}
/// Build calls to await_ready, await_suspend, and await_resume for a co_await
@@ -513,10 +429,6 @@ static ReadySuspendResumeResult buildCoawaitCalls(Sema &S, VarDecl *CoroPromise,
// type Z.
QualType RetType = AwaitSuspend->getCallReturnType(S.Context);
- // We need to mark await_suspend as noinline temporarily. See the comment
- // of tryMarkAwaitSuspendNoInline for details.
- tryMarkAwaitSuspendNoInline(S, Operand, AwaitSuspend);
-
// Support for coroutine_handle returning await_suspend.
if (Expr *TailCallSuspend =
maybeTailCall(S, RetType, AwaitSuspend, Loc))
@@ -905,13 +817,11 @@ ExprResult Sema::BuildOperatorCoawaitLookupExpr(Scope *S, SourceLocation Loc) {
assert(!Operators.isAmbiguous() && "Operator lookup cannot be ambiguous");
const auto &Functions = Operators.asUnresolvedSet();
- bool IsOverloaded =
- Functions.size() > 1 ||
- (Functions.size() == 1 && isa<FunctionTemplateDecl>(*Functions.begin()));
Expr *CoawaitOp = UnresolvedLookupExpr::Create(
Context, /*NamingClass*/ nullptr, NestedNameSpecifierLoc(),
- DeclarationNameInfo(OpName, Loc), /*RequiresADL*/ true, IsOverloaded,
- Functions.begin(), Functions.end());
+ DeclarationNameInfo(OpName, Loc), /*RequiresADL*/ true, Functions.begin(),
+ Functions.end(), /*KnownDependent=*/false,
+ /*KnownInstantiationDependent=*/false);
assert(CoawaitOp);
return CoawaitOp;
}
@@ -1748,7 +1658,7 @@ bool CoroutineStmtBuilder::makeOnFallthrough() {
return false;
} else if (HasRVoid) {
Fallthrough = S.BuildCoreturnStmt(FD.getLocation(), nullptr,
- /*IsImplicit*/false);
+ /*IsImplicit=*/true);
Fallthrough = S.ActOnFinishFullStmt(Fallthrough.get());
if (Fallthrough.isInvalid())
return false;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
index f5bb3e0b42e2..717ddb833958 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
@@ -45,8 +45,17 @@
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaCUDA.h"
+#include "clang/Sema/SemaHLSL.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaOpenMP.h"
+#include "clang/Sema/SemaPPC.h"
+#include "clang/Sema/SemaRISCV.h"
+#include "clang/Sema/SemaSwift.h"
+#include "clang/Sema/SemaWasm.h"
#include "clang/Sema/Template.h"
+#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/TargetParser/Triple.h"
@@ -127,55 +136,6 @@ class TypeNameValidatorCCC final : public CorrectionCandidateCallback {
} // end anonymous namespace
-/// Determine whether the token kind starts a simple-type-specifier.
-bool Sema::isSimpleTypeSpecifier(tok::TokenKind Kind) const {
- switch (Kind) {
- // FIXME: Take into account the current language when deciding whether a
- // token kind is a valid type specifier
- case tok::kw_short:
- case tok::kw_long:
- case tok::kw___int64:
- case tok::kw___int128:
- case tok::kw_signed:
- case tok::kw_unsigned:
- case tok::kw_void:
- case tok::kw_char:
- case tok::kw_int:
- case tok::kw_half:
- case tok::kw_float:
- case tok::kw_double:
- case tok::kw___bf16:
- case tok::kw__Float16:
- case tok::kw___float128:
- case tok::kw___ibm128:
- case tok::kw_wchar_t:
- case tok::kw_bool:
- case tok::kw__Accum:
- case tok::kw__Fract:
- case tok::kw__Sat:
-#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) case tok::kw___##Trait:
-#include "clang/Basic/TransformTypeTraits.def"
- case tok::kw___auto_type:
- return true;
-
- case tok::annot_typename:
- case tok::kw_char16_t:
- case tok::kw_char32_t:
- case tok::kw_typeof:
- case tok::annot_decltype:
- case tok::kw_decltype:
- return getLangOpts().CPlusPlus;
-
- case tok::kw_char8_t:
- return getLangOpts().Char8;
-
- default:
- break;
- }
-
- return false;
-}
-
namespace {
enum class UnqualifiedTypeNameLookupResult {
NotFound,
@@ -324,14 +284,6 @@ static ParsedType buildNamedType(Sema &S, const CXXScopeSpec *SS, QualType T,
return S.CreateParsedType(ElTy, Builder.getTypeSourceInfo(S.Context, ElTy));
}
-/// If the identifier refers to a type name within this scope,
-/// return the declaration of that type.
-///
-/// This routine performs ordinary name lookup of the identifier II
-/// within the given scope, with optional C++ scope specifier SS, to
-/// determine whether the name refers to a type. If so, returns an
-/// opaque pointer (actually a QualType) corresponding to that
-/// type. Otherwise, returns NULL.
ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS, bool isClassName,
bool HasTrailingDot, ParsedType ObjectTypePtr,
@@ -581,8 +533,9 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
} else if (AllowDeducedTemplate) {
if (auto *TD = getAsTypeTemplateDecl(IIDecl)) {
assert(!FoundUsingShadow || FoundUsingShadow->getTargetDecl() == TD);
- TemplateName Template =
- FoundUsingShadow ? TemplateName(FoundUsingShadow) : TemplateName(TD);
+ TemplateName Template = Context.getQualifiedTemplateName(
+ SS ? SS->getScopeRep() : nullptr, /*TemplateKeyword=*/false,
+ FoundUsingShadow ? TemplateName(FoundUsingShadow) : TemplateName(TD));
T = Context.getDeducedTemplateSpecializationType(Template, QualType(),
false);
// Don't wrap in a further UsingType.
@@ -683,11 +636,6 @@ ParsedType Sema::ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
}
-/// isTagName() - This method is called *for error recovery purposes only*
-/// to determine if the specified name is a valid tag name ("struct foo"). If
-/// so, this returns the TST for the tag corresponding to it (TST_enum,
-/// TST_union, TST_struct, TST_interface, TST_class). This is used to diagnose
-/// cases in C where the user forgot to specify the tag.
DeclSpec::TST Sema::isTagName(IdentifierInfo &II, Scope *S) {
// Do a tag name lookup in this scope.
LookupResult R(*this, &II, SourceLocation(), LookupTagName);
@@ -712,20 +660,6 @@ DeclSpec::TST Sema::isTagName(IdentifierInfo &II, Scope *S) {
return DeclSpec::TST_unspecified;
}
-/// isMicrosoftMissingTypename - In Microsoft mode, within class scope,
-/// if a CXXScopeSpec's type is equal to the type of one of the base classes
-/// then downgrade the missing typename error to a warning.
-/// This is needed for MSVC compatibility; Example:
-/// @code
-/// template<class T> class A {
-/// public:
-/// typedef int TYPE;
-/// };
-/// template<class T> class B : public A<T> {
-/// public:
-/// A<T>::TYPE a; // no typename required because A<T> is a base class.
-/// };
-/// @endcode
bool Sema::isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S) {
if (CurContext->isRecord()) {
if (SS->getScopeRep()->getKind() == NestedNameSpecifier::Super)
@@ -780,8 +714,8 @@ void Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II,
<< II, CanRecover);
} else if (DeclContext *DC = computeDeclContext(*SS, false)) {
std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
- bool DroppedSpecifier = Corrected.WillReplaceSpecifier() &&
- II->getName().equals(CorrectedStr);
+ bool DroppedSpecifier =
+ Corrected.WillReplaceSpecifier() && II->getName() == CorrectedStr;
diagnoseTypo(Corrected,
PDiag(IsTemplateName
? diag::err_no_member_template_suggest
@@ -877,7 +811,7 @@ static bool isTagTypeWithMissingTag(Sema &SemaRef, LookupResult &Result,
IdentifierInfo *&Name,
SourceLocation NameLoc) {
LookupResult R(SemaRef, Name, NameLoc, Sema::LookupTagName);
- SemaRef.LookupParsedName(R, S, &SS);
+ SemaRef.LookupParsedName(R, S, &SS, /*ObjectType=*/QualType());
if (TagDecl *Tag = R.getAsSingle<TagDecl>()) {
StringRef FixItTagName;
switch (Tag->getTagKind()) {
@@ -914,7 +848,7 @@ static bool isTagTypeWithMissingTag(Sema &SemaRef, LookupResult &Result,
// Replace lookup results with just the tag decl.
Result.clear(Sema::LookupTagName);
- SemaRef.LookupParsedName(Result, S, &SS);
+ SemaRef.LookupParsedName(Result, S, &SS, /*ObjectType=*/QualType());
return true;
}
@@ -941,7 +875,8 @@ Sema::NameClassification Sema::ClassifyName(Scope *S, CXXScopeSpec &SS,
}
LookupResult Result(*this, Name, NameLoc, LookupOrdinaryName);
- LookupParsedName(Result, S, &SS, !CurMethod);
+ LookupParsedName(Result, S, &SS, /*ObjectType=*/QualType(),
+ /*AllowBuiltinCreation=*/!CurMethod);
if (SS.isInvalid())
return NameClassification::Error();
@@ -959,7 +894,7 @@ Sema::NameClassification Sema::ClassifyName(Scope *S, CXXScopeSpec &SS,
// FIXME: This lookup really, really needs to be folded in to the normal
// unqualified lookup mechanism.
if (SS.isEmpty() && CurMethod && !isResultTypeOrTemplate(Result, NextToken)) {
- DeclResult Ivar = LookupIvarInObjCMethod(Result, S, Name);
+ DeclResult Ivar = ObjC().LookupIvarInObjCMethod(Result, S, Name);
if (Ivar.isInvalid())
return NameClassification::Error();
if (Ivar.isUsable())
@@ -1051,7 +986,7 @@ Corrected:
} else {// FIXME: is this even reachable? Test it.
std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
bool DroppedSpecifier = Corrected.WillReplaceSpecifier() &&
- Name->getName().equals(CorrectedStr);
+ Name->getName() == CorrectedStr;
diagnoseTypo(Corrected, PDiag(QualifiedDiag)
<< Name << computeDeclContext(SS, false)
<< DroppedSpecifier << SS.getRange());
@@ -1077,7 +1012,7 @@ Corrected:
// FIXME: This is a gross hack.
if (ObjCIvarDecl *Ivar = Result.getAsSingle<ObjCIvarDecl>()) {
DeclResult R =
- LookupIvarInObjCMethod(Result, S, Ivar->getIdentifier());
+ ObjC().LookupIvarInObjCMethod(Result, S, Ivar->getIdentifier());
if (R.isInvalid())
return NameClassification::Error();
if (R.isUsable())
@@ -1179,12 +1114,10 @@ Corrected:
dyn_cast<UsingShadowDecl>(*Result.begin());
assert(!FoundUsingShadow ||
TD == cast<TemplateDecl>(FoundUsingShadow->getTargetDecl()));
- Template =
- FoundUsingShadow ? TemplateName(FoundUsingShadow) : TemplateName(TD);
- if (SS.isNotEmpty())
- Template = Context.getQualifiedTemplateName(SS.getScopeRep(),
- /*TemplateKeyword=*/false,
- Template);
+ Template = Context.getQualifiedTemplateName(
+ SS.getScopeRep(),
+ /*TemplateKeyword=*/false,
+ FoundUsingShadow ? TemplateName(FoundUsingShadow) : TemplateName(TD));
} else {
// All results were non-template functions. This is a function template
// name.
@@ -1241,9 +1174,13 @@ Corrected:
return ParsedType::make(T);
}
- if (isa<ConceptDecl>(FirstDecl))
+ if (isa<ConceptDecl>(FirstDecl)) {
+ // We want to preserve the UsingShadowDecl for concepts.
+ if (auto *USD = dyn_cast<UsingShadowDecl>(Result.getRepresentativeDecl()))
+ return NameClassification::Concept(TemplateName(USD));
return NameClassification::Concept(
TemplateName(cast<TemplateDecl>(FirstDecl)));
+ }
if (auto *EmptyD = dyn_cast<UnresolvedUsingIfExistsDecl>(FirstDecl)) {
(void)DiagnoseUseOfDecl(EmptyD, NameLoc);
@@ -1281,8 +1218,8 @@ Corrected:
Result.suppressDiagnostics();
return NameClassification::OverloadSet(UnresolvedLookupExpr::Create(
Context, Result.getNamingClass(), SS.getWithLocInContext(Context),
- Result.getLookupNameInfo(), ADL, Result.isOverloadedResult(),
- Result.begin(), Result.end()));
+ Result.getLookupNameInfo(), ADL, Result.begin(), Result.end(),
+ /*KnownDependent=*/false, /*KnownInstantiationDependent=*/false));
}
ExprResult
@@ -1311,7 +1248,7 @@ ExprResult Sema::ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
const Token &NextToken) {
if (getCurMethodDecl() && SS.isEmpty())
if (auto *Ivar = dyn_cast<ObjCIvarDecl>(Found->getUnderlyingDecl()))
- return BuildIvarRefExpr(S, NameLoc, Ivar);
+ return ObjC().BuildIvarRefExpr(S, NameLoc, Ivar);
// Reconstruct the lookup result.
LookupResult Result(*this, Found->getDeclName(), NameLoc, LookupOrdinaryName);
@@ -1398,9 +1335,6 @@ void Sema::ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context) {
CurContext = static_cast<decltype(CurContext)>(Context);
}
-/// EnterDeclaratorContext - Used when we must lookup names in the context
-/// of a declarator's nested name specifier.
-///
void Sema::EnterDeclaratorContext(Scope *S, DeclContext *DC) {
// C++0x [basic.lookup.unqual]p13:
// A name used in the definition of a static data member of class
@@ -1529,7 +1463,7 @@ void Sema::ActOnExitFunctionContext() {
///
/// This routine determines whether overloading is possible, not
/// whether a new declaration actually overloads a previous one.
-/// It will return true in C++ (where overloads are alway permitted)
+/// It will return true in C++ (where overloads are always permitted)
/// or, as a C extension, when either the new declaration or a
/// previous one is declared with the 'overloadable' attribute.
static bool AllowOverloadingOfFunction(const LookupResult &Previous,
@@ -1558,7 +1492,6 @@ static bool AllowOverloadingOfFunction(const LookupResult &Previous,
return false;
}
-/// Add this decl to the scope shadowed decl chains.
void Sema::PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext) {
// Move up the scope chain until we find the nearest enclosing
// non-transparent context. The declaration will be introduced into this
@@ -1582,6 +1515,10 @@ void Sema::PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext) {
cast<FunctionDecl>(D)->isFunctionTemplateSpecialization())
return;
+ if (isa<UsingEnumDecl>(D) && D->getDeclName().isEmpty()) {
+ S->AddDecl(D);
+ return;
+ }
// If this replaces anything in the current scope,
IdentifierResolver::iterator I = IdResolver.begin(D->getDeclName()),
IEnd = IdResolver.end();
@@ -1637,8 +1574,6 @@ static bool isOutOfScopePreviousDeclaration(NamedDecl *,
DeclContext*,
ASTContext&);
-/// Filters out lookup results that don't fall within the given scope
-/// as determined by isDeclInScope.
void Sema::FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage,
bool AllowInlineNamespace) {
@@ -1658,8 +1593,6 @@ void Sema::FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
F.done();
}
-/// We've determined that \p New is a redeclaration of \p Old. Check that they
-/// have compatible owning modules.
bool Sema::CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old) {
// [module.interface]p7:
// A declaration is attached to a module as follows:
@@ -1695,8 +1628,7 @@ bool Sema::CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old) {
// Partitions are part of the module, but a partition could import another
// module, so verify that the PMIs agree.
if ((NewM->isModulePartition() || OldM->isModulePartition()) &&
- NewM->getPrimaryModuleInterfaceName() ==
- OldM->getPrimaryModuleInterfaceName())
+ getASTContext().isInSameModule(NewM, OldM))
return false;
}
@@ -1720,9 +1652,6 @@ bool Sema::CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old) {
return false;
}
-// [module.interface]p6:
-// A redeclaration of an entity X is implicitly exported if X was introduced by
-// an exported declaration; otherwise it shall not be exported.
bool Sema::CheckRedeclarationExported(NamedDecl *New, NamedDecl *Old) {
// [module.interface]p1:
// An export-declaration shall inhabit a namespace scope.
@@ -1747,6 +1676,15 @@ bool Sema::CheckRedeclarationExported(NamedDecl *New, NamedDecl *Old) {
if (IsOldExported)
return false;
+ // If the Old declaration are not attached to named modules
+ // and the New declaration are attached to global module.
+ // It should be fine to allow the export since it doesn't change
+ // the linkage of declarations. See
+ // https://github.com/llvm/llvm-project/issues/98583 for details.
+ if (!Old->isInNamedModule() && New->getOwningModule() &&
+ New->getOwningModule()->isImplicitGlobalModule())
+ return false;
+
assert(IsNewExported);
auto Lk = Old->getFormalLinkage();
@@ -1760,8 +1698,6 @@ bool Sema::CheckRedeclarationExported(NamedDecl *New, NamedDecl *Old) {
return true;
}
-// A wrapper function for checking the semantic restrictions of
-// a redeclaration within a module.
bool Sema::CheckRedeclarationInModule(NamedDecl *New, NamedDecl *Old) {
if (CheckRedeclarationModuleOwnership(New, Old))
return true;
@@ -1772,22 +1708,6 @@ bool Sema::CheckRedeclarationInModule(NamedDecl *New, NamedDecl *Old) {
return false;
}
-// Check the redefinition in C++20 Modules.
-//
-// [basic.def.odr]p14:
-// For any definable item D with definitions in multiple translation units,
-// - if D is a non-inline non-templated function or variable, or
-// - if the definitions in different translation units do not satisfy the
-// following requirements,
-// the program is ill-formed; a diagnostic is required only if the definable
-// item is attached to a named module and a prior definition is reachable at
-// the point where a later definition occurs.
-// - Each such definition shall not be attached to a named module
-// ([module.unit]).
-// - Each such definition shall consist of the same sequence of tokens, ...
-// ...
-//
-// Return true if the redefinition is not allowed. Return false otherwise.
bool Sema::IsRedefinitionInModule(const NamedDecl *New,
const NamedDecl *Old) const {
assert(getASTContext().isSameEntity(New, Old) &&
@@ -1884,18 +1804,6 @@ static bool IsDisallowedCopyOrAssign(const CXXMethodDecl *D) {
return D->isCopyAssignmentOperator();
}
-// We need this to handle
-//
-// typedef struct {
-// void *foo() { return 0; }
-// } A;
-//
-// When we see foo we don't know if after the typedef we will get 'A' or '*A'
-// for example. If 'A', foo will have external linkage. If we have '*A',
-// foo will have no linkage. Since we can't know until we get to the end
-// of the typedef, this function finds out if D might have non-external linkage.
-// Callers should verify at the end of the TU if it D has external linkage or
-// not.
bool Sema::mightHaveNonExternalLinkage(const DeclaratorDecl *D) {
const DeclContext *DC = D->getDeclContext();
while (!DC->isTranslationUnit()) {
@@ -2011,7 +1919,7 @@ static bool ShouldDiagnoseUnusedDecl(const LangOptions &LangOpts,
// it is, by the bindings' expressions).
bool IsAllPlaceholders = true;
for (const auto *BD : DD->bindings()) {
- if (BD->isReferenced())
+ if (BD->isReferenced() || BD->hasAttr<UnusedAttr>())
return false;
IsAllPlaceholders = IsAllPlaceholders && BD->isPlaceholderVar(LangOpts);
}
@@ -2093,7 +2001,8 @@ static bool ShouldDiagnoseUnusedDecl(const LangOptions &LangOpts,
return false;
if (Init) {
- const auto *Construct = dyn_cast<CXXConstructExpr>(Init);
+ const auto *Construct =
+ dyn_cast<CXXConstructExpr>(Init->IgnoreImpCasts());
if (Construct && !Construct->isElidable()) {
const CXXConstructorDecl *CD = Construct->getConstructor();
if (!CD->isTrivial() && !RD->hasAttr<WarnUnusedAttr>() &&
@@ -2159,8 +2068,6 @@ void Sema::DiagnoseUnusedDecl(const NamedDecl *D) {
D, [this](SourceLocation Loc, PartialDiagnostic PD) { Diag(Loc, PD); });
}
-/// DiagnoseUnusedDecl - Emit warnings about declarations that are not used
-/// unless they are marked attr(unused).
void Sema::DiagnoseUnusedDecl(const NamedDecl *D, DiagReceiverTy DiagReceiver) {
if (!ShouldDiagnoseUnusedDecl(getLangOpts(), D))
return;
@@ -2232,8 +2139,21 @@ void Sema::DiagnoseUnusedButSetDecl(const VarDecl *VD,
assert(iter->getSecond() >= 0 &&
"Found a negative number of references to a VarDecl");
- if (iter->getSecond() != 0)
- return;
+ if (int RefCnt = iter->getSecond(); RefCnt > 0) {
+ // Assume the given VarDecl is "used" if its ref count stored in
+ // `RefMinusAssignments` is positive, with one exception.
+ //
+ // For a C++ variable whose decl (with initializer) entirely consist the
+ // condition expression of a if/while/for construct,
+ // Clang creates a DeclRefExpr for the condition expression rather than a
+ // BinaryOperator of AssignmentOp. Thus, the C++ variable's ref
+ // count stored in `RefMinusAssignment` equals 1 when the variable is never
+ // used in the body of the if/while/for construct.
+ bool UnusedCXXCondDecl = VD->isCXXCondDecl() && (RefCnt == 1);
+ if (!UnusedCXXCondDecl)
+ return;
+ }
+
unsigned DiagID = isa<ParmVarDecl>(VD) ? diag::warn_unused_but_set_parameter
: diag::warn_unused_but_set_variable;
DiagReceiver(VD->getLocation(), PDiag(DiagID) << VD);
@@ -2303,9 +2223,14 @@ void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
if (LabelDecl *LD = dyn_cast<LabelDecl>(D))
CheckPoppedLabel(LD, *this, addDiag);
- // Remove this name from our lexical scope, and warn on it if we haven't
- // already.
- IdResolver.RemoveDecl(D);
+ // Partial translation units that are created in incremental processing must
+ // not clean up the IdResolver because PTUs should take into account the
+ // declarations that came from previous PTUs.
+ if (!PP.isIncrementalProcessingEnabled() || getLangOpts().ObjC ||
+ getLangOpts().CPlusPlus)
+ IdResolver.RemoveDecl(D);
+
+ // Warn on it if we are shadowing a declaration.
auto ShadowI = ShadowingDecls.find(D);
if (ShadowI != ShadowingDecls.end()) {
if (const auto *FD = dyn_cast<FieldDecl>(ShadowI->second)) {
@@ -2315,12 +2240,6 @@ void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
}
ShadowingDecls.erase(ShadowI);
}
-
- if (!getLangOpts().CPlusPlus && S->isClassScope()) {
- if (auto *FD = dyn_cast<FieldDecl>(TmpD);
- FD && FD->hasAttr<CountedByAttr>())
- CheckCountedByAttr(S, FD);
- }
}
llvm::sort(DeclDiags,
@@ -2338,68 +2257,6 @@ void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
}
}
-/// Look for an Objective-C class in the translation unit.
-///
-/// \param Id The name of the Objective-C class we're looking for. If
-/// typo-correction fixes this name, the Id will be updated
-/// to the fixed name.
-///
-/// \param IdLoc The location of the name in the translation unit.
-///
-/// \param DoTypoCorrection If true, this routine will attempt typo correction
-/// if there is no class with the given name.
-///
-/// \returns The declaration of the named Objective-C class, or NULL if the
-/// class could not be found.
-ObjCInterfaceDecl *Sema::getObjCInterfaceDecl(IdentifierInfo *&Id,
- SourceLocation IdLoc,
- bool DoTypoCorrection) {
- // The third "scope" argument is 0 since we aren't enabling lazy built-in
- // creation from this context.
- NamedDecl *IDecl = LookupSingleName(TUScope, Id, IdLoc, LookupOrdinaryName);
-
- if (!IDecl && DoTypoCorrection) {
- // Perform typo correction at the given location, but only if we
- // find an Objective-C class name.
- DeclFilterCCC<ObjCInterfaceDecl> CCC{};
- if (TypoCorrection C =
- CorrectTypo(DeclarationNameInfo(Id, IdLoc), LookupOrdinaryName,
- TUScope, nullptr, CCC, CTK_ErrorRecovery)) {
- diagnoseTypo(C, PDiag(diag::err_undef_interface_suggest) << Id);
- IDecl = C.getCorrectionDeclAs<ObjCInterfaceDecl>();
- Id = IDecl->getIdentifier();
- }
- }
- ObjCInterfaceDecl *Def = dyn_cast_or_null<ObjCInterfaceDecl>(IDecl);
- // This routine must always return a class definition, if any.
- if (Def && Def->getDefinition())
- Def = Def->getDefinition();
- return Def;
-}
-
-/// getNonFieldDeclScope - Retrieves the innermost scope, starting
-/// from S, where a non-field would be declared. This routine copes
-/// with the difference between C and C++ scoping rules in structs and
-/// unions. For example, the following code is well-formed in C but
-/// ill-formed in C++:
-/// @code
-/// struct S6 {
-/// enum { BAR } e;
-/// };
-///
-/// void test_S6() {
-/// struct S6 a;
-/// a.e = BAR;
-/// }
-/// @endcode
-/// For the declaration of BAR, this routine will return a different
-/// scope. The scope S will be the scope of the unnamed enumeration
-/// within S6. In C++, this routine will return the scope associated
-/// with S6, because the enumeration's scope is a transparent
-/// context but structures can contain non-field names. In C, this
-/// routine will return the translation unit scope, since the
-/// enumeration's scope is a transparent context and structures cannot
-/// contain non-field names.
Scope *Sema::getNonFieldDeclScope(Scope *S) {
while (((S->getFlags() & Scope::DeclScope) == 0) ||
(S->getEntity() && S->getEntity()->isTransparentContext()) ||
@@ -2437,10 +2294,17 @@ FunctionDecl *Sema::CreateBuiltin(IdentifierInfo *II, QualType Type,
Parent = CLinkageDecl;
}
- FunctionDecl *New = FunctionDecl::Create(Context, Parent, Loc, Loc, II, Type,
- /*TInfo=*/nullptr, SC_Extern,
- getCurFPFeatures().isFPConstrained(),
- false, Type->isFunctionProtoType());
+ ConstexprSpecKind ConstexprKind = ConstexprSpecKind::Unspecified;
+ if (Context.BuiltinInfo.isImmediate(ID)) {
+ assert(getLangOpts().CPlusPlus20 &&
+ "consteval builtins should only be available in C++20 mode");
+ ConstexprKind = ConstexprSpecKind::Consteval;
+ }
+
+ FunctionDecl *New = FunctionDecl::Create(
+ Context, Parent, Loc, Loc, II, Type, /*TInfo=*/nullptr, SC_Extern,
+ getCurFPFeatures().isFPConstrained(), /*isInlineSpecified=*/false,
+ Type->isFunctionProtoType(), ConstexprKind);
New->setImplicit();
New->addAttr(BuiltinAttr::CreateImplicit(Context, ID));
@@ -2462,10 +2326,6 @@ FunctionDecl *Sema::CreateBuiltin(IdentifierInfo *II, QualType Type,
return New;
}
-/// LazilyCreateBuiltin - The specified Builtin-ID was first used at
-/// file scope. lazily create a decl for it. ForRedeclaration is true
-/// if we're creating this built-in in anticipation of redeclaring the
-/// built-in.
NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc) {
@@ -2531,9 +2391,9 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
/// entity if their types are the same.
/// FIXME: This is notionally doing the same thing as ASTReaderDecl's
/// isSameEntity.
-static void filterNonConflictingPreviousTypedefDecls(Sema &S,
- TypedefNameDecl *Decl,
- LookupResult &Previous) {
+static void
+filterNonConflictingPreviousTypedefDecls(Sema &S, const TypedefNameDecl *Decl,
+ LookupResult &Previous) {
// This is only interesting when modules are enabled.
if (!S.getLangOpts().Modules && !S.getLangOpts().ModulesLocalVisibility)
return;
@@ -2570,9 +2430,9 @@ static void filterNonConflictingPreviousTypedefDecls(Sema &S,
Filter.done();
}
-bool Sema::isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New) {
+bool Sema::isIncompatibleTypedef(const TypeDecl *Old, TypedefNameDecl *New) {
QualType OldType;
- if (TypedefNameDecl *OldTypedef = dyn_cast<TypedefNameDecl>(Old))
+ if (const TypedefNameDecl *OldTypedef = dyn_cast<TypedefNameDecl>(Old))
OldType = OldTypedef->getUnderlyingType();
else
OldType = Context.getTypeDeclType(Old);
@@ -2604,11 +2464,6 @@ bool Sema::isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New) {
return false;
}
-/// MergeTypedefNameDecl - We just parsed a typedef 'New' which has the
-/// same name and scope as a previous declaration 'Old'. Figure out
-/// how to resolve this situation, merging decls or emitting
-/// diagnostics as appropriate. If there was an error, set New to be invalid.
-///
void Sema::MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls) {
// If the new decl is known invalid already, don't bother doing any
@@ -2945,7 +2800,7 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
D, *AA, AA->getPlatform(), AA->isImplicit(), AA->getIntroduced(),
AA->getDeprecated(), AA->getObsoleted(), AA->getUnavailable(),
AA->getMessage(), AA->getStrict(), AA->getReplacement(), AMK,
- AA->getPriority());
+ AA->getPriority(), AA->getEnvironment());
else if (const auto *VA = dyn_cast<VisibilityAttr>(Attr))
NewAttr = S.mergeVisibilityAttr(D, *VA, VA->getVisibility());
else if (const auto *VA = dyn_cast<TypeVisibilityAttr>(Attr))
@@ -2978,7 +2833,7 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
} else if (const auto *MA = dyn_cast<MinSizeAttr>(Attr))
NewAttr = S.mergeMinSizeAttr(D, *MA);
else if (const auto *SNA = dyn_cast<SwiftNameAttr>(Attr))
- NewAttr = S.mergeSwiftNameAttr(D, *SNA, SNA->getName());
+ NewAttr = S.Swift().mergeNameAttr(D, *SNA, SNA->getName());
else if (const auto *OA = dyn_cast<OptimizeNoneAttr>(Attr))
NewAttr = S.mergeOptimizeNoneAttr(D, *OA);
else if (const auto *InternalLinkageA = dyn_cast<InternalLinkageAttr>(Attr))
@@ -2995,9 +2850,9 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
else if (const auto *UA = dyn_cast<UuidAttr>(Attr))
NewAttr = S.mergeUuidAttr(D, *UA, UA->getGuid(), UA->getGuidDecl());
else if (const auto *IMA = dyn_cast<WebAssemblyImportModuleAttr>(Attr))
- NewAttr = S.mergeImportModuleAttr(D, *IMA);
+ NewAttr = S.Wasm().mergeImportModuleAttr(D, *IMA);
else if (const auto *INA = dyn_cast<WebAssemblyImportNameAttr>(Attr))
- NewAttr = S.mergeImportNameAttr(D, *INA);
+ NewAttr = S.Wasm().mergeImportNameAttr(D, *INA);
else if (const auto *TCBA = dyn_cast<EnforceTCBAttr>(Attr))
NewAttr = S.mergeEnforceTCBAttr(D, *TCBA);
else if (const auto *TCBLA = dyn_cast<EnforceTCBLeafAttr>(Attr))
@@ -3005,10 +2860,13 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
else if (const auto *BTFA = dyn_cast<BTFDeclTagAttr>(Attr))
NewAttr = S.mergeBTFDeclTagAttr(D, *BTFA);
else if (const auto *NT = dyn_cast<HLSLNumThreadsAttr>(Attr))
- NewAttr =
- S.mergeHLSLNumThreadsAttr(D, *NT, NT->getX(), NT->getY(), NT->getZ());
+ NewAttr = S.HLSL().mergeNumThreadsAttr(D, *NT, NT->getX(), NT->getY(),
+ NT->getZ());
else if (const auto *SA = dyn_cast<HLSLShaderAttr>(Attr))
- NewAttr = S.mergeHLSLShaderAttr(D, *SA, SA->getType());
+ NewAttr = S.HLSL().mergeShaderAttr(D, *SA, SA->getType());
+ else if (isa<SuppressAttr>(Attr))
+ // Do nothing. Each redeclaration should be suppressed separately.
+ NewAttr = nullptr;
else if (Attr->shouldInheritEvenIfAlreadyPresent() || !DeclHasAttr(D, Attr))
NewAttr = cast<InheritableAttr>(Attr->clone(S.Context));
@@ -3063,7 +2921,7 @@ static void checkNewAttributesAfterDef(Sema &S, Decl *New, const Decl *Old) {
if (isa<AliasAttr>(NewAttribute) || isa<IFuncAttr>(NewAttribute)) {
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(New)) {
- Sema::SkipBodyInfo SkipBody;
+ SkipBodyInfo SkipBody;
S.CheckForFunctionRedefinition(FD, cast<FunctionDecl>(Def), &SkipBody);
// If we're skipping this definition, drop the "alias" attribute.
@@ -3216,7 +3074,6 @@ static void diagnoseMissingConstinit(Sema &S, const VarDecl *InitDecl,
}
}
-/// mergeDeclAttributes - Copy attributes from the Old decl to the New one.
void Sema::mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK) {
if (UsedAttr *OldAttr = Old->getMostRecentDecl()->getAttr<UsedAttr>()) {
@@ -3675,17 +3532,6 @@ static void adjustDeclContextForDeclaratorDecl(DeclaratorDecl *NewD,
FixSemaDC(VD->getDescribedVarTemplate());
}
-/// MergeFunctionDecl - We just parsed a function 'New' from
-/// declarator D which has the same name and scope as a previous
-/// declaration 'Old'. Figure out how to resolve this situation,
-/// merging decls or emitting diagnostics as appropriate.
-///
-/// In C++, New and Old must be declarations that are not
-/// overloaded. Use IsOverload to determine whether New and Old are
-/// overloaded, and to select the Old declaration that New should be
-/// merged with.
-///
-/// Returns true if there was an error, false otherwise.
bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD, Scope *S,
bool MergeTypeWithOld, bool NewDeclIsDefn) {
// Verify the old decl was also a function.
@@ -3967,6 +3813,51 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD, Scope *S,
return true;
}
+ QualType OldQTypeForComparison = OldQType;
+ if (Context.hasAnyFunctionEffects()) {
+ const auto OldFX = Old->getFunctionEffects();
+ const auto NewFX = New->getFunctionEffects();
+ if (OldFX != NewFX) {
+ const auto Diffs = FunctionEffectDifferences(OldFX, NewFX);
+ for (const auto &Diff : Diffs) {
+ if (Diff.shouldDiagnoseRedeclaration(*Old, OldFX, *New, NewFX)) {
+ Diag(New->getLocation(),
+ diag::warn_mismatched_func_effect_redeclaration)
+ << Diff.effectName();
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ }
+ }
+ // Following a warning, we could skip merging effects from the previous
+ // declaration, but that would trigger an additional "conflicting types"
+ // error.
+ if (const auto *NewFPT = NewQType->getAs<FunctionProtoType>()) {
+ FunctionEffectSet::Conflicts MergeErrs;
+ FunctionEffectSet MergedFX =
+ FunctionEffectSet::getUnion(OldFX, NewFX, MergeErrs);
+ if (!MergeErrs.empty())
+ diagnoseFunctionEffectMergeConflicts(MergeErrs, New->getLocation(),
+ Old->getLocation());
+
+ FunctionProtoType::ExtProtoInfo EPI = NewFPT->getExtProtoInfo();
+ EPI.FunctionEffects = FunctionEffectsRef(MergedFX);
+ QualType ModQT = Context.getFunctionType(NewFPT->getReturnType(),
+ NewFPT->getParamTypes(), EPI);
+
+ New->setType(ModQT);
+ NewQType = New->getType();
+
+ // Revise OldQTForComparison to include the merged effects,
+ // so as not to fail due to differences later.
+ if (const auto *OldFPT = OldQType->getAs<FunctionProtoType>()) {
+ EPI = OldFPT->getExtProtoInfo();
+ EPI.FunctionEffects = FunctionEffectsRef(MergedFX);
+ OldQTypeForComparison = Context.getFunctionType(
+ OldFPT->getReturnType(), OldFPT->getParamTypes(), EPI);
+ }
+ }
+ }
+ }
+
if (getLangOpts().CPlusPlus) {
OldQType = Context.getCanonicalType(Old->getType());
NewQType = Context.getCanonicalType(New->getType());
@@ -4078,13 +3969,13 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD, Scope *S,
} else {
Diag(NewMethod->getLocation(),
diag::err_definition_of_implicitly_declared_member)
- << New << getSpecialMember(OldMethod);
+ << New << llvm::to_underlying(getSpecialMember(OldMethod));
return true;
}
} else if (OldMethod->getFirstDecl()->isExplicitlyDefaulted() && !isFriend) {
Diag(NewMethod->getLocation(),
diag::err_definition_of_explicitly_defaulted_member)
- << getSpecialMember(OldMethod);
+ << llvm::to_underlying(getSpecialMember(OldMethod));
return true;
}
}
@@ -4131,9 +4022,8 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD, Scope *S,
// We also want to respect all the extended bits except noreturn.
// noreturn should now match unless the old type info didn't have it.
- QualType OldQTypeForComparison = OldQType;
if (!OldTypeInfo.getNoReturn() && NewTypeInfo.getNoReturn()) {
- auto *OldType = OldQType->castAs<FunctionProtoType>();
+ auto *OldType = OldQTypeForComparison->castAs<FunctionProtoType>();
const FunctionType *OldTypeForComparison
= Context.adjustFunctionType(OldType, OldTypeInfo.withNoReturn(true));
OldQTypeForComparison = QualType(OldTypeForComparison, 0);
@@ -4202,7 +4092,7 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD, Scope *S,
// If we are merging two functions where only one of them has a prototype,
// we may have enough information to decide to issue a diagnostic that the
- // function without a protoype will change behavior in C23. This handles
+ // function without a prototype will change behavior in C23. This handles
// cases like:
// void i(); void i(int j);
// void i(int j); void i();
@@ -4375,15 +4265,6 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD, Scope *S,
return true;
}
-/// Completes the merge of two function declarations that are
-/// known to be compatible.
-///
-/// This routine handles the merging of attributes and other
-/// properties of function declarations from the old declaration to
-/// the new declaration, once we know that New is in fact a
-/// redeclaration of Old.
-///
-/// \returns false
bool Sema::MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld) {
// Merge the attributes
@@ -4440,7 +4321,7 @@ void Sema::mergeObjCMethodDecls(ObjCMethodDecl *newMethod,
ni != ne && oi != oe; ++ni, ++oi)
mergeParamDeclAttributes(*ni, *oi, *this);
- CheckObjCMethodOverride(newMethod, oldMethod);
+ ObjC().CheckObjCMethodOverride(newMethod, oldMethod);
}
static void diagnoseVarDeclTypeMismatch(Sema &S, VarDecl *New, VarDecl* Old) {
@@ -4459,13 +4340,6 @@ static void diagnoseVarDeclTypeMismatch(Sema &S, VarDecl *New, VarDecl* Old) {
New->setInvalidDecl();
}
-/// MergeVarDeclTypes - We parsed a variable 'New' which has the same name and
-/// scope as a previous declaration 'Old'. Figure out how to merge their types,
-/// emitting diagnostics as appropriate.
-///
-/// Declarations using the auto type specifier (C++ [decl.spec.auto]) call back
-/// to here in AddInitializerToDecl. We can't check them before the initializer
-/// is attached.
void Sema::MergeVarDeclTypes(VarDecl *New, VarDecl *Old,
bool MergeTypeWithOld) {
if (New->isInvalidDecl() || Old->isInvalidDecl() || New->getType()->containsErrors() || Old->getType()->containsErrors())
@@ -4582,14 +4456,6 @@ static bool mergeTypeWithPrevious(Sema &S, VarDecl *NewVD, VarDecl *OldVD,
}
}
-/// MergeVarDecl - We just parsed a variable 'New' which has the same name
-/// and scope as a previous declaration 'Old'. Figure out how to resolve this
-/// situation, merging decls or emitting diagnostics as appropriate.
-///
-/// Tentative definition rules (C99 6.9.2p2) are checked by
-/// FinalizeDeclaratorGroup. Unfortunately, we can't analyze tentative
-/// definitions here, since the initializer hasn't been attached.
-///
void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
// If the new decl is already invalid, don't do any other checking.
if (New->isInvalidDecl())
@@ -4652,16 +4518,18 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
}
mergeDeclAttributes(New, Old);
- // Warn if an already-declared variable is made a weak_import in a subsequent
+ // Warn if an already-defined variable is made a weak_import in a subsequent
// declaration
- if (New->hasAttr<WeakImportAttr>() &&
- Old->getStorageClass() == SC_None &&
- !Old->hasAttr<WeakImportAttr>()) {
- Diag(New->getLocation(), diag::warn_weak_import) << New->getDeclName();
- Diag(Old->getLocation(), diag::note_previous_declaration);
- // Remove weak_import attribute on new declaration.
- New->dropAttr<WeakImportAttr>();
- }
+ if (New->hasAttr<WeakImportAttr>())
+ for (auto *D = Old; D; D = D->getPreviousDecl()) {
+ if (D->isThisDeclarationADefinition() != VarDecl::DeclarationOnly) {
+ Diag(New->getLocation(), diag::warn_weak_import) << New->getDeclName();
+ Diag(D->getLocation(), diag::note_previous_definition);
+ // Remove weak_import attribute on new declaration.
+ New->dropAttr<WeakImportAttr>();
+ break;
+ }
+ }
if (const auto *ILA = New->getAttr<InternalLinkageAttr>())
if (!Old->hasAttr<InternalLinkageAttr>()) {
@@ -4882,8 +4750,6 @@ void Sema::notePreviousDefinition(const NamedDecl *Old, SourceLocation New) {
Diag(Old->getLocation(), diag::note_previous_definition);
}
-/// We've just determined that \p Old and \p New both appear to be definitions
-/// of the same variable. Either diagnose or fix the problem.
bool Sema::checkVarDeclRedefinition(VarDecl *Old, VarDecl *New) {
if (!hasVisibleDefinition(Old) &&
(New->getFormalLinkage() == Linkage::Internal || New->isInline() ||
@@ -4907,8 +4773,6 @@ bool Sema::checkVarDeclRedefinition(VarDecl *Old, VarDecl *New) {
}
}
-/// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
-/// no declarator (e.g. "struct foo;") is parsed.
Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS,
const ParsedAttributesView &DeclAttrs,
@@ -5048,7 +4912,7 @@ void Sema::setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
if (TagFromDeclSpec->hasNameForLinkage())
return;
- // A well-formed anonymous tag must always be a TUK_Definition.
+ // A well-formed anonymous tag must always be a TagUseKind::Definition.
assert(TagFromDeclSpec->isThisDeclarationADefinition());
// The type must match the tag exactly; no qualifiers allowed.
@@ -5130,9 +4994,7 @@ static unsigned GetDiagnosticTypeSpecifierID(const DeclSpec &DS) {
llvm_unreachable("unexpected type specifier");
}
}
-/// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
-/// no declarator (e.g. "struct foo;") is parsed. It also accepts template
-/// parameters to cope with template friend declarations.
+
Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS,
const ParsedAttributesView &DeclAttrs,
@@ -5189,6 +5051,8 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_tag)
<< GetDiagnosticTypeSpecifierID(DS)
<< static_cast<int>(DS.getConstexprSpecifier());
+ else if (getLangOpts().C23)
+ Diag(DS.getConstexprSpecLoc(), diag::err_c23_constexpr_not_variable);
else
Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_wrong_decl_kind)
<< static_cast<int>(DS.getConstexprSpecifier());
@@ -5206,25 +5070,6 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
return ActOnFriendTypeDecl(S, DS, TemplateParams);
}
- const CXXScopeSpec &SS = DS.getTypeSpecScope();
- bool IsExplicitSpecialization =
- !TemplateParams.empty() && TemplateParams.back()->size() == 0;
- if (Tag && SS.isNotEmpty() && !Tag->isCompleteDefinition() &&
- !IsExplicitInstantiation && !IsExplicitSpecialization &&
- !isa<ClassTemplatePartialSpecializationDecl>(Tag)) {
- // Per C++ [dcl.type.elab]p1, a class declaration cannot have a
- // nested-name-specifier unless it is an explicit instantiation
- // or an explicit specialization.
- //
- // FIXME: We allow class template partial specializations here too, per the
- // obvious intent of DR1819.
- //
- // Per C++ [dcl.enum]p1, an opaque-enum-declaration can't either.
- Diag(SS.getBeginLoc(), diag::err_standalone_class_nested_name_specifier)
- << GetDiagnosticTypeSpecifierID(DS) << SS.getRange();
- return nullptr;
- }
-
// Track whether this decl-specifier declares anything.
bool DeclaresAnything = true;
@@ -5417,7 +5262,7 @@ static bool CheckAnonMemberRedeclaration(Sema &SemaRef, Scope *S,
LookupResult R(SemaRef, Name, NameLoc,
Owner->isRecord() ? Sema::LookupMemberName
: Sema::LookupOrdinaryName,
- Sema::ForVisibleRedeclaration);
+ RedeclarationKind::ForVisibleRedeclaration);
if (!SemaRef.LookupName(R, S)) return false;
// Pick a representative declaration.
@@ -5447,10 +5292,6 @@ void Sema::ActOnDefinedDeclarationSpecifier(Decl *D) {
DiagPlaceholderFieldDeclDefinitions(RD);
}
-/// Emit diagnostic warnings for placeholder members.
-/// We can only do that after the class is fully constructed,
-/// as anonymous union/structs can insert placeholders
-/// in their parent scope (which might be a Record).
void Sema::DiagPlaceholderFieldDeclDefinitions(RecordDecl *Record) {
if (!getLangOpts().CPlusPlus)
return;
@@ -5604,10 +5445,6 @@ static void checkDuplicateDefaultInit(Sema &S, CXXRecordDecl *Parent,
checkDuplicateDefaultInit(S, Parent, findDefaultInitializer(AnonUnion));
}
-/// BuildAnonymousStructOrUnion - Handle the declaration of an
-/// anonymous structure or union. Anonymous unions are a C++ feature
-/// (C++ [class.union]) and a C11 feature; anonymous structures
-/// are a C11 feature and GNU C++ extension.
Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
@@ -5832,6 +5669,9 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
Anon = VarDecl::Create(Context, Owner, DS.getBeginLoc(),
Record->getLocation(), /*IdentifierInfo=*/nullptr,
Context.getTypeDeclType(Record), TInfo, SC);
+ if (Invalid)
+ Anon->setInvalidDecl();
+
ProcessDeclAttributes(S, Anon, Dc);
// Default-initialize the implicit variable. This initialization will be
@@ -5881,19 +5721,6 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
return Anon;
}
-/// BuildMicrosoftCAnonymousStruct - Handle the declaration of an
-/// Microsoft C anonymous structure.
-/// Ref: http://msdn.microsoft.com/en-us/library/z2cx9y4f.aspx
-/// Example:
-///
-/// struct A { int a; };
-/// struct B { struct A; int b; };
-///
-/// void foo() {
-/// B var;
-/// var.a = 3;
-/// }
-///
Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record) {
assert(Record && "expected a record!");
@@ -5936,13 +5763,10 @@ Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
return Anon;
}
-/// GetNameForDeclarator - Determine the full declaration name for the
-/// given Declarator.
DeclarationNameInfo Sema::GetNameForDeclarator(Declarator &D) {
return GetNameFromUnqualifiedId(D.getName());
}
-/// Retrieves the declaration name from a parsed unqualified-id.
DeclarationNameInfo
Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
DeclarationNameInfo NameInfo;
@@ -6212,11 +6036,12 @@ Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) {
// Check if we are in an `omp begin/end declare variant` scope. Handle this
// declaration only if the `bind_to_declaration` extension is set.
SmallVector<FunctionDecl *, 4> Bases;
- if (LangOpts.OpenMP && isInOpenMPDeclareVariantScope())
- if (getOMPTraitInfoForSurroundingScope()->isExtensionActive(llvm::omp::TraitProperty::
- implementation_extension_bind_to_declaration))
- ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
- S, D, MultiTemplateParamsArg(), Bases);
+ if (LangOpts.OpenMP && OpenMP().isInOpenMPDeclareVariantScope())
+ if (OpenMP().getOMPTraitInfoForSurroundingScope()->isExtensionActive(
+ llvm::omp::TraitProperty::
+ implementation_extension_bind_to_declaration))
+ OpenMP().ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
+ S, D, MultiTemplateParamsArg(), Bases);
Decl *Dcl = HandleDeclarator(S, D, MultiTemplateParamsArg());
@@ -6225,18 +6050,12 @@ Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) {
Dcl->setTopLevelDeclInObjCContainer();
if (!Bases.empty())
- ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(Dcl, Bases);
+ OpenMP().ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(Dcl,
+ Bases);
return Dcl;
}
-/// DiagnoseClassNameShadow - Implement C++ [class.mem]p13:
-/// If T is the name of a class, then each of the following shall have a
-/// name different from T:
-/// - every static data member of class T;
-/// - every member function of class T
-/// - every member of class T that is itself a type;
-/// \returns true if the declaration name violates these rules.
bool Sema::DiagnoseClassNameShadow(DeclContext *DC,
DeclarationNameInfo NameInfo) {
DeclarationName Name = NameInfo.getName();
@@ -6252,25 +6071,13 @@ bool Sema::DiagnoseClassNameShadow(DeclContext *DC,
return false;
}
-/// Diagnose a declaration whose declarator-id has the given
-/// nested-name-specifier.
-///
-/// \param SS The nested-name-specifier of the declarator-id.
-///
-/// \param DC The declaration context to which the nested-name-specifier
-/// resolves.
-///
-/// \param Name The name of the entity being declared.
-///
-/// \param Loc The location of the name of the entity being declared.
-///
-/// \param IsTemplateId Whether the name is a (simple-)template-id, and thus
-/// we're declaring an explicit / partial specialization / instantiation.
-///
-/// \returns true if we cannot safely recover from this error, false otherwise.
bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name,
- SourceLocation Loc, bool IsTemplateId) {
+ SourceLocation Loc,
+ TemplateIdAnnotation *TemplateId,
+ bool IsMemberSpecialization) {
+ assert(SS.isValid() && "diagnoseQualifiedDeclaration called for declaration "
+ "without nested-name-specifier");
DeclContext *Cur = CurContext;
while (isa<LinkageSpecDecl>(Cur) || isa<CapturedDecl>(Cur))
Cur = Cur->getParent();
@@ -6299,7 +6106,7 @@ bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
// Check whether the qualifying scope encloses the scope of the original
// declaration. For a template-id, we perform the checks in
// CheckTemplateSpecializationScope.
- if (!Cur->Encloses(DC) && !IsTemplateId) {
+ if (!Cur->Encloses(DC) && !(TemplateId || IsMemberSpecialization)) {
if (Cur->isRecord())
Diag(Loc, diag::err_member_qualification)
<< Name << SS.getRange();
@@ -6345,16 +6152,49 @@ bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
return false;
}
- // C++11 [dcl.meaning]p1:
- // [...] "The nested-name-specifier of the qualified declarator-id shall
- // not begin with a decltype-specifer"
+ // C++23 [temp.names]p5:
+ // The keyword template shall not appear immediately after a declarative
+ // nested-name-specifier.
+ //
+ // First check the template-id (if any), and then check each component of the
+ // nested-name-specifier in reverse order.
+ //
+ // FIXME: nested-name-specifiers in friend declarations are declarative,
+ // but we don't call diagnoseQualifiedDeclaration for them. We should.
+ if (TemplateId && TemplateId->TemplateKWLoc.isValid())
+ Diag(Loc, diag::ext_template_after_declarative_nns)
+ << FixItHint::CreateRemoval(TemplateId->TemplateKWLoc);
+
NestedNameSpecifierLoc SpecLoc(SS.getScopeRep(), SS.location_data());
- while (SpecLoc.getPrefix())
- SpecLoc = SpecLoc.getPrefix();
- if (isa_and_nonnull<DecltypeType>(
- SpecLoc.getNestedNameSpecifier()->getAsType()))
- Diag(Loc, diag::err_decltype_in_declarator)
- << SpecLoc.getTypeLoc().getSourceRange();
+ do {
+ if (SpecLoc.getNestedNameSpecifier()->getKind() ==
+ NestedNameSpecifier::TypeSpecWithTemplate)
+ Diag(Loc, diag::ext_template_after_declarative_nns)
+ << FixItHint::CreateRemoval(
+ SpecLoc.getTypeLoc().getTemplateKeywordLoc());
+
+ if (const Type *T = SpecLoc.getNestedNameSpecifier()->getAsType()) {
+ if (const auto *TST = T->getAsAdjusted<TemplateSpecializationType>()) {
+ // C++23 [expr.prim.id.qual]p3:
+ // [...] If a nested-name-specifier N is declarative and has a
+ // simple-template-id with a template argument list A that involves a
+ // template parameter, let T be the template nominated by N without A.
+ // T shall be a class template.
+ if (TST->isDependentType() && TST->isTypeAlias())
+ Diag(Loc, diag::ext_alias_template_in_declarative_nns)
+ << SpecLoc.getLocalSourceRange();
+ } else if (T->isDecltypeType() || T->getAsAdjusted<PackIndexingType>()) {
+ // C++23 [expr.prim.id.qual]p2:
+ // [...] A declarative nested-name-specifier shall not have a
+ // computed-type-specifier.
+ //
+ // CWG2858 changed this from 'decltype-specifier' to
+ // 'computed-type-specifier'.
+ Diag(Loc, diag::err_computed_type_in_declarative_nns)
+ << T->isDecltypeType() << SpecLoc.getTypeLoc().getSourceRange();
+ }
+ }
+ } while ((SpecLoc = SpecLoc.getPrefix()));
return false;
}
@@ -6377,12 +6217,6 @@ NamedDecl *Sema::HandleDeclarator(Scope *S, Declarator &D,
} else if (DiagnoseUnexpandedParameterPack(NameInfo, UPPC_DeclarationType))
return nullptr;
- // The scope passed in may not be a decl scope. Zip up the scope tree until
- // we find one that is.
- while ((S->getFlags() & Scope::DeclScope) == 0 ||
- (S->getFlags() & Scope::TemplateParamScope) != 0)
- S = S->getParent();
-
DeclContext *DC = CurContext;
if (D.getCXXScopeSpec().isInvalid())
D.setInvalidType();
@@ -6418,9 +6252,13 @@ NamedDecl *Sema::HandleDeclarator(Scope *S, Declarator &D,
return nullptr;
}
if (!D.getDeclSpec().isFriendSpecified()) {
- if (diagnoseQualifiedDeclaration(
- D.getCXXScopeSpec(), DC, Name, D.getIdentifierLoc(),
- D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId)) {
+ TemplateIdAnnotation *TemplateId =
+ D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId
+ ? D.getName().TemplateId
+ : nullptr;
+ if (diagnoseQualifiedDeclaration(D.getCXXScopeSpec(), DC, Name,
+ D.getIdentifierLoc(), TemplateId,
+ /*IsMemberSpecialization=*/false)) {
if (DC->isRecord())
return nullptr;
@@ -6474,7 +6312,8 @@ NamedDecl *Sema::HandleDeclarator(Scope *S, Declarator &D,
if (IsLinkageLookup) {
Previous.clear(LookupRedeclarationWithLinkage);
- Previous.setRedeclarationKind(ForExternalRedeclaration);
+ Previous.setRedeclarationKind(
+ RedeclarationKind::ForExternalRedeclaration);
}
LookupName(Previous, S, CreateBuiltins);
@@ -6506,12 +6345,22 @@ NamedDecl *Sema::HandleDeclarator(Scope *S, Declarator &D,
RemoveUsingDecls(Previous);
}
- if (Previous.isSingleResult() &&
- Previous.getFoundDecl()->isTemplateParameter()) {
- // Maybe we will complain about the shadowed template parameter.
- if (!D.isInvalidType())
- DiagnoseTemplateParameterShadow(D.getIdentifierLoc(),
- Previous.getFoundDecl());
+ if (auto *TPD = Previous.getAsSingle<NamedDecl>();
+ TPD && TPD->isTemplateParameter()) {
+ // Older versions of clang allowed the names of function/variable templates
+ // to shadow the names of their template parameters. For the compatibility
+ // purposes we detect such cases and issue a default-to-error warning that
+ // can be disabled with -Wno-strict-primary-template-shadow.
+ if (!D.isInvalidType()) {
+ bool AllowForCompatibility = false;
+ if (Scope *DeclParent = S->getDeclParent();
+ Scope *TemplateParamParent = S->getTemplateParamParent()) {
+ AllowForCompatibility = DeclParent->Contains(*TemplateParamParent) &&
+ TemplateParamParent->isDeclScope(TPD);
+ }
+ DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), TPD,
+ AllowForCompatibility);
+ }
// Just pretend that we didn't see the previous declaration.
Previous.clear();
@@ -6535,6 +6384,9 @@ NamedDecl *Sema::HandleDeclarator(Scope *S, Declarator &D,
if (getLangOpts().CPlusPlus)
CheckExtraCXXDefaultArguments(D);
+ /// Get the innermost enclosing declaration scope.
+ S = S->getDeclParent();
+
NamedDecl *New;
bool AddToScope = true;
@@ -6562,8 +6414,8 @@ NamedDecl *Sema::HandleDeclarator(Scope *S, Declarator &D,
if (New->getDeclName() && AddToScope)
PushOnScopeChains(New, S);
- if (isInOpenMPDeclareTargetContext())
- checkDeclIsAllowedInOpenMPTarget(nullptr, New);
+ if (OpenMP().isInOpenMPDeclareTargetContext())
+ OpenMP().checkDeclIsAllowedInOpenMPTarget(nullptr, New);
return New;
}
@@ -6702,8 +6554,6 @@ TryToFixInvalidVariablyModifiedTypeSourceInfo(TypeSourceInfo *TInfo,
return FixedTInfo;
}
-/// Attempt to fold a variable-sized type to a constant-sized type, returning
-/// true if we were successful.
bool Sema::tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo,
QualType &T, SourceLocation Loc,
unsigned FailedFoldDiagID) {
@@ -6727,10 +6577,6 @@ bool Sema::tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo,
return false;
}
-/// Register the given locally-scoped extern "C" declaration so
-/// that it can be found later for redeclarations. We include any extern "C"
-/// declaration that is not visible in the translation unit here, not just
-/// function-scope declarations.
void
Sema::RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S) {
if (!getLangOpts().CPlusPlus &&
@@ -6748,8 +6594,6 @@ NamedDecl *Sema::findLocallyScopedExternCDecl(DeclarationName Name) {
return Result.empty() ? nullptr : *Result.begin();
}
-/// Diagnose function specifiers on a declaration of an identifier that
-/// does not identify a function.
void Sema::DiagnoseFunctionSpecifiers(const DeclSpec &DS) {
// FIXME: We should probably indicate the identifier in question to avoid
// confusion for constructs like "virtual int a(), b;"
@@ -6850,9 +6694,6 @@ Sema::CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *NewTD) {
}
}
-/// ActOnTypedefNameDecl - Perform semantic checking for a declaration which
-/// declares a typedef-name, either using the 'typedef' type specifier or via
-/// a C++0x [dcl.typedef]p2 alias-declaration: 'using T = A;'.
NamedDecl*
Sema::ActOnTypedefNameDecl(Scope *S, DeclContext *DC, TypedefNameDecl *NewTD,
LookupResult &Previous, bool &Redeclaration) {
@@ -6879,21 +6720,21 @@ Sema::ActOnTypedefNameDecl(Scope *S, DeclContext *DC, TypedefNameDecl *NewTD,
if (IdentifierInfo *II = NewTD->getIdentifier())
if (!NewTD->isInvalidDecl() &&
NewTD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
- switch (II->getInterestingIdentifierID()) {
- case tok::InterestingIdentifierKind::FILE:
+ switch (II->getNotableIdentifierID()) {
+ case tok::NotableIdentifierKind::FILE:
Context.setFILEDecl(NewTD);
break;
- case tok::InterestingIdentifierKind::jmp_buf:
+ case tok::NotableIdentifierKind::jmp_buf:
Context.setjmp_bufDecl(NewTD);
break;
- case tok::InterestingIdentifierKind::sigjmp_buf:
+ case tok::NotableIdentifierKind::sigjmp_buf:
Context.setsigjmp_bufDecl(NewTD);
break;
- case tok::InterestingIdentifierKind::ucontext_t:
+ case tok::NotableIdentifierKind::ucontext_t:
Context.setucontext_tDecl(NewTD);
break;
- case tok::InterestingIdentifierKind::float_t:
- case tok::InterestingIdentifierKind::double_t:
+ case tok::NotableIdentifierKind::float_t:
+ case tok::NotableIdentifierKind::double_t:
NewTD->addAttr(AvailableOnlyInDefaultEvalMethodAttr::Create(Context));
break;
default:
@@ -6968,50 +6809,6 @@ static void SetNestedNameSpecifier(Sema &S, DeclaratorDecl *DD, Declarator &D) {
DD->setQualifierInfo(SS.getWithLocInContext(S.Context));
}
-bool Sema::inferObjCARCLifetime(ValueDecl *decl) {
- QualType type = decl->getType();
- Qualifiers::ObjCLifetime lifetime = type.getObjCLifetime();
- if (lifetime == Qualifiers::OCL_Autoreleasing) {
- // Various kinds of declaration aren't allowed to be __autoreleasing.
- unsigned kind = -1U;
- if (VarDecl *var = dyn_cast<VarDecl>(decl)) {
- if (var->hasAttr<BlocksAttr>())
- kind = 0; // __block
- else if (!var->hasLocalStorage())
- kind = 1; // global
- } else if (isa<ObjCIvarDecl>(decl)) {
- kind = 3; // ivar
- } else if (isa<FieldDecl>(decl)) {
- kind = 2; // field
- }
-
- if (kind != -1U) {
- Diag(decl->getLocation(), diag::err_arc_autoreleasing_var)
- << kind;
- }
- } else if (lifetime == Qualifiers::OCL_None) {
- // Try to infer lifetime.
- if (!type->isObjCLifetimeType())
- return false;
-
- lifetime = type->getObjCARCImplicitLifetime();
- type = Context.getLifetimeQualifiedType(type, lifetime);
- decl->setType(type);
- }
-
- if (VarDecl *var = dyn_cast<VarDecl>(decl)) {
- // Thread-local variables cannot have lifetime.
- if (lifetime && lifetime != Qualifiers::OCL_ExplicitNone &&
- var->getTLSKind()) {
- Diag(var->getLocation(), diag::err_arc_thread_ownership)
- << var->getType();
- return true;
- }
- }
-
- return false;
-}
-
void Sema::deduceOpenCLAddressSpace(ValueDecl *Decl) {
if (Decl->getType().hasAddressSpace())
return;
@@ -7093,6 +6890,11 @@ static void checkAttributesAfterMerging(Sema &S, NamedDecl &ND) {
}
}
+ if (HybridPatchableAttr *Attr = ND.getAttr<HybridPatchableAttr>()) {
+ if (!ND.isExternallyVisible())
+ S.Diag(Attr->getLocation(),
+ diag::warn_attribute_hybrid_patchable_non_extern);
+ }
if (const InheritableAttr *Attr = getDLLAttr(&ND)) {
auto *VD = dyn_cast<VarDecl>(&ND);
bool IsAnonymousNS = false;
@@ -7383,8 +7185,6 @@ static bool hasParsedAttr(Scope *S, const Declarator &PD,
PD.getDeclarationAttributes().hasAttribute(Kind);
}
-/// Adjust the \c DeclContext for a function or variable that might be a
-/// function-local external declaration.
bool Sema::adjustContextForLocalExternDecl(DeclContext *&DC) {
if (!DC->isFunctionOrMethod())
return false;
@@ -7641,6 +7441,11 @@ NamedDecl *Sema::ActOnVariableDeclarator(
tryToFixVariablyModifiedVarType(TInfo, R, D.getIdentifierLoc(),
/*DiagID=*/0);
+ if (AutoTypeLoc TL = TInfo->getTypeLoc().getContainedAutoTypeLoc()) {
+ const AutoType *AT = TL.getTypePtr();
+ CheckConstrainedAuto(AT, TL.getConceptNameLoc());
+ }
+
bool IsMemberSpecialization = false;
bool IsVariableTemplateSpecialization = false;
bool IsPartialSpecialization = false;
@@ -7664,80 +7469,8 @@ NamedDecl *Sema::ActOnVariableDeclarator(
NTCUC_AutoVar, NTCUK_Destruct);
} else {
bool Invalid = false;
-
- if (DC->isRecord() && !CurContext->isRecord()) {
- // This is an out-of-line definition of a static data member.
- switch (SC) {
- case SC_None:
- break;
- case SC_Static:
- Diag(D.getDeclSpec().getStorageClassSpecLoc(),
- diag::err_static_out_of_line)
- << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
- break;
- case SC_Auto:
- case SC_Register:
- case SC_Extern:
- // [dcl.stc] p2: The auto or register specifiers shall be applied only
- // to names of variables declared in a block or to function parameters.
- // [dcl.stc] p6: The extern specifier cannot be used in the declaration
- // of class members
-
- Diag(D.getDeclSpec().getStorageClassSpecLoc(),
- diag::err_storage_class_for_static_member)
- << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
- break;
- case SC_PrivateExtern:
- llvm_unreachable("C storage class in c++!");
- }
- }
-
- if (SC == SC_Static && CurContext->isRecord()) {
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) {
- // Walk up the enclosing DeclContexts to check for any that are
- // incompatible with static data members.
- const DeclContext *FunctionOrMethod = nullptr;
- const CXXRecordDecl *AnonStruct = nullptr;
- for (DeclContext *Ctxt = DC; Ctxt; Ctxt = Ctxt->getParent()) {
- if (Ctxt->isFunctionOrMethod()) {
- FunctionOrMethod = Ctxt;
- break;
- }
- const CXXRecordDecl *ParentDecl = dyn_cast<CXXRecordDecl>(Ctxt);
- if (ParentDecl && !ParentDecl->getDeclName()) {
- AnonStruct = ParentDecl;
- break;
- }
- }
- if (FunctionOrMethod) {
- // C++ [class.static.data]p5: A local class shall not have static data
- // members.
- Diag(D.getIdentifierLoc(),
- diag::err_static_data_member_not_allowed_in_local_class)
- << Name << RD->getDeclName()
- << llvm::to_underlying(RD->getTagKind());
- } else if (AnonStruct) {
- // C++ [class.static.data]p4: Unnamed classes and classes contained
- // directly or indirectly within unnamed classes shall not contain
- // static data members.
- Diag(D.getIdentifierLoc(),
- diag::err_static_data_member_not_allowed_in_anon_struct)
- << Name << llvm::to_underlying(AnonStruct->getTagKind());
- Invalid = true;
- } else if (RD->isUnion()) {
- // C++98 [class.union]p1: If a union contains a static data member,
- // the program is ill-formed. C++11 drops this restriction.
- Diag(D.getIdentifierLoc(),
- getLangOpts().CPlusPlus11
- ? diag::warn_cxx98_compat_static_data_member_in_union
- : diag::ext_static_data_member_in_union) << Name;
- }
- }
- }
-
// Match up the template parameter lists with the scope specifier, then
// determine whether we have a template or a template specialization.
- bool InvalidScope = false;
TemplateParams = MatchTemplateParametersToScopeSpecifier(
D.getDeclSpec().getBeginLoc(), D.getIdentifierLoc(),
D.getCXXScopeSpec(),
@@ -7745,8 +7478,7 @@ NamedDecl *Sema::ActOnVariableDeclarator(
? D.getName().TemplateId
: nullptr,
TemplateParamLists,
- /*never a friend*/ false, IsMemberSpecialization, InvalidScope);
- Invalid |= InvalidScope;
+ /*never a friend*/ false, IsMemberSpecialization, Invalid);
if (TemplateParams) {
if (!TemplateParams->size() &&
@@ -7789,13 +7521,109 @@ NamedDecl *Sema::ActOnVariableDeclarator(
"should have a 'template<>' for this decl");
}
+ bool IsExplicitSpecialization =
+ IsVariableTemplateSpecialization && !IsPartialSpecialization;
+
+ // C++ [temp.expl.spec]p2:
+ // The declaration in an explicit-specialization shall not be an
+ // export-declaration. An explicit specialization shall not use a
+ // storage-class-specifier other than thread_local.
+ //
+ // We use the storage-class-specifier from DeclSpec because we may have
+ // added implicit 'extern' for declarations with __declspec(dllimport)!
+ if (SCSpec != DeclSpec::SCS_unspecified &&
+ (IsExplicitSpecialization || IsMemberSpecialization)) {
+ Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ diag::ext_explicit_specialization_storage_class)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
+ }
+
+ if (CurContext->isRecord()) {
+ if (SC == SC_Static) {
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) {
+ // Walk up the enclosing DeclContexts to check for any that are
+ // incompatible with static data members.
+ const DeclContext *FunctionOrMethod = nullptr;
+ const CXXRecordDecl *AnonStruct = nullptr;
+ for (DeclContext *Ctxt = DC; Ctxt; Ctxt = Ctxt->getParent()) {
+ if (Ctxt->isFunctionOrMethod()) {
+ FunctionOrMethod = Ctxt;
+ break;
+ }
+ const CXXRecordDecl *ParentDecl = dyn_cast<CXXRecordDecl>(Ctxt);
+ if (ParentDecl && !ParentDecl->getDeclName()) {
+ AnonStruct = ParentDecl;
+ break;
+ }
+ }
+ if (FunctionOrMethod) {
+ // C++ [class.static.data]p5: A local class shall not have static
+ // data members.
+ Diag(D.getIdentifierLoc(),
+ diag::err_static_data_member_not_allowed_in_local_class)
+ << Name << RD->getDeclName()
+ << llvm::to_underlying(RD->getTagKind());
+ } else if (AnonStruct) {
+ // C++ [class.static.data]p4: Unnamed classes and classes contained
+ // directly or indirectly within unnamed classes shall not contain
+ // static data members.
+ Diag(D.getIdentifierLoc(),
+ diag::err_static_data_member_not_allowed_in_anon_struct)
+ << Name << llvm::to_underlying(AnonStruct->getTagKind());
+ Invalid = true;
+ } else if (RD->isUnion()) {
+ // C++98 [class.union]p1: If a union contains a static data member,
+ // the program is ill-formed. C++11 drops this restriction.
+ Diag(D.getIdentifierLoc(),
+ getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_static_data_member_in_union
+ : diag::ext_static_data_member_in_union)
+ << Name;
+ }
+ }
+ } else if (IsVariableTemplate || IsPartialSpecialization) {
+ // There is no such thing as a member field template.
+ Diag(D.getIdentifierLoc(), diag::err_template_member)
+ << II << TemplateParams->getSourceRange();
+ // Recover by pretending this is a static data member template.
+ SC = SC_Static;
+ }
+ } else if (DC->isRecord()) {
+ // This is an out-of-line definition of a static data member.
+ switch (SC) {
+ case SC_None:
+ break;
+ case SC_Static:
+ Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ diag::err_static_out_of_line)
+ << FixItHint::CreateRemoval(
+ D.getDeclSpec().getStorageClassSpecLoc());
+ break;
+ case SC_Auto:
+ case SC_Register:
+ case SC_Extern:
+ // [dcl.stc] p2: The auto or register specifiers shall be applied only
+ // to names of variables declared in a block or to function parameters.
+ // [dcl.stc] p6: The extern specifier cannot be used in the declaration
+ // of class members
+
+ Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ diag::err_storage_class_for_static_member)
+ << FixItHint::CreateRemoval(
+ D.getDeclSpec().getStorageClassSpecLoc());
+ break;
+ case SC_PrivateExtern:
+ llvm_unreachable("C storage class in c++!");
+ }
+ }
+
if (IsVariableTemplateSpecialization) {
SourceLocation TemplateKWLoc =
TemplateParamLists.size() > 0
? TemplateParamLists[0]->getTemplateLoc()
: SourceLocation();
DeclResult Res = ActOnVarTemplateSpecialization(
- S, D, TInfo, TemplateKWLoc, TemplateParams, SC,
+ S, D, TInfo, Previous, TemplateKWLoc, TemplateParams, SC,
IsPartialSpecialization);
if (Res.isInvalid())
return nullptr;
@@ -7834,8 +7662,6 @@ NamedDecl *Sema::ActOnVariableDeclarator(
// the variable (matching the scope specifier), store them.
// An explicit variable template specialization does not own any template
// parameter lists.
- bool IsExplicitSpecialization =
- IsVariableTemplateSpecialization && !IsPartialSpecialization;
unsigned VDTemplateParamLists =
(TemplateParams && !IsExplicitSpecialization) ? 1 : 0;
if (TemplateParamLists.size() > VDTemplateParamLists)
@@ -8050,7 +7876,7 @@ NamedDecl *Sema::ActOnVariableDeclarator(
// In auto-retain/release, infer strong retension for variables of
// retainable type.
- if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewVD))
+ if (getLangOpts().ObjCAutoRefCount && ObjC().inferObjCARCLifetime(NewVD))
NewVD->setInvalidDecl();
// Handle GNU asm-label extension (encoded as an attribute).
@@ -8138,8 +7964,8 @@ NamedDecl *Sema::ActOnVariableDeclarator(
D.setRedeclaration(CheckVariableDeclaration(NewVD, Previous));
} else {
// If this is an explicit specialization of a static data member, check it.
- if (IsMemberSpecialization && !NewVD->isInvalidDecl() &&
- CheckMemberSpecialization(NewVD, Previous))
+ if (IsMemberSpecialization && !IsVariableTemplateSpecialization &&
+ !NewVD->isInvalidDecl() && CheckMemberSpecialization(NewVD, Previous))
NewVD->setInvalidDecl();
// Merge the decl with the existing one if appropriate.
@@ -8154,7 +7980,8 @@ NamedDecl *Sema::ActOnVariableDeclarator(
Previous.clear();
NewVD->setInvalidDecl();
}
- } else if (D.getCXXScopeSpec().isSet()) {
+ } else if (D.getCXXScopeSpec().isSet() &&
+ !IsVariableTemplateSpecialization) {
// No previous declaration in the qualifying scope.
Diag(D.getIdentifierLoc(), diag::err_no_member)
<< Name << computeDeclContext(D.getCXXScopeSpec(), true)
@@ -8162,7 +7989,7 @@ NamedDecl *Sema::ActOnVariableDeclarator(
NewVD->setInvalidDecl();
}
- if (!IsVariableTemplateSpecialization && !IsPlaceholderVariable)
+ if (!IsPlaceholderVariable)
D.setRedeclaration(CheckVariableDeclaration(NewVD, Previous));
// CheckVariableDeclaration will set NewVD as invalid if something is in
@@ -8308,8 +8135,6 @@ static bool shouldWarnIfShadowedDecl(const DiagnosticsEngine &Diags,
return !Diags.isIgnored(diag::warn_decl_shadow, R.getNameLoc());
}
-/// Return the declaration shadowed by the given variable \p D, or null
-/// if it doesn't shadow any declaration or shadowing warnings are disabled.
NamedDecl *Sema::getShadowedDeclaration(const VarDecl *D,
const LookupResult &R) {
if (!shouldWarnIfShadowedDecl(Diags, R))
@@ -8324,8 +8149,6 @@ NamedDecl *Sema::getShadowedDeclaration(const VarDecl *D,
: nullptr;
}
-/// Return the declaration shadowed by the given typedef \p D, or null
-/// if it doesn't shadow any declaration or shadowing warnings are disabled.
NamedDecl *Sema::getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R) {
// Don't warn if typedef declaration is part of a class
@@ -8339,8 +8162,6 @@ NamedDecl *Sema::getShadowedDeclaration(const TypedefNameDecl *D,
return isa<TypedefNameDecl>(ShadowedDecl) ? ShadowedDecl : nullptr;
}
-/// Return the declaration shadowed by the given variable \p D, or null
-/// if it doesn't shadow any declaration or shadowing warnings are disabled.
NamedDecl *Sema::getShadowedDeclaration(const BindingDecl *D,
const LookupResult &R) {
if (!shouldWarnIfShadowedDecl(Diags, R))
@@ -8351,15 +8172,6 @@ NamedDecl *Sema::getShadowedDeclaration(const BindingDecl *D,
: nullptr;
}
-/// Diagnose variable or built-in function shadowing. Implements
-/// -Wshadow.
-///
-/// This method is called whenever a VarDecl is added to a "useful"
-/// scope.
-///
-/// \param ShadowedDecl the declaration that is shadowed by the given variable
-/// \param R the lookup of the name
-///
void Sema::CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R) {
DeclContext *NewDC = D->getDeclContext();
@@ -8476,8 +8288,6 @@ void Sema::CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
Diag(ShadowedDecl->getLocation(), diag::note_previous_declaration);
}
-/// Diagnose shadowing for variables shadowed in the lambda record \p LambdaRD
-/// when these variables are captured by the lambda.
void Sema::DiagnoseShadowingLambdaDecls(const LambdaScopeInfo *LSI) {
for (const auto &Shadow : LSI->ShadowingDecls) {
const NamedDecl *ShadowedDecl = Shadow.ShadowedDecl;
@@ -8505,13 +8315,13 @@ void Sema::DiagnoseShadowingLambdaDecls(const LambdaScopeInfo *LSI) {
}
}
-/// Check -Wshadow without the advantage of a previous lookup.
void Sema::CheckShadow(Scope *S, VarDecl *D) {
if (Diags.isIgnored(diag::warn_decl_shadow, D->getLocation()))
return;
LookupResult R(*this, D->getDeclName(), D->getLocation(),
- Sema::LookupOrdinaryName, Sema::ForVisibleRedeclaration);
+ Sema::LookupOrdinaryName,
+ RedeclarationKind::ForVisibleRedeclaration);
LookupName(R, S);
if (NamedDecl *ShadowedDecl = getShadowedDeclaration(D, R))
CheckShadow(D, ShadowedDecl, R);
@@ -8658,6 +8468,38 @@ static bool checkForConflictWithNonVisibleExternC(Sema &S, const T *ND,
return false;
}
+static bool CheckC23ConstexprVarType(Sema &SemaRef, SourceLocation VarLoc,
+ QualType T) {
+ QualType CanonT = SemaRef.Context.getCanonicalType(T);
+ // C23 6.7.1p5: An object declared with storage-class specifier constexpr or
+ // any of its members, even recursively, shall not have an atomic type, or a
+ // variably modified type, or a type that is volatile or restrict qualified.
+ if (CanonT->isVariablyModifiedType()) {
+ SemaRef.Diag(VarLoc, diag::err_c23_constexpr_invalid_type) << T;
+ return true;
+ }
+
+ // Arrays are qualified by their element type, so get the base type (this
+ // works on non-arrays as well).
+ CanonT = SemaRef.Context.getBaseElementType(CanonT);
+
+ if (CanonT->isAtomicType() || CanonT.isVolatileQualified() ||
+ CanonT.isRestrictQualified()) {
+ SemaRef.Diag(VarLoc, diag::err_c23_constexpr_invalid_type) << T;
+ return true;
+ }
+
+ if (CanonT->isRecordType()) {
+ const RecordDecl *RD = CanonT->getAsRecordDecl();
+ if (llvm::any_of(RD->fields(), [&SemaRef, VarLoc](const FieldDecl *F) {
+ return CheckC23ConstexprVarType(SemaRef, VarLoc, F->getType());
+ }))
+ return true;
+ }
+
+ return false;
+}
+
void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
// If the decl is already known invalid, don't check it.
if (NewVD->isInvalidDecl())
@@ -8817,7 +8659,7 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
return;
}
const auto *ATy = dyn_cast<ConstantArrayType>(T.getTypePtr());
- if (!ATy || ATy->getSize().getSExtValue() != 0) {
+ if (!ATy || ATy->getZExtSize() != 0) {
Diag(NewVD->getLocation(),
diag::err_typecheck_wasm_table_must_have_zero_length);
NewVD->setInvalidDecl();
@@ -8908,6 +8750,12 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
return;
}
+ if (getLangOpts().C23 && NewVD->isConstexpr() &&
+ CheckC23ConstexprVarType(*this, NewVD->getLocation(), T)) {
+ NewVD->setInvalidDecl();
+ return;
+ }
+
if (NewVD->isConstexpr() && !T->isDependentType() &&
RequireLiteralType(NewVD->getLocation(), T,
diag::err_constexpr_var_non_literal)) {
@@ -8918,7 +8766,7 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
// PPC MMA non-pointer types are not allowed as non-local variable types.
if (Context.getTargetInfo().getTriple().isPPC64() &&
!NewVD->isLocalVarDecl() &&
- CheckPPCMMAType(T, NewVD->getLocation())) {
+ PPC().CheckPPCMMAType(T, NewVD->getLocation())) {
NewVD->setInvalidDecl();
return;
}
@@ -8928,30 +8776,32 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
const FunctionDecl *FD = cast<FunctionDecl>(CurContext);
llvm::StringMap<bool> CallerFeatureMap;
Context.getFunctionFeatureMap(CallerFeatureMap, FD);
- if (!Builtin::evaluateRequiredTargetFeatures(
- "sve", CallerFeatureMap)) {
- Diag(NewVD->getLocation(), diag::err_sve_vector_in_non_sve_target) << T;
- NewVD->setInvalidDecl();
- return;
+
+ if (!Builtin::evaluateRequiredTargetFeatures("sve", CallerFeatureMap)) {
+ if (!Builtin::evaluateRequiredTargetFeatures("sme", CallerFeatureMap)) {
+ Diag(NewVD->getLocation(), diag::err_sve_vector_in_non_sve_target) << T;
+ NewVD->setInvalidDecl();
+ return;
+ } else if (!IsArmStreamingFunction(FD,
+ /*IncludeLocallyStreaming=*/true)) {
+ Diag(NewVD->getLocation(),
+ diag::err_sve_vector_in_non_streaming_function)
+ << T;
+ NewVD->setInvalidDecl();
+ return;
+ }
}
}
- if (T->isRVVSizelessBuiltinType())
- checkRVVTypeSupport(T, NewVD->getLocation(), cast<Decl>(CurContext));
+ if (T->isRVVSizelessBuiltinType() && isa<FunctionDecl>(CurContext)) {
+ const FunctionDecl *FD = cast<FunctionDecl>(CurContext);
+ llvm::StringMap<bool> CallerFeatureMap;
+ Context.getFunctionFeatureMap(CallerFeatureMap, FD);
+ RISCV().checkRVVTypeSupport(T, NewVD->getLocation(), cast<Decl>(CurContext),
+ CallerFeatureMap);
+ }
}
-/// Perform semantic checking on a newly-created variable
-/// declaration.
-///
-/// This routine performs all of the type-checking required for a
-/// variable declaration once it has been built. It is used both to
-/// check variables after they have been parsed and their declarators
-/// have been translated into a declaration, and to check variables
-/// that have been instantiated from a template.
-///
-/// Sets NewVD->isInvalidDecl() if an error was encountered.
-///
-/// Returns true if the variable declaration is a redeclaration.
bool Sema::CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous) {
CheckVariableDeclarationType(NewVD);
@@ -8972,8 +8822,6 @@ bool Sema::CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous) {
return false;
}
-/// AddOverriddenMethods - See if a method overrides any in the base classes,
-/// and if so, check that it's a valid override and remember it.
bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
llvm::SmallPtrSet<const CXXMethodDecl*, 4> Overridden;
@@ -9108,7 +8956,7 @@ static NamedDecl *DiagnoseInvalidRedeclaration(
LookupResult Prev(SemaRef, Name, NewFD->getLocation(),
IsLocalFriend ? Sema::LookupLocalFriendName
: Sema::LookupOrdinaryName,
- Sema::ForVisibleRedeclaration);
+ RedeclarationKind::ForVisibleRedeclaration);
NewFD->setInvalidDecl();
if (IsLocalFriend)
@@ -9226,19 +9074,20 @@ static NamedDecl *DiagnoseInvalidRedeclaration(
<< Idx << FDParam->getType()
<< NewFD->getParamDecl(Idx - 1)->getType();
} else if (FDisConst != NewFDisConst) {
- SemaRef.Diag(FD->getLocation(), diag::note_member_def_close_const_match)
- << NewFDisConst << FD->getSourceRange().getEnd()
- << (NewFDisConst
- ? FixItHint::CreateRemoval(ExtraArgs.D.getFunctionTypeInfo()
- .getConstQualifierLoc())
- : FixItHint::CreateInsertion(ExtraArgs.D.getFunctionTypeInfo()
- .getRParenLoc()
- .getLocWithOffset(1),
- " const"));
- } else
+ auto DB = SemaRef.Diag(FD->getLocation(),
+ diag::note_member_def_close_const_match)
+ << NewFDisConst << FD->getSourceRange().getEnd();
+ if (const auto &FTI = ExtraArgs.D.getFunctionTypeInfo(); !NewFDisConst)
+ DB << FixItHint::CreateInsertion(FTI.getRParenLoc().getLocWithOffset(1),
+ " const");
+ else if (FTI.hasMethodTypeQualifiers() &&
+ FTI.getConstQualifierLoc().isValid())
+ DB << FixItHint::CreateRemoval(FTI.getConstQualifierLoc());
+ } else {
SemaRef.Diag(FD->getLocation(),
IsMember ? diag::note_member_def_close_match
: diag::note_local_decl_close_match);
+ }
}
return nullptr;
}
@@ -9290,6 +9139,22 @@ static FunctionDecl *CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
FunctionDecl *NewFD = nullptr;
bool isInline = D.getDeclSpec().isInlineSpecified();
+ ConstexprSpecKind ConstexprKind = D.getDeclSpec().getConstexprSpecifier();
+ if (ConstexprKind == ConstexprSpecKind::Constinit ||
+ (SemaRef.getLangOpts().C23 &&
+ ConstexprKind == ConstexprSpecKind::Constexpr)) {
+
+ if (SemaRef.getLangOpts().C23)
+ SemaRef.Diag(D.getDeclSpec().getConstexprSpecLoc(),
+ diag::err_c23_constexpr_not_variable);
+ else
+ SemaRef.Diag(D.getDeclSpec().getConstexprSpecLoc(),
+ diag::err_constexpr_wrong_decl_kind)
+ << static_cast<int>(ConstexprKind);
+ ConstexprKind = ConstexprSpecKind::Unspecified;
+ D.getMutableDeclSpec().ClearConstexprSpec();
+ }
+
if (!SemaRef.getLangOpts().CPlusPlus) {
// Determine whether the function was written with a prototype. This is
// true when:
@@ -9323,15 +9188,6 @@ static FunctionDecl *CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
}
ExplicitSpecifier ExplicitSpecifier = D.getDeclSpec().getExplicitSpecifier();
-
- ConstexprSpecKind ConstexprKind = D.getDeclSpec().getConstexprSpecifier();
- if (ConstexprKind == ConstexprSpecKind::Constinit) {
- SemaRef.Diag(D.getDeclSpec().getConstexprSpecLoc(),
- diag::err_constexpr_wrong_decl_kind)
- << static_cast<int>(ConstexprKind);
- ConstexprKind = ConstexprSpecKind::Unspecified;
- D.getMutableDeclSpec().ClearConstexprSpec();
- }
Expr *TrailingRequiresClause = D.getTrailingRequiresClause();
SemaRef.CheckExplicitObjectMemberFunction(DC, D, Name, R);
@@ -9819,7 +9675,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
SmallVector<TemplateParameterList *, 4> TemplateParamLists;
llvm::append_range(TemplateParamLists, TemplateParamListsRef);
if (TemplateParameterList *Invented = D.getInventedTemplateParameterList()) {
- if (!TemplateParamLists.empty() &&
+ if (!TemplateParamLists.empty() && !TemplateParamLists.back()->empty() &&
Invented->getDepth() == TemplateParamLists.back()->getDepth())
TemplateParamLists.back() = Invented;
else
@@ -9876,8 +9732,9 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// the function decl is created above).
// FIXME: We need a better way to separate C++ standard and clang modules.
bool ImplicitInlineCXX20 = !getLangOpts().CPlusPlusModules ||
+ NewFD->isConstexpr() || NewFD->isConsteval() ||
!NewFD->getOwningModule() ||
- NewFD->getOwningModule()->isGlobalModule() ||
+ NewFD->isFromGlobalModule() ||
NewFD->getOwningModule()->isHeaderLikeModule();
bool isInline = D.getDeclSpec().isInlineSpecified();
bool isVirtual = D.getDeclSpec().isVirtualSpecified();
@@ -10086,23 +9943,6 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
Diag(D.getDeclSpec().getVirtualSpecLoc(), diag::err_auto_fn_virtual);
}
- if (getLangOpts().CPlusPlus14 &&
- (NewFD->isDependentContext() ||
- (isFriend && CurContext->isDependentContext())) &&
- NewFD->getReturnType()->isUndeducedType()) {
- // If the function template is referenced directly (for instance, as a
- // member of the current instantiation), pretend it has a dependent type.
- // This is not really justified by the standard, but is the only sane
- // thing to do.
- // FIXME: For a friend function, we have not marked the function as being
- // a friend yet, so 'isDependentContext' on the FD doesn't work.
- const FunctionProtoType *FPT =
- NewFD->getType()->castAs<FunctionProtoType>();
- QualType Result = SubstAutoTypeDependent(FPT->getReturnType());
- NewFD->setType(Context.getFunctionType(Result, FPT->getParamTypes(),
- FPT->getExtProtoInfo()));
- }
-
// C++ [dcl.fct.spec]p3:
// The inline specifier shall not appear on a block scope function
// declaration.
@@ -10220,25 +10060,45 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
NewFD->setImplicitlyInline(ImplicitInlineCXX20);
}
- if (SC == SC_Static && isa<CXXMethodDecl>(NewFD) &&
- !CurContext->isRecord()) {
- // C++ [class.static]p1:
- // A data or function member of a class may be declared static
- // in a class definition, in which case it is a static member of
- // the class.
+ if (!isFriend && SC != SC_None) {
+ // C++ [temp.expl.spec]p2:
+ // The declaration in an explicit-specialization shall not be an
+ // export-declaration. An explicit specialization shall not use a
+ // storage-class-specifier other than thread_local.
+ //
+ // We diagnose friend declarations with storage-class-specifiers
+ // elsewhere.
+ if (isFunctionTemplateSpecialization || isMemberSpecialization) {
+ Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ diag::ext_explicit_specialization_storage_class)
+ << FixItHint::CreateRemoval(
+ D.getDeclSpec().getStorageClassSpecLoc());
+ }
- // Complain about the 'static' specifier if it's on an out-of-line
- // member function definition.
+ if (SC == SC_Static && !CurContext->isRecord() && DC->isRecord()) {
+ assert(isa<CXXMethodDecl>(NewFD) &&
+ "Out-of-line member function should be a CXXMethodDecl");
+ // C++ [class.static]p1:
+ // A data or function member of a class may be declared static
+ // in a class definition, in which case it is a static member of
+ // the class.
- // MSVC permits the use of a 'static' storage specifier on an out-of-line
- // member function template declaration and class member template
- // declaration (MSVC versions before 2015), warn about this.
- Diag(D.getDeclSpec().getStorageClassSpecLoc(),
- ((!getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2015) &&
- cast<CXXRecordDecl>(DC)->getDescribedClassTemplate()) ||
- (getLangOpts().MSVCCompat && NewFD->getDescribedFunctionTemplate()))
- ? diag::ext_static_out_of_line : diag::err_static_out_of_line)
- << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
+ // Complain about the 'static' specifier if it's on an out-of-line
+ // member function definition.
+
+ // MSVC permits the use of a 'static' storage specifier on an
+ // out-of-line member function template declaration and class member
+ // template declaration (MSVC versions before 2015), warn about this.
+ Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ ((!getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2015) &&
+ cast<CXXRecordDecl>(DC)->getDescribedClassTemplate()) ||
+ (getLangOpts().MSVCCompat &&
+ NewFD->getDescribedFunctionTemplate()))
+ ? diag::ext_static_out_of_line
+ : diag::err_static_out_of_line)
+ << FixItHint::CreateRemoval(
+ D.getDeclSpec().getStorageClassSpecLoc());
+ }
}
// C++11 [except.spec]p15:
@@ -10260,7 +10120,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// check at the end of the TU (or when the PMF starts) to see that we
// have a definition at that point.
if (isInline && !D.isFunctionDefinition() && getLangOpts().CPlusPlus20 &&
- NewFD->hasOwningModule() && NewFD->getOwningModule()->isNamedModule()) {
+ NewFD->isInNamedModule()) {
PendingInlineFuncDecls.insert(NewFD);
}
}
@@ -10500,16 +10360,70 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
diag::ext_operator_new_delete_declared_inline)
<< NewFD->getDeclName();
+ if (Expr *TRC = NewFD->getTrailingRequiresClause()) {
+ // C++20 [dcl.decl.general]p4:
+ // The optional requires-clause in an init-declarator or
+ // member-declarator shall be present only if the declarator declares a
+ // templated function.
+ //
+ // C++20 [temp.pre]p8:
+ // An entity is templated if it is
+ // - a template,
+ // - an entity defined or created in a templated entity,
+ // - a member of a templated entity,
+ // - an enumerator for an enumeration that is a templated entity, or
+ // - the closure type of a lambda-expression appearing in the
+ // declaration of a templated entity.
+ //
+ // [Note 6: A local class, a local or block variable, or a friend
+ // function defined in a templated entity is a templated entity.
+ // — end note]
+ //
+ // A templated function is a function template or a function that is
+ // templated. A templated class is a class template or a class that is
+ // templated. A templated variable is a variable template or a variable
+ // that is templated.
+ if (!FunctionTemplate) {
+ if (isFunctionTemplateSpecialization || isMemberSpecialization) {
+ // C++ [temp.expl.spec]p8 (proposed resolution for CWG2847):
+ // An explicit specialization shall not have a trailing
+ // requires-clause unless it declares a function template.
+ //
+ // Since a friend function template specialization cannot be
+ // definition, and since a non-template friend declaration with a
+ // trailing requires-clause must be a definition, we diagnose
+ // friend function template specializations with trailing
+ // requires-clauses on the same path as explicit specializations
+ // even though they aren't necessarily prohibited by the same
+ // language rule.
+ Diag(TRC->getBeginLoc(), diag::err_non_temp_spec_requires_clause)
+ << isFriend;
+ } else if (isFriend && NewFD->isTemplated() &&
+ !D.isFunctionDefinition()) {
+ // C++ [temp.friend]p9:
+ // A non-template friend declaration with a requires-clause shall be
+ // a definition.
+ Diag(NewFD->getBeginLoc(),
+ diag::err_non_temp_friend_decl_with_requires_clause_must_be_def);
+ NewFD->setInvalidDecl();
+ } else if (!NewFD->isTemplated() ||
+ !(isa<CXXMethodDecl>(NewFD) || D.isFunctionDefinition())) {
+ Diag(TRC->getBeginLoc(),
+ diag::err_constrained_non_templated_function);
+ }
+ }
+ }
+
// We do not add HD attributes to specializations here because
// they may have different constexpr-ness compared to their
- // templates and, after maybeAddCUDAHostDeviceAttrs() is applied,
+ // templates and, after maybeAddHostDeviceAttrs() is applied,
// may end up with different effective targets. Instead, a
// specialization inherits its target attributes from its template
// in the CheckFunctionTemplateSpecialization() call below.
if (getLangOpts().CUDA && !isFunctionTemplateSpecialization)
- maybeAddCUDAHostDeviceAttrs(NewFD, Previous);
+ CUDA().maybeAddHostDeviceAttrs(NewFD, Previous);
- // Handle explict specializations of function templates
+ // Handle explicit specializations of function templates
// and friend function declarations with an explicit
// template argument list.
if (isFunctionTemplateSpecialization) {
@@ -10552,27 +10466,6 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
Previous))
NewFD->setInvalidDecl();
}
-
- // C++ [dcl.stc]p1:
- // A storage-class-specifier shall not be specified in an explicit
- // specialization (14.7.3)
- // FIXME: We should be checking this for dependent specializations.
- FunctionTemplateSpecializationInfo *Info =
- NewFD->getTemplateSpecializationInfo();
- if (Info && SC != SC_None) {
- if (SC != Info->getTemplate()->getTemplatedDecl()->getStorageClass())
- Diag(NewFD->getLocation(),
- diag::err_explicit_specialization_inconsistent_storage_class)
- << SC
- << FixItHint::CreateRemoval(
- D.getDeclSpec().getStorageClassSpecLoc());
-
- else
- Diag(NewFD->getLocation(),
- diag::ext_explicit_specialization_storage_class)
- << FixItHint::CreateRemoval(
- D.getDeclSpec().getStorageClassSpecLoc());
- }
} else if (isMemberSpecialization && isa<CXXMethodDecl>(NewFD)) {
if (CheckMemberSpecialization(NewFD, Previous))
NewFD->setInvalidDecl();
@@ -10717,10 +10610,10 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (getLangOpts().HLSL && D.isFunctionDefinition()) {
// Any top level function could potentially be specified as an entry.
if (!NewFD->isInvalidDecl() && S->getDepth() == 0 && Name.isIdentifier())
- ActOnHLSLTopLevelFunction(NewFD);
+ HLSL().ActOnTopLevelFunction(NewFD);
if (NewFD->hasAttr<HLSLShaderAttr>())
- CheckHLSLEntryPoint(NewFD);
+ HLSL().CheckEntryPoint(NewFD);
}
// If this is the first declaration of a library builtin function, add
@@ -10775,7 +10668,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// If there's a #pragma clang arc_cf_code_audited in scope, consider
// marking the function.
- AddCFAuditedAttribute(NewFD);
+ ObjC().AddCFAuditedAttribute(NewFD);
// If this is a function definition, check if we have to apply any
// attributes (i.e. optnone and no_builtin) due to a pragma.
@@ -10805,12 +10698,12 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (getLangOpts().CUDA) {
IdentifierInfo *II = NewFD->getIdentifier();
- if (II && II->isStr(getCudaConfigureFuncName()) &&
+ if (II && II->isStr(CUDA().getConfigureFuncName()) &&
!NewFD->isInvalidDecl() &&
NewFD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
if (!R->castAs<FunctionType>()->getReturnType()->isScalarType())
Diag(NewFD->getLocation(), diag::err_config_scalar_return)
- << getCudaConfigureFuncName();
+ << CUDA().getConfigureFuncName();
Context.setcudaConfigureCallDecl(NewFD);
}
@@ -10951,6 +10844,14 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
break;
}
+ // Similar to no_builtin logic above, at this point of the code
+ // FunctionDecl::isThisDeclarationADefinition() always returns `false`
+ // because Sema::ActOnStartOfFunctionDef has not been called yet.
+ if (Context.getTargetInfo().allowDebugInfoForExternalRef() &&
+ !NewFD->isInvalidDecl() &&
+ D.getFunctionDefinitionKind() == FunctionDefinitionKind::Declaration)
+ ExternalDeclarations.push_back(NewFD);
+
return NewFD;
}
@@ -10990,15 +10891,6 @@ static Attr *getImplicitCodeSegAttrFromClass(Sema &S, const FunctionDecl *FD) {
return nullptr;
}
-/// Returns an implicit CodeSegAttr if a __declspec(code_seg) is found on a
-/// containing class. Otherwise it will return implicit SectionAttr if the
-/// function is a definition and there is an active value on CodeSegStack
-/// (from the current #pragma code-seg value).
-///
-/// \param FD Function being declared.
-/// \param IsDefinition Whether it is a definition or just a declaration.
-/// \returns A CodeSegAttr or SectionAttr to apply to the function or
-/// nullptr if no attribute should be added.
Attr *Sema::getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition) {
if (Attr *A = getImplicitCodeSegAttrFromClass(*this, FD))
@@ -11011,14 +10903,6 @@ Attr *Sema::getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
return nullptr;
}
-/// Determines if we can perform a correct type check for \p D as a
-/// redeclaration of \p PrevDecl. If not, we can generally still perform a
-/// best-effort check.
-///
-/// \param NewD The new declaration.
-/// \param OldD The old declaration.
-/// \param NewT The portion of the type of the new declaration to check.
-/// \param OldT The portion of the type of the old declaration to check.
bool Sema::canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT) {
if (!NewD->getLexicalDeclContext()->isDependentContext())
@@ -11043,15 +10927,6 @@ bool Sema::canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
return true;
}
-/// Checks if the new declaration declared in dependent context must be
-/// put in the same redeclaration chain as the specified declaration.
-///
-/// \param D Declaration that is checked.
-/// \param PrevDecl Previous declaration found with proper lookup method for the
-/// same declaration name.
-/// \returns True if D must be added to the redeclaration chain which PrevDecl
-/// belongs to.
-///
bool Sema::shouldLinkDependentDeclWithPrevious(Decl *D, Decl *PrevDecl) {
if (!D->getLexicalDeclContext()->isDependentContext())
return true;
@@ -11140,6 +11015,9 @@ static bool AttrCompatibleWithMultiVersion(attr::Kind Kind,
switch (Kind) {
default:
return false;
+ case attr::ArmLocallyStreaming:
+ return MVKind == MultiVersionKind::TargetVersion ||
+ MVKind == MultiVersionKind::TargetClones;
case attr::Used:
return MVKind == MultiVersionKind::Target;
case attr::NonNull:
@@ -11173,11 +11051,13 @@ static bool checkNonMultiVersionCompatAttributes(Sema &S,
return Diagnose(S, A);
break;
case attr::TargetVersion:
- if (MVKind != MultiVersionKind::TargetVersion)
+ if (MVKind != MultiVersionKind::TargetVersion &&
+ MVKind != MultiVersionKind::TargetClones)
return Diagnose(S, A);
break;
case attr::TargetClones:
- if (MVKind != MultiVersionKind::TargetClones)
+ if (MVKind != MultiVersionKind::TargetClones &&
+ MVKind != MultiVersionKind::TargetVersion)
return Diagnose(S, A);
break;
default:
@@ -11274,7 +11154,21 @@ bool Sema::areMultiversionVariantFunctionsCompatible(
FunctionType::ExtInfo OldTypeInfo = OldType->getExtInfo();
FunctionType::ExtInfo NewTypeInfo = NewType->getExtInfo();
- if (OldTypeInfo.getCC() != NewTypeInfo.getCC())
+ const auto *OldFPT = OldFD->getType()->getAs<FunctionProtoType>();
+ const auto *NewFPT = NewFD->getType()->getAs<FunctionProtoType>();
+
+ bool ArmStreamingCCMismatched = false;
+ if (OldFPT && NewFPT) {
+ unsigned Diff =
+ OldFPT->getAArch64SMEAttributes() ^ NewFPT->getAArch64SMEAttributes();
+ // Arm-streaming, arm-streaming-compatible and non-streaming versions
+ // cannot be mixed.
+ if (Diff & (FunctionType::SME_PStateSMEnabledMask |
+ FunctionType::SME_PStateSMCompatibleMask))
+ ArmStreamingCCMismatched = true;
+ }
+
+ if (OldTypeInfo.getCC() != NewTypeInfo.getCC() || ArmStreamingCCMismatched)
return Diag(DiffDiagIDAt.first, DiffDiagIDAt.second) << CallingConv;
QualType OldReturnType = OldType->getReturnType();
@@ -11294,9 +11188,8 @@ bool Sema::areMultiversionVariantFunctionsCompatible(
if (!CLinkageMayDiffer && OldFD->isExternC() != NewFD->isExternC())
return Diag(DiffDiagIDAt.first, DiffDiagIDAt.second) << LanguageLinkage;
- if (CheckEquivalentExceptionSpec(
- OldFD->getType()->getAs<FunctionProtoType>(), OldFD->getLocation(),
- NewFD->getType()->getAs<FunctionProtoType>(), NewFD->getLocation()))
+ if (CheckEquivalentExceptionSpec(OldFPT, OldFD->getLocation(), NewFPT,
+ NewFD->getLocation()))
return true;
}
return false;
@@ -11354,9 +11247,13 @@ static bool CheckMultiVersionFirstFunction(Sema &S, FunctionDecl *FD) {
"Function lacks multiversion attribute");
const auto *TA = FD->getAttr<TargetAttr>();
const auto *TVA = FD->getAttr<TargetVersionAttr>();
- // Target and target_version only causes MV if it is default, otherwise this
- // is a normal function.
- if ((TA && !TA->isDefaultVersion()) || (TVA && !TVA->isDefaultVersion()))
+ // The target attribute only causes MV if this declaration is the default,
+ // otherwise it is treated as a normal function.
+ if (TA && !TA->isDefaultVersion())
+ return false;
+ // The target_version attribute only causes Multiversioning if this
+ // declaration is NOT the default version.
+ if (TVA && TVA->isDefaultVersion())
return false;
if ((TA || TVA) && CheckMultiVersionValue(S, FD)) {
@@ -11382,21 +11279,45 @@ static bool PreviousDeclsHaveMultiVersionAttribute(const FunctionDecl *FD) {
return false;
}
-static bool CheckTargetCausesMultiVersioning(Sema &S, FunctionDecl *OldFD,
- FunctionDecl *NewFD,
- bool &Redeclaration,
- NamedDecl *&OldDecl,
- LookupResult &Previous) {
+static void patchDefaultTargetVersion(FunctionDecl *From, FunctionDecl *To) {
+ if (!From->getASTContext().getTargetInfo().getTriple().isAArch64())
+ return;
+
+ MultiVersionKind MVKindFrom = From->getMultiVersionKind();
+ MultiVersionKind MVKindTo = To->getMultiVersionKind();
+
+ if (MVKindTo == MultiVersionKind::None &&
+ (MVKindFrom == MultiVersionKind::TargetVersion ||
+ MVKindFrom == MultiVersionKind::TargetClones))
+ To->addAttr(TargetVersionAttr::CreateImplicit(
+ To->getASTContext(), "default", To->getSourceRange()));
+}
+
+static bool CheckDeclarationCausesMultiVersioning(Sema &S, FunctionDecl *OldFD,
+ FunctionDecl *NewFD,
+ bool &Redeclaration,
+ NamedDecl *&OldDecl,
+ LookupResult &Previous) {
+ assert(!OldFD->isMultiVersion() && "Unexpected MultiVersion");
+
+ // The definitions should be allowed in any order. If we have discovered
+ // a new target version and the preceding was the default, then add the
+ // corresponding attribute to it.
+ patchDefaultTargetVersion(NewFD, OldFD);
+
const auto *NewTA = NewFD->getAttr<TargetAttr>();
const auto *NewTVA = NewFD->getAttr<TargetVersionAttr>();
const auto *OldTA = OldFD->getAttr<TargetAttr>();
- const auto *OldTVA = OldFD->getAttr<TargetVersionAttr>();
+
// If the old decl is NOT MultiVersioned yet, and we don't cause that
// to change, this is a simple redeclaration.
- if ((NewTA && !NewTA->isDefaultVersion() &&
- (!OldTA || OldTA->getFeaturesStr() == NewTA->getFeaturesStr())) ||
- (NewTVA && !NewTVA->isDefaultVersion() &&
- (!OldTVA || OldTVA->getName() == NewTVA->getName())))
+ if (NewTA && !NewTA->isDefaultVersion() &&
+ (!OldTA || OldTA->getFeaturesStr() == NewTA->getFeaturesStr()))
+ return false;
+
+ // The target_version attribute only causes Multiversioning if this
+ // declaration is NOT the default version.
+ if (NewTVA && NewTVA->isDefaultVersion())
return false;
// Otherwise, this decl causes MultiVersioning.
@@ -11413,9 +11334,7 @@ static bool CheckTargetCausesMultiVersioning(Sema &S, FunctionDecl *OldFD,
}
// If this is 'default', permit the forward declaration.
- if (!OldFD->isMultiVersion() &&
- ((NewTA && NewTA->isDefaultVersion() && !OldTA) ||
- (NewTVA && NewTVA->isDefaultVersion() && !OldTVA))) {
+ if (NewTA && NewTA->isDefaultVersion() && !OldTA) {
Redeclaration = true;
OldDecl = OldFD;
OldFD->setIsMultiVersion();
@@ -11447,22 +11366,6 @@ static bool CheckTargetCausesMultiVersioning(Sema &S, FunctionDecl *OldFD,
}
}
- if (NewTVA) {
- llvm::SmallVector<StringRef, 8> Feats;
- OldTVA->getFeatures(Feats);
- llvm::sort(Feats);
- llvm::SmallVector<StringRef, 8> NewFeats;
- NewTVA->getFeatures(NewFeats);
- llvm::sort(NewFeats);
-
- if (Feats == NewFeats) {
- S.Diag(NewFD->getLocation(), diag::err_multiversion_duplicate);
- S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
- NewFD->setInvalidDecl();
- return true;
- }
- }
-
for (const auto *FD : OldFD->redecls()) {
const auto *CurTA = FD->getAttr<TargetAttr>();
const auto *CurTVA = FD->getAttr<TargetVersionAttr>();
@@ -11487,36 +11390,60 @@ static bool CheckTargetCausesMultiVersioning(Sema &S, FunctionDecl *OldFD,
return false;
}
-static bool MultiVersionTypesCompatible(MultiVersionKind Old,
- MultiVersionKind New) {
- if (Old == New || Old == MultiVersionKind::None ||
- New == MultiVersionKind::None)
+static bool MultiVersionTypesCompatible(FunctionDecl *Old, FunctionDecl *New) {
+ MultiVersionKind OldKind = Old->getMultiVersionKind();
+ MultiVersionKind NewKind = New->getMultiVersionKind();
+
+ if (OldKind == NewKind || OldKind == MultiVersionKind::None ||
+ NewKind == MultiVersionKind::None)
return true;
- return (Old == MultiVersionKind::CPUDispatch &&
- New == MultiVersionKind::CPUSpecific) ||
- (Old == MultiVersionKind::CPUSpecific &&
- New == MultiVersionKind::CPUDispatch);
+ if (Old->getASTContext().getTargetInfo().getTriple().isAArch64()) {
+ switch (OldKind) {
+ case MultiVersionKind::TargetVersion:
+ return NewKind == MultiVersionKind::TargetClones;
+ case MultiVersionKind::TargetClones:
+ return NewKind == MultiVersionKind::TargetVersion;
+ default:
+ return false;
+ }
+ } else {
+ switch (OldKind) {
+ case MultiVersionKind::CPUDispatch:
+ return NewKind == MultiVersionKind::CPUSpecific;
+ case MultiVersionKind::CPUSpecific:
+ return NewKind == MultiVersionKind::CPUDispatch;
+ default:
+ return false;
+ }
+ }
}
/// Check the validity of a new function declaration being added to an existing
/// multiversioned declaration collection.
static bool CheckMultiVersionAdditionalDecl(
Sema &S, FunctionDecl *OldFD, FunctionDecl *NewFD,
- MultiVersionKind NewMVKind, const CPUDispatchAttr *NewCPUDisp,
- const CPUSpecificAttr *NewCPUSpec, const TargetClonesAttr *NewClones,
- bool &Redeclaration, NamedDecl *&OldDecl, LookupResult &Previous) {
- const auto *NewTA = NewFD->getAttr<TargetAttr>();
- const auto *NewTVA = NewFD->getAttr<TargetVersionAttr>();
- MultiVersionKind OldMVKind = OldFD->getMultiVersionKind();
+ const CPUDispatchAttr *NewCPUDisp, const CPUSpecificAttr *NewCPUSpec,
+ const TargetClonesAttr *NewClones, bool &Redeclaration, NamedDecl *&OldDecl,
+ LookupResult &Previous) {
+
// Disallow mixing of multiversioning types.
- if (!MultiVersionTypesCompatible(OldMVKind, NewMVKind)) {
+ if (!MultiVersionTypesCompatible(OldFD, NewFD)) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_types_mixed);
S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
return true;
}
+ // Add the default target_version attribute if it's missing.
+ patchDefaultTargetVersion(OldFD, NewFD);
+ patchDefaultTargetVersion(NewFD, OldFD);
+
+ const auto *NewTA = NewFD->getAttr<TargetAttr>();
+ const auto *NewTVA = NewFD->getAttr<TargetVersionAttr>();
+ MultiVersionKind NewMVKind = NewFD->getMultiVersionKind();
+ [[maybe_unused]] MultiVersionKind OldMVKind = OldFD->getMultiVersionKind();
+
ParsedTargetAttr NewParsed;
if (NewTA) {
NewParsed = S.getASTContext().getTargetInfo().parseTargetAttr(
@@ -11545,19 +11472,6 @@ static bool CheckMultiVersionAdditionalDecl(
S.IsOverload(NewFD, CurFD, UseMemberUsingDeclRules))
continue;
- if (NewMVKind == MultiVersionKind::None &&
- OldMVKind == MultiVersionKind::TargetVersion) {
- NewFD->addAttr(TargetVersionAttr::CreateImplicit(
- S.Context, "default", NewFD->getSourceRange()));
- NewFD->setIsMultiVersion();
- NewMVKind = MultiVersionKind::TargetVersion;
- if (!NewTVA) {
- NewTVA = NewFD->getAttr<TargetVersionAttr>();
- NewTVA->getFeatures(NewFeats);
- llvm::sort(NewFeats);
- }
- }
-
switch (NewMVKind) {
case MultiVersionKind::None:
assert(OldMVKind == MultiVersionKind::TargetClones &&
@@ -11585,43 +11499,81 @@ static bool CheckMultiVersionAdditionalDecl(
break;
}
case MultiVersionKind::TargetVersion: {
- const auto *CurTVA = CurFD->getAttr<TargetVersionAttr>();
- if (CurTVA->getName() == NewTVA->getName()) {
- NewFD->setIsMultiVersion();
- Redeclaration = true;
- OldDecl = ND;
- return false;
- }
- llvm::SmallVector<StringRef, 8> CurFeats;
- if (CurTVA) {
+ if (const auto *CurTVA = CurFD->getAttr<TargetVersionAttr>()) {
+ if (CurTVA->getName() == NewTVA->getName()) {
+ NewFD->setIsMultiVersion();
+ Redeclaration = true;
+ OldDecl = ND;
+ return false;
+ }
+ llvm::SmallVector<StringRef, 8> CurFeats;
CurTVA->getFeatures(CurFeats);
llvm::sort(CurFeats);
- }
- if (CurFeats == NewFeats) {
- S.Diag(NewFD->getLocation(), diag::err_multiversion_duplicate);
- S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
- NewFD->setInvalidDecl();
- return true;
+
+ if (CurFeats == NewFeats) {
+ S.Diag(NewFD->getLocation(), diag::err_multiversion_duplicate);
+ S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+ } else if (const auto *CurClones = CurFD->getAttr<TargetClonesAttr>()) {
+ // Default
+ if (NewFeats.empty())
+ break;
+
+ for (unsigned I = 0; I < CurClones->featuresStrs_size(); ++I) {
+ llvm::SmallVector<StringRef, 8> CurFeats;
+ CurClones->getFeatures(CurFeats, I);
+ llvm::sort(CurFeats);
+
+ if (CurFeats == NewFeats) {
+ S.Diag(NewFD->getLocation(), diag::err_multiversion_duplicate);
+ S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+ }
}
break;
}
case MultiVersionKind::TargetClones: {
- const auto *CurClones = CurFD->getAttr<TargetClonesAttr>();
+ assert(NewClones && "MultiVersionKind does not match attribute type");
+ if (const auto *CurClones = CurFD->getAttr<TargetClonesAttr>()) {
+ if (CurClones->featuresStrs_size() != NewClones->featuresStrs_size() ||
+ !std::equal(CurClones->featuresStrs_begin(),
+ CurClones->featuresStrs_end(),
+ NewClones->featuresStrs_begin())) {
+ S.Diag(NewFD->getLocation(), diag::err_target_clone_doesnt_match);
+ S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+ } else if (const auto *CurTVA = CurFD->getAttr<TargetVersionAttr>()) {
+ llvm::SmallVector<StringRef, 8> CurFeats;
+ CurTVA->getFeatures(CurFeats);
+ llvm::sort(CurFeats);
+
+ // Default
+ if (CurFeats.empty())
+ break;
+
+ for (unsigned I = 0; I < NewClones->featuresStrs_size(); ++I) {
+ NewFeats.clear();
+ NewClones->getFeatures(NewFeats, I);
+ llvm::sort(NewFeats);
+
+ if (CurFeats == NewFeats) {
+ S.Diag(NewFD->getLocation(), diag::err_multiversion_duplicate);
+ S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+ }
+ break;
+ }
Redeclaration = true;
OldDecl = CurFD;
NewFD->setIsMultiVersion();
-
- if (CurClones && NewClones &&
- (CurClones->featuresStrs_size() != NewClones->featuresStrs_size() ||
- !std::equal(CurClones->featuresStrs_begin(),
- CurClones->featuresStrs_end(),
- NewClones->featuresStrs_begin()))) {
- S.Diag(NewFD->getLocation(), diag::err_target_clone_doesnt_match);
- S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
- NewFD->setInvalidDecl();
- return true;
- }
-
return false;
}
case MultiVersionKind::CPUSpecific:
@@ -11747,13 +11699,19 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
return false;
}
+ const llvm::Triple &T = S.getASTContext().getTargetInfo().getTriple();
+
// Target attribute on AArch64 is not used for multiversioning
- if (NewTA && S.getASTContext().getTargetInfo().getTriple().isAArch64())
+ if (NewTA && T.isAArch64())
+ return false;
+
+ // Target attribute on RISCV is not used for multiversioning
+ if (NewTA && T.isRISCV())
return false;
if (!OldDecl || !OldDecl->getAsFunction() ||
- OldDecl->getDeclContext()->getRedeclContext() !=
- NewFD->getDeclContext()->getRedeclContext()) {
+ !OldDecl->getDeclContext()->getRedeclContext()->Equals(
+ NewFD->getDeclContext()->getRedeclContext())) {
// If there's no previous declaration, AND this isn't attempting to cause
// multiversioning, this isn't an error condition.
if (MVKind == MultiVersionKind::None)
@@ -11763,24 +11721,8 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
FunctionDecl *OldFD = OldDecl->getAsFunction();
- if (!OldFD->isMultiVersion() && MVKind == MultiVersionKind::None) {
- if (NewTVA || !OldFD->getAttr<TargetVersionAttr>())
- return false;
- if (!NewFD->getType()->getAs<FunctionProtoType>()) {
- // Multiversion declaration doesn't have prototype.
- S.Diag(NewFD->getLocation(), diag::err_multiversion_noproto);
- NewFD->setInvalidDecl();
- } else {
- // No "target_version" attribute is equivalent to "default" attribute.
- NewFD->addAttr(TargetVersionAttr::CreateImplicit(
- S.Context, "default", NewFD->getSourceRange()));
- NewFD->setIsMultiVersion();
- OldFD->setIsMultiVersion();
- OldDecl = OldFD;
- Redeclaration = true;
- }
- return true;
- }
+ if (!OldFD->isMultiVersion() && MVKind == MultiVersionKind::None)
+ return false;
// Multiversioned redeclarations aren't allowed to omit the attribute, except
// for target_clones and target_version.
@@ -11797,8 +11739,8 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
switch (MVKind) {
case MultiVersionKind::Target:
case MultiVersionKind::TargetVersion:
- return CheckTargetCausesMultiVersioning(S, OldFD, NewFD, Redeclaration,
- OldDecl, Previous);
+ return CheckDeclarationCausesMultiVersioning(
+ S, OldFD, NewFD, Redeclaration, OldDecl, Previous);
case MultiVersionKind::TargetClones:
if (OldFD->isUsed(false)) {
NewFD->setInvalidDecl();
@@ -11817,7 +11759,7 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
// At this point, we have a multiversion function decl (in OldFD) AND an
// appropriate attribute in the current function decl. Resolve that these are
// still compatible with previous declarations.
- return CheckMultiVersionAdditionalDecl(S, OldFD, NewFD, MVKind, NewCPUDisp,
+ return CheckMultiVersionAdditionalDecl(S, OldFD, NewFD, NewCPUDisp,
NewCPUSpec, NewClones, Redeclaration,
OldDecl, Previous);
}
@@ -11848,23 +11790,6 @@ static void CheckConstPureAttributesUsage(Sema &S, FunctionDecl *NewFD) {
}
}
-/// Perform semantic checking of a new function declaration.
-///
-/// Performs semantic analysis of the new function declaration
-/// NewFD. This routine performs all semantic checking that does not
-/// require the actual declarator involved in the declaration, and is
-/// used both for the declaration of functions as they are parsed
-/// (called via ActOnDeclarator) and for the declaration of functions
-/// that have been instantiated via C++ template instantiation (called
-/// via InstantiateDecl).
-///
-/// \param IsMemberSpecialization whether this new function declaration is
-/// a member specialization (that replaces any definition provided by the
-/// previous declaration).
-///
-/// This sets NewFD->isInvalidDecl() to true if there was an error.
-///
-/// \returns true if the function declaration is a redeclaration.
bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
LookupResult &Previous,
bool IsMemberSpecialization,
@@ -11941,12 +11866,41 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
// PPC MMA non-pointer types are not allowed as function return types.
if (Context.getTargetInfo().getTriple().isPPC64() &&
- CheckPPCMMAType(NewFD->getReturnType(), NewFD->getLocation())) {
+ PPC().CheckPPCMMAType(NewFD->getReturnType(), NewFD->getLocation())) {
NewFD->setInvalidDecl();
}
CheckConstPureAttributesUsage(*this, NewFD);
+ // C++ [dcl.spec.auto.general]p12:
+ // Return type deduction for a templated function with a placeholder in its
+ // declared type occurs when the definition is instantiated even if the
+ // function body contains a return statement with a non-type-dependent
+ // operand.
+ //
+ // C++ [temp.dep.expr]p3:
+ // An id-expression is type-dependent if it is a template-id that is not a
+ // concept-id and is dependent; or if its terminal name is:
+ // - [...]
+ // - associated by name lookup with one or more declarations of member
+ // functions of a class that is the current instantiation declared with a
+ // return type that contains a placeholder type,
+ // - [...]
+ //
+ // If this is a templated function with a placeholder in its return type,
+ // make the placeholder type dependent since it won't be deduced until the
+ // definition is instantiated. We do this here because it needs to happen
+ // for implicitly instantiated member functions/member function templates.
+ if (getLangOpts().CPlusPlus14 &&
+ (NewFD->isDependentContext() &&
+ NewFD->getReturnType()->isUndeducedType())) {
+ const FunctionProtoType *FPT =
+ NewFD->getType()->castAs<FunctionProtoType>();
+ QualType NewReturnType = SubstAutoTypeDependent(FPT->getReturnType());
+ NewFD->setType(Context.getFunctionType(NewReturnType, FPT->getParamTypes(),
+ FPT->getExtProtoInfo()));
+ }
+
// C++11 [dcl.constexpr]p8:
// A constexpr specifier for a non-static member function that is not
// a constructor declares that member function to be const.
@@ -12066,7 +12020,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
}
if (LangOpts.OpenMP)
- ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(NewFD);
+ OpenMP().ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(NewFD);
// Semantic checking for this function declaration (in isolation).
@@ -12123,55 +12077,6 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
checkThisInStaticMemberFunctionType(Method);
}
- if (Expr *TRC = NewFD->getTrailingRequiresClause()) {
- // C++20: dcl.decl.general p4:
- // The optional requires-clause ([temp.pre]) in an init-declarator or
- // member-declarator shall be present only if the declarator declares a
- // templated function ([dcl.fct]).
- //
- // [temp.pre]/8:
- // An entity is templated if it is
- // - a template,
- // - an entity defined ([basic.def]) or created ([class.temporary]) in a
- // templated entity,
- // - a member of a templated entity,
- // - an enumerator for an enumeration that is a templated entity, or
- // - the closure type of a lambda-expression ([expr.prim.lambda.closure])
- // appearing in the declaration of a templated entity. [Note 6: A local
- // class, a local or block variable, or a friend function defined in a
- // templated entity is a templated entity. — end note]
- //
- // A templated function is a function template or a function that is
- // templated. A templated class is a class template or a class that is
- // templated. A templated variable is a variable template or a variable
- // that is templated.
-
- bool IsTemplate = NewFD->getDescribedFunctionTemplate();
- bool IsFriend = NewFD->getFriendObjectKind();
- if (!IsTemplate && // -a template
- // defined... in a templated entity
- !(DeclIsDefn && NewFD->isTemplated()) &&
- // a member of a templated entity
- !(isa<CXXMethodDecl>(NewFD) && NewFD->isTemplated()) &&
- // Don't complain about instantiations, they've already had these
- // rules + others enforced.
- !NewFD->isTemplateInstantiation() &&
- // If the function violates [temp.friend]p9 because it is missing
- // a definition, and adding a definition would make it templated,
- // then let that error take precedence.
- !(!DeclIsDefn && IsFriend && NewFD->isTemplated())) {
- Diag(TRC->getBeginLoc(), diag::err_constrained_non_templated_function);
- } else if (!DeclIsDefn && !IsTemplate && IsFriend &&
- !NewFD->isTemplateInstantiation()) {
- // C++ [temp.friend]p9:
- // A non-template friend declaration with a requires-clause shall be a
- // definition.
- Diag(NewFD->getBeginLoc(),
- diag::err_non_temp_friend_decl_with_requires_clause_must_be_def);
- NewFD->setInvalidDecl();
- }
- }
-
if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(NewFD))
ActOnConversionDeclarator(Conversion);
@@ -12246,16 +12151,30 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
}
if (!Redeclaration && LangOpts.CUDA)
- checkCUDATargetOverload(NewFD, Previous);
+ CUDA().checkTargetOverload(NewFD, Previous);
}
// Check if the function definition uses any AArch64 SME features without
- // having the '+sme' feature enabled.
+ // having the '+sme' feature enabled and warn user if sme locally streaming
+ // function returns or uses arguments with VL-based types.
if (DeclIsDefn) {
const auto *Attr = NewFD->getAttr<ArmNewAttr>();
bool UsesSM = NewFD->hasAttr<ArmLocallyStreamingAttr>();
bool UsesZA = Attr && Attr->isNewZA();
bool UsesZT0 = Attr && Attr->isNewZT0();
+
+ if (NewFD->hasAttr<ArmLocallyStreamingAttr>()) {
+ if (NewFD->getReturnType()->isSizelessVectorType())
+ Diag(NewFD->getLocation(),
+ diag::warn_sme_locally_streaming_has_vl_args_returns)
+ << /*IsArg=*/false;
+ if (llvm::any_of(NewFD->parameters(), [](ParmVarDecl *P) {
+ return P->getOriginalType()->isSizelessVectorType();
+ }))
+ Diag(NewFD->getLocation(),
+ diag::warn_sme_locally_streaming_has_vl_args_returns)
+ << /*IsArg=*/true;
+ }
if (const auto *FPT = NewFD->getType()->getAs<FunctionProtoType>()) {
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
UsesSM |=
@@ -12484,7 +12403,7 @@ void Sema::CheckMSVCRTEntryPoint(FunctionDecl *FD) {
if (FD->getName() != "DllMain")
FD->setHasImplicitReturnZero(true);
- // Explicity specified calling conventions are applied to MSVC entry points
+ // Explicitly specified calling conventions are applied to MSVC entry points
if (!hasExplicitCallingConv(T)) {
if (isDefaultStdCall(FD, *this)) {
if (FT->getCallConv() != CC_X86StdCall) {
@@ -12505,126 +12424,7 @@ void Sema::CheckMSVCRTEntryPoint(FunctionDecl *FD) {
}
}
-void Sema::ActOnHLSLTopLevelFunction(FunctionDecl *FD) {
- auto &TargetInfo = getASTContext().getTargetInfo();
-
- if (FD->getName() != TargetInfo.getTargetOpts().HLSLEntry)
- return;
-
- StringRef Env = TargetInfo.getTriple().getEnvironmentName();
- HLSLShaderAttr::ShaderType ShaderType;
- if (HLSLShaderAttr::ConvertStrToShaderType(Env, ShaderType)) {
- if (const auto *Shader = FD->getAttr<HLSLShaderAttr>()) {
- // The entry point is already annotated - check that it matches the
- // triple.
- if (Shader->getType() != ShaderType) {
- Diag(Shader->getLocation(), diag::err_hlsl_entry_shader_attr_mismatch)
- << Shader;
- FD->setInvalidDecl();
- }
- } else {
- // Implicitly add the shader attribute if the entry function isn't
- // explicitly annotated.
- FD->addAttr(HLSLShaderAttr::CreateImplicit(Context, ShaderType,
- FD->getBeginLoc()));
- }
- } else {
- switch (TargetInfo.getTriple().getEnvironment()) {
- case llvm::Triple::UnknownEnvironment:
- case llvm::Triple::Library:
- break;
- default:
- llvm_unreachable("Unhandled environment in triple");
- }
- }
-}
-
-void Sema::CheckHLSLEntryPoint(FunctionDecl *FD) {
- const auto *ShaderAttr = FD->getAttr<HLSLShaderAttr>();
- assert(ShaderAttr && "Entry point has no shader attribute");
- HLSLShaderAttr::ShaderType ST = ShaderAttr->getType();
-
- switch (ST) {
- case HLSLShaderAttr::Pixel:
- case HLSLShaderAttr::Vertex:
- case HLSLShaderAttr::Geometry:
- case HLSLShaderAttr::Hull:
- case HLSLShaderAttr::Domain:
- case HLSLShaderAttr::RayGeneration:
- case HLSLShaderAttr::Intersection:
- case HLSLShaderAttr::AnyHit:
- case HLSLShaderAttr::ClosestHit:
- case HLSLShaderAttr::Miss:
- case HLSLShaderAttr::Callable:
- if (const auto *NT = FD->getAttr<HLSLNumThreadsAttr>()) {
- DiagnoseHLSLAttrStageMismatch(NT, ST,
- {HLSLShaderAttr::Compute,
- HLSLShaderAttr::Amplification,
- HLSLShaderAttr::Mesh});
- FD->setInvalidDecl();
- }
- break;
-
- case HLSLShaderAttr::Compute:
- case HLSLShaderAttr::Amplification:
- case HLSLShaderAttr::Mesh:
- if (!FD->hasAttr<HLSLNumThreadsAttr>()) {
- Diag(FD->getLocation(), diag::err_hlsl_missing_numthreads)
- << HLSLShaderAttr::ConvertShaderTypeToStr(ST);
- FD->setInvalidDecl();
- }
- break;
- }
-
- for (ParmVarDecl *Param : FD->parameters()) {
- if (const auto *AnnotationAttr = Param->getAttr<HLSLAnnotationAttr>()) {
- CheckHLSLSemanticAnnotation(FD, Param, AnnotationAttr);
- } else {
- // FIXME: Handle struct parameters where annotations are on struct fields.
- // See: https://github.com/llvm/llvm-project/issues/57875
- Diag(FD->getLocation(), diag::err_hlsl_missing_semantic_annotation);
- Diag(Param->getLocation(), diag::note_previous_decl) << Param;
- FD->setInvalidDecl();
- }
- }
- // FIXME: Verify return type semantic annotation.
-}
-
-void Sema::CheckHLSLSemanticAnnotation(
- FunctionDecl *EntryPoint, const Decl *Param,
- const HLSLAnnotationAttr *AnnotationAttr) {
- auto *ShaderAttr = EntryPoint->getAttr<HLSLShaderAttr>();
- assert(ShaderAttr && "Entry point has no shader attribute");
- HLSLShaderAttr::ShaderType ST = ShaderAttr->getType();
-
- switch (AnnotationAttr->getKind()) {
- case attr::HLSLSV_DispatchThreadID:
- case attr::HLSLSV_GroupIndex:
- if (ST == HLSLShaderAttr::Compute)
- return;
- DiagnoseHLSLAttrStageMismatch(AnnotationAttr, ST,
- {HLSLShaderAttr::Compute});
- break;
- default:
- llvm_unreachable("Unknown HLSLAnnotationAttr");
- }
-}
-
-void Sema::DiagnoseHLSLAttrStageMismatch(
- const Attr *A, HLSLShaderAttr::ShaderType Stage,
- std::initializer_list<HLSLShaderAttr::ShaderType> AllowedStages) {
- SmallVector<StringRef, 8> StageStrings;
- llvm::transform(AllowedStages, std::back_inserter(StageStrings),
- [](HLSLShaderAttr::ShaderType ST) {
- return StringRef(
- HLSLShaderAttr::ConvertShaderTypeToStr(ST));
- });
- Diag(A->getLoc(), diag::err_hlsl_attr_unsupported_in_stage)
- << A << HLSLShaderAttr::ConvertShaderTypeToStr(Stage)
- << (AllowedStages.size() != 1) << join(StageStrings, ", ");
-}
-
-bool Sema::CheckForConstantInitializer(Expr *Init, QualType DclT) {
+bool Sema::CheckForConstantInitializer(Expr *Init, unsigned DiagID) {
// FIXME: Need strict checking. In C89, we need to check for
// any assignment, increment, decrement, function-calls, or
// commas outside of a sizeof. In C99, it's the same list,
@@ -12642,8 +12442,7 @@ bool Sema::CheckForConstantInitializer(Expr *Init, QualType DclT) {
const Expr *Culprit;
if (Init->isConstantInitializer(Context, false, &Culprit))
return false;
- Diag(Culprit->getExprLoc(), diag::err_init_element_not_constant)
- << Culprit->getSourceRange();
+ Diag(Culprit->getExprLoc(), DiagID) << Culprit->getSourceRange();
return true;
}
@@ -13104,7 +12903,8 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
TemplateDeductionInfo Info(DeduceInit->getExprLoc());
TemplateDeductionResult Result =
DeduceAutoType(TSI->getTypeLoc(), DeduceInit, DeducedType, Info);
- if (Result != TDK_Success && Result != TDK_AlreadyDiagnosed) {
+ if (Result != TemplateDeductionResult::Success &&
+ Result != TemplateDeductionResult::AlreadyDiagnosed) {
if (!IsInitCapture)
DiagnoseAutoDeductionFailure(VDecl, DeduceInit);
else if (isa<InitListExpr>(Init))
@@ -13151,7 +12951,7 @@ bool Sema::DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
assert(VDecl->isLinkageValid());
// In ARC, infer lifetime.
- if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(VDecl))
+ if (getLangOpts().ObjCAutoRefCount && ObjC().inferObjCARCLifetime(VDecl))
VDecl->setInvalidDecl();
if (getLangOpts().OpenCL)
@@ -13443,22 +13243,21 @@ void Sema::checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
.visit(QT, nullptr, false);
}
-/// AddInitializerToDecl - Adds the initializer Init to the
-/// declaration dcl. If DirectInit is true, this is C++ direct
-/// initialization rather than copy initialization.
void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// If there is no declaration, there was an error parsing it. Just ignore
// the initializer.
- if (!RealDecl || RealDecl->isInvalidDecl()) {
+ if (!RealDecl) {
CorrectDelayedTyposInExpr(Init, dyn_cast_or_null<VarDecl>(RealDecl));
return;
}
- if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(RealDecl)) {
- // Pure-specifiers are handled in ActOnPureSpecifier.
- Diag(Method->getLocation(), diag::err_member_function_initialization)
- << Method->getDeclName() << Init->getSourceRange();
- Method->setInvalidDecl();
+ if (auto *Method = dyn_cast<CXXMethodDecl>(RealDecl)) {
+ if (!Method->isInvalidDecl()) {
+ // Pure-specifiers are handled in ActOnPureSpecifier.
+ Diag(Method->getLocation(), diag::err_member_function_initialization)
+ << Method->getDeclName() << Init->getSourceRange();
+ Method->setInvalidDecl();
+ }
return;
}
@@ -13470,6 +13269,18 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
return;
}
+ if (VDecl->isInvalidDecl()) {
+ ExprResult Res = CorrectDelayedTyposInExpr(Init, VDecl);
+ SmallVector<Expr *> SubExprs;
+ if (Res.isUsable())
+ SubExprs.push_back(Res.get());
+ ExprResult Recovery =
+ CreateRecoveryExpr(Init->getBeginLoc(), Init->getEndLoc(), SubExprs);
+ if (Expr *E = Recovery.get())
+ VDecl->setInit(E);
+ return;
+ }
+
// WebAssembly tables can't be used to initialise a variable.
if (Init && !Init->getType().isNull() &&
Init->getType()->isWebAssemblyTableType()) {
@@ -13544,7 +13355,8 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
if (getLangOpts().CPlusPlusModules && currentModuleIsHeaderUnit() &&
!VDecl->isInvalidDecl() && VDecl->isThisDeclarationADefinition() &&
VDecl->getFormalLinkage() == Linkage::External && !VDecl->isInline() &&
- !VDecl->isTemplated() && !isa<VarTemplateSpecializationDecl>(VDecl)) {
+ !VDecl->isTemplated() && !isa<VarTemplateSpecializationDecl>(VDecl) &&
+ !VDecl->getInstantiatedFromStaticDataMember()) {
Diag(VDecl->getLocation(), diag::err_extern_def_in_header_unit);
VDecl->setInvalidDecl();
}
@@ -13662,12 +13474,12 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
CreateRecoveryExpr(Init->getBeginLoc(), Init->getEndLoc(), Args);
if (RecoveryExpr.get())
VDecl->setInit(RecoveryExpr.get());
- // In general, for error recovery purposes, the initalizer doesn't play
+ // In general, for error recovery purposes, the initializer doesn't play
// part in the valid bit of the declaration. There are a few exceptions:
// 1) if the var decl has a deduced auto type, and the type cannot be
// deduced by an invalid initializer;
- // 2) if the var decl is decompsition decl with a non-deduced type, and
- // the initialization fails (e.g. `int [a] = {1, 2};`);
+ // 2) if the var decl is a decomposition decl with a non-deduced type,
+ // and the initialization fails (e.g. `int [a] = {1, 2};`);
// Case 1) was already handled elsewhere.
if (isa<DecompositionDecl>(VDecl)) // Case 2)
VDecl->setInvalidDecl();
@@ -13712,7 +13524,7 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
checkUnsafeAssigns(VDecl->getLocation(), VDecl->getType(), Init);
if (VDecl->hasAttr<BlocksAttr>())
- checkRetainCycles(VDecl, Init);
+ ObjC().checkRetainCycles(VDecl, Init);
// It is safe to assign a weak reference into a strong variable.
// Although this code can still have problems:
@@ -13760,29 +13572,24 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// OpenCL v1.2 s6.5.3: __constant locals must be constant-initialized.
// This is true even in C++ for OpenCL.
} else if (VDecl->getType().getAddressSpace() == LangAS::opencl_constant) {
- CheckForConstantInitializer(Init, DclT);
+ CheckForConstantInitializer(Init);
- // Otherwise, C++ does not restrict the initializer.
+ // Otherwise, C++ does not restrict the initializer.
} else if (getLangOpts().CPlusPlus) {
// do nothing
// C99 6.7.8p4: All the expressions in an initializer for an object that has
// static storage duration shall be constant expressions or string literals.
} else if (VDecl->getStorageClass() == SC_Static) {
- CheckForConstantInitializer(Init, DclT);
+ CheckForConstantInitializer(Init);
- // C89 is stricter than C99 for aggregate initializers.
- // C89 6.5.7p3: All the expressions [...] in an initializer list
- // for an object that has aggregate or union type shall be
- // constant expressions.
+ // C89 is stricter than C99 for aggregate initializers.
+ // C89 6.5.7p3: All the expressions [...] in an initializer list
+ // for an object that has aggregate or union type shall be
+ // constant expressions.
} else if (!getLangOpts().C99 && VDecl->getType()->isAggregateType() &&
isa<InitListExpr>(Init)) {
- const Expr *Culprit;
- if (!Init->isConstantInitializer(Context, false, &Culprit)) {
- Diag(Culprit->getExprLoc(),
- diag::ext_aggregate_init_not_constant)
- << Culprit->getSourceRange();
- }
+ CheckForConstantInitializer(Init, diag::ext_aggregate_init_not_constant);
}
if (auto *E = dyn_cast<ExprWithCleanups>(Init))
@@ -13890,9 +13697,9 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
}
} else if (VDecl->isFileVarDecl()) {
// In C, extern is typically used to avoid tentative definitions when
- // declaring variables in headers, but adding an intializer makes it a
+ // declaring variables in headers, but adding an initializer makes it a
// definition. This is somewhat confusing, so GCC and Clang both warn on it.
- // In C++, extern is often used to give implictly static const variables
+ // In C++, extern is often used to give implicitly static const variables
// external linkage, so don't warn in that case. If selectany is present,
// this might be header code intended for C and C++ inclusion, so apply the
// C++ rules.
@@ -13912,8 +13719,10 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
VDecl->setStorageClass(SC_Extern);
// C99 6.7.8p4. All file scoped initializers need to be constant.
- if (!getLangOpts().CPlusPlus && !VDecl->isInvalidDecl())
- CheckForConstantInitializer(Init, DclT);
+ // Avoid duplicate diagnostics for constexpr variables.
+ if (!getLangOpts().CPlusPlus && !VDecl->isInvalidDecl() &&
+ !VDecl->isConstexpr())
+ CheckForConstantInitializer(Init);
}
QualType InitType = Init->getType();
@@ -13952,10 +13761,6 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
CheckCompleteVariableDeclaration(VDecl);
}
-/// ActOnInitializerError - Given that there was an error parsing an
-/// initializer for the given declaration, try to at least re-establish
-/// invariants such as whether a variable's type is either dependent or
-/// complete.
void Sema::ActOnInitializerError(Decl *D) {
// Our main concern here is re-establishing invariants like "a
// variable's type is either dependent or complete".
@@ -14084,7 +13889,7 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
return;
}
}
- // The declaration is unitialized, no need for further checks.
+ // The declaration is uninitialized, no need for further checks.
return;
}
@@ -14369,7 +14174,7 @@ StmtResult Sema::ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
if (var->isInvalidDecl()) return;
- MaybeAddCUDAConstantAttr(var);
+ CUDA().MaybeAddConstantAttr(var);
if (getLangOpts().OpenCL) {
// OpenCL v2.0 s6.12.5 - Every block variable declaration must have an
@@ -14523,9 +14328,13 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
QualType baseType = Context.getBaseElementType(type);
bool HasConstInit = true;
+ if (getLangOpts().C23 && var->isConstexpr() && !Init)
+ Diag(var->getLocation(), diag::err_constexpr_var_requires_const_init)
+ << var;
+
// Check whether the initializer is sufficiently constant.
- if (getLangOpts().CPlusPlus && !type->isDependentType() && Init &&
- !Init->isValueDependent() &&
+ if ((getLangOpts().CPlusPlus || (getLangOpts().C23 && var->isConstexpr())) &&
+ !type->isDependentType() && Init && !Init->isValueDependent() &&
(GlobalStorage || var->isConstexpr() ||
var->mightBeUsableInConstantExpressions(Context))) {
// If this variable might have a constant initializer or might be usable in
@@ -14533,7 +14342,7 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
// do this lazily, because the result might depend on things that change
// later, such as which constexpr functions happen to be defined.
SmallVector<PartialDiagnosticAt, 8> Notes;
- if (!getLangOpts().CPlusPlus11) {
+ if (!getLangOpts().CPlusPlus11 && !getLangOpts().C23) {
// Prior to C++11, in contexts where a constant initializer is required,
// the set of valid constant initializers is described by syntactic rules
// in [expr.const]p2-6.
@@ -14669,8 +14478,6 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
CheckCompleteDecompositionDeclaration(DD);
}
-/// Check if VD needs to be dllexport/dllimport due to being in a
-/// dllexport/import function.
void Sema::CheckStaticLocalForDllExport(VarDecl *VD) {
assert(VD->isStaticLocal());
@@ -14728,8 +14535,6 @@ void Sema::CheckThreadLocalForLargeAlignment(VarDecl *VD) {
}
}
-/// FinalizeDeclaration - called by ParseDeclarationAfterDeclarator to perform
-/// any semantic actions necessary after any initializer has been attached.
void Sema::FinalizeDeclaration(Decl *ThisDecl) {
// Note that we are no longer parsing the initializer for this declaration.
ParsingInitForAutoVars.erase(ThisDecl);
@@ -14779,7 +14584,7 @@ void Sema::FinalizeDeclaration(Decl *ThisDecl) {
// variables whether they are local or not. CUDA also allows
// constant initializers for __constant__ and __device__ variables.
if (getLangOpts().CUDA)
- checkAllowedCUDAInitializer(VD);
+ CUDA().checkAllowedInitializer(VD);
// Grab the dllimport or dllexport attribute off of the VarDecl.
const InheritableAttr *DLLAttr = getDLLAttr(VD);
@@ -14898,53 +14703,53 @@ Sema::DeclGroupPtrTy Sema::FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
DeclaratorDecl *FirstNonDeducedAutoInGroup = nullptr;
bool DiagnosedNonDeducedAuto = false;
- for (unsigned i = 0, e = Group.size(); i != e; ++i) {
- if (Decl *D = Group[i]) {
- // Check if the Decl has been declared in '#pragma omp declare target'
- // directive and has static storage duration.
- if (auto *VD = dyn_cast<VarDecl>(D);
- LangOpts.OpenMP && VD && VD->hasAttr<OMPDeclareTargetDeclAttr>() &&
- VD->hasGlobalStorage())
- ActOnOpenMPDeclareTargetInitializer(D);
- // For declarators, there are some additional syntactic-ish checks we need
- // to perform.
- if (auto *DD = dyn_cast<DeclaratorDecl>(D)) {
- if (!FirstDeclaratorInGroup)
- FirstDeclaratorInGroup = DD;
- if (!FirstDecompDeclaratorInGroup)
- FirstDecompDeclaratorInGroup = dyn_cast<DecompositionDecl>(D);
- if (!FirstNonDeducedAutoInGroup && DS.hasAutoTypeSpec() &&
- !hasDeducedAuto(DD))
- FirstNonDeducedAutoInGroup = DD;
-
- if (FirstDeclaratorInGroup != DD) {
- // A decomposition declaration cannot be combined with any other
- // declaration in the same group.
- if (FirstDecompDeclaratorInGroup && !DiagnosedMultipleDecomps) {
- Diag(FirstDecompDeclaratorInGroup->getLocation(),
- diag::err_decomp_decl_not_alone)
- << FirstDeclaratorInGroup->getSourceRange()
- << DD->getSourceRange();
- DiagnosedMultipleDecomps = true;
- }
+ for (Decl *D : Group) {
+ if (!D)
+ continue;
+ // Check if the Decl has been declared in '#pragma omp declare target'
+ // directive and has static storage duration.
+ if (auto *VD = dyn_cast<VarDecl>(D);
+ LangOpts.OpenMP && VD && VD->hasAttr<OMPDeclareTargetDeclAttr>() &&
+ VD->hasGlobalStorage())
+ OpenMP().ActOnOpenMPDeclareTargetInitializer(D);
+ // For declarators, there are some additional syntactic-ish checks we need
+ // to perform.
+ if (auto *DD = dyn_cast<DeclaratorDecl>(D)) {
+ if (!FirstDeclaratorInGroup)
+ FirstDeclaratorInGroup = DD;
+ if (!FirstDecompDeclaratorInGroup)
+ FirstDecompDeclaratorInGroup = dyn_cast<DecompositionDecl>(D);
+ if (!FirstNonDeducedAutoInGroup && DS.hasAutoTypeSpec() &&
+ !hasDeducedAuto(DD))
+ FirstNonDeducedAutoInGroup = DD;
+
+ if (FirstDeclaratorInGroup != DD) {
+ // A decomposition declaration cannot be combined with any other
+ // declaration in the same group.
+ if (FirstDecompDeclaratorInGroup && !DiagnosedMultipleDecomps) {
+ Diag(FirstDecompDeclaratorInGroup->getLocation(),
+ diag::err_decomp_decl_not_alone)
+ << FirstDeclaratorInGroup->getSourceRange()
+ << DD->getSourceRange();
+ DiagnosedMultipleDecomps = true;
+ }
- // A declarator that uses 'auto' in any way other than to declare a
- // variable with a deduced type cannot be combined with any other
- // declarator in the same group.
- if (FirstNonDeducedAutoInGroup && !DiagnosedNonDeducedAuto) {
- Diag(FirstNonDeducedAutoInGroup->getLocation(),
- diag::err_auto_non_deduced_not_alone)
- << FirstNonDeducedAutoInGroup->getType()
- ->hasAutoForTrailingReturnType()
- << FirstDeclaratorInGroup->getSourceRange()
- << DD->getSourceRange();
- DiagnosedNonDeducedAuto = true;
- }
+ // A declarator that uses 'auto' in any way other than to declare a
+ // variable with a deduced type cannot be combined with any other
+ // declarator in the same group.
+ if (FirstNonDeducedAutoInGroup && !DiagnosedNonDeducedAuto) {
+ Diag(FirstNonDeducedAutoInGroup->getLocation(),
+ diag::err_auto_non_deduced_not_alone)
+ << FirstNonDeducedAutoInGroup->getType()
+ ->hasAutoForTrailingReturnType()
+ << FirstDeclaratorInGroup->getSourceRange()
+ << DD->getSourceRange();
+ DiagnosedNonDeducedAuto = true;
}
}
-
- Decls.push_back(D);
}
+
+ Decls.push_back(D);
}
if (DeclSpec::isDeclRep(DS.getTypeSpecType())) {
@@ -14959,8 +14764,6 @@ Sema::DeclGroupPtrTy Sema::FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
return BuildDeclaratorGroup(Decls);
}
-/// BuildDeclaratorGroup - convert a list of declarations into a declaration
-/// group, performing any necessary semantic checking.
Sema::DeclGroupPtrTy
Sema::BuildDeclaratorGroup(MutableArrayRef<Decl *> Group) {
// C++14 [dcl.spec.auto]p7: (DR1347)
@@ -15037,8 +14840,6 @@ void Sema::ActOnDocumentableDecls(ArrayRef<Decl *> Group) {
Context.attachCommentsToJustParsedDecls(Group, &getPreprocessor());
}
-/// Common checks for a parameter-declaration that should apply to both function
-/// parameters and non-type template parameters.
void Sema::CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D) {
// Check that there are no default arguments inside the type of this
// parameter.
@@ -15098,8 +14899,6 @@ static void CheckExplicitObjectParameter(Sema &S, ParmVarDecl *P,
LSI->ExplicitObjectParameter = P;
}
-/// ActOnParamDeclarator - Called from Parser::ParseFunctionDeclarator()
-/// to introduce parameters into function prototype scope.
Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D,
SourceLocation ExplicitThisLoc) {
const DeclSpec &DS = D.getDeclSpec();
@@ -15145,10 +14944,10 @@ Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D,
QualType parmDeclType = TInfo->getType();
// Check for redeclaration of parameters, e.g. int foo(int x, int x);
- IdentifierInfo *II = D.getIdentifier();
+ const IdentifierInfo *II = D.getIdentifier();
if (II) {
LookupResult R(*this, II, D.getIdentifierLoc(), LookupOrdinaryName,
- ForVisibleRedeclaration);
+ RedeclarationKind::ForVisibleRedeclaration);
LookupName(R, S);
if (!R.empty()) {
NamedDecl *PrevDecl = *R.begin();
@@ -15208,8 +15007,6 @@ Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D,
return New;
}
-/// Synthesizes a variable for a parameter arising from a
-/// typedef.
ParmVarDecl *Sema::BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T) {
@@ -15265,41 +15062,10 @@ void Sema::DiagnoseSizeOfParametersAndReturnValue(
}
}
-QualType Sema::AdjustParameterTypeForObjCAutoRefCount(QualType T,
- SourceLocation NameLoc,
- TypeSourceInfo *TSInfo) {
- // In ARC, infer a lifetime qualifier for appropriate parameter types.
- if (!getLangOpts().ObjCAutoRefCount ||
- T.getObjCLifetime() != Qualifiers::OCL_None || !T->isObjCLifetimeType())
- return T;
-
- Qualifiers::ObjCLifetime Lifetime;
-
- // Special cases for arrays:
- // - if it's const, use __unsafe_unretained
- // - otherwise, it's an error
- if (T->isArrayType()) {
- if (!T.isConstQualified()) {
- if (DelayedDiagnostics.shouldDelayDiagnostics())
- DelayedDiagnostics.add(sema::DelayedDiagnostic::makeForbiddenType(
- NameLoc, diag::err_arc_array_param_no_ownership, T, false));
- else
- Diag(NameLoc, diag::err_arc_array_param_no_ownership)
- << TSInfo->getTypeLoc().getSourceRange();
- }
- Lifetime = Qualifiers::OCL_ExplicitNone;
- } else {
- Lifetime = T->getObjCARCImplicitLifetime();
- }
- T = Context.getLifetimeQualifiedType(T, Lifetime);
-
- return T;
-}
-
ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
- SourceLocation NameLoc, IdentifierInfo *Name,
- QualType T, TypeSourceInfo *TSInfo,
- StorageClass SC) {
+ SourceLocation NameLoc,
+ const IdentifierInfo *Name, QualType T,
+ TypeSourceInfo *TSInfo, StorageClass SC) {
// In ARC, infer a lifetime qualifier for appropriate parameter types.
if (getLangOpts().ObjCAutoRefCount &&
T.getObjCLifetime() == Qualifiers::OCL_None &&
@@ -15374,7 +15140,7 @@ ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
// PPC MMA non-pointer types are not allowed as function argument types.
if (Context.getTargetInfo().getTriple().isPPC64() &&
- CheckPPCMMAType(New->getOriginalType(), New->getLocation())) {
+ PPC().CheckPPCMMAType(New->getOriginalType(), New->getLocation())) {
New->setInvalidDecl();
}
@@ -15444,8 +15210,8 @@ Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Declarator &D,
// specialization function under the OpenMP context defined as part of the
// `omp begin declare variant`.
SmallVector<FunctionDecl *, 4> Bases;
- if (LangOpts.OpenMP && isInOpenMPDeclareVariantScope())
- ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
+ if (LangOpts.OpenMP && OpenMP().isInOpenMPDeclareVariantScope())
+ OpenMP().ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
ParentScope, D, TemplateParameterLists, Bases);
D.setFunctionDefinitionKind(FunctionDefinitionKind::Definition);
@@ -15453,7 +15219,8 @@ Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Declarator &D,
Decl *Dcl = ActOnStartOfFunctionDef(FnBodyScope, DP, SkipBody, BodyKind);
if (!Bases.empty())
- ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(Dcl, Bases);
+ OpenMP().ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(Dcl,
+ Bases);
return Dcl;
}
@@ -15498,6 +15265,9 @@ ShouldWarnAboutMissingPrototype(const FunctionDecl *FD,
if (II->isStr("main") || II->isStr("efi_main"))
return false;
+ if (FD->isMSVCRTEntryPoint())
+ return false;
+
// Don't warn about inline functions.
if (FD->isInlined())
return false;
@@ -15736,10 +15506,19 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
// captures during transformation of nested lambdas, it is necessary to
// have the LSI properly restored.
if (isGenericLambdaCallOperatorSpecialization(FD)) {
- assert(inTemplateInstantiation() &&
- "There should be an active template instantiation on the stack "
- "when instantiating a generic lambda!");
- RebuildLambdaScopeInfo(cast<CXXMethodDecl>(D));
+ // C++2c 7.5.5.2p17 A member of a closure type shall not be explicitly
+ // instantiated, explicitly specialized.
+ if (FD->getTemplateSpecializationInfo()
+ ->isExplicitInstantiationOrSpecialization()) {
+ Diag(FD->getLocation(), diag::err_lambda_explicit_spec);
+ FD->setInvalidDecl();
+ PushFunctionScope();
+ } else {
+ assert(inTemplateInstantiation() &&
+ "There should be an active template instantiation on the stack "
+ "when instantiating a generic lambda!");
+ RebuildLambdaScopeInfo(cast<CXXMethodDecl>(D));
+ }
} else {
// Enter a new function scope
PushFunctionScope();
@@ -15839,6 +15618,11 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
FD->setInvalidDecl();
return D;
}
+
+ // Some function attributes (like OptimizeNoneAttr) need actions before
+ // parsing body started.
+ applyFunctionAttributesBeforeParsingBody(D);
+
// We want to attach documentation to original Decl (which might be
// a function template).
ActOnDocumentableDecl(D);
@@ -15850,18 +15634,20 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
return D;
}
-/// Given the set of return statements within a function body,
-/// compute the variables that are subject to the named return value
-/// optimization.
-///
-/// Each of the variables that is subject to the named return value
-/// optimization will be marked as NRVO variables in the AST, and any
-/// return statement that has a marked NRVO variable as its NRVO candidate can
-/// use the named return value optimization.
-///
-/// This function applies a very simplistic algorithm for NRVO: if every return
-/// statement in the scope of a variable has the same NRVO candidate, that
-/// candidate is an NRVO variable.
+void Sema::applyFunctionAttributesBeforeParsingBody(Decl *FD) {
+ if (!FD || FD->isInvalidDecl())
+ return;
+ if (auto *TD = dyn_cast<FunctionTemplateDecl>(FD))
+ FD = TD->getTemplatedDecl();
+ if (FD && FD->hasAttr<OptimizeNoneAttr>()) {
+ FPOptionsOverride FPO;
+ FPO.setDisallowOptimizations();
+ CurFPFeatures.applyChanges(FPO);
+ FpPragmaStack.CurrentValue =
+ CurFPFeatures.getChangesFrom(FPOptions(LangOpts));
+ }
+}
+
void Sema::computeNRVO(Stmt *Body, FunctionScopeInfo *Scope) {
ReturnStmt **Returns = Scope->Returns.data();
@@ -15975,7 +15761,7 @@ static void diagnoseImplicitlyRetainedSelf(Sema &S) {
static bool methodHasName(const FunctionDecl *FD, StringRef Name) {
return isa<CXXMethodDecl>(FD) && FD->param_empty() &&
- FD->getDeclName().isIdentifier() && FD->getName().equals(Name);
+ FD->getDeclName().isIdentifier() && FD->getName() == Name;
}
bool Sema::CanBeGetReturnObject(const FunctionDecl *FD) {
@@ -16023,7 +15809,17 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// This is meant to pop the context added in ActOnStartOfFunctionDef().
ExitFunctionBodyRAII ExitRAII(*this, isLambdaCallOperator(FD));
if (FD) {
- FD->setBody(Body);
+ // If this is called by Parser::ParseFunctionDefinition() after marking
+ // the declaration as deleted, and if the deleted-function-body contains
+ // a message (C++26), then a DefaultedOrDeletedInfo will have already been
+ // added to store that message; do not overwrite it in that case.
+ //
+ // Since this would always set the body to 'nullptr' in that case anyway,
+ // which is already done when the function decl is initially created,
+ // always skipping this irrespective of whether there is a delete message
+ // should not be a problem.
+ if (!FD->isDeletedAsWritten())
+ FD->setBody(Body);
FD->setWillHaveBody(false);
CheckImmediateEscalatingFunctionDefinition(FD, FSI);
@@ -16046,7 +15842,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
FD->setInvalidDecl();
}
}
- } else if (getLangOpts().CPlusPlus11 && isLambdaCallOperator(FD)) {
+ } else if (getLangOpts().CPlusPlus && isLambdaCallOperator(FD)) {
// In C++11, we don't use 'auto' deduction rules for lambda call
// operators because we don't support return type deduction.
auto *LSI = getCurLambda();
@@ -16258,9 +16054,8 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
}
}
- assert(
- (FD == getCurFunctionDecl() || getCurLambda()->CallOperator == FD) &&
- "Function parsing confused");
+ assert((FD == getCurFunctionDecl(/*AllowLambdas=*/true)) &&
+ "Function parsing confused");
} else if (ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(dcl)) {
assert(MD == getCurMethodDecl() && "Method parsing confused");
MD->setBody(Body);
@@ -16291,7 +16086,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
if (!SuperD)
return false;
return SuperD->getIdentifier() ==
- NSAPIObj->getNSClassId(NSAPI::ClassId_NSObject);
+ ObjC().NSAPIObj->getNSClassId(NSAPI::ClassId_NSObject);
};
// Don't issue this warning for unavailable inits or direct subclasses
// of NSObject.
@@ -16304,7 +16099,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
FSI->ObjCWarnForNoDesignatedInitChain = false;
}
if (FSI->ObjCWarnForNoInitDelegation) {
- // Don't issue this warning for unavaialable inits.
+ // Don't issue this warning for unavailable inits.
if (!MD->isUnavailable())
Diag(MD->getLocation(),
diag::warn_objc_secondary_init_missing_init_call);
@@ -16438,14 +16233,13 @@ void Sema::ActOnFinishDelayedAttribute(Scope *S, Decl *D,
if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D))
D = TD->getTemplatedDecl();
ProcessDeclAttributeList(S, D, Attrs);
+ ProcessAPINotes(D);
if (CXXMethodDecl *Method = dyn_cast_or_null<CXXMethodDecl>(D))
if (Method->isStatic())
checkThisInStaticMemberFunctionAttributes(Method);
}
-/// ImplicitlyDefineFunction - An undeclared identifier was used in a function
-/// call, forming a call to an implicitly defined function (per C99 6.5.1p2).
NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
IdentifierInfo &II, Scope *S) {
// It is not valid to implicitly define a function in C23.
@@ -16578,12 +16372,6 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
return FD;
}
-/// If this function is a C++ replaceable global allocation function
-/// (C++2a [basic.stc.dynamic.allocation], C++2a [new.delete]),
-/// adds any function attributes that we know a priori based on the standard.
-///
-/// We need to check for duplicate attributes both here and where user-written
-/// attributes are applied to declarations.
void Sema::AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD) {
if (FD->isInvalidDecl())
@@ -16653,15 +16441,6 @@ void Sema::AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
// have new-extended alignment and is of the requested size.
}
-/// Adds any function attributes that we know a priori based on
-/// the declaration of this function.
-///
-/// These attributes can apply both to implicitly-declared builtins
-/// (like __builtin___printf_chk) or to library-declared functions
-/// like NSLog or printf.
-///
-/// We need to check for duplicate attributes both here and where user-written
-/// attributes are applied to declarations.
void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
if (FD->isInvalidDecl())
return;
@@ -16912,7 +16691,6 @@ TypedefDecl *Sema::ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
return NewTD;
}
-/// Check that this is a valid underlying type for an enum declaration.
bool Sema::CheckEnumUnderlyingType(TypeSourceInfo *TI) {
SourceLocation UnderlyingLoc = TI->getTypeLoc().getBeginLoc();
QualType T = TI->getType();
@@ -16930,8 +16708,6 @@ bool Sema::CheckEnumUnderlyingType(TypeSourceInfo *TI) {
<< T << T->isBitIntType();
}
-/// Check whether this is a valid redeclaration of a previous enumeration.
-/// \return true if the redeclaration was invalid.
bool Sema::CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev) {
@@ -17016,10 +16792,6 @@ Sema::NonTagKind Sema::getNonTagTypeDeclKind(const Decl *PrevDecl,
llvm_unreachable("invalid TTK");
}
-/// Determine whether a tag with a given kind is acceptable
-/// as a redeclaration of the given tag declaration.
-///
-/// \returns true if the new tag kind is acceptable, false otherwise.
bool Sema::isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
@@ -17203,16 +16975,6 @@ static bool isAcceptableTagRedeclContext(Sema &S, DeclContext *OldDC,
return false;
}
-/// This is invoked when we see 'struct foo' or 'struct {'. In the
-/// former case, Name will be non-null. In the later case, Name will be null.
-/// TagSpec indicates what kind of tag this is. TUK indicates whether this is a
-/// reference/declaration/definition of a tag.
-///
-/// \param IsTypeSpecifier \c true if this is a type-specifier (or
-/// trailing-type-specifier) other than one in an alias-declaration.
-///
-/// \param SkipBody If non-null, will be set to indicate if the caller should
-/// skip the definition of this tag and treat it as if it were a declaration.
DeclResult
Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
@@ -17225,9 +16987,9 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
OffsetOfKind OOK, SkipBodyInfo *SkipBody) {
// If this is not a definition, it must have a name.
IdentifierInfo *OrigName = Name;
- assert((Name != nullptr || TUK == TUK_Definition) &&
+ assert((Name != nullptr || TUK == TagUseKind::Definition) &&
"Nameless record must be a definition!");
- assert(TemplateParameterLists.size() == 0 || TUK != TUK_Reference);
+ assert(TemplateParameterLists.size() == 0 || TUK != TagUseKind::Reference);
OwnedDecl = false;
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
@@ -17241,11 +17003,31 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// or a scope specifier, which also conveniently avoids this work
// for non-C++ cases.
if (TemplateParameterLists.size() > 0 ||
- (SS.isNotEmpty() && TUK != TUK_Reference)) {
- if (TemplateParameterList *TemplateParams =
- MatchTemplateParametersToScopeSpecifier(
- KWLoc, NameLoc, SS, nullptr, TemplateParameterLists,
- TUK == TUK_Friend, isMemberSpecialization, Invalid)) {
+ (SS.isNotEmpty() && TUK != TagUseKind::Reference)) {
+ TemplateParameterList *TemplateParams =
+ MatchTemplateParametersToScopeSpecifier(
+ KWLoc, NameLoc, SS, nullptr, TemplateParameterLists,
+ TUK == TagUseKind::Friend, isMemberSpecialization, Invalid);
+
+ // C++23 [dcl.type.elab] p2:
+ // If an elaborated-type-specifier is the sole constituent of a
+ // declaration, the declaration is ill-formed unless it is an explicit
+ // specialization, an explicit instantiation or it has one of the
+ // following forms: [...]
+ // C++23 [dcl.enum] p1:
+ // If the enum-head-name of an opaque-enum-declaration contains a
+ // nested-name-specifier, the declaration shall be an explicit
+ // specialization.
+ //
+ // FIXME: Class template partial specializations can be forward declared
+ // per CWG2213, but the resolution failed to allow qualified forward
+ // declarations. This is almost certainly unintentional, so we allow them.
+ if (TUK == TagUseKind::Declaration && SS.isNotEmpty() &&
+ !isMemberSpecialization)
+ Diag(SS.getBeginLoc(), diag::err_standalone_class_nested_name_specifier)
+ << TypeWithKeyword::getTagTypeKindName(Kind) << SS.getRange();
+
+ if (TemplateParams) {
if (Kind == TagTypeKind::Enum) {
Diag(KWLoc, diag::err_enum_template);
return true;
@@ -17278,6 +17060,26 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
return true;
}
+ if (TUK == TagUseKind::Friend && Kind == TagTypeKind::Enum) {
+ // C++23 [dcl.type.elab]p4:
+ // If an elaborated-type-specifier appears with the friend specifier as
+ // an entire member-declaration, the member-declaration shall have one
+ // of the following forms:
+ // friend class-key nested-name-specifier(opt) identifier ;
+ // friend class-key simple-template-id ;
+ // friend class-key nested-name-specifier template(opt)
+ // simple-template-id ;
+ //
+ // Since enum is not a class-key, so declarations like "friend enum E;"
+ // are ill-formed. Although CWG2363 reaffirms that such declarations are
+ // invalid, most implementations accept so we issue a pedantic warning.
+ Diag(KWLoc, diag::ext_enum_friend) << FixItHint::CreateRemoval(
+ ScopedEnum ? SourceRange(KWLoc, ScopedEnumKWLoc) : KWLoc);
+ assert(ScopedEnum || !ScopedEnumUsesClassTag);
+ Diag(KWLoc, diag::note_enum_friend)
+ << (ScopedEnum + ScopedEnumUsesClassTag);
+ }
+
// Figure out the underlying type if this a enum declaration. We need to do
// this early, because it's needed to detect if this is an incompatible
// redeclaration.
@@ -17309,7 +17111,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// of 'int'. However, if this is an unfixed forward declaration, don't set
// the underlying type unless the user enables -fms-compatibility. This
// makes unfixed forward declared enums incomplete and is more conforming.
- if (TUK == TUK_Definition || getLangOpts().MSVCCompat)
+ if (TUK == TagUseKind::Definition || getLangOpts().MSVCCompat)
EnumUnderlying = Context.IntTy.getTypePtr();
}
}
@@ -17320,8 +17122,8 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
bool isStdAlignValT = false;
RedeclarationKind Redecl = forRedeclarationInCurContext();
- if (TUK == TUK_Friend || TUK == TUK_Reference)
- Redecl = NotForRedeclaration;
+ if (TUK == TagUseKind::Friend || TUK == TagUseKind::Reference)
+ Redecl = RedeclarationKind::NotForRedeclaration;
/// Create a new tag decl in C/ObjC. Since the ODR-like semantics for ObjC/C
/// implemented asks for structural equivalence checking, the returned decl
@@ -17338,7 +17140,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
New = EnumDecl::Create(Context, SearchDC, KWLoc, Loc, Name, nullptr,
ScopedEnum, ScopedEnumUsesClassTag, IsFixed);
// If this is an undefined enum, bail.
- if (TUK != TUK_Definition && !Invalid)
+ if (TUK != TagUseKind::Definition && !Invalid)
return nullptr;
if (EnumUnderlying) {
EnumDecl *ED = cast<EnumDecl>(New);
@@ -17366,7 +17168,8 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// many points during the parsing of a struct declaration (because
// the #pragma tokens are effectively skipped over during the
// parsing of the struct).
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
+ if (TUK == TagUseKind::Definition &&
+ (!SkipBody || !SkipBody->ShouldSkip)) {
AddAlignmentAttributesForRecord(RD);
AddMsStructLayoutForRecord(RD);
}
@@ -17387,7 +17190,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// If this is a friend or a reference to a class in a dependent
// context, don't try to make a decl for it.
- if (TUK == TUK_Friend || TUK == TUK_Reference) {
+ if (TUK == TagUseKind::Friend || TUK == TagUseKind::Reference) {
DC = computeDeclContext(SS, false);
if (!DC) {
IsDependent = true;
@@ -17420,7 +17223,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// this as a dependent elaborated-type-specifier.
// But this only makes any sense for reference-like lookups.
if (Previous.wasNotFoundInCurrentInstantiation() &&
- (TUK == TUK_Reference || TUK == TUK_Friend)) {
+ (TUK == TagUseKind::Reference || TUK == TagUseKind::Friend)) {
IsDependent = true;
return true;
}
@@ -17437,7 +17240,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// If T is the name of a class, then each of the following shall have a
// name different from T:
// -- every member of class T that is itself a type
- if (TUK != TUK_Reference && TUK != TUK_Friend &&
+ if (TUK != TagUseKind::Reference && TUK != TagUseKind::Friend &&
DiagnoseClassNameShadow(SearchDC, DeclarationNameInfo(Name, NameLoc)))
return true;
@@ -17451,7 +17254,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// When declaring or defining a tag, ignore ambiguities introduced
// by types using'ed into this scope.
if (Previous.isAmbiguous() &&
- (TUK == TUK_Definition || TUK == TUK_Declaration)) {
+ (TUK == TagUseKind::Definition || TUK == TagUseKind::Declaration)) {
LookupResult::Filter F = Previous.makeFilter();
while (F.hasNext()) {
NamedDecl *ND = F.next();
@@ -17475,7 +17278,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
//
// Does it matter that this should be by scope instead of by
// semantic context?
- if (!Previous.empty() && TUK == TUK_Friend) {
+ if (!Previous.empty() && TUK == TagUseKind::Friend) {
DeclContext *EnclosingNS = SearchDC->getEnclosingNamespaceContext();
LookupResult::Filter F = Previous.makeFilter();
bool FriendSawTagOutsideEnclosingNamespace = false;
@@ -17505,7 +17308,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
if (Previous.isAmbiguous())
return true;
- if (!getLangOpts().CPlusPlus && TUK != TUK_Reference) {
+ if (!getLangOpts().CPlusPlus && TUK != TagUseKind::Reference) {
// FIXME: This makes sure that we ignore the contexts associated
// with C structs, unions, and enums when looking for a matching
// tag declaration or definition. See the similar lookup tweak
@@ -17557,11 +17360,12 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// also need to do a redeclaration lookup there, just in case
// there's a shadow friend decl.
if (Name && Previous.empty() &&
- (TUK == TUK_Reference || TUK == TUK_Friend || IsTemplateParamOrArg)) {
+ (TUK == TagUseKind::Reference || TUK == TagUseKind::Friend ||
+ IsTemplateParamOrArg)) {
if (Invalid) goto CreateNewDecl;
assert(SS.isEmpty());
- if (TUK == TUK_Reference || IsTemplateParamOrArg) {
+ if (TUK == TagUseKind::Reference || IsTemplateParamOrArg) {
// C++ [basic.scope.pdecl]p5:
// -- for an elaborated-type-specifier of the form
//
@@ -17595,7 +17399,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// Find the scope where we'll be declaring the tag.
S = getTagInjectionScope(S, getLangOpts());
} else {
- assert(TUK == TUK_Friend);
+ assert(TUK == TagUseKind::Friend);
CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(SearchDC);
// C++ [namespace.memdef]p3:
@@ -17660,7 +17464,8 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// redefinition if either context is within the other.
if (auto *Shadow = dyn_cast<UsingShadowDecl>(DirectPrevDecl)) {
auto *OldTag = dyn_cast<TagDecl>(PrevDecl);
- if (SS.isEmpty() && TUK != TUK_Reference && TUK != TUK_Friend &&
+ if (SS.isEmpty() && TUK != TagUseKind::Reference &&
+ TUK != TagUseKind::Friend &&
isDeclInScope(Shadow, SearchDC, S, isMemberSpecialization) &&
!(OldTag && isAcceptableTagRedeclContext(
*this, OldTag->getDeclContext(), SearchDC))) {
@@ -17679,13 +17484,13 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// If this is a use of a previous tag, or if the tag is already declared
// in the same scope (so that the definition/declaration completes or
// rementions the tag), reuse the decl.
- if (TUK == TUK_Reference || TUK == TUK_Friend ||
+ if (TUK == TagUseKind::Reference || TUK == TagUseKind::Friend ||
isDeclInScope(DirectPrevDecl, SearchDC, S,
SS.isNotEmpty() || isMemberSpecialization)) {
// Make sure that this wasn't declared as an enum and now used as a
// struct or something similar.
if (!isAcceptableTagRedeclaration(PrevTagDecl, Kind,
- TUK == TUK_Definition, KWLoc,
+ TUK == TagUseKind::Definition, KWLoc,
Name)) {
bool SafeToContinue =
(PrevTagDecl->getTagKind() != TagTypeKind::Enum &&
@@ -17712,7 +17517,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
if (Kind == TagTypeKind::Enum &&
PrevTagDecl->getTagKind() == TagTypeKind::Enum) {
const EnumDecl *PrevEnum = cast<EnumDecl>(PrevTagDecl);
- if (TUK == TUK_Reference || TUK == TUK_Friend)
+ if (TUK == TagUseKind::Reference || TUK == TagUseKind::Friend)
return PrevTagDecl;
QualType EnumUnderlyingTy;
@@ -17727,14 +17532,14 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
if (CheckEnumRedeclaration(NameLoc.isValid() ? NameLoc : KWLoc,
ScopedEnum, EnumUnderlyingTy,
IsFixed, PrevEnum))
- return TUK == TUK_Declaration ? PrevTagDecl : nullptr;
+ return TUK == TagUseKind::Declaration ? PrevTagDecl : nullptr;
}
// C++11 [class.mem]p1:
// A member shall not be declared twice in the member-specification,
// except that a nested class or member class template can be declared
// and then later defined.
- if (TUK == TUK_Declaration && PrevDecl->isCXXClassMember() &&
+ if (TUK == TagUseKind::Declaration && PrevDecl->isCXXClassMember() &&
S->isDeclScope(PrevDecl)) {
Diag(NameLoc, diag::ext_member_redeclared);
Diag(PrevTagDecl->getLocation(), diag::note_previous_declaration);
@@ -17743,11 +17548,11 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
if (!Invalid) {
// If this is a use, just return the declaration we found, unless
// we have attributes.
- if (TUK == TUK_Reference || TUK == TUK_Friend) {
+ if (TUK == TagUseKind::Reference || TUK == TagUseKind::Friend) {
if (!Attrs.empty()) {
// FIXME: Diagnose these attributes. For now, we create a new
// declaration to hold them.
- } else if (TUK == TUK_Reference &&
+ } else if (TUK == TagUseKind::Reference &&
(PrevTagDecl->getFriendObjectKind() ==
Decl::FOK_Undeclared ||
PrevDecl->getOwningModule() != getCurrentModule()) &&
@@ -17771,7 +17576,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
}
// Diagnose attempts to redefine a tag.
- if (TUK == TUK_Definition) {
+ if (TUK == TagUseKind::Definition) {
if (NamedDecl *Def = PrevTagDecl->getDefinition()) {
// If we're defining a specialization and the previous definition
// is from an implicit instantiation, don't emit an error
@@ -17812,7 +17617,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SkipBody->Previous = Def;
makeMergedDefinitionVisible(Hidden);
// Carry on and handle it like a normal definition. We'll
- // skip starting the definitiion later.
+ // skip starting the definition later.
}
} else if (!IsExplicitSpecializationAfterInstantiation) {
// A redeclaration in function prototype scope in C isn't
@@ -17851,7 +17656,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// Okay, we're going to make a redeclaration. If this is some kind
// of reference, make sure we build the redeclaration in the same DC
// as the original, and ignore the current access specifier.
- if (TUK == TUK_Friend || TUK == TUK_Reference) {
+ if (TUK == TagUseKind::Friend || TUK == TagUseKind::Reference) {
SearchDC = PrevTagDecl->getDeclContext();
AS = AS_none;
}
@@ -17877,7 +17682,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// Use a better diagnostic if an elaborated-type-specifier
// found the wrong kind of type on the first
// (non-redeclaration) lookup.
- if ((TUK == TUK_Reference || TUK == TUK_Friend) &&
+ if ((TUK == TagUseKind::Reference || TUK == TagUseKind::Friend) &&
!Previous.isForRedeclaration()) {
NonTagKind NTK = getNonTagTypeDeclKind(PrevDecl, Kind);
Diag(NameLoc, diag::err_tag_reference_non_tag)
@@ -17891,7 +17696,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// do nothing
// Diagnose implicit declarations introduced by elaborated types.
- } else if (TUK == TUK_Reference || TUK == TUK_Friend) {
+ } else if (TUK == TagUseKind::Reference || TUK == TagUseKind::Friend) {
NonTagKind NTK = getNonTagTypeDeclKind(PrevDecl, Kind);
Diag(NameLoc, diag::err_tag_reference_conflict) << NTK;
Diag(PrevDecl->getLocation(), diag::note_previous_decl) << PrevDecl;
@@ -17950,7 +17755,7 @@ CreateNewDecl:
StdAlignValT = cast<EnumDecl>(New);
// If this is an undefined enum, warn.
- if (TUK != TUK_Definition && !Invalid) {
+ if (TUK != TagUseKind::Definition && !Invalid) {
TagDecl *Def;
if (IsFixed && cast<EnumDecl>(New)->isFixed()) {
// C++0x: 7.2p2: opaque-enum-declaration.
@@ -17999,20 +17804,23 @@ CreateNewDecl:
cast_or_null<RecordDecl>(PrevDecl));
}
- if (OOK != OOK_Outside && TUK == TUK_Definition && !getLangOpts().CPlusPlus)
+ // Only C23 and later allow defining new types in 'offsetof()'.
+ if (OOK != OOK_Outside && TUK == TagUseKind::Definition &&
+ !getLangOpts().CPlusPlus && !getLangOpts().C23)
Diag(New->getLocation(), diag::ext_type_defined_in_offsetof)
<< (OOK == OOK_Macro) << New->getSourceRange();
// C++11 [dcl.type]p3:
// A type-specifier-seq shall not define a class or enumeration [...].
if (!Invalid && getLangOpts().CPlusPlus &&
- (IsTypeSpecifier || IsTemplateParamOrArg) && TUK == TUK_Definition) {
+ (IsTypeSpecifier || IsTemplateParamOrArg) &&
+ TUK == TagUseKind::Definition) {
Diag(New->getLocation(), diag::err_type_defined_in_type_specifier)
<< Context.getTagDeclType(New);
Invalid = true;
}
- if (!Invalid && getLangOpts().CPlusPlus && TUK == TUK_Definition &&
+ if (!Invalid && getLangOpts().CPlusPlus && TUK == TagUseKind::Definition &&
DC->getDeclKind() == Decl::Enum) {
Diag(New->getLocation(), diag::err_type_defined_in_enum)
<< Context.getTagDeclType(New);
@@ -18024,8 +17832,9 @@ CreateNewDecl:
if (SS.isSet()) {
// If this is either a declaration or a definition, check the
// nested-name-specifier against the current context.
- if ((TUK == TUK_Definition || TUK == TUK_Declaration) &&
+ if ((TUK == TagUseKind::Definition || TUK == TagUseKind::Declaration) &&
diagnoseQualifiedDeclaration(SS, DC, OrigName, Loc,
+ /*TemplateId=*/nullptr,
isMemberSpecialization))
Invalid = true;
@@ -18048,7 +17857,7 @@ CreateNewDecl:
// many points during the parsing of a struct declaration (because
// the #pragma tokens are effectively skipped over during the
// parsing of the struct).
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
+ if (TUK == TagUseKind::Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
AddAlignmentAttributesForRecord(RD);
AddMsStructLayoutForRecord(RD);
}
@@ -18079,7 +17888,7 @@ CreateNewDecl:
if (getLangOpts().CPlusPlus) {
// C++ [dcl.fct]p6:
// Types shall not be defined in return or parameter types.
- if (TUK == TUK_Definition && !IsTypeSpecifier) {
+ if (TUK == TagUseKind::Definition && !IsTypeSpecifier) {
Diag(Loc, diag::err_type_defined_in_param_type)
<< Name;
Invalid = true;
@@ -18100,7 +17909,7 @@ CreateNewDecl:
// In Microsoft mode, a friend declaration also acts as a forward
// declaration so we always pass true to setObjectOfFriendDecl to make
// the tag name visible.
- if (TUK == TUK_Friend)
+ if (TUK == TagUseKind::Friend)
New->setObjectOfFriendDecl(getLangOpts().MSVCCompat);
// Set the access specifier.
@@ -18110,14 +17919,14 @@ CreateNewDecl:
if (PrevDecl)
CheckRedeclarationInModule(New, PrevDecl);
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip))
+ if (TUK == TagUseKind::Definition && (!SkipBody || !SkipBody->ShouldSkip))
New->startDefinition();
ProcessDeclAttributeList(S, New, Attrs);
AddPragmaAttributes(S, New);
// If this has an identifier, add it to the scope stack.
- if (TUK == TUK_Friend) {
+ if (TUK == TagUseKind::Friend) {
// We might be replacing an existing declaration in the lookup tables;
// if so, borrow its access specifier.
if (PrevDecl)
@@ -18145,8 +17954,10 @@ CreateNewDecl:
if (PrevDecl)
mergeDeclAttributes(New, PrevDecl);
- if (auto *CXXRD = dyn_cast<CXXRecordDecl>(New))
+ if (auto *CXXRD = dyn_cast<CXXRecordDecl>(New)) {
inferGslOwnerPointerAttribute(CXXRD);
+ inferNullableClassAttribute(CXXRD);
+ }
// If there's a #pragma GCC visibility in scope, set the visibility of this
// record.
@@ -18193,12 +18004,6 @@ bool Sema::ActOnDuplicateDefinition(Decl *Prev, SkipBodyInfo &SkipBody) {
return true;
}
-void Sema::ActOnObjCContainerStartDefinition(ObjCContainerDecl *IDecl) {
- assert(IDecl->getLexicalParent() == CurContext &&
- "The next DeclContext should be lexically contained in the current one.");
- CurContext = IDecl;
-}
-
void Sema::ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagD,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
@@ -18269,6 +18074,15 @@ void Sema::ActOnTagFinishDefinition(Scope *S, Decl *TagD,
if (NumInitMethods > 1 || !Def->hasInitMethod())
Diag(RD->getLocation(), diag::err_sycl_special_type_num_init_method);
}
+
+ // If we're defining a dynamic class in a module interface unit, we always
+ // need to produce the vtable for it, even if the vtable is not used in the
+ // current TU.
+ //
+ // The case where the current class is not dynamic is handled in
+ // MarkVTableUsed.
+ if (getCurrentModule() && getCurrentModule()->isInterfaceOrPartition())
+ MarkVTableUsed(RD->getLocation(), RD, /*DefinitionRequired=*/true);
}
// Exit this scope of this tag's definition.
@@ -18300,22 +18114,6 @@ void Sema::ActOnTagFinishDefinition(Scope *S, Decl *TagD,
}
}
-void Sema::ActOnObjCContainerFinishDefinition() {
- // Exit this scope of this interface definition.
- PopDeclContext();
-}
-
-void Sema::ActOnObjCTemporaryExitContainerContext(ObjCContainerDecl *ObjCCtx) {
- assert(ObjCCtx == CurContext && "Mismatch of container contexts");
- OriginalLexicalContext = ObjCCtx;
- ActOnObjCContainerFinishDefinition();
-}
-
-void Sema::ActOnObjCReenterContainerContext(ObjCContainerDecl *ObjCCtx) {
- ActOnObjCContainerStartDefinition(ObjCCtx);
- OriginalLexicalContext = nullptr;
-}
-
void Sema::ActOnTagDefinitionError(Scope *S, Decl *TagD) {
AdjustDeclIfTemplate(TagD);
TagDecl *Tag = cast<TagDecl>(TagD);
@@ -18336,8 +18134,9 @@ void Sema::ActOnTagDefinitionError(Scope *S, Decl *TagD) {
// Note that FieldName may be null for anonymous bitfields.
ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
- IdentifierInfo *FieldName, QualType FieldTy,
- bool IsMsStruct, Expr *BitWidth) {
+ const IdentifierInfo *FieldName,
+ QualType FieldTy, bool IsMsStruct,
+ Expr *BitWidth) {
assert(BitWidth);
if (BitWidth->containsErrors())
return ExprError();
@@ -18422,8 +18221,6 @@ ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
return BitWidth;
}
-/// ActOnField - Each field of a C struct/union is passed into this in order
-/// to create a FieldDecl object for it.
Decl *Sema::ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth) {
FieldDecl *Res = HandleField(S, cast_if_present<RecordDecl>(TagD), DeclStart,
@@ -18432,8 +18229,6 @@ Decl *Sema::ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
return Res;
}
-/// HandleField - Analyze a field of a C struct or a C++ data member.
-///
FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
SourceLocation DeclStart,
Declarator &D, Expr *BitWidth,
@@ -18446,7 +18241,7 @@ FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
return nullptr;
}
- IdentifierInfo *II = D.getIdentifier();
+ const IdentifierInfo *II = D.getIdentifier();
SourceLocation Loc = DeclStart;
if (II) Loc = D.getIdentifierLoc();
@@ -18476,7 +18271,7 @@ FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
// Check to see if this name was declared as a member previously
NamedDecl *PrevDecl = nullptr;
LookupResult Previous(*this, II, Loc, LookupMemberName,
- ForVisibleRedeclaration);
+ RedeclarationKind::ForVisibleRedeclaration);
LookupName(Previous, S);
switch (Previous.getResultKind()) {
case LookupResult::Found:
@@ -18529,16 +18324,6 @@ FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
return NewFD;
}
-/// Build a new FieldDecl and check its well-formedness.
-///
-/// This routine builds a new FieldDecl given the fields name, type,
-/// record, etc. \p PrevDecl should refer to any previous declaration
-/// with the same name and in the same scope as the field to be
-/// created.
-///
-/// \returns a new FieldDecl.
-///
-/// \todo The Declarator argument is a hack. It will be removed once
FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
@@ -18547,7 +18332,7 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D) {
- IdentifierInfo *II = Name.getAsIdentifierInfo();
+ const IdentifierInfo *II = Name.getAsIdentifierInfo();
bool InvalidDecl = false;
if (D) InvalidDecl = D->isInvalidType();
@@ -18714,7 +18499,7 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
// In auto-retain/release, infer strong retension for fields of
// retainable type.
- if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewFD))
+ if (getLangOpts().ObjCAutoRefCount && ObjC().inferObjCARCLifetime(NewFD))
NewFD->setInvalidDecl();
if (T.isObjCGCWeak())
@@ -18722,7 +18507,7 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
// PPC MMA non-pointer types are not allowed as field types.
if (Context.getTargetInfo().getTriple().isPPC64() &&
- CheckPPCMMAType(T, NewFD->getLocation()))
+ PPC().CheckPPCMMAType(T, NewFD->getLocation()))
NewFD->setInvalidDecl();
NewFD->setAccess(AS);
@@ -18744,22 +18529,22 @@ bool Sema::CheckNontrivialField(FieldDecl *FD) {
// because otherwise we'll never get complaints about
// copy constructors.
- CXXSpecialMember member = CXXInvalid;
+ CXXSpecialMemberKind member = CXXSpecialMemberKind::Invalid;
// We're required to check for any non-trivial constructors. Since the
// implicit default constructor is suppressed if there are any
// user-declared constructors, we just need to check that there is a
// trivial default constructor and a trivial copy constructor. (We don't
// worry about move constructors here, since this is a C++98 check.)
if (RDecl->hasNonTrivialCopyConstructor())
- member = CXXCopyConstructor;
+ member = CXXSpecialMemberKind::CopyConstructor;
else if (!RDecl->hasTrivialDefaultConstructor())
- member = CXXDefaultConstructor;
+ member = CXXSpecialMemberKind::DefaultConstructor;
else if (RDecl->hasNonTrivialCopyAssignment())
- member = CXXCopyAssignment;
+ member = CXXSpecialMemberKind::CopyAssignment;
else if (RDecl->hasNonTrivialDestructor())
- member = CXXDestructor;
+ member = CXXSpecialMemberKind::Destructor;
- if (member != CXXInvalid) {
+ if (member != CXXSpecialMemberKind::Invalid) {
if (!getLangOpts().CPlusPlus11 &&
getLangOpts().ObjCAutoRefCount && RDecl->hasObjectMember()) {
// Objective-C++ ARC: it is an error to have a non-trivial field of
@@ -18776,10 +18561,13 @@ bool Sema::CheckNontrivialField(FieldDecl *FD) {
}
}
- Diag(FD->getLocation(), getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_nontrivial_union_or_anon_struct_member :
- diag::err_illegal_union_or_anon_struct_member)
- << FD->getParent()->isUnion() << FD->getDeclName() << member;
+ Diag(
+ FD->getLocation(),
+ getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_nontrivial_union_or_anon_struct_member
+ : diag::err_illegal_union_or_anon_struct_member)
+ << FD->getParent()->isUnion() << FD->getDeclName()
+ << llvm::to_underlying(member);
DiagnoseNontrivial(RDecl, member);
return !getLangOpts().CPlusPlus11;
}
@@ -18789,135 +18577,6 @@ bool Sema::CheckNontrivialField(FieldDecl *FD) {
return false;
}
-/// TranslateIvarVisibility - Translate visibility from a token ID to an
-/// AST enum value.
-static ObjCIvarDecl::AccessControl
-TranslateIvarVisibility(tok::ObjCKeywordKind ivarVisibility) {
- switch (ivarVisibility) {
- default: llvm_unreachable("Unknown visitibility kind");
- case tok::objc_private: return ObjCIvarDecl::Private;
- case tok::objc_public: return ObjCIvarDecl::Public;
- case tok::objc_protected: return ObjCIvarDecl::Protected;
- case tok::objc_package: return ObjCIvarDecl::Package;
- }
-}
-
-/// ActOnIvar - Each ivar field of an objective-c class is passed into this
-/// in order to create an IvarDecl object for it.
-Decl *Sema::ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D,
- Expr *BitWidth, tok::ObjCKeywordKind Visibility) {
-
- IdentifierInfo *II = D.getIdentifier();
- SourceLocation Loc = DeclStart;
- if (II) Loc = D.getIdentifierLoc();
-
- // FIXME: Unnamed fields can be handled in various different ways, for
- // example, unnamed unions inject all members into the struct namespace!
-
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
- QualType T = TInfo->getType();
-
- if (BitWidth) {
- // 6.7.2.1p3, 6.7.2.1p4
- BitWidth = VerifyBitField(Loc, II, T, /*IsMsStruct*/false, BitWidth).get();
- if (!BitWidth)
- D.setInvalidType();
- } else {
- // Not a bitfield.
-
- // validate II.
-
- }
- if (T->isReferenceType()) {
- Diag(Loc, diag::err_ivar_reference_type);
- D.setInvalidType();
- }
- // C99 6.7.2.1p8: A member of a structure or union may have any type other
- // than a variably modified type.
- else if (T->isVariablyModifiedType()) {
- if (!tryToFixVariablyModifiedVarType(
- TInfo, T, Loc, diag::err_typecheck_ivar_variable_size))
- D.setInvalidType();
- }
-
- // Get the visibility (access control) for this ivar.
- ObjCIvarDecl::AccessControl ac =
- Visibility != tok::objc_not_keyword ? TranslateIvarVisibility(Visibility)
- : ObjCIvarDecl::None;
- // Must set ivar's DeclContext to its enclosing interface.
- ObjCContainerDecl *EnclosingDecl = cast<ObjCContainerDecl>(CurContext);
- if (!EnclosingDecl || EnclosingDecl->isInvalidDecl())
- return nullptr;
- ObjCContainerDecl *EnclosingContext;
- if (ObjCImplementationDecl *IMPDecl =
- dyn_cast<ObjCImplementationDecl>(EnclosingDecl)) {
- if (LangOpts.ObjCRuntime.isFragile()) {
- // Case of ivar declared in an implementation. Context is that of its class.
- EnclosingContext = IMPDecl->getClassInterface();
- assert(EnclosingContext && "Implementation has no class interface!");
- }
- else
- EnclosingContext = EnclosingDecl;
- } else {
- if (ObjCCategoryDecl *CDecl =
- dyn_cast<ObjCCategoryDecl>(EnclosingDecl)) {
- if (LangOpts.ObjCRuntime.isFragile() || !CDecl->IsClassExtension()) {
- Diag(Loc, diag::err_misplaced_ivar) << CDecl->IsClassExtension();
- return nullptr;
- }
- }
- EnclosingContext = EnclosingDecl;
- }
-
- // Construct the decl.
- ObjCIvarDecl *NewID = ObjCIvarDecl::Create(
- Context, EnclosingContext, DeclStart, Loc, II, T, TInfo, ac, BitWidth);
-
- if (T->containsErrors())
- NewID->setInvalidDecl();
-
- if (II) {
- NamedDecl *PrevDecl = LookupSingleName(S, II, Loc, LookupMemberName,
- ForVisibleRedeclaration);
- if (PrevDecl && isDeclInScope(PrevDecl, EnclosingContext, S)
- && !isa<TagDecl>(PrevDecl)) {
- Diag(Loc, diag::err_duplicate_member) << II;
- Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
- NewID->setInvalidDecl();
- }
- }
-
- // Process attributes attached to the ivar.
- ProcessDeclAttributes(S, NewID, D);
-
- if (D.isInvalidType())
- NewID->setInvalidDecl();
-
- // In ARC, infer 'retaining' for ivars of retainable type.
- if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewID))
- NewID->setInvalidDecl();
-
- if (D.getDeclSpec().isModulePrivateSpecified())
- NewID->setModulePrivate();
-
- if (II) {
- // FIXME: When interfaces are DeclContexts, we'll need to add
- // these to the interface.
- S->AddDecl(NewID);
- IdResolver.AddDecl(NewID);
- }
-
- if (LangOpts.ObjCRuntime.isNonFragile() &&
- !NewID->isInvalidDecl() && isa<ObjCInterfaceDecl>(EnclosingDecl))
- Diag(Loc, diag::warn_ivars_in_interface);
-
- return NewID;
-}
-
-/// ActOnLastBitfield - This routine handles synthesized bitfields rules for
-/// class and class extensions. For every class \@interface and class
-/// extension \@interface, if the last ivar is a bitfield of any type,
-/// then add an implicit `char :0` ivar to the end of that interface.
void Sema::ActOnLastBitfield(SourceLocation DeclLoc,
SmallVectorImpl<Decl *> &AllIvarDecls) {
if (LangOpts.ObjCRuntime.isFragile() || AllIvarDecls.empty())
@@ -19029,10 +18688,10 @@ static void ComputeSelectedDestructor(Sema &S, CXXRecordDecl *Record) {
static bool AreSpecialMemberFunctionsSameKind(ASTContext &Context,
CXXMethodDecl *M1,
CXXMethodDecl *M2,
- Sema::CXXSpecialMember CSM) {
+ CXXSpecialMemberKind CSM) {
// We don't want to compare templates to non-templates: See
// https://github.com/llvm/llvm-project/issues/59206
- if (CSM == Sema::CXXDefaultConstructor)
+ if (CSM == CXXSpecialMemberKind::DefaultConstructor)
return bool(M1->getDescribedFunctionTemplate()) ==
bool(M2->getDescribedFunctionTemplate());
// FIXME: better resolve CWG
@@ -19055,7 +18714,7 @@ static bool AreSpecialMemberFunctionsSameKind(ASTContext &Context,
/// [CWG2595], if any, are satisfied is more constrained.
static void SetEligibleMethods(Sema &S, CXXRecordDecl *Record,
ArrayRef<CXXMethodDecl *> Methods,
- Sema::CXXSpecialMember CSM) {
+ CXXSpecialMemberKind CSM) {
SmallVector<bool, 4> SatisfactionStatus;
for (CXXMethodDecl *Method : Methods) {
@@ -19113,7 +18772,8 @@ static void SetEligibleMethods(Sema &S, CXXRecordDecl *Record,
// DR1734 and DR1496.
if (!AnotherMethodIsMoreConstrained) {
Method->setIneligibleOrNotSelected(false);
- Record->addedEligibleSpecialMemberFunction(Method, 1 << CSM);
+ Record->addedEligibleSpecialMemberFunction(Method,
+ 1 << llvm::to_underlying(CSM));
}
}
}
@@ -19152,13 +18812,15 @@ static void ComputeSpecialMemberFunctionsEligiblity(Sema &S,
}
SetEligibleMethods(S, Record, DefaultConstructors,
- Sema::CXXDefaultConstructor);
- SetEligibleMethods(S, Record, CopyConstructors, Sema::CXXCopyConstructor);
- SetEligibleMethods(S, Record, MoveConstructors, Sema::CXXMoveConstructor);
+ CXXSpecialMemberKind::DefaultConstructor);
+ SetEligibleMethods(S, Record, CopyConstructors,
+ CXXSpecialMemberKind::CopyConstructor);
+ SetEligibleMethods(S, Record, MoveConstructors,
+ CXXSpecialMemberKind::MoveConstructor);
SetEligibleMethods(S, Record, CopyAssignmentOperators,
- Sema::CXXCopyAssignment);
+ CXXSpecialMemberKind::CopyAssignment);
SetEligibleMethods(S, Record, MoveAssignmentOperators,
- Sema::CXXMoveAssignment);
+ CXXSpecialMemberKind::MoveAssignment);
}
void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
@@ -19257,15 +18919,11 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
} else if (Record->isUnion())
DiagID = getLangOpts().MicrosoftExt
? diag::ext_flexible_array_union_ms
- : getLangOpts().CPlusPlus
- ? diag::ext_flexible_array_union_gnu
- : diag::err_flexible_array_union;
+ : diag::ext_flexible_array_union_gnu;
else if (NumNamedMembers < 1)
DiagID = getLangOpts().MicrosoftExt
? diag::ext_flexible_array_empty_aggregate_ms
- : getLangOpts().CPlusPlus
- ? diag::ext_flexible_array_empty_aggregate_gnu
- : diag::err_flexible_array_empty_aggregate;
+ : diag::ext_flexible_array_empty_aggregate_gnu;
if (DiagID)
Diag(FD->getLocation(), DiagID)
@@ -19421,6 +19079,13 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
// Okay, we successfully defined 'Record'.
if (Record) {
bool Completed = false;
+ if (S) {
+ Scope *Parent = S->getParent();
+ if (Parent && Parent->isTypeAliasScope() &&
+ Parent->isTemplateParamScope())
+ Record->setInvalidDecl();
+ }
+
if (CXXRecord) {
if (!CXXRecord->isInvalidDecl()) {
// Set access bits correctly on the directly-declared conversions.
@@ -19531,7 +19196,7 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
if (CXXRecord) {
auto *Dtor = CXXRecord->getDestructor();
if (Dtor && Dtor->isImplicit() &&
- ShouldDeleteSpecialMember(Dtor, CXXDestructor)) {
+ ShouldDeleteSpecialMember(Dtor, CXXSpecialMemberKind::Destructor)) {
CXXRecord->setImplicitDestructorIsDeleted();
SetDeclDeleted(Dtor, CXXRecord->getLocation());
}
@@ -19568,7 +19233,7 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
E = Record->field_end();
(NonBitFields == 0 || ZeroSize) && I != E; ++I) {
IsEmpty = false;
- if (I->isUnnamedBitfield()) {
+ if (I->isUnnamedBitField()) {
if (!I->isZeroLengthBitField(Context))
ZeroSize = false;
} else {
@@ -19611,7 +19276,7 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
// Must enforce the rule that ivars in the base classes may not be
// duplicates.
if (ID->getSuperClass())
- DiagnoseDuplicateIvars(ID, ID->getSuperClass());
+ ObjC().DiagnoseDuplicateIvars(ID, ID->getSuperClass());
} else if (ObjCImplementationDecl *IMPDecl =
dyn_cast<ObjCImplementationDecl>(EnclosingDecl)) {
assert(IMPDecl && "ActOnFields - missing ObjCImplementationDecl");
@@ -19619,7 +19284,8 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
// Ivar declared in @implementation never belongs to the implementation.
// Only it is in implementation's lexical context.
ClsFields[I]->setLexicalDeclContext(IMPDecl);
- CheckImplementationIvars(IMPDecl, ClsFields, RecFields.size(), RBrac);
+ ObjC().CheckImplementationIvars(IMPDecl, ClsFields, RecFields.size(),
+ RBrac);
IMPDecl->setIvarLBraceLoc(LBrac);
IMPDecl->setIvarRBraceLoc(RBrac);
} else if (ObjCCategoryDecl *CDecl =
@@ -19657,6 +19323,7 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
CDecl->setIvarRBraceLoc(RBrac);
}
}
+ ProcessAPINotes(Record);
}
/// Determine whether the given integral value is representable within
@@ -19883,7 +19550,7 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
Val, EnumVal);
}
-Sema::SkipBodyInfo Sema::shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
+SkipBodyInfo Sema::shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc) {
if (!(getLangOpts().Modules || getLangOpts().ModulesLocalVisibility) ||
!getLangOpts().CPlusPlus)
@@ -19923,7 +19590,8 @@ Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
// Verify that there isn't already something declared with this name in this
// scope.
- LookupResult R(*this, Id, IdLoc, LookupOrdinaryName, ForVisibleRedeclaration);
+ LookupResult R(*this, Id, IdLoc, LookupOrdinaryName,
+ RedeclarationKind::ForVisibleRedeclaration);
LookupName(R, S);
NamedDecl *PrevDecl = R.getAsSingle<NamedDecl>();
@@ -19971,6 +19639,7 @@ Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
// Process attributes.
ProcessDeclAttributeList(S, New, Attrs);
AddPragmaAttributes(S, New);
+ ProcessAPINotes(New);
// Register this decl in the current scope stack.
New->setAccess(TheEnumDecl->getAccess());
@@ -20169,6 +19838,7 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
QualType EnumType = Context.getTypeDeclType(Enum);
ProcessDeclAttributeList(S, Enum, Attrs);
+ ProcessAPINotes(Enum);
if (Enum->isDependentType()) {
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
@@ -20307,8 +19977,13 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
? Context.UnsignedLongTy : Context.LongTy;
} else {
BestWidth = Context.getTargetInfo().getLongLongWidth();
- assert(NumPositiveBits <= BestWidth &&
- "How could an initializer get larger than ULL?");
+ if (NumPositiveBits > BestWidth) {
+ // This can happen with bit-precise integer types, but those are not
+ // allowed as the type for an enumerator per C23 6.7.2.2p4 and p12.
+ // FIXME: GCC uses __int128_t and __uint128_t for cases that fit within
+ // a 128-bit integer, we should consider doing the same.
+ Diag(Enum->getLocation(), diag::ext_enum_too_large);
+ }
BestType = Context.UnsignedLongLongTy;
BestPromotionType
= (NumPositiveBits == BestWidth || !getLangOpts().CPlusPlus)
@@ -20411,12 +20086,22 @@ Decl *Sema::ActOnFileScopeAsmDecl(Expr *expr,
return New;
}
-Decl *Sema::ActOnTopLevelStmtDecl(Stmt *Statement) {
- auto *New = TopLevelStmtDecl::Create(Context, Statement);
- Context.getTranslationUnitDecl()->addDecl(New);
+TopLevelStmtDecl *Sema::ActOnStartTopLevelStmtDecl(Scope *S) {
+ auto *New = TopLevelStmtDecl::Create(Context, /*Statement=*/nullptr);
+ CurContext->addDecl(New);
+ PushDeclContext(S, New);
+ PushFunctionScope();
+ PushCompoundScope(false);
return New;
}
+void Sema::ActOnFinishTopLevelStmtDecl(TopLevelStmtDecl *D, Stmt *Statement) {
+ D->setStmt(Statement);
+ PopCompoundScope();
+ PopFunctionScopeInfo();
+ PopDeclContext();
+}
+
void Sema::ActOnPragmaRedefineExtname(IdentifierInfo* Name,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
@@ -20474,10 +20159,6 @@ void Sema::ActOnPragmaWeakAlias(IdentifierInfo* Name,
}
}
-ObjCContainerDecl *Sema::getObjCDeclContext() const {
- return (dyn_cast_or_null<ObjCContainerDecl>(CurContext));
-}
-
Sema::FunctionEmissionStatus Sema::getEmissionStatus(const FunctionDecl *FD,
bool Final) {
assert(FD && "Expected non-null FunctionDecl");
@@ -20516,7 +20197,7 @@ Sema::FunctionEmissionStatus Sema::getEmissionStatus(const FunctionDecl *FD,
return FunctionEmissionStatus::OMPDiscarded;
// If we have an explicit value for the device type, or we are in a target
// declare context, we need to emit all extern and used symbols.
- if (isInOpenMPDeclareTargetContext() || DevTy)
+ if (OpenMP().isInOpenMPDeclareTargetContext() || DevTy)
if (IsEmittedForExternalSymbol())
return FunctionEmissionStatus::Emitted;
// Device mode only emits what it must, if it wasn't tagged yet and needed,
@@ -20526,7 +20207,7 @@ Sema::FunctionEmissionStatus Sema::getEmissionStatus(const FunctionDecl *FD,
} else if (LangOpts.OpenMP > 45) {
// In OpenMP host compilation prior to 5.0 everything was an emitted host
// function. In 5.0, no_host was introduced which might cause a function to
- // be ommitted.
+ // be omitted.
std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
OMPDeclareTargetDeclAttr::getDeviceType(FD->getCanonicalDecl());
if (DevTy)
@@ -20542,11 +20223,11 @@ Sema::FunctionEmissionStatus Sema::getEmissionStatus(const FunctionDecl *FD,
// when compiling for host, device and global functions are never emitted.
// (Technically, we do emit a host-side stub for global functions, but this
// doesn't count for our purposes here.)
- Sema::CUDAFunctionTarget T = IdentifyCUDATarget(FD);
- if (LangOpts.CUDAIsDevice && T == Sema::CFT_Host)
+ CUDAFunctionTarget T = CUDA().IdentifyTarget(FD);
+ if (LangOpts.CUDAIsDevice && T == CUDAFunctionTarget::Host)
return FunctionEmissionStatus::CUDADiscarded;
if (!LangOpts.CUDAIsDevice &&
- (T == Sema::CFT_Device || T == Sema::CFT_Global))
+ (T == CUDAFunctionTarget::Device || T == CUDAFunctionTarget::Global))
return FunctionEmissionStatus::CUDADiscarded;
if (IsEmittedForExternalSymbol())
@@ -20567,5 +20248,61 @@ bool Sema::shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee) {
// for host, only HD functions actually called from the host get marked as
// known-emitted.
return LangOpts.CUDA && !LangOpts.CUDAIsDevice &&
- IdentifyCUDATarget(Callee) == CFT_Global;
+ CUDA().IdentifyTarget(Callee) == CUDAFunctionTarget::Global;
+}
+
+void Sema::diagnoseFunctionEffectMergeConflicts(
+ const FunctionEffectSet::Conflicts &Errs, SourceLocation NewLoc,
+ SourceLocation OldLoc) {
+ for (const FunctionEffectSet::Conflict &Conflict : Errs) {
+ Diag(NewLoc, diag::warn_conflicting_func_effects)
+ << Conflict.Kept.description() << Conflict.Rejected.description();
+ Diag(OldLoc, diag::note_previous_declaration);
+ }
+}
+
+bool Sema::diagnoseConflictingFunctionEffect(
+ const FunctionEffectsRef &FX, const FunctionEffectWithCondition &NewEC,
+ SourceLocation NewAttrLoc) {
+ // If the new effect has a condition, we can't detect conflicts until the
+ // condition is resolved.
+ if (NewEC.Cond.getCondition() != nullptr)
+ return false;
+
+ // Diagnose the new attribute as incompatible with a previous one.
+ auto Incompatible = [&](const FunctionEffectWithCondition &PrevEC) {
+ Diag(NewAttrLoc, diag::err_attributes_are_not_compatible)
+ << ("'" + NewEC.description() + "'")
+ << ("'" + PrevEC.description() + "'") << false;
+ // We don't necessarily have the location of the previous attribute,
+ // so no note.
+ return true;
+ };
+
+ // Compare against previous attributes.
+ FunctionEffect::Kind NewKind = NewEC.Effect.kind();
+
+ for (const FunctionEffectWithCondition &PrevEC : FX) {
+ // Again, can't check yet when the effect is conditional.
+ if (PrevEC.Cond.getCondition() != nullptr)
+ continue;
+
+ FunctionEffect::Kind PrevKind = PrevEC.Effect.kind();
+ // Note that we allow PrevKind == NewKind; it's redundant and ignored.
+
+ if (PrevEC.Effect.oppositeKind() == NewKind)
+ return Incompatible(PrevEC);
+
+ // A new allocating is incompatible with a previous nonblocking.
+ if (PrevKind == FunctionEffect::Kind::NonBlocking &&
+ NewKind == FunctionEffect::Kind::Allocating)
+ return Incompatible(PrevEC);
+
+ // A new nonblocking is incompatible with a previous allocating.
+ if (PrevKind == FunctionEffect::Kind::Allocating &&
+ NewKind == FunctionEffect::Kind::NonBlocking)
+ return Incompatible(PrevEC);
+ }
+
+ return false;
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
index 6f462de4be78..e2eada24f9fc 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
@@ -26,12 +26,14 @@
#include "clang/Basic/Cuda.h"
#include "clang/Basic/DarwinSDKInfo.h"
#include "clang/Basic/HLSLRuntime.h"
+#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Attr.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/Initialization.h"
@@ -39,14 +41,34 @@
#include "clang/Sema/ParsedAttr.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaAMDGPU.h"
+#include "clang/Sema/SemaARM.h"
+#include "clang/Sema/SemaAVR.h"
+#include "clang/Sema/SemaBPF.h"
+#include "clang/Sema/SemaCUDA.h"
+#include "clang/Sema/SemaHLSL.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaM68k.h"
+#include "clang/Sema/SemaMIPS.h"
+#include "clang/Sema/SemaMSP430.h"
+#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaOpenCL.h"
+#include "clang/Sema/SemaOpenMP.h"
+#include "clang/Sema/SemaRISCV.h"
+#include "clang/Sema/SemaSYCL.h"
+#include "clang/Sema/SemaSwift.h"
+#include "clang/Sema/SemaWasm.h"
+#include "clang/Sema/SemaX86.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Demangle/Demangle.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Triple.h"
#include <optional>
using namespace clang;
@@ -60,192 +82,12 @@ namespace AttributeLangSupport {
};
} // end namespace AttributeLangSupport
-//===----------------------------------------------------------------------===//
-// Helper functions
-//===----------------------------------------------------------------------===//
-
-/// isFunctionOrMethod - Return true if the given decl has function
-/// type (function or function-typed variable) or an Objective-C
-/// method.
-static bool isFunctionOrMethod(const Decl *D) {
- return (D->getFunctionType() != nullptr) || isa<ObjCMethodDecl>(D);
-}
-
-/// Return true if the given decl has function type (function or
-/// function-typed variable) or an Objective-C method or a block.
-static bool isFunctionOrMethodOrBlock(const Decl *D) {
- return isFunctionOrMethod(D) || isa<BlockDecl>(D);
-}
-
-/// Return true if the given decl has a declarator that should have
-/// been processed by Sema::GetTypeForDeclarator.
-static bool hasDeclarator(const Decl *D) {
- // In some sense, TypedefDecl really *ought* to be a DeclaratorDecl.
- return isa<DeclaratorDecl>(D) || isa<BlockDecl>(D) || isa<TypedefNameDecl>(D) ||
- isa<ObjCPropertyDecl>(D);
-}
-
-/// hasFunctionProto - Return true if the given decl has a argument
-/// information. This decl should have already passed
-/// isFunctionOrMethod or isFunctionOrMethodOrBlock.
-static bool hasFunctionProto(const Decl *D) {
- if (const FunctionType *FnTy = D->getFunctionType())
- return isa<FunctionProtoType>(FnTy);
- return isa<ObjCMethodDecl>(D) || isa<BlockDecl>(D);
-}
-
-/// getFunctionOrMethodNumParams - Return number of function or method
-/// parameters. It is an error to call this on a K&R function (use
-/// hasFunctionProto first).
-static unsigned getFunctionOrMethodNumParams(const Decl *D) {
- if (const FunctionType *FnTy = D->getFunctionType())
- return cast<FunctionProtoType>(FnTy)->getNumParams();
- if (const auto *BD = dyn_cast<BlockDecl>(D))
- return BD->getNumParams();
- return cast<ObjCMethodDecl>(D)->param_size();
-}
-
-static const ParmVarDecl *getFunctionOrMethodParam(const Decl *D,
- unsigned Idx) {
- if (const auto *FD = dyn_cast<FunctionDecl>(D))
- return FD->getParamDecl(Idx);
- if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
- return MD->getParamDecl(Idx);
- if (const auto *BD = dyn_cast<BlockDecl>(D))
- return BD->getParamDecl(Idx);
- return nullptr;
-}
-
-static QualType getFunctionOrMethodParamType(const Decl *D, unsigned Idx) {
- if (const FunctionType *FnTy = D->getFunctionType())
- return cast<FunctionProtoType>(FnTy)->getParamType(Idx);
- if (const auto *BD = dyn_cast<BlockDecl>(D))
- return BD->getParamDecl(Idx)->getType();
-
- return cast<ObjCMethodDecl>(D)->parameters()[Idx]->getType();
-}
-
-static SourceRange getFunctionOrMethodParamRange(const Decl *D, unsigned Idx) {
- if (auto *PVD = getFunctionOrMethodParam(D, Idx))
- return PVD->getSourceRange();
- return SourceRange();
-}
-
-static QualType getFunctionOrMethodResultType(const Decl *D) {
- if (const FunctionType *FnTy = D->getFunctionType())
- return FnTy->getReturnType();
- return cast<ObjCMethodDecl>(D)->getReturnType();
-}
-
-static SourceRange getFunctionOrMethodResultSourceRange(const Decl *D) {
- if (const auto *FD = dyn_cast<FunctionDecl>(D))
- return FD->getReturnTypeSourceRange();
- if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
- return MD->getReturnTypeSourceRange();
- return SourceRange();
-}
-
-static bool isFunctionOrMethodVariadic(const Decl *D) {
- if (const FunctionType *FnTy = D->getFunctionType())
- return cast<FunctionProtoType>(FnTy)->isVariadic();
- if (const auto *BD = dyn_cast<BlockDecl>(D))
- return BD->isVariadic();
- return cast<ObjCMethodDecl>(D)->isVariadic();
-}
-
-static bool isInstanceMethod(const Decl *D) {
- if (const auto *MethodDecl = dyn_cast<CXXMethodDecl>(D))
- return MethodDecl->isInstance();
- return false;
-}
-
-static inline bool isNSStringType(QualType T, ASTContext &Ctx,
- bool AllowNSAttributedString = false) {
- const auto *PT = T->getAs<ObjCObjectPointerType>();
- if (!PT)
- return false;
-
- ObjCInterfaceDecl *Cls = PT->getObjectType()->getInterface();
- if (!Cls)
- return false;
-
- IdentifierInfo* ClsName = Cls->getIdentifier();
-
- if (AllowNSAttributedString &&
- ClsName == &Ctx.Idents.get("NSAttributedString"))
- return true;
- // FIXME: Should we walk the chain of classes?
- return ClsName == &Ctx.Idents.get("NSString") ||
- ClsName == &Ctx.Idents.get("NSMutableString");
-}
-
-static inline bool isCFStringType(QualType T, ASTContext &Ctx) {
- const auto *PT = T->getAs<PointerType>();
- if (!PT)
- return false;
-
- const auto *RT = PT->getPointeeType()->getAs<RecordType>();
- if (!RT)
- return false;
-
- const RecordDecl *RD = RT->getDecl();
- if (RD->getTagKind() != TagTypeKind::Struct)
- return false;
-
- return RD->getIdentifier() == &Ctx.Idents.get("__CFString");
-}
-
static unsigned getNumAttributeArgs(const ParsedAttr &AL) {
// FIXME: Include the type in the argument list.
return AL.getNumArgs() + AL.hasParsedType();
}
-/// A helper function to provide Attribute Location for the Attr types
-/// AND the ParsedAttr.
-template <typename AttrInfo>
-static std::enable_if_t<std::is_base_of_v<Attr, AttrInfo>, SourceLocation>
-getAttrLoc(const AttrInfo &AL) {
- return AL.getLocation();
-}
-static SourceLocation getAttrLoc(const ParsedAttr &AL) { return AL.getLoc(); }
-
-/// If Expr is a valid integer constant, get the value of the integer
-/// expression and return success or failure. May output an error.
-///
-/// Negative argument is implicitly converted to unsigned, unless
-/// \p StrictlyUnsigned is true.
-template <typename AttrInfo>
-static bool checkUInt32Argument(Sema &S, const AttrInfo &AI, const Expr *Expr,
- uint32_t &Val, unsigned Idx = UINT_MAX,
- bool StrictlyUnsigned = false) {
- std::optional<llvm::APSInt> I = llvm::APSInt(32);
- if (Expr->isTypeDependent() ||
- !(I = Expr->getIntegerConstantExpr(S.Context))) {
- if (Idx != UINT_MAX)
- S.Diag(getAttrLoc(AI), diag::err_attribute_argument_n_type)
- << &AI << Idx << AANT_ArgumentIntegerConstant
- << Expr->getSourceRange();
- else
- S.Diag(getAttrLoc(AI), diag::err_attribute_argument_type)
- << &AI << AANT_ArgumentIntegerConstant << Expr->getSourceRange();
- return false;
- }
-
- if (!I->isIntN(32)) {
- S.Diag(Expr->getExprLoc(), diag::err_ice_too_large)
- << toString(*I, 10, false) << 32 << /* Unsigned */ 1;
- return false;
- }
-
- if (StrictlyUnsigned && I->isSigned() && I->isNegative()) {
- S.Diag(getAttrLoc(AI), diag::err_attribute_requires_positive_integer)
- << &AI << /*non-negative*/ 1;
- return false;
- }
-
- Val = (uint32_t)I->getZExtValue();
- return true;
-}
+SourceLocation Sema::getAttrLoc(const ParsedAttr &AL) { return AL.getLoc(); }
/// Wrapper around checkUInt32Argument, with an extra check to be sure
/// that the result will fit into a regular (signed) int. All args have the same
@@ -254,7 +96,7 @@ template <typename AttrInfo>
static bool checkPositiveIntArgument(Sema &S, const AttrInfo &AI, const Expr *Expr,
int &Val, unsigned Idx = UINT_MAX) {
uint32_t UVal;
- if (!checkUInt32Argument(S, AI, Expr, UVal, Idx))
+ if (!S.checkUInt32Argument(AI, Expr, UVal, Idx))
return false;
if (UVal > (uint32_t)std::numeric_limits<int>::max()) {
@@ -269,80 +111,6 @@ static bool checkPositiveIntArgument(Sema &S, const AttrInfo &AI, const Expr *Ex
return true;
}
-/// Diagnose mutually exclusive attributes when present on a given
-/// declaration. Returns true if diagnosed.
-template <typename AttrTy>
-static bool checkAttrMutualExclusion(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (const auto *A = D->getAttr<AttrTy>()) {
- S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)
- << AL << A
- << (AL.isRegularKeywordAttribute() || A->isRegularKeywordAttribute());
- S.Diag(A->getLocation(), diag::note_conflicting_attribute);
- return true;
- }
- return false;
-}
-
-template <typename AttrTy>
-static bool checkAttrMutualExclusion(Sema &S, Decl *D, const Attr &AL) {
- if (const auto *A = D->getAttr<AttrTy>()) {
- S.Diag(AL.getLocation(), diag::err_attributes_are_not_compatible)
- << &AL << A
- << (AL.isRegularKeywordAttribute() || A->isRegularKeywordAttribute());
- S.Diag(A->getLocation(), diag::note_conflicting_attribute);
- return true;
- }
- return false;
-}
-
-/// Check if IdxExpr is a valid parameter index for a function or
-/// instance method D. May output an error.
-///
-/// \returns true if IdxExpr is a valid index.
-template <typename AttrInfo>
-static bool checkFunctionOrMethodParameterIndex(
- Sema &S, const Decl *D, const AttrInfo &AI, unsigned AttrArgNum,
- const Expr *IdxExpr, ParamIdx &Idx, bool CanIndexImplicitThis = false) {
- assert(isFunctionOrMethodOrBlock(D));
-
- // In C++ the implicit 'this' function parameter also counts.
- // Parameters are counted from one.
- bool HP = hasFunctionProto(D);
- bool HasImplicitThisParam = isInstanceMethod(D);
- bool IV = HP && isFunctionOrMethodVariadic(D);
- unsigned NumParams =
- (HP ? getFunctionOrMethodNumParams(D) : 0) + HasImplicitThisParam;
-
- std::optional<llvm::APSInt> IdxInt;
- if (IdxExpr->isTypeDependent() ||
- !(IdxInt = IdxExpr->getIntegerConstantExpr(S.Context))) {
- S.Diag(getAttrLoc(AI), diag::err_attribute_argument_n_type)
- << &AI << AttrArgNum << AANT_ArgumentIntegerConstant
- << IdxExpr->getSourceRange();
- return false;
- }
-
- unsigned IdxSource = IdxInt->getLimitedValue(UINT_MAX);
- if (IdxSource < 1 || (!IV && IdxSource > NumParams)) {
- S.Diag(getAttrLoc(AI), diag::err_attribute_argument_out_of_bounds)
- << &AI << AttrArgNum << IdxExpr->getSourceRange();
- return false;
- }
- if (HasImplicitThisParam && !CanIndexImplicitThis) {
- if (IdxSource == 1) {
- S.Diag(getAttrLoc(AI), diag::err_attribute_invalid_implicit_this_argument)
- << &AI << IdxExpr->getSourceRange();
- return false;
- }
- }
-
- Idx = ParamIdx(IdxSource, D);
- return true;
-}
-
-/// Check if the argument \p E is a ASCII string literal. If not emit an error
-/// and return false, otherwise set \p Str to the value of the string literal
-/// and return true.
bool Sema::checkStringLiteralArgumentAttr(const AttributeCommonInfo &CI,
const Expr *E, StringRef &Str,
SourceLocation *ArgLocation) {
@@ -360,10 +128,6 @@ bool Sema::checkStringLiteralArgumentAttr(const AttributeCommonInfo &CI,
return true;
}
-/// Check if the argument \p ArgNum of \p Attr is a ASCII string literal.
-/// If not emit an error and return false. If the argument is an identifier it
-/// will emit an error with a fixit hint and treat it as if it was a string
-/// literal.
bool Sema::checkStringLiteralArgumentAttr(const ParsedAttr &AL, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation) {
@@ -395,45 +159,6 @@ bool Sema::checkStringLiteralArgumentAttr(const ParsedAttr &AL, unsigned ArgNum,
return checkStringLiteralArgumentAttr(AL, ArgExpr, Str, ArgLocation);
}
-/// Applies the given attribute to the Decl without performing any
-/// additional semantic checking.
-template <typename AttrType>
-static void handleSimpleAttribute(Sema &S, Decl *D,
- const AttributeCommonInfo &CI) {
- D->addAttr(::new (S.Context) AttrType(S.Context, CI));
-}
-
-template <typename... DiagnosticArgs>
-static const Sema::SemaDiagnosticBuilder&
-appendDiagnostics(const Sema::SemaDiagnosticBuilder &Bldr) {
- return Bldr;
-}
-
-template <typename T, typename... DiagnosticArgs>
-static const Sema::SemaDiagnosticBuilder&
-appendDiagnostics(const Sema::SemaDiagnosticBuilder &Bldr, T &&ExtraArg,
- DiagnosticArgs &&... ExtraArgs) {
- return appendDiagnostics(Bldr << std::forward<T>(ExtraArg),
- std::forward<DiagnosticArgs>(ExtraArgs)...);
-}
-
-/// Add an attribute @c AttrType to declaration @c D, provided that
-/// @c PassesCheck is true.
-/// Otherwise, emit diagnostic @c DiagID, passing in all parameters
-/// specified in @c ExtraArgs.
-template <typename AttrType, typename... DiagnosticArgs>
-static void handleSimpleAttributeOrDiagnose(Sema &S, Decl *D,
- const AttributeCommonInfo &CI,
- bool PassesCheck, unsigned DiagID,
- DiagnosticArgs &&... ExtraArgs) {
- if (!PassesCheck) {
- Sema::SemaDiagnosticBuilder DB = S.Diag(D->getBeginLoc(), DiagID);
- appendDiagnostics(DB, std::forward<DiagnosticArgs>(ExtraArgs)...);
- return;
- }
- handleSimpleAttribute<AttrType>(S, D, CI);
-}
-
/// Check if the passed-in expression is of type int or bool.
static bool isIntOrBool(Expr *Exp) {
QualType QT = Exp->getType();
@@ -635,7 +360,7 @@ static void checkAttrArgsAreCapabilityObjs(Sema &S, Decl *D,
if (const auto *StrLit = dyn_cast<StringLiteral>(ArgExp)) {
if (StrLit->getLength() == 0 ||
- (StrLit->isOrdinary() && StrLit->getString() == StringRef("*"))) {
+ (StrLit->isOrdinary() && StrLit->getString() == "*")) {
// Pass empty strings to the analyzer without warnings.
// Treat "*" as the universal lock.
Args.push_back(ArgExp);
@@ -820,8 +545,8 @@ static bool checkParamIsIntegerType(Sema &S, const Decl *D, const AttrInfo &AI,
assert(AI.isArgExpr(AttrArgNo) && "Expected expression argument");
Expr *AttrArg = AI.getArgAsExpr(AttrArgNo);
ParamIdx Idx;
- if (!checkFunctionOrMethodParameterIndex(S, D, AI, AttrArgNo + 1, AttrArg,
- Idx))
+ if (!S.checkFunctionOrMethodParameterIndex(D, AI, AttrArgNo + 1, AttrArg,
+ Idx))
return false;
QualType ParamTy = getFunctionOrMethodParamType(D, Idx.getASTIndex());
@@ -838,7 +563,7 @@ static void handleAllocSizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!AL.checkAtLeastNumArgs(S, 1) || !AL.checkAtMostNumArgs(S, 2))
return;
- assert(isFunctionOrMethod(D) && hasFunctionProto(D));
+ assert(isFuncOrMethodForAttrSubject(D) && hasFunctionProto(D));
QualType RetTy = getFunctionOrMethodResultType(D);
if (!RetTy->isPointerType()) {
@@ -980,6 +705,21 @@ static void handleErrorAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(EA);
}
+static void handleExcludeFromExplicitInstantiationAttr(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
+ const auto *PD = isa<CXXRecordDecl>(D)
+ ? cast<DeclContext>(D)
+ : D->getDeclContext()->getRedeclContext();
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(PD); RD && RD->isLocalClass()) {
+ S.Diag(AL.getLoc(),
+ diag::warn_attribute_exclude_from_explicit_instantiation_local_class)
+ << AL << /*IsMember=*/!isa<CXXRecordDecl>(D);
+ return;
+ }
+ D->addAttr(::new (S.Context)
+ ExcludeFromExplicitInstantiationAttr(S.Context, AL));
+}
+
namespace {
/// Determines if a given Expr references any of the given function's
/// ParmVarDecls, or the function's implicit `this` parameter (if applicable).
@@ -1078,7 +818,7 @@ static void handleDiagnoseAsBuiltinAttr(Sema &S, Decl *D,
const Expr *IndexExpr = AL.getArgAsExpr(I);
uint32_t Index;
- if (!checkUInt32Argument(S, AL, IndexExpr, Index, I + 1, false))
+ if (!S.checkUInt32Argument(AL, IndexExpr, Index, I + 1, false))
return;
if (Index > DeclFD->getNumParams()) {
@@ -1188,7 +928,7 @@ static void handlePassObjectSizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
Expr *E = AL.getArgAsExpr(0);
uint32_t Type;
- if (!checkUInt32Argument(S, AL, E, Type, /*Idx=*/1))
+ if (!S.checkUInt32Argument(AL, E, Type, /*Idx=*/1))
return;
// pass_object_size's argument is passed in as the second argument of
@@ -1475,82 +1215,6 @@ static void handlePreferredName(Sema &S, Decl *D, const ParsedAttr &AL) {
<< TT->getDecl();
}
-static bool checkIBOutletCommon(Sema &S, Decl *D, const ParsedAttr &AL) {
- // The IBOutlet/IBOutletCollection attributes only apply to instance
- // variables or properties of Objective-C classes. The outlet must also
- // have an object reference type.
- if (const auto *VD = dyn_cast<ObjCIvarDecl>(D)) {
- if (!VD->getType()->getAs<ObjCObjectPointerType>()) {
- S.Diag(AL.getLoc(), diag::warn_iboutlet_object_type)
- << AL << VD->getType() << 0;
- return false;
- }
- }
- else if (const auto *PD = dyn_cast<ObjCPropertyDecl>(D)) {
- if (!PD->getType()->getAs<ObjCObjectPointerType>()) {
- S.Diag(AL.getLoc(), diag::warn_iboutlet_object_type)
- << AL << PD->getType() << 1;
- return false;
- }
- }
- else {
- S.Diag(AL.getLoc(), diag::warn_attribute_iboutlet) << AL;
- return false;
- }
-
- return true;
-}
-
-static void handleIBOutlet(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!checkIBOutletCommon(S, D, AL))
- return;
-
- D->addAttr(::new (S.Context) IBOutletAttr(S.Context, AL));
-}
-
-static void handleIBOutletCollection(Sema &S, Decl *D, const ParsedAttr &AL) {
-
- // The iboutletcollection attribute can have zero or one arguments.
- if (AL.getNumArgs() > 1) {
- S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments) << AL << 1;
- return;
- }
-
- if (!checkIBOutletCommon(S, D, AL))
- return;
-
- ParsedType PT;
-
- if (AL.hasParsedType())
- PT = AL.getTypeArg();
- else {
- PT = S.getTypeName(S.Context.Idents.get("NSObject"), AL.getLoc(),
- S.getScopeForContext(D->getDeclContext()->getParent()));
- if (!PT) {
- S.Diag(AL.getLoc(), diag::err_iboutletcollection_type) << "NSObject";
- return;
- }
- }
-
- TypeSourceInfo *QTLoc = nullptr;
- QualType QT = S.GetTypeFromParser(PT, &QTLoc);
- if (!QTLoc)
- QTLoc = S.Context.getTrivialTypeSourceInfo(QT, AL.getLoc());
-
- // Diagnose use of non-object type in iboutletcollection attribute.
- // FIXME. Gnu attribute extension ignores use of builtin types in
- // attributes. So, __attribute__((iboutletcollection(char))) will be
- // treated as __attribute__((iboutletcollection())).
- if (!QT->isObjCIdType() && !QT->isObjCObjectType()) {
- S.Diag(AL.getLoc(),
- QT->isBuiltinType() ? diag::err_iboutletcollection_builtintype
- : diag::err_iboutletcollection_type) << QT;
- return;
- }
-
- D->addAttr(::new (S.Context) IBOutletCollectionAttr(S.Context, AL, QTLoc));
-}
-
bool Sema::isValidPointerAttrType(QualType T, bool RefOkay) {
if (RefOkay) {
if (T->isReferenceType())
@@ -1596,7 +1260,7 @@ static void handleNonNullAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
for (unsigned I = 0; I < AL.getNumArgs(); ++I) {
Expr *Ex = AL.getArgAsExpr(I);
ParamIdx Idx;
- if (!checkFunctionOrMethodParameterIndex(S, D, AL, I + 1, Ex, Idx))
+ if (!S.checkFunctionOrMethodParameterIndex(D, AL, I + 1, Ex, Idx))
return;
// Is the function argument a pointer type?
@@ -1754,7 +1418,7 @@ void Sema::AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
ParamIdx Idx;
const auto *FuncDecl = cast<FunctionDecl>(D);
- if (!checkFunctionOrMethodParameterIndex(*this, FuncDecl, TmpAttr,
+ if (!checkFunctionOrMethodParameterIndex(FuncDecl, TmpAttr,
/*AttrArgNum=*/1, ParamExpr, Idx))
return;
@@ -1770,42 +1434,6 @@ void Sema::AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
D->addAttr(::new (Context) AllocAlignAttr(Context, CI, Idx));
}
-/// Check if \p AssumptionStr is a known assumption and warn if not.
-static void checkAssumptionAttr(Sema &S, SourceLocation Loc,
- StringRef AssumptionStr) {
- if (llvm::KnownAssumptionStrings.count(AssumptionStr))
- return;
-
- unsigned BestEditDistance = 3;
- StringRef Suggestion;
- for (const auto &KnownAssumptionIt : llvm::KnownAssumptionStrings) {
- unsigned EditDistance =
- AssumptionStr.edit_distance(KnownAssumptionIt.getKey());
- if (EditDistance < BestEditDistance) {
- Suggestion = KnownAssumptionIt.getKey();
- BestEditDistance = EditDistance;
- }
- }
-
- if (!Suggestion.empty())
- S.Diag(Loc, diag::warn_assume_attribute_string_unknown_suggested)
- << AssumptionStr << Suggestion;
- else
- S.Diag(Loc, diag::warn_assume_attribute_string_unknown) << AssumptionStr;
-}
-
-static void handleAssumumptionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- // Handle the case where the attribute has a text message.
- StringRef Str;
- SourceLocation AttrStrLoc;
- if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &AttrStrLoc))
- return;
-
- checkAssumptionAttr(S, AttrStrLoc, Str);
-
- D->addAttr(::new (S.Context) AssumptionAttr(S.Context, AL, Str));
-}
-
/// Normalize the attribute, __foo__ becomes foo.
/// Returns true if normalization was applied.
static bool normalizeName(StringRef &AttrName) {
@@ -1864,7 +1492,7 @@ static void handleOwnershipAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
for (unsigned i = 1; i < AL.getNumArgs(); ++i) {
Expr *Ex = AL.getArgAsExpr(i);
ParamIdx Idx;
- if (!checkFunctionOrMethodParameterIndex(S, D, AL, i, Ex, Idx))
+ if (!S.checkFunctionOrMethodParameterIndex(D, AL, i, Ex, Idx))
return;
// Is the function argument a pointer type?
@@ -1979,6 +1607,38 @@ static void handleWeakRefAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(::new (S.Context) WeakRefAttr(S.Context, AL));
}
+// Mark alias/ifunc target as used. Due to name mangling, we look up the
+// demangled name ignoring parameters (not supported by microsoftDemangle
+// https://github.com/llvm/llvm-project/issues/88825). This should handle the
+// majority of use cases while leaving namespace scope names unmarked.
+static void markUsedForAliasOrIfunc(Sema &S, Decl *D, const ParsedAttr &AL,
+ StringRef Str) {
+ std::unique_ptr<char, llvm::FreeDeleter> Demangled;
+ if (S.getASTContext().getCXXABIKind() != TargetCXXABI::Microsoft)
+ Demangled.reset(llvm::itaniumDemangle(Str, /*ParseParams=*/false));
+ std::unique_ptr<MangleContext> MC(S.Context.createMangleContext());
+ SmallString<256> Name;
+
+ const DeclarationNameInfo Target(
+ &S.Context.Idents.get(Demangled ? Demangled.get() : Str), AL.getLoc());
+ LookupResult LR(S, Target, Sema::LookupOrdinaryName);
+ if (S.LookupName(LR, S.TUScope)) {
+ for (NamedDecl *ND : LR) {
+ if (!isa<FunctionDecl>(ND) && !isa<VarDecl>(ND))
+ continue;
+ if (MC->shouldMangleDeclName(ND)) {
+ llvm::raw_svector_ostream Out(Name);
+ Name.clear();
+ MC->mangleName(GlobalDecl(ND), Out);
+ } else {
+ Name = ND->getIdentifier()->getName();
+ }
+ if (Name == Str)
+ ND->markUsed(S.Context);
+ }
+ }
+}
+
static void handleIFuncAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
StringRef Str;
if (!S.checkStringLiteralArgumentAttr(AL, 0, Str))
@@ -1991,6 +1651,7 @@ static void handleIFuncAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
+ markUsedForAliasOrIfunc(S, D, AL, Str);
D->addAttr(::new (S.Context) IFuncAttr(S.Context, AL, Str));
}
@@ -2025,17 +1686,7 @@ static void handleAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
}
- // Mark target used to prevent unneeded-internal-declaration warnings.
- if (!S.LangOpts.CPlusPlus) {
- // FIXME: demangle Str for C++, as the attribute refers to the mangled
- // linkage name, not the pre-mangled identifier.
- const DeclarationNameInfo target(&S.Context.Idents.get(Str), AL.getLoc());
- LookupResult LR(S, target, Sema::LookupOrdinaryName);
- if (S.LookupQualifiedName(LR, S.getCurLexicalContext()))
- for (NamedDecl *ND : LR)
- ND->markUsed(S.Context);
- }
-
+ markUsedForAliasOrIfunc(S, D, AL, Str);
D->addAttr(::new (S.Context) AliasAttr(S.Context, AL, Str));
}
@@ -2053,12 +1704,6 @@ static void handleTLSModelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
- if (S.Context.getTargetInfo().getTriple().isOSAIX() &&
- Model == "local-dynamic") {
- S.Diag(LiteralLoc, diag::err_aix_attr_unsupported_tls_model) << Model;
- return;
- }
-
D->addAttr(::new (S.Context) TLSModelAttr(S.Context, AL, Model));
}
@@ -2155,21 +1800,6 @@ static void handleCommonAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(::new (S.Context) CommonAttr(S.Context, AL));
}
-static void handleCmseNSEntryAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (S.LangOpts.CPlusPlus && !D->getDeclContext()->isExternCContext()) {
- S.Diag(AL.getLoc(), diag::err_attribute_not_clinkage) << AL;
- return;
- }
-
- const auto *FD = cast<FunctionDecl>(D);
- if (!FD->isExternallyVisible()) {
- S.Diag(AL.getLoc(), diag::warn_attribute_cmse_entry_static);
- return;
- }
-
- D->addAttr(::new (S.Context) CmseNSEntryAttr(S.Context, AL));
-}
-
static void handleNakedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (AL.isDeclspecAttribute()) {
const auto &Triple = S.getASTContext().getTargetInfo().getTriple();
@@ -2255,7 +1885,7 @@ static void handleAnalyzerNoReturnAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// The checking path for 'noreturn' and 'analyzer_noreturn' are different
// because 'analyzer_noreturn' does not impact the type.
- if (!isFunctionOrMethodOrBlock(D)) {
+ if (!isFunctionOrMethodOrBlockForAttrSubject(D)) {
ValueDecl *VD = dyn_cast<ValueDecl>(D);
if (!VD || (!VD->getType()->isBlockPointerType() &&
!VD->getType()->isFunctionPointerType())) {
@@ -2359,7 +1989,7 @@ static void handleConstructorAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
if (AL.getNumArgs() &&
- !checkUInt32Argument(S, AL, AL.getArgAsExpr(0), priority))
+ !S.checkUInt32Argument(AL, AL.getArgAsExpr(0), priority))
return;
D->addAttr(::new (S.Context) ConstructorAttr(S.Context, AL, priority));
@@ -2368,7 +1998,7 @@ static void handleConstructorAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleDestructorAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
uint32_t priority = DestructorAttr::DefaultPriority;
if (AL.getNumArgs() &&
- !checkUInt32Argument(S, AL, AL.getArgAsExpr(0), priority))
+ !S.checkUInt32Argument(AL, AL.getArgAsExpr(0), priority))
return;
D->addAttr(::new (S.Context) DestructorAttr(S.Context, AL, priority));
@@ -2384,17 +2014,6 @@ static void handleAttrWithMessage(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(::new (S.Context) AttrTy(S.Context, AL, Str));
}
-static void handleObjCSuppresProtocolAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- if (!cast<ObjCProtocolDecl>(D)->isThisDeclarationADefinition()) {
- S.Diag(AL.getLoc(), diag::err_objc_attr_protocol_requires_definition)
- << AL << AL.getRange();
- return;
- }
-
- D->addAttr(::new (S.Context) ObjCExplicitProtocolImplAttr(S.Context, AL));
-}
-
static bool checkAvailabilityAttr(Sema &S, SourceRange Range,
IdentifierInfo *Platform,
VersionTuple Introduced,
@@ -2457,7 +2076,7 @@ AvailabilityAttr *Sema::mergeAvailabilityAttr(
bool Implicit, VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable, StringRef Message,
bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK,
- int Priority) {
+ int Priority, IdentifierInfo *Environment) {
VersionTuple MergedIntroduced = Introduced;
VersionTuple MergedDeprecated = Deprecated;
VersionTuple MergedObsoleted = Obsoleted;
@@ -2491,6 +2110,12 @@ AvailabilityAttr *Sema::mergeAvailabilityAttr(
continue;
}
+ IdentifierInfo *OldEnvironment = OldAA->getEnvironment();
+ if (OldEnvironment != Environment) {
+ ++i;
+ continue;
+ }
+
// If there is an existing availability attribute for this platform that
// has a lower priority use the existing one and discard the new
// attribute.
@@ -2609,7 +2234,7 @@ AvailabilityAttr *Sema::mergeAvailabilityAttr(
!OverrideOrImpl) {
auto *Avail = ::new (Context) AvailabilityAttr(
Context, CI, Platform, Introduced, Deprecated, Obsoleted, IsUnavailable,
- Message, IsStrict, Replacement, Priority);
+ Message, IsStrict, Replacement, Priority, Environment);
Avail->setImplicit(Implicit);
return Avail;
}
@@ -2668,13 +2293,34 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
}
+ if (S.getLangOpts().HLSL && IsStrict)
+ S.Diag(AL.getStrictLoc(), diag::err_availability_unexpected_parameter)
+ << "strict" << /* HLSL */ 0;
+
int PriorityModifier = AL.isPragmaClangAttribute()
? Sema::AP_PragmaClangAttribute
: Sema::AP_Explicit;
+
+ const IdentifierLoc *EnvironmentLoc = AL.getEnvironment();
+ IdentifierInfo *IIEnvironment = nullptr;
+ if (EnvironmentLoc) {
+ if (S.getLangOpts().HLSL) {
+ IIEnvironment = EnvironmentLoc->Ident;
+ if (AvailabilityAttr::getEnvironmentType(
+ EnvironmentLoc->Ident->getName()) ==
+ llvm::Triple::EnvironmentType::UnknownEnvironment)
+ S.Diag(EnvironmentLoc->Loc, diag::warn_availability_unknown_environment)
+ << EnvironmentLoc->Ident;
+ } else {
+ S.Diag(EnvironmentLoc->Loc, diag::err_availability_unexpected_parameter)
+ << "environment" << /* C/C++ */ 1;
+ }
+ }
+
AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
ND, AL, II, false /*Implicit*/, Introduced.Version, Deprecated.Version,
Obsoleted.Version, IsUnavailable, Str, IsStrict, Replacement,
- Sema::AMK_None, PriorityModifier);
+ Sema::AMK_None, PriorityModifier, IIEnvironment);
if (NewAttr)
D->addAttr(NewAttr);
@@ -2730,8 +2376,8 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
ND, AL, NewII, true /*Implicit*/, NewIntroduced, NewDeprecated,
NewObsoleted, IsUnavailable, Str, IsStrict, Replacement,
- Sema::AMK_None,
- PriorityModifier + Sema::AP_InferredFromOtherPlatform);
+ Sema::AMK_None, PriorityModifier + Sema::AP_InferredFromOtherPlatform,
+ IIEnvironment);
if (NewAttr)
D->addAttr(NewAttr);
}
@@ -2772,8 +2418,8 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
ND, AL, NewII, true /*Implicit*/, NewIntroduced, NewDeprecated,
NewObsoleted, IsUnavailable, Str, IsStrict, Replacement,
- Sema::AMK_None,
- PriorityModifier + Sema::AP_InferredFromOtherPlatform);
+ Sema::AMK_None, PriorityModifier + Sema::AP_InferredFromOtherPlatform,
+ IIEnvironment);
if (NewAttr)
D->addAttr(NewAttr);
}
@@ -2806,7 +2452,7 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
MinMacCatalystVersion(Deprecated.Version),
MinMacCatalystVersion(Obsoleted.Version), IsUnavailable, Str,
IsStrict, Replacement, Sema::AMK_None,
- PriorityModifier + Sema::AP_InferredFromOtherPlatform);
+ PriorityModifier + Sema::AP_InferredFromOtherPlatform, IIEnvironment);
if (NewAttr)
D->addAttr(NewAttr);
} else if (II->getName() == "macos" && GetSDKInfo() &&
@@ -2849,7 +2495,8 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
VersionOrEmptyVersion(NewObsoleted), /*IsUnavailable=*/false, Str,
IsStrict, Replacement, Sema::AMK_None,
PriorityModifier + Sema::AP_InferredFromOtherPlatform +
- Sema::AP_InferredFromOtherPlatform);
+ Sema::AP_InferredFromOtherPlatform,
+ IIEnvironment);
if (NewAttr)
D->addAttr(NewAttr);
}
@@ -2953,113 +2600,6 @@ static void handleVisibilityAttr(Sema &S, Decl *D, const ParsedAttr &AL,
D->addAttr(newAttr);
}
-static void handleObjCDirectAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- // objc_direct cannot be set on methods declared in the context of a protocol
- if (isa<ObjCProtocolDecl>(D->getDeclContext())) {
- S.Diag(AL.getLoc(), diag::err_objc_direct_on_protocol) << false;
- return;
- }
-
- if (S.getLangOpts().ObjCRuntime.allowsDirectDispatch()) {
- handleSimpleAttribute<ObjCDirectAttr>(S, D, AL);
- } else {
- S.Diag(AL.getLoc(), diag::warn_objc_direct_ignored) << AL;
- }
-}
-
-static void handleObjCDirectMembersAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- if (S.getLangOpts().ObjCRuntime.allowsDirectDispatch()) {
- handleSimpleAttribute<ObjCDirectMembersAttr>(S, D, AL);
- } else {
- S.Diag(AL.getLoc(), diag::warn_objc_direct_ignored) << AL;
- }
-}
-
-static void handleObjCMethodFamilyAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- const auto *M = cast<ObjCMethodDecl>(D);
- if (!AL.isArgIdent(0)) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
- << AL << 1 << AANT_ArgumentIdentifier;
- return;
- }
-
- IdentifierLoc *IL = AL.getArgAsIdent(0);
- ObjCMethodFamilyAttr::FamilyKind F;
- if (!ObjCMethodFamilyAttr::ConvertStrToFamilyKind(IL->Ident->getName(), F)) {
- S.Diag(IL->Loc, diag::warn_attribute_type_not_supported) << AL << IL->Ident;
- return;
- }
-
- if (F == ObjCMethodFamilyAttr::OMF_init &&
- !M->getReturnType()->isObjCObjectPointerType()) {
- S.Diag(M->getLocation(), diag::err_init_method_bad_return_type)
- << M->getReturnType();
- // Ignore the attribute.
- return;
- }
-
- D->addAttr(new (S.Context) ObjCMethodFamilyAttr(S.Context, AL, F));
-}
-
-static void handleObjCNSObject(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
- QualType T = TD->getUnderlyingType();
- if (!T->isCARCBridgableType()) {
- S.Diag(TD->getLocation(), diag::err_nsobject_attribute);
- return;
- }
- }
- else if (const auto *PD = dyn_cast<ObjCPropertyDecl>(D)) {
- QualType T = PD->getType();
- if (!T->isCARCBridgableType()) {
- S.Diag(PD->getLocation(), diag::err_nsobject_attribute);
- return;
- }
- }
- else {
- // It is okay to include this attribute on properties, e.g.:
- //
- // @property (retain, nonatomic) struct Bork *Q __attribute__((NSObject));
- //
- // In this case it follows tradition and suppresses an error in the above
- // case.
- S.Diag(D->getLocation(), diag::warn_nsobject_attribute);
- }
- D->addAttr(::new (S.Context) ObjCNSObjectAttr(S.Context, AL));
-}
-
-static void handleObjCIndependentClass(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
- QualType T = TD->getUnderlyingType();
- if (!T->isObjCObjectPointerType()) {
- S.Diag(TD->getLocation(), diag::warn_ptr_independentclass_attribute);
- return;
- }
- } else {
- S.Diag(D->getLocation(), diag::warn_independentclass_attribute);
- return;
- }
- D->addAttr(::new (S.Context) ObjCIndependentClassAttr(S.Context, AL));
-}
-
-static void handleBlocksAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!AL.isArgIdent(0)) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
- << AL << 1 << AANT_ArgumentIdentifier;
- return;
- }
-
- IdentifierInfo *II = AL.getArgAsIdent(0)->Ident;
- BlocksAttr::BlockType type;
- if (!BlocksAttr::ConvertStrToBlockType(II->getName(), type)) {
- S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported) << AL << II;
- return;
- }
-
- D->addAttr(::new (S.Context) BlocksAttr(S.Context, AL, type));
-}
-
static void handleSentinelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
unsigned sentinel = (unsigned)SentinelAttr::DefaultSentinel;
if (AL.getNumArgs() > 0) {
@@ -3229,8 +2769,8 @@ static void handleWorkGroupSize(Sema &S, Decl *D, const ParsedAttr &AL) {
uint32_t WGSize[3];
for (unsigned i = 0; i < 3; ++i) {
const Expr *E = AL.getArgAsExpr(i);
- if (!checkUInt32Argument(S, AL, E, WGSize[i], i,
- /*StrictlyUnsigned=*/true))
+ if (!S.checkUInt32Argument(AL, E, WGSize[i], i,
+ /*StrictlyUnsigned=*/true))
return;
if (WGSize[i] == 0) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_is_zero)
@@ -3249,27 +2789,6 @@ static void handleWorkGroupSize(Sema &S, Decl *D, const ParsedAttr &AL) {
WorkGroupAttr(S.Context, AL, WGSize[0], WGSize[1], WGSize[2]));
}
-// Handles intel_reqd_sub_group_size.
-static void handleSubGroupSize(Sema &S, Decl *D, const ParsedAttr &AL) {
- uint32_t SGSize;
- const Expr *E = AL.getArgAsExpr(0);
- if (!checkUInt32Argument(S, AL, E, SGSize))
- return;
- if (SGSize == 0) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_is_zero)
- << AL << E->getSourceRange();
- return;
- }
-
- OpenCLIntelReqdSubGroupSizeAttr *Existing =
- D->getAttr<OpenCLIntelReqdSubGroupSizeAttr>();
- if (Existing && Existing->getSubGroupSize() != SGSize)
- S.Diag(AL.getLoc(), diag::warn_duplicate_attribute) << AL;
-
- D->addAttr(::new (S.Context)
- OpenCLIntelReqdSubGroupSizeAttr(S.Context, AL, SGSize));
-}
-
static void handleVecTypeHint(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!AL.hasParsedType()) {
S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments) << AL << 1;
@@ -3317,15 +2836,6 @@ SectionAttr *Sema::mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
return ::new (Context) SectionAttr(Context, CI, Name);
}
-/// Used to implement to perform semantic checking on
-/// attribute((section("foo"))) specifiers.
-///
-/// In this case, "foo" is passed in to be checked. If the section
-/// specifier is invalid, return an Error that indicates the problem.
-///
-/// This is a simple quality of implementation feature to catch errors
-/// and give good diagnostics in cases when the assembler or code generator
-/// would otherwise reject the section specifier.
llvm::Error Sema::isValidSectionSpecifier(StringRef SecName) {
if (!Context.getTargetInfo().getTriple().isOSDarwin())
return llvm::Error::success();
@@ -3438,8 +2948,6 @@ static void handleCodeSegAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(CSA);
}
-// Check for things we'd like to warn about. Multiversioning issues are
-// handled later in the process, once we know how many exist.
bool Sema::checkTargetAttr(SourceLocation LiteralLoc, StringRef AttrStr) {
enum FirstParam { Unsupported, Duplicate, Unknown };
enum SecondParam { None, CPU, Tune };
@@ -3483,7 +2991,7 @@ bool Sema::checkTargetAttr(SourceLocation LiteralLoc, StringRef AttrStr) {
<< Unsupported << None << CurFeature << Target;
}
- TargetInfo::BranchProtectionInfo BPI;
+ TargetInfo::BranchProtectionInfo BPI{};
StringRef DiagMsg;
if (ParsedAttrs.BranchProtection.empty())
return false;
@@ -3501,14 +3009,11 @@ bool Sema::checkTargetAttr(SourceLocation LiteralLoc, StringRef AttrStr) {
return false;
}
-// Check Target Version attrs
-bool Sema::checkTargetVersionAttr(SourceLocation LiteralLoc, StringRef &AttrStr,
- bool &isDefault) {
+bool Sema::checkTargetVersionAttr(SourceLocation LiteralLoc, Decl *D,
+ StringRef AttrStr) {
enum FirstParam { Unsupported };
enum SecondParam { None };
enum ThirdParam { Target, TargetClones, TargetVersion };
- if (AttrStr.trim() == "default")
- isDefault = true;
llvm::SmallVector<StringRef, 8> Features;
AttrStr.split(Features, "+");
for (auto &CurFeature : Features) {
@@ -3525,16 +3030,12 @@ bool Sema::checkTargetVersionAttr(SourceLocation LiteralLoc, StringRef &AttrStr,
static void handleTargetVersionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
StringRef Str;
SourceLocation LiteralLoc;
- bool isDefault = false;
if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &LiteralLoc) ||
- S.checkTargetVersionAttr(LiteralLoc, Str, isDefault))
+ S.checkTargetVersionAttr(LiteralLoc, D, Str))
return;
- // Do not create default only target_version attribute
- if (!isDefault) {
- TargetVersionAttr *NewAttr =
- ::new (S.Context) TargetVersionAttr(S.Context, AL, Str);
- D->addAttr(NewAttr);
- }
+ TargetVersionAttr *NewAttr =
+ ::new (S.Context) TargetVersionAttr(S.Context, AL, Str);
+ D->addAttr(NewAttr);
}
static void handleTargetAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
@@ -3550,7 +3051,7 @@ static void handleTargetAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
bool Sema::checkTargetClonesAttrString(
SourceLocation LiteralLoc, StringRef Str, const StringLiteral *Literal,
- bool &HasDefault, bool &HasCommas, bool &HasNotDefault,
+ Decl *D, bool &HasDefault, bool &HasCommas, bool &HasNotDefault,
SmallVectorImpl<SmallString<64>> &StringsBuffer) {
enum FirstParam { Unsupported, Duplicate, Unknown };
enum SecondParam { None, CPU, Tune };
@@ -3604,7 +3105,7 @@ bool Sema::checkTargetClonesAttrString(
llvm::sort(CurFeatures);
SmallString<64> Res;
for (auto &CurFeat : CurFeatures) {
- if (!Res.equals(""))
+ if (!Res.empty())
Res.append("+");
Res.append(CurFeat);
}
@@ -3670,7 +3171,7 @@ static void handleTargetClonesAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!S.checkStringLiteralArgumentAttr(AL, I, CurStr, &LiteralLoc) ||
S.checkTargetClonesAttrString(
LiteralLoc, CurStr,
- cast<StringLiteral>(AL.getArgAsExpr(I)->IgnoreParenCasts()),
+ cast<StringLiteral>(AL.getArgAsExpr(I)->IgnoreParenCasts()), D,
HasDefault, HasCommas, HasNotDefault, StringsBuffer))
return;
}
@@ -3715,7 +3216,7 @@ static void handleTargetClonesAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleMinVectorWidthAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
Expr *E = AL.getArgAsExpr(0);
uint32_t VecWidth;
- if (!checkUInt32Argument(S, AL, E, VecWidth)) {
+ if (!S.checkUInt32Argument(AL, E, VecWidth)) {
AL.setInvalid();
return;
}
@@ -3780,6 +3281,30 @@ static void handleCleanupAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
<< NI.getName() << ParamTy << Ty;
return;
}
+ VarDecl *VD = cast<VarDecl>(D);
+ // Create a reference to the variable declaration. This is a fake/dummy
+ // reference.
+ DeclRefExpr *VariableReference = DeclRefExpr::Create(
+ S.Context, NestedNameSpecifierLoc{}, FD->getLocation(), VD, false,
+ DeclarationNameInfo{VD->getDeclName(), VD->getLocation()}, VD->getType(),
+ VK_LValue);
+
+ // Create a unary operator expression that represents taking the address of
+ // the variable. This is a fake/dummy expression.
+ Expr *AddressOfVariable = UnaryOperator::Create(
+ S.Context, VariableReference, UnaryOperatorKind::UO_AddrOf,
+ S.Context.getPointerType(VD->getType()), VK_PRValue, OK_Ordinary, Loc,
+ +false, FPOptionsOverride{});
+
+ // Create a function call expression. This is a fake/dummy call expression.
+ CallExpr *FunctionCallExpression =
+ CallExpr::Create(S.Context, E, ArrayRef{AddressOfVariable},
+ S.Context.VoidTy, VK_PRValue, Loc, FPOptionsOverride{});
+
+ if (S.CheckFunctionCall(FD, FunctionCallExpression,
+ FD->getType()->getAs<FunctionProtoType>())) {
+ return;
+ }
D->addAttr(::new (S.Context) CleanupAttr(S.Context, AL, FD));
}
@@ -3809,15 +3334,14 @@ static void handleEnumExtensibilityAttr(Sema &S, Decl *D,
static void handleFormatArgAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
const Expr *IdxExpr = AL.getArgAsExpr(0);
ParamIdx Idx;
- if (!checkFunctionOrMethodParameterIndex(S, D, AL, 1, IdxExpr, Idx))
+ if (!S.checkFunctionOrMethodParameterIndex(D, AL, 1, IdxExpr, Idx))
return;
// Make sure the format string is really a string.
QualType Ty = getFunctionOrMethodParamType(D, Idx.getASTIndex());
- bool NotNSStringTy = !isNSStringType(Ty, S.Context);
- if (NotNSStringTy &&
- !isCFStringType(Ty, S.Context) &&
+ bool NotNSStringTy = !S.ObjC().isNSStringType(Ty);
+ if (NotNSStringTy && !S.ObjC().isCFStringType(Ty) &&
(!Ty->isPointerType() ||
!Ty->castAs<PointerType>()->getPointeeType()->isCharType())) {
S.Diag(AL.getLoc(), diag::err_format_attribute_not)
@@ -3832,8 +3356,8 @@ static void handleFormatArgAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (auto *Interface = OMD->getClassInterface())
Ty = S.Context.getObjCObjectPointerType(
QualType(Interface->getTypeForDecl(), 0));
- if (!isNSStringType(Ty, S.Context, /*AllowNSAttributedString=*/true) &&
- !isCFStringType(Ty, S.Context) &&
+ if (!S.ObjC().isNSStringType(Ty, /*AllowNSAttributedString=*/true) &&
+ !S.ObjC().isCFStringType(Ty) &&
(!Ty->isPointerType() ||
!Ty->castAs<PointerType>()->getPointeeType()->isCharType())) {
S.Diag(AL.getLoc(), diag::err_format_attribute_result_not)
@@ -3904,7 +3428,7 @@ static void handleInitPriorityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
Expr *E = AL.getArgAsExpr(0);
uint32_t prioritynum;
- if (!checkUInt32Argument(S, AL, E, prioritynum)) {
+ if (!S.checkUInt32Argument(AL, E, prioritynum)) {
AL.setInvalid();
return;
}
@@ -4003,7 +3527,7 @@ static void handleFormatAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// checks for the 2nd argument
Expr *IdxExpr = AL.getArgAsExpr(1);
uint32_t Idx;
- if (!checkUInt32Argument(S, AL, IdxExpr, Idx, 2))
+ if (!S.checkUInt32Argument(AL, IdxExpr, Idx, 2))
return;
if (Idx < 1 || Idx > NumArgs) {
@@ -4028,8 +3552,7 @@ static void handleFormatAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// make sure the format string is really a string
QualType Ty = getFunctionOrMethodParamType(D, ArgIdx);
- if (!isNSStringType(Ty, S.Context, true) &&
- !isCFStringType(Ty, S.Context) &&
+ if (!S.ObjC().isNSStringType(Ty, true) && !S.ObjC().isCFStringType(Ty) &&
(!Ty->isPointerType() ||
!Ty->castAs<PointerType>()->getPointeeType()->isCharType())) {
S.Diag(AL.getLoc(), diag::err_format_attribute_not)
@@ -4040,7 +3563,7 @@ static void handleFormatAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// check the 3rd argument
Expr *FirstArgExpr = AL.getArgAsExpr(2);
uint32_t FirstArg;
- if (!checkUInt32Argument(S, AL, FirstArgExpr, FirstArg, 3))
+ if (!S.checkUInt32Argument(AL, FirstArgExpr, FirstArg, 3))
return;
// FirstArg == 0 is is always valid.
@@ -4128,8 +3651,8 @@ static void handleCallbackAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
Expr *IdxExpr = AL.getArgAsExpr(I);
// If the expression is not parseable as an int32_t we have a problem.
- if (!checkUInt32Argument(S, AL, IdxExpr, (uint32_t &)ArgIdx, I + 1,
- false)) {
+ if (!S.checkUInt32Argument(AL, IdxExpr, (uint32_t &)ArgIdx, I + 1,
+ false)) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
<< AL << (I + 1) << IdxExpr->getSourceRange();
return;
@@ -4871,7 +4394,9 @@ void Sema::AddModeAttr(Decl *D, const AttributeCommonInfo &CI,
NewElemTy = Context.getRealTypeForBitwidth(DestWidth, ExplicitType);
if (NewElemTy.isNull()) {
- Diag(AttrLoc, diag::err_machine_mode) << 1 /*Unsupported*/ << Name;
+ // Only emit diagnostic on host for 128-bit mode attribute
+ if (!(DestWidth == 128 && getLangOpts().CUDAIsDevice))
+ Diag(AttrLoc, diag::err_machine_mode) << 1 /*Unsupported*/ << Name;
return;
}
@@ -4987,22 +4512,6 @@ MinSizeAttr *Sema::mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI) {
return ::new (Context) MinSizeAttr(Context, CI);
}
-SwiftNameAttr *Sema::mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA,
- StringRef Name) {
- if (const auto *PrevSNA = D->getAttr<SwiftNameAttr>()) {
- if (PrevSNA->getName() != Name && !PrevSNA->isImplicit()) {
- Diag(PrevSNA->getLocation(), diag::err_attributes_are_not_compatible)
- << PrevSNA << &SNA
- << (PrevSNA->isRegularKeywordAttribute() ||
- SNA.isRegularKeywordAttribute());
- Diag(SNA.getLoc(), diag::note_conflicting_attribute);
- }
-
- D->dropAttr<SwiftNameAttr>();
- }
- return ::new (Context) SwiftNameAttr(Context, SNA, Name);
-}
-
OptimizeNoneAttr *Sema::mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI) {
if (AlwaysInlineAttr *Inline = D->getAttr<AlwaysInlineAttr>()) {
@@ -5064,8 +4573,8 @@ static void handleSharedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
if (S.getLangOpts().CUDA && VD->hasLocalStorage() &&
- S.CUDADiagIfHostCode(AL.getLoc(), diag::err_cuda_host_shared)
- << S.CurrentCUDATarget())
+ S.CUDA().DiagIfHostCode(AL.getLoc(), diag::err_cuda_host_shared)
+ << llvm::to_underlying(S.CUDA().CurrentTarget()))
return;
D->addAttr(::new (S.Context) CUDASharedAttr(S.Context, AL));
}
@@ -5154,8 +4663,9 @@ static void handleCallConvAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Diagnostic is emitted elsewhere: here we store the (valid) AL
// in the Decl node for syntactic reasoning, e.g., pretty-printing.
CallingConv CC;
- if (S.CheckCallingConvAttr(AL, CC, /*FD*/ nullptr,
- S.IdentifyCUDATarget(dyn_cast<FunctionDecl>(D))))
+ if (S.CheckCallingConvAttr(
+ AL, CC, /*FD*/ nullptr,
+ S.CUDA().IdentifyTarget(dyn_cast<FunctionDecl>(D))))
return;
if (!isa<ObjCMethodDecl>(D)) {
@@ -5235,6 +4745,12 @@ static void handleCallConvAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
case ParsedAttr::AT_M68kRTD:
D->addAttr(::new (S.Context) M68kRTDAttr(S.Context, AL));
return;
+ case ParsedAttr::AT_PreserveNone:
+ D->addAttr(::new (S.Context) PreserveNoneAttr(S.Context, AL));
+ return;
+ case ParsedAttr::AT_RISCVVectorCC:
+ D->addAttr(::new (S.Context) RISCVVectorCCAttr(S.Context, AL));
+ return;
default:
llvm_unreachable("unexpected attribute kind");
}
@@ -5245,11 +4761,6 @@ static void handleSuppressAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Suppression attribute with GSL spelling requires at least 1 argument.
if (!AL.checkAtLeastNumArgs(S, 1))
return;
- } else if (!isa<VarDecl>(D)) {
- // Analyzer suppression applies only to variables and statements.
- S.Diag(AL.getLoc(), diag::err_attribute_wrong_decl_type_str)
- << AL << 0 << "variables and statements";
- return;
}
std::vector<StringRef> DiagnosticIdentifiers;
@@ -5441,6 +4952,12 @@ bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC,
case ParsedAttr::AT_M68kRTD:
CC = CC_M68kRTD;
break;
+ case ParsedAttr::AT_PreserveNone:
+ CC = CC_PreserveNone;
+ break;
+ case ParsedAttr::AT_RISCVVectorCC:
+ CC = CC_RISCVVectorCall;
+ break;
default: llvm_unreachable("unexpected attribute kind");
}
@@ -5452,22 +4969,22 @@ bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC,
// on their host/device attributes.
if (LangOpts.CUDA) {
auto *Aux = Context.getAuxTargetInfo();
- assert(FD || CFT != CFT_InvalidTarget);
- auto CudaTarget = FD ? IdentifyCUDATarget(FD) : CFT;
+ assert(FD || CFT != CUDAFunctionTarget::InvalidTarget);
+ auto CudaTarget = FD ? CUDA().IdentifyTarget(FD) : CFT;
bool CheckHost = false, CheckDevice = false;
switch (CudaTarget) {
- case CFT_HostDevice:
+ case CUDAFunctionTarget::HostDevice:
CheckHost = true;
CheckDevice = true;
break;
- case CFT_Host:
+ case CUDAFunctionTarget::Host:
CheckHost = true;
break;
- case CFT_Device:
- case CFT_Global:
+ case CUDAFunctionTarget::Device:
+ case CUDAFunctionTarget::Global:
CheckDevice = true;
break;
- case CFT_InvalidTarget:
+ case CUDAFunctionTarget::InvalidTarget:
llvm_unreachable("unexpected cuda target");
}
auto *HostTI = LangOpts.CUDAIsDevice ? Aux : &TI;
@@ -5517,96 +5034,6 @@ bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC,
return false;
}
-/// Pointer-like types in the default address space.
-static bool isValidSwiftContextType(QualType Ty) {
- if (!Ty->hasPointerRepresentation())
- return Ty->isDependentType();
- return Ty->getPointeeType().getAddressSpace() == LangAS::Default;
-}
-
-/// Pointers and references in the default address space.
-static bool isValidSwiftIndirectResultType(QualType Ty) {
- if (const auto *PtrType = Ty->getAs<PointerType>()) {
- Ty = PtrType->getPointeeType();
- } else if (const auto *RefType = Ty->getAs<ReferenceType>()) {
- Ty = RefType->getPointeeType();
- } else {
- return Ty->isDependentType();
- }
- return Ty.getAddressSpace() == LangAS::Default;
-}
-
-/// Pointers and references to pointers in the default address space.
-static bool isValidSwiftErrorResultType(QualType Ty) {
- if (const auto *PtrType = Ty->getAs<PointerType>()) {
- Ty = PtrType->getPointeeType();
- } else if (const auto *RefType = Ty->getAs<ReferenceType>()) {
- Ty = RefType->getPointeeType();
- } else {
- return Ty->isDependentType();
- }
- if (!Ty.getQualifiers().empty())
- return false;
- return isValidSwiftContextType(Ty);
-}
-
-void Sema::AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
- ParameterABI abi) {
-
- QualType type = cast<ParmVarDecl>(D)->getType();
-
- if (auto existingAttr = D->getAttr<ParameterABIAttr>()) {
- if (existingAttr->getABI() != abi) {
- Diag(CI.getLoc(), diag::err_attributes_are_not_compatible)
- << getParameterABISpelling(abi) << existingAttr
- << (CI.isRegularKeywordAttribute() ||
- existingAttr->isRegularKeywordAttribute());
- Diag(existingAttr->getLocation(), diag::note_conflicting_attribute);
- return;
- }
- }
-
- switch (abi) {
- case ParameterABI::Ordinary:
- llvm_unreachable("explicit attribute for ordinary parameter ABI?");
-
- case ParameterABI::SwiftContext:
- if (!isValidSwiftContextType(type)) {
- Diag(CI.getLoc(), diag::err_swift_abi_parameter_wrong_type)
- << getParameterABISpelling(abi) << /*pointer to pointer */ 0 << type;
- }
- D->addAttr(::new (Context) SwiftContextAttr(Context, CI));
- return;
-
- case ParameterABI::SwiftAsyncContext:
- if (!isValidSwiftContextType(type)) {
- Diag(CI.getLoc(), diag::err_swift_abi_parameter_wrong_type)
- << getParameterABISpelling(abi) << /*pointer to pointer */ 0 << type;
- }
- D->addAttr(::new (Context) SwiftAsyncContextAttr(Context, CI));
- return;
-
- case ParameterABI::SwiftErrorResult:
- if (!isValidSwiftErrorResultType(type)) {
- Diag(CI.getLoc(), diag::err_swift_abi_parameter_wrong_type)
- << getParameterABISpelling(abi) << /*pointer to pointer */ 1 << type;
- }
- D->addAttr(::new (Context) SwiftErrorResultAttr(Context, CI));
- return;
-
- case ParameterABI::SwiftIndirectResult:
- if (!isValidSwiftIndirectResultType(type)) {
- Diag(CI.getLoc(), diag::err_swift_abi_parameter_wrong_type)
- << getParameterABISpelling(abi) << /*pointer*/ 0 << type;
- }
- D->addAttr(::new (Context) SwiftIndirectResultAttr(Context, CI));
- return;
- }
- llvm_unreachable("bad parameter ABI attribute");
-}
-
-/// Checks a regparm attribute, returning true if it is ill-formed and
-/// otherwise setting numParams to the appropriate value.
bool Sema::CheckRegparmAttr(const ParsedAttr &AL, unsigned &numParams) {
if (AL.isInvalid())
return true;
@@ -5618,7 +5045,7 @@ bool Sema::CheckRegparmAttr(const ParsedAttr &AL, unsigned &numParams) {
uint32_t NP;
Expr *NumParamsExpr = AL.getArgAsExpr(0);
- if (!checkUInt32Argument(*this, AL, NumParamsExpr, NP)) {
+ if (!checkUInt32Argument(AL, NumParamsExpr, NP)) {
AL.setInvalid();
return true;
}
@@ -5641,12 +5068,12 @@ bool Sema::CheckRegparmAttr(const ParsedAttr &AL, unsigned &numParams) {
return false;
}
-// Helper to get CudaArch.
-static CudaArch getCudaArch(const TargetInfo &TI) {
+// Helper to get OffloadArch.
+static OffloadArch getOffloadArch(const TargetInfo &TI) {
if (!TI.getTriple().isNVPTX())
- llvm_unreachable("getCudaArch is only valid for NVPTX triple");
+ llvm_unreachable("getOffloadArch is only valid for NVPTX triple");
auto &TO = TI.getTargetOpts();
- return StringToCudaArch(TO.CPU);
+ return StringToOffloadArch(TO.CPU);
}
// Checks whether an argument of launch_bounds attribute is
@@ -5706,10 +5133,10 @@ Sema::CreateLaunchBoundsAttr(const AttributeCommonInfo &CI, Expr *MaxThreads,
if (MaxBlocks) {
// '.maxclusterrank' ptx directive requires .target sm_90 or higher.
- auto SM = getCudaArch(Context.getTargetInfo());
- if (SM == CudaArch::UNKNOWN || SM < CudaArch::SM_90) {
+ auto SM = getOffloadArch(Context.getTargetInfo());
+ if (SM == OffloadArch::UNKNOWN || SM < OffloadArch::SM_90) {
Diag(MaxBlocks->getBeginLoc(), diag::warn_cuda_maxclusterrank_sm_90)
- << CudaArchToString(SM) << CI << MaxBlocks->getSourceRange();
+ << OffloadArchToString(SM) << CI << MaxBlocks->getSourceRange();
// Ignore it by setting MaxBlocks to null;
MaxBlocks = nullptr;
} else {
@@ -5748,13 +5175,13 @@ static void handleArgumentWithTypeTagAttr(Sema &S, Decl *D,
}
ParamIdx ArgumentIdx;
- if (!checkFunctionOrMethodParameterIndex(S, D, AL, 2, AL.getArgAsExpr(1),
- ArgumentIdx))
+ if (!S.checkFunctionOrMethodParameterIndex(D, AL, 2, AL.getArgAsExpr(1),
+ ArgumentIdx))
return;
ParamIdx TypeTagIdx;
- if (!checkFunctionOrMethodParameterIndex(S, D, AL, 3, AL.getArgAsExpr(2),
- TypeTagIdx))
+ if (!S.checkFunctionOrMethodParameterIndex(D, AL, 3, AL.getArgAsExpr(2),
+ TypeTagIdx))
return;
bool IsPointer = AL.getAttrName()->getName() == "pointer_with_type_tag";
@@ -5801,9 +5228,9 @@ static void handleTypeTagForDatatypeAttr(Sema &S, Decl *D,
static void handleXRayLogArgsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
ParamIdx ArgCount;
- if (!checkFunctionOrMethodParameterIndex(S, D, AL, 1, AL.getArgAsExpr(0),
- ArgCount,
- true /* CanIndexImplicitThis */))
+ if (!S.checkFunctionOrMethodParameterIndex(D, AL, 1, AL.getArgAsExpr(0),
+ ArgCount,
+ true /* CanIndexImplicitThis */))
return;
// ArgCount isn't a parameter index [0;n), it's a count [1;n]
@@ -5813,15 +5240,19 @@ static void handleXRayLogArgsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handlePatchableFunctionEntryAttr(Sema &S, Decl *D,
const ParsedAttr &AL) {
+ if (S.Context.getTargetInfo().getTriple().isOSAIX()) {
+ S.Diag(AL.getLoc(), diag::err_aix_attr_unsupported) << AL;
+ return;
+ }
uint32_t Count = 0, Offset = 0;
- if (!checkUInt32Argument(S, AL, AL.getArgAsExpr(0), Count, 0, true))
+ if (!S.checkUInt32Argument(AL, AL.getArgAsExpr(0), Count, 0, true))
return;
if (AL.getNumArgs() == 2) {
Expr *Arg = AL.getArgAsExpr(1);
- if (!checkUInt32Argument(S, AL, Arg, Offset, 1, true))
+ if (!S.checkUInt32Argument(AL, Arg, Offset, 1, true))
return;
if (Count < Offset) {
- S.Diag(getAttrLoc(AL), diag::err_attribute_argument_out_of_range)
+ S.Diag(S.getAttrLoc(AL), diag::err_attribute_argument_out_of_range)
<< &AL << 0 << Count << Arg->getBeginLoc();
return;
}
@@ -5830,63 +5261,7 @@ static void handlePatchableFunctionEntryAttr(Sema &S, Decl *D,
PatchableFunctionEntryAttr(S.Context, AL, Count, Offset));
}
-namespace {
-struct IntrinToName {
- uint32_t Id;
- int32_t FullName;
- int32_t ShortName;
-};
-} // unnamed namespace
-
-static bool ArmBuiltinAliasValid(unsigned BuiltinID, StringRef AliasName,
- ArrayRef<IntrinToName> Map,
- const char *IntrinNames) {
- AliasName.consume_front("__arm_");
- const IntrinToName *It =
- llvm::lower_bound(Map, BuiltinID, [](const IntrinToName &L, unsigned Id) {
- return L.Id < Id;
- });
- if (It == Map.end() || It->Id != BuiltinID)
- return false;
- StringRef FullName(&IntrinNames[It->FullName]);
- if (AliasName == FullName)
- return true;
- if (It->ShortName == -1)
- return false;
- StringRef ShortName(&IntrinNames[It->ShortName]);
- return AliasName == ShortName;
-}
-
-static bool ArmMveAliasValid(unsigned BuiltinID, StringRef AliasName) {
-#include "clang/Basic/arm_mve_builtin_aliases.inc"
- // The included file defines:
- // - ArrayRef<IntrinToName> Map
- // - const char IntrinNames[]
- return ArmBuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
-}
-
-static bool ArmCdeAliasValid(unsigned BuiltinID, StringRef AliasName) {
-#include "clang/Basic/arm_cde_builtin_aliases.inc"
- return ArmBuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
-}
-
-static bool ArmSveAliasValid(ASTContext &Context, unsigned BuiltinID,
- StringRef AliasName) {
- if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID))
- BuiltinID = Context.BuiltinInfo.getAuxBuiltinID(BuiltinID);
- return BuiltinID >= AArch64::FirstSVEBuiltin &&
- BuiltinID <= AArch64::LastSVEBuiltin;
-}
-
-static bool ArmSmeAliasValid(ASTContext &Context, unsigned BuiltinID,
- StringRef AliasName) {
- if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID))
- BuiltinID = Context.BuiltinInfo.getAuxBuiltinID(BuiltinID);
- return BuiltinID >= AArch64::FirstSMEBuiltin &&
- BuiltinID <= AArch64::LastSMEBuiltin;
-}
-
-static void handleArmBuiltinAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+static void handleBuiltinAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!AL.isArgIdent(0)) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
<< AL << 1 << AANT_ArgumentIdentifier;
@@ -5898,48 +5273,33 @@ static void handleArmBuiltinAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
StringRef AliasName = cast<FunctionDecl>(D)->getIdentifier()->getName();
bool IsAArch64 = S.Context.getTargetInfo().getTriple().isAArch64();
- if ((IsAArch64 && !ArmSveAliasValid(S.Context, BuiltinID, AliasName) &&
- !ArmSmeAliasValid(S.Context, BuiltinID, AliasName)) ||
- (!IsAArch64 && !ArmMveAliasValid(BuiltinID, AliasName) &&
- !ArmCdeAliasValid(BuiltinID, AliasName))) {
- S.Diag(AL.getLoc(), diag::err_attribute_arm_builtin_alias);
+ bool IsARM = S.Context.getTargetInfo().getTriple().isARM();
+ bool IsRISCV = S.Context.getTargetInfo().getTriple().isRISCV();
+ bool IsHLSL = S.Context.getLangOpts().HLSL;
+ if ((IsAArch64 && !S.ARM().SveAliasValid(BuiltinID, AliasName)) ||
+ (IsARM && !S.ARM().MveAliasValid(BuiltinID, AliasName) &&
+ !S.ARM().CdeAliasValid(BuiltinID, AliasName)) ||
+ (IsRISCV && !S.RISCV().isAliasValid(BuiltinID, AliasName)) ||
+ (!IsAArch64 && !IsARM && !IsRISCV && !IsHLSL)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_builtin_alias) << AL;
return;
}
- D->addAttr(::new (S.Context) ArmBuiltinAliasAttr(S.Context, AL, Ident));
-}
-
-static bool RISCVAliasValid(unsigned BuiltinID, StringRef AliasName) {
- return BuiltinID >= RISCV::FirstRVVBuiltin &&
- BuiltinID <= RISCV::LastRVVBuiltin;
+ D->addAttr(::new (S.Context) BuiltinAliasAttr(S.Context, AL, Ident));
}
-static void handleBuiltinAliasAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- if (!AL.isArgIdent(0)) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
- << AL << 1 << AANT_ArgumentIdentifier;
+static void handleNullableTypeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (AL.isUsedAsTypeAttr())
return;
- }
- IdentifierInfo *Ident = AL.getArgAsIdent(0)->Ident;
- unsigned BuiltinID = Ident->getBuiltinID();
- StringRef AliasName = cast<FunctionDecl>(D)->getIdentifier()->getName();
-
- bool IsAArch64 = S.Context.getTargetInfo().getTriple().isAArch64();
- bool IsARM = S.Context.getTargetInfo().getTriple().isARM();
- bool IsRISCV = S.Context.getTargetInfo().getTriple().isRISCV();
- bool IsHLSL = S.Context.getLangOpts().HLSL;
- if ((IsAArch64 && !ArmSveAliasValid(S.Context, BuiltinID, AliasName)) ||
- (IsARM && !ArmMveAliasValid(BuiltinID, AliasName) &&
- !ArmCdeAliasValid(BuiltinID, AliasName)) ||
- (IsRISCV && !RISCVAliasValid(BuiltinID, AliasName)) ||
- (!IsAArch64 && !IsARM && !IsRISCV && !IsHLSL)) {
- S.Diag(AL.getLoc(), diag::err_attribute_builtin_alias) << AL;
+ if (auto *CRD = dyn_cast<CXXRecordDecl>(D);
+ !CRD || !(CRD->isClass() || CRD->isStruct())) {
+ S.Diag(AL.getRange().getBegin(), diag::err_attribute_wrong_decl_type_str)
+ << AL << AL.isRegularKeywordAttribute() << "classes";
return;
}
- D->addAttr(::new (S.Context) BuiltinAliasAttr(S.Context, AL, Ident));
+ handleSimpleAttribute<TypeNullableAttr>(S, D, AL);
}
static void handlePreferredTypeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
@@ -5958,1114 +5318,6 @@ static void handlePreferredTypeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
//===----------------------------------------------------------------------===//
-// Checker-specific attribute handlers.
-//===----------------------------------------------------------------------===//
-static bool isValidSubjectOfNSReturnsRetainedAttribute(QualType QT) {
- return QT->isDependentType() || QT->isObjCRetainableType();
-}
-
-static bool isValidSubjectOfNSAttribute(QualType QT) {
- return QT->isDependentType() || QT->isObjCObjectPointerType() ||
- QT->isObjCNSObjectType();
-}
-
-static bool isValidSubjectOfCFAttribute(QualType QT) {
- return QT->isDependentType() || QT->isPointerType() ||
- isValidSubjectOfNSAttribute(QT);
-}
-
-static bool isValidSubjectOfOSAttribute(QualType QT) {
- if (QT->isDependentType())
- return true;
- QualType PT = QT->getPointeeType();
- return !PT.isNull() && PT->getAsCXXRecordDecl() != nullptr;
-}
-
-void Sema::AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
- RetainOwnershipKind K,
- bool IsTemplateInstantiation) {
- ValueDecl *VD = cast<ValueDecl>(D);
- switch (K) {
- case RetainOwnershipKind::OS:
- handleSimpleAttributeOrDiagnose<OSConsumedAttr>(
- *this, VD, CI, isValidSubjectOfOSAttribute(VD->getType()),
- diag::warn_ns_attribute_wrong_parameter_type,
- /*ExtraArgs=*/CI.getRange(), "os_consumed", /*pointers*/ 1);
- return;
- case RetainOwnershipKind::NS:
- handleSimpleAttributeOrDiagnose<NSConsumedAttr>(
- *this, VD, CI, isValidSubjectOfNSAttribute(VD->getType()),
-
- // These attributes are normally just advisory, but in ARC, ns_consumed
- // is significant. Allow non-dependent code to contain inappropriate
- // attributes even in ARC, but require template instantiations to be
- // set up correctly.
- ((IsTemplateInstantiation && getLangOpts().ObjCAutoRefCount)
- ? diag::err_ns_attribute_wrong_parameter_type
- : diag::warn_ns_attribute_wrong_parameter_type),
- /*ExtraArgs=*/CI.getRange(), "ns_consumed", /*objc pointers*/ 0);
- return;
- case RetainOwnershipKind::CF:
- handleSimpleAttributeOrDiagnose<CFConsumedAttr>(
- *this, VD, CI, isValidSubjectOfCFAttribute(VD->getType()),
- diag::warn_ns_attribute_wrong_parameter_type,
- /*ExtraArgs=*/CI.getRange(), "cf_consumed", /*pointers*/ 1);
- return;
- }
-}
-
-static Sema::RetainOwnershipKind
-parsedAttrToRetainOwnershipKind(const ParsedAttr &AL) {
- switch (AL.getKind()) {
- case ParsedAttr::AT_CFConsumed:
- case ParsedAttr::AT_CFReturnsRetained:
- case ParsedAttr::AT_CFReturnsNotRetained:
- return Sema::RetainOwnershipKind::CF;
- case ParsedAttr::AT_OSConsumesThis:
- case ParsedAttr::AT_OSConsumed:
- case ParsedAttr::AT_OSReturnsRetained:
- case ParsedAttr::AT_OSReturnsNotRetained:
- case ParsedAttr::AT_OSReturnsRetainedOnZero:
- case ParsedAttr::AT_OSReturnsRetainedOnNonZero:
- return Sema::RetainOwnershipKind::OS;
- case ParsedAttr::AT_NSConsumesSelf:
- case ParsedAttr::AT_NSConsumed:
- case ParsedAttr::AT_NSReturnsRetained:
- case ParsedAttr::AT_NSReturnsNotRetained:
- case ParsedAttr::AT_NSReturnsAutoreleased:
- return Sema::RetainOwnershipKind::NS;
- default:
- llvm_unreachable("Wrong argument supplied");
- }
-}
-
-bool Sema::checkNSReturnsRetainedReturnType(SourceLocation Loc, QualType QT) {
- if (isValidSubjectOfNSReturnsRetainedAttribute(QT))
- return false;
-
- Diag(Loc, diag::warn_ns_attribute_wrong_return_type)
- << "'ns_returns_retained'" << 0 << 0;
- return true;
-}
-
-/// \return whether the parameter is a pointer to OSObject pointer.
-static bool isValidOSObjectOutParameter(const Decl *D) {
- const auto *PVD = dyn_cast<ParmVarDecl>(D);
- if (!PVD)
- return false;
- QualType QT = PVD->getType();
- QualType PT = QT->getPointeeType();
- return !PT.isNull() && isValidSubjectOfOSAttribute(PT);
-}
-
-static void handleXReturnsXRetainedAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- QualType ReturnType;
- Sema::RetainOwnershipKind K = parsedAttrToRetainOwnershipKind(AL);
-
- if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
- ReturnType = MD->getReturnType();
- } else if (S.getLangOpts().ObjCAutoRefCount && hasDeclarator(D) &&
- (AL.getKind() == ParsedAttr::AT_NSReturnsRetained)) {
- return; // ignore: was handled as a type attribute
- } else if (const auto *PD = dyn_cast<ObjCPropertyDecl>(D)) {
- ReturnType = PD->getType();
- } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
- ReturnType = FD->getReturnType();
- } else if (const auto *Param = dyn_cast<ParmVarDecl>(D)) {
- // Attributes on parameters are used for out-parameters,
- // passed as pointers-to-pointers.
- unsigned DiagID = K == Sema::RetainOwnershipKind::CF
- ? /*pointer-to-CF-pointer*/2
- : /*pointer-to-OSObject-pointer*/3;
- ReturnType = Param->getType()->getPointeeType();
- if (ReturnType.isNull()) {
- S.Diag(D->getBeginLoc(), diag::warn_ns_attribute_wrong_parameter_type)
- << AL << DiagID << AL.getRange();
- return;
- }
- } else if (AL.isUsedAsTypeAttr()) {
- return;
- } else {
- AttributeDeclKind ExpectedDeclKind;
- switch (AL.getKind()) {
- default: llvm_unreachable("invalid ownership attribute");
- case ParsedAttr::AT_NSReturnsRetained:
- case ParsedAttr::AT_NSReturnsAutoreleased:
- case ParsedAttr::AT_NSReturnsNotRetained:
- ExpectedDeclKind = ExpectedFunctionOrMethod;
- break;
-
- case ParsedAttr::AT_OSReturnsRetained:
- case ParsedAttr::AT_OSReturnsNotRetained:
- case ParsedAttr::AT_CFReturnsRetained:
- case ParsedAttr::AT_CFReturnsNotRetained:
- ExpectedDeclKind = ExpectedFunctionMethodOrParameter;
- break;
- }
- S.Diag(D->getBeginLoc(), diag::warn_attribute_wrong_decl_type)
- << AL.getRange() << AL << AL.isRegularKeywordAttribute()
- << ExpectedDeclKind;
- return;
- }
-
- bool TypeOK;
- bool Cf;
- unsigned ParmDiagID = 2; // Pointer-to-CF-pointer
- switch (AL.getKind()) {
- default: llvm_unreachable("invalid ownership attribute");
- case ParsedAttr::AT_NSReturnsRetained:
- TypeOK = isValidSubjectOfNSReturnsRetainedAttribute(ReturnType);
- Cf = false;
- break;
-
- case ParsedAttr::AT_NSReturnsAutoreleased:
- case ParsedAttr::AT_NSReturnsNotRetained:
- TypeOK = isValidSubjectOfNSAttribute(ReturnType);
- Cf = false;
- break;
-
- case ParsedAttr::AT_CFReturnsRetained:
- case ParsedAttr::AT_CFReturnsNotRetained:
- TypeOK = isValidSubjectOfCFAttribute(ReturnType);
- Cf = true;
- break;
-
- case ParsedAttr::AT_OSReturnsRetained:
- case ParsedAttr::AT_OSReturnsNotRetained:
- TypeOK = isValidSubjectOfOSAttribute(ReturnType);
- Cf = true;
- ParmDiagID = 3; // Pointer-to-OSObject-pointer
- break;
- }
-
- if (!TypeOK) {
- if (AL.isUsedAsTypeAttr())
- return;
-
- if (isa<ParmVarDecl>(D)) {
- S.Diag(D->getBeginLoc(), diag::warn_ns_attribute_wrong_parameter_type)
- << AL << ParmDiagID << AL.getRange();
- } else {
- // Needs to be kept in sync with warn_ns_attribute_wrong_return_type.
- enum : unsigned {
- Function,
- Method,
- Property
- } SubjectKind = Function;
- if (isa<ObjCMethodDecl>(D))
- SubjectKind = Method;
- else if (isa<ObjCPropertyDecl>(D))
- SubjectKind = Property;
- S.Diag(D->getBeginLoc(), diag::warn_ns_attribute_wrong_return_type)
- << AL << SubjectKind << Cf << AL.getRange();
- }
- return;
- }
-
- switch (AL.getKind()) {
- default:
- llvm_unreachable("invalid ownership attribute");
- case ParsedAttr::AT_NSReturnsAutoreleased:
- handleSimpleAttribute<NSReturnsAutoreleasedAttr>(S, D, AL);
- return;
- case ParsedAttr::AT_CFReturnsNotRetained:
- handleSimpleAttribute<CFReturnsNotRetainedAttr>(S, D, AL);
- return;
- case ParsedAttr::AT_NSReturnsNotRetained:
- handleSimpleAttribute<NSReturnsNotRetainedAttr>(S, D, AL);
- return;
- case ParsedAttr::AT_CFReturnsRetained:
- handleSimpleAttribute<CFReturnsRetainedAttr>(S, D, AL);
- return;
- case ParsedAttr::AT_NSReturnsRetained:
- handleSimpleAttribute<NSReturnsRetainedAttr>(S, D, AL);
- return;
- case ParsedAttr::AT_OSReturnsRetained:
- handleSimpleAttribute<OSReturnsRetainedAttr>(S, D, AL);
- return;
- case ParsedAttr::AT_OSReturnsNotRetained:
- handleSimpleAttribute<OSReturnsNotRetainedAttr>(S, D, AL);
- return;
- };
-}
-
-static void handleObjCReturnsInnerPointerAttr(Sema &S, Decl *D,
- const ParsedAttr &Attrs) {
- const int EP_ObjCMethod = 1;
- const int EP_ObjCProperty = 2;
-
- SourceLocation loc = Attrs.getLoc();
- QualType resultType;
- if (isa<ObjCMethodDecl>(D))
- resultType = cast<ObjCMethodDecl>(D)->getReturnType();
- else
- resultType = cast<ObjCPropertyDecl>(D)->getType();
-
- if (!resultType->isReferenceType() &&
- (!resultType->isPointerType() || resultType->isObjCRetainableType())) {
- S.Diag(D->getBeginLoc(), diag::warn_ns_attribute_wrong_return_type)
- << SourceRange(loc) << Attrs
- << (isa<ObjCMethodDecl>(D) ? EP_ObjCMethod : EP_ObjCProperty)
- << /*non-retainable pointer*/ 2;
-
- // Drop the attribute.
- return;
- }
-
- D->addAttr(::new (S.Context) ObjCReturnsInnerPointerAttr(S.Context, Attrs));
-}
-
-static void handleObjCRequiresSuperAttr(Sema &S, Decl *D,
- const ParsedAttr &Attrs) {
- const auto *Method = cast<ObjCMethodDecl>(D);
-
- const DeclContext *DC = Method->getDeclContext();
- if (const auto *PDecl = dyn_cast_if_present<ObjCProtocolDecl>(DC)) {
- S.Diag(D->getBeginLoc(), diag::warn_objc_requires_super_protocol) << Attrs
- << 0;
- S.Diag(PDecl->getLocation(), diag::note_protocol_decl);
- return;
- }
- if (Method->getMethodFamily() == OMF_dealloc) {
- S.Diag(D->getBeginLoc(), diag::warn_objc_requires_super_protocol) << Attrs
- << 1;
- return;
- }
-
- D->addAttr(::new (S.Context) ObjCRequiresSuperAttr(S.Context, Attrs));
-}
-
-static void handleNSErrorDomain(Sema &S, Decl *D, const ParsedAttr &Attr) {
- if (!isa<TagDecl>(D)) {
- S.Diag(D->getBeginLoc(), diag::err_nserrordomain_invalid_decl) << 0;
- return;
- }
-
- IdentifierLoc *IdentLoc =
- Attr.isArgIdent(0) ? Attr.getArgAsIdent(0) : nullptr;
- if (!IdentLoc || !IdentLoc->Ident) {
- // Try to locate the argument directly.
- SourceLocation Loc = Attr.getLoc();
- if (Attr.isArgExpr(0) && Attr.getArgAsExpr(0))
- Loc = Attr.getArgAsExpr(0)->getBeginLoc();
-
- S.Diag(Loc, diag::err_nserrordomain_invalid_decl) << 0;
- return;
- }
-
- // Verify that the identifier is a valid decl in the C decl namespace.
- LookupResult Result(S, DeclarationName(IdentLoc->Ident), SourceLocation(),
- Sema::LookupNameKind::LookupOrdinaryName);
- if (!S.LookupName(Result, S.TUScope) || !Result.getAsSingle<VarDecl>()) {
- S.Diag(IdentLoc->Loc, diag::err_nserrordomain_invalid_decl)
- << 1 << IdentLoc->Ident;
- return;
- }
-
- D->addAttr(::new (S.Context)
- NSErrorDomainAttr(S.Context, Attr, IdentLoc->Ident));
-}
-
-static void handleObjCBridgeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- IdentifierLoc *Parm = AL.isArgIdent(0) ? AL.getArgAsIdent(0) : nullptr;
-
- if (!Parm) {
- S.Diag(D->getBeginLoc(), diag::err_objc_attr_not_id) << AL << 0;
- return;
- }
-
- // Typedefs only allow objc_bridge(id) and have some additional checking.
- if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
- if (!Parm->Ident->isStr("id")) {
- S.Diag(AL.getLoc(), diag::err_objc_attr_typedef_not_id) << AL;
- return;
- }
-
- // Only allow 'cv void *'.
- QualType T = TD->getUnderlyingType();
- if (!T->isVoidPointerType()) {
- S.Diag(AL.getLoc(), diag::err_objc_attr_typedef_not_void_pointer);
- return;
- }
- }
-
- D->addAttr(::new (S.Context) ObjCBridgeAttr(S.Context, AL, Parm->Ident));
-}
-
-static void handleObjCBridgeMutableAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- IdentifierLoc *Parm = AL.isArgIdent(0) ? AL.getArgAsIdent(0) : nullptr;
-
- if (!Parm) {
- S.Diag(D->getBeginLoc(), diag::err_objc_attr_not_id) << AL << 0;
- return;
- }
-
- D->addAttr(::new (S.Context)
- ObjCBridgeMutableAttr(S.Context, AL, Parm->Ident));
-}
-
-static void handleObjCBridgeRelatedAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- IdentifierInfo *RelatedClass =
- AL.isArgIdent(0) ? AL.getArgAsIdent(0)->Ident : nullptr;
- if (!RelatedClass) {
- S.Diag(D->getBeginLoc(), diag::err_objc_attr_not_id) << AL << 0;
- return;
- }
- IdentifierInfo *ClassMethod =
- AL.getArgAsIdent(1) ? AL.getArgAsIdent(1)->Ident : nullptr;
- IdentifierInfo *InstanceMethod =
- AL.getArgAsIdent(2) ? AL.getArgAsIdent(2)->Ident : nullptr;
- D->addAttr(::new (S.Context) ObjCBridgeRelatedAttr(
- S.Context, AL, RelatedClass, ClassMethod, InstanceMethod));
-}
-
-static void handleObjCDesignatedInitializer(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- DeclContext *Ctx = D->getDeclContext();
-
- // This attribute can only be applied to methods in interfaces or class
- // extensions.
- if (!isa<ObjCInterfaceDecl>(Ctx) &&
- !(isa<ObjCCategoryDecl>(Ctx) &&
- cast<ObjCCategoryDecl>(Ctx)->IsClassExtension())) {
- S.Diag(D->getLocation(), diag::err_designated_init_attr_non_init);
- return;
- }
-
- ObjCInterfaceDecl *IFace;
- if (auto *CatDecl = dyn_cast<ObjCCategoryDecl>(Ctx))
- IFace = CatDecl->getClassInterface();
- else
- IFace = cast<ObjCInterfaceDecl>(Ctx);
-
- if (!IFace)
- return;
-
- IFace->setHasDesignatedInitializers();
- D->addAttr(::new (S.Context) ObjCDesignatedInitializerAttr(S.Context, AL));
-}
-
-static void handleObjCRuntimeName(Sema &S, Decl *D, const ParsedAttr &AL) {
- StringRef MetaDataName;
- if (!S.checkStringLiteralArgumentAttr(AL, 0, MetaDataName))
- return;
- D->addAttr(::new (S.Context)
- ObjCRuntimeNameAttr(S.Context, AL, MetaDataName));
-}
-
-// When a user wants to use objc_boxable with a union or struct
-// but they don't have access to the declaration (legacy/third-party code)
-// then they can 'enable' this feature with a typedef:
-// typedef struct __attribute((objc_boxable)) legacy_struct legacy_struct;
-static void handleObjCBoxable(Sema &S, Decl *D, const ParsedAttr &AL) {
- bool notify = false;
-
- auto *RD = dyn_cast<RecordDecl>(D);
- if (RD && RD->getDefinition()) {
- RD = RD->getDefinition();
- notify = true;
- }
-
- if (RD) {
- ObjCBoxableAttr *BoxableAttr =
- ::new (S.Context) ObjCBoxableAttr(S.Context, AL);
- RD->addAttr(BoxableAttr);
- if (notify) {
- // we need to notify ASTReader/ASTWriter about
- // modification of existing declaration
- if (ASTMutationListener *L = S.getASTMutationListener())
- L->AddedAttributeToRecord(BoxableAttr, RD);
- }
- }
-}
-
-static void handleObjCOwnershipAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (hasDeclarator(D))
- return;
-
- S.Diag(D->getBeginLoc(), diag::err_attribute_wrong_decl_type)
- << AL.getRange() << AL << AL.isRegularKeywordAttribute()
- << ExpectedVariable;
-}
-
-static void handleObjCPreciseLifetimeAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- const auto *VD = cast<ValueDecl>(D);
- QualType QT = VD->getType();
-
- if (!QT->isDependentType() &&
- !QT->isObjCLifetimeType()) {
- S.Diag(AL.getLoc(), diag::err_objc_precise_lifetime_bad_type)
- << QT;
- return;
- }
-
- Qualifiers::ObjCLifetime Lifetime = QT.getObjCLifetime();
-
- // If we have no lifetime yet, check the lifetime we're presumably
- // going to infer.
- if (Lifetime == Qualifiers::OCL_None && !QT->isDependentType())
- Lifetime = QT->getObjCARCImplicitLifetime();
-
- switch (Lifetime) {
- case Qualifiers::OCL_None:
- assert(QT->isDependentType() &&
- "didn't infer lifetime for non-dependent type?");
- break;
-
- case Qualifiers::OCL_Weak: // meaningful
- case Qualifiers::OCL_Strong: // meaningful
- break;
-
- case Qualifiers::OCL_ExplicitNone:
- case Qualifiers::OCL_Autoreleasing:
- S.Diag(AL.getLoc(), diag::warn_objc_precise_lifetime_meaningless)
- << (Lifetime == Qualifiers::OCL_Autoreleasing);
- break;
- }
-
- D->addAttr(::new (S.Context) ObjCPreciseLifetimeAttr(S.Context, AL));
-}
-
-static void handleSwiftAttrAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- // Make sure that there is a string literal as the annotation's single
- // argument.
- StringRef Str;
- if (!S.checkStringLiteralArgumentAttr(AL, 0, Str))
- return;
-
- D->addAttr(::new (S.Context) SwiftAttrAttr(S.Context, AL, Str));
-}
-
-static void handleSwiftBridge(Sema &S, Decl *D, const ParsedAttr &AL) {
- // Make sure that there is a string literal as the annotation's single
- // argument.
- StringRef BT;
- if (!S.checkStringLiteralArgumentAttr(AL, 0, BT))
- return;
-
- // Warn about duplicate attributes if they have different arguments, but drop
- // any duplicate attributes regardless.
- if (const auto *Other = D->getAttr<SwiftBridgeAttr>()) {
- if (Other->getSwiftType() != BT)
- S.Diag(AL.getLoc(), diag::warn_duplicate_attribute) << AL;
- return;
- }
-
- D->addAttr(::new (S.Context) SwiftBridgeAttr(S.Context, AL, BT));
-}
-
-static bool isErrorParameter(Sema &S, QualType QT) {
- const auto *PT = QT->getAs<PointerType>();
- if (!PT)
- return false;
-
- QualType Pointee = PT->getPointeeType();
-
- // Check for NSError**.
- if (const auto *OPT = Pointee->getAs<ObjCObjectPointerType>())
- if (const auto *ID = OPT->getInterfaceDecl())
- if (ID->getIdentifier() == S.getNSErrorIdent())
- return true;
-
- // Check for CFError**.
- if (const auto *PT = Pointee->getAs<PointerType>())
- if (const auto *RT = PT->getPointeeType()->getAs<RecordType>())
- if (S.isCFError(RT->getDecl()))
- return true;
-
- return false;
-}
-
-static void handleSwiftError(Sema &S, Decl *D, const ParsedAttr &AL) {
- auto hasErrorParameter = [](Sema &S, Decl *D, const ParsedAttr &AL) -> bool {
- for (unsigned I = 0, E = getFunctionOrMethodNumParams(D); I != E; ++I) {
- if (isErrorParameter(S, getFunctionOrMethodParamType(D, I)))
- return true;
- }
-
- S.Diag(AL.getLoc(), diag::err_attr_swift_error_no_error_parameter)
- << AL << isa<ObjCMethodDecl>(D);
- return false;
- };
-
- auto hasPointerResult = [](Sema &S, Decl *D, const ParsedAttr &AL) -> bool {
- // - C, ObjC, and block pointers are definitely okay.
- // - References are definitely not okay.
- // - nullptr_t is weird, but acceptable.
- QualType RT = getFunctionOrMethodResultType(D);
- if (RT->hasPointerRepresentation() && !RT->isReferenceType())
- return true;
-
- S.Diag(AL.getLoc(), diag::err_attr_swift_error_return_type)
- << AL << AL.getArgAsIdent(0)->Ident->getName() << isa<ObjCMethodDecl>(D)
- << /*pointer*/ 1;
- return false;
- };
-
- auto hasIntegerResult = [](Sema &S, Decl *D, const ParsedAttr &AL) -> bool {
- QualType RT = getFunctionOrMethodResultType(D);
- if (RT->isIntegralType(S.Context))
- return true;
-
- S.Diag(AL.getLoc(), diag::err_attr_swift_error_return_type)
- << AL << AL.getArgAsIdent(0)->Ident->getName() << isa<ObjCMethodDecl>(D)
- << /*integral*/ 0;
- return false;
- };
-
- if (D->isInvalidDecl())
- return;
-
- IdentifierLoc *Loc = AL.getArgAsIdent(0);
- SwiftErrorAttr::ConventionKind Convention;
- if (!SwiftErrorAttr::ConvertStrToConventionKind(Loc->Ident->getName(),
- Convention)) {
- S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
- << AL << Loc->Ident;
- return;
- }
-
- switch (Convention) {
- case SwiftErrorAttr::None:
- // No additional validation required.
- break;
-
- case SwiftErrorAttr::NonNullError:
- if (!hasErrorParameter(S, D, AL))
- return;
- break;
-
- case SwiftErrorAttr::NullResult:
- if (!hasErrorParameter(S, D, AL) || !hasPointerResult(S, D, AL))
- return;
- break;
-
- case SwiftErrorAttr::NonZeroResult:
- case SwiftErrorAttr::ZeroResult:
- if (!hasErrorParameter(S, D, AL) || !hasIntegerResult(S, D, AL))
- return;
- break;
- }
-
- D->addAttr(::new (S.Context) SwiftErrorAttr(S.Context, AL, Convention));
-}
-
-static void checkSwiftAsyncErrorBlock(Sema &S, Decl *D,
- const SwiftAsyncErrorAttr *ErrorAttr,
- const SwiftAsyncAttr *AsyncAttr) {
- if (AsyncAttr->getKind() == SwiftAsyncAttr::None) {
- if (ErrorAttr->getConvention() != SwiftAsyncErrorAttr::None) {
- S.Diag(AsyncAttr->getLocation(),
- diag::err_swift_async_error_without_swift_async)
- << AsyncAttr << isa<ObjCMethodDecl>(D);
- }
- return;
- }
-
- const ParmVarDecl *HandlerParam = getFunctionOrMethodParam(
- D, AsyncAttr->getCompletionHandlerIndex().getASTIndex());
- // handleSwiftAsyncAttr already verified the type is correct, so no need to
- // double-check it here.
- const auto *FuncTy = HandlerParam->getType()
- ->castAs<BlockPointerType>()
- ->getPointeeType()
- ->getAs<FunctionProtoType>();
- ArrayRef<QualType> BlockParams;
- if (FuncTy)
- BlockParams = FuncTy->getParamTypes();
-
- switch (ErrorAttr->getConvention()) {
- case SwiftAsyncErrorAttr::ZeroArgument:
- case SwiftAsyncErrorAttr::NonZeroArgument: {
- uint32_t ParamIdx = ErrorAttr->getHandlerParamIdx();
- if (ParamIdx == 0 || ParamIdx > BlockParams.size()) {
- S.Diag(ErrorAttr->getLocation(),
- diag::err_attribute_argument_out_of_bounds) << ErrorAttr << 2;
- return;
- }
- QualType ErrorParam = BlockParams[ParamIdx - 1];
- if (!ErrorParam->isIntegralType(S.Context)) {
- StringRef ConvStr =
- ErrorAttr->getConvention() == SwiftAsyncErrorAttr::ZeroArgument
- ? "zero_argument"
- : "nonzero_argument";
- S.Diag(ErrorAttr->getLocation(), diag::err_swift_async_error_non_integral)
- << ErrorAttr << ConvStr << ParamIdx << ErrorParam;
- return;
- }
- break;
- }
- case SwiftAsyncErrorAttr::NonNullError: {
- bool AnyErrorParams = false;
- for (QualType Param : BlockParams) {
- // Check for NSError *.
- if (const auto *ObjCPtrTy = Param->getAs<ObjCObjectPointerType>()) {
- if (const auto *ID = ObjCPtrTy->getInterfaceDecl()) {
- if (ID->getIdentifier() == S.getNSErrorIdent()) {
- AnyErrorParams = true;
- break;
- }
- }
- }
- // Check for CFError *.
- if (const auto *PtrTy = Param->getAs<PointerType>()) {
- if (const auto *RT = PtrTy->getPointeeType()->getAs<RecordType>()) {
- if (S.isCFError(RT->getDecl())) {
- AnyErrorParams = true;
- break;
- }
- }
- }
- }
-
- if (!AnyErrorParams) {
- S.Diag(ErrorAttr->getLocation(),
- diag::err_swift_async_error_no_error_parameter)
- << ErrorAttr << isa<ObjCMethodDecl>(D);
- return;
- }
- break;
- }
- case SwiftAsyncErrorAttr::None:
- break;
- }
-}
-
-static void handleSwiftAsyncError(Sema &S, Decl *D, const ParsedAttr &AL) {
- IdentifierLoc *IDLoc = AL.getArgAsIdent(0);
- SwiftAsyncErrorAttr::ConventionKind ConvKind;
- if (!SwiftAsyncErrorAttr::ConvertStrToConventionKind(IDLoc->Ident->getName(),
- ConvKind)) {
- S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
- << AL << IDLoc->Ident;
- return;
- }
-
- uint32_t ParamIdx = 0;
- switch (ConvKind) {
- case SwiftAsyncErrorAttr::ZeroArgument:
- case SwiftAsyncErrorAttr::NonZeroArgument: {
- if (!AL.checkExactlyNumArgs(S, 2))
- return;
-
- Expr *IdxExpr = AL.getArgAsExpr(1);
- if (!checkUInt32Argument(S, AL, IdxExpr, ParamIdx))
- return;
- break;
- }
- case SwiftAsyncErrorAttr::NonNullError:
- case SwiftAsyncErrorAttr::None: {
- if (!AL.checkExactlyNumArgs(S, 1))
- return;
- break;
- }
- }
-
- auto *ErrorAttr =
- ::new (S.Context) SwiftAsyncErrorAttr(S.Context, AL, ConvKind, ParamIdx);
- D->addAttr(ErrorAttr);
-
- if (auto *AsyncAttr = D->getAttr<SwiftAsyncAttr>())
- checkSwiftAsyncErrorBlock(S, D, ErrorAttr, AsyncAttr);
-}
-
-// For a function, this will validate a compound Swift name, e.g.
-// <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>, and
-// the function will output the number of parameter names, and whether this is a
-// single-arg initializer.
-//
-// For a type, enum constant, property, or variable declaration, this will
-// validate either a simple identifier, or a qualified
-// <code>context.identifier</code> name.
-static bool
-validateSwiftFunctionName(Sema &S, const ParsedAttr &AL, SourceLocation Loc,
- StringRef Name, unsigned &SwiftParamCount,
- bool &IsSingleParamInit) {
- SwiftParamCount = 0;
- IsSingleParamInit = false;
-
- // Check whether this will be mapped to a getter or setter of a property.
- bool IsGetter = false, IsSetter = false;
- if (Name.starts_with("getter:")) {
- IsGetter = true;
- Name = Name.substr(7);
- } else if (Name.starts_with("setter:")) {
- IsSetter = true;
- Name = Name.substr(7);
- }
-
- if (Name.back() != ')') {
- S.Diag(Loc, diag::warn_attr_swift_name_function) << AL;
- return false;
- }
-
- bool IsMember = false;
- StringRef ContextName, BaseName, Parameters;
-
- std::tie(BaseName, Parameters) = Name.split('(');
-
- // Split at the first '.', if it exists, which separates the context name
- // from the base name.
- std::tie(ContextName, BaseName) = BaseName.split('.');
- if (BaseName.empty()) {
- BaseName = ContextName;
- ContextName = StringRef();
- } else if (ContextName.empty() || !isValidAsciiIdentifier(ContextName)) {
- S.Diag(Loc, diag::warn_attr_swift_name_invalid_identifier)
- << AL << /*context*/ 1;
- return false;
- } else {
- IsMember = true;
- }
-
- if (!isValidAsciiIdentifier(BaseName) || BaseName == "_") {
- S.Diag(Loc, diag::warn_attr_swift_name_invalid_identifier)
- << AL << /*basename*/ 0;
- return false;
- }
-
- bool IsSubscript = BaseName == "subscript";
- // A subscript accessor must be a getter or setter.
- if (IsSubscript && !IsGetter && !IsSetter) {
- S.Diag(Loc, diag::warn_attr_swift_name_subscript_invalid_parameter)
- << AL << /* getter or setter */ 0;
- return false;
- }
-
- if (Parameters.empty()) {
- S.Diag(Loc, diag::warn_attr_swift_name_missing_parameters) << AL;
- return false;
- }
-
- assert(Parameters.back() == ')' && "expected ')'");
- Parameters = Parameters.drop_back(); // ')'
-
- if (Parameters.empty()) {
- // Setters and subscripts must have at least one parameter.
- if (IsSubscript) {
- S.Diag(Loc, diag::warn_attr_swift_name_subscript_invalid_parameter)
- << AL << /* have at least one parameter */1;
- return false;
- }
-
- if (IsSetter) {
- S.Diag(Loc, diag::warn_attr_swift_name_setter_parameters) << AL;
- return false;
- }
-
- return true;
- }
-
- if (Parameters.back() != ':') {
- S.Diag(Loc, diag::warn_attr_swift_name_function) << AL;
- return false;
- }
-
- StringRef CurrentParam;
- std::optional<unsigned> SelfLocation;
- unsigned NewValueCount = 0;
- std::optional<unsigned> NewValueLocation;
- do {
- std::tie(CurrentParam, Parameters) = Parameters.split(':');
-
- if (!isValidAsciiIdentifier(CurrentParam)) {
- S.Diag(Loc, diag::warn_attr_swift_name_invalid_identifier)
- << AL << /*parameter*/2;
- return false;
- }
-
- if (IsMember && CurrentParam == "self") {
- // "self" indicates the "self" argument for a member.
-
- // More than one "self"?
- if (SelfLocation) {
- S.Diag(Loc, diag::warn_attr_swift_name_multiple_selfs) << AL;
- return false;
- }
-
- // The "self" location is the current parameter.
- SelfLocation = SwiftParamCount;
- } else if (CurrentParam == "newValue") {
- // "newValue" indicates the "newValue" argument for a setter.
-
- // There should only be one 'newValue', but it's only significant for
- // subscript accessors, so don't error right away.
- ++NewValueCount;
-
- NewValueLocation = SwiftParamCount;
- }
-
- ++SwiftParamCount;
- } while (!Parameters.empty());
-
- // Only instance subscripts are currently supported.
- if (IsSubscript && !SelfLocation) {
- S.Diag(Loc, diag::warn_attr_swift_name_subscript_invalid_parameter)
- << AL << /*have a 'self:' parameter*/2;
- return false;
- }
-
- IsSingleParamInit =
- SwiftParamCount == 1 && BaseName == "init" && CurrentParam != "_";
-
- // Check the number of parameters for a getter/setter.
- if (IsGetter || IsSetter) {
- // Setters have one parameter for the new value.
- unsigned NumExpectedParams = IsGetter ? 0 : 1;
- unsigned ParamDiag =
- IsGetter ? diag::warn_attr_swift_name_getter_parameters
- : diag::warn_attr_swift_name_setter_parameters;
-
- // Instance methods have one parameter for "self".
- if (SelfLocation)
- ++NumExpectedParams;
-
- // Subscripts may have additional parameters beyond the expected params for
- // the index.
- if (IsSubscript) {
- if (SwiftParamCount < NumExpectedParams) {
- S.Diag(Loc, ParamDiag) << AL;
- return false;
- }
-
- // A subscript setter must explicitly label its newValue parameter to
- // distinguish it from index parameters.
- if (IsSetter) {
- if (!NewValueLocation) {
- S.Diag(Loc, diag::warn_attr_swift_name_subscript_setter_no_newValue)
- << AL;
- return false;
- }
- if (NewValueCount > 1) {
- S.Diag(Loc, diag::warn_attr_swift_name_subscript_setter_multiple_newValues)
- << AL;
- return false;
- }
- } else {
- // Subscript getters should have no 'newValue:' parameter.
- if (NewValueLocation) {
- S.Diag(Loc, diag::warn_attr_swift_name_subscript_getter_newValue)
- << AL;
- return false;
- }
- }
- } else {
- // Property accessors must have exactly the number of expected params.
- if (SwiftParamCount != NumExpectedParams) {
- S.Diag(Loc, ParamDiag) << AL;
- return false;
- }
- }
- }
-
- return true;
-}
-
-bool Sema::DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
- const ParsedAttr &AL, bool IsAsync) {
- if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
- ArrayRef<ParmVarDecl*> Params;
- unsigned ParamCount;
-
- if (const auto *Method = dyn_cast<ObjCMethodDecl>(D)) {
- ParamCount = Method->getSelector().getNumArgs();
- Params = Method->parameters().slice(0, ParamCount);
- } else {
- const auto *F = cast<FunctionDecl>(D);
-
- ParamCount = F->getNumParams();
- Params = F->parameters();
-
- if (!F->hasWrittenPrototype()) {
- Diag(Loc, diag::warn_attribute_wrong_decl_type)
- << AL << AL.isRegularKeywordAttribute()
- << ExpectedFunctionWithProtoType;
- return false;
- }
- }
-
- // The async name drops the last callback parameter.
- if (IsAsync) {
- if (ParamCount == 0) {
- Diag(Loc, diag::warn_attr_swift_name_decl_missing_params)
- << AL << isa<ObjCMethodDecl>(D);
- return false;
- }
- ParamCount -= 1;
- }
-
- unsigned SwiftParamCount;
- bool IsSingleParamInit;
- if (!validateSwiftFunctionName(*this, AL, Loc, Name,
- SwiftParamCount, IsSingleParamInit))
- return false;
-
- bool ParamCountValid;
- if (SwiftParamCount == ParamCount) {
- ParamCountValid = true;
- } else if (SwiftParamCount > ParamCount) {
- ParamCountValid = IsSingleParamInit && ParamCount == 0;
- } else {
- // We have fewer Swift parameters than Objective-C parameters, but that
- // might be because we've transformed some of them. Check for potential
- // "out" parameters and err on the side of not warning.
- unsigned MaybeOutParamCount =
- llvm::count_if(Params, [](const ParmVarDecl *Param) -> bool {
- QualType ParamTy = Param->getType();
- if (ParamTy->isReferenceType() || ParamTy->isPointerType())
- return !ParamTy->getPointeeType().isConstQualified();
- return false;
- });
-
- ParamCountValid = SwiftParamCount + MaybeOutParamCount >= ParamCount;
- }
-
- if (!ParamCountValid) {
- Diag(Loc, diag::warn_attr_swift_name_num_params)
- << (SwiftParamCount > ParamCount) << AL << ParamCount
- << SwiftParamCount;
- return false;
- }
- } else if ((isa<EnumConstantDecl>(D) || isa<ObjCProtocolDecl>(D) ||
- isa<ObjCInterfaceDecl>(D) || isa<ObjCPropertyDecl>(D) ||
- isa<VarDecl>(D) || isa<TypedefNameDecl>(D) || isa<TagDecl>(D) ||
- isa<IndirectFieldDecl>(D) || isa<FieldDecl>(D)) &&
- !IsAsync) {
- StringRef ContextName, BaseName;
-
- std::tie(ContextName, BaseName) = Name.split('.');
- if (BaseName.empty()) {
- BaseName = ContextName;
- ContextName = StringRef();
- } else if (!isValidAsciiIdentifier(ContextName)) {
- Diag(Loc, diag::warn_attr_swift_name_invalid_identifier) << AL
- << /*context*/1;
- return false;
- }
-
- if (!isValidAsciiIdentifier(BaseName)) {
- Diag(Loc, diag::warn_attr_swift_name_invalid_identifier) << AL
- << /*basename*/0;
- return false;
- }
- } else {
- Diag(Loc, diag::warn_attr_swift_name_decl_kind) << AL;
- return false;
- }
- return true;
-}
-
-static void handleSwiftName(Sema &S, Decl *D, const ParsedAttr &AL) {
- StringRef Name;
- SourceLocation Loc;
- if (!S.checkStringLiteralArgumentAttr(AL, 0, Name, &Loc))
- return;
-
- if (!S.DiagnoseSwiftName(D, Name, Loc, AL, /*IsAsync=*/false))
- return;
-
- D->addAttr(::new (S.Context) SwiftNameAttr(S.Context, AL, Name));
-}
-
-static void handleSwiftAsyncName(Sema &S, Decl *D, const ParsedAttr &AL) {
- StringRef Name;
- SourceLocation Loc;
- if (!S.checkStringLiteralArgumentAttr(AL, 0, Name, &Loc))
- return;
-
- if (!S.DiagnoseSwiftName(D, Name, Loc, AL, /*IsAsync=*/true))
- return;
-
- D->addAttr(::new (S.Context) SwiftAsyncNameAttr(S.Context, AL, Name));
-}
-
-static void handleSwiftNewType(Sema &S, Decl *D, const ParsedAttr &AL) {
- // Make sure that there is an identifier as the annotation's single argument.
- if (!AL.checkExactlyNumArgs(S, 1))
- return;
-
- if (!AL.isArgIdent(0)) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
- << AL << AANT_ArgumentIdentifier;
- return;
- }
-
- SwiftNewTypeAttr::NewtypeKind Kind;
- IdentifierInfo *II = AL.getArgAsIdent(0)->Ident;
- if (!SwiftNewTypeAttr::ConvertStrToNewtypeKind(II->getName(), Kind)) {
- S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported) << AL << II;
- return;
- }
-
- if (!isa<TypedefNameDecl>(D)) {
- S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type_str)
- << AL << AL.isRegularKeywordAttribute() << "typedefs";
- return;
- }
-
- D->addAttr(::new (S.Context) SwiftNewTypeAttr(S.Context, AL, Kind));
-}
-
-static void handleSwiftAsyncAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!AL.isArgIdent(0)) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
- << AL << 1 << AANT_ArgumentIdentifier;
- return;
- }
-
- SwiftAsyncAttr::Kind Kind;
- IdentifierInfo *II = AL.getArgAsIdent(0)->Ident;
- if (!SwiftAsyncAttr::ConvertStrToKind(II->getName(), Kind)) {
- S.Diag(AL.getLoc(), diag::err_swift_async_no_access) << AL << II;
- return;
- }
-
- ParamIdx Idx;
- if (Kind == SwiftAsyncAttr::None) {
- // If this is 'none', then there shouldn't be any additional arguments.
- if (!AL.checkExactlyNumArgs(S, 1))
- return;
- } else {
- // Non-none swift_async requires a completion handler index argument.
- if (!AL.checkExactlyNumArgs(S, 2))
- return;
-
- Expr *HandlerIdx = AL.getArgAsExpr(1);
- if (!checkFunctionOrMethodParameterIndex(S, D, AL, 2, HandlerIdx, Idx))
- return;
-
- const ParmVarDecl *CompletionBlock =
- getFunctionOrMethodParam(D, Idx.getASTIndex());
- QualType CompletionBlockType = CompletionBlock->getType();
- if (!CompletionBlockType->isBlockPointerType()) {
- S.Diag(CompletionBlock->getLocation(),
- diag::err_swift_async_bad_block_type)
- << CompletionBlock->getType();
- return;
- }
- QualType BlockTy =
- CompletionBlockType->castAs<BlockPointerType>()->getPointeeType();
- if (!BlockTy->castAs<FunctionType>()->getReturnType()->isVoidType()) {
- S.Diag(CompletionBlock->getLocation(),
- diag::err_swift_async_bad_block_type)
- << CompletionBlock->getType();
- return;
- }
- }
-
- auto *AsyncAttr =
- ::new (S.Context) SwiftAsyncAttr(S.Context, AL, Kind, Idx);
- D->addAttr(AsyncAttr);
-
- if (auto *ErrorAttr = D->getAttr<SwiftAsyncErrorAttr>())
- checkSwiftAsyncErrorBlock(S, D, ErrorAttr, AsyncAttr);
-}
-
-//===----------------------------------------------------------------------===//
// Microsoft specific attribute handlers.
//===----------------------------------------------------------------------===//
@@ -7144,230 +5396,6 @@ static void handleUuidAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(UA);
}
-static void handleHLSLNumThreadsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- llvm::VersionTuple SMVersion =
- S.Context.getTargetInfo().getTriple().getOSVersion();
- uint32_t ZMax = 1024;
- uint32_t ThreadMax = 1024;
- if (SMVersion.getMajor() <= 4) {
- ZMax = 1;
- ThreadMax = 768;
- } else if (SMVersion.getMajor() == 5) {
- ZMax = 64;
- ThreadMax = 1024;
- }
-
- uint32_t X;
- if (!checkUInt32Argument(S, AL, AL.getArgAsExpr(0), X))
- return;
- if (X > 1024) {
- S.Diag(AL.getArgAsExpr(0)->getExprLoc(),
- diag::err_hlsl_numthreads_argument_oor) << 0 << 1024;
- return;
- }
- uint32_t Y;
- if (!checkUInt32Argument(S, AL, AL.getArgAsExpr(1), Y))
- return;
- if (Y > 1024) {
- S.Diag(AL.getArgAsExpr(1)->getExprLoc(),
- diag::err_hlsl_numthreads_argument_oor) << 1 << 1024;
- return;
- }
- uint32_t Z;
- if (!checkUInt32Argument(S, AL, AL.getArgAsExpr(2), Z))
- return;
- if (Z > ZMax) {
- S.Diag(AL.getArgAsExpr(2)->getExprLoc(),
- diag::err_hlsl_numthreads_argument_oor) << 2 << ZMax;
- return;
- }
-
- if (X * Y * Z > ThreadMax) {
- S.Diag(AL.getLoc(), diag::err_hlsl_numthreads_invalid) << ThreadMax;
- return;
- }
-
- HLSLNumThreadsAttr *NewAttr = S.mergeHLSLNumThreadsAttr(D, AL, X, Y, Z);
- if (NewAttr)
- D->addAttr(NewAttr);
-}
-
-HLSLNumThreadsAttr *Sema::mergeHLSLNumThreadsAttr(Decl *D,
- const AttributeCommonInfo &AL,
- int X, int Y, int Z) {
- if (HLSLNumThreadsAttr *NT = D->getAttr<HLSLNumThreadsAttr>()) {
- if (NT->getX() != X || NT->getY() != Y || NT->getZ() != Z) {
- Diag(NT->getLocation(), diag::err_hlsl_attribute_param_mismatch) << AL;
- Diag(AL.getLoc(), diag::note_conflicting_attribute);
- }
- return nullptr;
- }
- return ::new (Context) HLSLNumThreadsAttr(Context, AL, X, Y, Z);
-}
-
-static bool isLegalTypeForHLSLSV_DispatchThreadID(QualType T) {
- if (!T->hasUnsignedIntegerRepresentation())
- return false;
- if (const auto *VT = T->getAs<VectorType>())
- return VT->getNumElements() <= 3;
- return true;
-}
-
-static void handleHLSLSV_DispatchThreadIDAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- // FIXME: support semantic on field.
- // See https://github.com/llvm/llvm-project/issues/57889.
- if (isa<FieldDecl>(D)) {
- S.Diag(AL.getLoc(), diag::err_hlsl_attr_invalid_ast_node)
- << AL << "parameter";
- return;
- }
-
- auto *VD = cast<ValueDecl>(D);
- if (!isLegalTypeForHLSLSV_DispatchThreadID(VD->getType())) {
- S.Diag(AL.getLoc(), diag::err_hlsl_attr_invalid_type)
- << AL << "uint/uint2/uint3";
- return;
- }
-
- D->addAttr(::new (S.Context) HLSLSV_DispatchThreadIDAttr(S.Context, AL));
-}
-
-static void handleHLSLShaderAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- StringRef Str;
- SourceLocation ArgLoc;
- if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
- return;
-
- HLSLShaderAttr::ShaderType ShaderType;
- if (!HLSLShaderAttr::ConvertStrToShaderType(Str, ShaderType)) {
- S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
- << AL << Str << ArgLoc;
- return;
- }
-
- // FIXME: check function match the shader stage.
-
- HLSLShaderAttr *NewAttr = S.mergeHLSLShaderAttr(D, AL, ShaderType);
- if (NewAttr)
- D->addAttr(NewAttr);
-}
-
-HLSLShaderAttr *
-Sema::mergeHLSLShaderAttr(Decl *D, const AttributeCommonInfo &AL,
- HLSLShaderAttr::ShaderType ShaderType) {
- if (HLSLShaderAttr *NT = D->getAttr<HLSLShaderAttr>()) {
- if (NT->getType() != ShaderType) {
- Diag(NT->getLocation(), diag::err_hlsl_attribute_param_mismatch) << AL;
- Diag(AL.getLoc(), diag::note_conflicting_attribute);
- }
- return nullptr;
- }
- return HLSLShaderAttr::Create(Context, ShaderType, AL);
-}
-
-static void handleHLSLResourceBindingAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- StringRef Space = "space0";
- StringRef Slot = "";
-
- if (!AL.isArgIdent(0)) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
- << AL << AANT_ArgumentIdentifier;
- return;
- }
-
- IdentifierLoc *Loc = AL.getArgAsIdent(0);
- StringRef Str = Loc->Ident->getName();
- SourceLocation ArgLoc = Loc->Loc;
-
- SourceLocation SpaceArgLoc;
- if (AL.getNumArgs() == 2) {
- Slot = Str;
- if (!AL.isArgIdent(1)) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
- << AL << AANT_ArgumentIdentifier;
- return;
- }
-
- IdentifierLoc *Loc = AL.getArgAsIdent(1);
- Space = Loc->Ident->getName();
- SpaceArgLoc = Loc->Loc;
- } else {
- Slot = Str;
- }
-
- // Validate.
- if (!Slot.empty()) {
- switch (Slot[0]) {
- case 'u':
- case 'b':
- case 's':
- case 't':
- break;
- default:
- S.Diag(ArgLoc, diag::err_hlsl_unsupported_register_type)
- << Slot.substr(0, 1);
- return;
- }
-
- StringRef SlotNum = Slot.substr(1);
- unsigned Num = 0;
- if (SlotNum.getAsInteger(10, Num)) {
- S.Diag(ArgLoc, diag::err_hlsl_unsupported_register_number);
- return;
- }
- }
-
- if (!Space.starts_with("space")) {
- S.Diag(SpaceArgLoc, diag::err_hlsl_expected_space) << Space;
- return;
- }
- StringRef SpaceNum = Space.substr(5);
- unsigned Num = 0;
- if (SpaceNum.getAsInteger(10, Num)) {
- S.Diag(SpaceArgLoc, diag::err_hlsl_expected_space) << Space;
- return;
- }
-
- // FIXME: check reg type match decl. Issue
- // https://github.com/llvm/llvm-project/issues/57886.
- HLSLResourceBindingAttr *NewAttr =
- HLSLResourceBindingAttr::Create(S.getASTContext(), Slot, Space, AL);
- if (NewAttr)
- D->addAttr(NewAttr);
-}
-
-static void handleHLSLParamModifierAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- HLSLParamModifierAttr *NewAttr = S.mergeHLSLParamModifierAttr(
- D, AL,
- static_cast<HLSLParamModifierAttr::Spelling>(AL.getSemanticSpelling()));
- if (NewAttr)
- D->addAttr(NewAttr);
-}
-
-HLSLParamModifierAttr *
-Sema::mergeHLSLParamModifierAttr(Decl *D, const AttributeCommonInfo &AL,
- HLSLParamModifierAttr::Spelling Spelling) {
- // We can only merge an `in` attribute with an `out` attribute. All other
- // combinations of duplicated attributes are ill-formed.
- if (HLSLParamModifierAttr *PA = D->getAttr<HLSLParamModifierAttr>()) {
- if ((PA->isIn() && Spelling == HLSLParamModifierAttr::Keyword_out) ||
- (PA->isOut() && Spelling == HLSLParamModifierAttr::Keyword_in)) {
- D->dropAttr<HLSLParamModifierAttr>();
- SourceRange AdjustedRange = {PA->getLocation(), AL.getRange().getEnd()};
- return HLSLParamModifierAttr::Create(
- Context, /*MergedSpelling=*/true, AdjustedRange,
- HLSLParamModifierAttr::Keyword_inout);
- }
- Diag(AL.getLoc(), diag::err_hlsl_duplicate_parameter_modifier) << AL;
- Diag(PA->getLocation(), diag::note_conflicting_attribute);
- return nullptr;
- }
- return HLSLParamModifierAttr::Create(Context, AL);
-}
-
static void handleMSInheritanceAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!S.LangOpts.CPlusPlus) {
S.Diag(AL.getLoc(), diag::err_attribute_not_supported_in_lang)
@@ -7452,283 +5480,6 @@ static void handleAbiTagAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
AbiTagAttr(S.Context, AL, Tags.data(), Tags.size()));
}
-static void handleARMInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- // Check the attribute arguments.
- if (AL.getNumArgs() > 1) {
- S.Diag(AL.getLoc(), diag::err_attribute_too_many_arguments) << AL << 1;
- return;
- }
-
- StringRef Str;
- SourceLocation ArgLoc;
-
- if (AL.getNumArgs() == 0)
- Str = "";
- else if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
- return;
-
- ARMInterruptAttr::InterruptType Kind;
- if (!ARMInterruptAttr::ConvertStrToInterruptType(Str, Kind)) {
- S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported) << AL << Str
- << ArgLoc;
- return;
- }
-
- D->addAttr(::new (S.Context) ARMInterruptAttr(S.Context, AL, Kind));
-}
-
-static void handleMSP430InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- // MSP430 'interrupt' attribute is applied to
- // a function with no parameters and void return type.
- if (!isFunctionOrMethod(D)) {
- S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << AL << AL.isRegularKeywordAttribute() << ExpectedFunctionOrMethod;
- return;
- }
-
- if (hasFunctionProto(D) && getFunctionOrMethodNumParams(D) != 0) {
- S.Diag(D->getLocation(), diag::warn_interrupt_attribute_invalid)
- << /*MSP430*/ 1 << 0;
- return;
- }
-
- if (!getFunctionOrMethodResultType(D)->isVoidType()) {
- S.Diag(D->getLocation(), diag::warn_interrupt_attribute_invalid)
- << /*MSP430*/ 1 << 1;
- return;
- }
-
- // The attribute takes one integer argument.
- if (!AL.checkExactlyNumArgs(S, 1))
- return;
-
- if (!AL.isArgExpr(0)) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
- << AL << AANT_ArgumentIntegerConstant;
- return;
- }
-
- Expr *NumParamsExpr = static_cast<Expr *>(AL.getArgAsExpr(0));
- std::optional<llvm::APSInt> NumParams = llvm::APSInt(32);
- if (!(NumParams = NumParamsExpr->getIntegerConstantExpr(S.Context))) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
- << AL << AANT_ArgumentIntegerConstant
- << NumParamsExpr->getSourceRange();
- return;
- }
- // The argument should be in range 0..63.
- unsigned Num = NumParams->getLimitedValue(255);
- if (Num > 63) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
- << AL << (int)NumParams->getSExtValue()
- << NumParamsExpr->getSourceRange();
- return;
- }
-
- D->addAttr(::new (S.Context) MSP430InterruptAttr(S.Context, AL, Num));
- D->addAttr(UsedAttr::CreateImplicit(S.Context));
-}
-
-static void handleMipsInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- // Only one optional argument permitted.
- if (AL.getNumArgs() > 1) {
- S.Diag(AL.getLoc(), diag::err_attribute_too_many_arguments) << AL << 1;
- return;
- }
-
- StringRef Str;
- SourceLocation ArgLoc;
-
- if (AL.getNumArgs() == 0)
- Str = "";
- else if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
- return;
-
- // Semantic checks for a function with the 'interrupt' attribute for MIPS:
- // a) Must be a function.
- // b) Must have no parameters.
- // c) Must have the 'void' return type.
- // d) Cannot have the 'mips16' attribute, as that instruction set
- // lacks the 'eret' instruction.
- // e) The attribute itself must either have no argument or one of the
- // valid interrupt types, see [MipsInterruptDocs].
-
- if (!isFunctionOrMethod(D)) {
- S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << AL << AL.isRegularKeywordAttribute() << ExpectedFunctionOrMethod;
- return;
- }
-
- if (hasFunctionProto(D) && getFunctionOrMethodNumParams(D) != 0) {
- S.Diag(D->getLocation(), diag::warn_interrupt_attribute_invalid)
- << /*MIPS*/ 0 << 0;
- return;
- }
-
- if (!getFunctionOrMethodResultType(D)->isVoidType()) {
- S.Diag(D->getLocation(), diag::warn_interrupt_attribute_invalid)
- << /*MIPS*/ 0 << 1;
- return;
- }
-
- // We still have to do this manually because the Interrupt attributes are
- // a bit special due to sharing their spellings across targets.
- if (checkAttrMutualExclusion<Mips16Attr>(S, D, AL))
- return;
-
- MipsInterruptAttr::InterruptType Kind;
- if (!MipsInterruptAttr::ConvertStrToInterruptType(Str, Kind)) {
- S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
- << AL << "'" + std::string(Str) + "'";
- return;
- }
-
- D->addAttr(::new (S.Context) MipsInterruptAttr(S.Context, AL, Kind));
-}
-
-static void handleM68kInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!AL.checkExactlyNumArgs(S, 1))
- return;
-
- if (!AL.isArgExpr(0)) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
- << AL << AANT_ArgumentIntegerConstant;
- return;
- }
-
- // FIXME: Check for decl - it should be void ()(void).
-
- Expr *NumParamsExpr = static_cast<Expr *>(AL.getArgAsExpr(0));
- auto MaybeNumParams = NumParamsExpr->getIntegerConstantExpr(S.Context);
- if (!MaybeNumParams) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
- << AL << AANT_ArgumentIntegerConstant
- << NumParamsExpr->getSourceRange();
- return;
- }
-
- unsigned Num = MaybeNumParams->getLimitedValue(255);
- if ((Num & 1) || Num > 30) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
- << AL << (int)MaybeNumParams->getSExtValue()
- << NumParamsExpr->getSourceRange();
- return;
- }
-
- D->addAttr(::new (S.Context) M68kInterruptAttr(S.Context, AL, Num));
- D->addAttr(UsedAttr::CreateImplicit(S.Context));
-}
-
-static void handleAnyX86InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- // Semantic checks for a function with the 'interrupt' attribute.
- // a) Must be a function.
- // b) Must have the 'void' return type.
- // c) Must take 1 or 2 arguments.
- // d) The 1st argument must be a pointer.
- // e) The 2nd argument (if any) must be an unsigned integer.
- if (!isFunctionOrMethod(D) || !hasFunctionProto(D) || isInstanceMethod(D) ||
- CXXMethodDecl::isStaticOverloadedOperator(
- cast<NamedDecl>(D)->getDeclName().getCXXOverloadedOperator())) {
- S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL << AL.isRegularKeywordAttribute()
- << ExpectedFunctionWithProtoType;
- return;
- }
- // Interrupt handler must have void return type.
- if (!getFunctionOrMethodResultType(D)->isVoidType()) {
- S.Diag(getFunctionOrMethodResultSourceRange(D).getBegin(),
- diag::err_anyx86_interrupt_attribute)
- << (S.Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86
- ? 0
- : 1)
- << 0;
- return;
- }
- // Interrupt handler must have 1 or 2 parameters.
- unsigned NumParams = getFunctionOrMethodNumParams(D);
- if (NumParams < 1 || NumParams > 2) {
- S.Diag(D->getBeginLoc(), diag::err_anyx86_interrupt_attribute)
- << (S.Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86
- ? 0
- : 1)
- << 1;
- return;
- }
- // The first argument must be a pointer.
- if (!getFunctionOrMethodParamType(D, 0)->isPointerType()) {
- S.Diag(getFunctionOrMethodParamRange(D, 0).getBegin(),
- diag::err_anyx86_interrupt_attribute)
- << (S.Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86
- ? 0
- : 1)
- << 2;
- return;
- }
- // The second argument, if present, must be an unsigned integer.
- unsigned TypeSize =
- S.Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86_64
- ? 64
- : 32;
- if (NumParams == 2 &&
- (!getFunctionOrMethodParamType(D, 1)->isUnsignedIntegerType() ||
- S.Context.getTypeSize(getFunctionOrMethodParamType(D, 1)) != TypeSize)) {
- S.Diag(getFunctionOrMethodParamRange(D, 1).getBegin(),
- diag::err_anyx86_interrupt_attribute)
- << (S.Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86
- ? 0
- : 1)
- << 3 << S.Context.getIntTypeForBitwidth(TypeSize, /*Signed=*/false);
- return;
- }
- D->addAttr(::new (S.Context) AnyX86InterruptAttr(S.Context, AL));
- D->addAttr(UsedAttr::CreateImplicit(S.Context));
-}
-
-static void handleAVRInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!isFunctionOrMethod(D)) {
- S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
- return;
- }
-
- if (!AL.checkExactlyNumArgs(S, 0))
- return;
-
- handleSimpleAttribute<AVRInterruptAttr>(S, D, AL);
-}
-
-static void handleAVRSignalAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!isFunctionOrMethod(D)) {
- S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
- return;
- }
-
- if (!AL.checkExactlyNumArgs(S, 0))
- return;
-
- handleSimpleAttribute<AVRSignalAttr>(S, D, AL);
-}
-
-static void handleBPFPreserveAIRecord(Sema &S, RecordDecl *RD) {
- // Add preserve_access_index attribute to all fields and inner records.
- for (auto *D : RD->decls()) {
- if (D->hasAttr<BPFPreserveAccessIndexAttr>())
- continue;
-
- D->addAttr(BPFPreserveAccessIndexAttr::CreateImplicit(S.Context));
- if (auto *Rec = dyn_cast<RecordDecl>(D))
- handleBPFPreserveAIRecord(S, Rec);
- }
-}
-
-static void handleBPFPreserveAccessIndexAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- auto *Rec = cast<RecordDecl>(D);
- handleBPFPreserveAIRecord(S, Rec);
- Rec->addAttr(::new (S.Context) BPFPreserveAccessIndexAttr(S.Context, AL));
-}
-
static bool hasBTFDeclTagAttr(Decl *D, StringRef Tag) {
for (const auto *I : D->specific_attrs<BTFDeclTagAttr>()) {
if (I->getBTFDeclTag() == Tag)
@@ -7753,352 +5504,40 @@ BTFDeclTagAttr *Sema::mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL) {
return ::new (Context) BTFDeclTagAttr(Context, AL, AL.getBTFDeclTag());
}
-static void handleWebAssemblyExportNameAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- if (!isFunctionOrMethod(D)) {
- S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
- return;
- }
-
- auto *FD = cast<FunctionDecl>(D);
- if (FD->isThisDeclarationADefinition()) {
- S.Diag(D->getLocation(), diag::err_alias_is_definition) << FD << 0;
- return;
- }
-
- StringRef Str;
- SourceLocation ArgLoc;
- if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
- return;
-
- D->addAttr(::new (S.Context) WebAssemblyExportNameAttr(S.Context, AL, Str));
- D->addAttr(UsedAttr::CreateImplicit(S.Context));
-}
-
-WebAssemblyImportModuleAttr *
-Sema::mergeImportModuleAttr(Decl *D, const WebAssemblyImportModuleAttr &AL) {
- auto *FD = cast<FunctionDecl>(D);
-
- if (const auto *ExistingAttr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
- if (ExistingAttr->getImportModule() == AL.getImportModule())
- return nullptr;
- Diag(ExistingAttr->getLocation(), diag::warn_mismatched_import) << 0
- << ExistingAttr->getImportModule() << AL.getImportModule();
- Diag(AL.getLoc(), diag::note_previous_attribute);
- return nullptr;
- }
- if (FD->hasBody()) {
- Diag(AL.getLoc(), diag::warn_import_on_definition) << 0;
- return nullptr;
- }
- return ::new (Context) WebAssemblyImportModuleAttr(Context, AL,
- AL.getImportModule());
-}
-
-WebAssemblyImportNameAttr *
-Sema::mergeImportNameAttr(Decl *D, const WebAssemblyImportNameAttr &AL) {
- auto *FD = cast<FunctionDecl>(D);
-
- if (const auto *ExistingAttr = FD->getAttr<WebAssemblyImportNameAttr>()) {
- if (ExistingAttr->getImportName() == AL.getImportName())
- return nullptr;
- Diag(ExistingAttr->getLocation(), diag::warn_mismatched_import) << 1
- << ExistingAttr->getImportName() << AL.getImportName();
- Diag(AL.getLoc(), diag::note_previous_attribute);
- return nullptr;
- }
- if (FD->hasBody()) {
- Diag(AL.getLoc(), diag::warn_import_on_definition) << 1;
- return nullptr;
- }
- return ::new (Context) WebAssemblyImportNameAttr(Context, AL,
- AL.getImportName());
-}
-
-static void
-handleWebAssemblyImportModuleAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- auto *FD = cast<FunctionDecl>(D);
-
- StringRef Str;
- SourceLocation ArgLoc;
- if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
- return;
- if (FD->hasBody()) {
- S.Diag(AL.getLoc(), diag::warn_import_on_definition) << 0;
- return;
- }
-
- FD->addAttr(::new (S.Context)
- WebAssemblyImportModuleAttr(S.Context, AL, Str));
-}
-
-static void
-handleWebAssemblyImportNameAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- auto *FD = cast<FunctionDecl>(D);
-
- StringRef Str;
- SourceLocation ArgLoc;
- if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
- return;
- if (FD->hasBody()) {
- S.Diag(AL.getLoc(), diag::warn_import_on_definition) << 1;
- return;
- }
-
- FD->addAttr(::new (S.Context) WebAssemblyImportNameAttr(S.Context, AL, Str));
-}
-
-static void handleRISCVInterruptAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- // Warn about repeated attributes.
- if (const auto *A = D->getAttr<RISCVInterruptAttr>()) {
- S.Diag(AL.getRange().getBegin(),
- diag::warn_riscv_repeated_interrupt_attribute);
- S.Diag(A->getLocation(), diag::note_riscv_repeated_interrupt_attribute);
- return;
- }
-
- // Check the attribute argument. Argument is optional.
- if (!AL.checkAtMostNumArgs(S, 1))
- return;
-
- StringRef Str;
- SourceLocation ArgLoc;
-
- // 'machine'is the default interrupt mode.
- if (AL.getNumArgs() == 0)
- Str = "machine";
- else if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
- return;
-
- // Semantic checks for a function with the 'interrupt' attribute:
- // - Must be a function.
- // - Must have no parameters.
- // - Must have the 'void' return type.
- // - The attribute itself must either have no argument or one of the
- // valid interrupt types, see [RISCVInterruptDocs].
-
- if (D->getFunctionType() == nullptr) {
- S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
- return;
- }
-
- if (hasFunctionProto(D) && getFunctionOrMethodNumParams(D) != 0) {
- S.Diag(D->getLocation(), diag::warn_interrupt_attribute_invalid)
- << /*RISC-V*/ 2 << 0;
- return;
- }
-
- if (!getFunctionOrMethodResultType(D)->isVoidType()) {
- S.Diag(D->getLocation(), diag::warn_interrupt_attribute_invalid)
- << /*RISC-V*/ 2 << 1;
- return;
- }
-
- RISCVInterruptAttr::InterruptType Kind;
- if (!RISCVInterruptAttr::ConvertStrToInterruptType(Str, Kind)) {
- S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported) << AL << Str
- << ArgLoc;
- return;
- }
-
- D->addAttr(::new (S.Context) RISCVInterruptAttr(S.Context, AL, Kind));
-}
-
static void handleInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Dispatch the interrupt attribute based on the current target.
switch (S.Context.getTargetInfo().getTriple().getArch()) {
case llvm::Triple::msp430:
- handleMSP430InterruptAttr(S, D, AL);
+ S.MSP430().handleInterruptAttr(D, AL);
break;
case llvm::Triple::mipsel:
case llvm::Triple::mips:
- handleMipsInterruptAttr(S, D, AL);
+ S.MIPS().handleInterruptAttr(D, AL);
break;
case llvm::Triple::m68k:
- handleM68kInterruptAttr(S, D, AL);
+ S.M68k().handleInterruptAttr(D, AL);
break;
case llvm::Triple::x86:
case llvm::Triple::x86_64:
- handleAnyX86InterruptAttr(S, D, AL);
+ S.X86().handleAnyInterruptAttr(D, AL);
break;
case llvm::Triple::avr:
- handleAVRInterruptAttr(S, D, AL);
+ S.AVR().handleInterruptAttr(D, AL);
break;
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
- handleRISCVInterruptAttr(S, D, AL);
+ S.RISCV().handleInterruptAttr(D, AL);
break;
default:
- handleARMInterruptAttr(S, D, AL);
+ S.ARM().handleInterruptAttr(D, AL);
break;
}
}
-static bool
-checkAMDGPUFlatWorkGroupSizeArguments(Sema &S, Expr *MinExpr, Expr *MaxExpr,
- const AMDGPUFlatWorkGroupSizeAttr &Attr) {
- // Accept template arguments for now as they depend on something else.
- // We'll get to check them when they eventually get instantiated.
- if (MinExpr->isValueDependent() || MaxExpr->isValueDependent())
- return false;
-
- uint32_t Min = 0;
- if (!checkUInt32Argument(S, Attr, MinExpr, Min, 0))
- return true;
-
- uint32_t Max = 0;
- if (!checkUInt32Argument(S, Attr, MaxExpr, Max, 1))
- return true;
-
- if (Min == 0 && Max != 0) {
- S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
- << &Attr << 0;
- return true;
- }
- if (Min > Max) {
- S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
- << &Attr << 1;
- return true;
- }
-
- return false;
-}
-
-AMDGPUFlatWorkGroupSizeAttr *
-Sema::CreateAMDGPUFlatWorkGroupSizeAttr(const AttributeCommonInfo &CI,
- Expr *MinExpr, Expr *MaxExpr) {
- AMDGPUFlatWorkGroupSizeAttr TmpAttr(Context, CI, MinExpr, MaxExpr);
-
- if (checkAMDGPUFlatWorkGroupSizeArguments(*this, MinExpr, MaxExpr, TmpAttr))
- return nullptr;
- return ::new (Context)
- AMDGPUFlatWorkGroupSizeAttr(Context, CI, MinExpr, MaxExpr);
-}
-
-void Sema::addAMDGPUFlatWorkGroupSizeAttr(Decl *D,
- const AttributeCommonInfo &CI,
- Expr *MinExpr, Expr *MaxExpr) {
- if (auto *Attr = CreateAMDGPUFlatWorkGroupSizeAttr(CI, MinExpr, MaxExpr))
- D->addAttr(Attr);
-}
-
-static void handleAMDGPUFlatWorkGroupSizeAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- Expr *MinExpr = AL.getArgAsExpr(0);
- Expr *MaxExpr = AL.getArgAsExpr(1);
-
- S.addAMDGPUFlatWorkGroupSizeAttr(D, AL, MinExpr, MaxExpr);
-}
-
-static bool checkAMDGPUWavesPerEUArguments(Sema &S, Expr *MinExpr,
- Expr *MaxExpr,
- const AMDGPUWavesPerEUAttr &Attr) {
- if (S.DiagnoseUnexpandedParameterPack(MinExpr) ||
- (MaxExpr && S.DiagnoseUnexpandedParameterPack(MaxExpr)))
- return true;
-
- // Accept template arguments for now as they depend on something else.
- // We'll get to check them when they eventually get instantiated.
- if (MinExpr->isValueDependent() || (MaxExpr && MaxExpr->isValueDependent()))
- return false;
-
- uint32_t Min = 0;
- if (!checkUInt32Argument(S, Attr, MinExpr, Min, 0))
- return true;
-
- uint32_t Max = 0;
- if (MaxExpr && !checkUInt32Argument(S, Attr, MaxExpr, Max, 1))
- return true;
-
- if (Min == 0 && Max != 0) {
- S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
- << &Attr << 0;
- return true;
- }
- if (Max != 0 && Min > Max) {
- S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
- << &Attr << 1;
- return true;
- }
-
- return false;
-}
-
-AMDGPUWavesPerEUAttr *
-Sema::CreateAMDGPUWavesPerEUAttr(const AttributeCommonInfo &CI, Expr *MinExpr,
- Expr *MaxExpr) {
- AMDGPUWavesPerEUAttr TmpAttr(Context, CI, MinExpr, MaxExpr);
-
- if (checkAMDGPUWavesPerEUArguments(*this, MinExpr, MaxExpr, TmpAttr))
- return nullptr;
-
- return ::new (Context) AMDGPUWavesPerEUAttr(Context, CI, MinExpr, MaxExpr);
-}
-
-void Sema::addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
- Expr *MinExpr, Expr *MaxExpr) {
- if (auto *Attr = CreateAMDGPUWavesPerEUAttr(CI, MinExpr, MaxExpr))
- D->addAttr(Attr);
-}
-
-static void handleAMDGPUWavesPerEUAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!AL.checkAtLeastNumArgs(S, 1) || !AL.checkAtMostNumArgs(S, 2))
- return;
-
- Expr *MinExpr = AL.getArgAsExpr(0);
- Expr *MaxExpr = (AL.getNumArgs() > 1) ? AL.getArgAsExpr(1) : nullptr;
-
- S.addAMDGPUWavesPerEUAttr(D, AL, MinExpr, MaxExpr);
-}
-
-static void handleAMDGPUNumSGPRAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- uint32_t NumSGPR = 0;
- Expr *NumSGPRExpr = AL.getArgAsExpr(0);
- if (!checkUInt32Argument(S, AL, NumSGPRExpr, NumSGPR))
- return;
-
- D->addAttr(::new (S.Context) AMDGPUNumSGPRAttr(S.Context, AL, NumSGPR));
-}
-
-static void handleAMDGPUNumVGPRAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- uint32_t NumVGPR = 0;
- Expr *NumVGPRExpr = AL.getArgAsExpr(0);
- if (!checkUInt32Argument(S, AL, NumVGPRExpr, NumVGPR))
- return;
-
- D->addAttr(::new (S.Context) AMDGPUNumVGPRAttr(S.Context, AL, NumVGPR));
-}
-
-static void handleX86ForceAlignArgPointerAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- // If we try to apply it to a function pointer, don't warn, but don't
- // do anything, either. It doesn't matter anyway, because there's nothing
- // special about calling a force_align_arg_pointer function.
- const auto *VD = dyn_cast<ValueDecl>(D);
- if (VD && VD->getType()->isFunctionPointerType())
- return;
- // Also don't warn on function pointer typedefs.
- const auto *TD = dyn_cast<TypedefNameDecl>(D);
- if (TD && (TD->getUnderlyingType()->isFunctionPointerType() ||
- TD->getUnderlyingType()->isFunctionType()))
- return;
- // Attribute can only be applied to function types.
- if (!isa<FunctionDecl>(D)) {
- S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
- return;
- }
-
- D->addAttr(::new (S.Context) X86ForceAlignArgPointerAttr(S.Context, AL));
-}
-
static void handleLayoutVersion(Sema &S, Decl *D, const ParsedAttr &AL) {
uint32_t Version;
Expr *VersionExpr = static_cast<Expr *>(AL.getArgAsExpr(0));
- if (!checkUInt32Argument(S, AL, AL.getArgAsExpr(0), Version))
+ if (!S.checkUInt32Argument(AL, AL.getArgAsExpr(0), Version))
return;
// TODO: Investigate what happens with the next major version of MSVC.
@@ -8392,62 +5831,6 @@ static void handleInternalLinkageAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(Internal);
}
-static void handleOpenCLNoSVMAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (S.LangOpts.getOpenCLCompatibleVersion() < 200)
- S.Diag(AL.getLoc(), diag::err_attribute_requires_opencl_version)
- << AL << "2.0" << 1;
- else
- S.Diag(AL.getLoc(), diag::warn_opencl_attr_deprecated_ignored)
- << AL << S.LangOpts.getOpenCLVersionString();
-}
-
-static void handleOpenCLAccessAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (D->isInvalidDecl())
- return;
-
- // Check if there is only one access qualifier.
- if (D->hasAttr<OpenCLAccessAttr>()) {
- if (D->getAttr<OpenCLAccessAttr>()->getSemanticSpelling() ==
- AL.getSemanticSpelling()) {
- S.Diag(AL.getLoc(), diag::warn_duplicate_declspec)
- << AL.getAttrName()->getName() << AL.getRange();
- } else {
- S.Diag(AL.getLoc(), diag::err_opencl_multiple_access_qualifiers)
- << D->getSourceRange();
- D->setInvalidDecl(true);
- return;
- }
- }
-
- // OpenCL v2.0 s6.6 - read_write can be used for image types to specify that
- // an image object can be read and written. OpenCL v2.0 s6.13.6 - A kernel
- // cannot read from and write to the same pipe object. Using the read_write
- // (or __read_write) qualifier with the pipe qualifier is a compilation error.
- // OpenCL v3.0 s6.8 - For OpenCL C 2.0, or with the
- // __opencl_c_read_write_images feature, image objects specified as arguments
- // to a kernel can additionally be declared to be read-write.
- // C++ for OpenCL 1.0 inherits rule from OpenCL C v2.0.
- // C++ for OpenCL 2021 inherits rule from OpenCL C v3.0.
- if (const auto *PDecl = dyn_cast<ParmVarDecl>(D)) {
- const Type *DeclTy = PDecl->getType().getCanonicalType().getTypePtr();
- if (AL.getAttrName()->getName().contains("read_write")) {
- bool ReadWriteImagesUnsupported =
- (S.getLangOpts().getOpenCLCompatibleVersion() < 200) ||
- (S.getLangOpts().getOpenCLCompatibleVersion() == 300 &&
- !S.getOpenCLOptions().isSupported("__opencl_c_read_write_images",
- S.getLangOpts()));
- if (ReadWriteImagesUnsupported || DeclTy->isPipeType()) {
- S.Diag(AL.getLoc(), diag::err_opencl_invalid_read_write)
- << AL << PDecl->getType() << DeclTy->isImageType();
- D->setInvalidDecl(true);
- return;
- }
- }
- }
-
- D->addAttr(::new (S.Context) OpenCLAccessAttr(S.Context, AL));
-}
-
static void handleZeroCallUsedRegsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Check that the argument is a string literal.
StringRef KindStr;
@@ -8466,133 +5849,44 @@ static void handleZeroCallUsedRegsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(ZeroCallUsedRegsAttr::Create(S.Context, Kind, AL));
}
-static void handleCountedByAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!AL.isArgIdent(0)) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
- << AL << AANT_ArgumentIdentifier;
- return;
- }
-
- IdentifierLoc *IL = AL.getArgAsIdent(0);
- CountedByAttr *CBA =
- ::new (S.Context) CountedByAttr(S.Context, AL, IL->Ident);
- CBA->setCountedByFieldLoc(IL->Loc);
- D->addAttr(CBA);
-}
-
-static const FieldDecl *
-FindFieldInTopLevelOrAnonymousStruct(const RecordDecl *RD,
- const IdentifierInfo *FieldName) {
- for (const Decl *D : RD->decls()) {
- if (const auto *FD = dyn_cast<FieldDecl>(D))
- if (FD->getName() == FieldName->getName())
- return FD;
-
- if (const auto *R = dyn_cast<RecordDecl>(D))
- if (const FieldDecl *FD =
- FindFieldInTopLevelOrAnonymousStruct(R, FieldName))
- return FD;
- }
-
- return nullptr;
-}
-
-bool Sema::CheckCountedByAttr(Scope *S, const FieldDecl *FD) {
- LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
- LangOptions::StrictFlexArraysLevelKind::IncompleteOnly;
- if (!Decl::isFlexibleArrayMemberLike(Context, FD, FD->getType(),
- StrictFlexArraysLevel, true)) {
- // The "counted_by" attribute must be on a flexible array member.
- SourceRange SR = FD->getLocation();
- Diag(SR.getBegin(), diag::err_counted_by_attr_not_on_flexible_array_member)
- << SR;
- return true;
- }
-
- const auto *CBA = FD->getAttr<CountedByAttr>();
- const IdentifierInfo *FieldName = CBA->getCountedByField();
-
- auto GetNonAnonStructOrUnion = [](const RecordDecl *RD) {
- while (RD && !RD->getDeclName())
- if (const auto *R = dyn_cast<RecordDecl>(RD->getDeclContext()))
- RD = R;
- else
- break;
-
- return RD;
- };
-
- const RecordDecl *EnclosingRD = GetNonAnonStructOrUnion(FD->getParent());
- const FieldDecl *CountFD =
- FindFieldInTopLevelOrAnonymousStruct(EnclosingRD, FieldName);
-
- if (!CountFD) {
- DeclarationNameInfo NameInfo(FieldName,
- CBA->getCountedByFieldLoc().getBegin());
- LookupResult MemResult(*this, NameInfo, Sema::LookupMemberName);
- LookupName(MemResult, S);
-
- if (!MemResult.empty()) {
- SourceRange SR = CBA->getCountedByFieldLoc();
- Diag(SR.getBegin(), diag::err_flexible_array_count_not_in_same_struct)
- << CBA->getCountedByField() << SR;
-
- if (auto *ND = MemResult.getAsSingle<NamedDecl>()) {
- SR = ND->getLocation();
- Diag(SR.getBegin(), diag::note_flexible_array_counted_by_attr_field)
- << ND << SR;
- }
+static void handleCountedByAttrField(Sema &S, Decl *D, const ParsedAttr &AL) {
+ auto *FD = dyn_cast<FieldDecl>(D);
+ assert(FD);
- return true;
- } else {
- // The "counted_by" field needs to exist in the struct.
- LookupResult OrdResult(*this, NameInfo, Sema::LookupOrdinaryName);
- LookupName(OrdResult, S);
-
- if (!OrdResult.empty()) {
- SourceRange SR = FD->getLocation();
- Diag(SR.getBegin(), diag::err_counted_by_must_be_in_structure)
- << FieldName << SR;
-
- if (auto *ND = OrdResult.getAsSingle<NamedDecl>()) {
- SR = ND->getLocation();
- Diag(SR.getBegin(), diag::note_flexible_array_counted_by_attr_field)
- << ND << SR;
- }
-
- return true;
- }
- }
-
- CXXScopeSpec SS;
- DeclFilterCCC<FieldDecl> Filter(FieldName);
- return DiagnoseEmptyLookup(S, SS, MemResult, Filter, nullptr, std::nullopt,
- const_cast<DeclContext *>(FD->getDeclContext()));
- }
+ auto *CountExpr = AL.getArgAsExpr(0);
+ if (!CountExpr)
+ return;
- if (CountFD->hasAttr<CountedByAttr>()) {
- // The "counted_by" field can't point to the flexible array member.
- SourceRange SR = CBA->getCountedByFieldLoc();
- Diag(SR.getBegin(), diag::err_counted_by_attr_refers_to_flexible_array)
- << CBA->getCountedByField() << SR;
- return true;
+ bool CountInBytes;
+ bool OrNull;
+ switch (AL.getKind()) {
+ case ParsedAttr::AT_CountedBy:
+ CountInBytes = false;
+ OrNull = false;
+ break;
+ case ParsedAttr::AT_CountedByOrNull:
+ CountInBytes = false;
+ OrNull = true;
+ break;
+ case ParsedAttr::AT_SizedBy:
+ CountInBytes = true;
+ OrNull = false;
+ break;
+ case ParsedAttr::AT_SizedByOrNull:
+ CountInBytes = true;
+ OrNull = true;
+ break;
+ default:
+ llvm_unreachable("unexpected counted_by family attribute");
}
- if (!CountFD->getType()->isIntegerType() ||
- CountFD->getType()->isBooleanType()) {
- // The "counted_by" field must have an integer type.
- SourceRange SR = CBA->getCountedByFieldLoc();
- Diag(SR.getBegin(),
- diag::err_flexible_array_counted_by_attr_field_not_integer)
- << CBA->getCountedByField() << SR;
-
- SR = CountFD->getLocation();
- Diag(SR.getBegin(), diag::note_flexible_array_counted_by_attr_field)
- << CountFD << SR;
- return true;
- }
+ llvm::SmallVector<TypeCoupledDeclRefInfo, 1> Decls;
+ if (S.CheckCountedByAttrOnField(FD, CountExpr, Decls, CountInBytes, OrNull))
+ return;
- return false;
+ QualType CAT = S.BuildCountAttributedArrayOrPointerType(
+ FD->getType(), CountExpr, CountInBytes, OrNull);
+ FD->setType(CAT);
}
static void handleFunctionReturnThunksAttr(Sema &S, Decl *D,
@@ -8635,45 +5929,6 @@ static void handleNoUniqueAddressAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(NoUniqueAddressAttr::Create(S.Context, AL));
}
-static void handleSYCLKernelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- // The 'sycl_kernel' attribute applies only to function templates.
- const auto *FD = cast<FunctionDecl>(D);
- const FunctionTemplateDecl *FT = FD->getDescribedFunctionTemplate();
- assert(FT && "Function template is expected");
-
- // Function template must have at least two template parameters.
- const TemplateParameterList *TL = FT->getTemplateParameters();
- if (TL->size() < 2) {
- S.Diag(FT->getLocation(), diag::warn_sycl_kernel_num_of_template_params);
- return;
- }
-
- // Template parameters must be typenames.
- for (unsigned I = 0; I < 2; ++I) {
- const NamedDecl *TParam = TL->getParam(I);
- if (isa<NonTypeTemplateParmDecl>(TParam)) {
- S.Diag(FT->getLocation(),
- diag::warn_sycl_kernel_invalid_template_param_type);
- return;
- }
- }
-
- // Function must have at least one argument.
- if (getFunctionOrMethodNumParams(D) != 1) {
- S.Diag(FT->getLocation(), diag::warn_sycl_kernel_num_of_function_params);
- return;
- }
-
- // Function must return void.
- QualType RetTy = getFunctionOrMethodResultType(D);
- if (!RetTy->isVoidType()) {
- S.Diag(FT->getLocation(), diag::warn_sycl_kernel_return_type);
- return;
- }
-
- handleSimpleAttribute<SYCLKernelAttr>(S, D, AL);
-}
-
static void handleDestroyAttr(Sema &S, Decl *D, const ParsedAttr &A) {
if (!cast<VarDecl>(D)->hasGlobalStorage()) {
S.Diag(D->getLocation(), diag::err_destroy_attr_on_non_static_var)
@@ -8693,81 +5948,6 @@ static void handleUninitializedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(::new (S.Context) UninitializedAttr(S.Context, AL));
}
-static bool tryMakeVariablePseudoStrong(Sema &S, VarDecl *VD,
- bool DiagnoseFailure) {
- QualType Ty = VD->getType();
- if (!Ty->isObjCRetainableType()) {
- if (DiagnoseFailure) {
- S.Diag(VD->getBeginLoc(), diag::warn_ignored_objc_externally_retained)
- << 0;
- }
- return false;
- }
-
- Qualifiers::ObjCLifetime LifetimeQual = Ty.getQualifiers().getObjCLifetime();
-
- // Sema::inferObjCARCLifetime must run after processing decl attributes
- // (because __block lowers to an attribute), so if the lifetime hasn't been
- // explicitly specified, infer it locally now.
- if (LifetimeQual == Qualifiers::OCL_None)
- LifetimeQual = Ty->getObjCARCImplicitLifetime();
-
- // The attributes only really makes sense for __strong variables; ignore any
- // attempts to annotate a parameter with any other lifetime qualifier.
- if (LifetimeQual != Qualifiers::OCL_Strong) {
- if (DiagnoseFailure) {
- S.Diag(VD->getBeginLoc(), diag::warn_ignored_objc_externally_retained)
- << 1;
- }
- return false;
- }
-
- // Tampering with the type of a VarDecl here is a bit of a hack, but we need
- // to ensure that the variable is 'const' so that we can error on
- // modification, which can otherwise over-release.
- VD->setType(Ty.withConst());
- VD->setARCPseudoStrong(true);
- return true;
-}
-
-static void handleObjCExternallyRetainedAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- if (auto *VD = dyn_cast<VarDecl>(D)) {
- assert(!isa<ParmVarDecl>(VD) && "should be diagnosed automatically");
- if (!VD->hasLocalStorage()) {
- S.Diag(D->getBeginLoc(), diag::warn_ignored_objc_externally_retained)
- << 0;
- return;
- }
-
- if (!tryMakeVariablePseudoStrong(S, VD, /*DiagnoseFailure=*/true))
- return;
-
- handleSimpleAttribute<ObjCExternallyRetainedAttr>(S, D, AL);
- return;
- }
-
- // If D is a function-like declaration (method, block, or function), then we
- // make every parameter psuedo-strong.
- unsigned NumParams =
- hasFunctionProto(D) ? getFunctionOrMethodNumParams(D) : 0;
- for (unsigned I = 0; I != NumParams; ++I) {
- auto *PVD = const_cast<ParmVarDecl *>(getFunctionOrMethodParam(D, I));
- QualType Ty = PVD->getType();
-
- // If a user wrote a parameter with __strong explicitly, then assume they
- // want "real" strong semantics for that parameter. This works because if
- // the parameter was written with __strong, then the strong qualifier will
- // be non-local.
- if (Ty.getLocalUnqualifiedType().getQualifiers().getObjCLifetime() ==
- Qualifiers::OCL_Strong)
- continue;
-
- tryMakeVariablePseudoStrong(S, PVD, /*DiagnoseFailure=*/false);
- }
- handleSimpleAttribute<ObjCExternallyRetainedAttr>(S, D, AL);
-}
-
static void handleMIGServerRoutineAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Check that the return type is a `typedef int kern_return_t` or a typedef
// around it, because otherwise MIG convention checks make no sense.
@@ -8922,6 +6102,116 @@ EnforceTCBLeafAttr *Sema::mergeEnforceTCBLeafAttr(
*this, D, AL);
}
+static void handleVTablePointerAuthentication(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
+ CXXRecordDecl *Decl = cast<CXXRecordDecl>(D);
+ const uint32_t NumArgs = AL.getNumArgs();
+ if (NumArgs > 4) {
+ S.Diag(AL.getLoc(), diag::err_attribute_too_many_arguments) << AL << 4;
+ AL.setInvalid();
+ }
+
+ if (NumArgs == 0) {
+ S.Diag(AL.getLoc(), diag::err_attribute_too_few_arguments) << AL;
+ AL.setInvalid();
+ return;
+ }
+
+ if (D->getAttr<VTablePointerAuthenticationAttr>()) {
+ S.Diag(AL.getLoc(), diag::err_duplicated_vtable_pointer_auth) << Decl;
+ AL.setInvalid();
+ }
+
+ auto KeyType = VTablePointerAuthenticationAttr::VPtrAuthKeyType::DefaultKey;
+ if (AL.isArgIdent(0)) {
+ IdentifierLoc *IL = AL.getArgAsIdent(0);
+ if (!VTablePointerAuthenticationAttr::ConvertStrToVPtrAuthKeyType(
+ IL->Ident->getName(), KeyType)) {
+ S.Diag(IL->Loc, diag::err_invalid_authentication_key) << IL->Ident;
+ AL.setInvalid();
+ }
+ if (KeyType == VTablePointerAuthenticationAttr::DefaultKey &&
+ !S.getLangOpts().PointerAuthCalls) {
+ S.Diag(AL.getLoc(), diag::err_no_default_vtable_pointer_auth) << 0;
+ AL.setInvalid();
+ }
+ } else {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ auto AddressDiversityMode = VTablePointerAuthenticationAttr::
+ AddressDiscriminationMode::DefaultAddressDiscrimination;
+ if (AL.getNumArgs() > 1) {
+ if (AL.isArgIdent(1)) {
+ IdentifierLoc *IL = AL.getArgAsIdent(1);
+ if (!VTablePointerAuthenticationAttr::
+ ConvertStrToAddressDiscriminationMode(IL->Ident->getName(),
+ AddressDiversityMode)) {
+ S.Diag(IL->Loc, diag::err_invalid_address_discrimination) << IL->Ident;
+ AL.setInvalid();
+ }
+ if (AddressDiversityMode ==
+ VTablePointerAuthenticationAttr::DefaultAddressDiscrimination &&
+ !S.getLangOpts().PointerAuthCalls) {
+ S.Diag(IL->Loc, diag::err_no_default_vtable_pointer_auth) << 1;
+ AL.setInvalid();
+ }
+ } else {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL << AANT_ArgumentIdentifier;
+ }
+ }
+
+ auto ED = VTablePointerAuthenticationAttr::ExtraDiscrimination::
+ DefaultExtraDiscrimination;
+ if (AL.getNumArgs() > 2) {
+ if (AL.isArgIdent(2)) {
+ IdentifierLoc *IL = AL.getArgAsIdent(2);
+ if (!VTablePointerAuthenticationAttr::ConvertStrToExtraDiscrimination(
+ IL->Ident->getName(), ED)) {
+ S.Diag(IL->Loc, diag::err_invalid_extra_discrimination) << IL->Ident;
+ AL.setInvalid();
+ }
+ if (ED == VTablePointerAuthenticationAttr::DefaultExtraDiscrimination &&
+ !S.getLangOpts().PointerAuthCalls) {
+ S.Diag(AL.getLoc(), diag::err_no_default_vtable_pointer_auth) << 2;
+ AL.setInvalid();
+ }
+ } else {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL << AANT_ArgumentIdentifier;
+ }
+ }
+
+ uint32_t CustomDiscriminationValue = 0;
+ if (ED == VTablePointerAuthenticationAttr::CustomDiscrimination) {
+ if (NumArgs < 4) {
+ S.Diag(AL.getLoc(), diag::err_missing_custom_discrimination) << AL << 4;
+ AL.setInvalid();
+ return;
+ }
+ if (NumArgs > 4) {
+ S.Diag(AL.getLoc(), diag::err_attribute_too_many_arguments) << AL << 4;
+ AL.setInvalid();
+ }
+
+ if (!AL.isArgExpr(3) || !S.checkUInt32Argument(AL, AL.getArgAsExpr(3),
+ CustomDiscriminationValue)) {
+ S.Diag(AL.getLoc(), diag::err_invalid_custom_discrimination);
+ AL.setInvalid();
+ }
+ } else if (NumArgs > 3) {
+ S.Diag(AL.getLoc(), diag::err_attribute_too_many_arguments) << AL << 3;
+ AL.setInvalid();
+ }
+
+ Decl->addAttr(::new (S.Context) VTablePointerAuthenticationAttr(
+ S.Context, AL, KeyType, AddressDiversityMode, ED,
+ CustomDiscriminationValue));
+}
+
//===----------------------------------------------------------------------===//
// Top Level Sema Entry Points
//===----------------------------------------------------------------------===//
@@ -8957,82 +6247,6 @@ static bool MustDelayAttributeArguments(const ParsedAttr &AL) {
return false;
}
-static bool checkArmNewAttrMutualExclusion(
- Sema &S, const ParsedAttr &AL, const FunctionProtoType *FPT,
- FunctionType::ArmStateValue CurrentState, StringRef StateName) {
- auto CheckForIncompatibleAttr =
- [&](FunctionType::ArmStateValue IncompatibleState,
- StringRef IncompatibleStateName) {
- if (CurrentState == IncompatibleState) {
- S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)
- << (std::string("'__arm_new(\"") + StateName.str() + "\")'")
- << (std::string("'") + IncompatibleStateName.str() + "(\"" +
- StateName.str() + "\")'")
- << true;
- AL.setInvalid();
- }
- };
-
- CheckForIncompatibleAttr(FunctionType::ARM_In, "__arm_in");
- CheckForIncompatibleAttr(FunctionType::ARM_Out, "__arm_out");
- CheckForIncompatibleAttr(FunctionType::ARM_InOut, "__arm_inout");
- CheckForIncompatibleAttr(FunctionType::ARM_Preserves, "__arm_preserves");
- return AL.isInvalid();
-}
-
-static void handleArmNewAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!AL.getNumArgs()) {
- S.Diag(AL.getLoc(), diag::err_missing_arm_state) << AL;
- AL.setInvalid();
- return;
- }
-
- std::vector<StringRef> NewState;
- if (const auto *ExistingAttr = D->getAttr<ArmNewAttr>()) {
- for (StringRef S : ExistingAttr->newArgs())
- NewState.push_back(S);
- }
-
- bool HasZA = false;
- bool HasZT0 = false;
- for (unsigned I = 0, E = AL.getNumArgs(); I != E; ++I) {
- StringRef StateName;
- SourceLocation LiteralLoc;
- if (!S.checkStringLiteralArgumentAttr(AL, I, StateName, &LiteralLoc))
- return;
-
- if (StateName == "za")
- HasZA = true;
- else if (StateName == "zt0")
- HasZT0 = true;
- else {
- S.Diag(LiteralLoc, diag::err_unknown_arm_state) << StateName;
- AL.setInvalid();
- return;
- }
-
- if (!llvm::is_contained(NewState, StateName)) // Avoid adding duplicates.
- NewState.push_back(StateName);
- }
-
- if (auto *FPT = dyn_cast<FunctionProtoType>(D->getFunctionType())) {
- FunctionType::ArmStateValue ZAState =
- FunctionType::getArmZAState(FPT->getAArch64SMEAttributes());
- if (HasZA && ZAState != FunctionType::ARM_None &&
- checkArmNewAttrMutualExclusion(S, AL, FPT, ZAState, "za"))
- return;
- FunctionType::ArmStateValue ZT0State =
- FunctionType::getArmZT0State(FPT->getAArch64SMEAttributes());
- if (HasZT0 && ZT0State != FunctionType::ARM_None &&
- checkArmNewAttrMutualExclusion(S, AL, FPT, ZT0State, "zt0"))
- return;
- }
-
- D->dropAttr<ArmNewAttr>();
- D->addAttr(::new (S.Context)
- ArmNewAttr(S.Context, AL, NewState.data(), NewState.size()));
-}
-
/// ProcessDeclAttribute - Apply the specific attribute to the specified decl if
/// the attribute applies to decls. If the attribute is a type attribute, just
/// silently ignore it if a GNU attribute.
@@ -9043,8 +6257,14 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
return;
// Ignore C++11 attributes on declarator chunks: they appertain to the type
- // instead.
- if (AL.isCXX11Attribute() && !Options.IncludeCXX11Attributes)
+ // instead. Note, isCXX11Attribute() will look at whether the attribute is
+ // [[]] or alignas, while isC23Attribute() will only look at [[]]. This is
+ // important for ensuring that alignas in C23 is properly handled on a
+ // structure member declaration because it is a type-specifier-qualifier in
+ // C but still applies to the declaration rather than the type.
+ if ((S.getLangOpts().CPlusPlus ? AL.isCXX11Attribute()
+ : AL.isC23Attribute()) &&
+ !Options.IncludeCXX11Attributes)
return;
// Unknown attributes are automatically warned on. Target-specific attributes
@@ -9155,7 +6375,7 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
handleInterruptAttr(S, D, AL);
break;
case ParsedAttr::AT_X86ForceAlignArgPointer:
- handleX86ForceAlignArgPointerAttr(S, D, AL);
+ S.X86().handleForceAlignArgPointerAttr(D, AL);
break;
case ParsedAttr::AT_ReadOnlyPlacement:
handleSimpleAttribute<ReadOnlyPlacementAttr>(S, D, AL);
@@ -9165,22 +6385,25 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
handleDLLAttr(S, D, AL);
break;
case ParsedAttr::AT_AMDGPUFlatWorkGroupSize:
- handleAMDGPUFlatWorkGroupSizeAttr(S, D, AL);
+ S.AMDGPU().handleAMDGPUFlatWorkGroupSizeAttr(D, AL);
break;
case ParsedAttr::AT_AMDGPUWavesPerEU:
- handleAMDGPUWavesPerEUAttr(S, D, AL);
+ S.AMDGPU().handleAMDGPUWavesPerEUAttr(D, AL);
break;
case ParsedAttr::AT_AMDGPUNumSGPR:
- handleAMDGPUNumSGPRAttr(S, D, AL);
+ S.AMDGPU().handleAMDGPUNumSGPRAttr(D, AL);
break;
case ParsedAttr::AT_AMDGPUNumVGPR:
- handleAMDGPUNumVGPRAttr(S, D, AL);
+ S.AMDGPU().handleAMDGPUNumVGPRAttr(D, AL);
+ break;
+ case ParsedAttr::AT_AMDGPUMaxNumWorkGroups:
+ S.AMDGPU().handleAMDGPUMaxNumWorkGroupsAttr(D, AL);
break;
case ParsedAttr::AT_AVRSignal:
- handleAVRSignalAttr(S, D, AL);
+ S.AVR().handleSignalAttr(D, AL);
break;
case ParsedAttr::AT_BPFPreserveAccessIndex:
- handleBPFPreserveAccessIndexAttr(S, D, AL);
+ S.BPF().handlePreserveAccessIndexAttr(D, AL);
break;
case ParsedAttr::AT_BPFPreserveStaticOffset:
handleSimpleAttribute<BPFPreserveStaticOffsetAttr>(S, D, AL);
@@ -9189,19 +6412,19 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
handleBTFDeclTagAttr(S, D, AL);
break;
case ParsedAttr::AT_WebAssemblyExportName:
- handleWebAssemblyExportNameAttr(S, D, AL);
+ S.Wasm().handleWebAssemblyExportNameAttr(D, AL);
break;
case ParsedAttr::AT_WebAssemblyImportModule:
- handleWebAssemblyImportModuleAttr(S, D, AL);
+ S.Wasm().handleWebAssemblyImportModuleAttr(D, AL);
break;
case ParsedAttr::AT_WebAssemblyImportName:
- handleWebAssemblyImportNameAttr(S, D, AL);
+ S.Wasm().handleWebAssemblyImportNameAttr(D, AL);
break;
case ParsedAttr::AT_IBOutlet:
- handleIBOutlet(S, D, AL);
+ S.ObjC().handleIBOutlet(D, AL);
break;
case ParsedAttr::AT_IBOutletCollection:
- handleIBOutletCollection(S, D, AL);
+ S.ObjC().handleIBOutletCollection(D, AL);
break;
case ParsedAttr::AT_IFunc:
handleIFuncAttr(S, D, AL);
@@ -9264,6 +6487,9 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
case ParsedAttr::AT_Error:
handleErrorAttr(S, D, AL);
break;
+ case ParsedAttr::AT_ExcludeFromExplicitInstantiation:
+ handleExcludeFromExplicitInstantiationAttr(S, D, AL);
+ break;
case ParsedAttr::AT_DiagnoseIf:
handleDiagnoseIfAttr(S, D, AL);
break;
@@ -9289,7 +6515,7 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
handleEnumExtensibilityAttr(S, D, AL);
break;
case ParsedAttr::AT_SYCLKernel:
- handleSYCLKernelAttr(S, D, AL);
+ S.SYCL().handleKernelAttr(D, AL);
break;
case ParsedAttr::AT_SYCLSpecialClass:
handleSimpleAttribute<SYCLSpecialClassAttr>(S, D, AL);
@@ -9375,53 +6601,54 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
handleVecReturnAttr(S, D, AL);
break;
case ParsedAttr::AT_ObjCOwnership:
- handleObjCOwnershipAttr(S, D, AL);
+ S.ObjC().handleOwnershipAttr(D, AL);
break;
case ParsedAttr::AT_ObjCPreciseLifetime:
- handleObjCPreciseLifetimeAttr(S, D, AL);
+ S.ObjC().handlePreciseLifetimeAttr(D, AL);
break;
case ParsedAttr::AT_ObjCReturnsInnerPointer:
- handleObjCReturnsInnerPointerAttr(S, D, AL);
+ S.ObjC().handleReturnsInnerPointerAttr(D, AL);
break;
case ParsedAttr::AT_ObjCRequiresSuper:
- handleObjCRequiresSuperAttr(S, D, AL);
+ S.ObjC().handleRequiresSuperAttr(D, AL);
break;
case ParsedAttr::AT_ObjCBridge:
- handleObjCBridgeAttr(S, D, AL);
+ S.ObjC().handleBridgeAttr(D, AL);
break;
case ParsedAttr::AT_ObjCBridgeMutable:
- handleObjCBridgeMutableAttr(S, D, AL);
+ S.ObjC().handleBridgeMutableAttr(D, AL);
break;
case ParsedAttr::AT_ObjCBridgeRelated:
- handleObjCBridgeRelatedAttr(S, D, AL);
+ S.ObjC().handleBridgeRelatedAttr(D, AL);
break;
case ParsedAttr::AT_ObjCDesignatedInitializer:
- handleObjCDesignatedInitializer(S, D, AL);
+ S.ObjC().handleDesignatedInitializer(D, AL);
break;
case ParsedAttr::AT_ObjCRuntimeName:
- handleObjCRuntimeName(S, D, AL);
+ S.ObjC().handleRuntimeName(D, AL);
break;
case ParsedAttr::AT_ObjCBoxable:
- handleObjCBoxable(S, D, AL);
+ S.ObjC().handleBoxable(D, AL);
break;
case ParsedAttr::AT_NSErrorDomain:
- handleNSErrorDomain(S, D, AL);
+ S.ObjC().handleNSErrorDomain(D, AL);
break;
case ParsedAttr::AT_CFConsumed:
case ParsedAttr::AT_NSConsumed:
case ParsedAttr::AT_OSConsumed:
- S.AddXConsumedAttr(D, AL, parsedAttrToRetainOwnershipKind(AL),
- /*IsTemplateInstantiation=*/false);
+ S.ObjC().AddXConsumedAttr(D, AL,
+ S.ObjC().parsedAttrToRetainOwnershipKind(AL),
+ /*IsTemplateInstantiation=*/false);
break;
case ParsedAttr::AT_OSReturnsRetainedOnZero:
handleSimpleAttributeOrDiagnose<OSReturnsRetainedOnZeroAttr>(
- S, D, AL, isValidOSObjectOutParameter(D),
+ S, D, AL, S.ObjC().isValidOSObjectOutParameter(D),
diag::warn_ns_attribute_wrong_parameter_type,
/*Extra Args=*/AL, /*pointer-to-OSObject-pointer*/ 3, AL.getRange());
break;
case ParsedAttr::AT_OSReturnsRetainedOnNonZero:
handleSimpleAttributeOrDiagnose<OSReturnsRetainedOnNonZeroAttr>(
- S, D, AL, isValidOSObjectOutParameter(D),
+ S, D, AL, S.ObjC().isValidOSObjectOutParameter(D),
diag::warn_ns_attribute_wrong_parameter_type,
/*Extra Args=*/AL, /*pointer-to-OSObject-poointer*/ 3, AL.getRange());
break;
@@ -9432,7 +6659,7 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
case ParsedAttr::AT_CFReturnsRetained:
case ParsedAttr::AT_OSReturnsNotRetained:
case ParsedAttr::AT_OSReturnsRetained:
- handleXReturnsXRetainedAttr(S, D, AL);
+ S.ObjC().handleXReturnsXRetainedAttr(D, AL);
break;
case ParsedAttr::AT_WorkGroupSizeHint:
handleWorkGroupSize<WorkGroupSizeHintAttr>(S, D, AL);
@@ -9441,7 +6668,7 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
handleWorkGroupSize<ReqdWorkGroupSizeAttr>(S, D, AL);
break;
case ParsedAttr::AT_OpenCLIntelReqdSubGroupSize:
- handleSubGroupSize(S, D, AL);
+ S.OpenCL().handleSubGroupSize(D, AL);
break;
case ParsedAttr::AT_VecTypeHint:
handleVecTypeHint(S, D, AL);
@@ -9485,18 +6712,18 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
case ParsedAttr::AT_Unavailable:
handleAttrWithMessage<UnavailableAttr>(S, D, AL);
break;
- case ParsedAttr::AT_Assumption:
- handleAssumumptionAttr(S, D, AL);
+ case ParsedAttr::AT_OMPAssume:
+ S.OpenMP().handleOMPAssumeAttr(D, AL);
break;
case ParsedAttr::AT_ObjCDirect:
- handleObjCDirectAttr(S, D, AL);
+ S.ObjC().handleDirectAttr(D, AL);
break;
case ParsedAttr::AT_ObjCDirectMembers:
- handleObjCDirectMembersAttr(S, D, AL);
+ S.ObjC().handleDirectMembersAttr(D, AL);
handleSimpleAttribute<ObjCDirectMembersAttr>(S, D, AL);
break;
case ParsedAttr::AT_ObjCExplicitProtocolImpl:
- handleObjCSuppresProtocolAttr(S, D, AL);
+ S.ObjC().handleSuppresProtocolAttr(D, AL);
break;
case ParsedAttr::AT_Unused:
handleUnusedAttr(S, D, AL);
@@ -9520,16 +6747,16 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
handleTransparentUnionAttr(S, D, AL);
break;
case ParsedAttr::AT_ObjCMethodFamily:
- handleObjCMethodFamilyAttr(S, D, AL);
+ S.ObjC().handleMethodFamilyAttr(D, AL);
break;
case ParsedAttr::AT_ObjCNSObject:
- handleObjCNSObject(S, D, AL);
+ S.ObjC().handleNSObject(D, AL);
break;
case ParsedAttr::AT_ObjCIndependentClass:
- handleObjCIndependentClass(S, D, AL);
+ S.ObjC().handleIndependentClass(D, AL);
break;
case ParsedAttr::AT_Blocks:
- handleBlocksAttr(S, D, AL);
+ S.ObjC().handleBlocksAttr(D, AL);
break;
case ParsedAttr::AT_Sentinel:
handleSentinelAttr(S, D, AL);
@@ -9541,7 +6768,7 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
handleNoDebugAttr(S, D, AL);
break;
case ParsedAttr::AT_CmseNSEntry:
- handleCmseNSEntryAttr(S, D, AL);
+ S.ARM().handleCmseNSEntryAttr(D, AL);
break;
case ParsedAttr::AT_StdCall:
case ParsedAttr::AT_CDecl:
@@ -9562,6 +6789,8 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
case ParsedAttr::AT_AArch64SVEPcs:
case ParsedAttr::AT_AMDGPUKernelCall:
case ParsedAttr::AT_M68kRTD:
+ case ParsedAttr::AT_PreserveNone:
+ case ParsedAttr::AT_RISCVVectorCC:
handleCallConvAttr(S, D, AL);
break;
case ParsedAttr::AT_Suppress:
@@ -9572,22 +6801,22 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
handleLifetimeCategoryAttr(S, D, AL);
break;
case ParsedAttr::AT_OpenCLAccess:
- handleOpenCLAccessAttr(S, D, AL);
+ S.OpenCL().handleAccessAttr(D, AL);
break;
case ParsedAttr::AT_OpenCLNoSVM:
- handleOpenCLNoSVMAttr(S, D, AL);
+ S.OpenCL().handleNoSVMAttr(D, AL);
break;
case ParsedAttr::AT_SwiftContext:
- S.AddParameterABIAttr(D, AL, ParameterABI::SwiftContext);
+ S.Swift().AddParameterABIAttr(D, AL, ParameterABI::SwiftContext);
break;
case ParsedAttr::AT_SwiftAsyncContext:
- S.AddParameterABIAttr(D, AL, ParameterABI::SwiftAsyncContext);
+ S.Swift().AddParameterABIAttr(D, AL, ParameterABI::SwiftAsyncContext);
break;
case ParsedAttr::AT_SwiftErrorResult:
- S.AddParameterABIAttr(D, AL, ParameterABI::SwiftErrorResult);
+ S.Swift().AddParameterABIAttr(D, AL, ParameterABI::SwiftErrorResult);
break;
case ParsedAttr::AT_SwiftIndirectResult:
- S.AddParameterABIAttr(D, AL, ParameterABI::SwiftIndirectResult);
+ S.Swift().AddParameterABIAttr(D, AL, ParameterABI::SwiftIndirectResult);
break;
case ParsedAttr::AT_InternalLinkage:
handleInternalLinkageAttr(S, D, AL);
@@ -9610,7 +6839,10 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
break;
case ParsedAttr::AT_CountedBy:
- handleCountedByAttr(S, D, AL);
+ case ParsedAttr::AT_CountedByOrNull:
+ case ParsedAttr::AT_SizedBy:
+ case ParsedAttr::AT_SizedByOrNull:
+ handleCountedByAttrField(S, D, AL);
break;
// Microsoft attributes:
@@ -9629,25 +6861,34 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
case ParsedAttr::AT_MSConstexpr:
handleMSConstexprAttr(S, D, AL);
break;
+ case ParsedAttr::AT_HybridPatchable:
+ handleSimpleAttribute<HybridPatchableAttr>(S, D, AL);
+ break;
// HLSL attributes:
case ParsedAttr::AT_HLSLNumThreads:
- handleHLSLNumThreadsAttr(S, D, AL);
+ S.HLSL().handleNumThreadsAttr(D, AL);
break;
case ParsedAttr::AT_HLSLSV_GroupIndex:
handleSimpleAttribute<HLSLSV_GroupIndexAttr>(S, D, AL);
break;
case ParsedAttr::AT_HLSLSV_DispatchThreadID:
- handleHLSLSV_DispatchThreadIDAttr(S, D, AL);
+ S.HLSL().handleSV_DispatchThreadIDAttr(D, AL);
+ break;
+ case ParsedAttr::AT_HLSLPackOffset:
+ S.HLSL().handlePackOffsetAttr(D, AL);
break;
case ParsedAttr::AT_HLSLShader:
- handleHLSLShaderAttr(S, D, AL);
+ S.HLSL().handleShaderAttr(D, AL);
break;
case ParsedAttr::AT_HLSLResourceBinding:
- handleHLSLResourceBindingAttr(S, D, AL);
+ S.HLSL().handleResourceBindingAttr(D, AL);
+ break;
+ case ParsedAttr::AT_HLSLResourceClass:
+ S.HLSL().handleResourceClassAttr(D, AL);
break;
case ParsedAttr::AT_HLSLParamModifier:
- handleHLSLParamModifierAttr(S, D, AL);
+ S.HLSL().handleParamModifierAttr(D, AL);
break;
case ParsedAttr::AT_AbiTag:
@@ -9750,28 +6991,28 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
// Swift attributes.
case ParsedAttr::AT_SwiftAsyncName:
- handleSwiftAsyncName(S, D, AL);
+ S.Swift().handleAsyncName(D, AL);
break;
case ParsedAttr::AT_SwiftAttr:
- handleSwiftAttrAttr(S, D, AL);
+ S.Swift().handleAttrAttr(D, AL);
break;
case ParsedAttr::AT_SwiftBridge:
- handleSwiftBridge(S, D, AL);
+ S.Swift().handleBridge(D, AL);
break;
case ParsedAttr::AT_SwiftError:
- handleSwiftError(S, D, AL);
+ S.Swift().handleError(D, AL);
break;
case ParsedAttr::AT_SwiftName:
- handleSwiftName(S, D, AL);
+ S.Swift().handleName(D, AL);
break;
case ParsedAttr::AT_SwiftNewType:
- handleSwiftNewType(S, D, AL);
+ S.Swift().handleNewType(D, AL);
break;
case ParsedAttr::AT_SwiftAsync:
- handleSwiftAsyncAttr(S, D, AL);
+ S.Swift().handleAsyncAttr(D, AL);
break;
case ParsedAttr::AT_SwiftAsyncError:
- handleSwiftAsyncError(S, D, AL);
+ S.Swift().handleAsyncError(D, AL);
break;
// XRay attributes.
@@ -9793,7 +7034,7 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
break;
case ParsedAttr::AT_ObjCExternallyRetained:
- handleObjCExternallyRetainedAttr(S, D, AL);
+ S.ObjC().handleExternallyRetainedAttr(D, AL);
break;
case ParsedAttr::AT_MIGServerRoutine:
@@ -9805,7 +7046,7 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
break;
case ParsedAttr::AT_ArmBuiltinAlias:
- handleArmBuiltinAliasAttr(S, D, AL);
+ S.ARM().handleBuiltinAliasAttr(D, AL);
break;
case ParsedAttr::AT_ArmLocallyStreaming:
@@ -9813,7 +7054,7 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
break;
case ParsedAttr::AT_ArmNew:
- handleArmNewAttr(S, D, AL);
+ S.ARM().handleNewAttr(D, AL);
break;
case ParsedAttr::AT_AcquireHandle:
@@ -9851,11 +7092,17 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
case ParsedAttr::AT_UsingIfExists:
handleSimpleAttribute<UsingIfExistsAttr>(S, D, AL);
break;
+
+ case ParsedAttr::AT_TypeNullable:
+ handleNullableTypeAttr(S, D, AL);
+ break;
+
+ case ParsedAttr::AT_VTablePointerAuthentication:
+ handleVTablePointerAuthentication(S, D, AL);
+ break;
}
}
-/// ProcessDeclAttributeList - Apply all the decl attributes in the specified
-/// attribute list to the specified decl, ignoring any type attributes.
void Sema::ProcessDeclAttributeList(
Scope *S, Decl *D, const ParsedAttributesView &AttrList,
const ProcessDeclAttributeOptions &Options) {
@@ -9929,8 +7176,6 @@ void Sema::ProcessDeclAttributeList(
}
}
-// Helper for delayed processing TransparentUnion or BPFPreserveAccessIndexAttr
-// attribute.
void Sema::ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList) {
for (const ParsedAttr &AL : AttrList)
@@ -9942,11 +7187,9 @@ void Sema::ProcessDeclAttributeDelayed(Decl *D,
// For BPFPreserveAccessIndexAttr, we want to populate the attributes
// to fields and inner records as well.
if (D && D->hasAttr<BPFPreserveAccessIndexAttr>())
- handleBPFPreserveAIRecord(*this, cast<RecordDecl>(D));
+ BPF().handlePreserveAIRecord(cast<RecordDecl>(D));
}
-// Annotation attributes are the only attributes allowed after an access
-// specifier.
bool Sema::ProcessAccessDeclAttributeList(
AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList) {
for (const ParsedAttr &AL : AttrList) {
@@ -9981,9 +7224,6 @@ static void checkUnusedDeclAttributes(Sema &S, const ParsedAttributesView &A) {
}
}
-/// checkUnusedDeclAttributes - Given a declarator which is not being
-/// used to build a declaration, complain about any decl attributes
-/// which might be lying around on it.
void Sema::checkUnusedDeclAttributes(Declarator &D) {
::checkUnusedDeclAttributes(*this, D.getDeclarationAttributes());
::checkUnusedDeclAttributes(*this, D.getDeclSpec().getAttributes());
@@ -9992,8 +7232,6 @@ void Sema::checkUnusedDeclAttributes(Declarator &D) {
::checkUnusedDeclAttributes(*this, D.getTypeObject(i).getAttrs());
}
-/// DeclClonePragmaWeak - clone existing decl (maybe definition),
-/// \#pragma weak needs a non-definition decl and source may not have one.
NamedDecl *Sema::DeclClonePragmaWeak(NamedDecl *ND, const IdentifierInfo *II,
SourceLocation Loc) {
assert(isa<FunctionDecl>(ND) || isa<VarDecl>(ND));
@@ -10038,8 +7276,6 @@ NamedDecl *Sema::DeclClonePragmaWeak(NamedDecl *ND, const IdentifierInfo *II,
return NewD;
}
-/// DeclApplyPragmaWeak - A declaration (maybe definition) needs \#pragma weak
-/// applied to it, possibly with an alias.
void Sema::DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, const WeakInfo &W) {
if (W.getAlias()) { // clone decl, impersonate __attribute(weak,alias(...))
IdentifierInfo *NDId = ND->getIdentifier();
@@ -10095,29 +7331,37 @@ void Sema::ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD) {
// Ordering of attributes can be important, so we take care to process
// attributes in the order in which they appeared in the source code.
+ auto ProcessAttributesWithSliding =
+ [&](const ParsedAttributesView &Src,
+ const ProcessDeclAttributeOptions &Options) {
+ ParsedAttributesView NonSlidingAttrs;
+ for (ParsedAttr &AL : Src) {
+ // FIXME: this sliding is specific to standard attributes and should
+ // eventually be deprecated and removed as those are not intended to
+ // slide to anything.
+ if ((AL.isStandardAttributeSyntax() || AL.isAlignas()) &&
+ AL.slidesFromDeclToDeclSpecLegacyBehavior()) {
+ // Skip processing the attribute, but do check if it appertains to
+ // the declaration. This is needed for the `MatrixType` attribute,
+ // which, despite being a type attribute, defines a `SubjectList`
+ // that only allows it to be used on typedef declarations.
+ AL.diagnoseAppertainsTo(*this, D);
+ } else {
+ NonSlidingAttrs.addAtEnd(&AL);
+ }
+ }
+ ProcessDeclAttributeList(S, D, NonSlidingAttrs, Options);
+ };
+
// First, process attributes that appeared on the declaration itself (but
// only if they don't have the legacy behavior of "sliding" to the DeclSepc).
- ParsedAttributesView NonSlidingAttrs;
- for (ParsedAttr &AL : PD.getDeclarationAttributes()) {
- if (AL.slidesFromDeclToDeclSpecLegacyBehavior()) {
- // Skip processing the attribute, but do check if it appertains to the
- // declaration. This is needed for the `MatrixType` attribute, which,
- // despite being a type attribute, defines a `SubjectList` that only
- // allows it to be used on typedef declarations.
- AL.diagnoseAppertainsTo(*this, D);
- } else {
- NonSlidingAttrs.addAtEnd(&AL);
- }
- }
- ProcessDeclAttributeList(S, D, NonSlidingAttrs);
+ ProcessAttributesWithSliding(PD.getDeclarationAttributes(), {});
// Apply decl attributes from the DeclSpec if present.
- if (!PD.getDeclSpec().getAttributes().empty()) {
- ProcessDeclAttributeList(S, D, PD.getDeclSpec().getAttributes(),
- ProcessDeclAttributeOptions()
- .WithIncludeCXX11Attributes(false)
- .WithIgnoreTypeAttributes(true));
- }
+ ProcessAttributesWithSliding(PD.getDeclSpec().getAttributes(),
+ ProcessDeclAttributeOptions()
+ .WithIncludeCXX11Attributes(false)
+ .WithIgnoreTypeAttributes(true));
// Walk the declarator structure, applying decl attributes that were in a type
// position to the decl itself. This handles cases like:
@@ -10135,6 +7379,9 @@ void Sema::ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD) {
// Apply additional attributes specified by '#pragma clang attribute'.
AddPragmaAttributes(S, D);
+
+ // Look for API notes that map to attributes.
+ ProcessAPINotes(D);
}
/// Is the given declaration allowed to use a forbidden type?
@@ -10255,9 +7502,6 @@ void Sema::PopParsingDeclaration(ParsingDeclState state, Decl *decl) {
} while ((pool = pool->getParent()));
}
-/// Given a set of delayed diagnostics, re-emit them as if they had
-/// been delayed in the current context instead of in the given pool.
-/// Essentially, this just moves them to the current pool.
void Sema::redelayDiagnostics(DelayedDiagnosticPool &pool) {
DelayedDiagnosticPool *curPool = DelayedDiagnostics.getCurrentPool();
assert(curPool && "re-emitting in undelayed context not supported");
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
index df5bd55e7c28..18262993af28 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
@@ -42,10 +42,14 @@
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaCUDA.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaOpenMP.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
@@ -322,9 +326,6 @@ void Sema::SetParamDefaultArgument(ParmVarDecl *Param, Expr *Arg,
}
}
-/// ActOnParamDefaultArgument - Check whether the default argument
-/// provided for a function parameter is well-formed. If so, attach it
-/// to the parameter declaration.
void
Sema::ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
Expr *DefaultArg) {
@@ -370,10 +371,6 @@ Sema::ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
SetParamDefaultArgument(Param, DefaultArg, EqualLoc);
}
-/// ActOnParamUnparsedDefaultArgument - We've seen a default
-/// argument for a function parameter, but we can't parse it yet
-/// because we're inside a class definition. Note that this default
-/// argument will be parsed later.
void Sema::ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc) {
@@ -385,8 +382,6 @@ void Sema::ActOnParamUnparsedDefaultArgument(Decl *param,
UnparsedDefaultArgLocs[Param] = ArgLoc;
}
-/// ActOnParamDefaultArgumentError - Parsing or semantic analysis of
-/// the default argument for the parameter param failed.
void Sema::ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc,
Expr *DefaultArg) {
if (!param)
@@ -406,11 +401,6 @@ void Sema::ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc,
Param->setDefaultArg(RE.get());
}
-/// CheckExtraCXXDefaultArguments - Check for any extra default
-/// arguments in the declarator, which is not a function declaration
-/// or definition and therefore is not permitted to have default
-/// arguments. This routine should be invoked for every declarator
-/// that is not a function declaration or definition.
void Sema::CheckExtraCXXDefaultArguments(Declarator &D) {
// C++ [dcl.fct.default]p3
// A default argument expression shall be specified only in the
@@ -462,10 +452,6 @@ static bool functionDeclHasDefaultArgument(const FunctionDecl *FD) {
});
}
-/// MergeCXXFunctionDecl - Merge two declarations of the same C++
-/// function, once we already know that they have the same
-/// type. Subroutine of MergeFunctionDecl. Returns true if there was an
-/// error, false otherwise.
bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old,
Scope *S) {
bool Invalid = false;
@@ -657,13 +643,13 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old,
// is ill-formed. This can only happen for constructors.
if (isa<CXXConstructorDecl>(New) &&
New->getMinRequiredArguments() < Old->getMinRequiredArguments()) {
- CXXSpecialMember NewSM = getSpecialMember(cast<CXXMethodDecl>(New)),
- OldSM = getSpecialMember(cast<CXXMethodDecl>(Old));
+ CXXSpecialMemberKind NewSM = getSpecialMember(cast<CXXMethodDecl>(New)),
+ OldSM = getSpecialMember(cast<CXXMethodDecl>(Old));
if (NewSM != OldSM) {
ParmVarDecl *NewParam = New->getParamDecl(New->getMinRequiredArguments());
assert(NewParam->hasDefaultArg());
Diag(NewParam->getLocation(), diag::err_default_arg_makes_ctor_special)
- << NewParam->getDefaultArgRange() << NewSM;
+ << NewParam->getDefaultArgRange() << llvm::to_underlying(NewSM);
Diag(Old->getLocation(), diag::note_previous_declaration);
}
}
@@ -893,7 +879,7 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
assert(VarName && "Cannot have an unnamed binding declaration");
LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
- ForVisibleRedeclaration);
+ RedeclarationKind::ForVisibleRedeclaration);
LookupName(Previous, S,
/*CreateBuiltins*/DC->getRedeclContext()->isTranslationUnit());
@@ -907,6 +893,8 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
auto *BD = BindingDecl::Create(Context, DC, B.NameLoc, VarName);
+ ProcessDeclAttributeList(S, BD, *B.Attrs);
+
// Find the shadowed declaration before filtering for scope.
NamedDecl *ShadowedDecl = D.getCXXScopeSpec().isEmpty()
? getShadowedDeclaration(BD, Previous)
@@ -948,7 +936,7 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
DeclarationNameInfo NameInfo((IdentifierInfo *)nullptr,
Decomp.getLSquareLoc());
LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
- ForVisibleRedeclaration);
+ RedeclarationKind::ForVisibleRedeclaration);
// Build the variable that holds the non-decomposed object.
bool AddToScope = true;
@@ -960,8 +948,8 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
CurContext->addHiddenDecl(New);
}
- if (isInOpenMPDeclareTargetContext())
- checkDeclIsAllowedInOpenMPTarget(nullptr, New);
+ if (OpenMP().isInOpenMPDeclareTargetContext())
+ OpenMP().checkDeclIsAllowedInOpenMPTarget(nullptr, New);
return New;
}
@@ -1299,9 +1287,9 @@ static bool checkTupleLikeDecomposition(Sema &S,
// in the associated namespaces.
Expr *Get = UnresolvedLookupExpr::Create(
S.Context, nullptr, NestedNameSpecifierLoc(), SourceLocation(),
- DeclarationNameInfo(GetDN, Loc), /*RequiresADL*/ true, &Args,
+ DeclarationNameInfo(GetDN, Loc), /*RequiresADL=*/true, &Args,
UnresolvedSetIterator(), UnresolvedSetIterator(),
- /*KnownDependent=*/false);
+ /*KnownDependent=*/false, /*KnownInstantiationDependent=*/false);
Expr *Arg = E.get();
E = S.BuildCallExpr(nullptr, Get, Loc, Arg, Loc);
@@ -1450,7 +1438,7 @@ static bool checkMemberDecomposition(Sema &S, ArrayRef<BindingDecl*> Bindings,
auto DiagnoseBadNumberOfBindings = [&]() -> bool {
unsigned NumFields = llvm::count_if(
- RD->fields(), [](FieldDecl *FD) { return !FD->isUnnamedBitfield(); });
+ RD->fields(), [](FieldDecl *FD) { return !FD->isUnnamedBitField(); });
assert(Bindings.size() != NumFields);
S.Diag(Src->getLocation(), diag::err_decomp_decl_wrong_number_bindings)
<< DecompType << (unsigned)Bindings.size() << NumFields << NumFields
@@ -1463,7 +1451,7 @@ static bool checkMemberDecomposition(Sema &S, ArrayRef<BindingDecl*> Bindings,
// E shall not have an anonymous union member, ...
unsigned I = 0;
for (auto *FD : RD->fields()) {
- if (FD->isUnnamedBitfield())
+ if (FD->isUnnamedBitField())
continue;
// All the non-static data members are required to be nameable, so they
@@ -1601,11 +1589,6 @@ void Sema::CheckCompleteDecompositionDeclaration(DecompositionDecl *DD) {
DD->setInvalidDecl();
}
-/// Merge the exception specifications of two variable declarations.
-///
-/// This is called when there's a redeclaration of a VarDecl. The function
-/// checks if the redeclaration might have an exception specification and
-/// validates compatibility and merges the specs if necessary.
void Sema::MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old) {
// Shortcut if exceptions are disabled.
if (!getLangOpts().CXXExceptions)
@@ -1647,9 +1630,6 @@ void Sema::MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old) {
/// function declaration are well-formed according to C++
/// [dcl.fct.default].
void Sema::CheckCXXDefaultArguments(FunctionDecl *FD) {
- unsigned NumParams = FD->getNumParams();
- unsigned ParamIdx = 0;
-
// This checking doesn't make sense for explicit specializations; their
// default arguments are determined by the declaration we're specializing,
// not by FD.
@@ -1659,6 +1639,9 @@ void Sema::CheckCXXDefaultArguments(FunctionDecl *FD) {
if (FTD->isMemberSpecialization())
return;
+ unsigned NumParams = FD->getNumParams();
+ unsigned ParamIdx = 0;
+
// Find first parameter with a default argument
for (; ParamIdx < NumParams; ++ParamIdx) {
ParmVarDecl *Param = FD->getParamDecl(ParamIdx);
@@ -1671,21 +1654,19 @@ void Sema::CheckCXXDefaultArguments(FunctionDecl *FD) {
// with a default argument shall have a default argument supplied in this or
// a previous declaration, unless the parameter was expanded from a
// parameter pack, or shall be a function parameter pack.
- for (; ParamIdx < NumParams; ++ParamIdx) {
+ for (++ParamIdx; ParamIdx < NumParams; ++ParamIdx) {
ParmVarDecl *Param = FD->getParamDecl(ParamIdx);
- if (!Param->hasDefaultArg() && !Param->isParameterPack() &&
- !(CurrentInstantiationScope &&
- CurrentInstantiationScope->isLocalPackExpansion(Param))) {
- if (Param->isInvalidDecl())
- /* We already complained about this parameter. */;
- else if (Param->getIdentifier())
- Diag(Param->getLocation(),
- diag::err_param_default_argument_missing_name)
+ if (Param->hasDefaultArg() || Param->isParameterPack() ||
+ (CurrentInstantiationScope &&
+ CurrentInstantiationScope->isLocalPackExpansion(Param)))
+ continue;
+ if (Param->isInvalidDecl())
+ /* We already complained about this parameter. */;
+ else if (Param->getIdentifier())
+ Diag(Param->getLocation(), diag::err_param_default_argument_missing_name)
<< Param->getIdentifier();
- else
- Diag(Param->getLocation(),
- diag::err_param_default_argument_missing);
- }
+ else
+ Diag(Param->getLocation(), diag::err_param_default_argument_missing);
}
}
@@ -1715,6 +1696,8 @@ static bool CheckLiteralType(Sema &SemaRef, Sema::CheckConstexprKind Kind,
static bool CheckConstexprDestructorSubobjects(Sema &SemaRef,
const CXXDestructorDecl *DD,
Sema::CheckConstexprKind Kind) {
+ assert(!SemaRef.getLangOpts().CPlusPlus23 &&
+ "this check is obsolete for C++23");
auto Check = [&](SourceLocation Loc, QualType T, const FieldDecl *FD) {
const CXXRecordDecl *RD =
T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
@@ -1746,6 +1729,8 @@ static bool CheckConstexprDestructorSubobjects(Sema &SemaRef,
static bool CheckConstexprParameterTypes(Sema &SemaRef,
const FunctionDecl *FD,
Sema::CheckConstexprKind Kind) {
+ assert(!SemaRef.getLangOpts().CPlusPlus23 &&
+ "this check is obsolete for C++23");
unsigned ArgIndex = 0;
const auto *FT = FD->getType()->castAs<FunctionProtoType>();
for (FunctionProtoType::param_type_iterator i = FT->param_type_begin(),
@@ -1767,6 +1752,8 @@ static bool CheckConstexprParameterTypes(Sema &SemaRef,
/// true. If not, produce a suitable diagnostic and return false.
static bool CheckConstexprReturnType(Sema &SemaRef, const FunctionDecl *FD,
Sema::CheckConstexprKind Kind) {
+ assert(!SemaRef.getLangOpts().CPlusPlus23 &&
+ "this check is obsolete for C++23");
if (CheckLiteralType(SemaRef, Kind, FD->getLocation(), FD->getReturnType(),
diag::err_constexpr_non_literal_return,
FD->isConsteval()))
@@ -1794,13 +1781,8 @@ static unsigned getRecordDiagFromTagKind(TagTypeKind Tag) {
static bool CheckConstexprFunctionBody(Sema &SemaRef, const FunctionDecl *Dcl,
Stmt *Body,
Sema::CheckConstexprKind Kind);
+static bool CheckConstexprMissingReturn(Sema &SemaRef, const FunctionDecl *Dcl);
-// Check whether a function declaration satisfies the requirements of a
-// constexpr function definition or a constexpr constructor definition. If so,
-// return true. If not, produce appropriate diagnostics (unless asked not to by
-// Kind) and return false.
-//
-// This implements C++11 [dcl.constexpr]p3,4, as amended by DR1360.
bool Sema::CheckConstexprFunctionDefinition(const FunctionDecl *NewFD,
CheckConstexprKind Kind) {
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD);
@@ -1856,16 +1838,18 @@ bool Sema::CheckConstexprFunctionDefinition(const FunctionDecl *NewFD,
}
}
- // - its return type shall be a literal type;
- if (!CheckConstexprReturnType(*this, NewFD, Kind))
+ // - its return type shall be a literal type; (removed in C++23)
+ if (!getLangOpts().CPlusPlus23 &&
+ !CheckConstexprReturnType(*this, NewFD, Kind))
return false;
}
if (auto *Dtor = dyn_cast<CXXDestructorDecl>(NewFD)) {
// A destructor can be constexpr only if the defaulted destructor could be;
// we don't need to check the members and bases if we already know they all
- // have constexpr destructors.
- if (!Dtor->getParent()->defaultedDestructorIsConstexpr()) {
+ // have constexpr destructors. (removed in C++23)
+ if (!getLangOpts().CPlusPlus23 &&
+ !Dtor->getParent()->defaultedDestructorIsConstexpr()) {
if (Kind == CheckConstexprKind::CheckValid)
return false;
if (!CheckConstexprDestructorSubobjects(*this, Dtor, Kind))
@@ -1873,8 +1857,9 @@ bool Sema::CheckConstexprFunctionDefinition(const FunctionDecl *NewFD,
}
}
- // - each of its parameter types shall be a literal type;
- if (!CheckConstexprParameterTypes(*this, NewFD, Kind))
+ // - each of its parameter types shall be a literal type; (removed in C++23)
+ if (!getLangOpts().CPlusPlus23 &&
+ !CheckConstexprParameterTypes(*this, NewFD, Kind))
return false;
Stmt *Body = NewFD->getBody();
@@ -2055,7 +2040,7 @@ static bool CheckConstexprCtorInitializer(Sema &SemaRef,
if (Field->isInvalidDecl())
return true;
- if (Field->isUnnamedBitfield())
+ if (Field->isUnnamedBitField())
return true;
// Anonymous unions with no variant members and empty anonymous structs do not
@@ -2396,20 +2381,9 @@ static bool CheckConstexprFunctionBody(Sema &SemaRef, const FunctionDecl *Dcl,
}
} else {
if (ReturnStmts.empty()) {
- // C++1y doesn't require constexpr functions to contain a 'return'
- // statement. We still do, unless the return type might be void, because
- // otherwise if there's no return statement, the function cannot
- // be used in a core constant expression.
- bool OK = SemaRef.getLangOpts().CPlusPlus14 &&
- (Dcl->getReturnType()->isVoidType() ||
- Dcl->getReturnType()->isDependentType());
switch (Kind) {
case Sema::CheckConstexprKind::Diagnose:
- SemaRef.Diag(Dcl->getLocation(),
- OK ? diag::warn_cxx11_compat_constexpr_body_no_return
- : diag::err_constexpr_body_no_return)
- << Dcl->isConsteval();
- if (!OK)
+ if (!CheckConstexprMissingReturn(SemaRef, Dcl))
return false;
break;
@@ -2454,9 +2428,17 @@ static bool CheckConstexprFunctionBody(Sema &SemaRef, const FunctionDecl *Dcl,
// base class sub-objects shall be a constexpr constructor.
//
// Note that this rule is distinct from the "requirements for a constexpr
- // function", so is not checked in CheckValid mode.
+ // function", so is not checked in CheckValid mode. Because the check for
+ // constexpr potential is expensive, skip the check if the diagnostic is
+ // disabled, the function is declared in a system header, or we're in C++23
+ // or later mode (see https://wg21.link/P2448).
+ bool SkipCheck =
+ !SemaRef.getLangOpts().CheckConstexprFunctionBodies ||
+ SemaRef.getSourceManager().isInSystemHeader(Dcl->getLocation()) ||
+ SemaRef.getDiagnostics().isIgnored(
+ diag::ext_constexpr_function_never_constant_expr, Dcl->getLocation());
SmallVector<PartialDiagnosticAt, 8> Diags;
- if (Kind == Sema::CheckConstexprKind::Diagnose &&
+ if (Kind == Sema::CheckConstexprKind::Diagnose && !SkipCheck &&
!Expr::isPotentialConstantExpr(Dcl, Diags)) {
SemaRef.Diag(Dcl->getLocation(),
diag::ext_constexpr_function_never_constant_expr)
@@ -2471,6 +2453,28 @@ static bool CheckConstexprFunctionBody(Sema &SemaRef, const FunctionDecl *Dcl,
return true;
}
+static bool CheckConstexprMissingReturn(Sema &SemaRef,
+ const FunctionDecl *Dcl) {
+ bool IsVoidOrDependentType = Dcl->getReturnType()->isVoidType() ||
+ Dcl->getReturnType()->isDependentType();
+ // Skip emitting a missing return error diagnostic for non-void functions
+ // since C++23 no longer mandates constexpr functions to yield constant
+ // expressions.
+ if (SemaRef.getLangOpts().CPlusPlus23 && !IsVoidOrDependentType)
+ return true;
+
+ // C++14 doesn't require constexpr functions to contain a 'return'
+ // statement. We still do, unless the return type might be void, because
+ // otherwise if there's no return statement, the function cannot
+ // be used in a core constant expression.
+ bool OK = SemaRef.getLangOpts().CPlusPlus14 && IsVoidOrDependentType;
+ SemaRef.Diag(Dcl->getLocation(),
+ OK ? diag::warn_cxx11_compat_constexpr_body_no_return
+ : diag::err_constexpr_body_no_return)
+ << Dcl->isConsteval();
+ return OK;
+}
+
bool Sema::CheckImmediateEscalatingFunctionDefinition(
FunctionDecl *FD, const sema::FunctionScopeInfo *FSI) {
if (!getLangOpts().CPlusPlus20 || !FD->isImmediateEscalating())
@@ -2582,14 +2586,6 @@ void Sema::DiagnoseImmediateEscalatingReason(FunctionDecl *FD) {
Visitor.TraverseDecl(FD);
}
-/// Get the class that is directly named by the current context. This is the
-/// class for which an unqualified-id in this scope could name a constructor
-/// or destructor.
-///
-/// If the scope specifier denotes a class, this will be that class.
-/// If the scope specifier is empty, this will be the class whose
-/// member-specification we are currently within. Otherwise, there
-/// is no such class.
CXXRecordDecl *Sema::getCurrentClass(Scope *, const CXXScopeSpec *SS) {
assert(getLangOpts().CPlusPlus && "No class names in C!");
@@ -2604,19 +2600,12 @@ CXXRecordDecl *Sema::getCurrentClass(Scope *, const CXXScopeSpec *SS) {
return dyn_cast_or_null<CXXRecordDecl>(CurContext);
}
-/// isCurrentClassName - Determine whether the identifier II is the
-/// name of the class type currently being defined. In the case of
-/// nested classes, this will only return true if II is the name of
-/// the innermost class.
bool Sema::isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS) {
CXXRecordDecl *CurDecl = getCurrentClass(S, SS);
return CurDecl && &II == CurDecl->getIdentifier();
}
-/// Determine whether the identifier II is a typo for the name of
-/// the class type currently being defined. If so, update it to the identifier
-/// that should have been used.
bool Sema::isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS) {
assert(getLangOpts().CPlusPlus && "No class names in C!");
@@ -2640,188 +2629,118 @@ bool Sema::isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS) {
return false;
}
-/// Determine whether the given class is a base class of the given
-/// class, including looking at dependent bases.
-static bool findCircularInheritance(const CXXRecordDecl *Class,
- const CXXRecordDecl *Current) {
- SmallVector<const CXXRecordDecl*, 8> Queue;
-
- Class = Class->getCanonicalDecl();
- while (true) {
- for (const auto &I : Current->bases()) {
- CXXRecordDecl *Base = I.getType()->getAsCXXRecordDecl();
- if (!Base)
- continue;
-
- Base = Base->getDefinition();
- if (!Base)
- continue;
-
- if (Base->getCanonicalDecl() == Class)
- return true;
-
- Queue.push_back(Base);
- }
-
- if (Queue.empty())
- return false;
-
- Current = Queue.pop_back_val();
- }
-
- return false;
-}
-
-/// Check the validity of a C++ base class specifier.
-///
-/// \returns a new CXXBaseSpecifier if well-formed, emits diagnostics
-/// and returns NULL otherwise.
-CXXBaseSpecifier *
-Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
- SourceRange SpecifierRange,
- bool Virtual, AccessSpecifier Access,
- TypeSourceInfo *TInfo,
- SourceLocation EllipsisLoc) {
- // In HLSL, unspecified class access is public rather than private.
- if (getLangOpts().HLSL && Class->getTagKind() == TagTypeKind::Class &&
- Access == AS_none)
- Access = AS_public;
-
+CXXBaseSpecifier *Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
+ SourceRange SpecifierRange,
+ bool Virtual, AccessSpecifier Access,
+ TypeSourceInfo *TInfo,
+ SourceLocation EllipsisLoc) {
QualType BaseType = TInfo->getType();
+ SourceLocation BaseLoc = TInfo->getTypeLoc().getBeginLoc();
if (BaseType->containsErrors()) {
// Already emitted a diagnostic when parsing the error type.
return nullptr;
}
- // C++ [class.union]p1:
- // A union shall not have base classes.
- if (Class->isUnion()) {
- Diag(Class->getLocation(), diag::err_base_clause_on_union)
- << SpecifierRange;
- return nullptr;
- }
- if (EllipsisLoc.isValid() &&
- !TInfo->getType()->containsUnexpandedParameterPack()) {
+ if (EllipsisLoc.isValid() && !BaseType->containsUnexpandedParameterPack()) {
Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
<< TInfo->getTypeLoc().getSourceRange();
EllipsisLoc = SourceLocation();
}
- SourceLocation BaseLoc = TInfo->getTypeLoc().getBeginLoc();
-
- if (BaseType->isDependentType()) {
- // Make sure that we don't have circular inheritance among our dependent
- // bases. For non-dependent bases, the check for completeness below handles
- // this.
- if (CXXRecordDecl *BaseDecl = BaseType->getAsCXXRecordDecl()) {
- if (BaseDecl->getCanonicalDecl() == Class->getCanonicalDecl() ||
- ((BaseDecl = BaseDecl->getDefinition()) &&
- findCircularInheritance(Class, BaseDecl))) {
- Diag(BaseLoc, diag::err_circular_inheritance)
- << BaseType << Context.getTypeDeclType(Class);
-
- if (BaseDecl->getCanonicalDecl() != Class->getCanonicalDecl())
- Diag(BaseDecl->getLocation(), diag::note_previous_decl)
- << BaseType;
+ auto *BaseDecl =
+ dyn_cast_if_present<CXXRecordDecl>(computeDeclContext(BaseType));
+ // C++ [class.derived.general]p2:
+ // A class-or-decltype shall denote a (possibly cv-qualified) class type
+ // that is not an incompletely defined class; any cv-qualifiers are
+ // ignored.
+ if (BaseDecl) {
+ // C++ [class.union.general]p4:
+ // [...] A union shall not be used as a base class.
+ if (BaseDecl->isUnion()) {
+ Diag(BaseLoc, diag::err_union_as_base_class) << SpecifierRange;
+ return nullptr;
+ }
- return nullptr;
+ // For the MS ABI, propagate DLL attributes to base class templates.
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft() ||
+ Context.getTargetInfo().getTriple().isPS()) {
+ if (Attr *ClassAttr = getDLLAttr(Class)) {
+ if (auto *BaseSpec =
+ dyn_cast<ClassTemplateSpecializationDecl>(BaseDecl)) {
+ propagateDLLAttrToBaseClassTemplate(Class, ClassAttr, BaseSpec,
+ BaseLoc);
+ }
}
}
+ if (RequireCompleteType(BaseLoc, BaseType, diag::err_incomplete_base_class,
+ SpecifierRange)) {
+ Class->setInvalidDecl();
+ return nullptr;
+ }
+
+ BaseDecl = BaseDecl->getDefinition();
+ assert(BaseDecl && "Base type is not incomplete, but has no definition");
+
+ // Microsoft docs say:
+ // "If a base-class has a code_seg attribute, derived classes must have the
+ // same attribute."
+ const auto *BaseCSA = BaseDecl->getAttr<CodeSegAttr>();
+ const auto *DerivedCSA = Class->getAttr<CodeSegAttr>();
+ if ((DerivedCSA || BaseCSA) &&
+ (!BaseCSA || !DerivedCSA ||
+ BaseCSA->getName() != DerivedCSA->getName())) {
+ Diag(Class->getLocation(), diag::err_mismatched_code_seg_base);
+ Diag(BaseDecl->getLocation(), diag::note_base_class_specified_here)
+ << BaseDecl;
+ return nullptr;
+ }
+
+ // A class which contains a flexible array member is not suitable for use as
+ // a base class:
+ // - If the layout determines that a base comes before another base,
+ // the flexible array member would index into the subsequent base.
+ // - If the layout determines that base comes before the derived class,
+ // the flexible array member would index into the derived class.
+ if (BaseDecl->hasFlexibleArrayMember()) {
+ Diag(BaseLoc, diag::err_base_class_has_flexible_array_member)
+ << BaseDecl->getDeclName();
+ return nullptr;
+ }
+
+ // C++ [class]p3:
+ // If a class is marked final and it appears as a base-type-specifier in
+ // base-clause, the program is ill-formed.
+ if (FinalAttr *FA = BaseDecl->getAttr<FinalAttr>()) {
+ Diag(BaseLoc, diag::err_class_marked_final_used_as_base)
+ << BaseDecl->getDeclName() << FA->isSpelledAsSealed();
+ Diag(BaseDecl->getLocation(), diag::note_entity_declared_at)
+ << BaseDecl->getDeclName() << FA->getRange();
+ return nullptr;
+ }
+
+ // If the base class is invalid the derived class is as well.
+ if (BaseDecl->isInvalidDecl())
+ Class->setInvalidDecl();
+ } else if (BaseType->isDependentType()) {
// Make sure that we don't make an ill-formed AST where the type of the
// Class is non-dependent and its attached base class specifier is an
// dependent type, which violates invariants in many clang code paths (e.g.
// constexpr evaluator). If this case happens (in errory-recovery mode), we
// explicitly mark the Class decl invalid. The diagnostic was already
// emitted.
- if (!Class->getTypeForDecl()->isDependentType())
+ if (!Class->isDependentContext())
Class->setInvalidDecl();
- return new (Context) CXXBaseSpecifier(
- SpecifierRange, Virtual, Class->getTagKind() == TagTypeKind::Class,
- Access, TInfo, EllipsisLoc);
- }
-
- // Base specifiers must be record types.
- if (!BaseType->isRecordType()) {
+ } else {
+ // The base class is some non-dependent non-class type.
Diag(BaseLoc, diag::err_base_must_be_class) << SpecifierRange;
return nullptr;
}
- // C++ [class.union]p1:
- // A union shall not be used as a base class.
- if (BaseType->isUnionType()) {
- Diag(BaseLoc, diag::err_union_as_base_class) << SpecifierRange;
- return nullptr;
- }
-
- // For the MS ABI, propagate DLL attributes to base class templates.
- if (Context.getTargetInfo().getCXXABI().isMicrosoft() ||
- Context.getTargetInfo().getTriple().isPS()) {
- if (Attr *ClassAttr = getDLLAttr(Class)) {
- if (auto *BaseTemplate = dyn_cast_or_null<ClassTemplateSpecializationDecl>(
- BaseType->getAsCXXRecordDecl())) {
- propagateDLLAttrToBaseClassTemplate(Class, ClassAttr, BaseTemplate,
- BaseLoc);
- }
- }
- }
-
- // C++ [class.derived]p2:
- // The class-name in a base-specifier shall not be an incompletely
- // defined class.
- if (RequireCompleteType(BaseLoc, BaseType,
- diag::err_incomplete_base_class, SpecifierRange)) {
- Class->setInvalidDecl();
- return nullptr;
- }
-
- // If the base class is polymorphic or isn't empty, the new one is/isn't, too.
- RecordDecl *BaseDecl = BaseType->castAs<RecordType>()->getDecl();
- assert(BaseDecl && "Record type has no declaration");
- BaseDecl = BaseDecl->getDefinition();
- assert(BaseDecl && "Base type is not incomplete, but has no definition");
- CXXRecordDecl *CXXBaseDecl = cast<CXXRecordDecl>(BaseDecl);
- assert(CXXBaseDecl && "Base type is not a C++ type");
-
- // Microsoft docs say:
- // "If a base-class has a code_seg attribute, derived classes must have the
- // same attribute."
- const auto *BaseCSA = CXXBaseDecl->getAttr<CodeSegAttr>();
- const auto *DerivedCSA = Class->getAttr<CodeSegAttr>();
- if ((DerivedCSA || BaseCSA) &&
- (!BaseCSA || !DerivedCSA || BaseCSA->getName() != DerivedCSA->getName())) {
- Diag(Class->getLocation(), diag::err_mismatched_code_seg_base);
- Diag(CXXBaseDecl->getLocation(), diag::note_base_class_specified_here)
- << CXXBaseDecl;
- return nullptr;
- }
-
- // A class which contains a flexible array member is not suitable for use as a
- // base class:
- // - If the layout determines that a base comes before another base,
- // the flexible array member would index into the subsequent base.
- // - If the layout determines that base comes before the derived class,
- // the flexible array member would index into the derived class.
- if (CXXBaseDecl->hasFlexibleArrayMember()) {
- Diag(BaseLoc, diag::err_base_class_has_flexible_array_member)
- << CXXBaseDecl->getDeclName();
- return nullptr;
- }
-
- // C++ [class]p3:
- // If a class is marked final and it appears as a base-type-specifier in
- // base-clause, the program is ill-formed.
- if (FinalAttr *FA = CXXBaseDecl->getAttr<FinalAttr>()) {
- Diag(BaseLoc, diag::err_class_marked_final_used_as_base)
- << CXXBaseDecl->getDeclName()
- << FA->isSpelledAsSealed();
- Diag(CXXBaseDecl->getLocation(), diag::note_entity_declared_at)
- << CXXBaseDecl->getDeclName() << FA->getRange();
- return nullptr;
- }
-
- if (BaseDecl->isInvalidDecl())
- Class->setInvalidDecl();
+ // In HLSL, unspecified class access is public rather than private.
+ if (getLangOpts().HLSL && Class->getTagKind() == TagTypeKind::Class &&
+ Access == AS_none)
+ Access = AS_public;
// Create the base specifier.
return new (Context) CXXBaseSpecifier(
@@ -2829,11 +2748,6 @@ Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
Access, TInfo, EllipsisLoc);
}
-/// ActOnBaseSpecifier - Parsed a base specifier. A base specifier is
-/// one entry in the base class list of a class specifier, for
-/// example:
-/// class foo : public bar, virtual private baz {
-/// 'public bar' and 'virtual private baz' are each base-specifiers.
BaseResult Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
const ParsedAttributesView &Attributes,
bool Virtual, AccessSpecifier Access,
@@ -2871,13 +2785,20 @@ BaseResult Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
UPPC_BaseType))
return true;
+ // C++ [class.union.general]p4:
+ // [...] A union shall not have base classes.
+ if (Class->isUnion()) {
+ Diag(Class->getLocation(), diag::err_base_clause_on_union)
+ << SpecifierRange;
+ return true;
+ }
+
if (CXXBaseSpecifier *BaseSpec = CheckBaseSpecifier(Class, SpecifierRange,
Virtual, Access, TInfo,
EllipsisLoc))
return BaseSpec;
- else
- Class->setInvalidDecl();
+ Class->setInvalidDecl();
return true;
}
@@ -2906,8 +2827,6 @@ NoteIndirectBases(ASTContext &Context, IndirectBaseSet &Set,
}
}
-/// Performs the actual work of attaching the given base class
-/// specifiers to a C++ class.
bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases) {
if (Bases.empty())
@@ -3013,9 +2932,6 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class,
return Invalid;
}
-/// ActOnBaseSpecifiers - Attach the given base specifiers to the
-/// class, after checking whether there are any duplicate base
-/// classes.
void Sema::ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases) {
if (!ClassDecl || Bases.empty())
@@ -3025,8 +2941,6 @@ void Sema::ActOnBaseSpecifiers(Decl *ClassDecl,
AttachBaseSpecifiers(cast<CXXRecordDecl>(ClassDecl), Bases);
}
-/// Determine whether the type \p Derived is a C++ class that is
-/// derived from the type \p Base.
bool Sema::IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base) {
if (!getLangOpts().CPlusPlus)
return false;
@@ -3052,8 +2966,6 @@ bool Sema::IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base) {
return DerivedRD->isDerivedFrom(BaseRD);
}
-/// Determine whether the type \p Derived is a C++ class that is
-/// derived from the type \p Base.
bool Sema::IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths) {
if (!getLangOpts().CPlusPlus)
@@ -3098,18 +3010,7 @@ void Sema::BuildBasePathArray(const CXXBasePaths &Paths,
assert(Paths.isRecordingPaths() && "Must record paths!");
return ::BuildBasePathArray(Paths.front(), BasePathArray);
}
-/// CheckDerivedToBaseConversion - Check whether the Derived-to-Base
-/// conversion (where Derived and Base are class types) is
-/// well-formed, meaning that the conversion is unambiguous (and
-/// that all of the base classes are accessible). Returns true
-/// and emits a diagnostic if the code is ill-formed, returns false
-/// otherwise. Loc is the location where this routine should point to
-/// if there is an error, and Range is the source range to highlight
-/// if there is an error.
-///
-/// If either InaccessibleBaseID or AmbiguousBaseConvID are 0, then the
-/// diagnostic for the respective type of error will be suppressed, but the
-/// check for ill-formed code will still be performed.
+
bool
Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
@@ -3203,19 +3104,6 @@ Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
BasePath, IgnoreAccess);
}
-
-/// Builds a string representing ambiguous paths from a
-/// specific derived class to different subobjects of the same base
-/// class.
-///
-/// This function builds a string that can be used in error messages
-/// to show the different paths that one can take through the
-/// inheritance hierarchy to go from the derived class to different
-/// subobjects of a base class. The result looks something like this:
-/// @code
-/// struct D -> struct B -> struct A
-/// struct D -> struct C -> struct A
-/// @endcode
std::string Sema::getAmbiguousPathsDisplayString(CXXBasePaths &Paths) {
std::string PathDisplayStr;
std::set<unsigned> DisplayedPaths;
@@ -3239,7 +3127,6 @@ std::string Sema::getAmbiguousPathsDisplayString(CXXBasePaths &Paths) {
// C++ class member Handling
//===----------------------------------------------------------------------===//
-/// ActOnAccessSpecifier - Parsed an access specifier followed by a colon.
bool Sema::ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs) {
@@ -3250,7 +3137,6 @@ bool Sema::ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
return ProcessAccessDeclAttributeList(ASDecl, Attrs);
}
-/// CheckOverrideControl - Check C++11 override control semantics.
void Sema::CheckOverrideControl(NamedDecl *D) {
if (D->isInvalidDecl())
return;
@@ -3354,9 +3240,6 @@ void Sema::DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent) {
}
}
-/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
-/// function overrides a virtual member function marked 'final', according to
-/// C++11 [class.virtual]p4.
bool Sema::CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old) {
FinalAttr *FA = Old->getAttr<FinalAttr>();
@@ -3380,7 +3263,6 @@ static bool InitializationHasSideEffects(const FieldDecl &FD) {
return false;
}
-// Check if there is a field shadowing.
void Sema::CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
@@ -3431,11 +3313,6 @@ void Sema::CheckShadowInheritedFields(const SourceLocation &Loc,
}
}
-/// ActOnCXXMemberDeclarator - This is invoked when a C++ class member
-/// declarator is parsed. 'AS' is the access specifier, 'BW' specifies the
-/// bitfield width if there is one, 'InitExpr' specifies the initializer if
-/// one has been parsed, and 'InitStyle' is set if an in-class initializer is
-/// present (but parsing it has been deferred).
NamedDecl *
Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
@@ -3532,9 +3409,9 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
break;
}
- bool isInstField = ((DS.getStorageClassSpec() == DeclSpec::SCS_unspecified ||
- DS.getStorageClassSpec() == DeclSpec::SCS_mutable) &&
- !isFunc);
+ bool isInstField = (DS.getStorageClassSpec() == DeclSpec::SCS_unspecified ||
+ DS.getStorageClassSpec() == DeclSpec::SCS_mutable) &&
+ !isFunc && TemplateParameterLists.empty();
if (DS.hasConstexprSpecifier() && isInstField) {
SemaDiagnosticBuilder B =
@@ -3583,28 +3460,6 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
}
IdentifierInfo *II = Name.getAsIdentifierInfo();
-
- // Member field could not be with "template" keyword.
- // So TemplateParameterLists should be empty in this case.
- if (TemplateParameterLists.size()) {
- TemplateParameterList* TemplateParams = TemplateParameterLists[0];
- if (TemplateParams->size()) {
- // There is no such thing as a member field template.
- Diag(D.getIdentifierLoc(), diag::err_template_member)
- << II
- << SourceRange(TemplateParams->getTemplateLoc(),
- TemplateParams->getRAngleLoc());
- } else {
- // There is an extraneous 'template<>' for this member.
- Diag(TemplateParams->getTemplateLoc(),
- diag::err_template_member_noparams)
- << II
- << SourceRange(TemplateParams->getTemplateLoc(),
- TemplateParams->getRAngleLoc());
- }
- return nullptr;
- }
-
if (D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId) {
Diag(D.getIdentifierLoc(), diag::err_member_with_template_arguments)
<< II
@@ -3621,14 +3476,18 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
// class X {
// int X::member;
// };
- if (DeclContext *DC = computeDeclContext(SS, false))
+ if (DeclContext *DC = computeDeclContext(SS, false)) {
+ TemplateIdAnnotation *TemplateId =
+ D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId
+ ? D.getName().TemplateId
+ : nullptr;
diagnoseQualifiedDeclaration(SS, DC, Name, D.getIdentifierLoc(),
- D.getName().getKind() ==
- UnqualifiedIdKind::IK_TemplateId);
- else
+ TemplateId,
+ /*IsMemberSpecialization=*/false);
+ } else {
Diag(D.getIdentifierLoc(), diag::err_member_qualification)
<< Name << SS.getRange();
-
+ }
SS.clear();
}
@@ -4165,9 +4024,6 @@ namespace {
}
} // namespace
-/// Enter a new C++ default initializer scope. After calling this, the
-/// caller must call \ref ActOnFinishCXXInClassMemberInitializer, even if
-/// parsing or instantiating the initializer failed.
void Sema::ActOnStartCXXInClassMemberInitializer() {
// Create a synthetic function scope to represent the call to the constructor
// that notionally surrounds a use of this initializer.
@@ -4222,9 +4078,6 @@ ExprResult Sema::ConvertMemberDefaultInitExpression(FieldDecl *FD,
return Seq.Perform(*this, Entity, Kind, InitExpr);
}
-/// This is invoked after parsing an in-class initializer for a
-/// non-static C++ class member, and after instantiating an in-class initializer
-/// in a class template. Such actions are deferred until the class is complete.
void Sema::ActOnFinishCXXInClassMemberInitializer(Decl *D,
SourceLocation InitLoc,
Expr *InitExpr) {
@@ -4311,7 +4164,6 @@ static bool FindBaseInitializer(Sema &SemaRef,
return DirectBaseSpec || VirtualBaseSpec;
}
-/// Handle a C++ member initializer using braced-init-list syntax.
MemInitResult
Sema::ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
@@ -4327,7 +4179,6 @@ Sema::ActOnMemInitializer(Decl *ConstructorD,
EllipsisLoc);
}
-/// Handle a C++ member initializer using parentheses syntax.
MemInitResult
Sema::ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
@@ -4426,7 +4277,6 @@ ValueDecl *Sema::tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
return tryLookupUnambiguousFieldDecl(ClassDecl, MemberOrBase);
}
-/// Handle a C++ member initializer.
MemInitResult
Sema::BuildMemInitializer(Decl *ConstructorD,
Scope *S,
@@ -4494,9 +4344,13 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
} else if (DS.getTypeSpecType() == TST_decltype_auto) {
Diag(DS.getTypeSpecTypeLoc(), diag::err_decltype_auto_invalid);
return true;
+ } else if (DS.getTypeSpecType() == TST_typename_pack_indexing) {
+ BaseType =
+ BuildPackIndexingType(DS.getRepAsType().get(), DS.getPackIndexingExpr(),
+ DS.getBeginLoc(), DS.getEllipsisLoc());
} else {
LookupResult R(*this, MemberOrBase, IdLoc, LookupOrdinaryName);
- LookupParsedName(R, S, &SS);
+ LookupParsedName(R, S, &SS, /*ObjectType=*/QualType());
TypeDecl *TyD = R.getAsSingle<TypeDecl>();
if (!TyD) {
@@ -5264,7 +5118,7 @@ static bool isIncompleteOrZeroLengthArrayType(ASTContext &Context, QualType T) {
return true;
while (const ConstantArrayType *ArrayT = Context.getAsConstantArrayType(T)) {
- if (!ArrayT->getSize())
+ if (ArrayT->isZeroSize())
return true;
T = ArrayT->getElementType();
@@ -5488,7 +5342,7 @@ bool Sema::SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
// A declaration for a bit-field that omits the identifier declares an
// unnamed bit-field. Unnamed bit-fields are not members and cannot be
// initialized.
- if (F->isUnnamedBitfield())
+ if (F->isUnnamedBitField())
continue;
// If we're not generating the implicit copy/move constructor, then we'll
@@ -5617,7 +5471,7 @@ static void DiagnoseBaseOrMemInitializerOrder(
// 3. Direct fields.
for (auto *Field : ClassDecl->fields()) {
- if (Field->isUnnamedBitfield())
+ if (Field->isUnnamedBitField())
continue;
PopulateKeysForFields(Field, IdealInitKeys);
@@ -5772,7 +5626,6 @@ bool CheckRedundantUnionInit(Sema &S,
}
} // namespace
-/// ActOnMemInitializers - Handle the member initializers for a constructor.
void Sema::ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
@@ -5990,6 +5843,10 @@ void Sema::ActOnDefaultCtorInitializers(Decl *CDtorDecl) {
if (CXXConstructorDecl *Constructor
= dyn_cast<CXXConstructorDecl>(CDtorDecl)) {
+ if (CXXRecordDecl *ClassDecl = Constructor->getParent();
+ !ClassDecl || ClassDecl->isInvalidDecl()) {
+ return;
+ }
SetCtorInitializers(Constructor, /*AnyErrors=*/false);
DiagnoseUninitializedFields(*this, Constructor);
}
@@ -6519,7 +6376,6 @@ void Sema::checkClassLevelCodeSegAttribute(CXXRecordDecl *Class) {
}
}
-/// Check class-level dllimport/dllexport attribute.
void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
Attr *ClassAttr = getDLLAttr(Class);
@@ -6693,8 +6549,6 @@ void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
DelayedDllExportClasses.push_back(Class);
}
-/// Perform propagation of DLL attributes from a derived class to a
-/// templated base class for MS compatibility.
void Sema::propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc) {
@@ -6753,35 +6607,28 @@ void Sema::propagateDLLAttrToBaseClassTemplate(
}
}
-/// Determine the kind of defaulting that would be done for a given function.
-///
-/// If the function is both a default constructor and a copy / move constructor
-/// (due to having a default argument for the first parameter), this picks
-/// CXXDefaultConstructor.
-///
-/// FIXME: Check that case is properly handled by all callers.
Sema::DefaultedFunctionKind
Sema::getDefaultedFunctionKind(const FunctionDecl *FD) {
if (auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(FD)) {
if (Ctor->isDefaultConstructor())
- return Sema::CXXDefaultConstructor;
+ return CXXSpecialMemberKind::DefaultConstructor;
if (Ctor->isCopyConstructor())
- return Sema::CXXCopyConstructor;
+ return CXXSpecialMemberKind::CopyConstructor;
if (Ctor->isMoveConstructor())
- return Sema::CXXMoveConstructor;
+ return CXXSpecialMemberKind::MoveConstructor;
}
if (MD->isCopyAssignmentOperator())
- return Sema::CXXCopyAssignment;
+ return CXXSpecialMemberKind::CopyAssignment;
if (MD->isMoveAssignmentOperator())
- return Sema::CXXMoveAssignment;
+ return CXXSpecialMemberKind::MoveAssignment;
if (isa<CXXDestructorDecl>(FD))
- return Sema::CXXDestructor;
+ return CXXSpecialMemberKind::Destructor;
}
switch (FD->getDeclName().getCXXOverloadedOperator()) {
@@ -6821,26 +6668,26 @@ static void DefineDefaultedFunction(Sema &S, FunctionDecl *FD,
return S.DefineDefaultedComparison(DefaultLoc, FD, DFK.asComparison());
switch (DFK.asSpecialMember()) {
- case Sema::CXXDefaultConstructor:
+ case CXXSpecialMemberKind::DefaultConstructor:
S.DefineImplicitDefaultConstructor(DefaultLoc,
cast<CXXConstructorDecl>(FD));
break;
- case Sema::CXXCopyConstructor:
+ case CXXSpecialMemberKind::CopyConstructor:
S.DefineImplicitCopyConstructor(DefaultLoc, cast<CXXConstructorDecl>(FD));
break;
- case Sema::CXXCopyAssignment:
+ case CXXSpecialMemberKind::CopyAssignment:
S.DefineImplicitCopyAssignment(DefaultLoc, cast<CXXMethodDecl>(FD));
break;
- case Sema::CXXDestructor:
+ case CXXSpecialMemberKind::Destructor:
S.DefineImplicitDestructor(DefaultLoc, cast<CXXDestructorDecl>(FD));
break;
- case Sema::CXXMoveConstructor:
+ case CXXSpecialMemberKind::MoveConstructor:
S.DefineImplicitMoveConstructor(DefaultLoc, cast<CXXConstructorDecl>(FD));
break;
- case Sema::CXXMoveAssignment:
+ case CXXSpecialMemberKind::MoveAssignment:
S.DefineImplicitMoveAssignment(DefaultLoc, cast<CXXMethodDecl>(FD));
break;
- case Sema::CXXInvalid:
+ case CXXSpecialMemberKind::Invalid:
llvm_unreachable("Invalid special member.");
}
}
@@ -6981,13 +6828,6 @@ ReportOverrides(Sema &S, unsigned DiagID, const CXXMethodDecl *MD,
return IssuedDiagnostic;
}
-/// Perform semantic checks on a class definition that has been
-/// completing, introducing implicitly-declared members, checking for
-/// abstract types, etc.
-///
-/// \param S The scope in which the class was parsed. Null if we didn't just
-/// parse a class definition.
-/// \param Record The completed class.
void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
if (!Record)
return;
@@ -7005,7 +6845,7 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
!Record->isLambda()) {
bool Complained = false;
for (const auto *F : Record->fields()) {
- if (F->hasInClassInitializer() || F->isUnnamedBitfield())
+ if (F->hasInClassInitializer() || F->isUnnamedBitField())
continue;
if (F->getType()->isReferenceType() ||
@@ -7150,6 +6990,10 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
return false;
};
+ if (!Record->isInvalidDecl() &&
+ Record->hasAttr<VTablePointerAuthenticationAttr>())
+ checkIncorrectVTablePointerAuthenticationAttribute(*Record);
+
auto CompleteMemberFunction = [&](CXXMethodDecl *M) {
// Check whether the explicitly-defaulted members are valid.
bool Incomplete = CheckForDefaultedFunction(M);
@@ -7160,9 +7004,9 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
// For an explicitly defaulted or deleted special member, we defer
// determining triviality until the class is complete. That time is now!
- CXXSpecialMember CSM = getSpecialMember(M);
+ CXXSpecialMemberKind CSM = getSpecialMember(M);
if (!M->isImplicit() && !M->isUserProvided()) {
- if (CSM != CXXInvalid) {
+ if (CSM != CXXSpecialMemberKind::Invalid) {
M->setTrivial(SpecialMemberIsTrivial(M, CSM));
// Inform the class that we've finished declaring this member.
Record->finishedDefaultedOrDeletedMember(M);
@@ -7175,8 +7019,10 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
// Set triviality for the purpose of calls if this is a user-provided
// copy/move constructor or destructor.
- if ((CSM == CXXCopyConstructor || CSM == CXXMoveConstructor ||
- CSM == CXXDestructor) && M->isUserProvided()) {
+ if ((CSM == CXXSpecialMemberKind::CopyConstructor ||
+ CSM == CXXSpecialMemberKind::MoveConstructor ||
+ CSM == CXXSpecialMemberKind::Destructor) &&
+ M->isUserProvided()) {
M->setTrivialForCall(HasTrivialABI);
Record->setTrivialForCallFlags(M);
}
@@ -7185,8 +7031,9 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
M->hasAttr<DLLExportAttr>()) {
if (getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2015) &&
M->isTrivial() &&
- (CSM == CXXDefaultConstructor || CSM == CXXCopyConstructor ||
- CSM == CXXDestructor))
+ (CSM == CXXSpecialMemberKind::DefaultConstructor ||
+ CSM == CXXSpecialMemberKind::CopyConstructor ||
+ CSM == CXXSpecialMemberKind::Destructor))
M->dropAttr<DLLExportAttr>();
if (M->hasAttr<DLLExportAttr>()) {
@@ -7195,11 +7042,43 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
}
}
+ bool EffectivelyConstexprDestructor = true;
+ // Avoid triggering vtable instantiation due to a dtor that is not
+ // "effectively constexpr" for better compatibility.
+ // See https://github.com/llvm/llvm-project/issues/102293 for more info.
+ if (isa<CXXDestructorDecl>(M)) {
+ auto Check = [](QualType T, auto &&Check) -> bool {
+ const CXXRecordDecl *RD =
+ T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ if (!RD || !RD->isCompleteDefinition())
+ return true;
+
+ if (!RD->hasConstexprDestructor())
+ return false;
+
+ QualType CanUnqualT = T.getCanonicalType().getUnqualifiedType();
+ for (const CXXBaseSpecifier &B : RD->bases())
+ if (B.getType().getCanonicalType().getUnqualifiedType() !=
+ CanUnqualT &&
+ !Check(B.getType(), Check))
+ return false;
+ for (const FieldDecl *FD : RD->fields())
+ if (FD->getType().getCanonicalType().getUnqualifiedType() !=
+ CanUnqualT &&
+ !Check(FD->getType(), Check))
+ return false;
+ return true;
+ };
+ EffectivelyConstexprDestructor =
+ Check(QualType(Record->getTypeForDecl(), 0), Check);
+ }
+
// Define defaulted constexpr virtual functions that override a base class
// function right away.
// FIXME: We can defer doing this until the vtable is marked as used.
- if (CSM != CXXInvalid && !M->isDeleted() && M->isDefaulted() &&
- M->isConstexpr() && M->size_overridden_methods())
+ if (CSM != CXXSpecialMemberKind::Invalid && !M->isDeleted() &&
+ M->isDefaulted() && M->isConstexpr() && M->size_overridden_methods() &&
+ EffectivelyConstexprDestructor)
DefineDefaultedFunction(*this, M, M->getLocation());
if (!Incomplete)
@@ -7282,7 +7161,7 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
bool CanPass = canPassInRegisters(*this, Record, CCK);
// Do not change ArgPassingRestrictions if it has already been set to
- // ArgPassingKind::CanNeverPassInRegs.
+ // RecordArgPassingKind::CanNeverPassInRegs.
if (Record->getArgPassingRestrictions() !=
RecordArgPassingKind::CanNeverPassInRegs)
Record->setArgPassingRestrictions(
@@ -7321,15 +7200,18 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
/// \param ConstRHS True if this is a copy operation with a const object
/// on its RHS, that is, if the argument to the outer special member
/// function is 'const' and this is not a field marked 'mutable'.
-static Sema::SpecialMemberOverloadResult lookupCallFromSpecialMember(
- Sema &S, CXXRecordDecl *Class, Sema::CXXSpecialMember CSM,
- unsigned FieldQuals, bool ConstRHS) {
+static Sema::SpecialMemberOverloadResult
+lookupCallFromSpecialMember(Sema &S, CXXRecordDecl *Class,
+ CXXSpecialMemberKind CSM, unsigned FieldQuals,
+ bool ConstRHS) {
unsigned LHSQuals = 0;
- if (CSM == Sema::CXXCopyAssignment || CSM == Sema::CXXMoveAssignment)
+ if (CSM == CXXSpecialMemberKind::CopyAssignment ||
+ CSM == CXXSpecialMemberKind::MoveAssignment)
LHSQuals = FieldQuals;
unsigned RHSQuals = FieldQuals;
- if (CSM == Sema::CXXDefaultConstructor || CSM == Sema::CXXDestructor)
+ if (CSM == CXXSpecialMemberKind::DefaultConstructor ||
+ CSM == CXXSpecialMemberKind::Destructor)
RHSQuals = 0;
else if (ConstRHS)
RHSQuals |= Qualifiers::Const;
@@ -7425,12 +7307,10 @@ public:
/// Is the special member function which would be selected to perform the
/// specified operation on the specified class type a constexpr constructor?
-static bool
-specialMemberIsConstexpr(Sema &S, CXXRecordDecl *ClassDecl,
- Sema::CXXSpecialMember CSM, unsigned Quals,
- bool ConstRHS,
- CXXConstructorDecl *InheritedCtor = nullptr,
- Sema::InheritedConstructorInfo *Inherited = nullptr) {
+static bool specialMemberIsConstexpr(
+ Sema &S, CXXRecordDecl *ClassDecl, CXXSpecialMemberKind CSM, unsigned Quals,
+ bool ConstRHS, CXXConstructorDecl *InheritedCtor = nullptr,
+ Sema::InheritedConstructorInfo *Inherited = nullptr) {
// Suppress duplicate constraint checking here, in case a constraint check
// caused us to decide to do this. Any truely recursive checks will get
// caught during these checks anyway.
@@ -7439,16 +7319,16 @@ specialMemberIsConstexpr(Sema &S, CXXRecordDecl *ClassDecl,
// If we're inheriting a constructor, see if we need to call it for this base
// class.
if (InheritedCtor) {
- assert(CSM == Sema::CXXDefaultConstructor);
+ assert(CSM == CXXSpecialMemberKind::DefaultConstructor);
auto BaseCtor =
Inherited->findConstructorForBase(ClassDecl, InheritedCtor).first;
if (BaseCtor)
return BaseCtor->isConstexpr();
}
- if (CSM == Sema::CXXDefaultConstructor)
+ if (CSM == CXXSpecialMemberKind::DefaultConstructor)
return ClassDecl->hasConstexprDefaultConstructor();
- if (CSM == Sema::CXXDestructor)
+ if (CSM == CXXSpecialMemberKind::Destructor)
return ClassDecl->hasConstexprDestructor();
Sema::SpecialMemberOverloadResult SMOR =
@@ -7463,8 +7343,8 @@ specialMemberIsConstexpr(Sema &S, CXXRecordDecl *ClassDecl,
/// Determine whether the specified special member function would be constexpr
/// if it were implicitly defined.
static bool defaultedSpecialMemberIsConstexpr(
- Sema &S, CXXRecordDecl *ClassDecl, Sema::CXXSpecialMember CSM,
- bool ConstArg, CXXConstructorDecl *InheritedCtor = nullptr,
+ Sema &S, CXXRecordDecl *ClassDecl, CXXSpecialMemberKind CSM, bool ConstArg,
+ CXXConstructorDecl *InheritedCtor = nullptr,
Sema::InheritedConstructorInfo *Inherited = nullptr) {
if (!S.getLangOpts().CPlusPlus11)
return false;
@@ -7473,7 +7353,7 @@ static bool defaultedSpecialMemberIsConstexpr(
// In the definition of a constexpr constructor [...]
bool Ctor = true;
switch (CSM) {
- case Sema::CXXDefaultConstructor:
+ case CXXSpecialMemberKind::DefaultConstructor:
if (Inherited)
break;
// Since default constructor lookup is essentially trivial (and cannot
@@ -7484,23 +7364,23 @@ static bool defaultedSpecialMemberIsConstexpr(
// constructor is constexpr to determine whether the type is a literal type.
return ClassDecl->defaultedDefaultConstructorIsConstexpr();
- case Sema::CXXCopyConstructor:
- case Sema::CXXMoveConstructor:
+ case CXXSpecialMemberKind::CopyConstructor:
+ case CXXSpecialMemberKind::MoveConstructor:
// For copy or move constructors, we need to perform overload resolution.
break;
- case Sema::CXXCopyAssignment:
- case Sema::CXXMoveAssignment:
+ case CXXSpecialMemberKind::CopyAssignment:
+ case CXXSpecialMemberKind::MoveAssignment:
if (!S.getLangOpts().CPlusPlus14)
return false;
// In C++1y, we need to perform overload resolution.
Ctor = false;
break;
- case Sema::CXXDestructor:
+ case CXXSpecialMemberKind::Destructor:
return ClassDecl->defaultedDestructorIsConstexpr();
- case Sema::CXXInvalid:
+ case CXXSpecialMemberKind::Invalid:
return false;
}
@@ -7512,7 +7392,7 @@ static bool defaultedSpecialMemberIsConstexpr(
// will be initialized (if the constructor isn't deleted), we just don't know
// which one.
if (Ctor && ClassDecl->isUnion())
- return CSM == Sema::CXXDefaultConstructor
+ return CSM == CXXSpecialMemberKind::DefaultConstructor
? ClassDecl->hasInClassInitializer() ||
!ClassDecl->hasVariantMembers()
: true;
@@ -7523,21 +7403,23 @@ static bool defaultedSpecialMemberIsConstexpr(
// C++1y [class.copy]p26:
// -- [the class] is a literal type, and
- if (!Ctor && !ClassDecl->isLiteral())
+ if (!Ctor && !ClassDecl->isLiteral() && !S.getLangOpts().CPlusPlus23)
return false;
// -- every constructor involved in initializing [...] base class
// sub-objects shall be a constexpr constructor;
// -- the assignment operator selected to copy/move each direct base
// class is a constexpr function, and
- for (const auto &B : ClassDecl->bases()) {
- const RecordType *BaseType = B.getType()->getAs<RecordType>();
- if (!BaseType)
- continue;
- CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
- if (!specialMemberIsConstexpr(S, BaseClassDecl, CSM, 0, ConstArg,
- InheritedCtor, Inherited))
- return false;
+ if (!S.getLangOpts().CPlusPlus23) {
+ for (const auto &B : ClassDecl->bases()) {
+ const RecordType *BaseType = B.getType()->getAs<RecordType>();
+ if (!BaseType)
+ continue;
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ if (!specialMemberIsConstexpr(S, BaseClassDecl, CSM, 0, ConstArg,
+ InheritedCtor, Inherited))
+ return false;
+ }
}
// -- every constructor involved in initializing non-static data members
@@ -7547,20 +7429,23 @@ static bool defaultedSpecialMemberIsConstexpr(
// -- for each non-static data member of X that is of class type (or array
// thereof), the assignment operator selected to copy/move that member is
// a constexpr function
- for (const auto *F : ClassDecl->fields()) {
- if (F->isInvalidDecl())
- continue;
- if (CSM == Sema::CXXDefaultConstructor && F->hasInClassInitializer())
- continue;
- QualType BaseType = S.Context.getBaseElementType(F->getType());
- if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
- CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
- if (!specialMemberIsConstexpr(S, FieldRecDecl, CSM,
- BaseType.getCVRQualifiers(),
- ConstArg && !F->isMutable()))
+ if (!S.getLangOpts().CPlusPlus23) {
+ for (const auto *F : ClassDecl->fields()) {
+ if (F->isInvalidDecl())
+ continue;
+ if (CSM == CXXSpecialMemberKind::DefaultConstructor &&
+ F->hasInClassInitializer())
+ continue;
+ QualType BaseType = S.Context.getBaseElementType(F->getType());
+ if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
+ CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (!specialMemberIsConstexpr(S, FieldRecDecl, CSM,
+ BaseType.getCVRQualifiers(),
+ ConstArg && !F->isMutable()))
+ return false;
+ } else if (CSM == CXXSpecialMemberKind::DefaultConstructor) {
return false;
- } else if (CSM == Sema::CXXDefaultConstructor) {
- return false;
+ }
}
}
@@ -7589,9 +7474,10 @@ struct ComputingExceptionSpec {
}
static Sema::ImplicitExceptionSpecification
-ComputeDefaultedSpecialMemberExceptionSpec(
- Sema &S, SourceLocation Loc, CXXMethodDecl *MD, Sema::CXXSpecialMember CSM,
- Sema::InheritedConstructorInfo *ICI);
+ComputeDefaultedSpecialMemberExceptionSpec(Sema &S, SourceLocation Loc,
+ CXXMethodDecl *MD,
+ CXXSpecialMemberKind CSM,
+ Sema::InheritedConstructorInfo *ICI);
static Sema::ImplicitExceptionSpecification
ComputeDefaultedComparisonExceptionSpec(Sema &S, SourceLocation Loc,
@@ -7615,7 +7501,7 @@ computeImplicitExceptionSpec(Sema &S, SourceLocation Loc, FunctionDecl *FD) {
Sema::InheritedConstructorInfo ICI(
S, Loc, CD->getInheritedConstructor().getShadowDecl());
return ComputeDefaultedSpecialMemberExceptionSpec(
- S, Loc, CD, Sema::CXXDefaultConstructor, &ICI);
+ S, Loc, CD, CXXSpecialMemberKind::DefaultConstructor, &ICI);
}
static FunctionProtoType::ExtProtoInfo getImplicitMethodEPI(Sema &S,
@@ -7667,11 +7553,11 @@ void Sema::CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *FD) {
}
bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
- CXXSpecialMember CSM,
+ CXXSpecialMemberKind CSM,
SourceLocation DefaultLoc) {
CXXRecordDecl *RD = MD->getParent();
- assert(MD->isExplicitlyDefaulted() && CSM != CXXInvalid &&
+ assert(MD->isExplicitlyDefaulted() && CSM != CXXSpecialMemberKind::Invalid &&
"not an explicitly-defaulted special member");
// Defer all checking for special members of a dependent type.
@@ -7697,21 +7583,22 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
bool DeleteOnTypeMismatch = getLangOpts().CPlusPlus20 && First;
bool ShouldDeleteForTypeMismatch = false;
unsigned ExpectedParams = 1;
- if (CSM == CXXDefaultConstructor || CSM == CXXDestructor)
+ if (CSM == CXXSpecialMemberKind::DefaultConstructor ||
+ CSM == CXXSpecialMemberKind::Destructor)
ExpectedParams = 0;
if (MD->getNumExplicitParams() != ExpectedParams) {
// This checks for default arguments: a copy or move constructor with a
// default argument is classified as a default constructor, and assignment
// operations and destructors can't have default arguments.
Diag(MD->getLocation(), diag::err_defaulted_special_member_params)
- << CSM << MD->getSourceRange();
+ << llvm::to_underlying(CSM) << MD->getSourceRange();
HadError = true;
} else if (MD->isVariadic()) {
if (DeleteOnTypeMismatch)
ShouldDeleteForTypeMismatch = true;
else {
Diag(MD->getLocation(), diag::err_defaulted_special_member_variadic)
- << CSM << MD->getSourceRange();
+ << llvm::to_underlying(CSM) << MD->getSourceRange();
HadError = true;
}
}
@@ -7719,13 +7606,14 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
const FunctionProtoType *Type = MD->getType()->castAs<FunctionProtoType>();
bool CanHaveConstParam = false;
- if (CSM == CXXCopyConstructor)
+ if (CSM == CXXSpecialMemberKind::CopyConstructor)
CanHaveConstParam = RD->implicitCopyConstructorHasConstParam();
- else if (CSM == CXXCopyAssignment)
+ else if (CSM == CXXSpecialMemberKind::CopyAssignment)
CanHaveConstParam = RD->implicitCopyAssignmentHasConstParam();
QualType ReturnType = Context.VoidTy;
- if (CSM == CXXCopyAssignment || CSM == CXXMoveAssignment) {
+ if (CSM == CXXSpecialMemberKind::CopyAssignment ||
+ CSM == CXXSpecialMemberKind::MoveAssignment) {
// Check for return type matching.
ReturnType = Type->getReturnType();
QualType ThisType = MD->getFunctionObjectParameterType();
@@ -7739,7 +7627,8 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
if (!Context.hasSameType(ReturnType, ExpectedReturnType)) {
Diag(MD->getLocation(), diag::err_defaulted_special_member_return_type)
- << (CSM == CXXMoveAssignment) << ExpectedReturnType;
+ << (CSM == CXXSpecialMemberKind::MoveAssignment)
+ << ExpectedReturnType;
HadError = true;
}
@@ -7749,7 +7638,8 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
ShouldDeleteForTypeMismatch = true;
else {
Diag(MD->getLocation(), diag::err_defaulted_special_member_quals)
- << (CSM == CXXMoveAssignment) << getLangOpts().CPlusPlus14;
+ << (CSM == CXXSpecialMemberKind::MoveAssignment)
+ << getLangOpts().CPlusPlus14;
HadError = true;
}
}
@@ -7767,7 +7657,8 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
else {
Diag(MD->getLocation(),
diag::err_defaulted_special_member_explicit_object_mismatch)
- << (CSM == CXXMoveAssignment) << RD << MD->getSourceRange();
+ << (CSM == CXXSpecialMemberKind::MoveAssignment) << RD
+ << MD->getSourceRange();
HadError = true;
}
}
@@ -7789,7 +7680,8 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
ShouldDeleteForTypeMismatch = true;
else {
Diag(MD->getLocation(),
- diag::err_defaulted_special_member_volatile_param) << CSM;
+ diag::err_defaulted_special_member_volatile_param)
+ << llvm::to_underlying(CSM);
HadError = true;
}
}
@@ -7797,23 +7689,25 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
if (HasConstParam && !CanHaveConstParam) {
if (DeleteOnTypeMismatch)
ShouldDeleteForTypeMismatch = true;
- else if (CSM == CXXCopyConstructor || CSM == CXXCopyAssignment) {
+ else if (CSM == CXXSpecialMemberKind::CopyConstructor ||
+ CSM == CXXSpecialMemberKind::CopyAssignment) {
Diag(MD->getLocation(),
diag::err_defaulted_special_member_copy_const_param)
- << (CSM == CXXCopyAssignment);
+ << (CSM == CXXSpecialMemberKind::CopyAssignment);
// FIXME: Explain why this special member can't be const.
HadError = true;
} else {
Diag(MD->getLocation(),
diag::err_defaulted_special_member_move_const_param)
- << (CSM == CXXMoveAssignment);
+ << (CSM == CXXSpecialMemberKind::MoveAssignment);
HadError = true;
}
}
} else if (ExpectedParams) {
// A copy assignment operator can take its argument by value, but a
// defaulted one cannot.
- assert(CSM == CXXCopyAssignment && "unexpected non-ref argument");
+ assert(CSM == CXXSpecialMemberKind::CopyAssignment &&
+ "unexpected non-ref argument");
Diag(MD->getLocation(), diag::err_defaulted_copy_assign_not_ref);
HadError = true;
}
@@ -7846,18 +7740,17 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
MD->isConstexpr() && !Constexpr &&
MD->getTemplatedKind() == FunctionDecl::TK_NonTemplate) {
if (!MD->isConsteval() && RD->getNumVBases()) {
- Diag(MD->getBeginLoc(), diag::err_incorrect_defaulted_constexpr_with_vb)
- << CSM;
+ Diag(MD->getBeginLoc(),
+ diag::err_incorrect_defaulted_constexpr_with_vb)
+ << llvm::to_underlying(CSM);
for (const auto &I : RD->vbases())
Diag(I.getBeginLoc(), diag::note_constexpr_virtual_base_here);
} else {
- Diag(MD->getBeginLoc(), MD->isConsteval()
- ? diag::err_incorrect_defaulted_consteval
- : diag::err_incorrect_defaulted_constexpr)
- << CSM;
+ Diag(MD->getBeginLoc(), diag::err_incorrect_defaulted_constexpr)
+ << llvm::to_underlying(CSM) << MD->isConsteval();
}
- // FIXME: Explain why the special member can't be constexpr.
- HadError = true;
+ HadError = true;
+ // FIXME: Explain why the special member can't be constexpr.
}
if (First) {
@@ -7887,9 +7780,11 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
if (First) {
SetDeclDeleted(MD, MD->getLocation());
if (!inTemplateInstantiation() && !HadError) {
- Diag(MD->getLocation(), diag::warn_defaulted_method_deleted) << CSM;
+ Diag(MD->getLocation(), diag::warn_defaulted_method_deleted)
+ << llvm::to_underlying(CSM);
if (ShouldDeleteForTypeMismatch) {
- Diag(MD->getLocation(), diag::note_deleted_type_mismatch) << CSM;
+ Diag(MD->getLocation(), diag::note_deleted_type_mismatch)
+ << llvm::to_underlying(CSM);
} else if (ShouldDeleteSpecialMember(MD, CSM, nullptr,
/*Diagnose*/ true) &&
DefaultLoc.isValid()) {
@@ -7899,13 +7794,15 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
}
if (ShouldDeleteForTypeMismatch && !HadError) {
Diag(MD->getLocation(),
- diag::warn_cxx17_compat_defaulted_method_type_mismatch) << CSM;
+ diag::warn_cxx17_compat_defaulted_method_type_mismatch)
+ << llvm::to_underlying(CSM);
}
} else {
// C++11 [dcl.fct.def.default]p4:
// [For a] user-provided explicitly-defaulted function [...] if such a
// function is implicitly defined as deleted, the program is ill-formed.
- Diag(MD->getLocation(), diag::err_out_of_line_default_deletes) << CSM;
+ Diag(MD->getLocation(), diag::err_out_of_line_default_deletes)
+ << llvm::to_underlying(CSM);
assert(!ShouldDeleteForTypeMismatch && "deleted non-first decl");
ShouldDeleteSpecialMember(MD, CSM, nullptr, /*Diagnose*/true);
HadError = true;
@@ -7937,7 +7834,7 @@ public:
DefaultedComparisonVisitor(Sema &S, CXXRecordDecl *RD, FunctionDecl *FD,
DefaultedComparisonKind DCK)
: S(S), RD(RD), FD(FD), DCK(DCK) {
- if (auto *Info = FD->getDefaultedFunctionInfo()) {
+ if (auto *Info = FD->getDefalutedOrDeletedInfo()) {
// FIXME: Change CreateOverloadedBinOp to take an ArrayRef instead of an
// UnresolvedSet to avoid this copy.
Fns.assign(Info->getUnqualifiedLookups().begin(),
@@ -7991,7 +7888,7 @@ protected:
for (FieldDecl *Field : Record->fields()) {
// C++23 [class.bit]p2:
// Unnamed bit-fields are not members ...
- if (Field->isUnnamedBitfield())
+ if (Field->isUnnamedBitField())
continue;
// Recursively expand anonymous structs.
if (Field->isAnonymousStructOrUnion()) {
@@ -8805,8 +8702,9 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
UnresolvedSet<32> Operators;
lookupOperatorsForDefaultedComparison(*this, S, Operators,
FD->getOverloadedOperator());
- FD->setDefaultedFunctionInfo(FunctionDecl::DefaultedFunctionInfo::Create(
- Context, Operators.pairs()));
+ FD->setDefaultedOrDeletedInfo(
+ FunctionDecl::DefaultedOrDeletedFunctionInfo::Create(
+ Context, Operators.pairs()));
}
// C++2a [class.compare.default]p1:
@@ -9089,13 +8987,11 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
// - if the function is a constructor or destructor, its class does not
// have any virtual base classes.
if (FD->isConstexpr()) {
- if (CheckConstexprReturnType(*this, FD, CheckConstexprKind::Diagnose) &&
+ if (!getLangOpts().CPlusPlus23 &&
+ CheckConstexprReturnType(*this, FD, CheckConstexprKind::Diagnose) &&
CheckConstexprParameterTypes(*this, FD, CheckConstexprKind::Diagnose) &&
!Info.Constexpr) {
- Diag(FD->getBeginLoc(),
- getLangOpts().CPlusPlus23
- ? diag::warn_cxx23_compat_defaulted_comparison_constexpr_mismatch
- : diag::ext_defaulted_comparison_constexpr_mismatch)
+ Diag(FD->getBeginLoc(), diag::err_defaulted_comparison_constexpr_mismatch)
<< FD->isImplicit() << (int)DCK << FD->isConsteval();
DefaultedComparisonAnalyzer(*this, RD, FD, DCK,
DefaultedComparisonAnalyzer::ExplainConstexpr)
@@ -9206,7 +9102,10 @@ ComputeDefaultedComparisonExceptionSpec(Sema &S, SourceLocation Loc,
EnterExpressionEvaluationContext Context(
S, Sema::ExpressionEvaluationContext::Unevaluated);
- CXXRecordDecl *RD = cast<CXXRecordDecl>(FD->getLexicalParent());
+ CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(FD->getFriendObjectKind() == Decl::FOK_None
+ ? FD->getDeclContext()
+ : FD->getLexicalDeclContext());
SourceLocation BodyLoc =
FD->getEndLoc().isValid() ? FD->getEndLoc() : FD->getLocation();
StmtResult Body =
@@ -9248,28 +9147,28 @@ template<typename Derived>
struct SpecialMemberVisitor {
Sema &S;
CXXMethodDecl *MD;
- Sema::CXXSpecialMember CSM;
+ CXXSpecialMemberKind CSM;
Sema::InheritedConstructorInfo *ICI;
// Properties of the special member, computed for convenience.
bool IsConstructor = false, IsAssignment = false, ConstArg = false;
- SpecialMemberVisitor(Sema &S, CXXMethodDecl *MD, Sema::CXXSpecialMember CSM,
+ SpecialMemberVisitor(Sema &S, CXXMethodDecl *MD, CXXSpecialMemberKind CSM,
Sema::InheritedConstructorInfo *ICI)
: S(S), MD(MD), CSM(CSM), ICI(ICI) {
switch (CSM) {
- case Sema::CXXDefaultConstructor:
- case Sema::CXXCopyConstructor:
- case Sema::CXXMoveConstructor:
+ case CXXSpecialMemberKind::DefaultConstructor:
+ case CXXSpecialMemberKind::CopyConstructor:
+ case CXXSpecialMemberKind::MoveConstructor:
IsConstructor = true;
break;
- case Sema::CXXCopyAssignment:
- case Sema::CXXMoveAssignment:
+ case CXXSpecialMemberKind::CopyAssignment:
+ case CXXSpecialMemberKind::MoveAssignment:
IsAssignment = true;
break;
- case Sema::CXXDestructor:
+ case CXXSpecialMemberKind::Destructor:
break;
- case Sema::CXXInvalid:
+ case CXXSpecialMemberKind::Invalid:
llvm_unreachable("invalid special member kind");
}
@@ -9284,7 +9183,8 @@ struct SpecialMemberVisitor {
/// Is this a "move" special member?
bool isMove() const {
- return CSM == Sema::CXXMoveConstructor || CSM == Sema::CXXMoveAssignment;
+ return CSM == CXXSpecialMemberKind::MoveConstructor ||
+ CSM == CXXSpecialMemberKind::MoveAssignment;
}
/// Look up the corresponding special member in the given class.
@@ -9299,7 +9199,7 @@ struct SpecialMemberVisitor {
Sema::SpecialMemberOverloadResult lookupInheritedCtor(CXXRecordDecl *Class) {
if (!ICI)
return {};
- assert(CSM == Sema::CXXDefaultConstructor);
+ assert(CSM == CXXSpecialMemberKind::DefaultConstructor);
auto *BaseCtor =
cast<CXXConstructorDecl>(MD)->getInheritedConstructor().getConstructor();
if (auto *MD = ICI->findConstructorForBase(Class, BaseCtor).first)
@@ -9350,7 +9250,7 @@ struct SpecialMemberVisitor {
return true;
for (auto *F : RD->fields())
- if (!F->isInvalidDecl() && !F->isUnnamedBitfield() &&
+ if (!F->isInvalidDecl() && !F->isUnnamedBitField() &&
getDerived().visitField(F))
return true;
@@ -9369,15 +9269,15 @@ struct SpecialMemberDeletionInfo
bool AllFieldsAreConst;
SpecialMemberDeletionInfo(Sema &S, CXXMethodDecl *MD,
- Sema::CXXSpecialMember CSM,
+ CXXSpecialMemberKind CSM,
Sema::InheritedConstructorInfo *ICI, bool Diagnose)
: SpecialMemberVisitor(S, MD, CSM, ICI), Diagnose(Diagnose),
Loc(MD->getLocation()), AllFieldsAreConst(true) {}
bool inUnion() const { return MD->getParent()->isUnion(); }
- Sema::CXXSpecialMember getEffectiveCSM() {
- return ICI ? Sema::CXXInvalid : CSM;
+ CXXSpecialMemberKind getEffectiveCSM() {
+ return ICI ? CXXSpecialMemberKind::Invalid : CSM;
}
bool shouldDeleteForVariantObjCPtrMember(FieldDecl *FD, QualType FieldType);
@@ -9443,7 +9343,7 @@ bool SpecialMemberDeletionInfo::shouldDeleteForSubobjectCall(
// must be accessible and non-deleted, but need not be trivial. Such a
// destructor is never actually called, but is semantically checked as
// if it were.
- if (CSM == Sema::CXXDefaultConstructor) {
+ if (CSM == CXXSpecialMemberKind::DefaultConstructor) {
// [class.default.ctor]p2:
// A defaulted default constructor for class X is defined as deleted if
// - X is a union that has a variant member with a non-trivial default
@@ -9464,15 +9364,16 @@ bool SpecialMemberDeletionInfo::shouldDeleteForSubobjectCall(
if (Field) {
S.Diag(Field->getLocation(),
diag::note_deleted_special_member_class_subobject)
- << getEffectiveCSM() << MD->getParent() << /*IsField*/true
- << Field << DiagKind << IsDtorCallInCtor << /*IsObjCPtr*/false;
+ << llvm::to_underlying(getEffectiveCSM()) << MD->getParent()
+ << /*IsField*/ true << Field << DiagKind << IsDtorCallInCtor
+ << /*IsObjCPtr*/ false;
} else {
CXXBaseSpecifier *Base = Subobj.get<CXXBaseSpecifier*>();
S.Diag(Base->getBeginLoc(),
diag::note_deleted_special_member_class_subobject)
- << getEffectiveCSM() << MD->getParent() << /*IsField*/ false
- << Base->getType() << DiagKind << IsDtorCallInCtor
- << /*IsObjCPtr*/false;
+ << llvm::to_underlying(getEffectiveCSM()) << MD->getParent()
+ << /*IsField*/ false << Base->getType() << DiagKind
+ << IsDtorCallInCtor << /*IsObjCPtr*/ false;
}
if (DiagKind == 1)
@@ -9504,8 +9405,8 @@ bool SpecialMemberDeletionInfo::shouldDeleteForClassSubobject(
// C++11 [class.dtor]p5:
// -- any direct or virtual base class [...] has a type with a destructor
// that is deleted or inaccessible
- if (!(CSM == Sema::CXXDefaultConstructor &&
- Field && Field->hasInClassInitializer()) &&
+ if (!(CSM == CXXSpecialMemberKind::DefaultConstructor && Field &&
+ Field->hasInClassInitializer()) &&
shouldDeleteForSubobjectCall(Subobj, lookupIn(Class, Quals, IsMutable),
false))
return true;
@@ -9515,8 +9416,8 @@ bool SpecialMemberDeletionInfo::shouldDeleteForClassSubobject(
// type with a destructor that is deleted or inaccessible
if (IsConstructor) {
Sema::SpecialMemberOverloadResult SMOR =
- S.LookupSpecialMember(Class, Sema::CXXDestructor,
- false, false, false, false, false);
+ S.LookupSpecialMember(Class, CXXSpecialMemberKind::Destructor, false,
+ false, false, false, false);
if (shouldDeleteForSubobjectCall(Subobj, SMOR, true))
return true;
}
@@ -9534,15 +9435,16 @@ bool SpecialMemberDeletionInfo::shouldDeleteForVariantObjCPtrMember(
// Don't make the defaulted default constructor defined as deleted if the
// member has an in-class initializer.
- if (CSM == Sema::CXXDefaultConstructor && FD->hasInClassInitializer())
+ if (CSM == CXXSpecialMemberKind::DefaultConstructor &&
+ FD->hasInClassInitializer())
return false;
if (Diagnose) {
auto *ParentClass = cast<CXXRecordDecl>(FD->getParent());
- S.Diag(FD->getLocation(),
- diag::note_deleted_special_member_class_subobject)
- << getEffectiveCSM() << ParentClass << /*IsField*/true
- << FD << 4 << /*IsDtorCallInCtor*/false << /*IsObjCPtr*/true;
+ S.Diag(FD->getLocation(), diag::note_deleted_special_member_class_subobject)
+ << llvm::to_underlying(getEffectiveCSM()) << ParentClass
+ << /*IsField*/ true << FD << 4 << /*IsDtorCallInCtor*/ false
+ << /*IsObjCPtr*/ true;
}
return true;
@@ -9567,9 +9469,9 @@ bool SpecialMemberDeletionInfo::shouldDeleteForBase(CXXBaseSpecifier *Base) {
if (BaseCtor->isDeleted() && Diagnose) {
S.Diag(Base->getBeginLoc(),
diag::note_deleted_special_member_class_subobject)
- << getEffectiveCSM() << MD->getParent() << /*IsField*/ false
- << Base->getType() << /*Deleted*/ 1 << /*IsDtorCallInCtor*/ false
- << /*IsObjCPtr*/false;
+ << llvm::to_underlying(getEffectiveCSM()) << MD->getParent()
+ << /*IsField*/ false << Base->getType() << /*Deleted*/ 1
+ << /*IsDtorCallInCtor*/ false << /*IsObjCPtr*/ false;
S.NoteDeletedFunction(BaseCtor);
}
return BaseCtor->isDeleted();
@@ -9586,7 +9488,7 @@ bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) {
if (inUnion() && shouldDeleteForVariantObjCPtrMember(FD, FieldType))
return true;
- if (CSM == Sema::CXXDefaultConstructor) {
+ if (CSM == CXXSpecialMemberKind::DefaultConstructor) {
// For a default constructor, all references must be initialized in-class
// and, if a union, it must have a non-const member.
if (FieldType->isReferenceType() && !FD->hasInClassInitializer()) {
@@ -9609,7 +9511,7 @@ bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) {
if (inUnion() && !FieldType.isConstQualified())
AllFieldsAreConst = false;
- } else if (CSM == Sema::CXXCopyConstructor) {
+ } else if (CSM == CXXSpecialMemberKind::CopyConstructor) {
// For a copy constructor, data members must not be of rvalue reference
// type.
if (FieldType->isRValueReferenceType()) {
@@ -9660,8 +9562,8 @@ bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) {
}
// At least one member in each anonymous union must be non-const
- if (CSM == Sema::CXXDefaultConstructor && AllVariantFieldsAreConst &&
- !FieldRecord->field_empty()) {
+ if (CSM == CXXSpecialMemberKind::DefaultConstructor &&
+ AllVariantFieldsAreConst && !FieldRecord->field_empty()) {
if (Diagnose)
S.Diag(FieldRecord->getLocation(),
diag::note_deleted_default_ctor_all_const)
@@ -9689,10 +9591,11 @@ bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) {
bool SpecialMemberDeletionInfo::shouldDeleteForAllConstMembers() {
// This is a silly definition, because it gives an empty union a deleted
// default constructor. Don't do that.
- if (CSM == Sema::CXXDefaultConstructor && inUnion() && AllFieldsAreConst) {
+ if (CSM == CXXSpecialMemberKind::DefaultConstructor && inUnion() &&
+ AllFieldsAreConst) {
bool AnyFields = false;
for (auto *F : MD->getParent()->fields())
- if ((AnyFields = !F->isUnnamedBitfield()))
+ if ((AnyFields = !F->isUnnamedBitField()))
break;
if (!AnyFields)
return false;
@@ -9708,14 +9611,16 @@ bool SpecialMemberDeletionInfo::shouldDeleteForAllConstMembers() {
/// Determine whether a defaulted special member function should be defined as
/// deleted, as specified in C++11 [class.ctor]p5, C++11 [class.copy]p11,
/// C++11 [class.copy]p23, and C++11 [class.dtor]p5.
-bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
+bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD,
+ CXXSpecialMemberKind CSM,
InheritedConstructorInfo *ICI,
bool Diagnose) {
if (MD->isInvalidDecl())
return false;
CXXRecordDecl *RD = MD->getParent();
assert(!RD->isDependentType() && "do deletion after instantiation");
- if (!LangOpts.CPlusPlus11 || RD->isInvalidDecl())
+ if (!LangOpts.CPlusPlus || (!LangOpts.CPlusPlus11 && !RD->isLambda()) ||
+ RD->isInvalidDecl())
return false;
// C++11 [expr.lambda.prim]p19:
@@ -9724,7 +9629,8 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
// assignment operator.
// C++2a adds back these operators if the lambda has no lambda-capture.
if (RD->isLambda() && !RD->lambdaIsDefaultConstructibleAndAssignable() &&
- (CSM == CXXDefaultConstructor || CSM == CXXCopyAssignment)) {
+ (CSM == CXXSpecialMemberKind::DefaultConstructor ||
+ CSM == CXXSpecialMemberKind::CopyAssignment)) {
if (Diagnose)
Diag(RD->getLocation(), diag::note_lambda_decl);
return true;
@@ -9733,16 +9639,16 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
// For an anonymous struct or union, the copy and assignment special members
// will never be used, so skip the check. For an anonymous union declared at
// namespace scope, the constructor and destructor are used.
- if (CSM != CXXDefaultConstructor && CSM != CXXDestructor &&
- RD->isAnonymousStructOrUnion())
+ if (CSM != CXXSpecialMemberKind::DefaultConstructor &&
+ CSM != CXXSpecialMemberKind::Destructor && RD->isAnonymousStructOrUnion())
return false;
// C++11 [class.copy]p7, p18:
// If the class definition declares a move constructor or move assignment
// operator, an implicitly declared copy constructor or copy assignment
// operator is defined as deleted.
- if (MD->isImplicit() &&
- (CSM == CXXCopyConstructor || CSM == CXXCopyAssignment)) {
+ if (MD->isImplicit() && (CSM == CXXSpecialMemberKind::CopyConstructor ||
+ CSM == CXXSpecialMemberKind::CopyAssignment)) {
CXXMethodDecl *UserDeclaredMove = nullptr;
// In Microsoft mode up to MSVC 2013, a user-declared move only causes the
@@ -9753,7 +9659,8 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
!getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2015);
if (RD->hasUserDeclaredMoveConstructor() &&
- (!DeletesOnlyMatchingCopy || CSM == CXXCopyConstructor)) {
+ (!DeletesOnlyMatchingCopy ||
+ CSM == CXXSpecialMemberKind::CopyConstructor)) {
if (!Diagnose) return true;
// Find any user-declared move constructor.
@@ -9765,7 +9672,8 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
}
assert(UserDeclaredMove);
} else if (RD->hasUserDeclaredMoveAssignment() &&
- (!DeletesOnlyMatchingCopy || CSM == CXXCopyAssignment)) {
+ (!DeletesOnlyMatchingCopy ||
+ CSM == CXXSpecialMemberKind::CopyAssignment)) {
if (!Diagnose) return true;
// Find any user-declared move assignment operator.
@@ -9781,8 +9689,8 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
if (UserDeclaredMove) {
Diag(UserDeclaredMove->getLocation(),
diag::note_deleted_copy_user_declared_move)
- << (CSM == CXXCopyAssignment) << RD
- << UserDeclaredMove->isMoveAssignmentOperator();
+ << (CSM == CXXSpecialMemberKind::CopyAssignment) << RD
+ << UserDeclaredMove->isMoveAssignmentOperator();
return true;
}
}
@@ -9793,7 +9701,7 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
// C++11 [class.dtor]p5:
// -- for a virtual destructor, lookup of the non-array deallocation function
// results in an ambiguity or in a function that is deleted or inaccessible
- if (CSM == CXXDestructor && MD->isVirtual()) {
+ if (CSM == CXXSpecialMemberKind::Destructor && MD->isVirtual()) {
FunctionDecl *OperatorDelete = nullptr;
DeclarationName Name =
Context.DeclarationNames.getCXXOperatorName(OO_Delete);
@@ -9825,15 +9733,15 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
// failed.
// For inherited constructors (non-null ICI), CSM may be passed so that MD
// is treated as certain special member, which may not reflect what special
- // member MD really is. However inferCUDATargetForImplicitSpecialMember
+ // member MD really is. However inferTargetForImplicitSpecialMember
// expects CSM to match MD, therefore recalculate CSM.
assert(ICI || CSM == getSpecialMember(MD));
auto RealCSM = CSM;
if (ICI)
RealCSM = getSpecialMember(MD);
- return inferCUDATargetForImplicitSpecialMember(RD, RealCSM, MD,
- SMI.ConstArg, Diagnose);
+ return CUDA().inferTargetForImplicitSpecialMember(RD, RealCSM, MD,
+ SMI.ConstArg, Diagnose);
}
return false;
@@ -9867,7 +9775,7 @@ void Sema::DiagnoseDeletedDefaultedFunction(FunctionDecl *FD) {
/// If \p ForCall is true, look at CXXRecord::HasTrivialSpecialMembersForCall to
/// determine whether the special member is trivial.
static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD,
- Sema::CXXSpecialMember CSM, unsigned Quals,
+ CXXSpecialMemberKind CSM, unsigned Quals,
bool ConstRHS,
Sema::TrivialABIHandling TAH,
CXXMethodDecl **Selected) {
@@ -9875,10 +9783,10 @@ static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD,
*Selected = nullptr;
switch (CSM) {
- case Sema::CXXInvalid:
+ case CXXSpecialMemberKind::Invalid:
llvm_unreachable("not a special member");
- case Sema::CXXDefaultConstructor:
+ case CXXSpecialMemberKind::DefaultConstructor:
// C++11 [class.ctor]p5:
// A default constructor is trivial if:
// - all the [direct subobjects] have trivial default constructors
@@ -9907,7 +9815,7 @@ static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD,
return false;
- case Sema::CXXDestructor:
+ case CXXSpecialMemberKind::Destructor:
// C++11 [class.dtor]p5:
// A destructor is trivial if:
// - all the direct [subobjects] have trivial destructors
@@ -9924,7 +9832,7 @@ static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD,
return false;
- case Sema::CXXCopyConstructor:
+ case CXXSpecialMemberKind::CopyConstructor:
// C++11 [class.copy]p12:
// A copy constructor is trivial if:
// - the constructor selected to copy each direct [subobject] is trivial
@@ -9945,7 +9853,7 @@ static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD,
// struct B { mutable A a; };
goto NeedOverloadResolution;
- case Sema::CXXCopyAssignment:
+ case CXXSpecialMemberKind::CopyAssignment:
// C++11 [class.copy]p25:
// A copy assignment operator is trivial if:
// - the assignment operator selected to copy each direct [subobject] is
@@ -9960,8 +9868,8 @@ static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD,
// treat that as a language defect.
goto NeedOverloadResolution;
- case Sema::CXXMoveConstructor:
- case Sema::CXXMoveAssignment:
+ case CXXSpecialMemberKind::MoveConstructor:
+ case CXXSpecialMemberKind::MoveAssignment:
NeedOverloadResolution:
Sema::SpecialMemberOverloadResult SMOR =
lookupCallFromSpecialMember(S, RD, CSM, Quals, ConstRHS);
@@ -9985,7 +9893,8 @@ static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD,
*Selected = SMOR.getMethod();
if (TAH == Sema::TAH_ConsiderTrivialABI &&
- (CSM == Sema::CXXCopyConstructor || CSM == Sema::CXXMoveConstructor))
+ (CSM == CXXSpecialMemberKind::CopyConstructor ||
+ CSM == CXXSpecialMemberKind::MoveConstructor))
return SMOR.getMethod()->isTrivialForCall();
return SMOR.getMethod()->isTrivial();
}
@@ -10023,9 +9932,10 @@ enum TrivialSubobjectKind {
/// Check whether the special member selected for a given type would be trivial.
static bool checkTrivialSubobjectCall(Sema &S, SourceLocation SubobjLoc,
QualType SubType, bool ConstRHS,
- Sema::CXXSpecialMember CSM,
+ CXXSpecialMemberKind CSM,
TrivialSubobjectKind Kind,
- Sema::TrivialABIHandling TAH, bool Diagnose) {
+ Sema::TrivialABIHandling TAH,
+ bool Diagnose) {
CXXRecordDecl *SubRD = SubType->getAsCXXRecordDecl();
if (!SubRD)
return true;
@@ -10039,27 +9949,28 @@ static bool checkTrivialSubobjectCall(Sema &S, SourceLocation SubobjLoc,
if (ConstRHS)
SubType.addConst();
- if (!Selected && CSM == Sema::CXXDefaultConstructor) {
+ if (!Selected && CSM == CXXSpecialMemberKind::DefaultConstructor) {
S.Diag(SubobjLoc, diag::note_nontrivial_no_def_ctor)
<< Kind << SubType.getUnqualifiedType();
if (CXXConstructorDecl *CD = findUserDeclaredCtor(SubRD))
S.Diag(CD->getLocation(), diag::note_user_declared_ctor);
} else if (!Selected)
S.Diag(SubobjLoc, diag::note_nontrivial_no_copy)
- << Kind << SubType.getUnqualifiedType() << CSM << SubType;
+ << Kind << SubType.getUnqualifiedType() << llvm::to_underlying(CSM)
+ << SubType;
else if (Selected->isUserProvided()) {
if (Kind == TSK_CompleteObject)
S.Diag(Selected->getLocation(), diag::note_nontrivial_user_provided)
- << Kind << SubType.getUnqualifiedType() << CSM;
+ << Kind << SubType.getUnqualifiedType() << llvm::to_underlying(CSM);
else {
S.Diag(SubobjLoc, diag::note_nontrivial_user_provided)
- << Kind << SubType.getUnqualifiedType() << CSM;
+ << Kind << SubType.getUnqualifiedType() << llvm::to_underlying(CSM);
S.Diag(Selected->getLocation(), diag::note_declared_at);
}
} else {
if (Kind != TSK_CompleteObject)
S.Diag(SubobjLoc, diag::note_nontrivial_subobject)
- << Kind << SubType.getUnqualifiedType() << CSM;
+ << Kind << SubType.getUnqualifiedType() << llvm::to_underlying(CSM);
// Explain why the defaulted or deleted special member isn't trivial.
S.SpecialMemberIsTrivial(Selected, CSM, Sema::TAH_IgnoreTrivialABI,
@@ -10073,12 +9984,11 @@ static bool checkTrivialSubobjectCall(Sema &S, SourceLocation SubobjLoc,
/// Check whether the members of a class type allow a special member to be
/// trivial.
static bool checkTrivialClassMembers(Sema &S, CXXRecordDecl *RD,
- Sema::CXXSpecialMember CSM,
- bool ConstArg,
+ CXXSpecialMemberKind CSM, bool ConstArg,
Sema::TrivialABIHandling TAH,
bool Diagnose) {
for (const auto *FI : RD->fields()) {
- if (FI->isInvalidDecl() || FI->isUnnamedBitfield())
+ if (FI->isInvalidDecl() || FI->isUnnamedBitField())
continue;
QualType FieldType = S.Context.getBaseElementType(FI->getType());
@@ -10095,7 +10005,8 @@ static bool checkTrivialClassMembers(Sema &S, CXXRecordDecl *RD,
// A default constructor is trivial if [...]
// -- no non-static data member of its class has a
// brace-or-equal-initializer
- if (CSM == Sema::CXXDefaultConstructor && FI->hasInClassInitializer()) {
+ if (CSM == CXXSpecialMemberKind::DefaultConstructor &&
+ FI->hasInClassInitializer()) {
if (Diagnose)
S.Diag(FI->getLocation(), diag::note_nontrivial_default_member_init)
<< FI;
@@ -10122,23 +10033,21 @@ static bool checkTrivialClassMembers(Sema &S, CXXRecordDecl *RD,
return true;
}
-/// Diagnose why the specified class does not have a trivial special member of
-/// the given kind.
-void Sema::DiagnoseNontrivial(const CXXRecordDecl *RD, CXXSpecialMember CSM) {
+void Sema::DiagnoseNontrivial(const CXXRecordDecl *RD,
+ CXXSpecialMemberKind CSM) {
QualType Ty = Context.getRecordType(RD);
- bool ConstArg = (CSM == CXXCopyConstructor || CSM == CXXCopyAssignment);
+ bool ConstArg = (CSM == CXXSpecialMemberKind::CopyConstructor ||
+ CSM == CXXSpecialMemberKind::CopyAssignment);
checkTrivialSubobjectCall(*this, RD->getLocation(), Ty, ConstArg, CSM,
TSK_CompleteObject, TAH_IgnoreTrivialABI,
/*Diagnose*/true);
}
-/// Determine whether a defaulted or deleted special member function is trivial,
-/// as specified in C++11 [class.ctor]p5, C++11 [class.copy]p12,
-/// C++11 [class.copy]p25, and C++11 [class.dtor]p5.
-bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
+bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMemberKind CSM,
TrivialABIHandling TAH, bool Diagnose) {
- assert(!MD->isUserProvided() && CSM != CXXInvalid && "not special enough");
+ assert(!MD->isUserProvided() && CSM != CXXSpecialMemberKind::Invalid &&
+ "not special enough");
CXXRecordDecl *RD = MD->getParent();
@@ -10148,13 +10057,13 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
// A [special member] is trivial if [...] its parameter-type-list is
// equivalent to the parameter-type-list of an implicit declaration [...]
switch (CSM) {
- case CXXDefaultConstructor:
- case CXXDestructor:
+ case CXXSpecialMemberKind::DefaultConstructor:
+ case CXXSpecialMemberKind::Destructor:
// Trivial default constructors and destructors cannot have parameters.
break;
- case CXXCopyConstructor:
- case CXXCopyAssignment: {
+ case CXXSpecialMemberKind::CopyConstructor:
+ case CXXSpecialMemberKind::CopyAssignment: {
const ParmVarDecl *Param0 = MD->getNonObjectParameter(0);
const ReferenceType *RT = Param0->getType()->getAs<ReferenceType>();
@@ -10183,8 +10092,8 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
break;
}
- case CXXMoveConstructor:
- case CXXMoveAssignment: {
+ case CXXSpecialMemberKind::MoveConstructor:
+ case CXXSpecialMemberKind::MoveAssignment: {
// Trivial move operations always have non-cv-qualified parameters.
const ParmVarDecl *Param0 = MD->getNonObjectParameter(0);
const RValueReferenceType *RT =
@@ -10199,7 +10108,7 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
break;
}
- case CXXInvalid:
+ case CXXSpecialMemberKind::Invalid:
llvm_unreachable("not a special member");
}
@@ -10248,7 +10157,7 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
// C++11 [class.dtor]p5:
// A destructor is trivial if [...]
// -- the destructor is not virtual
- if (CSM == CXXDestructor && MD->isVirtual()) {
+ if (CSM == CXXSpecialMemberKind::Destructor && MD->isVirtual()) {
if (Diagnose)
Diag(MD->getLocation(), diag::note_nontrivial_virtual_dtor) << RD;
return false;
@@ -10257,7 +10166,8 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
// C++11 [class.ctor]p5, C++11 [class.copy]p12, C++11 [class.copy]p25:
// A [special member] for class X is trivial if [...]
// -- class X has no virtual functions and no virtual base classes
- if (CSM != CXXDestructor && MD->getParent()->isDynamicClass()) {
+ if (CSM != CXXSpecialMemberKind::Destructor &&
+ MD->getParent()->isDynamicClass()) {
if (!Diagnose)
return false;
@@ -10364,8 +10274,6 @@ static void AddMostOverridenMethods(const CXXMethodDecl *MD,
AddMostOverridenMethods(O, Methods);
}
-/// Check if a method overloads virtual methods in a base class without
-/// overriding any.
void Sema::FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods) {
if (!MD->getDeclName().isIdentifier())
@@ -10405,8 +10313,6 @@ void Sema::NoteHiddenVirtualMethods(CXXMethodDecl *MD,
}
}
-/// Diagnose methods which overload virtual methods in a base class
-/// without overriding any.
void Sema::DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD) {
if (MD->isInvalidDecl())
return;
@@ -10498,6 +10404,39 @@ void Sema::checkIllFormedTrivialABIStruct(CXXRecordDecl &RD) {
}
}
+void Sema::checkIncorrectVTablePointerAuthenticationAttribute(
+ CXXRecordDecl &RD) {
+ if (RequireCompleteType(RD.getLocation(), Context.getRecordType(&RD),
+ diag::err_incomplete_type_vtable_pointer_auth))
+ return;
+
+ const CXXRecordDecl *PrimaryBase = &RD;
+ if (PrimaryBase->hasAnyDependentBases())
+ return;
+
+ while (1) {
+ assert(PrimaryBase);
+ const CXXRecordDecl *Base = nullptr;
+ for (auto BasePtr : PrimaryBase->bases()) {
+ if (!BasePtr.getType()->getAsCXXRecordDecl()->isDynamicClass())
+ continue;
+ Base = BasePtr.getType()->getAsCXXRecordDecl();
+ break;
+ }
+ if (!Base || Base == PrimaryBase || !Base->isPolymorphic())
+ break;
+ Diag(RD.getAttr<VTablePointerAuthenticationAttr>()->getLocation(),
+ diag::err_non_top_level_vtable_pointer_auth)
+ << &RD << Base;
+ PrimaryBase = Base;
+ }
+
+ if (!RD.isPolymorphic())
+ Diag(RD.getAttr<VTablePointerAuthenticationAttr>()->getLocation(),
+ diag::err_non_polymorphic_vtable_pointer_auth)
+ << &RD;
+}
+
void Sema::ActOnFinishCXXMemberSpecification(
Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList) {
@@ -10561,11 +10500,6 @@ static void findImplicitlyDeclaredEqualityComparisons(
}
}
-/// AddImplicitlyDeclaredMembersToClass - Adds any implicitly-declared
-/// special functions, such as the default constructor, copy
-/// constructor, or destructor, to the given C++ class (C++
-/// [special]p1). This routine can only be executed just before the
-/// definition of the class is complete.
void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
// Don't add implicit special members to templated classes.
// FIXME: This means unqualified lookups for 'operator=' within a class
@@ -10742,9 +10676,6 @@ void Sema::ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *RecordD) {
PopDeclContext();
}
-/// This is used to implement the constant expression evaluation part of the
-/// attribute enable_if extension. There is nothing in standard C++ which would
-/// require reentering parameters.
void Sema::ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param) {
if (!Param)
return;
@@ -10754,14 +10685,6 @@ void Sema::ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param) {
IdResolver.AddDecl(Param);
}
-/// ActOnStartDelayedCXXMethodDeclaration - We have completed
-/// parsing a top-level (non-nested) C++ class, and we are now
-/// parsing those parts of the given Method declaration that could
-/// not be parsed earlier (C++ [class.mem]p2), such as default
-/// arguments. This action should enter the scope of the given
-/// Method declaration as if we had just parsed the qualified method
-/// name. However, it should not bring the parameters into scope;
-/// that will be performed by ActOnDelayedCXXMethodParameter.
void Sema::ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *MethodD) {
}
@@ -10781,12 +10704,6 @@ void Sema::ActOnDelayedCXXMethodParameter(Scope *S, Decl *ParamD) {
IdResolver.AddDecl(Param);
}
-/// ActOnFinishDelayedCXXMethodDeclaration - We have finished
-/// processing the delayed method declaration for Method. The method
-/// declaration is now considered finished. There may be a separate
-/// ActOnStartOfFunctionDef action later (not necessarily
-/// immediately!) for this method, if it was also defined inside the
-/// class body.
void Sema::ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *MethodD) {
if (!MethodD)
return;
@@ -10828,12 +10745,6 @@ static void checkMethodTypeQualifiers(Sema &S, Declarator &D, unsigned DiagID) {
}
}
-/// CheckConstructorDeclarator - Called by ActOnDeclarator to check
-/// the well-formedness of the constructor declarator @p D with type @p
-/// R. If there are any errors in the declarator, this routine will
-/// emit diagnostics and set the invalid bit to true. In any case, the type
-/// will be updated to reflect a well-formed type for the constructor and
-/// returned.
QualType Sema::CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass &SC) {
bool isVirtual = D.getDeclSpec().isVirtualSpecified();
@@ -10894,9 +10805,6 @@ QualType Sema::CheckConstructorDeclarator(Declarator &D, QualType R,
return Context.getFunctionType(Context.VoidTy, Proto->getParamTypes(), EPI);
}
-/// CheckConstructor - Checks a fully-formed constructor for
-/// well-formedness, issuing any diagnostics required. Returns true if
-/// the constructor declarator is invalid.
void Sema::CheckConstructor(CXXConstructorDecl *Constructor) {
CXXRecordDecl *ClassDecl
= dyn_cast<CXXRecordDecl>(Constructor->getDeclContext());
@@ -10929,9 +10837,6 @@ void Sema::CheckConstructor(CXXConstructorDecl *Constructor) {
}
}
-/// CheckDestructor - Checks a fully-formed destructor definition for
-/// well-formedness, issuing any diagnostics required. Returns true
-/// on error.
bool Sema::CheckDestructor(CXXDestructorDecl *Destructor) {
CXXRecordDecl *RD = Destructor->getParent();
@@ -10981,12 +10886,6 @@ bool Sema::CheckDestructor(CXXDestructorDecl *Destructor) {
return false;
}
-/// CheckDestructorDeclarator - Called by ActOnDeclarator to check
-/// the well-formednes of the destructor declarator @p D with type @p
-/// R. If there are any errors in the declarator, this routine will
-/// emit diagnostics and set the declarator to invalid. Even if this happens,
-/// will be updated to reflect a well-formed type for the destructor and
-/// returned.
QualType Sema::CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC) {
// C++ [class.dtor]p1:
@@ -11103,12 +11002,6 @@ static void extendRight(SourceRange &R, SourceRange After) {
R.setEnd(After.getEnd());
}
-/// CheckConversionDeclarator - Called by ActOnDeclarator to check the
-/// well-formednes of the conversion function declarator @p D with
-/// type @p R. If there are any errors in the declarator, this routine
-/// will emit diagnostics and return true. Otherwise, it will return
-/// false. Either way, the type @p R will be updated to reflect a
-/// well-formed type for the conversion operator.
void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC) {
// C++ [class.conv.fct]p1:
@@ -11282,10 +11175,6 @@ void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
<< SourceRange(DS.getExplicitSpecRange());
}
-/// ActOnConversionDeclarator - Called by ActOnDeclarator to complete
-/// the declaration of the given C++ conversion function. This routine
-/// is responsible for recording the conversion function in the C++
-/// class, if possible.
Decl *Sema::ActOnConversionDeclarator(CXXConversionDecl *Conversion) {
assert(Conversion && "Expected to receive a conversion function declaration");
@@ -11329,7 +11218,7 @@ Decl *Sema::ActOnConversionDeclarator(CXXConversionDecl *Conversion) {
if (ConvType->isUndeducedAutoType()) {
Diag(Conversion->getTypeSpecStartLoc(), diag::err_auto_not_allowed)
<< getReturnTypeLoc(Conversion).getSourceRange()
- << llvm::to_underlying(ConvType->getAs<AutoType>()->getKeyword())
+ << llvm::to_underlying(ConvType->castAs<AutoType>()->getKeyword())
<< /* in declaration of conversion function template= */ 24;
}
@@ -11383,7 +11272,9 @@ void Sema::CheckExplicitObjectMemberFunction(Declarator &D,
<< ExplicitObjectParam->getSourceRange();
}
- if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static) {
+ if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static ||
+ (D.getContext() == clang::DeclaratorContext::Member &&
+ D.isStaticMember())) {
Diag(ExplicitObjectParam->getBeginLoc(),
diag::err_explicit_object_parameter_nonmember)
<< D.getSourceRange() << /*static=*/0 << IsLambda;
@@ -11397,6 +11288,34 @@ void Sema::CheckExplicitObjectMemberFunction(Declarator &D,
D.setInvalidType();
}
+ // Friend declarations require some care. Consider:
+ //
+ // namespace N {
+ // struct A{};
+ // int f(A);
+ // }
+ //
+ // struct S {
+ // struct T {
+ // int f(this T);
+ // };
+ //
+ // friend int T::f(this T); // Allow this.
+ // friend int f(this S); // But disallow this.
+ // friend int N::f(this A); // And disallow this.
+ // };
+ //
+ // Here, it seems to suffice to check whether the scope
+ // specifier designates a class type.
+ if (D.getDeclSpec().isFriendSpecified() &&
+ !isa_and_present<CXXRecordDecl>(
+ computeDeclContext(D.getCXXScopeSpec()))) {
+ Diag(ExplicitObjectParam->getBeginLoc(),
+ diag::err_explicit_object_parameter_nonmember)
+ << D.getSourceRange() << /*non-member=*/2 << IsLambda;
+ D.setInvalidType();
+ }
+
if (IsLambda && FTI.hasMutableQualifier()) {
Diag(ExplicitObjectParam->getBeginLoc(),
diag::err_explicit_object_parameter_mutable)
@@ -11407,10 +11326,8 @@ void Sema::CheckExplicitObjectMemberFunction(Declarator &D,
return;
if (!DC || !DC->isRecord()) {
- Diag(ExplicitObjectParam->getLocation(),
- diag::err_explicit_object_parameter_nonmember)
- << D.getSourceRange() << /*non-member=*/2 << IsLambda;
- D.setInvalidType();
+ assert(D.isInvalidType() && "Explicit object parameter in non-member "
+ "should have been diagnosed already");
return;
}
@@ -11455,10 +11372,6 @@ struct BadSpecifierDiagnoser {
};
}
-/// Check the validity of a declarator that we parsed for a deduction-guide.
-/// These aren't actually declarators in the grammar, so we need to check that
-/// the user didn't specify any pieces that are not part of the deduction-guide
-/// grammar. Return true on invalid deduction-guide.
bool Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC) {
TemplateName GuidedTemplate = D.getName().TemplateName.get().get();
@@ -11540,12 +11453,12 @@ bool Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
TemplateName SpecifiedName = RetTST.getTypePtr()->getTemplateName();
bool TemplateMatches =
Context.hasSameTemplateName(SpecifiedName, GuidedTemplate);
- auto TKind = SpecifiedName.getKind();
- // A Using TemplateName can't actually be valid (either it's qualified, or
- // we're in the wrong scope). But we have diagnosed these problems
- // already.
- bool SimplyWritten = TKind == TemplateName::Template ||
- TKind == TemplateName::UsingTemplate;
+
+ const QualifiedTemplateName *Qualifiers =
+ SpecifiedName.getAsQualifiedTemplateName();
+ assert(Qualifiers && "expected QualifiedTemplate");
+ bool SimplyWritten = !Qualifiers->hasTemplateKeyword() &&
+ Qualifiers->getQualifier() == nullptr;
if (SimplyWritten && TemplateMatches)
AcceptableReturnType = true;
else {
@@ -11652,7 +11565,7 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
// look through using directives, just look for any ordinary names
// as if by qualified name lookup.
LookupResult R(*this, II, IdentLoc, LookupOrdinaryName,
- ForExternalRedeclaration);
+ RedeclarationKind::ForExternalRedeclaration);
LookupQualifiedName(R, CurContext->getRedeclContext());
NamedDecl *PrevDecl =
R.isSingleResult() ? R.getRepresentativeDecl() : nullptr;
@@ -11710,6 +11623,7 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
ProcessDeclAttributeList(DeclRegionScope, Namespc, AttrList);
AddPragmaAttributes(DeclRegionScope, Namespc);
+ ProcessAPINotes(Namespc);
// FIXME: Should we be merging attributes?
if (const VisibilityAttr *Attr = Namespc->getAttr<VisibilityAttr>())
@@ -11781,8 +11695,6 @@ static inline NamespaceDecl *getNamespaceDecl(NamedDecl *D) {
return dyn_cast_or_null<NamespaceDecl>(D);
}
-/// ActOnFinishNamespaceDef - This callback is called after a namespace is
-/// exited. Decl is the DeclTy returned by ActOnStartNamespaceDef.
void Sema::ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace) {
NamespaceDecl *Namespc = dyn_cast_or_null<NamespaceDecl>(Dcl);
assert(Namespc && "Invalid parameter, expected NamespaceDecl");
@@ -11946,8 +11858,6 @@ QualType Sema::CheckComparisonCategoryType(ComparisonCategoryType Kind,
return Info->getType();
}
-/// Retrieve the special "std" namespace, which may require us to
-/// implicitly define the namespace.
NamespaceDecl *Sema::getOrCreateStdNamespace() {
if (!StdNamespace) {
// The "std" namespace has not yet been defined, so build one implicitly.
@@ -11988,11 +11898,17 @@ bool Sema::isStdInitializerList(QualType Ty, QualType *Element) {
Template = Specialization->getSpecializedTemplate();
Arguments = Specialization->getTemplateArgs().data();
- } else if (const TemplateSpecializationType *TST =
- Ty->getAs<TemplateSpecializationType>()) {
- Template = dyn_cast_or_null<ClassTemplateDecl>(
- TST->getTemplateName().getAsTemplateDecl());
- Arguments = TST->template_arguments().begin();
+ } else {
+ const TemplateSpecializationType *TST = nullptr;
+ if (auto *ICN = Ty->getAs<InjectedClassNameType>())
+ TST = ICN->getInjectedTST();
+ else
+ TST = Ty->getAs<TemplateSpecializationType>();
+ if (TST) {
+ Template = dyn_cast_or_null<ClassTemplateDecl>(
+ TST->getTemplateName().getAsTemplateDecl());
+ Arguments = TST->template_arguments().begin();
+ }
}
if (!Template)
return false;
@@ -12003,7 +11919,7 @@ bool Sema::isStdInitializerList(QualType Ty, QualType *Element) {
if (TemplateClass->getIdentifier() !=
&PP.getIdentifierTable().get("initializer_list") ||
!getStdNamespace()->InEnclosingNamespaceSetOf(
- TemplateClass->getDeclContext()))
+ TemplateClass->getNonTransparentDeclContext()))
return false;
// This is a template called std::initializer_list, but is it the right
// template?
@@ -12162,8 +12078,8 @@ static bool TryNamespaceTypoCorrection(Sema &S, LookupResult &R, Scope *Sc,
DiagnoseInvisibleNamespace(Corrected, S);
} else if (DeclContext *DC = S.computeDeclContext(SS, false)) {
std::string CorrectedStr(Corrected.getAsString(S.getLangOpts()));
- bool DroppedSpecifier = Corrected.WillReplaceSpecifier() &&
- Ident->getName().equals(CorrectedStr);
+ bool DroppedSpecifier =
+ Corrected.WillReplaceSpecifier() && Ident->getName() == CorrectedStr;
S.diagnoseTypo(Corrected,
S.PDiag(diag::err_using_directive_member_suggest)
<< Ident << DC << DroppedSpecifier << SS.getRange(),
@@ -12188,10 +12104,8 @@ Decl *Sema::ActOnUsingDirective(Scope *S, SourceLocation UsingLoc,
assert(NamespcName && "Invalid NamespcName.");
assert(IdentLoc.isValid() && "Invalid NamespceName location.");
- // This can only happen along a recovery path.
- while (S->isTemplateParamScope())
- S = S->getParent();
- assert(S->getFlags() & Scope::DeclScope && "Invalid Scope.");
+ // Get the innermost enclosing declaration scope.
+ S = S->getDeclParent();
UsingDirectiveDecl *UDir = nullptr;
NestedNameSpecifier *Qualifier = nullptr;
@@ -12200,7 +12114,7 @@ Decl *Sema::ActOnUsingDirective(Scope *S, SourceLocation UsingLoc,
// Lookup namespace name.
LookupResult R(*this, NamespcName, IdentLoc, LookupNamespaceName);
- LookupParsedName(R, S, &SS);
+ LookupParsedName(R, S, &SS, /*ObjectType=*/QualType());
if (R.isAmbiguous())
return nullptr;
@@ -12256,8 +12170,10 @@ Decl *Sema::ActOnUsingDirective(Scope *S, SourceLocation UsingLoc,
Diag(IdentLoc, diag::err_expected_namespace_name) << SS.getRange();
}
- if (UDir)
+ if (UDir) {
ProcessDeclAttributeList(S, UDir, AttrList);
+ ProcessAPINotes(UDir);
+ }
return UDir;
}
@@ -12361,23 +12277,24 @@ Decl *Sema::ActOnUsingDeclaration(Scope *S, AccessSpecifier AS,
Decl *Sema::ActOnUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
- SourceLocation EnumLoc,
- SourceLocation IdentLoc,
- IdentifierInfo &II, CXXScopeSpec *SS) {
+ SourceLocation EnumLoc, SourceRange TyLoc,
+ const IdentifierInfo &II, ParsedType Ty,
+ CXXScopeSpec *SS) {
assert(!SS->isInvalid() && "ScopeSpec is invalid");
TypeSourceInfo *TSI = nullptr;
- QualType EnumTy = GetTypeFromParser(
- getTypeName(II, IdentLoc, S, SS, /*isClassName=*/false,
- /*HasTrailingDot=*/false,
- /*ObjectType=*/nullptr, /*IsCtorOrDtorName=*/false,
- /*WantNontrivialTypeSourceInfo=*/true),
- &TSI);
+ SourceLocation IdentLoc = TyLoc.getBegin();
+ QualType EnumTy = GetTypeFromParser(Ty, &TSI);
if (EnumTy.isNull()) {
Diag(IdentLoc, SS && isDependentScopeSpecifier(*SS)
? diag::err_using_enum_is_dependent
: diag::err_unknown_typename)
<< II.getName()
- << SourceRange(SS ? SS->getBeginLoc() : IdentLoc, IdentLoc);
+ << SourceRange(SS ? SS->getBeginLoc() : IdentLoc, TyLoc.getEnd());
+ return nullptr;
+ }
+
+ if (EnumTy->isDependentType()) {
+ Diag(IdentLoc, diag::err_using_enum_is_dependent);
return nullptr;
}
@@ -12424,9 +12341,6 @@ IsEquivalentForUsingDecl(ASTContext &Context, NamedDecl *D1, NamedDecl *D2) {
return false;
}
-
-/// Determines whether to create a using shadow decl for a particular
-/// decl, given the set of decls existing prior to this using lookup.
bool Sema::CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Orig,
const LookupResult &Previous,
UsingShadowDecl *&PrevShadow) {
@@ -12609,7 +12523,6 @@ static bool isVirtualDirectBase(CXXRecordDecl *Derived, CXXRecordDecl *Base) {
llvm_unreachable("not a direct base class");
}
-/// Builds a shadow declaration corresponding to a 'using' declaration.
UsingShadowDecl *Sema::BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
NamedDecl *Orig,
UsingShadowDecl *PrevDecl) {
@@ -12653,33 +12566,6 @@ UsingShadowDecl *Sema::BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
return Shadow;
}
-/// Hides a using shadow declaration. This is required by the current
-/// using-decl implementation when a resolvable using declaration in a
-/// class is followed by a declaration which would hide or override
-/// one or more of the using decl's targets; for example:
-///
-/// struct Base { void foo(int); };
-/// struct Derived : Base {
-/// using Base::foo;
-/// void foo(int);
-/// };
-///
-/// The governing language is C++03 [namespace.udecl]p12:
-///
-/// When a using-declaration brings names from a base class into a
-/// derived class scope, member functions in the derived class
-/// override and/or hide member functions with the same name and
-/// parameter types in a base class (rather than conflicting).
-///
-/// There are two ways to implement this:
-/// (1) optimistically create shadow decls when they're not hidden
-/// by existing declarations, or
-/// (2) don't create any shadow decls (or at least don't make them
-/// visible) until we've fully parsed/instantiated the class.
-/// The problem with (1) is that we might have to retroactively remove
-/// a shadow decl, which requires several O(n) operations because the
-/// decl structures are (very reasonably) not designed for removal.
-/// (2) avoids this but is very fiddly and phase-dependent.
void Sema::HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow) {
if (Shadow->getDeclName().getNameKind() ==
DeclarationName::CXXConversionFunctionName)
@@ -12801,11 +12687,6 @@ private:
};
} // end anonymous namespace
-/// Remove decls we can't actually see from a lookup being used to declare
-/// shadow using decls.
-///
-/// \param S - The scope of the potential shadow decl
-/// \param Previous - The lookup of a potential shadow decl's name.
void Sema::FilterUsingLookup(Scope *S, LookupResult &Previous) {
// It is really dumb that we have to do this.
LookupResult::Filter F = Previous.makeFilter();
@@ -12824,11 +12705,6 @@ void Sema::FilterUsingLookup(Scope *S, LookupResult &Previous) {
F.done();
}
-/// Builds a using declaration.
-///
-/// \param IsInstantiation - Whether this call arises from an
-/// instantiation of an unresolved using declaration. We treat
-/// the lookup differently for these declarations.
NamedDecl *Sema::BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
@@ -12852,7 +12728,7 @@ NamedDecl *Sema::BuildUsingDeclaration(
// Do the redeclaration lookup in the current scope.
LookupResult Previous(*this, UsingName, LookupUsingDeclName,
- ForVisibleRedeclaration);
+ RedeclarationKind::ForVisibleRedeclaration);
Previous.setHideTags(false);
if (S) {
LookupName(Previous, S);
@@ -13058,7 +12934,10 @@ NamedDecl *Sema::BuildUsingDeclaration(
// A using-declaration shall not name a namespace.
if (R.getAsSingle<NamespaceDecl>()) {
Diag(IdentLoc, diag::err_using_decl_can_not_refer_to_namespace)
- << SS.getRange();
+ << SS.getRange();
+ // Suggest using 'using namespace ...' instead.
+ Diag(SS.getBeginLoc(), diag::note_namespace_using_decl)
+ << FixItHint::CreateInsertion(SS.getBeginLoc(), "namespace ");
return BuildInvalid();
}
@@ -13095,7 +12974,7 @@ NamedDecl *Sema::BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
/// In class scope, check if this is a duplicate, for better a diagnostic.
DeclarationNameInfo UsingEnumName(ED->getDeclName(), NameLoc);
LookupResult Previous(*this, UsingEnumName, LookupUsingDeclName,
- ForVisibleRedeclaration);
+ RedeclarationKind::ForVisibleRedeclaration);
LookupName(Previous, S);
@@ -13128,7 +13007,7 @@ NamedDecl *Sema::BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
UsingShadowDecl *PrevDecl = nullptr;
DeclarationNameInfo DNI(EC->getDeclName(), EC->getLocation());
LookupResult Previous(*this, DNI, LookupOrdinaryName,
- ForVisibleRedeclaration);
+ RedeclarationKind::ForVisibleRedeclaration);
LookupName(Previous, S);
FilterUsingLookup(S, Previous);
@@ -13152,7 +13031,6 @@ NamedDecl *Sema::BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
return UPD;
}
-/// Additional checks for a using declaration referring to a constructor name.
bool Sema::CheckInheritingConstructorUsingDecl(UsingDecl *UD) {
assert(!UD->hasTypename() && "expecting a constructor name");
@@ -13180,9 +13058,6 @@ bool Sema::CheckInheritingConstructorUsingDecl(UsingDecl *UD) {
return false;
}
-/// Checks that the given using declaration is not an invalid
-/// redeclaration. Note that this is checking only for the using decl
-/// itself, not for any ill-formedness among the UsingShadowDecls.
bool Sema::CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
@@ -13258,13 +13133,6 @@ bool Sema::CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
return false;
}
-/// Checks that the given nested-name qualifier used in a using decl
-/// in the current context is appropriately related to the current
-/// scope. If an error is found, diagnoses it and returns true.
-/// R is nullptr, if the caller has not (yet) done a lookup, otherwise it's the
-/// result of that lookup. UD is likewise nullptr, except when we have an
-/// already-populated UsingDecl whose shadow decls contain the same information
-/// (i.e. we're instantiating a UsingDecl with non-dependent scope).
bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
@@ -13499,11 +13367,8 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec) {
- // Skip up to the relevant declaration scope.
- while (S->isTemplateParamScope())
- S = S->getParent();
- assert((S->getFlags() & Scope::DeclScope) &&
- "got alias-declaration outside of declaration scope");
+ // Get the innermost enclosing declaration scope.
+ S = S->getDeclParent();
if (Type.isInvalid())
return nullptr;
@@ -13526,7 +13391,7 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S, AccessSpecifier AS,
LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
TemplateParamLists.size()
? forRedeclarationInCurContext()
- : ForVisibleRedeclaration);
+ : RedeclarationKind::ForVisibleRedeclaration);
LookupName(Previous, S);
// Warn about shadowing the name of a template parameter.
@@ -13549,6 +13414,7 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S, AccessSpecifier AS,
ProcessDeclAttributeList(S, NewTD, AttrList);
AddPragmaAttributes(S, NewTD);
+ ProcessAPINotes(NewTD);
CheckTypedefForVariablyModifiedType(S, NewTD);
Invalid |= NewTD->isInvalidDecl();
@@ -13564,6 +13430,7 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S, AccessSpecifier AS,
Diag(UsingLoc, diag::err_alias_template_extra_headers)
<< SourceRange(TemplateParamLists[1]->getTemplateLoc(),
TemplateParamLists[TemplateParamLists.size()-1]->getRAngleLoc());
+ Invalid = true;
}
TemplateParameterList *TemplateParams = TemplateParamLists[0];
@@ -13658,7 +13525,7 @@ Decl *Sema::ActOnNamespaceAliasDef(Scope *S, SourceLocation NamespaceLoc,
// Lookup the namespace name.
LookupResult R(*this, Ident, IdentLoc, LookupNamespaceName);
- LookupParsedName(R, S, &SS);
+ LookupParsedName(R, S, &SS, /*ObjectType=*/QualType());
if (R.isAmbiguous())
return nullptr;
@@ -13674,7 +13541,7 @@ Decl *Sema::ActOnNamespaceAliasDef(Scope *S, SourceLocation NamespaceLoc,
// Check if we have a previous declaration with the same name.
LookupResult PrevR(*this, Alias, AliasLoc, LookupOrdinaryName,
- ForVisibleRedeclaration);
+ RedeclarationKind::ForVisibleRedeclaration);
LookupName(PrevR, S);
// Check we're not shadowing a template parameter.
@@ -13734,7 +13601,7 @@ struct SpecialMemberExceptionSpecInfo
Sema::ImplicitExceptionSpecification ExceptSpec;
SpecialMemberExceptionSpecInfo(Sema &S, CXXMethodDecl *MD,
- Sema::CXXSpecialMember CSM,
+ CXXSpecialMemberKind CSM,
Sema::InheritedConstructorInfo *ICI,
SourceLocation Loc)
: SpecialMemberVisitor(S, MD, CSM, ICI), Loc(Loc), ExceptSpec(S) {}
@@ -13767,7 +13634,8 @@ bool SpecialMemberExceptionSpecInfo::visitBase(CXXBaseSpecifier *Base) {
}
bool SpecialMemberExceptionSpecInfo::visitField(FieldDecl *FD) {
- if (CSM == Sema::CXXDefaultConstructor && FD->hasInClassInitializer()) {
+ if (CSM == CXXSpecialMemberKind::DefaultConstructor &&
+ FD->hasInClassInitializer()) {
Expr *E = FD->getInClassInitializer();
if (!E)
// FIXME: It's a little wasteful to build and throw away a
@@ -13826,7 +13694,7 @@ ExplicitSpecifier Sema::ActOnExplicitBoolSpecifier(Expr *ExplicitExpr) {
static Sema::ImplicitExceptionSpecification
ComputeDefaultedSpecialMemberExceptionSpec(
- Sema &S, SourceLocation Loc, CXXMethodDecl *MD, Sema::CXXSpecialMember CSM,
+ Sema &S, SourceLocation Loc, CXXMethodDecl *MD, CXXSpecialMemberKind CSM,
Sema::InheritedConstructorInfo *ICI) {
ComputingExceptionSpec CES(S, MD, Loc);
@@ -13876,7 +13744,7 @@ struct DeclaringSpecialMember {
Sema::ContextRAII SavedContext;
bool WasAlreadyBeingDeclared;
- DeclaringSpecialMember(Sema &S, CXXRecordDecl *RD, Sema::CXXSpecialMember CSM)
+ DeclaringSpecialMember(Sema &S, CXXRecordDecl *RD, CXXSpecialMemberKind CSM)
: S(S), D(RD, CSM), SavedContext(S, RD) {
WasAlreadyBeingDeclared = !S.SpecialMembersBeingDeclared.insert(D).second;
if (WasAlreadyBeingDeclared)
@@ -13919,7 +13787,7 @@ void Sema::CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD) {
// implicit special members with this name.
DeclarationName Name = FD->getDeclName();
LookupResult R(*this, Name, SourceLocation(), LookupOrdinaryName,
- ForExternalRedeclaration);
+ RedeclarationKind::ForExternalRedeclaration);
for (auto *D : FD->getParent()->lookup(Name))
if (auto *Acceptable = R.getAcceptableDecl(D))
R.addDecl(Acceptable);
@@ -13966,13 +13834,13 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
assert(ClassDecl->needsImplicitDefaultConstructor() &&
"Should not build implicit default constructor!");
- DeclaringSpecialMember DSM(*this, ClassDecl, CXXDefaultConstructor);
+ DeclaringSpecialMember DSM(*this, ClassDecl,
+ CXXSpecialMemberKind::DefaultConstructor);
if (DSM.isAlreadyBeingDeclared())
return nullptr;
- bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl,
- CXXDefaultConstructor,
- false);
+ bool Constexpr = defaultedSpecialMemberIsConstexpr(
+ *this, ClassDecl, CXXSpecialMemberKind::DefaultConstructor, false);
// Create the actual constructor declaration.
CanQualType ClassType
@@ -13994,10 +13862,10 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
setupImplicitSpecialMemberType(DefaultCon, Context.VoidTy, std::nullopt);
if (getLangOpts().CUDA)
- inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXDefaultConstructor,
- DefaultCon,
- /* ConstRHS */ false,
- /* Diagnose */ false);
+ CUDA().inferTargetForImplicitSpecialMember(
+ ClassDecl, CXXSpecialMemberKind::DefaultConstructor, DefaultCon,
+ /* ConstRHS */ false,
+ /* Diagnose */ false);
// We don't need to use SpecialMemberIsTrivial here; triviality for default
// constructors is easy to compute.
@@ -14009,7 +13877,8 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
Scope *S = getScopeForContext(ClassDecl);
CheckImplicitSpecialMemberDeclaration(S, DefaultCon);
- if (ShouldDeleteSpecialMember(DefaultCon, CXXDefaultConstructor))
+ if (ShouldDeleteSpecialMember(DefaultCon,
+ CXXSpecialMemberKind::DefaultConstructor))
SetDeclDeleted(DefaultCon, ClassLoc);
if (S)
@@ -14030,6 +13899,9 @@ void Sema::DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXRecordDecl *ClassDecl = Constructor->getParent();
assert(ClassDecl && "DefineImplicitDefaultConstructor - invalid constructor");
+ if (ClassDecl->isInvalidDecl()) {
+ return;
+ }
SynthesizedFunctionScope Scope(*this, Constructor);
@@ -14098,10 +13970,10 @@ Sema::findInheritingConstructor(SourceLocation Loc,
// from which it was inherited.
InheritedConstructorInfo ICI(*this, Loc, Shadow);
- bool Constexpr =
- BaseCtor->isConstexpr() &&
- defaultedSpecialMemberIsConstexpr(*this, Derived, CXXDefaultConstructor,
- false, BaseCtor, &ICI);
+ bool Constexpr = BaseCtor->isConstexpr() &&
+ defaultedSpecialMemberIsConstexpr(
+ *this, Derived, CXXSpecialMemberKind::DefaultConstructor,
+ false, BaseCtor, &ICI);
CXXConstructorDecl *DerivedCtor = CXXConstructorDecl::Create(
Context, Derived, UsingLoc, NameInfo, TInfo->getType(), TInfo,
@@ -14145,7 +14017,8 @@ Sema::findInheritingConstructor(SourceLocation Loc,
DerivedCtor->setParams(ParamDecls);
Derived->addDecl(DerivedCtor);
- if (ShouldDeleteSpecialMember(DerivedCtor, CXXDefaultConstructor, &ICI))
+ if (ShouldDeleteSpecialMember(DerivedCtor,
+ CXXSpecialMemberKind::DefaultConstructor, &ICI))
SetDeclDeleted(DerivedCtor, UsingLoc);
return DerivedCtor;
@@ -14154,8 +14027,9 @@ Sema::findInheritingConstructor(SourceLocation Loc,
void Sema::NoteDeletedInheritingConstructor(CXXConstructorDecl *Ctor) {
InheritedConstructorInfo ICI(*this, Ctor->getLocation(),
Ctor->getInheritedConstructor().getShadowDecl());
- ShouldDeleteSpecialMember(Ctor, CXXDefaultConstructor, &ICI,
- /*Diagnose*/true);
+ ShouldDeleteSpecialMember(Ctor, CXXSpecialMemberKind::DefaultConstructor,
+ &ICI,
+ /*Diagnose*/ true);
}
void Sema::DefineInheritingConstructor(SourceLocation CurrentLocation,
@@ -14246,13 +14120,13 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
// inline public member of its class.
assert(ClassDecl->needsImplicitDestructor());
- DeclaringSpecialMember DSM(*this, ClassDecl, CXXDestructor);
+ DeclaringSpecialMember DSM(*this, ClassDecl,
+ CXXSpecialMemberKind::Destructor);
if (DSM.isAlreadyBeingDeclared())
return nullptr;
- bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl,
- CXXDestructor,
- false);
+ bool Constexpr = defaultedSpecialMemberIsConstexpr(
+ *this, ClassDecl, CXXSpecialMemberKind::Destructor, false);
// Create the actual destructor declaration.
CanQualType ClassType
@@ -14274,10 +14148,10 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
setupImplicitSpecialMemberType(Destructor, Context.VoidTy, std::nullopt);
if (getLangOpts().CUDA)
- inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXDestructor,
- Destructor,
- /* ConstRHS */ false,
- /* Diagnose */ false);
+ CUDA().inferTargetForImplicitSpecialMember(
+ ClassDecl, CXXSpecialMemberKind::Destructor, Destructor,
+ /* ConstRHS */ false,
+ /* Diagnose */ false);
// We don't need to use SpecialMemberIsTrivial here; triviality for
// destructors is easy to compute.
@@ -14295,7 +14169,7 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
// the definition of the class, because its validity depends on the alignment
// of the class. We'll check this from ActOnFields once the class is complete.
if (ClassDecl->isCompleteDefinition() &&
- ShouldDeleteSpecialMember(Destructor, CXXDestructor))
+ ShouldDeleteSpecialMember(Destructor, CXXSpecialMemberKind::Destructor))
SetDeclDeleted(Destructor, ClassLoc);
// Introduce this destructor into its scope.
@@ -14367,8 +14241,6 @@ void Sema::CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
MarkVirtualBaseDestructorsReferenced(Destructor->getLocation(), ClassDecl);
}
-/// Perform any semantic analysis which needs to be delayed until all
-/// pending class member declarations have been parsed.
void Sema::ActOnFinishCXXMemberDecls() {
// If the context is an invalid C++ class, just suppress these checks.
if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(CurContext)) {
@@ -14876,7 +14748,8 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
// operators taking an object instead of a reference are allowed.
assert(ClassDecl->needsImplicitCopyAssignment());
- DeclaringSpecialMember DSM(*this, ClassDecl, CXXCopyAssignment);
+ DeclaringSpecialMember DSM(*this, ClassDecl,
+ CXXSpecialMemberKind::CopyAssignment);
if (DSM.isAlreadyBeingDeclared())
return nullptr;
@@ -14893,9 +14766,8 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
ArgType = Context.getLValueReferenceType(ArgType);
- bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl,
- CXXCopyAssignment,
- Const);
+ bool Constexpr = defaultedSpecialMemberIsConstexpr(
+ *this, ClassDecl, CXXSpecialMemberKind::CopyAssignment, Const);
// An implicitly-declared copy assignment operator is an inline public
// member of its class.
@@ -14916,10 +14788,10 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
setupImplicitSpecialMemberType(CopyAssignment, RetType, ArgType);
if (getLangOpts().CUDA)
- inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXCopyAssignment,
- CopyAssignment,
- /* ConstRHS */ Const,
- /* Diagnose */ false);
+ CUDA().inferTargetForImplicitSpecialMember(
+ ClassDecl, CXXSpecialMemberKind::CopyAssignment, CopyAssignment,
+ /* ConstRHS */ Const,
+ /* Diagnose */ false);
// Add the parameter to the operator.
ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyAssignment,
@@ -14930,9 +14802,10 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
CopyAssignment->setParams(FromParam);
CopyAssignment->setTrivial(
- ClassDecl->needsOverloadResolutionForCopyAssignment()
- ? SpecialMemberIsTrivial(CopyAssignment, CXXCopyAssignment)
- : ClassDecl->hasTrivialCopyAssignment());
+ ClassDecl->needsOverloadResolutionForCopyAssignment()
+ ? SpecialMemberIsTrivial(CopyAssignment,
+ CXXSpecialMemberKind::CopyAssignment)
+ : ClassDecl->hasTrivialCopyAssignment());
// Note that we have added this copy-assignment operator.
++getASTContext().NumImplicitCopyAssignmentOperatorsDeclared;
@@ -14940,7 +14813,8 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
Scope *S = getScopeForContext(ClassDecl);
CheckImplicitSpecialMemberDeclaration(S, CopyAssignment);
- if (ShouldDeleteSpecialMember(CopyAssignment, CXXCopyAssignment)) {
+ if (ShouldDeleteSpecialMember(CopyAssignment,
+ CXXSpecialMemberKind::CopyAssignment)) {
ClassDecl->setImplicitCopyAssignmentIsDeleted();
SetDeclDeleted(CopyAssignment, ClassLoc);
}
@@ -15129,7 +15003,7 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
for (auto *Field : ClassDecl->fields()) {
// FIXME: We should form some kind of AST representation for the implied
// memcpy in a union copy operation.
- if (Field->isUnnamedBitfield() || Field->getParent()->isUnion())
+ if (Field->isUnnamedBitField() || Field->getParent()->isUnion())
continue;
if (Field->isInvalidDecl()) {
@@ -15227,7 +15101,8 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
assert(ClassDecl->needsImplicitMoveAssignment());
- DeclaringSpecialMember DSM(*this, ClassDecl, CXXMoveAssignment);
+ DeclaringSpecialMember DSM(*this, ClassDecl,
+ CXXSpecialMemberKind::MoveAssignment);
if (DSM.isAlreadyBeingDeclared())
return nullptr;
@@ -15243,9 +15118,8 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
QualType RetType = Context.getLValueReferenceType(ArgType);
ArgType = Context.getRValueReferenceType(ArgType);
- bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl,
- CXXMoveAssignment,
- false);
+ bool Constexpr = defaultedSpecialMemberIsConstexpr(
+ *this, ClassDecl, CXXSpecialMemberKind::MoveAssignment, false);
// An implicitly-declared move assignment operator is an inline public
// member of its class.
@@ -15266,10 +15140,10 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
setupImplicitSpecialMemberType(MoveAssignment, RetType, ArgType);
if (getLangOpts().CUDA)
- inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXMoveAssignment,
- MoveAssignment,
- /* ConstRHS */ false,
- /* Diagnose */ false);
+ CUDA().inferTargetForImplicitSpecialMember(
+ ClassDecl, CXXSpecialMemberKind::MoveAssignment, MoveAssignment,
+ /* ConstRHS */ false,
+ /* Diagnose */ false);
// Add the parameter to the operator.
ParmVarDecl *FromParam = ParmVarDecl::Create(Context, MoveAssignment,
@@ -15280,9 +15154,10 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
MoveAssignment->setParams(FromParam);
MoveAssignment->setTrivial(
- ClassDecl->needsOverloadResolutionForMoveAssignment()
- ? SpecialMemberIsTrivial(MoveAssignment, CXXMoveAssignment)
- : ClassDecl->hasTrivialMoveAssignment());
+ ClassDecl->needsOverloadResolutionForMoveAssignment()
+ ? SpecialMemberIsTrivial(MoveAssignment,
+ CXXSpecialMemberKind::MoveAssignment)
+ : ClassDecl->hasTrivialMoveAssignment());
// Note that we have added this copy-assignment operator.
++getASTContext().NumImplicitMoveAssignmentOperatorsDeclared;
@@ -15290,7 +15165,8 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
Scope *S = getScopeForContext(ClassDecl);
CheckImplicitSpecialMemberDeclaration(S, MoveAssignment);
- if (ShouldDeleteSpecialMember(MoveAssignment, CXXMoveAssignment)) {
+ if (ShouldDeleteSpecialMember(MoveAssignment,
+ CXXSpecialMemberKind::MoveAssignment)) {
ClassDecl->setImplicitMoveAssignmentIsDeleted();
SetDeclDeleted(MoveAssignment, ClassLoc);
}
@@ -15339,10 +15215,10 @@ static void checkMoveAssignmentForRepeatedMove(Sema &S, CXXRecordDecl *Class,
// If we're not actually going to call a move assignment for this base,
// or the selected move assignment is trivial, skip it.
Sema::SpecialMemberOverloadResult SMOR =
- S.LookupSpecialMember(Base, Sema::CXXMoveAssignment,
- /*ConstArg*/false, /*VolatileArg*/false,
- /*RValueThis*/true, /*ConstThis*/false,
- /*VolatileThis*/false);
+ S.LookupSpecialMember(Base, CXXSpecialMemberKind::MoveAssignment,
+ /*ConstArg*/ false, /*VolatileArg*/ false,
+ /*RValueThis*/ true, /*ConstThis*/ false,
+ /*VolatileThis*/ false);
if (!SMOR.getMethod() || SMOR.getMethod()->isTrivial() ||
!SMOR.getMethod()->isMoveAssignmentOperator())
continue;
@@ -15512,7 +15388,7 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
for (auto *Field : ClassDecl->fields()) {
// FIXME: We should form some kind of AST representation for the implied
// memcpy in a union copy operation.
- if (Field->isUnnamedBitfield() || Field->getParent()->isUnion())
+ if (Field->isUnnamedBitField() || Field->getParent()->isUnion())
continue;
if (Field->isInvalidDecl()) {
@@ -15619,7 +15495,8 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
// constructor, one is declared implicitly.
assert(ClassDecl->needsImplicitCopyConstructor());
- DeclaringSpecialMember DSM(*this, ClassDecl, CXXCopyConstructor);
+ DeclaringSpecialMember DSM(*this, ClassDecl,
+ CXXSpecialMemberKind::CopyConstructor);
if (DSM.isAlreadyBeingDeclared())
return nullptr;
@@ -15637,9 +15514,8 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
ArgType = Context.getLValueReferenceType(ArgType);
- bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl,
- CXXCopyConstructor,
- Const);
+ bool Constexpr = defaultedSpecialMemberIsConstexpr(
+ *this, ClassDecl, CXXSpecialMemberKind::CopyConstructor, Const);
DeclarationName Name
= Context.DeclarationNames.getCXXConstructorName(
@@ -15662,10 +15538,10 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
setupImplicitSpecialMemberType(CopyConstructor, Context.VoidTy, ArgType);
if (getLangOpts().CUDA)
- inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXCopyConstructor,
- CopyConstructor,
- /* ConstRHS */ Const,
- /* Diagnose */ false);
+ CUDA().inferTargetForImplicitSpecialMember(
+ ClassDecl, CXXSpecialMemberKind::CopyConstructor, CopyConstructor,
+ /* ConstRHS */ Const,
+ /* Diagnose */ false);
// During template instantiation of special member functions we need a
// reliable TypeSourceInfo for the parameter types in order to allow functions
@@ -15683,14 +15559,16 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
CopyConstructor->setTrivial(
ClassDecl->needsOverloadResolutionForCopyConstructor()
- ? SpecialMemberIsTrivial(CopyConstructor, CXXCopyConstructor)
+ ? SpecialMemberIsTrivial(CopyConstructor,
+ CXXSpecialMemberKind::CopyConstructor)
: ClassDecl->hasTrivialCopyConstructor());
CopyConstructor->setTrivialForCall(
ClassDecl->hasAttr<TrivialABIAttr>() ||
(ClassDecl->needsOverloadResolutionForCopyConstructor()
- ? SpecialMemberIsTrivial(CopyConstructor, CXXCopyConstructor,
- TAH_ConsiderTrivialABI)
+ ? SpecialMemberIsTrivial(CopyConstructor,
+ CXXSpecialMemberKind::CopyConstructor,
+ TAH_ConsiderTrivialABI)
: ClassDecl->hasTrivialCopyConstructorForCall()));
// Note that we have declared this constructor.
@@ -15699,7 +15577,8 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
Scope *S = getScopeForContext(ClassDecl);
CheckImplicitSpecialMemberDeclaration(S, CopyConstructor);
- if (ShouldDeleteSpecialMember(CopyConstructor, CXXCopyConstructor)) {
+ if (ShouldDeleteSpecialMember(CopyConstructor,
+ CXXSpecialMemberKind::CopyConstructor)) {
ClassDecl->setImplicitCopyConstructorIsDeleted();
SetDeclDeleted(CopyConstructor, ClassLoc);
}
@@ -15764,7 +15643,8 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
CXXRecordDecl *ClassDecl) {
assert(ClassDecl->needsImplicitMoveConstructor());
- DeclaringSpecialMember DSM(*this, ClassDecl, CXXMoveConstructor);
+ DeclaringSpecialMember DSM(*this, ClassDecl,
+ CXXSpecialMemberKind::MoveConstructor);
if (DSM.isAlreadyBeingDeclared())
return nullptr;
@@ -15778,9 +15658,8 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
ArgType = Context.getAddrSpaceQualType(ClassType, AS);
ArgType = Context.getRValueReferenceType(ArgType);
- bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl,
- CXXMoveConstructor,
- false);
+ bool Constexpr = defaultedSpecialMemberIsConstexpr(
+ *this, ClassDecl, CXXSpecialMemberKind::MoveConstructor, false);
DeclarationName Name
= Context.DeclarationNames.getCXXConstructorName(
@@ -15804,10 +15683,10 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
setupImplicitSpecialMemberType(MoveConstructor, Context.VoidTy, ArgType);
if (getLangOpts().CUDA)
- inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXMoveConstructor,
- MoveConstructor,
- /* ConstRHS */ false,
- /* Diagnose */ false);
+ CUDA().inferTargetForImplicitSpecialMember(
+ ClassDecl, CXXSpecialMemberKind::MoveConstructor, MoveConstructor,
+ /* ConstRHS */ false,
+ /* Diagnose */ false);
// Add the parameter to the constructor.
ParmVarDecl *FromParam = ParmVarDecl::Create(Context, MoveConstructor,
@@ -15819,13 +15698,15 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
MoveConstructor->setTrivial(
ClassDecl->needsOverloadResolutionForMoveConstructor()
- ? SpecialMemberIsTrivial(MoveConstructor, CXXMoveConstructor)
+ ? SpecialMemberIsTrivial(MoveConstructor,
+ CXXSpecialMemberKind::MoveConstructor)
: ClassDecl->hasTrivialMoveConstructor());
MoveConstructor->setTrivialForCall(
ClassDecl->hasAttr<TrivialABIAttr>() ||
(ClassDecl->needsOverloadResolutionForMoveConstructor()
- ? SpecialMemberIsTrivial(MoveConstructor, CXXMoveConstructor,
+ ? SpecialMemberIsTrivial(MoveConstructor,
+ CXXSpecialMemberKind::MoveConstructor,
TAH_ConsiderTrivialABI)
: ClassDecl->hasTrivialMoveConstructorForCall()));
@@ -15835,7 +15716,8 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
Scope *S = getScopeForContext(ClassDecl);
CheckImplicitSpecialMemberDeclaration(S, MoveConstructor);
- if (ShouldDeleteSpecialMember(MoveConstructor, CXXMoveConstructor)) {
+ if (ShouldDeleteSpecialMember(MoveConstructor,
+ CXXSpecialMemberKind::MoveConstructor)) {
ClassDecl->setImplicitMoveConstructorIsDeleted();
SetDeclDeleted(MoveConstructor, ClassLoc);
}
@@ -16081,7 +15963,7 @@ ExprResult Sema::BuildCXXConstructExpr(
CXXConstructionKind ConstructKind, SourceRange ParenRange) {
if (auto *Shadow = dyn_cast<ConstructorUsingShadowDecl>(FoundDecl)) {
Constructor = findInheritingConstructor(ConstructLoc, Constructor, Shadow);
- // The only way to get here is if we did overlaod resolution to find the
+ // The only way to get here is if we did overload resolution to find the
// shadow decl, so we don't need to worry about re-checking the trailing
// requires clause.
if (DiagnoseUseOfOverloadedDecl(Constructor, ConstructLoc))
@@ -16107,7 +15989,7 @@ ExprResult Sema::BuildCXXConstructExpr(
DeclInitType->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) &&
"given constructor for wrong type");
MarkFunctionReferenced(ConstructLoc, Constructor);
- if (getLangOpts().CUDA && !CheckCUDACall(ConstructLoc, Constructor))
+ if (getLangOpts().CUDA && !CUDA().CheckCall(ConstructLoc, Constructor))
return ExprError();
return CheckForImmediateInvocation(
@@ -16174,18 +16056,14 @@ void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
// Emit warning for non-trivial dtor in global scope (a real global,
// class-static, function-static).
- Diag(VD->getLocation(), diag::warn_exit_time_destructor);
+ if (!VD->hasAttr<AlwaysDestroyAttr>())
+ Diag(VD->getLocation(), diag::warn_exit_time_destructor);
// TODO: this should be re-enabled for static locals by !CXAAtExit
if (!VD->isStaticLocal())
Diag(VD->getLocation(), diag::warn_global_destructor);
}
-/// Given a constructor and the set of arguments provided for the
-/// constructor, convert the arguments and add any required default arguments
-/// to form a proper call to this constructor.
-///
-/// \returns true if an error occurred, false otherwise.
bool Sema::CompleteConstructorCall(CXXConstructorDecl *Constructor,
QualType DeclInitType, MultiExprArg ArgsPtr,
SourceLocation Loc,
@@ -16395,9 +16273,6 @@ CheckOperatorDeleteDeclaration(Sema &SemaRef, FunctionDecl *FnDecl) {
return false;
}
-/// CheckOverloadedOperatorDeclaration - Check whether the declaration
-/// of this overloaded operator is well-formed. If so, returns false;
-/// otherwise, emits appropriate diagnostics and returns true.
bool Sema::CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl) {
assert(FnDecl && FnDecl->isOverloadedOperator() &&
"Expected an overloaded operator declaration");
@@ -16622,9 +16497,6 @@ checkLiteralOperatorTemplateParameterList(Sema &SemaRef,
return true;
}
-/// CheckLiteralOperatorDeclaration - Check whether the declaration
-/// of this literal operator function is well-formed. If so, returns
-/// false; otherwise, emits appropriate diagnostics and returns true.
bool Sema::CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl) {
if (isa<CXXMethodDecl>(FnDecl)) {
Diag(FnDecl->getLocation(), diag::err_literal_operator_outside_namespace)
@@ -16795,12 +16667,6 @@ bool Sema::CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl) {
return false;
}
-/// ActOnStartLinkageSpecification - Parsed the beginning of a C++
-/// linkage specification, including the language and (if present)
-/// the '{'. ExternLoc is the location of the 'extern', Lang is the
-/// language string literal. LBraceLoc, if valid, provides the location of
-/// the '{' brace. Otherwise, this linkage specification does not
-/// have any braces.
Decl *Sema::ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc) {
@@ -16844,10 +16710,6 @@ Decl *Sema::ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc,
return D;
}
-/// ActOnFinishLinkageSpecification - Complete the definition of
-/// the C++ linkage specification LinkageSpec. If RBraceLoc is
-/// valid, it's the position of the closing '}' brace in a linkage
-/// specification that uses braces.
Decl *Sema::ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc) {
@@ -16880,14 +16742,10 @@ Decl *Sema::ActOnEmptyDeclaration(Scope *S,
return ED;
}
-/// Perform semantic analysis for the variable declaration that
-/// occurs within a C++ catch clause, returning the newly-created
-/// variable.
-VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
- TypeSourceInfo *TInfo,
+VarDecl *Sema::BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation Loc,
- IdentifierInfo *Name) {
+ const IdentifierInfo *Name) {
bool Invalid = false;
QualType ExDeclType = TInfo->getType();
@@ -16966,7 +16824,7 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
ExDecl->setExceptionVariable(true);
// In ARC, infer 'retaining' for variables of retainable type.
- if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(ExDecl))
+ if (getLangOpts().ObjCAutoRefCount && ObjC().inferObjCARCLifetime(ExDecl))
Invalid = true;
if (!Invalid && !ExDeclType->isDependentType()) {
@@ -17018,8 +16876,6 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
return ExDecl;
}
-/// ActOnExceptionDeclarator - Parsed the exception-declarator in a C++ catch
-/// handler.
Decl *Sema::ActOnExceptionDeclarator(Scope *S, Declarator &D) {
TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
bool Invalid = D.isInvalidType();
@@ -17032,10 +16888,10 @@ Decl *Sema::ActOnExceptionDeclarator(Scope *S, Declarator &D) {
Invalid = true;
}
- IdentifierInfo *II = D.getIdentifier();
- if (NamedDecl *PrevDecl = LookupSingleName(S, II, D.getIdentifierLoc(),
- LookupOrdinaryName,
- ForVisibleRedeclaration)) {
+ const IdentifierInfo *II = D.getIdentifier();
+ if (NamedDecl *PrevDecl =
+ LookupSingleName(S, II, D.getIdentifierLoc(), LookupOrdinaryName,
+ RedeclarationKind::ForVisibleRedeclaration)) {
// The scope should be freshly made just for us. There is just no way
// it contains any previous declaration, except for function parameters in
// a function-try-block's catch statement.
@@ -17263,8 +17119,6 @@ static bool UsefulToPrintExpr(const Expr *E) {
return true;
}
-/// Try to print more useful information about a failed static_assert
-/// with expression \E
void Sema::DiagnoseStaticAssertDetails(const Expr *E) {
if (const auto *Op = dyn_cast<BinaryOperator>(E);
Op && Op->getOpcode() != BO_LOr) {
@@ -17530,81 +17384,6 @@ Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
return Decl;
}
-/// Perform semantic analysis of the given friend type declaration.
-///
-/// \returns A friend declaration that.
-FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation LocStart,
- SourceLocation FriendLoc,
- TypeSourceInfo *TSInfo) {
- assert(TSInfo && "NULL TypeSourceInfo for friend type declaration");
-
- QualType T = TSInfo->getType();
- SourceRange TypeRange = TSInfo->getTypeLoc().getSourceRange();
-
- // C++03 [class.friend]p2:
- // An elaborated-type-specifier shall be used in a friend declaration
- // for a class.*
- //
- // * The class-key of the elaborated-type-specifier is required.
- if (!CodeSynthesisContexts.empty()) {
- // Do not complain about the form of friend template types during any kind
- // of code synthesis. For template instantiation, we will have complained
- // when the template was defined.
- } else {
- if (!T->isElaboratedTypeSpecifier()) {
- // If we evaluated the type to a record type, suggest putting
- // a tag in front.
- if (const RecordType *RT = T->getAs<RecordType>()) {
- RecordDecl *RD = RT->getDecl();
-
- SmallString<16> InsertionText(" ");
- InsertionText += RD->getKindName();
-
- Diag(TypeRange.getBegin(),
- getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_unelaborated_friend_type :
- diag::ext_unelaborated_friend_type)
- << (unsigned) RD->getTagKind()
- << T
- << FixItHint::CreateInsertion(getLocForEndOfToken(FriendLoc),
- InsertionText);
- } else {
- Diag(FriendLoc,
- getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_nonclass_type_friend :
- diag::ext_nonclass_type_friend)
- << T
- << TypeRange;
- }
- } else if (T->getAs<EnumType>()) {
- Diag(FriendLoc,
- getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_enum_friend :
- diag::ext_enum_friend)
- << T
- << TypeRange;
- }
-
- // C++11 [class.friend]p3:
- // A friend declaration that does not declare a function shall have one
- // of the following forms:
- // friend elaborated-type-specifier ;
- // friend simple-type-specifier ;
- // friend typename-specifier ;
- if (getLangOpts().CPlusPlus11 && LocStart != FriendLoc)
- Diag(FriendLoc, diag::err_friend_not_first_in_declaration) << T;
- }
-
- // If the type specifier in a friend declaration designates a (possibly
- // cv-qualified) class type, that class is declared as a friend; otherwise,
- // the friend declaration is ignored.
- return FriendDecl::Create(Context, CurContext,
- TSInfo->getTypeLoc().getBeginLoc(), TSInfo,
- FriendLoc);
-}
-
-/// Handle a friend tag declaration where the scope specifier was
-/// templated.
DeclResult Sema::ActOnTemplatedFriendTag(
Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
@@ -17623,11 +17402,12 @@ DeclResult Sema::ActOnTemplatedFriendTag(
if (Invalid)
return true;
- return CheckClassTemplate(S, TagSpec, TUK_Friend, TagLoc, SS, Name,
- NameLoc, Attr, TemplateParams, AS_public,
+ return CheckClassTemplate(S, TagSpec, TagUseKind::Friend, TagLoc, SS,
+ Name, NameLoc, Attr, TemplateParams, AS_public,
/*ModulePrivateLoc=*/SourceLocation(),
FriendLoc, TempParamLists.size() - 1,
- TempParamLists.data()).get();
+ TempParamLists.data())
+ .get();
} else {
// The "template<>" header is extraneous.
Diag(TemplateParams->getTemplateLoc(), diag::err_template_tag_noparams)
@@ -17655,8 +17435,8 @@ DeclResult Sema::ActOnTemplatedFriendTag(
if (SS.isEmpty()) {
bool Owned = false;
bool IsDependent = false;
- return ActOnTag(S, TagSpec, TUK_Friend, TagLoc, SS, Name, NameLoc, Attr,
- AS_public,
+ return ActOnTag(S, TagSpec, TagUseKind::Friend, TagLoc, SS, Name, NameLoc,
+ Attr, AS_public,
/*ModulePrivateLoc=*/SourceLocation(),
MultiTemplateParamsArg(), Owned, IsDependent,
/*ScopedEnumKWLoc=*/SourceLocation(),
@@ -17720,26 +17500,10 @@ DeclResult Sema::ActOnTemplatedFriendTag(
return Friend;
}
-/// Handle a friend type declaration. This works in tandem with
-/// ActOnTag.
-///
-/// Notes on friend class templates:
-///
-/// We generally treat friend class declarations as if they were
-/// declaring a class. So, for example, the elaborated type specifier
-/// in a friend declaration is required to obey the restrictions of a
-/// class-head (i.e. no typedefs in the scope chain), template
-/// parameters are required to match up with simple template-ids, &c.
-/// However, unlike when declaring a template specialization, it's
-/// okay to refer to a template specialization without an empty
-/// template parameter declaration, e.g.
-/// friend class A<T>::B<unsigned>;
-/// We permit this as a special case; if there are any template
-/// parameters present at all, require proper matching, i.e.
-/// template <> template \<class T> friend class A<int>::B;
Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TempParams) {
SourceLocation Loc = DS.getBeginLoc();
+ SourceLocation FriendLoc = DS.getFriendSpecLoc();
assert(DS.isFriendSpecified());
assert(DS.getStorageClassSpec() == DeclSpec::SCS_unspecified);
@@ -17751,9 +17515,10 @@ Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
// friend simple-type-specifier ;
// friend typename-specifier ;
//
- // Any declaration with a type qualifier does not have that form. (It's
- // legal to specify a qualified type as a friend, you just can't write the
- // keywords.)
+ // If the friend keyword isn't first, or if the declarations has any type
+ // qualifiers, then the declaration doesn't have that form.
+ if (getLangOpts().CPlusPlus11 && !DS.isFriendSpecifiedFirst())
+ Diag(FriendLoc, diag::err_friend_not_first_in_declaration);
if (DS.getTypeQualifiers()) {
if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
Diag(DS.getConstSpecLoc(), diag::err_friend_decl_spec) << "const";
@@ -17769,7 +17534,7 @@ Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
// Try to convert the decl specifier to a type. This works for
// friend templates because ActOnTag never produces a ClassTemplateDecl
- // for a TUK_Friend.
+ // for a TagUseKind::Friend.
Declarator TheDeclarator(DS, ParsedAttributesView::none(),
DeclaratorContext::Member);
TypeSourceInfo *TSI = GetTypeForDeclarator(TheDeclarator);
@@ -17780,24 +17545,35 @@ Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
if (DiagnoseUnexpandedParameterPack(Loc, TSI, UPPC_FriendDeclaration))
return nullptr;
- // This is definitely an error in C++98. It's probably meant to
- // be forbidden in C++0x, too, but the specification is just
- // poorly written.
- //
- // The problem is with declarations like the following:
- // template <T> friend A<T>::foo;
- // where deciding whether a class C is a friend or not now hinges
- // on whether there exists an instantiation of A that causes
- // 'foo' to equal C. There are restrictions on class-heads
- // (which we declare (by fiat) elaborated friend declarations to
- // be) that makes this tractable.
- //
- // FIXME: handle "template <> friend class A<T>;", which
- // is possibly well-formed? Who even knows?
- if (TempParams.size() && !T->isElaboratedTypeSpecifier()) {
- Diag(Loc, diag::err_tagless_friend_type_template)
- << DS.getSourceRange();
- return nullptr;
+ if (!T->isElaboratedTypeSpecifier()) {
+ if (TempParams.size()) {
+ // C++23 [dcl.pre]p5:
+ // In a simple-declaration, the optional init-declarator-list can be
+ // omitted only when declaring a class or enumeration, that is, when
+ // the decl-specifier-seq contains either a class-specifier, an
+ // elaborated-type-specifier with a class-key, or an enum-specifier.
+ //
+ // The declaration of a template-declaration or explicit-specialization
+ // is never a member-declaration, so this must be a simple-declaration
+ // with no init-declarator-list. Therefore, this is ill-formed.
+ Diag(Loc, diag::err_tagless_friend_type_template) << DS.getSourceRange();
+ return nullptr;
+ } else if (const RecordDecl *RD = T->getAsRecordDecl()) {
+ SmallString<16> InsertionText(" ");
+ InsertionText += RD->getKindName();
+
+ Diag(Loc, getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_unelaborated_friend_type
+ : diag::ext_unelaborated_friend_type)
+ << (unsigned)RD->getTagKind() << T
+ << FixItHint::CreateInsertion(getLocForEndOfToken(FriendLoc),
+ InsertionText);
+ } else {
+ Diag(FriendLoc, getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_nonclass_type_friend
+ : diag::ext_nonclass_type_friend)
+ << T << DS.getSourceRange();
+ }
}
// C++98 [class.friend]p1: A friend of a class is a function
@@ -17813,12 +17589,11 @@ Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
Decl *D;
if (!TempParams.empty())
- D = FriendTemplateDecl::Create(Context, CurContext, Loc,
- TempParams,
- TSI,
- DS.getFriendSpecLoc());
+ D = FriendTemplateDecl::Create(Context, CurContext, Loc, TempParams, TSI,
+ FriendLoc);
else
- D = CheckFriendTypeDecl(Loc, DS.getFriendSpecLoc(), TSI);
+ D = FriendDecl::Create(Context, CurContext, TSI->getTypeLoc().getBeginLoc(),
+ TSI, FriendLoc);
if (!D)
return nullptr;
@@ -17887,7 +17662,7 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
DeclContext *DC;
Scope *DCScope = S;
LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
- ForExternalRedeclaration);
+ RedeclarationKind::ForExternalRedeclaration);
bool isTemplateId = D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId;
@@ -18142,7 +17917,8 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
return ND;
}
-void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc) {
+void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc,
+ StringLiteral *Message) {
AdjustDeclIfTemplate(Dcl);
FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(Dcl);
@@ -18191,7 +17967,7 @@ void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc) {
// C++11 [dcl.fct.def.delete]p4:
// A deleted function is implicitly inline.
Fn->setImplicitlyInline();
- Fn->setDeletedAsWritten();
+ Fn->setDeletedAsWritten(true, Message);
}
void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
@@ -18304,11 +18080,11 @@ void Sema::DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock) {
}
}
-void Sema::SetFunctionBodyKind(Decl *D, SourceLocation Loc,
- FnBodyKind BodyKind) {
+void Sema::SetFunctionBodyKind(Decl *D, SourceLocation Loc, FnBodyKind BodyKind,
+ StringLiteral *DeletedMessage) {
switch (BodyKind) {
case FnBodyKind::Delete:
- SetDeclDeleted(D, Loc);
+ SetDeclDeleted(D, Loc, DeletedMessage);
break;
case FnBodyKind::Default:
SetDeclDefaulted(D, Loc);
@@ -18319,7 +18095,7 @@ void Sema::SetFunctionBodyKind(Decl *D, SourceLocation Loc,
}
}
-bool Sema::CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
+bool Sema::CheckOverridingFunctionAttributes(CXXMethodDecl *New,
const CXXMethodDecl *Old) {
const auto *NewFT = New->getType()->castAs<FunctionProtoType>();
const auto *OldFT = Old->getType()->castAs<FunctionProtoType>();
@@ -18355,6 +18131,43 @@ bool Sema::CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
return true;
}
+ // Virtual overrides: check for matching effects.
+ if (Context.hasAnyFunctionEffects()) {
+ const auto OldFX = Old->getFunctionEffects();
+ const auto NewFXOrig = New->getFunctionEffects();
+
+ if (OldFX != NewFXOrig) {
+ FunctionEffectSet NewFX(NewFXOrig);
+ const auto Diffs = FunctionEffectDifferences(OldFX, NewFX);
+ FunctionEffectSet::Conflicts Errs;
+ for (const auto &Diff : Diffs) {
+ switch (Diff.shouldDiagnoseMethodOverride(*Old, OldFX, *New, NewFX)) {
+ case FunctionEffectDiff::OverrideResult::NoAction:
+ break;
+ case FunctionEffectDiff::OverrideResult::Warn:
+ Diag(New->getLocation(), diag::warn_mismatched_func_effect_override)
+ << Diff.effectName();
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function)
+ << Old->getReturnTypeSourceRange();
+ break;
+ case FunctionEffectDiff::OverrideResult::Merge: {
+ NewFX.insert(Diff.Old, Errs);
+ const auto *NewFT = New->getType()->castAs<FunctionProtoType>();
+ FunctionProtoType::ExtProtoInfo EPI = NewFT->getExtProtoInfo();
+ EPI.FunctionEffects = FunctionEffectsRef(NewFX);
+ QualType ModQT = Context.getFunctionType(NewFT->getReturnType(),
+ NewFT->getParamTypes(), EPI);
+ New->setType(ModQT);
+ break;
+ }
+ }
+ }
+ if (!Errs.empty())
+ diagnoseFunctionEffectMergeConflicts(Errs, New->getLocation(),
+ Old->getLocation());
+ }
+ }
+
CallingConv NewCC = NewFT->getCallConv(), OldCC = OldFT->getCallConv();
// If the calling conventions match, everything is fine
@@ -18495,11 +18308,6 @@ bool Sema::CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
return false;
}
-/// Mark the given method pure.
-///
-/// \param Method the method to be marked pure.
-///
-/// \param InitRange the source range that covers the "0" initializer.
bool Sema::CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange) {
SourceLocation EndLoc = InitRange.getEnd();
if (EndLoc.isValid())
@@ -18525,15 +18333,6 @@ void Sema::ActOnPureSpecifier(Decl *D, SourceLocation ZeroLoc) {
Diag(D->getLocation(), diag::err_illegal_initializer);
}
-/// Determine whether the given declaration is a global variable or
-/// static data member.
-static bool isNonlocalVariable(const Decl *D) {
- if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(D))
- return Var->hasGlobalStorage();
-
- return false;
-}
-
/// Invoked when we are about to parse an initializer for the declaration
/// 'Dcl'.
///
@@ -18542,9 +18341,7 @@ static bool isNonlocalVariable(const Decl *D) {
/// class X. If the declaration had a scope specifier, a scope will have
/// been created and passed in for this purpose. Otherwise, S will be null.
void Sema::ActOnCXXEnterDeclInitializer(Scope *S, Decl *D) {
- // If there is no declaration, there was an error parsing it.
- if (!D || D->isInvalidDecl())
- return;
+ assert(D && !D->isInvalidDecl());
// We will always have a nested name specifier here, but this declaration
// might not be out of line if the specifier names the current namespace:
@@ -18553,30 +18350,42 @@ void Sema::ActOnCXXEnterDeclInitializer(Scope *S, Decl *D) {
if (S && D->isOutOfLine())
EnterDeclaratorContext(S, D->getDeclContext());
- // If we are parsing the initializer for a static data member, push a
- // new expression evaluation context that is associated with this static
- // data member.
- if (isNonlocalVariable(D))
- PushExpressionEvaluationContext(
- ExpressionEvaluationContext::PotentiallyEvaluated, D);
+ PushExpressionEvaluationContext(
+ ExpressionEvaluationContext::PotentiallyEvaluated, D);
}
-/// Invoked after we are finished parsing an initializer for the declaration D.
void Sema::ActOnCXXExitDeclInitializer(Scope *S, Decl *D) {
- // If there is no declaration, there was an error parsing it.
- if (!D || D->isInvalidDecl())
- return;
-
- if (isNonlocalVariable(D))
- PopExpressionEvaluationContext();
+ assert(D);
if (S && D->isOutOfLine())
ExitDeclaratorContext(S);
+
+ if (getLangOpts().CPlusPlus23) {
+ // An expression or conversion is 'manifestly constant-evaluated' if it is:
+ // [...]
+ // - the initializer of a variable that is usable in constant expressions or
+ // has constant initialization.
+ if (auto *VD = dyn_cast<VarDecl>(D);
+ VD && (VD->isUsableInConstantExpressions(Context) ||
+ VD->hasConstantInitialization())) {
+ // An expression or conversion is in an 'immediate function context' if it
+ // is potentially evaluated and either:
+ // [...]
+ // - it is a subexpression of a manifestly constant-evaluated expression
+ // or conversion.
+ ExprEvalContexts.back().InImmediateFunctionContext = true;
+ }
+ }
+
+ // Unless the initializer is in an immediate function context (as determined
+ // above), this will evaluate all contained immediate function calls as
+ // constant expressions. If the initializer IS an immediate function context,
+ // the initializer has been determined to be a constant expression, and all
+ // such evaluations will be elided (i.e., as if we "knew the whole time" that
+ // it was a constant expression).
+ PopExpressionEvaluationContext();
}
-/// ActOnCXXConditionDeclarationExpr - Parsed a condition declaration of a
-/// C++ if/switch/while/for statement.
-/// e.g: "if (int x = f()) {...}"
DeclResult Sema::ActOnCXXConditionDeclaration(Scope *S, Declarator &D) {
// C++ 6.4p2:
// The declarator shall not specify a function or an array.
@@ -18595,6 +18404,9 @@ DeclResult Sema::ActOnCXXConditionDeclaration(Scope *S, Declarator &D) {
return true;
}
+ if (auto *VD = dyn_cast<VarDecl>(Dcl))
+ VD->setCXXCondDecl();
+
return Dcl;
}
@@ -18632,8 +18444,8 @@ void Sema::MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
// Do not mark as used if compiling for the device outside of the target
// region.
if (TUKind != TU_Prefix && LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice &&
- !isInOpenMPDeclareTargetContext() &&
- !isInOpenMPTargetExecutionDirective()) {
+ !OpenMP().isInOpenMPDeclareTargetContext() &&
+ !OpenMP().isInOpenMPTargetExecutionDirective()) {
if (!DefinitionRequired)
MarkVirtualMembersReferenced(Loc, Class);
return;
@@ -18705,11 +18517,15 @@ bool Sema::DefineUsedVTables() {
bool DefineVTable = true;
- // If this class has a key function, but that key function is
- // defined in another translation unit, we don't need to emit the
- // vtable even though we're using it.
const CXXMethodDecl *KeyFunction = Context.getCurrentKeyFunction(Class);
- if (KeyFunction && !KeyFunction->hasBody()) {
+ // V-tables for non-template classes with an owning module are always
+ // uniquely emitted in that module.
+ if (Class->isInCurrentModuleUnit()) {
+ DefineVTable = true;
+ } else if (KeyFunction && !KeyFunction->hasBody()) {
+ // If this class has a key function, but that key function is
+ // defined in another translation unit, we don't need to emit the
+ // vtable even though we're using it.
// The key function is in another translation unit.
DefineVTable = false;
TemplateSpecializationKind TSK =
@@ -18754,7 +18570,7 @@ bool Sema::DefineUsedVTables() {
DefinedAnything = true;
MarkVirtualMembersReferenced(Loc, Class);
CXXRecordDecl *Canonical = Class->getCanonicalDecl();
- if (VTablesUsed[Canonical])
+ if (VTablesUsed[Canonical] && !Class->shouldEmitInExternalSource())
Consumer.HandleVTable(Class);
// Warn if we're emitting a weak vtable. The vtable will be weak if there is
@@ -18817,61 +18633,6 @@ void Sema::MarkVirtualMembersReferenced(SourceLocation Loc,
}
}
-/// SetIvarInitializers - This routine builds initialization ASTs for the
-/// Objective-C implementation whose ivars need be initialized.
-void Sema::SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation) {
- if (!getLangOpts().CPlusPlus)
- return;
- if (ObjCInterfaceDecl *OID = ObjCImplementation->getClassInterface()) {
- SmallVector<ObjCIvarDecl*, 8> ivars;
- CollectIvarsToConstructOrDestruct(OID, ivars);
- if (ivars.empty())
- return;
- SmallVector<CXXCtorInitializer*, 32> AllToInit;
- for (unsigned i = 0; i < ivars.size(); i++) {
- FieldDecl *Field = ivars[i];
- if (Field->isInvalidDecl())
- continue;
-
- CXXCtorInitializer *Member;
- InitializedEntity InitEntity = InitializedEntity::InitializeMember(Field);
- InitializationKind InitKind =
- InitializationKind::CreateDefault(ObjCImplementation->getLocation());
-
- InitializationSequence InitSeq(*this, InitEntity, InitKind, std::nullopt);
- ExprResult MemberInit =
- InitSeq.Perform(*this, InitEntity, InitKind, std::nullopt);
- MemberInit = MaybeCreateExprWithCleanups(MemberInit);
- // Note, MemberInit could actually come back empty if no initialization
- // is required (e.g., because it would call a trivial default constructor)
- if (!MemberInit.get() || MemberInit.isInvalid())
- continue;
-
- Member =
- new (Context) CXXCtorInitializer(Context, Field, SourceLocation(),
- SourceLocation(),
- MemberInit.getAs<Expr>(),
- SourceLocation());
- AllToInit.push_back(Member);
-
- // Be sure that the destructor is accessible and is marked as referenced.
- if (const RecordType *RecordTy =
- Context.getBaseElementType(Field->getType())
- ->getAs<RecordType>()) {
- CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
- if (CXXDestructorDecl *Destructor = LookupDestructor(RD)) {
- MarkFunctionReferenced(Field->getLocation(), Destructor);
- CheckDestructorAccess(Field->getLocation(), Destructor,
- PDiag(diag::err_access_dtor_ivar)
- << Context.getBaseElementType(Field->getType()));
- }
- }
- }
- ObjCImplementation->setIvarInitializers(Context,
- AllToInit.data(), AllToInit.size());
- }
-}
-
static
void DelegatingCycleHelper(CXXConstructorDecl* Ctor,
llvm::SmallPtrSet<CXXConstructorDecl*, 4> &Valid,
@@ -19141,40 +18902,40 @@ void Sema::checkExceptionSpecification(
}
}
-void Sema::actOnDelayedExceptionSpecification(Decl *MethodD,
- ExceptionSpecificationType EST,
- SourceRange SpecificationRange,
- ArrayRef<ParsedType> DynamicExceptions,
- ArrayRef<SourceRange> DynamicExceptionRanges,
- Expr *NoexceptExpr) {
- if (!MethodD)
+void Sema::actOnDelayedExceptionSpecification(
+ Decl *D, ExceptionSpecificationType EST, SourceRange SpecificationRange,
+ ArrayRef<ParsedType> DynamicExceptions,
+ ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr) {
+ if (!D)
return;
- // Dig out the method we're referring to.
- if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(MethodD))
- MethodD = FunTmpl->getTemplatedDecl();
+ // Dig out the function we're referring to.
+ if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(D))
+ D = FTD->getTemplatedDecl();
- CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(MethodD);
- if (!Method)
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD)
return;
// Check the exception specification.
llvm::SmallVector<QualType, 4> Exceptions;
FunctionProtoType::ExceptionSpecInfo ESI;
- checkExceptionSpecification(/*IsTopLevel*/true, EST, DynamicExceptions,
+ checkExceptionSpecification(/*IsTopLevel=*/true, EST, DynamicExceptions,
DynamicExceptionRanges, NoexceptExpr, Exceptions,
ESI);
// Update the exception specification on the function type.
- Context.adjustExceptionSpec(Method, ESI, /*AsWritten*/true);
+ Context.adjustExceptionSpec(FD, ESI, /*AsWritten=*/true);
- if (Method->isStatic())
- checkThisInStaticMemberFunctionExceptionSpec(Method);
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (MD->isStatic())
+ checkThisInStaticMemberFunctionExceptionSpec(MD);
- if (Method->isVirtual()) {
- // Check overrides, which we previously had to delay.
- for (const CXXMethodDecl *O : Method->overridden_methods())
- CheckOverridingFunctionExceptionSpec(Method, O);
+ if (MD->isVirtual()) {
+ // Check overrides, which we previously had to delay.
+ for (const CXXMethodDecl *O : MD->overridden_methods())
+ CheckOverridingFunctionExceptionSpec(MD, O);
+ }
}
}
@@ -19186,7 +18947,7 @@ MSPropertyDecl *Sema::HandleMSProperty(Scope *S, RecordDecl *Record,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr) {
- IdentifierInfo *II = D.getIdentifier();
+ const IdentifierInfo *II = D.getIdentifier();
if (!II) {
Diag(DeclStart, diag::err_anonymous_property);
return nullptr;
@@ -19219,7 +18980,7 @@ MSPropertyDecl *Sema::HandleMSProperty(Scope *S, RecordDecl *Record,
// Check to see if this name was declared as a member previously
NamedDecl *PrevDecl = nullptr;
LookupResult Previous(*this, II, Loc, LookupMemberName,
- ForVisibleRedeclaration);
+ RedeclarationKind::ForVisibleRedeclaration);
LookupName(Previous, S);
switch (Previous.getResultKind()) {
case LookupResult::Found:
@@ -19286,7 +19047,16 @@ void Sema::ActOnStartFunctionDeclarationDeclarator(
ExplicitLists, /*IsFriend=*/false, IsMemberSpecialization, IsInvalid,
/*SuppressDiagnostic=*/true);
}
- if (ExplicitParams) {
+ // C++23 [dcl.fct]p23:
+ // An abbreviated function template can have a template-head. The invented
+ // template-parameters are appended to the template-parameter-list after
+ // the explicitly declared template-parameters.
+ //
+ // A template-head must have one or more template-parameters (read:
+ // 'template<>' is *not* a template-head). Only append the invented
+ // template parameters if we matched the nested-name-specifier to a non-empty
+ // TemplateParameterList.
+ if (ExplicitParams && !ExplicitParams->empty()) {
Info.AutoTemplateParameterDepth = ExplicitParams->getDepth();
llvm::append_range(Info.TemplateParams, *ExplicitParams);
Info.NumExplicitTemplateParams = ExplicitParams->size();
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp
index bb0d0cd2030b..807453400abd 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp
@@ -21,10 +21,13 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/DelayedDiagnostic.h"
+#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaObjC.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
@@ -39,8 +42,9 @@ using namespace clang;
///
/// \return true to indicate that there was an error and appropriate
/// actions were taken
-bool Sema::checkInitMethod(ObjCMethodDecl *method,
- QualType receiverTypeIfCall) {
+bool SemaObjC::checkInitMethod(ObjCMethodDecl *method,
+ QualType receiverTypeIfCall) {
+ ASTContext &Context = getASTContext();
if (method->isInvalidDecl()) return true;
// This castAs is safe: methods that don't return an object
@@ -97,7 +101,8 @@ bool Sema::checkInitMethod(ObjCMethodDecl *method,
// If we're in a system header, and this is not a call, just make
// the method unusable.
- if (receiverTypeIfCall.isNull() && getSourceManager().isInSystemHeader(loc)) {
+ if (receiverTypeIfCall.isNull() &&
+ SemaRef.getSourceManager().isInSystemHeader(loc)) {
method->addAttr(UnavailableAttr::CreateImplicit(Context, "",
UnavailableAttr::IR_ARCInitReturnsUnrelated, loc));
return true;
@@ -133,8 +138,9 @@ static void diagnoseNoescape(const ParmVarDecl *NewD, const ParmVarDecl *OldD,
<< cast<ObjCMethodDecl>(NewD->getDeclContext());
}
-void Sema::CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
- const ObjCMethodDecl *Overridden) {
+void SemaObjC::CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
+ const ObjCMethodDecl *Overridden) {
+ ASTContext &Context = getASTContext();
if (Overridden->hasRelatedResultType() &&
!NewMethod->hasRelatedResultType()) {
// This can only happen when the method follows a naming convention that
@@ -216,13 +222,14 @@ void Sema::CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
Diag(oldDecl->getLocation(), diag::note_previous_decl) << "parameter";
}
- diagnoseNoescape(newDecl, oldDecl, *this);
+ diagnoseNoescape(newDecl, oldDecl, SemaRef);
}
}
/// Check a method declaration for compatibility with the Objective-C
/// ARC conventions.
-bool Sema::CheckARCMethodDecl(ObjCMethodDecl *method) {
+bool SemaObjC::CheckARCMethodDecl(ObjCMethodDecl *method) {
+ ASTContext &Context = getASTContext();
ObjCMethodFamily family = method->getMethodFamily();
switch (family) {
case OMF_None:
@@ -326,7 +333,7 @@ static void DiagnoseObjCImplementedDeprecations(Sema &S, const NamedDecl *ND,
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
-void Sema::AddAnyMethodToGlobalPool(Decl *D) {
+void SemaObjC::AddAnyMethodToGlobalPool(Decl *D) {
ObjCMethodDecl *MDecl = dyn_cast_or_null<ObjCMethodDecl>(D);
// If we don't have a valid method decl, simply return.
@@ -359,12 +366,14 @@ HasExplicitOwnershipAttr(Sema &S, ParmVarDecl *Param) {
/// ActOnStartOfObjCMethodDef - This routine sets up parameters; invisible
/// and user declared, in the method definition's AST.
-void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
- ImplicitlyRetainedSelfLocs.clear();
- assert((getCurMethodDecl() == nullptr) && "Methodparsing confused");
+void SemaObjC::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
+ ASTContext &Context = getASTContext();
+ SemaRef.ImplicitlyRetainedSelfLocs.clear();
+ assert((SemaRef.getCurMethodDecl() == nullptr) && "Methodparsing confused");
ObjCMethodDecl *MDecl = dyn_cast_or_null<ObjCMethodDecl>(D);
- PushExpressionEvaluationContext(ExprEvalContexts.back().Context);
+ SemaRef.PushExpressionEvaluationContext(
+ SemaRef.ExprEvalContexts.back().Context);
// If we don't have a valid method decl, simply return.
if (!MDecl)
@@ -373,13 +382,13 @@ void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
QualType ResultType = MDecl->getReturnType();
if (!ResultType->isDependentType() && !ResultType->isVoidType() &&
!MDecl->isInvalidDecl() &&
- RequireCompleteType(MDecl->getLocation(), ResultType,
- diag::err_func_def_incomplete_result))
+ SemaRef.RequireCompleteType(MDecl->getLocation(), ResultType,
+ diag::err_func_def_incomplete_result))
MDecl->setInvalidDecl();
// Allow all of Sema to see that we are entering a method definition.
- PushDeclContext(FnBodyScope, MDecl);
- PushFunctionScope();
+ SemaRef.PushDeclContext(FnBodyScope, MDecl);
+ SemaRef.PushFunctionScope();
// Create Decl objects for each parameter, entrring them in the scope for
// binding to their use.
@@ -387,23 +396,22 @@ void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
// Insert the invisible arguments, self and _cmd!
MDecl->createImplicitParams(Context, MDecl->getClassInterface());
- PushOnScopeChains(MDecl->getSelfDecl(), FnBodyScope);
- PushOnScopeChains(MDecl->getCmdDecl(), FnBodyScope);
+ SemaRef.PushOnScopeChains(MDecl->getSelfDecl(), FnBodyScope);
+ SemaRef.PushOnScopeChains(MDecl->getCmdDecl(), FnBodyScope);
// The ObjC parser requires parameter names so there's no need to check.
- CheckParmsForFunctionDef(MDecl->parameters(),
- /*CheckParameterNames=*/false);
+ SemaRef.CheckParmsForFunctionDef(MDecl->parameters(),
+ /*CheckParameterNames=*/false);
// Introduce all of the other parameters into this scope.
for (auto *Param : MDecl->parameters()) {
- if (!Param->isInvalidDecl() &&
- getLangOpts().ObjCAutoRefCount &&
- !HasExplicitOwnershipAttr(*this, Param))
+ if (!Param->isInvalidDecl() && getLangOpts().ObjCAutoRefCount &&
+ !HasExplicitOwnershipAttr(SemaRef, Param))
Diag(Param->getLocation(), diag::warn_arc_strong_pointer_objc_pointer) <<
Param->getType();
if (Param->getIdentifier())
- PushOnScopeChains(Param, FnBodyScope);
+ SemaRef.PushOnScopeChains(Param, FnBodyScope);
}
// In ARC, disallow definition of retain/release/autorelease/retainCount
@@ -456,17 +464,17 @@ void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
// No need to issue deprecated warning if deprecated mehod in class/category
// is being implemented in its own implementation (no overriding is involved).
if (!ImplDeclOfMethodDecl || ImplDeclOfMethodDecl != ImplDeclOfMethodDef)
- DiagnoseObjCImplementedDeprecations(*this, IMD, MDecl->getLocation());
+ DiagnoseObjCImplementedDeprecations(SemaRef, IMD, MDecl->getLocation());
}
if (MDecl->getMethodFamily() == OMF_init) {
if (MDecl->isDesignatedInitializerForTheInterface()) {
- getCurFunction()->ObjCIsDesignatedInit = true;
- getCurFunction()->ObjCWarnForNoDesignatedInitChain =
+ SemaRef.getCurFunction()->ObjCIsDesignatedInit = true;
+ SemaRef.getCurFunction()->ObjCWarnForNoDesignatedInitChain =
IC->getSuperClass() != nullptr;
} else if (IC->hasDesignatedInitializers()) {
- getCurFunction()->ObjCIsSecondaryInit = true;
- getCurFunction()->ObjCWarnForNoInitDelegation = true;
+ SemaRef.getCurFunction()->ObjCIsSecondaryInit = true;
+ SemaRef.getCurFunction()->ObjCWarnForNoInitDelegation = true;
}
}
@@ -479,21 +487,25 @@ void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
if (Family == OMF_dealloc) {
if (!(getLangOpts().ObjCAutoRefCount ||
getLangOpts().getGC() == LangOptions::GCOnly))
- getCurFunction()->ObjCShouldCallSuper = true;
+ SemaRef.getCurFunction()->ObjCShouldCallSuper = true;
} else if (Family == OMF_finalize) {
if (Context.getLangOpts().getGC() != LangOptions::NonGC)
- getCurFunction()->ObjCShouldCallSuper = true;
+ SemaRef.getCurFunction()->ObjCShouldCallSuper = true;
} else {
const ObjCMethodDecl *SuperMethod =
SuperClass->lookupMethod(MDecl->getSelector(),
MDecl->isInstanceMethod());
- getCurFunction()->ObjCShouldCallSuper =
- (SuperMethod && SuperMethod->hasAttr<ObjCRequiresSuperAttr>());
+ SemaRef.getCurFunction()->ObjCShouldCallSuper =
+ (SuperMethod && SuperMethod->hasAttr<ObjCRequiresSuperAttr>());
}
}
}
+
+ // Some function attributes (like OptimizeNoneAttr) need actions before
+ // parsing body started.
+ SemaRef.applyFunctionAttributesBeforeParsingBody(D);
}
namespace {
@@ -538,29 +550,25 @@ static void diagnoseUseOfProtocols(Sema &TheSema,
}
}
-void Sema::
-ActOnSuperClassOfClassInterface(Scope *S,
- SourceLocation AtInterfaceLoc,
- ObjCInterfaceDecl *IDecl,
- IdentifierInfo *ClassName,
- SourceLocation ClassLoc,
- IdentifierInfo *SuperName,
- SourceLocation SuperLoc,
- ArrayRef<ParsedType> SuperTypeArgs,
- SourceRange SuperTypeArgsRange) {
+void SemaObjC::ActOnSuperClassOfClassInterface(
+ Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl,
+ IdentifierInfo *ClassName, SourceLocation ClassLoc,
+ IdentifierInfo *SuperName, SourceLocation SuperLoc,
+ ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange) {
+ ASTContext &Context = getASTContext();
// Check if a different kind of symbol declared in this scope.
- NamedDecl *PrevDecl = LookupSingleName(TUScope, SuperName, SuperLoc,
- LookupOrdinaryName);
+ NamedDecl *PrevDecl = SemaRef.LookupSingleName(
+ SemaRef.TUScope, SuperName, SuperLoc, Sema::LookupOrdinaryName);
if (!PrevDecl) {
// Try to correct for a typo in the superclass name without correcting
// to the class we're defining.
ObjCInterfaceValidatorCCC CCC(IDecl);
- if (TypoCorrection Corrected = CorrectTypo(
- DeclarationNameInfo(SuperName, SuperLoc), LookupOrdinaryName,
- TUScope, nullptr, CCC, CTK_ErrorRecovery)) {
- diagnoseTypo(Corrected, PDiag(diag::err_undef_superclass_suggest)
- << SuperName << ClassName);
+ if (TypoCorrection Corrected = SemaRef.CorrectTypo(
+ DeclarationNameInfo(SuperName, SuperLoc), Sema::LookupOrdinaryName,
+ SemaRef.TUScope, nullptr, CCC, Sema::CTK_ErrorRecovery)) {
+ SemaRef.diagnoseTypo(Corrected, PDiag(diag::err_undef_superclass_suggest)
+ << SuperName << ClassName);
PrevDecl = Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>();
}
}
@@ -576,7 +584,7 @@ ActOnSuperClassOfClassInterface(Scope *S,
// Diagnose classes that inherit from deprecated classes.
if (SuperClassDecl) {
- (void)DiagnoseUseOfDecl(SuperClassDecl, SuperLoc);
+ (void)SemaRef.DiagnoseUseOfDecl(SuperClassDecl, SuperLoc);
SuperClassType = Context.getObjCInterfaceType(SuperClassDecl);
}
@@ -595,7 +603,8 @@ ActOnSuperClassOfClassInterface(Scope *S,
// @interface NewI @end
// typedef NewI DeprI __attribute__((deprecated("blah")))
// @interface SI : DeprI /* warn here */ @end
- (void)DiagnoseUseOfDecl(const_cast<TypedefNameDecl*>(TDecl), SuperLoc);
+ (void)SemaRef.DiagnoseUseOfDecl(
+ const_cast<TypedefNameDecl *>(TDecl), SuperLoc);
}
}
}
@@ -615,12 +624,10 @@ ActOnSuperClassOfClassInterface(Scope *S,
if (!SuperClassDecl)
Diag(SuperLoc, diag::err_undef_superclass)
<< SuperName << ClassName << SourceRange(AtInterfaceLoc, ClassLoc);
- else if (RequireCompleteType(SuperLoc,
- SuperClassType,
- diag::err_forward_superclass,
- SuperClassDecl->getDeclName(),
- ClassName,
- SourceRange(AtInterfaceLoc, ClassLoc))) {
+ else if (SemaRef.RequireCompleteType(
+ SuperLoc, SuperClassType, diag::err_forward_superclass,
+ SuperClassDecl->getDeclName(), ClassName,
+ SourceRange(AtInterfaceLoc, ClassLoc))) {
SuperClassDecl = nullptr;
SuperClassType = QualType();
}
@@ -635,22 +642,15 @@ ActOnSuperClassOfClassInterface(Scope *S,
TypeSourceInfo *SuperClassTInfo = nullptr;
if (!SuperTypeArgs.empty()) {
TypeResult fullSuperClassType = actOnObjCTypeArgsAndProtocolQualifiers(
- S,
- SuperLoc,
- CreateParsedType(SuperClassType,
- nullptr),
- SuperTypeArgsRange.getBegin(),
- SuperTypeArgs,
- SuperTypeArgsRange.getEnd(),
- SourceLocation(),
- { },
- { },
- SourceLocation());
+ S, SuperLoc, SemaRef.CreateParsedType(SuperClassType, nullptr),
+ SuperTypeArgsRange.getBegin(), SuperTypeArgs,
+ SuperTypeArgsRange.getEnd(), SourceLocation(), {}, {},
+ SourceLocation());
if (!fullSuperClassType.isUsable())
return;
- SuperClassType = GetTypeFromParser(fullSuperClassType.get(),
- &SuperClassTInfo);
+ SuperClassType =
+ SemaRef.GetTypeFromParser(fullSuperClassType.get(), &SuperClassTInfo);
}
if (!SuperClassTInfo) {
@@ -663,26 +663,24 @@ ActOnSuperClassOfClassInterface(Scope *S,
}
}
-DeclResult Sema::actOnObjCTypeParam(Scope *S,
- ObjCTypeParamVariance variance,
- SourceLocation varianceLoc,
- unsigned index,
- IdentifierInfo *paramName,
- SourceLocation paramLoc,
- SourceLocation colonLoc,
- ParsedType parsedTypeBound) {
+DeclResult SemaObjC::actOnObjCTypeParam(
+ Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc,
+ unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc,
+ SourceLocation colonLoc, ParsedType parsedTypeBound) {
+ ASTContext &Context = getASTContext();
// If there was an explicitly-provided type bound, check it.
TypeSourceInfo *typeBoundInfo = nullptr;
if (parsedTypeBound) {
// The type bound can be any Objective-C pointer type.
- QualType typeBound = GetTypeFromParser(parsedTypeBound, &typeBoundInfo);
+ QualType typeBound =
+ SemaRef.GetTypeFromParser(parsedTypeBound, &typeBoundInfo);
if (typeBound->isObjCObjectPointerType()) {
// okay
} else if (typeBound->isObjCObjectType()) {
// The user forgot the * on an Objective-C pointer type, e.g.,
// "T : NSView".
- SourceLocation starLoc = getLocForEndOfToken(
- typeBoundInfo->getTypeLoc().getEndLoc());
+ SourceLocation starLoc =
+ SemaRef.getLocForEndOfToken(typeBoundInfo->getTypeLoc().getEndLoc());
Diag(typeBoundInfo->getTypeLoc().getBeginLoc(),
diag::err_objc_type_param_bound_missing_pointer)
<< typeBound << paramName
@@ -762,15 +760,16 @@ DeclResult Sema::actOnObjCTypeParam(Scope *S,
}
// Create the type parameter.
- return ObjCTypeParamDecl::Create(Context, CurContext, variance, varianceLoc,
- index, paramLoc, paramName, colonLoc,
- typeBoundInfo);
+ return ObjCTypeParamDecl::Create(Context, SemaRef.CurContext, variance,
+ varianceLoc, index, paramLoc, paramName,
+ colonLoc, typeBoundInfo);
}
-ObjCTypeParamList *Sema::actOnObjCTypeParamList(Scope *S,
- SourceLocation lAngleLoc,
- ArrayRef<Decl *> typeParamsIn,
- SourceLocation rAngleLoc) {
+ObjCTypeParamList *
+SemaObjC::actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
+ ArrayRef<Decl *> typeParamsIn,
+ SourceLocation rAngleLoc) {
+ ASTContext &Context = getASTContext();
// We know that the array only contains Objective-C type parameters.
ArrayRef<ObjCTypeParamDecl *>
typeParams(
@@ -794,7 +793,7 @@ ObjCTypeParamList *Sema::actOnObjCTypeParamList(Scope *S,
knownParams.insert(std::make_pair(typeParam->getIdentifier(), typeParam));
// Push the type parameter into scope.
- PushOnScopeChains(typeParam, S, /*AddToContext=*/false);
+ SemaRef.PushOnScopeChains(typeParam, S, /*AddToContext=*/false);
}
}
@@ -802,11 +801,12 @@ ObjCTypeParamList *Sema::actOnObjCTypeParamList(Scope *S,
return ObjCTypeParamList::create(Context, lAngleLoc, typeParams, rAngleLoc);
}
-void Sema::popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList) {
+void SemaObjC::popObjCTypeParamList(Scope *S,
+ ObjCTypeParamList *typeParamList) {
for (auto *typeParam : *typeParamList) {
if (!typeParam->isInvalidDecl()) {
S->RemoveDecl(typeParam);
- IdResolver.RemoveDecl(typeParam);
+ SemaRef.IdResolver.RemoveDecl(typeParam);
}
}
}
@@ -971,7 +971,7 @@ static bool checkTypeParamListConsistency(Sema &S,
return false;
}
-ObjCInterfaceDecl *Sema::ActOnStartClassInterface(
+ObjCInterfaceDecl *SemaObjC::ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
@@ -981,10 +981,11 @@ ObjCInterfaceDecl *Sema::ActOnStartClassInterface(
const ParsedAttributesView &AttrList, SkipBodyInfo *SkipBody) {
assert(ClassName && "Missing class identifier");
+ ASTContext &Context = getASTContext();
// Check for another declaration kind with the same name.
- NamedDecl *PrevDecl =
- LookupSingleName(TUScope, ClassName, ClassLoc, LookupOrdinaryName,
- forRedeclarationInCurContext());
+ NamedDecl *PrevDecl = SemaRef.LookupSingleName(
+ SemaRef.TUScope, ClassName, ClassLoc, Sema::LookupOrdinaryName,
+ SemaRef.forRedeclarationInCurContext());
if (PrevDecl && !isa<ObjCInterfaceDecl>(PrevDecl)) {
Diag(ClassLoc, diag::err_redefinition_different_kind) << ClassName;
@@ -1016,7 +1017,7 @@ ObjCInterfaceDecl *Sema::ActOnStartClassInterface(
if (ObjCTypeParamList *prevTypeParamList = PrevIDecl->getTypeParamList()) {
if (typeParamList) {
// Both have type parameter lists; check for consistency.
- if (checkTypeParamListConsistency(*this, prevTypeParamList,
+ if (checkTypeParamListConsistency(SemaRef, prevTypeParamList,
typeParamList,
TypeParamListContext::Definition)) {
typeParamList = nullptr;
@@ -1030,17 +1031,12 @@ ObjCInterfaceDecl *Sema::ActOnStartClassInterface(
// Clone the type parameter list.
SmallVector<ObjCTypeParamDecl *, 4> clonedTypeParams;
for (auto *typeParam : *prevTypeParamList) {
- clonedTypeParams.push_back(
- ObjCTypeParamDecl::Create(
- Context,
- CurContext,
- typeParam->getVariance(),
- SourceLocation(),
- typeParam->getIndex(),
- SourceLocation(),
- typeParam->getIdentifier(),
- SourceLocation(),
- Context.getTrivialTypeSourceInfo(typeParam->getUnderlyingType())));
+ clonedTypeParams.push_back(ObjCTypeParamDecl::Create(
+ Context, SemaRef.CurContext, typeParam->getVariance(),
+ SourceLocation(), typeParam->getIndex(), SourceLocation(),
+ typeParam->getIdentifier(), SourceLocation(),
+ Context.getTrivialTypeSourceInfo(
+ typeParam->getUnderlyingType())));
}
typeParamList = ObjCTypeParamList::create(Context,
@@ -1051,13 +1047,13 @@ ObjCInterfaceDecl *Sema::ActOnStartClassInterface(
}
}
- ObjCInterfaceDecl *IDecl
- = ObjCInterfaceDecl::Create(Context, CurContext, AtInterfaceLoc, ClassName,
- typeParamList, PrevIDecl, ClassLoc);
+ ObjCInterfaceDecl *IDecl =
+ ObjCInterfaceDecl::Create(Context, SemaRef.CurContext, AtInterfaceLoc,
+ ClassName, typeParamList, PrevIDecl, ClassLoc);
if (PrevIDecl) {
// Class already seen. Was it a definition?
if (ObjCInterfaceDecl *Def = PrevIDecl->getDefinition()) {
- if (SkipBody && !hasVisibleDefinition(Def)) {
+ if (SkipBody && !SemaRef.hasVisibleDefinition(Def)) {
SkipBody->CheckSameAsPrevious = true;
SkipBody->New = IDecl;
SkipBody->Previous = Def;
@@ -1070,14 +1066,15 @@ ObjCInterfaceDecl *Sema::ActOnStartClassInterface(
}
}
- ProcessDeclAttributeList(TUScope, IDecl, AttrList);
- AddPragmaAttributes(TUScope, IDecl);
+ SemaRef.ProcessDeclAttributeList(SemaRef.TUScope, IDecl, AttrList);
+ SemaRef.AddPragmaAttributes(SemaRef.TUScope, IDecl);
+ SemaRef.ProcessAPINotes(IDecl);
// Merge attributes from previous declarations.
if (PrevIDecl)
- mergeDeclAttributes(IDecl, PrevIDecl);
+ SemaRef.mergeDeclAttributes(IDecl, PrevIDecl);
- PushOnScopeChains(IDecl, TUScope);
+ SemaRef.PushOnScopeChains(IDecl, SemaRef.TUScope);
// Start the definition of this class. If we're in a redefinition case, there
// may already be a definition, so we'll end up adding to it.
@@ -1088,7 +1085,7 @@ ObjCInterfaceDecl *Sema::ActOnStartClassInterface(
if (SuperName) {
// Diagnose availability in the context of the @interface.
- ContextRAII SavedContext(*this, IDecl);
+ Sema::ContextRAII SavedContext(SemaRef, IDecl);
ActOnSuperClassOfClassInterface(S, AtInterfaceLoc, IDecl,
ClassName, ClassLoc,
@@ -1100,7 +1097,7 @@ ObjCInterfaceDecl *Sema::ActOnStartClassInterface(
// Check then save referenced protocols.
if (NumProtoRefs) {
- diagnoseUseOfProtocols(*this, IDecl, (ObjCProtocolDecl*const*)ProtoRefs,
+ diagnoseUseOfProtocols(SemaRef, IDecl, (ObjCProtocolDecl *const *)ProtoRefs,
NumProtoRefs, ProtoLocs);
IDecl->setProtocolList((ObjCProtocolDecl*const*)ProtoRefs, NumProtoRefs,
ProtoLocs, Context);
@@ -1115,14 +1112,14 @@ ObjCInterfaceDecl *Sema::ActOnStartClassInterface(
/// ActOnTypedefedProtocols - this action finds protocol list as part of the
/// typedef'ed use for a qualified super class and adds them to the list
/// of the protocols.
-void Sema::ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
- SmallVectorImpl<SourceLocation> &ProtocolLocs,
- IdentifierInfo *SuperName,
- SourceLocation SuperLoc) {
+void SemaObjC::ActOnTypedefedProtocols(
+ SmallVectorImpl<Decl *> &ProtocolRefs,
+ SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName,
+ SourceLocation SuperLoc) {
if (!SuperName)
return;
- NamedDecl* IDecl = LookupSingleName(TUScope, SuperName, SuperLoc,
- LookupOrdinaryName);
+ NamedDecl *IDecl = SemaRef.LookupSingleName(
+ SemaRef.TUScope, SuperName, SuperLoc, Sema::LookupOrdinaryName);
if (!IDecl)
return;
@@ -1142,33 +1139,34 @@ void Sema::ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
/// ActOnCompatibilityAlias - this action is called after complete parsing of
/// a \@compatibility_alias declaration. It sets up the alias relationships.
-Decl *Sema::ActOnCompatibilityAlias(SourceLocation AtLoc,
- IdentifierInfo *AliasName,
- SourceLocation AliasLocation,
- IdentifierInfo *ClassName,
- SourceLocation ClassLocation) {
+Decl *SemaObjC::ActOnCompatibilityAlias(SourceLocation AtLoc,
+ IdentifierInfo *AliasName,
+ SourceLocation AliasLocation,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassLocation) {
+ ASTContext &Context = getASTContext();
// Look for previous declaration of alias name
- NamedDecl *ADecl =
- LookupSingleName(TUScope, AliasName, AliasLocation, LookupOrdinaryName,
- forRedeclarationInCurContext());
+ NamedDecl *ADecl = SemaRef.LookupSingleName(
+ SemaRef.TUScope, AliasName, AliasLocation, Sema::LookupOrdinaryName,
+ SemaRef.forRedeclarationInCurContext());
if (ADecl) {
Diag(AliasLocation, diag::err_conflicting_aliasing_type) << AliasName;
Diag(ADecl->getLocation(), diag::note_previous_declaration);
return nullptr;
}
// Check for class declaration
- NamedDecl *CDeclU =
- LookupSingleName(TUScope, ClassName, ClassLocation, LookupOrdinaryName,
- forRedeclarationInCurContext());
+ NamedDecl *CDeclU = SemaRef.LookupSingleName(
+ SemaRef.TUScope, ClassName, ClassLocation, Sema::LookupOrdinaryName,
+ SemaRef.forRedeclarationInCurContext());
if (const TypedefNameDecl *TDecl =
dyn_cast_or_null<TypedefNameDecl>(CDeclU)) {
QualType T = TDecl->getUnderlyingType();
if (T->isObjCObjectType()) {
if (NamedDecl *IDecl = T->castAs<ObjCObjectType>()->getInterface()) {
ClassName = IDecl->getIdentifier();
- CDeclU = LookupSingleName(TUScope, ClassName, ClassLocation,
- LookupOrdinaryName,
- forRedeclarationInCurContext());
+ CDeclU = SemaRef.LookupSingleName(
+ SemaRef.TUScope, ClassName, ClassLocation, Sema::LookupOrdinaryName,
+ SemaRef.forRedeclarationInCurContext());
}
}
}
@@ -1181,25 +1179,23 @@ Decl *Sema::ActOnCompatibilityAlias(SourceLocation AtLoc,
}
// Everything checked out, instantiate a new alias declaration AST.
- ObjCCompatibleAliasDecl *AliasDecl =
- ObjCCompatibleAliasDecl::Create(Context, CurContext, AtLoc, AliasName, CDecl);
+ ObjCCompatibleAliasDecl *AliasDecl = ObjCCompatibleAliasDecl::Create(
+ Context, SemaRef.CurContext, AtLoc, AliasName, CDecl);
if (!CheckObjCDeclScope(AliasDecl))
- PushOnScopeChains(AliasDecl, TUScope);
+ SemaRef.PushOnScopeChains(AliasDecl, SemaRef.TUScope);
return AliasDecl;
}
-bool Sema::CheckForwardProtocolDeclarationForCircularDependency(
- IdentifierInfo *PName,
- SourceLocation &Ploc, SourceLocation PrevLoc,
- const ObjCList<ObjCProtocolDecl> &PList) {
+bool SemaObjC::CheckForwardProtocolDeclarationForCircularDependency(
+ IdentifierInfo *PName, SourceLocation &Ploc, SourceLocation PrevLoc,
+ const ObjCList<ObjCProtocolDecl> &PList) {
bool res = false;
for (ObjCList<ObjCProtocolDecl>::iterator I = PList.begin(),
E = PList.end(); I != E; ++I) {
- if (ObjCProtocolDecl *PDecl = LookupProtocol((*I)->getIdentifier(),
- Ploc)) {
+ if (ObjCProtocolDecl *PDecl = LookupProtocol((*I)->getIdentifier(), Ploc)) {
if (PDecl->getIdentifier() == PName) {
Diag(Ploc, diag::err_protocol_has_circular_dependency);
Diag(PrevLoc, diag::note_previous_definition);
@@ -1217,27 +1213,28 @@ bool Sema::CheckForwardProtocolDeclarationForCircularDependency(
return res;
}
-ObjCProtocolDecl *Sema::ActOnStartProtocolInterface(
+ObjCProtocolDecl *SemaObjC::ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList, SkipBodyInfo *SkipBody) {
+ ASTContext &Context = getASTContext();
bool err = false;
// FIXME: Deal with AttrList.
assert(ProtocolName && "Missing protocol identifier");
- ObjCProtocolDecl *PrevDecl = LookupProtocol(ProtocolName, ProtocolLoc,
- forRedeclarationInCurContext());
+ ObjCProtocolDecl *PrevDecl = LookupProtocol(
+ ProtocolName, ProtocolLoc, SemaRef.forRedeclarationInCurContext());
ObjCProtocolDecl *PDecl = nullptr;
if (ObjCProtocolDecl *Def = PrevDecl? PrevDecl->getDefinition() : nullptr) {
// Create a new protocol that is completely distinct from previous
// declarations, and do not make this protocol available for name lookup.
// That way, we'll end up completely ignoring the duplicate.
// FIXME: Can we turn this into an error?
- PDecl = ObjCProtocolDecl::Create(Context, CurContext, ProtocolName,
+ PDecl = ObjCProtocolDecl::Create(Context, SemaRef.CurContext, ProtocolName,
ProtocolLoc, AtProtoInterfaceLoc,
/*PrevDecl=*/Def);
- if (SkipBody && !hasVisibleDefinition(Def)) {
+ if (SkipBody && !SemaRef.hasVisibleDefinition(Def)) {
SkipBody->CheckSameAsPrevious = true;
SkipBody->New = PDecl;
SkipBody->Previous = Def;
@@ -1250,7 +1247,7 @@ ObjCProtocolDecl *Sema::ActOnStartProtocolInterface(
// If we are using modules, add the decl to the context in order to
// serialize something meaningful.
if (getLangOpts().Modules)
- PushOnScopeChains(PDecl, TUScope);
+ SemaRef.PushOnScopeChains(PDecl, SemaRef.TUScope);
PDecl->startDuplicateDefinitionForComparison();
} else {
if (PrevDecl) {
@@ -1263,24 +1260,25 @@ ObjCProtocolDecl *Sema::ActOnStartProtocolInterface(
}
// Create the new declaration.
- PDecl = ObjCProtocolDecl::Create(Context, CurContext, ProtocolName,
+ PDecl = ObjCProtocolDecl::Create(Context, SemaRef.CurContext, ProtocolName,
ProtocolLoc, AtProtoInterfaceLoc,
/*PrevDecl=*/PrevDecl);
- PushOnScopeChains(PDecl, TUScope);
+ SemaRef.PushOnScopeChains(PDecl, SemaRef.TUScope);
PDecl->startDefinition();
}
- ProcessDeclAttributeList(TUScope, PDecl, AttrList);
- AddPragmaAttributes(TUScope, PDecl);
+ SemaRef.ProcessDeclAttributeList(SemaRef.TUScope, PDecl, AttrList);
+ SemaRef.AddPragmaAttributes(SemaRef.TUScope, PDecl);
+ SemaRef.ProcessAPINotes(PDecl);
// Merge attributes from previous declarations.
if (PrevDecl)
- mergeDeclAttributes(PDecl, PrevDecl);
+ SemaRef.mergeDeclAttributes(PDecl, PrevDecl);
if (!err && NumProtoRefs ) {
/// Check then save referenced protocols.
- diagnoseUseOfProtocols(*this, PDecl, (ObjCProtocolDecl*const*)ProtoRefs,
+ diagnoseUseOfProtocols(SemaRef, PDecl, (ObjCProtocolDecl *const *)ProtoRefs,
NumProtoRefs, ProtoLocs);
PDecl->setProtocolList((ObjCProtocolDecl*const*)ProtoRefs, NumProtoRefs,
ProtoLocs, Context);
@@ -1310,20 +1308,22 @@ static bool NestedProtocolHasNoDefinition(ObjCProtocolDecl *PDecl,
/// FindProtocolDeclaration - This routine looks up protocols and
/// issues an error if they are not declared. It returns list of
/// protocol declarations in its 'Protocols' argument.
-void
-Sema::FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
- ArrayRef<IdentifierLocPair> ProtocolId,
- SmallVectorImpl<Decl *> &Protocols) {
+void SemaObjC::FindProtocolDeclaration(bool WarnOnDeclarations,
+ bool ForObjCContainer,
+ ArrayRef<IdentifierLocPair> ProtocolId,
+ SmallVectorImpl<Decl *> &Protocols) {
for (const IdentifierLocPair &Pair : ProtocolId) {
ObjCProtocolDecl *PDecl = LookupProtocol(Pair.first, Pair.second);
if (!PDecl) {
DeclFilterCCC<ObjCProtocolDecl> CCC{};
- TypoCorrection Corrected = CorrectTypo(
- DeclarationNameInfo(Pair.first, Pair.second), LookupObjCProtocolName,
- TUScope, nullptr, CCC, CTK_ErrorRecovery);
+ TypoCorrection Corrected =
+ SemaRef.CorrectTypo(DeclarationNameInfo(Pair.first, Pair.second),
+ Sema::LookupObjCProtocolName, SemaRef.TUScope,
+ nullptr, CCC, Sema::CTK_ErrorRecovery);
if ((PDecl = Corrected.getCorrectionDeclAs<ObjCProtocolDecl>()))
- diagnoseTypo(Corrected, PDiag(diag::err_undeclared_protocol_suggest)
- << Pair.first);
+ SemaRef.diagnoseTypo(Corrected,
+ PDiag(diag::err_undeclared_protocol_suggest)
+ << Pair.first);
}
if (!PDecl) {
@@ -1337,7 +1337,7 @@ Sema::FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
// For an objc container, delay protocol reference checking until after we
// can set the objc decl as the availability context, otherwise check now.
if (!ForObjCContainer) {
- (void)DiagnoseUseOfDecl(PDecl, Pair.second);
+ (void)SemaRef.DiagnoseUseOfDecl(PDecl, Pair.second);
}
// If this is a forward declaration and we are supposed to warn in this
@@ -1413,30 +1413,25 @@ class ObjCTypeArgOrProtocolValidatorCCC final
};
} // end anonymous namespace
-void Sema::DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
- SourceLocation ProtocolLoc,
- IdentifierInfo *TypeArgId,
- SourceLocation TypeArgLoc,
- bool SelectProtocolFirst) {
+void SemaObjC::DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
+ SourceLocation ProtocolLoc,
+ IdentifierInfo *TypeArgId,
+ SourceLocation TypeArgLoc,
+ bool SelectProtocolFirst) {
Diag(TypeArgLoc, diag::err_objc_type_args_and_protocols)
<< SelectProtocolFirst << TypeArgId << ProtocolId
<< SourceRange(ProtocolLoc);
}
-void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
- Scope *S,
- ParsedType baseType,
- SourceLocation lAngleLoc,
- ArrayRef<IdentifierInfo *> identifiers,
- ArrayRef<SourceLocation> identifierLocs,
- SourceLocation rAngleLoc,
- SourceLocation &typeArgsLAngleLoc,
- SmallVectorImpl<ParsedType> &typeArgs,
- SourceLocation &typeArgsRAngleLoc,
- SourceLocation &protocolLAngleLoc,
- SmallVectorImpl<Decl *> &protocols,
- SourceLocation &protocolRAngleLoc,
- bool warnOnIncompleteProtocols) {
+void SemaObjC::actOnObjCTypeArgsOrProtocolQualifiers(
+ Scope *S, ParsedType baseType, SourceLocation lAngleLoc,
+ ArrayRef<IdentifierInfo *> identifiers,
+ ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc,
+ SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs,
+ SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc,
+ SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc,
+ bool warnOnIncompleteProtocols) {
+ ASTContext &Context = getASTContext();
// Local function that updates the declaration specifiers with
// protocol information.
unsigned numProtocolsResolved = 0;
@@ -1447,7 +1442,7 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
// which case we want to warn about typos such as
// "NSArray<NSObject>" (that should be NSArray<NSObject *>).
ObjCInterfaceDecl *baseClass = nullptr;
- QualType base = GetTypeFromParser(baseType, nullptr);
+ QualType base = SemaRef.GetTypeFromParser(baseType, nullptr);
bool allAreTypeNames = false;
SourceLocation firstClassNameLoc;
if (!base.isNull()) {
@@ -1470,7 +1465,7 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
// For an objc container, delay protocol reference checking until after we
// can set the objc decl as the availability context, otherwise check now.
if (!warnOnIncompleteProtocols) {
- (void)DiagnoseUseOfDecl(proto, identifierLocs[i]);
+ (void)SemaRef.DiagnoseUseOfDecl(proto, identifierLocs[i]);
}
// If this is a forward protocol declaration, get its definition.
@@ -1493,8 +1488,9 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
// about such things), check whether this name refers to a type
// as well.
if (allAreTypeNames) {
- if (auto *decl = LookupSingleName(S, identifiers[i], identifierLocs[i],
- LookupOrdinaryName)) {
+ if (auto *decl =
+ SemaRef.LookupSingleName(S, identifiers[i], identifierLocs[i],
+ Sema::LookupOrdinaryName)) {
if (isa<ObjCInterfaceDecl>(decl)) {
if (firstClassNameLoc.isInvalid())
firstClassNameLoc = identifierLocs[i];
@@ -1525,9 +1521,9 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
if (allProtocolsDeclared) {
Diag(firstClassNameLoc, diag::warn_objc_redundant_qualified_class_type)
- << baseClass->getDeclName() << SourceRange(lAngleLoc, rAngleLoc)
- << FixItHint::CreateInsertion(getLocForEndOfToken(firstClassNameLoc),
- " *");
+ << baseClass->getDeclName() << SourceRange(lAngleLoc, rAngleLoc)
+ << FixItHint::CreateInsertion(
+ SemaRef.getLocForEndOfToken(firstClassNameLoc), " *");
}
}
@@ -1556,8 +1552,8 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
SmallVector<TypeOrClassDecl, 4> typeDecls;
unsigned numTypeDeclsResolved = 0;
for (unsigned i = 0, n = identifiers.size(); i != n; ++i) {
- NamedDecl *decl = LookupSingleName(S, identifiers[i], identifierLocs[i],
- LookupOrdinaryName);
+ NamedDecl *decl = SemaRef.LookupSingleName(
+ S, identifiers[i], identifierLocs[i], Sema::LookupOrdinaryName);
if (!decl) {
typeDecls.push_back(TypeOrClassDecl());
continue;
@@ -1594,7 +1590,7 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
else
type = Context.getObjCInterfaceType(typeDecl.get<ObjCInterfaceDecl *>());
TypeSourceInfo *parsedTSInfo = Context.getTrivialTypeSourceInfo(type, loc);
- ParsedType parsedType = CreateParsedType(type, parsedTSInfo);
+ ParsedType parsedType = SemaRef.CreateParsedType(type, parsedTSInfo);
DS.SetTypeSpecType(DeclSpec::TST_typename, loc, prevSpec, diagID,
parsedType, Context.getPrintingPolicy());
// Use the identifier location for the type source range.
@@ -1607,7 +1603,7 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
// If we have a typedef of an Objective-C class type that is missing a '*',
// add the '*'.
if (type->getAs<ObjCInterfaceType>()) {
- SourceLocation starLoc = getLocForEndOfToken(loc);
+ SourceLocation starLoc = SemaRef.getLocForEndOfToken(loc);
D.AddTypeInfo(DeclaratorChunk::getPointer(/*TypeQuals=*/0, starLoc,
SourceLocation(),
SourceLocation(),
@@ -1623,7 +1619,7 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
}
// Convert this to a type.
- return ActOnTypeName(D);
+ return SemaRef.ActOnTypeName(D);
};
// Local function that updates the declaration specifiers with
@@ -1657,14 +1653,14 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
// Error recovery: some names weren't found, or we have a mix of
// type and protocol names. Go resolve all of the unresolved names
// and complain if we can't find a consistent answer.
- LookupNameKind lookupKind = LookupAnyName;
+ Sema::LookupNameKind lookupKind = Sema::LookupAnyName;
for (unsigned i = 0, n = identifiers.size(); i != n; ++i) {
// If we already have a protocol or type. Check whether it is the
// right thing.
if (protocols[i] || typeDecls[i]) {
// If we haven't figured out whether we want types or protocols
// yet, try to figure it out from this name.
- if (lookupKind == LookupAnyName) {
+ if (lookupKind == Sema::LookupAnyName) {
// If this name refers to both a protocol and a type (e.g., \c
// NSObject), don't conclude anything yet.
if (protocols[i] && typeDecls[i])
@@ -1672,19 +1668,19 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
// Otherwise, let this name decide whether we'll be correcting
// toward types or protocols.
- lookupKind = protocols[i] ? LookupObjCProtocolName
- : LookupOrdinaryName;
+ lookupKind = protocols[i] ? Sema::LookupObjCProtocolName
+ : Sema::LookupOrdinaryName;
continue;
}
// If we want protocols and we have a protocol, there's nothing
// more to do.
- if (lookupKind == LookupObjCProtocolName && protocols[i])
+ if (lookupKind == Sema::LookupObjCProtocolName && protocols[i])
continue;
// If we want types and we have a type declaration, there's
// nothing more to do.
- if (lookupKind == LookupOrdinaryName && typeDecls[i])
+ if (lookupKind == Sema::LookupOrdinaryName && typeDecls[i])
continue;
// We have a conflict: some names refer to protocols and others
@@ -1700,16 +1696,16 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
// Perform typo correction on the name.
ObjCTypeArgOrProtocolValidatorCCC CCC(Context, lookupKind);
- TypoCorrection corrected =
- CorrectTypo(DeclarationNameInfo(identifiers[i], identifierLocs[i]),
- lookupKind, S, nullptr, CCC, CTK_ErrorRecovery);
+ TypoCorrection corrected = SemaRef.CorrectTypo(
+ DeclarationNameInfo(identifiers[i], identifierLocs[i]), lookupKind, S,
+ nullptr, CCC, Sema::CTK_ErrorRecovery);
if (corrected) {
// Did we find a protocol?
if (auto proto = corrected.getCorrectionDeclAs<ObjCProtocolDecl>()) {
- diagnoseTypo(corrected,
- PDiag(diag::err_undeclared_protocol_suggest)
- << identifiers[i]);
- lookupKind = LookupObjCProtocolName;
+ SemaRef.diagnoseTypo(corrected,
+ PDiag(diag::err_undeclared_protocol_suggest)
+ << identifiers[i]);
+ lookupKind = Sema::LookupObjCProtocolName;
protocols[i] = proto;
++numProtocolsResolved;
continue;
@@ -1717,10 +1713,10 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
// Did we find a type?
if (auto typeDecl = corrected.getCorrectionDeclAs<TypeDecl>()) {
- diagnoseTypo(corrected,
- PDiag(diag::err_unknown_typename_suggest)
- << identifiers[i]);
- lookupKind = LookupOrdinaryName;
+ SemaRef.diagnoseTypo(corrected,
+ PDiag(diag::err_unknown_typename_suggest)
+ << identifiers[i]);
+ lookupKind = Sema::LookupOrdinaryName;
typeDecls[i] = typeDecl;
++numTypeDeclsResolved;
continue;
@@ -1728,10 +1724,10 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
// Did we find an Objective-C class?
if (auto objcClass = corrected.getCorrectionDeclAs<ObjCInterfaceDecl>()) {
- diagnoseTypo(corrected,
- PDiag(diag::err_unknown_type_or_class_name_suggest)
- << identifiers[i] << true);
- lookupKind = LookupOrdinaryName;
+ SemaRef.diagnoseTypo(corrected,
+ PDiag(diag::err_unknown_type_or_class_name_suggest)
+ << identifiers[i] << true);
+ lookupKind = Sema::LookupOrdinaryName;
typeDecls[i] = objcClass;
++numTypeDeclsResolved;
continue;
@@ -1740,10 +1736,11 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
// We couldn't find anything.
Diag(identifierLocs[i],
- (lookupKind == LookupAnyName ? diag::err_objc_type_arg_missing
- : lookupKind == LookupObjCProtocolName ? diag::err_undeclared_protocol
- : diag::err_unknown_typename))
- << identifiers[i];
+ (lookupKind == Sema::LookupAnyName ? diag::err_objc_type_arg_missing
+ : lookupKind == Sema::LookupObjCProtocolName
+ ? diag::err_undeclared_protocol
+ : diag::err_unknown_typename))
+ << identifiers[i];
protocols.clear();
typeArgs.clear();
return;
@@ -1762,8 +1759,8 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
/// DiagnoseClassExtensionDupMethods - Check for duplicate declaration of
/// a class method in its extension.
///
-void Sema::DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
- ObjCInterfaceDecl *ID) {
+void SemaObjC::DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
+ ObjCInterfaceDecl *ID) {
if (!ID)
return; // Possibly due to previous error
@@ -1786,59 +1783,59 @@ void Sema::DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
}
/// ActOnForwardProtocolDeclaration - Handle \@protocol foo;
-Sema::DeclGroupPtrTy
-Sema::ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
- ArrayRef<IdentifierLocPair> IdentList,
- const ParsedAttributesView &attrList) {
+SemaObjC::DeclGroupPtrTy SemaObjC::ActOnForwardProtocolDeclaration(
+ SourceLocation AtProtocolLoc, ArrayRef<IdentifierLocPair> IdentList,
+ const ParsedAttributesView &attrList) {
+ ASTContext &Context = getASTContext();
SmallVector<Decl *, 8> DeclsInGroup;
for (const IdentifierLocPair &IdentPair : IdentList) {
IdentifierInfo *Ident = IdentPair.first;
- ObjCProtocolDecl *PrevDecl = LookupProtocol(Ident, IdentPair.second,
- forRedeclarationInCurContext());
- ObjCProtocolDecl *PDecl
- = ObjCProtocolDecl::Create(Context, CurContext, Ident,
- IdentPair.second, AtProtocolLoc,
- PrevDecl);
-
- PushOnScopeChains(PDecl, TUScope);
+ ObjCProtocolDecl *PrevDecl = LookupProtocol(
+ Ident, IdentPair.second, SemaRef.forRedeclarationInCurContext());
+ ObjCProtocolDecl *PDecl =
+ ObjCProtocolDecl::Create(Context, SemaRef.CurContext, Ident,
+ IdentPair.second, AtProtocolLoc, PrevDecl);
+
+ SemaRef.PushOnScopeChains(PDecl, SemaRef.TUScope);
CheckObjCDeclScope(PDecl);
- ProcessDeclAttributeList(TUScope, PDecl, attrList);
- AddPragmaAttributes(TUScope, PDecl);
+ SemaRef.ProcessDeclAttributeList(SemaRef.TUScope, PDecl, attrList);
+ SemaRef.AddPragmaAttributes(SemaRef.TUScope, PDecl);
if (PrevDecl)
- mergeDeclAttributes(PDecl, PrevDecl);
+ SemaRef.mergeDeclAttributes(PDecl, PrevDecl);
DeclsInGroup.push_back(PDecl);
}
- return BuildDeclaratorGroup(DeclsInGroup);
+ return SemaRef.BuildDeclaratorGroup(DeclsInGroup);
}
-ObjCCategoryDecl *Sema::ActOnStartCategoryInterface(
- SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
+ObjCCategoryDecl *SemaObjC::ActOnStartCategoryInterface(
+ SourceLocation AtInterfaceLoc, const IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
- IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
+ const IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList) {
+ ASTContext &Context = getASTContext();
ObjCCategoryDecl *CDecl;
ObjCInterfaceDecl *IDecl = getObjCInterfaceDecl(ClassName, ClassLoc, true);
/// Check that class of this category is already completely declared.
- if (!IDecl
- || RequireCompleteType(ClassLoc, Context.getObjCInterfaceType(IDecl),
- diag::err_category_forward_interface,
- CategoryName == nullptr)) {
+ if (!IDecl ||
+ SemaRef.RequireCompleteType(ClassLoc, Context.getObjCInterfaceType(IDecl),
+ diag::err_category_forward_interface,
+ CategoryName == nullptr)) {
// Create an invalid ObjCCategoryDecl to serve as context for
// the enclosing method declarations. We mark the decl invalid
// to make it clear that this isn't a valid AST.
- CDecl = ObjCCategoryDecl::Create(Context, CurContext, AtInterfaceLoc,
- ClassLoc, CategoryLoc, CategoryName,
- IDecl, typeParamList);
+ CDecl = ObjCCategoryDecl::Create(Context, SemaRef.CurContext,
+ AtInterfaceLoc, ClassLoc, CategoryLoc,
+ CategoryName, IDecl, typeParamList);
CDecl->setInvalidDecl();
- CurContext->addDecl(CDecl);
+ SemaRef.CurContext->addDecl(CDecl);
if (!IDecl)
Diag(ClassLoc, diag::err_undef_interface) << ClassName;
@@ -1866,10 +1863,10 @@ ObjCCategoryDecl *Sema::ActOnStartCategoryInterface(
// If we have a type parameter list, check it.
if (typeParamList) {
if (auto prevTypeParamList = IDecl->getTypeParamList()) {
- if (checkTypeParamListConsistency(*this, prevTypeParamList, typeParamList,
- CategoryName
- ? TypeParamListContext::Category
- : TypeParamListContext::Extension))
+ if (checkTypeParamListConsistency(
+ SemaRef, prevTypeParamList, typeParamList,
+ CategoryName ? TypeParamListContext::Category
+ : TypeParamListContext::Extension))
typeParamList = nullptr;
} else {
Diag(typeParamList->getLAngleLoc(),
@@ -1882,20 +1879,20 @@ ObjCCategoryDecl *Sema::ActOnStartCategoryInterface(
}
}
- CDecl = ObjCCategoryDecl::Create(Context, CurContext, AtInterfaceLoc,
+ CDecl = ObjCCategoryDecl::Create(Context, SemaRef.CurContext, AtInterfaceLoc,
ClassLoc, CategoryLoc, CategoryName, IDecl,
typeParamList);
// FIXME: PushOnScopeChains?
- CurContext->addDecl(CDecl);
+ SemaRef.CurContext->addDecl(CDecl);
// Process the attributes before looking at protocols to ensure that the
// availability attribute is attached to the category to provide availability
// checking for protocol uses.
- ProcessDeclAttributeList(TUScope, CDecl, AttrList);
- AddPragmaAttributes(TUScope, CDecl);
+ SemaRef.ProcessDeclAttributeList(SemaRef.TUScope, CDecl, AttrList);
+ SemaRef.AddPragmaAttributes(SemaRef.TUScope, CDecl);
if (NumProtoRefs) {
- diagnoseUseOfProtocols(*this, CDecl, (ObjCProtocolDecl*const*)ProtoRefs,
+ diagnoseUseOfProtocols(SemaRef, CDecl, (ObjCProtocolDecl *const *)ProtoRefs,
NumProtoRefs, ProtoLocs);
CDecl->setProtocolList((ObjCProtocolDecl*const*)ProtoRefs, NumProtoRefs,
ProtoLocs, Context);
@@ -1913,10 +1910,11 @@ ObjCCategoryDecl *Sema::ActOnStartCategoryInterface(
/// ActOnStartCategoryImplementation - Perform semantic checks on the
/// category implementation declaration and build an ObjCCategoryImplDecl
/// object.
-ObjCCategoryImplDecl *Sema::ActOnStartCategoryImplementation(
- SourceLocation AtCatImplLoc, IdentifierInfo *ClassName,
- SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc,
- const ParsedAttributesView &Attrs) {
+ObjCCategoryImplDecl *SemaObjC::ActOnStartCategoryImplementation(
+ SourceLocation AtCatImplLoc, const IdentifierInfo *ClassName,
+ SourceLocation ClassLoc, const IdentifierInfo *CatName,
+ SourceLocation CatLoc, const ParsedAttributesView &Attrs) {
+ ASTContext &Context = getASTContext();
ObjCInterfaceDecl *IDecl = getObjCInterfaceDecl(ClassName, ClassLoc, true);
ObjCCategoryDecl *CatIDecl = nullptr;
if (IDecl && IDecl->hasDefinition()) {
@@ -1924,31 +1922,32 @@ ObjCCategoryImplDecl *Sema::ActOnStartCategoryImplementation(
if (!CatIDecl) {
// Category @implementation with no corresponding @interface.
// Create and install one.
- CatIDecl = ObjCCategoryDecl::Create(Context, CurContext, AtCatImplLoc,
- ClassLoc, CatLoc,
- CatName, IDecl,
- /*typeParamList=*/nullptr);
+ CatIDecl =
+ ObjCCategoryDecl::Create(Context, SemaRef.CurContext, AtCatImplLoc,
+ ClassLoc, CatLoc, CatName, IDecl,
+ /*typeParamList=*/nullptr);
CatIDecl->setImplicit();
}
}
ObjCCategoryImplDecl *CDecl =
- ObjCCategoryImplDecl::Create(Context, CurContext, CatName, IDecl,
- ClassLoc, AtCatImplLoc, CatLoc);
+ ObjCCategoryImplDecl::Create(Context, SemaRef.CurContext, CatName, IDecl,
+ ClassLoc, AtCatImplLoc, CatLoc);
/// Check that class of this category is already completely declared.
if (!IDecl) {
Diag(ClassLoc, diag::err_undef_interface) << ClassName;
CDecl->setInvalidDecl();
- } else if (RequireCompleteType(ClassLoc, Context.getObjCInterfaceType(IDecl),
- diag::err_undef_interface)) {
+ } else if (SemaRef.RequireCompleteType(ClassLoc,
+ Context.getObjCInterfaceType(IDecl),
+ diag::err_undef_interface)) {
CDecl->setInvalidDecl();
}
- ProcessDeclAttributeList(TUScope, CDecl, Attrs);
- AddPragmaAttributes(TUScope, CDecl);
+ SemaRef.ProcessDeclAttributeList(SemaRef.TUScope, CDecl, Attrs);
+ SemaRef.AddPragmaAttributes(SemaRef.TUScope, CDecl);
// FIXME: PushOnScopeChains?
- CurContext->addDecl(CDecl);
+ SemaRef.CurContext->addDecl(CDecl);
// If the interface has the objc_runtime_visible attribute, we
// cannot implement a category for it.
@@ -1969,7 +1968,7 @@ ObjCCategoryImplDecl *Sema::ActOnStartCategoryImplementation(
CatIDecl->setImplementation(CDecl);
// Warn on implementating category of deprecated class under
// -Wdeprecated-implementations flag.
- DiagnoseObjCImplementedDeprecations(*this, CatIDecl,
+ DiagnoseObjCImplementedDeprecations(SemaRef, CatIDecl,
CDecl->getLocation());
}
}
@@ -1979,37 +1978,38 @@ ObjCCategoryImplDecl *Sema::ActOnStartCategoryImplementation(
return CDecl;
}
-ObjCImplementationDecl *Sema::ActOnStartClassImplementation(
- SourceLocation AtClassImplLoc, IdentifierInfo *ClassName,
- SourceLocation ClassLoc, IdentifierInfo *SuperClassname,
+ObjCImplementationDecl *SemaObjC::ActOnStartClassImplementation(
+ SourceLocation AtClassImplLoc, const IdentifierInfo *ClassName,
+ SourceLocation ClassLoc, const IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc, const ParsedAttributesView &Attrs) {
+ ASTContext &Context = getASTContext();
ObjCInterfaceDecl *IDecl = nullptr;
// Check for another declaration kind with the same name.
- NamedDecl *PrevDecl
- = LookupSingleName(TUScope, ClassName, ClassLoc, LookupOrdinaryName,
- forRedeclarationInCurContext());
+ NamedDecl *PrevDecl = SemaRef.LookupSingleName(
+ SemaRef.TUScope, ClassName, ClassLoc, Sema::LookupOrdinaryName,
+ SemaRef.forRedeclarationInCurContext());
if (PrevDecl && !isa<ObjCInterfaceDecl>(PrevDecl)) {
Diag(ClassLoc, diag::err_redefinition_different_kind) << ClassName;
Diag(PrevDecl->getLocation(), diag::note_previous_definition);
} else if ((IDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl))) {
// FIXME: This will produce an error if the definition of the interface has
// been imported from a module but is not visible.
- RequireCompleteType(ClassLoc, Context.getObjCInterfaceType(IDecl),
- diag::warn_undef_interface);
+ SemaRef.RequireCompleteType(ClassLoc, Context.getObjCInterfaceType(IDecl),
+ diag::warn_undef_interface);
} else {
// We did not find anything with the name ClassName; try to correct for
// typos in the class name.
ObjCInterfaceValidatorCCC CCC{};
- TypoCorrection Corrected =
- CorrectTypo(DeclarationNameInfo(ClassName, ClassLoc),
- LookupOrdinaryName, TUScope, nullptr, CCC, CTK_NonError);
+ TypoCorrection Corrected = SemaRef.CorrectTypo(
+ DeclarationNameInfo(ClassName, ClassLoc), Sema::LookupOrdinaryName,
+ SemaRef.TUScope, nullptr, CCC, Sema::CTK_NonError);
if (Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>()) {
// Suggest the (potentially) correct interface name. Don't provide a
// code-modification hint or use the typo name for recovery, because
// this is just a warning. The program may actually be correct.
- diagnoseTypo(Corrected,
- PDiag(diag::warn_undef_interface_suggest) << ClassName,
- /*ErrorRecovery*/false);
+ SemaRef.diagnoseTypo(
+ Corrected, PDiag(diag::warn_undef_interface_suggest) << ClassName,
+ /*ErrorRecovery*/ false);
} else {
Diag(ClassLoc, diag::warn_undef_interface) << ClassName;
}
@@ -2019,8 +2019,9 @@ ObjCImplementationDecl *Sema::ActOnStartClassImplementation(
ObjCInterfaceDecl *SDecl = nullptr;
if (SuperClassname) {
// Check if a different kind of symbol declared in this scope.
- PrevDecl = LookupSingleName(TUScope, SuperClassname, SuperClassLoc,
- LookupOrdinaryName);
+ PrevDecl =
+ SemaRef.LookupSingleName(SemaRef.TUScope, SuperClassname, SuperClassLoc,
+ Sema::LookupOrdinaryName);
if (PrevDecl && !isa<ObjCInterfaceDecl>(PrevDecl)) {
Diag(SuperClassLoc, diag::err_redefinition_different_kind)
<< SuperClassname;
@@ -2048,11 +2049,11 @@ ObjCImplementationDecl *Sema::ActOnStartClassImplementation(
// FIXME: Do we support attributes on the @implementation? If so we should
// copy them over.
- IDecl = ObjCInterfaceDecl::Create(Context, CurContext, AtClassImplLoc,
- ClassName, /*typeParamList=*/nullptr,
- /*PrevDecl=*/nullptr, ClassLoc,
- true);
- AddPragmaAttributes(TUScope, IDecl);
+ IDecl =
+ ObjCInterfaceDecl::Create(Context, SemaRef.CurContext, AtClassImplLoc,
+ ClassName, /*typeParamList=*/nullptr,
+ /*PrevDecl=*/nullptr, ClassLoc, true);
+ SemaRef.AddPragmaAttributes(SemaRef.TUScope, IDecl);
IDecl->startDefinition();
if (SDecl) {
IDecl->setSuperClass(Context.getTrivialTypeSourceInfo(
@@ -2063,7 +2064,7 @@ ObjCImplementationDecl *Sema::ActOnStartClassImplementation(
IDecl->setEndOfDefinitionLoc(ClassLoc);
}
- PushOnScopeChains(IDecl, TUScope);
+ SemaRef.PushOnScopeChains(IDecl, SemaRef.TUScope);
} else {
// Mark the interface as being completed, even if it was just as
// @class ....;
@@ -2072,12 +2073,12 @@ ObjCImplementationDecl *Sema::ActOnStartClassImplementation(
IDecl->startDefinition();
}
- ObjCImplementationDecl* IMPDecl =
- ObjCImplementationDecl::Create(Context, CurContext, IDecl, SDecl,
- ClassLoc, AtClassImplLoc, SuperClassLoc);
+ ObjCImplementationDecl *IMPDecl =
+ ObjCImplementationDecl::Create(Context, SemaRef.CurContext, IDecl, SDecl,
+ ClassLoc, AtClassImplLoc, SuperClassLoc);
- ProcessDeclAttributeList(TUScope, IMPDecl, Attrs);
- AddPragmaAttributes(TUScope, IMPDecl);
+ SemaRef.ProcessDeclAttributeList(SemaRef.TUScope, IMPDecl, Attrs);
+ SemaRef.AddPragmaAttributes(SemaRef.TUScope, IMPDecl);
if (CheckObjCDeclScope(IMPDecl)) {
ActOnObjCContainerStartDefinition(IMPDecl);
@@ -2093,10 +2094,10 @@ ObjCImplementationDecl *Sema::ActOnStartClassImplementation(
IMPDecl->setInvalidDecl();
} else { // add it to the list.
IDecl->setImplementation(IMPDecl);
- PushOnScopeChains(IMPDecl, TUScope);
+ SemaRef.PushOnScopeChains(IMPDecl, SemaRef.TUScope);
// Warn on implementating deprecated class under
// -Wdeprecated-implementations flag.
- DiagnoseObjCImplementedDeprecations(*this, IDecl, IMPDecl->getLocation());
+ DiagnoseObjCImplementedDeprecations(SemaRef, IDecl, IMPDecl->getLocation());
}
// If the superclass has the objc_runtime_visible attribute, we
@@ -2112,8 +2113,9 @@ ObjCImplementationDecl *Sema::ActOnStartClassImplementation(
return IMPDecl;
}
-Sema::DeclGroupPtrTy
-Sema::ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls) {
+SemaObjC::DeclGroupPtrTy
+SemaObjC::ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
+ ArrayRef<Decl *> Decls) {
SmallVector<Decl *, 64> DeclsInGroup;
DeclsInGroup.reserve(Decls.size() + 1);
@@ -2128,13 +2130,14 @@ Sema::ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls) {
DeclsInGroup.push_back(ObjCImpDecl);
- return BuildDeclaratorGroup(DeclsInGroup);
+ return SemaRef.BuildDeclaratorGroup(DeclsInGroup);
}
-void Sema::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
- ObjCIvarDecl **ivars, unsigned numIvars,
- SourceLocation RBrace) {
+void SemaObjC::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
+ ObjCIvarDecl **ivars, unsigned numIvars,
+ SourceLocation RBrace) {
assert(ImpDecl && "missing implementation decl");
+ ASTContext &Context = getASTContext();
ObjCInterfaceDecl* IDecl = ImpDecl->getClassInterface();
if (!IDecl)
return;
@@ -2150,7 +2153,7 @@ void Sema::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
// ObjCInterfaceDecl while in a 'non-fragile' runtime the ivar is
// only in the ObjCImplementationDecl. In the non-fragile case the ivar
// therefore also needs to be propagated to the ObjCInterfaceDecl.
- if (!LangOpts.ObjCRuntime.isFragile())
+ if (!getLangOpts().ObjCRuntime.isFragile())
IDecl->makeDeclVisibleInContext(ivars[i]);
ImpDecl->addDecl(ivars[i]);
}
@@ -2162,7 +2165,7 @@ void Sema::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
return;
assert(ivars && "missing @implementation ivars");
- if (LangOpts.ObjCRuntime.isNonFragile()) {
+ if (getLangOpts().ObjCRuntime.isNonFragile()) {
if (ImpDecl->getSuperClass())
Diag(ImpDecl->getLocation(), diag::warn_on_superclass_use);
for (unsigned i = 0; i < numIvars; i++) {
@@ -2231,12 +2234,16 @@ void Sema::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
Diag(IVI->getLocation(), diag::err_inconsistent_ivar_count);
}
+static bool shouldWarnUndefinedMethod(const ObjCMethodDecl *M) {
+ // No point warning no definition of method which is 'unavailable'.
+ return M->getAvailability() != AR_Unavailable;
+}
+
static void WarnUndefinedMethod(Sema &S, ObjCImplDecl *Impl,
ObjCMethodDecl *method, bool &IncompleteImpl,
unsigned DiagID,
NamedDecl *NeededFor = nullptr) {
- // No point warning no definition of method which is 'unavailable'.
- if (method->getAvailability() == AR_Unavailable)
+ if (!shouldWarnUndefinedMethod(method))
return;
// FIXME: For now ignore 'IncompleteImpl'.
@@ -2245,7 +2252,8 @@ static void WarnUndefinedMethod(Sema &S, ObjCImplDecl *Impl,
// separate warnings. We will give that approach a try, as that
// matches what we do with protocols.
{
- const Sema::SemaDiagnosticBuilder &B = S.Diag(Impl->getLocation(), DiagID);
+ const SemaBase::SemaDiagnosticBuilder &B =
+ S.Diag(Impl->getLocation(), DiagID);
B << method;
if (NeededFor)
B << NeededFor;
@@ -2575,22 +2583,21 @@ static bool checkMethodFamilyMismatch(Sema &S, ObjCMethodDecl *impl,
return true;
}
-void Sema::WarnConflictingTypedMethods(ObjCMethodDecl *ImpMethodDecl,
- ObjCMethodDecl *MethodDecl,
- bool IsProtocolMethodDecl) {
+void SemaObjC::WarnConflictingTypedMethods(ObjCMethodDecl *ImpMethodDecl,
+ ObjCMethodDecl *MethodDecl,
+ bool IsProtocolMethodDecl) {
if (getLangOpts().ObjCAutoRefCount &&
- checkMethodFamilyMismatch(*this, ImpMethodDecl, MethodDecl))
+ checkMethodFamilyMismatch(SemaRef, ImpMethodDecl, MethodDecl))
return;
- CheckMethodOverrideReturn(*this, ImpMethodDecl, MethodDecl,
- IsProtocolMethodDecl, false,
- true);
+ CheckMethodOverrideReturn(SemaRef, ImpMethodDecl, MethodDecl,
+ IsProtocolMethodDecl, false, true);
for (ObjCMethodDecl::param_iterator IM = ImpMethodDecl->param_begin(),
IF = MethodDecl->param_begin(), EM = ImpMethodDecl->param_end(),
EF = MethodDecl->param_end();
IM != EM && IF != EF; ++IM, ++IF) {
- CheckMethodOverrideParam(*this, ImpMethodDecl, MethodDecl, *IM, *IF,
+ CheckMethodOverrideParam(SemaRef, ImpMethodDecl, MethodDecl, *IM, *IF,
IsProtocolMethodDecl, false, true);
}
@@ -2601,19 +2608,18 @@ void Sema::WarnConflictingTypedMethods(ObjCMethodDecl *ImpMethodDecl,
}
}
-void Sema::CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
- ObjCMethodDecl *Overridden,
- bool IsProtocolMethodDecl) {
+void SemaObjC::CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
+ ObjCMethodDecl *Overridden,
+ bool IsProtocolMethodDecl) {
- CheckMethodOverrideReturn(*this, Method, Overridden,
- IsProtocolMethodDecl, true,
- true);
+ CheckMethodOverrideReturn(SemaRef, Method, Overridden, IsProtocolMethodDecl,
+ true, true);
for (ObjCMethodDecl::param_iterator IM = Method->param_begin(),
IF = Overridden->param_begin(), EM = Method->param_end(),
EF = Overridden->param_end();
IM != EM && IF != EF; ++IM, ++IF) {
- CheckMethodOverrideParam(*this, Method, Overridden, *IM, *IF,
+ CheckMethodOverrideParam(SemaRef, Method, Overridden, *IM, *IF,
IsProtocolMethodDecl, true, true);
}
@@ -2626,9 +2632,10 @@ void Sema::CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
-void Sema::WarnExactTypedMethods(ObjCMethodDecl *ImpMethodDecl,
- ObjCMethodDecl *MethodDecl,
- bool IsProtocolMethodDecl) {
+void SemaObjC::WarnExactTypedMethods(ObjCMethodDecl *ImpMethodDecl,
+ ObjCMethodDecl *MethodDecl,
+ bool IsProtocolMethodDecl) {
+ ASTContext &Context = getASTContext();
// don't issue warning when protocol method is optional because primary
// class is not required to implement it and it is safe for protocol
// to implement it.
@@ -2641,16 +2648,15 @@ void Sema::WarnExactTypedMethods(ObjCMethodDecl *ImpMethodDecl,
MethodDecl->hasAttr<DeprecatedAttr>())
return;
- bool match = CheckMethodOverrideReturn(*this, ImpMethodDecl, MethodDecl,
- IsProtocolMethodDecl, false, false);
+ bool match = CheckMethodOverrideReturn(SemaRef, ImpMethodDecl, MethodDecl,
+ IsProtocolMethodDecl, false, false);
if (match)
for (ObjCMethodDecl::param_iterator IM = ImpMethodDecl->param_begin(),
IF = MethodDecl->param_begin(), EM = ImpMethodDecl->param_end(),
EF = MethodDecl->param_end();
IM != EM && IF != EF; ++IM, ++IF) {
- match = CheckMethodOverrideParam(*this, ImpMethodDecl, MethodDecl,
- *IM, *IF,
- IsProtocolMethodDecl, false, false);
+ match = CheckMethodOverrideParam(SemaRef, ImpMethodDecl, MethodDecl, *IM,
+ *IF, IsProtocolMethodDecl, false, false);
if (!match)
break;
}
@@ -2703,7 +2709,7 @@ static void findProtocolsWithExplicitImpls(const ObjCInterfaceDecl *Super,
/// Declared in protocol, and those referenced by it.
static void CheckProtocolMethodDefs(
Sema &S, ObjCImplDecl *Impl, ObjCProtocolDecl *PDecl, bool &IncompleteImpl,
- const Sema::SelectorSet &InsMap, const Sema::SelectorSet &ClsMap,
+ const SemaObjC::SelectorSet &InsMap, const SemaObjC::SelectorSet &ClsMap,
ObjCContainerDecl *CDecl, LazyProtocolNameSet &ProtocolsExplictImpl) {
ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl);
ObjCInterfaceDecl *IDecl = C ? C->getClassInterface()
@@ -2745,7 +2751,7 @@ static void CheckProtocolMethodDefs(
// implemented in the class, we should not issue "Method definition not
// found" warnings.
// FIXME: Use a general GetUnarySelector method for this.
- IdentifierInfo* II = &S.Context.Idents.get("forwardInvocation");
+ const IdentifierInfo *II = &S.Context.Idents.get("forwardInvocation");
Selector fISelector = S.Context.Selectors.getSelector(1, &II);
if (InsMap.count(fISelector))
// Is IDecl derived from 'NSProxy'? If so, no instance methods
@@ -2825,15 +2831,11 @@ static void CheckProtocolMethodDefs(
/// MatchAllMethodDeclarations - Check methods declared in interface
/// or protocol against those declared in their implementations.
///
-void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap,
- const SelectorSet &ClsMap,
- SelectorSet &InsMapSeen,
- SelectorSet &ClsMapSeen,
- ObjCImplDecl* IMPDecl,
- ObjCContainerDecl* CDecl,
- bool &IncompleteImpl,
- bool ImmediateClass,
- bool WarnCategoryMethodImpl) {
+void SemaObjC::MatchAllMethodDeclarations(
+ const SelectorSet &InsMap, const SelectorSet &ClsMap,
+ SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl *IMPDecl,
+ ObjCContainerDecl *CDecl, bool &IncompleteImpl, bool ImmediateClass,
+ bool WarnCategoryMethodImpl) {
// Check and see if instance methods in class interface have been
// implemented in the implementation class. If so, their types match.
for (auto *I : CDecl->instance_methods()) {
@@ -2842,7 +2844,7 @@ void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap,
if (!I->isPropertyAccessor() &&
!InsMap.count(I->getSelector())) {
if (ImmediateClass)
- WarnUndefinedMethod(*this, IMPDecl, I, IncompleteImpl,
+ WarnUndefinedMethod(SemaRef, IMPDecl, I, IncompleteImpl,
diag::warn_undef_method_impl);
continue;
} else {
@@ -2872,7 +2874,7 @@ void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap,
if (!I->isPropertyAccessor() &&
!ClsMap.count(I->getSelector())) {
if (ImmediateClass)
- WarnUndefinedMethod(*this, IMPDecl, I, IncompleteImpl,
+ WarnUndefinedMethod(SemaRef, IMPDecl, I, IncompleteImpl,
diag::warn_undef_method_impl);
} else {
ObjCMethodDecl *ImpMethodDecl =
@@ -2938,8 +2940,8 @@ void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap,
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
-void Sema::CheckCategoryVsClassMethodMatches(
- ObjCCategoryImplDecl *CatIMPDecl) {
+void SemaObjC::CheckCategoryVsClassMethodMatches(
+ ObjCCategoryImplDecl *CatIMPDecl) {
// Get category's primary class.
ObjCCategoryDecl *CatDecl = CatIMPDecl->getCategoryDecl();
if (!CatDecl)
@@ -2977,9 +2979,9 @@ void Sema::CheckCategoryVsClassMethodMatches(
true /*WarnCategoryMethodImpl*/);
}
-void Sema::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
- ObjCContainerDecl* CDecl,
- bool IncompleteImpl) {
+void SemaObjC::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl *IMPDecl,
+ ObjCContainerDecl *CDecl,
+ bool IncompleteImpl) {
SelectorSet InsMap;
// Check and see if instance methods in class interface have been
// implemented in the implementation class.
@@ -3004,8 +3006,8 @@ void Sema::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
// an implementation or 2) there is a @synthesize/@dynamic implementation
// of the property in the @implementation.
if (const ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
- bool SynthesizeProperties = LangOpts.ObjCDefaultSynthProperties &&
- LangOpts.ObjCRuntime.isNonFragile() &&
+ bool SynthesizeProperties = getLangOpts().ObjCDefaultSynthProperties &&
+ getLangOpts().ObjCRuntime.isNonFragile() &&
!IDecl->isObjCRequiresPropertyDefs();
DiagnoseUnimplementedProperties(S, IMPDecl, CDecl, SynthesizeProperties);
}
@@ -3039,14 +3041,14 @@ void Sema::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
if (ObjCInterfaceDecl *I = dyn_cast<ObjCInterfaceDecl> (CDecl)) {
for (auto *PI : I->all_referenced_protocols())
- CheckProtocolMethodDefs(*this, IMPDecl, PI, IncompleteImpl, InsMap,
+ CheckProtocolMethodDefs(SemaRef, IMPDecl, PI, IncompleteImpl, InsMap,
ClsMap, I, ExplicitImplProtocols);
} else if (ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl)) {
// For extended class, unimplemented methods in its protocols will
// be reported in the primary class.
if (!C->IsClassExtension()) {
for (auto *P : C->protocols())
- CheckProtocolMethodDefs(*this, IMPDecl, P, IncompleteImpl, InsMap,
+ CheckProtocolMethodDefs(SemaRef, IMPDecl, P, IncompleteImpl, InsMap,
ClsMap, CDecl, ExplicitImplProtocols);
DiagnoseUnimplementedProperties(S, IMPDecl, CDecl,
/*SynthesizeProperties=*/false);
@@ -3055,18 +3057,17 @@ void Sema::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
llvm_unreachable("invalid ObjCContainerDecl type.");
}
-Sema::DeclGroupPtrTy
-Sema::ActOnForwardClassDeclaration(SourceLocation AtClassLoc,
- IdentifierInfo **IdentList,
- SourceLocation *IdentLocs,
- ArrayRef<ObjCTypeParamList *> TypeParamLists,
- unsigned NumElts) {
+SemaObjC::DeclGroupPtrTy SemaObjC::ActOnForwardClassDeclaration(
+ SourceLocation AtClassLoc, IdentifierInfo **IdentList,
+ SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists,
+ unsigned NumElts) {
+ ASTContext &Context = getASTContext();
SmallVector<Decl *, 8> DeclsInGroup;
for (unsigned i = 0; i != NumElts; ++i) {
// Check for another declaration kind with the same name.
- NamedDecl *PrevDecl
- = LookupSingleName(TUScope, IdentList[i], IdentLocs[i],
- LookupOrdinaryName, forRedeclarationInCurContext());
+ NamedDecl *PrevDecl = SemaRef.LookupSingleName(
+ SemaRef.TUScope, IdentList[i], IdentLocs[i], Sema::LookupOrdinaryName,
+ SemaRef.forRedeclarationInCurContext());
if (PrevDecl && !isa<ObjCInterfaceDecl>(PrevDecl)) {
// GCC apparently allows the following idiom:
//
@@ -3121,8 +3122,8 @@ Sema::ActOnForwardClassDeclaration(SourceLocation AtClassLoc,
if (ObjCTypeParamList *PrevTypeParams = PrevIDecl->getTypeParamList()) {
// Check for consistency with the previous declaration.
if (checkTypeParamListConsistency(
- *this, PrevTypeParams, TypeParams,
- TypeParamListContext::ForwardDeclaration)) {
+ SemaRef, PrevTypeParams, TypeParams,
+ TypeParamListContext::ForwardDeclaration)) {
TypeParams = nullptr;
}
} else if (ObjCInterfaceDecl *Def = PrevIDecl->getDefinition()) {
@@ -3137,29 +3138,29 @@ Sema::ActOnForwardClassDeclaration(SourceLocation AtClassLoc,
}
}
- ObjCInterfaceDecl *IDecl
- = ObjCInterfaceDecl::Create(Context, CurContext, AtClassLoc,
- ClassName, TypeParams, PrevIDecl,
- IdentLocs[i]);
+ ObjCInterfaceDecl *IDecl = ObjCInterfaceDecl::Create(
+ Context, SemaRef.CurContext, AtClassLoc, ClassName, TypeParams,
+ PrevIDecl, IdentLocs[i]);
IDecl->setAtEndRange(IdentLocs[i]);
if (PrevIDecl)
- mergeDeclAttributes(IDecl, PrevIDecl);
+ SemaRef.mergeDeclAttributes(IDecl, PrevIDecl);
- PushOnScopeChains(IDecl, TUScope);
+ SemaRef.PushOnScopeChains(IDecl, SemaRef.TUScope);
CheckObjCDeclScope(IDecl);
DeclsInGroup.push_back(IDecl);
}
- return BuildDeclaratorGroup(DeclsInGroup);
+ return SemaRef.BuildDeclaratorGroup(DeclsInGroup);
}
static bool tryMatchRecordTypes(ASTContext &Context,
- Sema::MethodMatchStrategy strategy,
+ SemaObjC::MethodMatchStrategy strategy,
const Type *left, const Type *right);
-static bool matchTypes(ASTContext &Context, Sema::MethodMatchStrategy strategy,
- QualType leftQT, QualType rightQT) {
+static bool matchTypes(ASTContext &Context,
+ SemaObjC::MethodMatchStrategy strategy, QualType leftQT,
+ QualType rightQT) {
const Type *left =
Context.getCanonicalType(leftQT).getUnqualifiedType().getTypePtr();
const Type *right =
@@ -3168,7 +3169,8 @@ static bool matchTypes(ASTContext &Context, Sema::MethodMatchStrategy strategy,
if (left == right) return true;
// If we're doing a strict match, the types have to match exactly.
- if (strategy == Sema::MMS_strict) return false;
+ if (strategy == SemaObjC::MMS_strict)
+ return false;
if (left->isIncompleteType() || right->isIncompleteType()) return false;
@@ -3216,7 +3218,7 @@ static bool matchTypes(ASTContext &Context, Sema::MethodMatchStrategy strategy,
}
static bool tryMatchRecordTypes(ASTContext &Context,
- Sema::MethodMatchStrategy strategy,
+ SemaObjC::MethodMatchStrategy strategy,
const Type *lt, const Type *rt) {
assert(lt && rt && lt != rt);
@@ -3254,9 +3256,10 @@ static bool tryMatchRecordTypes(ASTContext &Context,
/// MatchTwoMethodDeclarations - Checks that two methods have matching type and
/// returns true, or false, accordingly.
/// TODO: Handle protocol list; such as id<p1,p2> in type comparisons
-bool Sema::MatchTwoMethodDeclarations(const ObjCMethodDecl *left,
- const ObjCMethodDecl *right,
- MethodMatchStrategy strategy) {
+bool SemaObjC::MatchTwoMethodDeclarations(const ObjCMethodDecl *left,
+ const ObjCMethodDecl *right,
+ MethodMatchStrategy strategy) {
+ ASTContext &Context = getASTContext();
if (!matchTypes(Context, strategy, left->getReturnType(),
right->getReturnType()))
return false;
@@ -3313,8 +3316,8 @@ static bool isMethodContextSameForKindofLookup(ObjCMethodDecl *Method,
return MethodInterface == MethodInListInterface;
}
-void Sema::addMethodToGlobalList(ObjCMethodList *List,
- ObjCMethodDecl *Method) {
+void SemaObjC::addMethodToGlobalList(ObjCMethodList *List,
+ ObjCMethodDecl *Method) {
// Record at the head of the list whether there were 0, 1, or >= 2 methods
// inside categories.
if (ObjCCategoryDecl *CD =
@@ -3402,7 +3405,7 @@ void Sema::addMethodToGlobalList(ObjCMethodList *List,
// We have a new signature for an existing method - add it.
// This is extremely rare. Only 1% of Cocoa selectors are "overloaded".
- ObjCMethodList *Mem = BumpAlloc.Allocate<ObjCMethodList>();
+ ObjCMethodList *Mem = SemaRef.BumpAlloc.Allocate<ObjCMethodList>();
// We insert it right before ListWithSameDeclaration.
if (ListWithSameDeclaration) {
@@ -3418,24 +3421,24 @@ void Sema::addMethodToGlobalList(ObjCMethodList *List,
/// Read the contents of the method pool for a given selector from
/// external storage.
-void Sema::ReadMethodPool(Selector Sel) {
- assert(ExternalSource && "We need an external AST source");
- ExternalSource->ReadMethodPool(Sel);
+void SemaObjC::ReadMethodPool(Selector Sel) {
+ assert(SemaRef.ExternalSource && "We need an external AST source");
+ SemaRef.ExternalSource->ReadMethodPool(Sel);
}
-void Sema::updateOutOfDateSelector(Selector Sel) {
- if (!ExternalSource)
+void SemaObjC::updateOutOfDateSelector(Selector Sel) {
+ if (!SemaRef.ExternalSource)
return;
- ExternalSource->updateOutOfDateSelector(Sel);
+ SemaRef.ExternalSource->updateOutOfDateSelector(Sel);
}
-void Sema::AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl,
- bool instance) {
+void SemaObjC::AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl,
+ bool instance) {
// Ignore methods of invalid containers.
if (cast<Decl>(Method->getDeclContext())->isInvalidDecl())
return;
- if (ExternalSource)
+ if (SemaRef.ExternalSource)
ReadMethodPool(Method->getSelector());
GlobalMethodPool::iterator Pos = MethodPool.find(Method->getSelector());
@@ -3508,11 +3511,10 @@ static bool FilterMethodsByTypeBound(ObjCMethodDecl *Method,
/// We first select the type of the method: Instance or Factory, then collect
/// all methods with that type.
-bool Sema::CollectMultipleMethodsInGlobalPool(
+bool SemaObjC::CollectMultipleMethodsInGlobalPool(
Selector Sel, SmallVectorImpl<ObjCMethodDecl *> &Methods,
- bool InstanceFirst, bool CheckTheOther,
- const ObjCObjectType *TypeBound) {
- if (ExternalSource)
+ bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound) {
+ if (SemaRef.ExternalSource)
ReadMethodPool(Sel);
GlobalMethodPool::iterator Pos = MethodPool.find(Sel);
@@ -3547,7 +3549,7 @@ bool Sema::CollectMultipleMethodsInGlobalPool(
return Methods.size() > 1;
}
-bool Sema::AreMultipleMethodsInGlobalPool(
+bool SemaObjC::AreMultipleMethodsInGlobalPool(
Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R,
bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl *> &Methods) {
// Diagnose finding more than one method in global pool.
@@ -3572,10 +3574,10 @@ bool Sema::AreMultipleMethodsInGlobalPool(
return MethList.hasMoreThanOneDecl();
}
-ObjCMethodDecl *Sema::LookupMethodInGlobalPool(Selector Sel, SourceRange R,
- bool receiverIdOrClass,
- bool instance) {
- if (ExternalSource)
+ObjCMethodDecl *SemaObjC::LookupMethodInGlobalPool(Selector Sel, SourceRange R,
+ bool receiverIdOrClass,
+ bool instance) {
+ if (SemaRef.ExternalSource)
ReadMethodPool(Sel);
GlobalMethodPool::iterator Pos = MethodPool.find(Sel);
@@ -3592,17 +3594,18 @@ ObjCMethodDecl *Sema::LookupMethodInGlobalPool(Selector Sel, SourceRange R,
return nullptr;
}
-void Sema::DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
- Selector Sel, SourceRange R,
- bool receiverIdOrClass) {
+void SemaObjC::DiagnoseMultipleMethodInGlobalPool(
+ SmallVectorImpl<ObjCMethodDecl *> &Methods, Selector Sel, SourceRange R,
+ bool receiverIdOrClass) {
// We found multiple methods, so we may have to complain.
bool issueDiagnostic = false, issueError = false;
// We support a warning which complains about *any* difference in
// method signature.
bool strictSelectorMatch =
- receiverIdOrClass &&
- !Diags.isIgnored(diag::warn_strict_multiple_method_decl, R.getBegin());
+ receiverIdOrClass &&
+ !getDiagnostics().isIgnored(diag::warn_strict_multiple_method_decl,
+ R.getBegin());
if (strictSelectorMatch) {
for (unsigned I = 1, N = Methods.size(); I != N; ++I) {
if (!MatchTwoMethodDeclarations(Methods[0], Methods[I], MMS_strict)) {
@@ -3646,7 +3649,7 @@ void Sema::DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &
}
}
-ObjCMethodDecl *Sema::LookupImplementedMethodInGlobalPool(Selector Sel) {
+ObjCMethodDecl *SemaObjC::LookupImplementedMethodInGlobalPool(Selector Sel) {
GlobalMethodPool::iterator Pos = MethodPool.find(Sel);
if (Pos == MethodPool.end())
return nullptr;
@@ -3695,15 +3698,15 @@ static bool HelperIsMethodInObjCType(Sema &S, Selector Sel,
QualType ObjectType) {
if (ObjectType.isNull())
return true;
- if (S.LookupMethodInObjectType(Sel, ObjectType, true/*Instance method*/))
+ if (S.ObjC().LookupMethodInObjectType(Sel, ObjectType,
+ true /*Instance method*/))
return true;
- return S.LookupMethodInObjectType(Sel, ObjectType, false/*Class method*/) !=
- nullptr;
+ return S.ObjC().LookupMethodInObjectType(Sel, ObjectType,
+ false /*Class method*/) != nullptr;
}
const ObjCMethodDecl *
-Sema::SelectorsForTypoCorrection(Selector Sel,
- QualType ObjectType) {
+SemaObjC::SelectorsForTypoCorrection(Selector Sel, QualType ObjectType) {
unsigned NumArgs = Sel.getNumArgs();
SmallVector<const ObjCMethodDecl *, 8> Methods;
bool ObjectIsId = true, ObjectIsClass = true;
@@ -3733,8 +3736,8 @@ Sema::SelectorsForTypoCorrection(Selector Sel,
if (ObjectIsId)
Methods.push_back(M->getMethod());
else if (!ObjectIsClass &&
- HelperIsMethodInObjCType(*this, M->getMethod()->getSelector(),
- ObjectType))
+ HelperIsMethodInObjCType(
+ SemaRef, M->getMethod()->getSelector(), ObjectType))
Methods.push_back(M->getMethod());
}
// class methods
@@ -3745,8 +3748,8 @@ Sema::SelectorsForTypoCorrection(Selector Sel,
if (ObjectIsClass)
Methods.push_back(M->getMethod());
else if (!ObjectIsId &&
- HelperIsMethodInObjCType(*this, M->getMethod()->getSelector(),
- ObjectType))
+ HelperIsMethodInObjCType(
+ SemaRef, M->getMethod()->getSelector(), ObjectType))
Methods.push_back(M->getMethod());
}
}
@@ -3764,8 +3767,8 @@ Sema::SelectorsForTypoCorrection(Selector Sel,
/// \@implementation. This becomes necessary because class extension can
/// add ivars to a class in random order which will not be known until
/// class's \@implementation is seen.
-void Sema::DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID,
- ObjCInterfaceDecl *SID) {
+void SemaObjC::DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID,
+ ObjCInterfaceDecl *SID) {
for (auto *Ivar : ID->ivars()) {
if (Ivar->isInvalidDecl())
continue;
@@ -3817,23 +3820,23 @@ static void DiagnoseRetainableFlexibleArrayMember(Sema &S,
}
}
-Sema::ObjCContainerKind Sema::getObjCContainerKind() const {
- switch (CurContext->getDeclKind()) {
- case Decl::ObjCInterface:
- return Sema::OCK_Interface;
- case Decl::ObjCProtocol:
- return Sema::OCK_Protocol;
- case Decl::ObjCCategory:
- if (cast<ObjCCategoryDecl>(CurContext)->IsClassExtension())
- return Sema::OCK_ClassExtension;
- return Sema::OCK_Category;
- case Decl::ObjCImplementation:
- return Sema::OCK_Implementation;
- case Decl::ObjCCategoryImpl:
- return Sema::OCK_CategoryImplementation;
-
- default:
- return Sema::OCK_None;
+SemaObjC::ObjCContainerKind SemaObjC::getObjCContainerKind() const {
+ switch (SemaRef.CurContext->getDeclKind()) {
+ case Decl::ObjCInterface:
+ return SemaObjC::OCK_Interface;
+ case Decl::ObjCProtocol:
+ return SemaObjC::OCK_Protocol;
+ case Decl::ObjCCategory:
+ if (cast<ObjCCategoryDecl>(SemaRef.CurContext)->IsClassExtension())
+ return SemaObjC::OCK_ClassExtension;
+ return SemaObjC::OCK_Category;
+ case Decl::ObjCImplementation:
+ return SemaObjC::OCK_Implementation;
+ case Decl::ObjCCategoryImpl:
+ return SemaObjC::OCK_CategoryImplementation;
+
+ default:
+ return SemaObjC::OCK_None;
}
}
@@ -3977,14 +3980,16 @@ static void DiagnoseCategoryDirectMembersProtocolConformance(
}
// Note: For class/category implementations, allMethods is always null.
-Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
- ArrayRef<DeclGroupPtrTy> allTUVars) {
- if (getObjCContainerKind() == Sema::OCK_None)
+Decl *SemaObjC::ActOnAtEnd(Scope *S, SourceRange AtEnd,
+ ArrayRef<Decl *> allMethods,
+ ArrayRef<DeclGroupPtrTy> allTUVars) {
+ ASTContext &Context = getASTContext();
+ if (getObjCContainerKind() == SemaObjC::OCK_None)
return nullptr;
assert(AtEnd.isValid() && "Invalid location for '@end'");
- auto *OCD = cast<ObjCContainerDecl>(CurContext);
+ auto *OCD = cast<ObjCContainerDecl>(SemaRef.CurContext);
Decl *ClassDecl = OCD;
bool isInterfaceDeclKind =
@@ -3996,7 +4001,7 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
// ActOnPropertyImplDecl() creates them as not visible in case
// they are overridden by an explicit method that is encountered
// later.
- if (auto *OID = dyn_cast<ObjCImplementationDecl>(CurContext)) {
+ if (auto *OID = dyn_cast<ObjCImplementationDecl>(SemaRef.CurContext)) {
for (auto *PropImpl : OID->property_impls()) {
if (auto *Getter = PropImpl->getGetterMethodDecl())
if (Getter->isSynthesizedAccessorStub())
@@ -4077,7 +4082,8 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
DiagnoseClassExtensionDupMethods(C, CCPrimary);
}
- DiagnoseCategoryDirectMembersProtocolConformance(*this, C, C->protocols());
+ DiagnoseCategoryDirectMembersProtocolConformance(SemaRef, C,
+ C->protocols());
}
if (ObjCContainerDecl *CDecl = dyn_cast<ObjCContainerDecl>(ClassDecl)) {
if (CDecl->getIdentifier())
@@ -4123,8 +4129,8 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
DiagnoseUnusedBackingIvarInAccessor(S, IC);
if (IDecl->hasDesignatedInitializers())
DiagnoseMissingDesignatedInitOverrides(IC, IDecl);
- DiagnoseWeakIvars(*this, IC);
- DiagnoseRetainableFlexibleArrayMember(*this, IDecl);
+ DiagnoseWeakIvars(SemaRef, IC);
+ DiagnoseRetainableFlexibleArrayMember(SemaRef, IDecl);
bool HasRootClassAttr = IDecl->hasAttr<ObjCRootClassAttr>();
if (IDecl->getSuperClass() == nullptr) {
@@ -4132,14 +4138,14 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
// __attribute((objc_root_class)).
if (!HasRootClassAttr) {
SourceLocation DeclLoc(IDecl->getLocation());
- SourceLocation SuperClassLoc(getLocForEndOfToken(DeclLoc));
+ SourceLocation SuperClassLoc(SemaRef.getLocForEndOfToken(DeclLoc));
Diag(DeclLoc, diag::warn_objc_root_class_missing)
<< IDecl->getIdentifier();
// See if NSObject is in the current scope, and if it is, suggest
// adding " : NSObject " to the class declaration.
- NamedDecl *IF = LookupSingleName(TUScope,
- NSAPIObj->getNSClassId(NSAPI::ClassId_NSObject),
- DeclLoc, LookupOrdinaryName);
+ NamedDecl *IF = SemaRef.LookupSingleName(
+ SemaRef.TUScope, NSAPIObj->getNSClassId(NSAPI::ClassId_NSObject),
+ DeclLoc, Sema::LookupOrdinaryName);
ObjCInterfaceDecl *NSObjectDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
if (NSObjectDecl && NSObjectDecl->getDefinition()) {
Diag(SuperClassLoc, diag::note_objc_needs_superclass)
@@ -4168,7 +4174,7 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
if (IDecl->hasAttr<ObjCClassStubAttr>())
Diag(IC->getLocation(), diag::err_implementation_of_class_stub);
- if (LangOpts.ObjCRuntime.isNonFragile()) {
+ if (getLangOpts().ObjCRuntime.isNonFragile()) {
while (IDecl->getSuperClass()) {
DiagnoseDuplicateIvars(IDecl, IDecl->getSuperClass());
IDecl = IDecl->getSuperClass();
@@ -4201,7 +4207,7 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
!IntfDecl->hasAttr<ObjCSubclassingRestrictedAttr>())
Diag(IntfDecl->getLocation(), diag::err_class_stub_subclassing_mismatch);
}
- DiagnoseVariableSizedIvars(*this, OCD);
+ DiagnoseVariableSizedIvars(SemaRef, OCD);
if (isInterfaceDeclKind) {
// Reject invalid vardecls.
for (unsigned i = 0, e = allTUVars.size(); i != e; i++) {
@@ -4219,10 +4225,10 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
DeclGroupRef DG = allTUVars[i].get();
for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
(*I)->setTopLevelDeclInObjCContainer();
- Consumer.HandleTopLevelDeclInObjCContainer(DG);
+ SemaRef.Consumer.HandleTopLevelDeclInObjCContainer(DG);
}
- ActOnDocumentableDecl(ClassDecl);
+ SemaRef.ActOnDocumentableDecl(ClassDecl);
return ClassDecl;
}
@@ -4236,7 +4242,7 @@ CvtQTToAstBitMask(ObjCDeclSpec::ObjCDeclQualifier PQTVal) {
/// Check whether the declared result type of the given Objective-C
/// method declaration is compatible with the method's class.
///
-static Sema::ResultTypeCompatibilityKind
+static SemaObjC::ResultTypeCompatibilityKind
CheckRelatedResultTypeCompatibility(Sema &S, ObjCMethodDecl *Method,
ObjCInterfaceDecl *CurrentClass) {
QualType ResultType = Method->getReturnType();
@@ -4249,27 +4255,27 @@ CheckRelatedResultTypeCompatibility(Sema &S, ObjCMethodDecl *Method,
// - it is id or qualified id, or
if (ResultObjectType->isObjCIdType() ||
ResultObjectType->isObjCQualifiedIdType())
- return Sema::RTC_Compatible;
+ return SemaObjC::RTC_Compatible;
if (CurrentClass) {
if (ObjCInterfaceDecl *ResultClass
= ResultObjectType->getInterfaceDecl()) {
// - it is the same as the method's class type, or
if (declaresSameEntity(CurrentClass, ResultClass))
- return Sema::RTC_Compatible;
+ return SemaObjC::RTC_Compatible;
// - it is a superclass of the method's class type
if (ResultClass->isSuperClassOf(CurrentClass))
- return Sema::RTC_Compatible;
+ return SemaObjC::RTC_Compatible;
}
} else {
// Any Objective-C pointer type might be acceptable for a protocol
// method; we just don't know.
- return Sema::RTC_Unknown;
+ return SemaObjC::RTC_Unknown;
}
}
- return Sema::RTC_Incompatible;
+ return SemaObjC::RTC_Incompatible;
}
namespace {
@@ -4287,13 +4293,14 @@ public:
// Bypass this search if we've never seen an instance/class method
// with this selector before.
- Sema::GlobalMethodPool::iterator it = S.MethodPool.find(selector);
- if (it == S.MethodPool.end()) {
+ SemaObjC::GlobalMethodPool::iterator it =
+ S.ObjC().MethodPool.find(selector);
+ if (it == S.ObjC().MethodPool.end()) {
if (!S.getExternalSource()) return;
- S.ReadMethodPool(selector);
+ S.ObjC().ReadMethodPool(selector);
- it = S.MethodPool.find(selector);
- if (it == S.MethodPool.end())
+ it = S.ObjC().MethodPool.find(selector);
+ if (it == S.ObjC().MethodPool.end())
return;
}
const ObjCMethodList &list =
@@ -4420,8 +4427,8 @@ private:
};
} // end anonymous namespace
-void Sema::CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
- ObjCMethodDecl *overridden) {
+void SemaObjC::CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
+ ObjCMethodDecl *overridden) {
if (overridden->isDirectMethod()) {
const auto *attr = overridden->getAttr<ObjCDirectAttr>();
Diag(method->getLocation(), diag::err_objc_override_direct_method);
@@ -4434,9 +4441,10 @@ void Sema::CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
}
}
-void Sema::CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
- ObjCInterfaceDecl *CurrentClass,
- ResultTypeCompatibilityKind RTC) {
+void SemaObjC::CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
+ ObjCInterfaceDecl *CurrentClass,
+ ResultTypeCompatibilityKind RTC) {
+ ASTContext &Context = getASTContext();
if (!ObjCMethod)
return;
auto IsMethodInCurrentClass = [CurrentClass](const ObjCMethodDecl *M) {
@@ -4445,7 +4453,7 @@ void Sema::CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
CurrentClass->getCanonicalDecl();
};
// Search for overridden methods and merge information down from them.
- OverrideSearch overrides(*this, ObjCMethod);
+ OverrideSearch overrides(SemaRef, ObjCMethod);
// Keep track if the method overrides any method in the class's base classes,
// its protocols, or its categories' protocols; we will keep that info
// in the ObjCMethodDecl.
@@ -4477,7 +4485,7 @@ void Sema::CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
// least 2 category methods recorded, otherwise only one will do.
if (CategCount > 1 ||
!isa<ObjCCategoryImplDecl>(overridden->getDeclContext())) {
- OverrideSearch overrides(*this, overridden);
+ OverrideSearch overrides(SemaRef, overridden);
for (ObjCMethodDecl *SuperOverridden : overrides) {
if (isa<ObjCProtocolDecl>(SuperOverridden->getDeclContext()) ||
!IsMethodInCurrentClass(SuperOverridden)) {
@@ -4494,11 +4502,11 @@ void Sema::CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
}
// Propagate down the 'related result type' bit from overridden methods.
- if (RTC != Sema::RTC_Incompatible && overridden->hasRelatedResultType())
+ if (RTC != SemaObjC::RTC_Incompatible && overridden->hasRelatedResultType())
ObjCMethod->setRelatedResultType();
// Then merge the declarations.
- mergeObjCMethodDecls(ObjCMethod, overridden);
+ SemaRef.mergeObjCMethodDecls(ObjCMethod, overridden);
if (ObjCMethod->isImplicit() && overridden->isImplicit())
continue; // Conflicting properties are detected elsewhere.
@@ -4717,7 +4725,7 @@ static void checkObjCDirectMethodClashes(Sema &S, ObjCInterfaceDecl *IDecl,
diagClash(IMD);
}
-Decl *Sema::ActOnMethodDeclaration(
+Decl *SemaObjC::ActOnMethodDeclaration(
Scope *S, SourceLocation MethodLoc, SourceLocation EndLoc,
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
@@ -4727,21 +4735,22 @@ Decl *Sema::ActOnMethodDeclaration(
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodDeclKind,
bool isVariadic, bool MethodDefinition) {
+ ASTContext &Context = getASTContext();
// Make sure we can establish a context for the method.
- if (!CurContext->isObjCContainer()) {
+ if (!SemaRef.CurContext->isObjCContainer()) {
Diag(MethodLoc, diag::err_missing_method_context);
return nullptr;
}
- Decl *ClassDecl = cast<ObjCContainerDecl>(CurContext);
+ Decl *ClassDecl = cast<ObjCContainerDecl>(SemaRef.CurContext);
QualType resultDeclType;
bool HasRelatedResultType = false;
TypeSourceInfo *ReturnTInfo = nullptr;
if (ReturnType) {
- resultDeclType = GetTypeFromParser(ReturnType, &ReturnTInfo);
+ resultDeclType = SemaRef.GetTypeFromParser(ReturnType, &ReturnTInfo);
- if (CheckFunctionReturnType(resultDeclType, MethodLoc))
+ if (SemaRef.CheckFunctionReturnType(resultDeclType, MethodLoc))
return nullptr;
QualType bareResultType = resultDeclType;
@@ -4754,8 +4763,8 @@ Decl *Sema::ActOnMethodDeclaration(
}
ObjCMethodDecl *ObjCMethod = ObjCMethodDecl::Create(
- Context, MethodLoc, EndLoc, Sel, resultDeclType, ReturnTInfo, CurContext,
- MethodType == tok::minus, isVariadic,
+ Context, MethodLoc, EndLoc, Sel, resultDeclType, ReturnTInfo,
+ SemaRef.CurContext, MethodType == tok::minus, isVariadic,
/*isPropertyAccessor=*/false, /*isSynthesizedAccessorStub=*/false,
/*isImplicitlyDeclared=*/false, /*isDefined=*/false,
MethodDeclKind == tok::objc_optional
@@ -4773,12 +4782,13 @@ Decl *Sema::ActOnMethodDeclaration(
ArgType = Context.getObjCIdType();
DI = nullptr;
} else {
- ArgType = GetTypeFromParser(ArgInfo[i].Type, &DI);
+ ArgType = SemaRef.GetTypeFromParser(ArgInfo[i].Type, &DI);
}
- LookupResult R(*this, ArgInfo[i].Name, ArgInfo[i].NameLoc,
- LookupOrdinaryName, forRedeclarationInCurContext());
- LookupName(R, S);
+ LookupResult R(SemaRef, ArgInfo[i].Name, ArgInfo[i].NameLoc,
+ Sema::LookupOrdinaryName,
+ SemaRef.forRedeclarationInCurContext());
+ SemaRef.LookupName(R, S);
if (R.isSingleResult()) {
NamedDecl *PrevDecl = R.getFoundDecl();
if (S->isDeclScope(PrevDecl)) {
@@ -4795,9 +4805,9 @@ Decl *Sema::ActOnMethodDeclaration(
? DI->getTypeLoc().getBeginLoc()
: ArgInfo[i].NameLoc;
- ParmVarDecl* Param = CheckParameter(ObjCMethod, StartLoc,
- ArgInfo[i].NameLoc, ArgInfo[i].Name,
- ArgType, DI, SC_None);
+ ParmVarDecl *Param =
+ SemaRef.CheckParameter(ObjCMethod, StartLoc, ArgInfo[i].NameLoc,
+ ArgInfo[i].Name, ArgType, DI, SC_None);
Param->setObjCMethodScopeInfo(i);
@@ -4805,15 +4815,17 @@ Decl *Sema::ActOnMethodDeclaration(
CvtQTToAstBitMask(ArgInfo[i].DeclSpec.getObjCDeclQualifier()));
// Apply the attributes to the parameter.
- ProcessDeclAttributeList(TUScope, Param, ArgInfo[i].ArgAttrs);
- AddPragmaAttributes(TUScope, Param);
+ SemaRef.ProcessDeclAttributeList(SemaRef.TUScope, Param,
+ ArgInfo[i].ArgAttrs);
+ SemaRef.AddPragmaAttributes(SemaRef.TUScope, Param);
+ SemaRef.ProcessAPINotes(Param);
if (Param->hasAttr<BlocksAttr>()) {
Diag(Param->getLocation(), diag::err_block_on_nonlocal);
Param->setInvalidDecl();
}
S->AddDecl(Param);
- IdResolver.AddDecl(Param);
+ SemaRef.IdResolver.AddDecl(Param);
Params.push_back(Param);
}
@@ -4835,8 +4847,9 @@ Decl *Sema::ActOnMethodDeclaration(
ObjCMethod->setObjCDeclQualifier(
CvtQTToAstBitMask(ReturnQT.getObjCDeclQualifier()));
- ProcessDeclAttributeList(TUScope, ObjCMethod, AttrList);
- AddPragmaAttributes(TUScope, ObjCMethod);
+ SemaRef.ProcessDeclAttributeList(SemaRef.TUScope, ObjCMethod, AttrList);
+ SemaRef.AddPragmaAttributes(SemaRef.TUScope, ObjCMethod);
+ SemaRef.ProcessAPINotes(ObjCMethod);
// Add the method now.
const ObjCMethodDecl *PrevMethod = nullptr;
@@ -4890,7 +4903,7 @@ Decl *Sema::ActOnMethodDeclaration(
if (ObjCInterfaceDecl *IDecl = ImpDecl->getClassInterface()) {
if (auto *IMD = IDecl->lookupMethod(ObjCMethod->getSelector(),
ObjCMethod->isInstanceMethod())) {
- mergeInterfaceMethodToImpl(*this, ObjCMethod, IMD);
+ mergeInterfaceMethodToImpl(SemaRef, ObjCMethod, IMD);
// The Idecl->lookupMethod() above will find declarations for ObjCMethod
// in one of these places:
@@ -4950,8 +4963,8 @@ Decl *Sema::ActOnMethodDeclaration(
<< ObjCMethod->getDeclName();
}
} else {
- mergeObjCDirectMembers(*this, ClassDecl, ObjCMethod);
- checkObjCDirectMethodClashes(*this, IDecl, ObjCMethod, ImpDecl);
+ mergeObjCDirectMembers(SemaRef, ClassDecl, ObjCMethod);
+ checkObjCDirectMethodClashes(SemaRef, IDecl, ObjCMethod, ImpDecl);
}
// Warn if a method declared in a protocol to which a category or
@@ -4967,12 +4980,12 @@ Decl *Sema::ActOnMethodDeclaration(
auto OI = IMD->param_begin(), OE = IMD->param_end();
auto NI = ObjCMethod->param_begin();
for (; OI != OE; ++OI, ++NI)
- diagnoseNoescape(*NI, *OI, C, P, *this);
+ diagnoseNoescape(*NI, *OI, C, P, SemaRef);
}
}
} else {
if (!isa<ObjCProtocolDecl>(ClassDecl)) {
- mergeObjCDirectMembers(*this, ClassDecl, ObjCMethod);
+ mergeObjCDirectMembers(SemaRef, ClassDecl, ObjCMethod);
ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(ClassDecl);
if (!IDecl)
@@ -4981,7 +4994,7 @@ Decl *Sema::ActOnMethodDeclaration(
// declaration by now, however for invalid code we'll keep parsing
// but we won't find the primary interface and IDecl will be nil.
if (IDecl)
- checkObjCDirectMethodClashes(*this, IDecl, ObjCMethod);
+ checkObjCDirectMethodClashes(SemaRef, IDecl, ObjCMethod);
}
cast<DeclContext>(ClassDecl)->addDecl(ObjCMethod);
@@ -5010,8 +5023,8 @@ Decl *Sema::ActOnMethodDeclaration(
CurrentClass = CatImpl->getClassInterface();
}
- ResultTypeCompatibilityKind RTC
- = CheckRelatedResultTypeCompatibility(*this, ObjCMethod, CurrentClass);
+ ResultTypeCompatibilityKind RTC =
+ CheckRelatedResultTypeCompatibility(SemaRef, ObjCMethod, CurrentClass);
CheckObjCMethodOverrides(ObjCMethod, CurrentClass, RTC);
@@ -5020,9 +5033,9 @@ Decl *Sema::ActOnMethodDeclaration(
ARCError = CheckARCMethodDecl(ObjCMethod);
// Infer the related result type when possible.
- if (!ARCError && RTC == Sema::RTC_Compatible &&
+ if (!ARCError && RTC == SemaObjC::RTC_Compatible &&
!ObjCMethod->hasRelatedResultType() &&
- LangOpts.ObjCInferRelatedResultType) {
+ getLangOpts().ObjCInferRelatedResultType) {
bool InferRelatedResultType = false;
switch (ObjCMethod->getMethodFamily()) {
case OMF_None:
@@ -5056,7 +5069,7 @@ Decl *Sema::ActOnMethodDeclaration(
if (MethodDefinition &&
Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
- checkObjCMethodX86VectorTypes(*this, ObjCMethod);
+ checkObjCMethodX86VectorTypes(SemaRef, ObjCMethod);
// + load method cannot have availability attributes. It get called on
// startup, so it has to have the availability of the deployment target.
@@ -5072,20 +5085,21 @@ Decl *Sema::ActOnMethodDeclaration(
// Insert the invisible arguments, self and _cmd!
ObjCMethod->createImplicitParams(Context, ObjCMethod->getClassInterface());
- ActOnDocumentableDecl(ObjCMethod);
+ SemaRef.ActOnDocumentableDecl(ObjCMethod);
return ObjCMethod;
}
-bool Sema::CheckObjCDeclScope(Decl *D) {
+bool SemaObjC::CheckObjCDeclScope(Decl *D) {
// Following is also an error. But it is caused by a missing @end
// and diagnostic is issued elsewhere.
- if (isa<ObjCContainerDecl>(CurContext->getRedeclContext()))
+ if (isa<ObjCContainerDecl>(SemaRef.CurContext->getRedeclContext()))
return false;
// If we switched context to translation unit while we are still lexically in
// an objc container, it means the parser missed emitting an error.
- if (isa<TranslationUnitDecl>(getCurLexicalContext()->getRedeclContext()))
+ if (isa<TranslationUnitDecl>(
+ SemaRef.getCurLexicalContext()->getRedeclContext()))
return false;
Diag(D->getLocation(), diag::err_objc_decls_may_only_appear_in_global_scope);
@@ -5096,16 +5110,17 @@ bool Sema::CheckObjCDeclScope(Decl *D) {
/// Called whenever \@defs(ClassName) is encountered in the source. Inserts the
/// instance variables of ClassName into Decls.
-void Sema::ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
- IdentifierInfo *ClassName,
- SmallVectorImpl<Decl*> &Decls) {
+void SemaObjC::ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
+ const IdentifierInfo *ClassName,
+ SmallVectorImpl<Decl *> &Decls) {
+ ASTContext &Context = getASTContext();
// Check that ClassName is a valid class
ObjCInterfaceDecl *Class = getObjCInterfaceDecl(ClassName, DeclStart);
if (!Class) {
Diag(DeclStart, diag::err_undef_interface) << ClassName;
return;
}
- if (LangOpts.ObjCRuntime.isNonFragile()) {
+ if (getLangOpts().ObjCRuntime.isNonFragile()) {
Diag(DeclStart, diag::err_atdef_nonfragile_interface);
return;
}
@@ -5130,18 +5145,19 @@ void Sema::ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
D != Decls.end(); ++D) {
FieldDecl *FD = cast<FieldDecl>(*D);
if (getLangOpts().CPlusPlus)
- PushOnScopeChains(FD, S);
+ SemaRef.PushOnScopeChains(FD, S);
else if (RecordDecl *Record = dyn_cast<RecordDecl>(TagD))
Record->addDecl(FD);
}
}
/// Build a type-check a new Objective-C exception variable declaration.
-VarDecl *Sema::BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType T,
- SourceLocation StartLoc,
- SourceLocation IdLoc,
- IdentifierInfo *Id,
- bool Invalid) {
+VarDecl *SemaObjC::BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType T,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc,
+ const IdentifierInfo *Id,
+ bool Invalid) {
+ ASTContext &Context = getASTContext();
// ISO/IEC TR 18037 S6.7.3: "The type of an object with automatic storage
// duration shall not be qualified by an address-space qualifier."
// Since all parameters have automatic store duration, they can not have
@@ -5170,8 +5186,8 @@ VarDecl *Sema::BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType T,
Diag(IdLoc, diag::err_catch_param_not_objc_type);
}
- VarDecl *New = VarDecl::Create(Context, CurContext, StartLoc, IdLoc, Id,
- T, TInfo, SC_None);
+ VarDecl *New = VarDecl::Create(Context, SemaRef.CurContext, StartLoc, IdLoc,
+ Id, T, TInfo, SC_None);
New->setExceptionVariable(true);
// In ARC, infer 'retaining' for variables of retainable type.
@@ -5183,7 +5199,7 @@ VarDecl *Sema::BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType T,
return New;
}
-Decl *Sema::ActOnObjCExceptionDecl(Scope *S, Declarator &D) {
+Decl *SemaObjC::ActOnObjCExceptionDecl(Scope *S, Declarator &D) {
const DeclSpec &DS = D.getDeclSpec();
// We allow the "register" storage class on exception variables because
@@ -5204,14 +5220,14 @@ Decl *Sema::ActOnObjCExceptionDecl(Scope *S, Declarator &D) {
<< DeclSpec::getSpecifierName(TSCS);
D.getMutableDeclSpec().ClearStorageClassSpecs();
- DiagnoseFunctionSpecifiers(D.getDeclSpec());
+ SemaRef.DiagnoseFunctionSpecifiers(D.getDeclSpec());
// Check that there are no default arguments inside the type of this
// exception object (C++ only).
if (getLangOpts().CPlusPlus)
- CheckExtraCXXDefaultArguments(D);
+ SemaRef.CheckExtraCXXDefaultArguments(D);
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
+ TypeSourceInfo *TInfo = SemaRef.GetTypeForDeclarator(D);
QualType ExceptionType = TInfo->getType();
VarDecl *New = BuildObjCExceptionDecl(TInfo, ExceptionType,
@@ -5230,9 +5246,9 @@ Decl *Sema::ActOnObjCExceptionDecl(Scope *S, Declarator &D) {
// Add the parameter declaration into this scope.
S->AddDecl(New);
if (D.getIdentifier())
- IdResolver.AddDecl(New);
+ SemaRef.IdResolver.AddDecl(New);
- ProcessDeclAttributes(S, New, D);
+ SemaRef.ProcessDeclAttributes(S, New, D);
if (New->hasAttr<BlocksAttr>())
Diag(New->getLocation(), diag::err_block_on_nonlocal);
@@ -5241,8 +5257,9 @@ Decl *Sema::ActOnObjCExceptionDecl(Scope *S, Declarator &D) {
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
-void Sema::CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
- SmallVectorImpl<ObjCIvarDecl*> &Ivars) {
+void SemaObjC::CollectIvarsToConstructOrDestruct(
+ ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl *> &Ivars) {
+ ASTContext &Context = getASTContext();
for (ObjCIvarDecl *Iv = OI->all_declared_ivar_begin(); Iv;
Iv= Iv->getNextIvar()) {
QualType QT = Context.getBaseElementType(Iv->getType());
@@ -5251,11 +5268,12 @@ void Sema::CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
}
}
-void Sema::DiagnoseUseOfUnimplementedSelectors() {
+void SemaObjC::DiagnoseUseOfUnimplementedSelectors() {
+ ASTContext &Context = getASTContext();
// Load referenced selectors from the external source.
- if (ExternalSource) {
+ if (SemaRef.ExternalSource) {
SmallVector<std::pair<Selector, SourceLocation>, 4> Sels;
- ExternalSource->ReadReferencedSelectors(Sels);
+ SemaRef.ExternalSource->ReadReferencedSelectors(Sels);
for (unsigned I = 0, N = Sels.size(); I != N; ++I)
ReferencedSelectors[Sels[I].first] = Sels[I].second;
}
@@ -5275,8 +5293,8 @@ void Sema::DiagnoseUseOfUnimplementedSelectors() {
}
ObjCIvarDecl *
-Sema::GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
- const ObjCPropertyDecl *&PDecl) const {
+SemaObjC::GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
+ const ObjCPropertyDecl *&PDecl) const {
if (Method->isClassMethod())
return nullptr;
const ObjCInterfaceDecl *IDecl = Method->getClassInterface();
@@ -5300,51 +5318,51 @@ Sema::GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
}
namespace {
- /// Used by Sema::DiagnoseUnusedBackingIvarInAccessor to check if a property
- /// accessor references the backing ivar.
- class UnusedBackingIvarChecker :
- public RecursiveASTVisitor<UnusedBackingIvarChecker> {
- public:
- Sema &S;
- const ObjCMethodDecl *Method;
- const ObjCIvarDecl *IvarD;
- bool AccessedIvar;
- bool InvokedSelfMethod;
-
- UnusedBackingIvarChecker(Sema &S, const ObjCMethodDecl *Method,
- const ObjCIvarDecl *IvarD)
- : S(S), Method(Method), IvarD(IvarD),
- AccessedIvar(false), InvokedSelfMethod(false) {
- assert(IvarD);
- }
-
- bool VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
- if (E->getDecl() == IvarD) {
- AccessedIvar = true;
- return false;
- }
- return true;
+/// Used by SemaObjC::DiagnoseUnusedBackingIvarInAccessor to check if a property
+/// accessor references the backing ivar.
+class UnusedBackingIvarChecker
+ : public RecursiveASTVisitor<UnusedBackingIvarChecker> {
+public:
+ Sema &S;
+ const ObjCMethodDecl *Method;
+ const ObjCIvarDecl *IvarD;
+ bool AccessedIvar;
+ bool InvokedSelfMethod;
+
+ UnusedBackingIvarChecker(Sema &S, const ObjCMethodDecl *Method,
+ const ObjCIvarDecl *IvarD)
+ : S(S), Method(Method), IvarD(IvarD), AccessedIvar(false),
+ InvokedSelfMethod(false) {
+ assert(IvarD);
+ }
+
+ bool VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ if (E->getDecl() == IvarD) {
+ AccessedIvar = true;
+ return false;
}
+ return true;
+ }
- bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
- if (E->getReceiverKind() == ObjCMessageExpr::Instance &&
- S.isSelfExpr(E->getInstanceReceiver(), Method)) {
- InvokedSelfMethod = true;
- }
- return true;
+ bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ if (E->getReceiverKind() == ObjCMessageExpr::Instance &&
+ S.ObjC().isSelfExpr(E->getInstanceReceiver(), Method)) {
+ InvokedSelfMethod = true;
}
- };
+ return true;
+ }
+};
} // end anonymous namespace
-void Sema::DiagnoseUnusedBackingIvarInAccessor(Scope *S,
- const ObjCImplementationDecl *ImplD) {
+void SemaObjC::DiagnoseUnusedBackingIvarInAccessor(
+ Scope *S, const ObjCImplementationDecl *ImplD) {
if (S->hasUnrecoverableErrorOccurred())
return;
for (const auto *CurMethod : ImplD->instance_methods()) {
unsigned DIAG = diag::warn_unused_property_backing_ivar;
SourceLocation Loc = CurMethod->getLocation();
- if (Diags.isIgnored(DIAG, Loc))
+ if (getDiagnostics().isIgnored(DIAG, Loc))
continue;
const ObjCPropertyDecl *PDecl;
@@ -5355,7 +5373,7 @@ void Sema::DiagnoseUnusedBackingIvarInAccessor(Scope *S,
if (CurMethod->isSynthesizedAccessorStub())
continue;
- UnusedBackingIvarChecker Checker(*this, CurMethod, IV);
+ UnusedBackingIvarChecker Checker(SemaRef, CurMethod, IV);
Checker.TraverseStmt(CurMethod->getBody());
if (Checker.AccessedIvar)
continue;
@@ -5370,3 +5388,298 @@ void Sema::DiagnoseUnusedBackingIvarInAccessor(Scope *S,
}
}
}
+
+QualType SemaObjC::AdjustParameterTypeForObjCAutoRefCount(
+ QualType T, SourceLocation NameLoc, TypeSourceInfo *TSInfo) {
+ ASTContext &Context = getASTContext();
+ // In ARC, infer a lifetime qualifier for appropriate parameter types.
+ if (!getLangOpts().ObjCAutoRefCount ||
+ T.getObjCLifetime() != Qualifiers::OCL_None || !T->isObjCLifetimeType())
+ return T;
+
+ Qualifiers::ObjCLifetime Lifetime;
+
+ // Special cases for arrays:
+ // - if it's const, use __unsafe_unretained
+ // - otherwise, it's an error
+ if (T->isArrayType()) {
+ if (!T.isConstQualified()) {
+ if (SemaRef.DelayedDiagnostics.shouldDelayDiagnostics())
+ SemaRef.DelayedDiagnostics.add(
+ sema::DelayedDiagnostic::makeForbiddenType(
+ NameLoc, diag::err_arc_array_param_no_ownership, T, false));
+ else
+ Diag(NameLoc, diag::err_arc_array_param_no_ownership)
+ << TSInfo->getTypeLoc().getSourceRange();
+ }
+ Lifetime = Qualifiers::OCL_ExplicitNone;
+ } else {
+ Lifetime = T->getObjCARCImplicitLifetime();
+ }
+ T = Context.getLifetimeQualifiedType(T, Lifetime);
+
+ return T;
+}
+
+ObjCInterfaceDecl *SemaObjC::getObjCInterfaceDecl(const IdentifierInfo *&Id,
+ SourceLocation IdLoc,
+ bool DoTypoCorrection) {
+ // The third "scope" argument is 0 since we aren't enabling lazy built-in
+ // creation from this context.
+ NamedDecl *IDecl = SemaRef.LookupSingleName(SemaRef.TUScope, Id, IdLoc,
+ Sema::LookupOrdinaryName);
+
+ if (!IDecl && DoTypoCorrection) {
+ // Perform typo correction at the given location, but only if we
+ // find an Objective-C class name.
+ DeclFilterCCC<ObjCInterfaceDecl> CCC{};
+ if (TypoCorrection C = SemaRef.CorrectTypo(
+ DeclarationNameInfo(Id, IdLoc), Sema::LookupOrdinaryName,
+ SemaRef.TUScope, nullptr, CCC, Sema::CTK_ErrorRecovery)) {
+ SemaRef.diagnoseTypo(C, PDiag(diag::err_undef_interface_suggest) << Id);
+ IDecl = C.getCorrectionDeclAs<ObjCInterfaceDecl>();
+ Id = IDecl->getIdentifier();
+ }
+ }
+ ObjCInterfaceDecl *Def = dyn_cast_or_null<ObjCInterfaceDecl>(IDecl);
+ // This routine must always return a class definition, if any.
+ if (Def && Def->getDefinition())
+ Def = Def->getDefinition();
+ return Def;
+}
+
+bool SemaObjC::inferObjCARCLifetime(ValueDecl *decl) {
+ ASTContext &Context = getASTContext();
+ QualType type = decl->getType();
+ Qualifiers::ObjCLifetime lifetime = type.getObjCLifetime();
+ if (lifetime == Qualifiers::OCL_Autoreleasing) {
+ // Various kinds of declaration aren't allowed to be __autoreleasing.
+ unsigned kind = -1U;
+ if (VarDecl *var = dyn_cast<VarDecl>(decl)) {
+ if (var->hasAttr<BlocksAttr>())
+ kind = 0; // __block
+ else if (!var->hasLocalStorage())
+ kind = 1; // global
+ } else if (isa<ObjCIvarDecl>(decl)) {
+ kind = 3; // ivar
+ } else if (isa<FieldDecl>(decl)) {
+ kind = 2; // field
+ }
+
+ if (kind != -1U) {
+ Diag(decl->getLocation(), diag::err_arc_autoreleasing_var) << kind;
+ }
+ } else if (lifetime == Qualifiers::OCL_None) {
+ // Try to infer lifetime.
+ if (!type->isObjCLifetimeType())
+ return false;
+
+ lifetime = type->getObjCARCImplicitLifetime();
+ type = Context.getLifetimeQualifiedType(type, lifetime);
+ decl->setType(type);
+ }
+
+ if (VarDecl *var = dyn_cast<VarDecl>(decl)) {
+ // Thread-local variables cannot have lifetime.
+ if (lifetime && lifetime != Qualifiers::OCL_ExplicitNone &&
+ var->getTLSKind()) {
+ Diag(var->getLocation(), diag::err_arc_thread_ownership)
+ << var->getType();
+ return true;
+ }
+ }
+
+ return false;
+}
+
+ObjCContainerDecl *SemaObjC::getObjCDeclContext() const {
+ return (dyn_cast_or_null<ObjCContainerDecl>(SemaRef.CurContext));
+}
+
+void SemaObjC::SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation) {
+ if (!getLangOpts().CPlusPlus)
+ return;
+ if (ObjCInterfaceDecl *OID = ObjCImplementation->getClassInterface()) {
+ ASTContext &Context = getASTContext();
+ SmallVector<ObjCIvarDecl *, 8> ivars;
+ CollectIvarsToConstructOrDestruct(OID, ivars);
+ if (ivars.empty())
+ return;
+ SmallVector<CXXCtorInitializer *, 32> AllToInit;
+ for (unsigned i = 0; i < ivars.size(); i++) {
+ FieldDecl *Field = ivars[i];
+ if (Field->isInvalidDecl())
+ continue;
+
+ CXXCtorInitializer *Member;
+ InitializedEntity InitEntity = InitializedEntity::InitializeMember(Field);
+ InitializationKind InitKind =
+ InitializationKind::CreateDefault(ObjCImplementation->getLocation());
+
+ InitializationSequence InitSeq(SemaRef, InitEntity, InitKind,
+ std::nullopt);
+ ExprResult MemberInit =
+ InitSeq.Perform(SemaRef, InitEntity, InitKind, std::nullopt);
+ MemberInit = SemaRef.MaybeCreateExprWithCleanups(MemberInit);
+ // Note, MemberInit could actually come back empty if no initialization
+ // is required (e.g., because it would call a trivial default constructor)
+ if (!MemberInit.get() || MemberInit.isInvalid())
+ continue;
+
+ Member = new (Context)
+ CXXCtorInitializer(Context, Field, SourceLocation(), SourceLocation(),
+ MemberInit.getAs<Expr>(), SourceLocation());
+ AllToInit.push_back(Member);
+
+ // Be sure that the destructor is accessible and is marked as referenced.
+ if (const RecordType *RecordTy =
+ Context.getBaseElementType(Field->getType())
+ ->getAs<RecordType>()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (CXXDestructorDecl *Destructor = SemaRef.LookupDestructor(RD)) {
+ SemaRef.MarkFunctionReferenced(Field->getLocation(), Destructor);
+ SemaRef.CheckDestructorAccess(
+ Field->getLocation(), Destructor,
+ PDiag(diag::err_access_dtor_ivar)
+ << Context.getBaseElementType(Field->getType()));
+ }
+ }
+ }
+ ObjCImplementation->setIvarInitializers(Context, AllToInit.data(),
+ AllToInit.size());
+ }
+}
+
+/// TranslateIvarVisibility - Translate visibility from a token ID to an
+/// AST enum value.
+static ObjCIvarDecl::AccessControl
+TranslateIvarVisibility(tok::ObjCKeywordKind ivarVisibility) {
+ switch (ivarVisibility) {
+ default:
+ llvm_unreachable("Unknown visitibility kind");
+ case tok::objc_private:
+ return ObjCIvarDecl::Private;
+ case tok::objc_public:
+ return ObjCIvarDecl::Public;
+ case tok::objc_protected:
+ return ObjCIvarDecl::Protected;
+ case tok::objc_package:
+ return ObjCIvarDecl::Package;
+ }
+}
+
+/// ActOnIvar - Each ivar field of an objective-c class is passed into this
+/// in order to create an IvarDecl object for it.
+Decl *SemaObjC::ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D,
+ Expr *BitWidth, tok::ObjCKeywordKind Visibility) {
+
+ const IdentifierInfo *II = D.getIdentifier();
+ SourceLocation Loc = DeclStart;
+ if (II)
+ Loc = D.getIdentifierLoc();
+
+ // FIXME: Unnamed fields can be handled in various different ways, for
+ // example, unnamed unions inject all members into the struct namespace!
+
+ TypeSourceInfo *TInfo = SemaRef.GetTypeForDeclarator(D);
+ QualType T = TInfo->getType();
+
+ if (BitWidth) {
+ // 6.7.2.1p3, 6.7.2.1p4
+ BitWidth =
+ SemaRef.VerifyBitField(Loc, II, T, /*IsMsStruct*/ false, BitWidth)
+ .get();
+ if (!BitWidth)
+ D.setInvalidType();
+ } else {
+ // Not a bitfield.
+
+ // validate II.
+ }
+ if (T->isReferenceType()) {
+ Diag(Loc, diag::err_ivar_reference_type);
+ D.setInvalidType();
+ }
+ // C99 6.7.2.1p8: A member of a structure or union may have any type other
+ // than a variably modified type.
+ else if (T->isVariablyModifiedType()) {
+ if (!SemaRef.tryToFixVariablyModifiedVarType(
+ TInfo, T, Loc, diag::err_typecheck_ivar_variable_size))
+ D.setInvalidType();
+ }
+
+ // Get the visibility (access control) for this ivar.
+ ObjCIvarDecl::AccessControl ac = Visibility != tok::objc_not_keyword
+ ? TranslateIvarVisibility(Visibility)
+ : ObjCIvarDecl::None;
+ // Must set ivar's DeclContext to its enclosing interface.
+ ObjCContainerDecl *EnclosingDecl =
+ cast<ObjCContainerDecl>(SemaRef.CurContext);
+ if (!EnclosingDecl || EnclosingDecl->isInvalidDecl())
+ return nullptr;
+ ObjCContainerDecl *EnclosingContext;
+ if (ObjCImplementationDecl *IMPDecl =
+ dyn_cast<ObjCImplementationDecl>(EnclosingDecl)) {
+ if (getLangOpts().ObjCRuntime.isFragile()) {
+ // Case of ivar declared in an implementation. Context is that of its
+ // class.
+ EnclosingContext = IMPDecl->getClassInterface();
+ assert(EnclosingContext && "Implementation has no class interface!");
+ } else
+ EnclosingContext = EnclosingDecl;
+ } else {
+ if (ObjCCategoryDecl *CDecl = dyn_cast<ObjCCategoryDecl>(EnclosingDecl)) {
+ if (getLangOpts().ObjCRuntime.isFragile() || !CDecl->IsClassExtension()) {
+ Diag(Loc, diag::err_misplaced_ivar) << CDecl->IsClassExtension();
+ return nullptr;
+ }
+ }
+ EnclosingContext = EnclosingDecl;
+ }
+
+ // Construct the decl.
+ ObjCIvarDecl *NewID =
+ ObjCIvarDecl::Create(getASTContext(), EnclosingContext, DeclStart, Loc,
+ II, T, TInfo, ac, BitWidth);
+
+ if (T->containsErrors())
+ NewID->setInvalidDecl();
+
+ if (II) {
+ NamedDecl *PrevDecl =
+ SemaRef.LookupSingleName(S, II, Loc, Sema::LookupMemberName,
+ RedeclarationKind::ForVisibleRedeclaration);
+ if (PrevDecl && SemaRef.isDeclInScope(PrevDecl, EnclosingContext, S) &&
+ !isa<TagDecl>(PrevDecl)) {
+ Diag(Loc, diag::err_duplicate_member) << II;
+ Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
+ NewID->setInvalidDecl();
+ }
+ }
+
+ // Process attributes attached to the ivar.
+ SemaRef.ProcessDeclAttributes(S, NewID, D);
+
+ if (D.isInvalidType())
+ NewID->setInvalidDecl();
+
+ // In ARC, infer 'retaining' for ivars of retainable type.
+ if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewID))
+ NewID->setInvalidDecl();
+
+ if (D.getDeclSpec().isModulePrivateSpecified())
+ NewID->setModulePrivate();
+
+ if (II) {
+ // FIXME: When interfaces are DeclContexts, we'll need to add
+ // these to the interface.
+ S->AddDecl(NewID);
+ SemaRef.IdResolver.AddDecl(NewID);
+ }
+
+ if (getLangOpts().ObjCRuntime.isNonFragile() && !NewID->isInvalidDecl() &&
+ isa<ObjCInterfaceDecl>(EnclosingDecl))
+ Diag(Loc, diag::warn_ivars_in_interface);
+
+ return NewID;
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp
index 75730ea888af..427ffd9061ef 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -112,12 +112,6 @@ ExprResult Sema::ActOnNoexceptSpec(Expr *NoexceptExpr,
return Converted;
}
-/// CheckSpecifiedExceptionType - Check if the given type is valid in an
-/// exception specification. Incomplete types, or pointers to incomplete types
-/// other than void are not allowed.
-///
-/// \param[in,out] T The exception type. This will be decayed to a pointer type
-/// when the input is an array or a function type.
bool Sema::CheckSpecifiedExceptionType(QualType &T, SourceRange Range) {
// C++11 [except.spec]p2:
// A type cv T, "array of T", or "function returning T" denoted
@@ -189,9 +183,6 @@ bool Sema::CheckSpecifiedExceptionType(QualType &T, SourceRange Range) {
return false;
}
-/// CheckDistantExceptionSpec - Check if the given type is a pointer or pointer
-/// to member to a function with an exception specification. This means that
-/// it is invalid to add another level of indirection.
bool Sema::CheckDistantExceptionSpec(QualType T) {
// C++17 removes this rule in favor of putting exception specifications into
// the type system.
@@ -258,13 +249,14 @@ Sema::UpdateExceptionSpec(FunctionDecl *FD,
}
static bool exceptionSpecNotKnownYet(const FunctionDecl *FD) {
- auto *MD = dyn_cast<CXXMethodDecl>(FD);
- if (!MD)
+ ExceptionSpecificationType EST =
+ FD->getType()->castAs<FunctionProtoType>()->getExceptionSpecType();
+ if (EST == EST_Unparsed)
+ return true;
+ else if (EST != EST_Unevaluated)
return false;
-
- auto EST = MD->getType()->castAs<FunctionProtoType>()->getExceptionSpecType();
- return EST == EST_Unparsed ||
- (EST == EST_Unevaluated && MD->getParent()->isBeingDefined());
+ const DeclContext *DC = FD->getLexicalDeclContext();
+ return DC->isRecord() && cast<RecordDecl>(DC)->isBeingDefined();
}
static bool CheckEquivalentExceptionSpecImpl(
@@ -490,10 +482,6 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
return ReturnValueOnError;
}
-/// CheckEquivalentExceptionSpec - Check if the two types have equivalent
-/// exception specifications. Exception specifications are equivalent if
-/// they allow exactly the same set of exception types. It does not matter how
-/// that is achieved. See C++ [except.spec]p2.
bool Sema::CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc) {
@@ -766,9 +754,6 @@ bool Sema::handlerCanCatch(QualType HandlerType, QualType ExceptionType) {
llvm_unreachable("unexpected access check result");
}
-/// CheckExceptionSpecSubset - Check whether the second function type's
-/// exception specification is a subset (or equivalent) of the first function
-/// type. This is used by override and pointer assignment checks.
bool Sema::CheckExceptionSpecSubset(
const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID,
@@ -889,11 +874,6 @@ CheckSpecForTypesEquivalent(Sema &S, const PartialDiagnostic &DiagID,
SFunc, SourceLoc);
}
-/// CheckParamExceptionSpec - Check if the parameter and return types of the
-/// two functions have equivalent exception specs. This is part of the
-/// assignment and override compatibility check. We do not check the parameters
-/// of parameter function pointers recursively, as no sane programmer would
-/// even be able to write such a function type.
bool Sema::CheckParamExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic &NoteID,
const FunctionProtoType *Target, bool SkipTargetFirstParameter,
@@ -1017,13 +997,13 @@ CanThrowResult Sema::canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
SourceLocation Loc) {
// As an extension, we assume that __attribute__((nothrow)) functions don't
// throw.
- if (D && isa<FunctionDecl>(D) && D->hasAttr<NoThrowAttr>())
+ if (isa_and_nonnull<FunctionDecl>(D) && D->hasAttr<NoThrowAttr>())
return CT_Cannot;
QualType T;
// In C++1z, just look at the function type of the callee.
- if (S.getLangOpts().CPlusPlus17 && E && isa<CallExpr>(E)) {
+ if (S.getLangOpts().CPlusPlus17 && isa_and_nonnull<CallExpr>(E)) {
E = cast<CallExpr>(E)->getCallee();
T = E->getType();
if (T->isSpecificPlaceholderType(BuiltinType::BoundMember)) {
@@ -1110,24 +1090,22 @@ static CanThrowResult canDynamicCastThrow(const CXXDynamicCastExpr *DC) {
}
static CanThrowResult canTypeidThrow(Sema &S, const CXXTypeidExpr *DC) {
+ // A typeid of a type is a constant and does not throw.
if (DC->isTypeOperand())
return CT_Cannot;
- Expr *Op = DC->getExprOperand();
- if (Op->isTypeDependent())
+ if (DC->isValueDependent())
return CT_Dependent;
- const RecordType *RT = Op->getType()->getAs<RecordType>();
- if (!RT)
+ // If this operand is not evaluated it cannot possibly throw.
+ if (!DC->isPotentiallyEvaluated())
return CT_Cannot;
- if (!cast<CXXRecordDecl>(RT->getDecl())->isPolymorphic())
- return CT_Cannot;
-
- if (Op->Classify(S.Context).isPRValue())
- return CT_Cannot;
+ // Can throw std::bad_typeid if a nullptr is dereferenced.
+ if (DC->hasNullCheck())
+ return CT_Can;
- return CT_Can;
+ return S.canThrow(DC->getExprOperand());
}
CanThrowResult Sema::canThrow(const Stmt *S) {
@@ -1156,8 +1134,9 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
}
case Expr::CXXTypeidExprClass:
- // - a potentially evaluated typeid expression applied to a glvalue
- // expression whose type is a polymorphic class type
+ // - a potentially evaluated typeid expression applied to a (possibly
+ // parenthesized) built-in unary * operator applied to a pointer to a
+ // polymorphic class type
return canTypeidThrow(*this, cast<CXXTypeidExpr>(S));
// - a potentially evaluated call to a function, member function, function
@@ -1314,7 +1293,7 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
// Some might be dependent for other reasons.
case Expr::ArraySubscriptExprClass:
case Expr::MatrixSubscriptExprClass:
- case Expr::OMPArraySectionExprClass:
+ case Expr::ArraySectionExprClass:
case Expr::OMPArrayShapingExprClass:
case Expr::OMPIteratorExprClass:
case Expr::BinaryOperatorClass:
@@ -1410,8 +1389,10 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
case Expr::OpaqueValueExprClass:
case Expr::PredefinedExprClass:
case Expr::SizeOfPackExprClass:
+ case Expr::PackIndexingExprClass:
case Expr::StringLiteralClass:
case Expr::SourceLocExprClass:
+ case Expr::EmbedExprClass:
case Expr::ConceptSpecializationExprClass:
case Expr::RequiresExprClass:
// These expressions can never throw.
@@ -1422,6 +1403,8 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
llvm_unreachable("Invalid class for expression");
// Most statements can throw if any substatement can throw.
+ case Stmt::OpenACCComputeConstructClass:
+ case Stmt::OpenACCLoopConstructClass:
case Stmt::AttributedStmtClass:
case Stmt::BreakStmtClass:
case Stmt::CapturedStmtClass:
@@ -1483,6 +1466,8 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
case Stmt::OMPSimdDirectiveClass:
case Stmt::OMPTileDirectiveClass:
case Stmt::OMPUnrollDirectiveClass:
+ case Stmt::OMPReverseDirectiveClass:
+ case Stmt::OMPInterchangeDirectiveClass:
case Stmt::OMPSingleDirectiveClass:
case Stmt::OMPTargetDataDirectiveClass:
case Stmt::OMPTargetDirectiveClass:
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
index 4cce0abc2315..687b1be94592 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "CheckExprLifetime.h"
#include "TreeTransform.h"
#include "UsedDeclVisitor.h"
#include "clang/AST/ASTConsumer.h"
@@ -49,10 +50,15 @@
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaCUDA.h"
#include "clang/Sema/SemaFixItUtils.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaOpenMP.h"
+#include "clang/Sema/SemaPseudoObject.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ConvertUTF.h"
@@ -63,8 +69,6 @@
using namespace clang;
using namespace sema;
-/// Determine whether the use of this declaration is valid, without
-/// emitting diagnostics.
bool Sema::CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid) {
// See if this is an auto-typed variable whose initializer we are parsing.
if (ParsingInitForAutoVars.count(D))
@@ -106,14 +110,13 @@ static void DiagnoseUnusedOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc) {
// should diagnose them.
if (A->getSemanticSpelling() != UnusedAttr::CXX11_maybe_unused &&
A->getSemanticSpelling() != UnusedAttr::C23_maybe_unused) {
- const Decl *DC = cast_or_null<Decl>(S.getCurObjCLexicalContext());
+ const Decl *DC = cast_or_null<Decl>(S.ObjC().getCurObjCLexicalContext());
if (DC && !DC->hasAttr<UnusedAttr>())
S.Diag(Loc, diag::warn_used_but_marked_unused) << D;
}
}
}
-/// Emit a note explaining that this function is deleted.
void Sema::NoteDeletedFunction(FunctionDecl *Decl) {
assert(Decl && Decl->isDeleted());
@@ -209,18 +212,6 @@ void Sema::MaybeSuggestAddingStaticToDecl(const FunctionDecl *Cur) {
}
}
-/// Determine whether the use of this declaration is valid, and
-/// emit any corresponding diagnostics.
-///
-/// This routine diagnoses various problems with referencing
-/// declarations that can occur when using a declaration. For example,
-/// it might warn if a deprecated or unavailable declaration is being
-/// used, or produce an error (and return true) if a C++0x deleted
-/// function is being used.
-///
-/// \returns true if there was an error (this declaration cannot be
-/// referenced), false otherwise.
-///
bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
@@ -271,8 +262,11 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
Diag(Loc, diag::err_deleted_inherited_ctor_use)
<< Ctor->getParent()
<< Ctor->getInheritedConstructor().getConstructor()->getParent();
- else
- Diag(Loc, diag::err_deleted_function_use);
+ else {
+ StringLiteral *Msg = FD->getDeletedMessage();
+ Diag(Loc, diag::err_deleted_function_use)
+ << (Msg != nullptr) << (Msg ? Msg->getString() : StringRef());
+ }
NoteDeletedFunction(FD);
return true;
}
@@ -307,7 +301,7 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
DeduceReturnType(FD, Loc))
return true;
- if (getLangOpts().CUDA && !CheckCUDACall(Loc, FD))
+ if (getLangOpts().CUDA && !CUDA().CheckCall(Loc, FD))
return true;
}
@@ -355,9 +349,9 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
// at the same location.
// [OpenMP 5.2] Also allow iterator declared variables.
if (LangOpts.OpenMP && isa<VarDecl>(D) &&
- !isOpenMPDeclareMapperVarDeclAllowed(cast<VarDecl>(D))) {
+ !OpenMP().isOpenMPDeclareMapperVarDeclAllowed(cast<VarDecl>(D))) {
Diag(Loc, diag::err_omp_declare_mapper_wrong_var)
- << getOpenMPDeclareMapperVarName();
+ << OpenMP().getOpenMPDeclareMapperVarName();
Diag(D->getLocation(), diag::note_entity_declared_at) << D;
return true;
}
@@ -410,10 +404,6 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
return false;
}
-/// DiagnoseSentinelCalls - This routine checks whether a call or
-/// message-send is to a declaration with the sentinel attribute, and
-/// if so, it checks that the requirements of the sentinel are
-/// satisfied.
void Sema::DiagnoseSentinelCalls(const NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args) {
const SentinelAttr *Attr = D->getAttr<SentinelAttr>();
@@ -658,17 +648,19 @@ ExprResult Sema::DefaultLvalueConversion(Expr *E) {
QualType T = E->getType();
assert(!T.isNull() && "r-value conversion on typeless expression?");
- // lvalue-to-rvalue conversion cannot be applied to function or array types.
- if (T->isFunctionType() || T->isArrayType())
+ // lvalue-to-rvalue conversion cannot be applied to types that decay to
+ // pointers (i.e. function or array types).
+ if (T->canDecayToPointerType())
return E;
// We don't want to throw lvalue-to-rvalue casts on top of
// expressions of certain types in C++.
- if (getLangOpts().CPlusPlus &&
- (E->getType() == Context.OverloadTy ||
- T->isDependentType() ||
- T->isRecordType()))
- return E;
+ if (getLangOpts().CPlusPlus) {
+ if (T == Context.OverloadTy || T->isRecordType() ||
+ (T->isDependentType() && !T->isAnyPointerType() &&
+ !T->isMemberPointerType()))
+ return E;
+ }
// The C standard is actually really unclear on this point, and
// DR106 tells us what the result should be but not why. It's
@@ -762,8 +754,6 @@ ExprResult Sema::DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose) {
return Res;
}
-/// CallExprUnaryConversions - a special case of an unary conversion
-/// performed on a function designator of a call expression.
ExprResult Sema::CallExprUnaryConversions(Expr *E) {
QualType Ty = E->getType();
ExprResult Res = E;
@@ -931,9 +921,6 @@ ExprResult Sema::DefaultArgumentPromotion(Expr *E) {
return E;
}
-/// Determine the degree of POD-ness for an expression.
-/// Incomplete types are considered POD, since this check can be performed
-/// when we're in an unevaluated context.
Sema::VarArgKind Sema::isValidVarArgType(const QualType &Ty) {
if (Ty->isIncompleteType()) {
// C++11 [expr.call]p7:
@@ -1034,8 +1021,6 @@ void Sema::checkVariadicArgument(const Expr *E, VariadicCallType CT) {
}
}
-/// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
-/// will create a trap if the resulting type is not a POD type.
ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl) {
if (const BuiltinType *PlaceholderTy = E->getType()->getAsPlaceholderType()) {
@@ -1043,9 +1028,9 @@ ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
if (PlaceholderTy->getKind() == BuiltinType::ARCUnbridgedCast &&
(CT == VariadicMethod ||
(FDecl && FDecl->hasAttr<CFAuditedTransferAttr>()))) {
- E = stripARCUnbridgedCast(E);
+ E = ObjC().stripARCUnbridgedCast(E);
- // Otherwise, do normal placeholder checking.
+ // Otherwise, do normal placeholder checking.
} else {
ExprResult ExprRes = CheckPlaceholderExpr(E);
if (ExprRes.isInvalid())
@@ -1099,12 +1084,13 @@ ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
return E;
}
-/// Converts an integer to complex float type. Helper function of
+/// Convert complex integers to complex floats and real integers to
+/// real floats as required for complex arithmetic. Helper function of
/// UsualArithmeticConversions()
///
/// \return false if the integer expression is an integer type and is
-/// successfully converted to the complex type.
-static bool handleIntegerToComplexFloatConversion(Sema &S, ExprResult &IntExpr,
+/// successfully converted to the (complex) float type.
+static bool handleComplexIntegerToFloatConversion(Sema &S, ExprResult &IntExpr,
ExprResult &ComplexExpr,
QualType IntTy,
QualType ComplexTy,
@@ -1114,8 +1100,6 @@ static bool handleIntegerToComplexFloatConversion(Sema &S, ExprResult &IntExpr,
if (IntTy->isIntegerType()) {
QualType fpTy = ComplexTy->castAs<ComplexType>()->getElementType();
IntExpr = S.ImpCastExprToType(IntExpr.get(), fpTy, CK_IntegralToFloating);
- IntExpr = S.ImpCastExprToType(IntExpr.get(), ComplexTy,
- CK_FloatingRealToComplex);
} else {
assert(IntTy->isComplexIntegerType());
IntExpr = S.ImpCastExprToType(IntExpr.get(), ComplexTy,
@@ -1160,11 +1144,11 @@ static QualType handleComplexFloatConversion(Sema &S, ExprResult &Shorter,
static QualType handleComplexConversion(Sema &S, ExprResult &LHS,
ExprResult &RHS, QualType LHSType,
QualType RHSType, bool IsCompAssign) {
- // if we have an integer operand, the result is the complex type.
- if (!handleIntegerToComplexFloatConversion(S, RHS, LHS, RHSType, LHSType,
+ // Handle (complex) integer types.
+ if (!handleComplexIntegerToFloatConversion(S, RHS, LHS, RHSType, LHSType,
/*SkipCast=*/false))
return LHSType;
- if (!handleIntegerToComplexFloatConversion(S, LHS, RHS, LHSType, RHSType,
+ if (!handleComplexIntegerToFloatConversion(S, LHS, RHS, LHSType, RHSType,
/*SkipCast=*/IsCompAssign))
return RHSType;
@@ -1497,7 +1481,8 @@ static void checkEnumArithmeticConversions(Sema &S, Expr *LHS, Expr *RHS,
//
// Warn on this in all language modes. Produce a deprecation warning in C++20.
// Eventually we will presumably reject these cases (in C++23 onwards?).
- QualType L = LHS->getType(), R = RHS->getType();
+ QualType L = LHS->getEnumCoercedType(S.Context),
+ R = RHS->getEnumCoercedType(S.Context);
bool LEnum = L->isUnscopedEnumerationType(),
REnum = R->isUnscopedEnumerationType();
bool IsCompAssign = ACK == Sema::ACK_CompAssign;
@@ -2041,12 +2026,6 @@ Sema::ExpandFunctionLocalPredefinedMacros(ArrayRef<Token> Toks) {
return ExpandedToks;
}
-/// ActOnStringLiteral - The specified tokens were lexed as pasted string
-/// fragments (e.g. "foo" "bar" L"baz"). The result string has to handle string
-/// concatenation ([C99 5.1.1.2, translation phase #6]), so it may come from
-/// multiple tokens. However, the common case is that StringToks points to one
-/// string.
-///
ExprResult
Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
assert(!StringToks.empty() && "Must have at least one string!");
@@ -2072,6 +2051,8 @@ Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
} else if (Literal.isUTF8()) {
if (getLangOpts().Char8)
CharTy = Context.Char8Ty;
+ else if (getLangOpts().C23)
+ CharTy = Context.UnsignedCharTy;
Kind = StringLiteralKind::UTF8;
} else if (Literal.isUTF16()) {
CharTy = Context.Char16Ty;
@@ -2083,17 +2064,23 @@ Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
CharTy = Context.UnsignedCharTy;
}
- // Warn on initializing an array of char from a u8 string literal; this
- // becomes ill-formed in C++2a.
- if (getLangOpts().CPlusPlus && !getLangOpts().CPlusPlus20 &&
- !getLangOpts().Char8 && Kind == StringLiteralKind::UTF8) {
- Diag(StringTokLocs.front(), diag::warn_cxx20_compat_utf8_string);
+ // Warn on u8 string literals before C++20 and C23, whose type
+ // was an array of char before but becomes an array of char8_t.
+ // In C++20, it cannot be used where a pointer to char is expected.
+ // In C23, it might have an unexpected value if char was signed.
+ if (Kind == StringLiteralKind::UTF8 &&
+ (getLangOpts().CPlusPlus
+ ? !getLangOpts().CPlusPlus20 && !getLangOpts().Char8
+ : !getLangOpts().C23)) {
+ Diag(StringTokLocs.front(), getLangOpts().CPlusPlus
+ ? diag::warn_cxx20_compat_utf8_string
+ : diag::warn_c23_compat_utf8_string);
// Create removals for all 'u8' prefixes in the string literal(s). This
- // ensures C++2a compatibility (but may change the program behavior when
+ // ensures C++20/C23 compatibility (but may change the program behavior when
// built by non-Clang compilers for which the execution character set is
// not always UTF-8).
- auto RemovalDiag = PDiag(diag::note_cxx20_compat_utf8_string_remove_u8);
+ auto RemovalDiag = PDiag(diag::note_cxx20_c23_compat_utf8_string_remove_u8);
SourceLocation RemovalDiagLoc;
for (const Token &Tok : StringToks) {
if (Tok.getKind() == tok::utf8_string_literal) {
@@ -2261,7 +2248,7 @@ NonOdrUseReason Sema::getNonOdrUseReasonInCurrentContext(ValueDecl *D) {
// be loaded from the captured.
if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
if (VD->getType()->isReferenceType() &&
- !(getLangOpts().OpenMP && isOpenMPCapturedDecl(D)) &&
+ !(getLangOpts().OpenMP && OpenMP().isOpenMPCapturedDecl(D)) &&
!isCapturingReferenceToHostVarInCUDADeviceLambda(*this, VD) &&
VD->isUsableInConstantExpressions(Context))
return NOUR_Constant;
@@ -2272,8 +2259,6 @@ NonOdrUseReason Sema::getNonOdrUseReasonInCurrentContext(ValueDecl *D) {
return NOUR_None;
}
-/// BuildDeclRefExpr - Build an expression that references a
-/// declaration that does not require a closure capture.
DeclRefExpr *
Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
@@ -2331,15 +2316,6 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
return E;
}
-/// Decomposes the given name into a DeclarationNameInfo, its location, and
-/// possibly a list of template arguments.
-///
-/// If this produces template arguments, it is permitted to call
-/// DecomposeTemplateName.
-///
-/// This actually loses a lot of source location information for
-/// non-standard name kinds; we should consider preserving that in
-/// some way.
void
Sema::DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
@@ -2396,14 +2372,6 @@ static void emitEmptyLookupTypoDiagnostic(
SemaRef.PDiag(NoteID));
}
-/// Diagnose a lookup that found results in an enclosing class during error
-/// recovery. This usually indicates that the results were found in a dependent
-/// base class that could not be searched as part of a template definition.
-/// Always issues a diagnostic (though this may be only a warning in MS
-/// compatibility mode).
-///
-/// Return \c true if the error is unrecoverable, or \c false if the caller
-/// should attempt to recover using these lookup results.
bool Sema::DiagnoseDependentMemberLookup(const LookupResult &R) {
// During a default argument instantiation the CurContext points
// to a CXXMethodDecl; but we can't apply a this-> fixit inside a
@@ -2463,9 +2431,6 @@ bool Sema::DiagnoseDependentMemberLookup(const LookupResult &R) {
return false;
}
-/// Diagnose an empty lookup.
-///
-/// \return false if new lookup candidates were found
bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs,
@@ -2653,7 +2618,7 @@ recoverFromMSUnqualifiedLookup(Sema &S, ASTContext &Context,
RD = ThisType->getPointeeType()->getAsCXXRecordDecl();
else if (auto *MD = dyn_cast<CXXMethodDecl>(S.CurContext))
RD = MD->getParent();
- if (!RD || !RD->hasAnyDependentBases())
+ if (!RD || !RD->hasDefinition() || !RD->hasAnyDependentBases())
return nullptr;
// Diagnose this as unqualified lookup into a dependent base class. If 'this'
@@ -2709,34 +2674,23 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
return ExprError();
}
- // C++ [temp.dep.expr]p3:
- // An id-expression is type-dependent if it contains:
- // -- an identifier that was declared with a dependent type,
- // (note: handled after lookup)
- // -- a template-id that is dependent,
- // (note: handled in BuildTemplateIdExpr)
- // -- a conversion-function-id that specifies a dependent type,
- // -- a nested-name-specifier that contains a class-name that
- // names a dependent type.
- // Determine whether this is a member of an unknown specialization;
- // we need to handle these differently.
- bool DependentID = false;
- if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName &&
- Name.getCXXNameType()->isDependentType()) {
- DependentID = true;
- } else if (SS.isSet()) {
- if (DeclContext *DC = computeDeclContext(SS, false)) {
- if (RequireCompleteDeclContext(SS, DC))
- return ExprError();
- } else {
- DependentID = true;
+ // This specially handles arguments of attributes appertains to a type of C
+ // struct field such that the name lookup within a struct finds the member
+ // name, which is not the case for other contexts in C.
+ if (isAttrContext() && !getLangOpts().CPlusPlus && S->isClassScope()) {
+ // See if this is reference to a field of struct.
+ LookupResult R(*this, NameInfo, LookupMemberName);
+ // LookupName handles a name lookup from within anonymous struct.
+ if (LookupName(R, S)) {
+ if (auto *VD = dyn_cast<ValueDecl>(R.getFoundDecl())) {
+ QualType type = VD->getType().getNonReferenceType();
+ // This will eventually be translated into MemberExpr upon
+ // the use of instantiated struct fields.
+ return BuildDeclRefExpr(VD, type, VK_LValue, NameLoc);
+ }
}
}
- if (DependentID)
- return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
- IsAddressOfOperand, TemplateArgs);
-
// Perform the required lookup.
LookupResult R(*this, NameInfo,
(Id.getKind() == UnqualifiedIdKind::IK_ImplicitSelfParam)
@@ -2748,31 +2702,30 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
// lookup to determine that it was a template name in the first place. If
// this becomes a performance hit, we can work harder to preserve those
// results until we get here but it's likely not worth it.
- bool MemberOfUnknownSpecialization;
AssumedTemplateKind AssumedTemplate;
- if (LookupTemplateName(R, S, SS, QualType(), /*EnteringContext=*/false,
- MemberOfUnknownSpecialization, TemplateKWLoc,
+ if (LookupTemplateName(R, S, SS, /*ObjectType=*/QualType(),
+ /*EnteringContext=*/false, TemplateKWLoc,
&AssumedTemplate))
return ExprError();
- if (MemberOfUnknownSpecialization ||
- (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation))
+ if (R.wasNotFoundInCurrentInstantiation() || SS.isInvalid())
return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
IsAddressOfOperand, TemplateArgs);
} else {
bool IvarLookupFollowUp = II && !SS.isSet() && getCurMethodDecl();
- LookupParsedName(R, S, &SS, !IvarLookupFollowUp);
+ LookupParsedName(R, S, &SS, /*ObjectType=*/QualType(),
+ /*AllowBuiltinCreation=*/!IvarLookupFollowUp);
// If the result might be in a dependent base class, this is a dependent
// id-expression.
- if (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation)
+ if (R.wasNotFoundInCurrentInstantiation() || SS.isInvalid())
return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
IsAddressOfOperand, TemplateArgs);
// If this reference is in an Objective-C method, then we need to do
// some special Objective-C lookup, too.
if (IvarLookupFollowUp) {
- ExprResult E(LookupInObjCMethod(R, S, II, true));
+ ExprResult E(ObjC().LookupInObjCMethod(R, S, II, true));
if (E.isInvalid())
return ExprError();
@@ -2857,7 +2810,7 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
// reference the ivar.
if (ObjCIvarDecl *Ivar = R.getAsSingle<ObjCIvarDecl>()) {
R.clear();
- ExprResult E(LookupInObjCMethod(R, S, Ivar->getIdentifier()));
+ ExprResult E(ObjC().LookupInObjCMethod(R, S, Ivar->getIdentifier()));
// In a hopelessly buggy code, Objective-C instance variable
// lookup fails and no expression will be built to reference it.
if (!E.isInvalid() && !E.get())
@@ -2893,25 +2846,9 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
// to get this right here so that we don't end up making a
// spuriously dependent expression if we're inside a dependent
// instance method.
- if (!R.empty() && (*R.begin())->isCXXClassMember()) {
- bool MightBeImplicitMember;
- if (!IsAddressOfOperand)
- MightBeImplicitMember = true;
- else if (!SS.isEmpty())
- MightBeImplicitMember = false;
- else if (R.isOverloadedResult())
- MightBeImplicitMember = false;
- else if (R.isUnresolvableResult())
- MightBeImplicitMember = true;
- else
- MightBeImplicitMember = isa<FieldDecl>(R.getFoundDecl()) ||
- isa<IndirectFieldDecl>(R.getFoundDecl()) ||
- isa<MSPropertyDecl>(R.getFoundDecl());
-
- if (MightBeImplicitMember)
- return BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc,
- R, TemplateArgs, S);
- }
+ if (isPotentialImplicitMemberAccess(SS, R, IsAddressOfOperand))
+ return BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs,
+ S);
if (TemplateArgs || TemplateKWLoc.isValid()) {
@@ -2930,32 +2867,16 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
return BuildDeclarationNameExpr(SS, R, ADL);
}
-/// BuildQualifiedDeclarationNameExpr - Build a C++ qualified
-/// declaration name, generally during template instantiation.
-/// There's a large number of things which don't need to be done along
-/// this path.
ExprResult Sema::BuildQualifiedDeclarationNameExpr(
CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
- bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI) {
- if (NameInfo.getName().isDependentName())
- return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
- NameInfo, /*TemplateArgs=*/nullptr);
-
- DeclContext *DC = computeDeclContext(SS, false);
- if (!DC)
- return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
- NameInfo, /*TemplateArgs=*/nullptr);
-
- if (RequireCompleteDeclContext(SS, DC))
- return ExprError();
-
+ bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI) {
LookupResult R(*this, NameInfo, LookupOrdinaryName);
- LookupQualifiedName(R, DC);
+ LookupParsedName(R, /*S=*/nullptr, &SS, /*ObjectType=*/QualType());
if (R.isAmbiguous())
return ExprError();
- if (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation)
+ if (R.wasNotFoundInCurrentInstantiation() || SS.isInvalid())
return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
NameInfo, /*TemplateArgs=*/nullptr);
@@ -2964,6 +2885,7 @@ ExprResult Sema::BuildQualifiedDeclarationNameExpr(
// diagnostic during template instantiation is likely bogus, e.g. if a class
// is invalid because it's derived from an invalid base class, then missing
// members were likely supposed to be inherited.
+ DeclContext *DC = computeDeclContext(SS);
if (const auto *CD = dyn_cast<CXXRecordDecl>(DC))
if (CD->isInvalidDecl())
return ExprError();
@@ -3007,198 +2929,16 @@ ExprResult Sema::BuildQualifiedDeclarationNameExpr(
return ExprEmpty();
}
- // Defend against this resolving to an implicit member access. We usually
- // won't get here if this might be a legitimate a class member (we end up in
- // BuildMemberReferenceExpr instead), but this can be valid if we're forming
- // a pointer-to-member or in an unevaluated context in C++11.
- if (!R.empty() && (*R.begin())->isCXXClassMember() && !IsAddressOfOperand)
+ // If necessary, build an implicit class member access.
+ if (isPotentialImplicitMemberAccess(SS, R, IsAddressOfOperand))
return BuildPossibleImplicitMemberExpr(SS,
/*TemplateKWLoc=*/SourceLocation(),
- R, /*TemplateArgs=*/nullptr, S);
-
- return BuildDeclarationNameExpr(SS, R, /* ADL */ false);
-}
-
-/// The parser has read a name in, and Sema has detected that we're currently
-/// inside an ObjC method. Perform some additional checks and determine if we
-/// should form a reference to an ivar.
-///
-/// Ideally, most of this would be done by lookup, but there's
-/// actually quite a lot of extra work involved.
-DeclResult Sema::LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
- IdentifierInfo *II) {
- SourceLocation Loc = Lookup.getNameLoc();
- ObjCMethodDecl *CurMethod = getCurMethodDecl();
-
- // Check for error condition which is already reported.
- if (!CurMethod)
- return DeclResult(true);
-
- // There are two cases to handle here. 1) scoped lookup could have failed,
- // in which case we should look for an ivar. 2) scoped lookup could have
- // found a decl, but that decl is outside the current instance method (i.e.
- // a global variable). In these two cases, we do a lookup for an ivar with
- // this name, if the lookup sucedes, we replace it our current decl.
-
- // If we're in a class method, we don't normally want to look for
- // ivars. But if we don't find anything else, and there's an
- // ivar, that's an error.
- bool IsClassMethod = CurMethod->isClassMethod();
-
- bool LookForIvars;
- if (Lookup.empty())
- LookForIvars = true;
- else if (IsClassMethod)
- LookForIvars = false;
- else
- LookForIvars = (Lookup.isSingleResult() &&
- Lookup.getFoundDecl()->isDefinedOutsideFunctionOrMethod());
- ObjCInterfaceDecl *IFace = nullptr;
- if (LookForIvars) {
- IFace = CurMethod->getClassInterface();
- ObjCInterfaceDecl *ClassDeclared;
- ObjCIvarDecl *IV = nullptr;
- if (IFace && (IV = IFace->lookupInstanceVariable(II, ClassDeclared))) {
- // Diagnose using an ivar in a class method.
- if (IsClassMethod) {
- Diag(Loc, diag::err_ivar_use_in_class_method) << IV->getDeclName();
- return DeclResult(true);
- }
-
- // Diagnose the use of an ivar outside of the declaring class.
- if (IV->getAccessControl() == ObjCIvarDecl::Private &&
- !declaresSameEntity(ClassDeclared, IFace) &&
- !getLangOpts().DebuggerSupport)
- Diag(Loc, diag::err_private_ivar_access) << IV->getDeclName();
-
- // Success.
- return IV;
- }
- } else if (CurMethod->isInstanceMethod()) {
- // We should warn if a local variable hides an ivar.
- if (ObjCInterfaceDecl *IFace = CurMethod->getClassInterface()) {
- ObjCInterfaceDecl *ClassDeclared;
- if (ObjCIvarDecl *IV = IFace->lookupInstanceVariable(II, ClassDeclared)) {
- if (IV->getAccessControl() != ObjCIvarDecl::Private ||
- declaresSameEntity(IFace, ClassDeclared))
- Diag(Loc, diag::warn_ivar_use_hidden) << IV->getDeclName();
- }
- }
- } else if (Lookup.isSingleResult() &&
- Lookup.getFoundDecl()->isDefinedOutsideFunctionOrMethod()) {
- // If accessing a stand-alone ivar in a class method, this is an error.
- if (const ObjCIvarDecl *IV =
- dyn_cast<ObjCIvarDecl>(Lookup.getFoundDecl())) {
- Diag(Loc, diag::err_ivar_use_in_class_method) << IV->getDeclName();
- return DeclResult(true);
- }
- }
-
- // Didn't encounter an error, didn't find an ivar.
- return DeclResult(false);
-}
-
-ExprResult Sema::BuildIvarRefExpr(Scope *S, SourceLocation Loc,
- ObjCIvarDecl *IV) {
- ObjCMethodDecl *CurMethod = getCurMethodDecl();
- assert(CurMethod && CurMethod->isInstanceMethod() &&
- "should not reference ivar from this context");
-
- ObjCInterfaceDecl *IFace = CurMethod->getClassInterface();
- assert(IFace && "should not reference ivar from this context");
-
- // If we're referencing an invalid decl, just return this as a silent
- // error node. The error diagnostic was already emitted on the decl.
- if (IV->isInvalidDecl())
- return ExprError();
-
- // Check if referencing a field with __attribute__((deprecated)).
- if (DiagnoseUseOfDecl(IV, Loc))
- return ExprError();
-
- // FIXME: This should use a new expr for a direct reference, don't
- // turn this into Self->ivar, just return a BareIVarExpr or something.
- IdentifierInfo &II = Context.Idents.get("self");
- UnqualifiedId SelfName;
- SelfName.setImplicitSelfParam(&II);
- CXXScopeSpec SelfScopeSpec;
- SourceLocation TemplateKWLoc;
- ExprResult SelfExpr =
- ActOnIdExpression(S, SelfScopeSpec, TemplateKWLoc, SelfName,
- /*HasTrailingLParen=*/false,
- /*IsAddressOfOperand=*/false);
- if (SelfExpr.isInvalid())
- return ExprError();
-
- SelfExpr = DefaultLvalueConversion(SelfExpr.get());
- if (SelfExpr.isInvalid())
- return ExprError();
+ R, /*TemplateArgs=*/nullptr,
+ /*S=*/nullptr);
- MarkAnyDeclReferenced(Loc, IV, true);
-
- ObjCMethodFamily MF = CurMethod->getMethodFamily();
- if (MF != OMF_init && MF != OMF_dealloc && MF != OMF_finalize &&
- !IvarBacksCurrentMethodAccessor(IFace, CurMethod, IV))
- Diag(Loc, diag::warn_direct_ivar_access) << IV->getDeclName();
-
- ObjCIvarRefExpr *Result = new (Context)
- ObjCIvarRefExpr(IV, IV->getUsageType(SelfExpr.get()->getType()), Loc,
- IV->getLocation(), SelfExpr.get(), true, true);
-
- if (IV->getType().getObjCLifetime() == Qualifiers::OCL_Weak) {
- if (!isUnevaluatedContext() &&
- !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc))
- getCurFunction()->recordUseOfWeak(Result);
- }
- if (getLangOpts().ObjCAutoRefCount && !isUnevaluatedContext())
- if (const BlockDecl *BD = CurContext->getInnermostBlockDecl())
- ImplicitlyRetainedSelfLocs.push_back({Loc, BD});
-
- return Result;
-}
-
-/// The parser has read a name in, and Sema has detected that we're currently
-/// inside an ObjC method. Perform some additional checks and determine if we
-/// should form a reference to an ivar. If so, build an expression referencing
-/// that ivar.
-ExprResult
-Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
- IdentifierInfo *II, bool AllowBuiltinCreation) {
- // FIXME: Integrate this lookup step into LookupParsedName.
- DeclResult Ivar = LookupIvarInObjCMethod(Lookup, S, II);
- if (Ivar.isInvalid())
- return ExprError();
- if (Ivar.isUsable())
- return BuildIvarRefExpr(S, Lookup.getNameLoc(),
- cast<ObjCIvarDecl>(Ivar.get()));
-
- if (Lookup.empty() && II && AllowBuiltinCreation)
- LookupBuiltin(Lookup);
-
- // Sentinel value saying that we didn't do anything special.
- return ExprResult(false);
+ return BuildDeclarationNameExpr(SS, R, /*ADL=*/false);
}
-/// Cast a base object to a member's actual type.
-///
-/// There are two relevant checks:
-///
-/// C++ [class.access.base]p7:
-///
-/// If a class member access operator [...] is used to access a non-static
-/// data member or non-static member function, the reference is ill-formed if
-/// the left operand [...] cannot be implicitly converted to a pointer to the
-/// naming class of the right operand.
-///
-/// C++ [expr.ref]p7:
-///
-/// If E2 is a non-static data member or a non-static member function, the
-/// program is ill-formed if the class of which E2 is directly a member is an
-/// ambiguous base (11.8) of the naming class (11.9.3) of E2.
-///
-/// Note that the latter check does not consider access; the access of the
-/// "real" base class is checked as appropriate when checking the access of the
-/// member name.
ExprResult
Sema::PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
@@ -3340,7 +3080,7 @@ bool Sema::UseArgumentDependentLookup(const CXXScopeSpec &SS,
return false;
// Never if a scope specifier was provided.
- if (SS.isSet())
+ if (SS.isNotEmpty())
return false;
// Only in C++ or ObjC++.
@@ -3445,12 +3185,10 @@ ExprResult Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
// we've picked a target.
R.suppressDiagnostics();
- UnresolvedLookupExpr *ULE
- = UnresolvedLookupExpr::Create(Context, R.getNamingClass(),
- SS.getWithLocInContext(Context),
- R.getLookupNameInfo(),
- NeedsADL, R.isOverloadedResult(),
- R.begin(), R.end());
+ UnresolvedLookupExpr *ULE = UnresolvedLookupExpr::Create(
+ Context, R.getNamingClass(), SS.getWithLocInContext(Context),
+ R.getLookupNameInfo(), NeedsADL, R.begin(), R.end(),
+ /*KnownDependent=*/false, /*KnownInstantiationDependent=*/false);
return ULE;
}
@@ -3459,7 +3197,6 @@ static void diagnoseUncapturableValueReferenceOrBinding(Sema &S,
SourceLocation loc,
ValueDecl *var);
-/// Complete semantic analysis for a reference to the given declaration.
ExprResult Sema::BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD, const TemplateArgumentListInfo *TemplateArgs,
@@ -3476,10 +3213,10 @@ ExprResult Sema::BuildDeclarationNameExpr(
return CreateRecoveryExpr(NameInfo.getBeginLoc(), NameInfo.getEndLoc(), {});
}
- if (TemplateDecl *Template = dyn_cast<TemplateDecl>(D)) {
+ if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D)) {
// Specifically diagnose references to class templates that are missing
// a template argument list.
- diagnoseMissingTemplateArguments(TemplateName(Template), Loc);
+ diagnoseMissingTemplateArguments(SS, /*TemplateKeyword=*/false, TD, Loc);
return ExprError();
}
@@ -3549,7 +3286,8 @@ ExprResult Sema::BuildDeclarationNameExpr(
case Decl::Field:
case Decl::IndirectField:
case Decl::ObjCIvar:
- assert(getLangOpts().CPlusPlus && "building reference to field in C?");
+ assert((getLangOpts().CPlusPlus || isAttrContext()) &&
+ "building reference to field in C?");
// These can't have reference type in well-formed programs, but
// for internal consistency we do this anyway.
@@ -3740,7 +3478,10 @@ ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
else {
// Pre-defined identifiers are of type char[x], where x is the length of
// the string.
- auto Str = PredefinedExpr::ComputeName(IK, currentDecl);
+ bool ForceElaboratedPrinting =
+ IK == PredefinedIdentKind::Function && getLangOpts().MSVCCompat;
+ auto Str =
+ PredefinedExpr::ComputeName(IK, currentDecl, ForceElaboratedPrinting);
unsigned Length = Str.length();
llvm::APInt LengthI(32, Length + 1);
@@ -3770,28 +3511,6 @@ ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
SL);
}
-ExprResult Sema::BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc,
- SourceLocation LParen,
- SourceLocation RParen,
- TypeSourceInfo *TSI) {
- return SYCLUniqueStableNameExpr::Create(Context, OpLoc, LParen, RParen, TSI);
-}
-
-ExprResult Sema::ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc,
- SourceLocation LParen,
- SourceLocation RParen,
- ParsedType ParsedTy) {
- TypeSourceInfo *TSI = nullptr;
- QualType Ty = GetTypeFromParser(ParsedTy, &TSI);
-
- if (Ty.isNull())
- return ExprError();
- if (!TSI)
- TSI = Context.getTrivialTypeSourceInfo(Ty, LParen);
-
- return BuildSYCLUniqueStableNameExpr(OpLoc, LParen, RParen, TSI);
-}
-
ExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind) {
return BuildPredefinedExpr(Loc, getPredefinedExprKind(Kind));
}
@@ -3869,7 +3588,10 @@ static Expr *BuildFloatingLiteral(Sema &S, NumericLiteralParser &Literal,
using llvm::APFloat;
APFloat Val(Format);
- APFloat::opStatus result = Literal.GetFloatValue(Val);
+ llvm::RoundingMode RM = S.CurFPFeatures.getRoundingMode();
+ if (RM == llvm::RoundingMode::Dynamic)
+ RM = llvm::RoundingMode::NearestTiesToEven;
+ APFloat::opStatus result = Literal.GetFloatValue(Val, RM);
// Overflow is always an error, but underflow is only an error if
// we underflowed to zero (APFloat reports denormals as underflow).
@@ -3885,16 +3607,14 @@ static Expr *BuildFloatingLiteral(Sema &S, NumericLiteralParser &Literal,
APFloat::getSmallest(Format).toString(buffer);
}
- S.Diag(Loc, diagnostic)
- << Ty
- << StringRef(buffer.data(), buffer.size());
+ S.Diag(Loc, diagnostic) << Ty << buffer.str();
}
bool isExact = (result == APFloat::opOK);
return FloatingLiteral::Create(S.Context, Val, isExact, Ty, Loc);
}
-bool Sema::CheckLoopHintExpr(Expr *E, SourceLocation Loc) {
+bool Sema::CheckLoopHintExpr(Expr *E, SourceLocation Loc, bool AllowZero) {
assert(E && "Invalid expression");
if (E->isValueDependent())
@@ -3912,9 +3632,15 @@ bool Sema::CheckLoopHintExpr(Expr *E, SourceLocation Loc) {
if (R.isInvalid())
return true;
- bool ValueIsPositive = ValueAPS.isStrictlyPositive();
+ // GCC allows the value of unroll count to be 0.
+ // https://gcc.gnu.org/onlinedocs/gcc/Loop-Specific-Pragmas.html says
+ // "The values of 0 and 1 block any unrolling of the loop."
+ // The values doesn't have to be strictly positive in '#pragma GCC unroll' and
+ // '#pragma unroll' cases.
+ bool ValueIsPositive =
+ AllowZero ? ValueAPS.isNonNegative() : ValueAPS.isStrictlyPositive();
if (!ValueIsPositive || ValueAPS.getActiveBits() > 31) {
- Diag(E->getExprLoc(), diag::err_pragma_loop_invalid_argument_value)
+ Diag(E->getExprLoc(), diag::err_requires_positive_value)
<< toString(ValueAPS, 10) << ValueIsPositive;
return true;
}
@@ -3925,9 +3651,9 @@ bool Sema::CheckLoopHintExpr(Expr *E, SourceLocation Loc) {
ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
// Fast path for a single digit (which is quite common). A single digit
// cannot have a trigraph, escaped newline, radix prefix, or suffix.
- if (Tok.getLength() == 1) {
+ if (Tok.getLength() == 1 || Tok.getKind() == tok::binary_data) {
const char Val = PP.getSpellingOfSingleCharacterNumericConstant(Tok);
- return ActOnIntegerConstant(Tok.getLocation(), Val-'0');
+ return ActOnIntegerConstant(Tok.getLocation(), Val);
}
SmallString<128> SpellingBuffer;
@@ -4093,7 +3819,8 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
} else if (Literal.isFloatingLiteral()) {
QualType Ty;
if (Literal.isHalf){
- if (getOpenCLOptions().isAvailableOption("cl_khr_fp16", getLangOpts()))
+ if (getLangOpts().HLSL ||
+ getOpenCLOptions().isAvailableOption("cl_khr_fp16", getLangOpts()))
Ty = Context.HalfTy;
else {
Diag(Tok.getLocation(), diag::err_half_const_requires_fp16);
@@ -4102,11 +3829,13 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
} else if (Literal.isFloat)
Ty = Context.FloatTy;
else if (Literal.isLong)
- Ty = Context.LongDoubleTy;
+ Ty = !getLangOpts().HLSL ? Context.LongDoubleTy : Context.DoubleTy;
else if (Literal.isFloat16)
Ty = Context.Float16Ty;
else if (Literal.isFloat128)
Ty = Context.Float128Ty;
+ else if (getLangOpts().HLSL)
+ Ty = Context.FloatTy;
else
Ty = Context.DoubleTy;
@@ -4141,11 +3870,13 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
// 'wb/uwb' literals are a C23 feature. We support _BitInt as a type in C++,
// but we do not currently support the suffix in C++ mode because it's not
// entirely clear whether WG21 will prefer this suffix to return a library
- // type such as std::bit_int instead of returning a _BitInt.
- if (Literal.isBitInt && !getLangOpts().CPlusPlus)
- PP.Diag(Tok.getLocation(), getLangOpts().C23
- ? diag::warn_c23_compat_bitint_suffix
- : diag::ext_c23_bitint_suffix);
+ // type such as std::bit_int instead of returning a _BitInt. '__wb/__uwb'
+ // literals are a C++ extension.
+ if (Literal.isBitInt)
+ PP.Diag(Tok.getLocation(),
+ getLangOpts().CPlusPlus ? diag::ext_cxx_bitint_suffix
+ : getLangOpts().C23 ? diag::warn_c23_compat_bitint_suffix
+ : diag::ext_c23_bitint_suffix);
// Get the value in the widest-possible width. What is "widest" depends on
// whether the literal is a bit-precise integer or not. For a bit-precise
@@ -4175,6 +3906,15 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
// be an unsigned int.
bool AllowUnsigned = Literal.isUnsigned || Literal.getRadix() != 10;
+ // HLSL doesn't really have `long` or `long long`. We support the `ll`
+ // suffix for portability of code with C++, but both `l` and `ll` are
+ // 64-bit integer types, and we want the type of `1l` and `1ll` to be the
+ // same.
+ if (getLangOpts().HLSL && !Literal.isLong && Literal.isLongLong) {
+ Literal.isLong = true;
+ Literal.isLongLong = false;
+ }
+
// Check from smallest to largest, picking the smallest type we can.
unsigned Width = 0;
@@ -4377,6 +4117,21 @@ static bool CheckVectorElementsTraitOperandType(Sema &S, QualType T,
return false;
}
+static bool checkPtrAuthTypeDiscriminatorOperandType(Sema &S, QualType T,
+ SourceLocation Loc,
+ SourceRange ArgRange) {
+ if (S.checkPointerAuthEnabled(Loc, ArgRange))
+ return true;
+
+ if (!T->isFunctionType() && !T->isFunctionPointerType() &&
+ !T->isFunctionReferenceType() && !T->isMemberFunctionPointerType()) {
+ S.Diag(Loc, diag::err_ptrauth_type_disc_undiscriminated) << T << ArgRange;
+ return true;
+ }
+
+ return false;
+}
+
static bool CheckExtensionTraitOperandType(Sema &S, QualType T,
SourceLocation Loc,
SourceRange ArgRange,
@@ -4441,13 +4196,6 @@ static void warnOnSizeofOnArrayDecay(Sema &S, SourceLocation Loc, QualType T,
<< ICE->getSubExpr()->getType();
}
-/// Check the constraints on expression operands to unary type expression
-/// and type traits.
-///
-/// Completes any types necessary and validates the constraints on the operand
-/// expression. The logic mostly mirrors the type-based overload, but may modify
-/// the expression as it completes the type for that expression through template
-/// instantiation, etc.
bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
UnaryExprOrTypeTrait ExprKind) {
QualType ExprTy = E->getType();
@@ -4663,6 +4411,9 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
case Type::Decayed:
T = cast<DecayedType>(Ty)->getPointeeType();
break;
+ case Type::ArrayParameter:
+ T = cast<ArrayParameterType>(Ty)->getElementType();
+ break;
case Type::Pointer:
T = cast<PointerType>(Ty)->getPointeeType();
break;
@@ -4706,6 +4457,7 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
case Type::BTFTagAttributed:
case Type::SubstTemplateTypeParm:
case Type::MacroQualified:
+ case Type::CountAttributed:
// Keep walking after single level desugaring.
T = T.getSingleStepDesugaredType(Context);
break;
@@ -4715,6 +4467,9 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
case Type::Decltype:
T = cast<DecltypeType>(Ty)->desugar();
break;
+ case Type::PackIndexing:
+ T = cast<PackIndexingType>(Ty)->desugar();
+ break;
case Type::Using:
T = cast<UsingType>(Ty)->desugar();
break;
@@ -4732,21 +4487,6 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
} while (!T.isNull() && T->isVariablyModifiedType());
}
-/// Check the constraints on operands to unary expression and type
-/// traits.
-///
-/// This will complete any types necessary, and validate the various constraints
-/// on those operands.
-///
-/// The UsualUnaryConversions() function is *not* called by this routine.
-/// C99 6.3.2.1p[2-4] all state:
-/// Except when it is the operand of the sizeof operator ...
-///
-/// C++ [expr.sizeof]p4
-/// The lvalue-to-rvalue, array-to-pointer, and function-to-pointer
-/// standard conversions are not applied to the operand of sizeof.
-///
-/// This policy is followed for all of the unary trait expressions.
bool Sema::CheckUnaryExprOrTypeTraitOperand(QualType ExprType,
SourceLocation OpLoc,
SourceRange ExprRange,
@@ -4768,8 +4508,16 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(QualType ExprType,
// When alignof or _Alignof is applied to an array type, the result
// is the alignment of the element type.
if (ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf ||
- ExprKind == UETT_OpenMPRequiredSimdAlign)
+ ExprKind == UETT_OpenMPRequiredSimdAlign) {
+ // If the trait is 'alignof' in C before C2y, the ability to apply the
+ // trait to an incomplete array is an extension.
+ if (ExprKind == UETT_AlignOf && !getLangOpts().CPlusPlus &&
+ ExprType->isIncompleteArrayType())
+ Diag(OpLoc, getLangOpts().C2y
+ ? diag::warn_c2y_compat_alignof_incomplete_array
+ : diag::ext_c2y_alignof_incomplete_array);
ExprType = Context.getBaseElementType(ExprType);
+ }
if (ExprKind == UETT_VecStep)
return CheckVecStepTraitOperandType(*this, ExprType, OpLoc, ExprRange);
@@ -4778,6 +4526,10 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(QualType ExprType,
return CheckVectorElementsTraitOperandType(*this, ExprType, OpLoc,
ExprRange);
+ if (ExprKind == UETT_PtrAuthTypeDiscriminator)
+ return checkPtrAuthTypeDiscriminatorOperandType(*this, ExprType, OpLoc,
+ ExprRange);
+
// Explicitly list some types as extensions.
if (!CheckExtensionTraitOperandType(*this, ExprType, OpLoc, ExprRange,
ExprKind))
@@ -4833,7 +4585,6 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(QualType ExprType,
return false;
}
-/// Build a sizeof or alignof expression given a type operand.
ExprResult Sema::CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
@@ -4859,8 +4610,6 @@ ExprResult Sema::CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
ExprKind, TInfo, Context.getSizeType(), OpLoc, R.getEnd());
}
-/// Build a sizeof or alignof expression given an expression
-/// operand.
ExprResult
Sema::CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind) {
@@ -4904,9 +4653,6 @@ Sema::CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
ExprKind, E, Context.getSizeType(), OpLoc, E->getSourceRange().getEnd());
}
-/// ActOnUnaryExprOrTypeTraitExpr - Handle @c sizeof(type) and @c sizeof @c
-/// expr and the same for @c alignof and @c __alignof
-/// Note that the ArgRange is invalid if isType is false.
ExprResult
Sema::ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind, bool IsType,
@@ -4933,13 +4679,6 @@ bool Sema::CheckAlignasTypeArgument(StringRef KWName, TypeSourceInfo *TInfo,
UETT_AlignOf, KWName);
}
-/// ActOnAlignasTypeArgument - Handle @c alignas(type-id) and @c
-/// _Alignas(type-name) .
-/// [dcl.align] An alignment-specifier of the form
-/// alignas(type-id) has the same effect as alignas(alignof(type-id)).
-///
-/// [N1570 6.7.5] _Alignas(type-name) is equivalent to
-/// _Alignas(_Alignof(type-name)).
bool Sema::ActOnAlignasTypeArgument(StringRef KWName, ParsedType Ty,
SourceLocation OpLoc, SourceRange R) {
TypeSourceInfo *TInfo;
@@ -5064,10 +4803,18 @@ ExprResult Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base,
SourceLocation rbLoc) {
if (base && !base->getType().isNull() &&
- base->hasPlaceholderType(BuiltinType::OMPArraySection))
- return ActOnOMPArraySectionExpr(base, lbLoc, ArgExprs.front(), SourceLocation(),
- SourceLocation(), /*Length*/ nullptr,
- /*Stride=*/nullptr, rbLoc);
+ base->hasPlaceholderType(BuiltinType::ArraySection)) {
+ auto *AS = cast<ArraySectionExpr>(base);
+ if (AS->isOMPArraySection())
+ return OpenMP().ActOnOMPArraySectionExpr(
+ base, lbLoc, ArgExprs.front(), SourceLocation(), SourceLocation(),
+ /*Length*/ nullptr,
+ /*Stride=*/nullptr, rbLoc);
+
+ return OpenACC().ActOnArraySectionExpr(base, lbLoc, ArgExprs.front(),
+ SourceLocation(), /*Length*/ nullptr,
+ rbLoc);
+ }
// Since this might be a postfix expression, get rid of ParenListExprs.
if (isa<ParenListExpr>(base)) {
@@ -5339,558 +5086,6 @@ void Sema::CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E) {
}
}
-ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
- Expr *LowerBound,
- SourceLocation ColonLocFirst,
- SourceLocation ColonLocSecond,
- Expr *Length, Expr *Stride,
- SourceLocation RBLoc) {
- if (Base->hasPlaceholderType() &&
- !Base->hasPlaceholderType(BuiltinType::OMPArraySection)) {
- ExprResult Result = CheckPlaceholderExpr(Base);
- if (Result.isInvalid())
- return ExprError();
- Base = Result.get();
- }
- if (LowerBound && LowerBound->getType()->isNonOverloadPlaceholderType()) {
- ExprResult Result = CheckPlaceholderExpr(LowerBound);
- if (Result.isInvalid())
- return ExprError();
- Result = DefaultLvalueConversion(Result.get());
- if (Result.isInvalid())
- return ExprError();
- LowerBound = Result.get();
- }
- if (Length && Length->getType()->isNonOverloadPlaceholderType()) {
- ExprResult Result = CheckPlaceholderExpr(Length);
- if (Result.isInvalid())
- return ExprError();
- Result = DefaultLvalueConversion(Result.get());
- if (Result.isInvalid())
- return ExprError();
- Length = Result.get();
- }
- if (Stride && Stride->getType()->isNonOverloadPlaceholderType()) {
- ExprResult Result = CheckPlaceholderExpr(Stride);
- if (Result.isInvalid())
- return ExprError();
- Result = DefaultLvalueConversion(Result.get());
- if (Result.isInvalid())
- return ExprError();
- Stride = Result.get();
- }
-
- // Build an unanalyzed expression if either operand is type-dependent.
- if (Base->isTypeDependent() ||
- (LowerBound &&
- (LowerBound->isTypeDependent() || LowerBound->isValueDependent())) ||
- (Length && (Length->isTypeDependent() || Length->isValueDependent())) ||
- (Stride && (Stride->isTypeDependent() || Stride->isValueDependent()))) {
- return new (Context) OMPArraySectionExpr(
- Base, LowerBound, Length, Stride, Context.DependentTy, VK_LValue,
- OK_Ordinary, ColonLocFirst, ColonLocSecond, RBLoc);
- }
-
- // Perform default conversions.
- QualType OriginalTy = OMPArraySectionExpr::getBaseOriginalType(Base);
- QualType ResultTy;
- if (OriginalTy->isAnyPointerType()) {
- ResultTy = OriginalTy->getPointeeType();
- } else if (OriginalTy->isArrayType()) {
- ResultTy = OriginalTy->getAsArrayTypeUnsafe()->getElementType();
- } else {
- return ExprError(
- Diag(Base->getExprLoc(), diag::err_omp_typecheck_section_value)
- << Base->getSourceRange());
- }
- // C99 6.5.2.1p1
- if (LowerBound) {
- auto Res = PerformOpenMPImplicitIntegerConversion(LowerBound->getExprLoc(),
- LowerBound);
- if (Res.isInvalid())
- return ExprError(Diag(LowerBound->getExprLoc(),
- diag::err_omp_typecheck_section_not_integer)
- << 0 << LowerBound->getSourceRange());
- LowerBound = Res.get();
-
- if (LowerBound->getType()->isSpecificBuiltinType(BuiltinType::Char_S) ||
- LowerBound->getType()->isSpecificBuiltinType(BuiltinType::Char_U))
- Diag(LowerBound->getExprLoc(), diag::warn_omp_section_is_char)
- << 0 << LowerBound->getSourceRange();
- }
- if (Length) {
- auto Res =
- PerformOpenMPImplicitIntegerConversion(Length->getExprLoc(), Length);
- if (Res.isInvalid())
- return ExprError(Diag(Length->getExprLoc(),
- diag::err_omp_typecheck_section_not_integer)
- << 1 << Length->getSourceRange());
- Length = Res.get();
-
- if (Length->getType()->isSpecificBuiltinType(BuiltinType::Char_S) ||
- Length->getType()->isSpecificBuiltinType(BuiltinType::Char_U))
- Diag(Length->getExprLoc(), diag::warn_omp_section_is_char)
- << 1 << Length->getSourceRange();
- }
- if (Stride) {
- ExprResult Res =
- PerformOpenMPImplicitIntegerConversion(Stride->getExprLoc(), Stride);
- if (Res.isInvalid())
- return ExprError(Diag(Stride->getExprLoc(),
- diag::err_omp_typecheck_section_not_integer)
- << 1 << Stride->getSourceRange());
- Stride = Res.get();
-
- if (Stride->getType()->isSpecificBuiltinType(BuiltinType::Char_S) ||
- Stride->getType()->isSpecificBuiltinType(BuiltinType::Char_U))
- Diag(Stride->getExprLoc(), diag::warn_omp_section_is_char)
- << 1 << Stride->getSourceRange();
- }
-
- // C99 6.5.2.1p1: "shall have type "pointer to *object* type". Similarly,
- // C++ [expr.sub]p1: The type "T" shall be a completely-defined object
- // type. Note that functions are not objects, and that (in C99 parlance)
- // incomplete types are not object types.
- if (ResultTy->isFunctionType()) {
- Diag(Base->getExprLoc(), diag::err_omp_section_function_type)
- << ResultTy << Base->getSourceRange();
- return ExprError();
- }
-
- if (RequireCompleteType(Base->getExprLoc(), ResultTy,
- diag::err_omp_section_incomplete_type, Base))
- return ExprError();
-
- if (LowerBound && !OriginalTy->isAnyPointerType()) {
- Expr::EvalResult Result;
- if (LowerBound->EvaluateAsInt(Result, Context)) {
- // OpenMP 5.0, [2.1.5 Array Sections]
- // The array section must be a subset of the original array.
- llvm::APSInt LowerBoundValue = Result.Val.getInt();
- if (LowerBoundValue.isNegative()) {
- Diag(LowerBound->getExprLoc(), diag::err_omp_section_not_subset_of_array)
- << LowerBound->getSourceRange();
- return ExprError();
- }
- }
- }
-
- if (Length) {
- Expr::EvalResult Result;
- if (Length->EvaluateAsInt(Result, Context)) {
- // OpenMP 5.0, [2.1.5 Array Sections]
- // The length must evaluate to non-negative integers.
- llvm::APSInt LengthValue = Result.Val.getInt();
- if (LengthValue.isNegative()) {
- Diag(Length->getExprLoc(), diag::err_omp_section_length_negative)
- << toString(LengthValue, /*Radix=*/10, /*Signed=*/true)
- << Length->getSourceRange();
- return ExprError();
- }
- }
- } else if (ColonLocFirst.isValid() &&
- (OriginalTy.isNull() || (!OriginalTy->isConstantArrayType() &&
- !OriginalTy->isVariableArrayType()))) {
- // OpenMP 5.0, [2.1.5 Array Sections]
- // When the size of the array dimension is not known, the length must be
- // specified explicitly.
- Diag(ColonLocFirst, diag::err_omp_section_length_undefined)
- << (!OriginalTy.isNull() && OriginalTy->isArrayType());
- return ExprError();
- }
-
- if (Stride) {
- Expr::EvalResult Result;
- if (Stride->EvaluateAsInt(Result, Context)) {
- // OpenMP 5.0, [2.1.5 Array Sections]
- // The stride must evaluate to a positive integer.
- llvm::APSInt StrideValue = Result.Val.getInt();
- if (!StrideValue.isStrictlyPositive()) {
- Diag(Stride->getExprLoc(), diag::err_omp_section_stride_non_positive)
- << toString(StrideValue, /*Radix=*/10, /*Signed=*/true)
- << Stride->getSourceRange();
- return ExprError();
- }
- }
- }
-
- if (!Base->hasPlaceholderType(BuiltinType::OMPArraySection)) {
- ExprResult Result = DefaultFunctionArrayLvalueConversion(Base);
- if (Result.isInvalid())
- return ExprError();
- Base = Result.get();
- }
- return new (Context) OMPArraySectionExpr(
- Base, LowerBound, Length, Stride, Context.OMPArraySectionTy, VK_LValue,
- OK_Ordinary, ColonLocFirst, ColonLocSecond, RBLoc);
-}
-
-ExprResult Sema::ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
- SourceLocation RParenLoc,
- ArrayRef<Expr *> Dims,
- ArrayRef<SourceRange> Brackets) {
- if (Base->hasPlaceholderType()) {
- ExprResult Result = CheckPlaceholderExpr(Base);
- if (Result.isInvalid())
- return ExprError();
- Result = DefaultLvalueConversion(Result.get());
- if (Result.isInvalid())
- return ExprError();
- Base = Result.get();
- }
- QualType BaseTy = Base->getType();
- // Delay analysis of the types/expressions if instantiation/specialization is
- // required.
- if (!BaseTy->isPointerType() && Base->isTypeDependent())
- return OMPArrayShapingExpr::Create(Context, Context.DependentTy, Base,
- LParenLoc, RParenLoc, Dims, Brackets);
- if (!BaseTy->isPointerType() ||
- (!Base->isTypeDependent() &&
- BaseTy->getPointeeType()->isIncompleteType()))
- return ExprError(Diag(Base->getExprLoc(),
- diag::err_omp_non_pointer_type_array_shaping_base)
- << Base->getSourceRange());
-
- SmallVector<Expr *, 4> NewDims;
- bool ErrorFound = false;
- for (Expr *Dim : Dims) {
- if (Dim->hasPlaceholderType()) {
- ExprResult Result = CheckPlaceholderExpr(Dim);
- if (Result.isInvalid()) {
- ErrorFound = true;
- continue;
- }
- Result = DefaultLvalueConversion(Result.get());
- if (Result.isInvalid()) {
- ErrorFound = true;
- continue;
- }
- Dim = Result.get();
- }
- if (!Dim->isTypeDependent()) {
- ExprResult Result =
- PerformOpenMPImplicitIntegerConversion(Dim->getExprLoc(), Dim);
- if (Result.isInvalid()) {
- ErrorFound = true;
- Diag(Dim->getExprLoc(), diag::err_omp_typecheck_shaping_not_integer)
- << Dim->getSourceRange();
- continue;
- }
- Dim = Result.get();
- Expr::EvalResult EvResult;
- if (!Dim->isValueDependent() && Dim->EvaluateAsInt(EvResult, Context)) {
- // OpenMP 5.0, [2.1.4 Array Shaping]
- // Each si is an integral type expression that must evaluate to a
- // positive integer.
- llvm::APSInt Value = EvResult.Val.getInt();
- if (!Value.isStrictlyPositive()) {
- Diag(Dim->getExprLoc(), diag::err_omp_shaping_dimension_not_positive)
- << toString(Value, /*Radix=*/10, /*Signed=*/true)
- << Dim->getSourceRange();
- ErrorFound = true;
- continue;
- }
- }
- }
- NewDims.push_back(Dim);
- }
- if (ErrorFound)
- return ExprError();
- return OMPArrayShapingExpr::Create(Context, Context.OMPArrayShapingTy, Base,
- LParenLoc, RParenLoc, NewDims, Brackets);
-}
-
-ExprResult Sema::ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
- SourceLocation LLoc, SourceLocation RLoc,
- ArrayRef<OMPIteratorData> Data) {
- SmallVector<OMPIteratorExpr::IteratorDefinition, 4> ID;
- bool IsCorrect = true;
- for (const OMPIteratorData &D : Data) {
- TypeSourceInfo *TInfo = nullptr;
- SourceLocation StartLoc;
- QualType DeclTy;
- if (!D.Type.getAsOpaquePtr()) {
- // OpenMP 5.0, 2.1.6 Iterators
- // In an iterator-specifier, if the iterator-type is not specified then
- // the type of that iterator is of int type.
- DeclTy = Context.IntTy;
- StartLoc = D.DeclIdentLoc;
- } else {
- DeclTy = GetTypeFromParser(D.Type, &TInfo);
- StartLoc = TInfo->getTypeLoc().getBeginLoc();
- }
-
- bool IsDeclTyDependent = DeclTy->isDependentType() ||
- DeclTy->containsUnexpandedParameterPack() ||
- DeclTy->isInstantiationDependentType();
- if (!IsDeclTyDependent) {
- if (!DeclTy->isIntegralType(Context) && !DeclTy->isAnyPointerType()) {
- // OpenMP 5.0, 2.1.6 Iterators, Restrictions, C/C++
- // The iterator-type must be an integral or pointer type.
- Diag(StartLoc, diag::err_omp_iterator_not_integral_or_pointer)
- << DeclTy;
- IsCorrect = false;
- continue;
- }
- if (DeclTy.isConstant(Context)) {
- // OpenMP 5.0, 2.1.6 Iterators, Restrictions, C/C++
- // The iterator-type must not be const qualified.
- Diag(StartLoc, diag::err_omp_iterator_not_integral_or_pointer)
- << DeclTy;
- IsCorrect = false;
- continue;
- }
- }
-
- // Iterator declaration.
- assert(D.DeclIdent && "Identifier expected.");
- // Always try to create iterator declarator to avoid extra error messages
- // about unknown declarations use.
- auto *VD = VarDecl::Create(Context, CurContext, StartLoc, D.DeclIdentLoc,
- D.DeclIdent, DeclTy, TInfo, SC_None);
- VD->setImplicit();
- if (S) {
- // Check for conflicting previous declaration.
- DeclarationNameInfo NameInfo(VD->getDeclName(), D.DeclIdentLoc);
- LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
- ForVisibleRedeclaration);
- Previous.suppressDiagnostics();
- LookupName(Previous, S);
-
- FilterLookupForScope(Previous, CurContext, S, /*ConsiderLinkage=*/false,
- /*AllowInlineNamespace=*/false);
- if (!Previous.empty()) {
- NamedDecl *Old = Previous.getRepresentativeDecl();
- Diag(D.DeclIdentLoc, diag::err_redefinition) << VD->getDeclName();
- Diag(Old->getLocation(), diag::note_previous_definition);
- } else {
- PushOnScopeChains(VD, S);
- }
- } else {
- CurContext->addDecl(VD);
- }
-
- /// Act on the iterator variable declaration.
- ActOnOpenMPIteratorVarDecl(VD);
-
- Expr *Begin = D.Range.Begin;
- if (!IsDeclTyDependent && Begin && !Begin->isTypeDependent()) {
- ExprResult BeginRes =
- PerformImplicitConversion(Begin, DeclTy, AA_Converting);
- Begin = BeginRes.get();
- }
- Expr *End = D.Range.End;
- if (!IsDeclTyDependent && End && !End->isTypeDependent()) {
- ExprResult EndRes = PerformImplicitConversion(End, DeclTy, AA_Converting);
- End = EndRes.get();
- }
- Expr *Step = D.Range.Step;
- if (!IsDeclTyDependent && Step && !Step->isTypeDependent()) {
- if (!Step->getType()->isIntegralType(Context)) {
- Diag(Step->getExprLoc(), diag::err_omp_iterator_step_not_integral)
- << Step << Step->getSourceRange();
- IsCorrect = false;
- continue;
- }
- std::optional<llvm::APSInt> Result =
- Step->getIntegerConstantExpr(Context);
- // OpenMP 5.0, 2.1.6 Iterators, Restrictions
- // If the step expression of a range-specification equals zero, the
- // behavior is unspecified.
- if (Result && Result->isZero()) {
- Diag(Step->getExprLoc(), diag::err_omp_iterator_step_constant_zero)
- << Step << Step->getSourceRange();
- IsCorrect = false;
- continue;
- }
- }
- if (!Begin || !End || !IsCorrect) {
- IsCorrect = false;
- continue;
- }
- OMPIteratorExpr::IteratorDefinition &IDElem = ID.emplace_back();
- IDElem.IteratorDecl = VD;
- IDElem.AssignmentLoc = D.AssignLoc;
- IDElem.Range.Begin = Begin;
- IDElem.Range.End = End;
- IDElem.Range.Step = Step;
- IDElem.ColonLoc = D.ColonLoc;
- IDElem.SecondColonLoc = D.SecColonLoc;
- }
- if (!IsCorrect) {
- // Invalidate all created iterator declarations if error is found.
- for (const OMPIteratorExpr::IteratorDefinition &D : ID) {
- if (Decl *ID = D.IteratorDecl)
- ID->setInvalidDecl();
- }
- return ExprError();
- }
- SmallVector<OMPIteratorHelperData, 4> Helpers;
- if (!CurContext->isDependentContext()) {
- // Build number of ityeration for each iteration range.
- // Ni = ((Stepi > 0) ? ((Endi + Stepi -1 - Begini)/Stepi) :
- // ((Begini-Stepi-1-Endi) / -Stepi);
- for (OMPIteratorExpr::IteratorDefinition &D : ID) {
- // (Endi - Begini)
- ExprResult Res = CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub, D.Range.End,
- D.Range.Begin);
- if(!Res.isUsable()) {
- IsCorrect = false;
- continue;
- }
- ExprResult St, St1;
- if (D.Range.Step) {
- St = D.Range.Step;
- // (Endi - Begini) + Stepi
- Res = CreateBuiltinBinOp(D.AssignmentLoc, BO_Add, Res.get(), St.get());
- if (!Res.isUsable()) {
- IsCorrect = false;
- continue;
- }
- // (Endi - Begini) + Stepi - 1
- Res =
- CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub, Res.get(),
- ActOnIntegerConstant(D.AssignmentLoc, 1).get());
- if (!Res.isUsable()) {
- IsCorrect = false;
- continue;
- }
- // ((Endi - Begini) + Stepi - 1) / Stepi
- Res = CreateBuiltinBinOp(D.AssignmentLoc, BO_Div, Res.get(), St.get());
- if (!Res.isUsable()) {
- IsCorrect = false;
- continue;
- }
- St1 = CreateBuiltinUnaryOp(D.AssignmentLoc, UO_Minus, D.Range.Step);
- // (Begini - Endi)
- ExprResult Res1 = CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub,
- D.Range.Begin, D.Range.End);
- if (!Res1.isUsable()) {
- IsCorrect = false;
- continue;
- }
- // (Begini - Endi) - Stepi
- Res1 =
- CreateBuiltinBinOp(D.AssignmentLoc, BO_Add, Res1.get(), St1.get());
- if (!Res1.isUsable()) {
- IsCorrect = false;
- continue;
- }
- // (Begini - Endi) - Stepi - 1
- Res1 =
- CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub, Res1.get(),
- ActOnIntegerConstant(D.AssignmentLoc, 1).get());
- if (!Res1.isUsable()) {
- IsCorrect = false;
- continue;
- }
- // ((Begini - Endi) - Stepi - 1) / (-Stepi)
- Res1 =
- CreateBuiltinBinOp(D.AssignmentLoc, BO_Div, Res1.get(), St1.get());
- if (!Res1.isUsable()) {
- IsCorrect = false;
- continue;
- }
- // Stepi > 0.
- ExprResult CmpRes =
- CreateBuiltinBinOp(D.AssignmentLoc, BO_GT, D.Range.Step,
- ActOnIntegerConstant(D.AssignmentLoc, 0).get());
- if (!CmpRes.isUsable()) {
- IsCorrect = false;
- continue;
- }
- Res = ActOnConditionalOp(D.AssignmentLoc, D.AssignmentLoc, CmpRes.get(),
- Res.get(), Res1.get());
- if (!Res.isUsable()) {
- IsCorrect = false;
- continue;
- }
- }
- Res = ActOnFinishFullExpr(Res.get(), /*DiscardedValue=*/false);
- if (!Res.isUsable()) {
- IsCorrect = false;
- continue;
- }
-
- // Build counter update.
- // Build counter.
- auto *CounterVD =
- VarDecl::Create(Context, CurContext, D.IteratorDecl->getBeginLoc(),
- D.IteratorDecl->getBeginLoc(), nullptr,
- Res.get()->getType(), nullptr, SC_None);
- CounterVD->setImplicit();
- ExprResult RefRes =
- BuildDeclRefExpr(CounterVD, CounterVD->getType(), VK_LValue,
- D.IteratorDecl->getBeginLoc());
- // Build counter update.
- // I = Begini + counter * Stepi;
- ExprResult UpdateRes;
- if (D.Range.Step) {
- UpdateRes = CreateBuiltinBinOp(
- D.AssignmentLoc, BO_Mul,
- DefaultLvalueConversion(RefRes.get()).get(), St.get());
- } else {
- UpdateRes = DefaultLvalueConversion(RefRes.get());
- }
- if (!UpdateRes.isUsable()) {
- IsCorrect = false;
- continue;
- }
- UpdateRes = CreateBuiltinBinOp(D.AssignmentLoc, BO_Add, D.Range.Begin,
- UpdateRes.get());
- if (!UpdateRes.isUsable()) {
- IsCorrect = false;
- continue;
- }
- ExprResult VDRes =
- BuildDeclRefExpr(cast<VarDecl>(D.IteratorDecl),
- cast<VarDecl>(D.IteratorDecl)->getType(), VK_LValue,
- D.IteratorDecl->getBeginLoc());
- UpdateRes = CreateBuiltinBinOp(D.AssignmentLoc, BO_Assign, VDRes.get(),
- UpdateRes.get());
- if (!UpdateRes.isUsable()) {
- IsCorrect = false;
- continue;
- }
- UpdateRes =
- ActOnFinishFullExpr(UpdateRes.get(), /*DiscardedValue=*/true);
- if (!UpdateRes.isUsable()) {
- IsCorrect = false;
- continue;
- }
- ExprResult CounterUpdateRes =
- CreateBuiltinUnaryOp(D.AssignmentLoc, UO_PreInc, RefRes.get());
- if (!CounterUpdateRes.isUsable()) {
- IsCorrect = false;
- continue;
- }
- CounterUpdateRes =
- ActOnFinishFullExpr(CounterUpdateRes.get(), /*DiscardedValue=*/true);
- if (!CounterUpdateRes.isUsable()) {
- IsCorrect = false;
- continue;
- }
- OMPIteratorHelperData &HD = Helpers.emplace_back();
- HD.CounterVD = CounterVD;
- HD.Upper = Res.get();
- HD.Update = UpdateRes.get();
- HD.CounterUpdate = CounterUpdateRes.get();
- }
- } else {
- Helpers.assign(ID.size(), {});
- }
- if (!IsCorrect) {
- // Invalidate all created iterator declarations if error is found.
- for (const OMPIteratorExpr::IteratorDefinition &D : ID) {
- if (Decl *ID = D.IteratorDecl)
- ID->setInvalidDecl();
- }
- return ExprError();
- }
- return OMPIteratorExpr::Create(Context, Context.OMPIteratorTy, IteratorKwLoc,
- LLoc, RLoc, ID, Helpers);
-}
-
ExprResult
Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc) {
@@ -5911,7 +5106,7 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
}
// Perform default conversions.
- if (!LHSExp->getType()->getAs<VectorType>()) {
+ if (!LHSExp->getType()->isSubscriptableVectorType()) {
ExprResult Result = DefaultFunctionArrayLvalueConversion(LHSExp);
if (Result.isInvalid())
return ExprError();
@@ -5947,8 +5142,8 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
// Use custom logic if this should be the pseudo-object subscript
// expression.
if (!LangOpts.isSubscriptPointerArithmetic())
- return BuildObjCSubscriptExpression(RLoc, BaseExpr, IndexExpr, nullptr,
- nullptr);
+ return ObjC().BuildObjCSubscriptExpression(RLoc, BaseExpr, IndexExpr,
+ nullptr, nullptr);
ResultType = PTy->getPointeeType();
} else if (const PointerType *PTy = RHSTy->getAs<PointerType>()) {
@@ -5967,36 +5162,22 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
<< ResultType << BaseExpr->getSourceRange();
return ExprError();
}
- } else if (const VectorType *VTy = LHSTy->getAs<VectorType>()) {
- BaseExpr = LHSExp; // vectors: V[123]
- IndexExpr = RHSExp;
- // We apply C++ DR1213 to vector subscripting too.
- if (getLangOpts().CPlusPlus11 && LHSExp->isPRValue()) {
- ExprResult Materialized = TemporaryMaterializationConversion(LHSExp);
- if (Materialized.isInvalid())
- return ExprError();
- LHSExp = Materialized.get();
+ } else if (LHSTy->isSubscriptableVectorType()) {
+ if (LHSTy->isBuiltinType() &&
+ LHSTy->getAs<BuiltinType>()->isSveVLSBuiltinType()) {
+ const BuiltinType *BTy = LHSTy->getAs<BuiltinType>();
+ if (BTy->isSVEBool())
+ return ExprError(Diag(LLoc, diag::err_subscript_svbool_t)
+ << LHSExp->getSourceRange()
+ << RHSExp->getSourceRange());
+ ResultType = BTy->getSveEltType(Context);
+ } else {
+ const VectorType *VTy = LHSTy->getAs<VectorType>();
+ ResultType = VTy->getElementType();
}
- VK = LHSExp->getValueKind();
- if (VK != VK_PRValue)
- OK = OK_VectorComponent;
-
- ResultType = VTy->getElementType();
- QualType BaseType = BaseExpr->getType();
- Qualifiers BaseQuals = BaseType.getQualifiers();
- Qualifiers MemberQuals = ResultType.getQualifiers();
- Qualifiers Combined = BaseQuals + MemberQuals;
- if (Combined != MemberQuals)
- ResultType = Context.getQualifiedType(ResultType, Combined);
- } else if (LHSTy->isBuiltinType() &&
- LHSTy->getAs<BuiltinType>()->isSveVLSBuiltinType()) {
- const BuiltinType *BTy = LHSTy->getAs<BuiltinType>();
- if (BTy->isSVEBool())
- return ExprError(Diag(LLoc, diag::err_subscript_svbool_t)
- << LHSExp->getSourceRange() << RHSExp->getSourceRange());
-
- BaseExpr = LHSExp;
+ BaseExpr = LHSExp; // vectors: V[123]
IndexExpr = RHSExp;
+ // We apply C++ DR1213 to vector subscripting too.
if (getLangOpts().CPlusPlus11 && LHSExp->isPRValue()) {
ExprResult Materialized = TemporaryMaterializationConversion(LHSExp);
if (Materialized.isInvalid())
@@ -6007,8 +5188,6 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
if (VK != VK_PRValue)
OK = OK_VectorComponent;
- ResultType = BTy->getSveEltType(Context);
-
QualType BaseType = BaseExpr->getType();
Qualifiers BaseQuals = BaseType.getQualifiers();
Qualifiers MemberQuals = ResultType.getQualifiers();
@@ -6198,6 +5377,12 @@ struct ImmediateCallVisitor : public RecursiveASTVisitor<ImmediateCallVisitor> {
return RecursiveASTVisitor<ImmediateCallVisitor>::VisitStmt(E);
}
+ bool VisitCXXConstructExpr(CXXConstructExpr *E) {
+ if (const FunctionDecl *FD = E->getConstructor())
+ HasImmediateCalls |= FD->isImmediateFunction();
+ return RecursiveASTVisitor<ImmediateCallVisitor>::VisitStmt(E);
+ }
+
// SourceLocExpr are not immediate invocations
// but CXXDefaultInitExpr/CXXDefaultArgExpr containing a SourceLocExpr
// need to be rebuilt so that they refer to the correct SourceLocation and
@@ -6217,12 +5402,6 @@ struct ImmediateCallVisitor : public RecursiveASTVisitor<ImmediateCallVisitor> {
return VisitCXXMethodDecl(E->getCallOperator());
}
- // Blocks don't support default parameters, and, as for lambdas,
- // we don't consider their body a subexpression.
- bool VisitBlockDecl(BlockDecl *B) { return false; }
-
- bool VisitCompoundStmt(CompoundStmt *B) { return false; }
-
bool VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
return TraverseStmt(E->getExpr());
}
@@ -6248,6 +5427,28 @@ struct EnsureImmediateInvocationInDefaultArgs
// cause it to incorrectly point it to the outermost class
// in the case of nested struct initialization.
ExprResult TransformCXXThisExpr(CXXThisExpr *E) { return E; }
+
+ // Rewrite to source location to refer to the context in which they are used.
+ ExprResult TransformSourceLocExpr(SourceLocExpr *E) {
+ DeclContext *DC = E->getParentContext();
+ if (DC == SemaRef.CurContext)
+ return E;
+
+ // FIXME: During instantiation, because the rebuild of defaults arguments
+ // is not always done in the context of the template instantiator,
+ // we run the risk of producing a dependent source location
+ // that would never be rebuilt.
+ // This usually happens during overload resolution, or in contexts
+ // where the value of the source location does not matter.
+ // However, we should find a better way to deal with source location
+ // of function templates.
+ if (!SemaRef.CurrentInstantiationScope ||
+ !SemaRef.CurContext->isDependentContext() || DC->isDependentContext())
+ DC = SemaRef.CurContext;
+
+ return getDerived().RebuildSourceLocExpr(
+ E->getIdentKind(), E->getType(), E->getBeginLoc(), E->getEndLoc(), DC);
+ }
};
ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
@@ -6256,7 +5457,7 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
assert(Param->hasDefaultArg() && "can't build nonexistent default arg");
bool NestedDefaultChecking = isCheckingDefaultArgumentOrInitializer();
-
+ bool InLifetimeExtendingContext = isInLifetimeExtendingContext();
std::optional<ExpressionEvaluationContextRecord::InitializationContext>
InitializationContext =
OutermostDeclarationWithDelayedImmediateInvocations();
@@ -6289,9 +5490,16 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
ImmediateCallVisitor V(getASTContext());
if (!NestedDefaultChecking)
V.TraverseDecl(Param);
- if (V.HasImmediateCalls) {
- ExprEvalContexts.back().DelayedDefaultInitializationContext = {
- CallLoc, Param, CurContext};
+
+ // Rewrite the call argument that was created from the corresponding
+ // parameter's default argument.
+ if (V.HasImmediateCalls || InLifetimeExtendingContext) {
+ if (V.HasImmediateCalls)
+ ExprEvalContexts.back().DelayedDefaultInitializationContext = {
+ CallLoc, Param, CurContext};
+ // Pass down lifetime extending flag, and collect temporaries in
+ // CreateMaterializeTemporaryExpr when we rewrite the call argument.
+ keepInLifetimeExtendingContext();
EnsureImmediateInvocationInDefaultArgs Immediate(*this);
ExprResult Res;
runWithSufficientStackSpace(CallLoc, [&] {
@@ -6522,12 +5730,26 @@ static TypoCorrection TryTypoCorrectionForCall(Sema &S, Expr *Fn,
return TypoCorrection();
}
-/// ConvertArgumentsForCall - Converts the arguments specified in
-/// Args/NumArgs to the parameter types of the function FDecl with
-/// function prototype Proto. Call is the call expression itself, and
-/// Fn is the function expression. For a C++ member function, this
-/// routine does not attempt to convert the object argument. Returns
-/// true if the call is ill-formed.
+// [C++26][[expr.unary.op]/p4
+// A pointer to member is only formed when an explicit &
+// is used and its operand is a qualified-id not enclosed in parentheses.
+static bool isParenthetizedAndQualifiedAddressOfExpr(Expr *Fn) {
+ if (!isa<ParenExpr>(Fn))
+ return false;
+
+ Fn = Fn->IgnoreParens();
+
+ auto *UO = dyn_cast<UnaryOperator>(Fn);
+ if (!UO || UO->getOpcode() != clang::UO_AddrOf)
+ return false;
+ if (auto *DRE = dyn_cast<DeclRefExpr>(UO->getSubExpr()->IgnoreParens())) {
+ return DRE->hasQualifier();
+ }
+ if (auto *OVL = dyn_cast<OverloadExpr>(UO->getSubExpr()->IgnoreParens()))
+ return OVL->getQualifier();
+ return false;
+}
+
bool
Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
@@ -6543,8 +5765,10 @@ Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
// C99 6.5.2.2p7 - the arguments are implicitly converted, as if by
// assignment, to the types of the corresponding parameter, ...
+
+ bool AddressOf = isParenthetizedAndQualifiedAddressOfExpr(Fn);
bool HasExplicitObjectParameter =
- FDecl && FDecl->hasCXXExplicitFunctionObjectParameter();
+ !AddressOf && FDecl && FDecl->hasCXXExplicitFunctionObjectParameter();
unsigned ExplicitObjectParameterOffset = HasExplicitObjectParameter ? 1 : 0;
unsigned NumParams = Proto->getNumParams();
bool Invalid = false;
@@ -6694,7 +5918,7 @@ bool Sema::GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
if (Arg->getType() == Context.ARCUnbridgedCastTy &&
FDecl && FDecl->hasAttr<CFAuditedTransferAttr>() &&
(!Param || !Param->hasAttr<CFConsumedAttr>()))
- Arg = stripARCUnbridgedCast(Arg);
+ Arg = ObjC().stripARCUnbridgedCast(Arg);
else if (getLangOpts().ObjCAutoRefCount &&
FDecl && FDecl->hasAttr<CFAuditedTransferAttr>() &&
(!Param || !Param->hasAttr<CFConsumedAttr>()))
@@ -6780,14 +6004,6 @@ static void DiagnoseCalleeStaticArrayParam(Sema &S, ParmVarDecl *PVD) {
<< ATL.getLocalSourceRange();
}
-/// CheckStaticArrayArgument - If the given argument corresponds to a static
-/// array parameter, check that it is non-null, and that if it is formed by
-/// array-to-pointer decay, the underlying array is sufficiently large.
-///
-/// C99 6.7.5.3p7: If the keyword static also appears within the [ and ] of the
-/// array type derivation, then for each call to the function, the value of the
-/// corresponding actual argument shall provide access to the first element of
-/// an array with at least as many elements as specified by the size expression.
void
Sema::CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
@@ -6822,9 +6038,8 @@ Sema::CheckStaticArrayArgument(SourceLocation CallLoc,
ArgCAT->getElementType())) {
if (ArgCAT->getSize().ult(CAT->getSize())) {
Diag(CallLoc, diag::warn_static_array_too_small)
- << ArgExpr->getSourceRange()
- << (unsigned)ArgCAT->getSize().getZExtValue()
- << (unsigned)CAT->getSize().getZExtValue() << 0;
+ << ArgExpr->getSourceRange() << (unsigned)ArgCAT->getZExtSize()
+ << (unsigned)CAT->getZExtSize() << 0;
DiagnoseCalleeStaticArrayParam(*this, Param);
}
return;
@@ -6873,11 +6088,14 @@ static bool isPlaceholderToRemoveAsArg(QualType type) {
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
+#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/AMDGPUTypes.def"
#define PLACEHOLDER_TYPE(ID, SINGLETON_ID)
#define BUILTIN_TYPE(ID, SINGLETON_ID) case BuiltinType::ID:
#include "clang/AST/BuiltinTypes.def"
return false;
+ case BuiltinType::UnresolvedTemplate:
// We cannot lower out overload sets; they might validly be resolved
// by the call machinery.
case BuiltinType::Overload:
@@ -6901,7 +6119,7 @@ static bool isPlaceholderToRemoveAsArg(QualType type) {
case BuiltinType::BoundMember:
case BuiltinType::BuiltinFn:
case BuiltinType::IncompleteMatrixIdx:
- case BuiltinType::OMPArraySection:
+ case BuiltinType::ArraySection:
case BuiltinType::OMPArrayShaping:
case BuiltinType::OMPIterator:
return true;
@@ -7169,18 +6387,23 @@ ExprResult Sema::ActOnCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
}
if (LangOpts.OpenMP)
- Call = ActOnOpenMPCall(Call, Scope, LParenLoc, ArgExprs, RParenLoc,
- ExecConfig);
+ Call = OpenMP().ActOnOpenMPCall(Call, Scope, LParenLoc, ArgExprs, RParenLoc,
+ ExecConfig);
if (LangOpts.CPlusPlus) {
if (const auto *CE = dyn_cast<CallExpr>(Call.get()))
DiagnosedUnqualifiedCallsToStdFunctions(*this, CE);
+
+ // If we previously found that the id-expression of this call refers to a
+ // consteval function but the call is dependent, we should not treat is an
+ // an invalid immediate call.
+ if (auto *DRE = dyn_cast<DeclRefExpr>(Fn->IgnoreParens());
+ DRE && Call.get()->isValueDependent()) {
+ currentEvaluationContext().ReferenceToConsteval.erase(DRE);
+ }
}
return Call;
}
-/// BuildCallExpr - Handle a call to Fn with the specified array of arguments.
-/// This provides the location of the left/right parens and a list of comma
-/// locations.
ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig, bool IsExecConfig,
@@ -7255,7 +6478,7 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
OverloadExpr::FindResult find = OverloadExpr::find(Fn);
// We aren't supposed to apply this logic if there's an '&' involved.
- if (!find.HasFormOfMemberPointer) {
+ if (!find.HasFormOfMemberPointer || find.IsAddressOfOperandWithParen) {
if (Expr::hasAnyTypeDependentArguments(ArgExprs))
return CallExpr::Create(Context, Fn, ArgExprs, Context.DependentTy,
VK_PRValue, RParenLoc, CurFPFeatureOverrides());
@@ -7323,7 +6546,8 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
// the parameter type.
if (getLangOpts().HIP && getLangOpts().CUDAIsDevice && FD &&
FD->getBuiltinID()) {
- for (unsigned Idx = 0; Idx < FD->param_size(); ++Idx) {
+ for (unsigned Idx = 0; Idx < ArgExprs.size() && Idx < FD->param_size();
+ ++Idx) {
ParmVarDecl *Param = FD->getParamDecl(Idx);
if (!ArgExprs[Idx] || !Param || !Param->getType()->isPointerType() ||
!ArgExprs[Idx]->getType()->isPointerType())
@@ -7381,8 +6605,6 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
ExecConfig, IsExecConfig);
}
-/// BuildBuiltinCallExpr - Create a call to a builtin function specified by Id
-// with the specified CallArgs
Expr *Sema::BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id,
MultiExprArg CallArgs) {
StringRef Name = Context.BuiltinInfo.getName(Id);
@@ -7404,10 +6626,6 @@ Expr *Sema::BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id,
return Call.get();
}
-/// Parse a __builtin_astype expression.
-///
-/// __builtin_astype( value, dst type )
-///
ExprResult Sema::ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc) {
@@ -7415,7 +6633,6 @@ ExprResult Sema::ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
return BuildAsTypeExpr(E, DstTy, BuiltinLoc, RParenLoc);
}
-/// Create a new AsTypeExpr node (bitcast) from the arguments.
ExprResult Sema::BuildAsTypeExpr(Expr *E, QualType DestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc) {
@@ -7430,25 +6647,14 @@ ExprResult Sema::BuildAsTypeExpr(Expr *E, QualType DestTy,
return new (Context) AsTypeExpr(E, DestTy, VK, OK, BuiltinLoc, RParenLoc);
}
-/// ActOnConvertVectorExpr - create a new convert-vector expression from the
-/// provided arguments.
-///
-/// __builtin_convertvector( value, dst type )
-///
ExprResult Sema::ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc) {
TypeSourceInfo *TInfo;
GetTypeFromParser(ParsedDestTy, &TInfo);
- return SemaConvertVectorExpr(E, TInfo, BuiltinLoc, RParenLoc);
+ return ConvertVectorExpr(E, TInfo, BuiltinLoc, RParenLoc);
}
-/// BuildResolvedCallExpr - Build a call to a resolved expression,
-/// i.e. an expression not of \p OverloadTy. The expression should
-/// unary-convert to an expression of function-pointer or
-/// block-pointer type.
-///
-/// \param NDecl the declaration being called, if available
ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
@@ -7458,27 +6664,21 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
unsigned BuiltinID = (FDecl ? FDecl->getBuiltinID() : 0);
// Functions with 'interrupt' attribute cannot be called directly.
- if (FDecl && FDecl->hasAttr<AnyX86InterruptAttr>()) {
- Diag(Fn->getExprLoc(), diag::err_anyx86_interrupt_called);
- return ExprError();
+ if (FDecl) {
+ if (FDecl->hasAttr<AnyX86InterruptAttr>()) {
+ Diag(Fn->getExprLoc(), diag::err_anyx86_interrupt_called);
+ return ExprError();
+ }
+ if (FDecl->hasAttr<ARMInterruptAttr>()) {
+ Diag(Fn->getExprLoc(), diag::err_arm_interrupt_called);
+ return ExprError();
+ }
}
- // Interrupt handlers don't save off the VFP regs automatically on ARM,
- // so there's some risk when calling out to non-interrupt handler functions
- // that the callee might not preserve them. This is easy to diagnose here,
- // but can be very challenging to debug.
- // Likewise, X86 interrupt handlers may only call routines with attribute
+ // X86 interrupt handlers may only call routines with attribute
// no_caller_saved_registers since there is no efficient way to
// save and restore the non-GPR state.
if (auto *Caller = getCurFunctionDecl()) {
- if (Caller->hasAttr<ARMInterruptAttr>()) {
- bool VFP = Context.getTargetInfo().hasFeature("vfp");
- if (VFP && (!FDecl || !FDecl->hasAttr<ARMInterruptAttr>())) {
- Diag(Fn->getExprLoc(), diag::warn_arm_interrupt_calling_convention);
- if (FDecl)
- Diag(FDecl->getLocation(), diag::note_callee_decl) << FDecl;
- }
- }
if (Caller->hasAttr<AnyX86InterruptAttr>() ||
Caller->hasAttr<AnyX86NoCallerSavedRegistersAttr>()) {
const TargetInfo &TI = Context.getTargetInfo();
@@ -7504,7 +6704,7 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
// Extract the return type from the (builtin) function pointer type.
// FIXME Several builtins still have setType in
// Sema::CheckBuiltinFunctionCall. One should review their definitions in
- // Builtins.def to ensure they are correct before removing setType calls.
+ // Builtins.td to ensure they are correct before removing setType calls.
QualType FnPtrTy = Context.getPointerType(FDecl->getType());
Result = ImpCastExprToType(Fn, FnPtrTy, CK_BuiltinFnToFnPtr).get();
ResultTy = FDecl->getCallResultType();
@@ -7601,8 +6801,12 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
}
// Bail out early if calling a builtin with custom type checking.
- if (BuiltinID && Context.BuiltinInfo.hasCustomTypechecking(BuiltinID))
- return CheckBuiltinFunctionCall(FDecl, BuiltinID, TheCall);
+ if (BuiltinID && Context.BuiltinInfo.hasCustomTypechecking(BuiltinID)) {
+ ExprResult E = CheckBuiltinFunctionCall(FDecl, BuiltinID, TheCall);
+ if (!E.isInvalid() && Context.BuiltinInfo.isImmediate(BuiltinID))
+ E = CheckForImmediateInvocation(E, FDecl);
+ return E;
+ }
if (getLangOpts().CUDA) {
if (Config) {
@@ -7794,12 +6998,19 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
// init a VLA in C++ in all cases (such as with non-trivial constructors).
// FIXME: should we allow this construct in C++ when it makes sense to do
// so?
- std::optional<unsigned> NumInits;
- if (const auto *ILE = dyn_cast<InitListExpr>(LiteralExpr))
- NumInits = ILE->getNumInits();
- if ((LangOpts.CPlusPlus || NumInits.value_or(0)) &&
- !tryToFixVariablyModifiedVarType(TInfo, literalType, LParenLoc,
- diag::err_variable_object_no_init))
+ //
+ // But: C99-C23 6.5.2.5 Compound literals constraint 1: The type name
+ // shall specify an object type or an array of unknown size, but not a
+ // variable length array type. This seems odd, as it allows 'int a[size] =
+ // {}', but forbids 'int *a = (int[size]){}'. As this is what the standard
+ // says, this is what's implemented here for C (except for the extension
+ // that permits constant foldable size arrays)
+
+ auto diagID = LangOpts.CPlusPlus
+ ? diag::err_variable_object_no_init
+ : diag::err_compound_literal_with_vla_type;
+ if (!tryToFixVariablyModifiedVarType(TInfo, literalType, LParenLoc,
+ diagID))
return ExprError();
}
} else if (!literalType->isDependentType() &&
@@ -7859,7 +7070,7 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
if (!LiteralExpr->isTypeDependent() &&
!LiteralExpr->isValueDependent() &&
!literalType->isDependentType()) // C99 6.5.2.5p3
- if (CheckForConstantInitializer(LiteralExpr, literalType))
+ if (CheckForConstantInitializer(LiteralExpr))
return ExprError();
} else if (literalType.getAddressSpace() != LangAS::opencl_private &&
literalType.getAddressSpace() != LangAS::Default) {
@@ -7985,13 +7196,12 @@ Sema::BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
}
}
- InitListExpr *E = new (Context) InitListExpr(Context, LBraceLoc, InitArgList,
- RBraceLoc);
+ InitListExpr *E =
+ new (Context) InitListExpr(Context, LBraceLoc, InitArgList, RBraceLoc);
E->setType(Context.VoidTy); // FIXME: just a place holder for now.
return E;
}
-/// Do an explicit extend of the given block pointer if we're in ARC.
void Sema::maybeExtendBlockObject(ExprResult &E) {
assert(E.get()->getType()->isBlockPointerType());
assert(E.get()->isPRValue());
@@ -8005,23 +7215,6 @@ void Sema::maybeExtendBlockObject(ExprResult &E) {
Cleanup.setExprNeedsCleanups(true);
}
-/// Prepare a conversion of the given expression to an ObjC object
-/// pointer type.
-CastKind Sema::PrepareCastToObjCObjectPointer(ExprResult &E) {
- QualType type = E.get()->getType();
- if (type->isObjCObjectPointerType()) {
- return CK_BitCast;
- } else if (type->isBlockPointerType()) {
- maybeExtendBlockObject(E);
- return CK_BlockPointerToObjCPointerCast;
- } else {
- assert(type->isPointerType());
- return CK_CPointerToObjCPointerCast;
- }
-}
-
-/// Prepares for a scalar cast, performing all the necessary stages
-/// except the final cast and returning the kind required.
CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
// Both Src and Dest are scalar types, i.e. arithmetic or pointer.
// Also, callers should have filtered out the invalid cases with
@@ -8248,12 +7441,6 @@ static bool breakDownVectorType(QualType type, uint64_t &len,
return true;
}
-/// Are the two types SVE-bitcast-compatible types? I.e. is bitcasting from the
-/// first SVE type (e.g. an SVE VLAT) to the second type (e.g. an SVE VLST)
-/// allowed?
-///
-/// This will also return false if the two given types do not make sense from
-/// the perspective of SVE bitcasts.
bool Sema::isValidSveBitcast(QualType srcTy, QualType destTy) {
assert(srcTy->isVectorType() || destTy->isVectorType());
@@ -8269,29 +7456,6 @@ bool Sema::isValidSveBitcast(QualType srcTy, QualType destTy) {
ValidScalableConversion(destTy, srcTy);
}
-/// Are the two types RVV-bitcast-compatible types? I.e. is bitcasting from the
-/// first RVV type (e.g. an RVV scalable type) to the second type (e.g. an RVV
-/// VLS type) allowed?
-///
-/// This will also return false if the two given types do not make sense from
-/// the perspective of RVV bitcasts.
-bool Sema::isValidRVVBitcast(QualType srcTy, QualType destTy) {
- assert(srcTy->isVectorType() || destTy->isVectorType());
-
- auto ValidScalableConversion = [](QualType FirstType, QualType SecondType) {
- if (!FirstType->isRVVSizelessBuiltinType())
- return false;
-
- const auto *VecTy = SecondType->getAs<VectorType>();
- return VecTy && VecTy->getVectorKind() == VectorKind::RVVFixedLengthData;
- };
-
- return ValidScalableConversion(srcTy, destTy) ||
- ValidScalableConversion(destTy, srcTy);
-}
-
-/// Are the two types matrix types and do they have the same dimensions i.e.
-/// do they have the same number of rows and the same number of columns?
bool Sema::areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy) {
if (!destTy->isMatrixType() || !srcTy->isMatrixType())
return false;
@@ -8322,7 +7486,6 @@ bool Sema::areVectorTypesSameSize(QualType SrcTy, QualType DestTy) {
return (SrcLen * SrcEltSize == DestLen * DestEltSize);
}
-// This returns true if at least one of the types is an altivec vector.
bool Sema::anyAltivecTypes(QualType SrcTy, QualType DestTy) {
assert((DestTy->isVectorType() || SrcTy->isVectorType()) &&
"expected at least one type to be a vector here");
@@ -8346,13 +7509,6 @@ bool Sema::anyAltivecTypes(QualType SrcTy, QualType DestTy) {
return (IsSrcTyAltivec || IsDestTyAltivec);
}
-/// Are the two types lax-compatible vector types? That is, given
-/// that one of them is a vector, do they have equal storage sizes,
-/// where the storage size is the number of elements times the element
-/// size?
-///
-/// This will also return false if either of the types is neither a
-/// vector nor a real type.
bool Sema::areLaxCompatibleVectorTypes(QualType srcTy, QualType destTy) {
assert(destTy->isVectorType() || srcTy->isVectorType());
@@ -8367,8 +7523,6 @@ bool Sema::areLaxCompatibleVectorTypes(QualType srcTy, QualType destTy) {
return areVectorTypesSameSize(srcTy, destTy);
}
-/// Is this a legal conversion between two types, one of which is
-/// known to be a vector type?
bool Sema::isLaxVectorConversion(QualType srcTy, QualType destTy) {
assert(destTy->isVectorType() || srcTy->isVectorType());
@@ -8570,9 +7724,9 @@ Sema::ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
if (getLangOpts().CPlusPlus && !castType->isVoidType())
Diag(LParenLoc, diag::warn_old_style_cast) << CastExpr->getSourceRange();
- CheckTollFreeBridgeCast(castType, CastExpr);
+ ObjC().CheckTollFreeBridgeCast(castType, CastExpr);
- CheckObjCBridgeRelatedCast(castType, CastExpr);
+ ObjC().CheckObjCBridgeRelatedCast(castType, CastExpr);
DiscardMisalignedMemberAddress(castType.getTypePtr(), CastExpr);
@@ -8661,8 +7815,6 @@ ExprResult Sema::BuildVectorLiteral(SourceLocation LParenLoc,
return BuildCompoundLiteralExpr(LParenLoc, TInfo, RParenLoc, initE);
}
-/// This is not an AltiVec-style cast or or C++ direct-initialization, so turn
-/// the ParenListExpr into a sequence of comma binary operators.
ExprResult
Sema::MaybeConvertParenListExprToParenExpr(Scope *S, Expr *OrigExpr) {
ParenListExpr *E = dyn_cast<ParenListExpr>(OrigExpr);
@@ -8686,9 +7838,6 @@ ExprResult Sema::ActOnParenListExpr(SourceLocation L,
return ParenListExpr::Create(Context, L, Val, R);
}
-/// Emit a specialized diagnostic when one expression is a null pointer
-/// constant and the other is not a pointer. Returns true if a diagnostic is
-/// emitted.
bool Sema::DiagnoseConditionalForNull(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation QuestionLoc) {
const Expr *NullExpr = LHSExpr;
@@ -9322,8 +8471,8 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
if (!checkConditionalNullPointer(*this, LHS, RHSTy)) return RHSTy;
// All objective-c pointer type analysis is done here.
- QualType compositeType = FindCompositeObjCPointerType(LHS, RHS,
- QuestionLoc);
+ QualType compositeType =
+ ObjC().FindCompositeObjCPointerType(LHS, RHS, QuestionLoc);
if (LHS.isInvalid() || RHS.isInvalid())
return QualType();
if (!compositeType.isNull())
@@ -9367,148 +8516,6 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
return QualType();
}
-/// FindCompositeObjCPointerType - Helper method to find composite type of
-/// two objective-c pointer types of the two input expressions.
-QualType Sema::FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
- SourceLocation QuestionLoc) {
- QualType LHSTy = LHS.get()->getType();
- QualType RHSTy = RHS.get()->getType();
-
- // Handle things like Class and struct objc_class*. Here we case the result
- // to the pseudo-builtin, because that will be implicitly cast back to the
- // redefinition type if an attempt is made to access its fields.
- if (LHSTy->isObjCClassType() &&
- (Context.hasSameType(RHSTy, Context.getObjCClassRedefinitionType()))) {
- RHS = ImpCastExprToType(RHS.get(), LHSTy, CK_CPointerToObjCPointerCast);
- return LHSTy;
- }
- if (RHSTy->isObjCClassType() &&
- (Context.hasSameType(LHSTy, Context.getObjCClassRedefinitionType()))) {
- LHS = ImpCastExprToType(LHS.get(), RHSTy, CK_CPointerToObjCPointerCast);
- return RHSTy;
- }
- // And the same for struct objc_object* / id
- if (LHSTy->isObjCIdType() &&
- (Context.hasSameType(RHSTy, Context.getObjCIdRedefinitionType()))) {
- RHS = ImpCastExprToType(RHS.get(), LHSTy, CK_CPointerToObjCPointerCast);
- return LHSTy;
- }
- if (RHSTy->isObjCIdType() &&
- (Context.hasSameType(LHSTy, Context.getObjCIdRedefinitionType()))) {
- LHS = ImpCastExprToType(LHS.get(), RHSTy, CK_CPointerToObjCPointerCast);
- return RHSTy;
- }
- // And the same for struct objc_selector* / SEL
- if (Context.isObjCSelType(LHSTy) &&
- (Context.hasSameType(RHSTy, Context.getObjCSelRedefinitionType()))) {
- RHS = ImpCastExprToType(RHS.get(), LHSTy, CK_BitCast);
- return LHSTy;
- }
- if (Context.isObjCSelType(RHSTy) &&
- (Context.hasSameType(LHSTy, Context.getObjCSelRedefinitionType()))) {
- LHS = ImpCastExprToType(LHS.get(), RHSTy, CK_BitCast);
- return RHSTy;
- }
- // Check constraints for Objective-C object pointers types.
- if (LHSTy->isObjCObjectPointerType() && RHSTy->isObjCObjectPointerType()) {
-
- if (Context.getCanonicalType(LHSTy) == Context.getCanonicalType(RHSTy)) {
- // Two identical object pointer types are always compatible.
- return LHSTy;
- }
- const ObjCObjectPointerType *LHSOPT = LHSTy->castAs<ObjCObjectPointerType>();
- const ObjCObjectPointerType *RHSOPT = RHSTy->castAs<ObjCObjectPointerType>();
- QualType compositeType = LHSTy;
-
- // If both operands are interfaces and either operand can be
- // assigned to the other, use that type as the composite
- // type. This allows
- // xxx ? (A*) a : (B*) b
- // where B is a subclass of A.
- //
- // Additionally, as for assignment, if either type is 'id'
- // allow silent coercion. Finally, if the types are
- // incompatible then make sure to use 'id' as the composite
- // type so the result is acceptable for sending messages to.
-
- // FIXME: Consider unifying with 'areComparableObjCPointerTypes'.
- // It could return the composite type.
- if (!(compositeType =
- Context.areCommonBaseCompatible(LHSOPT, RHSOPT)).isNull()) {
- // Nothing more to do.
- } else if (Context.canAssignObjCInterfaces(LHSOPT, RHSOPT)) {
- compositeType = RHSOPT->isObjCBuiltinType() ? RHSTy : LHSTy;
- } else if (Context.canAssignObjCInterfaces(RHSOPT, LHSOPT)) {
- compositeType = LHSOPT->isObjCBuiltinType() ? LHSTy : RHSTy;
- } else if ((LHSOPT->isObjCQualifiedIdType() ||
- RHSOPT->isObjCQualifiedIdType()) &&
- Context.ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT,
- true)) {
- // Need to handle "id<xx>" explicitly.
- // GCC allows qualified id and any Objective-C type to devolve to
- // id. Currently localizing to here until clear this should be
- // part of ObjCQualifiedIdTypesAreCompatible.
- compositeType = Context.getObjCIdType();
- } else if (LHSTy->isObjCIdType() || RHSTy->isObjCIdType()) {
- compositeType = Context.getObjCIdType();
- } else {
- Diag(QuestionLoc, diag::ext_typecheck_cond_incompatible_operands)
- << LHSTy << RHSTy
- << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
- QualType incompatTy = Context.getObjCIdType();
- LHS = ImpCastExprToType(LHS.get(), incompatTy, CK_BitCast);
- RHS = ImpCastExprToType(RHS.get(), incompatTy, CK_BitCast);
- return incompatTy;
- }
- // The object pointer types are compatible.
- LHS = ImpCastExprToType(LHS.get(), compositeType, CK_BitCast);
- RHS = ImpCastExprToType(RHS.get(), compositeType, CK_BitCast);
- return compositeType;
- }
- // Check Objective-C object pointer types and 'void *'
- if (LHSTy->isVoidPointerType() && RHSTy->isObjCObjectPointerType()) {
- if (getLangOpts().ObjCAutoRefCount) {
- // ARC forbids the implicit conversion of object pointers to 'void *',
- // so these types are not compatible.
- Diag(QuestionLoc, diag::err_cond_voidptr_arc) << LHSTy << RHSTy
- << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
- LHS = RHS = true;
- return QualType();
- }
- QualType lhptee = LHSTy->castAs<PointerType>()->getPointeeType();
- QualType rhptee = RHSTy->castAs<ObjCObjectPointerType>()->getPointeeType();
- QualType destPointee
- = Context.getQualifiedType(lhptee, rhptee.getQualifiers());
- QualType destType = Context.getPointerType(destPointee);
- // Add qualifiers if necessary.
- LHS = ImpCastExprToType(LHS.get(), destType, CK_NoOp);
- // Promote to void*.
- RHS = ImpCastExprToType(RHS.get(), destType, CK_BitCast);
- return destType;
- }
- if (LHSTy->isObjCObjectPointerType() && RHSTy->isVoidPointerType()) {
- if (getLangOpts().ObjCAutoRefCount) {
- // ARC forbids the implicit conversion of object pointers to 'void *',
- // so these types are not compatible.
- Diag(QuestionLoc, diag::err_cond_voidptr_arc) << LHSTy << RHSTy
- << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
- LHS = RHS = true;
- return QualType();
- }
- QualType lhptee = LHSTy->castAs<ObjCObjectPointerType>()->getPointeeType();
- QualType rhptee = RHSTy->castAs<PointerType>()->getPointeeType();
- QualType destPointee
- = Context.getQualifiedType(rhptee, lhptee.getQualifiers());
- QualType destType = Context.getPointerType(destPointee);
- // Add qualifiers if necessary.
- RHS = ImpCastExprToType(RHS.get(), destType, CK_NoOp);
- // Promote to void*.
- LHS = ImpCastExprToType(LHS.get(), destType, CK_BitCast);
- return destType;
- }
- return QualType();
-}
-
/// SuggestParentheses - Emit a note with a fixit hint that wraps
/// ParenRange in parentheses.
static void SuggestParentheses(Sema &Self, SourceLocation Loc,
@@ -9693,8 +8700,6 @@ static QualType computeConditionalNullability(QualType ResTy, bool IsBin,
return Ctx.getAttributedType(NewAttr, ResTy, ResTy);
}
-/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
-/// in the case of a the GNU conditional expr extension.
ExprResult Sema::ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr,
@@ -9798,7 +8803,6 @@ ExprResult Sema::ActOnConditionalOp(SourceLocation QuestionLoc,
ColonLoc, result, VK, OK);
}
-// Check that the SME attributes for PSTATE.ZA and PSTATE.SM are compatible.
bool Sema::IsInvalidSMECallConversion(QualType FromType, QualType ToType) {
unsigned FromAttributes = 0, ToAttributes = 0;
if (const auto *FromFn =
@@ -10373,7 +9377,7 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
checkObjCPointerTypesForAssignment(*this, LHSType, RHSType);
if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers() &&
result == Compatible &&
- !CheckObjCARCUnavailableWeakConversion(OrigLHSType, RHSType))
+ !ObjC().CheckObjCARCUnavailableWeakConversion(OrigLHSType, RHSType))
result = IncompatibleObjCWeakRef;
return result;
}
@@ -10599,7 +9603,7 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
return Incompatible;
Sema::AssignConvertType result = Compatible;
if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers() &&
- !CheckObjCARCUnavailableWeakConversion(LHSType, RHSType))
+ !ObjC().CheckObjCARCUnavailableWeakConversion(LHSType, RHSType))
result = IncompatibleObjCWeakRef;
return result;
}
@@ -10705,15 +9709,16 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
// diagnostics and just checking for errors, e.g., during overload
// resolution, return Incompatible to indicate the failure.
if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers() &&
- CheckObjCConversion(SourceRange(), Ty, E, CCK_ImplicitConversion,
- Diagnose, DiagnoseCFAudited) != ACR_okay) {
+ ObjC().CheckObjCConversion(SourceRange(), Ty, E,
+ CheckedConversionKind::Implicit, Diagnose,
+ DiagnoseCFAudited) != SemaObjC::ACR_okay) {
if (!Diagnose)
return Incompatible;
}
if (getLangOpts().ObjC &&
- (CheckObjCBridgeRelatedConversions(E->getBeginLoc(), LHSType,
- E->getType(), E, Diagnose) ||
- CheckConversionToObjCLiteral(LHSType, E, Diagnose))) {
+ (ObjC().CheckObjCBridgeRelatedConversions(E->getBeginLoc(), LHSType,
+ E->getType(), E, Diagnose) ||
+ ObjC().CheckConversionToObjCLiteral(LHSType, E, Diagnose))) {
if (!Diagnose)
return Incompatible;
// Replace the expression with a corrected version and continue so we
@@ -10776,9 +9781,6 @@ QualType Sema::InvalidOperands(SourceLocation Loc, ExprResult &LHS,
return QualType();
}
-// Diagnose cases where a scalar was implicitly converted to a vector and
-// diagnose the underlying types. Otherwise, diagnose the error
-// as invalid vector logical operands for non-C++ cases.
QualType Sema::InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS) {
QualType LHSType = LHS.get()->IgnoreImpCasts()->getType();
@@ -11647,7 +10649,7 @@ static bool checkArithmeticIncompletePointerType(Sema &S, SourceLocation Loc,
if (const AtomicType *ResAtomicType = ResType->getAs<AtomicType>())
ResType = ResAtomicType->getValueType();
- assert(ResType->isAnyPointerType() && !ResType->isDependentType());
+ assert(ResType->isAnyPointerType());
QualType PointeeTy = ResType->getPointeeType();
return S.RequireCompleteSizedType(
Loc, PointeeTy,
@@ -11937,6 +10939,14 @@ QualType Sema::CheckAdditionOperands(ExprResult &LHS, ExprResult &RHS,
if (isObjCPointer && checkArithmeticOnObjCPointer(*this, Loc, PExp))
return QualType();
+ // Arithmetic on label addresses is normally allowed, except when we add
+ // a ptrauth signature to the addresses.
+ if (isa<AddrLabelExpr>(PExp) && getLangOpts().PointerAuthIndirectGotos) {
+ Diag(Loc, diag::err_ptrauth_indirect_goto_addrlabel_arithmetic)
+ << /*addition*/ 1;
+ return QualType();
+ }
+
// Check array bounds for pointer arithemtic
CheckArrayAccess(PExp, IExp);
@@ -12011,6 +11021,15 @@ QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
checkArithmeticOnObjCPointer(*this, Loc, LHS.get()))
return QualType();
+ // Arithmetic on label addresses is normally allowed, except when we add
+ // a ptrauth signature to the addresses.
+ if (isa<AddrLabelExpr>(LHS.get()) &&
+ getLangOpts().PointerAuthIndirectGotos) {
+ Diag(Loc, diag::err_ptrauth_indirect_goto_addrlabel_arithmetic)
+ << /*subtraction*/ 0;
+ return QualType();
+ }
+
// The result type of a pointer-int computation is the pointer type.
if (RHS.get()->getType()->isIntegerType()) {
// Subtracting from a null pointer should produce a warning.
@@ -12118,7 +11137,7 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
if (Right.isNegative()) {
S.DiagRuntimeBehavior(Loc, RHS.get(),
S.PDiag(diag::warn_shift_negative)
- << RHS.get()->getSourceRange());
+ << RHS.get()->getSourceRange());
return;
}
@@ -12133,7 +11152,7 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
if (Right.uge(LeftSize)) {
S.DiagRuntimeBehavior(Loc, RHS.get(),
S.PDiag(diag::warn_shift_gt_typewidth)
- << RHS.get()->getSourceRange());
+ << RHS.get()->getSourceRange());
return;
}
@@ -12166,7 +11185,7 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
if (Left.isNegative()) {
S.DiagRuntimeBehavior(Loc, LHS.get(),
S.PDiag(diag::warn_shift_lhs_negative)
- << LHS.get()->getSourceRange());
+ << LHS.get()->getSourceRange());
return;
}
@@ -12530,19 +11549,20 @@ static bool hasIsEqualMethod(Sema &S, const Expr *LHS, const Expr *RHS) {
return false;
// Try to find the -isEqual: method.
- Selector IsEqualSel = S.NSAPIObj->getIsEqualSelector();
- ObjCMethodDecl *Method = S.LookupMethodInObjectType(IsEqualSel,
- InterfaceType,
- /*IsInstance=*/true);
+ Selector IsEqualSel = S.ObjC().NSAPIObj->getIsEqualSelector();
+ ObjCMethodDecl *Method =
+ S.ObjC().LookupMethodInObjectType(IsEqualSel, InterfaceType,
+ /*IsInstance=*/true);
if (!Method) {
if (Type->isObjCIdType()) {
// For 'id', just check the global pool.
- Method = S.LookupInstanceMethodInGlobalPool(IsEqualSel, SourceRange(),
- /*receiverId=*/true);
+ Method =
+ S.ObjC().LookupInstanceMethodInGlobalPool(IsEqualSel, SourceRange(),
+ /*receiverId=*/true);
} else {
// Check protocols.
- Method = S.LookupMethodInQualifiedType(IsEqualSel, Type,
- /*IsInstance=*/true);
+ Method = S.ObjC().LookupMethodInQualifiedType(IsEqualSel, Type,
+ /*IsInstance=*/true);
}
}
@@ -12560,48 +11580,6 @@ static bool hasIsEqualMethod(Sema &S, const Expr *LHS, const Expr *RHS) {
return true;
}
-Sema::ObjCLiteralKind Sema::CheckLiteralKind(Expr *FromE) {
- FromE = FromE->IgnoreParenImpCasts();
- switch (FromE->getStmtClass()) {
- default:
- break;
- case Stmt::ObjCStringLiteralClass:
- // "string literal"
- return LK_String;
- case Stmt::ObjCArrayLiteralClass:
- // "array literal"
- return LK_Array;
- case Stmt::ObjCDictionaryLiteralClass:
- // "dictionary literal"
- return LK_Dictionary;
- case Stmt::BlockExprClass:
- return LK_Block;
- case Stmt::ObjCBoxedExprClass: {
- Expr *Inner = cast<ObjCBoxedExpr>(FromE)->getSubExpr()->IgnoreParens();
- switch (Inner->getStmtClass()) {
- case Stmt::IntegerLiteralClass:
- case Stmt::FloatingLiteralClass:
- case Stmt::CharacterLiteralClass:
- case Stmt::ObjCBoolLiteralExprClass:
- case Stmt::CXXBoolLiteralExprClass:
- // "numeric literal"
- return LK_Numeric;
- case Stmt::ImplicitCastExprClass: {
- CastKind CK = cast<CastExpr>(Inner)->getCastKind();
- // Boolean literals can be represented by implicit casts.
- if (CK == CK_IntegralToBoolean || CK == CK_IntegralCast)
- return LK_Numeric;
- break;
- }
- default:
- break;
- }
- return LK_Boxed;
- }
- }
- return LK_None;
-}
-
static void diagnoseObjCLiteralComparison(Sema &S, SourceLocation Loc,
ExprResult &LHS, ExprResult &RHS,
BinaryOperator::Opcode Opc){
@@ -12624,13 +11602,13 @@ static void diagnoseObjCLiteralComparison(Sema &S, SourceLocation Loc,
// This should be kept in sync with warn_objc_literal_comparison.
// LK_String should always be after the other literals, since it has its own
// warning flag.
- Sema::ObjCLiteralKind LiteralKind = S.CheckLiteralKind(Literal);
- assert(LiteralKind != Sema::LK_Block);
- if (LiteralKind == Sema::LK_None) {
+ SemaObjC::ObjCLiteralKind LiteralKind = S.ObjC().CheckLiteralKind(Literal);
+ assert(LiteralKind != SemaObjC::LK_Block);
+ if (LiteralKind == SemaObjC::LK_None) {
llvm_unreachable("Unknown Objective-C object literal kind");
}
- if (LiteralKind == Sema::LK_String)
+ if (LiteralKind == SemaObjC::LK_String)
S.Diag(Loc, diag::warn_objc_string_literal_comparison)
<< Literal->getSourceRange();
else
@@ -12874,6 +11852,8 @@ static ImplicitConversionKind castKindToImplicitConversionKind(CastKind CK) {
case CK_IntegralComplexToReal:
case CK_IntegralRealToComplex:
return ICK_Complex_Real;
+ case CK_HLSLArrayRValue:
+ return ICK_HLSL_Array_RValue;
}
}
@@ -13424,17 +12404,18 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
if (LHSIsNull && !RHSIsNull) {
Expr *E = LHS.get();
if (getLangOpts().ObjCAutoRefCount)
- CheckObjCConversion(SourceRange(), RHSType, E,
- CCK_ImplicitConversion);
+ ObjC().CheckObjCConversion(SourceRange(), RHSType, E,
+ CheckedConversionKind::Implicit);
LHS = ImpCastExprToType(E, RHSType,
RPT ? CK_BitCast :CK_CPointerToObjCPointerCast);
}
else {
Expr *E = RHS.get();
if (getLangOpts().ObjCAutoRefCount)
- CheckObjCConversion(SourceRange(), LHSType, E, CCK_ImplicitConversion,
- /*Diagnose=*/true,
- /*DiagnoseCFAudited=*/false, Opc);
+ ObjC().CheckObjCConversion(SourceRange(), LHSType, E,
+ CheckedConversionKind::Implicit,
+ /*Diagnose=*/true,
+ /*DiagnoseCFAudited=*/false, Opc);
RHS = ImpCastExprToType(E, LHSType,
LPT ? CK_BitCast :CK_CPointerToObjCPointerCast);
}
@@ -13543,11 +12524,6 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
return InvalidOperands(Loc, LHS, RHS);
}
-// Return a signed ext_vector_type that is of identical size and number of
-// elements. For floating point vectors, return an integer type of identical
-// size and number of elements. In the non ext_vector_type case, search from
-// the largest type to the smallest type to avoid cases where long long == long,
-// where long gets picked over long long.
QualType Sema::GetSignedVectorType(QualType V) {
const VectorType *VTy = V->castAs<VectorType>();
unsigned TypeSize = Context.getTypeSize(VTy->getElementType());
@@ -13603,10 +12579,6 @@ QualType Sema::GetSignedSizelessVectorType(QualType V) {
return Context.getScalableVectorType(IntTy, VecSize.getKnownMinValue());
}
-/// CheckVectorCompareOperands - vector comparisons are a clang extension that
-/// operates on extended vector types. Instead of producing an IntTy result,
-/// like a scalar comparison, a vector comparison produces a vector of integer
-/// types.
QualType Sema::CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc) {
@@ -14645,7 +13617,7 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
const Expr *InnerLHS = LHSExpr->IgnoreParenCasts();
const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(InnerLHS);
if (!DRE || DRE->getDecl()->hasAttr<BlocksAttr>())
- checkRetainCycles(LHSExpr, RHS.get());
+ ObjC().checkRetainCycles(LHSExpr, RHS.get());
}
if (LHSType.getObjCLifetime() == Qualifiers::OCL_Strong ||
@@ -14678,6 +13650,9 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
CheckForNullPointerDereference(*this, LHSExpr);
+ AssignedEntity AE{LHSExpr};
+ checkExprLifetime(*this, AE, RHS.get());
+
if (getLangOpts().CPlusPlus20 && LHSType.isVolatileQualified()) {
if (CompoundType.isNull()) {
// C++2a [expr.ass]p5:
@@ -14722,9 +13697,6 @@ static bool IgnoreCommaOperand(const Expr *E, const ASTContext &Context) {
return false;
}
-// Look for instances where it is likely the comma operator is confused with
-// another operator. There is an explicit list of acceptable expressions for
-// the left hand side of the comma operator, otherwise emit a warning.
void Sema::DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc) {
// No warnings in macros
if (Loc.isMacroID())
@@ -14813,11 +13785,8 @@ static QualType CheckCommaOperands(Sema &S, ExprResult &LHS, ExprResult &RHS,
static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
ExprValueKind &VK,
ExprObjectKind &OK,
- SourceLocation OpLoc,
- bool IsInc, bool IsPrefix) {
- if (Op->isTypeDependent())
- return S.Context.DependentTy;
-
+ SourceLocation OpLoc, bool IsInc,
+ bool IsPrefix) {
QualType ResType = Op->getType();
// Atomic types can be used for increment / decrement where the non-atomic
// versions can, so ignore the _Atomic() specifier for the purpose of
@@ -14855,8 +13824,9 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
return QualType();
} else if (ResType->isAnyComplexType()) {
// C99 does not support ++/-- on complex types, we allow as an extension.
- S.Diag(OpLoc, diag::ext_integer_increment_complex)
- << ResType << Op->getSourceRange();
+ S.Diag(OpLoc, S.getLangOpts().C2y ? diag::warn_c2y_compat_increment_complex
+ : diag::ext_c2y_increment_complex)
+ << IsInc << Op->getSourceRange();
} else if (ResType->isPlaceholderType()) {
ExprResult PR = S.CheckPlaceholderExpr(Op);
if (PR.isInvalid()) return QualType();
@@ -14899,7 +13869,6 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
}
}
-
/// getPrimaryDecl - Helper function for CheckAddressOfOperand().
/// This routine allows us to typecheck complex/recursive expressions
/// where the declaration is needed for type checking. We only need to
@@ -15008,13 +13977,6 @@ bool Sema::CheckUseOfCXXMethodAsAddressOfOperand(SourceLocation OpLoc,
<< FixItHint::CreateInsertion(DRE->getSourceRange().getBegin(), Qual);
}
-/// CheckAddressOfOperand - The operand of & must be either a function
-/// designator or an lvalue designating an object. If it is an lvalue, the
-/// object cannot be declared with storage class register or be a bit field.
-/// Note: The usual conversions are *not* applied to the operand of the &
-/// operator (C99 6.3.2.1p[2-4]), and its result is never an lvalue.
-/// In C++, the operand might be an overloaded function name, in which case
-/// we allow the '&' but retain the overloaded-function type.
QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
if (const BuiltinType *PTy = OrigOp.get()->getType()->getAsPlaceholderType()){
if (PTy->getKind() == BuiltinType::Overload) {
@@ -15121,6 +14083,39 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
QualType MPTy = Context.getMemberPointerType(
op->getType(), Context.getTypeDeclType(MD->getParent()).getTypePtr());
+
+ if (getLangOpts().PointerAuthCalls && MD->isVirtual() &&
+ !isUnevaluatedContext() && !MPTy->isDependentType()) {
+ // When pointer authentication is enabled, argument and return types of
+ // vitual member functions must be complete. This is because vitrual
+ // member function pointers are implemented using virtual dispatch
+ // thunks and the thunks cannot be emitted if the argument or return
+ // types are incomplete.
+ auto ReturnOrParamTypeIsIncomplete = [&](QualType T,
+ SourceLocation DeclRefLoc,
+ SourceLocation RetArgTypeLoc) {
+ if (RequireCompleteType(DeclRefLoc, T, diag::err_incomplete_type)) {
+ Diag(DeclRefLoc,
+ diag::note_ptrauth_virtual_function_pointer_incomplete_arg_ret);
+ Diag(RetArgTypeLoc,
+ diag::note_ptrauth_virtual_function_incomplete_arg_ret_type)
+ << T;
+ return true;
+ }
+ return false;
+ };
+ QualType RetTy = MD->getReturnType();
+ bool IsIncomplete =
+ !RetTy->isVoidType() &&
+ ReturnOrParamTypeIsIncomplete(
+ RetTy, OpLoc, MD->getReturnTypeSourceRange().getBegin());
+ for (auto *PVD : MD->parameters())
+ IsIncomplete |= ReturnOrParamTypeIsIncomplete(PVD->getType(), OpLoc,
+ PVD->getBeginLoc());
+ if (IsIncomplete)
+ return QualType();
+ }
+
// Under the MS ABI, lock down the inheritance model now.
if (Context.getTargetInfo().getCXXABI().isMicrosoft())
(void)isCompleteType(OpLoc, MPTy);
@@ -15169,7 +14164,14 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
// Okay: we can take the address of a field.
// Could be a pointer to member, though, if there is an explicit
// scope qualifier for the class.
- if (isa<DeclRefExpr>(op) && cast<DeclRefExpr>(op)->getQualifier()) {
+
+ // [C++26] [expr.prim.id.general]
+ // If an id-expression E denotes a non-static non-type member
+ // of some class C [...] and if E is a qualified-id, E is
+ // not the un-parenthesized operand of the unary & operator [...]
+ // the id-expression is transformed into a class member access expression.
+ if (isa<DeclRefExpr>(op) && cast<DeclRefExpr>(op)->getQualifier() &&
+ !isa<ParenExpr>(OrigOp.get())) {
DeclContext *Ctx = dcl->getDeclContext();
if (Ctx && Ctx->isRecord()) {
if (dcl->getType()->isReferenceType()) {
@@ -15253,9 +14255,6 @@ static void RecordModifiableNonNullParam(Sema &S, const Expr *Exp) {
static QualType CheckIndirectionOperand(Sema &S, Expr *Op, ExprValueKind &VK,
SourceLocation OpLoc,
bool IsAfterAmp = false) {
- if (Op->isTypeDependent())
- return S.Context.DependentTy;
-
ExprResult ConvResult = S.UsualUnaryConversions(Op);
if (ConvResult.isInvalid())
return QualType();
@@ -15587,9 +14586,6 @@ static bool needsConversionOfHalfVec(bool OpRequiresConversion, ASTContext &Ctx,
return HasVectorOfHalfType(E0) && (!E1 || HasVectorOfHalfType(E1));
}
-/// CreateBuiltinBinOp - Creates a new built-in binary operation with
-/// operator @p Opc at location @c TokLoc. This routine only supports
-/// built-in operations; ActOnBinOp handles overloaded operators.
ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr) {
@@ -15721,6 +14717,11 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
case BO_GT:
ConvertHalfVec = true;
ResultTy = CheckCompareOperands(LHS, RHS, OpLoc, Opc);
+
+ if (const auto *BI = dyn_cast<BinaryOperator>(LHSExpr);
+ BI && BI->isComparisonOp())
+ Diag(OpLoc, diag::warn_consecutive_comparison);
+
break;
case BO_EQ:
case BO_NE:
@@ -16057,7 +15058,6 @@ static void DiagnoseBinOpPrecedence(Sema &Self, BinaryOperatorKind Opc,
DiagnoseShiftCompare(Self, OpLoc, LHSExpr, RHSExpr);
}
-// Binary Operators. 'Tok' is the token for the operator.
ExprResult Sema::ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind,
Expr *LHSExpr, Expr *RHSExpr) {
@@ -16131,7 +15131,7 @@ ExprResult Sema::BuildBinOp(Scope *S, SourceLocation OpLoc,
LHSExpr = LHS.get();
RHSExpr = RHS.get();
- // We want to end up calling one of checkPseudoObjectAssignment
+ // We want to end up calling one of SemaPseudoObject::checkAssignment
// (if the LHS is a pseudo-object), BuildOverloadedBinOp (if
// both expressions are overloadable or either is type-dependent),
// or CreateBuiltinBinOp (in any other case). We also want to get
@@ -16142,7 +15142,7 @@ ExprResult Sema::BuildBinOp(Scope *S, SourceLocation OpLoc,
// Assignments with a pseudo-object l-value need special analysis.
if (pty->getKind() == BuiltinType::PseudoObject &&
BinaryOperator::isAssignmentOp(Opc))
- return checkPseudoObjectAssignment(S, OpLoc, Opc, LHSExpr, RHSExpr);
+ return PseudoObject().checkAssignment(S, OpLoc, Opc, LHSExpr, RHSExpr);
// Don't resolve overloads if the other type is overloadable.
if (getLangOpts().CPlusPlus && pty->getKind() == BuiltinType::Overload) {
@@ -16211,14 +15211,10 @@ ExprResult Sema::BuildBinOp(Scope *S, SourceLocation OpLoc,
}
if (getLangOpts().CPlusPlus) {
- // If either expression is type-dependent, always build an
- // overloaded op.
- if (LHSExpr->isTypeDependent() || RHSExpr->isTypeDependent())
- return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
-
- // Otherwise, build an overloaded op if either expression has an
- // overloadable type.
- if (LHSExpr->getType()->isOverloadableType() ||
+ // Otherwise, build an overloaded op if either expression is type-dependent
+ // or has an overloadable type.
+ if (LHSExpr->isTypeDependent() || RHSExpr->isTypeDependent() ||
+ LHSExpr->getType()->isOverloadableType() ||
RHSExpr->getType()->isOverloadableType())
return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
}
@@ -16309,188 +15305,191 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
return ExprError(Diag(OpLoc, diag::err_hlsl_operator_unsupported) << 1);
}
- switch (Opc) {
- case UO_PreInc:
- case UO_PreDec:
- case UO_PostInc:
- case UO_PostDec:
- resultType = CheckIncrementDecrementOperand(*this, Input.get(), VK, OK,
- OpLoc,
- Opc == UO_PreInc ||
- Opc == UO_PostInc,
- Opc == UO_PreInc ||
- Opc == UO_PreDec);
- CanOverflow = isOverflowingIntegerType(Context, resultType);
- break;
- case UO_AddrOf:
- resultType = CheckAddressOfOperand(Input, OpLoc);
- CheckAddressOfNoDeref(InputExpr);
- RecordModifiableNonNullParam(*this, InputExpr);
- break;
- case UO_Deref: {
- Input = DefaultFunctionArrayLvalueConversion(Input.get());
- if (Input.isInvalid()) return ExprError();
- resultType =
- CheckIndirectionOperand(*this, Input.get(), VK, OpLoc, IsAfterAmp);
- break;
- }
- case UO_Plus:
- case UO_Minus:
- CanOverflow = Opc == UO_Minus &&
- isOverflowingIntegerType(Context, Input.get()->getType());
- Input = UsualUnaryConversions(Input.get());
- if (Input.isInvalid()) return ExprError();
- // Unary plus and minus require promoting an operand of half vector to a
- // float vector and truncating the result back to a half vector. For now, we
- // do this only when HalfArgsAndReturns is set (that is, when the target is
- // arm or arm64).
- ConvertHalfVec = needsConversionOfHalfVec(true, Context, Input.get());
-
- // If the operand is a half vector, promote it to a float vector.
- if (ConvertHalfVec)
- Input = convertVector(Input.get(), Context.FloatTy, *this);
- resultType = Input.get()->getType();
- if (resultType->isDependentType())
- break;
- if (resultType->isArithmeticType()) // C99 6.5.3.3p1
- break;
- else if (resultType->isVectorType() &&
- // The z vector extensions don't allow + or - with bool vectors.
- (!Context.getLangOpts().ZVector ||
- resultType->castAs<VectorType>()->getVectorKind() !=
- VectorKind::AltiVecBool))
- break;
- else if (resultType->isSveVLSBuiltinType()) // SVE vectors allow + and -
- break;
- else if (getLangOpts().CPlusPlus && // C++ [expr.unary.op]p6
- Opc == UO_Plus &&
- resultType->isPointerType())
+ if (InputExpr->isTypeDependent() &&
+ InputExpr->getType()->isSpecificBuiltinType(BuiltinType::Dependent)) {
+ resultType = Context.DependentTy;
+ } else {
+ switch (Opc) {
+ case UO_PreInc:
+ case UO_PreDec:
+ case UO_PostInc:
+ case UO_PostDec:
+ resultType =
+ CheckIncrementDecrementOperand(*this, Input.get(), VK, OK, OpLoc,
+ Opc == UO_PreInc || Opc == UO_PostInc,
+ Opc == UO_PreInc || Opc == UO_PreDec);
+ CanOverflow = isOverflowingIntegerType(Context, resultType);
break;
-
- return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
- << resultType << Input.get()->getSourceRange());
-
- case UO_Not: // bitwise complement
- Input = UsualUnaryConversions(Input.get());
- if (Input.isInvalid())
- return ExprError();
- resultType = Input.get()->getType();
- if (resultType->isDependentType())
+ case UO_AddrOf:
+ resultType = CheckAddressOfOperand(Input, OpLoc);
+ CheckAddressOfNoDeref(InputExpr);
+ RecordModifiableNonNullParam(*this, InputExpr);
break;
- // C99 6.5.3.3p1. We allow complex int and float as a GCC extension.
- if (resultType->isComplexType() || resultType->isComplexIntegerType())
- // C99 does not support '~' for complex conjugation.
- Diag(OpLoc, diag::ext_integer_complement_complex)
- << resultType << Input.get()->getSourceRange();
- else if (resultType->hasIntegerRepresentation())
+ case UO_Deref: {
+ Input = DefaultFunctionArrayLvalueConversion(Input.get());
+ if (Input.isInvalid())
+ return ExprError();
+ resultType =
+ CheckIndirectionOperand(*this, Input.get(), VK, OpLoc, IsAfterAmp);
break;
- else if (resultType->isExtVectorType() && Context.getLangOpts().OpenCL) {
- // OpenCL v1.1 s6.3.f: The bitwise operator not (~) does not operate
- // on vector float types.
- QualType T = resultType->castAs<ExtVectorType>()->getElementType();
- if (!T->isIntegerType())
- return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
- << resultType << Input.get()->getSourceRange());
- } else {
- return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
- << resultType << Input.get()->getSourceRange());
- }
- break;
-
- case UO_LNot: // logical negation
- // Unlike +/-/~, integer promotions aren't done here (C99 6.5.3.3p5).
- Input = DefaultFunctionArrayLvalueConversion(Input.get());
- if (Input.isInvalid()) return ExprError();
- resultType = Input.get()->getType();
-
- // Though we still have to promote half FP to float...
- if (resultType->isHalfType() && !Context.getLangOpts().NativeHalfType) {
- Input = ImpCastExprToType(Input.get(), Context.FloatTy, CK_FloatingCast).get();
- resultType = Context.FloatTy;
}
+ case UO_Plus:
+ case UO_Minus:
+ CanOverflow = Opc == UO_Minus &&
+ isOverflowingIntegerType(Context, Input.get()->getType());
+ Input = UsualUnaryConversions(Input.get());
+ if (Input.isInvalid())
+ return ExprError();
+ // Unary plus and minus require promoting an operand of half vector to a
+ // float vector and truncating the result back to a half vector. For now,
+ // we do this only when HalfArgsAndReturns is set (that is, when the
+ // target is arm or arm64).
+ ConvertHalfVec = needsConversionOfHalfVec(true, Context, Input.get());
+
+ // If the operand is a half vector, promote it to a float vector.
+ if (ConvertHalfVec)
+ Input = convertVector(Input.get(), Context.FloatTy, *this);
+ resultType = Input.get()->getType();
+ if (resultType->isArithmeticType()) // C99 6.5.3.3p1
+ break;
+ else if (resultType->isVectorType() &&
+ // The z vector extensions don't allow + or - with bool vectors.
+ (!Context.getLangOpts().ZVector ||
+ resultType->castAs<VectorType>()->getVectorKind() !=
+ VectorKind::AltiVecBool))
+ break;
+ else if (resultType->isSveVLSBuiltinType()) // SVE vectors allow + and -
+ break;
+ else if (getLangOpts().CPlusPlus && // C++ [expr.unary.op]p6
+ Opc == UO_Plus && resultType->isPointerType())
+ break;
- // WebAsembly tables can't be used in unary expressions.
- if (resultType->isPointerType() &&
- resultType->getPointeeType().isWebAssemblyReferenceType()) {
return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
<< resultType << Input.get()->getSourceRange());
- }
- if (resultType->isDependentType())
- break;
- if (resultType->isScalarType() && !isScopedEnumerationType(resultType)) {
- // C99 6.5.3.3p1: ok, fallthrough;
- if (Context.getLangOpts().CPlusPlus) {
- // C++03 [expr.unary.op]p8, C++0x [expr.unary.op]p9:
- // operand contextually converted to bool.
- Input = ImpCastExprToType(Input.get(), Context.BoolTy,
- ScalarTypeToBooleanCastKind(resultType));
- } else if (Context.getLangOpts().OpenCL &&
- Context.getLangOpts().OpenCLVersion < 120) {
- // OpenCL v1.1 6.3.h: The logical operator not (!) does not
- // operate on scalar float types.
- if (!resultType->isIntegerType() && !resultType->isPointerType())
- return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
- << resultType << Input.get()->getSourceRange());
- }
- } else if (resultType->isExtVectorType()) {
- if (Context.getLangOpts().OpenCL &&
- Context.getLangOpts().getOpenCLCompatibleVersion() < 120) {
- // OpenCL v1.1 6.3.h: The logical operator not (!) does not
- // operate on vector float types.
+ case UO_Not: // bitwise complement
+ Input = UsualUnaryConversions(Input.get());
+ if (Input.isInvalid())
+ return ExprError();
+ resultType = Input.get()->getType();
+ // C99 6.5.3.3p1. We allow complex int and float as a GCC extension.
+ if (resultType->isComplexType() || resultType->isComplexIntegerType())
+ // C99 does not support '~' for complex conjugation.
+ Diag(OpLoc, diag::ext_integer_complement_complex)
+ << resultType << Input.get()->getSourceRange();
+ else if (resultType->hasIntegerRepresentation())
+ break;
+ else if (resultType->isExtVectorType() && Context.getLangOpts().OpenCL) {
+ // OpenCL v1.1 s6.3.f: The bitwise operator not (~) does not operate
+ // on vector float types.
QualType T = resultType->castAs<ExtVectorType>()->getElementType();
if (!T->isIntegerType())
return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
<< resultType << Input.get()->getSourceRange());
+ } else {
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input.get()->getSourceRange());
}
- // Vector logical not returns the signed variant of the operand type.
- resultType = GetSignedVectorType(resultType);
break;
- } else if (Context.getLangOpts().CPlusPlus && resultType->isVectorType()) {
- const VectorType *VTy = resultType->castAs<VectorType>();
- if (VTy->getVectorKind() != VectorKind::Generic)
+
+ case UO_LNot: // logical negation
+ // Unlike +/-/~, integer promotions aren't done here (C99 6.5.3.3p5).
+ Input = DefaultFunctionArrayLvalueConversion(Input.get());
+ if (Input.isInvalid())
+ return ExprError();
+ resultType = Input.get()->getType();
+
+ // Though we still have to promote half FP to float...
+ if (resultType->isHalfType() && !Context.getLangOpts().NativeHalfType) {
+ Input = ImpCastExprToType(Input.get(), Context.FloatTy, CK_FloatingCast)
+ .get();
+ resultType = Context.FloatTy;
+ }
+
+ // WebAsembly tables can't be used in unary expressions.
+ if (resultType->isPointerType() &&
+ resultType->getPointeeType().isWebAssemblyReferenceType()) {
return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
<< resultType << Input.get()->getSourceRange());
+ }
- // Vector logical not returns the signed variant of the operand type.
- resultType = GetSignedVectorType(resultType);
- break;
- } else {
- return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
- << resultType << Input.get()->getSourceRange());
- }
+ if (resultType->isScalarType() && !isScopedEnumerationType(resultType)) {
+ // C99 6.5.3.3p1: ok, fallthrough;
+ if (Context.getLangOpts().CPlusPlus) {
+ // C++03 [expr.unary.op]p8, C++0x [expr.unary.op]p9:
+ // operand contextually converted to bool.
+ Input = ImpCastExprToType(Input.get(), Context.BoolTy,
+ ScalarTypeToBooleanCastKind(resultType));
+ } else if (Context.getLangOpts().OpenCL &&
+ Context.getLangOpts().OpenCLVersion < 120) {
+ // OpenCL v1.1 6.3.h: The logical operator not (!) does not
+ // operate on scalar float types.
+ if (!resultType->isIntegerType() && !resultType->isPointerType())
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input.get()->getSourceRange());
+ }
+ } else if (resultType->isExtVectorType()) {
+ if (Context.getLangOpts().OpenCL &&
+ Context.getLangOpts().getOpenCLCompatibleVersion() < 120) {
+ // OpenCL v1.1 6.3.h: The logical operator not (!) does not
+ // operate on vector float types.
+ QualType T = resultType->castAs<ExtVectorType>()->getElementType();
+ if (!T->isIntegerType())
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input.get()->getSourceRange());
+ }
+ // Vector logical not returns the signed variant of the operand type.
+ resultType = GetSignedVectorType(resultType);
+ break;
+ } else if (Context.getLangOpts().CPlusPlus &&
+ resultType->isVectorType()) {
+ const VectorType *VTy = resultType->castAs<VectorType>();
+ if (VTy->getVectorKind() != VectorKind::Generic)
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input.get()->getSourceRange());
- // LNot always has type int. C99 6.5.3.3p5.
- // In C++, it's bool. C++ 5.3.1p8
- resultType = Context.getLogicalOperationType();
- break;
- case UO_Real:
- case UO_Imag:
- resultType = CheckRealImagOperand(*this, Input, OpLoc, Opc == UO_Real);
- // _Real maps ordinary l-values into ordinary l-values. _Imag maps ordinary
- // complex l-values to ordinary l-values and all other values to r-values.
- if (Input.isInvalid()) return ExprError();
- if (Opc == UO_Real || Input.get()->getType()->isAnyComplexType()) {
- if (Input.get()->isGLValue() &&
- Input.get()->getObjectKind() == OK_Ordinary)
- VK = Input.get()->getValueKind();
- } else if (!getLangOpts().CPlusPlus) {
- // In C, a volatile scalar is read by __imag. In C++, it is not.
- Input = DefaultLvalueConversion(Input.get());
+ // Vector logical not returns the signed variant of the operand type.
+ resultType = GetSignedVectorType(resultType);
+ break;
+ } else {
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input.get()->getSourceRange());
+ }
+
+ // LNot always has type int. C99 6.5.3.3p5.
+ // In C++, it's bool. C++ 5.3.1p8
+ resultType = Context.getLogicalOperationType();
+ break;
+ case UO_Real:
+ case UO_Imag:
+ resultType = CheckRealImagOperand(*this, Input, OpLoc, Opc == UO_Real);
+ // _Real maps ordinary l-values into ordinary l-values. _Imag maps
+ // ordinary complex l-values to ordinary l-values and all other values to
+ // r-values.
+ if (Input.isInvalid())
+ return ExprError();
+ if (Opc == UO_Real || Input.get()->getType()->isAnyComplexType()) {
+ if (Input.get()->isGLValue() &&
+ Input.get()->getObjectKind() == OK_Ordinary)
+ VK = Input.get()->getValueKind();
+ } else if (!getLangOpts().CPlusPlus) {
+ // In C, a volatile scalar is read by __imag. In C++, it is not.
+ Input = DefaultLvalueConversion(Input.get());
+ }
+ break;
+ case UO_Extension:
+ resultType = Input.get()->getType();
+ VK = Input.get()->getValueKind();
+ OK = Input.get()->getObjectKind();
+ break;
+ case UO_Coawait:
+ // It's unnecessary to represent the pass-through operator co_await in the
+ // AST; just return the input expression instead.
+ assert(!Input.get()->getType()->isDependentType() &&
+ "the co_await expression must be non-dependant before "
+ "building operator co_await");
+ return Input;
}
- break;
- case UO_Extension:
- resultType = Input.get()->getType();
- VK = Input.get()->getValueKind();
- OK = Input.get()->getObjectKind();
- break;
- case UO_Coawait:
- // It's unnecessary to represent the pass-through operator co_await in the
- // AST; just return the input expression instead.
- assert(!Input.get()->getType()->isDependentType() &&
- "the co_await expression must be non-dependant before "
- "building operator co_await");
- return Input;
}
if (resultType.isNull() || Input.isInvalid())
return ExprError();
@@ -16517,9 +15516,6 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
return UO;
}
-/// Determine whether the given expression is a qualified member
-/// access expression, of a form that could be turned into a pointer to member
-/// with the address-of operator.
bool Sema::isQualifiedMemberAccess(Expr *E) {
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
if (!DRE->getQualifier())
@@ -16566,7 +15562,7 @@ ExprResult Sema::BuildUnaryOp(Scope *S, SourceLocation OpLoc,
// Increment and decrement of pseudo-object references.
if (pty->getKind() == BuiltinType::PseudoObject &&
UnaryOperator::isIncrementDecrementOp(Opc))
- return checkPseudoObjectIncDec(S, OpLoc, Opc, Input);
+ return PseudoObject().checkIncDec(S, OpLoc, Opc, Input);
// extension is always a builtin operator.
if (Opc == UO_Extension)
@@ -16601,14 +15597,12 @@ ExprResult Sema::BuildUnaryOp(Scope *S, SourceLocation OpLoc,
return CreateBuiltinUnaryOp(OpLoc, Opc, Input, IsAfterAmp);
}
-// Unary Operators. 'Tok' is the token for the operator.
ExprResult Sema::ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op,
Expr *Input, bool IsAfterAmp) {
return BuildUnaryOp(S, OpLoc, ConvertTokenKindToUnaryOpcode(Op), Input,
IsAfterAmp);
}
-/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult Sema::ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl) {
TheDecl->markUsed(Context);
@@ -16935,7 +15929,6 @@ ExprResult Sema::ActOnChooseExpr(SourceLocation BuiltinLoc,
// Clang Extensions.
//===----------------------------------------------------------------------===//
-/// ActOnBlockStart - This callback is invoked when a block literal is started.
void Sema::ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope) {
BlockDecl *Block = BlockDecl::Create(Context, CurContext, CaretLoc);
@@ -17084,8 +16077,6 @@ void Sema::ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
}
}
-/// ActOnBlockError - If there is an error parsing a block, this callback
-/// is invoked to pop the information about the block from the action impl.
void Sema::ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope) {
// Leave the expression-evaluation context.
DiscardCleanupsInEvaluationContext();
@@ -17096,8 +16087,6 @@ void Sema::ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope) {
PopFunctionScopeInfo();
}
-/// ActOnBlockStmtExpr - This is called when the body of a block statement
-/// literal was successfully completed. ^(int x){...}
ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
Stmt *Body, Scope *CurScope) {
// If blocks are disabled, emit an error.
@@ -17303,8 +16292,9 @@ ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc,
// CUDA device code does not support varargs.
if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
if (const FunctionDecl *F = dyn_cast<FunctionDecl>(CurContext)) {
- CUDAFunctionTarget T = IdentifyCUDATarget(F);
- if (T == CFT_Global || T == CFT_Device || T == CFT_HostDevice)
+ CUDAFunctionTarget T = CUDA().IdentifyTarget(F);
+ if (T == CUDAFunctionTarget::Global || T == CUDAFunctionTarget::Device ||
+ T == CUDAFunctionTarget::HostDevice)
return ExprError(Diag(E->getBeginLoc(), diag::err_va_arg_in_device));
}
}
@@ -17575,59 +16565,13 @@ ExprResult Sema::BuildSourceLocExpr(SourceLocIdentKind Kind, QualType ResultTy,
SourceLocExpr(Context, Kind, ResultTy, BuiltinLoc, RPLoc, ParentContext);
}
-bool Sema::CheckConversionToObjCLiteral(QualType DstType, Expr *&Exp,
- bool Diagnose) {
- if (!getLangOpts().ObjC)
- return false;
-
- const ObjCObjectPointerType *PT = DstType->getAs<ObjCObjectPointerType>();
- if (!PT)
- return false;
- const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
-
- // Ignore any parens, implicit casts (should only be
- // array-to-pointer decays), and not-so-opaque values. The last is
- // important for making this trigger for property assignments.
- Expr *SrcExpr = Exp->IgnoreParenImpCasts();
- if (OpaqueValueExpr *OV = dyn_cast<OpaqueValueExpr>(SrcExpr))
- if (OV->getSourceExpr())
- SrcExpr = OV->getSourceExpr()->IgnoreParenImpCasts();
-
- if (auto *SL = dyn_cast<StringLiteral>(SrcExpr)) {
- if (!PT->isObjCIdType() &&
- !(ID && ID->getIdentifier()->isStr("NSString")))
- return false;
- if (!SL->isOrdinary())
- return false;
-
- if (Diagnose) {
- Diag(SL->getBeginLoc(), diag::err_missing_atsign_prefix)
- << /*string*/0 << FixItHint::CreateInsertion(SL->getBeginLoc(), "@");
- Exp = BuildObjCStringLiteral(SL->getBeginLoc(), SL).get();
- }
- return true;
- }
-
- if ((isa<IntegerLiteral>(SrcExpr) || isa<CharacterLiteral>(SrcExpr) ||
- isa<FloatingLiteral>(SrcExpr) || isa<ObjCBoolLiteralExpr>(SrcExpr) ||
- isa<CXXBoolLiteralExpr>(SrcExpr)) &&
- !SrcExpr->isNullPointerConstant(
- getASTContext(), Expr::NPC_NeverValueDependent)) {
- if (!ID || !ID->getIdentifier()->isStr("NSNumber"))
- return false;
- if (Diagnose) {
- Diag(SrcExpr->getBeginLoc(), diag::err_missing_atsign_prefix)
- << /*number*/1
- << FixItHint::CreateInsertion(SrcExpr->getBeginLoc(), "@");
- Expr *NumLit =
- BuildObjCNumericLiteral(SrcExpr->getBeginLoc(), SrcExpr).get();
- if (NumLit)
- Exp = NumLit;
- }
- return true;
- }
-
- return false;
+ExprResult Sema::ActOnEmbedExpr(SourceLocation EmbedKeywordLoc,
+ StringLiteral *BinaryData) {
+ EmbedDataStorage *Data = new (Context) EmbedDataStorage;
+ Data->BinaryData = BinaryData;
+ return new (Context)
+ EmbedExpr(Context, EmbedKeywordLoc, Data, /*NumOfElements=*/0,
+ Data->getDataElementCount());
}
static bool maybeDiagnoseAssignmentToFunction(Sema &S, QualType DstType,
@@ -17719,11 +16663,11 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
}
CheckInferredResultType = DstType->isObjCObjectPointerType() &&
SrcType->isObjCObjectPointerType();
- if (!CheckInferredResultType) {
- ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
- } else if (CheckInferredResultType) {
+ if (CheckInferredResultType) {
SrcType = SrcType.getUnqualifiedType();
DstType = DstType.getUnqualifiedType();
+ } else {
+ ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
}
MayHaveConvFixit = true;
break;
@@ -17754,7 +16698,6 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
if (lhq.getAddressSpace() != rhq.getAddressSpace()) {
DiagKind = diag::err_typecheck_incompatible_address_space;
break;
-
} else if (lhq.getObjCLifetime() != rhq.getObjCLifetime()) {
DiagKind = diag::err_typecheck_incompatible_ownership;
break;
@@ -17924,10 +16867,10 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
FirstType, /*TakingAddress=*/true);
if (CheckInferredResultType)
- EmitRelatedResultTypeNote(SrcExpr);
+ ObjC().EmitRelatedResultTypeNote(SrcExpr);
if (Action == AA_Returning && ConvTy == IncompatiblePointer)
- EmitRelatedResultTypeNoteForReturn(DstType);
+ ObjC().EmitRelatedResultTypeNoteForReturn(DstType);
if (Complained)
*Complained = true;
@@ -18046,6 +16989,12 @@ Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
if (Converted.isInvalid())
return Converted;
E = Converted.get();
+ // The 'explicit' case causes us to get a RecoveryExpr. Give up here so we
+ // don't try to evaluate it later. We also don't want to return the
+ // RecoveryExpr here, as it results in this call succeeding, thus callers of
+ // this function will attempt to use 'Value'.
+ if (isa<RecoveryExpr>(E))
+ return ExprError();
if (!E->getType()->isIntegralOrUnscopedEnumerationType())
return ExprError();
} else if (!E->getType()->isIntegralOrUnscopedEnumerationType()) {
@@ -18065,11 +17014,38 @@ Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
// Circumvent ICE checking in C++11 to avoid evaluating the expression twice
// in the non-ICE case.
if (!getLangOpts().CPlusPlus11 && E->isIntegerConstantExpr(Context)) {
+ SmallVector<PartialDiagnosticAt, 8> Notes;
if (Result)
- *Result = E->EvaluateKnownConstIntCheckOverflow(Context);
+ *Result = E->EvaluateKnownConstIntCheckOverflow(Context, &Notes);
if (!isa<ConstantExpr>(E))
E = Result ? ConstantExpr::Create(Context, E, APValue(*Result))
: ConstantExpr::Create(Context, E);
+
+ if (Notes.empty())
+ return E;
+
+ // If our only note is the usual "invalid subexpression" note, just point
+ // the caret at its location rather than producing an essentially
+ // redundant note.
+ if (Notes.size() == 1 && Notes[0].second.getDiagID() ==
+ diag::note_invalid_subexpr_in_const_expr) {
+ DiagLoc = Notes[0].first;
+ Notes.clear();
+ }
+
+ if (getLangOpts().CPlusPlus) {
+ if (!Diagnoser.Suppress) {
+ Diagnoser.diagnoseNotICE(*this, DiagLoc) << E->getSourceRange();
+ for (const PartialDiagnosticAt &Note : Notes)
+ Diag(Note.first, Note.second);
+ }
+ return ExprError();
+ }
+
+ Diagnoser.diagnoseFold(*this, DiagLoc) << E->getSourceRange();
+ for (const PartialDiagnosticAt &Note : Notes)
+ Diag(Note.first, Note.second);
+
return E;
}
@@ -18081,7 +17057,8 @@ Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
// not a constant expression as a side-effect.
bool Folded =
E->EvaluateAsRValue(EvalResult, Context, /*isConstantContext*/ true) &&
- EvalResult.Val.isInt() && !EvalResult.HasSideEffects;
+ EvalResult.Val.isInt() && !EvalResult.HasSideEffects &&
+ (!getLangOpts().CPlusPlus || !EvalResult.HasUndefinedBehavior);
if (!isa<ConstantExpr>(E))
E = ConstantExpr::Create(Context, E, EvalResult.Val);
@@ -18184,8 +17161,7 @@ ExprResult Sema::TransformToPotentiallyEvaluated(Expr *E) {
TypeSourceInfo *Sema::TransformToPotentiallyEvaluated(TypeSourceInfo *TInfo) {
assert(isUnevaluatedContext() &&
"Should only transform unevaluated expressions");
- ExprEvalContexts.back().Context =
- ExprEvalContexts[ExprEvalContexts.size() - 2].Context;
+ ExprEvalContexts.back().Context = parentEvaluationContext().Context;
if (isUnevaluatedContext())
return TInfo;
return TransformToPE(*this).TransformType(TInfo);
@@ -18202,14 +17178,13 @@ Sema::PushExpressionEvaluationContext(
// discarded statements or immediate context are themselves
// a discarded statement or an immediate context, respectively.
ExprEvalContexts.back().InDiscardedStatement =
- ExprEvalContexts[ExprEvalContexts.size() - 2]
- .isDiscardedStatementContext();
+ parentEvaluationContext().isDiscardedStatementContext();
// C++23 [expr.const]/p15
// An expression or conversion is in an immediate function context if [...]
// it is a subexpression of a manifestly constant-evaluated expression or
// conversion.
- const auto &Prev = ExprEvalContexts[ExprEvalContexts.size() - 2];
+ const auto &Prev = parentEvaluationContext();
ExprEvalContexts.back().InImmediateFunctionContext =
Prev.isImmediateFunctionContext() || Prev.isConstantEvaluated();
@@ -18274,10 +17249,6 @@ void Sema::WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec) {
Rec.PossibleDerefs.clear();
}
-/// Check whether E, which is either a discarded-value expression or an
-/// unevaluated operand, is a simple-assignment to a volatlie-qualified lvalue,
-/// and if so, remove it from the list of volatile-qualified assignments that
-/// we are going to warn are deprecated.
void Sema::CheckUnusedVolatileAssignment(Expr *E) {
if (!E->getType().isVolatileQualified() || !getLangOpts().CPlusPlus20)
return;
@@ -18533,7 +17504,7 @@ HandleImmediateInvocations(Sema &SemaRef,
Sema::ExpressionEvaluationContextRecord &Rec) {
if ((Rec.ImmediateInvocationCandidates.size() == 0 &&
Rec.ReferenceToConsteval.size() == 0) ||
- SemaRef.RebuildingImmediateInvocation)
+ Rec.isImmediateFunctionContext() || SemaRef.RebuildingImmediateInvocation)
return;
/// When we have more than 1 ImmediateInvocationCandidates or previously
@@ -18652,6 +17623,16 @@ void Sema::PopExpressionEvaluationContext() {
}
}
+ // Append the collected materialized temporaries into previous context before
+ // exit if the previous also is a lifetime extending context.
+ auto &PrevRecord = parentEvaluationContext();
+ if (getLangOpts().CPlusPlus23 && Rec.InLifetimeExtendingContext &&
+ PrevRecord.InLifetimeExtendingContext &&
+ !Rec.ForRangeLifetimeExtendTemps.empty()) {
+ PrevRecord.ForRangeLifetimeExtendTemps.append(
+ Rec.ForRangeLifetimeExtendTemps);
+ }
+
WarnOnPendingNoDerefs(Rec);
HandleImmediateInvocations(*this, Rec);
@@ -18867,8 +17848,6 @@ static bool isImplicitlyDefinableConstexprFunction(FunctionDecl *Func) {
return CCD && CCD->getInheritedConstructor();
}
-/// Mark a function referenced, and check whether it is odr-used
-/// (C++ [basic.def.odr]p2, C99 6.9p3)
void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse) {
assert(Func && "No function?");
@@ -18931,8 +17910,10 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
// Note that we skip the implicit instantiation of templates that are only
// used in unused default arguments or by recursive calls to themselves.
// This is formally non-conforming, but seems reasonable in practice.
- bool NeedDefinition = !IsRecursiveCall && (OdrUse == OdrUseContext::Used ||
- NeededForConstantEvaluation);
+ bool NeedDefinition =
+ !IsRecursiveCall &&
+ (OdrUse == OdrUseContext::Used ||
+ (NeededForConstantEvaluation && !Func->isPureVirtual()));
// C++14 [temp.expl.spec]p6:
// If a template [...] is explicitly specialized then that specialization
@@ -18945,7 +17926,7 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
checkSpecializationReachability(Loc, Func);
if (getLangOpts().CUDA)
- CheckCUDACall(Loc, Func);
+ CUDA().CheckCall(Loc, Func);
// If we need a definition, try to create one.
if (NeedDefinition && !Func->getBody()) {
@@ -19092,7 +18073,7 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
// side. Therefore keep trying until it is recorded.
if (LangOpts.OffloadImplicitHostDeviceTemplates && LangOpts.CUDAIsDevice &&
!getASTContext().CUDAImplicitHostDeviceFunUsedByDevice.count(Func))
- CUDARecordImplicitHostDeviceFuncUsedByDevice(Func);
+ CUDA().RecordImplicitHostDeviceFuncUsedByDevice(Func);
// If this is the first "real" use, act on that.
if (OdrUse == OdrUseContext::Used && !Func->isUsed(/*CheckUsedAttr=*/false)) {
@@ -19157,7 +18138,7 @@ MarkVarDeclODRUsed(ValueDecl *V, SourceLocation Loc, Sema &SemaRef,
}
QualType CaptureType, DeclRefType;
if (SemaRef.LangOpts.OpenMP)
- SemaRef.tryCaptureOpenMPLambdas(V);
+ SemaRef.OpenMP().tryCaptureOpenMPLambdas(V);
SemaRef.tryCaptureVariable(V, Loc, Sema::TryCapture_Implicit,
/*EllipsisLoc*/ SourceLocation(),
/*BuildAndDiagnose*/ true, CaptureType,
@@ -19165,26 +18146,28 @@ MarkVarDeclODRUsed(ValueDecl *V, SourceLocation Loc, Sema &SemaRef,
if (SemaRef.LangOpts.CUDA && Var->hasGlobalStorage()) {
auto *FD = dyn_cast_or_null<FunctionDecl>(SemaRef.CurContext);
- auto VarTarget = SemaRef.IdentifyCUDATarget(Var);
- auto UserTarget = SemaRef.IdentifyCUDATarget(FD);
- if (VarTarget == Sema::CVT_Host &&
- (UserTarget == Sema::CFT_Device || UserTarget == Sema::CFT_HostDevice ||
- UserTarget == Sema::CFT_Global)) {
+ auto VarTarget = SemaRef.CUDA().IdentifyTarget(Var);
+ auto UserTarget = SemaRef.CUDA().IdentifyTarget(FD);
+ if (VarTarget == SemaCUDA::CVT_Host &&
+ (UserTarget == CUDAFunctionTarget::Device ||
+ UserTarget == CUDAFunctionTarget::HostDevice ||
+ UserTarget == CUDAFunctionTarget::Global)) {
// Diagnose ODR-use of host global variables in device functions.
// Reference of device global variables in host functions is allowed
// through shadow variables therefore it is not diagnosed.
if (SemaRef.LangOpts.CUDAIsDevice && !SemaRef.LangOpts.HIPStdPar) {
SemaRef.targetDiag(Loc, diag::err_ref_bad_target)
- << /*host*/ 2 << /*variable*/ 1 << Var << UserTarget;
+ << /*host*/ 2 << /*variable*/ 1 << Var
+ << llvm::to_underlying(UserTarget);
SemaRef.targetDiag(Var->getLocation(),
Var->getType().isConstQualified()
? diag::note_cuda_const_var_unpromoted
: diag::note_cuda_host_var);
}
- } else if (VarTarget == Sema::CVT_Device &&
+ } else if (VarTarget == SemaCUDA::CVT_Device &&
!Var->hasAttr<CUDASharedAttr>() &&
- (UserTarget == Sema::CFT_Host ||
- UserTarget == Sema::CFT_HostDevice)) {
+ (UserTarget == CUDAFunctionTarget::Host ||
+ UserTarget == CUDAFunctionTarget::HostDevice)) {
// Record a CUDA/HIP device side variable if it is ODR-used
// by host code. This is done conservatively, when the variable is
// referenced in any of the following contexts:
@@ -19197,7 +18180,10 @@ MarkVarDeclODRUsed(ValueDecl *V, SourceLocation Loc, Sema &SemaRef,
// externalize the static device side variable ODR-used by host code.
if (!Var->hasExternalStorage())
SemaRef.getASTContext().CUDADeviceVarODRUsedByHost.insert(Var);
- else if (SemaRef.LangOpts.GPURelocatableDeviceCode)
+ else if (SemaRef.LangOpts.GPURelocatableDeviceCode &&
+ (!FD || (!FD->getDescribedFunctionTemplate() &&
+ SemaRef.getASTContext().GetGVALinkageForFunction(FD) ==
+ GVA_StrongExternal)))
SemaRef.getASTContext().CUDAExternalDeviceDeclODRUsedByHost.insert(Var);
}
}
@@ -19433,7 +18419,7 @@ static bool captureInBlock(BlockScopeInfo *BSI, ValueDecl *Var,
const bool HasBlocksAttr = Var->hasAttr<BlocksAttr>();
if (HasBlocksAttr || CaptureType->isReferenceType() ||
- (S.getLangOpts().OpenMP && S.isOpenMPCapturedDecl(Var))) {
+ (S.getLangOpts().OpenMP && S.OpenMP().isOpenMPCapturedDecl(Var))) {
// Block capture by reference does not change the capture or
// declaration reference types.
ByRef = true;
@@ -19463,7 +18449,7 @@ static bool captureInCapturedRegion(
ByRef = (Kind == Sema::TryCapture_ExplicitByRef);
} else if (S.getLangOpts().OpenMP && RSI->CapRegionKind == CR_OpenMP) {
// Using an LValue reference type is consistent with Lambdas (see below).
- if (S.isOpenMPCapturedDecl(Var)) {
+ if (S.OpenMP().isOpenMPCapturedDecl(Var)) {
bool HasConst = DeclRefType.isConstQualified();
DeclRefType = DeclRefType.getUnqualifiedType();
// Don't lose diagnostics about assignments to const.
@@ -19471,11 +18457,11 @@ static bool captureInCapturedRegion(
DeclRefType.addConst();
}
// Do not capture firstprivates in tasks.
- if (S.isOpenMPPrivateDecl(Var, RSI->OpenMPLevel, RSI->OpenMPCaptureLevel) !=
- OMPC_unknown)
+ if (S.OpenMP().isOpenMPPrivateDecl(Var, RSI->OpenMPLevel,
+ RSI->OpenMPCaptureLevel) != OMPC_unknown)
return true;
- ByRef = S.isOpenMPCapturedByRef(Var, RSI->OpenMPLevel,
- RSI->OpenMPCaptureLevel);
+ ByRef = S.OpenMP().isOpenMPCapturedByRef(Var, RSI->OpenMPLevel,
+ RSI->OpenMPCaptureLevel);
}
if (ByRef)
@@ -19507,16 +18493,6 @@ static bool captureInLambda(LambdaScopeInfo *LSI, ValueDecl *Var,
ByRef = (LSI->ImpCaptureStyle == LambdaScopeInfo::ImpCap_LambdaByref);
}
- BindingDecl *BD = dyn_cast<BindingDecl>(Var);
- // FIXME: We should support capturing structured bindings in OpenMP.
- if (!Invalid && BD && S.LangOpts.OpenMP) {
- if (BuildAndDiagnose) {
- S.Diag(Loc, diag::err_capture_binding_openmp) << Var;
- S.Diag(Var->getLocation(), diag::note_entity_declared_at) << Var;
- }
- Invalid = true;
- }
-
if (BuildAndDiagnose && S.Context.getTargetInfo().getTriple().isWasm() &&
CaptureType.getNonReferenceType().isWebAssemblyReferenceType()) {
S.Diag(Loc, diag::err_wasm_ca_reference) << 0;
@@ -19715,6 +18691,10 @@ bool Sema::tryCaptureVariable(
DeclContext *VarDC = Var->getDeclContext();
DeclContext *DC = CurContext;
+ // Skip past RequiresExprBodys because they don't constitute function scopes.
+ while (DC->isRequiresExprBody())
+ DC = DC->getParent();
+
// tryCaptureVariable is called every time a DeclRef is formed,
// it can therefore have non-negigible impact on performances.
// For local variables and when there is no capturing scope,
@@ -19722,6 +18702,12 @@ bool Sema::tryCaptureVariable(
if (CapturingFunctionScopes == 0 && (!BuildAndDiagnose || VarDC == DC))
return true;
+ // Exception: Function parameters are not tied to the function's DeclContext
+ // until we enter the function definition. Capturing them anyway would result
+ // in an out-of-bounds error while traversing DC and its parents.
+ if (isa<ParmVarDecl>(Var) && !VarDC->isFunctionOrMethod())
+ return true;
+
const auto *VD = dyn_cast<VarDecl>(Var);
if (VD) {
if (VD->isInitCapture())
@@ -19746,9 +18732,9 @@ bool Sema::tryCaptureVariable(
// Capture global variables if it is required to use private copy of this
// variable.
bool IsGlobal = !VD->hasLocalStorage();
- if (IsGlobal &&
- !(LangOpts.OpenMP && isOpenMPCapturedDecl(Var, /*CheckScopeInfo=*/true,
- MaxFunctionScopesIndex)))
+ if (IsGlobal && !(LangOpts.OpenMP &&
+ OpenMP().isOpenMPCapturedDecl(Var, /*CheckScopeInfo=*/true,
+ MaxFunctionScopesIndex)))
return true;
if (isa<VarDecl>(Var))
@@ -19858,7 +18844,15 @@ bool Sema::tryCaptureVariable(
// just break here. Similarly, global variables that are captured in a
// target region should not be captured outside the scope of the region.
if (RSI->CapRegionKind == CR_OpenMP) {
- OpenMPClauseKind IsOpenMPPrivateDecl = isOpenMPPrivateDecl(
+ // FIXME: We should support capturing structured bindings in OpenMP.
+ if (isa<BindingDecl>(Var)) {
+ if (BuildAndDiagnose) {
+ Diag(ExprLoc, diag::err_capture_binding_openmp) << Var;
+ Diag(Var->getLocation(), diag::note_entity_declared_at) << Var;
+ }
+ return true;
+ }
+ OpenMPClauseKind IsOpenMPPrivateDecl = OpenMP().isOpenMPPrivateDecl(
Var, RSI->OpenMPLevel, RSI->OpenMPCaptureLevel);
// If the variable is private (i.e. not captured) and has variably
// modified type, we still need to capture the type for correct
@@ -19869,7 +18863,8 @@ bool Sema::tryCaptureVariable(
QualType QTy = Var->getType();
if (ParmVarDecl *PVD = dyn_cast_or_null<ParmVarDecl>(Var))
QTy = PVD->getOriginalType();
- for (int I = 1, E = getNumberOfConstructScopes(RSI->OpenMPLevel);
+ for (int I = 1,
+ E = OpenMP().getNumberOfConstructScopes(RSI->OpenMPLevel);
I < E; ++I) {
auto *OuterRSI = cast<CapturedRegionScopeInfo>(
FunctionScopes[FunctionScopesIndex - I]);
@@ -19881,18 +18876,19 @@ bool Sema::tryCaptureVariable(
}
bool IsTargetCap =
IsOpenMPPrivateDecl != OMPC_private &&
- isOpenMPTargetCapturedDecl(Var, RSI->OpenMPLevel,
- RSI->OpenMPCaptureLevel);
+ OpenMP().isOpenMPTargetCapturedDecl(Var, RSI->OpenMPLevel,
+ RSI->OpenMPCaptureLevel);
// Do not capture global if it is not privatized in outer regions.
bool IsGlobalCap =
- IsGlobal && isOpenMPGlobalCapturedDecl(Var, RSI->OpenMPLevel,
- RSI->OpenMPCaptureLevel);
+ IsGlobal && OpenMP().isOpenMPGlobalCapturedDecl(
+ Var, RSI->OpenMPLevel, RSI->OpenMPCaptureLevel);
// When we detect target captures we are looking from inside the
// target region, therefore we need to propagate the capture from the
// enclosing region. Therefore, the capture is not initially nested.
if (IsTargetCap)
- adjustOpenMPTargetScopeIndex(FunctionScopesIndex, RSI->OpenMPLevel);
+ OpenMP().adjustOpenMPTargetScopeIndex(FunctionScopesIndex,
+ RSI->OpenMPLevel);
if (IsTargetCap || IsOpenMPPrivateDecl == OMPC_private ||
(IsGlobal && !IsGlobalCap)) {
@@ -20212,18 +19208,17 @@ static ExprResult rebuildPotentialResultsAsNonOdrUsed(Sema &S, Expr *E,
ExprResult Sub = Rebuild(LHS);
if (!Sub.isUsable())
return Sub;
- LHS = Sub.get();
+ BO->setLHS(Sub.get());
// -- If e is a comma expression, ...
} else if (BO->getOpcode() == BO_Comma) {
ExprResult Sub = Rebuild(RHS);
if (!Sub.isUsable())
return Sub;
- RHS = Sub.get();
+ BO->setRHS(Sub.get());
} else {
break;
}
- return S.BuildBinOp(nullptr, BO->getOperatorLoc(), BO->getOpcode(),
- LHS, RHS);
+ return ExprResult(BO);
}
// -- If e has the form (e1)...
@@ -20653,9 +19648,6 @@ static void DoMarkBindingDeclReferenced(Sema &SemaRef, SourceLocation Loc,
}
}
-/// Mark a variable referenced, and check whether it is odr-used
-/// (C++ [basic.def.odr]p2, C99 6.9p3). Note that this should not be
-/// used directly for normal expressions referring to VarDecl.
void Sema::MarkVariableReferenced(SourceLocation Loc, VarDecl *Var) {
DoMarkVarDeclReferenced(*this, Loc, Var, nullptr, RefsMinusAssignments);
}
@@ -20668,20 +19660,42 @@ void Sema::MarkVariableReferenced(SourceLocation Loc, VarDecl *Var) {
static void FixDependencyOfIdExpressionsInLambdaWithDependentObjectParameter(
Sema &SemaRef, ValueDecl *D, Expr *E) {
auto *ID = dyn_cast<DeclRefExpr>(E);
- if (!ID || ID->isTypeDependent())
+ if (!ID || ID->isTypeDependent() || !ID->refersToEnclosingVariableOrCapture())
return;
+ // If any enclosing lambda with a dependent explicit object parameter either
+ // explicitly captures the variable by value, or has a capture default of '='
+ // and does not capture the variable by reference, then the type of the DRE
+ // is dependent on the type of that lambda's explicit object parameter.
auto IsDependent = [&]() {
- const LambdaScopeInfo *LSI = SemaRef.getCurLambda();
- if (!LSI)
- return false;
- if (!LSI->ExplicitObjectParameter ||
- !LSI->ExplicitObjectParameter->getType()->isDependentType())
- return false;
- if (!LSI->CaptureMap.count(D))
- return false;
- const Capture &Cap = LSI->getCapture(D);
- return !Cap.isCopyCapture();
+ for (auto *Scope : llvm::reverse(SemaRef.FunctionScopes)) {
+ auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Scope);
+ if (!LSI)
+ continue;
+
+ if (LSI->Lambda && !LSI->Lambda->Encloses(SemaRef.CurContext) &&
+ LSI->AfterParameterList)
+ return false;
+
+ const auto *MD = LSI->CallOperator;
+ if (MD->getType().isNull())
+ continue;
+
+ const auto *Ty = MD->getType()->getAs<FunctionProtoType>();
+ if (!Ty || !MD->isExplicitObjectMemberFunction() ||
+ !Ty->getParamType(0)->isDependentType())
+ continue;
+
+ if (auto *C = LSI->CaptureMap.count(D) ? &LSI->getCapture(D) : nullptr) {
+ if (C->isCopyCapture())
+ return true;
+ continue;
+ }
+
+ if (LSI->ImpCaptureStyle == LambdaScopeInfo::ImpCap_LambdaByval)
+ return true;
+ }
+ return false;
}();
ID->setCapturedByCopyInLambdaWithExplicitObjectParameter(
@@ -20692,8 +19706,8 @@ static void
MarkExprReferenced(Sema &SemaRef, SourceLocation Loc, Decl *D, Expr *E,
bool MightBeOdrUse,
llvm::DenseMap<const VarDecl *, int> &RefsMinusAssignments) {
- if (SemaRef.isInOpenMPDeclareTargetContext())
- SemaRef.checkDeclIsAllowedInOpenMPTarget(E, D);
+ if (SemaRef.OpenMP().isInOpenMPDeclareTargetContext())
+ SemaRef.OpenMP().checkDeclIsAllowedInOpenMPTarget(E, D);
if (VarDecl *Var = dyn_cast<VarDecl>(D)) {
DoMarkVarDeclReferenced(SemaRef, Loc, Var, E, RefsMinusAssignments);
@@ -20734,10 +19748,6 @@ MarkExprReferenced(Sema &SemaRef, SourceLocation Loc, Decl *D, Expr *E,
SemaRef.MarkAnyDeclReferenced(Loc, DM, MightBeOdrUse);
}
-/// Perform reference-marking and odr-use handling for a DeclRefExpr.
-///
-/// Note, this may change the dependence of the DeclRefExpr, and so needs to be
-/// handled with care if the DeclRefExpr is not newly-created.
void Sema::MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base) {
// TODO: update this with DR# once a defect report is filed.
// C++11 defect. The address of a pure member should not be an ODR use, even
@@ -20760,7 +19770,6 @@ void Sema::MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base) {
RefsMinusAssignments);
}
-/// Perform reference-marking and odr-use handling for a MemberExpr.
void Sema::MarkMemberReferenced(MemberExpr *E) {
// C++11 [basic.def.odr]p2:
// A non-overloaded function whose name appears as a potentially-evaluated
@@ -20780,7 +19789,6 @@ void Sema::MarkMemberReferenced(MemberExpr *E) {
RefsMinusAssignments);
}
-/// Perform reference-marking and odr-use handling for a FunctionParmPackExpr.
void Sema::MarkFunctionParmPackReferenced(FunctionParmPackExpr *E) {
for (VarDecl *VD : *E)
MarkExprReferenced(*this, E->getParameterPackLocation(), VD, E, true,
@@ -20898,12 +19906,6 @@ public:
};
} // namespace
-/// Mark any declarations that appear within this expression or any
-/// potentially-evaluated subexpressions as "referenced".
-///
-/// \param SkipLocalVariables If true, don't mark local variables as
-/// 'referenced'.
-/// \param StopAt Subexpressions that we shouldn't recurse into.
void Sema::MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables,
ArrayRef<const Expr*> StopAt) {
@@ -21052,7 +20054,7 @@ void Sema::DiagnoseAssignmentAsCondition(Expr *E) {
Selector Sel = ME->getSelector();
// self = [<foo> init...]
- if (isSelfExpr(Op->getLHS()) && ME->getMethodFamily() == OMF_init)
+ if (ObjC().isSelfExpr(Op->getLHS()) && ME->getMethodFamily() == OMF_init)
diagnostic = diag::warn_condition_is_idiomatic_assignment;
// <foo> = [<bar> nextObject]
@@ -21090,8 +20092,6 @@ void Sema::DiagnoseAssignmentAsCondition(Expr *E) {
<< FixItHint::CreateReplacement(Loc, "==");
}
-/// Redundant parentheses over an equality comparison can indicate
-/// that the user intended an assignment used as condition.
void Sema::DiagnoseEqualityWithExtraParens(ParenExpr *ParenE) {
// Don't warn if the parens came from a macro.
SourceLocation parenLoc = ParenE->getBeginLoc();
@@ -21613,8 +20613,6 @@ ExprResult RebuildUnknownAnyExpr::resolveDecl(Expr *E, ValueDecl *VD) {
return E;
}
-/// Check a cast of an unknown-any type. We intentionally only
-/// trigger this for C-style casts.
ExprResult Sema::checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path) {
@@ -21705,8 +20703,6 @@ static ExprResult diagnoseUnknownAnyExpr(Sema &S, Expr *E) {
return ExprError();
}
-/// Check for operands with placeholder types and complain if found.
-/// Returns ExprError() if there was an error and no recovery was possible.
ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
if (!Context.isDependenceAllowed()) {
// C cannot handle TypoExpr nodes on either side of a binop because it
@@ -21721,6 +20717,27 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
if (!placeholderType) return E;
switch (placeholderType->getKind()) {
+ case BuiltinType::UnresolvedTemplate: {
+ auto *ULE = cast<UnresolvedLookupExpr>(E);
+ const DeclarationNameInfo &NameInfo = ULE->getNameInfo();
+ // There's only one FoundDecl for UnresolvedTemplate type. See
+ // BuildTemplateIdExpr.
+ NamedDecl *Temp = *ULE->decls_begin();
+ const bool IsTypeAliasTemplateDecl = isa<TypeAliasTemplateDecl>(Temp);
+
+ if (NestedNameSpecifierLoc Loc = ULE->getQualifierLoc(); Loc.hasQualifier())
+ Diag(NameInfo.getLoc(), diag::err_template_kw_refers_to_type_template)
+ << Loc.getNestedNameSpecifier() << NameInfo.getName().getAsString()
+ << Loc.getSourceRange() << IsTypeAliasTemplateDecl;
+ else
+ Diag(NameInfo.getLoc(), diag::err_template_kw_refers_to_type_template)
+ << "" << NameInfo.getName().getAsString() << ULE->getSourceRange()
+ << IsTypeAliasTemplateDecl;
+ Diag(Temp->getLocation(), diag::note_referenced_type_template)
+ << IsTypeAliasTemplateDecl;
+
+ return CreateRecoveryExpr(NameInfo.getBeginLoc(), NameInfo.getEndLoc(), {});
+ }
// Overloaded expressions.
case BuiltinType::Overload: {
@@ -21762,8 +20779,8 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
// ARC unbridged casts.
case BuiltinType::ARCUnbridgedCast: {
- Expr *realCast = stripARCUnbridgedCast(E);
- diagnoseARCUnbridgedCast(realCast);
+ Expr *realCast = ObjC().stripARCUnbridgedCast(E);
+ ObjC().diagnoseARCUnbridgedCast(realCast);
return realCast;
}
@@ -21773,7 +20790,7 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
// Pseudo-objects.
case BuiltinType::PseudoObject:
- return checkPseudoObjectRValue(E);
+ return PseudoObject().checkRValue(E);
case BuiltinType::BuiltinFn: {
// Accept __noop without parens by implicitly converting it to a call expr.
@@ -21833,8 +20850,9 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
return ExprError();
// Expressions of unknown type.
- case BuiltinType::OMPArraySection:
- Diag(E->getBeginLoc(), diag::err_omp_array_section_use);
+ case BuiltinType::ArraySection:
+ Diag(E->getBeginLoc(), diag::err_array_section_use)
+ << cast<ArraySectionExpr>(E)->isOMPArraySection();
return ExprError();
// Expressions of unknown type.
@@ -21861,6 +20879,8 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
+#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/AMDGPUTypes.def"
#define BUILTIN_TYPE(Id, SingletonId) case BuiltinType::Id:
#define PLACEHOLDER_TYPE(Id, SingletonId)
#include "clang/AST/BuiltinTypes.def"
@@ -21878,61 +20898,6 @@ bool Sema::CheckCaseExpression(Expr *E) {
return false;
}
-/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
-ExprResult
-Sema::ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) {
- assert((Kind == tok::kw___objc_yes || Kind == tok::kw___objc_no) &&
- "Unknown Objective-C Boolean value!");
- QualType BoolT = Context.ObjCBuiltinBoolTy;
- if (!Context.getBOOLDecl()) {
- LookupResult Result(*this, &Context.Idents.get("BOOL"), OpLoc,
- Sema::LookupOrdinaryName);
- if (LookupName(Result, getCurScope()) && Result.isSingleResult()) {
- NamedDecl *ND = Result.getFoundDecl();
- if (TypedefDecl *TD = dyn_cast<TypedefDecl>(ND))
- Context.setBOOLDecl(TD);
- }
- }
- if (Context.getBOOLDecl())
- BoolT = Context.getBOOLType();
- return new (Context)
- ObjCBoolLiteralExpr(Kind == tok::kw___objc_yes, BoolT, OpLoc);
-}
-
-ExprResult Sema::ActOnObjCAvailabilityCheckExpr(
- llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc,
- SourceLocation RParen) {
- auto FindSpecVersion =
- [&](StringRef Platform) -> std::optional<VersionTuple> {
- auto Spec = llvm::find_if(AvailSpecs, [&](const AvailabilitySpec &Spec) {
- return Spec.getPlatform() == Platform;
- });
- // Transcribe the "ios" availability check to "maccatalyst" when compiling
- // for "maccatalyst" if "maccatalyst" is not specified.
- if (Spec == AvailSpecs.end() && Platform == "maccatalyst") {
- Spec = llvm::find_if(AvailSpecs, [&](const AvailabilitySpec &Spec) {
- return Spec.getPlatform() == "ios";
- });
- }
- if (Spec == AvailSpecs.end())
- return std::nullopt;
- return Spec->getVersion();
- };
-
- VersionTuple Version;
- if (auto MaybeVersion =
- FindSpecVersion(Context.getTargetInfo().getPlatformName()))
- Version = *MaybeVersion;
-
- // The use of `@available` in the enclosing context should be analyzed to
- // warn when it's used inappropriately (i.e. not if(@available)).
- if (FunctionScopeInfo *Context = getCurFunctionAvailabilityContext())
- Context->HasPotentialAvailabilityViolations = true;
-
- return new (Context)
- ObjCAvailabilityCheckExpr(Version, AtLoc, RParen, Context.BoolTy);
-}
-
ExprResult Sema::CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs, QualType T) {
if (!Context.getLangOpts().RecoveryAST)
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
index 953bfe484a52..de50786f4d6c 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
@@ -38,12 +38,16 @@
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaCUDA.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/SemaLambda.h"
+#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaPPC.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TypeSize.h"
@@ -51,13 +55,9 @@
using namespace clang;
using namespace sema;
-/// Handle the result of the special case name lookup for inheriting
-/// constructor declarations. 'NS::X::X' and 'NS::X<...>::X' are treated as
-/// constructor names in member using declarations, even if 'X' is not the
-/// name of the corresponding type.
ParsedType Sema::getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
- IdentifierInfo &Name) {
+ const IdentifierInfo &Name) {
NestedNameSpecifier *NNS = SS.getScopeRep();
// Convert the nested-name-specifier into a type.
@@ -89,10 +89,9 @@ ParsedType Sema::getInheritingConstructorName(CXXScopeSpec &SS,
Context.getTrivialTypeSourceInfo(Type, NameLoc));
}
-ParsedType Sema::getConstructorName(IdentifierInfo &II,
- SourceLocation NameLoc,
- Scope *S, CXXScopeSpec &SS,
- bool EnteringContext) {
+ParsedType Sema::getConstructorName(const IdentifierInfo &II,
+ SourceLocation NameLoc, Scope *S,
+ CXXScopeSpec &SS, bool EnteringContext) {
CXXRecordDecl *CurClass = getCurrentClass(S, &SS);
assert(CurClass && &II == CurClass->getIdentifier() &&
"not a constructor name");
@@ -140,9 +139,9 @@ ParsedType Sema::getConstructorName(IdentifierInfo &II,
return ParsedType::make(T);
}
-ParsedType Sema::getDestructorName(IdentifierInfo &II, SourceLocation NameLoc,
- Scope *S, CXXScopeSpec &SS,
- ParsedType ObjectTypePtr,
+ParsedType Sema::getDestructorName(const IdentifierInfo &II,
+ SourceLocation NameLoc, Scope *S,
+ CXXScopeSpec &SS, ParsedType ObjectTypePtr,
bool EnteringContext) {
// Determine where to perform name lookup.
@@ -500,7 +499,7 @@ bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
//
// double operator""_Bq(long double); // OK: not a reserved identifier
// double operator"" _Bq(long double); // ill-formed, no diagnostic required
- IdentifierInfo *II = Name.Identifier;
+ const IdentifierInfo *II = Name.Identifier;
ReservedIdentifierStatus Status = II->isReserved(PP.getLangOpts());
SourceLocation Loc = Name.getEndLoc();
if (!PP.getSourceManager().isInSystemHeader(Loc)) {
@@ -541,7 +540,6 @@ bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
llvm_unreachable("unknown nested name specifier kind");
}
-/// Build a C++ typeid expression with a type operand.
ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
@@ -569,7 +567,6 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
SourceRange(TypeidLoc, RParenLoc));
}
-/// Build a C++ typeid expression with an expression operand.
ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *E,
@@ -744,7 +741,6 @@ getUuidAttrOfType(Sema &SemaRef, QualType QT,
}
}
-/// Build a Microsoft __uuidof expression with a type operand.
ExprResult Sema::BuildCXXUuidof(QualType Type,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
@@ -764,7 +760,6 @@ ExprResult Sema::BuildCXXUuidof(QualType Type,
CXXUuidofExpr(Type, Operand, Guid, SourceRange(TypeidLoc, RParenLoc));
}
-/// Build a Microsoft __uuidof expression with an expression operand.
ExprResult Sema::BuildCXXUuidof(QualType Type, SourceLocation TypeidLoc,
Expr *E, SourceLocation RParenLoc) {
MSGuidDecl *Guid = nullptr;
@@ -812,7 +807,6 @@ Sema::ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
return BuildCXXUuidof(GuidType, OpLoc, (Expr*)TyOrExpr, RParenLoc);
}
-/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult
Sema::ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) {
assert((Kind == tok::kw_true || Kind == tok::kw_false) &&
@@ -821,13 +815,11 @@ Sema::ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) {
CXXBoolLiteralExpr(Kind == tok::kw_true, Context.BoolTy, OpLoc);
}
-/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult
Sema::ActOnCXXNullPtrLiteral(SourceLocation Loc) {
return new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc);
}
-/// ActOnCXXThrow - Parse throw expressions.
ExprResult
Sema::ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *Ex) {
bool IsThrownVarInScope = false;
@@ -884,12 +876,18 @@ ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
// Exceptions aren't allowed in CUDA device code.
if (getLangOpts().CUDA)
- CUDADiagIfDeviceCode(OpLoc, diag::err_cuda_device_exceptions)
- << "throw" << CurrentCUDATarget();
+ CUDA().DiagIfDeviceCode(OpLoc, diag::err_cuda_device_exceptions)
+ << "throw" << llvm::to_underlying(CUDA().CurrentTarget());
if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope())
Diag(OpLoc, diag::err_omp_simd_region_cannot_use_stmt) << "throw";
+ // Exceptions that escape a compute construct are ill-formed.
+ if (getLangOpts().OpenACC && getCurScope() &&
+ getCurScope()->isInOpenACCComputeConstructScope(Scope::TryScope))
+ Diag(OpLoc, diag::err_acc_branch_in_out_compute_construct)
+ << /*throw*/ 2 << /*out of*/ 0;
+
if (Ex && !Ex->isTypeDependent()) {
// Initialize the exception result. This implicitly weeds out
// abstract types or types with inaccessible copy constructors.
@@ -923,7 +921,7 @@ ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
// PPC MMA non-pointer types are not allowed as throw expr types.
if (Ex && Context.getTargetInfo().getTriple().isPPC64())
- CheckPPCMMAType(Ex->getType(), Ex->getBeginLoc());
+ PPC().CheckPPCMMAType(Ex->getType(), Ex->getBeginLoc());
return new (Context)
CXXThrowExpr(Ex, Context.VoidTy, OpLoc, IsThrownVarInScope);
@@ -978,7 +976,6 @@ static void getUnambiguousPublicSubobjects(
}
}
-/// CheckCXXThrowOperand - Validate the operand of a throw.
bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc,
QualType ExceptionObjectTy, Expr *E) {
// If the type of the exception would be an incomplete type or a pointer
@@ -1220,7 +1217,7 @@ static QualType adjustCVQualifiersForCXXThisWithinLambda(
: nullptr;
}
}
- return ASTCtx.getPointerType(ClassType);
+ return ThisTy;
}
QualType Sema::getCurrentThisType() {
@@ -1409,26 +1406,42 @@ bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
}
ExprResult Sema::ActOnCXXThis(SourceLocation Loc) {
- /// C++ 9.3.2: In the body of a non-static member function, the keyword this
- /// is a non-lvalue expression whose value is the address of the object for
- /// which the function is called.
+ // C++20 [expr.prim.this]p1:
+ // The keyword this names a pointer to the object for which an
+ // implicit object member function is invoked or a non-static
+ // data member's initializer is evaluated.
QualType ThisTy = getCurrentThisType();
- if (ThisTy.isNull()) {
- DeclContext *DC = getFunctionLevelDeclContext();
+ if (CheckCXXThisType(Loc, ThisTy))
+ return ExprError();
- if (const auto *Method = dyn_cast<CXXMethodDecl>(DC);
- Method && Method->isExplicitObjectMemberFunction()) {
- return Diag(Loc, diag::err_invalid_this_use) << 1;
- }
+ return BuildCXXThisExpr(Loc, ThisTy, /*IsImplicit=*/false);
+}
- if (isLambdaCallWithExplicitObjectParameter(CurContext))
- return Diag(Loc, diag::err_invalid_this_use) << 1;
+bool Sema::CheckCXXThisType(SourceLocation Loc, QualType Type) {
+ if (!Type.isNull())
+ return false;
- return Diag(Loc, diag::err_invalid_this_use) << 0;
+ // C++20 [expr.prim.this]p3:
+ // If a declaration declares a member function or member function template
+ // of a class X, the expression this is a prvalue of type
+ // "pointer to cv-qualifier-seq X" wherever X is the current class between
+ // the optional cv-qualifier-seq and the end of the function-definition,
+ // member-declarator, or declarator. It shall not appear within the
+ // declaration of either a static member function or an explicit object
+ // member function of the current class (although its type and value
+ // category are defined within such member functions as they are within
+ // an implicit object member function).
+ DeclContext *DC = getFunctionLevelDeclContext();
+ const auto *Method = dyn_cast<CXXMethodDecl>(DC);
+ if (Method && Method->isExplicitObjectMemberFunction()) {
+ Diag(Loc, diag::err_invalid_this_use) << 1;
+ } else if (Method && isLambdaCallWithExplicitObjectParameter(CurContext)) {
+ Diag(Loc, diag::err_invalid_this_use) << 1;
+ } else {
+ Diag(Loc, diag::err_invalid_this_use) << 0;
}
-
- return BuildCXXThisExpr(Loc, ThisTy, /*IsImplicit=*/false);
+ return true;
}
Expr *Sema::BuildCXXThisExpr(SourceLocation Loc, QualType Type,
@@ -1440,6 +1453,42 @@ Expr *Sema::BuildCXXThisExpr(SourceLocation Loc, QualType Type,
void Sema::MarkThisReferenced(CXXThisExpr *This) {
CheckCXXThisCapture(This->getExprLoc());
+ if (This->isTypeDependent())
+ return;
+
+ // Check if 'this' is captured by value in a lambda with a dependent explicit
+ // object parameter, and mark it as type-dependent as well if so.
+ auto IsDependent = [&]() {
+ for (auto *Scope : llvm::reverse(FunctionScopes)) {
+ auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Scope);
+ if (!LSI)
+ continue;
+
+ if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext) &&
+ LSI->AfterParameterList)
+ return false;
+
+ // If this lambda captures 'this' by value, then 'this' is dependent iff
+ // this lambda has a dependent explicit object parameter. If we can't
+ // determine whether it does (e.g. because the CXXMethodDecl's type is
+ // null), assume it doesn't.
+ if (LSI->isCXXThisCaptured()) {
+ if (!LSI->getCXXThisCapture().isCopyCapture())
+ continue;
+
+ const auto *MD = LSI->CallOperator;
+ if (MD->getType().isNull())
+ return false;
+
+ const auto *Ty = MD->getType()->getAs<FunctionProtoType>();
+ return Ty && MD->isExplicitObjectMemberFunction() &&
+ Ty->getParamType(0)->isDependentType();
+ }
+ }
+ return false;
+ }();
+
+ This->setCapturedByCopyInLambdaWithExplicitObjectParameter(IsDependent);
}
bool Sema::isThisOutsideMemberFunctionBody(QualType BaseType) {
@@ -1454,10 +1503,6 @@ bool Sema::isThisOutsideMemberFunctionBody(QualType BaseType) {
return Class && Class->isBeingDefined();
}
-/// Parse construction of a specified type.
-/// Can be interpreted either as function-style casting ("int(x)")
-/// or class type construction ("ClassType(x,y,z)")
-/// or creation of a value-initialized type ("int()").
ExprResult
Sema::ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
@@ -1554,12 +1599,13 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
TemplateDeductionInfo Info(Deduce->getExprLoc());
TemplateDeductionResult Result =
DeduceAutoType(TInfo->getTypeLoc(), Deduce, DeducedType, Info);
- if (Result != TDK_Success && Result != TDK_AlreadyDiagnosed)
+ if (Result != TemplateDeductionResult::Success &&
+ Result != TemplateDeductionResult::AlreadyDiagnosed)
return ExprError(Diag(TyBeginLoc, diag::err_auto_expr_deduction_failure)
<< Ty << Deduce->getType() << FullRange
<< Deduce->getSourceRange());
if (DeducedType.isNull()) {
- assert(Result == TDK_AlreadyDiagnosed);
+ assert(Result == TemplateDeductionResult::AlreadyDiagnosed);
return ExprError();
}
@@ -1649,17 +1695,17 @@ bool Sema::isUsualDeallocationFunction(const CXXMethodDecl *Method) {
// [CUDA] Ignore this function, if we can't call it.
const FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
if (getLangOpts().CUDA) {
- auto CallPreference = IdentifyCUDAPreference(Caller, Method);
+ auto CallPreference = CUDA().IdentifyPreference(Caller, Method);
// If it's not callable at all, it's not the right function.
- if (CallPreference < CFP_WrongSide)
+ if (CallPreference < SemaCUDA::CFP_WrongSide)
return false;
- if (CallPreference == CFP_WrongSide) {
+ if (CallPreference == SemaCUDA::CFP_WrongSide) {
// Maybe. We have to check if there are better alternatives.
DeclContext::lookup_result R =
Method->getDeclContext()->lookup(Method->getDeclName());
for (const auto *D : R) {
if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
- if (IdentifyCUDAPreference(Caller, FD) > CFP_WrongSide)
+ if (CUDA().IdentifyPreference(Caller, FD) > SemaCUDA::CFP_WrongSide)
return false;
}
}
@@ -1678,7 +1724,7 @@ bool Sema::isUsualDeallocationFunction(const CXXMethodDecl *Method) {
return llvm::none_of(PreventedBy, [&](const FunctionDecl *FD) {
assert(FD->getNumParams() == 1 &&
"Only single-operand functions should be in PreventedBy");
- return IdentifyCUDAPreference(Caller, FD) >= CFP_HostDevice;
+ return CUDA().IdentifyPreference(Caller, FD) >= SemaCUDA::CFP_HostDevice;
});
}
@@ -1715,7 +1761,7 @@ namespace {
UsualDeallocFnInfo(Sema &S, DeclAccessPair Found)
: Found(Found), FD(dyn_cast<FunctionDecl>(Found->getUnderlyingDecl())),
Destroying(false), HasSizeT(false), HasAlignValT(false),
- CUDAPref(Sema::CFP_Native) {
+ CUDAPref(SemaCUDA::CFP_Native) {
// A function template declaration is never a usual deallocation function.
if (!FD)
return;
@@ -1741,7 +1787,7 @@ namespace {
// In CUDA, determine how much we'd like / dislike to call this.
if (S.getLangOpts().CUDA)
- CUDAPref = S.IdentifyCUDAPreference(
+ CUDAPref = S.CUDA().IdentifyPreference(
S.getCurFunctionDecl(/*AllowLambda=*/true), FD);
}
@@ -1772,7 +1818,7 @@ namespace {
DeclAccessPair Found;
FunctionDecl *FD;
bool Destroying, HasSizeT, HasAlignValT;
- Sema::CUDAFunctionPreference CUDAPref;
+ SemaCUDA::CUDAFunctionPreference CUDAPref;
};
}
@@ -1796,7 +1842,7 @@ static UsualDeallocFnInfo resolveDeallocationOverload(
for (auto I = R.begin(), E = R.end(); I != E; ++I) {
UsualDeallocFnInfo Info(S, I.getPair());
if (!Info || !isNonPlacementDeallocationFunction(S, Info.FD) ||
- Info.CUDAPref == Sema::CFP_Never)
+ Info.CUDAPref == SemaCUDA::CFP_Never)
continue;
if (!Best) {
@@ -1858,22 +1904,6 @@ static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc,
return Best && Best.HasSizeT;
}
-/// Parsed a C++ 'new' expression (C++ 5.3.4).
-///
-/// E.g.:
-/// @code new (memory) int[size][4] @endcode
-/// or
-/// @code ::new Foo(23, "hello") @endcode
-///
-/// \param StartLoc The first location of the expression.
-/// \param UseGlobal True if 'new' was prefixed with '::'.
-/// \param PlacementLParen Opening paren of the placement arguments.
-/// \param PlacementArgs Placement new arguments.
-/// \param PlacementRParen Closing paren of the placement arguments.
-/// \param TypeIdParens If the type is in parens, the source range.
-/// \param D The type to be allocated, as well as array dimensions.
-/// \param Initializer The initializing expression or initializer-list, or null
-/// if there is none.
ExprResult
Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen, MultiExprArg PlacementArgs,
@@ -2012,7 +2042,7 @@ ExprResult Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
if (DirectInitRange.isValid()) {
assert(Initializer && "Have parens but no initializer.");
InitStyle = CXXNewInitializationStyle::Parens;
- } else if (Initializer && isa<InitListExpr>(Initializer))
+ } else if (isa_and_nonnull<InitListExpr>(Initializer))
InitStyle = CXXNewInitializationStyle::Braces;
else {
assert((!Initializer || isa<ImplicitValueInitExpr>(Initializer) ||
@@ -2098,12 +2128,13 @@ ExprResult Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
TemplateDeductionInfo Info(Deduce->getExprLoc());
TemplateDeductionResult Result =
DeduceAutoType(AllocTypeInfo->getTypeLoc(), Deduce, DeducedType, Info);
- if (Result != TDK_Success && Result != TDK_AlreadyDiagnosed)
+ if (Result != TemplateDeductionResult::Success &&
+ Result != TemplateDeductionResult::AlreadyDiagnosed)
return ExprError(Diag(StartLoc, diag::err_auto_new_deduction_failure)
<< AllocType << Deduce->getType() << TypeRange
<< Deduce->getSourceRange());
if (DeducedType.isNull()) {
- assert(Result == TDK_AlreadyDiagnosed);
+ assert(Result == TemplateDeductionResult::AlreadyDiagnosed);
return ExprError();
}
AllocType = DeducedType;
@@ -2482,8 +2513,6 @@ ExprResult Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
DirectInitRange);
}
-/// Checks that a type is suitable as the allocated type
-/// in a new-expression.
bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R) {
// C++ 5.3.4p1: "[The] type shall be a complete object type, but not an
@@ -2655,13 +2684,9 @@ static bool resolveAllocationOverload(
return true;
case OR_Deleted: {
- if (Diagnose) {
- Candidates.NoteCandidates(
- PartialDiagnosticAt(R.getNameLoc(),
- S.PDiag(diag::err_ovl_deleted_call)
- << R.getLookupName() << Range),
- S, OCD_AllCandidates, Args);
- }
+ if (Diagnose)
+ S.DiagnoseUseOfDeletedFunction(R.getNameLoc(), Range, R.getLookupName(),
+ Candidates, Best->Function, Args);
return true;
}
}
@@ -2883,7 +2908,7 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
// expected function type.
TemplateDeductionInfo Info(StartLoc);
if (DeduceTemplateArguments(FnTmpl, nullptr, ExpectedFunctionType, Fn,
- Info))
+ Info) != TemplateDeductionResult::Success)
continue;
} else
Fn = cast<FunctionDecl>((*D)->getUnderlyingDecl());
@@ -2896,8 +2921,8 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
}
if (getLangOpts().CUDA)
- EraseUnwantedCUDAMatches(getCurFunctionDecl(/*AllowLambda=*/true),
- Matches);
+ CUDA().EraseUnwantedMatches(getCurFunctionDecl(/*AllowLambda=*/true),
+ Matches);
} else {
// C++1y [expr.new]p22:
// For a non-placement allocation function, the normal deallocation
@@ -2981,29 +3006,6 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
return false;
}
-/// DeclareGlobalNewDelete - Declare the global forms of operator new and
-/// delete. These are:
-/// @code
-/// // C++03:
-/// void* operator new(std::size_t) throw(std::bad_alloc);
-/// void* operator new[](std::size_t) throw(std::bad_alloc);
-/// void operator delete(void *) throw();
-/// void operator delete[](void *) throw();
-/// // C++11:
-/// void* operator new(std::size_t);
-/// void* operator new[](std::size_t);
-/// void operator delete(void *) noexcept;
-/// void operator delete[](void *) noexcept;
-/// // C++1y:
-/// void* operator new(std::size_t);
-/// void* operator new[](std::size_t);
-/// void operator delete(void *) noexcept;
-/// void operator delete[](void *) noexcept;
-/// void operator delete(void *, std::size_t) noexcept;
-/// void operator delete[](void *, std::size_t) noexcept;
-/// @endcode
-/// Note that the placement and nothrow forms of new are *not* implicitly
-/// declared. Their use requires including \<new\>.
void Sema::DeclareGlobalNewDelete() {
if (GlobalNewDeleteDeclared)
return;
@@ -3315,7 +3317,9 @@ bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
// FIXME: DiagnoseUseOfDecl?
if (Operator->isDeleted()) {
if (Diagnose) {
- Diag(StartLoc, diag::err_deleted_function_use);
+ StringLiteral *Msg = Operator->getDeletedMessage();
+ Diag(StartLoc, diag::err_deleted_function_use)
+ << (Msg != nullptr) << (Msg ? Msg->getString() : StringRef());
NoteDeletedFunction(Operator);
}
return true;
@@ -3615,10 +3619,6 @@ void Sema::AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
}
}
-/// ActOnCXXDelete - Parsed a C++ 'delete' expression (C++ 5.3.5), as in:
-/// @code ::delete ptr; @endcode
-/// or
-/// @code delete [] ptr; @endcode
ExprResult
Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
bool ArrayForm, Expr *ExE) {
@@ -3719,8 +3719,11 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
// The C++ standard bans deleting a pointer to a non-object type, which
// effectively bans deletion of "void*". However, most compilers support
// this, so we treat it as a warning unless we're in a SFINAE context.
- Diag(StartLoc, diag::ext_delete_void_ptr_operand)
- << Type << Ex.get()->getSourceRange();
+ // But we still prohibit this since C++26.
+ Diag(StartLoc, LangOpts.CPlusPlus26 ? diag::err_delete_incomplete
+ : diag::ext_delete_void_ptr_operand)
+ << (LangOpts.CPlusPlus26 ? Pointee : Type)
+ << Ex.get()->getSourceRange();
} else if (Pointee->isFunctionType() || Pointee->isVoidType() ||
Pointee->isSizelessType()) {
return ExprError(Diag(StartLoc, diag::err_delete_operand)
@@ -3729,7 +3732,10 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
// FIXME: This can result in errors if the definition was imported from a
// module but is hidden.
if (!RequireCompleteType(StartLoc, Pointee,
- diag::warn_delete_incomplete, Ex.get())) {
+ LangOpts.CPlusPlus26
+ ? diag::err_delete_incomplete
+ : diag::warn_delete_incomplete,
+ Ex.get())) {
if (const RecordType *RT = PointeeElem->getAs<RecordType>())
PointeeRD = cast<CXXRecordDecl>(RT->getDecl());
}
@@ -3762,7 +3768,7 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
// Otherwise, the usual operator delete[] should be the
// function we just found.
- else if (OperatorDelete && isa<CXXMethodDecl>(OperatorDelete))
+ else if (isa_and_nonnull<CXXMethodDecl>(OperatorDelete))
UsualArrayDeleteWantsSize =
UsualDeallocFnInfo(*this,
DeclAccessPair::make(OperatorDelete, AS_public))
@@ -3919,20 +3925,16 @@ static bool resolveBuiltinNewDeleteOverload(Sema &S, CallExpr *TheCall,
S, OCD_AmbiguousCandidates, Args);
return true;
- case OR_Deleted: {
- Candidates.NoteCandidates(
- PartialDiagnosticAt(R.getNameLoc(), S.PDiag(diag::err_ovl_deleted_call)
- << R.getLookupName() << Range),
- S, OCD_AllCandidates, Args);
+ case OR_Deleted:
+ S.DiagnoseUseOfDeletedFunction(R.getNameLoc(), Range, R.getLookupName(),
+ Candidates, Best->Function, Args);
return true;
}
- }
llvm_unreachable("Unreachable, bad result from BestViableFunction");
}
-ExprResult
-Sema::SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
- bool IsDelete) {
+ExprResult Sema::BuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
+ bool IsDelete) {
CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
if (!getLangOpts().CPlusPlus) {
Diag(TheCall->getExprLoc(), diag::err_builtin_requires_language)
@@ -4028,8 +4030,6 @@ Sema::ConditionResult Sema::ActOnConditionVariable(Decl *ConditionVar,
CK == ConditionKind::ConstexprIf);
}
-/// Check the use of the given variable as a C++ condition in an if,
-/// while, do-while, or switch statement.
ExprResult Sema::CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK) {
@@ -4067,7 +4067,6 @@ ExprResult Sema::CheckConditionVariable(VarDecl *ConditionVar,
llvm_unreachable("unexpected condition kind");
}
-/// CheckCXXBooleanCondition - Returns true if a conversion to bool is invalid.
ExprResult Sema::CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr) {
// C++11 6.4p4:
// The value of a condition that is an initialized declaration in a statement
@@ -4095,10 +4094,6 @@ ExprResult Sema::CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr) {
return E;
}
-/// Helper function to determine whether this is the (deprecated) C++
-/// conversion from a string literal to a pointer to non-const char or
-/// non-const wchar_t (for narrow and wide string literals,
-/// respectively).
bool
Sema::IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType) {
// Look inside the implicit cast, if it exists.
@@ -4200,18 +4195,14 @@ static ExprResult BuildCXXCastArgument(Sema &S,
}
}
-/// PerformImplicitConversion - Perform an implicit conversion of the
-/// expression From to the type ToType using the pre-computed implicit
-/// conversion sequence ICS. Returns the converted
-/// expression. Action is the kind of conversion we're performing,
-/// used in the error message.
ExprResult
Sema::PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence &ICS,
AssignmentAction Action,
CheckedConversionKind CCK) {
// C++ [over.match.oper]p7: [...] operands of class type are converted [...]
- if (CCK == CCK_ForBuiltinOverloadedOp && !From->getType()->isRecordType())
+ if (CCK == CheckedConversionKind::ForBuiltinOverloadedOp &&
+ !From->getType()->isRecordType())
return From;
switch (ICS.getKind()) {
@@ -4272,7 +4263,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// C++ [over.match.oper]p7:
// [...] the second standard conversion sequence of a user-defined
// conversion sequence is not applied.
- if (CCK == CCK_ForBuiltinOverloadedOp)
+ if (CCK == CheckedConversionKind::ForBuiltinOverloadedOp)
return From;
return PerformImplicitConversion(From, ToType, ICS.UserDefined.After,
@@ -4303,17 +4294,27 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
return From;
}
-/// PerformImplicitConversion - Perform an implicit conversion of the
-/// expression From to the type ToType by following the standard
-/// conversion sequence SCS. Returns the converted
-/// expression. Flavor is the context in which we're performing this
-/// conversion, for use in error messages.
+// adjustVectorType - Compute the intermediate cast type casting elements of the
+// from type to the elements of the to type without resizing the vector.
+static QualType adjustVectorType(ASTContext &Context, QualType FromTy,
+ QualType ToType, QualType *ElTy = nullptr) {
+ auto *ToVec = ToType->castAs<VectorType>();
+ QualType ElType = ToVec->getElementType();
+ if (ElTy)
+ *ElTy = ElType;
+ if (!FromTy->isVectorType())
+ return ElType;
+ auto *FromVec = FromTy->castAs<VectorType>();
+ return Context.getExtVectorType(ElType, FromVec->getNumElements());
+}
+
ExprResult
Sema::PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK) {
- bool CStyle = (CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast);
+ bool CStyle = (CCK == CheckedConversionKind::CStyleCast ||
+ CCK == CheckedConversionKind::FunctionalCast);
// Overall FIXME: we are recomputing too many types here and doing far too
// much extra work. What this means is that we need to keep track of more
@@ -4408,6 +4409,13 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
.get();
break;
+ case ICK_HLSL_Array_RValue:
+ FromType = Context.getArrayParameterType(FromType);
+ From = ImpCastExprToType(From, FromType, CK_HLSLArrayRValue, VK_PRValue,
+ /*BasePath=*/nullptr, CCK)
+ .get();
+ break;
+
case ICK_Function_To_Pointer:
FromType = Context.getPointerType(FromType);
From = ImpCastExprToType(From, FromType, CK_FunctionToPointerDecay,
@@ -4449,27 +4457,36 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
break;
case ICK_Integral_Promotion:
- case ICK_Integral_Conversion:
- if (ToType->isBooleanType()) {
+ case ICK_Integral_Conversion: {
+ QualType ElTy = ToType;
+ QualType StepTy = ToType;
+ if (ToType->isVectorType())
+ StepTy = adjustVectorType(Context, FromType, ToType, &ElTy);
+ if (ElTy->isBooleanType()) {
assert(FromType->castAs<EnumType>()->getDecl()->isFixed() &&
SCS.Second == ICK_Integral_Promotion &&
"only enums with fixed underlying type can promote to bool");
- From = ImpCastExprToType(From, ToType, CK_IntegralToBoolean, VK_PRValue,
+ From = ImpCastExprToType(From, StepTy, CK_IntegralToBoolean, VK_PRValue,
/*BasePath=*/nullptr, CCK)
.get();
} else {
- From = ImpCastExprToType(From, ToType, CK_IntegralCast, VK_PRValue,
+ From = ImpCastExprToType(From, StepTy, CK_IntegralCast, VK_PRValue,
/*BasePath=*/nullptr, CCK)
.get();
}
break;
+ }
case ICK_Floating_Promotion:
- case ICK_Floating_Conversion:
- From = ImpCastExprToType(From, ToType, CK_FloatingCast, VK_PRValue,
+ case ICK_Floating_Conversion: {
+ QualType StepTy = ToType;
+ if (ToType->isVectorType())
+ StepTy = adjustVectorType(Context, FromType, ToType);
+ From = ImpCastExprToType(From, StepTy, CK_FloatingCast, VK_PRValue,
/*BasePath=*/nullptr, CCK)
.get();
break;
+ }
case ICK_Complex_Promotion:
case ICK_Complex_Conversion: {
@@ -4492,16 +4509,21 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
break;
}
- case ICK_Floating_Integral:
- if (ToType->isRealFloatingType())
- From = ImpCastExprToType(From, ToType, CK_IntegralToFloating, VK_PRValue,
+ case ICK_Floating_Integral: {
+ QualType ElTy = ToType;
+ QualType StepTy = ToType;
+ if (ToType->isVectorType())
+ StepTy = adjustVectorType(Context, FromType, ToType, &ElTy);
+ if (ElTy->isRealFloatingType())
+ From = ImpCastExprToType(From, StepTy, CK_IntegralToFloating, VK_PRValue,
/*BasePath=*/nullptr, CCK)
.get();
else
- From = ImpCastExprToType(From, ToType, CK_FloatingToIntegral, VK_PRValue,
+ From = ImpCastExprToType(From, StepTy, CK_FloatingToIntegral, VK_PRValue,
/*BasePath=*/nullptr, CCK)
.get();
break;
+ }
case ICK_Fixed_Point_Conversion:
assert((FromType->isFixedPointType() || ToType->isFixedPointType()) &&
@@ -4555,10 +4577,10 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
if (From->getType()->isObjCObjectPointerType() &&
ToType->isObjCObjectPointerType())
- EmitRelatedResultTypeNote(From);
+ ObjC().EmitRelatedResultTypeNote(From);
} else if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers() &&
- !CheckObjCARCUnavailableWeakConversion(ToType,
- From->getType())) {
+ !ObjC().CheckObjCARCUnavailableWeakConversion(ToType,
+ From->getType())) {
if (Action == AA_Initializing)
Diag(From->getBeginLoc(), diag::err_arc_weak_unavailable_assign);
else
@@ -4593,11 +4615,11 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// FIXME: doing this here is really ugly.
if (Kind == CK_BlockPointerToObjCPointerCast) {
ExprResult E = From;
- (void) PrepareCastToObjCObjectPointer(E);
+ (void)ObjC().PrepareCastToObjCObjectPointer(E);
From = E.get();
}
if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers())
- CheckObjCConversion(SourceRange(), NewToType, From, CCK);
+ ObjC().CheckObjCConversion(SourceRange(), NewToType, From, CCK);
From = ImpCastExprToType(From, NewToType, Kind, VK_PRValue, &BasePath, CCK)
.get();
break;
@@ -4623,18 +4645,26 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
break;
}
- case ICK_Boolean_Conversion:
+ case ICK_Boolean_Conversion: {
// Perform half-to-boolean conversion via float.
if (From->getType()->isHalfType()) {
From = ImpCastExprToType(From, Context.FloatTy, CK_FloatingCast).get();
FromType = Context.FloatTy;
}
+ QualType ElTy = FromType;
+ QualType StepTy = ToType;
+ if (FromType->isVectorType()) {
+ if (getLangOpts().HLSL)
+ StepTy = adjustVectorType(Context, FromType, ToType);
+ ElTy = FromType->castAs<VectorType>()->getElementType();
+ }
- From = ImpCastExprToType(From, Context.BoolTy,
- ScalarTypeToBooleanCastKind(FromType), VK_PRValue,
+ From = ImpCastExprToType(From, StepTy, ScalarTypeToBooleanCastKind(ElTy),
+ VK_PRValue,
/*BasePath=*/nullptr, CCK)
.get();
break;
+ }
case ICK_Derived_To_Base: {
CXXCastPath BasePath;
@@ -4769,9 +4799,52 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
case ICK_Num_Conversion_Kinds:
case ICK_C_Only_Conversion:
case ICK_Incompatible_Pointer_Conversion:
+ case ICK_HLSL_Array_RValue:
+ case ICK_HLSL_Vector_Truncation:
+ case ICK_HLSL_Vector_Splat:
llvm_unreachable("Improper second standard conversion");
}
+ if (SCS.Dimension != ICK_Identity) {
+ // If SCS.Element is not ICK_Identity the To and From types must be HLSL
+ // vectors or matrices.
+
+ // TODO: Support HLSL matrices.
+ assert((!From->getType()->isMatrixType() && !ToType->isMatrixType()) &&
+ "Dimension conversion for matrix types is not implemented yet.");
+ assert(ToType->isVectorType() &&
+ "Dimension conversion is only supported for vector types.");
+ switch (SCS.Dimension) {
+ case ICK_HLSL_Vector_Splat: {
+ // Vector splat from any arithmetic type to a vector.
+ Expr *Elem = prepareVectorSplat(ToType, From).get();
+ From = ImpCastExprToType(Elem, ToType, CK_VectorSplat, VK_PRValue,
+ /*BasePath=*/nullptr, CCK)
+ .get();
+ break;
+ }
+ case ICK_HLSL_Vector_Truncation: {
+ // Note: HLSL built-in vectors are ExtVectors. Since this truncates a
+ // vector to a smaller vector, this can only operate on arguments where
+ // the source and destination types are ExtVectors.
+ assert(From->getType()->isExtVectorType() && ToType->isExtVectorType() &&
+ "HLSL vector truncation should only apply to ExtVectors");
+ auto *FromVec = From->getType()->castAs<VectorType>();
+ auto *ToVec = ToType->castAs<VectorType>();
+ QualType ElType = FromVec->getElementType();
+ QualType TruncTy =
+ Context.getExtVectorType(ElType, ToVec->getNumElements());
+ From = ImpCastExprToType(From, TruncTy, CK_HLSLVectorTruncation,
+ From->getValueKind())
+ .get();
+ break;
+ }
+ case ICK_Identity:
+ default:
+ llvm_unreachable("Improper element standard conversion");
+ }
+ }
+
switch (SCS.Third) {
case ICK_Identity:
// Nothing to do.
@@ -4858,6 +4931,20 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
return From;
}
+/// Checks that type T is not a VLA.
+///
+/// @returns @c true if @p T is VLA and a diagnostic was emitted,
+/// @c false otherwise.
+static bool DiagnoseVLAInCXXTypeTrait(Sema &S, const TypeSourceInfo *T,
+ clang::tok::TokenKind TypeTraitID) {
+ if (!T->getType()->isVariableArrayType())
+ return false;
+
+ S.Diag(T->getTypeLoc().getBeginLoc(), diag::err_vla_unsupported)
+ << 1 << TypeTraitID;
+ return true;
+}
+
/// Check the completeness of a type in a unary type trait.
///
/// If the particular type trait requires a complete type, tries to complete
@@ -4962,6 +5049,7 @@ static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT,
case UTT_IsStandardLayout:
case UTT_IsPOD:
case UTT_IsLiteral:
+ case UTT_IsBitwiseCloneable:
// By analogy, is_trivially_relocatable and is_trivially_equality_comparable
// impose the same constraints.
case UTT_IsTriviallyRelocatable:
@@ -4981,6 +5069,10 @@ static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT,
case UTT_HasTrivialCopy:
case UTT_HasTrivialDestructor:
case UTT_HasVirtualDestructor:
+ // has_unique_object_representations<T> when T is an array is defined in terms
+ // of has_unique_object_representations<remove_all_extents_t<T>>, so the base
+ // type needs to be complete even if the type is an incomplete array type.
+ case UTT_HasUniqueObjectRepresentations:
ArgTy = QualType(ArgTy->getBaseElementTypeUnsafe(), 0);
[[fallthrough]];
@@ -4989,7 +5081,6 @@ static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT,
case UTT_IsDestructible:
case UTT_IsNothrowDestructible:
case UTT_IsTriviallyDestructible:
- case UTT_HasUniqueObjectRepresentations:
if (ArgTy->isIncompleteArrayType() || ArgTy->isVoidType())
return true;
@@ -5033,8 +5124,88 @@ static bool HasNoThrowOperator(const RecordType *RT, OverloadedOperatorKind Op,
return false;
}
+static bool HasNonDeletedDefaultedEqualityComparison(Sema &S,
+ const CXXRecordDecl *Decl,
+ SourceLocation KeyLoc) {
+ if (Decl->isUnion())
+ return false;
+ if (Decl->isLambda())
+ return Decl->isCapturelessLambda();
+
+ {
+ EnterExpressionEvaluationContext UnevaluatedContext(
+ S, Sema::ExpressionEvaluationContext::Unevaluated);
+ Sema::SFINAETrap SFINAE(S, /*AccessCheckingSFINAE=*/true);
+ Sema::ContextRAII TUContext(S, S.Context.getTranslationUnitDecl());
+
+ // const ClassT& obj;
+ OpaqueValueExpr Operand(
+ KeyLoc,
+ Decl->getTypeForDecl()->getCanonicalTypeUnqualified().withConst(),
+ ExprValueKind::VK_LValue);
+ UnresolvedSet<16> Functions;
+ // obj == obj;
+ S.LookupBinOp(S.TUScope, {}, BinaryOperatorKind::BO_EQ, Functions);
+
+ auto Result = S.CreateOverloadedBinOp(KeyLoc, BinaryOperatorKind::BO_EQ,
+ Functions, &Operand, &Operand);
+ if (Result.isInvalid() || SFINAE.hasErrorOccurred())
+ return false;
+
+ const auto *CallExpr = dyn_cast<CXXOperatorCallExpr>(Result.get());
+ if (!CallExpr)
+ return false;
+ const auto *Callee = CallExpr->getDirectCallee();
+ auto ParamT = Callee->getParamDecl(0)->getType();
+ if (!Callee->isDefaulted())
+ return false;
+ if (!ParamT->isReferenceType() && !Decl->isTriviallyCopyable())
+ return false;
+ if (ParamT.getNonReferenceType()->getUnqualifiedDesugaredType() !=
+ Decl->getTypeForDecl())
+ return false;
+ }
+
+ return llvm::all_of(Decl->bases(),
+ [&](const CXXBaseSpecifier &BS) {
+ if (const auto *RD = BS.getType()->getAsCXXRecordDecl())
+ return HasNonDeletedDefaultedEqualityComparison(
+ S, RD, KeyLoc);
+ return true;
+ }) &&
+ llvm::all_of(Decl->fields(), [&](const FieldDecl *FD) {
+ auto Type = FD->getType();
+ if (Type->isArrayType())
+ Type = Type->getBaseElementTypeUnsafe()
+ ->getCanonicalTypeUnqualified();
+
+ if (Type->isReferenceType() || Type->isEnumeralType())
+ return false;
+ if (const auto *RD = Type->getAsCXXRecordDecl())
+ return HasNonDeletedDefaultedEqualityComparison(S, RD, KeyLoc);
+ return true;
+ });
+}
+
+static bool isTriviallyEqualityComparableType(Sema &S, QualType Type, SourceLocation KeyLoc) {
+ QualType CanonicalType = Type.getCanonicalType();
+ if (CanonicalType->isIncompleteType() || CanonicalType->isDependentType() ||
+ CanonicalType->isEnumeralType() || CanonicalType->isArrayType())
+ return false;
+
+ if (const auto *RD = CanonicalType->getAsCXXRecordDecl()) {
+ if (!HasNonDeletedDefaultedEqualityComparison(S, RD, KeyLoc))
+ return false;
+ }
+
+ return S.getASTContext().hasUniqueObjectRepresentations(
+ CanonicalType, /*CheckIfTriviallyCopyable=*/false);
+}
+
static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
- SourceLocation KeyLoc, QualType T) {
+ SourceLocation KeyLoc,
+ TypeSourceInfo *TInfo) {
+ QualType T = TInfo->getType();
assert(!T->isDependentType() && "Cannot evaluate traits of dependent type");
ASTContext &C = Self.Context;
@@ -5049,23 +5220,23 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
case UTT_IsFloatingPoint:
return T->isFloatingType();
case UTT_IsArray:
+ // Zero-sized arrays aren't considered arrays in partial specializations,
+ // so __is_array shouldn't consider them arrays either.
+ if (const auto *CAT = C.getAsConstantArrayType(T))
+ return CAT->getSize() != 0;
return T->isArrayType();
case UTT_IsBoundedArray:
- if (!T->isVariableArrayType()) {
- return T->isArrayType() && !T->isIncompleteArrayType();
- }
-
- Self.Diag(KeyLoc, diag::err_vla_unsupported)
- << 1 << tok::kw___is_bounded_array;
- return false;
+ if (DiagnoseVLAInCXXTypeTrait(Self, TInfo, tok::kw___is_bounded_array))
+ return false;
+ // Zero-sized arrays aren't considered arrays in partial specializations,
+ // so __is_bounded_array shouldn't consider them arrays either.
+ if (const auto *CAT = C.getAsConstantArrayType(T))
+ return CAT->getSize() != 0;
+ return T->isArrayType() && !T->isIncompleteArrayType();
case UTT_IsUnboundedArray:
- if (!T->isVariableArrayType()) {
- return T->isIncompleteArrayType();
- }
-
- Self.Diag(KeyLoc, diag::err_vla_unsupported)
- << 1 << tok::kw___is_unbounded_array;
- return false;
+ if (DiagnoseVLAInCXXTypeTrait(Self, TInfo, tok::kw___is_unbounded_array))
+ return false;
+ return T->isIncompleteArrayType();
case UTT_IsPointer:
return T->isAnyPointerType();
case UTT_IsNullPointer:
@@ -5453,6 +5624,8 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
return C.hasUniqueObjectRepresentations(T);
case UTT_IsTriviallyRelocatable:
return T.isTriviallyRelocatableType(C);
+ case UTT_IsBitwiseCloneable:
+ return T.isBitwiseCloneableType(C);
case UTT_IsReferenceable:
return T.isReferenceable();
case UTT_CanPassInRegs:
@@ -5461,12 +5634,82 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
Self.Diag(KeyLoc, diag::err_builtin_pass_in_regs_non_class) << T;
return false;
case UTT_IsTriviallyEqualityComparable:
- return T.isTriviallyEqualityComparableType(C);
+ return isTriviallyEqualityComparableType(Self, T, KeyLoc);
}
}
-static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
- QualType RhsT, SourceLocation KeyLoc);
+static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, const TypeSourceInfo *Lhs,
+ const TypeSourceInfo *Rhs, SourceLocation KeyLoc);
+
+static ExprResult CheckConvertibilityForTypeTraits(
+ Sema &Self, const TypeSourceInfo *Lhs, const TypeSourceInfo *Rhs,
+ SourceLocation KeyLoc, llvm::BumpPtrAllocator &OpaqueExprAllocator) {
+
+ QualType LhsT = Lhs->getType();
+ QualType RhsT = Rhs->getType();
+
+ // C++0x [meta.rel]p4:
+ // Given the following function prototype:
+ //
+ // template <class T>
+ // typename add_rvalue_reference<T>::type create();
+ //
+ // the predicate condition for a template specialization
+ // is_convertible<From, To> shall be satisfied if and only if
+ // the return expression in the following code would be
+ // well-formed, including any implicit conversions to the return
+ // type of the function:
+ //
+ // To test() {
+ // return create<From>();
+ // }
+ //
+ // Access checking is performed as if in a context unrelated to To and
+ // From. Only the validity of the immediate context of the expression
+ // of the return-statement (including conversions to the return type)
+ // is considered.
+ //
+ // We model the initialization as a copy-initialization of a temporary
+ // of the appropriate type, which for this expression is identical to the
+ // return statement (since NRVO doesn't apply).
+
+ // Functions aren't allowed to return function or array types.
+ if (RhsT->isFunctionType() || RhsT->isArrayType())
+ return ExprError();
+
+ // A function definition requires a complete, non-abstract return type.
+ if (!Self.isCompleteType(Rhs->getTypeLoc().getBeginLoc(), RhsT) ||
+ Self.isAbstractType(Rhs->getTypeLoc().getBeginLoc(), RhsT))
+ return ExprError();
+
+ // Compute the result of add_rvalue_reference.
+ if (LhsT->isObjectType() || LhsT->isFunctionType())
+ LhsT = Self.Context.getRValueReferenceType(LhsT);
+
+ // Build a fake source and destination for initialization.
+ InitializedEntity To(InitializedEntity::InitializeTemporary(RhsT));
+ Expr *From = new (OpaqueExprAllocator.Allocate<OpaqueValueExpr>())
+ OpaqueValueExpr(KeyLoc, LhsT.getNonLValueExprType(Self.Context),
+ Expr::getValueKindForType(LhsT));
+ InitializationKind Kind =
+ InitializationKind::CreateCopy(KeyLoc, SourceLocation());
+
+ // Perform the initialization in an unevaluated context within a SFINAE
+ // trap at translation unit scope.
+ EnterExpressionEvaluationContext Unevaluated(
+ Self, Sema::ExpressionEvaluationContext::Unevaluated);
+ Sema::SFINAETrap SFINAE(Self, /*AccessCheckingSFINAE=*/true);
+ Sema::ContextRAII TUContext(Self, Self.Context.getTranslationUnitDecl());
+ InitializationSequence Init(Self, To, Kind, From);
+ if (Init.Failed())
+ return ExprError();
+
+ ExprResult Result = Init.Perform(Self, To, Kind, From);
+ if (Result.isInvalid() || SFINAE.hasErrorOccurred())
+ return ExprError();
+
+ return Result;
+}
static bool EvaluateBooleanTypeTrait(Sema &S, TypeTrait Kind,
SourceLocation KWLoc,
@@ -5477,17 +5720,20 @@ static bool EvaluateBooleanTypeTrait(Sema &S, TypeTrait Kind,
return false;
if (Kind <= UTT_Last)
- return EvaluateUnaryTypeTrait(S, Kind, KWLoc, Args[0]->getType());
+ return EvaluateUnaryTypeTrait(S, Kind, KWLoc, Args[0]);
// Evaluate ReferenceBindsToTemporary and ReferenceConstructsFromTemporary
// alongside the IsConstructible traits to avoid duplication.
- if (Kind <= BTT_Last && Kind != BTT_ReferenceBindsToTemporary && Kind != BTT_ReferenceConstructsFromTemporary)
- return EvaluateBinaryTypeTrait(S, Kind, Args[0]->getType(),
- Args[1]->getType(), RParenLoc);
+ if (Kind <= BTT_Last && Kind != BTT_ReferenceBindsToTemporary &&
+ Kind != BTT_ReferenceConstructsFromTemporary &&
+ Kind != BTT_ReferenceConvertsFromTemporary)
+ return EvaluateBinaryTypeTrait(S, Kind, Args[0],
+ Args[1], RParenLoc);
switch (Kind) {
case clang::BTT_ReferenceBindsToTemporary:
case clang::BTT_ReferenceConstructsFromTemporary:
+ case clang::BTT_ReferenceConvertsFromTemporary:
case clang::TT_IsConstructible:
case clang::TT_IsNothrowConstructible:
case clang::TT_IsTriviallyConstructible: {
@@ -5551,8 +5797,10 @@ static bool EvaluateBooleanTypeTrait(Sema &S, TypeTrait Kind,
Sema::ContextRAII TUContext(S, S.Context.getTranslationUnitDecl());
InitializedEntity To(
InitializedEntity::InitializeTemporary(S.Context, Args[0]));
- InitializationKind InitKind(InitializationKind::CreateDirect(KWLoc, KWLoc,
- RParenLoc));
+ InitializationKind InitKind(
+ Kind == clang::BTT_ReferenceConvertsFromTemporary
+ ? InitializationKind::CreateCopy(KWLoc, KWLoc)
+ : InitializationKind::CreateDirect(KWLoc, KWLoc, RParenLoc));
InitializationSequence Init(S, To, InitKind, ArgExprs);
if (Init.Failed())
return false;
@@ -5564,7 +5812,9 @@ static bool EvaluateBooleanTypeTrait(Sema &S, TypeTrait Kind,
if (Kind == clang::TT_IsConstructible)
return true;
- if (Kind == clang::BTT_ReferenceBindsToTemporary || Kind == clang::BTT_ReferenceConstructsFromTemporary) {
+ if (Kind == clang::BTT_ReferenceBindsToTemporary ||
+ Kind == clang::BTT_ReferenceConstructsFromTemporary ||
+ Kind == clang::BTT_ReferenceConvertsFromTemporary) {
if (!T->isReferenceType())
return false;
@@ -5578,9 +5828,13 @@ static bool EvaluateBooleanTypeTrait(Sema &S, TypeTrait Kind,
if (U->isReferenceType())
return false;
- QualType TPtr = S.Context.getPointerType(S.BuiltinRemoveReference(T, UnaryTransformType::RemoveCVRef, {}));
- QualType UPtr = S.Context.getPointerType(S.BuiltinRemoveReference(U, UnaryTransformType::RemoveCVRef, {}));
- return EvaluateBinaryTypeTrait(S, TypeTrait::BTT_IsConvertibleTo, UPtr, TPtr, RParenLoc);
+ TypeSourceInfo *TPtr = S.Context.CreateTypeSourceInfo(
+ S.Context.getPointerType(T.getNonReferenceType()));
+ TypeSourceInfo *UPtr = S.Context.CreateTypeSourceInfo(
+ S.Context.getPointerType(U.getNonReferenceType()));
+ return !CheckConvertibilityForTypeTraits(S, UPtr, TPtr, RParenLoc,
+ OpaqueExprAllocator)
+ .isInvalid();
}
if (Kind == clang::TT_IsNothrowConstructible)
@@ -5713,8 +5967,11 @@ ExprResult Sema::ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
return BuildTypeTrait(Kind, KWLoc, ConvertedArgs, RParenLoc);
}
-static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
- QualType RhsT, SourceLocation KeyLoc) {
+static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, const TypeSourceInfo *Lhs,
+ const TypeSourceInfo *Rhs, SourceLocation KeyLoc) {
+ QualType LhsT = Lhs->getType();
+ QualType RhsT = Rhs->getType();
+
assert(!LhsT->isDependentType() && !RhsT->isDependentType() &&
"Cannot evaluate traits of dependent types");
@@ -5739,7 +5996,8 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
return false;
if (Self.RequireCompleteType(
- KeyLoc, RhsT, diag::err_incomplete_type_used_in_type_trait_expr))
+ Rhs->getTypeLoc().getBeginLoc(), RhsT,
+ diag::err_incomplete_type_used_in_type_trait_expr))
return false;
return BaseInterface->isSuperClassOf(DerivedInterface);
@@ -5762,8 +6020,9 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
// If Base and Derived are class types and are different types
// (ignoring possible cv-qualifiers) then Derived shall be a
// complete type.
- if (Self.RequireCompleteType(KeyLoc, RhsT,
- diag::err_incomplete_type_used_in_type_trait_expr))
+ if (Self.RequireCompleteType(
+ Rhs->getTypeLoc().getBeginLoc(), RhsT,
+ diag::err_incomplete_type_used_in_type_trait_expr))
return false;
return cast<CXXRecordDecl>(rhsRecord->getDecl())
@@ -5779,68 +6038,20 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
return Self.Context.typesAreCompatible(Lhs, Rhs);
}
case BTT_IsConvertible:
- case BTT_IsConvertibleTo: {
- // C++0x [meta.rel]p4:
- // Given the following function prototype:
- //
- // template <class T>
- // typename add_rvalue_reference<T>::type create();
- //
- // the predicate condition for a template specialization
- // is_convertible<From, To> shall be satisfied if and only if
- // the return expression in the following code would be
- // well-formed, including any implicit conversions to the return
- // type of the function:
- //
- // To test() {
- // return create<From>();
- // }
- //
- // Access checking is performed as if in a context unrelated to To and
- // From. Only the validity of the immediate context of the expression
- // of the return-statement (including conversions to the return type)
- // is considered.
- //
- // We model the initialization as a copy-initialization of a temporary
- // of the appropriate type, which for this expression is identical to the
- // return statement (since NRVO doesn't apply).
-
- // Functions aren't allowed to return function or array types.
- if (RhsT->isFunctionType() || RhsT->isArrayType())
- return false;
-
- // A return statement in a void function must have void type.
+ case BTT_IsConvertibleTo:
+ case BTT_IsNothrowConvertible: {
if (RhsT->isVoidType())
return LhsT->isVoidType();
-
- // A function definition requires a complete, non-abstract return type.
- if (!Self.isCompleteType(KeyLoc, RhsT) || Self.isAbstractType(KeyLoc, RhsT))
+ llvm::BumpPtrAllocator OpaqueExprAllocator;
+ ExprResult Result = CheckConvertibilityForTypeTraits(Self, Lhs, Rhs, KeyLoc,
+ OpaqueExprAllocator);
+ if (Result.isInvalid())
return false;
- // Compute the result of add_rvalue_reference.
- if (LhsT->isObjectType() || LhsT->isFunctionType())
- LhsT = Self.Context.getRValueReferenceType(LhsT);
-
- // Build a fake source and destination for initialization.
- InitializedEntity To(InitializedEntity::InitializeTemporary(RhsT));
- OpaqueValueExpr From(KeyLoc, LhsT.getNonLValueExprType(Self.Context),
- Expr::getValueKindForType(LhsT));
- Expr *FromPtr = &From;
- InitializationKind Kind(InitializationKind::CreateCopy(KeyLoc,
- SourceLocation()));
-
- // Perform the initialization in an unevaluated context within a SFINAE
- // trap at translation unit scope.
- EnterExpressionEvaluationContext Unevaluated(
- Self, Sema::ExpressionEvaluationContext::Unevaluated);
- Sema::SFINAETrap SFINAE(Self, /*AccessCheckingSFINAE=*/true);
- Sema::ContextRAII TUContext(Self, Self.Context.getTranslationUnitDecl());
- InitializationSequence Init(Self, To, Kind, FromPtr);
- if (Init.Failed())
- return false;
+ if (BTT != BTT_IsNothrowConvertible)
+ return true;
- ExprResult Result = Init.Perform(Self, To, Kind, FromPtr);
- return !Result.isInvalid() && !SFINAE.hasErrorOccurred();
+ return Self.canThrow(Result.get()) == CT_Cannot;
}
case BTT_IsAssignable:
@@ -5858,12 +6069,14 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
// For both, T and U shall be complete types, (possibly cv-qualified)
// void, or arrays of unknown bound.
if (!LhsT->isVoidType() && !LhsT->isIncompleteArrayType() &&
- Self.RequireCompleteType(KeyLoc, LhsT,
- diag::err_incomplete_type_used_in_type_trait_expr))
+ Self.RequireCompleteType(
+ Lhs->getTypeLoc().getBeginLoc(), LhsT,
+ diag::err_incomplete_type_used_in_type_trait_expr))
return false;
if (!RhsT->isVoidType() && !RhsT->isIncompleteArrayType() &&
- Self.RequireCompleteType(KeyLoc, RhsT,
- diag::err_incomplete_type_used_in_type_trait_expr))
+ Self.RequireCompleteType(
+ Rhs->getTypeLoc().getBeginLoc(), RhsT,
+ diag::err_incomplete_type_used_in_type_trait_expr))
return false;
// cv void is never assignable.
@@ -5916,7 +6129,42 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
llvm_unreachable("unhandled type trait");
return false;
}
- default: llvm_unreachable("not a BTT");
+ case BTT_IsLayoutCompatible: {
+ if (!LhsT->isVoidType() && !LhsT->isIncompleteArrayType())
+ Self.RequireCompleteType(Lhs->getTypeLoc().getBeginLoc(), LhsT,
+ diag::err_incomplete_type);
+ if (!RhsT->isVoidType() && !RhsT->isIncompleteArrayType())
+ Self.RequireCompleteType(Rhs->getTypeLoc().getBeginLoc(), RhsT,
+ diag::err_incomplete_type);
+
+ DiagnoseVLAInCXXTypeTrait(Self, Lhs, tok::kw___is_layout_compatible);
+ DiagnoseVLAInCXXTypeTrait(Self, Rhs, tok::kw___is_layout_compatible);
+
+ return Self.IsLayoutCompatible(LhsT, RhsT);
+ }
+ case BTT_IsPointerInterconvertibleBaseOf: {
+ if (LhsT->isStructureOrClassType() && RhsT->isStructureOrClassType() &&
+ !Self.getASTContext().hasSameUnqualifiedType(LhsT, RhsT)) {
+ Self.RequireCompleteType(Rhs->getTypeLoc().getBeginLoc(), RhsT,
+ diag::err_incomplete_type);
+ }
+
+ DiagnoseVLAInCXXTypeTrait(Self, Lhs,
+ tok::kw___is_pointer_interconvertible_base_of);
+ DiagnoseVLAInCXXTypeTrait(Self, Rhs,
+ tok::kw___is_pointer_interconvertible_base_of);
+
+ return Self.IsPointerInterconvertibleBaseOf(Lhs, Rhs);
+ }
+ case BTT_IsDeducible: {
+ const auto *TSTToBeDeduced = cast<DeducedTemplateSpecializationType>(LhsT);
+ sema::TemplateDeductionInfo Info(KeyLoc);
+ return Self.DeduceTemplateArgumentsFromType(
+ TSTToBeDeduced->getTemplateName().getAsTemplateDecl(), RhsT,
+ Info) == TemplateDeductionResult::Success;
+ }
+ default:
+ llvm_unreachable("not a BTT");
}
llvm_unreachable("Unknown type trait or not implemented");
}
@@ -5979,7 +6227,7 @@ static uint64_t EvaluateArrayTypeTrait(Sema &Self, ArrayTypeTrait ATT,
if (Matched && T->isArrayType()) {
if (const ConstantArrayType *CAT = Self.Context.getAsConstantArrayType(T))
- return CAT->getSize().getLimitedValue();
+ return CAT->getLimitedSize();
}
}
return 0;
@@ -6568,26 +6816,6 @@ QualType Sema::CheckSizelessVectorConditionalTypes(ExprResult &Cond,
return ResultType;
}
-/// Check the operands of ?: under C++ semantics.
-///
-/// See C++ [expr.cond]. Note that LHS is never null, even for the GNU x ?: y
-/// extension. In this case, LHS == Cond. (But they're not aliases.)
-///
-/// This function also implements GCC's vector extension and the
-/// OpenCL/ext_vector_type extension for conditionals. The vector extensions
-/// permit the use of a?b:c where the type of a is that of a integer vector with
-/// the same number of elements and size as the vectors of b and c. If one of
-/// either b or c is a scalar it is implicitly converted to match the type of
-/// the vector. Otherwise the expression is ill-formed. If both b and c are
-/// scalars, then b and c are checked and converted to the type of a if
-/// possible.
-///
-/// The expressions are evaluated differently for GCC's and OpenCL's extensions.
-/// For the GCC extension, the ?: operator is evaluated as
-/// (a[0] != 0 ? b[0] : c[0], .. , a[n] != 0 ? b[n] : c[n]).
-/// For the OpenCL extensions, the ?: operator is evaluated as
-/// (most-significant-bit-set(a[0]) ? b[0] : c[0], .. ,
-/// most-significant-bit-set(a[n]) ? b[n] : c[n]).
QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS, ExprValueKind &VK,
ExprObjectKind &OK,
@@ -6872,7 +7100,7 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
return Composite;
// Similarly, attempt to find composite type of two objective-c pointers.
- Composite = FindCompositeObjCPointerType(LHS, RHS, QuestionLoc);
+ Composite = ObjC().FindCompositeObjCPointerType(LHS, RHS, QuestionLoc);
if (LHS.isInvalid() || RHS.isInvalid())
return QualType();
if (!Composite.isNull())
@@ -6888,17 +7116,6 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
return QualType();
}
-/// Find a merged pointer type and convert the two expressions to it.
-///
-/// This finds the composite pointer type for \p E1 and \p E2 according to
-/// C++2a [expr.type]p3. It converts both expressions to this type and returns
-/// it. It does not emit diagnostics (FIXME: that's not true if \p ConvertArgs
-/// is \c true).
-///
-/// \param Loc The location of the operator requiring these two expressions to
-/// be converted to the composite pointer type.
-///
-/// \param ConvertArgs If \c false, do not convert E1 and E2 to the target type.
QualType Sema::FindCompositePointerType(SourceLocation Loc,
Expr *&E1, Expr *&E2,
bool ConvertArgs) {
@@ -7485,10 +7702,6 @@ Stmt *Sema::MaybeCreateStmtWithCleanups(Stmt *SubStmt) {
return MaybeCreateExprWithCleanups(E);
}
-/// Process the expression contained within a decltype. For such expressions,
-/// certain semantic checks on temporaries are delayed until this point, and
-/// are omitted for the 'topmost' call in the decltype expression. If the
-/// topmost call bound a temporary, strip that temporary off the expression.
ExprResult Sema::ActOnDecltypeExpression(Expr *E) {
assert(ExprEvalContexts.back().ExprContext ==
ExpressionEvaluationContextRecord::EK_Decltype &&
@@ -8099,20 +8312,36 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation TildeLoc,
const DeclSpec& DS) {
QualType ObjectType;
+ QualType T;
+ TypeLocBuilder TLB;
if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc))
return ExprError();
- if (DS.getTypeSpecType() == DeclSpec::TST_decltype_auto) {
+ switch (DS.getTypeSpecType()) {
+ case DeclSpec::TST_decltype_auto: {
Diag(DS.getTypeSpecTypeLoc(), diag::err_decltype_auto_invalid);
return true;
}
-
- QualType T = BuildDecltypeType(DS.getRepAsExpr(), /*AsUnevaluated=*/false);
-
- TypeLocBuilder TLB;
- DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T);
- DecltypeTL.setDecltypeLoc(DS.getTypeSpecTypeLoc());
- DecltypeTL.setRParenLoc(DS.getTypeofParensRange().getEnd());
+ case DeclSpec::TST_decltype: {
+ T = BuildDecltypeType(DS.getRepAsExpr(), /*AsUnevaluated=*/false);
+ DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T);
+ DecltypeTL.setDecltypeLoc(DS.getTypeSpecTypeLoc());
+ DecltypeTL.setRParenLoc(DS.getTypeofParensRange().getEnd());
+ break;
+ }
+ case DeclSpec::TST_typename_pack_indexing: {
+ T = ActOnPackIndexingType(DS.getRepAsType().get(), DS.getPackIndexingExpr(),
+ DS.getBeginLoc(), DS.getEllipsisLoc());
+ TLB.pushTrivial(getASTContext(),
+ cast<PackIndexingType>(T.getTypePtr())->getPattern(),
+ DS.getBeginLoc());
+ PackIndexingTypeLoc PITL = TLB.push<PackIndexingTypeLoc>(T);
+ PITL.setEllipsisLoc(DS.getEllipsisLoc());
+ break;
+ }
+ default:
+ llvm_unreachable("Unsupported type in pseudo destructor");
+ }
TypeSourceInfo *DestructedTypeInfo = TLB.getTypeSourceInfo(Context, T);
PseudoDestructorTypeStorage Destructed(DestructedTypeInfo);
@@ -8206,21 +8435,6 @@ ExprResult Sema::IgnoredValueConversions(Expr *E) {
E = result.get();
}
- // C99 6.3.2.1:
- // [Except in specific positions,] an lvalue that does not have
- // array type is converted to the value stored in the
- // designated object (and is no longer an lvalue).
- if (E->isPRValue()) {
- // In C, function designators (i.e. expressions of function type)
- // are r-values, but we still want to do function-to-pointer decay
- // on them. This is both technically correct and convenient for
- // some clients.
- if (!getLangOpts().CPlusPlus && E->getType()->isFunctionType())
- return DefaultFunctionArrayConversion(E);
-
- return E;
- }
-
if (getLangOpts().CPlusPlus) {
// The C++11 standard defines the notion of a discarded-value expression;
// normally, we don't need to do anything to handle it, but if it is a
@@ -8241,11 +8455,32 @@ ExprResult Sema::IgnoredValueConversions(Expr *E) {
// If the expression is a prvalue after this optional conversion, the
// temporary materialization conversion is applied.
//
- // We skip this step: IR generation is able to synthesize the storage for
- // itself in the aggregate case, and adding the extra node to the AST is
- // just clutter.
- // FIXME: We don't emit lifetime markers for the temporaries due to this.
- // FIXME: Do any other AST consumers care about this?
+ // We do not materialize temporaries by default in order to avoid creating
+ // unnecessary temporary objects. If we skip this step, IR generation is
+ // able to synthesize the storage for itself in the aggregate case, and
+ // adding the extra node to the AST is just clutter.
+ if (isInLifetimeExtendingContext() && getLangOpts().CPlusPlus17 &&
+ E->isPRValue() && !E->getType()->isVoidType()) {
+ ExprResult Res = TemporaryMaterializationConversion(E);
+ if (Res.isInvalid())
+ return E;
+ E = Res.get();
+ }
+ return E;
+ }
+
+ // C99 6.3.2.1:
+ // [Except in specific positions,] an lvalue that does not have
+ // array type is converted to the value stored in the
+ // designated object (and is no longer an lvalue).
+ if (E->isPRValue()) {
+ // In C, function designators (i.e. expressions of function type)
+ // are r-values, but we still want to do function-to-pointer decay
+ // on them. This is both technically correct and convenient for
+ // some clients.
+ if (!getLangOpts().CPlusPlus && E->getType()->isFunctionType())
+ return DefaultFunctionArrayConversion(E);
+
return E;
}
@@ -8326,7 +8561,7 @@ static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
assert(S.CurContext->isDependentContext());
#ifndef NDEBUG
DeclContext *DC = S.CurContext;
- while (DC && isa<CapturedDecl>(DC))
+ while (isa_and_nonnull<CapturedDecl>(DC))
DC = DC->getParent();
assert(
CurrentLSI->CallOperator == DC &&
@@ -8437,27 +8672,14 @@ static ExprResult attemptRecovery(Sema &SemaRef,
// Detect and handle the case where the decl might be an implicit
// member.
- bool MightBeImplicitMember;
- if (!Consumer.isAddressOfOperand())
- MightBeImplicitMember = true;
- else if (!NewSS.isEmpty())
- MightBeImplicitMember = false;
- else if (R.isOverloadedResult())
- MightBeImplicitMember = false;
- else if (R.isUnresolvableResult())
- MightBeImplicitMember = true;
- else
- MightBeImplicitMember = isa<FieldDecl>(ND) ||
- isa<IndirectFieldDecl>(ND) ||
- isa<MSPropertyDecl>(ND);
-
- if (MightBeImplicitMember)
+ if (SemaRef.isPotentialImplicitMemberAccess(
+ NewSS, R, Consumer.isAddressOfOperand()))
return SemaRef.BuildPossibleImplicitMemberExpr(
NewSS, /*TemplateKWLoc*/ SourceLocation(), R,
/*TemplateArgs*/ nullptr, /*S*/ nullptr);
} else if (auto *Ivar = dyn_cast<ObjCIvarDecl>(ND)) {
- return SemaRef.LookupInObjCMethod(R, Consumer.getScope(),
- Ivar->getIdentifier());
+ return SemaRef.ObjC().LookupInObjCMethod(R, Consumer.getScope(),
+ Ivar->getIdentifier());
}
}
@@ -8916,7 +9138,7 @@ ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC,
// - Teach the handful of places that iterate over FunctionScopes to
// stop at the outermost enclosing lexical scope."
DeclContext *DC = CurContext;
- while (DC && isa<CapturedDecl>(DC))
+ while (isa_and_nonnull<CapturedDecl>(DC))
DC = DC->getParent();
const bool IsInLambdaDeclContext = isLambdaCallOperator(DC);
if (IsInLambdaDeclContext && CurrentLSI &&
@@ -8946,8 +9168,8 @@ Sema::CheckMicrosoftIfExistsSymbol(Scope *S,
// Do the redeclaration lookup in the current scope.
LookupResult R(*this, TargetNameInfo, Sema::LookupAnyName,
- Sema::NotForRedeclaration);
- LookupParsedName(R, S, &SS);
+ RedeclarationKind::NotForRedeclaration);
+ LookupParsedName(R, S, &SS, /*ObjectType=*/QualType());
R.suppressDiagnostics();
switch (R.getResultKind()) {
@@ -8988,10 +9210,9 @@ concepts::Requirement *Sema::ActOnSimpleRequirement(Expr *E) {
/*ReturnTypeRequirement=*/{});
}
-concepts::Requirement *
-Sema::ActOnTypeRequirement(SourceLocation TypenameKWLoc, CXXScopeSpec &SS,
- SourceLocation NameLoc, IdentifierInfo *TypeName,
- TemplateIdAnnotation *TemplateId) {
+concepts::Requirement *Sema::ActOnTypeRequirement(
+ SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
+ const IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId) {
assert(((!TypeName && TemplateId) || (TypeName && !TemplateId)) &&
"Exactly one of TypeName and TemplateId must be specified.");
TypeSourceInfo *TSI = nullptr;
@@ -9091,9 +9312,7 @@ Sema::BuildExprRequirement(
auto *Param = cast<TemplateTypeParmDecl>(TPL->getParam(0));
- TemplateArgumentList TAL(TemplateArgumentList::OnStack, Args);
- MultiLevelTemplateArgumentList MLTAL(Param, TAL.asArray(),
- /*Final=*/false);
+ MultiLevelTemplateArgumentList MLTAL(Param, Args, /*Final=*/false);
MLTAL.addOuterRetainedLevels(TPL->getDepth());
const TypeConstraint *TC = Param->getTypeConstraint();
assert(TC && "Type Constraint cannot be null here");
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp
index 32998ae60eaf..f1ba26f38520 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp
@@ -9,7 +9,6 @@
// This file implements semantic analysis member access expressions.
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/Overload.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
@@ -18,9 +17,12 @@
#include "clang/AST/ExprObjC.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Overload.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaOpenMP.h"
using namespace clang;
using namespace sema;
@@ -61,6 +63,10 @@ enum IMAKind {
/// The reference is a contextually-permitted abstract member reference.
IMA_Abstract,
+ /// Whether the context is static is dependent on the enclosing template (i.e.
+ /// in a dependent class scope explicit specialization).
+ IMA_Dependent,
+
/// The reference may be to an unresolved using declaration and the
/// context is not an instance method.
IMA_Unresolved_StaticOrExplicitContext,
@@ -91,14 +97,25 @@ static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
DeclContext *DC = SemaRef.getFunctionLevelDeclContext();
- bool isStaticOrExplicitContext =
- SemaRef.CXXThisTypeOverride.isNull() &&
- (!isa<CXXMethodDecl>(DC) || cast<CXXMethodDecl>(DC)->isStatic() ||
- cast<CXXMethodDecl>(DC)->isExplicitObjectMemberFunction());
+ bool couldInstantiateToStatic = false;
+ bool isStaticOrExplicitContext = SemaRef.CXXThisTypeOverride.isNull();
+
+ if (auto *MD = dyn_cast<CXXMethodDecl>(DC)) {
+ if (MD->isImplicitObjectMemberFunction()) {
+ isStaticOrExplicitContext = false;
+ // A dependent class scope function template explicit specialization
+ // that is neither declared 'static' nor with an explicit object
+ // parameter could instantiate to a static or non-static member function.
+ couldInstantiateToStatic = MD->getDependentSpecializationInfo();
+ }
+ }
- if (R.isUnresolvableResult())
+ if (R.isUnresolvableResult()) {
+ if (couldInstantiateToStatic)
+ return IMA_Dependent;
return isStaticOrExplicitContext ? IMA_Unresolved_StaticOrExplicitContext
: IMA_Unresolved;
+ }
// Collect all the declaring classes of instance members we find.
bool hasNonInstance = false;
@@ -123,6 +140,9 @@ static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
if (Classes.empty())
return IMA_Static;
+ if (couldInstantiateToStatic)
+ return IMA_Dependent;
+
// C++11 [expr.prim.general]p12:
// An id-expression that denotes a non-static data member or non-static
// member function of a class can only be used:
@@ -263,21 +283,36 @@ static void diagnoseInstanceReference(Sema &SemaRef,
}
}
-/// Builds an expression which might be an implicit member expression.
+bool Sema::isPotentialImplicitMemberAccess(const CXXScopeSpec &SS,
+ LookupResult &R,
+ bool IsAddressOfOperand) {
+ if (!getLangOpts().CPlusPlus)
+ return false;
+ else if (R.empty() || !R.begin()->isCXXClassMember())
+ return false;
+ else if (!IsAddressOfOperand)
+ return true;
+ else if (!SS.isEmpty())
+ return false;
+ else if (R.isOverloadedResult())
+ return false;
+ else if (R.isUnresolvableResult())
+ return true;
+ else
+ return isa<FieldDecl, IndirectFieldDecl, MSPropertyDecl>(R.getFoundDecl());
+}
+
ExprResult Sema::BuildPossibleImplicitMemberExpr(
const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R,
- const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
- UnresolvedLookupExpr *AsULE) {
- switch (ClassifyImplicitMemberAccess(*this, R)) {
+ const TemplateArgumentListInfo *TemplateArgs, const Scope *S) {
+ switch (IMAKind Classification = ClassifyImplicitMemberAccess(*this, R)) {
case IMA_Instance:
- return BuildImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs, true, S);
-
case IMA_Mixed:
case IMA_Mixed_Unrelated:
case IMA_Unresolved:
- return BuildImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs, false,
- S);
-
+ return BuildImplicitMemberExpr(
+ SS, TemplateKWLoc, R, TemplateArgs,
+ /*IsKnownInstance=*/Classification == IMA_Instance, S);
case IMA_Field_Uneval_Context:
Diag(R.getNameLoc(), diag::warn_cxx98_compat_non_static_member_use)
<< R.getLookupNameInfo().getName();
@@ -287,8 +322,17 @@ ExprResult Sema::BuildPossibleImplicitMemberExpr(
case IMA_Mixed_StaticOrExplicitContext:
case IMA_Unresolved_StaticOrExplicitContext:
if (TemplateArgs || TemplateKWLoc.isValid())
- return BuildTemplateIdExpr(SS, TemplateKWLoc, R, false, TemplateArgs);
- return AsULE ? AsULE : BuildDeclarationNameExpr(SS, R, false);
+ return BuildTemplateIdExpr(SS, TemplateKWLoc, R, /*RequiresADL=*/false,
+ TemplateArgs);
+ return BuildDeclarationNameExpr(SS, R, /*NeedsADL=*/false,
+ /*AcceptInvalidDecl=*/false);
+ case IMA_Dependent:
+ R.suppressDiagnostics();
+ return UnresolvedLookupExpr::Create(
+ Context, R.getNamingClass(), SS.getWithLocInContext(Context),
+ TemplateKWLoc, R.getLookupNameInfo(), /*RequiresADL=*/false,
+ TemplateArgs, R.begin(), R.end(), /*KnownDependent=*/true,
+ /*KnownInstantiationDependent=*/true);
case IMA_Error_StaticOrExplicitContext:
case IMA_Error_Unrelated:
@@ -569,18 +613,6 @@ static void DiagnoseQualifiedMemberReference(Sema &SemaRef,
<< SS.getRange() << rep << BaseType;
}
-// Check whether the declarations we found through a nested-name
-// specifier in a member expression are actually members of the base
-// type. The restriction here is:
-//
-// C++ [expr.ref]p2:
-// ... In these cases, the id-expression shall name a
-// member of the class or of one of its base classes.
-//
-// So it's perfectly legitimate for the nested-name specifier to name
-// an unrelated class, and for us to find an overload set including
-// decls from classes which are not superclasses, as long as the decl
-// we actually pick through overload resolution is from a superclass.
bool Sema::CheckQualifiedMemberReference(Expr *BaseExpr,
QualType BaseType,
const CXXScopeSpec &SS,
@@ -624,8 +656,8 @@ namespace {
// classes, one of its base classes.
class RecordMemberExprValidatorCCC final : public CorrectionCandidateCallback {
public:
- explicit RecordMemberExprValidatorCCC(const RecordType *RTy)
- : Record(RTy->getDecl()) {
+ explicit RecordMemberExprValidatorCCC(QualType RTy)
+ : Record(RTy->getAsRecordDecl()) {
// Don't add bare keywords to the consumer since they will always fail
// validation by virtue of not being associated with any decls.
WantTypeSpecifiers = false;
@@ -670,64 +702,42 @@ private:
}
static bool LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
- Expr *BaseExpr,
- const RecordType *RTy,
+ Expr *BaseExpr, QualType RTy,
SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, bool HasTemplateArgs,
SourceLocation TemplateKWLoc,
TypoExpr *&TE) {
SourceRange BaseRange = BaseExpr ? BaseExpr->getSourceRange() : SourceRange();
- RecordDecl *RDecl = RTy->getDecl();
- if (!SemaRef.isThisOutsideMemberFunctionBody(QualType(RTy, 0)) &&
- SemaRef.RequireCompleteType(OpLoc, QualType(RTy, 0),
- diag::err_typecheck_incomplete_tag,
- BaseRange))
+ if (!RTy->isDependentType() &&
+ !SemaRef.isThisOutsideMemberFunctionBody(RTy) &&
+ SemaRef.RequireCompleteType(
+ OpLoc, RTy, diag::err_typecheck_incomplete_tag, BaseRange))
return true;
- if (HasTemplateArgs || TemplateKWLoc.isValid()) {
- // LookupTemplateName doesn't expect these both to exist simultaneously.
- QualType ObjectType = SS.isSet() ? QualType() : QualType(RTy, 0);
-
- bool MOUS;
- return SemaRef.LookupTemplateName(R, nullptr, SS, ObjectType, false, MOUS,
- TemplateKWLoc);
- }
-
- DeclContext *DC = RDecl;
- if (SS.isSet()) {
- // If the member name was a qualified-id, look into the
- // nested-name-specifier.
- DC = SemaRef.computeDeclContext(SS, false);
-
- if (SemaRef.RequireCompleteDeclContext(SS, DC)) {
- SemaRef.Diag(SS.getRange().getEnd(), diag::err_typecheck_incomplete_tag)
- << SS.getRange() << DC;
- return true;
- }
+ // LookupTemplateName/LookupParsedName don't expect these both to exist
+ // simultaneously.
+ QualType ObjectType = SS.isSet() ? QualType() : RTy;
+ if (HasTemplateArgs || TemplateKWLoc.isValid())
+ return SemaRef.LookupTemplateName(R,
+ /*S=*/nullptr, SS, ObjectType,
+ /*EnteringContext=*/false, TemplateKWLoc);
- assert(DC && "Cannot handle non-computable dependent contexts in lookup");
+ SemaRef.LookupParsedName(R, /*S=*/nullptr, &SS, ObjectType);
- if (!isa<TypeDecl>(DC)) {
- SemaRef.Diag(R.getNameLoc(), diag::err_qualified_member_nonclass)
- << DC << SS.getRange();
- return true;
- }
- }
-
- // The record definition is complete, now look up the member.
- SemaRef.LookupQualifiedName(R, DC, SS);
-
- if (!R.empty())
+ if (!R.empty() || R.wasNotFoundInCurrentInstantiation())
return false;
DeclarationName Typo = R.getLookupName();
SourceLocation TypoLoc = R.getNameLoc();
+ // Recompute the lookup context.
+ DeclContext *DC = SS.isSet() ? SemaRef.computeDeclContext(SS)
+ : SemaRef.computeDeclContext(RTy);
struct QueryState {
Sema &SemaRef;
DeclarationNameInfo NameInfo;
Sema::LookupNameKind LookupKind;
- Sema::RedeclarationKind Redecl;
+ RedeclarationKind Redecl;
};
QueryState Q = {R.getSema(), R.getLookupNameInfo(), R.getLookupKind(),
R.redeclarationKind()};
@@ -745,7 +755,8 @@ static bool LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
<< Typo << DC << DroppedSpecifier
<< SS.getRange());
} else {
- SemaRef.Diag(TypoLoc, diag::err_no_member) << Typo << DC << BaseRange;
+ SemaRef.Diag(TypoLoc, diag::err_no_member)
+ << Typo << DC << (SS.isSet() ? SS.getRange() : BaseRange);
}
},
[=](Sema &SemaRef, TypoExpr *TE, TypoCorrection TC) mutable {
@@ -771,24 +782,12 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
Decl *ObjCImpDecl, bool HasTemplateArgs,
SourceLocation TemplateKWLoc);
-ExprResult
-Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
- SourceLocation OpLoc, bool IsArrow,
- CXXScopeSpec &SS,
- SourceLocation TemplateKWLoc,
- NamedDecl *FirstQualifierInScope,
- const DeclarationNameInfo &NameInfo,
- const TemplateArgumentListInfo *TemplateArgs,
- const Scope *S,
- ActOnMemberAccessExtraArgs *ExtraArgs) {
- if (BaseType->isDependentType() ||
- (SS.isSet() && isDependentScopeSpecifier(SS)) ||
- NameInfo.getName().isDependentName())
- return ActOnDependentMemberExpr(Base, BaseType,
- IsArrow, OpLoc,
- SS, TemplateKWLoc, FirstQualifierInScope,
- NameInfo, TemplateArgs);
-
+ExprResult Sema::BuildMemberReferenceExpr(
+ Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
+ CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
+ ActOnMemberAccessExtraArgs *ExtraArgs) {
LookupResult R(*this, NameInfo, LookupMemberName);
// Implicit member accesses.
@@ -796,9 +795,9 @@ Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
TypoExpr *TE = nullptr;
QualType RecordTy = BaseType;
if (IsArrow) RecordTy = RecordTy->castAs<PointerType>()->getPointeeType();
- if (LookupMemberExprInRecord(
- *this, R, nullptr, RecordTy->castAs<RecordType>(), OpLoc, IsArrow,
- SS, TemplateArgs != nullptr, TemplateKWLoc, TE))
+ if (LookupMemberExprInRecord(*this, R, nullptr, RecordTy, OpLoc, IsArrow,
+ SS, TemplateArgs != nullptr, TemplateKWLoc,
+ TE))
return ExprError();
if (TE)
return TE;
@@ -825,6 +824,11 @@ Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
BaseType = Base->getType();
}
+ // BuildMemberReferenceExpr expects the nested-name-specifier, if any, to be
+ // valid.
+ if (SS.isInvalid())
+ return ExprError();
+
return BuildMemberReferenceExpr(Base, BaseType,
OpLoc, IsArrow, SS, TemplateKWLoc,
FirstQualifierInScope, R, TemplateArgs, S,
@@ -926,19 +930,6 @@ BuildMSPropertyRefExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
}
MemberExpr *Sema::BuildMemberExpr(
- Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS,
- SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl,
- bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo,
- QualType Ty, ExprValueKind VK, ExprObjectKind OK,
- const TemplateArgumentListInfo *TemplateArgs) {
- NestedNameSpecifierLoc NNS =
- SS ? SS->getWithLocInContext(Context) : NestedNameSpecifierLoc();
- return BuildMemberExpr(Base, IsArrow, OpLoc, NNS, TemplateKWLoc, Member,
- FoundDecl, HadMultipleCandidates, MemberNameInfo, Ty,
- VK, OK, TemplateArgs);
-}
-
-MemberExpr *Sema::BuildMemberExpr(
Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS,
SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo,
@@ -990,6 +981,18 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
const Scope *S,
bool SuppressQualifierCheck,
ActOnMemberAccessExtraArgs *ExtraArgs) {
+ assert(!SS.isInvalid() && "nested-name-specifier cannot be invalid");
+ // If the member wasn't found in the current instantiation, or if the
+ // arrow operator was used with a dependent non-pointer object expression,
+ // build a CXXDependentScopeMemberExpr.
+ if (R.wasNotFoundInCurrentInstantiation() ||
+ (R.getLookupName().getCXXOverloadedOperator() == OO_Equal &&
+ (SS.isSet() ? SS.getScopeRep()->isDependent()
+ : BaseExprType->isDependentType())))
+ return ActOnDependentMemberExpr(BaseExpr, BaseExprType, IsArrow, OpLoc, SS,
+ TemplateKWLoc, FirstQualifierInScope,
+ R.getLookupNameInfo(), TemplateArgs);
+
QualType BaseType = BaseExprType;
if (IsArrow) {
assert(BaseType->isPointerType());
@@ -997,6 +1000,11 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
}
R.setBaseObjectType(BaseType);
+ assert((SS.isEmpty()
+ ? !BaseType->isDependentType() || computeDeclContext(BaseType)
+ : !isDependentScopeSpecifier(SS) || computeDeclContext(SS)) &&
+ "dependent lookup context that isn't the current instantiation?");
+
// C++1z [expr.ref]p2:
// For the first option (dot) the first expression shall be a glvalue [...]
if (!IsArrow && BaseExpr && BaseExpr->isPRValue()) {
@@ -1025,40 +1033,39 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
<< isa<CXXDestructorDecl>(FD);
if (R.empty()) {
- // Rederive where we looked up.
- DeclContext *DC = (SS.isSet()
- ? computeDeclContext(SS, false)
- : BaseType->castAs<RecordType>()->getDecl());
-
- if (ExtraArgs) {
- ExprResult RetryExpr;
- if (!IsArrow && BaseExpr) {
- SFINAETrap Trap(*this, true);
- ParsedType ObjectType;
- bool MayBePseudoDestructor = false;
- RetryExpr = ActOnStartCXXMemberReference(getCurScope(), BaseExpr,
- OpLoc, tok::arrow, ObjectType,
- MayBePseudoDestructor);
- if (RetryExpr.isUsable() && !Trap.hasErrorOccurred()) {
- CXXScopeSpec TempSS(SS);
- RetryExpr = ActOnMemberAccessExpr(
- ExtraArgs->S, RetryExpr.get(), OpLoc, tok::arrow, TempSS,
- TemplateKWLoc, ExtraArgs->Id, ExtraArgs->ObjCImpDecl);
- }
- if (Trap.hasErrorOccurred())
- RetryExpr = ExprError();
- }
- if (RetryExpr.isUsable()) {
- Diag(OpLoc, diag::err_no_member_overloaded_arrow)
- << MemberName << DC << FixItHint::CreateReplacement(OpLoc, "->");
- return RetryExpr;
+ ExprResult RetryExpr = ExprError();
+ if (ExtraArgs && !IsArrow && BaseExpr && !BaseExpr->isTypeDependent()) {
+ SFINAETrap Trap(*this, true);
+ ParsedType ObjectType;
+ bool MayBePseudoDestructor = false;
+ RetryExpr = ActOnStartCXXMemberReference(getCurScope(), BaseExpr, OpLoc,
+ tok::arrow, ObjectType,
+ MayBePseudoDestructor);
+ if (RetryExpr.isUsable() && !Trap.hasErrorOccurred()) {
+ CXXScopeSpec TempSS(SS);
+ RetryExpr = ActOnMemberAccessExpr(
+ ExtraArgs->S, RetryExpr.get(), OpLoc, tok::arrow, TempSS,
+ TemplateKWLoc, ExtraArgs->Id, ExtraArgs->ObjCImpDecl);
}
+ if (Trap.hasErrorOccurred())
+ RetryExpr = ExprError();
}
- Diag(R.getNameLoc(), diag::err_no_member)
- << MemberName << DC
- << (BaseExpr ? BaseExpr->getSourceRange() : SourceRange());
- return ExprError();
+ // Rederive where we looked up.
+ DeclContext *DC =
+ (SS.isSet() ? computeDeclContext(SS) : computeDeclContext(BaseType));
+ assert(DC);
+
+ if (RetryExpr.isUsable())
+ Diag(OpLoc, diag::err_no_member_overloaded_arrow)
+ << MemberName << DC << FixItHint::CreateReplacement(OpLoc, "->");
+ else
+ Diag(R.getNameLoc(), diag::err_no_member)
+ << MemberName << DC
+ << (SS.isSet()
+ ? SS.getRange()
+ : (BaseExpr ? BaseExpr->getSourceRange() : SourceRange()));
+ return RetryExpr;
}
// Diagnose lookups that find only declarations from a non-base
@@ -1143,7 +1150,8 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
OpLoc);
if (VarDecl *Var = dyn_cast<VarDecl>(MemberDecl)) {
- return BuildMemberExpr(BaseExpr, IsArrow, OpLoc, &SS, TemplateKWLoc, Var,
+ return BuildMemberExpr(BaseExpr, IsArrow, OpLoc,
+ SS.getWithLocInContext(Context), TemplateKWLoc, Var,
FoundDecl, /*HadMultipleCandidates=*/false,
MemberNameInfo, Var->getType().getNonReferenceType(),
VK_LValue, OK_Ordinary);
@@ -1160,22 +1168,24 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
type = MemberFn->getType();
}
- return BuildMemberExpr(BaseExpr, IsArrow, OpLoc, &SS, TemplateKWLoc,
+ return BuildMemberExpr(BaseExpr, IsArrow, OpLoc,
+ SS.getWithLocInContext(Context), TemplateKWLoc,
MemberFn, FoundDecl, /*HadMultipleCandidates=*/false,
MemberNameInfo, type, valueKind, OK_Ordinary);
}
assert(!isa<FunctionDecl>(MemberDecl) && "member function not C++ method?");
if (EnumConstantDecl *Enum = dyn_cast<EnumConstantDecl>(MemberDecl)) {
- return BuildMemberExpr(BaseExpr, IsArrow, OpLoc, &SS, TemplateKWLoc, Enum,
- FoundDecl, /*HadMultipleCandidates=*/false,
- MemberNameInfo, Enum->getType(), VK_PRValue,
- OK_Ordinary);
+ return BuildMemberExpr(
+ BaseExpr, IsArrow, OpLoc, SS.getWithLocInContext(Context),
+ TemplateKWLoc, Enum, FoundDecl, /*HadMultipleCandidates=*/false,
+ MemberNameInfo, Enum->getType(), VK_PRValue, OK_Ordinary);
}
if (VarTemplateDecl *VarTempl = dyn_cast<VarTemplateDecl>(MemberDecl)) {
if (!TemplateArgs) {
- diagnoseMissingTemplateArguments(TemplateName(VarTempl), MemberLoc);
+ diagnoseMissingTemplateArguments(
+ SS, /*TemplateKeyword=*/TemplateKWLoc.isValid(), VarTempl, MemberLoc);
return ExprError();
}
@@ -1194,7 +1204,8 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
if (!Var->getTemplateSpecializationKind())
Var->setTemplateSpecializationKind(TSK_ImplicitInstantiation, MemberLoc);
- return BuildMemberExpr(BaseExpr, IsArrow, OpLoc, &SS, TemplateKWLoc, Var,
+ return BuildMemberExpr(BaseExpr, IsArrow, OpLoc,
+ SS.getWithLocInContext(Context), TemplateKWLoc, Var,
FoundDecl, /*HadMultipleCandidates=*/false,
MemberNameInfo, Var->getType().getNonReferenceType(),
VK_LValue, OK_Ordinary, TemplateArgs);
@@ -1255,7 +1266,6 @@ static bool isPointerToRecordType(QualType T) {
return false;
}
-/// Perform conversions on the LHS of a member access expression.
ExprResult
Sema::PerformMemberExprBaseConversion(Expr *Base, bool IsArrow) {
if (IsArrow && !Base->getType()->isFunctionType())
@@ -1287,7 +1297,6 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
return ExprError();
QualType BaseType = BaseExpr.get()->getType();
- assert(!BaseType->isDependentType());
DeclarationName MemberName = R.getLookupName();
SourceLocation MemberLoc = R.getNameLoc();
@@ -1299,9 +1308,13 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
if (IsArrow) {
if (const PointerType *Ptr = BaseType->getAs<PointerType>())
BaseType = Ptr->getPointeeType();
- else if (const ObjCObjectPointerType *Ptr
- = BaseType->getAs<ObjCObjectPointerType>())
+ else if (const ObjCObjectPointerType *Ptr =
+ BaseType->getAs<ObjCObjectPointerType>())
BaseType = Ptr->getPointeeType();
+ else if (BaseType->isFunctionType())
+ goto fail;
+ else if (BaseType->isDependentType())
+ BaseType = S.Context.DependentTy;
else if (BaseType->isRecordType()) {
// Recover from arrow accesses to records, e.g.:
// struct MyRecord foo;
@@ -1312,15 +1325,13 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
// was encountered while looking for the overloaded operator->.
if (!S.getLangOpts().CPlusPlus) {
S.Diag(OpLoc, diag::err_typecheck_member_reference_suggestion)
- << BaseType << int(IsArrow) << BaseExpr.get()->getSourceRange()
- << FixItHint::CreateReplacement(OpLoc, ".");
+ << BaseType << int(IsArrow) << BaseExpr.get()->getSourceRange()
+ << FixItHint::CreateReplacement(OpLoc, ".");
}
IsArrow = false;
- } else if (BaseType->isFunctionType()) {
- goto fail;
} else {
S.Diag(MemberLoc, diag::err_typecheck_member_reference_arrow)
- << BaseType << BaseExpr.get()->getSourceRange();
+ << BaseType << BaseExpr.get()->getSourceRange();
return ExprError();
}
}
@@ -1341,10 +1352,10 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
}
// Handle field access to simple records.
- if (const RecordType *RTy = BaseType->getAs<RecordType>()) {
+ if (BaseType->getAsRecordDecl()) {
TypoExpr *TE = nullptr;
- if (LookupMemberExprInRecord(S, R, BaseExpr.get(), RTy, OpLoc, IsArrow, SS,
- HasTemplateArgs, TemplateKWLoc, TE))
+ if (LookupMemberExprInRecord(S, R, BaseExpr.get(), BaseType, OpLoc, IsArrow,
+ SS, HasTemplateArgs, TemplateKWLoc, TE))
return ExprError();
// Returning valid-but-null is how we indicate to the caller that
@@ -1352,6 +1363,9 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
// failed, the lookup result will have been cleared--that combined with the
// valid-but-null ExprResult will trigger the appropriate diagnostics.
return ExprResult(TE);
+ } else if (BaseType->isDependentType()) {
+ R.setNotFoundInCurrentInstantiation();
+ return ExprEmpty();
}
// Handle ivar access to Objective-C objects.
@@ -1495,9 +1509,8 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
if (warn) {
if (ObjCMethodDecl *MD = S.getCurMethodDecl()) {
ObjCMethodFamily MF = MD->getMethodFamily();
- warn = (MF != OMF_init && MF != OMF_dealloc &&
- MF != OMF_finalize &&
- !S.IvarBacksCurrentMethodAccessor(IDecl, MD, IV));
+ warn = (MF != OMF_init && MF != OMF_dealloc && MF != OMF_finalize &&
+ !S.ObjC().IvarBacksCurrentMethodAccessor(IDecl, MD, IV));
}
if (warn)
S.Diag(MemberLoc, diag::warn_direct_ivar_access) << IV->getDeclName();
@@ -1635,9 +1648,9 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
}
// Normal property access.
- return S.HandleExprPropertyRefExpr(OPT, BaseExpr.get(), OpLoc, MemberName,
- MemberLoc, SourceLocation(), QualType(),
- false);
+ return S.ObjC().HandleExprPropertyRefExpr(
+ OPT, BaseExpr.get(), OpLoc, MemberName, MemberLoc, SourceLocation(),
+ QualType(), false);
}
if (BaseType->isExtVectorBoolType()) {
@@ -1733,26 +1746,11 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
return ExprError();
}
-/// The main callback when the parser finds something like
-/// expression . [nested-name-specifier] identifier
-/// expression -> [nested-name-specifier] identifier
-/// where 'identifier' encompasses a fairly broad spectrum of
-/// possibilities, including destructor and operator references.
-///
-/// \param OpKind either tok::arrow or tok::period
-/// \param ObjCImpDecl the current Objective-C \@implementation
-/// decl; this is an ugly hack around the fact that Objective-C
-/// \@implementations aren't properly put in the context chain
ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
- tok::TokenKind OpKind,
- CXXScopeSpec &SS,
+ tok::TokenKind OpKind, CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
- UnqualifiedId &Id,
- Decl *ObjCImpDecl) {
- if (SS.isSet() && SS.isInvalid())
- return ExprError();
-
+ UnqualifiedId &Id, Decl *ObjCImpDecl) {
// Warn about the explicit constructor calls Microsoft extension.
if (getLangOpts().MicrosoftExt &&
Id.getKind() == UnqualifiedIdKind::IK_ConstructorName)
@@ -1767,7 +1765,6 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
DecomposeUnqualifiedId(Id, TemplateArgsBuffer,
NameInfo, TemplateArgs);
- DeclarationName Name = NameInfo.getName();
bool IsArrow = (OpKind == tok::arrow);
if (getLangOpts().HLSL && IsArrow)
@@ -1781,13 +1778,6 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
if (Result.isInvalid()) return ExprError();
Base = Result.get();
- if (Base->getType()->isDependentType() || Name.isDependentName() ||
- isDependentScopeSpecifier(SS)) {
- return ActOnDependentMemberExpr(Base, Base->getType(), IsArrow, OpLoc, SS,
- TemplateKWLoc, FirstQualifierInScope,
- NameInfo, TemplateArgs);
- }
-
ActOnMemberAccessExtraArgs ExtraArgs = {S, Id, ObjCImpDecl};
ExprResult Res = BuildMemberReferenceExpr(
Base, Base->getType(), OpLoc, IsArrow, SS, TemplateKWLoc,
@@ -1900,22 +1890,18 @@ Sema::BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
if (getLangOpts().OpenMP && IsArrow &&
!CurContext->isDependentContext() &&
isa<CXXThisExpr>(Base.get()->IgnoreParenImpCasts())) {
- if (auto *PrivateCopy = isOpenMPCapturedDecl(Field)) {
- return getOpenMPCapturedExpr(PrivateCopy, VK, OK,
- MemberNameInfo.getLoc());
+ if (auto *PrivateCopy = OpenMP().isOpenMPCapturedDecl(Field)) {
+ return OpenMP().getOpenMPCapturedExpr(PrivateCopy, VK, OK,
+ MemberNameInfo.getLoc());
}
}
- return BuildMemberExpr(Base.get(), IsArrow, OpLoc, &SS,
- /*TemplateKWLoc=*/SourceLocation(), Field, FoundDecl,
- /*HadMultipleCandidates=*/false, MemberNameInfo,
- MemberType, VK, OK);
+ return BuildMemberExpr(
+ Base.get(), IsArrow, OpLoc, SS.getWithLocInContext(Context),
+ /*TemplateKWLoc=*/SourceLocation(), Field, FoundDecl,
+ /*HadMultipleCandidates=*/false, MemberNameInfo, MemberType, VK, OK);
}
-/// Builds an implicit member access expression. The current context
-/// is known to be an instance method, and the given unqualified lookup
-/// set is known to contain only instance members, at least one of which
-/// is from an appropriate type.
ExprResult
Sema::BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp
index a8853f634c9c..7ccecf055fee 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/TypeLoc.h"
#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
#include "clang/Basic/Builtins.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Edit/Commit.h"
#include "clang/Edit/Rewriters.h"
#include "clang/Lex/Preprocessor.h"
@@ -25,6 +26,7 @@
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaObjC.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/ConvertUTF.h"
#include <optional>
@@ -33,8 +35,9 @@ using namespace clang;
using namespace sema;
using llvm::ArrayRef;
-ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs,
- ArrayRef<Expr *> Strings) {
+ExprResult SemaObjC::ParseObjCStringLiteral(SourceLocation *AtLocs,
+ ArrayRef<Expr *> Strings) {
+ ASTContext &Context = getASTContext();
// Most ObjC strings are formed out of a single piece. However, we *can*
// have strings formed out of multiple @ strings with multiple pptokens in
// each one, e.g. @"foo" "bar" @"baz" "qux" which need to be turned into one
@@ -79,7 +82,9 @@ ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs,
return BuildObjCStringLiteral(AtLocs[0], S);
}
-ExprResult Sema::BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S){
+ExprResult SemaObjC::BuildObjCStringLiteral(SourceLocation AtLoc,
+ StringLiteral *S) {
+ ASTContext &Context = getASTContext();
// Verify that this composite string is acceptable for ObjC strings.
if (CheckObjCString(S))
return true;
@@ -100,8 +105,8 @@ ExprResult Sema::BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S){
else
NSIdent = &Context.Idents.get(StringClass);
- NamedDecl *IF = LookupSingleName(TUScope, NSIdent, AtLoc,
- LookupOrdinaryName);
+ NamedDecl *IF = SemaRef.LookupSingleName(SemaRef.TUScope, NSIdent, AtLoc,
+ Sema::LookupOrdinaryName);
if (ObjCInterfaceDecl *StrIF = dyn_cast_or_null<ObjCInterfaceDecl>(IF)) {
Context.setObjCConstantStringInterface(StrIF);
Ty = Context.getObjCConstantStringInterface();
@@ -115,8 +120,8 @@ ExprResult Sema::BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S){
}
} else {
IdentifierInfo *NSIdent = NSAPIObj->getNSClassId(NSAPI::ClassId_NSString);
- NamedDecl *IF = LookupSingleName(TUScope, NSIdent, AtLoc,
- LookupOrdinaryName);
+ NamedDecl *IF = SemaRef.LookupSingleName(SemaRef.TUScope, NSIdent, AtLoc,
+ Sema::LookupOrdinaryName);
if (ObjCInterfaceDecl *StrIF = dyn_cast_or_null<ObjCInterfaceDecl>(IF)) {
Context.setObjCConstantStringInterface(StrIF);
Ty = Context.getObjCConstantStringInterface();
@@ -168,25 +173,25 @@ static bool validateBoxingMethod(Sema &S, SourceLocation Loc,
}
/// Maps ObjCLiteralKind to NSClassIdKindKind
-static NSAPI::NSClassIdKindKind ClassKindFromLiteralKind(
- Sema::ObjCLiteralKind LiteralKind) {
+static NSAPI::NSClassIdKindKind
+ClassKindFromLiteralKind(SemaObjC::ObjCLiteralKind LiteralKind) {
switch (LiteralKind) {
- case Sema::LK_Array:
- return NSAPI::ClassId_NSArray;
- case Sema::LK_Dictionary:
- return NSAPI::ClassId_NSDictionary;
- case Sema::LK_Numeric:
- return NSAPI::ClassId_NSNumber;
- case Sema::LK_String:
- return NSAPI::ClassId_NSString;
- case Sema::LK_Boxed:
- return NSAPI::ClassId_NSValue;
-
- // there is no corresponding matching
- // between LK_None/LK_Block and NSClassIdKindKind
- case Sema::LK_Block:
- case Sema::LK_None:
- break;
+ case SemaObjC::LK_Array:
+ return NSAPI::ClassId_NSArray;
+ case SemaObjC::LK_Dictionary:
+ return NSAPI::ClassId_NSDictionary;
+ case SemaObjC::LK_Numeric:
+ return NSAPI::ClassId_NSNumber;
+ case SemaObjC::LK_String:
+ return NSAPI::ClassId_NSString;
+ case SemaObjC::LK_Boxed:
+ return NSAPI::ClassId_NSValue;
+
+ // there is no corresponding matching
+ // between LK_None/LK_Block and NSClassIdKindKind
+ case SemaObjC::LK_Block:
+ case SemaObjC::LK_None:
+ break;
}
llvm_unreachable("LiteralKind can't be converted into a ClassKind");
}
@@ -194,12 +199,13 @@ static NSAPI::NSClassIdKindKind ClassKindFromLiteralKind(
/// Validates ObjCInterfaceDecl availability.
/// ObjCInterfaceDecl, used to create ObjC literals, should be defined
/// if clang not in a debugger mode.
-static bool ValidateObjCLiteralInterfaceDecl(Sema &S, ObjCInterfaceDecl *Decl,
- SourceLocation Loc,
- Sema::ObjCLiteralKind LiteralKind) {
+static bool
+ValidateObjCLiteralInterfaceDecl(Sema &S, ObjCInterfaceDecl *Decl,
+ SourceLocation Loc,
+ SemaObjC::ObjCLiteralKind LiteralKind) {
if (!Decl) {
NSAPI::NSClassIdKindKind Kind = ClassKindFromLiteralKind(LiteralKind);
- IdentifierInfo *II = S.NSAPIObj->getNSClassId(Kind);
+ IdentifierInfo *II = S.ObjC().NSAPIObj->getNSClassId(Kind);
S.Diag(Loc, diag::err_undeclared_objc_literal_class)
<< II->getName() << LiteralKind;
return false;
@@ -216,11 +222,11 @@ static bool ValidateObjCLiteralInterfaceDecl(Sema &S, ObjCInterfaceDecl *Decl,
/// Looks up ObjCInterfaceDecl of a given NSClassIdKindKind.
/// Used to create ObjC literals, such as NSDictionary (@{}),
/// NSArray (@[]) and Boxed Expressions (@())
-static ObjCInterfaceDecl *LookupObjCInterfaceDeclForLiteral(Sema &S,
- SourceLocation Loc,
- Sema::ObjCLiteralKind LiteralKind) {
+static ObjCInterfaceDecl *
+LookupObjCInterfaceDeclForLiteral(Sema &S, SourceLocation Loc,
+ SemaObjC::ObjCLiteralKind LiteralKind) {
NSAPI::NSClassIdKindKind ClassKind = ClassKindFromLiteralKind(LiteralKind);
- IdentifierInfo *II = S.NSAPIObj->getNSClassId(ClassKind);
+ IdentifierInfo *II = S.ObjC().NSAPIObj->getNSClassId(ClassKind);
NamedDecl *IF = S.LookupSingleName(S.TUScope, II, Loc,
Sema::LookupOrdinaryName);
ObjCInterfaceDecl *ID = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
@@ -240,7 +246,7 @@ static ObjCInterfaceDecl *LookupObjCInterfaceDeclForLiteral(Sema &S,
/// Retrieve the NSNumber factory method that should be used to create
/// an Objective-C literal for the given type.
-static ObjCMethodDecl *getNSNumberFactoryMethod(Sema &S, SourceLocation Loc,
+static ObjCMethodDecl *getNSNumberFactoryMethod(SemaObjC &S, SourceLocation Loc,
QualType NumberType,
bool isLiteral = false,
SourceRange R = SourceRange()) {
@@ -262,13 +268,13 @@ static ObjCMethodDecl *getNSNumberFactoryMethod(Sema &S, SourceLocation Loc,
Selector Sel = S.NSAPIObj->getNSNumberLiteralSelector(*Kind,
/*Instance=*/false);
- ASTContext &CX = S.Context;
+ ASTContext &CX = S.SemaRef.Context;
// Look up the NSNumber class, if we haven't done so already. It's cached
// in the Sema instance.
if (!S.NSNumberDecl) {
- S.NSNumberDecl = LookupObjCInterfaceDeclForLiteral(S, Loc,
- Sema::LK_Numeric);
+ S.NSNumberDecl =
+ LookupObjCInterfaceDeclForLiteral(S.SemaRef, Loc, SemaObjC::LK_Numeric);
if (!S.NSNumberDecl) {
return nullptr;
}
@@ -294,15 +300,14 @@ static ObjCMethodDecl *getNSNumberFactoryMethod(Sema &S, SourceLocation Loc,
/*isImplicitlyDeclared=*/true,
/*isDefined=*/false, ObjCImplementationControl::Required,
/*HasRelatedResultType=*/false);
- ParmVarDecl *value = ParmVarDecl::Create(S.Context, Method,
- SourceLocation(), SourceLocation(),
- &CX.Idents.get("value"),
- NumberType, /*TInfo=*/nullptr,
- SC_None, nullptr);
- Method->setMethodParams(S.Context, value, std::nullopt);
+ ParmVarDecl *value =
+ ParmVarDecl::Create(S.SemaRef.Context, Method, SourceLocation(),
+ SourceLocation(), &CX.Idents.get("value"),
+ NumberType, /*TInfo=*/nullptr, SC_None, nullptr);
+ Method->setMethodParams(S.SemaRef.Context, value, std::nullopt);
}
- if (!validateBoxingMethod(S, Loc, S.NSNumberDecl, Sel, Method))
+ if (!validateBoxingMethod(S.SemaRef, Loc, S.NSNumberDecl, Sel, Method))
return nullptr;
// Note: if the parameter type is out-of-line, we'll catch it later in the
@@ -314,7 +319,9 @@ static ObjCMethodDecl *getNSNumberFactoryMethod(Sema &S, SourceLocation Loc,
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *".
-ExprResult Sema::BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number) {
+ExprResult SemaObjC::BuildObjCNumericLiteral(SourceLocation AtLoc,
+ Expr *Number) {
+ ASTContext &Context = getASTContext();
// Determine the type of the literal.
QualType NumberType = Number->getType();
if (CharacterLiteral *Char = dyn_cast<CharacterLiteral>(Number)) {
@@ -352,31 +359,30 @@ ExprResult Sema::BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number) {
ParmVarDecl *ParamDecl = Method->parameters()[0];
InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
ParamDecl);
- ExprResult ConvertedNumber = PerformCopyInitialization(Entity,
- SourceLocation(),
- Number);
+ ExprResult ConvertedNumber =
+ SemaRef.PerformCopyInitialization(Entity, SourceLocation(), Number);
if (ConvertedNumber.isInvalid())
return ExprError();
Number = ConvertedNumber.get();
// Use the effective source range of the literal, including the leading '@'.
- return MaybeBindToTemporary(
- new (Context) ObjCBoxedExpr(Number, NSNumberPointer, Method,
- SourceRange(AtLoc, NR.getEnd())));
+ return SemaRef.MaybeBindToTemporary(new (Context) ObjCBoxedExpr(
+ Number, NSNumberPointer, Method, SourceRange(AtLoc, NR.getEnd())));
}
-ExprResult Sema::ActOnObjCBoolLiteral(SourceLocation AtLoc,
- SourceLocation ValueLoc,
- bool Value) {
+ExprResult SemaObjC::ActOnObjCBoolLiteral(SourceLocation AtLoc,
+ SourceLocation ValueLoc, bool Value) {
+ ASTContext &Context = getASTContext();
ExprResult Inner;
if (getLangOpts().CPlusPlus) {
- Inner = ActOnCXXBoolLiteral(ValueLoc, Value? tok::kw_true : tok::kw_false);
+ Inner = SemaRef.ActOnCXXBoolLiteral(ValueLoc,
+ Value ? tok::kw_true : tok::kw_false);
} else {
// C doesn't actually have a way to represent literal values of type
// _Bool. So, we'll use 0/1 and implicit cast to _Bool.
- Inner = ActOnIntegerConstant(ValueLoc, Value? 1 : 0);
- Inner = ImpCastExprToType(Inner.get(), Context.BoolTy,
- CK_IntegralToBoolean);
+ Inner = SemaRef.ActOnIntegerConstant(ValueLoc, Value ? 1 : 0);
+ Inner = SemaRef.ImpCastExprToType(Inner.get(), Context.BoolTy,
+ CK_IntegralToBoolean);
}
return BuildObjCNumericLiteral(AtLoc, Inner.get());
@@ -428,7 +434,8 @@ static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element,
isa<FloatingLiteral>(OrigElement) ||
isa<ObjCBoolLiteralExpr>(OrigElement) ||
isa<CXXBoolLiteralExpr>(OrigElement)) {
- if (S.NSAPIObj->getNSNumberFactoryMethodKind(OrigElement->getType())) {
+ if (S.ObjC().NSAPIObj->getNSNumberFactoryMethodKind(
+ OrigElement->getType())) {
int Which = isa<CharacterLiteral>(OrigElement) ? 1
: (isa<CXXBoolLiteralExpr>(OrigElement) ||
isa<ObjCBoolLiteralExpr>(OrigElement)) ? 2
@@ -438,8 +445,8 @@ static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element,
<< Which << OrigElement->getSourceRange()
<< FixItHint::CreateInsertion(OrigElement->getBeginLoc(), "@");
- Result =
- S.BuildObjCNumericLiteral(OrigElement->getBeginLoc(), OrigElement);
+ Result = S.ObjC().BuildObjCNumericLiteral(OrigElement->getBeginLoc(),
+ OrigElement);
if (Result.isInvalid())
return ExprError();
@@ -454,7 +461,8 @@ static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element,
<< 0 << OrigElement->getSourceRange()
<< FixItHint::CreateInsertion(OrigElement->getBeginLoc(), "@");
- Result = S.BuildObjCStringLiteral(OrigElement->getBeginLoc(), String);
+ Result =
+ S.ObjC().BuildObjCStringLiteral(OrigElement->getBeginLoc(), String);
if (Result.isInvalid())
return ExprError();
@@ -498,7 +506,8 @@ static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element,
Element->getBeginLoc(), Element);
}
-ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
+ExprResult SemaObjC::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
+ ASTContext &Context = getASTContext();
if (ValueExpr->isTypeDependent()) {
ObjCBoxedExpr *BoxedExpr =
new (Context) ObjCBoxedExpr(ValueExpr, Context.DependentTy, nullptr, SR);
@@ -507,7 +516,7 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
ObjCMethodDecl *BoxingMethod = nullptr;
QualType BoxedType;
// Convert the expression to an RValue, so we can check for pointer types...
- ExprResult RValue = DefaultFunctionArrayLvalueConversion(ValueExpr);
+ ExprResult RValue = SemaRef.DefaultFunctionArrayLvalueConversion(ValueExpr);
if (RValue.isInvalid()) {
return ExprError();
}
@@ -519,8 +528,8 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
if (Context.hasSameUnqualifiedType(PointeeType, Context.CharTy)) {
if (!NSStringDecl) {
- NSStringDecl = LookupObjCInterfaceDeclForLiteral(*this, Loc,
- Sema::LK_String);
+ NSStringDecl =
+ LookupObjCInterfaceDeclForLiteral(SemaRef, Loc, LK_String);
if (!NSStringDecl) {
return ExprError();
}
@@ -582,9 +591,9 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
BoxingMethod = M;
}
- if (!validateBoxingMethod(*this, Loc, NSStringDecl,
+ if (!validateBoxingMethod(SemaRef, Loc, NSStringDecl,
stringWithUTF8String, BoxingMethod))
- return ExprError();
+ return ExprError();
StringWithUTF8StringMethod = BoxingMethod;
}
@@ -651,8 +660,7 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
// Look up the NSValue class, if we haven't done so already. It's cached
// in the Sema instance.
if (!NSValueDecl) {
- NSValueDecl = LookupObjCInterfaceDeclForLiteral(*this, Loc,
- Sema::LK_Boxed);
+ NSValueDecl = LookupObjCInterfaceDeclForLiteral(SemaRef, Loc, LK_Boxed);
if (!NSValueDecl) {
return ExprError();
}
@@ -663,10 +671,8 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
}
if (!ValueWithBytesObjCTypeMethod) {
- IdentifierInfo *II[] = {
- &Context.Idents.get("valueWithBytes"),
- &Context.Idents.get("objCType")
- };
+ const IdentifierInfo *II[] = {&Context.Idents.get("valueWithBytes"),
+ &Context.Idents.get("objCType")};
Selector ValueWithBytesObjCType = Context.Selectors.getSelector(2, II);
// Look for the appropriate method within NSValue.
@@ -710,7 +716,7 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
BoxingMethod = M;
}
- if (!validateBoxingMethod(*this, Loc, NSValueDecl,
+ if (!validateBoxingMethod(SemaRef, Loc, NSValueDecl,
ValueWithBytesObjCType, BoxingMethod))
return ExprError();
@@ -733,20 +739,20 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
return ExprError();
}
- DiagnoseUseOfDecl(BoxingMethod, Loc);
+ SemaRef.DiagnoseUseOfDecl(BoxingMethod, Loc);
ExprResult ConvertedValueExpr;
if (ValueType->isObjCBoxableRecordType()) {
InitializedEntity IE = InitializedEntity::InitializeTemporary(ValueType);
- ConvertedValueExpr = PerformCopyInitialization(IE, ValueExpr->getExprLoc(),
- ValueExpr);
+ ConvertedValueExpr = SemaRef.PerformCopyInitialization(
+ IE, ValueExpr->getExprLoc(), ValueExpr);
} else {
// Convert the expression to the type that the parameter requires.
ParmVarDecl *ParamDecl = BoxingMethod->parameters()[0];
InitializedEntity IE = InitializedEntity::InitializeParameter(Context,
ParamDecl);
- ConvertedValueExpr = PerformCopyInitialization(IE, SourceLocation(),
- ValueExpr);
+ ConvertedValueExpr =
+ SemaRef.PerformCopyInitialization(IE, SourceLocation(), ValueExpr);
}
if (ConvertedValueExpr.isInvalid())
@@ -756,16 +762,16 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
ObjCBoxedExpr *BoxedExpr =
new (Context) ObjCBoxedExpr(ValueExpr, BoxedType,
BoxingMethod, SR);
- return MaybeBindToTemporary(BoxedExpr);
+ return SemaRef.MaybeBindToTemporary(BoxedExpr);
}
/// Build an ObjC subscript pseudo-object expression, given that
/// that's supported by the runtime.
-ExprResult Sema::BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
- Expr *IndexExpr,
- ObjCMethodDecl *getterMethod,
- ObjCMethodDecl *setterMethod) {
- assert(!LangOpts.isSubscriptPointerArithmetic());
+ExprResult SemaObjC::BuildObjCSubscriptExpression(
+ SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr,
+ ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod) {
+ assert(!getLangOpts().isSubscriptPointerArithmetic());
+ ASTContext &Context = getASTContext();
// We can't get dependent types here; our callers should have
// filtered them out.
@@ -774,13 +780,13 @@ ExprResult Sema::BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
// Filter out placeholders in the index. In theory, overloads could
// be preserved here, although that might not actually work correctly.
- ExprResult Result = CheckPlaceholderExpr(IndexExpr);
+ ExprResult Result = SemaRef.CheckPlaceholderExpr(IndexExpr);
if (Result.isInvalid())
return ExprError();
IndexExpr = Result.get();
// Perform lvalue-to-rvalue conversion on the base.
- Result = DefaultLvalueConversion(BaseExpr);
+ Result = SemaRef.DefaultLvalueConversion(BaseExpr);
if (Result.isInvalid())
return ExprError();
BaseExpr = Result.get();
@@ -791,12 +797,14 @@ ExprResult Sema::BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
getterMethod, setterMethod, RB);
}
-ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
+ExprResult SemaObjC::BuildObjCArrayLiteral(SourceRange SR,
+ MultiExprArg Elements) {
+ ASTContext &Context = getASTContext();
SourceLocation Loc = SR.getBegin();
if (!NSArrayDecl) {
- NSArrayDecl = LookupObjCInterfaceDeclForLiteral(*this, Loc,
- Sema::LK_Array);
+ NSArrayDecl =
+ LookupObjCInterfaceDeclForLiteral(SemaRef, Loc, SemaObjC::LK_Array);
if (!NSArrayDecl) {
return ExprError();
}
@@ -837,7 +845,7 @@ ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
Method->setMethodParams(Context, Params, std::nullopt);
}
- if (!validateBoxingMethod(*this, Loc, NSArrayDecl, Sel, Method))
+ if (!validateBoxingMethod(SemaRef, Loc, NSArrayDecl, Sel, Method))
return ExprError();
// Dig out the type that all elements should be converted to.
@@ -877,9 +885,8 @@ ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
// performing conversions as necessary.
Expr **ElementsBuffer = Elements.data();
for (unsigned I = 0, N = Elements.size(); I != N; ++I) {
- ExprResult Converted = CheckObjCCollectionLiteralElement(*this,
- ElementsBuffer[I],
- RequiredType, true);
+ ExprResult Converted = CheckObjCCollectionLiteralElement(
+ SemaRef, ElementsBuffer[I], RequiredType, true);
if (Converted.isInvalid())
return ExprError();
@@ -890,9 +897,8 @@ ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
= Context.getObjCObjectPointerType(
Context.getObjCInterfaceType(NSArrayDecl));
- return MaybeBindToTemporary(
- ObjCArrayLiteral::Create(Context, Elements, Ty,
- ArrayWithObjectsMethod, SR));
+ return SemaRef.MaybeBindToTemporary(ObjCArrayLiteral::Create(
+ Context, Elements, Ty, ArrayWithObjectsMethod, SR));
}
/// Check for duplicate keys in an ObjC dictionary literal. For instance:
@@ -951,13 +957,14 @@ CheckObjCDictionaryLiteralDuplicateKeys(Sema &S,
}
}
-ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
- MutableArrayRef<ObjCDictionaryElement> Elements) {
+ExprResult SemaObjC::BuildObjCDictionaryLiteral(
+ SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements) {
+ ASTContext &Context = getASTContext();
SourceLocation Loc = SR.getBegin();
if (!NSDictionaryDecl) {
- NSDictionaryDecl = LookupObjCInterfaceDeclForLiteral(*this, Loc,
- Sema::LK_Dictionary);
+ NSDictionaryDecl = LookupObjCInterfaceDeclForLiteral(
+ SemaRef, Loc, SemaObjC::LK_Dictionary);
if (!NSDictionaryDecl) {
return ExprError();
}
@@ -1007,9 +1014,9 @@ ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
Method->setMethodParams(Context, Params, std::nullopt);
}
- if (!validateBoxingMethod(*this, SR.getBegin(), NSDictionaryDecl, Sel,
+ if (!validateBoxingMethod(SemaRef, SR.getBegin(), NSDictionaryDecl, Sel,
Method))
- return ExprError();
+ return ExprError();
// Dig out the type that all values should be converted to.
QualType ValueT = Method->parameters()[0]->getType();
@@ -1086,14 +1093,14 @@ ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
bool HasPackExpansions = false;
for (ObjCDictionaryElement &Element : Elements) {
// Check the key.
- ExprResult Key = CheckObjCCollectionLiteralElement(*this, Element.Key,
- KeyT);
+ ExprResult Key =
+ CheckObjCCollectionLiteralElement(SemaRef, Element.Key, KeyT);
if (Key.isInvalid())
return ExprError();
// Check the value.
- ExprResult Value
- = CheckObjCCollectionLiteralElement(*this, Element.Value, ValueT);
+ ExprResult Value =
+ CheckObjCCollectionLiteralElement(SemaRef, Element.Value, ValueT);
if (Value.isInvalid())
return ExprError();
@@ -1121,13 +1128,14 @@ ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
auto *Literal =
ObjCDictionaryLiteral::Create(Context, Elements, HasPackExpansions, Ty,
DictionaryWithObjectsMethod, SR);
- CheckObjCDictionaryLiteralDuplicateKeys(*this, Literal);
- return MaybeBindToTemporary(Literal);
+ CheckObjCDictionaryLiteralDuplicateKeys(SemaRef, Literal);
+ return SemaRef.MaybeBindToTemporary(Literal);
}
-ExprResult Sema::BuildObjCEncodeExpression(SourceLocation AtLoc,
- TypeSourceInfo *EncodedTypeInfo,
- SourceLocation RParenLoc) {
+ExprResult SemaObjC::BuildObjCEncodeExpression(SourceLocation AtLoc,
+ TypeSourceInfo *EncodedTypeInfo,
+ SourceLocation RParenLoc) {
+ ASTContext &Context = getASTContext();
QualType EncodedType = EncodedTypeInfo->getType();
QualType StrTy;
if (EncodedType->isDependentType())
@@ -1135,9 +1143,9 @@ ExprResult Sema::BuildObjCEncodeExpression(SourceLocation AtLoc,
else {
if (!EncodedType->getAsArrayTypeUnsafe() && //// Incomplete array is handled.
!EncodedType->isVoidType()) // void is handled too.
- if (RequireCompleteType(AtLoc, EncodedType,
- diag::err_incomplete_type_objc_at_encode,
- EncodedTypeInfo->getTypeLoc()))
+ if (SemaRef.RequireCompleteType(AtLoc, EncodedType,
+ diag::err_incomplete_type_objc_at_encode,
+ EncodedTypeInfo->getTypeLoc()))
return ExprError();
std::string Str;
@@ -1155,17 +1163,18 @@ ExprResult Sema::BuildObjCEncodeExpression(SourceLocation AtLoc,
return new (Context) ObjCEncodeExpr(StrTy, EncodedTypeInfo, AtLoc, RParenLoc);
}
-ExprResult Sema::ParseObjCEncodeExpression(SourceLocation AtLoc,
- SourceLocation EncodeLoc,
- SourceLocation LParenLoc,
- ParsedType ty,
- SourceLocation RParenLoc) {
+ExprResult SemaObjC::ParseObjCEncodeExpression(SourceLocation AtLoc,
+ SourceLocation EncodeLoc,
+ SourceLocation LParenLoc,
+ ParsedType ty,
+ SourceLocation RParenLoc) {
+ ASTContext &Context = getASTContext();
// FIXME: Preserve type source info ?
TypeSourceInfo *TInfo;
- QualType EncodedType = GetTypeFromParser(ty, &TInfo);
+ QualType EncodedType = SemaRef.GetTypeFromParser(ty, &TInfo);
if (!TInfo)
- TInfo = Context.getTrivialTypeSourceInfo(EncodedType,
- getLocForEndOfToken(LParenLoc));
+ TInfo = Context.getTrivialTypeSourceInfo(
+ EncodedType, SemaRef.getLocForEndOfToken(LParenLoc));
return BuildObjCEncodeExpression(AtLoc, TInfo, RParenLoc);
}
@@ -1184,8 +1193,8 @@ static bool HelperToDiagnoseMismatchedMethodsInGlobalPool(Sema &S,
isa<ObjCImplDecl>(MatchingMethodDecl->getDeclContext()) ||
MatchingMethodDecl->getSelector() != Method->getSelector())
continue;
- if (!S.MatchTwoMethodDeclarations(Method,
- MatchingMethodDecl, Sema::MMS_loose)) {
+ if (!S.ObjC().MatchTwoMethodDeclarations(Method, MatchingMethodDecl,
+ SemaObjC::MMS_loose)) {
if (!Warned) {
Warned = true;
S.Diag(AtLoc, diag::warn_multiple_selectors)
@@ -1210,8 +1219,9 @@ static void DiagnoseMismatchedSelectors(Sema &S, SourceLocation AtLoc,
S.Diags.isIgnored(diag::warn_multiple_selectors, SourceLocation()))
return;
bool Warned = false;
- for (Sema::GlobalMethodPool::iterator b = S.MethodPool.begin(),
- e = S.MethodPool.end(); b != e; b++) {
+ for (SemaObjC::GlobalMethodPool::iterator b = S.ObjC().MethodPool.begin(),
+ e = S.ObjC().MethodPool.end();
+ b != e; b++) {
// first, instance methods
ObjCMethodList &InstMethList = b->second.first;
if (HelperToDiagnoseMismatchedMethodsInGlobalPool(S, AtLoc, LParenLoc, RParenLoc,
@@ -1255,8 +1265,8 @@ static ObjCMethodDecl *LookupDirectMethodInMethodList(Sema &S, Selector Sel,
static ObjCMethodDecl *LookupDirectMethodInGlobalPool(Sema &S, Selector Sel,
bool &onlyDirect,
bool &anyDirect) {
- auto Iter = S.MethodPool.find(Sel);
- if (Iter == S.MethodPool.end())
+ auto Iter = S.ObjC().MethodPool.find(Sel);
+ if (Iter == S.ObjC().MethodPool.end())
return nullptr;
ObjCMethodDecl *DirectInstance = LookupDirectMethodInMethodList(
@@ -1288,12 +1298,13 @@ static ObjCMethodDecl *findMethodInCurrentClass(Sema &S, Selector Sel) {
return nullptr;
}
-ExprResult Sema::ParseObjCSelectorExpression(Selector Sel,
- SourceLocation AtLoc,
- SourceLocation SelLoc,
- SourceLocation LParenLoc,
- SourceLocation RParenLoc,
- bool WarnMultipleSelectors) {
+ExprResult SemaObjC::ParseObjCSelectorExpression(Selector Sel,
+ SourceLocation AtLoc,
+ SourceLocation SelLoc,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc,
+ bool WarnMultipleSelectors) {
+ ASTContext &Context = getASTContext();
ObjCMethodDecl *Method = LookupInstanceMethodInGlobalPool(Sel,
SourceRange(LParenLoc, RParenLoc));
if (!Method)
@@ -1311,13 +1322,13 @@ ExprResult Sema::ParseObjCSelectorExpression(Selector Sel,
} else
Diag(SelLoc, diag::warn_undeclared_selector) << Sel;
} else {
- DiagnoseMismatchedSelectors(*this, AtLoc, Method, LParenLoc, RParenLoc,
+ DiagnoseMismatchedSelectors(SemaRef, AtLoc, Method, LParenLoc, RParenLoc,
WarnMultipleSelectors);
bool onlyDirect = true;
bool anyDirect = false;
ObjCMethodDecl *GlobalDirectMethod =
- LookupDirectMethodInGlobalPool(*this, Sel, onlyDirect, anyDirect);
+ LookupDirectMethodInGlobalPool(SemaRef, Sel, onlyDirect, anyDirect);
if (onlyDirect) {
Diag(AtLoc, diag::err_direct_selector_expression)
@@ -1328,7 +1339,8 @@ ExprResult Sema::ParseObjCSelectorExpression(Selector Sel,
// If we saw any direct methods, see if we see a direct member of the
// current class. If so, the @selector will likely be used to refer to
// this direct method.
- ObjCMethodDecl *LikelyTargetMethod = findMethodInCurrentClass(*this, Sel);
+ ObjCMethodDecl *LikelyTargetMethod =
+ findMethodInCurrentClass(SemaRef, Sel);
if (LikelyTargetMethod && LikelyTargetMethod->isDirectMethod()) {
Diag(AtLoc, diag::warn_potentially_direct_selector_expression) << Sel;
Diag(LikelyTargetMethod->getLocation(),
@@ -1349,7 +1361,7 @@ ExprResult Sema::ParseObjCSelectorExpression(Selector Sel,
if (Method &&
Method->getImplementationControl() !=
ObjCImplementationControl::Optional &&
- !getSourceManager().isInSystemHeader(Method->getLocation()))
+ !SemaRef.getSourceManager().isInSystemHeader(Method->getLocation()))
ReferencedSelectors.insert(std::make_pair(Sel, AtLoc));
// In ARC, forbid the user from using @selector for
@@ -1382,12 +1394,13 @@ ExprResult Sema::ParseObjCSelectorExpression(Selector Sel,
return new (Context) ObjCSelectorExpr(Ty, Sel, AtLoc, RParenLoc);
}
-ExprResult Sema::ParseObjCProtocolExpression(IdentifierInfo *ProtocolId,
- SourceLocation AtLoc,
- SourceLocation ProtoLoc,
- SourceLocation LParenLoc,
- SourceLocation ProtoIdLoc,
- SourceLocation RParenLoc) {
+ExprResult SemaObjC::ParseObjCProtocolExpression(IdentifierInfo *ProtocolId,
+ SourceLocation AtLoc,
+ SourceLocation ProtoLoc,
+ SourceLocation LParenLoc,
+ SourceLocation ProtoIdLoc,
+ SourceLocation RParenLoc) {
+ ASTContext &Context = getASTContext();
ObjCProtocolDecl* PDecl = LookupProtocol(ProtocolId, ProtoIdLoc);
if (!PDecl) {
Diag(ProtoLoc, diag::err_undeclared_protocol) << ProtocolId;
@@ -1411,8 +1424,8 @@ ExprResult Sema::ParseObjCProtocolExpression(IdentifierInfo *ProtocolId,
}
/// Try to capture an implicit reference to 'self'.
-ObjCMethodDecl *Sema::tryCaptureObjCSelf(SourceLocation Loc) {
- DeclContext *DC = getFunctionLevelDeclContext();
+ObjCMethodDecl *SemaObjC::tryCaptureObjCSelf(SourceLocation Loc) {
+ DeclContext *DC = SemaRef.getFunctionLevelDeclContext();
// If we're not in an ObjC method, error out. Note that, unlike the
// C++ case, we don't require an instance method --- class methods
@@ -1421,7 +1434,7 @@ ObjCMethodDecl *Sema::tryCaptureObjCSelf(SourceLocation Loc) {
if (!method)
return nullptr;
- tryCaptureVariable(method->getSelfDecl(), Loc);
+ SemaRef.tryCaptureVariable(method->getSelfDecl(), Loc);
return method;
}
@@ -1515,16 +1528,15 @@ static QualType getBaseMessageSendResultType(Sema &S,
return transferNullability(ReceiverType);
}
-QualType Sema::getMessageSendResultType(const Expr *Receiver,
- QualType ReceiverType,
- ObjCMethodDecl *Method,
- bool isClassMessage,
- bool isSuperMessage) {
+QualType SemaObjC::getMessageSendResultType(const Expr *Receiver,
+ QualType ReceiverType,
+ ObjCMethodDecl *Method,
+ bool isClassMessage,
+ bool isSuperMessage) {
+ ASTContext &Context = getASTContext();
// Produce the result type.
- QualType resultType = getBaseMessageSendResultType(*this, ReceiverType,
- Method,
- isClassMessage,
- isSuperMessage);
+ QualType resultType = getBaseMessageSendResultType(
+ SemaRef, ReceiverType, Method, isClassMessage, isSuperMessage);
// If this is a class message, ignore the nullability of the receiver.
if (isClassMessage) {
@@ -1653,10 +1665,11 @@ findExplicitInstancetypeDeclarer(const ObjCMethodDecl *MD,
return nullptr;
}
-void Sema::EmitRelatedResultTypeNoteForReturn(QualType destType) {
+void SemaObjC::EmitRelatedResultTypeNoteForReturn(QualType destType) {
+ ASTContext &Context = getASTContext();
// Only complain if we're in an ObjC method and the required return
// type doesn't match the method's declared return type.
- ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(CurContext);
+ ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(SemaRef.CurContext);
if (!MD || !MD->hasRelatedResultType() ||
Context.hasSameUnqualifiedType(destType, MD->getReturnType()))
return;
@@ -1682,7 +1695,8 @@ void Sema::EmitRelatedResultTypeNoteForReturn(QualType destType) {
<< family;
}
-void Sema::EmitRelatedResultTypeNote(const Expr *E) {
+void SemaObjC::EmitRelatedResultTypeNote(const Expr *E) {
+ ASTContext &Context = getASTContext();
E = E->IgnoreParenImpCasts();
const ObjCMessageExpr *MsgSend = dyn_cast<ObjCMessageExpr>(E);
if (!MsgSend)
@@ -1708,12 +1722,13 @@ void Sema::EmitRelatedResultTypeNote(const Expr *E) {
<< MsgSend->getType();
}
-bool Sema::CheckMessageArgumentTypes(
+bool SemaObjC::CheckMessageArgumentTypes(
const Expr *Receiver, QualType ReceiverType, MultiExprArg Args,
Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method,
bool isClassMessage, bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType,
ExprValueKind &VK) {
+ ASTContext &Context = getASTContext();
SourceLocation SelLoc;
if (!SelectorLocs.empty() && SelectorLocs.front().isValid())
SelLoc = SelectorLocs.front();
@@ -1729,9 +1744,9 @@ bool Sema::CheckMessageArgumentTypes(
ExprResult result;
if (getLangOpts().DebuggerSupport) {
QualType paramTy; // ignored
- result = checkUnknownAnyArg(SelLoc, Args[i], paramTy);
+ result = SemaRef.checkUnknownAnyArg(SelLoc, Args[i], paramTy);
} else {
- result = DefaultArgumentPromotion(Args[i]);
+ result = SemaRef.DefaultArgumentPromotion(Args[i]);
}
if (result.isInvalid())
return true;
@@ -1837,7 +1852,7 @@ bool Sema::CheckMessageArgumentTypes(
// from the argument.
if (param->getType() == Context.UnknownAnyTy) {
QualType paramType;
- ExprResult argE = checkUnknownAnyArg(SelLoc, argExpr, paramType);
+ ExprResult argE = SemaRef.checkUnknownAnyArg(SelLoc, argExpr, paramType);
if (argE.isInvalid()) {
IsError = true;
} else {
@@ -1857,14 +1872,15 @@ bool Sema::CheckMessageArgumentTypes(
*typeArgs,
ObjCSubstitutionContext::Parameter);
- if (RequireCompleteType(argExpr->getSourceRange().getBegin(),
- paramType,
- diag::err_call_incomplete_argument, argExpr))
+ if (SemaRef.RequireCompleteType(
+ argExpr->getSourceRange().getBegin(), paramType,
+ diag::err_call_incomplete_argument, argExpr))
return true;
InitializedEntity Entity
= InitializedEntity::InitializeParameter(Context, param, paramType);
- ExprResult ArgE = PerformCopyInitialization(Entity, SourceLocation(), argExpr);
+ ExprResult ArgE =
+ SemaRef.PerformCopyInitialization(Entity, SourceLocation(), argExpr);
if (ArgE.isInvalid())
IsError = true;
else {
@@ -1877,7 +1893,7 @@ bool Sema::CheckMessageArgumentTypes(
Args[i]->getType()->isBlockPointerType() &&
origParamType->isObjCObjectPointerType()) {
ExprResult arg = Args[i];
- maybeExtendBlockObject(arg);
+ SemaRef.maybeExtendBlockObject(arg);
Args[i] = arg.get();
}
}
@@ -1889,8 +1905,8 @@ bool Sema::CheckMessageArgumentTypes(
if (Args[i]->isTypeDependent())
continue;
- ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], VariadicMethod,
- nullptr);
+ ExprResult Arg = SemaRef.DefaultVariadicArgumentPromotion(
+ Args[i], Sema::VariadicMethod, nullptr);
IsError |= Arg.isInvalid();
Args[i] = Arg.get();
}
@@ -1906,7 +1922,7 @@ bool Sema::CheckMessageArgumentTypes(
}
}
- DiagnoseSentinelCalls(Method, SelLoc, Args);
+ SemaRef.DiagnoseSentinelCalls(Method, SelLoc, Args);
// Do additional checkings on method.
IsError |=
@@ -1915,14 +1931,14 @@ bool Sema::CheckMessageArgumentTypes(
return IsError;
}
-bool Sema::isSelfExpr(Expr *RExpr) {
+bool SemaObjC::isSelfExpr(Expr *RExpr) {
// 'self' is objc 'self' in an objc method only.
- ObjCMethodDecl *Method =
- dyn_cast_or_null<ObjCMethodDecl>(CurContext->getNonClosureAncestor());
+ ObjCMethodDecl *Method = dyn_cast_or_null<ObjCMethodDecl>(
+ SemaRef.CurContext->getNonClosureAncestor());
return isSelfExpr(RExpr, Method);
}
-bool Sema::isSelfExpr(Expr *receiver, const ObjCMethodDecl *method) {
+bool SemaObjC::isSelfExpr(Expr *receiver, const ObjCMethodDecl *method) {
if (!method) return false;
receiver = receiver->IgnoreParenLValueCasts();
@@ -1933,8 +1949,8 @@ bool Sema::isSelfExpr(Expr *receiver, const ObjCMethodDecl *method) {
}
/// LookupMethodInType - Look up a method in an ObjCObjectType.
-ObjCMethodDecl *Sema::LookupMethodInObjectType(Selector sel, QualType type,
- bool isInstance) {
+ObjCMethodDecl *SemaObjC::LookupMethodInObjectType(Selector sel, QualType type,
+ bool isInstance) {
const ObjCObjectType *objType = type->castAs<ObjCObjectType>();
if (ObjCInterfaceDecl *iface = objType->getInterface()) {
// Look it up in the main interface (and categories, etc.)
@@ -1957,10 +1973,8 @@ ObjCMethodDecl *Sema::LookupMethodInObjectType(Selector sel, QualType type,
/// LookupMethodInQualifiedType - Lookups up a method in protocol qualifier
/// list of a qualified objective pointer type.
-ObjCMethodDecl *Sema::LookupMethodInQualifiedType(Selector Sel,
- const ObjCObjectPointerType *OPT,
- bool Instance)
-{
+ObjCMethodDecl *SemaObjC::LookupMethodInQualifiedType(
+ Selector Sel, const ObjCObjectPointerType *OPT, bool Instance) {
ObjCMethodDecl *MD = nullptr;
for (const auto *PROTO : OPT->quals()) {
if ((MD = PROTO->lookupMethod(Sel, Instance))) {
@@ -1972,13 +1986,11 @@ ObjCMethodDecl *Sema::LookupMethodInQualifiedType(Selector Sel,
/// HandleExprPropertyRefExpr - Handle foo.bar where foo is a pointer to an
/// objective C interface. This is a property reference expression.
-ExprResult Sema::
-HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
- Expr *BaseExpr, SourceLocation OpLoc,
- DeclarationName MemberName,
- SourceLocation MemberLoc,
- SourceLocation SuperLoc, QualType SuperType,
- bool Super) {
+ExprResult SemaObjC::HandleExprPropertyRefExpr(
+ const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc,
+ DeclarationName MemberName, SourceLocation MemberLoc,
+ SourceLocation SuperLoc, QualType SuperType, bool Super) {
+ ASTContext &Context = getASTContext();
const ObjCInterfaceType *IFaceT = OPT->getInterfaceType();
ObjCInterfaceDecl *IFace = IFaceT->getDecl();
@@ -1992,15 +2004,15 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
SourceRange BaseRange = Super? SourceRange(SuperLoc)
: BaseExpr->getSourceRange();
- if (RequireCompleteType(MemberLoc, OPT->getPointeeType(),
- diag::err_property_not_found_forward_class,
- MemberName, BaseRange))
+ if (SemaRef.RequireCompleteType(MemberLoc, OPT->getPointeeType(),
+ diag::err_property_not_found_forward_class,
+ MemberName, BaseRange))
return ExprError();
if (ObjCPropertyDecl *PD = IFace->FindPropertyDeclaration(
Member, ObjCPropertyQueryKind::OBJC_PR_query_instance)) {
// Check whether we can reference this property.
- if (DiagnoseUseOfDecl(PD, MemberLoc))
+ if (SemaRef.DiagnoseUseOfDecl(PD, MemberLoc))
return ExprError();
if (Super)
return new (Context)
@@ -2016,7 +2028,7 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
if (ObjCPropertyDecl *PD = I->FindPropertyDeclaration(
Member, ObjCPropertyQueryKind::OBJC_PR_query_instance)) {
// Check whether we can reference this property.
- if (DiagnoseUseOfDecl(PD, MemberLoc))
+ if (SemaRef.DiagnoseUseOfDecl(PD, MemberLoc))
return ExprError();
if (Super)
@@ -2034,7 +2046,7 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
// FIXME: The logic for looking up nullary and unary selectors should be
// shared with the code in ActOnInstanceMessage.
- Selector Sel = PP.getSelectorTable().getNullarySelector(Member);
+ Selector Sel = SemaRef.PP.getSelectorTable().getNullarySelector(Member);
ObjCMethodDecl *Getter = IFace->lookupInstanceMethod(Sel);
// May be found in property's qualified list.
@@ -2047,14 +2059,13 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
if (Getter) {
// Check if we can reference this property.
- if (DiagnoseUseOfDecl(Getter, MemberLoc))
+ if (SemaRef.DiagnoseUseOfDecl(Getter, MemberLoc))
return ExprError();
}
// If we found a getter then this may be a valid dot-reference, we
// will look for the matching setter, in case it is needed.
- Selector SetterSel =
- SelectorTable::constructSetterSelector(PP.getIdentifierTable(),
- PP.getSelectorTable(), Member);
+ Selector SetterSel = SelectorTable::constructSetterSelector(
+ SemaRef.PP.getIdentifierTable(), SemaRef.PP.getSelectorTable(), Member);
ObjCMethodDecl *Setter = IFace->lookupInstanceMethod(SetterSel);
// May be found in property's qualified list.
@@ -2067,7 +2078,7 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Setter = IFace->lookupPrivateMethod(SetterSel);
}
- if (Setter && DiagnoseUseOfDecl(Setter, MemberLoc))
+ if (Setter && SemaRef.DiagnoseUseOfDecl(Setter, MemberLoc))
return ExprError();
// Special warning if member name used in a property-dot for a setter accessor
@@ -2102,9 +2113,9 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
// Attempt to correct for typos in property names.
DeclFilterCCC<ObjCPropertyDecl> CCC{};
- if (TypoCorrection Corrected = CorrectTypo(
- DeclarationNameInfo(MemberName, MemberLoc), LookupOrdinaryName,
- nullptr, nullptr, CCC, CTK_ErrorRecovery, IFace, false, OPT)) {
+ if (TypoCorrection Corrected = SemaRef.CorrectTypo(
+ DeclarationNameInfo(MemberName, MemberLoc), Sema::LookupOrdinaryName,
+ nullptr, nullptr, CCC, Sema::CTK_ErrorRecovery, IFace, false, OPT)) {
DeclarationName TypoResult = Corrected.getCorrection();
if (TypoResult.isIdentifier() &&
TypoResult.getAsIdentifierInfo() == Member) {
@@ -2122,8 +2133,9 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
return ExprError();
}
} else {
- diagnoseTypo(Corrected, PDiag(diag::err_property_not_found_suggest)
- << MemberName << QualType(OPT, 0));
+ SemaRef.diagnoseTypo(Corrected,
+ PDiag(diag::err_property_not_found_suggest)
+ << MemberName << QualType(OPT, 0));
return HandleExprPropertyRefExpr(OPT, BaseExpr, OpLoc,
TypoResult, MemberLoc,
SuperLoc, SuperType, Super);
@@ -2135,9 +2147,9 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
QualType T = Ivar->getType();
if (const ObjCObjectPointerType * OBJPT =
T->getAsObjCInterfacePointerType()) {
- if (RequireCompleteType(MemberLoc, OBJPT->getPointeeType(),
- diag::err_property_not_as_forward_class,
- MemberName, BaseExpr))
+ if (SemaRef.RequireCompleteType(MemberLoc, OBJPT->getPointeeType(),
+ diag::err_property_not_as_forward_class,
+ MemberName, BaseExpr))
return ExprError();
}
Diag(MemberLoc,
@@ -2155,13 +2167,11 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
return ExprError();
}
-ExprResult Sema::
-ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
- IdentifierInfo &propertyName,
- SourceLocation receiverNameLoc,
- SourceLocation propertyNameLoc) {
-
- IdentifierInfo *receiverNamePtr = &receiverName;
+ExprResult SemaObjC::ActOnClassPropertyRefExpr(
+ const IdentifierInfo &receiverName, const IdentifierInfo &propertyName,
+ SourceLocation receiverNameLoc, SourceLocation propertyNameLoc) {
+ ASTContext &Context = getASTContext();
+ const IdentifierInfo *receiverNamePtr = &receiverName;
ObjCInterfaceDecl *IFace = getObjCInterfaceDecl(receiverNamePtr,
receiverNameLoc);
@@ -2211,9 +2221,10 @@ ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
GetterSel = PD->getGetterName();
SetterSel = PD->getSetterName();
} else {
- GetterSel = PP.getSelectorTable().getNullarySelector(&propertyName);
+ GetterSel = SemaRef.PP.getSelectorTable().getNullarySelector(&propertyName);
SetterSel = SelectorTable::constructSetterSelector(
- PP.getIdentifierTable(), PP.getSelectorTable(), &propertyName);
+ SemaRef.PP.getIdentifierTable(), SemaRef.PP.getSelectorTable(),
+ &propertyName);
}
// Search for a declared property first.
@@ -2226,7 +2237,7 @@ ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
if (Getter) {
// FIXME: refactor/share with ActOnMemberReference().
// Check if we can reference this property.
- if (DiagnoseUseOfDecl(Getter, propertyNameLoc))
+ if (SemaRef.DiagnoseUseOfDecl(Getter, propertyNameLoc))
return ExprError();
}
@@ -2241,7 +2252,7 @@ ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
if (!Setter)
Setter = IFace->getCategoryClassMethod(SetterSel);
- if (Setter && DiagnoseUseOfDecl(Setter, propertyNameLoc))
+ if (Setter && SemaRef.DiagnoseUseOfDecl(Setter, propertyNameLoc))
return ExprError();
if (Getter || Setter) {
@@ -2281,12 +2292,11 @@ class ObjCInterfaceOrSuperCCC final : public CorrectionCandidateCallback {
} // end anonymous namespace
-Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S,
- IdentifierInfo *Name,
- SourceLocation NameLoc,
- bool IsSuper,
- bool HasTrailingDot,
- ParsedType &ReceiverType) {
+SemaObjC::ObjCMessageKind
+SemaObjC::getObjCMessageKind(Scope *S, IdentifierInfo *Name,
+ SourceLocation NameLoc, bool IsSuper,
+ bool HasTrailingDot, ParsedType &ReceiverType) {
+ ASTContext &Context = getASTContext();
ReceiverType = nullptr;
// If the identifier is "super" and there is no trailing dot, we're
@@ -2295,8 +2305,8 @@ Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S,
if (IsSuper && S->isInObjcMethodScope())
return HasTrailingDot? ObjCInstanceMessage : ObjCSuperMessage;
- LookupResult Result(*this, Name, NameLoc, LookupOrdinaryName);
- LookupName(Result, S);
+ LookupResult Result(SemaRef, Name, NameLoc, Sema::LookupOrdinaryName);
+ SemaRef.LookupName(Result, S);
switch (Result.getResultKind()) {
case LookupResult::NotFound:
@@ -2304,7 +2314,7 @@ Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S,
// Objective-C method, look for ivars. If we find one, we're done!
// FIXME: This is a hack. Ivar lookup should be part of normal
// lookup.
- if (ObjCMethodDecl *Method = getCurMethodDecl()) {
+ if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl()) {
if (!Method->getClassInterface()) {
// Fall back: let the parser try to parse it as an instance message.
return ObjCInstanceMessage;
@@ -2339,7 +2349,7 @@ Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S,
T = Context.getObjCInterfaceType(Class);
else if (TypeDecl *Type = dyn_cast<TypeDecl>(ND)) {
T = Context.getTypeDeclType(Type);
- DiagnoseUseOfDecl(Type, NameLoc);
+ SemaRef.DiagnoseUseOfDecl(Type, NameLoc);
}
else
return ObjCInstanceMessage;
@@ -2347,30 +2357,30 @@ Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S,
// We have a class message, and T is the type we're
// messaging. Build source-location information for it.
TypeSourceInfo *TSInfo = Context.getTrivialTypeSourceInfo(T, NameLoc);
- ReceiverType = CreateParsedType(T, TSInfo);
+ ReceiverType = SemaRef.CreateParsedType(T, TSInfo);
return ObjCClassMessage;
}
}
- ObjCInterfaceOrSuperCCC CCC(getCurMethodDecl());
- if (TypoCorrection Corrected = CorrectTypo(
+ ObjCInterfaceOrSuperCCC CCC(SemaRef.getCurMethodDecl());
+ if (TypoCorrection Corrected = SemaRef.CorrectTypo(
Result.getLookupNameInfo(), Result.getLookupKind(), S, nullptr, CCC,
- CTK_ErrorRecovery, nullptr, false, nullptr, false)) {
+ Sema::CTK_ErrorRecovery, nullptr, false, nullptr, false)) {
if (Corrected.isKeyword()) {
// If we've found the keyword "super" (the only keyword that would be
// returned by CorrectTypo), this is a send to super.
- diagnoseTypo(Corrected,
- PDiag(diag::err_unknown_receiver_suggest) << Name);
+ SemaRef.diagnoseTypo(Corrected, PDiag(diag::err_unknown_receiver_suggest)
+ << Name);
return ObjCSuperMessage;
} else if (ObjCInterfaceDecl *Class =
Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>()) {
// If we found a declaration, correct when it refers to an Objective-C
// class.
- diagnoseTypo(Corrected,
- PDiag(diag::err_unknown_receiver_suggest) << Name);
+ SemaRef.diagnoseTypo(Corrected, PDiag(diag::err_unknown_receiver_suggest)
+ << Name);
QualType T = Context.getObjCInterfaceType(Class);
TypeSourceInfo *TSInfo = Context.getTrivialTypeSourceInfo(T, NameLoc);
- ReceiverType = CreateParsedType(T, TSInfo);
+ ReceiverType = SemaRef.CreateParsedType(T, TSInfo);
return ObjCClassMessage;
}
}
@@ -2379,13 +2389,12 @@ Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S,
return ObjCInstanceMessage;
}
-ExprResult Sema::ActOnSuperMessage(Scope *S,
- SourceLocation SuperLoc,
- Selector Sel,
- SourceLocation LBracLoc,
- ArrayRef<SourceLocation> SelectorLocs,
- SourceLocation RBracLoc,
- MultiExprArg Args) {
+ExprResult SemaObjC::ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
+ Selector Sel, SourceLocation LBracLoc,
+ ArrayRef<SourceLocation> SelectorLocs,
+ SourceLocation RBracLoc,
+ MultiExprArg Args) {
+ ASTContext &Context = getASTContext();
// Determine whether we are inside a method or not.
ObjCMethodDecl *Method = tryCaptureObjCSelf(SuperLoc);
if (!Method) {
@@ -2411,7 +2420,7 @@ ExprResult Sema::ActOnSuperMessage(Scope *S,
// We are in a method whose class has a superclass, so 'super'
// is acting as a keyword.
if (Method->getSelector() == Sel)
- getCurFunction()->ObjCShouldCallSuper = false;
+ SemaRef.getCurFunction()->ObjCShouldCallSuper = false;
if (Method->isInstanceMethod()) {
// Since we are in an instance method, this is an instance
@@ -2430,12 +2439,12 @@ ExprResult Sema::ActOnSuperMessage(Scope *S,
LBracLoc, SelectorLocs, RBracLoc, Args);
}
-ExprResult Sema::BuildClassMessageImplicit(QualType ReceiverType,
- bool isSuperReceiver,
- SourceLocation Loc,
- Selector Sel,
- ObjCMethodDecl *Method,
- MultiExprArg Args) {
+ExprResult SemaObjC::BuildClassMessageImplicit(QualType ReceiverType,
+ bool isSuperReceiver,
+ SourceLocation Loc, Selector Sel,
+ ObjCMethodDecl *Method,
+ MultiExprArg Args) {
+ ASTContext &Context = getASTContext();
TypeSourceInfo *receiverTypeInfo = nullptr;
if (!ReceiverType.isNull())
receiverTypeInfo = Context.getTrivialTypeSourceInfo(ReceiverType);
@@ -2459,7 +2468,7 @@ static void applyCocoaAPICheck(Sema &S, const ObjCMessageExpr *Msg,
SourceManager &SM = S.SourceMgr;
edit::Commit ECommit(SM, S.LangOpts);
- if (refactor(Msg,*S.NSAPIObj, ECommit)) {
+ if (refactor(Msg, *S.ObjC().NSAPIObj, ECommit)) {
auto Builder = S.Diag(MsgLoc, DiagID)
<< Msg->getSelector() << Msg->getSourceRange();
// FIXME: Don't emit diagnostic at all if fixits are non-commitable.
@@ -2555,7 +2564,7 @@ DiagnoseCStringFormatDirectiveInObjCAPI(Sema &S,
}
else if (Method) {
for (const auto *I : Method->specific_attrs<FormatAttr>()) {
- if (S.GetFormatNSStringIdx(I, Idx)) {
+ if (S.ObjC().GetFormatNSStringIdx(I, Idx)) {
Format = true;
break;
}
@@ -2606,16 +2615,12 @@ DiagnoseCStringFormatDirectiveInObjCAPI(Sema &S,
/// \param RBracLoc The location of the closing square bracket ']'.
///
/// \param ArgsIn The message arguments.
-ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
- QualType ReceiverType,
- SourceLocation SuperLoc,
- Selector Sel,
- ObjCMethodDecl *Method,
- SourceLocation LBracLoc,
- ArrayRef<SourceLocation> SelectorLocs,
- SourceLocation RBracLoc,
- MultiExprArg ArgsIn,
- bool isImplicit) {
+ExprResult SemaObjC::BuildClassMessage(
+ TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType,
+ SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method,
+ SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs,
+ SourceLocation RBracLoc, MultiExprArg ArgsIn, bool isImplicit) {
+ ASTContext &Context = getASTContext();
SourceLocation Loc = SuperLoc.isValid()? SuperLoc
: ReceiverTypeInfo->getTypeLoc().getSourceRange().getBegin();
if (LBracLoc.isInvalid()) {
@@ -2653,17 +2658,17 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
assert(Class && "We don't know which class we're messaging?");
// objc++ diagnoses during typename annotation.
if (!getLangOpts().CPlusPlus)
- (void)DiagnoseUseOfDecl(Class, SelectorSlotLocs);
+ (void)SemaRef.DiagnoseUseOfDecl(Class, SelectorSlotLocs);
// Find the method we are messaging.
if (!Method) {
SourceRange TypeRange
= SuperLoc.isValid()? SourceRange(SuperLoc)
: ReceiverTypeInfo->getTypeLoc().getSourceRange();
- if (RequireCompleteType(Loc, Context.getObjCInterfaceType(Class),
- (getLangOpts().ObjCAutoRefCount
- ? diag::err_arc_receiver_forward_class
- : diag::warn_receiver_forward_class),
- TypeRange)) {
+ if (SemaRef.RequireCompleteType(Loc, Context.getObjCInterfaceType(Class),
+ (getLangOpts().ObjCAutoRefCount
+ ? diag::err_arc_receiver_forward_class
+ : diag::warn_receiver_forward_class),
+ TypeRange)) {
// A forward class used in messaging is treated as a 'Class'
Method = LookupFactoryMethodInGlobalPool(Sel,
SourceRange(LBracLoc, RBracLoc));
@@ -2678,8 +2683,8 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
if (!Method)
Method = Class->lookupPrivateClassMethod(Sel);
- if (Method && DiagnoseUseOfDecl(Method, SelectorSlotLocs,
- nullptr, false, false, Class))
+ if (Method && SemaRef.DiagnoseUseOfDecl(Method, SelectorSlotLocs, nullptr,
+ false, false, Class))
return ExprError();
}
@@ -2696,8 +2701,9 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
return ExprError();
if (Method && !Method->getReturnType()->isVoidType() &&
- RequireCompleteType(LBracLoc, Method->getReturnType(),
- diag::err_illegal_message_expr_incomplete_type))
+ SemaRef.RequireCompleteType(
+ LBracLoc, Method->getReturnType(),
+ diag::err_illegal_message_expr_incomplete_type))
return ExprError();
if (Method && Method->isDirectMethod() && SuperLoc.isValid()) {
@@ -2720,8 +2726,7 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
Diag(Method->getLocation(), diag::note_method_declared_at)
<< Method->getDeclName();
}
- }
- else if (ObjCMethodDecl *CurMeth = getCurMethodDecl()) {
+ } else if (ObjCMethodDecl *CurMeth = SemaRef.getCurMethodDecl()) {
// [super initialize] is allowed only within an +initialize implementation
if (CurMeth->getMethodFamily() != OMF_initialize) {
Diag(Loc, diag::warn_direct_super_initialize_call);
@@ -2733,7 +2738,7 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
}
}
- DiagnoseCStringFormatDirectiveInObjCAPI(*this, Method, Sel, Args, NumArgs);
+ DiagnoseCStringFormatDirectiveInObjCAPI(SemaRef, Method, Sel, Args, NumArgs);
// Construct the appropriate ObjCMessageExpr.
ObjCMessageExpr *Result;
@@ -2747,26 +2752,26 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
Context, ReturnType, VK, LBracLoc, ReceiverTypeInfo, Sel, SelectorLocs,
Method, ArrayRef(Args, NumArgs), RBracLoc, isImplicit);
if (!isImplicit)
- checkCocoaAPI(*this, Result);
+ checkCocoaAPI(SemaRef, Result);
}
if (Method)
- checkFoundationAPI(*this, SelLoc, Method, ArrayRef(Args, NumArgs),
+ checkFoundationAPI(SemaRef, SelLoc, Method, ArrayRef(Args, NumArgs),
ReceiverType, /*IsClassObjectCall=*/true);
- return MaybeBindToTemporary(Result);
+ return SemaRef.MaybeBindToTemporary(Result);
}
// ActOnClassMessage - used for both unary and keyword messages.
// ArgExprs is optional - if it is present, the number of expressions
// is obtained from Sel.getNumArgs().
-ExprResult Sema::ActOnClassMessage(Scope *S,
- ParsedType Receiver,
- Selector Sel,
- SourceLocation LBracLoc,
- ArrayRef<SourceLocation> SelectorLocs,
- SourceLocation RBracLoc,
- MultiExprArg Args) {
+ExprResult SemaObjC::ActOnClassMessage(Scope *S, ParsedType Receiver,
+ Selector Sel, SourceLocation LBracLoc,
+ ArrayRef<SourceLocation> SelectorLocs,
+ SourceLocation RBracLoc,
+ MultiExprArg Args) {
+ ASTContext &Context = getASTContext();
TypeSourceInfo *ReceiverTypeInfo;
- QualType ReceiverType = GetTypeFromParser(Receiver, &ReceiverTypeInfo);
+ QualType ReceiverType =
+ SemaRef.GetTypeFromParser(Receiver, &ReceiverTypeInfo);
if (ReceiverType.isNull())
return ExprError();
@@ -2779,12 +2784,9 @@ ExprResult Sema::ActOnClassMessage(Scope *S,
Args);
}
-ExprResult Sema::BuildInstanceMessageImplicit(Expr *Receiver,
- QualType ReceiverType,
- SourceLocation Loc,
- Selector Sel,
- ObjCMethodDecl *Method,
- MultiExprArg Args) {
+ExprResult SemaObjC::BuildInstanceMessageImplicit(
+ Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel,
+ ObjCMethodDecl *Method, MultiExprArg Args) {
return BuildInstanceMessage(Receiver, ReceiverType,
/*SuperLoc=*/!Receiver ? Loc : SourceLocation(),
Sel, Method, Loc, Loc, Loc, Args,
@@ -2792,12 +2794,13 @@ ExprResult Sema::BuildInstanceMessageImplicit(Expr *Receiver,
}
static bool isMethodDeclaredInRootProtocol(Sema &S, const ObjCMethodDecl *M) {
- if (!S.NSAPIObj)
+ if (!S.ObjC().NSAPIObj)
return false;
const auto *Protocol = dyn_cast<ObjCProtocolDecl>(M->getDeclContext());
if (!Protocol)
return false;
- const IdentifierInfo *II = S.NSAPIObj->getNSClassId(NSAPI::ClassId_NSObject);
+ const IdentifierInfo *II =
+ S.ObjC().NSAPIObj->getNSClassId(NSAPI::ClassId_NSObject);
if (const auto *RootClass = dyn_cast_or_null<ObjCInterfaceDecl>(
S.LookupSingleName(S.TUScope, II, Protocol->getBeginLoc(),
Sema::LookupOrdinaryName))) {
@@ -2837,19 +2840,15 @@ static bool isMethodDeclaredInRootProtocol(Sema &S, const ObjCMethodDecl *M) {
/// \param RBracLoc The location of the closing square bracket ']'.
///
/// \param ArgsIn The message arguments.
-ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
- QualType ReceiverType,
- SourceLocation SuperLoc,
- Selector Sel,
- ObjCMethodDecl *Method,
- SourceLocation LBracLoc,
- ArrayRef<SourceLocation> SelectorLocs,
- SourceLocation RBracLoc,
- MultiExprArg ArgsIn,
- bool isImplicit) {
+ExprResult SemaObjC::BuildInstanceMessage(
+ Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc,
+ Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc,
+ ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc,
+ MultiExprArg ArgsIn, bool isImplicit) {
assert((Receiver || SuperLoc.isValid()) && "If the Receiver is null, the "
"SuperLoc must be valid so we can "
"use it instead.");
+ ASTContext &Context = getASTContext();
// The location of the receiver.
SourceLocation Loc = SuperLoc.isValid() ? SuperLoc : Receiver->getBeginLoc();
@@ -2874,9 +2873,10 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
if (Receiver->hasPlaceholderType()) {
ExprResult Result;
if (Receiver->getType() == Context.UnknownAnyTy)
- Result = forceUnknownAnyToType(Receiver, Context.getObjCIdType());
+ Result =
+ SemaRef.forceUnknownAnyToType(Receiver, Context.getObjCIdType());
else
- Result = CheckPlaceholderExpr(Receiver);
+ Result = SemaRef.CheckPlaceholderExpr(Receiver);
if (Result.isInvalid()) return ExprError();
Receiver = Result.get();
}
@@ -2895,7 +2895,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// If necessary, apply function/array conversion to the receiver.
// C99 6.7.5.3p[7,8].
- ExprResult Result = DefaultFunctionArrayLvalueConversion(Receiver);
+ ExprResult Result = SemaRef.DefaultFunctionArrayLvalueConversion(Receiver);
if (Result.isInvalid())
return ExprError();
Receiver = Result.get();
@@ -2914,24 +2914,28 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// But not in ARC.
Diag(Loc, diag::warn_bad_receiver_type) << ReceiverType << RecRange;
if (ReceiverType->isPointerType()) {
- Receiver = ImpCastExprToType(Receiver, Context.getObjCIdType(),
- CK_CPointerToObjCPointerCast).get();
+ Receiver = SemaRef
+ .ImpCastExprToType(Receiver, Context.getObjCIdType(),
+ CK_CPointerToObjCPointerCast)
+ .get();
} else {
// TODO: specialized warning on null receivers?
bool IsNull = Receiver->isNullPointerConstant(Context,
Expr::NPC_ValueDependentIsNull);
CastKind Kind = IsNull ? CK_NullToPointer : CK_IntegralToPointer;
- Receiver = ImpCastExprToType(Receiver, Context.getObjCIdType(),
- Kind).get();
+ Receiver =
+ SemaRef.ImpCastExprToType(Receiver, Context.getObjCIdType(), Kind)
+ .get();
}
ReceiverType = Receiver->getType();
} else if (getLangOpts().CPlusPlus) {
// The receiver must be a complete type.
- if (RequireCompleteType(Loc, Receiver->getType(),
- diag::err_incomplete_receiver_type))
+ if (SemaRef.RequireCompleteType(Loc, Receiver->getType(),
+ diag::err_incomplete_receiver_type))
return ExprError();
- ExprResult result = PerformContextuallyConvertToObjCPointer(Receiver);
+ ExprResult result =
+ SemaRef.PerformContextuallyConvertToObjCPointer(Receiver);
if (result.isUsable()) {
Receiver = result.get();
ReceiverType = Receiver->getType();
@@ -2960,14 +2964,14 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// select a better one.
Method = Methods[0];
- if (ObjCMethodDecl *BestMethod =
- SelectBestMethod(Sel, ArgsIn, Method->isInstanceMethod(), Methods))
+ if (ObjCMethodDecl *BestMethod = SemaRef.SelectBestMethod(
+ Sel, ArgsIn, Method->isInstanceMethod(), Methods))
Method = BestMethod;
if (!AreMultipleMethodsInGlobalPool(Sel, Method,
SourceRange(LBracLoc, RBracLoc),
receiverIsIdLike, Methods))
- DiagnoseUseOfDecl(Method, SelectorSlotLocs);
+ SemaRef.DiagnoseUseOfDecl(Method, SelectorSlotLocs);
}
} else if (ReceiverType->isObjCClassOrClassKindOfType() ||
ReceiverType->isObjCQualifiedClassType()) {
@@ -2983,7 +2987,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
if (!Method) {
Method = LookupMethodInQualifiedType(Sel, QClassTy, true);
// warn if instance method found for a Class message.
- if (Method && !isMethodDeclaredInRootProtocol(*this, Method)) {
+ if (Method && !isMethodDeclaredInRootProtocol(SemaRef, Method)) {
Diag(SelLoc, diag::warn_instance_method_on_class_found)
<< Method->getSelector() << Sel;
Diag(Method->getLocation(), diag::note_method_declared_at)
@@ -2991,7 +2995,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
}
}
} else {
- if (ObjCMethodDecl *CurMeth = getCurMethodDecl()) {
+ if (ObjCMethodDecl *CurMeth = SemaRef.getCurMethodDecl()) {
if (ObjCInterfaceDecl *ClassDecl = CurMeth->getClassInterface()) {
// As a guess, try looking for the method in the current interface.
// This very well may not produce the "right" method.
@@ -3002,7 +3006,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
if (!Method)
Method = ClassDecl->lookupPrivateClassMethod(Sel);
- if (Method && DiagnoseUseOfDecl(Method, SelectorSlotLocs))
+ if (Method && SemaRef.DiagnoseUseOfDecl(Method, SelectorSlotLocs))
return ExprError();
}
}
@@ -3030,10 +3034,9 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
}
}
- if (ObjCMethodDecl *BestMethod =
- SelectBestMethod(Sel, ArgsIn, Method->isInstanceMethod(),
- Methods))
- Method = BestMethod;
+ if (ObjCMethodDecl *BestMethod = SemaRef.SelectBestMethod(
+ Sel, ArgsIn, Method->isInstanceMethod(), Methods))
+ Method = BestMethod;
}
}
}
@@ -3050,7 +3053,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
Method = LookupMethodInQualifiedType(Sel, QIdTy, true);
if (!Method)
Method = LookupMethodInQualifiedType(Sel, QIdTy, false);
- if (Method && DiagnoseUseOfDecl(Method, SelectorSlotLocs))
+ if (Method && SemaRef.DiagnoseUseOfDecl(Method, SelectorSlotLocs))
return ExprError();
} else if (const ObjCObjectPointerType *OCIType
= ReceiverType->getAsObjCInterfacePointerType()) {
@@ -3062,11 +3065,12 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// FIXME: In the non-ARC case, this will still be a hard error if the
// definition is found in a module that's not visible.
const ObjCInterfaceDecl *forwardClass = nullptr;
- if (RequireCompleteType(Loc, OCIType->getPointeeType(),
- getLangOpts().ObjCAutoRefCount
- ? diag::err_arc_receiver_forward_instance
- : diag::warn_receiver_forward_instance,
- RecRange)) {
+ if (SemaRef.RequireCompleteType(
+ Loc, OCIType->getPointeeType(),
+ getLangOpts().ObjCAutoRefCount
+ ? diag::err_arc_receiver_forward_instance
+ : diag::warn_receiver_forward_instance,
+ RecRange)) {
if (getLangOpts().ObjCAutoRefCount)
return ExprError();
@@ -3107,9 +3111,8 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// to select a better one.
Method = Methods[0];
- if (ObjCMethodDecl *BestMethod =
- SelectBestMethod(Sel, ArgsIn, Method->isInstanceMethod(),
- Methods))
+ if (ObjCMethodDecl *BestMethod = SemaRef.SelectBestMethod(
+ Sel, ArgsIn, Method->isInstanceMethod(), Methods))
Method = BestMethod;
AreMultipleMethodsInGlobalPool(Sel, Method,
@@ -3124,7 +3127,8 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
}
}
}
- if (Method && DiagnoseUseOfDecl(Method, SelectorSlotLocs, forwardClass))
+ if (Method &&
+ SemaRef.DiagnoseUseOfDecl(Method, SelectorSlotLocs, forwardClass))
return ExprError();
} else {
// Reject other random receiver types (e.g. structs).
@@ -3135,8 +3139,9 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
}
FunctionScopeInfo *DIFunctionScopeInfo =
- (Method && Method->getMethodFamily() == OMF_init)
- ? getEnclosingFunction() : nullptr;
+ (Method && Method->getMethodFamily() == OMF_init)
+ ? SemaRef.getEnclosingFunction()
+ : nullptr;
if (Method && Method->isDirectMethod()) {
if (ReceiverType->isObjCIdType() && !isImplicit) {
@@ -3202,7 +3207,8 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
if (!isDesignatedInitChain) {
const ObjCMethodDecl *InitMethod = nullptr;
bool isDesignated =
- getCurMethodDecl()->isDesignatedInitializerForTheInterface(&InitMethod);
+ SemaRef.getCurMethodDecl()->isDesignatedInitializerForTheInterface(
+ &InitMethod);
assert(isDesignated && InitMethod);
(void)isDesignated;
Diag(SelLoc, SuperLoc.isValid() ?
@@ -3237,8 +3243,9 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
return ExprError();
if (Method && !Method->getReturnType()->isVoidType() &&
- RequireCompleteType(LBracLoc, Method->getReturnType(),
- diag::err_illegal_message_expr_incomplete_type))
+ SemaRef.RequireCompleteType(
+ LBracLoc, Method->getReturnType(),
+ diag::err_illegal_message_expr_incomplete_type))
return ExprError();
// In ARC, forbid the user from sending messages to
@@ -3322,7 +3329,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
}
}
- DiagnoseCStringFormatDirectiveInObjCAPI(*this, Method, Sel, Args, NumArgs);
+ DiagnoseCStringFormatDirectiveInObjCAPI(SemaRef, Method, Sel, Args, NumArgs);
// Construct the appropriate ObjCMessageExpr instance.
ObjCMessageExpr *Result;
@@ -3336,7 +3343,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
Context, ReturnType, VK, LBracLoc, Receiver, Sel, SelectorLocs, Method,
ArrayRef(Args, NumArgs), RBracLoc, isImplicit);
if (!isImplicit)
- checkCocoaAPI(*this, Result);
+ checkCocoaAPI(SemaRef, Result);
}
if (Method) {
bool IsClassObjectCall = ClassMessage;
@@ -3347,7 +3354,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
if (Receiver && isSelfExpr(Receiver)) {
if (const auto *OPT = ReceiverType->getAs<ObjCObjectPointerType>()) {
if (OPT->getObjectType()->isObjCClass()) {
- if (const auto *CurMeth = getCurMethodDecl()) {
+ if (const auto *CurMeth = SemaRef.getCurMethodDecl()) {
IsClassObjectCall = true;
ReceiverType =
Context.getObjCInterfaceType(CurMeth->getClassInterface());
@@ -3355,7 +3362,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
}
}
}
- checkFoundationAPI(*this, SelLoc, Method, ArrayRef(Args, NumArgs),
+ checkFoundationAPI(SemaRef, SelLoc, Method, ArrayRef(Args, NumArgs),
ReceiverType, IsClassObjectCall);
}
@@ -3365,7 +3372,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
(SuperLoc.isValid() || isSelfExpr(Receiver))) {
// Only consider init calls *directly* in init implementations,
// not within blocks.
- ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(CurContext);
+ ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(SemaRef.CurContext);
if (method && method->getMethodFamily() == OMF_init) {
// The implicit assignment to self means we also don't want to
// consume the result.
@@ -3386,19 +3393,20 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
Prop->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak;
if (!IsWeak && Sel.isUnarySelector())
IsWeak = ReturnType.getObjCLifetime() & Qualifiers::OCL_Weak;
- if (IsWeak && !isUnevaluatedContext() &&
- !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, LBracLoc))
- getCurFunction()->recordUseOfWeak(Result, Prop);
+ if (IsWeak && !SemaRef.isUnevaluatedContext() &&
+ !getDiagnostics().isIgnored(diag::warn_arc_repeated_use_of_weak,
+ LBracLoc))
+ SemaRef.getCurFunction()->recordUseOfWeak(Result, Prop);
}
}
}
CheckObjCCircularContainer(Result);
- return MaybeBindToTemporary(Result);
+ return SemaRef.MaybeBindToTemporary(Result);
}
-static void RemoveSelectorFromWarningCache(Sema &S, Expr* Arg) {
+static void RemoveSelectorFromWarningCache(SemaObjC &S, Expr *Arg) {
if (ObjCSelectorExpr *OSE =
dyn_cast<ObjCSelectorExpr>(Arg->IgnoreParenCasts())) {
Selector Sel = OSE->getSelector();
@@ -3412,19 +3420,19 @@ static void RemoveSelectorFromWarningCache(Sema &S, Expr* Arg) {
// ActOnInstanceMessage - used for both unary and keyword messages.
// ArgExprs is optional - if it is present, the number of expressions
// is obtained from Sel.getNumArgs().
-ExprResult Sema::ActOnInstanceMessage(Scope *S,
- Expr *Receiver,
- Selector Sel,
- SourceLocation LBracLoc,
- ArrayRef<SourceLocation> SelectorLocs,
- SourceLocation RBracLoc,
- MultiExprArg Args) {
+ExprResult SemaObjC::ActOnInstanceMessage(Scope *S, Expr *Receiver,
+ Selector Sel, SourceLocation LBracLoc,
+ ArrayRef<SourceLocation> SelectorLocs,
+ SourceLocation RBracLoc,
+ MultiExprArg Args) {
+ ASTContext &Context = getASTContext();
if (!Receiver)
return ExprError();
// A ParenListExpr can show up while doing error recovery with invalid code.
if (isa<ParenListExpr>(Receiver)) {
- ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Receiver);
+ ExprResult Result =
+ SemaRef.MaybeConvertParenListExprToParenExpr(S, Receiver);
if (Result.isInvalid()) return ExprError();
Receiver = Result.get();
}
@@ -3738,32 +3746,33 @@ namespace {
};
} // end anonymous namespace
-bool Sema::isKnownName(StringRef name) {
+bool SemaObjC::isKnownName(StringRef name) {
+ ASTContext &Context = getASTContext();
if (name.empty())
return false;
- LookupResult R(*this, &Context.Idents.get(name), SourceLocation(),
+ LookupResult R(SemaRef, &Context.Idents.get(name), SourceLocation(),
Sema::LookupOrdinaryName);
- return LookupName(R, TUScope, false);
+ return SemaRef.LookupName(R, SemaRef.TUScope, false);
}
template <typename DiagBuilderT>
static void addFixitForObjCARCConversion(
- Sema &S, DiagBuilderT &DiagB, Sema::CheckedConversionKind CCK,
+ Sema &S, DiagBuilderT &DiagB, CheckedConversionKind CCK,
SourceLocation afterLParen, QualType castType, Expr *castExpr,
Expr *realCast, const char *bridgeKeyword, const char *CFBridgeName) {
// We handle C-style and implicit casts here.
switch (CCK) {
- case Sema::CCK_ImplicitConversion:
- case Sema::CCK_ForBuiltinOverloadedOp:
- case Sema::CCK_CStyleCast:
- case Sema::CCK_OtherCast:
+ case CheckedConversionKind::Implicit:
+ case CheckedConversionKind::ForBuiltinOverloadedOp:
+ case CheckedConversionKind::CStyleCast:
+ case CheckedConversionKind::OtherCast:
break;
- case Sema::CCK_FunctionalCast:
+ case CheckedConversionKind::FunctionalCast:
return;
}
if (CFBridgeName) {
- if (CCK == Sema::CCK_OtherCast) {
+ if (CCK == CheckedConversionKind::OtherCast) {
if (const CXXNamedCastExpr *NCE = dyn_cast<CXXNamedCastExpr>(realCast)) {
SourceRange range(NCE->getOperatorLoc(),
NCE->getAngleBrackets().getEnd());
@@ -3808,9 +3817,9 @@ static void addFixitForObjCARCConversion(
return;
}
- if (CCK == Sema::CCK_CStyleCast) {
+ if (CCK == CheckedConversionKind::CStyleCast) {
DiagB.AddFixItHint(FixItHint::CreateInsertion(afterLParen, bridgeKeyword));
- } else if (CCK == Sema::CCK_OtherCast) {
+ } else if (CCK == CheckedConversionKind::OtherCast) {
if (const CXXNamedCastExpr *NCE = dyn_cast<CXXNamedCastExpr>(realCast)) {
std::string castCode = "(";
castCode += bridgeKeyword;
@@ -3869,12 +3878,12 @@ static ObjCBridgeRelatedAttr *ObjCBridgeRelatedAttrFromType(QualType T,
return nullptr;
}
-static void
-diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
- QualType castType, ARCConversionTypeClass castACTC,
- Expr *castExpr, Expr *realCast,
- ARCConversionTypeClass exprACTC,
- Sema::CheckedConversionKind CCK) {
+static void diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
+ QualType castType,
+ ARCConversionTypeClass castACTC,
+ Expr *castExpr, Expr *realCast,
+ ARCConversionTypeClass exprACTC,
+ CheckedConversionKind CCK) {
SourceLocation loc =
(castRange.isValid() ? castRange.getBegin() : castExpr->getExprLoc());
@@ -3924,13 +3933,13 @@ diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
<< castType
<< castRange
<< castExpr->getSourceRange();
- bool br = S.isKnownName("CFBridgingRelease");
+ bool br = S.ObjC().isKnownName("CFBridgingRelease");
ACCResult CreateRule =
ARCCastChecker(S.Context, exprACTC, castACTC, true).Visit(castExpr);
assert(CreateRule != ACC_bottom && "This cast should already be accepted.");
if (CreateRule != ACC_plusOne)
{
- auto DiagB = (CCK != Sema::CCK_OtherCast)
+ auto DiagB = (CCK != CheckedConversionKind::OtherCast)
? S.Diag(noteLoc, diag::note_arc_bridge)
: S.Diag(noteLoc, diag::note_arc_cstyle_bridge);
@@ -3940,7 +3949,7 @@ diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
}
if (CreateRule != ACC_plusZero)
{
- auto DiagB = (CCK == Sema::CCK_OtherCast && !br)
+ auto DiagB = (CCK == CheckedConversionKind::OtherCast && !br)
? S.Diag(noteLoc, diag::note_arc_cstyle_bridge_transfer)
<< castExprType
: S.Diag(br ? castExpr->getExprLoc() : noteLoc,
@@ -3957,7 +3966,7 @@ diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
// Bridge from a CF type to an ARC type.
if (exprACTC == ACTC_retainable && isAnyRetainable(castACTC)) {
- bool br = S.isKnownName("CFBridgingRetain");
+ bool br = S.ObjC().isKnownName("CFBridgingRetain");
S.Diag(loc, diag::err_arc_cast_requires_bridge)
<< convKindForDiag
<< unsigned(castExprType->isBlockPointerType()) // of ObjC|block type
@@ -3971,7 +3980,7 @@ diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
assert(CreateRule != ACC_bottom && "This cast should already be accepted.");
if (CreateRule != ACC_plusOne)
{
- auto DiagB = (CCK != Sema::CCK_OtherCast)
+ auto DiagB = (CCK != CheckedConversionKind::OtherCast)
? S.Diag(noteLoc, diag::note_arc_bridge)
: S.Diag(noteLoc, diag::note_arc_cstyle_bridge);
addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
@@ -3980,7 +3989,7 @@ diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
}
if (CreateRule != ACC_plusZero)
{
- auto DiagB = (CCK == Sema::CCK_OtherCast && !br)
+ auto DiagB = (CCK == CheckedConversionKind::OtherCast && !br)
? S.Diag(noteLoc, diag::note_arc_cstyle_bridge_retained)
<< castType
: S.Diag(br ? castExpr->getExprLoc() : noteLoc,
@@ -4133,7 +4142,7 @@ static bool CheckObjCBridgeCFCast(Sema &S, QualType castType, Expr *castExpr,
return true;
}
-void Sema::CheckTollFreeBridgeCast(QualType castType, Expr *castExpr) {
+void SemaObjC::CheckTollFreeBridgeCast(QualType castType, Expr *castExpr) {
if (!getLangOpts().ObjC)
return;
// warn in presence of __bridge casting to or from a toll free bridge cast.
@@ -4141,49 +4150,47 @@ void Sema::CheckTollFreeBridgeCast(QualType castType, Expr *castExpr) {
ARCConversionTypeClass castACTC = classifyTypeForARCConversion(castType);
if (castACTC == ACTC_retainable && exprACTC == ACTC_coreFoundation) {
bool HasObjCBridgeAttr;
- bool ObjCBridgeAttrWillNotWarn =
- CheckObjCBridgeNSCast<ObjCBridgeAttr>(*this, castType, castExpr, HasObjCBridgeAttr,
- false);
+ bool ObjCBridgeAttrWillNotWarn = CheckObjCBridgeNSCast<ObjCBridgeAttr>(
+ SemaRef, castType, castExpr, HasObjCBridgeAttr, false);
if (ObjCBridgeAttrWillNotWarn && HasObjCBridgeAttr)
return;
bool HasObjCBridgeMutableAttr;
bool ObjCBridgeMutableAttrWillNotWarn =
- CheckObjCBridgeNSCast<ObjCBridgeMutableAttr>(*this, castType, castExpr,
- HasObjCBridgeMutableAttr, false);
+ CheckObjCBridgeNSCast<ObjCBridgeMutableAttr>(
+ SemaRef, castType, castExpr, HasObjCBridgeMutableAttr, false);
if (ObjCBridgeMutableAttrWillNotWarn && HasObjCBridgeMutableAttr)
return;
if (HasObjCBridgeAttr)
- CheckObjCBridgeNSCast<ObjCBridgeAttr>(*this, castType, castExpr, HasObjCBridgeAttr,
- true);
+ CheckObjCBridgeNSCast<ObjCBridgeAttr>(SemaRef, castType, castExpr,
+ HasObjCBridgeAttr, true);
else if (HasObjCBridgeMutableAttr)
- CheckObjCBridgeNSCast<ObjCBridgeMutableAttr>(*this, castType, castExpr,
- HasObjCBridgeMutableAttr, true);
+ CheckObjCBridgeNSCast<ObjCBridgeMutableAttr>(
+ SemaRef, castType, castExpr, HasObjCBridgeMutableAttr, true);
}
else if (castACTC == ACTC_coreFoundation && exprACTC == ACTC_retainable) {
bool HasObjCBridgeAttr;
- bool ObjCBridgeAttrWillNotWarn =
- CheckObjCBridgeCFCast<ObjCBridgeAttr>(*this, castType, castExpr, HasObjCBridgeAttr,
- false);
+ bool ObjCBridgeAttrWillNotWarn = CheckObjCBridgeCFCast<ObjCBridgeAttr>(
+ SemaRef, castType, castExpr, HasObjCBridgeAttr, false);
if (ObjCBridgeAttrWillNotWarn && HasObjCBridgeAttr)
return;
bool HasObjCBridgeMutableAttr;
bool ObjCBridgeMutableAttrWillNotWarn =
- CheckObjCBridgeCFCast<ObjCBridgeMutableAttr>(*this, castType, castExpr,
- HasObjCBridgeMutableAttr, false);
+ CheckObjCBridgeCFCast<ObjCBridgeMutableAttr>(
+ SemaRef, castType, castExpr, HasObjCBridgeMutableAttr, false);
if (ObjCBridgeMutableAttrWillNotWarn && HasObjCBridgeMutableAttr)
return;
if (HasObjCBridgeAttr)
- CheckObjCBridgeCFCast<ObjCBridgeAttr>(*this, castType, castExpr, HasObjCBridgeAttr,
- true);
+ CheckObjCBridgeCFCast<ObjCBridgeAttr>(SemaRef, castType, castExpr,
+ HasObjCBridgeAttr, true);
else if (HasObjCBridgeMutableAttr)
- CheckObjCBridgeCFCast<ObjCBridgeMutableAttr>(*this, castType, castExpr,
- HasObjCBridgeMutableAttr, true);
+ CheckObjCBridgeCFCast<ObjCBridgeMutableAttr>(
+ SemaRef, castType, castExpr, HasObjCBridgeMutableAttr, true);
}
}
-void Sema::CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr) {
+void SemaObjC::CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr) {
QualType SrcType = castExpr->getType();
if (ObjCPropertyRefExpr *PRE = dyn_cast<ObjCPropertyRefExpr>(castExpr)) {
if (PRE->isExplicitProperty()) {
@@ -4204,8 +4211,8 @@ void Sema::CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr) {
castExpr);
}
-bool Sema::CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
- CastKind &Kind) {
+bool SemaObjC::CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
+ CastKind &Kind) {
if (!getLangOpts().ObjC)
return false;
ARCConversionTypeClass exprACTC =
@@ -4221,13 +4228,12 @@ bool Sema::CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
return false;
}
-bool Sema::checkObjCBridgeRelatedComponents(SourceLocation Loc,
- QualType DestType, QualType SrcType,
- ObjCInterfaceDecl *&RelatedClass,
- ObjCMethodDecl *&ClassMethod,
- ObjCMethodDecl *&InstanceMethod,
- TypedefNameDecl *&TDNDecl,
- bool CfToNs, bool Diagnose) {
+bool SemaObjC::checkObjCBridgeRelatedComponents(
+ SourceLocation Loc, QualType DestType, QualType SrcType,
+ ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod,
+ ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs,
+ bool Diagnose) {
+ ASTContext &Context = getASTContext();
QualType T = CfToNs ? SrcType : DestType;
ObjCBridgeRelatedAttr *ObjCBAttr = ObjCBridgeRelatedAttrFromType(T, TDNDecl);
if (!ObjCBAttr)
@@ -4240,9 +4246,9 @@ bool Sema::checkObjCBridgeRelatedComponents(SourceLocation Loc,
return false;
NamedDecl *Target = nullptr;
// Check for an existing type with this name.
- LookupResult R(*this, DeclarationName(RCId), SourceLocation(),
+ LookupResult R(SemaRef, DeclarationName(RCId), SourceLocation(),
Sema::LookupOrdinaryName);
- if (!LookupName(R, TUScope)) {
+ if (!SemaRef.LookupName(R, SemaRef.TUScope)) {
if (Diagnose) {
Diag(Loc, diag::err_objc_bridged_related_invalid_class) << RCId
<< SrcType << DestType;
@@ -4294,10 +4300,12 @@ bool Sema::checkObjCBridgeRelatedComponents(SourceLocation Loc,
return true;
}
-bool
-Sema::CheckObjCBridgeRelatedConversions(SourceLocation Loc,
- QualType DestType, QualType SrcType,
- Expr *&SrcExpr, bool Diagnose) {
+bool SemaObjC::CheckObjCBridgeRelatedConversions(SourceLocation Loc,
+ QualType DestType,
+ QualType SrcType,
+ Expr *&SrcExpr,
+ bool Diagnose) {
+ ASTContext &Context = getASTContext();
ARCConversionTypeClass rhsExprACTC = classifyTypeForARCConversion(SrcType);
ARCConversionTypeClass lhsExprACTC = classifyTypeForARCConversion(DestType);
bool CfToNs = (rhsExprACTC == ACTC_coreFoundation && lhsExprACTC == ACTC_retainable);
@@ -4323,7 +4331,7 @@ Sema::CheckObjCBridgeRelatedConversions(SourceLocation Loc,
ExpressionString += " ";
ExpressionString += ClassMethod->getSelector().getAsString();
SourceLocation SrcExprEndLoc =
- getLocForEndOfToken(SrcExpr->getEndLoc());
+ SemaRef.getLocForEndOfToken(SrcExpr->getEndLoc());
// Provide a fixit: [RelatedClass ClassMethod SrcExpr]
Diag(Loc, diag::err_objc_bridged_related_known_method)
<< SrcType << DestType << ClassMethod->getSelector() << false
@@ -4351,7 +4359,7 @@ Sema::CheckObjCBridgeRelatedConversions(SourceLocation Loc,
if (Diagnose) {
std::string ExpressionString;
SourceLocation SrcExprEndLoc =
- getLocForEndOfToken(SrcExpr->getEndLoc());
+ SemaRef.getLocForEndOfToken(SrcExpr->getEndLoc());
if (InstanceMethod->isPropertyAccessor())
if (const ObjCPropertyDecl *PDecl =
InstanceMethod->findPropertyDecl()) {
@@ -4387,11 +4395,12 @@ Sema::CheckObjCBridgeRelatedConversions(SourceLocation Loc,
return false;
}
-Sema::ARCConversionResult
-Sema::CheckObjCConversion(SourceRange castRange, QualType castType,
- Expr *&castExpr, CheckedConversionKind CCK,
- bool Diagnose, bool DiagnoseCFAudited,
- BinaryOperatorKind Opc) {
+SemaObjC::ARCConversionResult
+SemaObjC::CheckObjCConversion(SourceRange castRange, QualType castType,
+ Expr *&castExpr, CheckedConversionKind CCK,
+ bool Diagnose, bool DiagnoseCFAudited,
+ BinaryOperatorKind Opc) {
+ ASTContext &Context = getASTContext();
QualType castExprType = castExpr->getType();
// For the purposes of the classification, we assume reference types
@@ -4406,7 +4415,8 @@ Sema::CheckObjCConversion(SourceRange castRange, QualType castType,
// Check for viability and report error if casting an rvalue to a
// life-time qualifier.
if (castACTC == ACTC_retainable &&
- (CCK == CCK_CStyleCast || CCK == CCK_OtherCast) &&
+ (CCK == CheckedConversionKind::CStyleCast ||
+ CCK == CheckedConversionKind::OtherCast) &&
castType != castExprType) {
const Type *DT = castType.getTypePtr();
QualType QDT = castType;
@@ -4451,11 +4461,11 @@ Sema::CheckObjCConversion(SourceRange castRange, QualType castType,
// pointers too, but only when the conversions are explicit.
if (exprACTC == ACTC_indirectRetainable &&
(castACTC == ACTC_voidPtr ||
- (castACTC == ACTC_coreFoundation && isCast(CCK))))
+ (castACTC == ACTC_coreFoundation && SemaRef.isCast(CCK))))
return ACR_okay;
if (castACTC == ACTC_indirectRetainable &&
(exprACTC == ACTC_voidPtr || exprACTC == ACTC_coreFoundation) &&
- isCast(CCK))
+ SemaRef.isCast(CCK))
return ACR_okay;
switch (ARCCastChecker(Context, exprACTC, castACTC, false).Visit(castExpr)) {
@@ -4473,14 +4483,15 @@ Sema::CheckObjCConversion(SourceRange castRange, QualType castType,
castExpr = ImplicitCastExpr::Create(Context, castExpr->getType(),
CK_ARCConsumeObject, castExpr, nullptr,
VK_PRValue, FPOptionsOverride());
- Cleanup.setExprNeedsCleanups(true);
+ SemaRef.Cleanup.setExprNeedsCleanups(true);
return ACR_okay;
}
// If this is a non-implicit cast from id or block type to a
// CoreFoundation type, delay complaining in case the cast is used
// in an acceptable context.
- if (exprACTC == ACTC_retainable && isAnyRetainable(castACTC) && isCast(CCK))
+ if (exprACTC == ACTC_retainable && isAnyRetainable(castACTC) &&
+ SemaRef.isCast(CCK))
return ACR_unbridged;
// Issue a diagnostic about a missing @-sign when implicit casting a cstring
@@ -4499,8 +4510,8 @@ Sema::CheckObjCConversion(SourceRange castRange, QualType castType,
!(exprACTC == ACTC_voidPtr && castACTC == ACTC_retainable &&
(Opc == BO_NE || Opc == BO_EQ))) {
if (Diagnose)
- diagnoseObjCARCConversion(*this, castRange, castType, castACTC, castExpr,
- castExpr, exprACTC, CCK);
+ diagnoseObjCARCConversion(SemaRef, castRange, castType, castACTC,
+ castExpr, castExpr, exprACTC, CCK);
return ACR_error;
}
return ACR_okay;
@@ -4508,7 +4519,7 @@ Sema::CheckObjCConversion(SourceRange castRange, QualType castType,
/// Given that we saw an expression with the ARCUnbridgedCastTy
/// placeholder type, complain bitterly.
-void Sema::diagnoseARCUnbridgedCast(Expr *e) {
+void SemaObjC::diagnoseARCUnbridgedCast(Expr *e) {
// We expect the spurious ImplicitCastExpr to already have been stripped.
assert(!e->hasPlaceholderType(BuiltinType::ARCUnbridgedCast));
CastExpr *realCast = cast<CastExpr>(e->IgnoreParens());
@@ -4520,11 +4531,11 @@ void Sema::diagnoseARCUnbridgedCast(Expr *e) {
if (CStyleCastExpr *cast = dyn_cast<CStyleCastExpr>(realCast)) {
castRange = SourceRange(cast->getLParenLoc(), cast->getRParenLoc());
castType = cast->getTypeAsWritten();
- CCK = CCK_CStyleCast;
+ CCK = CheckedConversionKind::CStyleCast;
} else if (ExplicitCastExpr *cast = dyn_cast<ExplicitCastExpr>(realCast)) {
castRange = cast->getTypeInfoAsWritten()->getTypeLoc().getSourceRange();
castType = cast->getTypeAsWritten();
- CCK = CCK_OtherCast;
+ CCK = CheckedConversionKind::OtherCast;
} else {
llvm_unreachable("Unexpected ImplicitCastExpr");
}
@@ -4535,14 +4546,15 @@ void Sema::diagnoseARCUnbridgedCast(Expr *e) {
Expr *castExpr = realCast->getSubExpr();
assert(classifyTypeForARCConversion(castExpr->getType()) == ACTC_retainable);
- diagnoseObjCARCConversion(*this, castRange, castType, castACTC,
- castExpr, realCast, ACTC_retainable, CCK);
+ diagnoseObjCARCConversion(SemaRef, castRange, castType, castACTC, castExpr,
+ realCast, ACTC_retainable, CCK);
}
/// stripARCUnbridgedCast - Given an expression of ARCUnbridgedCast
/// type, remove the placeholder cast.
-Expr *Sema::stripARCUnbridgedCast(Expr *e) {
+Expr *SemaObjC::stripARCUnbridgedCast(Expr *e) {
assert(e->hasPlaceholderType(BuiltinType::ARCUnbridgedCast));
+ ASTContext &Context = getASTContext();
if (ParenExpr *pe = dyn_cast<ParenExpr>(e)) {
Expr *sub = stripARCUnbridgedCast(pe->getSubExpr());
@@ -4553,7 +4565,7 @@ Expr *Sema::stripARCUnbridgedCast(Expr *e) {
return UnaryOperator::Create(Context, sub, UO_Extension, sub->getType(),
sub->getValueKind(), sub->getObjectKind(),
uo->getOperatorLoc(), false,
- CurFPFeatureOverrides());
+ SemaRef.CurFPFeatureOverrides());
} else if (GenericSelectionExpr *gse = dyn_cast<GenericSelectionExpr>(e)) {
assert(!gse->isResultDependent());
assert(!gse->isTypePredicate());
@@ -4581,8 +4593,9 @@ Expr *Sema::stripARCUnbridgedCast(Expr *e) {
}
}
-bool Sema::CheckObjCARCUnavailableWeakConversion(QualType castType,
- QualType exprType) {
+bool SemaObjC::CheckObjCARCUnavailableWeakConversion(QualType castType,
+ QualType exprType) {
+ ASTContext &Context = getASTContext();
QualType canCastType =
Context.getCanonicalType(castType).getUnqualifiedType();
QualType canExprType =
@@ -4635,12 +4648,13 @@ static Expr *maybeUndoReclaimObject(Expr *e) {
return e;
}
-ExprResult Sema::BuildObjCBridgedCast(SourceLocation LParenLoc,
- ObjCBridgeCastKind Kind,
- SourceLocation BridgeKeywordLoc,
- TypeSourceInfo *TSInfo,
- Expr *SubExpr) {
- ExprResult SubResult = UsualUnaryConversions(SubExpr);
+ExprResult SemaObjC::BuildObjCBridgedCast(SourceLocation LParenLoc,
+ ObjCBridgeCastKind Kind,
+ SourceLocation BridgeKeywordLoc,
+ TypeSourceInfo *TSInfo,
+ Expr *SubExpr) {
+ ASTContext &Context = getASTContext();
+ ExprResult SubResult = SemaRef.UsualUnaryConversions(SubExpr);
if (SubResult.isInvalid()) return ExprError();
SubExpr = SubResult.get();
@@ -4738,7 +4752,7 @@ ExprResult Sema::BuildObjCBridgedCast(SourceLocation LParenLoc,
TSInfo, SubExpr);
if (MustConsume) {
- Cleanup.setExprNeedsCleanups(true);
+ SemaRef.Cleanup.setExprNeedsCleanups(true);
Result = ImplicitCastExpr::Create(Context, T, CK_ARCConsumeObject, Result,
nullptr, VK_PRValue, FPOptionsOverride());
}
@@ -4746,15 +4760,15 @@ ExprResult Sema::BuildObjCBridgedCast(SourceLocation LParenLoc,
return Result;
}
-ExprResult Sema::ActOnObjCBridgedCast(Scope *S,
- SourceLocation LParenLoc,
- ObjCBridgeCastKind Kind,
- SourceLocation BridgeKeywordLoc,
- ParsedType Type,
- SourceLocation RParenLoc,
- Expr *SubExpr) {
+ExprResult SemaObjC::ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc,
+ ObjCBridgeCastKind Kind,
+ SourceLocation BridgeKeywordLoc,
+ ParsedType Type,
+ SourceLocation RParenLoc,
+ Expr *SubExpr) {
+ ASTContext &Context = getASTContext();
TypeSourceInfo *TSInfo = nullptr;
- QualType T = GetTypeFromParser(Type, &TSInfo);
+ QualType T = SemaRef.GetTypeFromParser(Type, &TSInfo);
if (Kind == OBC_Bridge)
CheckTollFreeBridgeCast(T, SubExpr);
if (!TSInfo)
@@ -4762,3 +4776,473 @@ ExprResult Sema::ActOnObjCBridgedCast(Scope *S,
return BuildObjCBridgedCast(LParenLoc, Kind, BridgeKeywordLoc, TSInfo,
SubExpr);
}
+
+DeclResult SemaObjC::LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
+ IdentifierInfo *II) {
+ SourceLocation Loc = Lookup.getNameLoc();
+ ObjCMethodDecl *CurMethod = SemaRef.getCurMethodDecl();
+
+ // Check for error condition which is already reported.
+ if (!CurMethod)
+ return DeclResult(true);
+
+ // There are two cases to handle here. 1) scoped lookup could have failed,
+ // in which case we should look for an ivar. 2) scoped lookup could have
+ // found a decl, but that decl is outside the current instance method (i.e.
+ // a global variable). In these two cases, we do a lookup for an ivar with
+ // this name, if the lookup sucedes, we replace it our current decl.
+
+ // If we're in a class method, we don't normally want to look for
+ // ivars. But if we don't find anything else, and there's an
+ // ivar, that's an error.
+ bool IsClassMethod = CurMethod->isClassMethod();
+
+ bool LookForIvars;
+ if (Lookup.empty())
+ LookForIvars = true;
+ else if (IsClassMethod)
+ LookForIvars = false;
+ else
+ LookForIvars = (Lookup.isSingleResult() &&
+ Lookup.getFoundDecl()->isDefinedOutsideFunctionOrMethod());
+ ObjCInterfaceDecl *IFace = nullptr;
+ if (LookForIvars) {
+ IFace = CurMethod->getClassInterface();
+ ObjCInterfaceDecl *ClassDeclared;
+ ObjCIvarDecl *IV = nullptr;
+ if (IFace && (IV = IFace->lookupInstanceVariable(II, ClassDeclared))) {
+ // Diagnose using an ivar in a class method.
+ if (IsClassMethod) {
+ Diag(Loc, diag::err_ivar_use_in_class_method) << IV->getDeclName();
+ return DeclResult(true);
+ }
+
+ // Diagnose the use of an ivar outside of the declaring class.
+ if (IV->getAccessControl() == ObjCIvarDecl::Private &&
+ !declaresSameEntity(ClassDeclared, IFace) &&
+ !getLangOpts().DebuggerSupport)
+ Diag(Loc, diag::err_private_ivar_access) << IV->getDeclName();
+
+ // Success.
+ return IV;
+ }
+ } else if (CurMethod->isInstanceMethod()) {
+ // We should warn if a local variable hides an ivar.
+ if (ObjCInterfaceDecl *IFace = CurMethod->getClassInterface()) {
+ ObjCInterfaceDecl *ClassDeclared;
+ if (ObjCIvarDecl *IV = IFace->lookupInstanceVariable(II, ClassDeclared)) {
+ if (IV->getAccessControl() != ObjCIvarDecl::Private ||
+ declaresSameEntity(IFace, ClassDeclared))
+ Diag(Loc, diag::warn_ivar_use_hidden) << IV->getDeclName();
+ }
+ }
+ } else if (Lookup.isSingleResult() &&
+ Lookup.getFoundDecl()->isDefinedOutsideFunctionOrMethod()) {
+ // If accessing a stand-alone ivar in a class method, this is an error.
+ if (const ObjCIvarDecl *IV =
+ dyn_cast<ObjCIvarDecl>(Lookup.getFoundDecl())) {
+ Diag(Loc, diag::err_ivar_use_in_class_method) << IV->getDeclName();
+ return DeclResult(true);
+ }
+ }
+
+ // Didn't encounter an error, didn't find an ivar.
+ return DeclResult(false);
+}
+
+ExprResult SemaObjC::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
+ IdentifierInfo *II,
+ bool AllowBuiltinCreation) {
+ // FIXME: Integrate this lookup step into LookupParsedName.
+ DeclResult Ivar = LookupIvarInObjCMethod(Lookup, S, II);
+ if (Ivar.isInvalid())
+ return ExprError();
+ if (Ivar.isUsable())
+ return BuildIvarRefExpr(S, Lookup.getNameLoc(),
+ cast<ObjCIvarDecl>(Ivar.get()));
+
+ if (Lookup.empty() && II && AllowBuiltinCreation)
+ SemaRef.LookupBuiltin(Lookup);
+
+ // Sentinel value saying that we didn't do anything special.
+ return ExprResult(false);
+}
+
+ExprResult SemaObjC::BuildIvarRefExpr(Scope *S, SourceLocation Loc,
+ ObjCIvarDecl *IV) {
+ ASTContext &Context = getASTContext();
+ ObjCMethodDecl *CurMethod = SemaRef.getCurMethodDecl();
+ assert(CurMethod && CurMethod->isInstanceMethod() &&
+ "should not reference ivar from this context");
+
+ ObjCInterfaceDecl *IFace = CurMethod->getClassInterface();
+ assert(IFace && "should not reference ivar from this context");
+
+ // If we're referencing an invalid decl, just return this as a silent
+ // error node. The error diagnostic was already emitted on the decl.
+ if (IV->isInvalidDecl())
+ return ExprError();
+
+ // Check if referencing a field with __attribute__((deprecated)).
+ if (SemaRef.DiagnoseUseOfDecl(IV, Loc))
+ return ExprError();
+
+ // FIXME: This should use a new expr for a direct reference, don't
+ // turn this into Self->ivar, just return a BareIVarExpr or something.
+ IdentifierInfo &II = Context.Idents.get("self");
+ UnqualifiedId SelfName;
+ SelfName.setImplicitSelfParam(&II);
+ CXXScopeSpec SelfScopeSpec;
+ SourceLocation TemplateKWLoc;
+ ExprResult SelfExpr =
+ SemaRef.ActOnIdExpression(S, SelfScopeSpec, TemplateKWLoc, SelfName,
+ /*HasTrailingLParen=*/false,
+ /*IsAddressOfOperand=*/false);
+ if (SelfExpr.isInvalid())
+ return ExprError();
+
+ SelfExpr = SemaRef.DefaultLvalueConversion(SelfExpr.get());
+ if (SelfExpr.isInvalid())
+ return ExprError();
+
+ SemaRef.MarkAnyDeclReferenced(Loc, IV, true);
+
+ ObjCMethodFamily MF = CurMethod->getMethodFamily();
+ if (MF != OMF_init && MF != OMF_dealloc && MF != OMF_finalize &&
+ !IvarBacksCurrentMethodAccessor(IFace, CurMethod, IV))
+ Diag(Loc, diag::warn_direct_ivar_access) << IV->getDeclName();
+
+ ObjCIvarRefExpr *Result = new (Context)
+ ObjCIvarRefExpr(IV, IV->getUsageType(SelfExpr.get()->getType()), Loc,
+ IV->getLocation(), SelfExpr.get(), true, true);
+
+ if (IV->getType().getObjCLifetime() == Qualifiers::OCL_Weak) {
+ if (!SemaRef.isUnevaluatedContext() &&
+ !getDiagnostics().isIgnored(diag::warn_arc_repeated_use_of_weak, Loc))
+ SemaRef.getCurFunction()->recordUseOfWeak(Result);
+ }
+ if (getLangOpts().ObjCAutoRefCount && !SemaRef.isUnevaluatedContext())
+ if (const BlockDecl *BD = SemaRef.CurContext->getInnermostBlockDecl())
+ SemaRef.ImplicitlyRetainedSelfLocs.push_back({Loc, BD});
+
+ return Result;
+}
+
+QualType SemaObjC::FindCompositeObjCPointerType(ExprResult &LHS,
+ ExprResult &RHS,
+ SourceLocation QuestionLoc) {
+ ASTContext &Context = getASTContext();
+ QualType LHSTy = LHS.get()->getType();
+ QualType RHSTy = RHS.get()->getType();
+
+ // Handle things like Class and struct objc_class*. Here we case the result
+ // to the pseudo-builtin, because that will be implicitly cast back to the
+ // redefinition type if an attempt is made to access its fields.
+ if (LHSTy->isObjCClassType() &&
+ (Context.hasSameType(RHSTy, Context.getObjCClassRedefinitionType()))) {
+ RHS = SemaRef.ImpCastExprToType(RHS.get(), LHSTy,
+ CK_CPointerToObjCPointerCast);
+ return LHSTy;
+ }
+ if (RHSTy->isObjCClassType() &&
+ (Context.hasSameType(LHSTy, Context.getObjCClassRedefinitionType()))) {
+ LHS = SemaRef.ImpCastExprToType(LHS.get(), RHSTy,
+ CK_CPointerToObjCPointerCast);
+ return RHSTy;
+ }
+ // And the same for struct objc_object* / id
+ if (LHSTy->isObjCIdType() &&
+ (Context.hasSameType(RHSTy, Context.getObjCIdRedefinitionType()))) {
+ RHS = SemaRef.ImpCastExprToType(RHS.get(), LHSTy,
+ CK_CPointerToObjCPointerCast);
+ return LHSTy;
+ }
+ if (RHSTy->isObjCIdType() &&
+ (Context.hasSameType(LHSTy, Context.getObjCIdRedefinitionType()))) {
+ LHS = SemaRef.ImpCastExprToType(LHS.get(), RHSTy,
+ CK_CPointerToObjCPointerCast);
+ return RHSTy;
+ }
+ // And the same for struct objc_selector* / SEL
+ if (Context.isObjCSelType(LHSTy) &&
+ (Context.hasSameType(RHSTy, Context.getObjCSelRedefinitionType()))) {
+ RHS = SemaRef.ImpCastExprToType(RHS.get(), LHSTy, CK_BitCast);
+ return LHSTy;
+ }
+ if (Context.isObjCSelType(RHSTy) &&
+ (Context.hasSameType(LHSTy, Context.getObjCSelRedefinitionType()))) {
+ LHS = SemaRef.ImpCastExprToType(LHS.get(), RHSTy, CK_BitCast);
+ return RHSTy;
+ }
+ // Check constraints for Objective-C object pointers types.
+ if (LHSTy->isObjCObjectPointerType() && RHSTy->isObjCObjectPointerType()) {
+
+ if (Context.getCanonicalType(LHSTy) == Context.getCanonicalType(RHSTy)) {
+ // Two identical object pointer types are always compatible.
+ return LHSTy;
+ }
+ const ObjCObjectPointerType *LHSOPT =
+ LHSTy->castAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *RHSOPT =
+ RHSTy->castAs<ObjCObjectPointerType>();
+ QualType compositeType = LHSTy;
+
+ // If both operands are interfaces and either operand can be
+ // assigned to the other, use that type as the composite
+ // type. This allows
+ // xxx ? (A*) a : (B*) b
+ // where B is a subclass of A.
+ //
+ // Additionally, as for assignment, if either type is 'id'
+ // allow silent coercion. Finally, if the types are
+ // incompatible then make sure to use 'id' as the composite
+ // type so the result is acceptable for sending messages to.
+
+ // FIXME: Consider unifying with 'areComparableObjCPointerTypes'.
+ // It could return the composite type.
+ if (!(compositeType = Context.areCommonBaseCompatible(LHSOPT, RHSOPT))
+ .isNull()) {
+ // Nothing more to do.
+ } else if (Context.canAssignObjCInterfaces(LHSOPT, RHSOPT)) {
+ compositeType = RHSOPT->isObjCBuiltinType() ? RHSTy : LHSTy;
+ } else if (Context.canAssignObjCInterfaces(RHSOPT, LHSOPT)) {
+ compositeType = LHSOPT->isObjCBuiltinType() ? LHSTy : RHSTy;
+ } else if ((LHSOPT->isObjCQualifiedIdType() ||
+ RHSOPT->isObjCQualifiedIdType()) &&
+ Context.ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT,
+ true)) {
+ // Need to handle "id<xx>" explicitly.
+ // GCC allows qualified id and any Objective-C type to devolve to
+ // id. Currently localizing to here until clear this should be
+ // part of ObjCQualifiedIdTypesAreCompatible.
+ compositeType = Context.getObjCIdType();
+ } else if (LHSTy->isObjCIdType() || RHSTy->isObjCIdType()) {
+ compositeType = Context.getObjCIdType();
+ } else {
+ Diag(QuestionLoc, diag::ext_typecheck_cond_incompatible_operands)
+ << LHSTy << RHSTy << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ QualType incompatTy = Context.getObjCIdType();
+ LHS = SemaRef.ImpCastExprToType(LHS.get(), incompatTy, CK_BitCast);
+ RHS = SemaRef.ImpCastExprToType(RHS.get(), incompatTy, CK_BitCast);
+ return incompatTy;
+ }
+ // The object pointer types are compatible.
+ LHS = SemaRef.ImpCastExprToType(LHS.get(), compositeType, CK_BitCast);
+ RHS = SemaRef.ImpCastExprToType(RHS.get(), compositeType, CK_BitCast);
+ return compositeType;
+ }
+ // Check Objective-C object pointer types and 'void *'
+ if (LHSTy->isVoidPointerType() && RHSTy->isObjCObjectPointerType()) {
+ if (getLangOpts().ObjCAutoRefCount) {
+ // ARC forbids the implicit conversion of object pointers to 'void *',
+ // so these types are not compatible.
+ Diag(QuestionLoc, diag::err_cond_voidptr_arc)
+ << LHSTy << RHSTy << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ LHS = RHS = true;
+ return QualType();
+ }
+ QualType lhptee = LHSTy->castAs<PointerType>()->getPointeeType();
+ QualType rhptee = RHSTy->castAs<ObjCObjectPointerType>()->getPointeeType();
+ QualType destPointee =
+ Context.getQualifiedType(lhptee, rhptee.getQualifiers());
+ QualType destType = Context.getPointerType(destPointee);
+ // Add qualifiers if necessary.
+ LHS = SemaRef.ImpCastExprToType(LHS.get(), destType, CK_NoOp);
+ // Promote to void*.
+ RHS = SemaRef.ImpCastExprToType(RHS.get(), destType, CK_BitCast);
+ return destType;
+ }
+ if (LHSTy->isObjCObjectPointerType() && RHSTy->isVoidPointerType()) {
+ if (getLangOpts().ObjCAutoRefCount) {
+ // ARC forbids the implicit conversion of object pointers to 'void *',
+ // so these types are not compatible.
+ Diag(QuestionLoc, diag::err_cond_voidptr_arc)
+ << LHSTy << RHSTy << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ LHS = RHS = true;
+ return QualType();
+ }
+ QualType lhptee = LHSTy->castAs<ObjCObjectPointerType>()->getPointeeType();
+ QualType rhptee = RHSTy->castAs<PointerType>()->getPointeeType();
+ QualType destPointee =
+ Context.getQualifiedType(rhptee, lhptee.getQualifiers());
+ QualType destType = Context.getPointerType(destPointee);
+ // Add qualifiers if necessary.
+ RHS = SemaRef.ImpCastExprToType(RHS.get(), destType, CK_NoOp);
+ // Promote to void*.
+ LHS = SemaRef.ImpCastExprToType(LHS.get(), destType, CK_BitCast);
+ return destType;
+ }
+ return QualType();
+}
+
+bool SemaObjC::CheckConversionToObjCLiteral(QualType DstType, Expr *&Exp,
+ bool Diagnose) {
+ if (!getLangOpts().ObjC)
+ return false;
+
+ const ObjCObjectPointerType *PT = DstType->getAs<ObjCObjectPointerType>();
+ if (!PT)
+ return false;
+ const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
+
+ // Ignore any parens, implicit casts (should only be
+ // array-to-pointer decays), and not-so-opaque values. The last is
+ // important for making this trigger for property assignments.
+ Expr *SrcExpr = Exp->IgnoreParenImpCasts();
+ if (OpaqueValueExpr *OV = dyn_cast<OpaqueValueExpr>(SrcExpr))
+ if (OV->getSourceExpr())
+ SrcExpr = OV->getSourceExpr()->IgnoreParenImpCasts();
+
+ if (auto *SL = dyn_cast<StringLiteral>(SrcExpr)) {
+ if (!PT->isObjCIdType() && !(ID && ID->getIdentifier()->isStr("NSString")))
+ return false;
+ if (!SL->isOrdinary())
+ return false;
+
+ if (Diagnose) {
+ Diag(SL->getBeginLoc(), diag::err_missing_atsign_prefix)
+ << /*string*/ 0 << FixItHint::CreateInsertion(SL->getBeginLoc(), "@");
+ Exp = BuildObjCStringLiteral(SL->getBeginLoc(), SL).get();
+ }
+ return true;
+ }
+
+ if ((isa<IntegerLiteral>(SrcExpr) || isa<CharacterLiteral>(SrcExpr) ||
+ isa<FloatingLiteral>(SrcExpr) || isa<ObjCBoolLiteralExpr>(SrcExpr) ||
+ isa<CXXBoolLiteralExpr>(SrcExpr)) &&
+ !SrcExpr->isNullPointerConstant(getASTContext(),
+ Expr::NPC_NeverValueDependent)) {
+ if (!ID || !ID->getIdentifier()->isStr("NSNumber"))
+ return false;
+ if (Diagnose) {
+ Diag(SrcExpr->getBeginLoc(), diag::err_missing_atsign_prefix)
+ << /*number*/ 1
+ << FixItHint::CreateInsertion(SrcExpr->getBeginLoc(), "@");
+ Expr *NumLit =
+ BuildObjCNumericLiteral(SrcExpr->getBeginLoc(), SrcExpr).get();
+ if (NumLit)
+ Exp = NumLit;
+ }
+ return true;
+ }
+
+ return false;
+}
+
+/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
+ExprResult SemaObjC::ActOnObjCBoolLiteral(SourceLocation OpLoc,
+ tok::TokenKind Kind) {
+ assert((Kind == tok::kw___objc_yes || Kind == tok::kw___objc_no) &&
+ "Unknown Objective-C Boolean value!");
+ ASTContext &Context = getASTContext();
+ QualType BoolT = Context.ObjCBuiltinBoolTy;
+ if (!Context.getBOOLDecl()) {
+ LookupResult Result(SemaRef, &Context.Idents.get("BOOL"), OpLoc,
+ Sema::LookupOrdinaryName);
+ if (SemaRef.LookupName(Result, SemaRef.getCurScope()) &&
+ Result.isSingleResult()) {
+ NamedDecl *ND = Result.getFoundDecl();
+ if (TypedefDecl *TD = dyn_cast<TypedefDecl>(ND))
+ Context.setBOOLDecl(TD);
+ }
+ }
+ if (Context.getBOOLDecl())
+ BoolT = Context.getBOOLType();
+ return new (Context)
+ ObjCBoolLiteralExpr(Kind == tok::kw___objc_yes, BoolT, OpLoc);
+}
+
+ExprResult SemaObjC::ActOnObjCAvailabilityCheckExpr(
+ llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc,
+ SourceLocation RParen) {
+ ASTContext &Context = getASTContext();
+ auto FindSpecVersion =
+ [&](StringRef Platform) -> std::optional<VersionTuple> {
+ auto Spec = llvm::find_if(AvailSpecs, [&](const AvailabilitySpec &Spec) {
+ return Spec.getPlatform() == Platform;
+ });
+ // Transcribe the "ios" availability check to "maccatalyst" when compiling
+ // for "maccatalyst" if "maccatalyst" is not specified.
+ if (Spec == AvailSpecs.end() && Platform == "maccatalyst") {
+ Spec = llvm::find_if(AvailSpecs, [&](const AvailabilitySpec &Spec) {
+ return Spec.getPlatform() == "ios";
+ });
+ }
+ if (Spec == AvailSpecs.end())
+ return std::nullopt;
+ return Spec->getVersion();
+ };
+
+ VersionTuple Version;
+ if (auto MaybeVersion =
+ FindSpecVersion(Context.getTargetInfo().getPlatformName()))
+ Version = *MaybeVersion;
+
+ // The use of `@available` in the enclosing context should be analyzed to
+ // warn when it's used inappropriately (i.e. not if(@available)).
+ if (FunctionScopeInfo *Context = SemaRef.getCurFunctionAvailabilityContext())
+ Context->HasPotentialAvailabilityViolations = true;
+
+ return new (Context)
+ ObjCAvailabilityCheckExpr(Version, AtLoc, RParen, Context.BoolTy);
+}
+
+/// Prepare a conversion of the given expression to an ObjC object
+/// pointer type.
+CastKind SemaObjC::PrepareCastToObjCObjectPointer(ExprResult &E) {
+ QualType type = E.get()->getType();
+ if (type->isObjCObjectPointerType()) {
+ return CK_BitCast;
+ } else if (type->isBlockPointerType()) {
+ SemaRef.maybeExtendBlockObject(E);
+ return CK_BlockPointerToObjCPointerCast;
+ } else {
+ assert(type->isPointerType());
+ return CK_CPointerToObjCPointerCast;
+ }
+}
+
+SemaObjC::ObjCLiteralKind SemaObjC::CheckLiteralKind(Expr *FromE) {
+ FromE = FromE->IgnoreParenImpCasts();
+ switch (FromE->getStmtClass()) {
+ default:
+ break;
+ case Stmt::ObjCStringLiteralClass:
+ // "string literal"
+ return LK_String;
+ case Stmt::ObjCArrayLiteralClass:
+ // "array literal"
+ return LK_Array;
+ case Stmt::ObjCDictionaryLiteralClass:
+ // "dictionary literal"
+ return LK_Dictionary;
+ case Stmt::BlockExprClass:
+ return LK_Block;
+ case Stmt::ObjCBoxedExprClass: {
+ Expr *Inner = cast<ObjCBoxedExpr>(FromE)->getSubExpr()->IgnoreParens();
+ switch (Inner->getStmtClass()) {
+ case Stmt::IntegerLiteralClass:
+ case Stmt::FloatingLiteralClass:
+ case Stmt::CharacterLiteralClass:
+ case Stmt::ObjCBoolLiteralExprClass:
+ case Stmt::CXXBoolLiteralExprClass:
+ // "numeric literal"
+ return LK_Numeric;
+ case Stmt::ImplicitCastExprClass: {
+ CastKind CK = cast<CastExpr>(Inner)->getCastKind();
+ // Boolean literals can be represented by implicit casts.
+ if (CK == CK_IntegralToBoolean || CK == CK_IntegralCast)
+ return LK_Numeric;
+ break;
+ }
+ default:
+ break;
+ }
+ return LK_Boxed;
+ }
+ }
+ return LK_None;
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaHLSL.cpp b/contrib/llvm-project/clang/lib/Sema/SemaHLSL.cpp
index cf82cc9bccdf..9940bc5b4a60 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaHLSL.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaHLSL.cpp
@@ -8,27 +8,1116 @@
// This implements Semantic Analysis for HLSL constructs.
//===----------------------------------------------------------------------===//
+#include "clang/Sema/SemaHLSL.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/ParsedAttr.h"
#include "clang/Sema/Sema.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/TargetParser/Triple.h"
+#include <iterator>
using namespace clang;
-Decl *Sema::ActOnStartHLSLBuffer(Scope *BufferScope, bool CBuffer,
+SemaHLSL::SemaHLSL(Sema &S) : SemaBase(S) {}
+
+Decl *SemaHLSL::ActOnStartBuffer(Scope *BufferScope, bool CBuffer,
SourceLocation KwLoc, IdentifierInfo *Ident,
SourceLocation IdentLoc,
SourceLocation LBrace) {
// For anonymous namespace, take the location of the left brace.
- DeclContext *LexicalParent = getCurLexicalContext();
+ DeclContext *LexicalParent = SemaRef.getCurLexicalContext();
HLSLBufferDecl *Result = HLSLBufferDecl::Create(
- Context, LexicalParent, CBuffer, KwLoc, Ident, IdentLoc, LBrace);
+ getASTContext(), LexicalParent, CBuffer, KwLoc, Ident, IdentLoc, LBrace);
- PushOnScopeChains(Result, BufferScope);
- PushDeclContext(BufferScope, Result);
+ SemaRef.PushOnScopeChains(Result, BufferScope);
+ SemaRef.PushDeclContext(BufferScope, Result);
return Result;
}
-void Sema::ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace) {
+// Calculate the size of a legacy cbuffer type based on
+// https://learn.microsoft.com/en-us/windows/win32/direct3dhlsl/dx-graphics-hlsl-packing-rules
+static unsigned calculateLegacyCbufferSize(const ASTContext &Context,
+ QualType T) {
+ unsigned Size = 0;
+ constexpr unsigned CBufferAlign = 128;
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ for (const FieldDecl *Field : RD->fields()) {
+ QualType Ty = Field->getType();
+ unsigned FieldSize = calculateLegacyCbufferSize(Context, Ty);
+ unsigned FieldAlign = 32;
+ if (Ty->isAggregateType())
+ FieldAlign = CBufferAlign;
+ Size = llvm::alignTo(Size, FieldAlign);
+ Size += FieldSize;
+ }
+ } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
+ if (unsigned ElementCount = AT->getSize().getZExtValue()) {
+ unsigned ElementSize =
+ calculateLegacyCbufferSize(Context, AT->getElementType());
+ unsigned AlignedElementSize = llvm::alignTo(ElementSize, CBufferAlign);
+ Size = AlignedElementSize * (ElementCount - 1) + ElementSize;
+ }
+ } else if (const VectorType *VT = T->getAs<VectorType>()) {
+ unsigned ElementCount = VT->getNumElements();
+ unsigned ElementSize =
+ calculateLegacyCbufferSize(Context, VT->getElementType());
+ Size = ElementSize * ElementCount;
+ } else {
+ Size = Context.getTypeSize(T);
+ }
+ return Size;
+}
+
+void SemaHLSL::ActOnFinishBuffer(Decl *Dcl, SourceLocation RBrace) {
auto *BufDecl = cast<HLSLBufferDecl>(Dcl);
BufDecl->setRBraceLoc(RBrace);
- PopDeclContext();
+
+ // Validate packoffset.
+ llvm::SmallVector<std::pair<VarDecl *, HLSLPackOffsetAttr *>> PackOffsetVec;
+ bool HasPackOffset = false;
+ bool HasNonPackOffset = false;
+ for (auto *Field : BufDecl->decls()) {
+ VarDecl *Var = dyn_cast<VarDecl>(Field);
+ if (!Var)
+ continue;
+ if (Field->hasAttr<HLSLPackOffsetAttr>()) {
+ PackOffsetVec.emplace_back(Var, Field->getAttr<HLSLPackOffsetAttr>());
+ HasPackOffset = true;
+ } else {
+ HasNonPackOffset = true;
+ }
+ }
+
+ if (HasPackOffset && HasNonPackOffset)
+ Diag(BufDecl->getLocation(), diag::warn_hlsl_packoffset_mix);
+
+ if (HasPackOffset) {
+ ASTContext &Context = getASTContext();
+ // Make sure no overlap in packoffset.
+ // Sort PackOffsetVec by offset.
+ std::sort(PackOffsetVec.begin(), PackOffsetVec.end(),
+ [](const std::pair<VarDecl *, HLSLPackOffsetAttr *> &LHS,
+ const std::pair<VarDecl *, HLSLPackOffsetAttr *> &RHS) {
+ return LHS.second->getOffset() < RHS.second->getOffset();
+ });
+
+ for (unsigned i = 0; i < PackOffsetVec.size() - 1; i++) {
+ VarDecl *Var = PackOffsetVec[i].first;
+ HLSLPackOffsetAttr *Attr = PackOffsetVec[i].second;
+ unsigned Size = calculateLegacyCbufferSize(Context, Var->getType());
+ unsigned Begin = Attr->getOffset() * 32;
+ unsigned End = Begin + Size;
+ unsigned NextBegin = PackOffsetVec[i + 1].second->getOffset() * 32;
+ if (End > NextBegin) {
+ VarDecl *NextVar = PackOffsetVec[i + 1].first;
+ Diag(NextVar->getLocation(), diag::err_hlsl_packoffset_overlap)
+ << NextVar << Var;
+ }
+ }
+ }
+
+ SemaRef.PopDeclContext();
+}
+
+HLSLNumThreadsAttr *SemaHLSL::mergeNumThreadsAttr(Decl *D,
+ const AttributeCommonInfo &AL,
+ int X, int Y, int Z) {
+ if (HLSLNumThreadsAttr *NT = D->getAttr<HLSLNumThreadsAttr>()) {
+ if (NT->getX() != X || NT->getY() != Y || NT->getZ() != Z) {
+ Diag(NT->getLocation(), diag::err_hlsl_attribute_param_mismatch) << AL;
+ Diag(AL.getLoc(), diag::note_conflicting_attribute);
+ }
+ return nullptr;
+ }
+ return ::new (getASTContext())
+ HLSLNumThreadsAttr(getASTContext(), AL, X, Y, Z);
+}
+
+HLSLShaderAttr *
+SemaHLSL::mergeShaderAttr(Decl *D, const AttributeCommonInfo &AL,
+ llvm::Triple::EnvironmentType ShaderType) {
+ if (HLSLShaderAttr *NT = D->getAttr<HLSLShaderAttr>()) {
+ if (NT->getType() != ShaderType) {
+ Diag(NT->getLocation(), diag::err_hlsl_attribute_param_mismatch) << AL;
+ Diag(AL.getLoc(), diag::note_conflicting_attribute);
+ }
+ return nullptr;
+ }
+ return HLSLShaderAttr::Create(getASTContext(), ShaderType, AL);
+}
+
+HLSLParamModifierAttr *
+SemaHLSL::mergeParamModifierAttr(Decl *D, const AttributeCommonInfo &AL,
+ HLSLParamModifierAttr::Spelling Spelling) {
+ // We can only merge an `in` attribute with an `out` attribute. All other
+ // combinations of duplicated attributes are ill-formed.
+ if (HLSLParamModifierAttr *PA = D->getAttr<HLSLParamModifierAttr>()) {
+ if ((PA->isIn() && Spelling == HLSLParamModifierAttr::Keyword_out) ||
+ (PA->isOut() && Spelling == HLSLParamModifierAttr::Keyword_in)) {
+ D->dropAttr<HLSLParamModifierAttr>();
+ SourceRange AdjustedRange = {PA->getLocation(), AL.getRange().getEnd()};
+ return HLSLParamModifierAttr::Create(
+ getASTContext(), /*MergedSpelling=*/true, AdjustedRange,
+ HLSLParamModifierAttr::Keyword_inout);
+ }
+ Diag(AL.getLoc(), diag::err_hlsl_duplicate_parameter_modifier) << AL;
+ Diag(PA->getLocation(), diag::note_conflicting_attribute);
+ return nullptr;
+ }
+ return HLSLParamModifierAttr::Create(getASTContext(), AL);
+}
+
+void SemaHLSL::ActOnTopLevelFunction(FunctionDecl *FD) {
+ auto &TargetInfo = getASTContext().getTargetInfo();
+
+ if (FD->getName() != TargetInfo.getTargetOpts().HLSLEntry)
+ return;
+
+ llvm::Triple::EnvironmentType Env = TargetInfo.getTriple().getEnvironment();
+ if (HLSLShaderAttr::isValidShaderType(Env) && Env != llvm::Triple::Library) {
+ if (const auto *Shader = FD->getAttr<HLSLShaderAttr>()) {
+ // The entry point is already annotated - check that it matches the
+ // triple.
+ if (Shader->getType() != Env) {
+ Diag(Shader->getLocation(), diag::err_hlsl_entry_shader_attr_mismatch)
+ << Shader;
+ FD->setInvalidDecl();
+ }
+ } else {
+ // Implicitly add the shader attribute if the entry function isn't
+ // explicitly annotated.
+ FD->addAttr(HLSLShaderAttr::CreateImplicit(getASTContext(), Env,
+ FD->getBeginLoc()));
+ }
+ } else {
+ switch (Env) {
+ case llvm::Triple::UnknownEnvironment:
+ case llvm::Triple::Library:
+ break;
+ default:
+ llvm_unreachable("Unhandled environment in triple");
+ }
+ }
+}
+
+void SemaHLSL::CheckEntryPoint(FunctionDecl *FD) {
+ const auto *ShaderAttr = FD->getAttr<HLSLShaderAttr>();
+ assert(ShaderAttr && "Entry point has no shader attribute");
+ llvm::Triple::EnvironmentType ST = ShaderAttr->getType();
+
+ switch (ST) {
+ case llvm::Triple::Pixel:
+ case llvm::Triple::Vertex:
+ case llvm::Triple::Geometry:
+ case llvm::Triple::Hull:
+ case llvm::Triple::Domain:
+ case llvm::Triple::RayGeneration:
+ case llvm::Triple::Intersection:
+ case llvm::Triple::AnyHit:
+ case llvm::Triple::ClosestHit:
+ case llvm::Triple::Miss:
+ case llvm::Triple::Callable:
+ if (const auto *NT = FD->getAttr<HLSLNumThreadsAttr>()) {
+ DiagnoseAttrStageMismatch(NT, ST,
+ {llvm::Triple::Compute,
+ llvm::Triple::Amplification,
+ llvm::Triple::Mesh});
+ FD->setInvalidDecl();
+ }
+ break;
+
+ case llvm::Triple::Compute:
+ case llvm::Triple::Amplification:
+ case llvm::Triple::Mesh:
+ if (!FD->hasAttr<HLSLNumThreadsAttr>()) {
+ Diag(FD->getLocation(), diag::err_hlsl_missing_numthreads)
+ << llvm::Triple::getEnvironmentTypeName(ST);
+ FD->setInvalidDecl();
+ }
+ break;
+ default:
+ llvm_unreachable("Unhandled environment in triple");
+ }
+
+ for (ParmVarDecl *Param : FD->parameters()) {
+ if (const auto *AnnotationAttr = Param->getAttr<HLSLAnnotationAttr>()) {
+ CheckSemanticAnnotation(FD, Param, AnnotationAttr);
+ } else {
+ // FIXME: Handle struct parameters where annotations are on struct fields.
+ // See: https://github.com/llvm/llvm-project/issues/57875
+ Diag(FD->getLocation(), diag::err_hlsl_missing_semantic_annotation);
+ Diag(Param->getLocation(), diag::note_previous_decl) << Param;
+ FD->setInvalidDecl();
+ }
+ }
+ // FIXME: Verify return type semantic annotation.
+}
+
+void SemaHLSL::CheckSemanticAnnotation(
+ FunctionDecl *EntryPoint, const Decl *Param,
+ const HLSLAnnotationAttr *AnnotationAttr) {
+ auto *ShaderAttr = EntryPoint->getAttr<HLSLShaderAttr>();
+ assert(ShaderAttr && "Entry point has no shader attribute");
+ llvm::Triple::EnvironmentType ST = ShaderAttr->getType();
+
+ switch (AnnotationAttr->getKind()) {
+ case attr::HLSLSV_DispatchThreadID:
+ case attr::HLSLSV_GroupIndex:
+ if (ST == llvm::Triple::Compute)
+ return;
+ DiagnoseAttrStageMismatch(AnnotationAttr, ST, {llvm::Triple::Compute});
+ break;
+ default:
+ llvm_unreachable("Unknown HLSLAnnotationAttr");
+ }
+}
+
+void SemaHLSL::DiagnoseAttrStageMismatch(
+ const Attr *A, llvm::Triple::EnvironmentType Stage,
+ std::initializer_list<llvm::Triple::EnvironmentType> AllowedStages) {
+ SmallVector<StringRef, 8> StageStrings;
+ llvm::transform(AllowedStages, std::back_inserter(StageStrings),
+ [](llvm::Triple::EnvironmentType ST) {
+ return StringRef(
+ HLSLShaderAttr::ConvertEnvironmentTypeToStr(ST));
+ });
+ Diag(A->getLoc(), diag::err_hlsl_attr_unsupported_in_stage)
+ << A << llvm::Triple::getEnvironmentTypeName(Stage)
+ << (AllowedStages.size() != 1) << join(StageStrings, ", ");
+}
+
+void SemaHLSL::handleNumThreadsAttr(Decl *D, const ParsedAttr &AL) {
+ llvm::VersionTuple SMVersion =
+ getASTContext().getTargetInfo().getTriple().getOSVersion();
+ uint32_t ZMax = 1024;
+ uint32_t ThreadMax = 1024;
+ if (SMVersion.getMajor() <= 4) {
+ ZMax = 1;
+ ThreadMax = 768;
+ } else if (SMVersion.getMajor() == 5) {
+ ZMax = 64;
+ ThreadMax = 1024;
+ }
+
+ uint32_t X;
+ if (!SemaRef.checkUInt32Argument(AL, AL.getArgAsExpr(0), X))
+ return;
+ if (X > 1024) {
+ Diag(AL.getArgAsExpr(0)->getExprLoc(),
+ diag::err_hlsl_numthreads_argument_oor)
+ << 0 << 1024;
+ return;
+ }
+ uint32_t Y;
+ if (!SemaRef.checkUInt32Argument(AL, AL.getArgAsExpr(1), Y))
+ return;
+ if (Y > 1024) {
+ Diag(AL.getArgAsExpr(1)->getExprLoc(),
+ diag::err_hlsl_numthreads_argument_oor)
+ << 1 << 1024;
+ return;
+ }
+ uint32_t Z;
+ if (!SemaRef.checkUInt32Argument(AL, AL.getArgAsExpr(2), Z))
+ return;
+ if (Z > ZMax) {
+ SemaRef.Diag(AL.getArgAsExpr(2)->getExprLoc(),
+ diag::err_hlsl_numthreads_argument_oor)
+ << 2 << ZMax;
+ return;
+ }
+
+ if (X * Y * Z > ThreadMax) {
+ Diag(AL.getLoc(), diag::err_hlsl_numthreads_invalid) << ThreadMax;
+ return;
+ }
+
+ HLSLNumThreadsAttr *NewAttr = mergeNumThreadsAttr(D, AL, X, Y, Z);
+ if (NewAttr)
+ D->addAttr(NewAttr);
+}
+
+static bool isLegalTypeForHLSLSV_DispatchThreadID(QualType T) {
+ if (!T->hasUnsignedIntegerRepresentation())
+ return false;
+ if (const auto *VT = T->getAs<VectorType>())
+ return VT->getNumElements() <= 3;
+ return true;
+}
+
+void SemaHLSL::handleSV_DispatchThreadIDAttr(Decl *D, const ParsedAttr &AL) {
+ auto *VD = cast<ValueDecl>(D);
+ if (!isLegalTypeForHLSLSV_DispatchThreadID(VD->getType())) {
+ Diag(AL.getLoc(), diag::err_hlsl_attr_invalid_type)
+ << AL << "uint/uint2/uint3";
+ return;
+ }
+
+ D->addAttr(::new (getASTContext())
+ HLSLSV_DispatchThreadIDAttr(getASTContext(), AL));
+}
+
+void SemaHLSL::handlePackOffsetAttr(Decl *D, const ParsedAttr &AL) {
+ if (!isa<VarDecl>(D) || !isa<HLSLBufferDecl>(D->getDeclContext())) {
+ Diag(AL.getLoc(), diag::err_hlsl_attr_invalid_ast_node)
+ << AL << "shader constant in a constant buffer";
+ return;
+ }
+
+ uint32_t SubComponent;
+ if (!SemaRef.checkUInt32Argument(AL, AL.getArgAsExpr(0), SubComponent))
+ return;
+ uint32_t Component;
+ if (!SemaRef.checkUInt32Argument(AL, AL.getArgAsExpr(1), Component))
+ return;
+
+ QualType T = cast<VarDecl>(D)->getType().getCanonicalType();
+ // Check if T is an array or struct type.
+ // TODO: mark matrix type as aggregate type.
+ bool IsAggregateTy = (T->isArrayType() || T->isStructureType());
+
+ // Check Component is valid for T.
+ if (Component) {
+ unsigned Size = getASTContext().getTypeSize(T);
+ if (IsAggregateTy || Size > 128) {
+ Diag(AL.getLoc(), diag::err_hlsl_packoffset_cross_reg_boundary);
+ return;
+ } else {
+ // Make sure Component + sizeof(T) <= 4.
+ if ((Component * 32 + Size) > 128) {
+ Diag(AL.getLoc(), diag::err_hlsl_packoffset_cross_reg_boundary);
+ return;
+ }
+ QualType EltTy = T;
+ if (const auto *VT = T->getAs<VectorType>())
+ EltTy = VT->getElementType();
+ unsigned Align = getASTContext().getTypeAlign(EltTy);
+ if (Align > 32 && Component == 1) {
+ // NOTE: Component 3 will hit err_hlsl_packoffset_cross_reg_boundary.
+ // So we only need to check Component 1 here.
+ Diag(AL.getLoc(), diag::err_hlsl_packoffset_alignment_mismatch)
+ << Align << EltTy;
+ return;
+ }
+ }
+ }
+
+ D->addAttr(::new (getASTContext()) HLSLPackOffsetAttr(
+ getASTContext(), AL, SubComponent, Component));
+}
+
+void SemaHLSL::handleShaderAttr(Decl *D, const ParsedAttr &AL) {
+ StringRef Str;
+ SourceLocation ArgLoc;
+ if (!SemaRef.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
+ return;
+
+ llvm::Triple::EnvironmentType ShaderType;
+ if (!HLSLShaderAttr::ConvertStrToEnvironmentType(Str, ShaderType)) {
+ Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
+ << AL << Str << ArgLoc;
+ return;
+ }
+
+ // FIXME: check function match the shader stage.
+
+ HLSLShaderAttr *NewAttr = mergeShaderAttr(D, AL, ShaderType);
+ if (NewAttr)
+ D->addAttr(NewAttr);
+}
+
+void SemaHLSL::handleResourceClassAttr(Decl *D, const ParsedAttr &AL) {
+ if (!AL.isArgIdent(0)) {
+ Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ IdentifierLoc *Loc = AL.getArgAsIdent(0);
+ StringRef Identifier = Loc->Ident->getName();
+ SourceLocation ArgLoc = Loc->Loc;
+
+ // Validate.
+ llvm::dxil::ResourceClass RC;
+ if (!HLSLResourceClassAttr::ConvertStrToResourceClass(Identifier, RC)) {
+ Diag(ArgLoc, diag::warn_attribute_type_not_supported)
+ << "ResourceClass" << Identifier;
+ return;
+ }
+
+ D->addAttr(HLSLResourceClassAttr::Create(getASTContext(), RC, ArgLoc));
+}
+
+void SemaHLSL::handleResourceBindingAttr(Decl *D, const ParsedAttr &AL) {
+ StringRef Space = "space0";
+ StringRef Slot = "";
+
+ if (!AL.isArgIdent(0)) {
+ Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ IdentifierLoc *Loc = AL.getArgAsIdent(0);
+ StringRef Str = Loc->Ident->getName();
+ SourceLocation ArgLoc = Loc->Loc;
+
+ SourceLocation SpaceArgLoc;
+ if (AL.getNumArgs() == 2) {
+ Slot = Str;
+ if (!AL.isArgIdent(1)) {
+ Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ IdentifierLoc *Loc = AL.getArgAsIdent(1);
+ Space = Loc->Ident->getName();
+ SpaceArgLoc = Loc->Loc;
+ } else {
+ Slot = Str;
+ }
+
+ // Validate.
+ if (!Slot.empty()) {
+ switch (Slot[0]) {
+ case 'u':
+ case 'b':
+ case 's':
+ case 't':
+ break;
+ default:
+ Diag(ArgLoc, diag::err_hlsl_unsupported_register_type)
+ << Slot.substr(0, 1);
+ return;
+ }
+
+ StringRef SlotNum = Slot.substr(1);
+ unsigned Num = 0;
+ if (SlotNum.getAsInteger(10, Num)) {
+ Diag(ArgLoc, diag::err_hlsl_unsupported_register_number);
+ return;
+ }
+ }
+
+ if (!Space.starts_with("space")) {
+ Diag(SpaceArgLoc, diag::err_hlsl_expected_space) << Space;
+ return;
+ }
+ StringRef SpaceNum = Space.substr(5);
+ unsigned Num = 0;
+ if (SpaceNum.getAsInteger(10, Num)) {
+ Diag(SpaceArgLoc, diag::err_hlsl_expected_space) << Space;
+ return;
+ }
+
+ // FIXME: check reg type match decl. Issue
+ // https://github.com/llvm/llvm-project/issues/57886.
+ HLSLResourceBindingAttr *NewAttr =
+ HLSLResourceBindingAttr::Create(getASTContext(), Slot, Space, AL);
+ if (NewAttr)
+ D->addAttr(NewAttr);
+}
+
+void SemaHLSL::handleParamModifierAttr(Decl *D, const ParsedAttr &AL) {
+ HLSLParamModifierAttr *NewAttr = mergeParamModifierAttr(
+ D, AL,
+ static_cast<HLSLParamModifierAttr::Spelling>(AL.getSemanticSpelling()));
+ if (NewAttr)
+ D->addAttr(NewAttr);
+}
+
+namespace {
+
+/// This class implements HLSL availability diagnostics for default
+/// and relaxed mode
+///
+/// The goal of this diagnostic is to emit an error or warning when an
+/// unavailable API is found in code that is reachable from the shader
+/// entry function or from an exported function (when compiling a shader
+/// library).
+///
+/// This is done by traversing the AST of all shader entry point functions
+/// and of all exported functions, and any functions that are referenced
+/// from this AST. In other words, any functions that are reachable from
+/// the entry points.
+class DiagnoseHLSLAvailability
+ : public RecursiveASTVisitor<DiagnoseHLSLAvailability> {
+
+ Sema &SemaRef;
+
+ // Stack of functions to be scaned
+ llvm::SmallVector<const FunctionDecl *, 8> DeclsToScan;
+
+ // Tracks which environments functions have been scanned in.
+ //
+ // Maps FunctionDecl to an unsigned number that represents the set of shader
+ // environments the function has been scanned for.
+ // The llvm::Triple::EnvironmentType enum values for shader stages guaranteed
+ // to be numbered from llvm::Triple::Pixel to llvm::Triple::Amplification
+ // (verified by static_asserts in Triple.cpp), we can use it to index
+ // individual bits in the set, as long as we shift the values to start with 0
+ // by subtracting the value of llvm::Triple::Pixel first.
+ //
+ // The N'th bit in the set will be set if the function has been scanned
+ // in shader environment whose llvm::Triple::EnvironmentType integer value
+ // equals (llvm::Triple::Pixel + N).
+ //
+ // For example, if a function has been scanned in compute and pixel stage
+ // environment, the value will be 0x21 (100001 binary) because:
+ //
+ // (int)(llvm::Triple::Pixel - llvm::Triple::Pixel) == 0
+ // (int)(llvm::Triple::Compute - llvm::Triple::Pixel) == 5
+ //
+ // A FunctionDecl is mapped to 0 (or not included in the map) if it has not
+ // been scanned in any environment.
+ llvm::DenseMap<const FunctionDecl *, unsigned> ScannedDecls;
+
+ // Do not access these directly, use the get/set methods below to make
+ // sure the values are in sync
+ llvm::Triple::EnvironmentType CurrentShaderEnvironment;
+ unsigned CurrentShaderStageBit;
+
+ // True if scanning a function that was already scanned in a different
+ // shader stage context, and therefore we should not report issues that
+ // depend only on shader model version because they would be duplicate.
+ bool ReportOnlyShaderStageIssues;
+
+ // Helper methods for dealing with current stage context / environment
+ void SetShaderStageContext(llvm::Triple::EnvironmentType ShaderType) {
+ static_assert(sizeof(unsigned) >= 4);
+ assert(HLSLShaderAttr::isValidShaderType(ShaderType));
+ assert((unsigned)(ShaderType - llvm::Triple::Pixel) < 31 &&
+ "ShaderType is too big for this bitmap"); // 31 is reserved for
+ // "unknown"
+
+ unsigned bitmapIndex = ShaderType - llvm::Triple::Pixel;
+ CurrentShaderEnvironment = ShaderType;
+ CurrentShaderStageBit = (1 << bitmapIndex);
+ }
+
+ void SetUnknownShaderStageContext() {
+ CurrentShaderEnvironment = llvm::Triple::UnknownEnvironment;
+ CurrentShaderStageBit = (1 << 31);
+ }
+
+ llvm::Triple::EnvironmentType GetCurrentShaderEnvironment() const {
+ return CurrentShaderEnvironment;
+ }
+
+ bool InUnknownShaderStageContext() const {
+ return CurrentShaderEnvironment == llvm::Triple::UnknownEnvironment;
+ }
+
+ // Helper methods for dealing with shader stage bitmap
+ void AddToScannedFunctions(const FunctionDecl *FD) {
+ unsigned &ScannedStages = ScannedDecls.getOrInsertDefault(FD);
+ ScannedStages |= CurrentShaderStageBit;
+ }
+
+ unsigned GetScannedStages(const FunctionDecl *FD) {
+ return ScannedDecls.getOrInsertDefault(FD);
+ }
+
+ bool WasAlreadyScannedInCurrentStage(const FunctionDecl *FD) {
+ return WasAlreadyScannedInCurrentStage(GetScannedStages(FD));
+ }
+
+ bool WasAlreadyScannedInCurrentStage(unsigned ScannerStages) {
+ return ScannerStages & CurrentShaderStageBit;
+ }
+
+ static bool NeverBeenScanned(unsigned ScannedStages) {
+ return ScannedStages == 0;
+ }
+
+ // Scanning methods
+ void HandleFunctionOrMethodRef(FunctionDecl *FD, Expr *RefExpr);
+ void CheckDeclAvailability(NamedDecl *D, const AvailabilityAttr *AA,
+ SourceRange Range);
+ const AvailabilityAttr *FindAvailabilityAttr(const Decl *D);
+ bool HasMatchingEnvironmentOrNone(const AvailabilityAttr *AA);
+
+public:
+ DiagnoseHLSLAvailability(Sema &SemaRef) : SemaRef(SemaRef) {}
+
+ // AST traversal methods
+ void RunOnTranslationUnit(const TranslationUnitDecl *TU);
+ void RunOnFunction(const FunctionDecl *FD);
+
+ bool VisitDeclRefExpr(DeclRefExpr *DRE) {
+ FunctionDecl *FD = llvm::dyn_cast<FunctionDecl>(DRE->getDecl());
+ if (FD)
+ HandleFunctionOrMethodRef(FD, DRE);
+ return true;
+ }
+
+ bool VisitMemberExpr(MemberExpr *ME) {
+ FunctionDecl *FD = llvm::dyn_cast<FunctionDecl>(ME->getMemberDecl());
+ if (FD)
+ HandleFunctionOrMethodRef(FD, ME);
+ return true;
+ }
+};
+
+void DiagnoseHLSLAvailability::HandleFunctionOrMethodRef(FunctionDecl *FD,
+ Expr *RefExpr) {
+ assert((isa<DeclRefExpr>(RefExpr) || isa<MemberExpr>(RefExpr)) &&
+ "expected DeclRefExpr or MemberExpr");
+
+ // has a definition -> add to stack to be scanned
+ const FunctionDecl *FDWithBody = nullptr;
+ if (FD->hasBody(FDWithBody)) {
+ if (!WasAlreadyScannedInCurrentStage(FDWithBody))
+ DeclsToScan.push_back(FDWithBody);
+ return;
+ }
+
+ // no body -> diagnose availability
+ const AvailabilityAttr *AA = FindAvailabilityAttr(FD);
+ if (AA)
+ CheckDeclAvailability(
+ FD, AA, SourceRange(RefExpr->getBeginLoc(), RefExpr->getEndLoc()));
+}
+
+void DiagnoseHLSLAvailability::RunOnTranslationUnit(
+ const TranslationUnitDecl *TU) {
+
+ // Iterate over all shader entry functions and library exports, and for those
+ // that have a body (definiton), run diag scan on each, setting appropriate
+ // shader environment context based on whether it is a shader entry function
+ // or an exported function. Exported functions can be in namespaces and in
+ // export declarations so we need to scan those declaration contexts as well.
+ llvm::SmallVector<const DeclContext *, 8> DeclContextsToScan;
+ DeclContextsToScan.push_back(TU);
+
+ while (!DeclContextsToScan.empty()) {
+ const DeclContext *DC = DeclContextsToScan.pop_back_val();
+ for (auto &D : DC->decls()) {
+ // do not scan implicit declaration generated by the implementation
+ if (D->isImplicit())
+ continue;
+
+ // for namespace or export declaration add the context to the list to be
+ // scanned later
+ if (llvm::dyn_cast<NamespaceDecl>(D) || llvm::dyn_cast<ExportDecl>(D)) {
+ DeclContextsToScan.push_back(llvm::dyn_cast<DeclContext>(D));
+ continue;
+ }
+
+ // skip over other decls or function decls without body
+ const FunctionDecl *FD = llvm::dyn_cast<FunctionDecl>(D);
+ if (!FD || !FD->isThisDeclarationADefinition())
+ continue;
+
+ // shader entry point
+ if (HLSLShaderAttr *ShaderAttr = FD->getAttr<HLSLShaderAttr>()) {
+ SetShaderStageContext(ShaderAttr->getType());
+ RunOnFunction(FD);
+ continue;
+ }
+ // exported library function
+ // FIXME: replace this loop with external linkage check once issue #92071
+ // is resolved
+ bool isExport = FD->isInExportDeclContext();
+ if (!isExport) {
+ for (const auto *Redecl : FD->redecls()) {
+ if (Redecl->isInExportDeclContext()) {
+ isExport = true;
+ break;
+ }
+ }
+ }
+ if (isExport) {
+ SetUnknownShaderStageContext();
+ RunOnFunction(FD);
+ continue;
+ }
+ }
+ }
+}
+
+void DiagnoseHLSLAvailability::RunOnFunction(const FunctionDecl *FD) {
+ assert(DeclsToScan.empty() && "DeclsToScan should be empty");
+ DeclsToScan.push_back(FD);
+
+ while (!DeclsToScan.empty()) {
+ // Take one decl from the stack and check it by traversing its AST.
+ // For any CallExpr found during the traversal add it's callee to the top of
+ // the stack to be processed next. Functions already processed are stored in
+ // ScannedDecls.
+ const FunctionDecl *FD = DeclsToScan.pop_back_val();
+
+ // Decl was already scanned
+ const unsigned ScannedStages = GetScannedStages(FD);
+ if (WasAlreadyScannedInCurrentStage(ScannedStages))
+ continue;
+
+ ReportOnlyShaderStageIssues = !NeverBeenScanned(ScannedStages);
+
+ AddToScannedFunctions(FD);
+ TraverseStmt(FD->getBody());
+ }
+}
+
+bool DiagnoseHLSLAvailability::HasMatchingEnvironmentOrNone(
+ const AvailabilityAttr *AA) {
+ IdentifierInfo *IIEnvironment = AA->getEnvironment();
+ if (!IIEnvironment)
+ return true;
+
+ llvm::Triple::EnvironmentType CurrentEnv = GetCurrentShaderEnvironment();
+ if (CurrentEnv == llvm::Triple::UnknownEnvironment)
+ return false;
+
+ llvm::Triple::EnvironmentType AttrEnv =
+ AvailabilityAttr::getEnvironmentType(IIEnvironment->getName());
+
+ return CurrentEnv == AttrEnv;
+}
+
+const AvailabilityAttr *
+DiagnoseHLSLAvailability::FindAvailabilityAttr(const Decl *D) {
+ AvailabilityAttr const *PartialMatch = nullptr;
+ // Check each AvailabilityAttr to find the one for this platform.
+ // For multiple attributes with the same platform try to find one for this
+ // environment.
+ for (const auto *A : D->attrs()) {
+ if (const auto *Avail = dyn_cast<AvailabilityAttr>(A)) {
+ StringRef AttrPlatform = Avail->getPlatform()->getName();
+ StringRef TargetPlatform =
+ SemaRef.getASTContext().getTargetInfo().getPlatformName();
+
+ // Match the platform name.
+ if (AttrPlatform == TargetPlatform) {
+ // Find the best matching attribute for this environment
+ if (HasMatchingEnvironmentOrNone(Avail))
+ return Avail;
+ PartialMatch = Avail;
+ }
+ }
+ }
+ return PartialMatch;
+}
+
+// Check availability against target shader model version and current shader
+// stage and emit diagnostic
+void DiagnoseHLSLAvailability::CheckDeclAvailability(NamedDecl *D,
+ const AvailabilityAttr *AA,
+ SourceRange Range) {
+
+ IdentifierInfo *IIEnv = AA->getEnvironment();
+
+ if (!IIEnv) {
+ // The availability attribute does not have environment -> it depends only
+ // on shader model version and not on specific the shader stage.
+
+ // Skip emitting the diagnostics if the diagnostic mode is set to
+ // strict (-fhlsl-strict-availability) because all relevant diagnostics
+ // were already emitted in the DiagnoseUnguardedAvailability scan
+ // (SemaAvailability.cpp).
+ if (SemaRef.getLangOpts().HLSLStrictAvailability)
+ return;
+
+ // Do not report shader-stage-independent issues if scanning a function
+ // that was already scanned in a different shader stage context (they would
+ // be duplicate)
+ if (ReportOnlyShaderStageIssues)
+ return;
+
+ } else {
+ // The availability attribute has environment -> we need to know
+ // the current stage context to property diagnose it.
+ if (InUnknownShaderStageContext())
+ return;
+ }
+
+ // Check introduced version and if environment matches
+ bool EnvironmentMatches = HasMatchingEnvironmentOrNone(AA);
+ VersionTuple Introduced = AA->getIntroduced();
+ VersionTuple TargetVersion =
+ SemaRef.Context.getTargetInfo().getPlatformMinVersion();
+
+ if (TargetVersion >= Introduced && EnvironmentMatches)
+ return;
+
+ // Emit diagnostic message
+ const TargetInfo &TI = SemaRef.getASTContext().getTargetInfo();
+ llvm::StringRef PlatformName(
+ AvailabilityAttr::getPrettyPlatformName(TI.getPlatformName()));
+
+ llvm::StringRef CurrentEnvStr =
+ llvm::Triple::getEnvironmentTypeName(GetCurrentShaderEnvironment());
+
+ llvm::StringRef AttrEnvStr =
+ AA->getEnvironment() ? AA->getEnvironment()->getName() : "";
+ bool UseEnvironment = !AttrEnvStr.empty();
+
+ if (EnvironmentMatches) {
+ SemaRef.Diag(Range.getBegin(), diag::warn_hlsl_availability)
+ << Range << D << PlatformName << Introduced.getAsString()
+ << UseEnvironment << CurrentEnvStr;
+ } else {
+ SemaRef.Diag(Range.getBegin(), diag::warn_hlsl_availability_unavailable)
+ << Range << D;
+ }
+
+ SemaRef.Diag(D->getLocation(), diag::note_partial_availability_specified_here)
+ << D << PlatformName << Introduced.getAsString()
+ << SemaRef.Context.getTargetInfo().getPlatformMinVersion().getAsString()
+ << UseEnvironment << AttrEnvStr << CurrentEnvStr;
+}
+
+} // namespace
+
+void SemaHLSL::DiagnoseAvailabilityViolations(TranslationUnitDecl *TU) {
+ // Skip running the diagnostics scan if the diagnostic mode is
+ // strict (-fhlsl-strict-availability) and the target shader stage is known
+ // because all relevant diagnostics were already emitted in the
+ // DiagnoseUnguardedAvailability scan (SemaAvailability.cpp).
+ const TargetInfo &TI = SemaRef.getASTContext().getTargetInfo();
+ if (SemaRef.getLangOpts().HLSLStrictAvailability &&
+ TI.getTriple().getEnvironment() != llvm::Triple::EnvironmentType::Library)
+ return;
+
+ DiagnoseHLSLAvailability(SemaRef).RunOnTranslationUnit(TU);
+}
+
+// Helper function for CheckHLSLBuiltinFunctionCall
+bool CheckVectorElementCallArgs(Sema *S, CallExpr *TheCall) {
+ assert(TheCall->getNumArgs() > 1);
+ ExprResult A = TheCall->getArg(0);
+
+ QualType ArgTyA = A.get()->getType();
+
+ auto *VecTyA = ArgTyA->getAs<VectorType>();
+ SourceLocation BuiltinLoc = TheCall->getBeginLoc();
+
+ for (unsigned i = 1; i < TheCall->getNumArgs(); ++i) {
+ ExprResult B = TheCall->getArg(i);
+ QualType ArgTyB = B.get()->getType();
+ auto *VecTyB = ArgTyB->getAs<VectorType>();
+ if (VecTyA == nullptr && VecTyB == nullptr)
+ return false;
+
+ if (VecTyA && VecTyB) {
+ bool retValue = false;
+ if (VecTyA->getElementType() != VecTyB->getElementType()) {
+ // Note: type promotion is intended to be handeled via the intrinsics
+ // and not the builtin itself.
+ S->Diag(TheCall->getBeginLoc(),
+ diag::err_vec_builtin_incompatible_vector)
+ << TheCall->getDirectCallee() << /*useAllTerminology*/ true
+ << SourceRange(A.get()->getBeginLoc(), B.get()->getEndLoc());
+ retValue = true;
+ }
+ if (VecTyA->getNumElements() != VecTyB->getNumElements()) {
+ // You should only be hitting this case if you are calling the builtin
+ // directly. HLSL intrinsics should avoid this case via a
+ // HLSLVectorTruncation.
+ S->Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
+ << TheCall->getDirectCallee() << /*useAllTerminology*/ true
+ << SourceRange(TheCall->getArg(0)->getBeginLoc(),
+ TheCall->getArg(1)->getEndLoc());
+ retValue = true;
+ }
+ return retValue;
+ }
+ }
+
+ // Note: if we get here one of the args is a scalar which
+ // requires a VectorSplat on Arg0 or Arg1
+ S->Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
+ << TheCall->getDirectCallee() << /*useAllTerminology*/ true
+ << SourceRange(TheCall->getArg(0)->getBeginLoc(),
+ TheCall->getArg(1)->getEndLoc());
+ return true;
+}
+
+bool CheckArgsTypesAreCorrect(
+ Sema *S, CallExpr *TheCall, QualType ExpectedType,
+ llvm::function_ref<bool(clang::QualType PassedType)> Check) {
+ for (unsigned i = 0; i < TheCall->getNumArgs(); ++i) {
+ QualType PassedType = TheCall->getArg(i)->getType();
+ if (Check(PassedType)) {
+ if (auto *VecTyA = PassedType->getAs<VectorType>())
+ ExpectedType = S->Context.getVectorType(
+ ExpectedType, VecTyA->getNumElements(), VecTyA->getVectorKind());
+ S->Diag(TheCall->getArg(0)->getBeginLoc(),
+ diag::err_typecheck_convert_incompatible)
+ << PassedType << ExpectedType << 1 << 0 << 0;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool CheckAllArgsHaveFloatRepresentation(Sema *S, CallExpr *TheCall) {
+ auto checkAllFloatTypes = [](clang::QualType PassedType) -> bool {
+ return !PassedType->hasFloatingRepresentation();
+ };
+ return CheckArgsTypesAreCorrect(S, TheCall, S->Context.FloatTy,
+ checkAllFloatTypes);
+}
+
+bool CheckFloatOrHalfRepresentations(Sema *S, CallExpr *TheCall) {
+ auto checkFloatorHalf = [](clang::QualType PassedType) -> bool {
+ clang::QualType BaseType =
+ PassedType->isVectorType()
+ ? PassedType->getAs<clang::VectorType>()->getElementType()
+ : PassedType;
+ return !BaseType->isHalfType() && !BaseType->isFloat32Type();
+ };
+ return CheckArgsTypesAreCorrect(S, TheCall, S->Context.FloatTy,
+ checkFloatorHalf);
+}
+
+bool CheckNoDoubleVectors(Sema *S, CallExpr *TheCall) {
+ auto checkDoubleVector = [](clang::QualType PassedType) -> bool {
+ if (const auto *VecTy = PassedType->getAs<VectorType>())
+ return VecTy->getElementType()->isDoubleType();
+ return false;
+ };
+ return CheckArgsTypesAreCorrect(S, TheCall, S->Context.FloatTy,
+ checkDoubleVector);
+}
+
+bool CheckUnsignedIntRepresentation(Sema *S, CallExpr *TheCall) {
+ auto checkAllUnsignedTypes = [](clang::QualType PassedType) -> bool {
+ return !PassedType->hasUnsignedIntegerRepresentation();
+ };
+ return CheckArgsTypesAreCorrect(S, TheCall, S->Context.UnsignedIntTy,
+ checkAllUnsignedTypes);
+}
+
+void SetElementTypeAsReturnType(Sema *S, CallExpr *TheCall,
+ QualType ReturnType) {
+ auto *VecTyA = TheCall->getArg(0)->getType()->getAs<VectorType>();
+ if (VecTyA)
+ ReturnType = S->Context.getVectorType(ReturnType, VecTyA->getNumElements(),
+ VectorKind::Generic);
+ TheCall->setType(ReturnType);
+}
+
+// Note: returning true in this case results in CheckBuiltinFunctionCall
+// returning an ExprError
+bool SemaHLSL::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+ switch (BuiltinID) {
+ case Builtin::BI__builtin_hlsl_elementwise_all:
+ case Builtin::BI__builtin_hlsl_elementwise_any: {
+ if (SemaRef.checkArgCount(TheCall, 1))
+ return true;
+ break;
+ }
+ case Builtin::BI__builtin_hlsl_elementwise_clamp: {
+ if (SemaRef.checkArgCount(TheCall, 3))
+ return true;
+ if (CheckVectorElementCallArgs(&SemaRef, TheCall))
+ return true;
+ if (SemaRef.BuiltinElementwiseTernaryMath(
+ TheCall, /*CheckForFloatArgs*/
+ TheCall->getArg(0)->getType()->hasFloatingRepresentation()))
+ return true;
+ break;
+ }
+ case Builtin::BI__builtin_hlsl_dot: {
+ if (SemaRef.checkArgCount(TheCall, 2))
+ return true;
+ if (CheckVectorElementCallArgs(&SemaRef, TheCall))
+ return true;
+ if (SemaRef.BuiltinVectorToScalarMath(TheCall))
+ return true;
+ if (CheckNoDoubleVectors(&SemaRef, TheCall))
+ return true;
+ break;
+ }
+ case Builtin::BI__builtin_hlsl_elementwise_rcp: {
+ if (CheckAllArgsHaveFloatRepresentation(&SemaRef, TheCall))
+ return true;
+ if (SemaRef.PrepareBuiltinElementwiseMathOneArgCall(TheCall))
+ return true;
+ break;
+ }
+ case Builtin::BI__builtin_hlsl_elementwise_rsqrt:
+ case Builtin::BI__builtin_hlsl_elementwise_frac: {
+ if (CheckFloatOrHalfRepresentations(&SemaRef, TheCall))
+ return true;
+ if (SemaRef.PrepareBuiltinElementwiseMathOneArgCall(TheCall))
+ return true;
+ break;
+ }
+ case Builtin::BI__builtin_hlsl_elementwise_isinf: {
+ if (CheckFloatOrHalfRepresentations(&SemaRef, TheCall))
+ return true;
+ if (SemaRef.PrepareBuiltinElementwiseMathOneArgCall(TheCall))
+ return true;
+ SetElementTypeAsReturnType(&SemaRef, TheCall, getASTContext().BoolTy);
+ break;
+ }
+ case Builtin::BI__builtin_hlsl_lerp: {
+ if (SemaRef.checkArgCount(TheCall, 3))
+ return true;
+ if (CheckVectorElementCallArgs(&SemaRef, TheCall))
+ return true;
+ if (SemaRef.BuiltinElementwiseTernaryMath(TheCall))
+ return true;
+ if (CheckFloatOrHalfRepresentations(&SemaRef, TheCall))
+ return true;
+ break;
+ }
+ case Builtin::BI__builtin_hlsl_mad: {
+ if (SemaRef.checkArgCount(TheCall, 3))
+ return true;
+ if (CheckVectorElementCallArgs(&SemaRef, TheCall))
+ return true;
+ if (SemaRef.BuiltinElementwiseTernaryMath(
+ TheCall, /*CheckForFloatArgs*/
+ TheCall->getArg(0)->getType()->hasFloatingRepresentation()))
+ return true;
+ break;
+ }
+ // Note these are llvm builtins that we want to catch invalid intrinsic
+ // generation. Normal handling of these builitns will occur elsewhere.
+ case Builtin::BI__builtin_elementwise_bitreverse: {
+ if (CheckUnsignedIntRepresentation(&SemaRef, TheCall))
+ return true;
+ break;
+ }
+ case Builtin::BI__builtin_elementwise_acos:
+ case Builtin::BI__builtin_elementwise_asin:
+ case Builtin::BI__builtin_elementwise_atan:
+ case Builtin::BI__builtin_elementwise_ceil:
+ case Builtin::BI__builtin_elementwise_cos:
+ case Builtin::BI__builtin_elementwise_cosh:
+ case Builtin::BI__builtin_elementwise_exp:
+ case Builtin::BI__builtin_elementwise_exp2:
+ case Builtin::BI__builtin_elementwise_floor:
+ case Builtin::BI__builtin_elementwise_log:
+ case Builtin::BI__builtin_elementwise_log2:
+ case Builtin::BI__builtin_elementwise_log10:
+ case Builtin::BI__builtin_elementwise_pow:
+ case Builtin::BI__builtin_elementwise_roundeven:
+ case Builtin::BI__builtin_elementwise_sin:
+ case Builtin::BI__builtin_elementwise_sinh:
+ case Builtin::BI__builtin_elementwise_sqrt:
+ case Builtin::BI__builtin_elementwise_tan:
+ case Builtin::BI__builtin_elementwise_tanh:
+ case Builtin::BI__builtin_elementwise_trunc: {
+ if (CheckFloatOrHalfRepresentations(&SemaRef, TheCall))
+ return true;
+ break;
+ }
+ }
+ return false;
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaHexagon.cpp b/contrib/llvm-project/clang/lib/Sema/SemaHexagon.cpp
new file mode 100644
index 000000000000..5c921c0bc9e3
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaHexagon.cpp
@@ -0,0 +1,290 @@
+//===------ SemaHexagon.cpp ------ Hexagon target-specific routines -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to Hexagon.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaHexagon.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/ADT/STLExtras.h"
+#include <cstdint>
+#include <iterator>
+
+namespace clang {
+
+SemaHexagon::SemaHexagon(Sema &S) : SemaBase(S) {}
+
+bool SemaHexagon::CheckHexagonBuiltinArgument(unsigned BuiltinID,
+ CallExpr *TheCall) {
+ struct ArgInfo {
+ uint8_t OpNum;
+ bool IsSigned;
+ uint8_t BitWidth;
+ uint8_t Align;
+ };
+ struct BuiltinInfo {
+ unsigned BuiltinID;
+ ArgInfo Infos[2];
+ };
+
+ static BuiltinInfo Infos[] = {
+ { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} },
+ { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} },
+ { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} },
+ { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} },
+ { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} },
+ { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} },
+ { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} },
+ { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} },
+ { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} },
+ { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} },
+ { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} },
+
+ { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} },
+ { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} },
+ { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} },
+ { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} },
+
+ { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,
+ {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,
+ {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 },
+ { 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 },
+ { 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 },
+ { 3, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 },
+ { 3, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax,
+ {{ 2, false, 4, 0 },
+ { 3, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax,
+ {{ 2, false, 4, 0 },
+ { 3, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax,
+ {{ 2, false, 4, 0 },
+ { 3, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax,
+ {{ 2, false, 4, 0 },
+ { 3, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 },
+ { 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 },
+ { 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,
+ {{ 1, false, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,
+ {{ 1, false, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
+ {{ 3, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
+ {{ 3, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
+ {{ 3, false, 1, 0 }} },
+
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10, {{ 2, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_128B,
+ {{ 2, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx,
+ {{ 3, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx_128B,
+ {{ 3, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10, {{ 2, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_128B,
+ {{ 2, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx,
+ {{ 3, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B,
+ {{ 3, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, {{ 3, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B,
+ {{ 3, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, {{ 3, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B,
+ {{ 3, false, 3, 0 }} },
+ };
+
+ // Use a dynamically initialized static to sort the table exactly once on
+ // first run.
+ static const bool SortOnce =
+ (llvm::sort(Infos,
+ [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
+ return LHS.BuiltinID < RHS.BuiltinID;
+ }),
+ true);
+ (void)SortOnce;
+
+ const BuiltinInfo *F = llvm::partition_point(
+ Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; });
+ if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
+ return false;
+
+ bool Error = false;
+
+ for (const ArgInfo &A : F->Infos) {
+ // Ignore empty ArgInfo elements.
+ if (A.BitWidth == 0)
+ continue;
+
+ int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0;
+ int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1;
+ if (!A.Align) {
+ Error |= SemaRef.BuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
+ } else {
+ unsigned M = 1 << A.Align;
+ Min *= M;
+ Max *= M;
+ Error |= SemaRef.BuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
+ Error |= SemaRef.BuiltinConstantArgMultiple(TheCall, A.OpNum, M);
+ }
+ }
+ return Error;
+}
+
+bool SemaHexagon::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
+ CallExpr *TheCall) {
+ return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaInit.cpp b/contrib/llvm-project/clang/lib/Sema/SemaInit.cpp
index 457fa377355a..eea4bdfa68b5 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaInit.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaInit.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "CheckExprLifetime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
@@ -28,9 +29,11 @@
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaObjC.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
@@ -190,12 +193,34 @@ static void updateGNUCompoundLiteralRValue(Expr *E) {
}
}
+static bool initializingConstexprVariable(const InitializedEntity &Entity) {
+ Decl *D = Entity.getDecl();
+ const InitializedEntity *Parent = &Entity;
+
+ while (Parent) {
+ D = Parent->getDecl();
+ Parent = Parent->getParent();
+ }
+
+ if (const auto *VD = dyn_cast_if_present<VarDecl>(D); VD && VD->isConstexpr())
+ return true;
+
+ return false;
+}
+
+static void CheckC23ConstexprInitStringLiteral(const StringLiteral *SE,
+ Sema &SemaRef, QualType &TT);
+
static void CheckStringInit(Expr *Str, QualType &DeclT, const ArrayType *AT,
- Sema &S) {
+ Sema &S, bool CheckC23ConstexprInit = false) {
// Get the length of the string as parsed.
auto *ConstantArrayTy =
cast<ConstantArrayType>(Str->getType()->getAsArrayTypeUnsafe());
- uint64_t StrLength = ConstantArrayTy->getSize().getZExtValue();
+ uint64_t StrLength = ConstantArrayTy->getZExtSize();
+
+ if (CheckC23ConstexprInit)
+ if (const StringLiteral *SL = dyn_cast<StringLiteral>(Str->IgnoreParens()))
+ CheckC23ConstexprInitStringLiteral(SL, S, DeclT);
if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(AT)) {
// C99 6.7.8p14. We have an array of character type with unknown size
@@ -224,14 +249,13 @@ static void CheckStringInit(Expr *Str, QualType &DeclT, const ArrayType *AT,
}
// [dcl.init.string]p2
- if (StrLength > CAT->getSize().getZExtValue())
+ if (StrLength > CAT->getZExtSize())
S.Diag(Str->getBeginLoc(),
diag::err_initializer_string_for_char_array_too_long)
- << CAT->getSize().getZExtValue() << StrLength
- << Str->getSourceRange();
+ << CAT->getZExtSize() << StrLength << Str->getSourceRange();
} else {
// C99 6.7.8p14.
- if (StrLength-1 > CAT->getSize().getZExtValue())
+ if (StrLength - 1 > CAT->getZExtSize())
S.Diag(Str->getBeginLoc(),
diag::ext_initializer_string_for_char_array_too_long)
<< Str->getSourceRange();
@@ -290,6 +314,8 @@ class InitListChecker {
InitListExpr *FullyStructuredList = nullptr;
NoInitExpr *DummyExpr = nullptr;
SmallVectorImpl<QualType> *AggrDeductionCandidateParamTypes = nullptr;
+ EmbedExpr *CurEmbed = nullptr; // Save current embed we're processing.
+ unsigned CurEmbedIndex = 0;
NoInitExpr *getDummyInit() {
if (!DummyExpr)
@@ -478,6 +504,42 @@ class InitListChecker {
void CheckEmptyInitializable(const InitializedEntity &Entity,
SourceLocation Loc);
+ Expr *HandleEmbed(EmbedExpr *Embed, const InitializedEntity &Entity) {
+ Expr *Result = nullptr;
+    // Understand which part of the embed we'd like to reference.
+ if (!CurEmbed) {
+ CurEmbed = Embed;
+ CurEmbedIndex = 0;
+ }
+ // Reference just one if we're initializing a single scalar.
+ uint64_t ElsCount = 1;
+ // Otherwise try to fill whole array with embed data.
+ if (Entity.getKind() == InitializedEntity::EK_ArrayElement) {
+ auto *AType =
+ SemaRef.Context.getAsArrayType(Entity.getParent()->getType());
+ assert(AType && "expected array type when initializing array");
+ ElsCount = Embed->getDataElementCount();
+ if (const auto *CAType = dyn_cast<ConstantArrayType>(AType))
+ ElsCount = std::min(CAType->getSize().getZExtValue(),
+ ElsCount - CurEmbedIndex);
+ if (ElsCount == Embed->getDataElementCount()) {
+ CurEmbed = nullptr;
+ CurEmbedIndex = 0;
+ return Embed;
+ }
+ }
+
+ Result = new (SemaRef.Context)
+ EmbedExpr(SemaRef.Context, Embed->getLocation(), Embed->getData(),
+ CurEmbedIndex, ElsCount);
+ CurEmbedIndex += ElsCount;
+ if (CurEmbedIndex >= Embed->getDataElementCount()) {
+ CurEmbed = nullptr;
+ CurEmbedIndex = 0;
+ }
+ return Result;
+ }
+
public:
InitListChecker(
Sema &S, const InitializedEntity &Entity, InitListExpr *IL, QualType &T,
@@ -490,7 +552,7 @@ public:
: InitListChecker(S, Entity, IL, T, /*VerifyOnly=*/true,
/*TreatUnavailableAsInvalid=*/false,
/*InOverloadResolution=*/false,
- &AggrDeductionCandidateParamTypes){};
+ &AggrDeductionCandidateParamTypes) {}
bool HadError() { return hadError; }
@@ -791,19 +853,13 @@ InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
const RecordDecl *RDecl = RType->getDecl();
- if (RDecl->isUnion() && ILE->getInitializedFieldInUnion())
- FillInEmptyInitForField(0, ILE->getInitializedFieldInUnion(),
- Entity, ILE, RequiresSecondPass, FillWithNoInit);
- else if (RDecl->isUnion() && isa<CXXRecordDecl>(RDecl) &&
- cast<CXXRecordDecl>(RDecl)->hasInClassInitializer()) {
- for (auto *Field : RDecl->fields()) {
- if (Field->hasInClassInitializer()) {
- FillInEmptyInitForField(0, Field, Entity, ILE, RequiresSecondPass,
- FillWithNoInit);
- break;
- }
- }
+ if (RDecl->isUnion() && ILE->getInitializedFieldInUnion()) {
+ FillInEmptyInitForField(0, ILE->getInitializedFieldInUnion(), Entity, ILE,
+ RequiresSecondPass, FillWithNoInit);
} else {
+ assert((!RDecl->isUnion() || !isa<CXXRecordDecl>(RDecl) ||
+ !cast<CXXRecordDecl>(RDecl)->hasInClassInitializer()) &&
+ "We should have computed initialized fields already");
// The fields beyond ILE->getNumInits() are default initialized, so in
// order to leave them uninitialized, the ILE is expanded and the extra
// fields are then filled with NoInitExpr.
@@ -827,7 +883,7 @@ InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
}
for (auto *Field : RDecl->fields()) {
- if (Field->isUnnamedBitfield())
+ if (Field->isUnnamedBitField())
continue;
if (hadError)
@@ -853,11 +909,11 @@ InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
InitializedEntity ElementEntity = Entity;
unsigned NumInits = ILE->getNumInits();
- unsigned NumElements = NumInits;
+ uint64_t NumElements = NumInits;
if (const ArrayType *AType = SemaRef.Context.getAsArrayType(ILE->getType())) {
ElementType = AType->getElementType();
if (const auto *CAType = dyn_cast<ConstantArrayType>(AType))
- NumElements = CAType->getSize().getZExtValue();
+ NumElements = CAType->getZExtSize();
// For an array new with an unknown bound, ask for one additional element
// in order to populate the array filler.
if (Entity.isVariableLengthArrayNew())
@@ -873,7 +929,7 @@ InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
ElementType = ILE->getType();
bool SkipEmptyInitChecks = false;
- for (unsigned Init = 0; Init != NumElements; ++Init) {
+ for (uint64_t Init = 0; Init != NumElements; ++Init) {
if (hadError)
return;
@@ -994,7 +1050,7 @@ int InitListChecker::numArrayElements(QualType DeclType) {
int maxElements = 0x7FFFFFFF;
if (const ConstantArrayType *CAT =
SemaRef.Context.getAsConstantArrayType(DeclType)) {
- maxElements = static_cast<int>(CAT->getSize().getZExtValue());
+ maxElements = static_cast<int>(CAT->getZExtSize());
}
return maxElements;
}
@@ -1005,7 +1061,7 @@ int InitListChecker::numStructUnionElements(QualType DeclType) {
if (auto *CXXRD = dyn_cast<CXXRecordDecl>(structDecl))
InitializableMembers += CXXRD->getNumBases();
for (const auto *Field : structDecl->fields())
- if (!Field->isUnnamedBitfield())
+ if (!Field->isUnnamedBitField())
++InitializableMembers;
if (structDecl->isUnion())
@@ -1426,7 +1482,21 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
// dependent non-array type or an array type with a value-dependent
// bound
assert(AggrDeductionCandidateParamTypes);
- if (!isa_and_nonnull<ConstantArrayType>(
+
+ // In the presence of a braced-init-list within the initializer, we should
+ // not perform brace-elision, even if brace elision would otherwise be
+ // applicable. For example, given:
+ //
+ // template <class T> struct Foo {
+ // T t[2];
+ // };
+ //
+ // Foo t = {{1, 2}};
+ //
+ // we don't want the (T, T) but rather (T [2]) in terms of the initializer
+ // {{1, 2}}.
+ if (isa<InitListExpr, DesignatedInitExpr>(expr) ||
+ !isa_and_present<ConstantArrayType>(
SemaRef.Context.getAsArrayType(ElemType))) {
++Index;
AggrDeductionCandidateParamTypes->push_back(ElemType);
@@ -1442,6 +1512,9 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
// Brace elision is never performed if the element is not an
// assignment-expression.
if (Seq || isa<InitListExpr>(expr)) {
+ if (auto *Embed = dyn_cast<EmbedExpr>(expr)) {
+ expr = HandleEmbed(Embed, Entity);
+ }
if (!VerifyOnly) {
ExprResult Result = Seq.Perform(SemaRef, TmpEntity, Kind, expr);
if (Result.isInvalid())
@@ -1455,7 +1528,8 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
UpdateStructuredListElement(StructuredList, StructuredIndex,
getDummyInit());
}
- ++Index;
+ if (!CurEmbed)
+ ++Index;
if (AggrDeductionCandidateParamTypes)
AggrDeductionCandidateParamTypes->push_back(ElemType);
return;
@@ -1476,7 +1550,9 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
if (IsStringInit(expr, arrayType, SemaRef.Context) == SIF_None) {
// FIXME: Should we do this checking in verify-only mode?
if (!VerifyOnly)
- CheckStringInit(expr, ElemType, arrayType, SemaRef);
+ CheckStringInit(expr, ElemType, arrayType, SemaRef,
+ SemaRef.getLangOpts().C23 &&
+ initializingConstexprVariable(Entity));
if (StructuredList)
UpdateStructuredListElement(StructuredList, StructuredIndex, expr);
++Index;
@@ -1646,6 +1722,8 @@ void InitListChecker::CheckScalarType(const InitializedEntity &Entity,
++Index;
++StructuredIndex;
return;
+ } else if (auto *Embed = dyn_cast<EmbedExpr>(expr)) {
+ expr = HandleEmbed(Embed, Entity);
}
ExprResult Result;
@@ -1667,14 +1745,16 @@ void InitListChecker::CheckScalarType(const InitializedEntity &Entity,
else {
ResultExpr = Result.getAs<Expr>();
- if (ResultExpr != expr && !VerifyOnly) {
+ if (ResultExpr != expr && !VerifyOnly && !CurEmbed) {
// The type was promoted, update initializer list.
// FIXME: Why are we updating the syntactic init list?
IList->setInit(Index, ResultExpr);
}
}
+
UpdateStructuredListElement(StructuredList, StructuredIndex, ResultExpr);
- ++Index;
+ if (!CurEmbed)
+ ++Index;
if (AggrDeductionCandidateParamTypes)
AggrDeductionCandidateParamTypes->push_back(DeclType);
}
@@ -1913,6 +1993,39 @@ static bool checkDestructorReference(QualType ElementType, SourceLocation Loc,
return SemaRef.DiagnoseUseOfDecl(Destructor, Loc);
}
+static bool
+canInitializeArrayWithEmbedDataString(ArrayRef<Expr *> ExprList,
+ const InitializedEntity &Entity,
+ ASTContext &Context) {
+ QualType InitType = Entity.getType();
+ const InitializedEntity *Parent = &Entity;
+
+ while (Parent) {
+ InitType = Parent->getType();
+ Parent = Parent->getParent();
+ }
+
+  // Only one initializer, it's an embed, and the types match.
+ EmbedExpr *EE =
+ ExprList.size() == 1
+ ? dyn_cast_if_present<EmbedExpr>(ExprList[0]->IgnoreParens())
+ : nullptr;
+ if (!EE)
+ return false;
+
+ if (InitType->isArrayType()) {
+ const ArrayType *InitArrayType = InitType->getAsArrayTypeUnsafe();
+ QualType InitElementTy = InitArrayType->getElementType();
+ QualType EmbedExprElementTy = EE->getDataStringLiteral()->getType();
+ const bool TypesMatch =
+ Context.typesAreCompatible(InitElementTy, EmbedExprElementTy) ||
+ (InitElementTy->isCharType() && EmbedExprElementTy->isCharType());
+ if (TypesMatch)
+ return true;
+ }
+ return false;
+}
+
void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
InitListExpr *IList, QualType &DeclType,
llvm::APSInt elementIndex,
@@ -1930,6 +2043,12 @@ void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
}
}
+ if (canInitializeArrayWithEmbedDataString(IList->inits(), Entity,
+ SemaRef.Context)) {
+ EmbedExpr *Embed = cast<EmbedExpr>(IList->inits()[0]);
+ IList->setInit(0, Embed->getDataStringLiteral());
+ }
+
// Check for the special-case of initializing an array with a string.
if (Index < IList->getNumInits()) {
if (IsStringInit(IList->getInit(Index), arrayType, SemaRef.Context) ==
@@ -1941,7 +2060,9 @@ void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
// constant for each string.
// FIXME: Should we do these checks in verify-only mode too?
if (!VerifyOnly)
- CheckStringInit(IList->getInit(Index), DeclType, arrayType, SemaRef);
+ CheckStringInit(IList->getInit(Index), DeclType, arrayType, SemaRef,
+ SemaRef.getLangOpts().C23 &&
+ initializingConstexprVariable(Entity));
if (StructuredList) {
UpdateStructuredListElement(StructuredList, StructuredIndex,
IList->getInit(Index));
@@ -2030,13 +2151,24 @@ void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
if (maxElementsKnown && elementIndex == maxElements)
break;
- InitializedEntity ElementEntity =
- InitializedEntity::InitializeElement(SemaRef.Context, StructuredIndex,
- Entity);
+ InitializedEntity ElementEntity = InitializedEntity::InitializeElement(
+ SemaRef.Context, StructuredIndex, Entity);
+
+ unsigned EmbedElementIndexBeforeInit = CurEmbedIndex;
// Check this element.
CheckSubElementType(ElementEntity, IList, elementType, Index,
StructuredList, StructuredIndex);
++elementIndex;
+ if ((CurEmbed || isa<EmbedExpr>(Init)) && elementType->isScalarType()) {
+ if (CurEmbed) {
+ elementIndex =
+ elementIndex + CurEmbedIndex - EmbedElementIndexBeforeInit - 1;
+ } else {
+ auto Embed = cast<EmbedExpr>(Init);
+ elementIndex = elementIndex + Embed->getDataElementCount() -
+ EmbedElementIndexBeforeInit - 1;
+ }
+ }
// If the array is of incomplete type, keep track of the number of
// elements in the initializer.
@@ -2137,19 +2269,22 @@ void InitListChecker::CheckStructUnionTypes(
return;
for (RecordDecl::field_iterator FieldEnd = RD->field_end();
Field != FieldEnd; ++Field) {
- if (Field->hasInClassInitializer()) {
+ if (Field->hasInClassInitializer() ||
+ (Field->isAnonymousStructOrUnion() &&
+ Field->getType()->getAsCXXRecordDecl()->hasInClassInitializer())) {
StructuredList->setInitializedFieldInUnion(*Field);
// FIXME: Actually build a CXXDefaultInitExpr?
return;
}
}
+ llvm_unreachable("Couldn't find in-class initializer");
}
// Value-initialize the first member of the union that isn't an unnamed
// bitfield.
for (RecordDecl::field_iterator FieldEnd = RD->field_end();
Field != FieldEnd; ++Field) {
- if (!Field->isUnnamedBitfield()) {
+ if (!Field->isUnnamedBitField()) {
CheckEmptyInitializable(
InitializedEntity::InitializeMember(*Field, &Entity),
IList->getEndLoc());
@@ -2170,7 +2305,7 @@ void InitListChecker::CheckStructUnionTypes(
// Designated inits always initialize fields, so if we see one, all
// remaining base classes have no explicit initializer.
- if (Init && isa<DesignatedInitExpr>(Init))
+ if (isa_and_nonnull<DesignatedInitExpr>(Init))
Init = nullptr;
// C++ [over.match.class.deduct]p1.6:
@@ -2227,8 +2362,6 @@ void InitListChecker::CheckStructUnionTypes(
size_t NumRecordDecls = llvm::count_if(RD->decls(), [&](const Decl *D) {
return isa<FieldDecl>(D) || isa<RecordDecl>(D);
});
- bool CheckForMissingFields =
- !IList->isIdiomaticZeroInitializer(SemaRef.getLangOpts());
bool HasDesignatedInit = false;
llvm::SmallPtrSet<FieldDecl *, 4> InitializedFields;
@@ -2269,11 +2402,6 @@ void InitListChecker::CheckStructUnionTypes(
}
InitializedSomething = true;
-
- // Disable check for missing fields when designators are used.
- // This matches gcc behaviour.
- if (!SemaRef.getLangOpts().CPlusPlus)
- CheckForMissingFields = false;
continue;
}
@@ -2285,7 +2413,7 @@ void InitListChecker::CheckStructUnionTypes(
// These are okay for randomized structures. [C99 6.7.8p19]
//
// Also, if there is only one element in the structure, we allow something
- // like this, because it's really not randomized in the tranditional sense.
+ // like this, because it's really not randomized in the traditional sense.
//
// struct foo h = {bar};
auto IsZeroInitializer = [&](const Expr *I) {
@@ -2311,15 +2439,15 @@ void InitListChecker::CheckStructUnionTypes(
break;
}
- // We've already initialized a member of a union. We're done.
+ // We've already initialized a member of a union. We can stop entirely.
if (InitializedSomething && RD->isUnion())
- break;
+ return;
- // If we've hit the flexible array member at the end, we're done.
+ // Stop if we've hit a flexible array member.
if (Field->getType()->isIncompleteArrayType())
break;
- if (Field->isUnnamedBitfield()) {
+ if (Field->isUnnamedBitField()) {
// Don't initialize unnamed bitfields, e.g. "int : 20;"
++Field;
continue;
@@ -2363,8 +2491,13 @@ void InitListChecker::CheckStructUnionTypes(
}
// Emit warnings for missing struct field initializers.
- if (!VerifyOnly && InitializedSomething && CheckForMissingFields &&
- !RD->isUnion()) {
+ // This check is disabled for designated initializers in C.
+ // This matches gcc behaviour.
+ bool IsCDesignatedInitializer =
+ HasDesignatedInit && !SemaRef.getLangOpts().CPlusPlus;
+ if (!VerifyOnly && InitializedSomething && !RD->isUnion() &&
+ !IList->isIdiomaticZeroInitializer(SemaRef.getLangOpts()) &&
+ !IsCDesignatedInitializer) {
// It is possible we have one or more unnamed bitfields remaining.
// Find first (if any) named field and emit warning.
for (RecordDecl::field_iterator it = HasDesignatedInit ? RD->field_begin()
@@ -2374,11 +2507,12 @@ void InitListChecker::CheckStructUnionTypes(
if (HasDesignatedInit && InitializedFields.count(*it))
continue;
- if (!it->isUnnamedBitfield() && !it->hasInClassInitializer() &&
+ if (!it->isUnnamedBitField() && !it->hasInClassInitializer() &&
!it->getType()->isIncompleteArrayType()) {
- SemaRef.Diag(IList->getSourceRange().getEnd(),
- diag::warn_missing_field_initializers)
- << *it;
+ auto Diag = HasDesignatedInit
+ ? diag::warn_missing_designated_field_initializers
+ : diag::warn_missing_field_initializers;
+ SemaRef.Diag(IList->getSourceRange().getEnd(), Diag) << *it;
break;
}
}
@@ -2389,7 +2523,7 @@ void InitListChecker::CheckStructUnionTypes(
if (!StructuredList && Field != FieldEnd && !RD->isUnion() &&
!Field->getType()->isIncompleteArrayType()) {
for (; Field != FieldEnd && !hadError; ++Field) {
- if (!Field->isUnnamedBitfield() && !Field->hasInClassInitializer())
+ if (!Field->isUnnamedBitField() && !Field->hasInClassInitializer())
CheckEmptyInitializable(
InitializedEntity::InitializeMember(*Field, &Entity),
IList->getEndLoc());
@@ -2432,6 +2566,11 @@ void InitListChecker::CheckStructUnionTypes(
else
CheckImplicitInitList(MemberEntity, IList, Field->getType(), Index,
StructuredList, StructuredIndex);
+
+ if (RD->isUnion() && StructuredList) {
+ // Initialize the first field within the union.
+ StructuredList->setInitializedFieldInUnion(*Field);
+ }
}
/// Expand a field designator that refers to a member of an
@@ -2754,7 +2893,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
unsigned FieldIndex = NumBases;
for (auto *FI : RD->fields()) {
- if (FI->isUnnamedBitfield())
+ if (FI->isUnnamedBitField())
continue;
if (declaresSameEntity(KnownField, FI)) {
KnownField = FI;
@@ -2828,7 +2967,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// Find the field that we just initialized.
FieldDecl *PrevField = nullptr;
for (auto FI = RD->field_begin(); FI != RD->field_end(); ++FI) {
- if (FI->isUnnamedBitfield())
+ if (FI->isUnnamedBitField())
continue;
if (*NextField != RD->field_end() &&
declaresSameEntity(*FI, **NextField))
@@ -2946,7 +3085,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// If this the first designator, our caller will continue checking
// the rest of this struct/class/union subobject.
if (IsFirstDesignator) {
- if (Field != RD->field_end() && Field->isUnnamedBitfield())
+ if (Field != RD->field_end() && Field->isUnnamedBitField())
++Field;
if (NextField)
@@ -3076,7 +3215,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// Get the length of the string.
uint64_t StrLen = SL->getLength();
if (cast<ConstantArrayType>(AT)->getSize().ult(StrLen))
- StrLen = cast<ConstantArrayType>(AT)->getSize().getZExtValue();
+ StrLen = cast<ConstantArrayType>(AT)->getZExtSize();
StructuredList->resizeInits(Context, StrLen);
// Build a literal for each character in the string, and put them into
@@ -3099,7 +3238,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// Get the length of the string.
uint64_t StrLen = Str.size();
if (cast<ConstantArrayType>(AT)->getSize().ult(StrLen))
- StrLen = cast<ConstantArrayType>(AT)->getSize().getZExtValue();
+ StrLen = cast<ConstantArrayType>(AT)->getZExtSize();
StructuredList->resizeInits(Context, StrLen);
// Build a literal for each character in the string, and put them into
@@ -3258,7 +3397,7 @@ InitListChecker::createInitListExpr(QualType CurrentObjectType,
if (const ArrayType *AType
= SemaRef.Context.getAsArrayType(CurrentObjectType)) {
if (const ConstantArrayType *CAType = dyn_cast<ConstantArrayType>(AType)) {
- NumElements = CAType->getSize().getZExtValue();
+ NumElements = CAType->getZExtSize();
// Simple heuristic so that we don't allocate a very large
// initializer with many empty entries at the end.
if (NumElements > ExpectedNumInits)
@@ -3299,8 +3438,6 @@ void InitListChecker::UpdateStructuredListElement(InitListExpr *StructuredList,
++StructuredIndex;
}
-/// Determine whether we can perform aggregate initialization for the purposes
-/// of overload resolution.
bool Sema::CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From) {
QualType Type = Entity.getType();
@@ -5448,6 +5585,10 @@ static void TryOrBuildParenListInitialization(
ExprResult ER;
ER = IS.Perform(S, SubEntity, SubKind,
Arg ? MultiExprArg(Arg) : std::nullopt);
+
+ if (ER.isInvalid())
+ return false;
+
if (InitExpr)
*InitExpr = ER.get();
else
@@ -5467,7 +5608,7 @@ static void TryOrBuildParenListInitialization(
// having k elements.
if (const ConstantArrayType *CAT =
S.getASTContext().getAsConstantArrayType(Entity.getType())) {
- ArrayLength = CAT->getSize().getZExtValue();
+ ArrayLength = CAT->getZExtSize();
ResultType = Entity.getType();
} else if (const VariableArrayType *VAT =
S.getASTContext().getAsVariableArrayType(Entity.getType())) {
@@ -5480,7 +5621,7 @@ static void TryOrBuildParenListInitialization(
<< SE->getSourceRange();
return;
} else {
- assert(isa<IncompleteArrayType>(Entity.getType()));
+ assert(Entity.getType()->isIncompleteArrayType());
ArrayLength = Args.size();
}
EntityIndexToProcess = ArrayLength;
@@ -5555,7 +5696,7 @@ static void TryOrBuildParenListInitialization(
for (FieldDecl *FD : RD->fields()) {
// Unnamed bitfields should not be initialized at all, either with an arg
// or by default.
- if (FD->isUnnamedBitfield())
+ if (FD->isUnnamedBitField())
continue;
InitializedEntity SubEntity =
@@ -5988,8 +6129,8 @@ static bool tryObjCWritebackConversion(Sema &S,
// Handle write-back conversion.
QualType ConvertedArgType;
- if (!S.isObjCWritebackConversion(ArgType, Entity.getType(),
- ConvertedArgType))
+ if (!S.ObjC().isObjCWritebackConversion(ArgType, Entity.getType(),
+ ConvertedArgType))
return false;
// We should copy unless we're passing to an argument explicitly
@@ -6181,10 +6322,10 @@ void InitializationSequence::InitializeFrom(Sema &S,
if (Args.size() == 1) {
Initializer = Args[0];
if (S.getLangOpts().ObjC) {
- if (S.CheckObjCBridgeRelatedConversions(Initializer->getBeginLoc(),
- DestType, Initializer->getType(),
- Initializer) ||
- S.CheckConversionToObjCLiteral(DestType, Initializer))
+ if (S.ObjC().CheckObjCBridgeRelatedConversions(
+ Initializer->getBeginLoc(), DestType, Initializer->getType(),
+ Initializer) ||
+ S.ObjC().CheckConversionToObjCLiteral(DestType, Initializer))
Args[0] = Initializer;
}
if (!isa<InitListExpr>(Initializer))
@@ -6240,7 +6381,10 @@ void InitializationSequence::InitializeFrom(Sema &S,
// initializer is a string literal, see 8.5.2.
// - Otherwise, if the destination type is an array, the program is
// ill-formed.
- if (const ArrayType *DestAT = Context.getAsArrayType(DestType)) {
+ // - Except in HLSL, where non-decaying array parameters behave like
+ // non-array types for initialization.
+ if (DestType->isArrayType() && !DestType->isArrayParameterType()) {
+ const ArrayType *DestAT = Context.getAsArrayType(DestType);
if (Initializer && isa<VariableArrayType>(DestAT)) {
SetFailed(FK_VariableLengthArrayHasInitializer);
return;
@@ -6319,7 +6463,7 @@ void InitializationSequence::InitializeFrom(Sema &S,
// class member of array type from a parenthesized initializer list.
else if (S.getLangOpts().CPlusPlus &&
Entity.getKind() == InitializedEntity::EK_Member &&
- Initializer && isa<InitListExpr>(Initializer)) {
+ isa_and_nonnull<InitListExpr>(Initializer)) {
TryListInitialization(S, Entity, Kind, cast<InitListExpr>(Initializer),
*this, TreatUnavailableAsInvalid);
AddParenthesizedArrayInitStep(DestType);
@@ -6432,7 +6576,7 @@ void InitializationSequence::InitializeFrom(Sema &S,
// For HLSL ext vector types we allow list initialization behavior for C++
// constructor syntax. This is accomplished by converting initialization
// arguments an InitListExpr late.
- if (S.getLangOpts().HLSL && DestType->isExtVectorType() &&
+ if (S.getLangOpts().HLSL && Args.size() > 1 && DestType->isExtVectorType() &&
(SourceType.isNull() ||
!Context.hasSameUnqualifiedType(SourceType, DestType))) {
@@ -6543,12 +6687,12 @@ void InitializationSequence::InitializeFrom(Sema &S,
AddPassByIndirectCopyRestoreStep(DestType, ShouldCopy);
} else if (ICS.isBad()) {
- DeclAccessPair dap;
- if (isLibstdcxxPointerReturnFalseHack(S, Entity, Initializer)) {
+ if (isLibstdcxxPointerReturnFalseHack(S, Entity, Initializer))
AddZeroInitializationStep(Entity.getType());
- } else if (Initializer->getType() == Context.OverloadTy &&
- !S.ResolveAddressOfOverloadedFunction(Initializer, DestType,
- false, dap))
+ else if (DeclAccessPair Found;
+ Initializer->getType() == Context.OverloadTy &&
+ !S.ResolveAddressOfOverloadedFunction(Initializer, DestType,
+ /*Complain=*/false, Found))
SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
else if (Initializer->getType()->isFunctionType() &&
isExprAnUnaddressableFunction(S, Initializer))
@@ -7050,6 +7194,11 @@ PerformConstructorInitialization(Sema &S,
hasCopyOrMoveCtorParam(S.Context,
getConstructorInfo(Step.Function.FoundDecl));
+ // A smart pointer constructed from a nullable pointer is nullable.
+ if (NumArgs == 1 && !Kind.isExplicitCast())
+ S.diagnoseNullableToNonnullConversion(
+ Entity.getType(), Args.front()->getType(), Kind.getLocation());
+
// Determine the arguments required to actually perform the constructor
// call.
if (S.CompleteConstructorCall(Constructor, Step.Type, Args, Loc,
@@ -7147,1217 +7296,9 @@ PerformConstructorInitialization(Sema &S,
return CurInit;
}
-namespace {
-enum LifetimeKind {
- /// The lifetime of a temporary bound to this entity ends at the end of the
- /// full-expression, and that's (probably) fine.
- LK_FullExpression,
-
- /// The lifetime of a temporary bound to this entity is extended to the
- /// lifeitme of the entity itself.
- LK_Extended,
-
- /// The lifetime of a temporary bound to this entity probably ends too soon,
- /// because the entity is allocated in a new-expression.
- LK_New,
-
- /// The lifetime of a temporary bound to this entity ends too soon, because
- /// the entity is a return object.
- LK_Return,
-
- /// The lifetime of a temporary bound to this entity ends too soon, because
- /// the entity is the result of a statement expression.
- LK_StmtExprResult,
-
- /// This is a mem-initializer: if it would extend a temporary (other than via
- /// a default member initializer), the program is ill-formed.
- LK_MemInitializer,
-};
-using LifetimeResult =
- llvm::PointerIntPair<const InitializedEntity *, 3, LifetimeKind>;
-}
-
-/// Determine the declaration which an initialized entity ultimately refers to,
-/// for the purpose of lifetime-extending a temporary bound to a reference in
-/// the initialization of \p Entity.
-static LifetimeResult getEntityLifetime(
- const InitializedEntity *Entity,
- const InitializedEntity *InitField = nullptr) {
- // C++11 [class.temporary]p5:
- switch (Entity->getKind()) {
- case InitializedEntity::EK_Variable:
- // The temporary [...] persists for the lifetime of the reference
- return {Entity, LK_Extended};
-
- case InitializedEntity::EK_Member:
- // For subobjects, we look at the complete object.
- if (Entity->getParent())
- return getEntityLifetime(Entity->getParent(), Entity);
-
- // except:
- // C++17 [class.base.init]p8:
- // A temporary expression bound to a reference member in a
- // mem-initializer is ill-formed.
- // C++17 [class.base.init]p11:
- // A temporary expression bound to a reference member from a
- // default member initializer is ill-formed.
- //
- // The context of p11 and its example suggest that it's only the use of a
- // default member initializer from a constructor that makes the program
- // ill-formed, not its mere existence, and that it can even be used by
- // aggregate initialization.
- return {Entity, Entity->isDefaultMemberInitializer() ? LK_Extended
- : LK_MemInitializer};
-
- case InitializedEntity::EK_Binding:
- // Per [dcl.decomp]p3, the binding is treated as a variable of reference
- // type.
- return {Entity, LK_Extended};
-
- case InitializedEntity::EK_Parameter:
- case InitializedEntity::EK_Parameter_CF_Audited:
- // -- A temporary bound to a reference parameter in a function call
- // persists until the completion of the full-expression containing
- // the call.
- return {nullptr, LK_FullExpression};
-
- case InitializedEntity::EK_TemplateParameter:
- // FIXME: This will always be ill-formed; should we eagerly diagnose it here?
- return {nullptr, LK_FullExpression};
-
- case InitializedEntity::EK_Result:
- // -- The lifetime of a temporary bound to the returned value in a
- // function return statement is not extended; the temporary is
- // destroyed at the end of the full-expression in the return statement.
- return {nullptr, LK_Return};
-
- case InitializedEntity::EK_StmtExprResult:
- // FIXME: Should we lifetime-extend through the result of a statement
- // expression?
- return {nullptr, LK_StmtExprResult};
-
- case InitializedEntity::EK_New:
- // -- A temporary bound to a reference in a new-initializer persists
- // until the completion of the full-expression containing the
- // new-initializer.
- return {nullptr, LK_New};
-
- case InitializedEntity::EK_Temporary:
- case InitializedEntity::EK_CompoundLiteralInit:
- case InitializedEntity::EK_RelatedResult:
- // We don't yet know the storage duration of the surrounding temporary.
- // Assume it's got full-expression duration for now, it will patch up our
- // storage duration if that's not correct.
- return {nullptr, LK_FullExpression};
-
- case InitializedEntity::EK_ArrayElement:
- // For subobjects, we look at the complete object.
- return getEntityLifetime(Entity->getParent(), InitField);
-
- case InitializedEntity::EK_Base:
- // For subobjects, we look at the complete object.
- if (Entity->getParent())
- return getEntityLifetime(Entity->getParent(), InitField);
- return {InitField, LK_MemInitializer};
-
- case InitializedEntity::EK_Delegating:
- // We can reach this case for aggregate initialization in a constructor:
- // struct A { int &&r; };
- // struct B : A { B() : A{0} {} };
- // In this case, use the outermost field decl as the context.
- return {InitField, LK_MemInitializer};
-
- case InitializedEntity::EK_BlockElement:
- case InitializedEntity::EK_LambdaToBlockConversionBlockElement:
- case InitializedEntity::EK_LambdaCapture:
- case InitializedEntity::EK_VectorElement:
- case InitializedEntity::EK_ComplexElement:
- return {nullptr, LK_FullExpression};
-
- case InitializedEntity::EK_Exception:
- // FIXME: Can we diagnose lifetime problems with exceptions?
- return {nullptr, LK_FullExpression};
-
- case InitializedEntity::EK_ParenAggInitMember:
- // -- A temporary object bound to a reference element of an aggregate of
- // class type initialized from a parenthesized expression-list
- // [dcl.init, 9.3] persists until the completion of the full-expression
- // containing the expression-list.
- return {nullptr, LK_FullExpression};
- }
-
- llvm_unreachable("unknown entity kind");
-}
-
-namespace {
-enum ReferenceKind {
- /// Lifetime would be extended by a reference binding to a temporary.
- RK_ReferenceBinding,
- /// Lifetime would be extended by a std::initializer_list object binding to
- /// its backing array.
- RK_StdInitializerList,
-};
-
-/// A temporary or local variable. This will be one of:
-/// * A MaterializeTemporaryExpr.
-/// * A DeclRefExpr whose declaration is a local.
-/// * An AddrLabelExpr.
-/// * A BlockExpr for a block with captures.
-using Local = Expr*;
-
-/// Expressions we stepped over when looking for the local state. Any steps
-/// that would inhibit lifetime extension or take us out of subexpressions of
-/// the initializer are included.
-struct IndirectLocalPathEntry {
- enum EntryKind {
- DefaultInit,
- AddressOf,
- VarInit,
- LValToRVal,
- LifetimeBoundCall,
- TemporaryCopy,
- LambdaCaptureInit,
- GslReferenceInit,
- GslPointerInit
- } Kind;
- Expr *E;
- union {
- const Decl *D = nullptr;
- const LambdaCapture *Capture;
- };
- IndirectLocalPathEntry() {}
- IndirectLocalPathEntry(EntryKind K, Expr *E) : Kind(K), E(E) {}
- IndirectLocalPathEntry(EntryKind K, Expr *E, const Decl *D)
- : Kind(K), E(E), D(D) {}
- IndirectLocalPathEntry(EntryKind K, Expr *E, const LambdaCapture *Capture)
- : Kind(K), E(E), Capture(Capture) {}
-};
-
-using IndirectLocalPath = llvm::SmallVectorImpl<IndirectLocalPathEntry>;
-
-struct RevertToOldSizeRAII {
- IndirectLocalPath &Path;
- unsigned OldSize = Path.size();
- RevertToOldSizeRAII(IndirectLocalPath &Path) : Path(Path) {}
- ~RevertToOldSizeRAII() { Path.resize(OldSize); }
-};
-
-using LocalVisitor = llvm::function_ref<bool(IndirectLocalPath &Path, Local L,
- ReferenceKind RK)>;
-}
-
-static bool isVarOnPath(IndirectLocalPath &Path, VarDecl *VD) {
- for (auto E : Path)
- if (E.Kind == IndirectLocalPathEntry::VarInit && E.D == VD)
- return true;
- return false;
-}
-
-static bool pathContainsInit(IndirectLocalPath &Path) {
- return llvm::any_of(Path, [=](IndirectLocalPathEntry E) {
- return E.Kind == IndirectLocalPathEntry::DefaultInit ||
- E.Kind == IndirectLocalPathEntry::VarInit;
- });
-}
-
-static void visitLocalsRetainedByInitializer(IndirectLocalPath &Path,
- Expr *Init, LocalVisitor Visit,
- bool RevisitSubinits,
- bool EnableLifetimeWarnings);
-
-static void visitLocalsRetainedByReferenceBinding(IndirectLocalPath &Path,
- Expr *Init, ReferenceKind RK,
- LocalVisitor Visit,
- bool EnableLifetimeWarnings);
-
-template <typename T> static bool isRecordWithAttr(QualType Type) {
- if (auto *RD = Type->getAsCXXRecordDecl())
- return RD->hasAttr<T>();
- return false;
-}
-
-// Decl::isInStdNamespace will return false for iterators in some STL
-// implementations due to them being defined in a namespace outside of the std
-// namespace.
-static bool isInStlNamespace(const Decl *D) {
- const DeclContext *DC = D->getDeclContext();
- if (!DC)
- return false;
- if (const auto *ND = dyn_cast<NamespaceDecl>(DC))
- if (const IdentifierInfo *II = ND->getIdentifier()) {
- StringRef Name = II->getName();
- if (Name.size() >= 2 && Name.front() == '_' &&
- (Name[1] == '_' || isUppercase(Name[1])))
- return true;
- }
-
- return DC->isStdNamespace();
-}
-
-static bool shouldTrackImplicitObjectArg(const CXXMethodDecl *Callee) {
- if (auto *Conv = dyn_cast_or_null<CXXConversionDecl>(Callee))
- if (isRecordWithAttr<PointerAttr>(Conv->getConversionType()))
- return true;
- if (!isInStlNamespace(Callee->getParent()))
- return false;
- if (!isRecordWithAttr<PointerAttr>(
- Callee->getFunctionObjectParameterType()) &&
- !isRecordWithAttr<OwnerAttr>(Callee->getFunctionObjectParameterType()))
- return false;
- if (Callee->getReturnType()->isPointerType() ||
- isRecordWithAttr<PointerAttr>(Callee->getReturnType())) {
- if (!Callee->getIdentifier())
- return false;
- return llvm::StringSwitch<bool>(Callee->getName())
- .Cases("begin", "rbegin", "cbegin", "crbegin", true)
- .Cases("end", "rend", "cend", "crend", true)
- .Cases("c_str", "data", "get", true)
- // Map and set types.
- .Cases("find", "equal_range", "lower_bound", "upper_bound", true)
- .Default(false);
- } else if (Callee->getReturnType()->isReferenceType()) {
- if (!Callee->getIdentifier()) {
- auto OO = Callee->getOverloadedOperator();
- return OO == OverloadedOperatorKind::OO_Subscript ||
- OO == OverloadedOperatorKind::OO_Star;
- }
- return llvm::StringSwitch<bool>(Callee->getName())
- .Cases("front", "back", "at", "top", "value", true)
- .Default(false);
- }
- return false;
-}
-
-static bool shouldTrackFirstArgument(const FunctionDecl *FD) {
- if (!FD->getIdentifier() || FD->getNumParams() != 1)
- return false;
- const auto *RD = FD->getParamDecl(0)->getType()->getPointeeCXXRecordDecl();
- if (!FD->isInStdNamespace() || !RD || !RD->isInStdNamespace())
- return false;
- if (!isRecordWithAttr<PointerAttr>(QualType(RD->getTypeForDecl(), 0)) &&
- !isRecordWithAttr<OwnerAttr>(QualType(RD->getTypeForDecl(), 0)))
- return false;
- if (FD->getReturnType()->isPointerType() ||
- isRecordWithAttr<PointerAttr>(FD->getReturnType())) {
- return llvm::StringSwitch<bool>(FD->getName())
- .Cases("begin", "rbegin", "cbegin", "crbegin", true)
- .Cases("end", "rend", "cend", "crend", true)
- .Case("data", true)
- .Default(false);
- } else if (FD->getReturnType()->isReferenceType()) {
- return llvm::StringSwitch<bool>(FD->getName())
- .Cases("get", "any_cast", true)
- .Default(false);
- }
- return false;
-}
-
-static void handleGslAnnotatedTypes(IndirectLocalPath &Path, Expr *Call,
- LocalVisitor Visit) {
- auto VisitPointerArg = [&](const Decl *D, Expr *Arg, bool Value) {
- // We are not interested in the temporary base objects of gsl Pointers:
- // Temp().ptr; // Here ptr might not dangle.
- if (isa<MemberExpr>(Arg->IgnoreImpCasts()))
- return;
- // Once we initialized a value with a reference, it can no longer dangle.
- if (!Value) {
- for (const IndirectLocalPathEntry &PE : llvm::reverse(Path)) {
- if (PE.Kind == IndirectLocalPathEntry::GslReferenceInit)
- continue;
- if (PE.Kind == IndirectLocalPathEntry::GslPointerInit)
- return;
- break;
- }
- }
- Path.push_back({Value ? IndirectLocalPathEntry::GslPointerInit
- : IndirectLocalPathEntry::GslReferenceInit,
- Arg, D});
- if (Arg->isGLValue())
- visitLocalsRetainedByReferenceBinding(Path, Arg, RK_ReferenceBinding,
- Visit,
- /*EnableLifetimeWarnings=*/true);
- else
- visitLocalsRetainedByInitializer(Path, Arg, Visit, true,
- /*EnableLifetimeWarnings=*/true);
- Path.pop_back();
- };
-
- if (auto *MCE = dyn_cast<CXXMemberCallExpr>(Call)) {
- const auto *MD = cast_or_null<CXXMethodDecl>(MCE->getDirectCallee());
- if (MD && shouldTrackImplicitObjectArg(MD))
- VisitPointerArg(MD, MCE->getImplicitObjectArgument(),
- !MD->getReturnType()->isReferenceType());
- return;
- } else if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(Call)) {
- FunctionDecl *Callee = OCE->getDirectCallee();
- if (Callee && Callee->isCXXInstanceMember() &&
- shouldTrackImplicitObjectArg(cast<CXXMethodDecl>(Callee)))
- VisitPointerArg(Callee, OCE->getArg(0),
- !Callee->getReturnType()->isReferenceType());
- return;
- } else if (auto *CE = dyn_cast<CallExpr>(Call)) {
- FunctionDecl *Callee = CE->getDirectCallee();
- if (Callee && shouldTrackFirstArgument(Callee))
- VisitPointerArg(Callee, CE->getArg(0),
- !Callee->getReturnType()->isReferenceType());
- return;
- }
-
- if (auto *CCE = dyn_cast<CXXConstructExpr>(Call)) {
- const auto *Ctor = CCE->getConstructor();
- const CXXRecordDecl *RD = Ctor->getParent();
- if (CCE->getNumArgs() > 0 && RD->hasAttr<PointerAttr>())
- VisitPointerArg(Ctor->getParamDecl(0), CCE->getArgs()[0], true);
- }
-}
-
-static bool implicitObjectParamIsLifetimeBound(const FunctionDecl *FD) {
- const TypeSourceInfo *TSI = FD->getTypeSourceInfo();
- if (!TSI)
- return false;
- // Don't declare this variable in the second operand of the for-statement;
- // GCC miscompiles that by ending its lifetime before evaluating the
- // third operand. See gcc.gnu.org/PR86769.
- AttributedTypeLoc ATL;
- for (TypeLoc TL = TSI->getTypeLoc();
- (ATL = TL.getAsAdjusted<AttributedTypeLoc>());
- TL = ATL.getModifiedLoc()) {
- if (ATL.getAttrAs<LifetimeBoundAttr>())
- return true;
- }
-
- // Assume that all assignment operators with a "normal" return type return
- // *this, that is, an lvalue reference that is the same type as the implicit
- // object parameter (or the LHS for a non-member operator$=).
- OverloadedOperatorKind OO = FD->getDeclName().getCXXOverloadedOperator();
- if (OO == OO_Equal || isCompoundAssignmentOperator(OO)) {
- QualType RetT = FD->getReturnType();
- if (RetT->isLValueReferenceType()) {
- ASTContext &Ctx = FD->getASTContext();
- QualType LHST;
- auto *MD = dyn_cast<CXXMethodDecl>(FD);
- if (MD && MD->isCXXInstanceMember())
- LHST = Ctx.getLValueReferenceType(MD->getFunctionObjectParameterType());
- else
- LHST = MD->getParamDecl(0)->getType();
- if (Ctx.hasSameType(RetT, LHST))
- return true;
- }
- }
-
- return false;
-}
-
-static void visitLifetimeBoundArguments(IndirectLocalPath &Path, Expr *Call,
- LocalVisitor Visit) {
- const FunctionDecl *Callee;
- ArrayRef<Expr*> Args;
-
- if (auto *CE = dyn_cast<CallExpr>(Call)) {
- Callee = CE->getDirectCallee();
- Args = llvm::ArrayRef(CE->getArgs(), CE->getNumArgs());
- } else {
- auto *CCE = cast<CXXConstructExpr>(Call);
- Callee = CCE->getConstructor();
- Args = llvm::ArrayRef(CCE->getArgs(), CCE->getNumArgs());
- }
- if (!Callee)
- return;
-
- Expr *ObjectArg = nullptr;
- if (isa<CXXOperatorCallExpr>(Call) && Callee->isCXXInstanceMember()) {
- ObjectArg = Args[0];
- Args = Args.slice(1);
- } else if (auto *MCE = dyn_cast<CXXMemberCallExpr>(Call)) {
- ObjectArg = MCE->getImplicitObjectArgument();
- }
-
- auto VisitLifetimeBoundArg = [&](const Decl *D, Expr *Arg) {
- Path.push_back({IndirectLocalPathEntry::LifetimeBoundCall, Arg, D});
- if (Arg->isGLValue())
- visitLocalsRetainedByReferenceBinding(Path, Arg, RK_ReferenceBinding,
- Visit,
- /*EnableLifetimeWarnings=*/false);
- else
- visitLocalsRetainedByInitializer(Path, Arg, Visit, true,
- /*EnableLifetimeWarnings=*/false);
- Path.pop_back();
- };
-
- bool CheckCoroCall = false;
- if (const auto *RD = Callee->getReturnType()->getAsRecordDecl()) {
- CheckCoroCall = RD->hasAttr<CoroLifetimeBoundAttr>() &&
- RD->hasAttr<CoroReturnTypeAttr>() &&
- !Callee->hasAttr<CoroDisableLifetimeBoundAttr>();
- }
-
- if (ObjectArg) {
- bool CheckCoroObjArg = CheckCoroCall;
- // Coroutine lambda objects with empty capture list are not lifetimebound.
- if (auto *LE = dyn_cast<LambdaExpr>(ObjectArg->IgnoreImplicit());
- LE && LE->captures().empty())
- CheckCoroObjArg = false;
- // Allow `get_return_object()` as the object param (__promise) is not
- // lifetimebound.
- if (Sema::CanBeGetReturnObject(Callee))
- CheckCoroObjArg = false;
- if (implicitObjectParamIsLifetimeBound(Callee) || CheckCoroObjArg)
- VisitLifetimeBoundArg(Callee, ObjectArg);
- }
-
- for (unsigned I = 0,
- N = std::min<unsigned>(Callee->getNumParams(), Args.size());
- I != N; ++I) {
- if (CheckCoroCall || Callee->getParamDecl(I)->hasAttr<LifetimeBoundAttr>())
- VisitLifetimeBoundArg(Callee->getParamDecl(I), Args[I]);
- }
-}
-
-/// Visit the locals that would be reachable through a reference bound to the
-/// glvalue expression \c Init.
-static void visitLocalsRetainedByReferenceBinding(IndirectLocalPath &Path,
- Expr *Init, ReferenceKind RK,
- LocalVisitor Visit,
- bool EnableLifetimeWarnings) {
- RevertToOldSizeRAII RAII(Path);
-
- // Walk past any constructs which we can lifetime-extend across.
- Expr *Old;
- do {
- Old = Init;
-
- if (auto *FE = dyn_cast<FullExpr>(Init))
- Init = FE->getSubExpr();
-
- if (InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
- // If this is just redundant braces around an initializer, step over it.
- if (ILE->isTransparent())
- Init = ILE->getInit(0);
- }
-
- // Step over any subobject adjustments; we may have a materialized
- // temporary inside them.
- Init = const_cast<Expr *>(Init->skipRValueSubobjectAdjustments());
-
- // Per current approach for DR1376, look through casts to reference type
- // when performing lifetime extension.
- if (CastExpr *CE = dyn_cast<CastExpr>(Init))
- if (CE->getSubExpr()->isGLValue())
- Init = CE->getSubExpr();
-
- // Per the current approach for DR1299, look through array element access
- // on array glvalues when performing lifetime extension.
- if (auto *ASE = dyn_cast<ArraySubscriptExpr>(Init)) {
- Init = ASE->getBase();
- auto *ICE = dyn_cast<ImplicitCastExpr>(Init);
- if (ICE && ICE->getCastKind() == CK_ArrayToPointerDecay)
- Init = ICE->getSubExpr();
- else
- // We can't lifetime extend through this but we might still find some
- // retained temporaries.
- return visitLocalsRetainedByInitializer(Path, Init, Visit, true,
- EnableLifetimeWarnings);
- }
-
- // Step into CXXDefaultInitExprs so we can diagnose cases where a
- // constructor inherits one as an implicit mem-initializer.
- if (auto *DIE = dyn_cast<CXXDefaultInitExpr>(Init)) {
- Path.push_back(
- {IndirectLocalPathEntry::DefaultInit, DIE, DIE->getField()});
- Init = DIE->getExpr();
- }
- } while (Init != Old);
-
- if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(Init)) {
- if (Visit(Path, Local(MTE), RK))
- visitLocalsRetainedByInitializer(Path, MTE->getSubExpr(), Visit, true,
- EnableLifetimeWarnings);
- }
-
- if (isa<CallExpr>(Init)) {
- if (EnableLifetimeWarnings)
- handleGslAnnotatedTypes(Path, Init, Visit);
- return visitLifetimeBoundArguments(Path, Init, Visit);
- }
-
- switch (Init->getStmtClass()) {
- case Stmt::DeclRefExprClass: {
- // If we find the name of a local non-reference parameter, we could have a
- // lifetime problem.
- auto *DRE = cast<DeclRefExpr>(Init);
- auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
- if (VD && VD->hasLocalStorage() &&
- !DRE->refersToEnclosingVariableOrCapture()) {
- if (!VD->getType()->isReferenceType()) {
- Visit(Path, Local(DRE), RK);
- } else if (isa<ParmVarDecl>(DRE->getDecl())) {
- // The lifetime of a reference parameter is unknown; assume it's OK
- // for now.
- break;
- } else if (VD->getInit() && !isVarOnPath(Path, VD)) {
- Path.push_back({IndirectLocalPathEntry::VarInit, DRE, VD});
- visitLocalsRetainedByReferenceBinding(Path, VD->getInit(),
- RK_ReferenceBinding, Visit,
- EnableLifetimeWarnings);
- }
- }
- break;
- }
-
- case Stmt::UnaryOperatorClass: {
- // The only unary operator that make sense to handle here
- // is Deref. All others don't resolve to a "name." This includes
- // handling all sorts of rvalues passed to a unary operator.
- const UnaryOperator *U = cast<UnaryOperator>(Init);
- if (U->getOpcode() == UO_Deref)
- visitLocalsRetainedByInitializer(Path, U->getSubExpr(), Visit, true,
- EnableLifetimeWarnings);
- break;
- }
-
- case Stmt::OMPArraySectionExprClass: {
- visitLocalsRetainedByInitializer(Path,
- cast<OMPArraySectionExpr>(Init)->getBase(),
- Visit, true, EnableLifetimeWarnings);
- break;
- }
-
- case Stmt::ConditionalOperatorClass:
- case Stmt::BinaryConditionalOperatorClass: {
- auto *C = cast<AbstractConditionalOperator>(Init);
- if (!C->getTrueExpr()->getType()->isVoidType())
- visitLocalsRetainedByReferenceBinding(Path, C->getTrueExpr(), RK, Visit,
- EnableLifetimeWarnings);
- if (!C->getFalseExpr()->getType()->isVoidType())
- visitLocalsRetainedByReferenceBinding(Path, C->getFalseExpr(), RK, Visit,
- EnableLifetimeWarnings);
- break;
- }
-
- // FIXME: Visit the left-hand side of an -> or ->*.
-
- default:
- break;
- }
-}
-
-/// Visit the locals that would be reachable through an object initialized by
-/// the prvalue expression \c Init.
-static void visitLocalsRetainedByInitializer(IndirectLocalPath &Path,
- Expr *Init, LocalVisitor Visit,
- bool RevisitSubinits,
- bool EnableLifetimeWarnings) {
- RevertToOldSizeRAII RAII(Path);
-
- Expr *Old;
- do {
- Old = Init;
-
- // Step into CXXDefaultInitExprs so we can diagnose cases where a
- // constructor inherits one as an implicit mem-initializer.
- if (auto *DIE = dyn_cast<CXXDefaultInitExpr>(Init)) {
- Path.push_back({IndirectLocalPathEntry::DefaultInit, DIE, DIE->getField()});
- Init = DIE->getExpr();
- }
-
- if (auto *FE = dyn_cast<FullExpr>(Init))
- Init = FE->getSubExpr();
-
- // Dig out the expression which constructs the extended temporary.
- Init = const_cast<Expr *>(Init->skipRValueSubobjectAdjustments());
-
- if (CXXBindTemporaryExpr *BTE = dyn_cast<CXXBindTemporaryExpr>(Init))
- Init = BTE->getSubExpr();
-
- Init = Init->IgnoreParens();
-
- // Step over value-preserving rvalue casts.
- if (auto *CE = dyn_cast<CastExpr>(Init)) {
- switch (CE->getCastKind()) {
- case CK_LValueToRValue:
- // If we can match the lvalue to a const object, we can look at its
- // initializer.
- Path.push_back({IndirectLocalPathEntry::LValToRVal, CE});
- return visitLocalsRetainedByReferenceBinding(
- Path, Init, RK_ReferenceBinding,
- [&](IndirectLocalPath &Path, Local L, ReferenceKind RK) -> bool {
- if (auto *DRE = dyn_cast<DeclRefExpr>(L)) {
- auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
- if (VD && VD->getType().isConstQualified() && VD->getInit() &&
- !isVarOnPath(Path, VD)) {
- Path.push_back({IndirectLocalPathEntry::VarInit, DRE, VD});
- visitLocalsRetainedByInitializer(Path, VD->getInit(), Visit, true,
- EnableLifetimeWarnings);
- }
- } else if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(L)) {
- if (MTE->getType().isConstQualified())
- visitLocalsRetainedByInitializer(Path, MTE->getSubExpr(), Visit,
- true, EnableLifetimeWarnings);
- }
- return false;
- }, EnableLifetimeWarnings);
-
- // We assume that objects can be retained by pointers cast to integers,
- // but not if the integer is cast to floating-point type or to _Complex.
- // We assume that casts to 'bool' do not preserve enough information to
- // retain a local object.
- case CK_NoOp:
- case CK_BitCast:
- case CK_BaseToDerived:
- case CK_DerivedToBase:
- case CK_UncheckedDerivedToBase:
- case CK_Dynamic:
- case CK_ToUnion:
- case CK_UserDefinedConversion:
- case CK_ConstructorConversion:
- case CK_IntegralToPointer:
- case CK_PointerToIntegral:
- case CK_VectorSplat:
- case CK_IntegralCast:
- case CK_CPointerToObjCPointerCast:
- case CK_BlockPointerToObjCPointerCast:
- case CK_AnyPointerToBlockPointerCast:
- case CK_AddressSpaceConversion:
- break;
-
- case CK_ArrayToPointerDecay:
- // Model array-to-pointer decay as taking the address of the array
- // lvalue.
- Path.push_back({IndirectLocalPathEntry::AddressOf, CE});
- return visitLocalsRetainedByReferenceBinding(Path, CE->getSubExpr(),
- RK_ReferenceBinding, Visit,
- EnableLifetimeWarnings);
-
- default:
- return;
- }
-
- Init = CE->getSubExpr();
- }
- } while (Old != Init);
-
- // C++17 [dcl.init.list]p6:
- // initializing an initializer_list object from the array extends the
- // lifetime of the array exactly like binding a reference to a temporary.
- if (auto *ILE = dyn_cast<CXXStdInitializerListExpr>(Init))
- return visitLocalsRetainedByReferenceBinding(Path, ILE->getSubExpr(),
- RK_StdInitializerList, Visit,
- EnableLifetimeWarnings);
-
- if (InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
- // We already visited the elements of this initializer list while
- // performing the initialization. Don't visit them again unless we've
- // changed the lifetime of the initialized entity.
- if (!RevisitSubinits)
- return;
-
- if (ILE->isTransparent())
- return visitLocalsRetainedByInitializer(Path, ILE->getInit(0), Visit,
- RevisitSubinits,
- EnableLifetimeWarnings);
-
- if (ILE->getType()->isArrayType()) {
- for (unsigned I = 0, N = ILE->getNumInits(); I != N; ++I)
- visitLocalsRetainedByInitializer(Path, ILE->getInit(I), Visit,
- RevisitSubinits,
- EnableLifetimeWarnings);
- return;
- }
-
- if (CXXRecordDecl *RD = ILE->getType()->getAsCXXRecordDecl()) {
- assert(RD->isAggregate() && "aggregate init on non-aggregate");
-
- // If we lifetime-extend a braced initializer which is initializing an
- // aggregate, and that aggregate contains reference members which are
- // bound to temporaries, those temporaries are also lifetime-extended.
- if (RD->isUnion() && ILE->getInitializedFieldInUnion() &&
- ILE->getInitializedFieldInUnion()->getType()->isReferenceType())
- visitLocalsRetainedByReferenceBinding(Path, ILE->getInit(0),
- RK_ReferenceBinding, Visit,
- EnableLifetimeWarnings);
- else {
- unsigned Index = 0;
- for (; Index < RD->getNumBases() && Index < ILE->getNumInits(); ++Index)
- visitLocalsRetainedByInitializer(Path, ILE->getInit(Index), Visit,
- RevisitSubinits,
- EnableLifetimeWarnings);
- for (const auto *I : RD->fields()) {
- if (Index >= ILE->getNumInits())
- break;
- if (I->isUnnamedBitfield())
- continue;
- Expr *SubInit = ILE->getInit(Index);
- if (I->getType()->isReferenceType())
- visitLocalsRetainedByReferenceBinding(Path, SubInit,
- RK_ReferenceBinding, Visit,
- EnableLifetimeWarnings);
- else
- // This might be either aggregate-initialization of a member or
- // initialization of a std::initializer_list object. Regardless,
- // we should recursively lifetime-extend that initializer.
- visitLocalsRetainedByInitializer(Path, SubInit, Visit,
- RevisitSubinits,
- EnableLifetimeWarnings);
- ++Index;
- }
- }
- }
- return;
- }
-
- // The lifetime of an init-capture is that of the closure object constructed
- // by a lambda-expression.
- if (auto *LE = dyn_cast<LambdaExpr>(Init)) {
- LambdaExpr::capture_iterator CapI = LE->capture_begin();
- for (Expr *E : LE->capture_inits()) {
- assert(CapI != LE->capture_end());
- const LambdaCapture &Cap = *CapI++;
- if (!E)
- continue;
- if (Cap.capturesVariable())
- Path.push_back({IndirectLocalPathEntry::LambdaCaptureInit, E, &Cap});
- if (E->isGLValue())
- visitLocalsRetainedByReferenceBinding(Path, E, RK_ReferenceBinding,
- Visit, EnableLifetimeWarnings);
- else
- visitLocalsRetainedByInitializer(Path, E, Visit, true,
- EnableLifetimeWarnings);
- if (Cap.capturesVariable())
- Path.pop_back();
- }
- }
-
- // Assume that a copy or move from a temporary references the same objects
- // that the temporary does.
- if (auto *CCE = dyn_cast<CXXConstructExpr>(Init)) {
- if (CCE->getConstructor()->isCopyOrMoveConstructor()) {
- if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(CCE->getArg(0))) {
- Expr *Arg = MTE->getSubExpr();
- Path.push_back({IndirectLocalPathEntry::TemporaryCopy, Arg,
- CCE->getConstructor()});
- visitLocalsRetainedByInitializer(Path, Arg, Visit, true,
- /*EnableLifetimeWarnings*/false);
- Path.pop_back();
- }
- }
- }
-
- if (isa<CallExpr>(Init) || isa<CXXConstructExpr>(Init)) {
- if (EnableLifetimeWarnings)
- handleGslAnnotatedTypes(Path, Init, Visit);
- return visitLifetimeBoundArguments(Path, Init, Visit);
- }
-
- switch (Init->getStmtClass()) {
- case Stmt::UnaryOperatorClass: {
- auto *UO = cast<UnaryOperator>(Init);
- // If the initializer is the address of a local, we could have a lifetime
- // problem.
- if (UO->getOpcode() == UO_AddrOf) {
- // If this is &rvalue, then it's ill-formed and we have already diagnosed
- // it. Don't produce a redundant warning about the lifetime of the
- // temporary.
- if (isa<MaterializeTemporaryExpr>(UO->getSubExpr()))
- return;
-
- Path.push_back({IndirectLocalPathEntry::AddressOf, UO});
- visitLocalsRetainedByReferenceBinding(Path, UO->getSubExpr(),
- RK_ReferenceBinding, Visit,
- EnableLifetimeWarnings);
- }
- break;
- }
-
- case Stmt::BinaryOperatorClass: {
- // Handle pointer arithmetic.
- auto *BO = cast<BinaryOperator>(Init);
- BinaryOperatorKind BOK = BO->getOpcode();
- if (!BO->getType()->isPointerType() || (BOK != BO_Add && BOK != BO_Sub))
- break;
-
- if (BO->getLHS()->getType()->isPointerType())
- visitLocalsRetainedByInitializer(Path, BO->getLHS(), Visit, true,
- EnableLifetimeWarnings);
- else if (BO->getRHS()->getType()->isPointerType())
- visitLocalsRetainedByInitializer(Path, BO->getRHS(), Visit, true,
- EnableLifetimeWarnings);
- break;
- }
-
- case Stmt::ConditionalOperatorClass:
- case Stmt::BinaryConditionalOperatorClass: {
- auto *C = cast<AbstractConditionalOperator>(Init);
- // In C++, we can have a throw-expression operand, which has 'void' type
- // and isn't interesting from a lifetime perspective.
- if (!C->getTrueExpr()->getType()->isVoidType())
- visitLocalsRetainedByInitializer(Path, C->getTrueExpr(), Visit, true,
- EnableLifetimeWarnings);
- if (!C->getFalseExpr()->getType()->isVoidType())
- visitLocalsRetainedByInitializer(Path, C->getFalseExpr(), Visit, true,
- EnableLifetimeWarnings);
- break;
- }
-
- case Stmt::BlockExprClass:
- if (cast<BlockExpr>(Init)->getBlockDecl()->hasCaptures()) {
- // This is a local block, whose lifetime is that of the function.
- Visit(Path, Local(cast<BlockExpr>(Init)), RK_ReferenceBinding);
- }
- break;
-
- case Stmt::AddrLabelExprClass:
- // We want to warn if the address of a label would escape the function.
- Visit(Path, Local(cast<AddrLabelExpr>(Init)), RK_ReferenceBinding);
- break;
-
- default:
- break;
- }
-}
-
-/// Whether a path to an object supports lifetime extension.
-enum PathLifetimeKind {
- /// Lifetime-extend along this path.
- Extend,
- /// We should lifetime-extend, but we don't because (due to technical
- /// limitations) we can't. This happens for default member initializers,
- /// which we don't clone for every use, so we don't have a unique
- /// MaterializeTemporaryExpr to update.
- ShouldExtend,
- /// Do not lifetime extend along this path.
- NoExtend
-};
-
-/// Determine whether this is an indirect path to a temporary that we are
-/// supposed to lifetime-extend along.
-static PathLifetimeKind
-shouldLifetimeExtendThroughPath(const IndirectLocalPath &Path) {
- PathLifetimeKind Kind = PathLifetimeKind::Extend;
- for (auto Elem : Path) {
- if (Elem.Kind == IndirectLocalPathEntry::DefaultInit)
- Kind = PathLifetimeKind::ShouldExtend;
- else if (Elem.Kind != IndirectLocalPathEntry::LambdaCaptureInit)
- return PathLifetimeKind::NoExtend;
- }
- return Kind;
-}
-
-/// Find the range for the first interesting entry in the path at or after I.
-static SourceRange nextPathEntryRange(const IndirectLocalPath &Path, unsigned I,
- Expr *E) {
- for (unsigned N = Path.size(); I != N; ++I) {
- switch (Path[I].Kind) {
- case IndirectLocalPathEntry::AddressOf:
- case IndirectLocalPathEntry::LValToRVal:
- case IndirectLocalPathEntry::LifetimeBoundCall:
- case IndirectLocalPathEntry::TemporaryCopy:
- case IndirectLocalPathEntry::GslReferenceInit:
- case IndirectLocalPathEntry::GslPointerInit:
- // These exist primarily to mark the path as not permitting or
- // supporting lifetime extension.
- break;
-
- case IndirectLocalPathEntry::VarInit:
- if (cast<VarDecl>(Path[I].D)->isImplicit())
- return SourceRange();
- [[fallthrough]];
- case IndirectLocalPathEntry::DefaultInit:
- return Path[I].E->getSourceRange();
-
- case IndirectLocalPathEntry::LambdaCaptureInit:
- if (!Path[I].Capture->capturesVariable())
- continue;
- return Path[I].E->getSourceRange();
- }
- }
- return E->getSourceRange();
-}
-
-static bool pathOnlyInitializesGslPointer(IndirectLocalPath &Path) {
- for (const auto &It : llvm::reverse(Path)) {
- if (It.Kind == IndirectLocalPathEntry::VarInit)
- continue;
- if (It.Kind == IndirectLocalPathEntry::AddressOf)
- continue;
- if (It.Kind == IndirectLocalPathEntry::LifetimeBoundCall)
- continue;
- return It.Kind == IndirectLocalPathEntry::GslPointerInit ||
- It.Kind == IndirectLocalPathEntry::GslReferenceInit;
- }
- return false;
-}
-
void Sema::checkInitializerLifetime(const InitializedEntity &Entity,
Expr *Init) {
- LifetimeResult LR = getEntityLifetime(&Entity);
- LifetimeKind LK = LR.getInt();
- const InitializedEntity *ExtendingEntity = LR.getPointer();
-
- // If this entity doesn't have an interesting lifetime, don't bother looking
- // for temporaries within its initializer.
- if (LK == LK_FullExpression)
- return;
-
- auto TemporaryVisitor = [&](IndirectLocalPath &Path, Local L,
- ReferenceKind RK) -> bool {
- SourceRange DiagRange = nextPathEntryRange(Path, 0, L);
- SourceLocation DiagLoc = DiagRange.getBegin();
-
- auto *MTE = dyn_cast<MaterializeTemporaryExpr>(L);
-
- bool IsGslPtrInitWithGslTempOwner = false;
- bool IsLocalGslOwner = false;
- if (pathOnlyInitializesGslPointer(Path)) {
- if (isa<DeclRefExpr>(L)) {
- // We do not want to follow the references when returning a pointer originating
- // from a local owner to avoid the following false positive:
- // int &p = *localUniquePtr;
- // someContainer.add(std::move(localUniquePtr));
- // return p;
- IsLocalGslOwner = isRecordWithAttr<OwnerAttr>(L->getType());
- if (pathContainsInit(Path) || !IsLocalGslOwner)
- return false;
- } else {
- IsGslPtrInitWithGslTempOwner = MTE && !MTE->getExtendingDecl() &&
- isRecordWithAttr<OwnerAttr>(MTE->getType());
- // Skipping a chain of initializing gsl::Pointer annotated objects.
- // We are looking only for the final source to find out if it was
- // a local or temporary owner or the address of a local variable/param.
- if (!IsGslPtrInitWithGslTempOwner)
- return true;
- }
- }
-
- switch (LK) {
- case LK_FullExpression:
- llvm_unreachable("already handled this");
-
- case LK_Extended: {
- if (!MTE) {
- // The initialized entity has lifetime beyond the full-expression,
- // and the local entity does too, so don't warn.
- //
- // FIXME: We should consider warning if a static / thread storage
- // duration variable retains an automatic storage duration local.
- return false;
- }
-
- if (IsGslPtrInitWithGslTempOwner && DiagLoc.isValid()) {
- Diag(DiagLoc, diag::warn_dangling_lifetime_pointer) << DiagRange;
- return false;
- }
-
- switch (shouldLifetimeExtendThroughPath(Path)) {
- case PathLifetimeKind::Extend:
- // Update the storage duration of the materialized temporary.
- // FIXME: Rebuild the expression instead of mutating it.
- MTE->setExtendingDecl(ExtendingEntity->getDecl(),
- ExtendingEntity->allocateManglingNumber());
- // Also visit the temporaries lifetime-extended by this initializer.
- return true;
-
- case PathLifetimeKind::ShouldExtend:
- // We're supposed to lifetime-extend the temporary along this path (per
- // the resolution of DR1815), but we don't support that yet.
- //
- // FIXME: Properly handle this situation. Perhaps the easiest approach
- // would be to clone the initializer expression on each use that would
- // lifetime extend its temporaries.
- Diag(DiagLoc, diag::warn_unsupported_lifetime_extension)
- << RK << DiagRange;
- break;
-
- case PathLifetimeKind::NoExtend:
- // If the path goes through the initialization of a variable or field,
- // it can't possibly reach a temporary created in this full-expression.
- // We will have already diagnosed any problems with the initializer.
- if (pathContainsInit(Path))
- return false;
-
- Diag(DiagLoc, diag::warn_dangling_variable)
- << RK << !Entity.getParent()
- << ExtendingEntity->getDecl()->isImplicit()
- << ExtendingEntity->getDecl() << Init->isGLValue() << DiagRange;
- break;
- }
- break;
- }
-
- case LK_MemInitializer: {
- if (isa<MaterializeTemporaryExpr>(L)) {
- // Under C++ DR1696, if a mem-initializer (or a default member
- // initializer used by the absence of one) would lifetime-extend a
- // temporary, the program is ill-formed.
- if (auto *ExtendingDecl =
- ExtendingEntity ? ExtendingEntity->getDecl() : nullptr) {
- if (IsGslPtrInitWithGslTempOwner) {
- Diag(DiagLoc, diag::warn_dangling_lifetime_pointer_member)
- << ExtendingDecl << DiagRange;
- Diag(ExtendingDecl->getLocation(),
- diag::note_ref_or_ptr_member_declared_here)
- << true;
- return false;
- }
- bool IsSubobjectMember = ExtendingEntity != &Entity;
- Diag(DiagLoc, shouldLifetimeExtendThroughPath(Path) !=
- PathLifetimeKind::NoExtend
- ? diag::err_dangling_member
- : diag::warn_dangling_member)
- << ExtendingDecl << IsSubobjectMember << RK << DiagRange;
- // Don't bother adding a note pointing to the field if we're inside
- // its default member initializer; our primary diagnostic points to
- // the same place in that case.
- if (Path.empty() ||
- Path.back().Kind != IndirectLocalPathEntry::DefaultInit) {
- Diag(ExtendingDecl->getLocation(),
- diag::note_lifetime_extending_member_declared_here)
- << RK << IsSubobjectMember;
- }
- } else {
- // We have a mem-initializer but no particular field within it; this
- // is either a base class or a delegating initializer directly
- // initializing the base-class from something that doesn't live long
- // enough.
- //
- // FIXME: Warn on this.
- return false;
- }
- } else {
- // Paths via a default initializer can only occur during error recovery
- // (there's no other way that a default initializer can refer to a
- // local). Don't produce a bogus warning on those cases.
- if (pathContainsInit(Path))
- return false;
-
- // Suppress false positives for code like the one below:
- // Ctor(unique_ptr<T> up) : member(*up), member2(move(up)) {}
- if (IsLocalGslOwner && pathOnlyInitializesGslPointer(Path))
- return false;
-
- auto *DRE = dyn_cast<DeclRefExpr>(L);
- auto *VD = DRE ? dyn_cast<VarDecl>(DRE->getDecl()) : nullptr;
- if (!VD) {
- // A member was initialized to a local block.
- // FIXME: Warn on this.
- return false;
- }
-
- if (auto *Member =
- ExtendingEntity ? ExtendingEntity->getDecl() : nullptr) {
- bool IsPointer = !Member->getType()->isReferenceType();
- Diag(DiagLoc, IsPointer ? diag::warn_init_ptr_member_to_parameter_addr
- : diag::warn_bind_ref_member_to_parameter)
- << Member << VD << isa<ParmVarDecl>(VD) << DiagRange;
- Diag(Member->getLocation(),
- diag::note_ref_or_ptr_member_declared_here)
- << (unsigned)IsPointer;
- }
- }
- break;
- }
-
- case LK_New:
- if (isa<MaterializeTemporaryExpr>(L)) {
- if (IsGslPtrInitWithGslTempOwner)
- Diag(DiagLoc, diag::warn_dangling_lifetime_pointer) << DiagRange;
- else
- Diag(DiagLoc, RK == RK_ReferenceBinding
- ? diag::warn_new_dangling_reference
- : diag::warn_new_dangling_initializer_list)
- << !Entity.getParent() << DiagRange;
- } else {
- // We can't determine if the allocation outlives the local declaration.
- return false;
- }
- break;
-
- case LK_Return:
- case LK_StmtExprResult:
- if (auto *DRE = dyn_cast<DeclRefExpr>(L)) {
- // We can't determine if the local variable outlives the statement
- // expression.
- if (LK == LK_StmtExprResult)
- return false;
- Diag(DiagLoc, diag::warn_ret_stack_addr_ref)
- << Entity.getType()->isReferenceType() << DRE->getDecl()
- << isa<ParmVarDecl>(DRE->getDecl()) << DiagRange;
- } else if (isa<BlockExpr>(L)) {
- Diag(DiagLoc, diag::err_ret_local_block) << DiagRange;
- } else if (isa<AddrLabelExpr>(L)) {
- // Don't warn when returning a label from a statement expression.
- // Leaving the scope doesn't end its lifetime.
- if (LK == LK_StmtExprResult)
- return false;
- Diag(DiagLoc, diag::warn_ret_addr_label) << DiagRange;
- } else {
- Diag(DiagLoc, diag::warn_ret_local_temp_addr_ref)
- << Entity.getType()->isReferenceType() << DiagRange;
- }
- break;
- }
-
- for (unsigned I = 0; I != Path.size(); ++I) {
- auto Elem = Path[I];
-
- switch (Elem.Kind) {
- case IndirectLocalPathEntry::AddressOf:
- case IndirectLocalPathEntry::LValToRVal:
- // These exist primarily to mark the path as not permitting or
- // supporting lifetime extension.
- break;
-
- case IndirectLocalPathEntry::LifetimeBoundCall:
- case IndirectLocalPathEntry::TemporaryCopy:
- case IndirectLocalPathEntry::GslPointerInit:
- case IndirectLocalPathEntry::GslReferenceInit:
- // FIXME: Consider adding a note for these.
- break;
-
- case IndirectLocalPathEntry::DefaultInit: {
- auto *FD = cast<FieldDecl>(Elem.D);
- Diag(FD->getLocation(), diag::note_init_with_default_member_initializer)
- << FD << nextPathEntryRange(Path, I + 1, L);
- break;
- }
-
- case IndirectLocalPathEntry::VarInit: {
- const VarDecl *VD = cast<VarDecl>(Elem.D);
- Diag(VD->getLocation(), diag::note_local_var_initializer)
- << VD->getType()->isReferenceType()
- << VD->isImplicit() << VD->getDeclName()
- << nextPathEntryRange(Path, I + 1, L);
- break;
- }
-
- case IndirectLocalPathEntry::LambdaCaptureInit:
- if (!Elem.Capture->capturesVariable())
- break;
- // FIXME: We can't easily tell apart an init-capture from a nested
- // capture of an init-capture.
- const ValueDecl *VD = Elem.Capture->getCapturedVar();
- Diag(Elem.Capture->getLocation(), diag::note_lambda_capture_initializer)
- << VD << VD->isInitCapture() << Elem.Capture->isExplicit()
- << (Elem.Capture->getCaptureKind() == LCK_ByRef) << VD
- << nextPathEntryRange(Path, I + 1, L);
- break;
- }
- }
-
- // We didn't lifetime-extend, so don't go any further; we don't need more
- // warnings or errors on inner temporaries within this one's initializer.
- return false;
- };
-
- bool EnableLifetimeWarnings = !getDiagnostics().isIgnored(
- diag::warn_dangling_lifetime_pointer, SourceLocation());
- llvm::SmallVector<IndirectLocalPathEntry, 8> Path;
- if (Init->isGLValue())
- visitLocalsRetainedByReferenceBinding(Path, Init, RK_ReferenceBinding,
- TemporaryVisitor,
- EnableLifetimeWarnings);
- else
- visitLocalsRetainedByInitializer(Path, Init, TemporaryVisitor, false,
- EnableLifetimeWarnings);
+ return sema::checkExprLifetime(*this, Entity, Init);
}
static void DiagnoseNarrowingInInitList(Sema &S,
@@ -8366,6 +7307,9 @@ static void DiagnoseNarrowingInInitList(Sema &S,
QualType EntityType,
const Expr *PostInit);
+static void CheckC23ConstexprInitConversion(Sema &S, QualType FromType,
+ QualType ToType, Expr *Init);
+
/// Provide warnings when std::move is used on construction.
static void CheckMoveOnConstruction(Sema &S, const Expr *InitExpr,
bool IsReturnStmt) {
@@ -8491,6 +7435,10 @@ Sema::CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
// are done in both CreateMaterializeTemporaryExpr and MaybeBindToTemporary,
// but there may be a chance to merge them.
Cleanup.setExprNeedsCleanups(false);
+ if (isInLifetimeExtendingContext()) {
+ auto &Record = ExprEvalContexts.back();
+ Record.ForRangeLifetimeExtendTemps.push_back(MTE);
+ }
return MTE;
}
@@ -8729,7 +7677,7 @@ ExprResult InitializationSequence::Perform(Sema &S,
// constant expressions here in order to perform narrowing checks =(
EnterExpressionEvaluationContext Evaluated(
S, EnterExpressionEvaluationContext::InitList,
- CurInit.get() && isa<InitListExpr>(CurInit.get()));
+ isa_and_nonnull<InitListExpr>(CurInit.get()));
// C++ [class.abstract]p2:
// no objects of an abstract class can be created except as subobjects
@@ -8999,19 +7947,18 @@ ExprResult InitializationSequence::Perform(Sema &S,
}
}
}
-
- Sema::CheckedConversionKind CCK
- = Kind.isCStyleCast()? Sema::CCK_CStyleCast
- : Kind.isFunctionalCast()? Sema::CCK_FunctionalCast
- : Kind.isExplicitCast()? Sema::CCK_OtherCast
- : Sema::CCK_ImplicitConversion;
- ExprResult CurInitExprRes =
- S.PerformImplicitConversion(CurInit.get(), Step->Type, *Step->ICS,
- getAssignmentAction(Entity), CCK);
+ Expr *Init = CurInit.get();
+ CheckedConversionKind CCK =
+ Kind.isCStyleCast() ? CheckedConversionKind::CStyleCast
+ : Kind.isFunctionalCast() ? CheckedConversionKind::FunctionalCast
+ : Kind.isExplicitCast() ? CheckedConversionKind::OtherCast
+ : CheckedConversionKind::Implicit;
+ ExprResult CurInitExprRes = S.PerformImplicitConversion(
+ Init, Step->Type, *Step->ICS, getAssignmentAction(Entity), CCK);
if (CurInitExprRes.isInvalid())
return ExprError();
- S.DiscardMisalignedMemberAddress(Step->Type.getTypePtr(), CurInit.get());
+ S.DiscardMisalignedMemberAddress(Step->Type.getTypePtr(), Init);
CurInit = CurInitExprRes;
@@ -9166,10 +8113,11 @@ ExprResult InitializationSequence::Perform(Sema &S,
case SK_CAssignment: {
QualType SourceType = CurInit.get()->getType();
+ Expr *Init = CurInit.get();
// Save off the initial CurInit in case we need to emit a diagnostic
- ExprResult InitialCurInit = CurInit;
- ExprResult Result = CurInit;
+ ExprResult InitialCurInit = Init;
+ ExprResult Result = Init;
Sema::AssignConvertType ConvTy =
S.CheckSingleAssignmentConstraints(Step->Type, Result, true,
Entity.getKind() == InitializedEntity::EK_Parameter_CF_Audited);
@@ -9188,6 +8136,23 @@ ExprResult InitializationSequence::Perform(Sema &S,
return ExprError();
CurInit = CurInitExprRes;
+ if (S.getLangOpts().C23 && initializingConstexprVariable(Entity)) {
+ CheckC23ConstexprInitConversion(S, SourceType, Entity.getType(),
+ CurInit.get());
+
+ // C23 6.7.1p6: If an object or subobject declared with storage-class
+ // specifier constexpr has pointer, integer, or arithmetic type, any
+ // explicit initializer value for it shall be null, an integer
+ // constant expression, or an arithmetic constant expression,
+ // respectively.
+ Expr::EvalResult ER;
+ if (Entity.getType()->getAs<PointerType>() &&
+ CurInit.get()->EvaluateAsRValue(ER, S.Context) &&
+ !ER.Val.isNullPointer()) {
+ S.Diag(Kind.getLocation(), diag::err_c23_constexpr_pointer_not_null);
+ }
+ }
+
bool Complained;
if (S.DiagnoseAssignmentResult(ConvTy, Kind.getLocation(),
Step->Type, SourceType,
@@ -9205,7 +8170,9 @@ ExprResult InitializationSequence::Perform(Sema &S,
QualType Ty = Step->Type;
bool UpdateType = ResultType && Entity.getType()->isIncompleteArrayType();
CheckStringInit(CurInit.get(), UpdateType ? *ResultType : Ty,
- S.Context.getAsArrayType(Ty), S);
+ S.Context.getAsArrayType(Ty), S,
+ S.getLangOpts().C23 &&
+ initializingConstexprVariable(Entity));
break;
}
@@ -9295,6 +8262,57 @@ ExprResult InitializationSequence::Perform(Sema &S,
// Wrap it in a construction of a std::initializer_list<T>.
CurInit = new (S.Context) CXXStdInitializerListExpr(Step->Type, MTE);
+ if (!Step->Type->isDependentType()) {
+ QualType ElementType;
+ [[maybe_unused]] bool IsStdInitializerList =
+ S.isStdInitializerList(Step->Type, &ElementType);
+ assert(IsStdInitializerList &&
+ "StdInitializerList step to non-std::initializer_list");
+ const CXXRecordDecl *Record =
+ Step->Type->getAsCXXRecordDecl()->getDefinition();
+ assert(Record && Record->isCompleteDefinition() &&
+ "std::initializer_list should have already be "
+ "complete/instantiated by this point");
+
+ auto InvalidType = [&] {
+ S.Diag(Record->getLocation(),
+ diag::err_std_initializer_list_malformed)
+ << Step->Type.getUnqualifiedType();
+ return ExprError();
+ };
+
+ if (Record->isUnion() || Record->getNumBases() != 0 ||
+ Record->isPolymorphic())
+ return InvalidType();
+
+ RecordDecl::field_iterator Field = Record->field_begin();
+ if (Field == Record->field_end())
+ return InvalidType();
+
+ // Start pointer
+ if (!Field->getType()->isPointerType() ||
+ !S.Context.hasSameType(Field->getType()->getPointeeType(),
+ ElementType.withConst()))
+ return InvalidType();
+
+ if (++Field == Record->field_end())
+ return InvalidType();
+
+ // Size or end pointer
+ if (const auto *PT = Field->getType()->getAs<PointerType>()) {
+ if (!S.Context.hasSameType(PT->getPointeeType(),
+ ElementType.withConst()))
+ return InvalidType();
+ } else {
+ if (Field->isBitField() ||
+ !S.Context.hasSameType(Field->getType(), S.Context.getSizeType()))
+ return InvalidType();
+ }
+
+ if (++Field != Record->field_end())
+ return InvalidType();
+ }
+
// Bind the result, in case the library has given initializer_list a
// non-trivial destructor.
if (shouldBindAsTemporary(Entity))
@@ -9456,7 +8474,7 @@ static bool DiagnoseUninitializedReference(Sema &S, SourceLocation Loc,
return false;
for (const auto *FI : RD->fields()) {
- if (FI->isUnnamedBitfield())
+ if (FI->isUnnamedBitField())
continue;
if (DiagnoseUninitializedReference(S, FI->getLocation(), FI->getType())) {
@@ -9490,12 +8508,12 @@ static void emitBadConversionNotes(Sema &S, const InitializedEntity &entity,
// Emit a possible note about the conversion failing because the
// operand is a message send with a related result type.
- S.EmitRelatedResultTypeNote(op);
+ S.ObjC().EmitRelatedResultTypeNote(op);
// Emit a possible note about a return failing because we're
// expecting a related result type.
if (entity.getKind() == InitializedEntity::EK_Result)
- S.EmitRelatedResultTypeNoteForReturn(destType);
+ S.ObjC().EmitRelatedResultTypeNoteForReturn(destType);
}
QualType fromType = op->getType();
QualType fromPointeeType = fromType.getCanonicalType()->getPointeeType();
@@ -9556,6 +8574,8 @@ bool InitializationSequence::Diagnose(Sema &S,
if (!Failed())
return false;
+ QualType DestType = Entity.getType();
+
// When we want to diagnose only one element of a braced-init-list,
// we need to factor it out.
Expr *OnlyArg;
@@ -9565,11 +8585,21 @@ bool InitializationSequence::Diagnose(Sema &S,
OnlyArg = List->getInit(0);
else
OnlyArg = Args[0];
+
+ if (OnlyArg->getType() == S.Context.OverloadTy) {
+ DeclAccessPair Found;
+ if (FunctionDecl *FD = S.ResolveAddressOfOverloadedFunction(
+ OnlyArg, DestType.getNonReferenceType(), /*Complain=*/false,
+ Found)) {
+ if (Expr *Resolved =
+ S.FixOverloadedFunctionReference(OnlyArg, Found, FD).get())
+ OnlyArg = Resolved;
+ }
+ }
}
else
OnlyArg = nullptr;
- QualType DestType = Entity.getType();
switch (Failure) {
case FK_TooManyInitsForReference:
// FIXME: Customize for the initialized entity?
@@ -9687,12 +8717,15 @@ bool InitializationSequence::Diagnose(Sema &S,
break;
}
case OR_Deleted: {
- S.Diag(Kind.getLocation(), diag::err_typecheck_deleted_function)
- << OnlyArg->getType() << DestType.getNonReferenceType()
- << Args[0]->getSourceRange();
OverloadCandidateSet::iterator Best;
OverloadingResult Ovl
= FailedCandidateSet.BestViableFunction(S, Kind.getLocation(), Best);
+
+ StringLiteral *Msg = Best->Function->getDeletedMessage();
+ S.Diag(Kind.getLocation(), diag::err_typecheck_deleted_function)
+ << OnlyArg->getType() << DestType.getNonReferenceType()
+ << (Msg != nullptr) << (Msg ? Msg->getString() : StringRef())
+ << Args[0]->getSourceRange();
if (Ovl == OR_Deleted) {
S.NoteDeletedFunction(Best->Function);
} else {
@@ -9948,11 +8981,15 @@ bool InitializationSequence::Diagnose(Sema &S,
// implicit.
if (S.isImplicitlyDeleted(Best->Function))
S.Diag(Kind.getLocation(), diag::err_ovl_deleted_special_init)
- << S.getSpecialMember(cast<CXXMethodDecl>(Best->Function))
- << DestType << ArgsRange;
- else
- S.Diag(Kind.getLocation(), diag::err_ovl_deleted_init)
+ << llvm::to_underlying(
+ S.getSpecialMember(cast<CXXMethodDecl>(Best->Function)))
<< DestType << ArgsRange;
+ else {
+ StringLiteral *Msg = Best->Function->getDeletedMessage();
+ S.Diag(Kind.getLocation(), diag::err_ovl_deleted_init)
+ << DestType << (Msg != nullptr)
+ << (Msg ? Msg->getString() : StringRef()) << ArgsRange;
+ }
S.NoteDeletedFunction(Best->Function);
break;
@@ -10494,6 +9531,69 @@ static void DiagnoseNarrowingInInitList(Sema &S,
S.getLocForEndOfToken(PostInit->getEndLoc()), ")");
}
+static void CheckC23ConstexprInitConversion(Sema &S, QualType FromType,
+ QualType ToType, Expr *Init) {
+ assert(S.getLangOpts().C23);
+ ImplicitConversionSequence ICS = S.TryImplicitConversion(
+ Init->IgnoreParenImpCasts(), ToType, /*SuppressUserConversions*/ false,
+ Sema::AllowedExplicit::None,
+ /*InOverloadResolution*/ false,
+ /*CStyle*/ false,
+ /*AllowObjCWritebackConversion=*/false);
+
+ if (!ICS.isStandard())
+ return;
+
+ APValue Value;
+ QualType PreNarrowingType;
+ // Reuse C++ narrowing check.
+ switch (ICS.Standard.getNarrowingKind(
+ S.Context, Init, Value, PreNarrowingType,
+ /*IgnoreFloatToIntegralConversion*/ false)) {
+ // The value doesn't fit.
+ case NK_Constant_Narrowing:
+ S.Diag(Init->getBeginLoc(), diag::err_c23_constexpr_init_not_representable)
+ << Value.getAsString(S.Context, PreNarrowingType) << ToType;
+ return;
+
+ // Conversion to a narrower type.
+ case NK_Type_Narrowing:
+ S.Diag(Init->getBeginLoc(), diag::err_c23_constexpr_init_type_mismatch)
+ << ToType << FromType;
+ return;
+
+ // Since we only reuse narrowing check for C23 constexpr variables here, we're
+ // not really interested in these cases.
+ case NK_Dependent_Narrowing:
+ case NK_Variable_Narrowing:
+ case NK_Not_Narrowing:
+ return;
+ }
+ llvm_unreachable("unhandled case in switch");
+}
+
+static void CheckC23ConstexprInitStringLiteral(const StringLiteral *SE,
+ Sema &SemaRef, QualType &TT) {
+ assert(SemaRef.getLangOpts().C23);
+ // character that string literal contains fits into TT - target type.
+ const ArrayType *AT = SemaRef.Context.getAsArrayType(TT);
+ QualType CharType = AT->getElementType();
+ uint32_t BitWidth = SemaRef.Context.getTypeSize(CharType);
+ bool isUnsigned = CharType->isUnsignedIntegerType();
+ llvm::APSInt Value(BitWidth, isUnsigned);
+ for (unsigned I = 0, N = SE->getLength(); I != N; ++I) {
+ int64_t C = SE->getCodeUnitS(I, SemaRef.Context.getCharWidth());
+ Value = C;
+ if (Value != C) {
+ SemaRef.Diag(SemaRef.getLocationOfStringLiteralByte(SE, I),
+ diag::err_c23_constexpr_init_not_representable)
+ << C << CharType;
+ return;
+ }
+ }
+ return;
+}
+
//===----------------------------------------------------------------------===//
// Initialization helper functions
//===----------------------------------------------------------------------===//
@@ -10594,13 +9694,40 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
if (TemplateName.isDependent())
return SubstAutoTypeDependent(TSInfo->getType());
- // We can only perform deduction for class templates.
+ // We can only perform deduction for class templates or alias templates.
auto *Template =
dyn_cast_or_null<ClassTemplateDecl>(TemplateName.getAsTemplateDecl());
+ TemplateDecl *LookupTemplateDecl = Template;
+ if (!Template) {
+ if (auto *AliasTemplate = dyn_cast_or_null<TypeAliasTemplateDecl>(
+ TemplateName.getAsTemplateDecl())) {
+ Diag(Kind.getLocation(),
+ diag::warn_cxx17_compat_ctad_for_alias_templates);
+ LookupTemplateDecl = AliasTemplate;
+ auto UnderlyingType = AliasTemplate->getTemplatedDecl()
+ ->getUnderlyingType()
+ .getCanonicalType();
+ // C++ [over.match.class.deduct#3]: ..., the defining-type-id of A must be
+ // of the form
+ // [typename] [nested-name-specifier] [template] simple-template-id
+ if (const auto *TST =
+ UnderlyingType->getAs<TemplateSpecializationType>()) {
+ Template = dyn_cast_or_null<ClassTemplateDecl>(
+ TST->getTemplateName().getAsTemplateDecl());
+ } else if (const auto *RT = UnderlyingType->getAs<RecordType>()) {
+ // Cases where template arguments in the RHS of the alias are not
+ // dependent. e.g.
+ // using AliasFoo = Foo<bool>;
+ if (const auto *CTSD = llvm::dyn_cast<ClassTemplateSpecializationDecl>(
+ RT->getAsCXXRecordDecl()))
+ Template = CTSD->getSpecializedTemplate();
+ }
+ }
+ }
if (!Template) {
Diag(Kind.getLocation(),
- diag::err_deduced_non_class_template_specialization_type)
- << (int)getTemplateNameKindForDiagnostics(TemplateName) << TemplateName;
+ diag::err_deduced_non_class_or_alias_template_specialization_type)
+ << (int)getTemplateNameKindForDiagnostics(TemplateName) << TemplateName;
if (auto *TD = TemplateName.getAsTemplateDecl())
NoteTemplateLocation(*TD);
return QualType();
@@ -10617,8 +9744,6 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
// FIXME: Perform "exact type" matching first, per CWG discussion?
// Or implement this via an implied 'T(T) -> T' deduction guide?
- // FIXME: Do we need/want a std::initializer_list<T> special case?
-
// Look up deduction guides, including those synthesized from constructors.
//
// C++1z [over.match.class.deduct]p1:
@@ -10627,10 +9752,10 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
// template-name, a function template [...]
// - For each deduction-guide, a function or function template [...]
DeclarationNameInfo NameInfo(
- Context.DeclarationNames.getCXXDeductionGuideName(Template),
+ Context.DeclarationNames.getCXXDeductionGuideName(LookupTemplateDecl),
TSInfo->getTypeLoc().getEndLoc());
LookupResult Guides(*this, NameInfo, LookupOrdinaryName);
- LookupQualifiedName(Guides, Template->getDeclContext());
+ LookupQualifiedName(Guides, LookupTemplateDecl->getDeclContext());
// FIXME: Do not diagnose inaccessible deduction guides. The standard isn't
// clear on this, but they're not found by name so access does not apply.
@@ -10699,11 +9824,6 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
// C++ [over.best.ics]p4:
// When [...] the constructor [...] is a candidate by
// - [over.match.copy] (in all cases)
- // FIXME: The "second phase of [over.match.list] case can also
- // theoretically happen here, but it's not clear whether we can
- // ever have a parameter of the right type.
- bool SuppressUserConversions = Kind.isCopyInit();
-
if (TD) {
SmallVector<Expr *, 8> TmpInits;
for (Expr *E : Inits)
@@ -10713,12 +9833,12 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
TmpInits.push_back(E);
AddTemplateOverloadCandidate(
TD, FoundDecl, /*ExplicitArgs=*/nullptr, TmpInits, Candidates,
- SuppressUserConversions,
+ /*SuppressUserConversions=*/false,
/*PartialOverloading=*/false, AllowExplicit, ADLCallKind::NotADL,
/*PO=*/{}, AllowAggregateDeductionCandidate);
} else {
AddOverloadCandidate(GD, FoundDecl, Inits, Candidates,
- SuppressUserConversions,
+ /*SuppressUserConversions=*/false,
/*PartialOverloading=*/false, AllowExplicit);
}
};
@@ -10750,14 +9870,14 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
// if e_i is of array type and x_i is a braced-init-list, T_i is an
// rvalue reference to the declared type of e_i and
// C++ [over.match.class.deduct]p1.9:
- // if e_i is of array type and x_i is a bstring-literal, T_i is an
+ // if e_i is of array type and x_i is a string-literal, T_i is an
// lvalue reference to the const-qualified declared type of e_i and
// C++ [over.match.class.deduct]p1.10:
// otherwise, T_i is the declared type of e_i
for (int I = 0, E = ListInit->getNumInits();
I < E && !isa<PackExpansionType>(ElementTypes[I]); ++I)
if (ElementTypes[I]->isArrayType()) {
- if (isa<InitListExpr>(ListInit->getInit(I)))
+ if (isa<InitListExpr, DesignatedInitExpr>(ListInit->getInit(I)))
ElementTypes[I] = Context.getRValueReferenceType(ElementTypes[I]);
else if (isa<StringLiteral>(
ListInit->getInit(I)->IgnoreParenImpCasts()))
@@ -10765,32 +9885,16 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
Context.getLValueReferenceType(ElementTypes[I].withConst());
}
- llvm::FoldingSetNodeID ID;
- ID.AddPointer(Template);
- for (auto &T : ElementTypes)
- T.getCanonicalType().Profile(ID);
- unsigned Hash = ID.ComputeHash();
- if (AggregateDeductionCandidates.count(Hash) == 0) {
- if (FunctionTemplateDecl *TD =
- DeclareImplicitDeductionGuideFromInitList(
- Template, ElementTypes,
- TSInfo->getTypeLoc().getEndLoc())) {
- auto *GD = cast<CXXDeductionGuideDecl>(TD->getTemplatedDecl());
- GD->setDeductionCandidateKind(DeductionCandidate::Aggregate);
- AggregateDeductionCandidates[Hash] = GD;
- addDeductionCandidate(TD, GD, DeclAccessPair::make(TD, AS_public),
- OnlyListConstructors,
- /*AllowAggregateDeductionCandidate=*/true);
- }
- } else {
- CXXDeductionGuideDecl *GD = AggregateDeductionCandidates[Hash];
- FunctionTemplateDecl *TD = GD->getDescribedFunctionTemplate();
- assert(TD && "aggregate deduction candidate is function template");
+ if (FunctionTemplateDecl *TD =
+ DeclareAggregateDeductionGuideFromInitList(
+ LookupTemplateDecl, ElementTypes,
+ TSInfo->getTypeLoc().getEndLoc())) {
+ auto *GD = cast<CXXDeductionGuideDecl>(TD->getTemplatedDecl());
addDeductionCandidate(TD, GD, DeclAccessPair::make(TD, AS_public),
OnlyListConstructors,
/*AllowAggregateDeductionCandidate=*/true);
+ HasAnyDeductionGuide = true;
}
- HasAnyDeductionGuide = true;
}
};
@@ -10910,6 +10014,9 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
}
case OR_Deleted: {
+ // FIXME: There are no tests for this diagnostic, and it doesn't seem
+ // like we ever get here; attempts to trigger this seem to yield a
+    // generic 'call to deleted function' diagnostic instead.
Diag(Kind.getLocation(), diag::err_deduced_class_template_deleted)
<< TemplateName;
NoteDeletedFunction(Best->Function);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp b/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp
index 5b95bae567b7..809b94bb7412 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp
@@ -9,17 +9,20 @@
// This file implements semantic analysis for C++ lambda expressions.
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/SemaLambda.h"
#include "TypeLocBuilder.h"
#include "clang/AST/ASTLambda.h"
+#include "clang/AST/CXXInheritance.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaCUDA.h"
#include "clang/Sema/SemaInternal.h"
-#include "clang/Sema/SemaLambda.h"
+#include "clang/Sema/SemaOpenMP.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/STLExtras.h"
#include <optional>
@@ -384,30 +387,69 @@ buildTypeForLambdaCallOperator(Sema &S, clang::CXXRecordDecl *Class,
// parameter, if any, of the lambda's function call operator (possibly
// instantiated from a function call operator template) shall be either:
// - the closure type,
-// - class type derived from the closure type, or
+// - class type publicly and unambiguously derived from the closure type, or
// - a reference to a possibly cv-qualified such type.
-void Sema::DiagnoseInvalidExplicitObjectParameterInLambda(
- CXXMethodDecl *Method) {
+bool Sema::DiagnoseInvalidExplicitObjectParameterInLambda(
+ CXXMethodDecl *Method, SourceLocation CallLoc) {
if (!isLambdaCallWithExplicitObjectParameter(Method))
- return;
+ return false;
CXXRecordDecl *RD = Method->getParent();
if (Method->getType()->isDependentType())
- return;
+ return false;
if (RD->isCapturelessLambda())
- return;
- QualType ExplicitObjectParameterType = Method->getParamDecl(0)
- ->getType()
+ return false;
+
+ ParmVarDecl *Param = Method->getParamDecl(0);
+ QualType ExplicitObjectParameterType = Param->getType()
.getNonReferenceType()
.getUnqualifiedType()
.getDesugaredType(getASTContext());
QualType LambdaType = getASTContext().getRecordType(RD);
if (LambdaType == ExplicitObjectParameterType)
- return;
- if (IsDerivedFrom(RD->getLocation(), ExplicitObjectParameterType, LambdaType))
- return;
- Diag(Method->getParamDecl(0)->getLocation(),
- diag::err_invalid_explicit_object_type_in_lambda)
- << ExplicitObjectParameterType;
+ return false;
+
+ // Don't check the same instantiation twice.
+ //
+ // If this call operator is ill-formed, there is no point in issuing
+ // a diagnostic every time it is called because the problem is in the
+ // definition of the derived type, not at the call site.
+ //
+ // FIXME: Move this check to where we instantiate the method? This should
+ // be possible, but the naive approach of just marking the method as invalid
+ // leads to us emitting more diagnostics than we should have to for this case
+ // (1 error here *and* 1 error about there being no matching overload at the
+ // call site). It might be possible to avoid that by also checking if there
+ // is an empty cast path for the method stored in the context (signalling that
+ // we've already diagnosed it) and then just not building the call, but that
+ // doesn't really seem any simpler than diagnosing it at the call site...
+ if (auto It = Context.LambdaCastPaths.find(Method);
+ It != Context.LambdaCastPaths.end())
+ return It->second.empty();
+
+ CXXCastPath &Path = Context.LambdaCastPaths[Method];
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+ if (!IsDerivedFrom(RD->getLocation(), ExplicitObjectParameterType, LambdaType,
+ Paths)) {
+ Diag(Param->getLocation(), diag::err_invalid_explicit_object_type_in_lambda)
+ << ExplicitObjectParameterType;
+ return true;
+ }
+
+ if (Paths.isAmbiguous(LambdaType->getCanonicalTypeUnqualified())) {
+ std::string PathsDisplay = getAmbiguousPathsDisplayString(Paths);
+ Diag(CallLoc, diag::err_explicit_object_lambda_ambiguous_base)
+ << LambdaType << PathsDisplay;
+ return true;
+ }
+
+ if (CheckBaseClassAccess(CallLoc, LambdaType, ExplicitObjectParameterType,
+ Paths.front(),
+ diag::err_explicit_object_lambda_inaccessible_base))
+ return true;
+
+ BuildBasePathArray(Paths, Path);
+ return false;
}
void Sema::handleLambdaNumbering(
@@ -1034,16 +1076,27 @@ void Sema::ActOnLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro,
// be dependent, because there are template parameters in scope.
CXXRecordDecl::LambdaDependencyKind LambdaDependencyKind =
CXXRecordDecl::LDK_Unknown;
- if (LSI->NumExplicitTemplateParams > 0) {
- Scope *TemplateParamScope = CurScope->getTemplateParamParent();
- assert(TemplateParamScope &&
- "Lambda with explicit template param list should establish a "
- "template param scope");
- assert(TemplateParamScope->getParent());
- if (TemplateParamScope->getParent()->getTemplateParamParent() != nullptr)
- LambdaDependencyKind = CXXRecordDecl::LDK_AlwaysDependent;
- } else if (CurScope->getTemplateParamParent() != nullptr) {
+ if (CurScope->getTemplateParamParent() != nullptr) {
LambdaDependencyKind = CXXRecordDecl::LDK_AlwaysDependent;
+ } else if (Scope *P = CurScope->getParent()) {
+ // Given a lambda defined inside a requires expression,
+ //
+ // struct S {
+ // S(auto var) requires requires { [&] -> decltype(var) { }; }
+ // {}
+ // };
+ //
+ // The parameter var is not injected into the function Decl at the point of
+ // parsing lambda. In such scenarios, perceiving it as dependent could
+ // result in the constraint being evaluated, which matches what GCC does.
+ while (P->getEntity() && P->getEntity()->isRequiresExprBody())
+ P = P->getParent();
+ if (P->isFunctionDeclarationScope() &&
+ llvm::any_of(P->decls(), [](Decl *D) {
+ return isa<ParmVarDecl>(D) &&
+ cast<ParmVarDecl>(D)->getType()->isTemplateTypeParmType();
+ }))
+ LambdaDependencyKind = CXXRecordDecl::LDK_AlwaysDependent;
}
CXXRecordDecl *Class = createLambdaClosureType(
@@ -1193,7 +1246,11 @@ void Sema::ActOnLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro,
if (auto *BD = R.getAsSingle<BindingDecl>())
Var = BD;
- else
+ else if (R.getAsSingle<FieldDecl>()) {
+ Diag(C->Loc, diag::err_capture_class_member_does_not_name_variable)
+ << C->Id;
+ continue;
+ } else
Var = R.getAsSingle<VarDecl>();
if (Var && DiagnoseUseOfDecl(Var, C->Loc))
continue;
@@ -1261,7 +1318,6 @@ void Sema::ActOnLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro,
if (C->Init.isUsable()) {
addInitCapture(LSI, cast<VarDecl>(Var), C->Kind == LCK_ByRef);
- PushOnScopeChains(Var, CurScope, false);
} else {
TryCaptureKind Kind = C->Kind == LCK_ByRef ? TryCapture_ExplicitByRef
: TryCapture_ExplicitByVal;
@@ -1323,6 +1379,8 @@ void Sema::ActOnLambdaClosureParameters(
AddTemplateParametersToLambdaCallOperator(LSI->CallOperator, LSI->Lambda,
TemplateParams);
LSI->Lambda->setLambdaIsGeneric(true);
+ LSI->ContainsUnexpandedParameterPack |=
+ TemplateParams->containsUnexpandedParameterPack();
}
LSI->AfterParameterList = true;
}
@@ -1393,11 +1451,11 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
// CUDA lambdas get implicit host and device attributes.
if (getLangOpts().CUDA)
- CUDASetLambdaAttrs(Method);
+ CUDA().SetLambdaAttrs(Method);
// OpenMP lambdas might get assumumption attributes.
if (LangOpts.OpenMP)
- ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Method);
+ OpenMP().ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Method);
handleLambdaNumbering(Class, Method);
@@ -2136,7 +2194,7 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
CaptureInits.push_back(Init.get());
if (LangOpts.CUDA)
- CUDACheckLambdaCapture(CallOperator, From);
+ CUDA().CheckLambdaCapture(CallOperator, From);
}
Class->setCaptures(Context, Captures);
@@ -2326,23 +2384,37 @@ Sema::LambdaScopeForCallOperatorInstantiationRAII::
SemaRef.RebuildLambdaScopeInfo(cast<CXXMethodDecl>(FD));
- FunctionDecl *Pattern = getPatternFunctionDecl(FD);
- if (Pattern) {
- SemaRef.addInstantiatedCapturesToScope(FD, Pattern, Scope, MLTAL);
+ FunctionDecl *FDPattern = getPatternFunctionDecl(FD);
+ if (!FDPattern)
+ return;
- FunctionDecl *ParentFD = FD;
- while (ShouldAddDeclsFromParentScope) {
+ SemaRef.addInstantiatedCapturesToScope(FD, FDPattern, Scope, MLTAL);
- ParentFD =
- dyn_cast<FunctionDecl>(getLambdaAwareParentOfDeclContext(ParentFD));
- Pattern =
- dyn_cast<FunctionDecl>(getLambdaAwareParentOfDeclContext(Pattern));
+ if (!ShouldAddDeclsFromParentScope)
+ return;
- if (!FD || !Pattern)
- break;
+ llvm::SmallVector<std::pair<FunctionDecl *, FunctionDecl *>, 4>
+ ParentInstantiations;
+ while (true) {
+ FDPattern =
+ dyn_cast<FunctionDecl>(getLambdaAwareParentOfDeclContext(FDPattern));
+ FD = dyn_cast<FunctionDecl>(getLambdaAwareParentOfDeclContext(FD));
- SemaRef.addInstantiatedParametersToScope(ParentFD, Pattern, Scope, MLTAL);
- SemaRef.addInstantiatedLocalVarsToScope(ParentFD, Pattern, Scope);
- }
+ if (!FDPattern || !FD)
+ break;
+
+ ParentInstantiations.emplace_back(FDPattern, FD);
+ }
+
+ // Add instantiated parameters and local vars to scopes, starting from the
+ // outermost lambda to the innermost lambda. This ordering ensures that
+ // parameters in inner lambdas can correctly depend on those defined
+ // in outer lambdas, e.g. auto L = [](auto... x) {
+ // return [](decltype(x)... y) { }; // `y` depends on `x`
+ // };
+
+ for (const auto &[FDPattern, FD] : llvm::reverse(ParentInstantiations)) {
+ SemaRef.addInstantiatedParametersToScope(FD, FDPattern, Scope, MLTAL);
+ SemaRef.addInstantiatedLocalVarsToScope(FD, FDPattern, Scope);
}
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp b/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
index 02b1a045df44..d3d4bf27ae72 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
@@ -34,9 +34,11 @@
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaRISCV.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TypoCorrection.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/ADT/edit_distance.h"
@@ -568,7 +570,7 @@ void LookupResult::resolveKind() {
// For non-type declarations, check for a prior lookup result naming this
// canonical declaration.
- if (!D->isPlaceholderVar(getSema().getLangOpts()) && !ExistingI) {
+ if (!ExistingI) {
auto UniqueResult = Unique.insert(std::make_pair(D, I));
if (!UniqueResult.second) {
// We've seen this entity before.
@@ -912,8 +914,6 @@ static void InsertOCLBuiltinDeclarationsFromTable(Sema &S, LookupResult &LR,
LR.resolveKind();
}
-/// Lookup a builtin function, when name lookup would otherwise
-/// fail.
bool Sema::LookupBuiltin(LookupResult &R) {
Sema::LookupNameKind NameKind = R.getLookupKind();
@@ -944,13 +944,13 @@ bool Sema::LookupBuiltin(LookupResult &R) {
}
}
- if (DeclareRISCVVBuiltins || DeclareRISCVSiFiveVectorBuiltins) {
- if (!RVIntrinsicManager)
- RVIntrinsicManager = CreateRISCVIntrinsicManager(*this);
+ if (RISCV().DeclareRVVBuiltins || RISCV().DeclareSiFiveVectorBuiltins) {
+ if (!RISCV().IntrinsicManager)
+ RISCV().IntrinsicManager = CreateRISCVIntrinsicManager(*this);
- RVIntrinsicManager->InitIntrinsicList();
+ RISCV().IntrinsicManager->InitIntrinsicList();
- if (RVIntrinsicManager->CreateIntrinsicIfFound(R, II, PP))
+ if (RISCV().IntrinsicManager->CreateIntrinsicIfFound(R, II, PP))
return true;
}
@@ -1200,8 +1200,8 @@ static bool LookupDirect(Sema &S, LookupResult &R, const DeclContext *DC) {
// Perform template argument deduction against the type that we would
// expect the function to have.
if (R.getSema().DeduceTemplateArguments(ConvTemplate, nullptr, ExpectedType,
- Specialization, Info)
- == Sema::TDK_Success) {
+ Specialization, Info) ==
+ TemplateDeductionResult::Success) {
R.addDecl(Specialization);
Found = true;
}
@@ -1282,6 +1282,18 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
DeclareImplicitMemberFunctionsWithName(*this, Name, R.getNameLoc(), DC);
}
+ // C++23 [temp.dep.general]p2:
+ // The component name of an unqualified-id is dependent if
+ // - it is a conversion-function-id whose conversion-type-id
+ // is dependent, or
+ // - it is operator= and the current class is a templated entity, or
+ // - the unqualified-id is the postfix-expression in a dependent call.
+ if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName &&
+ Name.getCXXNameType()->isDependentType()) {
+ R.setNotFoundInCurrentInstantiation();
+ return false;
+ }
+
// Implicitly declare member functions with the name we're looking for, if in
// fact we are in a scope where it matters.
@@ -1580,7 +1592,6 @@ llvm::DenseSet<Module*> &Sema::getLookupModules() {
return LookupModulesCache;
}
-/// Determine if we could use all the declarations in the module.
bool Sema::isUsableModule(const Module *M) {
assert(M && "We shouldn't check nullness for module here");
// Return quickly if we cached the result.
@@ -1592,22 +1603,32 @@ bool Sema::isUsableModule(const Module *M) {
// [module.global.frag]p1:
// The global module fragment can be used to provide declarations that are
// attached to the global module and usable within the module unit.
- if (M == TheGlobalModuleFragment || M == TheImplicitGlobalModuleFragment ||
- // If M is the module we're parsing, it should be usable. This covers the
- // private module fragment. The private module fragment is usable only if
- // it is within the current module unit. And it must be the current
- // parsing module unit if it is within the current module unit according
- // to the grammar of the private module fragment. NOTE: This is covered by
- // the following condition. The intention of the check is to avoid string
- // comparison as much as possible.
- M == getCurrentModule() ||
- // The module unit which is in the same module with the current module
- // unit is usable.
- //
- // FIXME: Here we judge if they are in the same module by comparing the
- // string. Is there any better solution?
- M->getPrimaryModuleInterfaceName() ==
- llvm::StringRef(getLangOpts().CurrentModule).split(':').first) {
+ if (M == TheGlobalModuleFragment || M == TheImplicitGlobalModuleFragment) {
+ UsableModuleUnitsCache.insert(M);
+ return true;
+ }
+
+ // Otherwise, the global module fragment from other translation unit is not
+ // directly usable.
+ if (M->isGlobalModule())
+ return false;
+
+ Module *Current = getCurrentModule();
+
+ // If we're not parsing a module, we can't use all the declarations from
+ // another module easily.
+ if (!Current)
+ return false;
+
+ // If M is the module we're parsing or M and the current module unit lives in
+ // the same module, M should be usable.
+ //
+ // Note: It should be fine to search the vector `ModuleScopes` linearly since
+ // it should be generally small enough. There should be rare module fragments
+ // in a named module unit.
+ if (llvm::count_if(ModuleScopes,
+ [&M](const ModuleScope &MS) { return MS.Module == M; }) ||
+ getASTContext().isInSameModule(M, Current)) {
UsableModuleUnitsCache.insert(M);
return true;
}
@@ -2151,34 +2172,6 @@ bool LookupResult::isAvailableForLookup(Sema &SemaRef, NamedDecl *ND) {
return false;
}
-/// Perform unqualified name lookup starting from a given
-/// scope.
-///
-/// Unqualified name lookup (C++ [basic.lookup.unqual], C99 6.2.1) is
-/// used to find names within the current scope. For example, 'x' in
-/// @code
-/// int x;
-/// int f() {
-/// return x; // unqualified name look finds 'x' in the global scope
-/// }
-/// @endcode
-///
-/// Different lookup criteria can find different names. For example, a
-/// particular scope can have both a struct and a function of the same
-/// name, and each can be found by certain lookup criteria. For more
-/// information about lookup criteria, see the documentation for the
-/// class LookupCriteria.
-///
-/// @param S The scope from which unqualified name lookup will
-/// begin. If the lookup criteria permits, name lookup may also search
-/// in the parent scopes.
-///
-/// @param [in,out] R Specifies the lookup to perform (e.g., the name to
-/// look up and the lookup kind), and is updated with the results of lookup
-/// including zero or more declarations and possibly additional information
-/// used to diagnose ambiguities.
-///
-/// @returns \c true if lookup succeeded and false otherwise.
bool Sema::LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation,
bool ForceNoCPlusPlus) {
DeclarationName Name = R.getLookupName();
@@ -2332,7 +2325,7 @@ static bool LookupQualifiedNameInUsingDirectives(Sema &S, LookupResult &R,
// We have already looked into the initial namespace; seed the queue
// with its using-children.
for (auto *I : StartDC->using_directives()) {
- NamespaceDecl *ND = I->getNominatedNamespace()->getOriginalNamespace();
+ NamespaceDecl *ND = I->getNominatedNamespace()->getFirstDecl();
if (S.isVisible(I) && Visited.insert(ND).second)
Queue.push_back(ND);
}
@@ -2396,28 +2389,6 @@ static bool LookupQualifiedNameInUsingDirectives(Sema &S, LookupResult &R,
return Found;
}
-/// Perform qualified name lookup into a given context.
-///
-/// Qualified name lookup (C++ [basic.lookup.qual]) is used to find
-/// names when the context of those names is explicit specified, e.g.,
-/// "std::vector" or "x->member", or as part of unqualified name lookup.
-///
-/// Different lookup criteria can find different names. For example, a
-/// particular scope can have both a struct and a function of the same
-/// name, and each can be found by certain lookup criteria. For more
-/// information about lookup criteria, see the documentation for the
-/// class LookupCriteria.
-///
-/// \param R captures both the lookup criteria and any lookup results found.
-///
-/// \param LookupCtx The context in which qualified name lookup will
-/// search. If the lookup criteria permits, name lookup may also search
-/// in the parent contexts or (for C++ classes) base classes.
-///
-/// \param InUnqualifiedLookup true if this is qualified name lookup that
-/// occurs as part of unqualified name lookup.
-///
-/// \returns true if lookup succeeded, false if it failed.
bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup) {
assert(LookupCtx && "Sema::LookupQualifiedName requires a lookup context");
@@ -2445,10 +2416,31 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
}
} QL(LookupCtx);
+ CXXRecordDecl *LookupRec = dyn_cast<CXXRecordDecl>(LookupCtx);
+ // FIXME: Per [temp.dep.general]p2, an unqualified name is also dependent
+ // if it's a dependent conversion-function-id or operator= where the current
+ // class is a templated entity. This should be handled in LookupName.
+ if (!InUnqualifiedLookup && !R.isForRedeclaration()) {
+ // C++23 [temp.dep.type]p5:
+ // A qualified name is dependent if
+ // - it is a conversion-function-id whose conversion-type-id
+ // is dependent, or
+ // - [...]
+ // - its lookup context is the current instantiation and it
+ // is operator=, or
+ // - [...]
+ if (DeclarationName Name = R.getLookupName();
+ Name.getNameKind() == DeclarationName::CXXConversionFunctionName &&
+ Name.getCXXNameType()->isDependentType()) {
+ R.setNotFoundInCurrentInstantiation();
+ return false;
+ }
+ }
+
if (LookupDirect(*this, R, LookupCtx)) {
R.resolveKind();
- if (isa<CXXRecordDecl>(LookupCtx))
- R.setNamingClass(cast<CXXRecordDecl>(LookupCtx));
+ if (LookupRec)
+ R.setNamingClass(LookupRec);
return true;
}
@@ -2470,7 +2462,6 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
// If this isn't a C++ class, we aren't allowed to look into base
// classes, we're done.
- CXXRecordDecl *LookupRec = dyn_cast<CXXRecordDecl>(LookupCtx);
if (!LookupRec || !LookupRec->getDefinition())
return false;
@@ -2672,21 +2663,6 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
return true;
}
-/// Performs qualified name lookup or special type of lookup for
-/// "__super::" scope specifier.
-///
-/// This routine is a convenience overload meant to be called from contexts
-/// that need to perform a qualified name lookup with an optional C++ scope
-/// specifier that might require special kind of lookup.
-///
-/// \param R captures both the lookup criteria and any lookup results found.
-///
-/// \param LookupCtx The context in which qualified name lookup will
-/// search.
-///
-/// \param SS An optional C++ scope-specifier.
-///
-/// \returns true if lookup succeeded, false if it failed.
bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS) {
auto *NNS = SS.getScopeRep();
@@ -2697,69 +2673,60 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
return LookupQualifiedName(R, LookupCtx);
}
-/// Performs name lookup for a name that was parsed in the
-/// source code, and may contain a C++ scope specifier.
-///
-/// This routine is a convenience routine meant to be called from
-/// contexts that receive a name and an optional C++ scope specifier
-/// (e.g., "N::M::x"). It will then perform either qualified or
-/// unqualified name lookup (with LookupQualifiedName or LookupName,
-/// respectively) on the given name and return those results. It will
-/// perform a special type of lookup for "__super::" scope specifier.
-///
-/// @param S The scope from which unqualified name lookup will
-/// begin.
-///
-/// @param SS An optional C++ scope-specifier, e.g., "::N::M".
-///
-/// @param EnteringContext Indicates whether we are going to enter the
-/// context of the scope-specifier SS (if present).
-///
-/// @returns True if any decls were found (but possibly ambiguous)
bool Sema::LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
- bool AllowBuiltinCreation, bool EnteringContext) {
- if (SS && SS->isInvalid()) {
- // When the scope specifier is invalid, don't even look for
- // anything.
+ QualType ObjectType, bool AllowBuiltinCreation,
+ bool EnteringContext) {
+ // When the scope specifier is invalid, don't even look for anything.
+ if (SS && SS->isInvalid())
return false;
- }
-
- if (SS && SS->isSet()) {
- NestedNameSpecifier *NNS = SS->getScopeRep();
- if (NNS->getKind() == NestedNameSpecifier::Super)
- return LookupInSuper(R, NNS->getAsRecordDecl());
- if (DeclContext *DC = computeDeclContext(*SS, EnteringContext)) {
- // We have resolved the scope specifier to a particular declaration
- // contex, and will perform name lookup in that context.
+ // Determine where to perform name lookup
+ DeclContext *DC = nullptr;
+ bool IsDependent = false;
+ if (!ObjectType.isNull()) {
+ // This nested-name-specifier occurs in a member access expression, e.g.,
+ // x->B::f, and we are looking into the type of the object.
+ assert((!SS || SS->isEmpty()) &&
+ "ObjectType and scope specifier cannot coexist");
+ DC = computeDeclContext(ObjectType);
+ IsDependent = !DC && ObjectType->isDependentType();
+ assert(((!DC && ObjectType->isDependentType()) ||
+ !ObjectType->isIncompleteType() || !ObjectType->getAs<TagType>() ||
+ ObjectType->castAs<TagType>()->isBeingDefined()) &&
+ "Caller should have completed object type");
+ } else if (SS && SS->isNotEmpty()) {
+ // This nested-name-specifier occurs after another nested-name-specifier,
+    // so look into the context associated with the prior nested-name-specifier.
+ if ((DC = computeDeclContext(*SS, EnteringContext))) {
+ // The declaration context must be complete.
if (!DC->isDependentContext() && RequireCompleteDeclContext(*SS, DC))
return false;
-
R.setContextRange(SS->getRange());
- return LookupQualifiedName(R, DC);
+ // FIXME: '__super' lookup semantics could be implemented by a
+ // LookupResult::isSuperLookup flag which skips the initial search of
+ // the lookup context in LookupQualified.
+ if (NestedNameSpecifier *NNS = SS->getScopeRep();
+ NNS->getKind() == NestedNameSpecifier::Super)
+ return LookupInSuper(R, NNS->getAsRecordDecl());
}
+ IsDependent = !DC && isDependentScopeSpecifier(*SS);
+ } else {
+ // Perform unqualified name lookup starting in the given scope.
+ return LookupName(R, S, AllowBuiltinCreation);
+ }
+ // If we were able to compute a declaration context, perform qualified name
+ // lookup in that context.
+ if (DC)
+ return LookupQualifiedName(R, DC);
+ else if (IsDependent)
// We could not resolve the scope specified to a specific declaration
// context, which means that SS refers to an unknown specialization.
// Name lookup can't find anything in this case.
R.setNotFoundInCurrentInstantiation();
- R.setContextRange(SS->getRange());
- return false;
- }
-
- // Perform unqualified name lookup starting in the given scope.
- return LookupName(R, S, AllowBuiltinCreation);
+ return false;
}
-/// Perform qualified name lookup into all base classes of the given
-/// class.
-///
-/// \param R captures both the lookup criteria and any lookup results found.
-///
-/// \param Class The context in which qualified name lookup will
-/// search. Name lookup will search in all base classes merging the results.
-///
-/// @returns True if any decls were found (but possibly ambiguous)
bool Sema::LookupInSuper(LookupResult &R, CXXRecordDecl *Class) {
// The access-control rules we use here are essentially the rules for
// doing a lookup in Class that just magically skipped the direct
@@ -2789,10 +2756,6 @@ bool Sema::LookupInSuper(LookupResult &R, CXXRecordDecl *Class) {
return !R.empty();
}
-/// Produce a diagnostic describing the ambiguity that resulted
-/// from name lookup.
-///
-/// \param Result The result of the ambiguous lookup to be diagnosed.
void Sema::DiagnoseAmbiguousLookup(LookupResult &Result) {
assert(Result.isAmbiguous() && "Lookup result must be ambiguous");
@@ -3243,6 +3206,10 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
case Type::Pipe:
T = cast<PipeType>(T)->getElementType().getTypePtr();
continue;
+
+ // Array parameter types are treated as fundamental types.
+ case Type::ArrayParameter:
+ break;
}
if (Queue.empty())
@@ -3251,13 +3218,6 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
}
}
-/// Find the associated classes and namespaces for
-/// argument-dependent lookup for a call with the given set of
-/// arguments.
-///
-/// This routine computes the sets of associated classes and associated
-/// namespaces searched by argument-dependent lookup
-/// (C++ [basic.lookup.argdep]) for a given set of arguments.
void Sema::FindAssociatedClassesAndNamespaces(
SourceLocation InstantiationLoc, ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
@@ -3312,15 +3272,6 @@ NamedDecl *Sema::LookupSingleName(Scope *S, DeclarationName Name,
return R.getAsSingle<NamedDecl>();
}
-/// Find the protocol with the given name, if any.
-ObjCProtocolDecl *Sema::LookupProtocol(IdentifierInfo *II,
- SourceLocation IdLoc,
- RedeclarationKind Redecl) {
- Decl *D = LookupSingleName(TUScope, II, IdLoc,
- LookupObjCProtocolName, Redecl);
- return cast_or_null<ObjCProtocolDecl>(D);
-}
-
void Sema::LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
UnresolvedSetImpl &Functions) {
// C++ [over.match.oper]p3:
@@ -3337,21 +3288,20 @@ void Sema::LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
Functions.append(Operators.begin(), Operators.end());
}
-Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD,
- CXXSpecialMember SM,
- bool ConstArg,
- bool VolatileArg,
- bool RValueThis,
- bool ConstThis,
- bool VolatileThis) {
+Sema::SpecialMemberOverloadResult
+Sema::LookupSpecialMember(CXXRecordDecl *RD, CXXSpecialMemberKind SM,
+ bool ConstArg, bool VolatileArg, bool RValueThis,
+ bool ConstThis, bool VolatileThis) {
assert(CanDeclareSpecialMemberFunction(RD) &&
"doing special member lookup into record that isn't fully complete");
RD = RD->getDefinition();
if (RValueThis || ConstThis || VolatileThis)
- assert((SM == CXXCopyAssignment || SM == CXXMoveAssignment) &&
+ assert((SM == CXXSpecialMemberKind::CopyAssignment ||
+ SM == CXXSpecialMemberKind::MoveAssignment) &&
"constructors and destructors always have unqualified lvalue this");
if (ConstArg || VolatileArg)
- assert((SM != CXXDefaultConstructor && SM != CXXDestructor) &&
+ assert((SM != CXXSpecialMemberKind::DefaultConstructor &&
+ SM != CXXSpecialMemberKind::Destructor) &&
"parameter-less special members can't have qualified arguments");
// FIXME: Get the caller to pass in a location for the lookup.
@@ -3359,7 +3309,7 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD,
llvm::FoldingSetNodeID ID;
ID.AddPointer(RD);
- ID.AddInteger(SM);
+ ID.AddInteger(llvm::to_underlying(SM));
ID.AddInteger(ConstArg);
ID.AddInteger(VolatileArg);
ID.AddInteger(RValueThis);
@@ -3378,7 +3328,7 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD,
Result = new (Result) SpecialMemberOverloadResultEntry(ID);
SpecialMemberCache.InsertNode(Result, InsertPoint);
- if (SM == CXXDestructor) {
+ if (SM == CXXSpecialMemberKind::Destructor) {
if (RD->needsImplicitDestructor()) {
runWithSufficientStackSpace(RD->getLocation(), [&] {
DeclareImplicitDestructor(RD);
@@ -3402,7 +3352,7 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD,
QualType ArgType = CanTy;
ExprValueKind VK = VK_LValue;
- if (SM == CXXDefaultConstructor) {
+ if (SM == CXXSpecialMemberKind::DefaultConstructor) {
Name = Context.DeclarationNames.getCXXConstructorName(CanTy);
NumArgs = 0;
if (RD->needsImplicitDefaultConstructor()) {
@@ -3411,7 +3361,8 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD,
});
}
} else {
- if (SM == CXXCopyConstructor || SM == CXXMoveConstructor) {
+ if (SM == CXXSpecialMemberKind::CopyConstructor ||
+ SM == CXXSpecialMemberKind::MoveConstructor) {
Name = Context.DeclarationNames.getCXXConstructorName(CanTy);
if (RD->needsImplicitCopyConstructor()) {
runWithSufficientStackSpace(RD->getLocation(), [&] {
@@ -3449,7 +3400,8 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD,
// Possibly an XValue is actually correct in the case of move, but
// there is no semantic difference for class types in this restricted
// case.
- if (SM == CXXCopyConstructor || SM == CXXCopyAssignment)
+ if (SM == CXXSpecialMemberKind::CopyConstructor ||
+ SM == CXXSpecialMemberKind::CopyAssignment)
VK = VK_LValue;
else
VK = VK_PRValue;
@@ -3457,7 +3409,7 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD,
OpaqueValueExpr FakeArg(LookupLoc, ArgType, VK);
- if (SM != CXXDefaultConstructor) {
+ if (SM != CXXSpecialMemberKind::DefaultConstructor) {
NumArgs = 1;
Arg = &FakeArg;
}
@@ -3483,7 +3435,7 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD,
// type, rather than because there's some other declared constructor.
// Every class has a copy/move constructor, copy/move assignment, and
// destructor.
- assert(SM == CXXDefaultConstructor &&
+ assert(SM == CXXSpecialMemberKind::DefaultConstructor &&
"lookup for a constructor or assignment operator was empty");
Result->setMethod(nullptr);
Result->setKind(SpecialMemberOverloadResult::NoMemberOrDeleted);
@@ -3501,7 +3453,8 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD,
DeclAccessPair Cand = DeclAccessPair::make(CandDecl, AS_public);
auto CtorInfo = getConstructorInfo(Cand);
if (CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(Cand->getUnderlyingDecl())) {
- if (SM == CXXCopyAssignment || SM == CXXMoveAssignment)
+ if (SM == CXXSpecialMemberKind::CopyAssignment ||
+ SM == CXXSpecialMemberKind::MoveAssignment)
AddMethodCandidate(M, Cand, RD, ThisTy, Classification,
llvm::ArrayRef(&Arg, NumArgs), OCS, true);
else if (CtorInfo)
@@ -3513,7 +3466,8 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD,
/*SuppressUserConversions*/ true);
} else if (FunctionTemplateDecl *Tmpl =
dyn_cast<FunctionTemplateDecl>(Cand->getUnderlyingDecl())) {
- if (SM == CXXCopyAssignment || SM == CXXMoveAssignment)
+ if (SM == CXXSpecialMemberKind::CopyAssignment ||
+ SM == CXXSpecialMemberKind::MoveAssignment)
AddMethodTemplateCandidate(Tmpl, Cand, RD, nullptr, ThisTy,
Classification,
llvm::ArrayRef(&Arg, NumArgs), OCS, true);
@@ -3556,38 +3510,34 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD,
return *Result;
}
-/// Look up the default constructor for the given class.
CXXConstructorDecl *Sema::LookupDefaultConstructor(CXXRecordDecl *Class) {
SpecialMemberOverloadResult Result =
- LookupSpecialMember(Class, CXXDefaultConstructor, false, false, false,
- false, false);
+ LookupSpecialMember(Class, CXXSpecialMemberKind::DefaultConstructor,
+ false, false, false, false, false);
return cast_or_null<CXXConstructorDecl>(Result.getMethod());
}
-/// Look up the copying constructor for the given class.
CXXConstructorDecl *Sema::LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals) {
assert(!(Quals & ~(Qualifiers::Const | Qualifiers::Volatile)) &&
"non-const, non-volatile qualifiers for copy ctor arg");
- SpecialMemberOverloadResult Result =
- LookupSpecialMember(Class, CXXCopyConstructor, Quals & Qualifiers::Const,
- Quals & Qualifiers::Volatile, false, false, false);
+ SpecialMemberOverloadResult Result = LookupSpecialMember(
+ Class, CXXSpecialMemberKind::CopyConstructor, Quals & Qualifiers::Const,
+ Quals & Qualifiers::Volatile, false, false, false);
return cast_or_null<CXXConstructorDecl>(Result.getMethod());
}
-/// Look up the moving constructor for the given class.
CXXConstructorDecl *Sema::LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals) {
- SpecialMemberOverloadResult Result =
- LookupSpecialMember(Class, CXXMoveConstructor, Quals & Qualifiers::Const,
- Quals & Qualifiers::Volatile, false, false, false);
+ SpecialMemberOverloadResult Result = LookupSpecialMember(
+ Class, CXXSpecialMemberKind::MoveConstructor, Quals & Qualifiers::Const,
+ Quals & Qualifiers::Volatile, false, false, false);
return cast_or_null<CXXConstructorDecl>(Result.getMethod());
}
-/// Look up the constructors for the given class.
DeclContext::lookup_result Sema::LookupConstructors(CXXRecordDecl *Class) {
// If the implicit constructors have not yet been declared, do so now.
if (CanDeclareSpecialMemberFunction(Class)) {
@@ -3606,7 +3556,6 @@ DeclContext::lookup_result Sema::LookupConstructors(CXXRecordDecl *Class) {
return Class->lookup(Name);
}
-/// Look up the copying assignment operator for the given class.
CXXMethodDecl *Sema::LookupCopyingAssignment(CXXRecordDecl *Class,
unsigned Quals, bool RValueThis,
unsigned ThisQuals) {
@@ -3614,50 +3563,35 @@ CXXMethodDecl *Sema::LookupCopyingAssignment(CXXRecordDecl *Class,
"non-const, non-volatile qualifiers for copy assignment arg");
assert(!(ThisQuals & ~(Qualifiers::Const | Qualifiers::Volatile)) &&
"non-const, non-volatile qualifiers for copy assignment this");
- SpecialMemberOverloadResult Result =
- LookupSpecialMember(Class, CXXCopyAssignment, Quals & Qualifiers::Const,
- Quals & Qualifiers::Volatile, RValueThis,
- ThisQuals & Qualifiers::Const,
- ThisQuals & Qualifiers::Volatile);
+ SpecialMemberOverloadResult Result = LookupSpecialMember(
+ Class, CXXSpecialMemberKind::CopyAssignment, Quals & Qualifiers::Const,
+ Quals & Qualifiers::Volatile, RValueThis, ThisQuals & Qualifiers::Const,
+ ThisQuals & Qualifiers::Volatile);
return Result.getMethod();
}
-/// Look up the moving assignment operator for the given class.
CXXMethodDecl *Sema::LookupMovingAssignment(CXXRecordDecl *Class,
unsigned Quals,
bool RValueThis,
unsigned ThisQuals) {
assert(!(ThisQuals & ~(Qualifiers::Const | Qualifiers::Volatile)) &&
"non-const, non-volatile qualifiers for copy assignment this");
- SpecialMemberOverloadResult Result =
- LookupSpecialMember(Class, CXXMoveAssignment, Quals & Qualifiers::Const,
- Quals & Qualifiers::Volatile, RValueThis,
- ThisQuals & Qualifiers::Const,
- ThisQuals & Qualifiers::Volatile);
+ SpecialMemberOverloadResult Result = LookupSpecialMember(
+ Class, CXXSpecialMemberKind::MoveAssignment, Quals & Qualifiers::Const,
+ Quals & Qualifiers::Volatile, RValueThis, ThisQuals & Qualifiers::Const,
+ ThisQuals & Qualifiers::Volatile);
return Result.getMethod();
}
-/// Look for the destructor of the given class.
-///
-/// During semantic analysis, this routine should be used in lieu of
-/// CXXRecordDecl::getDestructor().
-///
-/// \returns The destructor for this class.
CXXDestructorDecl *Sema::LookupDestructor(CXXRecordDecl *Class) {
return cast_or_null<CXXDestructorDecl>(
- LookupSpecialMember(Class, CXXDestructor, false, false, false, false,
- false)
+ LookupSpecialMember(Class, CXXSpecialMemberKind::Destructor, false, false,
+ false, false, false)
.getMethod());
}
-/// LookupLiteralOperator - Determine which literal operator should be used for
-/// a user-defined literal, per C++11 [lex.ext].
-///
-/// Normal overload resolution is not used to select which literal operator to
-/// call for a user-defined literal. Look up the provided literal operator name,
-/// and filter the results to the appropriate set for the given argument types.
Sema::LiteralOperatorLookupResult
Sema::LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys, bool AllowRaw,
@@ -4425,10 +4359,6 @@ void Sema::LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
H.lookupVisibleDecls(*this, Ctx, Kind, IncludeGlobalScope);
}
-/// LookupOrCreateLabel - Do a name lookup of a label with the specified name.
-/// If GnuLabelLoc is a valid source location, then this is a definition
-/// of an __label__ label name, otherwise it is a normal label definition
-/// or use.
LabelDecl *Sema::LookupOrCreateLabel(IdentifierInfo *II, SourceLocation Loc,
SourceLocation GnuLabelLoc) {
// Do a lookup to see if we have a label with this name already.
@@ -4443,7 +4373,8 @@ LabelDecl *Sema::LookupOrCreateLabel(IdentifierInfo *II, SourceLocation Loc,
}
// Not a GNU local label.
- Res = LookupSingleName(CurScope, II, Loc, LookupLabel, NotForRedeclaration);
+ Res = LookupSingleName(CurScope, II, Loc, LookupLabel,
+ RedeclarationKind::NotForRedeclaration);
// If we found a label, check to see if it is in the same context as us.
// When in a Block, we don't want to reuse a label in an enclosing function.
if (Res && Res->getDeclContext() != CurContext)
@@ -5011,8 +4942,9 @@ static void LookupPotentialTypoResult(Sema &SemaRef,
return;
}
- SemaRef.LookupParsedName(Res, S, SS, /*AllowBuiltinCreation=*/false,
- EnteringContext);
+ SemaRef.LookupParsedName(Res, S, SS,
+ /*ObjectType=*/QualType(),
+ /*AllowBuiltinCreation=*/false, EnteringContext);
// Fake ivar lookup; this should really be part of
// LookupParsedName.
@@ -5051,7 +4983,7 @@ static void AddKeywordsToConsumer(Sema &SemaRef,
static const char *const CTypeSpecs[] = {
"char", "const", "double", "enum", "float", "int", "long", "short",
"signed", "struct", "union", "unsigned", "void", "volatile",
- "_Complex", "_Imaginary",
+ "_Complex",
// storage-specifiers as well
"extern", "inline", "static", "typedef"
};
@@ -5059,6 +4991,9 @@ static void AddKeywordsToConsumer(Sema &SemaRef,
for (const auto *CTS : CTypeSpecs)
Consumer.addKeywordResult(CTS);
+ if (SemaRef.getLangOpts().C99 && !SemaRef.getLangOpts().C2y)
+ Consumer.addKeywordResult("_Imaginary");
+
if (SemaRef.getLangOpts().C99)
Consumer.addKeywordResult("restrict");
if (SemaRef.getLangOpts().Bool || SemaRef.getLangOpts().CPlusPlus)
@@ -5323,37 +5258,6 @@ std::unique_ptr<TypoCorrectionConsumer> Sema::makeTypoCorrectionConsumer(
return Consumer;
}
-/// Try to "correct" a typo in the source code by finding
-/// visible declarations whose names are similar to the name that was
-/// present in the source code.
-///
-/// \param TypoName the \c DeclarationNameInfo structure that contains
-/// the name that was present in the source code along with its location.
-///
-/// \param LookupKind the name-lookup criteria used to search for the name.
-///
-/// \param S the scope in which name lookup occurs.
-///
-/// \param SS the nested-name-specifier that precedes the name we're
-/// looking for, if present.
-///
-/// \param CCC A CorrectionCandidateCallback object that provides further
-/// validation of typo correction candidates. It also provides flags for
-/// determining the set of keywords permitted.
-///
-/// \param MemberContext if non-NULL, the context in which to look for
-/// a member access expression.
-///
-/// \param EnteringContext whether we're entering the context described by
-/// the nested-name-specifier SS.
-///
-/// \param OPT when non-NULL, the search for visible declarations will
-/// also walk the protocols in the qualified interfaces of \p OPT.
-///
-/// \returns a \c TypoCorrection containing the corrected name if the typo
-/// along with information such as the \c NamedDecl where the corrected name
-/// was declared, and any additional \c NestedNameSpecifier needed to access
-/// it (C++ only). The \c TypoCorrection is empty if there is no correction.
TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
@@ -5451,44 +5355,6 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure && !SecondBestTC);
}
-/// Try to "correct" a typo in the source code by finding
-/// visible declarations whose names are similar to the name that was
-/// present in the source code.
-///
-/// \param TypoName the \c DeclarationNameInfo structure that contains
-/// the name that was present in the source code along with its location.
-///
-/// \param LookupKind the name-lookup criteria used to search for the name.
-///
-/// \param S the scope in which name lookup occurs.
-///
-/// \param SS the nested-name-specifier that precedes the name we're
-/// looking for, if present.
-///
-/// \param CCC A CorrectionCandidateCallback object that provides further
-/// validation of typo correction candidates. It also provides flags for
-/// determining the set of keywords permitted.
-///
-/// \param TDG A TypoDiagnosticGenerator functor that will be used to print
-/// diagnostics when the actual typo correction is attempted.
-///
-/// \param TRC A TypoRecoveryCallback functor that will be used to build an
-/// Expr from a typo correction candidate.
-///
-/// \param MemberContext if non-NULL, the context in which to look for
-/// a member access expression.
-///
-/// \param EnteringContext whether we're entering the context described by
-/// the nested-name-specifier SS.
-///
-/// \param OPT when non-NULL, the search for visible declarations will
-/// also walk the protocols in the qualified interfaces of \p OPT.
-///
-/// \returns a new \c TypoExpr that will later be replaced in the AST with an
-/// Expr representing the result of performing typo correction, or nullptr if
-/// typo correction is not possible. If nullptr is returned, no diagnostics will
-/// be emitted and it is the responsibility of the caller to emit any that are
-/// needed.
TypoExpr *Sema::CorrectTypoDelayed(
const DeclarationNameInfo &TypoName, Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC,
@@ -5765,19 +5631,13 @@ void Sema::diagnoseMissingImport(SourceLocation UseLoc, const NamedDecl *Decl,
if (M->isModuleMapModule())
return M->getFullModuleName();
- Module *CurrentModule = getCurrentModule();
-
if (M->isImplicitGlobalModule())
M = M->getTopLevelModule();
- bool IsInTheSameModule =
- CurrentModule && CurrentModule->getPrimaryModuleInterfaceName() ==
- M->getPrimaryModuleInterfaceName();
-
// If the current module unit is in the same module with M, it is OK to show
// the partition name. Otherwise, it'll be sufficient to show the primary
// module name.
- if (IsInTheSameModule)
+ if (getASTContext().isInSameModule(M, getCurrentModule()))
return M->getTopLevelModuleName().str();
else
return M->getPrimaryModuleInterfaceName().str();
@@ -5810,18 +5670,6 @@ void Sema::diagnoseMissingImport(SourceLocation UseLoc, const NamedDecl *Decl,
createImplicitModuleImportForErrorRecovery(UseLoc, Modules[0]);
}
-/// Diagnose a successfully-corrected typo. Separated from the correction
-/// itself to allow external validation of the result, etc.
-///
-/// \param Correction The result of performing typo correction.
-/// \param TypoDiag The diagnostic to produce. This will have the corrected
-/// string added to it (and usually also a fixit).
-/// \param PrevNote A note to use when indicating the location of the entity to
-/// which we are correcting. Will have the correction string added to it.
-/// \param ErrorRecovery If \c true (the default), the caller is going to
-/// recover from the typo as if the corrected string had been typed.
-/// In this case, \c PDiag must be an error, and we will attach a fixit
-/// to it.
void Sema::diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
@@ -5846,6 +5694,16 @@ void Sema::diagnoseTypo(const TypoCorrection &Correction,
NamedDecl *ChosenDecl =
Correction.isKeyword() ? nullptr : Correction.getFoundDecl();
+
+ // For builtin functions which aren't declared anywhere in source,
+ // don't emit the "declared here" note.
+ if (const auto *FD = dyn_cast_if_present<FunctionDecl>(ChosenDecl);
+ FD && FD->getBuiltinID() &&
+ PrevNote.getDiagID() == diag::note_previous_decl &&
+ Correction.getCorrectionRange().getBegin() == FD->getBeginLoc()) {
+ ChosenDecl = nullptr;
+ }
+
if (PrevNote.getDiagID() && ChosenDecl)
Diag(ChosenDecl->getLocation(), PrevNote)
<< CorrectedQuotedStr << (ErrorRecovery ? FixItHint() : FixTypo);
@@ -5883,7 +5741,8 @@ void Sema::clearDelayedTypo(TypoExpr *TE) {
void Sema::ActOnPragmaDump(Scope *S, SourceLocation IILoc, IdentifierInfo *II) {
DeclarationNameInfo Name(II, IILoc);
- LookupResult R(*this, Name, LookupAnyName, Sema::NotForRedeclaration);
+ LookupResult R(*this, Name, LookupAnyName,
+ RedeclarationKind::NotForRedeclaration);
R.suppressDiagnostics();
R.setHideTags(false);
LookupName(R, S);
@@ -5893,3 +5752,13 @@ void Sema::ActOnPragmaDump(Scope *S, SourceLocation IILoc, IdentifierInfo *II) {
void Sema::ActOnPragmaDump(Expr *E) {
E->dump();
}
+
+RedeclarationKind Sema::forRedeclarationInCurContext() const {
+ // A declaration with an owning module for linkage can never link against
+ // anything that is not visible. We don't need to check linkage here; if
+ // the context has internal linkage, redeclaration lookup won't find things
+ // from other TUs, and we can't safely compute linkage yet in general.
+ if (cast<Decl>(CurContext)->getOwningModuleForLinkage())
+ return RedeclarationKind::ForVisibleRedeclaration;
+ return RedeclarationKind::ForExternalRedeclaration;
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaLoongArch.cpp b/contrib/llvm-project/clang/lib/Sema/SemaLoongArch.cpp
new file mode 100644
index 000000000000..0a67bf2c7738
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaLoongArch.cpp
@@ -0,0 +1,515 @@
+//===------ SemaLoongArch.cpp ---- LoongArch target-specific routines -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to LoongArch.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaLoongArch.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/Support/MathExtras.h"
+
+namespace clang {
+
+SemaLoongArch::SemaLoongArch(Sema &S) : SemaBase(S) {}
+
+bool SemaLoongArch::CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ switch (BuiltinID) {
+ default:
+ break;
+ // Basic intrinsics.
+ case LoongArch::BI__builtin_loongarch_cacop_d:
+ case LoongArch::BI__builtin_loongarch_cacop_w: {
+ SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(5));
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, llvm::minIntN(12),
+ llvm::maxIntN(12));
+ break;
+ }
+ case LoongArch::BI__builtin_loongarch_break:
+ case LoongArch::BI__builtin_loongarch_dbar:
+ case LoongArch::BI__builtin_loongarch_ibar:
+ case LoongArch::BI__builtin_loongarch_syscall:
+ // Check if immediate is in [0, 32767].
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 32767);
+ case LoongArch::BI__builtin_loongarch_csrrd_w:
+ case LoongArch::BI__builtin_loongarch_csrrd_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 16383);
+ case LoongArch::BI__builtin_loongarch_csrwr_w:
+ case LoongArch::BI__builtin_loongarch_csrwr_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 16383);
+ case LoongArch::BI__builtin_loongarch_csrxchg_w:
+ case LoongArch::BI__builtin_loongarch_csrxchg_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 16383);
+ case LoongArch::BI__builtin_loongarch_lddir_d:
+ case LoongArch::BI__builtin_loongarch_ldpte_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case LoongArch::BI__builtin_loongarch_movfcsr2gr:
+ case LoongArch::BI__builtin_loongarch_movgr2fcsr:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(2));
+
+ // LSX intrinsics.
+ case LoongArch::BI__builtin_lsx_vbitclri_b:
+ case LoongArch::BI__builtin_lsx_vbitrevi_b:
+ case LoongArch::BI__builtin_lsx_vbitseti_b:
+ case LoongArch::BI__builtin_lsx_vsat_b:
+ case LoongArch::BI__builtin_lsx_vsat_bu:
+ case LoongArch::BI__builtin_lsx_vslli_b:
+ case LoongArch::BI__builtin_lsx_vsrai_b:
+ case LoongArch::BI__builtin_lsx_vsrari_b:
+ case LoongArch::BI__builtin_lsx_vsrli_b:
+ case LoongArch::BI__builtin_lsx_vsllwil_h_b:
+ case LoongArch::BI__builtin_lsx_vsllwil_hu_bu:
+ case LoongArch::BI__builtin_lsx_vrotri_b:
+ case LoongArch::BI__builtin_lsx_vsrlri_b:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 7);
+ case LoongArch::BI__builtin_lsx_vbitclri_h:
+ case LoongArch::BI__builtin_lsx_vbitrevi_h:
+ case LoongArch::BI__builtin_lsx_vbitseti_h:
+ case LoongArch::BI__builtin_lsx_vsat_h:
+ case LoongArch::BI__builtin_lsx_vsat_hu:
+ case LoongArch::BI__builtin_lsx_vslli_h:
+ case LoongArch::BI__builtin_lsx_vsrai_h:
+ case LoongArch::BI__builtin_lsx_vsrari_h:
+ case LoongArch::BI__builtin_lsx_vsrli_h:
+ case LoongArch::BI__builtin_lsx_vsllwil_w_h:
+ case LoongArch::BI__builtin_lsx_vsllwil_wu_hu:
+ case LoongArch::BI__builtin_lsx_vrotri_h:
+ case LoongArch::BI__builtin_lsx_vsrlri_h:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15);
+ case LoongArch::BI__builtin_lsx_vssrarni_b_h:
+ case LoongArch::BI__builtin_lsx_vssrarni_bu_h:
+ case LoongArch::BI__builtin_lsx_vssrani_b_h:
+ case LoongArch::BI__builtin_lsx_vssrani_bu_h:
+ case LoongArch::BI__builtin_lsx_vsrarni_b_h:
+ case LoongArch::BI__builtin_lsx_vsrlni_b_h:
+ case LoongArch::BI__builtin_lsx_vsrlrni_b_h:
+ case LoongArch::BI__builtin_lsx_vssrlni_b_h:
+ case LoongArch::BI__builtin_lsx_vssrlni_bu_h:
+ case LoongArch::BI__builtin_lsx_vssrlrni_b_h:
+ case LoongArch::BI__builtin_lsx_vssrlrni_bu_h:
+ case LoongArch::BI__builtin_lsx_vsrani_b_h:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 15);
+ case LoongArch::BI__builtin_lsx_vslei_bu:
+ case LoongArch::BI__builtin_lsx_vslei_hu:
+ case LoongArch::BI__builtin_lsx_vslei_wu:
+ case LoongArch::BI__builtin_lsx_vslei_du:
+ case LoongArch::BI__builtin_lsx_vslti_bu:
+ case LoongArch::BI__builtin_lsx_vslti_hu:
+ case LoongArch::BI__builtin_lsx_vslti_wu:
+ case LoongArch::BI__builtin_lsx_vslti_du:
+ case LoongArch::BI__builtin_lsx_vmaxi_bu:
+ case LoongArch::BI__builtin_lsx_vmaxi_hu:
+ case LoongArch::BI__builtin_lsx_vmaxi_wu:
+ case LoongArch::BI__builtin_lsx_vmaxi_du:
+ case LoongArch::BI__builtin_lsx_vmini_bu:
+ case LoongArch::BI__builtin_lsx_vmini_hu:
+ case LoongArch::BI__builtin_lsx_vmini_wu:
+ case LoongArch::BI__builtin_lsx_vmini_du:
+ case LoongArch::BI__builtin_lsx_vaddi_bu:
+ case LoongArch::BI__builtin_lsx_vaddi_hu:
+ case LoongArch::BI__builtin_lsx_vaddi_wu:
+ case LoongArch::BI__builtin_lsx_vaddi_du:
+ case LoongArch::BI__builtin_lsx_vbitclri_w:
+ case LoongArch::BI__builtin_lsx_vbitrevi_w:
+ case LoongArch::BI__builtin_lsx_vbitseti_w:
+ case LoongArch::BI__builtin_lsx_vsat_w:
+ case LoongArch::BI__builtin_lsx_vsat_wu:
+ case LoongArch::BI__builtin_lsx_vslli_w:
+ case LoongArch::BI__builtin_lsx_vsrai_w:
+ case LoongArch::BI__builtin_lsx_vsrari_w:
+ case LoongArch::BI__builtin_lsx_vsrli_w:
+ case LoongArch::BI__builtin_lsx_vsllwil_d_w:
+ case LoongArch::BI__builtin_lsx_vsllwil_du_wu:
+ case LoongArch::BI__builtin_lsx_vsrlri_w:
+ case LoongArch::BI__builtin_lsx_vrotri_w:
+ case LoongArch::BI__builtin_lsx_vsubi_bu:
+ case LoongArch::BI__builtin_lsx_vsubi_hu:
+ case LoongArch::BI__builtin_lsx_vbsrl_v:
+ case LoongArch::BI__builtin_lsx_vbsll_v:
+ case LoongArch::BI__builtin_lsx_vsubi_wu:
+ case LoongArch::BI__builtin_lsx_vsubi_du:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case LoongArch::BI__builtin_lsx_vssrarni_h_w:
+ case LoongArch::BI__builtin_lsx_vssrarni_hu_w:
+ case LoongArch::BI__builtin_lsx_vssrani_h_w:
+ case LoongArch::BI__builtin_lsx_vssrani_hu_w:
+ case LoongArch::BI__builtin_lsx_vsrarni_h_w:
+ case LoongArch::BI__builtin_lsx_vsrani_h_w:
+ case LoongArch::BI__builtin_lsx_vfrstpi_b:
+ case LoongArch::BI__builtin_lsx_vfrstpi_h:
+ case LoongArch::BI__builtin_lsx_vsrlni_h_w:
+ case LoongArch::BI__builtin_lsx_vsrlrni_h_w:
+ case LoongArch::BI__builtin_lsx_vssrlni_h_w:
+ case LoongArch::BI__builtin_lsx_vssrlni_hu_w:
+ case LoongArch::BI__builtin_lsx_vssrlrni_h_w:
+ case LoongArch::BI__builtin_lsx_vssrlrni_hu_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 31);
+ case LoongArch::BI__builtin_lsx_vbitclri_d:
+ case LoongArch::BI__builtin_lsx_vbitrevi_d:
+ case LoongArch::BI__builtin_lsx_vbitseti_d:
+ case LoongArch::BI__builtin_lsx_vsat_d:
+ case LoongArch::BI__builtin_lsx_vsat_du:
+ case LoongArch::BI__builtin_lsx_vslli_d:
+ case LoongArch::BI__builtin_lsx_vsrai_d:
+ case LoongArch::BI__builtin_lsx_vsrli_d:
+ case LoongArch::BI__builtin_lsx_vsrari_d:
+ case LoongArch::BI__builtin_lsx_vrotri_d:
+ case LoongArch::BI__builtin_lsx_vsrlri_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 63);
+ case LoongArch::BI__builtin_lsx_vssrarni_w_d:
+ case LoongArch::BI__builtin_lsx_vssrarni_wu_d:
+ case LoongArch::BI__builtin_lsx_vssrani_w_d:
+ case LoongArch::BI__builtin_lsx_vssrani_wu_d:
+ case LoongArch::BI__builtin_lsx_vsrarni_w_d:
+ case LoongArch::BI__builtin_lsx_vsrlni_w_d:
+ case LoongArch::BI__builtin_lsx_vsrlrni_w_d:
+ case LoongArch::BI__builtin_lsx_vssrlni_w_d:
+ case LoongArch::BI__builtin_lsx_vssrlni_wu_d:
+ case LoongArch::BI__builtin_lsx_vssrlrni_w_d:
+ case LoongArch::BI__builtin_lsx_vssrlrni_wu_d:
+ case LoongArch::BI__builtin_lsx_vsrani_w_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 63);
+ case LoongArch::BI__builtin_lsx_vssrarni_d_q:
+ case LoongArch::BI__builtin_lsx_vssrarni_du_q:
+ case LoongArch::BI__builtin_lsx_vssrani_d_q:
+ case LoongArch::BI__builtin_lsx_vssrani_du_q:
+ case LoongArch::BI__builtin_lsx_vsrarni_d_q:
+ case LoongArch::BI__builtin_lsx_vssrlni_d_q:
+ case LoongArch::BI__builtin_lsx_vssrlni_du_q:
+ case LoongArch::BI__builtin_lsx_vssrlrni_d_q:
+ case LoongArch::BI__builtin_lsx_vssrlrni_du_q:
+ case LoongArch::BI__builtin_lsx_vsrani_d_q:
+ case LoongArch::BI__builtin_lsx_vsrlrni_d_q:
+ case LoongArch::BI__builtin_lsx_vsrlni_d_q:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 127);
+ case LoongArch::BI__builtin_lsx_vseqi_b:
+ case LoongArch::BI__builtin_lsx_vseqi_h:
+ case LoongArch::BI__builtin_lsx_vseqi_w:
+ case LoongArch::BI__builtin_lsx_vseqi_d:
+ case LoongArch::BI__builtin_lsx_vslti_b:
+ case LoongArch::BI__builtin_lsx_vslti_h:
+ case LoongArch::BI__builtin_lsx_vslti_w:
+ case LoongArch::BI__builtin_lsx_vslti_d:
+ case LoongArch::BI__builtin_lsx_vslei_b:
+ case LoongArch::BI__builtin_lsx_vslei_h:
+ case LoongArch::BI__builtin_lsx_vslei_w:
+ case LoongArch::BI__builtin_lsx_vslei_d:
+ case LoongArch::BI__builtin_lsx_vmaxi_b:
+ case LoongArch::BI__builtin_lsx_vmaxi_h:
+ case LoongArch::BI__builtin_lsx_vmaxi_w:
+ case LoongArch::BI__builtin_lsx_vmaxi_d:
+ case LoongArch::BI__builtin_lsx_vmini_b:
+ case LoongArch::BI__builtin_lsx_vmini_h:
+ case LoongArch::BI__builtin_lsx_vmini_w:
+ case LoongArch::BI__builtin_lsx_vmini_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, -16, 15);
+ case LoongArch::BI__builtin_lsx_vandi_b:
+ case LoongArch::BI__builtin_lsx_vnori_b:
+ case LoongArch::BI__builtin_lsx_vori_b:
+ case LoongArch::BI__builtin_lsx_vshuf4i_b:
+ case LoongArch::BI__builtin_lsx_vshuf4i_h:
+ case LoongArch::BI__builtin_lsx_vshuf4i_w:
+ case LoongArch::BI__builtin_lsx_vxori_b:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 255);
+ case LoongArch::BI__builtin_lsx_vbitseli_b:
+ case LoongArch::BI__builtin_lsx_vshuf4i_d:
+ case LoongArch::BI__builtin_lsx_vextrins_b:
+ case LoongArch::BI__builtin_lsx_vextrins_h:
+ case LoongArch::BI__builtin_lsx_vextrins_w:
+ case LoongArch::BI__builtin_lsx_vextrins_d:
+ case LoongArch::BI__builtin_lsx_vpermi_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 255);
+ case LoongArch::BI__builtin_lsx_vpickve2gr_b:
+ case LoongArch::BI__builtin_lsx_vpickve2gr_bu:
+ case LoongArch::BI__builtin_lsx_vreplvei_b:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15);
+ case LoongArch::BI__builtin_lsx_vinsgr2vr_b:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 15);
+ case LoongArch::BI__builtin_lsx_vpickve2gr_h:
+ case LoongArch::BI__builtin_lsx_vpickve2gr_hu:
+ case LoongArch::BI__builtin_lsx_vreplvei_h:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 7);
+ case LoongArch::BI__builtin_lsx_vinsgr2vr_h:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 7);
+ case LoongArch::BI__builtin_lsx_vpickve2gr_w:
+ case LoongArch::BI__builtin_lsx_vpickve2gr_wu:
+ case LoongArch::BI__builtin_lsx_vreplvei_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 3);
+ case LoongArch::BI__builtin_lsx_vinsgr2vr_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3);
+ case LoongArch::BI__builtin_lsx_vpickve2gr_d:
+ case LoongArch::BI__builtin_lsx_vpickve2gr_du:
+ case LoongArch::BI__builtin_lsx_vreplvei_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1);
+ case LoongArch::BI__builtin_lsx_vinsgr2vr_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 1);
+ case LoongArch::BI__builtin_lsx_vstelm_b:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, -128, 127) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 15);
+ case LoongArch::BI__builtin_lsx_vstelm_h:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, -256, 254) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 7);
+ case LoongArch::BI__builtin_lsx_vstelm_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, -512, 508) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 3);
+ case LoongArch::BI__builtin_lsx_vstelm_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, -1024, 1016) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 1);
+ case LoongArch::BI__builtin_lsx_vldrepl_b:
+ case LoongArch::BI__builtin_lsx_vld:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, -2048, 2047);
+ case LoongArch::BI__builtin_lsx_vldrepl_h:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, -2048, 2046);
+ case LoongArch::BI__builtin_lsx_vldrepl_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, -2048, 2044);
+ case LoongArch::BI__builtin_lsx_vldrepl_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, -2048, 2040);
+ case LoongArch::BI__builtin_lsx_vst:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, -2048, 2047);
+ case LoongArch::BI__builtin_lsx_vldi:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, -4096, 4095);
+ case LoongArch::BI__builtin_lsx_vrepli_b:
+ case LoongArch::BI__builtin_lsx_vrepli_h:
+ case LoongArch::BI__builtin_lsx_vrepli_w:
+ case LoongArch::BI__builtin_lsx_vrepli_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, -512, 511);
+
+ // LASX intrinsics.
+ case LoongArch::BI__builtin_lasx_xvbitclri_b:
+ case LoongArch::BI__builtin_lasx_xvbitrevi_b:
+ case LoongArch::BI__builtin_lasx_xvbitseti_b:
+ case LoongArch::BI__builtin_lasx_xvsat_b:
+ case LoongArch::BI__builtin_lasx_xvsat_bu:
+ case LoongArch::BI__builtin_lasx_xvslli_b:
+ case LoongArch::BI__builtin_lasx_xvsrai_b:
+ case LoongArch::BI__builtin_lasx_xvsrari_b:
+ case LoongArch::BI__builtin_lasx_xvsrli_b:
+ case LoongArch::BI__builtin_lasx_xvsllwil_h_b:
+ case LoongArch::BI__builtin_lasx_xvsllwil_hu_bu:
+ case LoongArch::BI__builtin_lasx_xvrotri_b:
+ case LoongArch::BI__builtin_lasx_xvsrlri_b:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 7);
+ case LoongArch::BI__builtin_lasx_xvbitclri_h:
+ case LoongArch::BI__builtin_lasx_xvbitrevi_h:
+ case LoongArch::BI__builtin_lasx_xvbitseti_h:
+ case LoongArch::BI__builtin_lasx_xvsat_h:
+ case LoongArch::BI__builtin_lasx_xvsat_hu:
+ case LoongArch::BI__builtin_lasx_xvslli_h:
+ case LoongArch::BI__builtin_lasx_xvsrai_h:
+ case LoongArch::BI__builtin_lasx_xvsrari_h:
+ case LoongArch::BI__builtin_lasx_xvsrli_h:
+ case LoongArch::BI__builtin_lasx_xvsllwil_w_h:
+ case LoongArch::BI__builtin_lasx_xvsllwil_wu_hu:
+ case LoongArch::BI__builtin_lasx_xvrotri_h:
+ case LoongArch::BI__builtin_lasx_xvsrlri_h:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15);
+ case LoongArch::BI__builtin_lasx_xvssrarni_b_h:
+ case LoongArch::BI__builtin_lasx_xvssrarni_bu_h:
+ case LoongArch::BI__builtin_lasx_xvssrani_b_h:
+ case LoongArch::BI__builtin_lasx_xvssrani_bu_h:
+ case LoongArch::BI__builtin_lasx_xvsrarni_b_h:
+ case LoongArch::BI__builtin_lasx_xvsrlni_b_h:
+ case LoongArch::BI__builtin_lasx_xvsrlrni_b_h:
+ case LoongArch::BI__builtin_lasx_xvssrlni_b_h:
+ case LoongArch::BI__builtin_lasx_xvssrlni_bu_h:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_b_h:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_bu_h:
+ case LoongArch::BI__builtin_lasx_xvsrani_b_h:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 15);
+ case LoongArch::BI__builtin_lasx_xvslei_bu:
+ case LoongArch::BI__builtin_lasx_xvslei_hu:
+ case LoongArch::BI__builtin_lasx_xvslei_wu:
+ case LoongArch::BI__builtin_lasx_xvslei_du:
+ case LoongArch::BI__builtin_lasx_xvslti_bu:
+ case LoongArch::BI__builtin_lasx_xvslti_hu:
+ case LoongArch::BI__builtin_lasx_xvslti_wu:
+ case LoongArch::BI__builtin_lasx_xvslti_du:
+ case LoongArch::BI__builtin_lasx_xvmaxi_bu:
+ case LoongArch::BI__builtin_lasx_xvmaxi_hu:
+ case LoongArch::BI__builtin_lasx_xvmaxi_wu:
+ case LoongArch::BI__builtin_lasx_xvmaxi_du:
+ case LoongArch::BI__builtin_lasx_xvmini_bu:
+ case LoongArch::BI__builtin_lasx_xvmini_hu:
+ case LoongArch::BI__builtin_lasx_xvmini_wu:
+ case LoongArch::BI__builtin_lasx_xvmini_du:
+ case LoongArch::BI__builtin_lasx_xvaddi_bu:
+ case LoongArch::BI__builtin_lasx_xvaddi_hu:
+ case LoongArch::BI__builtin_lasx_xvaddi_wu:
+ case LoongArch::BI__builtin_lasx_xvaddi_du:
+ case LoongArch::BI__builtin_lasx_xvbitclri_w:
+ case LoongArch::BI__builtin_lasx_xvbitrevi_w:
+ case LoongArch::BI__builtin_lasx_xvbitseti_w:
+ case LoongArch::BI__builtin_lasx_xvsat_w:
+ case LoongArch::BI__builtin_lasx_xvsat_wu:
+ case LoongArch::BI__builtin_lasx_xvslli_w:
+ case LoongArch::BI__builtin_lasx_xvsrai_w:
+ case LoongArch::BI__builtin_lasx_xvsrari_w:
+ case LoongArch::BI__builtin_lasx_xvsrli_w:
+ case LoongArch::BI__builtin_lasx_xvsllwil_d_w:
+ case LoongArch::BI__builtin_lasx_xvsllwil_du_wu:
+ case LoongArch::BI__builtin_lasx_xvsrlri_w:
+ case LoongArch::BI__builtin_lasx_xvrotri_w:
+ case LoongArch::BI__builtin_lasx_xvsubi_bu:
+ case LoongArch::BI__builtin_lasx_xvsubi_hu:
+ case LoongArch::BI__builtin_lasx_xvsubi_wu:
+ case LoongArch::BI__builtin_lasx_xvsubi_du:
+ case LoongArch::BI__builtin_lasx_xvbsrl_v:
+ case LoongArch::BI__builtin_lasx_xvbsll_v:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case LoongArch::BI__builtin_lasx_xvssrarni_h_w:
+ case LoongArch::BI__builtin_lasx_xvssrarni_hu_w:
+ case LoongArch::BI__builtin_lasx_xvssrani_h_w:
+ case LoongArch::BI__builtin_lasx_xvssrani_hu_w:
+ case LoongArch::BI__builtin_lasx_xvsrarni_h_w:
+ case LoongArch::BI__builtin_lasx_xvsrani_h_w:
+ case LoongArch::BI__builtin_lasx_xvfrstpi_b:
+ case LoongArch::BI__builtin_lasx_xvfrstpi_h:
+ case LoongArch::BI__builtin_lasx_xvsrlni_h_w:
+ case LoongArch::BI__builtin_lasx_xvsrlrni_h_w:
+ case LoongArch::BI__builtin_lasx_xvssrlni_h_w:
+ case LoongArch::BI__builtin_lasx_xvssrlni_hu_w:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_h_w:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_hu_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 31);
+ case LoongArch::BI__builtin_lasx_xvbitclri_d:
+ case LoongArch::BI__builtin_lasx_xvbitrevi_d:
+ case LoongArch::BI__builtin_lasx_xvbitseti_d:
+ case LoongArch::BI__builtin_lasx_xvsat_d:
+ case LoongArch::BI__builtin_lasx_xvsat_du:
+ case LoongArch::BI__builtin_lasx_xvslli_d:
+ case LoongArch::BI__builtin_lasx_xvsrai_d:
+ case LoongArch::BI__builtin_lasx_xvsrli_d:
+ case LoongArch::BI__builtin_lasx_xvsrari_d:
+ case LoongArch::BI__builtin_lasx_xvrotri_d:
+ case LoongArch::BI__builtin_lasx_xvsrlri_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 63);
+ case LoongArch::BI__builtin_lasx_xvssrarni_w_d:
+ case LoongArch::BI__builtin_lasx_xvssrarni_wu_d:
+ case LoongArch::BI__builtin_lasx_xvssrani_w_d:
+ case LoongArch::BI__builtin_lasx_xvssrani_wu_d:
+ case LoongArch::BI__builtin_lasx_xvsrarni_w_d:
+ case LoongArch::BI__builtin_lasx_xvsrlni_w_d:
+ case LoongArch::BI__builtin_lasx_xvsrlrni_w_d:
+ case LoongArch::BI__builtin_lasx_xvssrlni_w_d:
+ case LoongArch::BI__builtin_lasx_xvssrlni_wu_d:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_w_d:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_wu_d:
+ case LoongArch::BI__builtin_lasx_xvsrani_w_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 63);
+ case LoongArch::BI__builtin_lasx_xvssrarni_d_q:
+ case LoongArch::BI__builtin_lasx_xvssrarni_du_q:
+ case LoongArch::BI__builtin_lasx_xvssrani_d_q:
+ case LoongArch::BI__builtin_lasx_xvssrani_du_q:
+ case LoongArch::BI__builtin_lasx_xvsrarni_d_q:
+ case LoongArch::BI__builtin_lasx_xvssrlni_d_q:
+ case LoongArch::BI__builtin_lasx_xvssrlni_du_q:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_d_q:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_du_q:
+ case LoongArch::BI__builtin_lasx_xvsrani_d_q:
+ case LoongArch::BI__builtin_lasx_xvsrlni_d_q:
+ case LoongArch::BI__builtin_lasx_xvsrlrni_d_q:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 127);
+ case LoongArch::BI__builtin_lasx_xvseqi_b:
+ case LoongArch::BI__builtin_lasx_xvseqi_h:
+ case LoongArch::BI__builtin_lasx_xvseqi_w:
+ case LoongArch::BI__builtin_lasx_xvseqi_d:
+ case LoongArch::BI__builtin_lasx_xvslti_b:
+ case LoongArch::BI__builtin_lasx_xvslti_h:
+ case LoongArch::BI__builtin_lasx_xvslti_w:
+ case LoongArch::BI__builtin_lasx_xvslti_d:
+ case LoongArch::BI__builtin_lasx_xvslei_b:
+ case LoongArch::BI__builtin_lasx_xvslei_h:
+ case LoongArch::BI__builtin_lasx_xvslei_w:
+ case LoongArch::BI__builtin_lasx_xvslei_d:
+ case LoongArch::BI__builtin_lasx_xvmaxi_b:
+ case LoongArch::BI__builtin_lasx_xvmaxi_h:
+ case LoongArch::BI__builtin_lasx_xvmaxi_w:
+ case LoongArch::BI__builtin_lasx_xvmaxi_d:
+ case LoongArch::BI__builtin_lasx_xvmini_b:
+ case LoongArch::BI__builtin_lasx_xvmini_h:
+ case LoongArch::BI__builtin_lasx_xvmini_w:
+ case LoongArch::BI__builtin_lasx_xvmini_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, -16, 15);
+ case LoongArch::BI__builtin_lasx_xvandi_b:
+ case LoongArch::BI__builtin_lasx_xvnori_b:
+ case LoongArch::BI__builtin_lasx_xvori_b:
+ case LoongArch::BI__builtin_lasx_xvshuf4i_b:
+ case LoongArch::BI__builtin_lasx_xvshuf4i_h:
+ case LoongArch::BI__builtin_lasx_xvshuf4i_w:
+ case LoongArch::BI__builtin_lasx_xvxori_b:
+ case LoongArch::BI__builtin_lasx_xvpermi_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 255);
+ case LoongArch::BI__builtin_lasx_xvbitseli_b:
+ case LoongArch::BI__builtin_lasx_xvshuf4i_d:
+ case LoongArch::BI__builtin_lasx_xvextrins_b:
+ case LoongArch::BI__builtin_lasx_xvextrins_h:
+ case LoongArch::BI__builtin_lasx_xvextrins_w:
+ case LoongArch::BI__builtin_lasx_xvextrins_d:
+ case LoongArch::BI__builtin_lasx_xvpermi_q:
+ case LoongArch::BI__builtin_lasx_xvpermi_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 255);
+ case LoongArch::BI__builtin_lasx_xvrepl128vei_b:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15);
+ case LoongArch::BI__builtin_lasx_xvrepl128vei_h:
+ case LoongArch::BI__builtin_lasx_xvpickve2gr_w:
+ case LoongArch::BI__builtin_lasx_xvpickve2gr_wu:
+ case LoongArch::BI__builtin_lasx_xvpickve_w_f:
+ case LoongArch::BI__builtin_lasx_xvpickve_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 7);
+ case LoongArch::BI__builtin_lasx_xvinsgr2vr_w:
+ case LoongArch::BI__builtin_lasx_xvinsve0_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 7);
+ case LoongArch::BI__builtin_lasx_xvrepl128vei_w:
+ case LoongArch::BI__builtin_lasx_xvpickve2gr_d:
+ case LoongArch::BI__builtin_lasx_xvpickve2gr_du:
+ case LoongArch::BI__builtin_lasx_xvpickve_d_f:
+ case LoongArch::BI__builtin_lasx_xvpickve_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 3);
+ case LoongArch::BI__builtin_lasx_xvinsve0_d:
+ case LoongArch::BI__builtin_lasx_xvinsgr2vr_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3);
+ case LoongArch::BI__builtin_lasx_xvstelm_b:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, -128, 127) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 31);
+ case LoongArch::BI__builtin_lasx_xvstelm_h:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, -256, 254) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 15);
+ case LoongArch::BI__builtin_lasx_xvstelm_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, -512, 508) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 7);
+ case LoongArch::BI__builtin_lasx_xvstelm_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, -1024, 1016) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 3);
+ case LoongArch::BI__builtin_lasx_xvrepl128vei_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1);
+ case LoongArch::BI__builtin_lasx_xvldrepl_b:
+ case LoongArch::BI__builtin_lasx_xvld:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, -2048, 2047);
+ case LoongArch::BI__builtin_lasx_xvldrepl_h:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, -2048, 2046);
+ case LoongArch::BI__builtin_lasx_xvldrepl_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, -2048, 2044);
+ case LoongArch::BI__builtin_lasx_xvldrepl_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, -2048, 2040);
+ case LoongArch::BI__builtin_lasx_xvst:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, -2048, 2047);
+ case LoongArch::BI__builtin_lasx_xvldi:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, -4096, 4095);
+ case LoongArch::BI__builtin_lasx_xvrepli_b:
+ case LoongArch::BI__builtin_lasx_xvrepli_h:
+ case LoongArch::BI__builtin_lasx_xvrepli_w:
+ case LoongArch::BI__builtin_lasx_xvrepli_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, -512, 511);
+ }
+ return false;
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaM68k.cpp b/contrib/llvm-project/clang/lib/Sema/SemaM68k.cpp
new file mode 100644
index 000000000000..f091827092f8
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaM68k.cpp
@@ -0,0 +1,56 @@
+//===------ SemaM68k.cpp -------- M68k target-specific routines -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to M68k.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaM68k.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Sema/ParsedAttr.h"
+
+namespace clang {
+// Trivial constructor: wires this target-specific Sema part to the owning
+// Sema instance via the SemaBase base class.
+SemaM68k::SemaM68k(Sema &S) : SemaBase(S) {}
+
+/// Handle the M68k 'interrupt' attribute.
+/// The attribute requires exactly one integer-constant argument; the value
+/// must be even and no greater than 30, otherwise an out-of-bounds
+/// diagnostic is emitted. On success the attribute is attached and the
+/// declaration is implicitly marked 'used'.
+void SemaM68k::handleInterruptAttr(Decl *D, const ParsedAttr &AL) {
+  // Exactly one attribute argument is required.
+  if (!AL.checkExactlyNumArgs(SemaRef, 1))
+    return;
+
+  if (!AL.isArgExpr(0)) {
+    Diag(AL.getLoc(), diag::err_attribute_argument_type)
+        << AL << AANT_ArgumentIntegerConstant;
+    return;
+  }
+
+  // FIXME: Check for decl - it should be void ()(void).
+
+  Expr *NumParamsExpr = static_cast<Expr *>(AL.getArgAsExpr(0));
+  auto MaybeNumParams = NumParamsExpr->getIntegerConstantExpr(getASTContext());
+  if (!MaybeNumParams) {
+    Diag(AL.getLoc(), diag::err_attribute_argument_type)
+        << AL << AANT_ArgumentIntegerConstant
+        << NumParamsExpr->getSourceRange();
+    return;
+  }
+
+  // Clamp to 255 for the range check; reject odd values and values > 30.
+  unsigned Num = MaybeNumParams->getLimitedValue(255);
+  if ((Num & 1) || Num > 30) {
+    Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
+        << AL << (int)MaybeNumParams->getSExtValue()
+        << NumParamsExpr->getSourceRange();
+    return;
+  }
+
+  D->addAttr(::new (getASTContext())
+                 M68kInterruptAttr(getASTContext(), AL, Num));
+  // Mark the handler 'used' so it is not discarded even though nothing in
+  // the program references it directly.
+  D->addAttr(UsedAttr::CreateImplicit(getASTContext()));
+}
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaMIPS.cpp b/contrib/llvm-project/clang/lib/Sema/SemaMIPS.cpp
new file mode 100644
index 000000000000..269d927903c5
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaMIPS.cpp
@@ -0,0 +1,300 @@
+//===------ SemaMIPS.cpp -------- MIPS target-specific routines -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to MIPS.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaMIPS.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Attr.h"
+#include "clang/Sema/ParsedAttr.h"
+#include "clang/Sema/Sema.h"
+
+namespace clang {
+
+// Trivial constructor: wires this target-specific Sema part to the owning
+// Sema instance via the SemaBase base class.
+SemaMIPS::SemaMIPS(Sema &S) : SemaBase(S) {}
+
+/// Validate a call to a MIPS builtin: first verify the required subtarget
+/// features are enabled, then check any immediate-operand constraints.
+/// Returns true if a diagnostic was emitted.
+bool SemaMIPS::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
+                                            unsigned BuiltinID,
+                                            CallExpr *TheCall) {
+  if (CheckMipsBuiltinCpu(TI, BuiltinID, TheCall))
+    return true;
+  return CheckMipsBuiltinArgument(BuiltinID, TheCall);
+}
+
+/// Diagnose use of a MIPS builtin whose required subtarget feature is not
+/// enabled. Each builtin family occupies a contiguous range of builtin IDs.
+/// Returns true (the diagnostic) if the call is invalid.
+bool SemaMIPS::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
+                                   CallExpr *TheCall) {
+  const bool IsDspBuiltin = Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
+                            BuiltinID <= Mips::BI__builtin_mips_lwx;
+  if (IsDspBuiltin && !TI.hasFeature("dsp"))
+    return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
+
+  const bool IsDspR2Builtin = Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
+                              BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb;
+  if (IsDspR2Builtin && !TI.hasFeature("dspr2"))
+    return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dspr2);
+
+  const bool IsMsaBuiltin = Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
+                            BuiltinID <= Mips::BI__builtin_msa_xori_b;
+  if (IsMsaBuiltin && !TI.hasFeature("msa"))
+    return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
+
+  return false;
+}
+
+// CheckMipsBuiltinArgument - Checks the constant value passed to the
+// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
+// ordering for DSP is unspecified. MSA is ordered by the data format used
+// by the underlying instruction i.e., df/m, df/n and then by size.
+//
+// FIXME: The size tests here should instead be tablegen'd along with the
+// definitions from include/clang/Basic/BuiltinsMips.def.
+// FIXME: GCC is strict on signedness for some of these intrinsics, we should
+// be too.
+bool SemaMIPS::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
+  // i = index of the immediate argument within the call, [l, u] = inclusive
+  // allowed range, m = required multiple (0 means no alignment constraint).
+  unsigned i = 0, l = 0, u = 0, m = 0;
+  switch (BuiltinID) {
+  default: return false;
+  case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
+  case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
+  case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
+  case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
+  case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
+  case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
+  case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
+  // MSA intrinsics. Instructions (which the intrinsics map to) which use the
+  // df/m field.
+  // These intrinsics take an unsigned 3 bit immediate.
+  case Mips::BI__builtin_msa_bclri_b:
+  case Mips::BI__builtin_msa_bnegi_b:
+  case Mips::BI__builtin_msa_bseti_b:
+  case Mips::BI__builtin_msa_sat_s_b:
+  case Mips::BI__builtin_msa_sat_u_b:
+  case Mips::BI__builtin_msa_slli_b:
+  case Mips::BI__builtin_msa_srai_b:
+  case Mips::BI__builtin_msa_srari_b:
+  case Mips::BI__builtin_msa_srli_b:
+  case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
+  case Mips::BI__builtin_msa_binsli_b:
+  case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
+  // These intrinsics take an unsigned 4 bit immediate.
+  case Mips::BI__builtin_msa_bclri_h:
+  case Mips::BI__builtin_msa_bnegi_h:
+  case Mips::BI__builtin_msa_bseti_h:
+  case Mips::BI__builtin_msa_sat_s_h:
+  case Mips::BI__builtin_msa_sat_u_h:
+  case Mips::BI__builtin_msa_slli_h:
+  case Mips::BI__builtin_msa_srai_h:
+  case Mips::BI__builtin_msa_srari_h:
+  case Mips::BI__builtin_msa_srli_h:
+  case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
+  case Mips::BI__builtin_msa_binsli_h:
+  case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
+  // These intrinsics take an unsigned 5 bit immediate.
+  // The first block of intrinsics actually have an unsigned 5 bit field,
+  // not a df/n field.
+  case Mips::BI__builtin_msa_cfcmsa:
+  case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
+  case Mips::BI__builtin_msa_clei_u_b:
+  case Mips::BI__builtin_msa_clei_u_h:
+  case Mips::BI__builtin_msa_clei_u_w:
+  case Mips::BI__builtin_msa_clei_u_d:
+  case Mips::BI__builtin_msa_clti_u_b:
+  case Mips::BI__builtin_msa_clti_u_h:
+  case Mips::BI__builtin_msa_clti_u_w:
+  case Mips::BI__builtin_msa_clti_u_d:
+  case Mips::BI__builtin_msa_maxi_u_b:
+  case Mips::BI__builtin_msa_maxi_u_h:
+  case Mips::BI__builtin_msa_maxi_u_w:
+  case Mips::BI__builtin_msa_maxi_u_d:
+  case Mips::BI__builtin_msa_mini_u_b:
+  case Mips::BI__builtin_msa_mini_u_h:
+  case Mips::BI__builtin_msa_mini_u_w:
+  case Mips::BI__builtin_msa_mini_u_d:
+  case Mips::BI__builtin_msa_addvi_b:
+  case Mips::BI__builtin_msa_addvi_h:
+  case Mips::BI__builtin_msa_addvi_w:
+  case Mips::BI__builtin_msa_addvi_d:
+  case Mips::BI__builtin_msa_bclri_w:
+  case Mips::BI__builtin_msa_bnegi_w:
+  case Mips::BI__builtin_msa_bseti_w:
+  case Mips::BI__builtin_msa_sat_s_w:
+  case Mips::BI__builtin_msa_sat_u_w:
+  case Mips::BI__builtin_msa_slli_w:
+  case Mips::BI__builtin_msa_srai_w:
+  case Mips::BI__builtin_msa_srari_w:
+  case Mips::BI__builtin_msa_srli_w:
+  case Mips::BI__builtin_msa_srlri_w:
+  case Mips::BI__builtin_msa_subvi_b:
+  case Mips::BI__builtin_msa_subvi_h:
+  case Mips::BI__builtin_msa_subvi_w:
+  case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
+  case Mips::BI__builtin_msa_binsli_w:
+  case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
+  // These intrinsics take an unsigned 6 bit immediate.
+  case Mips::BI__builtin_msa_bclri_d:
+  case Mips::BI__builtin_msa_bnegi_d:
+  case Mips::BI__builtin_msa_bseti_d:
+  case Mips::BI__builtin_msa_sat_s_d:
+  case Mips::BI__builtin_msa_sat_u_d:
+  case Mips::BI__builtin_msa_slli_d:
+  case Mips::BI__builtin_msa_srai_d:
+  case Mips::BI__builtin_msa_srari_d:
+  case Mips::BI__builtin_msa_srli_d:
+  case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
+  case Mips::BI__builtin_msa_binsli_d:
+  case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
+  // These intrinsics take a signed 5 bit immediate.
+  case Mips::BI__builtin_msa_ceqi_b:
+  case Mips::BI__builtin_msa_ceqi_h:
+  case Mips::BI__builtin_msa_ceqi_w:
+  case Mips::BI__builtin_msa_ceqi_d:
+  case Mips::BI__builtin_msa_clti_s_b:
+  case Mips::BI__builtin_msa_clti_s_h:
+  case Mips::BI__builtin_msa_clti_s_w:
+  case Mips::BI__builtin_msa_clti_s_d:
+  case Mips::BI__builtin_msa_clei_s_b:
+  case Mips::BI__builtin_msa_clei_s_h:
+  case Mips::BI__builtin_msa_clei_s_w:
+  case Mips::BI__builtin_msa_clei_s_d:
+  case Mips::BI__builtin_msa_maxi_s_b:
+  case Mips::BI__builtin_msa_maxi_s_h:
+  case Mips::BI__builtin_msa_maxi_s_w:
+  case Mips::BI__builtin_msa_maxi_s_d:
+  case Mips::BI__builtin_msa_mini_s_b:
+  case Mips::BI__builtin_msa_mini_s_h:
+  case Mips::BI__builtin_msa_mini_s_w:
+  case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
+  // These intrinsics take an unsigned 8 bit immediate.
+  case Mips::BI__builtin_msa_andi_b:
+  case Mips::BI__builtin_msa_nori_b:
+  case Mips::BI__builtin_msa_ori_b:
+  case Mips::BI__builtin_msa_shf_b:
+  case Mips::BI__builtin_msa_shf_h:
+  case Mips::BI__builtin_msa_shf_w:
+  case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
+  case Mips::BI__builtin_msa_bseli_b:
+  case Mips::BI__builtin_msa_bmnzi_b:
+  case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
+  // df/n format
+  // These intrinsics take an unsigned 4 bit immediate.
+  case Mips::BI__builtin_msa_copy_s_b:
+  case Mips::BI__builtin_msa_copy_u_b:
+  case Mips::BI__builtin_msa_insve_b:
+  case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
+  case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
+  // These intrinsics take an unsigned 3 bit immediate.
+  case Mips::BI__builtin_msa_copy_s_h:
+  case Mips::BI__builtin_msa_copy_u_h:
+  case Mips::BI__builtin_msa_insve_h:
+  case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
+  case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
+  // These intrinsics take an unsigned 2 bit immediate.
+  case Mips::BI__builtin_msa_copy_s_w:
+  case Mips::BI__builtin_msa_copy_u_w:
+  case Mips::BI__builtin_msa_insve_w:
+  case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
+  case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
+  // These intrinsics take an unsigned 1 bit immediate.
+  case Mips::BI__builtin_msa_copy_s_d:
+  case Mips::BI__builtin_msa_copy_u_d:
+  case Mips::BI__builtin_msa_insve_d:
+  case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
+  case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
+  // Memory offsets and immediate loads.
+  // These intrinsics take a signed 10 bit immediate.
+  case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
+  case Mips::BI__builtin_msa_ldi_h:
+  case Mips::BI__builtin_msa_ldi_w:
+  case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
+  case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
+  case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
+  case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
+  case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
+  case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
+  case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
+  case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
+  case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
+  case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
+  case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
+  case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
+  case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
+  }
+
+  if (!m)
+    return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u);
+
+  // Memory-access builtins additionally require the offset immediate to be
+  // a multiple of the element size 'm'.
+  return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u) ||
+         SemaRef.BuiltinConstantArgMultiple(TheCall, i, m);
+}
+
+/// Handle the MIPS 'interrupt' attribute.
+/// The attribute takes at most one optional string argument selecting the
+/// interrupt type; with no argument an empty string is used, which
+/// ConvertStrToInterruptType maps to a default kind. The rest of the checks
+/// are listed in the comment block below.
+void SemaMIPS::handleInterruptAttr(Decl *D, const ParsedAttr &AL) {
+  // Only one optional argument permitted.
+  if (AL.getNumArgs() > 1) {
+    Diag(AL.getLoc(), diag::err_attribute_too_many_arguments) << AL << 1;
+    return;
+  }
+
+  StringRef Str;
+  SourceLocation ArgLoc;
+
+  if (AL.getNumArgs() == 0)
+    Str = "";
+  else if (!SemaRef.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
+    return;
+
+  // Semantic checks for a function with the 'interrupt' attribute for MIPS:
+  // a) Must be a function.
+  // b) Must have no parameters.
+  // c) Must have the 'void' return type.
+  // d) Cannot have the 'mips16' attribute, as that instruction set
+  //    lacks the 'eret' instruction.
+  // e) The attribute itself must either have no argument or one of the
+  //    valid interrupt types, see [MipsInterruptDocs].
+
+  if (!isFuncOrMethodForAttrSubject(D)) {
+    Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
+        << AL << AL.isRegularKeywordAttribute() << ExpectedFunctionOrMethod;
+    return;
+  }
+
+  if (hasFunctionProto(D) && getFunctionOrMethodNumParams(D) != 0) {
+    Diag(D->getLocation(), diag::warn_interrupt_attribute_invalid)
+        << /*MIPS*/ 0 << 0;
+    return;
+  }
+
+  if (!getFunctionOrMethodResultType(D)->isVoidType()) {
+    Diag(D->getLocation(), diag::warn_interrupt_attribute_invalid)
+        << /*MIPS*/ 0 << 1;
+    return;
+  }
+
+  // We still have to do this manually because the Interrupt attributes are
+  // a bit special due to sharing their spellings across targets.
+  if (checkAttrMutualExclusion<Mips16Attr>(*this, D, AL))
+    return;
+
+  // Reject any spelling that does not name a known MIPS interrupt type.
+  MipsInterruptAttr::InterruptType Kind;
+  if (!MipsInterruptAttr::ConvertStrToInterruptType(Str, Kind)) {
+    Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
+        << AL << "'" + std::string(Str) + "'";
+    return;
+  }
+
+  D->addAttr(::new (getASTContext())
+                 MipsInterruptAttr(getASTContext(), AL, Kind));
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaMSP430.cpp b/contrib/llvm-project/clang/lib/Sema/SemaMSP430.cpp
new file mode 100644
index 000000000000..4038a1ff61d6
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaMSP430.cpp
@@ -0,0 +1,78 @@
+//===------ SemaMSP430.cpp ----- MSP430 target-specific routines ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to MSP430.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaMSP430.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Sema/Attr.h"
+#include "clang/Sema/ParsedAttr.h"
+
+namespace clang {
+
+// Trivial constructor: wires this target-specific Sema part to the owning
+// Sema instance via the SemaBase base class.
+SemaMSP430::SemaMSP430(Sema &S) : SemaBase(S) {}
+
+/// Handle the MSP430 'interrupt' attribute.
+/// The attribute may only appear on a function with no parameters and a
+/// 'void' return type; its single integer-constant argument (the interrupt
+/// number) must be in the range [0, 63]. On success the attribute is
+/// attached and the declaration is implicitly marked 'used'.
+void SemaMSP430::handleInterruptAttr(Decl *D, const ParsedAttr &AL) {
+  // MSP430 'interrupt' attribute is applied to
+  // a function with no parameters and void return type.
+  if (!isFuncOrMethodForAttrSubject(D)) {
+    Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
+        << AL << AL.isRegularKeywordAttribute() << ExpectedFunctionOrMethod;
+    return;
+  }
+
+  if (hasFunctionProto(D) && getFunctionOrMethodNumParams(D) != 0) {
+    Diag(D->getLocation(), diag::warn_interrupt_attribute_invalid)
+        << /*MSP430*/ 1 << 0;
+    return;
+  }
+
+  if (!getFunctionOrMethodResultType(D)->isVoidType()) {
+    Diag(D->getLocation(), diag::warn_interrupt_attribute_invalid)
+        << /*MSP430*/ 1 << 1;
+    return;
+  }
+
+  // The attribute takes one integer argument.
+  if (!AL.checkExactlyNumArgs(SemaRef, 1))
+    return;
+
+  if (!AL.isArgExpr(0)) {
+    Diag(AL.getLoc(), diag::err_attribute_argument_type)
+        << AL << AANT_ArgumentIntegerConstant;
+    return;
+  }
+
+  Expr *NumParamsExpr = static_cast<Expr *>(AL.getArgAsExpr(0));
+  // Evaluate the argument directly; the previous code default-constructed a
+  // throwaway llvm::APSInt(32) that was immediately overwritten. This also
+  // matches the equivalent M68k handler.
+  auto NumParams = NumParamsExpr->getIntegerConstantExpr(getASTContext());
+  if (!NumParams) {
+    Diag(AL.getLoc(), diag::err_attribute_argument_type)
+        << AL << AANT_ArgumentIntegerConstant
+        << NumParamsExpr->getSourceRange();
+    return;
+  }
+  // The argument should be in range 0..63.
+  unsigned Num = NumParams->getLimitedValue(255);
+  if (Num > 63) {
+    Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
+        << AL << (int)NumParams->getSExtValue()
+        << NumParamsExpr->getSourceRange();
+    return;
+  }
+
+  D->addAttr(::new (getASTContext())
+                 MSP430InterruptAttr(getASTContext(), AL, Num));
+  // Keep the handler alive even though nothing references it directly.
+  D->addAttr(UsedAttr::CreateImplicit(getASTContext()));
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp b/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
index ed7f626971f3..3b84e7bd4277 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTMutationListener.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/SemaInternal.h"
@@ -73,6 +74,91 @@ static std::string stringFromPath(ModuleIdPath Path) {
return Name;
}
+/// Helper function for makeTransitiveImportsVisible to decide whether
+/// the \param Imported module unit is in the same module with the \param
+/// CurrentModule.
+/// \param FoundPrimaryModuleInterface is a helper parameter to record the
+/// primary module interface unit corresponding to the module \param
+/// CurrentModule, since it is currently expensive to decide whether two module
+/// units come from the same module by comparing module names.
+static bool
+isImportingModuleUnitFromSameModule(ASTContext &Ctx, Module *Imported,
+ Module *CurrentModule,
+ Module *&FoundPrimaryModuleInterface) {
+ if (!Imported->isNamedModule())
+ return false;
+
+ // A partition unit we're importing must be in the same module as the
+ // current module.
+ if (Imported->isModulePartition())
+ return true;
+
+ // If we found the primary module interface during the search process, we can
+ // return quickly to avoid expensive string comparison.
+ if (FoundPrimaryModuleInterface)
+ return Imported == FoundPrimaryModuleInterface;
+
+ if (!CurrentModule)
+ return false;
+
+ // Then the imported module must be a primary module interface unit. It
+ // is only allowed to import the primary module interface unit from the same
+ // module in the implementation unit and the implementation partition unit.
+
+ // Since we handled the implementation unit above, we only need to care
+ // about the implementation partition unit here.
+ if (!CurrentModule->isModulePartitionImplementation())
+ return false;
+
+ if (Ctx.isInSameModule(Imported, CurrentModule)) {
+ assert(!FoundPrimaryModuleInterface ||
+ FoundPrimaryModuleInterface == Imported);
+ FoundPrimaryModuleInterface = Imported;
+ return true;
+ }
+
+ return false;
+}
+
+/// [module.import]p7:
+/// Additionally, when a module-import-declaration in a module unit of some
+/// module M imports another module unit U of M, it also imports all
+/// translation units imported by non-exported module-import-declarations in
+/// the module unit purview of U. These rules can in turn lead to the
+/// importation of yet more translation units.
+static void
+makeTransitiveImportsVisible(ASTContext &Ctx, VisibleModuleSet &VisibleModules,
+ Module *Imported, Module *CurrentModule,
+ SourceLocation ImportLoc,
+ bool IsImportingPrimaryModuleInterface = false) {
+ assert(Imported->isNamedModule() &&
+ "'makeTransitiveImportsVisible()' is intended for standard C++ named "
+ "modules only.");
+
+ llvm::SmallVector<Module *, 4> Worklist;
+ Worklist.push_back(Imported);
+
+ Module *FoundPrimaryModuleInterface =
+ IsImportingPrimaryModuleInterface ? Imported : nullptr;
+
+ while (!Worklist.empty()) {
+ Module *Importing = Worklist.pop_back_val();
+
+ if (VisibleModules.isVisible(Importing))
+ continue;
+
+ // FIXME: The ImportLoc here is not meaningful. It may be problematic if we
+ // use the source location loaded from the visible modules.
+ VisibleModules.setVisible(Importing, ImportLoc);
+
+ if (isImportingModuleUnitFromSameModule(Ctx, Importing, CurrentModule,
+ FoundPrimaryModuleInterface))
+ for (Module *TransImported : Importing->Imports)
+ if (!VisibleModules.isVisible(TransImported))
+ Worklist.push_back(TransImported);
+ }
+}
+
Sema::DeclGroupPtrTy
Sema::ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc) {
// We start in the global module;
@@ -391,13 +477,17 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
getASTContext().setCurrentNamedModule(Mod);
+ if (auto *Listener = getASTMutationListener())
+ Listener->EnteringModulePurview();
+
// We already potentially made an implicit import (in the case of a module
// implementation unit importing its interface). Make this module visible
// and return the import decl to be added to the current TU.
if (Interface) {
- VisibleModules.setVisible(Interface, ModuleLoc);
- VisibleModules.makeTransitiveImportsVisible(Interface, ModuleLoc);
+ makeTransitiveImportsVisible(getASTContext(), VisibleModules, Interface,
+ Mod, ModuleLoc,
+ /*IsImportingPrimaryModuleInterface=*/true);
// Make the import decl for the interface in the impl module.
ImportDecl *Import = ImportDecl::Create(Context, CurContext, ModuleLoc,
@@ -554,7 +644,11 @@ DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
if (Mod->isHeaderUnit())
Diag(ImportLoc, diag::warn_experimental_header_unit);
- VisibleModules.setVisible(Mod, ImportLoc);
+ if (Mod->isNamedModule())
+ makeTransitiveImportsVisible(getASTContext(), VisibleModules, Mod,
+ getCurrentModule(), ImportLoc);
+ else
+ VisibleModules.setVisible(Mod, ImportLoc);
checkModuleImportContext(*this, Mod, ImportLoc, CurContext);
@@ -625,7 +719,7 @@ DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
return Import;
}
-void Sema::ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
+void Sema::ActOnAnnotModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext, true);
BuildModuleInclude(DirectiveLoc, Mod);
}
@@ -635,9 +729,9 @@ void Sema::BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
// in that buffer do not qualify as module imports; they're just an
// implementation detail of us building the module.
//
- // FIXME: Should we even get ActOnModuleInclude calls for those?
+ // FIXME: Should we even get ActOnAnnotModuleInclude calls for those?
bool IsInModuleIncludes =
- TUKind == TU_Module &&
+ TUKind == TU_ClangModule &&
getSourceManager().isWrittenInMainFile(DirectiveLoc);
// If we are really importing a module (not just checking layering) due to an
@@ -664,7 +758,7 @@ void Sema::BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
}
}
-void Sema::ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod) {
+void Sema::ActOnAnnotModuleBegin(SourceLocation DirectiveLoc, Module *Mod) {
checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext, true);
ModuleScopes.push_back({});
@@ -688,7 +782,7 @@ void Sema::ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod) {
}
}
-void Sema::ActOnModuleEnd(SourceLocation EomLoc, Module *Mod) {
+void Sema::ActOnAnnotModuleEnd(SourceLocation EomLoc, Module *Mod) {
if (getLangOpts().ModulesLocalVisibility) {
VisibleModules = std::move(ModuleScopes.back().OuterVisibleModules);
// Leaving a module hides namespace names, so our visible namespace cache
@@ -747,8 +841,6 @@ void Sema::createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
VisibleModules.setVisible(Mod, Loc);
}
-/// We have parsed the start of an export declaration, including the '{'
-/// (if present).
Decl *Sema::ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc) {
ExportDecl *D = ExportDecl::Create(Context, CurContext, ExportLoc);
@@ -763,23 +855,25 @@ Decl *Sema::ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
// An export-declaration shall appear only [...] in the purview of a module
// interface unit. An export-declaration shall not appear directly or
// indirectly within [...] a private-module-fragment.
- if (!isCurrentModulePurview()) {
- Diag(ExportLoc, diag::err_export_not_in_module_interface) << 0;
- D->setInvalidDecl();
- return D;
- } else if (currentModuleIsImplementation()) {
- Diag(ExportLoc, diag::err_export_not_in_module_interface) << 1;
- Diag(ModuleScopes.back().BeginLoc,
- diag::note_not_module_interface_add_export)
- << FixItHint::CreateInsertion(ModuleScopes.back().BeginLoc, "export ");
- D->setInvalidDecl();
- return D;
- } else if (ModuleScopes.back().Module->Kind ==
- Module::PrivateModuleFragment) {
- Diag(ExportLoc, diag::err_export_in_private_module_fragment);
- Diag(ModuleScopes.back().BeginLoc, diag::note_private_module_fragment);
- D->setInvalidDecl();
- return D;
+ if (!getLangOpts().HLSL) {
+ if (!isCurrentModulePurview()) {
+ Diag(ExportLoc, diag::err_export_not_in_module_interface) << 0;
+ D->setInvalidDecl();
+ return D;
+ } else if (currentModuleIsImplementation()) {
+ Diag(ExportLoc, diag::err_export_not_in_module_interface) << 1;
+ Diag(ModuleScopes.back().BeginLoc,
+ diag::note_not_module_interface_add_export)
+ << FixItHint::CreateInsertion(ModuleScopes.back().BeginLoc, "export ");
+ D->setInvalidDecl();
+ return D;
+ } else if (ModuleScopes.back().Module->Kind ==
+ Module::PrivateModuleFragment) {
+ Diag(ExportLoc, diag::err_export_in_private_module_fragment);
+ Diag(ModuleScopes.back().BeginLoc, diag::note_private_module_fragment);
+ D->setInvalidDecl();
+ return D;
+ }
}
for (const DeclContext *DC = CurContext; DC; DC = DC->getLexicalParent()) {
@@ -799,7 +893,7 @@ Decl *Sema::ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
//
// Defer exporting the namespace until after we leave it, in order to
// avoid marking all subsequent declarations in the namespace as exported.
- if (!DeferredExportedNamespaces.insert(ND).second)
+ if (!getLangOpts().HLSL && !DeferredExportedNamespaces.insert(ND).second)
break;
}
}
@@ -814,7 +908,9 @@ Decl *Sema::ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
return D;
}
- D->setModuleOwnershipKind(Decl::ModuleOwnershipKind::VisibleWhenImported);
+ if (!getLangOpts().HLSL)
+ D->setModuleOwnershipKind(Decl::ModuleOwnershipKind::VisibleWhenImported);
+
return D;
}
@@ -832,6 +928,16 @@ static bool checkExportedDeclContext(Sema &S, DeclContext *DC,
/// Check that it's valid to export \p D.
static bool checkExportedDecl(Sema &S, Decl *D, SourceLocation BlockStart) {
+ // HLSL: export declaration is valid only on functions
+ if (S.getLangOpts().HLSL) {
+ // Export-within-export was already diagnosed in ActOnStartExportDecl
+ if (!dyn_cast<FunctionDecl>(D) && !dyn_cast<ExportDecl>(D)) {
+ S.Diag(D->getBeginLoc(), diag::err_hlsl_export_not_on_function);
+ D->setInvalidDecl();
+ return false;
+ }
+ }
+
// C++20 [module.interface]p3:
// [...] it shall not declare a name with internal linkage.
bool HasName = false;
@@ -884,7 +990,6 @@ static bool checkExportedDecl(Sema &S, Decl *D, SourceLocation BlockStart) {
return true;
}
-/// Complete the definition of an export declaration.
Decl *Sema::ActOnFinishExportDecl(Scope *S, Decl *D, SourceLocation RBraceLoc) {
auto *ED = cast<ExportDecl>(D);
if (RBraceLoc.isValid())
@@ -911,6 +1016,10 @@ Decl *Sema::ActOnFinishExportDecl(Scope *S, Decl *D, SourceLocation RBraceLoc) {
}
}
+ // Anything exported from a module should never be considered unused.
+ for (auto *Exported : ED->decls())
+ Exported->markUsed(getASTContext());
+
return D;
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaNVPTX.cpp b/contrib/llvm-project/clang/lib/Sema/SemaNVPTX.cpp
new file mode 100644
index 000000000000..cc8941071463
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaNVPTX.cpp
@@ -0,0 +1,35 @@
+//===------ SemaNVPTX.cpp ------- NVPTX target-specific routines ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to NVPTX.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaNVPTX.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Sema.h"
+
+namespace clang {
+
+SemaNVPTX::SemaNVPTX(Sema &S) : SemaBase(S) {}
+
+bool SemaNVPTX::CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ switch (BuiltinID) {
+ case NVPTX::BI__nvvm_cp_async_ca_shared_global_4:
+ case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
+ case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
+ case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
+ return SemaRef.checkArgCountAtMost(TheCall, 3);
+ }
+
+ return false;
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaObjC.cpp b/contrib/llvm-project/clang/lib/Sema/SemaObjC.cpp
new file mode 100644
index 000000000000..75233689769c
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaObjC.cpp
@@ -0,0 +1,2408 @@
+//===----- SemaObjC.cpp ---- Semantic Analysis for Objective-C ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements semantic analysis for Objective-C.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaObjC.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Attr.h"
+#include "clang/Sema/ParsedAttr.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/TemplateDeduction.h"
+#include "llvm/Support/ConvertUTF.h"
+
+namespace clang {
+
+SemaObjC::SemaObjC(Sema &S)
+ : SemaBase(S), NSNumberDecl(nullptr), NSValueDecl(nullptr),
+ NSStringDecl(nullptr), StringWithUTF8StringMethod(nullptr),
+ ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr),
+ ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr),
+ DictionaryWithObjectsMethod(nullptr) {}
+
+StmtResult SemaObjC::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
+ Stmt *First, Expr *collection,
+ SourceLocation RParenLoc) {
+ ASTContext &Context = getASTContext();
+ SemaRef.setFunctionHasBranchProtectedScope();
+
+ ExprResult CollectionExprResult =
+ CheckObjCForCollectionOperand(ForLoc, collection);
+
+ if (First) {
+ QualType FirstType;
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(First)) {
+ if (!DS->isSingleDecl())
+ return StmtError(Diag((*DS->decl_begin())->getLocation(),
+ diag::err_toomany_element_decls));
+
+ VarDecl *D = dyn_cast<VarDecl>(DS->getSingleDecl());
+ if (!D || D->isInvalidDecl())
+ return StmtError();
+
+ FirstType = D->getType();
+ // C99 6.8.5p3: The declaration part of a 'for' statement shall only
+ // declare identifiers for objects having storage class 'auto' or
+ // 'register'.
+ if (!D->hasLocalStorage())
+ return StmtError(
+ Diag(D->getLocation(), diag::err_non_local_variable_decl_in_for));
+
+ // If the type contained 'auto', deduce the 'auto' to 'id'.
+ if (FirstType->getContainedAutoType()) {
+ SourceLocation Loc = D->getLocation();
+ OpaqueValueExpr OpaqueId(Loc, Context.getObjCIdType(), VK_PRValue);
+ Expr *DeducedInit = &OpaqueId;
+ sema::TemplateDeductionInfo Info(Loc);
+ FirstType = QualType();
+ TemplateDeductionResult Result = SemaRef.DeduceAutoType(
+ D->getTypeSourceInfo()->getTypeLoc(), DeducedInit, FirstType, Info);
+ if (Result != TemplateDeductionResult::Success &&
+ Result != TemplateDeductionResult::AlreadyDiagnosed)
+ SemaRef.DiagnoseAutoDeductionFailure(D, DeducedInit);
+ if (FirstType.isNull()) {
+ D->setInvalidDecl();
+ return StmtError();
+ }
+
+ D->setType(FirstType);
+
+ if (!SemaRef.inTemplateInstantiation()) {
+ SourceLocation Loc =
+ D->getTypeSourceInfo()->getTypeLoc().getBeginLoc();
+ Diag(Loc, diag::warn_auto_var_is_id) << D->getDeclName();
+ }
+ }
+
+ } else {
+ Expr *FirstE = cast<Expr>(First);
+ if (!FirstE->isTypeDependent() && !FirstE->isLValue())
+ return StmtError(
+ Diag(First->getBeginLoc(), diag::err_selector_element_not_lvalue)
+ << First->getSourceRange());
+
+ FirstType = static_cast<Expr *>(First)->getType();
+ if (FirstType.isConstQualified())
+ Diag(ForLoc, diag::err_selector_element_const_type)
+ << FirstType << First->getSourceRange();
+ }
+ if (!FirstType->isDependentType() &&
+ !FirstType->isObjCObjectPointerType() &&
+ !FirstType->isBlockPointerType())
+ return StmtError(Diag(ForLoc, diag::err_selector_element_type)
+ << FirstType << First->getSourceRange());
+ }
+
+ if (CollectionExprResult.isInvalid())
+ return StmtError();
+
+ CollectionExprResult = SemaRef.ActOnFinishFullExpr(CollectionExprResult.get(),
+ /*DiscardedValue*/ false);
+ if (CollectionExprResult.isInvalid())
+ return StmtError();
+
+ return new (Context) ObjCForCollectionStmt(First, CollectionExprResult.get(),
+ nullptr, ForLoc, RParenLoc);
+}
+
+ExprResult SemaObjC::CheckObjCForCollectionOperand(SourceLocation forLoc,
+ Expr *collection) {
+ ASTContext &Context = getASTContext();
+ if (!collection)
+ return ExprError();
+
+ ExprResult result = SemaRef.CorrectDelayedTyposInExpr(collection);
+ if (!result.isUsable())
+ return ExprError();
+ collection = result.get();
+
+ // Bail out early if we've got a type-dependent expression.
+ if (collection->isTypeDependent())
+ return collection;
+
+ // Perform normal l-value conversion.
+ result = SemaRef.DefaultFunctionArrayLvalueConversion(collection);
+ if (result.isInvalid())
+ return ExprError();
+ collection = result.get();
+
+ // The operand needs to have object-pointer type.
+ // TODO: should we do a contextual conversion?
+ const ObjCObjectPointerType *pointerType =
+ collection->getType()->getAs<ObjCObjectPointerType>();
+ if (!pointerType)
+ return Diag(forLoc, diag::err_collection_expr_type)
+ << collection->getType() << collection->getSourceRange();
+
+ // Check that the operand provides
+ // - countByEnumeratingWithState:objects:count:
+ const ObjCObjectType *objectType = pointerType->getObjectType();
+ ObjCInterfaceDecl *iface = objectType->getInterface();
+
+ // If we have a forward-declared type, we can't do this check.
+ // Under ARC, it is an error not to have a forward-declared class.
+ if (iface &&
+ (getLangOpts().ObjCAutoRefCount
+ ? SemaRef.RequireCompleteType(forLoc, QualType(objectType, 0),
+ diag::err_arc_collection_forward,
+ collection)
+ : !SemaRef.isCompleteType(forLoc, QualType(objectType, 0)))) {
+ // Otherwise, if we have any useful type information, check that
+ // the type declares the appropriate method.
+ } else if (iface || !objectType->qual_empty()) {
+ const IdentifierInfo *selectorIdents[] = {
+ &Context.Idents.get("countByEnumeratingWithState"),
+ &Context.Idents.get("objects"), &Context.Idents.get("count")};
+ Selector selector = Context.Selectors.getSelector(3, &selectorIdents[0]);
+
+ ObjCMethodDecl *method = nullptr;
+
+ // If there's an interface, look in both the public and private APIs.
+ if (iface) {
+ method = iface->lookupInstanceMethod(selector);
+ if (!method)
+ method = iface->lookupPrivateMethod(selector);
+ }
+
+ // Also check protocol qualifiers.
+ if (!method)
+ method = LookupMethodInQualifiedType(selector, pointerType,
+ /*instance*/ true);
+
+ // If we didn't find it anywhere, give up.
+ if (!method) {
+ Diag(forLoc, diag::warn_collection_expr_type)
+ << collection->getType() << selector << collection->getSourceRange();
+ }
+
+ // TODO: check for an incompatible signature?
+ }
+
+ // Wrap up any cleanups in the expression.
+ return collection;
+}
+
+StmtResult SemaObjC::FinishObjCForCollectionStmt(Stmt *S, Stmt *B) {
+ if (!S || !B)
+ return StmtError();
+ ObjCForCollectionStmt *ForStmt = cast<ObjCForCollectionStmt>(S);
+
+ ForStmt->setBody(B);
+ return S;
+}
+
+StmtResult SemaObjC::ActOnObjCAtCatchStmt(SourceLocation AtLoc,
+ SourceLocation RParen, Decl *Parm,
+ Stmt *Body) {
+ ASTContext &Context = getASTContext();
+ VarDecl *Var = cast_or_null<VarDecl>(Parm);
+ if (Var && Var->isInvalidDecl())
+ return StmtError();
+
+ return new (Context) ObjCAtCatchStmt(AtLoc, RParen, Var, Body);
+}
+
+StmtResult SemaObjC::ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body) {
+ ASTContext &Context = getASTContext();
+ return new (Context) ObjCAtFinallyStmt(AtLoc, Body);
+}
+
+StmtResult SemaObjC::ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
+ MultiStmtArg CatchStmts,
+ Stmt *Finally) {
+ ASTContext &Context = getASTContext();
+ if (!getLangOpts().ObjCExceptions)
+ Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@try";
+
+ // Objective-C try is incompatible with SEH __try.
+ sema::FunctionScopeInfo *FSI = SemaRef.getCurFunction();
+ if (FSI->FirstSEHTryLoc.isValid()) {
+ Diag(AtLoc, diag::err_mixing_cxx_try_seh_try) << 1;
+ Diag(FSI->FirstSEHTryLoc, diag::note_conflicting_try_here) << "'__try'";
+ }
+
+ FSI->setHasObjCTry(AtLoc);
+ unsigned NumCatchStmts = CatchStmts.size();
+ return ObjCAtTryStmt::Create(Context, AtLoc, Try, CatchStmts.data(),
+ NumCatchStmts, Finally);
+}
+
+StmtResult SemaObjC::BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw) {
+ ASTContext &Context = getASTContext();
+ if (Throw) {
+ ExprResult Result = SemaRef.DefaultLvalueConversion(Throw);
+ if (Result.isInvalid())
+ return StmtError();
+
+ Result =
+ SemaRef.ActOnFinishFullExpr(Result.get(), /*DiscardedValue*/ false);
+ if (Result.isInvalid())
+ return StmtError();
+ Throw = Result.get();
+
+ QualType ThrowType = Throw->getType();
+ // Make sure the expression type is an ObjC pointer or "void *".
+ if (!ThrowType->isDependentType() &&
+ !ThrowType->isObjCObjectPointerType()) {
+ const PointerType *PT = ThrowType->getAs<PointerType>();
+ if (!PT || !PT->getPointeeType()->isVoidType())
+ return StmtError(Diag(AtLoc, diag::err_objc_throw_expects_object)
+ << Throw->getType() << Throw->getSourceRange());
+ }
+ }
+
+ return new (Context) ObjCAtThrowStmt(AtLoc, Throw);
+}
+
+StmtResult SemaObjC::ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
+ Scope *CurScope) {
+ if (!getLangOpts().ObjCExceptions)
+ Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@throw";
+
+ if (!Throw) {
+ // @throw without an expression designates a rethrow (which must occur
+ // in the context of an @catch clause).
+ Scope *AtCatchParent = CurScope;
+ while (AtCatchParent && !AtCatchParent->isAtCatchScope())
+ AtCatchParent = AtCatchParent->getParent();
+ if (!AtCatchParent)
+ return StmtError(Diag(AtLoc, diag::err_rethrow_used_outside_catch));
+ }
+ return BuildObjCAtThrowStmt(AtLoc, Throw);
+}
+
+ExprResult SemaObjC::ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
+ Expr *operand) {
+ ExprResult result = SemaRef.DefaultLvalueConversion(operand);
+ if (result.isInvalid())
+ return ExprError();
+ operand = result.get();
+
+ // Make sure the expression type is an ObjC pointer or "void *".
+ QualType type = operand->getType();
+ if (!type->isDependentType() && !type->isObjCObjectPointerType()) {
+ const PointerType *pointerType = type->getAs<PointerType>();
+ if (!pointerType || !pointerType->getPointeeType()->isVoidType()) {
+ if (getLangOpts().CPlusPlus) {
+ if (SemaRef.RequireCompleteType(atLoc, type,
+ diag::err_incomplete_receiver_type))
+ return Diag(atLoc, diag::err_objc_synchronized_expects_object)
+ << type << operand->getSourceRange();
+
+ ExprResult result =
+ SemaRef.PerformContextuallyConvertToObjCPointer(operand);
+ if (result.isInvalid())
+ return ExprError();
+ if (!result.isUsable())
+ return Diag(atLoc, diag::err_objc_synchronized_expects_object)
+ << type << operand->getSourceRange();
+
+ operand = result.get();
+ } else {
+ return Diag(atLoc, diag::err_objc_synchronized_expects_object)
+ << type << operand->getSourceRange();
+ }
+ }
+ }
+
+ // The operand to @synchronized is a full-expression.
+ return SemaRef.ActOnFinishFullExpr(operand, /*DiscardedValue*/ false);
+}
+
+StmtResult SemaObjC::ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
+ Expr *SyncExpr,
+ Stmt *SyncBody) {
+ ASTContext &Context = getASTContext();
+ // We can't jump into or indirect-jump out of a @synchronized block.
+ SemaRef.setFunctionHasBranchProtectedScope();
+ return new (Context) ObjCAtSynchronizedStmt(AtLoc, SyncExpr, SyncBody);
+}
+
+StmtResult SemaObjC::ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc,
+ Stmt *Body) {
+ ASTContext &Context = getASTContext();
+ SemaRef.setFunctionHasBranchProtectedScope();
+ return new (Context) ObjCAutoreleasePoolStmt(AtLoc, Body);
+}
+
+TypeResult SemaObjC::actOnObjCProtocolQualifierType(
+ SourceLocation lAngleLoc, ArrayRef<Decl *> protocols,
+ ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc) {
+ ASTContext &Context = getASTContext();
+ // Form id<protocol-list>.
+ QualType Result = Context.getObjCObjectType(
+ Context.ObjCBuiltinIdTy, {},
+ llvm::ArrayRef((ObjCProtocolDecl *const *)protocols.data(),
+ protocols.size()),
+ false);
+ Result = Context.getObjCObjectPointerType(Result);
+
+ TypeSourceInfo *ResultTInfo = Context.CreateTypeSourceInfo(Result);
+ TypeLoc ResultTL = ResultTInfo->getTypeLoc();
+
+ auto ObjCObjectPointerTL = ResultTL.castAs<ObjCObjectPointerTypeLoc>();
+ ObjCObjectPointerTL.setStarLoc(SourceLocation()); // implicit
+
+ auto ObjCObjectTL =
+ ObjCObjectPointerTL.getPointeeLoc().castAs<ObjCObjectTypeLoc>();
+ ObjCObjectTL.setHasBaseTypeAsWritten(false);
+ ObjCObjectTL.getBaseLoc().initialize(Context, SourceLocation());
+
+ // No type arguments.
+ ObjCObjectTL.setTypeArgsLAngleLoc(SourceLocation());
+ ObjCObjectTL.setTypeArgsRAngleLoc(SourceLocation());
+
+ // Fill in protocol qualifiers.
+ ObjCObjectTL.setProtocolLAngleLoc(lAngleLoc);
+ ObjCObjectTL.setProtocolRAngleLoc(rAngleLoc);
+ for (unsigned i = 0, n = protocols.size(); i != n; ++i)
+ ObjCObjectTL.setProtocolLoc(i, protocolLocs[i]);
+
+ // We're done. Return the completed type to the parser.
+ return SemaRef.CreateParsedType(Result, ResultTInfo);
+}
+
+TypeResult SemaObjC::actOnObjCTypeArgsAndProtocolQualifiers(
+ Scope *S, SourceLocation Loc, ParsedType BaseType,
+ SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs,
+ SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc,
+ ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs,
+ SourceLocation ProtocolRAngleLoc) {
+ ASTContext &Context = getASTContext();
+ TypeSourceInfo *BaseTypeInfo = nullptr;
+ QualType T = SemaRef.GetTypeFromParser(BaseType, &BaseTypeInfo);
+ if (T.isNull())
+ return true;
+
+ // Handle missing type-source info.
+ if (!BaseTypeInfo)
+ BaseTypeInfo = Context.getTrivialTypeSourceInfo(T, Loc);
+
+ // Extract type arguments.
+ SmallVector<TypeSourceInfo *, 4> ActualTypeArgInfos;
+ for (unsigned i = 0, n = TypeArgs.size(); i != n; ++i) {
+ TypeSourceInfo *TypeArgInfo = nullptr;
+ QualType TypeArg = SemaRef.GetTypeFromParser(TypeArgs[i], &TypeArgInfo);
+ if (TypeArg.isNull()) {
+ ActualTypeArgInfos.clear();
+ break;
+ }
+
+ assert(TypeArgInfo && "No type source info?");
+ ActualTypeArgInfos.push_back(TypeArgInfo);
+ }
+
+ // Build the object type.
+ QualType Result = BuildObjCObjectType(
+ T, BaseTypeInfo->getTypeLoc().getSourceRange().getBegin(),
+ TypeArgsLAngleLoc, ActualTypeArgInfos, TypeArgsRAngleLoc,
+ ProtocolLAngleLoc,
+ llvm::ArrayRef((ObjCProtocolDecl *const *)Protocols.data(),
+ Protocols.size()),
+ ProtocolLocs, ProtocolRAngleLoc,
+ /*FailOnError=*/false,
+ /*Rebuilding=*/false);
+
+ if (Result == T)
+ return BaseType;
+
+ // Create source information for this type.
+ TypeSourceInfo *ResultTInfo = Context.CreateTypeSourceInfo(Result);
+ TypeLoc ResultTL = ResultTInfo->getTypeLoc();
+
+ // For id<Proto1, Proto2> or Class<Proto1, Proto2>, we'll have an
+ // object pointer type. Fill in source information for it.
+ if (auto ObjCObjectPointerTL = ResultTL.getAs<ObjCObjectPointerTypeLoc>()) {
+ // The '*' is implicit.
+ ObjCObjectPointerTL.setStarLoc(SourceLocation());
+ ResultTL = ObjCObjectPointerTL.getPointeeLoc();
+ }
+
+ if (auto OTPTL = ResultTL.getAs<ObjCTypeParamTypeLoc>()) {
+ // Protocol qualifier information.
+ if (OTPTL.getNumProtocols() > 0) {
+ assert(OTPTL.getNumProtocols() == Protocols.size());
+ OTPTL.setProtocolLAngleLoc(ProtocolLAngleLoc);
+ OTPTL.setProtocolRAngleLoc(ProtocolRAngleLoc);
+ for (unsigned i = 0, n = Protocols.size(); i != n; ++i)
+ OTPTL.setProtocolLoc(i, ProtocolLocs[i]);
+ }
+
+ // We're done. Return the completed type to the parser.
+ return SemaRef.CreateParsedType(Result, ResultTInfo);
+ }
+
+ auto ObjCObjectTL = ResultTL.castAs<ObjCObjectTypeLoc>();
+
+ // Type argument information.
+ if (ObjCObjectTL.getNumTypeArgs() > 0) {
+ assert(ObjCObjectTL.getNumTypeArgs() == ActualTypeArgInfos.size());
+ ObjCObjectTL.setTypeArgsLAngleLoc(TypeArgsLAngleLoc);
+ ObjCObjectTL.setTypeArgsRAngleLoc(TypeArgsRAngleLoc);
+ for (unsigned i = 0, n = ActualTypeArgInfos.size(); i != n; ++i)
+ ObjCObjectTL.setTypeArgTInfo(i, ActualTypeArgInfos[i]);
+ } else {
+ ObjCObjectTL.setTypeArgsLAngleLoc(SourceLocation());
+ ObjCObjectTL.setTypeArgsRAngleLoc(SourceLocation());
+ }
+
+ // Protocol qualifier information.
+ if (ObjCObjectTL.getNumProtocols() > 0) {
+ assert(ObjCObjectTL.getNumProtocols() == Protocols.size());
+ ObjCObjectTL.setProtocolLAngleLoc(ProtocolLAngleLoc);
+ ObjCObjectTL.setProtocolRAngleLoc(ProtocolRAngleLoc);
+ for (unsigned i = 0, n = Protocols.size(); i != n; ++i)
+ ObjCObjectTL.setProtocolLoc(i, ProtocolLocs[i]);
+ } else {
+ ObjCObjectTL.setProtocolLAngleLoc(SourceLocation());
+ ObjCObjectTL.setProtocolRAngleLoc(SourceLocation());
+ }
+
+ // Base type.
+ ObjCObjectTL.setHasBaseTypeAsWritten(true);
+ if (ObjCObjectTL.getType() == T)
+ ObjCObjectTL.getBaseLoc().initializeFullCopy(BaseTypeInfo->getTypeLoc());
+ else
+ ObjCObjectTL.getBaseLoc().initialize(Context, Loc);
+
+ // We're done. Return the completed type to the parser.
+ return SemaRef.CreateParsedType(Result, ResultTInfo);
+}
+
+QualType SemaObjC::BuildObjCTypeParamType(
+ const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc,
+ ArrayRef<ObjCProtocolDecl *> Protocols,
+ ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc,
+ bool FailOnError) {
+ ASTContext &Context = getASTContext();
+ QualType Result = QualType(Decl->getTypeForDecl(), 0);
+ if (!Protocols.empty()) {
+ bool HasError;
+ Result = Context.applyObjCProtocolQualifiers(Result, Protocols, HasError);
+ if (HasError) {
+ Diag(SourceLocation(), diag::err_invalid_protocol_qualifiers)
+ << SourceRange(ProtocolLAngleLoc, ProtocolRAngleLoc);
+ if (FailOnError)
+ Result = QualType();
+ }
+ if (FailOnError && Result.isNull())
+ return QualType();
+ }
+
+ return Result;
+}
+
/// Apply Objective-C type arguments to the given type.
///
/// Checks each written type argument against the bound of the corresponding
/// type parameter and, on success, forms the specialized object type.
///
/// \param S Semantic-analysis object used for diagnostics and type queries.
/// \param loc Location at which applicability/arity diagnostics are reported.
/// \param type The type being specialized; must be a parameterized,
///        not-yet-specialized Objective-C class type.
/// \param typeArgs Source information for each written type argument.
/// \param typeArgsRange Source range covering the <...> argument list.
/// \param failOnError When true, any error yields a null QualType; otherwise
///        the error is diagnosed and \p type is returned unmodified.
/// \param rebuilding True when re-checking a previously-built type; suppresses
///        the qualified-type-argument diagnostic, since qualifiers can appear
///        legitimately through substitution.
static QualType applyObjCTypeArgs(Sema &S, SourceLocation loc, QualType type,
                                  ArrayRef<TypeSourceInfo *> typeArgs,
                                  SourceRange typeArgsRange, bool failOnError,
                                  bool rebuilding) {
  // We can only apply type arguments to an Objective-C class type.
  const auto *objcObjectType = type->getAs<ObjCObjectType>();
  if (!objcObjectType || !objcObjectType->getInterface()) {
    S.Diag(loc, diag::err_objc_type_args_non_class) << type << typeArgsRange;

    if (failOnError)
      return QualType();
    return type;
  }

  // The class type must be parameterized.
  ObjCInterfaceDecl *objcClass = objcObjectType->getInterface();
  ObjCTypeParamList *typeParams = objcClass->getTypeParamList();
  if (!typeParams) {
    S.Diag(loc, diag::err_objc_type_args_non_parameterized_class)
        << objcClass->getDeclName() << FixItHint::CreateRemoval(typeArgsRange);

    if (failOnError)
      return QualType();

    return type;
  }

  // The type must not already be specialized.
  if (objcObjectType->isSpecialized()) {
    S.Diag(loc, diag::err_objc_type_args_specialized_class)
        << type << FixItHint::CreateRemoval(typeArgsRange);

    if (failOnError)
      return QualType();

    return type;
  }

  // Check the type arguments.
  SmallVector<QualType, 4> finalTypeArgs;
  unsigned numTypeParams = typeParams->size();
  bool anyPackExpansions = false;
  for (unsigned i = 0, n = typeArgs.size(); i != n; ++i) {
    TypeSourceInfo *typeArgInfo = typeArgs[i];
    QualType typeArg = typeArgInfo->getType();

    // Type arguments cannot have explicit qualifiers or nullability.
    // We ignore indirect sources of these, e.g. behind typedefs or
    // template arguments.
    if (TypeLoc qual = typeArgInfo->getTypeLoc().findExplicitQualifierLoc()) {
      bool diagnosed = false;
      SourceRange rangeToRemove;
      if (auto attr = qual.getAs<AttributedTypeLoc>()) {
        rangeToRemove = attr.getLocalSourceRange();
        if (attr.getTypePtr()->getImmediateNullability()) {
          typeArg = attr.getTypePtr()->getModifiedType();
          S.Diag(attr.getBeginLoc(),
                 diag::err_objc_type_arg_explicit_nullability)
              << typeArg << FixItHint::CreateRemoval(rangeToRemove);
          diagnosed = true;
        }
      }

      // When rebuilding, qualifiers might have gotten here through a
      // final substitution.
      if (!rebuilding && !diagnosed) {
        S.Diag(qual.getBeginLoc(), diag::err_objc_type_arg_qualified)
            << typeArg << typeArg.getQualifiers().getAsString()
            << FixItHint::CreateRemoval(rangeToRemove);
      }
    }

    // Remove qualifiers even if they're non-local.
    typeArg = typeArg.getUnqualifiedType();

    finalTypeArgs.push_back(typeArg);

    if (typeArg->getAs<PackExpansionType>())
      anyPackExpansions = true;

    // Find the corresponding type parameter, if there is one.
    ObjCTypeParamDecl *typeParam = nullptr;
    if (!anyPackExpansions) {
      if (i < numTypeParams) {
        typeParam = typeParams->begin()[i];
      } else {
        // Too many arguments.
        S.Diag(loc, diag::err_objc_type_args_wrong_arity)
            << false << objcClass->getDeclName() << (unsigned)typeArgs.size()
            << numTypeParams;
        S.Diag(objcClass->getLocation(), diag::note_previous_decl) << objcClass;

        if (failOnError)
          return QualType();

        return type;
      }
    }

    // Objective-C object pointer types must be substitutable for the bounds.
    if (const auto *typeArgObjC = typeArg->getAs<ObjCObjectPointerType>()) {
      // If we don't have a type parameter to match against, assume
      // everything is fine. There was a prior pack expansion that
      // means we won't be able to match anything.
      if (!typeParam) {
        assert(anyPackExpansions && "Too many arguments?");
        continue;
      }

      // Retrieve the bound.
      QualType bound = typeParam->getUnderlyingType();
      const auto *boundObjC = bound->castAs<ObjCObjectPointerType>();

      // Determine whether the type argument is substitutable for the bound.
      if (typeArgObjC->isObjCIdType()) {
        // When the type argument is 'id', the only acceptable type
        // parameter bound is 'id'.
        if (boundObjC->isObjCIdType())
          continue;
      } else if (S.Context.canAssignObjCInterfaces(boundObjC, typeArgObjC)) {
        // Otherwise, we follow the assignability rules.
        continue;
      }

      // Diagnose the mismatch.
      S.Diag(typeArgInfo->getTypeLoc().getBeginLoc(),
             diag::err_objc_type_arg_does_not_match_bound)
          << typeArg << bound << typeParam->getDeclName();
      S.Diag(typeParam->getLocation(), diag::note_objc_type_param_here)
          << typeParam->getDeclName();

      if (failOnError)
        return QualType();

      return type;
    }

    // Block pointer types are permitted for unqualified 'id' bounds.
    if (typeArg->isBlockPointerType()) {
      // If we don't have a type parameter to match against, assume
      // everything is fine. There was a prior pack expansion that
      // means we won't be able to match anything.
      if (!typeParam) {
        assert(anyPackExpansions && "Too many arguments?");
        continue;
      }

      // Retrieve the bound.
      QualType bound = typeParam->getUnderlyingType();
      if (bound->isBlockCompatibleObjCPointerType(S.Context))
        continue;

      // Diagnose the mismatch.
      S.Diag(typeArgInfo->getTypeLoc().getBeginLoc(),
             diag::err_objc_type_arg_does_not_match_bound)
          << typeArg << bound << typeParam->getDeclName();
      S.Diag(typeParam->getLocation(), diag::note_objc_type_param_here)
          << typeParam->getDeclName();

      if (failOnError)
        return QualType();

      return type;
    }

    // Types that have __attribute__((NSObject)) are permitted.
    if (typeArg->isObjCNSObjectType()) {
      continue;
    }

    // Dependent types will be checked at instantiation time.
    if (typeArg->isDependentType()) {
      continue;
    }

    // Diagnose non-id-compatible type arguments.
    S.Diag(typeArgInfo->getTypeLoc().getBeginLoc(),
           diag::err_objc_type_arg_not_id_compatible)
        << typeArg << typeArgInfo->getTypeLoc().getSourceRange();

    if (failOnError)
      return QualType();

    return type;
  }

  // Make sure we didn't have the wrong number of arguments.
  if (!anyPackExpansions && finalTypeArgs.size() != numTypeParams) {
    S.Diag(loc, diag::err_objc_type_args_wrong_arity)
        << (typeArgs.size() < typeParams->size()) << objcClass->getDeclName()
        << (unsigned)finalTypeArgs.size() << (unsigned)numTypeParams;
    S.Diag(objcClass->getLocation(), diag::note_previous_decl) << objcClass;

    if (failOnError)
      return QualType();

    return type;
  }

  // Success. Form the specialized type.
  return S.Context.getObjCObjectType(type, finalTypeArgs, {}, false);
}
+
+QualType SemaObjC::BuildObjCObjectType(
+ QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc,
+ ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc,
+ SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols,
+ ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc,
+ bool FailOnError, bool Rebuilding) {
+ ASTContext &Context = getASTContext();
+ QualType Result = BaseType;
+ if (!TypeArgs.empty()) {
+ Result =
+ applyObjCTypeArgs(SemaRef, Loc, Result, TypeArgs,
+ SourceRange(TypeArgsLAngleLoc, TypeArgsRAngleLoc),
+ FailOnError, Rebuilding);
+ if (FailOnError && Result.isNull())
+ return QualType();
+ }
+
+ if (!Protocols.empty()) {
+ bool HasError;
+ Result = Context.applyObjCProtocolQualifiers(Result, Protocols, HasError);
+ if (HasError) {
+ Diag(Loc, diag::err_invalid_protocol_qualifiers)
+ << SourceRange(ProtocolLAngleLoc, ProtocolRAngleLoc);
+ if (FailOnError)
+ Result = QualType();
+ }
+ if (FailOnError && Result.isNull())
+ return QualType();
+ }
+
+ return Result;
+}
+
+ParsedType SemaObjC::ActOnObjCInstanceType(SourceLocation Loc) {
+ ASTContext &Context = getASTContext();
+ QualType T = Context.getObjCInstanceType();
+ TypeSourceInfo *TInfo = Context.getTrivialTypeSourceInfo(T, Loc);
+ return SemaRef.CreateParsedType(T, TInfo);
+}
+
+//===--- CHECK: Objective-C retain cycles ----------------------------------//
+
namespace {

/// Describes the strongly-retaining "owner" side of a potential retain
/// cycle: the variable that (directly or indirectly) holds the receiver,
/// plus source information for diagnostics.
struct RetainCycleOwner {
  // The strongly-retaining variable; null until one is found.
  VarDecl *Variable = nullptr;
  // Source range/location used when emitting the owner note.
  SourceRange Range;
  SourceLocation Loc;
  // True when ownership goes through an ivar or property rather than the
  // variable itself.
  bool Indirect = false;

  RetainCycleOwner() = default;

  // Record diagnostic location/range from the referencing expression.
  void setLocsFrom(Expr *e) {
    Loc = e->getExprLoc();
    Range = e->getSourceRange();
  }
};

} // namespace
+
+/// Consider whether capturing the given variable can possibly lead to
+/// a retain cycle.
+static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) {
+ // In ARC, it's captured strongly iff the variable has __strong
+ // lifetime. In MRR, it's captured strongly if the variable is
+ // __block and has an appropriate type.
+ if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
+ return false;
+
+ owner.Variable = var;
+ if (ref)
+ owner.setLocsFrom(ref);
+ return true;
+}
+
/// Walk expression \p e looking for a variable that strongly owns its value,
/// filling in \p owner on success.
///
/// Looks through parens, a small set of value-preserving casts, member
/// accesses, strong ivar references (marking ownership indirect), and
/// retaining property references.
static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
  while (true) {
    e = e->IgnoreParens();
    if (CastExpr *cast = dyn_cast<CastExpr>(e)) {
      // Only these cast kinds preserve the ownership relationship we are
      // tracking; anything else ends the search.
      switch (cast->getCastKind()) {
      case CK_BitCast:
      case CK_LValueBitCast:
      case CK_LValueToRValue:
      case CK_ARCReclaimReturnedObject:
        e = cast->getSubExpr();
        continue;

      default:
        return false;
      }
    }

    if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) {
      ObjCIvarDecl *ivar = ref->getDecl();
      // Only strong ivars can hold the value alive.
      if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
        return false;

      // Try to find a retain cycle in the base.
      if (!findRetainCycleOwner(S, ref->getBase(), owner))
        return false;

      if (ref->isFreeIvar())
        owner.setLocsFrom(ref);
      owner.Indirect = true;
      return true;
    }

    if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) {
      VarDecl *var = dyn_cast<VarDecl>(ref->getDecl());
      if (!var)
        return false;
      return considerVariable(var, ref, owner);
    }

    if (MemberExpr *member = dyn_cast<MemberExpr>(e)) {
      if (member->isArrow())
        return false;

      // Don't count this as an indirect ownership.
      e = member->getBase();
      continue;
    }

    if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
      // Only pay attention to pseudo-objects on property references.
      ObjCPropertyRefExpr *pre = dyn_cast<ObjCPropertyRefExpr>(
          pseudo->getSyntacticForm()->IgnoreParens());
      if (!pre)
        return false;
      if (pre->isImplicitProperty())
        return false;
      ObjCPropertyDecl *property = pre->getExplicitProperty();
      // The property must retain its value, either directly or via a
      // strong backing ivar, for a cycle to be possible.
      if (!property->isRetaining() &&
          !(property->getPropertyIvarDecl() &&
            property->getPropertyIvarDecl()->getType().getObjCLifetime() ==
                Qualifiers::OCL_Strong))
        return false;

      owner.Indirect = true;
      if (pre->isSuperReceiver()) {
        // Property on 'super' is implicitly owned by 'self'.
        owner.Variable = S.getCurMethodDecl()->getSelfDecl();
        if (!owner.Variable)
          return false;
        owner.Loc = pre->getLocation();
        owner.Range = pre->getSourceRange();
        return true;
      }
      e = const_cast<Expr *>(
          cast<OpaqueValueExpr>(pre->getBase())->getSourceExpr());
      continue;
    }

    // Array ivars?

    return false;
  }
}
+
namespace {

/// AST visitor that searches a block body for an expression capturing a
/// particular variable — the other half of a potential retain cycle.
struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
  // The variable being searched for.
  VarDecl *Variable;
  // First expression found that references Variable (or a free ivar
  // reached through it); null if none.
  Expr *Capturer = nullptr;
  // Set when the block assigns a constant zero (nil) to Variable, which
  // would break the would-be cycle.
  bool VarWillBeReased = false;

  FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
      : EvaluatedExprVisitor<FindCaptureVisitor>(Context), Variable(variable) {}

  void VisitDeclRefExpr(DeclRefExpr *ref) {
    // Record only the first capture found.
    if (ref->getDecl() == Variable && !Capturer)
      Capturer = ref;
  }

  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) {
    if (Capturer)
      return;
    Visit(ref->getBase());
    // An ivar access with an implicit base counts as the capture itself.
    if (Capturer && ref->isFreeIvar())
      Capturer = ref;
  }

  void VisitBlockExpr(BlockExpr *block) {
    // Look inside nested blocks
    if (block->getBlockDecl()->capturesVariable(Variable))
      Visit(block->getBlockDecl()->getBody());
  }

  void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) {
    if (Capturer)
      return;
    if (OVE->getSourceExpr())
      Visit(OVE->getSourceExpr());
  }

  void VisitBinaryOperator(BinaryOperator *BinOp) {
    // Detect 'Variable = 0' (i.e. nil) assignments inside the block.
    if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign)
      return;
    Expr *LHS = BinOp->getLHS();
    if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) {
      if (DRE->getDecl() != Variable)
        return;
      if (Expr *RHS = BinOp->getRHS()) {
        RHS = RHS->IgnoreParenCasts();
        std::optional<llvm::APSInt> Value;
        VarWillBeReased =
            (RHS && (Value = RHS->getIntegerConstantExpr(Context)) &&
             *Value == 0);
      }
    }
  }
};

} // namespace
+
/// Check whether the given argument is a block which captures a
/// variable.
///
/// Looks through '[^{...} copy]' and '_Block_copy(^{...})' wrappers before
/// inspecting the block.
/// \returns the capturing expression, or null when there is no capture or
/// the block assigns zero/nil to the variable (breaking the cycle).
static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) {
  assert(owner.Variable && owner.Loc.isValid());

  e = e->IgnoreParenCasts();

  // Look through [^{...} copy] and Block_copy(^{...}).
  if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) {
    Selector Cmd = ME->getSelector();
    if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") {
      e = ME->getInstanceReceiver();
      if (!e)
        return nullptr;
      e = e->IgnoreParenCasts();
    }
  } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) {
    if (CE->getNumArgs() == 1) {
      FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl());
      if (Fn) {
        const IdentifierInfo *FnI = Fn->getIdentifier();
        if (FnI && FnI->isStr("_Block_copy")) {
          e = CE->getArg(0)->IgnoreParenCasts();
        }
      }
    }
  }

  BlockExpr *block = dyn_cast<BlockExpr>(e);
  if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable))
    return nullptr;

  FindCaptureVisitor visitor(S.Context, owner.Variable);
  visitor.Visit(block->getBlockDecl()->getBody());
  return visitor.VarWillBeReased ? nullptr : visitor.Capturer;
}
+
/// Emit the retain-cycle warning at the capturing expression, with a note
/// pointing back at the owning variable's location.
static void diagnoseRetainCycle(Sema &S, Expr *capturer,
                                RetainCycleOwner &owner) {
  assert(capturer);
  assert(owner.Variable && owner.Loc.isValid());

  S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle)
      << owner.Variable << capturer->getSourceRange();
  S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner)
      << owner.Indirect << owner.Range;
}
+
+/// Check for a keyword selector that starts with the word 'add' or
+/// 'set'.
+static bool isSetterLikeSelector(Selector sel) {
+ if (sel.isUnarySelector())
+ return false;
+
+ StringRef str = sel.getNameForSlot(0);
+ str = str.ltrim('_');
+ if (str.starts_with("set"))
+ str = str.substr(3);
+ else if (str.starts_with("add")) {
+ // Specially allow 'addOperationWithBlock:'.
+ if (sel.getNumArgs() == 1 && str.starts_with("addOperationWithBlock"))
+ return false;
+ str = str.substr(3);
+ } else
+ return false;
+
+ if (str.empty())
+ return true;
+ return !isLowercase(str.front());
+}
+
+static std::optional<int>
+GetNSMutableArrayArgumentIndex(SemaObjC &S, ObjCMessageExpr *Message) {
+ bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass(
+ Message->getReceiverInterface(), NSAPI::ClassId_NSMutableArray);
+ if (!IsMutableArray) {
+ return std::nullopt;
+ }
+
+ Selector Sel = Message->getSelector();
+
+ std::optional<NSAPI::NSArrayMethodKind> MKOpt =
+ S.NSAPIObj->getNSArrayMethodKind(Sel);
+ if (!MKOpt) {
+ return std::nullopt;
+ }
+
+ NSAPI::NSArrayMethodKind MK = *MKOpt;
+
+ switch (MK) {
+ case NSAPI::NSMutableArr_addObject:
+ case NSAPI::NSMutableArr_insertObjectAtIndex:
+ case NSAPI::NSMutableArr_setObjectAtIndexedSubscript:
+ return 0;
+ case NSAPI::NSMutableArr_replaceObjectAtIndex:
+ return 1;
+
+ default:
+ return std::nullopt;
+ }
+
+ return std::nullopt;
+}
+
+static std::optional<int>
+GetNSMutableDictionaryArgumentIndex(SemaObjC &S, ObjCMessageExpr *Message) {
+ bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass(
+ Message->getReceiverInterface(), NSAPI::ClassId_NSMutableDictionary);
+ if (!IsMutableDictionary) {
+ return std::nullopt;
+ }
+
+ Selector Sel = Message->getSelector();
+
+ std::optional<NSAPI::NSDictionaryMethodKind> MKOpt =
+ S.NSAPIObj->getNSDictionaryMethodKind(Sel);
+ if (!MKOpt) {
+ return std::nullopt;
+ }
+
+ NSAPI::NSDictionaryMethodKind MK = *MKOpt;
+
+ switch (MK) {
+ case NSAPI::NSMutableDict_setObjectForKey:
+ case NSAPI::NSMutableDict_setValueForKey:
+ case NSAPI::NSMutableDict_setObjectForKeyedSubscript:
+ return 0;
+
+ default:
+ return std::nullopt;
+ }
+
+ return std::nullopt;
+}
+
+static std::optional<int> GetNSSetArgumentIndex(SemaObjC &S,
+ ObjCMessageExpr *Message) {
+ bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass(
+ Message->getReceiverInterface(), NSAPI::ClassId_NSMutableSet);
+
+ bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass(
+ Message->getReceiverInterface(), NSAPI::ClassId_NSMutableOrderedSet);
+ if (!IsMutableSet && !IsMutableOrderedSet) {
+ return std::nullopt;
+ }
+
+ Selector Sel = Message->getSelector();
+
+ std::optional<NSAPI::NSSetMethodKind> MKOpt =
+ S.NSAPIObj->getNSSetMethodKind(Sel);
+ if (!MKOpt) {
+ return std::nullopt;
+ }
+
+ NSAPI::NSSetMethodKind MK = *MKOpt;
+
+ switch (MK) {
+ case NSAPI::NSMutableSet_addObject:
+ case NSAPI::NSOrderedSet_setObjectAtIndex:
+ case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript:
+ case NSAPI::NSOrderedSet_insertObjectAtIndex:
+ return 0;
+ case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject:
+ return 1;
+ }
+
+ return std::nullopt;
+}
+
/// Warn when a mutable Cocoa container (NSMutableArray/Dictionary/Set/
/// OrderedSet) is asked to insert itself — via 'self' sent to 'super', or
/// via the same variable or ivar used as both receiver and argument.
void SemaObjC::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
  if (!Message->isInstanceMessage()) {
    return;
  }

  std::optional<int> ArgOpt;

  // Determine which argument (if any) is inserted into the container.
  if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) &&
      !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) &&
      !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) {
    return;
  }

  int ArgIndex = *ArgOpt;

  Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts();
  if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) {
    Arg = OE->getSourceExpr()->IgnoreImpCasts();
  }

  if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
    // e.g. [super addObject:self] — inserting 'self' into its own storage.
    if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
      if (ArgRE->isObjCSelfExpr()) {
        Diag(Message->getSourceRange().getBegin(),
             diag::warn_objc_circular_container)
            << ArgRE->getDecl() << StringRef("'super'");
      }
    }
  } else {
    Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts();

    if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) {
      Receiver = OE->getSourceExpr()->IgnoreImpCasts();
    }

    // Receiver and argument refer to the same variable...
    if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) {
      if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
        if (ReceiverRE->getDecl() == ArgRE->getDecl()) {
          ValueDecl *Decl = ReceiverRE->getDecl();
          Diag(Message->getSourceRange().getBegin(),
               diag::warn_objc_circular_container)
              << Decl << Decl;
          if (!ArgRE->isObjCSelfExpr()) {
            Diag(Decl->getLocation(),
                 diag::note_objc_circular_container_declared_here)
                << Decl;
          }
        }
      }
    } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) {
      // ... or to the same instance variable.
      if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) {
        if (IvarRE->getDecl() == IvarArgRE->getDecl()) {
          ObjCIvarDecl *Decl = IvarRE->getDecl();
          Diag(Message->getSourceRange().getBegin(),
               diag::warn_objc_circular_container)
              << Decl << Decl;
          Diag(Decl->getLocation(),
               diag::note_objc_circular_container_declared_here)
              << Decl;
        }
      }
    }
  }
}
+
/// Check a message send to see if it's likely to cause a retain cycle.
void SemaObjC::checkRetainCycles(ObjCMessageExpr *msg) {
  // Only check instance methods whose selector looks like a setter.
  if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector()))
    return;

  // Try to find a variable that the receiver is strongly owned by.
  RetainCycleOwner owner;
  if (msg->getReceiverKind() == ObjCMessageExpr::Instance) {
    if (!findRetainCycleOwner(SemaRef, msg->getInstanceReceiver(), owner))
      return;
  } else {
    assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
    // A message to 'super' is implicitly owned by 'self'.
    owner.Variable = SemaRef.getCurMethodDecl()->getSelfDecl();
    owner.Loc = msg->getSuperLoc();
    owner.Range = msg->getSuperLoc();
  }

  // Check whether the receiver is captured by any of the arguments.
  const ObjCMethodDecl *MD = msg->getMethodDecl();
  for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) {
    if (Expr *capturer = findCapturingExpr(SemaRef, msg->getArg(i), owner)) {
      // noescape blocks should not be retained by the method.
      if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>())
        continue;
      return diagnoseRetainCycle(SemaRef, capturer, owner);
    }
  }
}
+
+/// Check a property assign to see if it's likely to cause a retain cycle.
+void SemaObjC::checkRetainCycles(Expr *receiver, Expr *argument) {
+ RetainCycleOwner owner;
+ if (!findRetainCycleOwner(SemaRef, receiver, owner))
+ return;
+
+ if (Expr *capturer = findCapturingExpr(SemaRef, argument, owner))
+ diagnoseRetainCycle(SemaRef, capturer, owner);
+}
+
+void SemaObjC::checkRetainCycles(VarDecl *Var, Expr *Init) {
+ RetainCycleOwner Owner;
+ if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner))
+ return;
+
+ // Because we don't have an expression for the variable, we have to set the
+ // location explicitly here.
+ Owner.Loc = Var->getLocation();
+ Owner.Range = Var->getSourceRange();
+
+ if (Expr *Capturer = findCapturingExpr(SemaRef, Init, Owner))
+ diagnoseRetainCycle(SemaRef, Capturer, Owner);
+}
+
/// CheckObjCString - Checks that the argument to the builtin
/// CFString constructor is correct
/// Note: It might also make sense to do the UTF-16 conversion here (would
/// simplify the backend).
///
/// \returns true on error (non-literal argument), false otherwise.
bool SemaObjC::CheckObjCString(Expr *Arg) {
  Arg = Arg->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);

  // The argument must be an ordinary (narrow, non-prefixed) string literal.
  if (!Literal || !Literal->isOrdinary()) {
    Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
        << Arg->getSourceRange();
    return true;
  }

  // For literals containing non-ASCII or embedded nul bytes, verify they
  // convert cleanly to UTF-16 and warn when the conversion would lose data.
  if (Literal->containsNonAsciiOrNull()) {
    StringRef String = Literal->getString();
    unsigned NumBytes = String.size();
    SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
    const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
    llvm::UTF16 *ToPtr = &ToBuf[0];

    llvm::ConversionResult Result =
        llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
                                 ToPtr + NumBytes, llvm::strictConversion);
    // Check for conversion failure.
    if (Result != llvm::conversionOK)
      Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated)
          << Arg->getSourceRange();
  }
  return false;
}
+
+bool SemaObjC::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
+ ArrayRef<const Expr *> Args) {
+ Sema::VariadicCallType CallType =
+ Method->isVariadic() ? Sema::VariadicMethod : Sema::VariadicDoesNotApply;
+
+ SemaRef.checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args,
+ /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
+ CallType);
+
+ SemaRef.CheckTCBEnforcement(lbrac, Method);
+
+ return false;
+}
+
+const DeclContext *SemaObjC::getCurObjCLexicalContext() const {
+ const DeclContext *DC = SemaRef.getCurLexicalContext();
+ // A category implicitly has the attribute of the interface.
+ if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
+ DC = CatD->getClassInterface();
+ return DC;
+}
+
+/// Retrieve the identifier "NSError".
+IdentifierInfo *SemaObjC::getNSErrorIdent() {
+ if (!Ident_NSError)
+ Ident_NSError = SemaRef.PP.getIdentifierInfo("NSError");
+
+ return Ident_NSError;
+}
+
/// Enter the declaration context of an Objective-C container whose
/// definition is starting; subsequent declarations land inside it.
void SemaObjC::ActOnObjCContainerStartDefinition(ObjCContainerDecl *IDecl) {
  assert(
      IDecl->getLexicalParent() == SemaRef.CurContext &&
      "The next DeclContext should be lexically contained in the current one.");
  SemaRef.CurContext = IDecl;
}
+
/// Leave the declaration context entered by
/// ActOnObjCContainerStartDefinition.
void SemaObjC::ActOnObjCContainerFinishDefinition() {
  // Exit this scope of this interface definition.
  SemaRef.PopDeclContext();
}
+
/// Temporarily pop out of an Objective-C container context, remembering the
/// container so ActOnObjCReenterContainerContext can restore it later.
void SemaObjC::ActOnObjCTemporaryExitContainerContext(
    ObjCContainerDecl *ObjCCtx) {
  assert(ObjCCtx == SemaRef.CurContext && "Mismatch of container contexts");
  SemaRef.OriginalLexicalContext = ObjCCtx;
  ActOnObjCContainerFinishDefinition();
}
+
/// Re-enter the container context saved by
/// ActOnObjCTemporaryExitContainerContext and clear the saved pointer.
void SemaObjC::ActOnObjCReenterContainerContext(ObjCContainerDecl *ObjCCtx) {
  ActOnObjCContainerStartDefinition(ObjCCtx);
  SemaRef.OriginalLexicalContext = nullptr;
}
+
+/// Find the protocol with the given name, if any.
+ObjCProtocolDecl *SemaObjC::LookupProtocol(IdentifierInfo *II,
+ SourceLocation IdLoc,
+ RedeclarationKind Redecl) {
+ Decl *D = SemaRef.LookupSingleName(SemaRef.TUScope, II, IdLoc,
+ Sema::LookupObjCProtocolName, Redecl);
+ return cast_or_null<ObjCProtocolDecl>(D);
+}
+
/// Determine whether this is an Objective-C writeback conversion,
/// used for parameter passing when performing automatic reference counting.
///
/// \param FromType The type we're converting from.
///
/// \param ToType The type we're converting to.
///
/// \param ConvertedType The type that will be produced after applying
/// this conversion.
bool SemaObjC::isObjCWritebackConversion(QualType FromType, QualType ToType,
                                         QualType &ConvertedType) {
  ASTContext &Context = getASTContext();
  // Writeback conversions only exist under ARC and only between distinct
  // types.
  if (!getLangOpts().ObjCAutoRefCount ||
      Context.hasSameUnqualifiedType(FromType, ToType))
    return false;

  // Parameter must be a pointer to __autoreleasing (with no other qualifiers).
  QualType ToPointee;
  if (const PointerType *ToPointer = ToType->getAs<PointerType>())
    ToPointee = ToPointer->getPointeeType();
  else
    return false;

  Qualifiers ToQuals = ToPointee.getQualifiers();
  if (!ToPointee->isObjCLifetimeType() ||
      ToQuals.getObjCLifetime() != Qualifiers::OCL_Autoreleasing ||
      !ToQuals.withoutObjCLifetime().empty())
    return false;

  // Argument must be a pointer to __strong or __weak.
  QualType FromPointee;
  if (const PointerType *FromPointer = FromType->getAs<PointerType>())
    FromPointee = FromPointer->getPointeeType();
  else
    return false;

  Qualifiers FromQuals = FromPointee.getQualifiers();
  if (!FromPointee->isObjCLifetimeType() ||
      (FromQuals.getObjCLifetime() != Qualifiers::OCL_Strong &&
       FromQuals.getObjCLifetime() != Qualifiers::OCL_Weak))
    return false;

  // Make sure that we have compatible qualifiers.
  FromQuals.setObjCLifetime(Qualifiers::OCL_Autoreleasing);
  if (!ToQuals.compatiblyIncludes(FromQuals))
    return false;

  // Remove qualifiers from the pointee type we're converting from; they
  // aren't used in the compatibility check below, and we'll be adding back
  // qualifiers (with __autoreleasing) if the compatibility check succeeds.
  FromPointee = FromPointee.getUnqualifiedType();

  // The unqualified form of the pointee types must be compatible.
  ToPointee = ToPointee.getUnqualifiedType();
  bool IncompatibleObjC;
  if (Context.typesAreCompatible(FromPointee, ToPointee))
    FromPointee = ToPointee;
  else if (!SemaRef.isObjCPointerConversion(FromPointee, ToPointee, FromPointee,
                                            IncompatibleObjC))
    return false;

  /// Construct the type we're converting to, which is a pointer to
  /// __autoreleasing pointee.
  FromPointee = Context.getQualifiedType(FromPointee, FromQuals);
  ConvertedType = Context.getPointerType(FromPointee);
  return true;
}
+
/// CheckSubscriptingKind - This routine decide what type
/// of indexing represented by "FromE" is being done.
///
/// \returns OS_Array for integral/enumeration indexes, OS_Dictionary for
/// object-pointer indexes, and OS_Error (after diagnosing) otherwise.
SemaObjC::ObjCSubscriptKind SemaObjC::CheckSubscriptingKind(Expr *FromE) {
  // If the expression already has integral or enumeration type, we're golden.
  QualType T = FromE->getType();
  if (T->isIntegralOrEnumerationType())
    return SemaObjC::OS_Array;

  // If we don't have a class type in C++, there's no way we can get an
  // expression of integral or enumeration type.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy && (T->isObjCObjectPointerType() || T->isVoidPointerType()))
    // All other scalar cases are assumed to be dictionary indexing which
    // caller handles, with diagnostics if needed.
    return SemaObjC::OS_Dictionary;
  if (!getLangOpts().CPlusPlus || !RecordTy || RecordTy->isIncompleteType()) {
    // No indexing can be done. Issue diagnostics and quit.
    const Expr *IndexExpr = FromE->IgnoreParenImpCasts();
    if (isa<StringLiteral>(IndexExpr))
      Diag(FromE->getExprLoc(), diag::err_objc_subscript_pointer)
          << T << FixItHint::CreateInsertion(FromE->getExprLoc(), "@");
    else
      Diag(FromE->getExprLoc(), diag::err_objc_subscript_type_conversion) << T;
    return SemaObjC::OS_Error;
  }

  // We must have a complete class type.
  if (SemaRef.RequireCompleteType(FromE->getExprLoc(), T,
                                  diag::err_objc_index_incomplete_class_type,
                                  FromE))
    return SemaObjC::OS_Error;

  // Look for a conversion to an integral, enumeration type, or
  // objective-C pointer type.
  int NoIntegrals = 0, NoObjCIdPointers = 0;
  SmallVector<CXXConversionDecl *, 4> ConversionDecls;

  // Count the visible conversion functions by target kind; exactly one
  // candidate of a single kind determines the subscript kind.
  for (NamedDecl *D : cast<CXXRecordDecl>(RecordTy->getDecl())
                          ->getVisibleConversionFunctions()) {
    if (CXXConversionDecl *Conversion =
            dyn_cast<CXXConversionDecl>(D->getUnderlyingDecl())) {
      QualType CT = Conversion->getConversionType().getNonReferenceType();
      if (CT->isIntegralOrEnumerationType()) {
        ++NoIntegrals;
        ConversionDecls.push_back(Conversion);
      } else if (CT->isObjCIdType() || CT->isBlockPointerType()) {
        ++NoObjCIdPointers;
        ConversionDecls.push_back(Conversion);
      }
    }
  }
  if (NoIntegrals == 1 && NoObjCIdPointers == 0)
    return SemaObjC::OS_Array;
  if (NoIntegrals == 0 && NoObjCIdPointers == 1)
    return SemaObjC::OS_Dictionary;
  if (NoIntegrals == 0 && NoObjCIdPointers == 0) {
    // No conversion function was found. Issue diagnostic and return.
    Diag(FromE->getExprLoc(), diag::err_objc_subscript_type_conversion)
        << FromE->getType();
    return SemaObjC::OS_Error;
  }
  // Multiple candidate conversions: ambiguous subscript kind.
  Diag(FromE->getExprLoc(), diag::err_objc_multiple_subscript_type_conversion)
      << FromE->getType();
  for (unsigned int i = 0; i < ConversionDecls.size(); i++)
    Diag(ConversionDecls[i]->getLocation(),
         diag::note_conv_function_declared_at);

  return SemaObjC::OS_Error;
}
+
+void SemaObjC::AddCFAuditedAttribute(Decl *D) {
+ ASTContext &Context = getASTContext();
+ IdentifierInfo *Ident;
+ SourceLocation Loc;
+ std::tie(Ident, Loc) = SemaRef.PP.getPragmaARCCFCodeAuditedInfo();
+ if (!Loc.isValid())
+ return;
+
+ // Don't add a redundant or conflicting attribute.
+ if (D->hasAttr<CFAuditedTransferAttr>() ||
+ D->hasAttr<CFUnknownTransferAttr>())
+ return;
+
+ AttributeCommonInfo Info(Ident, SourceRange(Loc),
+ AttributeCommonInfo::Form::Pragma());
+ D->addAttr(CFAuditedTransferAttr::CreateImplicit(Context, Info));
+}
+
/// Determine whether \p RD is the CFError record type, caching a positive
/// identification for subsequent queries.
bool SemaObjC::isCFError(RecordDecl *RD) {
  // If we already know about CFError, test it directly.
  if (CFError)
    return CFError == RD;

  // Check whether this is CFError, which we identify based on its bridge to
  // NSError. CFErrorRef used to be declared with "objc_bridge" but is now
  // declared with "objc_bridge_mutable", so look for either one of the two
  // attributes.
  if (RD->getTagKind() == TagTypeKind::Struct) {
    IdentifierInfo *bridgedType = nullptr;
    if (auto bridgeAttr = RD->getAttr<ObjCBridgeAttr>())
      bridgedType = bridgeAttr->getBridgedType();
    else if (auto bridgeAttr = RD->getAttr<ObjCBridgeMutableAttr>())
      bridgedType = bridgeAttr->getBridgedType();

    if (bridgedType == getNSErrorIdent()) {
      // Cache the decl so future checks reduce to a pointer comparison.
      CFError = RD;
      return true;
    }
  }

  return false;
}
+
+bool SemaObjC::isNSStringType(QualType T, bool AllowNSAttributedString) {
+ const auto *PT = T->getAs<ObjCObjectPointerType>();
+ if (!PT)
+ return false;
+
+ ObjCInterfaceDecl *Cls = PT->getObjectType()->getInterface();
+ if (!Cls)
+ return false;
+
+ IdentifierInfo *ClsName = Cls->getIdentifier();
+
+ if (AllowNSAttributedString &&
+ ClsName == &getASTContext().Idents.get("NSAttributedString"))
+ return true;
+ // FIXME: Should we walk the chain of classes?
+ return ClsName == &getASTContext().Idents.get("NSString") ||
+ ClsName == &getASTContext().Idents.get("NSMutableString");
+}
+
+bool SemaObjC::isCFStringType(QualType T) {
+ const auto *PT = T->getAs<PointerType>();
+ if (!PT)
+ return false;
+
+ const auto *RT = PT->getPointeeType()->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->getTagKind() != TagTypeKind::Struct)
+ return false;
+
+ return RD->getIdentifier() == &getASTContext().Idents.get("__CFString");
+}
+
+static bool checkIBOutletCommon(Sema &S, Decl *D, const ParsedAttr &AL) {
+ // The IBOutlet/IBOutletCollection attributes only apply to instance
+ // variables or properties of Objective-C classes. The outlet must also
+ // have an object reference type.
+ if (const auto *VD = dyn_cast<ObjCIvarDecl>(D)) {
+ if (!VD->getType()->getAs<ObjCObjectPointerType>()) {
+ S.Diag(AL.getLoc(), diag::warn_iboutlet_object_type)
+ << AL << VD->getType() << 0;
+ return false;
+ }
+ } else if (const auto *PD = dyn_cast<ObjCPropertyDecl>(D)) {
+ if (!PD->getType()->getAs<ObjCObjectPointerType>()) {
+ S.Diag(AL.getLoc(), diag::warn_iboutlet_object_type)
+ << AL << PD->getType() << 1;
+ return false;
+ }
+ } else {
+ S.Diag(AL.getLoc(), diag::warn_attribute_iboutlet) << AL;
+ return false;
+ }
+
+ return true;
+}
+
+void SemaObjC::handleIBOutlet(Decl *D, const ParsedAttr &AL) {
+ if (!checkIBOutletCommon(SemaRef, D, AL))
+ return;
+
+ D->addAttr(::new (getASTContext()) IBOutletAttr(getASTContext(), AL));
+}
+
/// Handle the 'iboutletcollection' attribute: validate the declaration,
/// resolve the optional element-type argument (defaulting to NSObject), and
/// attach the attribute.
void SemaObjC::handleIBOutletCollection(Decl *D, const ParsedAttr &AL) {

  ASTContext &Context = getASTContext();
  // The iboutletcollection attribute can have zero or one arguments.
  if (AL.getNumArgs() > 1) {
    Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments) << AL << 1;
    return;
  }

  if (!checkIBOutletCommon(SemaRef, D, AL))
    return;

  ParsedType PT;

  if (AL.hasParsedType())
    PT = AL.getTypeArg();
  else {
    // No explicit element type written: default to NSObject.
    PT = SemaRef.getTypeName(
        Context.Idents.get("NSObject"), AL.getLoc(),
        SemaRef.getScopeForContext(D->getDeclContext()->getParent()));
    if (!PT) {
      Diag(AL.getLoc(), diag::err_iboutletcollection_type) << "NSObject";
      return;
    }
  }

  TypeSourceInfo *QTLoc = nullptr;
  QualType QT = SemaRef.GetTypeFromParser(PT, &QTLoc);
  if (!QTLoc)
    QTLoc = Context.getTrivialTypeSourceInfo(QT, AL.getLoc());

  // Diagnose use of non-object type in iboutletcollection attribute.
  // FIXME. Gnu attribute extension ignores use of builtin types in
  // attributes. So, __attribute__((iboutletcollection(char))) will be
  // treated as __attribute__((iboutletcollection())).
  if (!QT->isObjCIdType() && !QT->isObjCObjectType()) {
    Diag(AL.getLoc(), QT->isBuiltinType()
                          ? diag::err_iboutletcollection_builtintype
                          : diag::err_iboutletcollection_type)
        << QT;
    return;
  }

  D->addAttr(::new (Context) IBOutletCollectionAttr(Context, AL, QTLoc));
}
+
+void SemaObjC::handleSuppresProtocolAttr(Decl *D, const ParsedAttr &AL) {
+ if (!cast<ObjCProtocolDecl>(D)->isThisDeclarationADefinition()) {
+ Diag(AL.getLoc(), diag::err_objc_attr_protocol_requires_definition)
+ << AL << AL.getRange();
+ return;
+ }
+
+ D->addAttr(::new (getASTContext())
+ ObjCExplicitProtocolImplAttr(getASTContext(), AL));
+}
+
+void SemaObjC::handleDirectAttr(Decl *D, const ParsedAttr &AL) {
+ // objc_direct cannot be set on methods declared in the context of a protocol
+ if (isa<ObjCProtocolDecl>(D->getDeclContext())) {
+ Diag(AL.getLoc(), diag::err_objc_direct_on_protocol) << false;
+ return;
+ }
+
+ if (getLangOpts().ObjCRuntime.allowsDirectDispatch()) {
+ handleSimpleAttribute<ObjCDirectAttr>(*this, D, AL);
+ } else {
+ Diag(AL.getLoc(), diag::warn_objc_direct_ignored) << AL;
+ }
+}
+
+void SemaObjC::handleDirectMembersAttr(Decl *D, const ParsedAttr &AL) {
+ if (getLangOpts().ObjCRuntime.allowsDirectDispatch()) {
+ handleSimpleAttribute<ObjCDirectMembersAttr>(*this, D, AL);
+ } else {
+ Diag(AL.getLoc(), diag::warn_objc_direct_ignored) << AL;
+ }
+}
+
+void SemaObjC::handleMethodFamilyAttr(Decl *D, const ParsedAttr &AL) {
+ const auto *M = cast<ObjCMethodDecl>(D);
+ if (!AL.isArgIdent(0)) {
+ Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
+ << AL << 1 << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ IdentifierLoc *IL = AL.getArgAsIdent(0);
+ ObjCMethodFamilyAttr::FamilyKind F;
+ if (!ObjCMethodFamilyAttr::ConvertStrToFamilyKind(IL->Ident->getName(), F)) {
+ Diag(IL->Loc, diag::warn_attribute_type_not_supported) << AL << IL->Ident;
+ return;
+ }
+
+ if (F == ObjCMethodFamilyAttr::OMF_init &&
+ !M->getReturnType()->isObjCObjectPointerType()) {
+ Diag(M->getLocation(), diag::err_init_method_bad_return_type)
+ << M->getReturnType();
+ // Ignore the attribute.
+ return;
+ }
+
+ D->addAttr(new (getASTContext())
+ ObjCMethodFamilyAttr(getASTContext(), AL, F));
+}
+
+void SemaObjC::handleNSObject(Decl *D, const ParsedAttr &AL) {
+ if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
+ QualType T = TD->getUnderlyingType();
+ if (!T->isCARCBridgableType()) {
+ Diag(TD->getLocation(), diag::err_nsobject_attribute);
+ return;
+ }
+ } else if (const auto *PD = dyn_cast<ObjCPropertyDecl>(D)) {
+ QualType T = PD->getType();
+ if (!T->isCARCBridgableType()) {
+ Diag(PD->getLocation(), diag::err_nsobject_attribute);
+ return;
+ }
+ } else {
+ // It is okay to include this attribute on properties, e.g.:
+ //
+ // @property (retain, nonatomic) struct Bork *Q __attribute__((NSObject));
+ //
+ // In this case it follows tradition and suppresses an error in the above
+ // case.
+ Diag(D->getLocation(), diag::warn_nsobject_attribute);
+ }
+ D->addAttr(::new (getASTContext()) ObjCNSObjectAttr(getASTContext(), AL));
+}
+
+void SemaObjC::handleIndependentClass(Decl *D, const ParsedAttr &AL) {
+ if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
+ QualType T = TD->getUnderlyingType();
+ if (!T->isObjCObjectPointerType()) {
+ Diag(TD->getLocation(), diag::warn_ptr_independentclass_attribute);
+ return;
+ }
+ } else {
+ Diag(D->getLocation(), diag::warn_independentclass_attribute);
+ return;
+ }
+ D->addAttr(::new (getASTContext())
+ ObjCIndependentClassAttr(getASTContext(), AL));
+}
+
+void SemaObjC::handleBlocksAttr(Decl *D, const ParsedAttr &AL) {
+ if (!AL.isArgIdent(0)) {
+ Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
+ << AL << 1 << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ IdentifierInfo *II = AL.getArgAsIdent(0)->Ident;
+ BlocksAttr::BlockType type;
+ if (!BlocksAttr::ConvertStrToBlockType(II->getName(), type)) {
+ Diag(AL.getLoc(), diag::warn_attribute_type_not_supported) << AL << II;
+ return;
+ }
+
+ D->addAttr(::new (getASTContext()) BlocksAttr(getASTContext(), AL, type));
+}
+
+static bool isValidSubjectOfNSReturnsRetainedAttribute(QualType QT) {
+ return QT->isDependentType() || QT->isObjCRetainableType();
+}
+
+static bool isValidSubjectOfNSAttribute(QualType QT) {
+ return QT->isDependentType() || QT->isObjCObjectPointerType() ||
+ QT->isObjCNSObjectType();
+}
+
+static bool isValidSubjectOfCFAttribute(QualType QT) {
+ return QT->isDependentType() || QT->isPointerType() ||
+ isValidSubjectOfNSAttribute(QT);
+}
+
+static bool isValidSubjectOfOSAttribute(QualType QT) {
+ if (QT->isDependentType())
+ return true;
+ QualType PT = QT->getPointeeType();
+ return !PT.isNull() && PT->getAsCXXRecordDecl() != nullptr;
+}
+
+void SemaObjC::AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
+ Sema::RetainOwnershipKind K,
+ bool IsTemplateInstantiation) {
+ ValueDecl *VD = cast<ValueDecl>(D);
+ switch (K) {
+ case Sema::RetainOwnershipKind::OS:
+ handleSimpleAttributeOrDiagnose<OSConsumedAttr>(
+ *this, VD, CI, isValidSubjectOfOSAttribute(VD->getType()),
+ diag::warn_ns_attribute_wrong_parameter_type,
+ /*ExtraArgs=*/CI.getRange(), "os_consumed", /*pointers*/ 1);
+ return;
+ case Sema::RetainOwnershipKind::NS:
+ handleSimpleAttributeOrDiagnose<NSConsumedAttr>(
+ *this, VD, CI, isValidSubjectOfNSAttribute(VD->getType()),
+
+ // These attributes are normally just advisory, but in ARC, ns_consumed
+ // is significant. Allow non-dependent code to contain inappropriate
+ // attributes even in ARC, but require template instantiations to be
+ // set up correctly.
+ ((IsTemplateInstantiation && getLangOpts().ObjCAutoRefCount)
+ ? diag::err_ns_attribute_wrong_parameter_type
+ : diag::warn_ns_attribute_wrong_parameter_type),
+ /*ExtraArgs=*/CI.getRange(), "ns_consumed", /*objc pointers*/ 0);
+ return;
+ case Sema::RetainOwnershipKind::CF:
+ handleSimpleAttributeOrDiagnose<CFConsumedAttr>(
+ *this, VD, CI, isValidSubjectOfCFAttribute(VD->getType()),
+ diag::warn_ns_attribute_wrong_parameter_type,
+ /*ExtraArgs=*/CI.getRange(), "cf_consumed", /*pointers*/ 1);
+ return;
+ }
+}
+
+Sema::RetainOwnershipKind
+SemaObjC::parsedAttrToRetainOwnershipKind(const ParsedAttr &AL) {
+ switch (AL.getKind()) {
+ case ParsedAttr::AT_CFConsumed:
+ case ParsedAttr::AT_CFReturnsRetained:
+ case ParsedAttr::AT_CFReturnsNotRetained:
+ return Sema::RetainOwnershipKind::CF;
+ case ParsedAttr::AT_OSConsumesThis:
+ case ParsedAttr::AT_OSConsumed:
+ case ParsedAttr::AT_OSReturnsRetained:
+ case ParsedAttr::AT_OSReturnsNotRetained:
+ case ParsedAttr::AT_OSReturnsRetainedOnZero:
+ case ParsedAttr::AT_OSReturnsRetainedOnNonZero:
+ return Sema::RetainOwnershipKind::OS;
+ case ParsedAttr::AT_NSConsumesSelf:
+ case ParsedAttr::AT_NSConsumed:
+ case ParsedAttr::AT_NSReturnsRetained:
+ case ParsedAttr::AT_NSReturnsNotRetained:
+ case ParsedAttr::AT_NSReturnsAutoreleased:
+ return Sema::RetainOwnershipKind::NS;
+ default:
+ llvm_unreachable("Wrong argument supplied");
+ }
+}
+
+bool SemaObjC::checkNSReturnsRetainedReturnType(SourceLocation Loc,
+ QualType QT) {
+ if (isValidSubjectOfNSReturnsRetainedAttribute(QT))
+ return false;
+
+ Diag(Loc, diag::warn_ns_attribute_wrong_return_type)
+ << "'ns_returns_retained'" << 0 << 0;
+ return true;
+}
+
+/// \return whether the parameter is a pointer to OSObject pointer.
+bool SemaObjC::isValidOSObjectOutParameter(const Decl *D) {
+ const auto *PVD = dyn_cast<ParmVarDecl>(D);
+ if (!PVD)
+ return false;
+ QualType QT = PVD->getType();
+ QualType PT = QT->getPointeeType();
+ return !PT.isNull() && isValidSubjectOfOSAttribute(PT);
+}
+
+void SemaObjC::handleXReturnsXRetainedAttr(Decl *D, const ParsedAttr &AL) {
+ QualType ReturnType;
+ Sema::RetainOwnershipKind K = parsedAttrToRetainOwnershipKind(AL);
+
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ ReturnType = MD->getReturnType();
+ } else if (getLangOpts().ObjCAutoRefCount && hasDeclarator(D) &&
+ (AL.getKind() == ParsedAttr::AT_NSReturnsRetained)) {
+ return; // ignore: was handled as a type attribute
+ } else if (const auto *PD = dyn_cast<ObjCPropertyDecl>(D)) {
+ ReturnType = PD->getType();
+ } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ ReturnType = FD->getReturnType();
+ } else if (const auto *Param = dyn_cast<ParmVarDecl>(D)) {
+ // Attributes on parameters are used for out-parameters,
+ // passed as pointers-to-pointers.
+ unsigned DiagID = K == Sema::RetainOwnershipKind::CF
+ ? /*pointer-to-CF-pointer*/ 2
+ : /*pointer-to-OSObject-pointer*/ 3;
+ ReturnType = Param->getType()->getPointeeType();
+ if (ReturnType.isNull()) {
+ Diag(D->getBeginLoc(), diag::warn_ns_attribute_wrong_parameter_type)
+ << AL << DiagID << AL.getRange();
+ return;
+ }
+ } else if (AL.isUsedAsTypeAttr()) {
+ return;
+ } else {
+ AttributeDeclKind ExpectedDeclKind;
+ switch (AL.getKind()) {
+ default:
+ llvm_unreachable("invalid ownership attribute");
+ case ParsedAttr::AT_NSReturnsRetained:
+ case ParsedAttr::AT_NSReturnsAutoreleased:
+ case ParsedAttr::AT_NSReturnsNotRetained:
+ ExpectedDeclKind = ExpectedFunctionOrMethod;
+ break;
+
+ case ParsedAttr::AT_OSReturnsRetained:
+ case ParsedAttr::AT_OSReturnsNotRetained:
+ case ParsedAttr::AT_CFReturnsRetained:
+ case ParsedAttr::AT_CFReturnsNotRetained:
+ ExpectedDeclKind = ExpectedFunctionMethodOrParameter;
+ break;
+ }
+ Diag(D->getBeginLoc(), diag::warn_attribute_wrong_decl_type)
+ << AL.getRange() << AL << AL.isRegularKeywordAttribute()
+ << ExpectedDeclKind;
+ return;
+ }
+
+ bool TypeOK;
+ bool Cf;
+ unsigned ParmDiagID = 2; // Pointer-to-CF-pointer
+ switch (AL.getKind()) {
+ default:
+ llvm_unreachable("invalid ownership attribute");
+ case ParsedAttr::AT_NSReturnsRetained:
+ TypeOK = isValidSubjectOfNSReturnsRetainedAttribute(ReturnType);
+ Cf = false;
+ break;
+
+ case ParsedAttr::AT_NSReturnsAutoreleased:
+ case ParsedAttr::AT_NSReturnsNotRetained:
+ TypeOK = isValidSubjectOfNSAttribute(ReturnType);
+ Cf = false;
+ break;
+
+ case ParsedAttr::AT_CFReturnsRetained:
+ case ParsedAttr::AT_CFReturnsNotRetained:
+ TypeOK = isValidSubjectOfCFAttribute(ReturnType);
+ Cf = true;
+ break;
+
+ case ParsedAttr::AT_OSReturnsRetained:
+ case ParsedAttr::AT_OSReturnsNotRetained:
+ TypeOK = isValidSubjectOfOSAttribute(ReturnType);
+ Cf = true;
+ ParmDiagID = 3; // Pointer-to-OSObject-pointer
+ break;
+ }
+
+ if (!TypeOK) {
+ if (AL.isUsedAsTypeAttr())
+ return;
+
+ if (isa<ParmVarDecl>(D)) {
+ Diag(D->getBeginLoc(), diag::warn_ns_attribute_wrong_parameter_type)
+ << AL << ParmDiagID << AL.getRange();
+ } else {
+ // Needs to be kept in sync with warn_ns_attribute_wrong_return_type.
+ enum : unsigned { Function, Method, Property } SubjectKind = Function;
+ if (isa<ObjCMethodDecl>(D))
+ SubjectKind = Method;
+ else if (isa<ObjCPropertyDecl>(D))
+ SubjectKind = Property;
+ Diag(D->getBeginLoc(), diag::warn_ns_attribute_wrong_return_type)
+ << AL << SubjectKind << Cf << AL.getRange();
+ }
+ return;
+ }
+
+ switch (AL.getKind()) {
+ default:
+ llvm_unreachable("invalid ownership attribute");
+ case ParsedAttr::AT_NSReturnsAutoreleased:
+ handleSimpleAttribute<NSReturnsAutoreleasedAttr>(*this, D, AL);
+ return;
+ case ParsedAttr::AT_CFReturnsNotRetained:
+ handleSimpleAttribute<CFReturnsNotRetainedAttr>(*this, D, AL);
+ return;
+ case ParsedAttr::AT_NSReturnsNotRetained:
+ handleSimpleAttribute<NSReturnsNotRetainedAttr>(*this, D, AL);
+ return;
+ case ParsedAttr::AT_CFReturnsRetained:
+ handleSimpleAttribute<CFReturnsRetainedAttr>(*this, D, AL);
+ return;
+ case ParsedAttr::AT_NSReturnsRetained:
+ handleSimpleAttribute<NSReturnsRetainedAttr>(*this, D, AL);
+ return;
+ case ParsedAttr::AT_OSReturnsRetained:
+ handleSimpleAttribute<OSReturnsRetainedAttr>(*this, D, AL);
+ return;
+ case ParsedAttr::AT_OSReturnsNotRetained:
+ handleSimpleAttribute<OSReturnsNotRetainedAttr>(*this, D, AL);
+ return;
+ };
+}
+
+void SemaObjC::handleReturnsInnerPointerAttr(Decl *D, const ParsedAttr &Attrs) {
+ const int EP_ObjCMethod = 1;
+ const int EP_ObjCProperty = 2;
+
+ SourceLocation loc = Attrs.getLoc();
+ QualType resultType;
+ if (isa<ObjCMethodDecl>(D))
+ resultType = cast<ObjCMethodDecl>(D)->getReturnType();
+ else
+ resultType = cast<ObjCPropertyDecl>(D)->getType();
+
+ if (!resultType->isReferenceType() &&
+ (!resultType->isPointerType() || resultType->isObjCRetainableType())) {
+ Diag(D->getBeginLoc(), diag::warn_ns_attribute_wrong_return_type)
+ << SourceRange(loc) << Attrs
+ << (isa<ObjCMethodDecl>(D) ? EP_ObjCMethod : EP_ObjCProperty)
+ << /*non-retainable pointer*/ 2;
+
+ // Drop the attribute.
+ return;
+ }
+
+ D->addAttr(::new (getASTContext())
+ ObjCReturnsInnerPointerAttr(getASTContext(), Attrs));
+}
+
+void SemaObjC::handleRequiresSuperAttr(Decl *D, const ParsedAttr &Attrs) {
+ const auto *Method = cast<ObjCMethodDecl>(D);
+
+ const DeclContext *DC = Method->getDeclContext();
+ if (const auto *PDecl = dyn_cast_if_present<ObjCProtocolDecl>(DC)) {
+ Diag(D->getBeginLoc(), diag::warn_objc_requires_super_protocol)
+ << Attrs << 0;
+ Diag(PDecl->getLocation(), diag::note_protocol_decl);
+ return;
+ }
+ if (Method->getMethodFamily() == OMF_dealloc) {
+ Diag(D->getBeginLoc(), diag::warn_objc_requires_super_protocol)
+ << Attrs << 1;
+ return;
+ }
+
+ D->addAttr(::new (getASTContext())
+ ObjCRequiresSuperAttr(getASTContext(), Attrs));
+}
+
+void SemaObjC::handleNSErrorDomain(Decl *D, const ParsedAttr &Attr) {
+ if (!isa<TagDecl>(D)) {
+ Diag(D->getBeginLoc(), diag::err_nserrordomain_invalid_decl) << 0;
+ return;
+ }
+
+ IdentifierLoc *IdentLoc =
+ Attr.isArgIdent(0) ? Attr.getArgAsIdent(0) : nullptr;
+ if (!IdentLoc || !IdentLoc->Ident) {
+ // Try to locate the argument directly.
+ SourceLocation Loc = Attr.getLoc();
+ if (Attr.isArgExpr(0) && Attr.getArgAsExpr(0))
+ Loc = Attr.getArgAsExpr(0)->getBeginLoc();
+
+ Diag(Loc, diag::err_nserrordomain_invalid_decl) << 0;
+ return;
+ }
+
+ // Verify that the identifier is a valid decl in the C decl namespace.
+ LookupResult Result(SemaRef, DeclarationName(IdentLoc->Ident),
+ SourceLocation(),
+ Sema::LookupNameKind::LookupOrdinaryName);
+ if (!SemaRef.LookupName(Result, SemaRef.TUScope) ||
+ !Result.getAsSingle<VarDecl>()) {
+ Diag(IdentLoc->Loc, diag::err_nserrordomain_invalid_decl)
+ << 1 << IdentLoc->Ident;
+ return;
+ }
+
+ D->addAttr(::new (getASTContext())
+ NSErrorDomainAttr(getASTContext(), Attr, IdentLoc->Ident));
+}
+
+void SemaObjC::handleBridgeAttr(Decl *D, const ParsedAttr &AL) {
+ IdentifierLoc *Parm = AL.isArgIdent(0) ? AL.getArgAsIdent(0) : nullptr;
+
+ if (!Parm) {
+ Diag(D->getBeginLoc(), diag::err_objc_attr_not_id) << AL << 0;
+ return;
+ }
+
+ // Typedefs only allow objc_bridge(id) and have some additional checking.
+ if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
+ if (!Parm->Ident->isStr("id")) {
+ Diag(AL.getLoc(), diag::err_objc_attr_typedef_not_id) << AL;
+ return;
+ }
+
+ // Only allow 'cv void *'.
+ QualType T = TD->getUnderlyingType();
+ if (!T->isVoidPointerType()) {
+ Diag(AL.getLoc(), diag::err_objc_attr_typedef_not_void_pointer);
+ return;
+ }
+ }
+
+ D->addAttr(::new (getASTContext())
+ ObjCBridgeAttr(getASTContext(), AL, Parm->Ident));
+}
+
+void SemaObjC::handleBridgeMutableAttr(Decl *D, const ParsedAttr &AL) {
+ IdentifierLoc *Parm = AL.isArgIdent(0) ? AL.getArgAsIdent(0) : nullptr;
+
+ if (!Parm) {
+ Diag(D->getBeginLoc(), diag::err_objc_attr_not_id) << AL << 0;
+ return;
+ }
+
+ D->addAttr(::new (getASTContext())
+ ObjCBridgeMutableAttr(getASTContext(), AL, Parm->Ident));
+}
+
+void SemaObjC::handleBridgeRelatedAttr(Decl *D, const ParsedAttr &AL) {
+ IdentifierInfo *RelatedClass =
+ AL.isArgIdent(0) ? AL.getArgAsIdent(0)->Ident : nullptr;
+ if (!RelatedClass) {
+ Diag(D->getBeginLoc(), diag::err_objc_attr_not_id) << AL << 0;
+ return;
+ }
+ IdentifierInfo *ClassMethod =
+ AL.getArgAsIdent(1) ? AL.getArgAsIdent(1)->Ident : nullptr;
+ IdentifierInfo *InstanceMethod =
+ AL.getArgAsIdent(2) ? AL.getArgAsIdent(2)->Ident : nullptr;
+ D->addAttr(::new (getASTContext()) ObjCBridgeRelatedAttr(
+ getASTContext(), AL, RelatedClass, ClassMethod, InstanceMethod));
+}
+
+void SemaObjC::handleDesignatedInitializer(Decl *D, const ParsedAttr &AL) {
+ DeclContext *Ctx = D->getDeclContext();
+
+ // This attribute can only be applied to methods in interfaces or class
+ // extensions.
+ if (!isa<ObjCInterfaceDecl>(Ctx) &&
+ !(isa<ObjCCategoryDecl>(Ctx) &&
+ cast<ObjCCategoryDecl>(Ctx)->IsClassExtension())) {
+ Diag(D->getLocation(), diag::err_designated_init_attr_non_init);
+ return;
+ }
+
+ ObjCInterfaceDecl *IFace;
+ if (auto *CatDecl = dyn_cast<ObjCCategoryDecl>(Ctx))
+ IFace = CatDecl->getClassInterface();
+ else
+ IFace = cast<ObjCInterfaceDecl>(Ctx);
+
+ if (!IFace)
+ return;
+
+ IFace->setHasDesignatedInitializers();
+ D->addAttr(::new (getASTContext())
+ ObjCDesignatedInitializerAttr(getASTContext(), AL));
+}
+
+void SemaObjC::handleRuntimeName(Decl *D, const ParsedAttr &AL) {
+ StringRef MetaDataName;
+ if (!SemaRef.checkStringLiteralArgumentAttr(AL, 0, MetaDataName))
+ return;
+ D->addAttr(::new (getASTContext())
+ ObjCRuntimeNameAttr(getASTContext(), AL, MetaDataName));
+}
+
+// When a user wants to use objc_boxable with a union or struct
+// but they don't have access to the declaration (legacy/third-party code)
+// then they can 'enable' this feature with a typedef:
+// typedef struct __attribute((objc_boxable)) legacy_struct legacy_struct;
+void SemaObjC::handleBoxable(Decl *D, const ParsedAttr &AL) {
+ bool notify = false;
+
+ auto *RD = dyn_cast<RecordDecl>(D);
+ if (RD && RD->getDefinition()) {
+ RD = RD->getDefinition();
+ notify = true;
+ }
+
+ if (RD) {
+ ObjCBoxableAttr *BoxableAttr =
+ ::new (getASTContext()) ObjCBoxableAttr(getASTContext(), AL);
+ RD->addAttr(BoxableAttr);
+ if (notify) {
+ // we need to notify ASTReader/ASTWriter about
+ // modification of existing declaration
+ if (ASTMutationListener *L = SemaRef.getASTMutationListener())
+ L->AddedAttributeToRecord(BoxableAttr, RD);
+ }
+ }
+}
+
+void SemaObjC::handleOwnershipAttr(Decl *D, const ParsedAttr &AL) {
+ if (hasDeclarator(D))
+ return;
+
+ Diag(D->getBeginLoc(), diag::err_attribute_wrong_decl_type)
+ << AL.getRange() << AL << AL.isRegularKeywordAttribute()
+ << ExpectedVariable;
+}
+
+void SemaObjC::handlePreciseLifetimeAttr(Decl *D, const ParsedAttr &AL) {
+ const auto *VD = cast<ValueDecl>(D);
+ QualType QT = VD->getType();
+
+ if (!QT->isDependentType() && !QT->isObjCLifetimeType()) {
+ Diag(AL.getLoc(), diag::err_objc_precise_lifetime_bad_type) << QT;
+ return;
+ }
+
+ Qualifiers::ObjCLifetime Lifetime = QT.getObjCLifetime();
+
+ // If we have no lifetime yet, check the lifetime we're presumably
+ // going to infer.
+ if (Lifetime == Qualifiers::OCL_None && !QT->isDependentType())
+ Lifetime = QT->getObjCARCImplicitLifetime();
+
+ switch (Lifetime) {
+ case Qualifiers::OCL_None:
+ assert(QT->isDependentType() &&
+ "didn't infer lifetime for non-dependent type?");
+ break;
+
+ case Qualifiers::OCL_Weak: // meaningful
+ case Qualifiers::OCL_Strong: // meaningful
+ break;
+
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ Diag(AL.getLoc(), diag::warn_objc_precise_lifetime_meaningless)
+ << (Lifetime == Qualifiers::OCL_Autoreleasing);
+ break;
+ }
+
+ D->addAttr(::new (getASTContext())
+ ObjCPreciseLifetimeAttr(getASTContext(), AL));
+}
+
+static bool tryMakeVariablePseudoStrong(Sema &S, VarDecl *VD,
+ bool DiagnoseFailure) {
+ QualType Ty = VD->getType();
+ if (!Ty->isObjCRetainableType()) {
+ if (DiagnoseFailure) {
+ S.Diag(VD->getBeginLoc(), diag::warn_ignored_objc_externally_retained)
+ << 0;
+ }
+ return false;
+ }
+
+ Qualifiers::ObjCLifetime LifetimeQual = Ty.getQualifiers().getObjCLifetime();
+
+ // SemaObjC::inferObjCARCLifetime must run after processing decl attributes
+ // (because __block lowers to an attribute), so if the lifetime hasn't been
+ // explicitly specified, infer it locally now.
+ if (LifetimeQual == Qualifiers::OCL_None)
+ LifetimeQual = Ty->getObjCARCImplicitLifetime();
+
+ // The attributes only really makes sense for __strong variables; ignore any
+ // attempts to annotate a parameter with any other lifetime qualifier.
+ if (LifetimeQual != Qualifiers::OCL_Strong) {
+ if (DiagnoseFailure) {
+ S.Diag(VD->getBeginLoc(), diag::warn_ignored_objc_externally_retained)
+ << 1;
+ }
+ return false;
+ }
+
+ // Tampering with the type of a VarDecl here is a bit of a hack, but we need
+ // to ensure that the variable is 'const' so that we can error on
+ // modification, which can otherwise over-release.
+ VD->setType(Ty.withConst());
+ VD->setARCPseudoStrong(true);
+ return true;
+}
+
+void SemaObjC::handleExternallyRetainedAttr(Decl *D, const ParsedAttr &AL) {
+ if (auto *VD = dyn_cast<VarDecl>(D)) {
+ assert(!isa<ParmVarDecl>(VD) && "should be diagnosed automatically");
+ if (!VD->hasLocalStorage()) {
+ Diag(D->getBeginLoc(), diag::warn_ignored_objc_externally_retained) << 0;
+ return;
+ }
+
+ if (!tryMakeVariablePseudoStrong(SemaRef, VD, /*DiagnoseFailure=*/true))
+ return;
+
+ handleSimpleAttribute<ObjCExternallyRetainedAttr>(*this, D, AL);
+ return;
+ }
+
+ // If D is a function-like declaration (method, block, or function), then we
+ // make every parameter psuedo-strong.
+ unsigned NumParams =
+ hasFunctionProto(D) ? getFunctionOrMethodNumParams(D) : 0;
+ for (unsigned I = 0; I != NumParams; ++I) {
+ auto *PVD = const_cast<ParmVarDecl *>(getFunctionOrMethodParam(D, I));
+ QualType Ty = PVD->getType();
+
+ // If a user wrote a parameter with __strong explicitly, then assume they
+ // want "real" strong semantics for that parameter. This works because if
+ // the parameter was written with __strong, then the strong qualifier will
+ // be non-local.
+ if (Ty.getLocalUnqualifiedType().getQualifiers().getObjCLifetime() ==
+ Qualifiers::OCL_Strong)
+ continue;
+
+ tryMakeVariablePseudoStrong(SemaRef, PVD, /*DiagnoseFailure=*/false);
+ }
+ handleSimpleAttribute<ObjCExternallyRetainedAttr>(*this, D, AL);
+}
+
+bool SemaObjC::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
+ Sema::FormatStringInfo FSI;
+ if ((SemaRef.GetFormatStringType(Format) == Sema::FST_NSString) &&
+ SemaRef.getFormatStringInfo(Format, false, true, &FSI)) {
+ Idx = FSI.FormatIdx;
+ return true;
+ }
+ return false;
+}
+
+/// Diagnose use of %s directive in an NSString which is being passed
+/// as formatting string to formatting method.
+void SemaObjC::DiagnoseCStringFormatDirectiveInCFAPI(const NamedDecl *FDecl,
+ Expr **Args,
+ unsigned NumArgs) {
+ unsigned Idx = 0;
+ bool Format = false;
+ ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
+ if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
+ Idx = 2;
+ Format = true;
+ } else
+ for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
+ if (GetFormatNSStringIdx(I, Idx)) {
+ Format = true;
+ break;
+ }
+ }
+ if (!Format || NumArgs <= Idx)
+ return;
+ const Expr *FormatExpr = Args[Idx];
+ if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
+ FormatExpr = CSCE->getSubExpr();
+ const StringLiteral *FormatString;
+ if (const ObjCStringLiteral *OSL =
+ dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
+ FormatString = OSL->getString();
+ else
+ FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
+ if (!FormatString)
+ return;
+ if (SemaRef.FormatStringHasSArg(FormatString)) {
+ Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
+ << "%s" << 1 << 1;
+ Diag(FDecl->getLocation(), diag::note_entity_declared_at)
+ << FDecl->getDeclName();
+ }
+}
+
+bool SemaObjC::isSignedCharBool(QualType Ty) {
+ return Ty->isSpecificBuiltinType(BuiltinType::SChar) && getLangOpts().ObjC &&
+ NSAPIObj->isObjCBOOLType(Ty);
+}
+
+void SemaObjC::adornBoolConversionDiagWithTernaryFixit(
+ Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) {
+ Expr *Ignored = SourceExpr->IgnoreImplicit();
+ if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored))
+ Ignored = OVE->getSourceExpr();
+ bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) ||
+ isa<BinaryOperator>(Ignored) ||
+ isa<CXXOperatorCallExpr>(Ignored);
+ SourceLocation EndLoc = SemaRef.getLocForEndOfToken(SourceExpr->getEndLoc());
+ if (NeedsParens)
+ Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(")
+ << FixItHint::CreateInsertion(EndLoc, ")");
+ Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO");
+}
+
+/// Check a single element within a collection literal against the
+/// target element type.
+static void checkCollectionLiteralElement(Sema &S, QualType TargetElementType,
+ Expr *Element, unsigned ElementKind) {
+ // Skip a bitcast to 'id' or qualified 'id'.
+ if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) {
+ if (ICE->getCastKind() == CK_BitCast &&
+ ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>())
+ Element = ICE->getSubExpr();
+ }
+
+ QualType ElementType = Element->getType();
+ ExprResult ElementResult(Element);
+ if (ElementType->getAs<ObjCObjectPointerType>() &&
+ S.CheckSingleAssignmentConstraints(TargetElementType, ElementResult,
+ false, false) != Sema::Compatible) {
+ S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element)
+ << ElementType << ElementKind << TargetElementType
+ << Element->getSourceRange();
+ }
+
+ if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element))
+ S.ObjC().checkArrayLiteral(TargetElementType, ArrayLiteral);
+ else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element))
+ S.ObjC().checkDictionaryLiteral(TargetElementType, DictionaryLiteral);
+}
+
+/// Check an Objective-C array literal being converted to the given
+/// target type.
+void SemaObjC::checkArrayLiteral(QualType TargetType,
+ ObjCArrayLiteral *ArrayLiteral) {
+ if (!NSArrayDecl)
+ return;
+
+ const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
+ if (!TargetObjCPtr)
+ return;
+
+ if (TargetObjCPtr->isUnspecialized() ||
+ TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() !=
+ NSArrayDecl->getCanonicalDecl())
+ return;
+
+ auto TypeArgs = TargetObjCPtr->getTypeArgs();
+ if (TypeArgs.size() != 1)
+ return;
+
+ QualType TargetElementType = TypeArgs[0];
+ for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) {
+ checkCollectionLiteralElement(SemaRef, TargetElementType,
+ ArrayLiteral->getElement(I), 0);
+ }
+}
+
+void SemaObjC::checkDictionaryLiteral(
+ QualType TargetType, ObjCDictionaryLiteral *DictionaryLiteral) {
+ if (!NSDictionaryDecl)
+ return;
+
+ const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
+ if (!TargetObjCPtr)
+ return;
+
+ if (TargetObjCPtr->isUnspecialized() ||
+ TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() !=
+ NSDictionaryDecl->getCanonicalDecl())
+ return;
+
+ auto TypeArgs = TargetObjCPtr->getTypeArgs();
+ if (TypeArgs.size() != 2)
+ return;
+
+ QualType TargetKeyType = TypeArgs[0];
+ QualType TargetObjectType = TypeArgs[1];
+ for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) {
+ auto Element = DictionaryLiteral->getKeyValueElement(I);
+ checkCollectionLiteralElement(SemaRef, TargetKeyType, Element.Key, 1);
+ checkCollectionLiteralElement(SemaRef, TargetObjectType, Element.Value, 2);
+ }
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp b/contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp
index 349c7fc9c91b..031f2a6af877 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ExprCXX.h"
@@ -20,6 +19,8 @@
#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Initialization.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaObjC.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallString.h"
@@ -114,7 +115,8 @@ CheckPropertyAgainstProtocol(Sema &S, ObjCPropertyDecl *Prop,
// Look for a property with the same name.
if (ObjCPropertyDecl *ProtoProp = Proto->getProperty(
Prop->getIdentifier(), Prop->isInstanceProperty())) {
- S.DiagnosePropertyMismatch(Prop, ProtoProp, Proto->getIdentifier(), true);
+ S.ObjC().DiagnosePropertyMismatch(Prop, ProtoProp, Proto->getIdentifier(),
+ true);
return;
}
@@ -169,28 +171,26 @@ static unsigned getOwnershipRule(unsigned attr) {
return result;
}
-Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
- SourceLocation LParenLoc,
- FieldDeclarator &FD,
- ObjCDeclSpec &ODS,
- Selector GetterSel,
- Selector SetterSel,
- tok::ObjCKeywordKind MethodImplKind,
- DeclContext *lexicalDC) {
+Decl *SemaObjC::ActOnProperty(Scope *S, SourceLocation AtLoc,
+ SourceLocation LParenLoc, FieldDeclarator &FD,
+ ObjCDeclSpec &ODS, Selector GetterSel,
+ Selector SetterSel,
+ tok::ObjCKeywordKind MethodImplKind,
+ DeclContext *lexicalDC) {
unsigned Attributes = ODS.getPropertyAttributes();
FD.D.setObjCWeakProperty((Attributes & ObjCPropertyAttribute::kind_weak) !=
0);
- TypeSourceInfo *TSI = GetTypeForDeclarator(FD.D);
+ TypeSourceInfo *TSI = SemaRef.GetTypeForDeclarator(FD.D);
QualType T = TSI->getType();
if (!getOwnershipRule(Attributes)) {
- Attributes |= deducePropertyOwnershipFromType(*this, T);
+ Attributes |= deducePropertyOwnershipFromType(SemaRef, T);
}
bool isReadWrite = ((Attributes & ObjCPropertyAttribute::kind_readwrite) ||
// default is readwrite!
!(Attributes & ObjCPropertyAttribute::kind_readonly));
// Proceed with constructing the ObjCPropertyDecls.
- ObjCContainerDecl *ClassDecl = cast<ObjCContainerDecl>(CurContext);
+ ObjCContainerDecl *ClassDecl = cast<ObjCContainerDecl>(SemaRef.CurContext);
ObjCPropertyDecl *Res = nullptr;
if (ObjCCategoryDecl *CDecl = dyn_cast<ObjCCategoryDecl>(ClassDecl)) {
if (CDecl->IsClassExtension()) {
@@ -223,7 +223,7 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
// Check consistency if the type has explicit ownership qualification.
if (Res->getType().getObjCLifetime())
- checkPropertyDeclWithOwnership(*this, Res);
+ checkPropertyDeclWithOwnership(SemaRef, Res);
llvm::SmallPtrSet<ObjCProtocolDecl *, 16> KnownProtos;
if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(ClassDecl)) {
@@ -243,12 +243,12 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
if (FoundInSuper) {
// Also compare the property against a property in our protocols.
for (auto *P : CurrentInterfaceDecl->protocols()) {
- CheckPropertyAgainstProtocol(*this, Res, P, KnownProtos);
+ CheckPropertyAgainstProtocol(SemaRef, Res, P, KnownProtos);
}
} else {
// Slower path: look in all protocols we referenced.
for (auto *P : IFace->all_referenced_protocols()) {
- CheckPropertyAgainstProtocol(*this, Res, P, KnownProtos);
+ CheckPropertyAgainstProtocol(SemaRef, Res, P, KnownProtos);
}
}
} else if (ObjCCategoryDecl *Cat = dyn_cast<ObjCCategoryDecl>(ClassDecl)) {
@@ -257,14 +257,14 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
// when property in class extension is constructed.
if (!Cat->IsClassExtension())
for (auto *P : Cat->protocols())
- CheckPropertyAgainstProtocol(*this, Res, P, KnownProtos);
+ CheckPropertyAgainstProtocol(SemaRef, Res, P, KnownProtos);
} else {
ObjCProtocolDecl *Proto = cast<ObjCProtocolDecl>(ClassDecl);
for (auto *P : Proto->protocols())
- CheckPropertyAgainstProtocol(*this, Res, P, KnownProtos);
+ CheckPropertyAgainstProtocol(SemaRef, Res, P, KnownProtos);
}
- ActOnDocumentableDecl(Res);
+ SemaRef.ActOnDocumentableDecl(Res);
return Res;
}
@@ -401,25 +401,16 @@ static void checkAtomicPropertyMismatch(Sema &S,
S.Diag(OldProperty->getLocation(), diag::note_property_declare);
}
-ObjCPropertyDecl *
-Sema::HandlePropertyInClassExtension(Scope *S,
- SourceLocation AtLoc,
- SourceLocation LParenLoc,
- FieldDeclarator &FD,
- Selector GetterSel,
- SourceLocation GetterNameLoc,
- Selector SetterSel,
- SourceLocation SetterNameLoc,
- const bool isReadWrite,
- unsigned &Attributes,
- const unsigned AttributesAsWritten,
- QualType T,
- TypeSourceInfo *TSI,
- tok::ObjCKeywordKind MethodImplKind) {
- ObjCCategoryDecl *CDecl = cast<ObjCCategoryDecl>(CurContext);
+ObjCPropertyDecl *SemaObjC::HandlePropertyInClassExtension(
+ Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc,
+ FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc,
+ Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite,
+ unsigned &Attributes, const unsigned AttributesAsWritten, QualType T,
+ TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind) {
+ ObjCCategoryDecl *CDecl = cast<ObjCCategoryDecl>(SemaRef.CurContext);
// Diagnose if this property is already in continuation class.
- DeclContext *DC = CurContext;
- IdentifierInfo *PropertyId = FD.D.getIdentifier();
+ DeclContext *DC = SemaRef.CurContext;
+ const IdentifierInfo *PropertyId = FD.D.getIdentifier();
ObjCInterfaceDecl *CCPrimary = CDecl->getClassInterface();
// We need to look in the @interface to see if the @property was
@@ -515,7 +506,7 @@ Sema::HandlePropertyInClassExtension(Scope *S,
isReadWrite,
Attributes, AttributesAsWritten,
T, TSI, MethodImplKind, DC);
-
+ ASTContext &Context = getASTContext();
// If there was no declaration of a property with the same name in
// the primary class, we're done.
if (!PIDecl) {
@@ -536,9 +527,10 @@ Sema::HandlePropertyInClassExtension(Scope *S,
QualType ClassExtPropertyT = Context.getCanonicalType(PDecl->getType());
if (!isa<ObjCObjectPointerType>(PrimaryClassPropertyT) ||
!isa<ObjCObjectPointerType>(ClassExtPropertyT) ||
- (!isObjCPointerConversion(ClassExtPropertyT, PrimaryClassPropertyT,
- ConvertedType, IncompatibleObjC))
- || IncompatibleObjC) {
+ (!SemaRef.isObjCPointerConversion(ClassExtPropertyT,
+ PrimaryClassPropertyT, ConvertedType,
+ IncompatibleObjC)) ||
+ IncompatibleObjC) {
Diag(AtLoc,
diag::err_type_mismatch_continuation_class) << PDecl->getType();
Diag(PIDecl->getLocation(), diag::note_property_declare);
@@ -548,30 +540,23 @@ Sema::HandlePropertyInClassExtension(Scope *S,
// Check that atomicity of property in class extension matches the previous
// declaration.
- checkAtomicPropertyMismatch(*this, PIDecl, PDecl, true);
+ checkAtomicPropertyMismatch(SemaRef, PIDecl, PDecl, true);
// Make sure getter/setter are appropriately synthesized.
ProcessPropertyDecl(PDecl);
return PDecl;
}
-ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
- ObjCContainerDecl *CDecl,
- SourceLocation AtLoc,
- SourceLocation LParenLoc,
- FieldDeclarator &FD,
- Selector GetterSel,
- SourceLocation GetterNameLoc,
- Selector SetterSel,
- SourceLocation SetterNameLoc,
- const bool isReadWrite,
- const unsigned Attributes,
- const unsigned AttributesAsWritten,
- QualType T,
- TypeSourceInfo *TInfo,
- tok::ObjCKeywordKind MethodImplKind,
- DeclContext *lexicalDC){
- IdentifierInfo *PropertyId = FD.D.getIdentifier();
+ObjCPropertyDecl *SemaObjC::CreatePropertyDecl(
+ Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc,
+ SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel,
+ SourceLocation GetterNameLoc, Selector SetterSel,
+ SourceLocation SetterNameLoc, const bool isReadWrite,
+ const unsigned Attributes, const unsigned AttributesAsWritten, QualType T,
+ TypeSourceInfo *TInfo, tok::ObjCKeywordKind MethodImplKind,
+ DeclContext *lexicalDC) {
+ ASTContext &Context = getASTContext();
+ const IdentifierInfo *PropertyId = FD.D.getIdentifier();
// Property defaults to 'assign' if it is readwrite, unless this is ARC
// and the type is retainable.
@@ -603,7 +588,7 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
if (T->isObjCObjectType()) {
SourceLocation StarLoc = TInfo->getTypeLoc().getEndLoc();
- StarLoc = getLocForEndOfToken(StarLoc);
+ StarLoc = SemaRef.getLocForEndOfToken(StarLoc);
Diag(FD.D.getIdentifierLoc(), diag::err_statically_allocated_object)
<< FixItHint::CreateInsertion(StarLoc, "*");
T = Context.getObjCObjectPointerType(T);
@@ -638,8 +623,6 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
PDecl->setInvalidDecl();
}
- ProcessDeclAttributes(S, PDecl, FD.D);
-
// Regardless of setter/getter attribute, we save the default getter/setter
// selector names in anticipation of declaration of setter/getter methods.
PDecl->setGetterName(GetterSel, GetterNameLoc);
@@ -647,6 +630,8 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
PDecl->setPropertyAttributesAsWritten(
makePropertyAttributesAsWritten(AttributesAsWritten));
+ SemaRef.ProcessDeclAttributes(S, PDecl, FD.D);
+
if (Attributes & ObjCPropertyAttribute::kind_readonly)
PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_readonly);
@@ -1074,16 +1059,13 @@ RedeclarePropertyAccessor(ASTContext &Context, ObjCImplementationDecl *Impl,
/// builds the AST node for a property implementation declaration; declared
/// as \@synthesize or \@dynamic.
///
-Decl *Sema::ActOnPropertyImplDecl(Scope *S,
- SourceLocation AtLoc,
- SourceLocation PropertyLoc,
- bool Synthesize,
- IdentifierInfo *PropertyId,
- IdentifierInfo *PropertyIvar,
- SourceLocation PropertyIvarLoc,
- ObjCPropertyQueryKind QueryKind) {
+Decl *SemaObjC::ActOnPropertyImplDecl(
+ Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool Synthesize,
+ IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar,
+ SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind) {
+ ASTContext &Context = getASTContext();
ObjCContainerDecl *ClassImpDecl =
- dyn_cast<ObjCContainerDecl>(CurContext);
+ dyn_cast<ObjCContainerDecl>(SemaRef.CurContext);
// Make sure we have a context for the property implementation declaration.
if (!ClassImpDecl) {
Diag(AtLoc, diag::err_missing_property_context);
@@ -1167,7 +1149,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
}
}
if (Synthesize && isa<ObjCProtocolDecl>(property->getDeclContext()))
- property = SelectPropertyForSynthesisFromProtocols(*this, AtLoc, IDecl,
+ property = SelectPropertyForSynthesisFromProtocols(SemaRef, AtLoc, IDecl,
property);
} else if ((CatImplClass = dyn_cast<ObjCCategoryImplDecl>(ClassImpDecl))) {
@@ -1212,9 +1194,9 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
QualType PropType = property->getType();
QualType PropertyIvarType = PropType.getNonReferenceType();
- if (RequireCompleteType(PropertyDiagLoc, PropertyIvarType,
- diag::err_incomplete_synthesized_property,
- property->getDeclName())) {
+ if (SemaRef.RequireCompleteType(PropertyDiagLoc, PropertyIvarType,
+ diag::err_incomplete_synthesized_property,
+ property->getDeclName())) {
Diag(property->getLocation(), diag::note_property_declare);
CompleteTypeErr = true;
}
@@ -1320,10 +1302,9 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
PropertyIvarType, /*TInfo=*/nullptr,
ObjCIvarDecl::Private,
(Expr *)nullptr, true);
- if (RequireNonAbstractType(PropertyIvarLoc,
- PropertyIvarType,
- diag::err_abstract_type_in_decl,
- AbstractSynthesizedIvarType)) {
+ if (SemaRef.RequireNonAbstractType(PropertyIvarLoc, PropertyIvarType,
+ diag::err_abstract_type_in_decl,
+ Sema::AbstractSynthesizedIvarType)) {
Diag(property->getLocation(), diag::note_property_declare);
// An abstract type is as bad as an incomplete type.
CompleteTypeErr = true;
@@ -1367,9 +1348,9 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
PropertyIvarType->castAs<ObjCObjectPointerType>(),
IvarType->castAs<ObjCObjectPointerType>());
else {
- compat = (CheckAssignmentConstraints(PropertyIvarLoc, PropertyIvarType,
- IvarType)
- == Compatible);
+ compat = (SemaRef.CheckAssignmentConstraints(
+ PropertyIvarLoc, PropertyIvarType, IvarType) ==
+ Sema::Compatible);
}
if (!compat) {
Diag(PropertyDiagLoc, diag::err_property_ivar_type)
@@ -1413,19 +1394,17 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
}
if (getLangOpts().ObjCAutoRefCount || isARCWeak ||
Ivar->getType().getObjCLifetime())
- checkARCPropertyImpl(*this, PropertyLoc, property, Ivar);
+ checkARCPropertyImpl(SemaRef, PropertyLoc, property, Ivar);
} else if (PropertyIvar)
// @dynamic
Diag(PropertyDiagLoc, diag::err_dynamic_property_ivar_decl);
assert (property && "ActOnPropertyImplDecl - property declaration missing");
- ObjCPropertyImplDecl *PIDecl =
- ObjCPropertyImplDecl::Create(Context, CurContext, AtLoc, PropertyLoc,
- property,
- (Synthesize ?
- ObjCPropertyImplDecl::Synthesize
- : ObjCPropertyImplDecl::Dynamic),
- Ivar, PropertyIvarLoc);
+ ObjCPropertyImplDecl *PIDecl = ObjCPropertyImplDecl::Create(
+ Context, SemaRef.CurContext, AtLoc, PropertyLoc, property,
+ (Synthesize ? ObjCPropertyImplDecl::Synthesize
+ : ObjCPropertyImplDecl::Dynamic),
+ Ivar, PropertyIvarLoc);
if (CompleteTypeErr || !compat)
PIDecl->setInvalidDecl();
@@ -1449,12 +1428,12 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
// For Objective-C++, need to synthesize the AST for the IVAR object to be
// returned by the getter as it must conform to C++'s copy-return rules.
// FIXME. Eventually we want to do this for Objective-C as well.
- SynthesizedFunctionScope Scope(*this, getterMethod);
+ Sema::SynthesizedFunctionScope Scope(SemaRef, getterMethod);
ImplicitParamDecl *SelfDecl = getterMethod->getSelfDecl();
DeclRefExpr *SelfExpr = new (Context)
DeclRefExpr(Context, SelfDecl, false, SelfDecl->getType(), VK_LValue,
PropertyDiagLoc);
- MarkDeclRefReferenced(SelfExpr);
+ SemaRef.MarkDeclRefReferenced(SelfExpr);
Expr *LoadSelfExpr = ImplicitCastExpr::Create(
Context, SelfDecl->getType(), CK_LValueToRValue, SelfExpr, nullptr,
VK_PRValue, FPOptionsOverride());
@@ -1464,14 +1443,14 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
PropertyDiagLoc,
Ivar->getLocation(),
LoadSelfExpr, true, true);
- ExprResult Res = PerformCopyInitialization(
+ ExprResult Res = SemaRef.PerformCopyInitialization(
InitializedEntity::InitializeResult(PropertyDiagLoc,
getterMethod->getReturnType()),
PropertyDiagLoc, IvarRefExpr);
if (!Res.isInvalid()) {
Expr *ResExpr = Res.getAs<Expr>();
if (ResExpr)
- ResExpr = MaybeCreateExprWithCleanups(ResExpr);
+ ResExpr = SemaRef.MaybeCreateExprWithCleanups(ResExpr);
PIDecl->setGetterCXXConstructor(ResExpr);
}
}
@@ -1511,12 +1490,12 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (getLangOpts().CPlusPlus && Synthesize && !CompleteTypeErr &&
Ivar->getType()->isRecordType()) {
// FIXME. Eventually we want to do this for Objective-C as well.
- SynthesizedFunctionScope Scope(*this, setterMethod);
+ Sema::SynthesizedFunctionScope Scope(SemaRef, setterMethod);
ImplicitParamDecl *SelfDecl = setterMethod->getSelfDecl();
DeclRefExpr *SelfExpr = new (Context)
DeclRefExpr(Context, SelfDecl, false, SelfDecl->getType(), VK_LValue,
PropertyDiagLoc);
- MarkDeclRefReferenced(SelfExpr);
+ SemaRef.MarkDeclRefReferenced(SelfExpr);
Expr *LoadSelfExpr = ImplicitCastExpr::Create(
Context, SelfDecl->getType(), CK_LValueToRValue, SelfExpr, nullptr,
VK_PRValue, FPOptionsOverride());
@@ -1531,9 +1510,9 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
QualType T = Param->getType().getNonReferenceType();
DeclRefExpr *rhs = new (Context)
DeclRefExpr(Context, Param, false, T, VK_LValue, PropertyDiagLoc);
- MarkDeclRefReferenced(rhs);
- ExprResult Res = BuildBinOp(S, PropertyDiagLoc,
- BO_Assign, lhs, rhs);
+ SemaRef.MarkDeclRefReferenced(rhs);
+ ExprResult Res =
+ SemaRef.BuildBinOp(S, PropertyDiagLoc, BO_Assign, lhs, rhs);
if (property->getPropertyAttributes() &
ObjCPropertyAttribute::kind_atomic) {
Expr *callExpr = Res.getAs<Expr>();
@@ -1630,11 +1609,11 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
/// DiagnosePropertyMismatch - Compares two properties for their
/// attributes and types and warns on a variety of inconsistencies.
///
-void
-Sema::DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
- ObjCPropertyDecl *SuperProperty,
- const IdentifierInfo *inheritedName,
- bool OverridingProtocolProperty) {
+void SemaObjC::DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
+ ObjCPropertyDecl *SuperProperty,
+ const IdentifierInfo *inheritedName,
+ bool OverridingProtocolProperty) {
+ ASTContext &Context = getASTContext();
ObjCPropertyAttribute::Kind CAttr = Property->getPropertyAttributes();
ObjCPropertyAttribute::Kind SAttr = SuperProperty->getPropertyAttributes();
@@ -1669,7 +1648,7 @@ Sema::DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
// Check for nonatomic; note that nonatomic is effectively
// meaningless for readonly properties, so don't diagnose if the
// atomic property is 'readonly'.
- checkAtomicPropertyMismatch(*this, SuperProperty, Property, false);
+ checkAtomicPropertyMismatch(SemaRef, SuperProperty, Property, false);
// Readonly properties from protocols can be implemented as "readwrite"
// with a custom setter name.
if (Property->getSetterName() != SuperProperty->getSetterName() &&
@@ -1695,19 +1674,20 @@ Sema::DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
// FIXME. For future support of covariant property types, revisit this.
bool IncompatibleObjC = false;
QualType ConvertedType;
- if (!isObjCPointerConversion(RHSType, LHSType,
- ConvertedType, IncompatibleObjC) ||
+ if (!SemaRef.isObjCPointerConversion(RHSType, LHSType, ConvertedType,
+ IncompatibleObjC) ||
IncompatibleObjC) {
- Diag(Property->getLocation(), diag::warn_property_types_are_incompatible)
- << Property->getType() << SuperProperty->getType() << inheritedName;
+ Diag(Property->getLocation(), diag::warn_property_types_are_incompatible)
+ << Property->getType() << SuperProperty->getType() << inheritedName;
Diag(SuperProperty->getLocation(), diag::note_property_declare);
}
}
}
-bool Sema::DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *property,
- ObjCMethodDecl *GetterMethod,
- SourceLocation Loc) {
+bool SemaObjC::DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *property,
+ ObjCMethodDecl *GetterMethod,
+ SourceLocation Loc) {
+ ASTContext &Context = getASTContext();
if (!GetterMethod)
return false;
QualType GetterType = GetterMethod->getReturnType().getNonReferenceType();
@@ -1721,13 +1701,13 @@ bool Sema::DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *property,
PropertyRValueType->getAs<ObjCObjectPointerType>()) &&
(getterObjCPtr = GetterType->getAs<ObjCObjectPointerType>()))
compat = Context.canAssignObjCInterfaces(getterObjCPtr, propertyObjCPtr);
- else if (CheckAssignmentConstraints(Loc, GetterType, PropertyRValueType)
- != Compatible) {
- Diag(Loc, diag::err_property_accessor_type)
- << property->getDeclName() << PropertyRValueType
- << GetterMethod->getSelector() << GetterType;
- Diag(GetterMethod->getLocation(), diag::note_declared_at);
- return true;
+ else if (SemaRef.CheckAssignmentConstraints(
+ Loc, GetterType, PropertyRValueType) != Sema::Compatible) {
+ Diag(Loc, diag::err_property_accessor_type)
+ << property->getDeclName() << PropertyRValueType
+ << GetterMethod->getSelector() << GetterType;
+ Diag(GetterMethod->getLocation(), diag::note_declared_at);
+ return true;
} else {
compat = true;
QualType lhsType = Context.getCanonicalType(PropertyRValueType);
@@ -1831,9 +1811,9 @@ static void CollectSuperClassPropertyImplementations(ObjCInterfaceDecl *CDecl,
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
-bool
-Sema::IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
- ObjCMethodDecl *Method, ObjCIvarDecl *IV) {
+bool SemaObjC::IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
+ ObjCMethodDecl *Method,
+ ObjCIvarDecl *IV) {
if (!IV->getSynthesize())
return false;
ObjCMethodDecl *IMD = IFace->lookupMethod(Method->getSelector(),
@@ -1883,9 +1863,10 @@ static bool SuperClassImplementsProperty(ObjCInterfaceDecl *IDecl,
/// Default synthesizes all properties which must be synthesized
/// in class's \@implementation.
-void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
- ObjCInterfaceDecl *IDecl,
- SourceLocation AtEnd) {
+void SemaObjC::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
+ ObjCInterfaceDecl *IDecl,
+ SourceLocation AtEnd) {
+ ASTContext &Context = getASTContext();
ObjCInterfaceDecl::PropertyMap PropMap;
IDecl->collectPropertiesToImplement(PropMap);
if (PropMap.empty())
@@ -1977,9 +1958,10 @@ void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
}
}
-void Sema::DefaultSynthesizeProperties(Scope *S, Decl *D,
- SourceLocation AtEnd) {
- if (!LangOpts.ObjCDefaultSynthProperties || LangOpts.ObjCRuntime.isFragile())
+void SemaObjC::DefaultSynthesizeProperties(Scope *S, Decl *D,
+ SourceLocation AtEnd) {
+ if (!getLangOpts().ObjCDefaultSynthProperties ||
+ getLangOpts().ObjCRuntime.isFragile())
return;
ObjCImplementationDecl *IC=dyn_cast_or_null<ObjCImplementationDecl>(D);
if (!IC)
@@ -2026,9 +2008,9 @@ static void DiagnoseUnimplementedAccessor(
}
}
-void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
- ObjCContainerDecl *CDecl,
- bool SynthesizeProperties) {
+void SemaObjC::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl *IMPDecl,
+ ObjCContainerDecl *CDecl,
+ bool SynthesizeProperties) {
ObjCContainerDecl::PropertyMap PropMap;
ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl);
@@ -2124,16 +2106,17 @@ void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
continue;
// Diagnose unimplemented getters and setters.
- DiagnoseUnimplementedAccessor(*this,
- PrimaryClass, Prop->getGetterName(), IMPDecl, CDecl, C, Prop, InsMap);
+ DiagnoseUnimplementedAccessor(SemaRef, PrimaryClass, Prop->getGetterName(),
+ IMPDecl, CDecl, C, Prop, InsMap);
if (!Prop->isReadOnly())
- DiagnoseUnimplementedAccessor(*this,
- PrimaryClass, Prop->getSetterName(),
- IMPDecl, CDecl, C, Prop, InsMap);
+ DiagnoseUnimplementedAccessor(SemaRef, PrimaryClass,
+ Prop->getSetterName(), IMPDecl, CDecl, C,
+ Prop, InsMap);
}
}
-void Sema::diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl) {
+void SemaObjC::diagnoseNullResettableSynthesizedSetters(
+ const ObjCImplDecl *impDecl) {
for (const auto *propertyImpl : impDecl->property_impls()) {
const auto *property = propertyImpl->getPropertyDecl();
// Warn about null_resettable properties with synthesized setters,
@@ -2158,9 +2141,8 @@ void Sema::diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl)
}
}
-void
-Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
- ObjCInterfaceDecl* IDecl) {
+void SemaObjC::AtomicPropertySetterGetterRules(ObjCImplDecl *IMPDecl,
+ ObjCInterfaceDecl *IDecl) {
// Rules apply in non-GC mode only
if (getLangOpts().getGC() != LangOptions::NonGC)
return;
@@ -2232,7 +2214,7 @@ Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
!(AttributesAsWritten & ObjCPropertyAttribute::kind_atomic)) {
// @property () ... case.
SourceLocation AfterLParen =
- getLocForEndOfToken(Property->getLParenLoc());
+ SemaRef.getLocForEndOfToken(Property->getLParenLoc());
StringRef NonatomicStr = AttributesAsWritten? "nonatomic, "
: "nonatomic";
Diag(Property->getLocation(),
@@ -2253,7 +2235,8 @@ Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
}
}
-void Sema::DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D) {
+void SemaObjC::DiagnoseOwningPropertyGetterSynthesis(
+ const ObjCImplementationDecl *D) {
if (getLangOpts().getGC() == LangOptions::GCOnly)
return;
@@ -2288,7 +2271,7 @@ void Sema::DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D
fixItLoc = getterRedecl->getEndLoc();
}
- Preprocessor &PP = getPreprocessor();
+ Preprocessor &PP = SemaRef.getPreprocessor();
TokenValue tokens[] = {
tok::kw___attribute, tok::l_paren, tok::l_paren,
PP.getIdentifierInfo("objc_method_family"), tok::l_paren,
@@ -2312,9 +2295,8 @@ void Sema::DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D
}
}
-void Sema::DiagnoseMissingDesignatedInitOverrides(
- const ObjCImplementationDecl *ImplD,
- const ObjCInterfaceDecl *IFD) {
+void SemaObjC::DiagnoseMissingDesignatedInitOverrides(
+ const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD) {
assert(IFD->hasDesignatedInitializers());
const ObjCInterfaceDecl *SuperD = IFD->getSuperClass();
if (!SuperD)
@@ -2371,7 +2353,8 @@ static void AddPropertyAttrs(Sema &S, ObjCMethodDecl *PropertyMethod,
/// have the property type and issue diagnostics if they don't.
/// Also synthesize a getter/setter method if none exist (and update the
/// appropriate lookup tables.
-void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
+void SemaObjC::ProcessPropertyDecl(ObjCPropertyDecl *property) {
+ ASTContext &Context = getASTContext();
ObjCMethodDecl *GetterMethod, *SetterMethod;
ObjCContainerDecl *CD = cast<ObjCContainerDecl>(property->getDeclContext());
if (CD->isInvalidDecl())
@@ -2492,7 +2475,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
: ObjCImplementationControl::Required);
CD->addDecl(GetterMethod);
- AddPropertyAttrs(*this, GetterMethod, property);
+ AddPropertyAttrs(SemaRef, GetterMethod, property);
if (property->isDirectProperty())
GetterMethod->addAttr(ObjCDirectAttr::CreateImplicit(Context, Loc));
@@ -2509,6 +2492,8 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
GetterMethod->addAttr(SectionAttr::CreateImplicit(
Context, SA->getName(), Loc, SectionAttr::GNU_section));
+ SemaRef.ProcessAPINotes(GetterMethod);
+
if (getLangOpts().ObjCAutoRefCount)
CheckARCMethodDecl(GetterMethod);
} else
@@ -2569,7 +2554,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
nullptr);
SetterMethod->setMethodParams(Context, Argument, std::nullopt);
- AddPropertyAttrs(*this, SetterMethod, property);
+ AddPropertyAttrs(SemaRef, SetterMethod, property);
if (property->isDirectProperty())
SetterMethod->addAttr(ObjCDirectAttr::CreateImplicit(Context, Loc));
@@ -2578,6 +2563,9 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
if (const SectionAttr *SA = property->getAttr<SectionAttr>())
SetterMethod->addAttr(SectionAttr::CreateImplicit(
Context, SA->getName(), Loc, SectionAttr::GNU_section));
+
+ SemaRef.ProcessAPINotes(SetterMethod);
+
// It's possible for the user to have set a very odd custom
// setter selector that causes it to have a method family.
if (getLangOpts().ObjCAutoRefCount)
@@ -2623,15 +2611,14 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
CurrentClass = Impl->getClassInterface();
}
if (GetterMethod)
- CheckObjCMethodOverrides(GetterMethod, CurrentClass, Sema::RTC_Unknown);
+ CheckObjCMethodOverrides(GetterMethod, CurrentClass, SemaObjC::RTC_Unknown);
if (SetterMethod)
- CheckObjCMethodOverrides(SetterMethod, CurrentClass, Sema::RTC_Unknown);
+ CheckObjCMethodOverrides(SetterMethod, CurrentClass, SemaObjC::RTC_Unknown);
}
-void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
- SourceLocation Loc,
- unsigned &Attributes,
- bool propertyInPrimaryClass) {
+void SemaObjC::CheckObjCPropertyAttributes(Decl *PDecl, SourceLocation Loc,
+ unsigned &Attributes,
+ bool propertyInPrimaryClass) {
// FIXME: Improve the reported location.
if (!PDecl || PDecl->isInvalidDecl())
return;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaOpenACC.cpp b/contrib/llvm-project/clang/lib/Sema/SemaOpenACC.cpp
new file mode 100644
index 000000000000..cf207be33175
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaOpenACC.cpp
@@ -0,0 +1,1710 @@
+//===--- SemaOpenACC.cpp - Semantic Analysis for OpenACC constructs -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements semantic analysis for OpenACC constructs and
+/// clauses.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaOpenACC.h"
+#include "clang/AST/StmtOpenACC.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/OpenACCKinds.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Casting.h"
+
+using namespace clang;
+
+namespace {
+bool diagnoseConstructAppertainment(SemaOpenACC &S, OpenACCDirectiveKind K,
+ SourceLocation StartLoc, bool IsStmt) {
+ switch (K) {
+ default:
+ case OpenACCDirectiveKind::Invalid:
+ // Nothing to do here, both invalid and unimplemented don't really need to
+ // do anything.
+ break;
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Kernels:
+ case OpenACCDirectiveKind::Loop:
+ if (!IsStmt)
+ return S.Diag(StartLoc, diag::err_acc_construct_appertainment) << K;
+ break;
+ }
+ return false;
+}
+
+bool doesClauseApplyToDirective(OpenACCDirectiveKind DirectiveKind,
+ OpenACCClauseKind ClauseKind) {
+ switch (ClauseKind) {
+ // FIXME: For each clause as we implement them, we can add the
+ // 'legalization' list here.
+ case OpenACCClauseKind::Default:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Kernels:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ case OpenACCDirectiveKind::Data:
+ return true;
+ default:
+ return false;
+ }
+ case OpenACCClauseKind::If:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Kernels:
+ case OpenACCDirectiveKind::Data:
+ case OpenACCDirectiveKind::EnterData:
+ case OpenACCDirectiveKind::ExitData:
+ case OpenACCDirectiveKind::HostData:
+ case OpenACCDirectiveKind::Init:
+ case OpenACCDirectiveKind::Shutdown:
+ case OpenACCDirectiveKind::Set:
+ case OpenACCDirectiveKind::Update:
+ case OpenACCDirectiveKind::Wait:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ return true;
+ default:
+ return false;
+ }
+ case OpenACCClauseKind::Self:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Kernels:
+ case OpenACCDirectiveKind::Update:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ return true;
+ default:
+ return false;
+ }
+ case OpenACCClauseKind::NumGangs:
+ case OpenACCClauseKind::NumWorkers:
+ case OpenACCClauseKind::VectorLength:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Kernels:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ return true;
+ default:
+ return false;
+ }
+ case OpenACCClauseKind::FirstPrivate:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ return true;
+ default:
+ return false;
+ }
+ case OpenACCClauseKind::Private:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Loop:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ return true;
+ default:
+ return false;
+ }
+ case OpenACCClauseKind::NoCreate:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Kernels:
+ case OpenACCDirectiveKind::Data:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ return true;
+ default:
+ return false;
+ }
+ case OpenACCClauseKind::Present:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Kernels:
+ case OpenACCDirectiveKind::Data:
+ case OpenACCDirectiveKind::Declare:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ return true;
+ default:
+ return false;
+ }
+
+ case OpenACCClauseKind::Copy:
+ case OpenACCClauseKind::PCopy:
+ case OpenACCClauseKind::PresentOrCopy:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Kernels:
+ case OpenACCDirectiveKind::Data:
+ case OpenACCDirectiveKind::Declare:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ return true;
+ default:
+ return false;
+ }
+ case OpenACCClauseKind::CopyIn:
+ case OpenACCClauseKind::PCopyIn:
+ case OpenACCClauseKind::PresentOrCopyIn:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Kernels:
+ case OpenACCDirectiveKind::Data:
+ case OpenACCDirectiveKind::EnterData:
+ case OpenACCDirectiveKind::Declare:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ return true;
+ default:
+ return false;
+ }
+ case OpenACCClauseKind::CopyOut:
+ case OpenACCClauseKind::PCopyOut:
+ case OpenACCClauseKind::PresentOrCopyOut:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Kernels:
+ case OpenACCDirectiveKind::Data:
+ case OpenACCDirectiveKind::ExitData:
+ case OpenACCDirectiveKind::Declare:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ return true;
+ default:
+ return false;
+ }
+ case OpenACCClauseKind::Create:
+ case OpenACCClauseKind::PCreate:
+ case OpenACCClauseKind::PresentOrCreate:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Kernels:
+ case OpenACCDirectiveKind::Data:
+ case OpenACCDirectiveKind::EnterData:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ return true;
+ default:
+ return false;
+ }
+
+ case OpenACCClauseKind::Attach:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Kernels:
+ case OpenACCDirectiveKind::Data:
+ case OpenACCDirectiveKind::EnterData:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ return true;
+ default:
+ return false;
+ }
+ case OpenACCClauseKind::DevicePtr:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Kernels:
+ case OpenACCDirectiveKind::Data:
+ case OpenACCDirectiveKind::Declare:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ return true;
+ default:
+ return false;
+ }
+ case OpenACCClauseKind::Async:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Kernels:
+ case OpenACCDirectiveKind::Data:
+ case OpenACCDirectiveKind::EnterData:
+ case OpenACCDirectiveKind::ExitData:
+ case OpenACCDirectiveKind::Set:
+ case OpenACCDirectiveKind::Update:
+ case OpenACCDirectiveKind::Wait:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ return true;
+ default:
+ return false;
+ }
+ case OpenACCClauseKind::Wait:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Kernels:
+ case OpenACCDirectiveKind::Data:
+ case OpenACCDirectiveKind::EnterData:
+ case OpenACCDirectiveKind::ExitData:
+ case OpenACCDirectiveKind::Update:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ return true;
+ default:
+ return false;
+ }
+
+ case OpenACCClauseKind::Seq:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Loop:
+ case OpenACCDirectiveKind::Routine:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ return true;
+ default:
+ return false;
+ }
+
+ case OpenACCClauseKind::Independent:
+ case OpenACCClauseKind::Auto:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Loop:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ return true;
+ default:
+ return false;
+ }
+
+ case OpenACCClauseKind::Reduction:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Loop:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ return true;
+ default:
+ return false;
+ }
+
+ case OpenACCClauseKind::DeviceType:
+ case OpenACCClauseKind::DType:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Kernels:
+ case OpenACCDirectiveKind::Data:
+ case OpenACCDirectiveKind::Init:
+ case OpenACCDirectiveKind::Shutdown:
+ case OpenACCDirectiveKind::Set:
+ case OpenACCDirectiveKind::Update:
+ case OpenACCDirectiveKind::Loop:
+ case OpenACCDirectiveKind::Routine:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ return true;
+ default:
+ return false;
+ }
+
+ default:
+ // Do nothing so we can go to the 'unimplemented' diagnostic instead.
+ return true;
+ }
+ llvm_unreachable("Invalid clause kind");
+}
+
+// Returns true (after emitting a duplicate-clause diagnostic plus a note at
+// the earlier clause) when 'ExistingClauses' already contains a clause of the
+// same kind as 'Clause'; returns false otherwise.
+bool checkAlreadyHasClauseOfKind(
+    SemaOpenACC &S, ArrayRef<const OpenACCClause *> ExistingClauses,
+    SemaOpenACC::OpenACCParsedClause &Clause) {
+  for (const OpenACCClause *Existing : ExistingClauses) {
+    if (Existing->getClauseKind() != Clause.getClauseKind())
+      continue;
+    // Duplicate found: diagnose at the new clause, note the previous one.
+    S.Diag(Clause.getBeginLoc(), diag::err_acc_duplicate_clause_disallowed)
+        << Clause.getDirectiveKind() << Clause.getClauseKind();
+    S.Diag(Existing->getBeginLoc(), diag::note_acc_previous_clause_here);
+    return true;
+  }
+  return false;
+}
+
+// Enforces the OpenACC 3.3 restriction on which clauses may follow a
+// 'device_type' clause (sections 2.5.4 for compute constructs and 2.9 for
+// 'loop'). Returns true (after diagnosing) when 'NewClause' is NOT permitted
+// after 'DeviceTypeClause'; returns false when it is permitted or when the
+// restriction does not apply to this directive kind.
+bool checkValidAfterDeviceType(
+ SemaOpenACC &S, const OpenACCDeviceTypeClause &DeviceTypeClause,
+ const SemaOpenACC::OpenACCParsedClause &NewClause) {
+ // This is only a requirement on compute and loop constructs so far, so this
+ // is fine otherwise.
+ if (!isOpenACCComputeDirectiveKind(NewClause.getDirectiveKind()) &&
+ NewClause.getDirectiveKind() != OpenACCDirectiveKind::Loop)
+ return false;
+
+ // OpenACC3.3: Section 2.4: Clauses that precede any device_type clause are
+ // default clauses. Clauses that follow a device_type clause up to the end of
+ // the directive or up to the next device_type clause are device-specific
+ // clauses for the device types specified in the device_type argument.
+ //
+ // The above implies that despite what the individual text says, these are
+ // valid.
+ if (NewClause.getClauseKind() == OpenACCClauseKind::DType ||
+ NewClause.getClauseKind() == OpenACCClauseKind::DeviceType)
+ return false;
+
+ // Implement check from OpenACC3.3: section 2.5.4:
+ // Only the async, wait, num_gangs, num_workers, and vector_length clauses may
+ // follow a device_type clause.
+ if (isOpenACCComputeDirectiveKind(NewClause.getDirectiveKind())) {
+ switch (NewClause.getClauseKind()) {
+ case OpenACCClauseKind::Async:
+ case OpenACCClauseKind::Wait:
+ case OpenACCClauseKind::NumGangs:
+ case OpenACCClauseKind::NumWorkers:
+ case OpenACCClauseKind::VectorLength:
+ return false;
+ default:
+ break;
+ }
+ } else if (NewClause.getDirectiveKind() == OpenACCDirectiveKind::Loop) {
+ // Implement check from OpenACC3.3: section 2.9:
+ // Only the collapse, gang, worker, vector, seq, independent, auto, and tile
+ // clauses may follow a device_type clause.
+ switch (NewClause.getClauseKind()) {
+ case OpenACCClauseKind::Collapse:
+ case OpenACCClauseKind::Gang:
+ case OpenACCClauseKind::Worker:
+ case OpenACCClauseKind::Vector:
+ case OpenACCClauseKind::Seq:
+ case OpenACCClauseKind::Independent:
+ case OpenACCClauseKind::Auto:
+ case OpenACCClauseKind::Tile:
+ return false;
+ default:
+ break;
+ }
+ }
+ // Anything that reached this point is not in the allow-list for its
+ // directive kind: diagnose and note the offending 'device_type'.
+ S.Diag(NewClause.getBeginLoc(), diag::err_acc_clause_after_device_type)
+ << NewClause.getClauseKind() << DeviceTypeClause.getClauseKind()
+ << isOpenACCComputeDirectiveKind(NewClause.getDirectiveKind())
+ << NewClause.getDirectiveKind();
+ S.Diag(DeviceTypeClause.getBeginLoc(), diag::note_acc_previous_clause_here);
+ return true;
+}
+
+// Dispatches a parsed OpenACC clause to the matching Visit*Clause handler,
+// which performs the semantic checks and creates the AST clause node (or
+// returns null when the clause is invalid or not yet implemented).
+class SemaOpenACCClauseVisitor {
+ SemaOpenACC &SemaRef;
+ ASTContext &Ctx;
+ // Clauses already accepted on the current directive; used for
+ // duplicate/conflict checks by the individual handlers.
+ ArrayRef<const OpenACCClause *> ExistingClauses;
+ bool NotImplemented = false;
+
+ // Records that the visited clause is unimplemented for this directive and
+ // returns the null result the caller propagates.
+ OpenACCClause *isNotImplemented() {
+ NotImplemented = true;
+ return nullptr;
+ }
+
+public:
+ SemaOpenACCClauseVisitor(SemaOpenACC &S,
+ ArrayRef<const OpenACCClause *> ExistingClauses)
+ : SemaRef(S), Ctx(S.getASTContext()), ExistingClauses(ExistingClauses) {}
+ // Once we've implemented everything, we shouldn't need this infrastructure.
+ // But in the meantime, we use this to help decide whether the clause was
+ // handled for this directive.
+ bool diagNotImplemented() { return NotImplemented; }
+
+ OpenACCClause *Visit(SemaOpenACC::OpenACCParsedClause &Clause) {
+ switch (Clause.getClauseKind()) {
+ case OpenACCClauseKind::Gang:
+ case OpenACCClauseKind::Worker:
+ case OpenACCClauseKind::Vector: {
+ // TODO OpenACC: These are only implemented enough for the 'seq' diagnostic,
+ // otherwise treats itself as unimplemented. When we implement these, we
+ // can remove them from here.
+
+ // OpenACC 3.3 2.9:
+ // A 'gang', 'worker', or 'vector' clause may not appear if a 'seq' clause
+ // appears.
+ const auto *Itr =
+ llvm::find_if(ExistingClauses, llvm::IsaPred<OpenACCSeqClause>);
+
+ if (Itr != ExistingClauses.end()) {
+ SemaRef.Diag(Clause.getBeginLoc(), diag::err_acc_clause_cannot_combine)
+ << Clause.getClauseKind() << (*Itr)->getClauseKind();
+ SemaRef.Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here);
+ }
+ return isNotImplemented();
+ }
+
+// Expand one 'case' per implemented clause (and per alias, warning when the
+// alias spelling is deprecated), forwarding to the Visit*Clause member.
+#define VISIT_CLAUSE(CLAUSE_NAME) \
+ case OpenACCClauseKind::CLAUSE_NAME: \
+ return Visit##CLAUSE_NAME##Clause(Clause);
+#define CLAUSE_ALIAS(ALIAS, CLAUSE_NAME, DEPRECATED) \
+ case OpenACCClauseKind::ALIAS: \
+ if (DEPRECATED) \
+ SemaRef.Diag(Clause.getBeginLoc(), diag::warn_acc_deprecated_alias_name) \
+ << Clause.getClauseKind() << OpenACCClauseKind::CLAUSE_NAME; \
+ return Visit##CLAUSE_NAME##Clause(Clause);
+#include "clang/Basic/OpenACCClauses.def"
+ default:
+ return isNotImplemented();
+ }
+ llvm_unreachable("Invalid clause kind");
+ }
+
+// Declare one handler per implemented clause; definitions follow below.
+#define VISIT_CLAUSE(CLAUSE_NAME) \
+ OpenACCClause *Visit##CLAUSE_NAME##Clause( \
+ SemaOpenACC::OpenACCParsedClause &Clause);
+#include "clang/Basic/OpenACCClauses.def"
+};
+
+OpenACCClause *SemaOpenACCClauseVisitor::VisitDefaultClause(
+    SemaOpenACC::OpenACCParsedClause &Clause) {
+  // Only 'compute' constructs have their restrictions implemented, and they
+  // are the only constructs that consume 'default' so far; everything else
+  // is treated as unimplemented.
+  if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+    return isNotImplemented();
+
+  // OpenACC 3.3, Section 2.5.4: at most one 'default' clause may appear, and
+  // it must have a value of either 'none' or 'present' (the value itself is
+  // diagnosed during parsing). Don't add an invalid or duplicate clause to
+  // the AST; short-circuit keeps the duplicate check from firing for an
+  // invalid clause, matching the original ordering.
+  if (Clause.getDefaultClauseKind() == OpenACCDefaultClauseKind::Invalid ||
+      checkAlreadyHasClauseOfKind(SemaRef, ExistingClauses, Clause))
+    return nullptr;
+
+  return OpenACCDefaultClause::Create(
+      Ctx, Clause.getDefaultClauseKind(), Clause.getBeginLoc(),
+      Clause.getLParenLoc(), Clause.getEndLoc());
+}
+
+OpenACCClause *SemaOpenACCClauseVisitor::VisitIfClause(
+    SemaOpenACC::OpenACCParsedClause &Clause) {
+  // Restrictions are only implemented for 'compute' constructs, which are
+  // also the only consumers of 'if' so far; treat others as unimplemented.
+  if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+    return isNotImplemented();
+
+  // The standard has no prose forbidding duplicates, but other compilers
+  // diagnose them, and doing so makes sense.
+  if (checkAlreadyHasClauseOfKind(SemaRef, ExistingClauses, Clause))
+    return nullptr;
+
+  // The parser already validated the condition expression, so the only
+  // remaining check is the 'if'/'self' interaction: a true 'if' makes a
+  // 'self' clause have no effect, so warn when both appear.
+  // TODO OpenACC: When we add these two to other constructs, we might not
+  // want to warn on this (for example, 'update').
+  const auto *SelfItr =
+      llvm::find_if(ExistingClauses, llvm::IsaPred<OpenACCSelfClause>);
+  if (SelfItr != ExistingClauses.end()) {
+    SemaRef.Diag(Clause.getBeginLoc(), diag::warn_acc_if_self_conflict);
+    SemaRef.Diag((*SelfItr)->getBeginLoc(),
+                 diag::note_acc_previous_clause_here);
+  }
+
+  return OpenACCIfClause::Create(Ctx, Clause.getBeginLoc(),
+                                 Clause.getLParenLoc(),
+                                 Clause.getConditionExpr(),
+                                 Clause.getEndLoc());
+}
+
+OpenACCClause *SemaOpenACCClauseVisitor::VisitSelfClause(
+    SemaOpenACC::OpenACCParsedClause &Clause) {
+  // Restrictions are only implemented for 'compute' constructs, which are
+  // also the only consumers of 'self' so far; treat others as unimplemented.
+  if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+    return isNotImplemented();
+
+  // TODO OpenACC: When we implement this for 'update', this takes a
+  // 'var-list' instead of a condition expression, so semantics/handling has
+  // to happen differently here.
+
+  // The standard has no prose forbidding duplicates, but other compilers
+  // diagnose them, and doing so makes sense.
+  if (checkAlreadyHasClauseOfKind(SemaRef, ExistingClauses, Clause))
+    return nullptr;
+
+  // A true 'if' clause makes 'self' have no effect; warn when both appear.
+  // TODO OpenACC: When we add these two to other constructs, we might not
+  // want to warn on this (for example, 'update').
+  const auto *IfItr =
+      llvm::find_if(ExistingClauses, llvm::IsaPred<OpenACCIfClause>);
+  if (IfItr != ExistingClauses.end()) {
+    SemaRef.Diag(Clause.getBeginLoc(), diag::warn_acc_if_self_conflict);
+    SemaRef.Diag((*IfItr)->getBeginLoc(),
+                 diag::note_acc_previous_clause_here);
+  }
+
+  return OpenACCSelfClause::Create(
+      Ctx, Clause.getBeginLoc(), Clause.getLParenLoc(),
+      Clause.getConditionExpr(), Clause.getEndLoc());
+}
+
+// Semantic checks for 'num_gangs': arity (>=1 int-expr, max 3 on parallel /
+// parallel-loop, else 1) and the 2.5.4 conflict with a multi-argument
+// num_gangs alongside a 'reduction' on 'parallel'.
+OpenACCClause *SemaOpenACCClauseVisitor::VisitNumGangsClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+
+ // There is no prose in the standard that says duplicates aren't allowed,
+ // but this diagnostic is present in other compilers, as well as makes
+ // sense.
+ if (checkAlreadyHasClauseOfKind(SemaRef, ExistingClauses, Clause))
+ return nullptr;
+
+ // num_gangs requires at least 1 int expr in all forms. Diagnose here, but
+ // allow us to continue, an empty clause might be useful for future
+ // diagnostics.
+ if (Clause.getIntExprs().empty())
+ SemaRef.Diag(Clause.getBeginLoc(), diag::err_acc_num_gangs_num_args)
+ << /*NoArgs=*/0;
+
+ unsigned MaxArgs =
+ (Clause.getDirectiveKind() == OpenACCDirectiveKind::Parallel ||
+ Clause.getDirectiveKind() == OpenACCDirectiveKind::ParallelLoop)
+ ? 3
+ : 1;
+ // The max number of args differs between parallel and other constructs.
+ // Again, allow us to continue for the purposes of future diagnostics.
+ if (Clause.getIntExprs().size() > MaxArgs)
+ SemaRef.Diag(Clause.getBeginLoc(), diag::err_acc_num_gangs_num_args)
+ << /*NoArgs=*/1 << Clause.getDirectiveKind() << MaxArgs
+ << Clause.getIntExprs().size();
+
+ // OpenACC 3.3 Section 2.5.4:
+ // A reduction clause may not appear on a parallel construct with a
+ // num_gangs clause that has more than one argument.
+ if (Clause.getDirectiveKind() == OpenACCDirectiveKind::Parallel &&
+ Clause.getIntExprs().size() > 1) {
+ // NOTE(review): 'Parallel' is an iterator to an existing 'reduction'
+ // clause, not to a parallel directive — a clearer name would help.
+ auto *Parallel =
+ llvm::find_if(ExistingClauses, llvm::IsaPred<OpenACCReductionClause>);
+
+ if (Parallel != ExistingClauses.end()) {
+ SemaRef.Diag(Clause.getBeginLoc(),
+ diag::err_acc_reduction_num_gangs_conflict)
+ << Clause.getIntExprs().size();
+ SemaRef.Diag((*Parallel)->getBeginLoc(),
+ diag::note_acc_previous_clause_here);
+ return nullptr;
+ }
+ }
+ return OpenACCNumGangsClause::Create(
+ Ctx, Clause.getBeginLoc(), Clause.getLParenLoc(), Clause.getIntExprs(),
+ Clause.getEndLoc());
+}
+
+OpenACCClause *SemaOpenACCClauseVisitor::VisitNumWorkersClause(
+    SemaOpenACC::OpenACCParsedClause &Clause) {
+  // Only 'compute' constructs have the restrictions implemented, and only
+  // they consume 'num_workers' so far; treat others as unimplemented.
+  if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+    return isNotImplemented();
+
+  // No standard prose forbids duplicates, but diagnose them for parity with
+  // other compilers.
+  if (checkAlreadyHasClauseOfKind(SemaRef, ExistingClauses, Clause))
+    return nullptr;
+
+  // The parser produces exactly one int-expr for this clause.
+  assert(Clause.getIntExprs().size() == 1 &&
+         "Invalid number of expressions for NumWorkers");
+  return OpenACCNumWorkersClause::Create(Ctx, Clause.getBeginLoc(),
+                                         Clause.getLParenLoc(),
+                                         Clause.getIntExprs()[0],
+                                         Clause.getEndLoc());
+}
+
+OpenACCClause *SemaOpenACCClauseVisitor::VisitVectorLengthClause(
+    SemaOpenACC::OpenACCParsedClause &Clause) {
+  // Restrictions only properly implemented on 'compute' constructs, and
+  // 'compute' constructs are the only construct that can do anything with
+  // this yet, so skip/treat as unimplemented in this case.
+  if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+    return isNotImplemented();
+
+  // There is no prose in the standard that says duplicates aren't allowed,
+  // but this diagnostic is present in other compilers, as well as makes
+  // sense.
+  if (checkAlreadyHasClauseOfKind(SemaRef, ExistingClauses, Clause))
+    return nullptr;
+
+  // Fixed copy/paste from VisitNumWorkersClause: the assert text previously
+  // named 'NumWorkers' even though this handler is for 'vector_length'.
+  assert(Clause.getIntExprs().size() == 1 &&
+         "Invalid number of expressions for VectorLength");
+  return OpenACCVectorLengthClause::Create(
+      Ctx, Clause.getBeginLoc(), Clause.getLParenLoc(), Clause.getIntExprs()[0],
+      Clause.getEndLoc());
+}
+
+OpenACCClause *SemaOpenACCClauseVisitor::VisitAsyncClause(
+    SemaOpenACC::OpenACCParsedClause &Clause) {
+  // Only 'compute' constructs have the restrictions implemented, and only
+  // they consume 'async' so far; treat others as unimplemented.
+  if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+    return isNotImplemented();
+
+  // No standard prose forbids duplicates, but diagnose them for parity with
+  // other compilers.
+  if (checkAlreadyHasClauseOfKind(SemaRef, ExistingClauses, Clause))
+    return nullptr;
+
+  // 'async' takes at most one optional int-expr; null marks the argument-less
+  // form.
+  assert(Clause.getNumIntExprs() < 2 &&
+         "Invalid number of expressions for Async");
+  Expr *AsyncArg =
+      Clause.getNumIntExprs() != 0 ? Clause.getIntExprs()[0] : nullptr;
+  return OpenACCAsyncClause::Create(Ctx, Clause.getBeginLoc(),
+                                    Clause.getLParenLoc(), AsyncArg,
+                                    Clause.getEndLoc());
+}
+
+OpenACCClause *SemaOpenACCClauseVisitor::VisitPrivateClause(
+    SemaOpenACC::OpenACCParsedClause &Clause) {
+  // 'private' is only implemented (and only consumable) on 'compute' and
+  // 'loop' constructs so far; anything else is treated as unimplemented.
+  bool DirectiveHandled =
+      isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()) ||
+      Clause.getDirectiveKind() == OpenACCDirectiveKind::Loop;
+  if (!DirectiveHandled)
+    return isNotImplemented();
+
+  // ActOnVar already validated every variable reference, so nothing is left
+  // to check here. GCC performs some duplicate-finding, though it isn't
+  // apparent in the standard where that is justified.
+  return OpenACCPrivateClause::Create(Ctx, Clause.getBeginLoc(),
+                                      Clause.getLParenLoc(),
+                                      Clause.getVarList(), Clause.getEndLoc());
+}
+
+OpenACCClause *SemaOpenACCClauseVisitor::VisitFirstPrivateClause(
+    SemaOpenACC::OpenACCParsedClause &Clause) {
+  // 'firstprivate' restrictions are only implemented on 'compute'
+  // constructs, the only consumers so far; treat others as unimplemented.
+  if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+    return isNotImplemented();
+
+  // ActOnVar already validated every variable reference, so nothing is left
+  // to check here. GCC performs some duplicate-finding, though it isn't
+  // apparent in the standard where that is justified.
+  return OpenACCFirstPrivateClause::Create(Ctx, Clause.getBeginLoc(),
+                                           Clause.getLParenLoc(),
+                                           Clause.getVarList(),
+                                           Clause.getEndLoc());
+}
+
+// 'no_create': var-list clause; validation happened in ActOnVar, so this
+// only builds the AST node.
+OpenACCClause *SemaOpenACCClauseVisitor::VisitNoCreateClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+ // ActOnVar ensured that everything is a valid variable reference, so there
+ // really isn't anything to do here. GCC does some duplicate-finding, though
+ // it isn't apparent in the standard where this is justified.
+
+ return OpenACCNoCreateClause::Create(Ctx, Clause.getBeginLoc(),
+ Clause.getLParenLoc(),
+ Clause.getVarList(), Clause.getEndLoc());
+}
+
+OpenACCClause *SemaOpenACCClauseVisitor::VisitPresentClause(
+    SemaOpenACC::OpenACCParsedClause &Clause) {
+  // 'present' restrictions are only implemented on 'compute' constructs, the
+  // only consumers so far; treat others as unimplemented.
+  if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+    return isNotImplemented();
+
+  // ActOnVar already validated every variable reference, so nothing is left
+  // to check here. GCC performs some duplicate-finding, though it isn't
+  // apparent in the standard where that is justified.
+  return OpenACCPresentClause::Create(Ctx, Clause.getBeginLoc(),
+                                      Clause.getLParenLoc(),
+                                      Clause.getVarList(), Clause.getEndLoc());
+}
+
+OpenACCClause *SemaOpenACCClauseVisitor::VisitCopyClause(
+    SemaOpenACC::OpenACCParsedClause &Clause) {
+  // 'copy' (and its aliases) restrictions are only implemented on 'compute'
+  // constructs, the only consumers so far; treat others as unimplemented.
+  if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+    return isNotImplemented();
+
+  // ActOnVar already validated every variable reference, so nothing is left
+  // to check here. GCC performs some duplicate-finding, though it isn't
+  // apparent in the standard where that is justified.
+  // The clause kind is forwarded so alias spellings round-trip in the AST.
+  return OpenACCCopyClause::Create(
+      Ctx, Clause.getClauseKind(), Clause.getBeginLoc(), Clause.getLParenLoc(),
+      Clause.getVarList(), Clause.getEndLoc());
+}
+
+// 'copyin' (and aliases): var-list clause; preserves the clause spelling and
+// the 'readonly:' modifier in the AST node.
+OpenACCClause *SemaOpenACCClauseVisitor::VisitCopyInClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+ // ActOnVar ensured that everything is a valid variable reference, so there
+ // really isn't anything to do here. GCC does some duplicate-finding, though
+ // it isn't apparent in the standard where this is justified.
+
+ return OpenACCCopyInClause::Create(
+ Ctx, Clause.getClauseKind(), Clause.getBeginLoc(), Clause.getLParenLoc(),
+ Clause.isReadOnly(), Clause.getVarList(), Clause.getEndLoc());
+}
+
+// 'copyout' (and aliases): var-list clause; preserves the clause spelling and
+// the 'zero:' modifier in the AST node.
+OpenACCClause *SemaOpenACCClauseVisitor::VisitCopyOutClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+ // ActOnVar ensured that everything is a valid variable reference, so there
+ // really isn't anything to do here. GCC does some duplicate-finding, though
+ // it isn't apparent in the standard where this is justified.
+
+ return OpenACCCopyOutClause::Create(
+ Ctx, Clause.getClauseKind(), Clause.getBeginLoc(), Clause.getLParenLoc(),
+ Clause.isZero(), Clause.getVarList(), Clause.getEndLoc())
+}
+
+// 'create' (and aliases): var-list clause; preserves the clause spelling and
+// the 'zero:' modifier in the AST node.
+OpenACCClause *SemaOpenACCClauseVisitor::VisitCreateClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+ // ActOnVar ensured that everything is a valid variable reference, so there
+ // really isn't anything to do here. GCC does some duplicate-finding, though
+ // it isn't apparent in the standard where this is justified.
+
+ return OpenACCCreateClause::Create(
+ Ctx, Clause.getClauseKind(), Clause.getBeginLoc(), Clause.getLParenLoc(),
+ Clause.isZero(), Clause.getVarList(), Clause.getEndLoc());
+}
+
+OpenACCClause *SemaOpenACCClauseVisitor::VisitAttachClause(
+    SemaOpenACC::OpenACCParsedClause &Clause) {
+  // 'attach' restrictions are only implemented on 'compute' constructs, the
+  // only consumers so far; treat others as unimplemented.
+  if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+    return isNotImplemented();
+
+  // ActOnVar validated the variable references; additionally each variable
+  // must be of pointer type. CheckVarIsPointerType diagnoses and returns
+  // true for non-pointers, which are dropped from the list.
+  llvm::SmallVector<Expr *> PointerVars;
+  for (Expr *Var : Clause.getVarList())
+    if (!SemaRef.CheckVarIsPointerType(OpenACCClauseKind::Attach, Var))
+      PointerVars.push_back(Var);
+  Clause.setVarListDetails(PointerVars,
+                           /*IsReadOnly=*/false, /*IsZero=*/false);
+  return OpenACCAttachClause::Create(Ctx, Clause.getBeginLoc(),
+                                     Clause.getLParenLoc(), Clause.getVarList(),
+                                     Clause.getEndLoc());
+}
+
+OpenACCClause *SemaOpenACCClauseVisitor::VisitDevicePtrClause(
+    SemaOpenACC::OpenACCParsedClause &Clause) {
+  // 'deviceptr' restrictions are only implemented on 'compute' constructs,
+  // the only consumers so far; treat others as unimplemented.
+  if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+    return isNotImplemented();
+
+  // ActOnVar validated the variable references; additionally each variable
+  // must be of pointer type. CheckVarIsPointerType diagnoses and returns
+  // true for non-pointers, which are dropped from the list.
+  llvm::SmallVector<Expr *> PointerVars;
+  for (Expr *Var : Clause.getVarList())
+    if (!SemaRef.CheckVarIsPointerType(OpenACCClauseKind::DevicePtr, Var))
+      PointerVars.push_back(Var);
+  Clause.setVarListDetails(PointerVars,
+                           /*IsReadOnly=*/false, /*IsZero=*/false);
+
+  return OpenACCDevicePtrClause::Create(
+      Ctx, Clause.getBeginLoc(), Clause.getLParenLoc(), Clause.getVarList(),
+      Clause.getEndLoc());
+}
+
+// 'wait': builds the AST node from the parsed devnum expression, 'queues'
+// keyword location, and queue-id expressions; no extra semantic checks yet.
+OpenACCClause *SemaOpenACCClauseVisitor::VisitWaitClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+
+ return OpenACCWaitClause::Create(
+ Ctx, Clause.getBeginLoc(), Clause.getLParenLoc(), Clause.getDevNumExpr(),
+ Clause.getQueuesLoc(), Clause.getQueueIdExprs(), Clause.getEndLoc());
+}
+
+// 'device_type' / 'dtype': records the architecture identifier list; the
+// clause spelling is forwarded so the alias round-trips in the AST.
+OpenACCClause *SemaOpenACCClauseVisitor::VisitDeviceTypeClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' and 'loop'
+ // constructs, and 'compute'/'loop' constructs are the only construct that
+ // can do anything with this yet, so skip/treat as unimplemented in this
+ // case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()) &&
+ Clause.getDirectiveKind() != OpenACCDirectiveKind::Loop)
+ return isNotImplemented();
+
+ // TODO OpenACC: Once we get enough of the CodeGen implemented that we have
+ // a source for the list of valid architectures, we need to warn on unknown
+ // identifiers here.
+
+ return OpenACCDeviceTypeClause::Create(
+ Ctx, Clause.getClauseKind(), Clause.getBeginLoc(), Clause.getLParenLoc(),
+ Clause.getDeviceTypeArchitectures(), Clause.getEndLoc());
+}
+
+OpenACCClause *SemaOpenACCClauseVisitor::VisitAutoClause(
+    SemaOpenACC::OpenACCParsedClause &Clause) {
+  // 'loop' is the only construct with the restrictions implemented (and the
+  // only consumer of 'auto') so far; combined constructs are treated as
+  // unimplemented.
+  if (Clause.getDirectiveKind() != OpenACCDirectiveKind::Loop)
+    return isNotImplemented();
+
+  // OpenACC 3.3 2.9: only one of the 'seq', 'independent', and 'auto'
+  // clauses may appear.
+  const auto *Conflict =
+      llvm::find_if(ExistingClauses,
+                    llvm::IsaPred<OpenACCIndependentClause, OpenACCSeqClause>);
+  if (Conflict == ExistingClauses.end())
+    return OpenACCAutoClause::Create(Ctx, Clause.getBeginLoc(),
+                                     Clause.getEndLoc());
+
+  SemaRef.Diag(Clause.getBeginLoc(), diag::err_acc_loop_spec_conflict)
+      << Clause.getClauseKind() << Clause.getDirectiveKind();
+  SemaRef.Diag((*Conflict)->getBeginLoc(),
+               diag::note_acc_previous_clause_here);
+  return nullptr;
+}
+
+OpenACCClause *SemaOpenACCClauseVisitor::VisitIndependentClause(
+    SemaOpenACC::OpenACCParsedClause &Clause) {
+  // 'loop' is the only construct with the restrictions implemented (and the
+  // only consumer of 'independent') so far; combined constructs are treated
+  // as unimplemented.
+  if (Clause.getDirectiveKind() != OpenACCDirectiveKind::Loop)
+    return isNotImplemented();
+
+  // OpenACC 3.3 2.9: only one of the 'seq', 'independent', and 'auto'
+  // clauses may appear.
+  const auto *Conflict = llvm::find_if(
+      ExistingClauses, llvm::IsaPred<OpenACCAutoClause, OpenACCSeqClause>);
+  if (Conflict == ExistingClauses.end())
+    return OpenACCIndependentClause::Create(Ctx, Clause.getBeginLoc(),
+                                            Clause.getEndLoc());
+
+  SemaRef.Diag(Clause.getBeginLoc(), diag::err_acc_loop_spec_conflict)
+      << Clause.getClauseKind() << Clause.getDirectiveKind();
+  SemaRef.Diag((*Conflict)->getBeginLoc(),
+               diag::note_acc_previous_clause_here);
+  return nullptr;
+}
+
+// 'seq' on a loop construct: mutually exclusive with 'independent'/'auto'
+// (2.9), and with 'gang'/'worker'/'vector'. Both checks must run in this
+// order so the reported conflict matches the first offending clause kind.
+OpenACCClause *SemaOpenACCClauseVisitor::VisitSeqClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'loop' constructs, and it is
+ // the only construct that can do anything with this, so skip/treat as
+ // unimplemented for the combined constructs.
+ if (Clause.getDirectiveKind() != OpenACCDirectiveKind::Loop)
+ return isNotImplemented();
+
+ // OpenACC 3.3 2.9:
+ // Only one of the seq, independent, and auto clauses may appear.
+ const auto *Itr =
+ llvm::find_if(ExistingClauses,
+ llvm::IsaPred<OpenACCAutoClause, OpenACCIndependentClause>);
+ if (Itr != ExistingClauses.end()) {
+ SemaRef.Diag(Clause.getBeginLoc(), diag::err_acc_loop_spec_conflict)
+ << Clause.getClauseKind() << Clause.getDirectiveKind();
+ SemaRef.Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here);
+ return nullptr;
+ }
+
+ // OpenACC 3.3 2.9:
+ // A 'gang', 'worker', or 'vector' clause may not appear if a 'seq' clause
+ // appears.
+ Itr = llvm::find_if(ExistingClauses,
+ llvm::IsaPred<OpenACCGangClause, OpenACCWorkerClause,
+ OpenACCVectorClause>);
+
+ if (Itr != ExistingClauses.end()) {
+ SemaRef.Diag(Clause.getBeginLoc(), diag::err_acc_clause_cannot_combine)
+ << Clause.getClauseKind() << (*Itr)->getClauseKind();
+ SemaRef.Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here);
+ return nullptr;
+ }
+
+ // TODO OpenACC: 2.9 ~ line 2010 specifies that the associated loop has some
+ // restrictions when there is a 'seq' clause in place. We probably need to
+ // implement that.
+ return OpenACCSeqClause::Create(Ctx, Clause.getBeginLoc(),
+ Clause.getEndLoc());
+}
+
+// 'reduction': enforces the 2.5.4 conflict with a multi-argument 'num_gangs'
+// on 'parallel', then filters the var-list through CheckReductionVar so only
+// usable reduction variables end up in the AST node.
+OpenACCClause *SemaOpenACCClauseVisitor::VisitReductionClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+
+ // OpenACC 3.3 Section 2.5.4:
+ // A reduction clause may not appear on a parallel construct with a
+ // num_gangs clause that has more than one argument.
+ if (Clause.getDirectiveKind() == OpenACCDirectiveKind::Parallel) {
+ auto NumGangsClauses = llvm::make_filter_range(
+ ExistingClauses, llvm::IsaPred<OpenACCNumGangsClause>);
+
+ for (auto *NGC : NumGangsClauses) {
+ unsigned NumExprs =
+ cast<OpenACCNumGangsClause>(NGC)->getIntExprs().size();
+
+ if (NumExprs > 1) {
+ SemaRef.Diag(Clause.getBeginLoc(),
+ diag::err_acc_reduction_num_gangs_conflict)
+ << NumExprs;
+ SemaRef.Diag(NGC->getBeginLoc(), diag::note_acc_previous_clause_here);
+ return nullptr;
+ }
+ }
+ }
+
+ // CheckReductionVar is expected to diagnose unusable variables itself;
+ // those are silently dropped from the clause here.
+ SmallVector<Expr *> ValidVars;
+
+ for (Expr *Var : Clause.getVarList()) {
+ ExprResult Res = SemaRef.CheckReductionVar(Var);
+
+ if (Res.isUsable())
+ ValidVars.push_back(Res.get());
+ }
+
+ return OpenACCReductionClause::Create(
+ Ctx, Clause.getBeginLoc(), Clause.getLParenLoc(), Clause.getReductionOp(),
+ ValidVars, Clause.getEndLoc());
+}
+
+} // namespace
+
+// SemaOpenACC is a Sema sub-object; it only forwards to the SemaBase ctor and
+// relies on in-class member defaults for its own state.
+SemaOpenACC::SemaOpenACC(Sema &S) : SemaBase(S) {}
+
+// RAII entry for a directive's associated statement: for compute constructs
+// it marks Sema as inside a compute construct and stashes the outer scope's
+// parentless-loop list (via swap) so this construct collects its own.
+SemaOpenACC::AssociatedStmtRAII::AssociatedStmtRAII(SemaOpenACC &S,
+ OpenACCDirectiveKind DK)
+ : SemaRef(S), WasInsideComputeConstruct(S.InsideComputeConstruct),
+ DirKind(DK) {
+ // Compute constructs end up taking their 'loop'.
+ if (DirKind == OpenACCDirectiveKind::Parallel ||
+ DirKind == OpenACCDirectiveKind::Serial ||
+ DirKind == OpenACCDirectiveKind::Kernels) {
+ SemaRef.InsideComputeConstruct = true;
+ // Swap (rather than clear) so the outer list is restored in the dtor.
+ SemaRef.ParentlessLoopConstructs.swap(ParentlessLoopConstructs);
+ }
+}
+
+// RAII exit: restores the saved inside-compute-construct flag and, for
+// compute constructs, swaps the stashed parentless-loop list back. The
+// assert requires the construct to have consumed every loop it collected.
+SemaOpenACC::AssociatedStmtRAII::~AssociatedStmtRAII() {
+ SemaRef.InsideComputeConstruct = WasInsideComputeConstruct;
+ if (DirKind == OpenACCDirectiveKind::Parallel ||
+ DirKind == OpenACCDirectiveKind::Serial ||
+ DirKind == OpenACCDirectiveKind::Kernels) {
+ assert(SemaRef.ParentlessLoopConstructs.empty() &&
+ "Didn't consume loop construct list?");
+ SemaRef.ParentlessLoopConstructs.swap(ParentlessLoopConstructs);
+ }
+}
+
+OpenACCClause *
+SemaOpenACC::ActOnClause(ArrayRef<const OpenACCClause *> ExistingClauses,
+ OpenACCParsedClause &Clause) {
+ if (Clause.getClauseKind() == OpenACCClauseKind::Invalid)
+ return nullptr;
+
+ // Diagnose that we don't support this clause on this directive.
+ if (!doesClauseApplyToDirective(Clause.getDirectiveKind(),
+ Clause.getClauseKind())) {
+ Diag(Clause.getBeginLoc(), diag::err_acc_clause_appertainment)
+ << Clause.getDirectiveKind() << Clause.getClauseKind();
+ return nullptr;
+ }
+
+ if (const auto *DevTypeClause =
+ llvm::find_if(ExistingClauses,
+ [&](const OpenACCClause *C) {
+ return isa<OpenACCDeviceTypeClause>(C);
+ });
+ DevTypeClause != ExistingClauses.end()) {
+ if (checkValidAfterDeviceType(
+ *this, *cast<OpenACCDeviceTypeClause>(*DevTypeClause), Clause))
+ return nullptr;
+ }
+
+ SemaOpenACCClauseVisitor Visitor{*this, ExistingClauses};
+ OpenACCClause *Result = Visitor.Visit(Clause);
+ assert((!Result || Result->getClauseKind() == Clause.getClauseKind()) &&
+ "Created wrong clause?");
+
+ if (Visitor.diagNotImplemented())
+ Diag(Clause.getBeginLoc(), diag::warn_acc_clause_unimplemented)
+ << Clause.getClauseKind();
+
+ return Result;
+
+ // switch (Clause.getClauseKind()) {
+ // case OpenACCClauseKind::PresentOrCopy:
+ // case OpenACCClauseKind::PCopy:
+ // Diag(Clause.getBeginLoc(), diag::warn_acc_deprecated_alias_name)
+ // << Clause.getClauseKind() << OpenACCClauseKind::Copy;
+ // LLVM_FALLTHROUGH;
+ // case OpenACCClauseKind::PresentOrCreate:
+ // case OpenACCClauseKind::PCreate:
+ // Diag(Clause.getBeginLoc(), diag::warn_acc_deprecated_alias_name)
+ // << Clause.getClauseKind() << OpenACCClauseKind::Create;
+ // LLVM_FALLTHROUGH;
+ //
+ //
+ //
+ //
+ // case OpenACCClauseKind::DType:
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ // case OpenACCClauseKind::Gang:
+ // case OpenACCClauseKind::Worker:
+ // case OpenACCClauseKind::Vector: {
+ // // OpenACC 3.3 2.9:
+ // // A 'gang', 'worker', or 'vector' clause may not appear if a 'seq'
+ // clause
+ // // appears.
+ // const auto *Itr =
+ // llvm::find_if(ExistingClauses, llvm::IsaPred<OpenACCSeqClause>);
+ //
+ // if (Itr != ExistingClauses.end()) {
+ // Diag(Clause.getBeginLoc(), diag::err_acc_clause_cannot_combine)
+ // << Clause.getClauseKind() << (*Itr)->getClauseKind();
+ // Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here);
+ // }
+ // // Not yet implemented, so immediately drop to the 'not yet implemented'
+ // // diagnostic.
+ // break;
+ // }
+ // */
+
+}
+
+/// OpenACC 3.3 section 2.5.15:
+/// At a minimum, the supported data types include ... the numerical data types
+/// in C, C++, and Fortran.
+///
+/// If the reduction var is a composite variable, each
+/// member of the composite variable must be a supported datatype for the
+/// reduction operation.
+ExprResult SemaOpenACC::CheckReductionVar(Expr *VarExpr) {
+ VarExpr = VarExpr->IgnoreParenCasts();
+
+ auto TypeIsValid = [](QualType Ty) {
+ return Ty->isDependentType() || Ty->isScalarType();
+ };
+
+ if (isa<ArraySectionExpr>(VarExpr)) {
+ Expr *ASExpr = VarExpr;
+ QualType BaseTy = ArraySectionExpr::getBaseOriginalType(ASExpr);
+ QualType EltTy = getASTContext().getBaseElementType(BaseTy);
+
+ if (!TypeIsValid(EltTy)) {
+ Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_type)
+ << EltTy << /*Sub array base type*/ 1;
+ return ExprError();
+ }
+ } else if (auto *RD = VarExpr->getType()->getAsRecordDecl()) {
+ if (!RD->isStruct() && !RD->isClass()) {
+ Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_composite_type)
+ << /*not class or struct*/ 0 << VarExpr->getType();
+ return ExprError();
+ }
+
+ if (!RD->isCompleteDefinition()) {
+ Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_composite_type)
+ << /*incomplete*/ 1 << VarExpr->getType();
+ return ExprError();
+ }
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
+ CXXRD && !CXXRD->isAggregate()) {
+ Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_composite_type)
+ << /*aggregate*/ 2 << VarExpr->getType();
+ return ExprError();
+ }
+
+ for (FieldDecl *FD : RD->fields()) {
+ if (!TypeIsValid(FD->getType())) {
+ Diag(VarExpr->getExprLoc(),
+ diag::err_acc_reduction_composite_member_type);
+ Diag(FD->getLocation(), diag::note_acc_reduction_composite_member_loc);
+ return ExprError();
+ }
+ }
+ } else if (!TypeIsValid(VarExpr->getType())) {
+ Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_type)
+ << VarExpr->getType() << /*Sub array base type*/ 0;
+ return ExprError();
+ }
+
+ return VarExpr;
+}
+
+void SemaOpenACC::ActOnConstruct(OpenACCDirectiveKind K,
+ SourceLocation DirLoc) {
+ switch (K) {
+ case OpenACCDirectiveKind::Invalid:
+ // Nothing to do here, an invalid kind has nothing we can check here. We
+ // want to continue parsing clauses as far as we can, so we will just
+ // ensure that we can still work and don't check any construct-specific
+ // rules anywhere.
+ break;
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Kernels:
+ case OpenACCDirectiveKind::Loop:
+ // Nothing to do here, there is no real legalization that needs to happen
+ // here as these constructs do not take any arguments.
+ break;
+ default:
+ Diag(DirLoc, diag::warn_acc_construct_unimplemented) << K;
+ break;
+ }
+}
+
+ExprResult SemaOpenACC::ActOnIntExpr(OpenACCDirectiveKind DK,
+ OpenACCClauseKind CK, SourceLocation Loc,
+ Expr *IntExpr) {
+
+ assert(((DK != OpenACCDirectiveKind::Invalid &&
+ CK == OpenACCClauseKind::Invalid) ||
+ (DK == OpenACCDirectiveKind::Invalid &&
+ CK != OpenACCClauseKind::Invalid) ||
+ (DK == OpenACCDirectiveKind::Invalid &&
+ CK == OpenACCClauseKind::Invalid)) &&
+ "Only one of directive or clause kind should be provided");
+
+ class IntExprConverter : public Sema::ICEConvertDiagnoser {
+ OpenACCDirectiveKind DirectiveKind;
+ OpenACCClauseKind ClauseKind;
+ Expr *IntExpr;
+
+    // Gets the index into the diagnostics so we can use this for clauses,
+    // directives, and sub-arrays.
+ unsigned getDiagKind() const {
+ if (ClauseKind != OpenACCClauseKind::Invalid)
+ return 0;
+ if (DirectiveKind != OpenACCDirectiveKind::Invalid)
+ return 1;
+ return 2;
+ }
+
+ public:
+ IntExprConverter(OpenACCDirectiveKind DK, OpenACCClauseKind CK,
+ Expr *IntExpr)
+ : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
+ /*Suppress=*/false,
+ /*SuppressConversion=*/true),
+ DirectiveKind(DK), ClauseKind(CK), IntExpr(IntExpr) {}
+
+ bool match(QualType T) override {
+ // OpenACC spec just calls this 'integer expression' as having an
+ // 'integer type', so fall back on C99's 'integer type'.
+ return T->isIntegerType();
+ }
+ SemaBase::SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
+ QualType T) override {
+ return S.Diag(Loc, diag::err_acc_int_expr_requires_integer)
+ << getDiagKind() << ClauseKind << DirectiveKind << T;
+ }
+
+ SemaBase::SemaDiagnosticBuilder
+ diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) override {
+ return S.Diag(Loc, diag::err_acc_int_expr_incomplete_class_type)
+ << T << IntExpr->getSourceRange();
+ }
+
+ SemaBase::SemaDiagnosticBuilder
+ diagnoseExplicitConv(Sema &S, SourceLocation Loc, QualType T,
+ QualType ConvTy) override {
+ return S.Diag(Loc, diag::err_acc_int_expr_explicit_conversion)
+ << T << ConvTy;
+ }
+
+ SemaBase::SemaDiagnosticBuilder noteExplicitConv(Sema &S,
+ CXXConversionDecl *Conv,
+ QualType ConvTy) override {
+ return S.Diag(Conv->getLocation(), diag::note_acc_int_expr_conversion)
+ << ConvTy->isEnumeralType() << ConvTy;
+ }
+
+ SemaBase::SemaDiagnosticBuilder
+ diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) override {
+ return S.Diag(Loc, diag::err_acc_int_expr_multiple_conversions) << T;
+ }
+
+ SemaBase::SemaDiagnosticBuilder
+ noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
+ return S.Diag(Conv->getLocation(), diag::note_acc_int_expr_conversion)
+ << ConvTy->isEnumeralType() << ConvTy;
+ }
+
+ SemaBase::SemaDiagnosticBuilder
+ diagnoseConversion(Sema &S, SourceLocation Loc, QualType T,
+ QualType ConvTy) override {
+ llvm_unreachable("conversion functions are permitted");
+ }
+ } IntExprDiagnoser(DK, CK, IntExpr);
+
+ ExprResult IntExprResult = SemaRef.PerformContextualImplicitConversion(
+ Loc, IntExpr, IntExprDiagnoser);
+ if (IntExprResult.isInvalid())
+ return ExprError();
+
+ IntExpr = IntExprResult.get();
+ if (!IntExpr->isTypeDependent() && !IntExpr->getType()->isIntegerType())
+ return ExprError();
+
+ // TODO OpenACC: Do we want to perform usual unary conversions here? When
+ // doing codegen we might find that is necessary, but skip it for now.
+ return IntExpr;
+}
+
+bool SemaOpenACC::CheckVarIsPointerType(OpenACCClauseKind ClauseKind,
+ Expr *VarExpr) {
+ // We already know that VarExpr is a proper reference to a variable, so we
+ // should be able to just take the type of the expression to get the type of
+ // the referenced variable.
+
+ // We've already seen an error, don't diagnose anything else.
+ if (!VarExpr || VarExpr->containsErrors())
+ return false;
+
+ if (isa<ArraySectionExpr>(VarExpr->IgnoreParenImpCasts()) ||
+ VarExpr->hasPlaceholderType(BuiltinType::ArraySection)) {
+ Diag(VarExpr->getExprLoc(), diag::err_array_section_use) << /*OpenACC=*/0;
+ Diag(VarExpr->getExprLoc(), diag::note_acc_expected_pointer_var);
+ return true;
+ }
+
+ QualType Ty = VarExpr->getType();
+ Ty = Ty.getNonReferenceType().getUnqualifiedType();
+
+ // Nothing we can do if this is a dependent type.
+ if (Ty->isDependentType())
+ return false;
+
+ if (!Ty->isPointerType())
+ return Diag(VarExpr->getExprLoc(), diag::err_acc_var_not_pointer_type)
+ << ClauseKind << Ty;
+ return false;
+}
+
+ExprResult SemaOpenACC::ActOnVar(OpenACCClauseKind CK, Expr *VarExpr) {
+ Expr *CurVarExpr = VarExpr->IgnoreParenImpCasts();
+
+ // Sub-arrays/subscript-exprs are fine as long as the base is a
+ // VarExpr/MemberExpr. So strip all of those off.
+ while (isa<ArraySectionExpr, ArraySubscriptExpr>(CurVarExpr)) {
+ if (auto *SubScrpt = dyn_cast<ArraySubscriptExpr>(CurVarExpr))
+ CurVarExpr = SubScrpt->getBase()->IgnoreParenImpCasts();
+ else
+ CurVarExpr =
+ cast<ArraySectionExpr>(CurVarExpr)->getBase()->IgnoreParenImpCasts();
+ }
+
+ // References to a VarDecl are fine.
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(CurVarExpr)) {
+ if (isa<VarDecl, NonTypeTemplateParmDecl>(
+ DRE->getFoundDecl()->getCanonicalDecl()))
+ return VarExpr;
+ }
+
+ // If CK is a Reduction, this special cases for OpenACC3.3 2.5.15: "A var in a
+ // reduction clause must be a scalar variable name, an aggregate variable
+  // name, an array element, or a subarray."
+ // A MemberExpr that references a Field is valid.
+ if (CK != OpenACCClauseKind::Reduction) {
+ if (const auto *ME = dyn_cast<MemberExpr>(CurVarExpr)) {
+ if (isa<FieldDecl>(ME->getMemberDecl()->getCanonicalDecl()))
+ return VarExpr;
+ }
+ }
+
+ // Referring to 'this' is always OK.
+ if (isa<CXXThisExpr>(CurVarExpr))
+ return VarExpr;
+
+  // Nothing really we can do here, as these are dependent. So just treat them
+  // as valid.
+ if (isa<DependentScopeDeclRefExpr>(CurVarExpr) ||
+ (CK != OpenACCClauseKind::Reduction &&
+ isa<CXXDependentScopeMemberExpr>(CurVarExpr)))
+ return VarExpr;
+
+ // There isn't really anything we can do in the case of a recovery expr, so
+ // skip the diagnostic rather than produce a confusing diagnostic.
+ if (isa<RecoveryExpr>(CurVarExpr))
+ return ExprError();
+
+ Diag(VarExpr->getExprLoc(), diag::err_acc_not_a_var_ref)
+ << (CK != OpenACCClauseKind::Reduction);
+ return ExprError();
+}
+
+ExprResult SemaOpenACC::ActOnArraySectionExpr(Expr *Base, SourceLocation LBLoc,
+ Expr *LowerBound,
+ SourceLocation ColonLoc,
+ Expr *Length,
+ SourceLocation RBLoc) {
+ ASTContext &Context = getASTContext();
+
+ // Handle placeholders.
+ if (Base->hasPlaceholderType() &&
+ !Base->hasPlaceholderType(BuiltinType::ArraySection)) {
+ ExprResult Result = SemaRef.CheckPlaceholderExpr(Base);
+ if (Result.isInvalid())
+ return ExprError();
+ Base = Result.get();
+ }
+ if (LowerBound && LowerBound->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult Result = SemaRef.CheckPlaceholderExpr(LowerBound);
+ if (Result.isInvalid())
+ return ExprError();
+ Result = SemaRef.DefaultLvalueConversion(Result.get());
+ if (Result.isInvalid())
+ return ExprError();
+ LowerBound = Result.get();
+ }
+ if (Length && Length->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult Result = SemaRef.CheckPlaceholderExpr(Length);
+ if (Result.isInvalid())
+ return ExprError();
+ Result = SemaRef.DefaultLvalueConversion(Result.get());
+ if (Result.isInvalid())
+ return ExprError();
+ Length = Result.get();
+ }
+
+ // Check the 'base' value, it must be an array or pointer type, and not to/of
+ // a function type.
+ QualType OriginalBaseTy = ArraySectionExpr::getBaseOriginalType(Base);
+ QualType ResultTy;
+ if (!Base->isTypeDependent()) {
+ if (OriginalBaseTy->isAnyPointerType()) {
+ ResultTy = OriginalBaseTy->getPointeeType();
+ } else if (OriginalBaseTy->isArrayType()) {
+ ResultTy = OriginalBaseTy->getAsArrayTypeUnsafe()->getElementType();
+ } else {
+ return ExprError(
+ Diag(Base->getExprLoc(), diag::err_acc_typecheck_subarray_value)
+ << Base->getSourceRange());
+ }
+
+ if (ResultTy->isFunctionType()) {
+ Diag(Base->getExprLoc(), diag::err_acc_subarray_function_type)
+ << ResultTy << Base->getSourceRange();
+ return ExprError();
+ }
+
+ if (SemaRef.RequireCompleteType(Base->getExprLoc(), ResultTy,
+ diag::err_acc_subarray_incomplete_type,
+ Base))
+ return ExprError();
+
+ if (!Base->hasPlaceholderType(BuiltinType::ArraySection)) {
+ ExprResult Result = SemaRef.DefaultFunctionArrayLvalueConversion(Base);
+ if (Result.isInvalid())
+ return ExprError();
+ Base = Result.get();
+ }
+ }
+
+ auto GetRecovery = [&](Expr *E, QualType Ty) {
+ ExprResult Recovery =
+ SemaRef.CreateRecoveryExpr(E->getBeginLoc(), E->getEndLoc(), E, Ty);
+ return Recovery.isUsable() ? Recovery.get() : nullptr;
+ };
+
+ // Ensure both of the expressions are int-exprs.
+ if (LowerBound && !LowerBound->isTypeDependent()) {
+ ExprResult LBRes =
+ ActOnIntExpr(OpenACCDirectiveKind::Invalid, OpenACCClauseKind::Invalid,
+ LowerBound->getExprLoc(), LowerBound);
+
+ if (LBRes.isUsable())
+ LBRes = SemaRef.DefaultLvalueConversion(LBRes.get());
+ LowerBound =
+ LBRes.isUsable() ? LBRes.get() : GetRecovery(LowerBound, Context.IntTy);
+ }
+
+ if (Length && !Length->isTypeDependent()) {
+ ExprResult LenRes =
+ ActOnIntExpr(OpenACCDirectiveKind::Invalid, OpenACCClauseKind::Invalid,
+ Length->getExprLoc(), Length);
+
+ if (LenRes.isUsable())
+ LenRes = SemaRef.DefaultLvalueConversion(LenRes.get());
+ Length =
+ LenRes.isUsable() ? LenRes.get() : GetRecovery(Length, Context.IntTy);
+ }
+
+ // Length is required if the base type is not an array of known bounds.
+ if (!Length && (OriginalBaseTy.isNull() ||
+ (!OriginalBaseTy->isDependentType() &&
+ !OriginalBaseTy->isConstantArrayType() &&
+ !OriginalBaseTy->isDependentSizedArrayType()))) {
+ bool IsArray = !OriginalBaseTy.isNull() && OriginalBaseTy->isArrayType();
+ Diag(ColonLoc, diag::err_acc_subarray_no_length) << IsArray;
+ // Fill in a dummy 'length' so that when we instantiate this we don't
+ // double-diagnose here.
+ ExprResult Recovery = SemaRef.CreateRecoveryExpr(
+ ColonLoc, SourceLocation(), ArrayRef<Expr *>{std::nullopt},
+ Context.IntTy);
+ Length = Recovery.isUsable() ? Recovery.get() : nullptr;
+ }
+
+  // Check the values of each of the arguments; they cannot be negative (we
+ // assume), and if the array bound is known, must be within range. As we do
+ // so, do our best to continue with evaluation, we can set the
+ // value/expression to nullptr/nullopt if they are invalid, and treat them as
+ // not present for the rest of evaluation.
+
+ // We don't have to check for dependence, because the dependent size is
+ // represented as a different AST node.
+ std::optional<llvm::APSInt> BaseSize;
+ if (!OriginalBaseTy.isNull() && OriginalBaseTy->isConstantArrayType()) {
+ const auto *ArrayTy = Context.getAsConstantArrayType(OriginalBaseTy);
+ BaseSize = ArrayTy->getSize();
+ }
+
+ auto GetBoundValue = [&](Expr *E) -> std::optional<llvm::APSInt> {
+ if (!E || E->isInstantiationDependent())
+ return std::nullopt;
+
+ Expr::EvalResult Res;
+ if (!E->EvaluateAsInt(Res, Context))
+ return std::nullopt;
+ return Res.Val.getInt();
+ };
+
+ std::optional<llvm::APSInt> LowerBoundValue = GetBoundValue(LowerBound);
+ std::optional<llvm::APSInt> LengthValue = GetBoundValue(Length);
+
+ // Check lower bound for negative or out of range.
+ if (LowerBoundValue.has_value()) {
+ if (LowerBoundValue->isNegative()) {
+ Diag(LowerBound->getExprLoc(), diag::err_acc_subarray_negative)
+ << /*LowerBound=*/0 << toString(*LowerBoundValue, /*Radix=*/10);
+ LowerBoundValue.reset();
+ LowerBound = GetRecovery(LowerBound, LowerBound->getType());
+ } else if (BaseSize.has_value() &&
+ llvm::APSInt::compareValues(*LowerBoundValue, *BaseSize) >= 0) {
+ // Lower bound (start index) must be less than the size of the array.
+ Diag(LowerBound->getExprLoc(), diag::err_acc_subarray_out_of_range)
+ << /*LowerBound=*/0 << toString(*LowerBoundValue, /*Radix=*/10)
+ << toString(*BaseSize, /*Radix=*/10);
+ LowerBoundValue.reset();
+ LowerBound = GetRecovery(LowerBound, LowerBound->getType());
+ }
+ }
+
+ // Check length for negative or out of range.
+ if (LengthValue.has_value()) {
+ if (LengthValue->isNegative()) {
+ Diag(Length->getExprLoc(), diag::err_acc_subarray_negative)
+ << /*Length=*/1 << toString(*LengthValue, /*Radix=*/10);
+ LengthValue.reset();
+ Length = GetRecovery(Length, Length->getType());
+ } else if (BaseSize.has_value() &&
+ llvm::APSInt::compareValues(*LengthValue, *BaseSize) > 0) {
+      // Length must be less than or equal to the size of the array.
+ Diag(Length->getExprLoc(), diag::err_acc_subarray_out_of_range)
+ << /*Length=*/1 << toString(*LengthValue, /*Radix=*/10)
+ << toString(*BaseSize, /*Radix=*/10);
+ LengthValue.reset();
+ Length = GetRecovery(Length, Length->getType());
+ }
+ }
+
+ // Adding two APSInts requires matching sign, so extract that here.
+ auto AddAPSInt = [](llvm::APSInt LHS, llvm::APSInt RHS) -> llvm::APSInt {
+ if (LHS.isSigned() == RHS.isSigned())
+ return LHS + RHS;
+
+ unsigned Width = std::max(LHS.getBitWidth(), RHS.getBitWidth()) + 1;
+ return llvm::APSInt(LHS.sext(Width) + RHS.sext(Width), /*Signed=*/true);
+ };
+
+ // If we know all 3 values, we can diagnose that the total value would be out
+ // of range.
+ if (BaseSize.has_value() && LowerBoundValue.has_value() &&
+ LengthValue.has_value() &&
+ llvm::APSInt::compareValues(AddAPSInt(*LowerBoundValue, *LengthValue),
+ *BaseSize) > 0) {
+ Diag(Base->getExprLoc(),
+ diag::err_acc_subarray_base_plus_length_out_of_range)
+ << toString(*LowerBoundValue, /*Radix=*/10)
+ << toString(*LengthValue, /*Radix=*/10)
+ << toString(*BaseSize, /*Radix=*/10);
+
+ LowerBoundValue.reset();
+ LowerBound = GetRecovery(LowerBound, LowerBound->getType());
+ LengthValue.reset();
+ Length = GetRecovery(Length, Length->getType());
+ }
+
+ // If any part of the expression is dependent, return a dependent sub-array.
+ QualType ArrayExprTy = Context.ArraySectionTy;
+ if (Base->isTypeDependent() ||
+ (LowerBound && LowerBound->isInstantiationDependent()) ||
+ (Length && Length->isInstantiationDependent()))
+ ArrayExprTy = Context.DependentTy;
+
+ return new (Context)
+ ArraySectionExpr(Base, LowerBound, Length, ArrayExprTy, VK_LValue,
+ OK_Ordinary, ColonLoc, RBLoc);
+}
+
+bool SemaOpenACC::ActOnStartStmtDirective(OpenACCDirectiveKind K,
+ SourceLocation StartLoc) {
+ return diagnoseConstructAppertainment(*this, K, StartLoc, /*IsStmt=*/true);
+}
+
+StmtResult SemaOpenACC::ActOnEndStmtDirective(OpenACCDirectiveKind K,
+ SourceLocation StartLoc,
+ SourceLocation DirLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OpenACCClause *> Clauses,
+ StmtResult AssocStmt) {
+ switch (K) {
+ default:
+ return StmtEmpty();
+ case OpenACCDirectiveKind::Invalid:
+ return StmtError();
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Kernels: {
+ auto *ComputeConstruct = OpenACCComputeConstruct::Create(
+ getASTContext(), K, StartLoc, DirLoc, EndLoc, Clauses,
+ AssocStmt.isUsable() ? AssocStmt.get() : nullptr,
+ ParentlessLoopConstructs);
+
+ ParentlessLoopConstructs.clear();
+ return ComputeConstruct;
+ }
+ case OpenACCDirectiveKind::Loop: {
+ auto *LoopConstruct = OpenACCLoopConstruct::Create(
+ getASTContext(), StartLoc, DirLoc, EndLoc, Clauses,
+ AssocStmt.isUsable() ? AssocStmt.get() : nullptr);
+
+ // If we are in the scope of a compute construct, add this to the list of
+ // loop constructs that need assigning to the next closing compute
+ // construct.
+ if (InsideComputeConstruct)
+ ParentlessLoopConstructs.push_back(LoopConstruct);
+
+ return LoopConstruct;
+ }
+ }
+ llvm_unreachable("Unhandled case in directive handling?");
+}
+
+StmtResult SemaOpenACC::ActOnAssociatedStmt(SourceLocation DirectiveLoc,
+ OpenACCDirectiveKind K,
+ StmtResult AssocStmt) {
+ switch (K) {
+ default:
+ llvm_unreachable("Unimplemented associated statement application");
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Kernels:
+ // There really isn't any checking here that could happen. As long as we
+ // have a statement to associate, this should be fine.
+ // OpenACC 3.3 Section 6:
+ // Structured Block: in C or C++, an executable statement, possibly
+ // compound, with a single entry at the top and a single exit at the
+ // bottom.
+ // FIXME: Should we reject DeclStmt's here? The standard isn't clear, and
+ // an interpretation of it is to allow this and treat the initializer as
+ // the 'structured block'.
+ return AssocStmt;
+ case OpenACCDirectiveKind::Loop:
+ if (AssocStmt.isUsable() &&
+ !isa<CXXForRangeStmt, ForStmt>(AssocStmt.get())) {
+ Diag(AssocStmt.get()->getBeginLoc(), diag::err_acc_loop_not_for_loop);
+ Diag(DirectiveLoc, diag::note_acc_construct_here) << K;
+ return StmtError();
+ }
+ // TODO OpenACC: 2.9 ~ line 2010 specifies that the associated loop has some
+ // restrictions when there is a 'seq' clause in place. We probably need to
+ // implement that, including piping in the clauses here.
+ return AssocStmt;
+ }
+ llvm_unreachable("Invalid associated statement application");
+}
+
+bool SemaOpenACC::ActOnStartDeclDirective(OpenACCDirectiveKind K,
+ SourceLocation StartLoc) {
+ return diagnoseConstructAppertainment(*this, K, StartLoc, /*IsStmt=*/false);
+}
+
+DeclGroupRef SemaOpenACC::ActOnEndDeclDirective() { return DeclGroupRef{}; }
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaOpenCL.cpp b/contrib/llvm-project/clang/lib/Sema/SemaOpenCL.cpp
new file mode 100644
index 000000000000..9f746fffd34d
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaOpenCL.cpp
@@ -0,0 +1,578 @@
+//===--- SemaOpenCL.cpp --- Semantic Analysis for OpenCL constructs -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements semantic analysis for OpenCL.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaOpenCL.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Sema/ParsedAttr.h"
+#include "clang/Sema/Sema.h"
+
+namespace clang {
+SemaOpenCL::SemaOpenCL(Sema &S) : SemaBase(S) {}
+
+void SemaOpenCL::handleNoSVMAttr(Decl *D, const ParsedAttr &AL) {
+ if (getLangOpts().getOpenCLCompatibleVersion() < 200)
+ Diag(AL.getLoc(), diag::err_attribute_requires_opencl_version)
+ << AL << "2.0" << 1;
+ else
+ Diag(AL.getLoc(), diag::warn_opencl_attr_deprecated_ignored)
+ << AL << getLangOpts().getOpenCLVersionString();
+}
+
+void SemaOpenCL::handleAccessAttr(Decl *D, const ParsedAttr &AL) {
+ if (D->isInvalidDecl())
+ return;
+
+ // Check if there is only one access qualifier.
+ if (D->hasAttr<OpenCLAccessAttr>()) {
+ if (D->getAttr<OpenCLAccessAttr>()->getSemanticSpelling() ==
+ AL.getSemanticSpelling()) {
+ Diag(AL.getLoc(), diag::warn_duplicate_declspec)
+ << AL.getAttrName()->getName() << AL.getRange();
+ } else {
+ Diag(AL.getLoc(), diag::err_opencl_multiple_access_qualifiers)
+ << D->getSourceRange();
+ D->setInvalidDecl(true);
+ return;
+ }
+ }
+
+ // OpenCL v2.0 s6.6 - read_write can be used for image types to specify that
+ // an image object can be read and written. OpenCL v2.0 s6.13.6 - A kernel
+ // cannot read from and write to the same pipe object. Using the read_write
+ // (or __read_write) qualifier with the pipe qualifier is a compilation error.
+ // OpenCL v3.0 s6.8 - For OpenCL C 2.0, or with the
+ // __opencl_c_read_write_images feature, image objects specified as arguments
+ // to a kernel can additionally be declared to be read-write.
+ // C++ for OpenCL 1.0 inherits rule from OpenCL C v2.0.
+ // C++ for OpenCL 2021 inherits rule from OpenCL C v3.0.
+ if (const auto *PDecl = dyn_cast<ParmVarDecl>(D)) {
+ const Type *DeclTy = PDecl->getType().getCanonicalType().getTypePtr();
+ if (AL.getAttrName()->getName().contains("read_write")) {
+ bool ReadWriteImagesUnsupported =
+ (getLangOpts().getOpenCLCompatibleVersion() < 200) ||
+ (getLangOpts().getOpenCLCompatibleVersion() == 300 &&
+ !SemaRef.getOpenCLOptions().isSupported(
+ "__opencl_c_read_write_images", getLangOpts()));
+ if (ReadWriteImagesUnsupported || DeclTy->isPipeType()) {
+ Diag(AL.getLoc(), diag::err_opencl_invalid_read_write)
+ << AL << PDecl->getType() << DeclTy->isImageType();
+ D->setInvalidDecl(true);
+ return;
+ }
+ }
+ }
+
+ D->addAttr(::new (getASTContext()) OpenCLAccessAttr(getASTContext(), AL));
+}
+
+void SemaOpenCL::handleSubGroupSize(Decl *D, const ParsedAttr &AL) {
+ uint32_t SGSize;
+ const Expr *E = AL.getArgAsExpr(0);
+ if (!SemaRef.checkUInt32Argument(AL, E, SGSize))
+ return;
+ if (SGSize == 0) {
+ Diag(AL.getLoc(), diag::err_attribute_argument_is_zero)
+ << AL << E->getSourceRange();
+ return;
+ }
+
+ OpenCLIntelReqdSubGroupSizeAttr *Existing =
+ D->getAttr<OpenCLIntelReqdSubGroupSizeAttr>();
+ if (Existing && Existing->getSubGroupSize() != SGSize)
+ Diag(AL.getLoc(), diag::warn_duplicate_attribute) << AL;
+
+ D->addAttr(::new (getASTContext())
+ OpenCLIntelReqdSubGroupSizeAttr(getASTContext(), AL, SGSize));
+}
+
+static inline bool isBlockPointer(Expr *Arg) {
+ return Arg->getType()->isBlockPointerType();
+}
+
+/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
+/// void*, which is a requirement of device side enqueue.
+static bool checkBlockArgs(Sema &S, Expr *BlockArg) {
+ const BlockPointerType *BPT =
+ cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
+ ArrayRef<QualType> Params =
+ BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
+ unsigned ArgCounter = 0;
+ bool IllegalParams = false;
+ // Iterate through the block parameters until either one is found that is not
+ // a local void*, or the block is valid.
+ for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
+ I != E; ++I, ++ArgCounter) {
+ if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
+ (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
+ LangAS::opencl_local) {
+ // Get the location of the error. If a block literal has been passed
+ // (BlockExpr) then we can point straight to the offending argument,
+ // else we just point to the variable reference.
+ SourceLocation ErrorLoc;
+ if (isa<BlockExpr>(BlockArg)) {
+ BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
+ ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
+ } else if (isa<DeclRefExpr>(BlockArg)) {
+ ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
+ }
+ S.Diag(ErrorLoc,
+ diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
+ IllegalParams = true;
+ }
+ }
+
+ return IllegalParams;
+}
+
+bool SemaOpenCL::checkSubgroupExt(CallExpr *Call) {
+ // OpenCL device can support extension but not the feature as extension
+ // requires subgroup independent forward progress, but subgroup independent
+ // forward progress is optional in OpenCL C 3.0 __opencl_c_subgroups feature.
+ if (!SemaRef.getOpenCLOptions().isSupported("cl_khr_subgroups",
+ getLangOpts()) &&
+ !SemaRef.getOpenCLOptions().isSupported("__opencl_c_subgroups",
+ getLangOpts())) {
+ Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
+ << 1 << Call->getDirectCallee()
+ << "cl_khr_subgroups or __opencl_c_subgroups";
+ return true;
+ }
+ return false;
+}
+
+bool SemaOpenCL::checkBuiltinNDRangeAndBlock(CallExpr *TheCall) {
+ if (SemaRef.checkArgCount(TheCall, 2))
+ return true;
+
+ if (checkSubgroupExt(TheCall))
+ return true;
+
+ // First argument is an ndrange_t type.
+ Expr *NDRangeArg = TheCall->getArg(0);
+ if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
+ Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
+ << TheCall->getDirectCallee() << "'ndrange_t'";
+ return true;
+ }
+
+ Expr *BlockArg = TheCall->getArg(1);
+ if (!isBlockPointer(BlockArg)) {
+ Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
+ << TheCall->getDirectCallee() << "block";
+ return true;
+ }
+ return checkBlockArgs(SemaRef, BlockArg);
+}
+
+bool SemaOpenCL::checkBuiltinKernelWorkGroupSize(CallExpr *TheCall) {
+ if (SemaRef.checkArgCount(TheCall, 1))
+ return true;
+
+ Expr *BlockArg = TheCall->getArg(0);
+ if (!isBlockPointer(BlockArg)) {
+ Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
+ << TheCall->getDirectCallee() << "block";
+ return true;
+ }
+ return checkBlockArgs(SemaRef, BlockArg);
+}
+
+/// Diagnose integer type and any valid implicit conversion to it.
+static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
+ // Taking into account implicit conversions,
+ // allow any integer.
+ if (!E->getType()->isIntegerType()) {
+ S.Diag(E->getBeginLoc(),
+ diag::err_opencl_enqueue_kernel_invalid_local_size_type);
+ return true;
+ }
+ // Potentially emit standard warnings for implicit conversions if enabled
+ // using -Wconversion.
+ S.CheckImplicitConversion(E, IntT, E->getBeginLoc());
+ return false;
+}
+
+static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
+ unsigned Start, unsigned End) {
+ bool IllegalParams = false;
+ for (unsigned I = Start; I <= End; ++I)
+ IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
+ S.Context.getSizeType());
+ return IllegalParams;
+}
+
+/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
+/// 'local void*' parameter of passed block.
+static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
+ Expr *BlockArg,
+ unsigned NumNonVarArgs) {
+ const BlockPointerType *BPT =
+ cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
+ unsigned NumBlockParams =
+ BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
+ unsigned TotalNumArgs = TheCall->getNumArgs();
+
+ // For each argument passed to the block, a corresponding uint needs to
+ // be passed to describe the size of the local memory.
+ if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
+ S.Diag(TheCall->getBeginLoc(),
+ diag::err_opencl_enqueue_kernel_local_size_args);
+ return true;
+ }
+
+ // Check that the sizes of the local memory are specified by integers.
+ return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
+ TotalNumArgs - 1);
+}
+
+bool SemaOpenCL::checkBuiltinEnqueueKernel(CallExpr *TheCall) {
+ ASTContext &Context = getASTContext();
+ unsigned NumArgs = TheCall->getNumArgs();
+
+ if (NumArgs < 4) {
+ Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_few_args_at_least)
+ << 0 << 4 << NumArgs << /*is non object*/ 0;
+ return true;
+ }
+
+ Expr *Arg0 = TheCall->getArg(0);
+ Expr *Arg1 = TheCall->getArg(1);
+ Expr *Arg2 = TheCall->getArg(2);
+ Expr *Arg3 = TheCall->getArg(3);
+
+ // First argument always needs to be a queue_t type.
+ if (!Arg0->getType()->isQueueT()) {
+ Diag(TheCall->getArg(0)->getBeginLoc(),
+ diag::err_opencl_builtin_expected_type)
+ << TheCall->getDirectCallee() << getASTContext().OCLQueueTy;
+ return true;
+ }
+
+ // Second argument always needs to be a kernel_enqueue_flags_t enum value.
+ if (!Arg1->getType()->isIntegerType()) {
+ Diag(TheCall->getArg(1)->getBeginLoc(),
+ diag::err_opencl_builtin_expected_type)
+ << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
+ return true;
+ }
+
+ // Third argument is always an ndrange_t type.
+ if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
+ Diag(TheCall->getArg(2)->getBeginLoc(),
+ diag::err_opencl_builtin_expected_type)
+ << TheCall->getDirectCallee() << "'ndrange_t'";
+ return true;
+ }
+
+ // With four arguments, there is only one form that the function could be
+ // called in: no events and no variable arguments.
+ if (NumArgs == 4) {
+ // check that the last argument is the right block type.
+ if (!isBlockPointer(Arg3)) {
+ Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
+ << TheCall->getDirectCallee() << "block";
+ return true;
+ }
+ // we have a block type, check the prototype
+ const BlockPointerType *BPT =
+ cast<BlockPointerType>(Arg3->getType().getCanonicalType());
+ if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() >
+ 0) {
+ Diag(Arg3->getBeginLoc(), diag::err_opencl_enqueue_kernel_blocks_no_args);
+ return true;
+ }
+ return false;
+ }
+ // we can have block + varargs.
+ if (isBlockPointer(Arg3))
+ return (checkBlockArgs(SemaRef, Arg3) ||
+ checkOpenCLEnqueueVariadicArgs(SemaRef, TheCall, Arg3, 4));
+ // last two cases with either exactly 7 args or 7 args and varargs.
+ if (NumArgs >= 7) {
+ // check common block argument.
+ Expr *Arg6 = TheCall->getArg(6);
+ if (!isBlockPointer(Arg6)) {
+ Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
+ << TheCall->getDirectCallee() << "block";
+ return true;
+ }
+ if (checkBlockArgs(SemaRef, Arg6))
+ return true;
+
+    // Fourth argument has to be any integer type.
+ if (!Arg3->getType()->isIntegerType()) {
+ Diag(TheCall->getArg(3)->getBeginLoc(),
+ diag::err_opencl_builtin_expected_type)
+ << TheCall->getDirectCallee() << "integer";
+ return true;
+ }
+ // check remaining common arguments.
+ Expr *Arg4 = TheCall->getArg(4);
+ Expr *Arg5 = TheCall->getArg(5);
+
+ // Fifth argument is always passed as a pointer to clk_event_t.
+ if (!Arg4->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNotNull) &&
+ !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
+ Diag(TheCall->getArg(4)->getBeginLoc(),
+ diag::err_opencl_builtin_expected_type)
+ << TheCall->getDirectCallee()
+ << Context.getPointerType(Context.OCLClkEventTy);
+ return true;
+ }
+
+ // Sixth argument is always passed as a pointer to clk_event_t.
+ if (!Arg5->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNotNull) &&
+ !(Arg5->getType()->isPointerType() &&
+ Arg5->getType()->getPointeeType()->isClkEventT())) {
+ Diag(TheCall->getArg(5)->getBeginLoc(),
+ diag::err_opencl_builtin_expected_type)
+ << TheCall->getDirectCallee()
+ << Context.getPointerType(Context.OCLClkEventTy);
+ return true;
+ }
+
+ if (NumArgs == 7)
+ return false;
+
+ return checkOpenCLEnqueueVariadicArgs(SemaRef, TheCall, Arg6, 7);
+ }
+
+  // None of the specific cases has been detected, give a generic error.
+ Diag(TheCall->getBeginLoc(), diag::err_opencl_enqueue_kernel_incorrect_args);
+ return true;
+}
+
+/// Returns OpenCL access qual.
+static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
+ return D->getAttr<OpenCLAccessAttr>();
+}
+
+/// Returns true if the first arg is not a pipe or its access qual mismatches.
+static bool checkPipeArg(Sema &S, CallExpr *Call) {
+ const Expr *Arg0 = Call->getArg(0);
+ // First argument type should always be pipe.
+ if (!Arg0->getType()->isPipeType()) {
+ S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
+ << Call->getDirectCallee() << Arg0->getSourceRange();
+ return true;
+ }
+ OpenCLAccessAttr *AccessQual =
+ getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
+ // Validates the access qualifier is compatible with the call.
+ // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
+ // read_only and write_only, and assumed to be read_only if no qualifier is
+ // specified.
+ switch (Call->getDirectCallee()->getBuiltinID()) {
+ case Builtin::BIread_pipe:
+ case Builtin::BIreserve_read_pipe:
+ case Builtin::BIcommit_read_pipe:
+ case Builtin::BIwork_group_reserve_read_pipe:
+ case Builtin::BIsub_group_reserve_read_pipe:
+ case Builtin::BIwork_group_commit_read_pipe:
+ case Builtin::BIsub_group_commit_read_pipe:
+ if (!(!AccessQual || AccessQual->isReadOnly())) {
+ S.Diag(Arg0->getBeginLoc(),
+ diag::err_opencl_builtin_pipe_invalid_access_modifier)
+ << "read_only" << Arg0->getSourceRange();
+ return true;
+ }
+ break;
+ case Builtin::BIwrite_pipe:
+ case Builtin::BIreserve_write_pipe:
+ case Builtin::BIcommit_write_pipe:
+ case Builtin::BIwork_group_reserve_write_pipe:
+ case Builtin::BIsub_group_reserve_write_pipe:
+ case Builtin::BIwork_group_commit_write_pipe:
+ case Builtin::BIsub_group_commit_write_pipe:
+ if (!(AccessQual && AccessQual->isWriteOnly())) {
+ S.Diag(Arg0->getBeginLoc(),
+ diag::err_opencl_builtin_pipe_invalid_access_modifier)
+ << "write_only" << Arg0->getSourceRange();
+ return true;
+ }
+ break;
+ default:
+ break;
+ }
+ return false;
+}
+
+/// Returns true if pipe element type is different from the pointer.
+static bool checkPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
+ const Expr *Arg0 = Call->getArg(0);
+ const Expr *ArgIdx = Call->getArg(Idx);
+ const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
+ const QualType EltTy = PipeTy->getElementType();
+ const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
+ // The Idx argument should be a pointer and the type of the pointer and
+ // the type of pipe element should also be the same.
+ if (!ArgTy ||
+ !S.Context.hasSameType(
+ EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
+ S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
+ << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
+ << ArgIdx->getType() << ArgIdx->getSourceRange();
+ return true;
+ }
+ return false;
+}
+
+bool SemaOpenCL::checkBuiltinRWPipe(CallExpr *Call) {
+ // OpenCL v2.0 s6.13.16.2 - The built-in read/write
+ // functions have two forms.
+ switch (Call->getNumArgs()) {
+ case 2:
+ if (checkPipeArg(SemaRef, Call))
+ return true;
+ // The call with 2 arguments should be
+ // read/write_pipe(pipe T, T*).
+ // Check packet type T.
+ if (checkPipePacketType(SemaRef, Call, 1))
+ return true;
+ break;
+
+ case 4: {
+ if (checkPipeArg(SemaRef, Call))
+ return true;
+ // The call with 4 arguments should be
+ // read/write_pipe(pipe T, reserve_id_t, uint, T*).
+ // Check reserve_id_t.
+ if (!Call->getArg(1)->getType()->isReserveIDT()) {
+ Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
+ << Call->getDirectCallee() << getASTContext().OCLReserveIDTy
+ << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
+ return true;
+ }
+
+ // Check the index.
+ const Expr *Arg2 = Call->getArg(2);
+ if (!Arg2->getType()->isIntegerType() &&
+ !Arg2->getType()->isUnsignedIntegerType()) {
+ Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
+ << Call->getDirectCallee() << getASTContext().UnsignedIntTy
+ << Arg2->getType() << Arg2->getSourceRange();
+ return true;
+ }
+
+ // Check packet type T.
+ if (checkPipePacketType(SemaRef, Call, 3))
+ return true;
+ } break;
+ default:
+ Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
+ << Call->getDirectCallee() << Call->getSourceRange();
+ return true;
+ }
+
+ return false;
+}
+
+bool SemaOpenCL::checkBuiltinReserveRWPipe(CallExpr *Call) {
+ if (SemaRef.checkArgCount(Call, 2))
+ return true;
+
+ if (checkPipeArg(SemaRef, Call))
+ return true;
+
+ // Check the reserve size.
+ if (!Call->getArg(1)->getType()->isIntegerType() &&
+ !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
+ Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
+ << Call->getDirectCallee() << getASTContext().UnsignedIntTy
+ << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
+ return true;
+ }
+
+  // Since the return type of the reserve_read/write_pipe built-in functions
+  // is reserve_id_t, which is not defined in the builtin def file, we use int
+  // as the return type and need to override the return type here.
+ Call->setType(getASTContext().OCLReserveIDTy);
+
+ return false;
+}
+
+bool SemaOpenCL::checkBuiltinCommitRWPipe(CallExpr *Call) {
+ if (SemaRef.checkArgCount(Call, 2))
+ return true;
+
+ if (checkPipeArg(SemaRef, Call))
+ return true;
+
+ // Check reserve_id_t.
+ if (!Call->getArg(1)->getType()->isReserveIDT()) {
+ Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
+ << Call->getDirectCallee() << getASTContext().OCLReserveIDTy
+ << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
+ return true;
+ }
+
+ return false;
+}
+
+bool SemaOpenCL::checkBuiltinPipePackets(CallExpr *Call) {
+ if (SemaRef.checkArgCount(Call, 1))
+ return true;
+
+ if (!Call->getArg(0)->getType()->isPipeType()) {
+ Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
+ << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
+ return true;
+ }
+
+ return false;
+}
+
+bool SemaOpenCL::checkBuiltinToAddr(unsigned BuiltinID, CallExpr *Call) {
+ if (SemaRef.checkArgCount(Call, 1))
+ return true;
+
+ auto RT = Call->getArg(0)->getType();
+ if (!RT->isPointerType() ||
+ RT->getPointeeType().getAddressSpace() == LangAS::opencl_constant) {
+ Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
+ << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
+ return true;
+ }
+
+ if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
+ Diag(Call->getArg(0)->getBeginLoc(),
+ diag::warn_opencl_generic_address_space_arg)
+ << Call->getDirectCallee()->getNameInfo().getAsString()
+ << Call->getArg(0)->getSourceRange();
+ }
+
+ RT = RT->getPointeeType();
+ auto Qual = RT.getQualifiers();
+ switch (BuiltinID) {
+ case Builtin::BIto_global:
+ Qual.setAddressSpace(LangAS::opencl_global);
+ break;
+ case Builtin::BIto_local:
+ Qual.setAddressSpace(LangAS::opencl_local);
+ break;
+ case Builtin::BIto_private:
+ Qual.setAddressSpace(LangAS::opencl_private);
+ break;
+ default:
+ llvm_unreachable("Invalid builtin function");
+ }
+ Call->setType(getASTContext().getPointerType(
+ getASTContext().getQualifiedType(RT.getUnqualifiedType(), Qual)));
+
+ return false;
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp b/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
index 217fcb979dee..6cbc075302eb 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
@@ -11,6 +11,8 @@
///
//===----------------------------------------------------------------------===//
+#include "clang/Sema/SemaOpenMP.h"
+
#include "TreeTransform.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
@@ -33,14 +35,17 @@
#include "clang/Sema/ParsedAttr.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/PointerEmbeddedInt.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Frontend/OpenMP/OMPAssume.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
+#include "llvm/IR/Assumptions.h"
#include <optional>
#include <set>
@@ -463,7 +468,7 @@ public:
getTopOfStack().PossiblyLoopCounter = D ? D->getCanonicalDecl() : D;
}
/// Gets the possible loop counter decl.
- const Decl *getPossiblyLoopCunter() const {
+ const Decl *getPossiblyLoopCounter() const {
return getTopOfStack().PossiblyLoopCounter;
}
/// Start new OpenMP region stack in new non-capturing function.
@@ -715,7 +720,7 @@ public:
TargetLocations.push_back(LocStart);
}
- /// Add location for the first encountered atomicc directive.
+ /// Add location for the first encountered atomic directive.
void addAtomicDirectiveLoc(SourceLocation Loc) {
if (AtomicLocation.isInvalid())
AtomicLocation = Loc;
@@ -1808,9 +1813,9 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
return DVar;
}
const_iterator End = end();
- if (!SemaRef.isOpenMPCapturedByRef(D,
- std::distance(ParentIterTarget, End),
- /*OpenMPCaptureLevel=*/0)) {
+ if (!SemaRef.OpenMP().isOpenMPCapturedByRef(
+ D, std::distance(ParentIterTarget, End),
+ /*OpenMPCaptureLevel=*/0)) {
DVar.RefExpr =
buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
IterTarget->ConstructLoc);
@@ -2018,22 +2023,22 @@ bool DSAStackTy::hasDirective(
return false;
}
-void Sema::InitDataSharingAttributesStack() {
- VarDataSharingAttributesStack = new DSAStackTy(*this);
+void SemaOpenMP::InitDataSharingAttributesStack() {
+ VarDataSharingAttributesStack = new DSAStackTy(SemaRef);
}
#define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
-void Sema::pushOpenMPFunctionRegion() { DSAStack->pushFunction(); }
+void SemaOpenMP::pushOpenMPFunctionRegion() { DSAStack->pushFunction(); }
-void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
+void SemaOpenMP::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
DSAStack->popFunction(OldFSI);
}
static bool isOpenMPDeviceDelayedContext(Sema &S) {
assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsTargetDevice &&
"Expected OpenMP device compilation.");
- return !S.isInOpenMPTargetExecutionDirective();
+ return !S.OpenMP().isInOpenMPTargetExecutionDirective();
}
namespace {
@@ -2045,20 +2050,20 @@ enum class FunctionEmissionStatus {
};
} // anonymous namespace
-Sema::SemaDiagnosticBuilder
-Sema::diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID,
- const FunctionDecl *FD) {
- assert(LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice &&
+SemaBase::SemaDiagnosticBuilder
+SemaOpenMP::diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID,
+ const FunctionDecl *FD) {
+ assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
"Expected OpenMP device compilation.");
SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
if (FD) {
- FunctionEmissionStatus FES = getEmissionStatus(FD);
+ Sema::FunctionEmissionStatus FES = SemaRef.getEmissionStatus(FD);
switch (FES) {
- case FunctionEmissionStatus::Emitted:
+ case Sema::FunctionEmissionStatus::Emitted:
Kind = SemaDiagnosticBuilder::K_Immediate;
break;
- case FunctionEmissionStatus::Unknown:
+ case Sema::FunctionEmissionStatus::Unknown:
// TODO: We should always delay diagnostics here in case a target
// region is in a function we do not emit. However, as the
// current diagnostics are associated with the function containing
@@ -2066,48 +2071,48 @@ Sema::diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID,
// on diagnostics for the target region itself. We need to anchor
// the diagnostics with the new generated function *or* ensure we
// emit diagnostics associated with the surrounding function.
- Kind = isOpenMPDeviceDelayedContext(*this)
+ Kind = isOpenMPDeviceDelayedContext(SemaRef)
? SemaDiagnosticBuilder::K_Deferred
: SemaDiagnosticBuilder::K_Immediate;
break;
- case FunctionEmissionStatus::TemplateDiscarded:
- case FunctionEmissionStatus::OMPDiscarded:
+ case Sema::FunctionEmissionStatus::TemplateDiscarded:
+ case Sema::FunctionEmissionStatus::OMPDiscarded:
Kind = SemaDiagnosticBuilder::K_Nop;
break;
- case FunctionEmissionStatus::CUDADiscarded:
+ case Sema::FunctionEmissionStatus::CUDADiscarded:
llvm_unreachable("CUDADiscarded unexpected in OpenMP device compilation");
break;
}
}
- return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, *this);
+ return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, SemaRef);
}
-Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPHostCode(SourceLocation Loc,
- unsigned DiagID,
- const FunctionDecl *FD) {
- assert(LangOpts.OpenMP && !LangOpts.OpenMPIsTargetDevice &&
+SemaBase::SemaDiagnosticBuilder
+SemaOpenMP::diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID,
+ const FunctionDecl *FD) {
+ assert(getLangOpts().OpenMP && !getLangOpts().OpenMPIsTargetDevice &&
"Expected OpenMP host compilation.");
SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
if (FD) {
- FunctionEmissionStatus FES = getEmissionStatus(FD);
+ Sema::FunctionEmissionStatus FES = SemaRef.getEmissionStatus(FD);
switch (FES) {
- case FunctionEmissionStatus::Emitted:
+ case Sema::FunctionEmissionStatus::Emitted:
Kind = SemaDiagnosticBuilder::K_Immediate;
break;
- case FunctionEmissionStatus::Unknown:
+ case Sema::FunctionEmissionStatus::Unknown:
Kind = SemaDiagnosticBuilder::K_Deferred;
break;
- case FunctionEmissionStatus::TemplateDiscarded:
- case FunctionEmissionStatus::OMPDiscarded:
- case FunctionEmissionStatus::CUDADiscarded:
+ case Sema::FunctionEmissionStatus::TemplateDiscarded:
+ case Sema::FunctionEmissionStatus::OMPDiscarded:
+ case Sema::FunctionEmissionStatus::CUDADiscarded:
Kind = SemaDiagnosticBuilder::K_Nop;
break;
}
}
- return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, *this);
+ return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, SemaRef);
}
static OpenMPDefaultmapClauseKind
@@ -2124,9 +2129,9 @@ getVariableCategoryFromDecl(const LangOptions &LO, const ValueDecl *VD) {
return OMPC_DEFAULTMAP_aggregate;
}
-bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
- unsigned OpenMPCaptureLevel) const {
- assert(LangOpts.OpenMP && "OpenMP is not allowed");
+bool SemaOpenMP::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
+ unsigned OpenMPCaptureLevel) const {
+ assert(getLangOpts().OpenMP && "OpenMP is not allowed");
ASTContext &Ctx = getASTContext();
bool IsByRef = true;
@@ -2228,7 +2233,7 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
dyn_cast<UnaryOperator>(Last->getAssociatedExpression());
if ((UO && UO->getOpcode() == UO_Deref) ||
isa<ArraySubscriptExpr>(Last->getAssociatedExpression()) ||
- isa<OMPArraySectionExpr>(Last->getAssociatedExpression()) ||
+ isa<ArraySectionExpr>(Last->getAssociatedExpression()) ||
isa<MemberExpr>(EI->getAssociatedExpression()) ||
isa<OMPArrayShapingExpr>(Last->getAssociatedExpression())) {
IsVariableAssociatedWithSection = true;
@@ -2252,7 +2257,7 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
!Ty->isAnyPointerType()) ||
!Ty->isScalarType() ||
DSAStack->isDefaultmapCapturedByRef(
- Level, getVariableCategoryFromDecl(LangOpts, D)) ||
+ Level, getVariableCategoryFromDecl(getLangOpts(), D)) ||
DSAStack->hasExplicitDSA(
D,
[](OpenMPClauseKind K, bool AppliedToPointee) {
@@ -2295,7 +2300,7 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
// instead.
if (!IsByRef && (Ctx.getTypeSizeInChars(Ty) >
Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
- Ctx.getAlignOfGlobalVarInChars(Ty) >
+ Ctx.getAlignOfGlobalVarInChars(Ty, dyn_cast<VarDecl>(D)) >
Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
IsByRef = true;
}
@@ -2303,17 +2308,17 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
return IsByRef;
}
-unsigned Sema::getOpenMPNestingLevel() const {
+unsigned SemaOpenMP::getOpenMPNestingLevel() const {
assert(getLangOpts().OpenMP);
return DSAStack->getNestingLevel();
}
-bool Sema::isInOpenMPTaskUntiedContext() const {
+bool SemaOpenMP::isInOpenMPTaskUntiedContext() const {
return isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
DSAStack->isUntiedRegion();
}
-bool Sema::isInOpenMPTargetExecutionDirective() const {
+bool SemaOpenMP::isInOpenMPTargetExecutionDirective() const {
return (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) &&
!DSAStack->isClauseParsingMode()) ||
DSAStack->hasDirective(
@@ -2324,7 +2329,7 @@ bool Sema::isInOpenMPTargetExecutionDirective() const {
false);
}
-bool Sema::isOpenMPRebuildMemberExpr(ValueDecl *D) {
+bool SemaOpenMP::isOpenMPRebuildMemberExpr(ValueDecl *D) {
// Only rebuild for Field.
if (!dyn_cast<FieldDecl>(D))
return false;
@@ -2347,9 +2352,9 @@ static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
DeclContext *CurContext,
bool AsExpression);
-VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
- unsigned StopAt) {
- assert(LangOpts.OpenMP && "OpenMP is not allowed");
+VarDecl *SemaOpenMP::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
+ unsigned StopAt) {
+ assert(getLangOpts().OpenMP && "OpenMP is not allowed");
D = getCanonicalDecl(D);
auto *VD = dyn_cast<VarDecl>(D);
@@ -2368,7 +2373,8 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
// 'target' we return true so that this global is also mapped to the device.
//
if (VD && !VD->hasLocalStorage() &&
- (getCurCapturedRegion() || getCurBlock() || getCurLambda())) {
+ (SemaRef.getCurCapturedRegion() || SemaRef.getCurBlock() ||
+ SemaRef.getCurLambda())) {
if (isInOpenMPTargetExecutionDirective()) {
DSAStackTy::DSAVarData DVarTop =
DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
@@ -2381,8 +2387,9 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
return nullptr;
CapturedRegionScopeInfo *CSI = nullptr;
for (FunctionScopeInfo *FSI : llvm::drop_begin(
- llvm::reverse(FunctionScopes),
- CheckScopeInfo ? (FunctionScopes.size() - (StopAt + 1)) : 0)) {
+ llvm::reverse(SemaRef.FunctionScopes),
+ CheckScopeInfo ? (SemaRef.FunctionScopes.size() - (StopAt + 1))
+ : 0)) {
if (!isa<CapturingScopeInfo>(FSI))
return nullptr;
if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
@@ -2401,7 +2408,7 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
if (isInOpenMPDeclareTargetContext()) {
// Try to mark variable as declare target if it is used in capturing
// regions.
- if (LangOpts.OpenMP <= 45 &&
+ if (getLangOpts().OpenMP <= 45 &&
!OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
checkDeclIsAllowedInOpenMPTarget(nullptr, VD);
return nullptr;
@@ -2411,7 +2418,7 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
if (CheckScopeInfo) {
bool OpenMPFound = false;
for (unsigned I = StopAt + 1; I > 0; --I) {
- FunctionScopeInfo *FSI = FunctionScopes[I - 1];
+ FunctionScopeInfo *FSI = SemaRef.FunctionScopes[I - 1];
if (!isa<CapturingScopeInfo>(FSI))
return nullptr;
if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
@@ -2476,22 +2483,23 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
VarDecl *VD = DSAStack->getImplicitFDCapExprDecl(FD);
if (VD)
return VD;
- if (getCurrentThisType().isNull())
+ if (SemaRef.getCurrentThisType().isNull())
return nullptr;
- Expr *ThisExpr = BuildCXXThisExpr(SourceLocation(), getCurrentThisType(),
- /*IsImplicit=*/true);
+ Expr *ThisExpr = SemaRef.BuildCXXThisExpr(SourceLocation(),
+ SemaRef.getCurrentThisType(),
+ /*IsImplicit=*/true);
const CXXScopeSpec CS = CXXScopeSpec();
- Expr *ME = BuildMemberExpr(ThisExpr, /*IsArrow=*/true, SourceLocation(),
- NestedNameSpecifierLoc(), SourceLocation(), FD,
- DeclAccessPair::make(FD, FD->getAccess()),
- /*HadMultipleCandidates=*/false,
- DeclarationNameInfo(), FD->getType(),
- VK_LValue, OK_Ordinary);
+ Expr *ME = SemaRef.BuildMemberExpr(
+ ThisExpr, /*IsArrow=*/true, SourceLocation(),
+ NestedNameSpecifierLoc(), SourceLocation(), FD,
+ DeclAccessPair::make(FD, FD->getAccess()),
+ /*HadMultipleCandidates=*/false, DeclarationNameInfo(), FD->getType(),
+ VK_LValue, OK_Ordinary);
OMPCapturedExprDecl *CD = buildCaptureDecl(
- *this, FD->getIdentifier(), ME, DVarPrivate.CKind != OMPC_private,
- CurContext->getParent(), /*AsExpression=*/false);
+ SemaRef, FD->getIdentifier(), ME, DVarPrivate.CKind != OMPC_private,
+ SemaRef.CurContext->getParent(), /*AsExpression=*/false);
DeclRefExpr *VDPrivateRefExpr = buildDeclRefExpr(
- *this, CD, CD->getType().getNonReferenceType(), SourceLocation());
+ SemaRef, CD, CD->getType().getNonReferenceType(), SourceLocation());
VD = cast<VarDecl>(VDPrivateRefExpr->getDecl());
DSAStack->addImplicitDefaultFirstprivateFD(FD, VD);
return VD;
@@ -2505,28 +2513,28 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
return nullptr;
}
-void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
- unsigned Level) const {
+void SemaOpenMP::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
+ unsigned Level) const {
FunctionScopesIndex -= getOpenMPCaptureLevels(DSAStack->getDirective(Level));
}
-void Sema::startOpenMPLoop() {
- assert(LangOpts.OpenMP && "OpenMP must be enabled.");
+void SemaOpenMP::startOpenMPLoop() {
+ assert(getLangOpts().OpenMP && "OpenMP must be enabled.");
if (isOpenMPLoopDirective(DSAStack->getCurrentDirective()))
DSAStack->loopInit();
}
-void Sema::startOpenMPCXXRangeFor() {
- assert(LangOpts.OpenMP && "OpenMP must be enabled.");
+void SemaOpenMP::startOpenMPCXXRangeFor() {
+ assert(getLangOpts().OpenMP && "OpenMP must be enabled.");
if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
DSAStack->resetPossibleLoopCounter();
DSAStack->loopStart();
}
}
-OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
- unsigned CapLevel) const {
- assert(LangOpts.OpenMP && "OpenMP is not allowed");
+OpenMPClauseKind SemaOpenMP::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
+ unsigned CapLevel) const {
+ assert(getLangOpts().OpenMP && "OpenMP is not allowed");
if (DSAStack->getCurrentDirective() != OMPD_unknown &&
(!DSAStack->isClauseParsingMode() ||
DSAStack->getParentDirective() != OMPD_unknown)) {
@@ -2546,7 +2554,8 @@ OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
}
if (DSAStack->hasExplicitDirective(isOpenMPTaskingDirective, Level)) {
bool IsTriviallyCopyable =
- D->getType().getNonReferenceType().isTriviallyCopyableType(Context) &&
+ D->getType().getNonReferenceType().isTriviallyCopyableType(
+ getASTContext()) &&
!D->getType()
.getNonReferenceType()
.getCanonicalType()
@@ -2577,7 +2586,7 @@ OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
DSAStack->loopStart();
return OMPC_private;
}
- if ((DSAStack->getPossiblyLoopCunter() == D->getCanonicalDecl() ||
+ if ((DSAStack->getPossiblyLoopCounter() == D->getCanonicalDecl() ||
DSAStack->isLoopControlVariable(D).first) &&
!DSAStack->hasExplicitDSA(
D, [](OpenMPClauseKind K, bool) { return K != OMPC_private; },
@@ -2620,9 +2629,9 @@ OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
: OMPC_unknown;
}
-void Sema::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D,
- unsigned Level) {
- assert(LangOpts.OpenMP && "OpenMP is not allowed");
+void SemaOpenMP::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D,
+ unsigned Level) {
+ assert(getLangOpts().OpenMP && "OpenMP is not allowed");
D = getCanonicalDecl(D);
OpenMPClauseKind OMPC = OMPC_unknown;
for (unsigned I = DSAStack->getNestingLevel() + 1; I > Level; --I) {
@@ -2649,18 +2658,19 @@ void Sema::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D,
NewLevel)) {
OMPC = OMPC_map;
if (DSAStack->mustBeFirstprivateAtLevel(
- NewLevel, getVariableCategoryFromDecl(LangOpts, D)))
+ NewLevel, getVariableCategoryFromDecl(getLangOpts(), D)))
OMPC = OMPC_firstprivate;
break;
}
}
if (OMPC != OMPC_unknown)
- FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, unsigned(OMPC)));
+ FD->addAttr(
+ OMPCaptureKindAttr::CreateImplicit(getASTContext(), unsigned(OMPC)));
}
-bool Sema::isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
- unsigned CaptureLevel) const {
- assert(LangOpts.OpenMP && "OpenMP is not allowed");
+bool SemaOpenMP::isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
+ unsigned CaptureLevel) const {
+ assert(getLangOpts().OpenMP && "OpenMP is not allowed");
// Return true if the current level is no longer enclosed in a target region.
SmallVector<OpenMPDirectiveKind, 4> Regions;
@@ -2672,9 +2682,9 @@ bool Sema::isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
Regions[CaptureLevel] != OMPD_task;
}
-bool Sema::isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
- unsigned CaptureLevel) const {
- assert(LangOpts.OpenMP && "OpenMP is not allowed");
+bool SemaOpenMP::isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
+ unsigned CaptureLevel) const {
+ assert(getLangOpts().OpenMP && "OpenMP is not allowed");
// Return true if the current level is no longer enclosed in a target region.
if (const auto *VD = dyn_cast<VarDecl>(D)) {
@@ -2686,8 +2696,8 @@ bool Sema::isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned NumLevels =
getOpenMPCaptureLevels(DSAStack->getDirective(Level));
if (Level == 0)
- // non-file scope static variale with default(firstprivate)
- // should be gloabal captured.
+ // non-file scope static variable with default(firstprivate)
+ // should be global captured.
return (NumLevels == CaptureLevel + 1 &&
(TopDVar.CKind != OMPC_shared ||
DSAStack->getDefaultDSA() == DSA_firstprivate));
@@ -2702,37 +2712,37 @@ bool Sema::isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
return true;
}
-void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
+void SemaOpenMP::DestroyDataSharingAttributesStack() { delete DSAStack; }
-void Sema::ActOnOpenMPBeginDeclareVariant(SourceLocation Loc,
- OMPTraitInfo &TI) {
+void SemaOpenMP::ActOnOpenMPBeginDeclareVariant(SourceLocation Loc,
+ OMPTraitInfo &TI) {
OMPDeclareVariantScopes.push_back(OMPDeclareVariantScope(TI));
}
-void Sema::ActOnOpenMPEndDeclareVariant() {
+void SemaOpenMP::ActOnOpenMPEndDeclareVariant() {
assert(isInOpenMPDeclareVariantScope() &&
"Not in OpenMP declare variant scope!");
OMPDeclareVariantScopes.pop_back();
}
-void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
- const FunctionDecl *Callee,
- SourceLocation Loc) {
- assert(LangOpts.OpenMP && "Expected OpenMP compilation mode.");
+void SemaOpenMP::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
+ const FunctionDecl *Callee,
+ SourceLocation Loc) {
+ assert(getLangOpts().OpenMP && "Expected OpenMP compilation mode.");
std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
OMPDeclareTargetDeclAttr::getDeviceType(Caller->getMostRecentDecl());
- // Ignore host functions during device analyzis.
- if (LangOpts.OpenMPIsTargetDevice &&
+ // Ignore host functions during device analysis.
+ if (getLangOpts().OpenMPIsTargetDevice &&
(!DevTy || *DevTy == OMPDeclareTargetDeclAttr::DT_Host))
return;
- // Ignore nohost functions during host analyzis.
- if (!LangOpts.OpenMPIsTargetDevice && DevTy &&
+ // Ignore nohost functions during host analysis.
+ if (!getLangOpts().OpenMPIsTargetDevice && DevTy &&
*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
return;
const FunctionDecl *FD = Callee->getMostRecentDecl();
DevTy = OMPDeclareTargetDeclAttr::getDeviceType(FD);
- if (LangOpts.OpenMPIsTargetDevice && DevTy &&
+ if (getLangOpts().OpenMPIsTargetDevice && DevTy &&
*DevTy == OMPDeclareTargetDeclAttr::DT_Host) {
// Diagnose host function called during device codegen.
StringRef HostDevTy =
@@ -2743,8 +2753,9 @@ void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
<< HostDevTy;
return;
}
- if (!LangOpts.OpenMPIsTargetDevice && !LangOpts.OpenMPOffloadMandatory &&
- DevTy && *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
+ if (!getLangOpts().OpenMPIsTargetDevice &&
+ !getLangOpts().OpenMPOffloadMandatory && DevTy &&
+ *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
// In OpenMP 5.2 or later, if the function has a host variant then allow
// that to be called instead
auto &&HasHostAttr = [](const FunctionDecl *Callee) {
@@ -2773,21 +2784,21 @@ void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
}
}
-void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
- const DeclarationNameInfo &DirName,
- Scope *CurScope, SourceLocation Loc) {
+void SemaOpenMP::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
+ const DeclarationNameInfo &DirName,
+ Scope *CurScope, SourceLocation Loc) {
DSAStack->push(DKind, DirName, CurScope, Loc);
- PushExpressionEvaluationContext(
- ExpressionEvaluationContext::PotentiallyEvaluated);
+ SemaRef.PushExpressionEvaluationContext(
+ Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
}
-void Sema::StartOpenMPClause(OpenMPClauseKind K) {
+void SemaOpenMP::StartOpenMPClause(OpenMPClauseKind K) {
DSAStack->setClauseParsingMode(K);
}
-void Sema::EndOpenMPClause() {
+void SemaOpenMP::EndOpenMPClause() {
DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
- CleanupVarDeclMarking();
+ SemaRef.CleanupVarDeclMarking();
}
static std::pair<ValueDecl *, bool>
@@ -2871,7 +2882,7 @@ static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
const DSAStackTy::DSAVarData &DVar,
bool IsLoopIterVar = false);
-void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
+void SemaOpenMP::EndOpenMPDSABlock(Stmt *CurDirective) {
// OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
// A variable of class type (or array thereof) that appears in a lastprivate
// clause requires an accessible, unambiguous default constructor for the
@@ -2898,15 +2909,15 @@ void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
// variable is not added to IdResolver, so the code in the OpenMP
// region uses original variable for proper diagnostics.
VarDecl *VDPrivate = buildVarDecl(
- *this, DE->getExprLoc(), Type.getUnqualifiedType(),
+ SemaRef, DE->getExprLoc(), Type.getUnqualifiedType(),
VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr, DRE);
- ActOnUninitializedDecl(VDPrivate);
+ SemaRef.ActOnUninitializedDecl(VDPrivate);
if (VDPrivate->isInvalidDecl()) {
PrivateCopies.push_back(nullptr);
continue;
}
PrivateCopies.push_back(buildDeclRefExpr(
- *this, VDPrivate, DE->getType(), DE->getExprLoc()));
+ SemaRef, VDPrivate, DE->getType(), DE->getExprLoc()));
} else {
// The variable is also a firstprivate, so initialization sequence
// for private copy is generated already.
@@ -2924,7 +2935,7 @@ void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
+ auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange);
if (Res.second)
// It will be analyzed later.
PrivateRefs.push_back(RefExpr);
@@ -2977,7 +2988,7 @@ void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
diag::err_omp_allocator_used_in_clauses)
<< D.Allocator->getSourceRange();
if (DVar.RefExpr)
- reportOriginalDsa(*this, DSAStack, VD, DVar);
+ reportOriginalDsa(SemaRef, DSAStack, VD, DVar);
else
Diag(MapExpr->getExprLoc(), diag::note_used_here)
<< MapExpr->getSourceRange();
@@ -2987,20 +2998,43 @@ void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
}
}
// Check allocate clauses.
- if (!CurContext->isDependentContext())
- checkAllocateClauses(*this, DSAStack, D->clauses());
- checkReductionClauses(*this, DSAStack, D->clauses());
+ if (!SemaRef.CurContext->isDependentContext())
+ checkAllocateClauses(SemaRef, DSAStack, D->clauses());
+ checkReductionClauses(SemaRef, DSAStack, D->clauses());
}
DSAStack->pop();
- DiscardCleanupsInEvaluationContext();
- PopExpressionEvaluationContext();
+ SemaRef.DiscardCleanupsInEvaluationContext();
+ SemaRef.PopExpressionEvaluationContext();
}
static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
Expr *NumIterations, Sema &SemaRef,
Scope *S, DSAStackTy *Stack);
+static bool finishLinearClauses(Sema &SemaRef, ArrayRef<OMPClause *> Clauses,
+ OMPLoopBasedDirective::HelperExprs &B,
+ DSAStackTy *Stack) {
+ assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) &&
+ "loop exprs were not built");
+
+ if (SemaRef.CurContext->isDependentContext())
+ return false;
+
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (OMPClause *C : Clauses) {
+ auto *LC = dyn_cast<OMPLinearClause>(C);
+ if (!LC)
+ continue;
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, SemaRef,
+ SemaRef.getCurScope(), Stack))
+ return true;
+ }
+
+ return false;
+}
+
namespace {
class VarDeclFilterCCC final : public CorrectionCandidateCallback {
@@ -3047,27 +3081,30 @@ public:
} // namespace
-ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
- CXXScopeSpec &ScopeSpec,
- const DeclarationNameInfo &Id,
- OpenMPDirectiveKind Kind) {
- LookupResult Lookup(*this, Id, LookupOrdinaryName);
- LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
+ExprResult SemaOpenMP::ActOnOpenMPIdExpression(Scope *CurScope,
+ CXXScopeSpec &ScopeSpec,
+ const DeclarationNameInfo &Id,
+ OpenMPDirectiveKind Kind) {
+ ASTContext &Context = getASTContext();
+ LookupResult Lookup(SemaRef, Id, Sema::LookupOrdinaryName);
+ SemaRef.LookupParsedName(Lookup, CurScope, &ScopeSpec,
+ /*ObjectType=*/QualType(),
+ /*AllowBuiltinCreation=*/true);
if (Lookup.isAmbiguous())
return ExprError();
VarDecl *VD;
if (!Lookup.isSingleResult()) {
- VarDeclFilterCCC CCC(*this);
+ VarDeclFilterCCC CCC(SemaRef);
if (TypoCorrection Corrected =
- CorrectTypo(Id, LookupOrdinaryName, CurScope, nullptr, CCC,
- CTK_ErrorRecovery)) {
- diagnoseTypo(Corrected,
- PDiag(Lookup.empty()
- ? diag::err_undeclared_var_use_suggest
- : diag::err_omp_expected_var_arg_suggest)
- << Id.getName());
+ SemaRef.CorrectTypo(Id, Sema::LookupOrdinaryName, CurScope, nullptr,
+ CCC, Sema::CTK_ErrorRecovery)) {
+ SemaRef.diagnoseTypo(Corrected,
+ PDiag(Lookup.empty()
+ ? diag::err_undeclared_var_use_suggest
+ : diag::err_omp_expected_var_arg_suggest)
+ << Id.getName());
VD = Corrected.getCorrectionDeclAs<VarDecl>();
} else {
Diag(Id.getLoc(), Lookup.empty() ? diag::err_undeclared_var_use
@@ -3101,7 +3138,7 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
// A threadprivate directive for file-scope variables must appear outside
// any definition or declaration.
if (CanonicalVD->getDeclContext()->isTranslationUnit() &&
- !getCurLexicalContext()->isTranslationUnit()) {
+ !SemaRef.getCurLexicalContext()->isTranslationUnit()) {
Diag(Id.getLoc(), diag::err_omp_var_scope)
<< getOpenMPDirectiveName(Kind) << VD;
bool IsDecl =
@@ -3116,7 +3153,7 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
// in the class definition, in the same scope in which the member
// variables are declared.
if (CanonicalVD->isStaticDataMember() &&
- !CanonicalVD->getDeclContext()->Equals(getCurLexicalContext())) {
+ !CanonicalVD->getDeclContext()->Equals(SemaRef.getCurLexicalContext())) {
Diag(Id.getLoc(), diag::err_omp_var_scope)
<< getOpenMPDirectiveName(Kind) << VD;
bool IsDecl =
@@ -3131,8 +3168,9 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
// outside any definition or declaration other than the namespace
// definition itself.
if (CanonicalVD->getDeclContext()->isNamespace() &&
- (!getCurLexicalContext()->isFileContext() ||
- !getCurLexicalContext()->Encloses(CanonicalVD->getDeclContext()))) {
+ (!SemaRef.getCurLexicalContext()->isFileContext() ||
+ !SemaRef.getCurLexicalContext()->Encloses(
+ CanonicalVD->getDeclContext()))) {
Diag(Id.getLoc(), diag::err_omp_var_scope)
<< getOpenMPDirectiveName(Kind) << VD;
bool IsDecl =
@@ -3146,7 +3184,7 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
// A threadprivate directive for static block-scope variables must appear
// in the scope of the variable and not in a nested scope.
if (CanonicalVD->isLocalVarDecl() && CurScope &&
- !isDeclInScope(ND, getCurLexicalContext(), CurScope)) {
+ !SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(), CurScope)) {
Diag(Id.getLoc(), diag::err_omp_var_scope)
<< getOpenMPDirectiveName(Kind) << VD;
bool IsDecl =
@@ -3174,11 +3212,11 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
Id.getLoc(), ExprType, VK_LValue);
}
-Sema::DeclGroupPtrTy
-Sema::ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
- ArrayRef<Expr *> VarList) {
+SemaOpenMP::DeclGroupPtrTy
+SemaOpenMP::ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
+ ArrayRef<Expr *> VarList) {
if (OMPThreadPrivateDecl *D = CheckOMPThreadPrivateDecl(Loc, VarList)) {
- CurContext->addDecl(D);
+ SemaRef.CurContext->addDecl(D);
return DeclGroupPtrTy::make(DeclGroupRef(D));
}
return nullptr;
@@ -3215,7 +3253,9 @@ public:
} // namespace
OMPThreadPrivateDecl *
-Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
+SemaOpenMP::CheckOMPThreadPrivateDecl(SourceLocation Loc,
+ ArrayRef<Expr *> VarList) {
+ ASTContext &Context = getASTContext();
SmallVector<Expr *, 8> Vars;
for (Expr *RefExpr : VarList) {
auto *DE = cast<DeclRefExpr>(RefExpr);
@@ -3235,8 +3275,8 @@ Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
// OpenMP [2.9.2, Restrictions, C/C++, p.10]
// A threadprivate variable must not have an incomplete type.
- if (RequireCompleteType(ILoc, VD->getType(),
- diag::err_omp_threadprivate_incomplete_type)) {
+ if (SemaRef.RequireCompleteType(
+ ILoc, VD->getType(), diag::err_omp_threadprivate_incomplete_type)) {
continue;
}
@@ -3274,7 +3314,7 @@ Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
// Check if initial value of threadprivate variable reference variable with
// local storage (it is not supported by runtime).
if (const Expr *Init = VD->getAnyInitializer()) {
- LocalVarRefChecker Checker(*this);
+ LocalVarRefChecker Checker(SemaRef);
if (Checker.Visit(Init))
continue;
}
@@ -3288,8 +3328,8 @@ Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
}
OMPThreadPrivateDecl *D = nullptr;
if (!Vars.empty()) {
- D = OMPThreadPrivateDecl::Create(Context, getCurLexicalContext(), Loc,
- Vars);
+ D = OMPThreadPrivateDecl::Create(Context, SemaRef.getCurLexicalContext(),
+ Loc, Vars);
D->setAccess(AS_public);
}
return D;
@@ -3395,10 +3435,9 @@ applyOMPAllocateAttribute(Sema &S, VarDecl *VD,
ML->DeclarationMarkedOpenMPAllocate(VD, A);
}
-Sema::DeclGroupPtrTy
-Sema::ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList,
- ArrayRef<OMPClause *> Clauses,
- DeclContext *Owner) {
+SemaOpenMP::DeclGroupPtrTy SemaOpenMP::ActOnOpenMPAllocateDirective(
+ SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses,
+ DeclContext *Owner) {
assert(Clauses.size() <= 2 && "Expected at most two clauses.");
Expr *Alignment = nullptr;
Expr *Allocator = nullptr;
@@ -3407,9 +3446,9 @@ Sema::ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList,
// allocate directives that appear in a target region must specify an
// allocator clause unless a requires directive with the dynamic_allocators
// clause is present in the same compilation unit.
- if (LangOpts.OpenMPIsTargetDevice &&
+ if (getLangOpts().OpenMPIsTargetDevice &&
!DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())
- targetDiag(Loc, diag::err_expected_allocator_clause);
+ SemaRef.targetDiag(Loc, diag::err_expected_allocator_clause);
} else {
for (const OMPClause *C : Clauses)
if (const auto *AC = dyn_cast<OMPAllocatorClause>(C))
@@ -3420,7 +3459,7 @@ Sema::ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList,
llvm_unreachable("Unexpected clause on allocate directive");
}
OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
- getAllocatorKind(*this, DSAStack, Allocator);
+ getAllocatorKind(SemaRef, DSAStack, Allocator);
SmallVector<Expr *, 8> Vars;
for (Expr *RefExpr : VarList) {
auto *DE = cast<DeclRefExpr>(RefExpr);
@@ -3435,7 +3474,7 @@ Sema::ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList,
// If the used several times in the allocate directive, the same allocator
// must be used.
- if (checkPreviousOMPAllocateAttribute(*this, DSAStack, RefExpr, VD,
+ if (checkPreviousOMPAllocateAttribute(SemaRef, DSAStack, RefExpr, VD,
AllocatorKind, Allocator))
continue;
@@ -3448,7 +3487,7 @@ Sema::ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList,
Diag(Allocator->getExprLoc(),
diag::err_omp_expected_predefined_allocator)
<< Allocator->getSourceRange();
- bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
+ bool IsDecl = VD->isThisDeclarationADefinition(getASTContext()) ==
VarDecl::DeclarationOnly;
Diag(VD->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
@@ -3458,45 +3497,46 @@ Sema::ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList,
}
Vars.push_back(RefExpr);
- applyOMPAllocateAttribute(*this, VD, AllocatorKind, Allocator, Alignment,
+ applyOMPAllocateAttribute(SemaRef, VD, AllocatorKind, Allocator, Alignment,
DE->getSourceRange());
}
if (Vars.empty())
return nullptr;
if (!Owner)
- Owner = getCurLexicalContext();
- auto *D = OMPAllocateDecl::Create(Context, Owner, Loc, Vars, Clauses);
+ Owner = SemaRef.getCurLexicalContext();
+ auto *D = OMPAllocateDecl::Create(getASTContext(), Owner, Loc, Vars, Clauses);
D->setAccess(AS_public);
Owner->addDecl(D);
return DeclGroupPtrTy::make(DeclGroupRef(D));
}
-Sema::DeclGroupPtrTy
-Sema::ActOnOpenMPRequiresDirective(SourceLocation Loc,
- ArrayRef<OMPClause *> ClauseList) {
+SemaOpenMP::DeclGroupPtrTy
+SemaOpenMP::ActOnOpenMPRequiresDirective(SourceLocation Loc,
+ ArrayRef<OMPClause *> ClauseList) {
OMPRequiresDecl *D = nullptr;
- if (!CurContext->isFileContext()) {
+ if (!SemaRef.CurContext->isFileContext()) {
Diag(Loc, diag::err_omp_invalid_scope) << "requires";
} else {
D = CheckOMPRequiresDecl(Loc, ClauseList);
if (D) {
- CurContext->addDecl(D);
+ SemaRef.CurContext->addDecl(D);
DSAStack->addRequiresDecl(D);
}
}
return DeclGroupPtrTy::make(DeclGroupRef(D));
}
-void Sema::ActOnOpenMPAssumesDirective(SourceLocation Loc,
- OpenMPDirectiveKind DKind,
- ArrayRef<std::string> Assumptions,
- bool SkippedClauses) {
+void SemaOpenMP::ActOnOpenMPAssumesDirective(SourceLocation Loc,
+ OpenMPDirectiveKind DKind,
+ ArrayRef<std::string> Assumptions,
+ bool SkippedClauses) {
if (!SkippedClauses && Assumptions.empty())
Diag(Loc, diag::err_omp_no_clause_for_directive)
<< llvm::omp::getAllAssumeClauseOptions()
<< llvm::omp::getOpenMPDirectiveName(DKind);
- auto *AA = AssumptionAttr::Create(Context, llvm::join(Assumptions, ","), Loc);
+ auto *AA =
+ OMPAssumeAttr::Create(getASTContext(), llvm::join(Assumptions, ","), Loc);
if (DKind == llvm::omp::Directive::OMPD_begin_assumes) {
OMPAssumeScoped.push_back(AA);
return;
@@ -3515,7 +3555,7 @@ void Sema::ActOnOpenMPAssumesDirective(SourceLocation Loc,
// declarations in included headers. To this end, we traverse all existing
// declaration contexts and annotate function declarations here.
SmallVector<DeclContext *, 8> DeclContexts;
- auto *Ctx = CurContext;
+ auto *Ctx = SemaRef.CurContext;
while (Ctx->getLexicalParent())
Ctx = Ctx->getLexicalParent();
DeclContexts.push_back(Ctx);
@@ -3539,13 +3579,14 @@ void Sema::ActOnOpenMPAssumesDirective(SourceLocation Loc,
}
}
-void Sema::ActOnOpenMPEndAssumesDirective() {
+void SemaOpenMP::ActOnOpenMPEndAssumesDirective() {
assert(isInOpenMPAssumeScope() && "Not in OpenMP assumes scope!");
OMPAssumeScoped.pop_back();
}
-OMPRequiresDecl *Sema::CheckOMPRequiresDecl(SourceLocation Loc,
- ArrayRef<OMPClause *> ClauseList) {
+OMPRequiresDecl *
+SemaOpenMP::CheckOMPRequiresDecl(SourceLocation Loc,
+ ArrayRef<OMPClause *> ClauseList) {
/// For target specific clauses, the requires directive cannot be
/// specified after the handling of any of the target regions in the
/// current compilation unit.
@@ -3576,8 +3617,8 @@ OMPRequiresDecl *Sema::CheckOMPRequiresDecl(SourceLocation Loc,
}
if (!DSAStack->hasDuplicateRequiresClause(ClauseList))
- return OMPRequiresDecl::Create(Context, getCurLexicalContext(), Loc,
- ClauseList);
+ return OMPRequiresDecl::Create(
+ getASTContext(), SemaRef.getCurLexicalContext(), Loc, ClauseList);
return nullptr;
}
@@ -3695,7 +3736,7 @@ class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
llvm::SmallVector<Expr *, 4> ImplicitMap[DefaultmapKindNum][OMPC_MAP_delete];
llvm::SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
ImplicitMapModifier[DefaultmapKindNum];
- Sema::VarsWithInheritedDSAType VarsWithInheritedDSA;
+ SemaOpenMP::VarsWithInheritedDSAType VarsWithInheritedDSA;
llvm::SmallDenseSet<const ValueDecl *, 4> ImplicitDeclarations;
void VisitSubCaptures(OMPExecutableDirective *S) {
@@ -3742,7 +3783,8 @@ public:
void VisitDeclRefExpr(DeclRefExpr *E) {
if (TryCaptureCXXThisMembers || E->isTypeDependent() ||
E->isValueDependent() || E->containsUnexpandedParameterPack() ||
- E->isInstantiationDependent())
+ E->isInstantiationDependent() ||
+ E->isNonOdrUse() == clang::NOUR_Unevaluated)
return;
if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
// Check the datasharing rules for the expressions in the clauses.
@@ -3863,7 +3905,7 @@ public:
if (SemaRef.LangOpts.OpenMP >= 50)
return !StackComponents.empty();
// Variable is used if it has been marked as an array, array
- // section, array shaping or the variable iself.
+ // section, array shaping or the variable itself.
return StackComponents.size() == 1 ||
llvm::all_of(
llvm::drop_begin(llvm::reverse(StackComponents)),
@@ -3871,7 +3913,7 @@ public:
MappableComponent &MC) {
return MC.getAssociatedDeclaration() ==
nullptr &&
- (isa<OMPArraySectionExpr>(
+ (isa<ArraySectionExpr>(
MC.getAssociatedExpression()) ||
isa<OMPArrayShapingExpr>(
MC.getAssociatedExpression()) ||
@@ -4049,7 +4091,7 @@ public:
// Do both expressions have the same kind?
if (CCI->getAssociatedExpression()->getStmtClass() !=
SC.getAssociatedExpression()->getStmtClass())
- if (!((isa<OMPArraySectionExpr>(
+ if (!((isa<ArraySectionExpr>(
SC.getAssociatedExpression()) ||
isa<OMPArrayShapingExpr>(
SC.getAssociatedExpression())) &&
@@ -4161,7 +4203,7 @@ public:
getImplicitMapModifier(OpenMPDefaultmapClauseKind Kind) const {
return ImplicitMapModifier[Kind];
}
- const Sema::VarsWithInheritedDSAType &getVarsWithInheritedDSA() const {
+ const SemaOpenMP::VarsWithInheritedDSAType &getVarsWithInheritedDSA() const {
return VarsWithInheritedDSA;
}
@@ -4193,446 +4235,195 @@ static void handleDeclareVariantConstructTrait(DSAStackTy *Stack,
Stack->handleConstructTrait(Traits, ScopeEntry);
}
-void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
- switch (DKind) {
- case OMPD_parallel:
- case OMPD_parallel_for:
- case OMPD_parallel_for_simd:
- case OMPD_parallel_sections:
- case OMPD_parallel_master:
- case OMPD_parallel_masked:
- case OMPD_parallel_loop:
- case OMPD_teams:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
- Sema::CapturedParamNameType Params[] = {
- std::make_pair(".global_tid.", KmpInt32PtrTy),
- std::make_pair(".bound_tid.", KmpInt32PtrTy),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- Params);
- break;
+static SmallVector<SemaOpenMP::CapturedParamNameType>
+getParallelRegionParams(Sema &SemaRef, bool LoopBoundSharing) {
+ ASTContext &Context = SemaRef.getASTContext();
+ QualType KmpInt32Ty =
+ Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1).withConst();
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
+ SmallVector<SemaOpenMP::CapturedParamNameType> Params{
+ std::make_pair(".global_tid.", KmpInt32PtrTy),
+ std::make_pair(".bound_tid.", KmpInt32PtrTy),
+ };
+ if (LoopBoundSharing) {
+ QualType KmpSizeTy = Context.getSizeType().withConst();
+ Params.push_back(std::make_pair(".previous.lb.", KmpSizeTy));
+ Params.push_back(std::make_pair(".previous.ub.", KmpSizeTy));
}
- case OMPD_target_teams:
- case OMPD_target_parallel:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_parallel_loop:
- case OMPD_target_teams_distribute:
- case OMPD_target_teams_distribute_simd: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
+
+ // __context with shared vars
+ Params.push_back(std::make_pair(StringRef(), QualType()));
+ return Params;
+}
+
+static SmallVector<SemaOpenMP::CapturedParamNameType>
+getTeamsRegionParams(Sema &SemaRef) {
+ return getParallelRegionParams(SemaRef, /*LoopBoundSharing=*/false);
+}
+
+static SmallVector<SemaOpenMP::CapturedParamNameType>
+getTaskRegionParams(Sema &SemaRef) {
+ ASTContext &Context = SemaRef.getASTContext();
+ QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
+ QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
+ QualType Args[] = {VoidPtrTy};
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.Variadic = true;
+ QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
+ SmallVector<SemaOpenMP::CapturedParamNameType> Params{
+ std::make_pair(".global_tid.", KmpInt32Ty),
+ std::make_pair(".part_id.", KmpInt32PtrTy),
+ std::make_pair(".privates.", VoidPtrTy),
+ std::make_pair(
+ ".copy_fn.",
+ Context.getPointerType(CopyFnType).withConst().withRestrict()),
+ std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ return Params;
+}
+
+static SmallVector<SemaOpenMP::CapturedParamNameType>
+getTargetRegionParams(Sema &SemaRef) {
+ ASTContext &Context = SemaRef.getASTContext();
+ SmallVector<SemaOpenMP::CapturedParamNameType> Params;
+ if (SemaRef.getLangOpts().OpenMPIsTargetDevice) {
QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
- QualType Args[] = {VoidPtrTy};
- FunctionProtoType::ExtProtoInfo EPI;
- EPI.Variadic = true;
- QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
- Sema::CapturedParamNameType Params[] = {
- std::make_pair(".global_tid.", KmpInt32Ty),
- std::make_pair(".part_id.", KmpInt32PtrTy),
- std::make_pair(".privates.", VoidPtrTy),
- std::make_pair(
- ".copy_fn.",
- Context.getPointerType(CopyFnType).withConst().withRestrict()),
- std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- Params, /*OpenMPCaptureLevel=*/0);
- // Mark this captured region as inlined, because we don't use outlined
- // function directly.
- getCurCapturedRegion()->TheCapturedDecl->addAttr(
- AlwaysInlineAttr::CreateImplicit(
- Context, {}, AlwaysInlineAttr::Keyword_forceinline));
- SmallVector<Sema::CapturedParamNameType, 2> ParamsTarget;
- if (getLangOpts().OpenMPIsTargetDevice)
- ParamsTarget.push_back(std::make_pair(StringRef("dyn_ptr"), VoidPtrTy));
- ParamsTarget.push_back(
- std::make_pair(StringRef(), QualType())); // __context with shared vars;
- // Start a captured region for 'target' with no implicit parameters.
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- ParamsTarget,
- /*OpenMPCaptureLevel=*/1);
- Sema::CapturedParamNameType ParamsTeamsOrParallel[] = {
- std::make_pair(".global_tid.", KmpInt32PtrTy),
- std::make_pair(".bound_tid.", KmpInt32PtrTy),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- // Start a captured region for 'teams' or 'parallel'. Both regions have
- // the same implicit parameters.
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- ParamsTeamsOrParallel, /*OpenMPCaptureLevel=*/2);
- break;
+ Params.push_back(std::make_pair(StringRef("dyn_ptr"), VoidPtrTy));
}
- case OMPD_target:
- case OMPD_target_simd: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
- QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
- QualType Args[] = {VoidPtrTy};
- FunctionProtoType::ExtProtoInfo EPI;
- EPI.Variadic = true;
- QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
- Sema::CapturedParamNameType Params[] = {
- std::make_pair(".global_tid.", KmpInt32Ty),
- std::make_pair(".part_id.", KmpInt32PtrTy),
- std::make_pair(".privates.", VoidPtrTy),
- std::make_pair(
- ".copy_fn.",
- Context.getPointerType(CopyFnType).withConst().withRestrict()),
- std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- Params, /*OpenMPCaptureLevel=*/0);
- // Mark this captured region as inlined, because we don't use outlined
- // function directly.
- getCurCapturedRegion()->TheCapturedDecl->addAttr(
- AlwaysInlineAttr::CreateImplicit(
- Context, {}, AlwaysInlineAttr::Keyword_forceinline));
- SmallVector<Sema::CapturedParamNameType, 2> ParamsTarget;
- if (getLangOpts().OpenMPIsTargetDevice)
- ParamsTarget.push_back(std::make_pair(StringRef("dyn_ptr"), VoidPtrTy));
- ParamsTarget.push_back(
- std::make_pair(StringRef(), QualType())); // __context with shared vars;
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- ParamsTarget,
- /*OpenMPCaptureLevel=*/1);
- break;
+ // __context with shared vars
+ Params.push_back(std::make_pair(StringRef(), QualType()));
+ return Params;
+}
+
+static SmallVector<SemaOpenMP::CapturedParamNameType>
+getUnknownRegionParams(Sema &SemaRef) {
+ SmallVector<SemaOpenMP::CapturedParamNameType> Params{
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ return Params;
+}
+
+static SmallVector<SemaOpenMP::CapturedParamNameType>
+getTaskloopRegionParams(Sema &SemaRef) {
+ ASTContext &Context = SemaRef.getASTContext();
+ QualType KmpInt32Ty =
+ Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1).withConst();
+ QualType KmpUInt64Ty =
+ Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0).withConst();
+ QualType KmpInt64Ty =
+ Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1).withConst();
+ QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
+ QualType Args[] = {VoidPtrTy};
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.Variadic = true;
+ QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
+ SmallVector<SemaOpenMP::CapturedParamNameType> Params{
+ std::make_pair(".global_tid.", KmpInt32Ty),
+ std::make_pair(".part_id.", KmpInt32PtrTy),
+ std::make_pair(".privates.", VoidPtrTy),
+ std::make_pair(
+ ".copy_fn.",
+ Context.getPointerType(CopyFnType).withConst().withRestrict()),
+ std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
+ std::make_pair(".lb.", KmpUInt64Ty),
+ std::make_pair(".ub.", KmpUInt64Ty),
+ std::make_pair(".st.", KmpInt64Ty),
+ std::make_pair(".liter.", KmpInt32Ty),
+ std::make_pair(".reductions.", VoidPtrTy),
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ return Params;
+}
+
+static void processCapturedRegions(Sema &SemaRef, OpenMPDirectiveKind DKind,
+ Scope *CurScope, SourceLocation Loc) {
+ SmallVector<OpenMPDirectiveKind> Regions;
+ getOpenMPCaptureRegions(Regions, DKind);
+
+ bool LoopBoundSharing = isOpenMPLoopBoundSharingDirective(DKind);
+
+ auto MarkAsInlined = [&](CapturedRegionScopeInfo *CSI) {
+ CSI->TheCapturedDecl->addAttr(AlwaysInlineAttr::CreateImplicit(
+ SemaRef.getASTContext(), {}, AlwaysInlineAttr::Keyword_forceinline));
+ };
+
+ for (auto [Level, RKind] : llvm::enumerate(Regions)) {
+ switch (RKind) {
+ // All region kinds that can be returned from `getOpenMPCaptureRegions`
+ // are listed here.
+ case OMPD_parallel:
+ SemaRef.ActOnCapturedRegionStart(
+ Loc, CurScope, CR_OpenMP,
+ getParallelRegionParams(SemaRef, LoopBoundSharing), Level);
+ break;
+ case OMPD_teams:
+ SemaRef.ActOnCapturedRegionStart(Loc, CurScope, CR_OpenMP,
+ getTeamsRegionParams(SemaRef), Level);
+ break;
+ case OMPD_task:
+ SemaRef.ActOnCapturedRegionStart(Loc, CurScope, CR_OpenMP,
+ getTaskRegionParams(SemaRef), Level);
+ // Mark this captured region as inlined, because we don't use outlined
+ // function directly.
+ MarkAsInlined(SemaRef.getCurCapturedRegion());
+ break;
+ case OMPD_taskloop:
+ SemaRef.ActOnCapturedRegionStart(Loc, CurScope, CR_OpenMP,
+ getTaskloopRegionParams(SemaRef), Level);
+ // Mark this captured region as inlined, because we don't use outlined
+ // function directly.
+ MarkAsInlined(SemaRef.getCurCapturedRegion());
+ break;
+ case OMPD_target:
+ SemaRef.ActOnCapturedRegionStart(Loc, CurScope, CR_OpenMP,
+ getTargetRegionParams(SemaRef), Level);
+ break;
+ case OMPD_unknown:
+ SemaRef.ActOnCapturedRegionStart(Loc, CurScope, CR_OpenMP,
+ getUnknownRegionParams(SemaRef));
+ break;
+ case OMPD_metadirective:
+ case OMPD_nothing:
+ default:
+ llvm_unreachable("Unexpected capture region");
+ }
}
+}
+
+void SemaOpenMP::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind,
+ Scope *CurScope) {
+ switch (DKind) {
case OMPD_atomic:
case OMPD_critical:
- case OMPD_section:
- case OMPD_master:
case OMPD_masked:
+ case OMPD_master:
+ case OMPD_section:
case OMPD_tile:
case OMPD_unroll:
+ case OMPD_reverse:
+ case OMPD_interchange:
break;
- case OMPD_loop:
- // TODO: 'loop' may require additional parameters depending on the binding.
- // Treat similar to OMPD_simd/OMPD_for for now.
- case OMPD_simd:
- case OMPD_for:
- case OMPD_for_simd:
- case OMPD_sections:
- case OMPD_single:
- case OMPD_taskgroup:
- case OMPD_distribute:
- case OMPD_distribute_simd:
- case OMPD_ordered:
- case OMPD_scope:
- case OMPD_target_data:
- case OMPD_dispatch: {
- Sema::CapturedParamNameType Params[] = {
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- Params);
- break;
- }
- case OMPD_task: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
- QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
- QualType Args[] = {VoidPtrTy};
- FunctionProtoType::ExtProtoInfo EPI;
- EPI.Variadic = true;
- QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
- Sema::CapturedParamNameType Params[] = {
- std::make_pair(".global_tid.", KmpInt32Ty),
- std::make_pair(".part_id.", KmpInt32PtrTy),
- std::make_pair(".privates.", VoidPtrTy),
- std::make_pair(
- ".copy_fn.",
- Context.getPointerType(CopyFnType).withConst().withRestrict()),
- std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- Params);
- // Mark this captured region as inlined, because we don't use outlined
- // function directly.
- getCurCapturedRegion()->TheCapturedDecl->addAttr(
- AlwaysInlineAttr::CreateImplicit(
- Context, {}, AlwaysInlineAttr::Keyword_forceinline));
- break;
- }
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_masked_taskloop:
- case OMPD_masked_taskloop_simd:
- case OMPD_master_taskloop_simd: {
- QualType KmpInt32Ty =
- Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
- .withConst();
- QualType KmpUInt64Ty =
- Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
- .withConst();
- QualType KmpInt64Ty =
- Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
- .withConst();
- QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
- QualType Args[] = {VoidPtrTy};
- FunctionProtoType::ExtProtoInfo EPI;
- EPI.Variadic = true;
- QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
- Sema::CapturedParamNameType Params[] = {
- std::make_pair(".global_tid.", KmpInt32Ty),
- std::make_pair(".part_id.", KmpInt32PtrTy),
- std::make_pair(".privates.", VoidPtrTy),
- std::make_pair(
- ".copy_fn.",
- Context.getPointerType(CopyFnType).withConst().withRestrict()),
- std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
- std::make_pair(".lb.", KmpUInt64Ty),
- std::make_pair(".ub.", KmpUInt64Ty),
- std::make_pair(".st.", KmpInt64Ty),
- std::make_pair(".liter.", KmpInt32Ty),
- std::make_pair(".reductions.", VoidPtrTy),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- Params);
- // Mark this captured region as inlined, because we don't use outlined
- // function directly.
- getCurCapturedRegion()->TheCapturedDecl->addAttr(
- AlwaysInlineAttr::CreateImplicit(
- Context, {}, AlwaysInlineAttr::Keyword_forceinline));
- break;
- }
- case OMPD_parallel_masked_taskloop:
- case OMPD_parallel_masked_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_master_taskloop_simd: {
- QualType KmpInt32Ty =
- Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
- .withConst();
- QualType KmpUInt64Ty =
- Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
- .withConst();
- QualType KmpInt64Ty =
- Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
- .withConst();
- QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
- Sema::CapturedParamNameType ParamsParallel[] = {
- std::make_pair(".global_tid.", KmpInt32PtrTy),
- std::make_pair(".bound_tid.", KmpInt32PtrTy),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- // Start a captured region for 'parallel'.
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- ParamsParallel, /*OpenMPCaptureLevel=*/0);
- QualType Args[] = {VoidPtrTy};
- FunctionProtoType::ExtProtoInfo EPI;
- EPI.Variadic = true;
- QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
- Sema::CapturedParamNameType Params[] = {
- std::make_pair(".global_tid.", KmpInt32Ty),
- std::make_pair(".part_id.", KmpInt32PtrTy),
- std::make_pair(".privates.", VoidPtrTy),
- std::make_pair(
- ".copy_fn.",
- Context.getPointerType(CopyFnType).withConst().withRestrict()),
- std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
- std::make_pair(".lb.", KmpUInt64Ty),
- std::make_pair(".ub.", KmpUInt64Ty),
- std::make_pair(".st.", KmpInt64Ty),
- std::make_pair(".liter.", KmpInt32Ty),
- std::make_pair(".reductions.", VoidPtrTy),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- Params, /*OpenMPCaptureLevel=*/1);
- // Mark this captured region as inlined, because we don't use outlined
- // function directly.
- getCurCapturedRegion()->TheCapturedDecl->addAttr(
- AlwaysInlineAttr::CreateImplicit(
- Context, {}, AlwaysInlineAttr::Keyword_forceinline));
- break;
- }
- case OMPD_distribute_parallel_for_simd:
- case OMPD_distribute_parallel_for: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
- Sema::CapturedParamNameType Params[] = {
- std::make_pair(".global_tid.", KmpInt32PtrTy),
- std::make_pair(".bound_tid.", KmpInt32PtrTy),
- std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
- std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- Params);
- break;
- }
- case OMPD_target_teams_loop:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
- QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
-
- QualType Args[] = {VoidPtrTy};
- FunctionProtoType::ExtProtoInfo EPI;
- EPI.Variadic = true;
- QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
- Sema::CapturedParamNameType Params[] = {
- std::make_pair(".global_tid.", KmpInt32Ty),
- std::make_pair(".part_id.", KmpInt32PtrTy),
- std::make_pair(".privates.", VoidPtrTy),
- std::make_pair(
- ".copy_fn.",
- Context.getPointerType(CopyFnType).withConst().withRestrict()),
- std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- Params, /*OpenMPCaptureLevel=*/0);
- // Mark this captured region as inlined, because we don't use outlined
- // function directly.
- getCurCapturedRegion()->TheCapturedDecl->addAttr(
- AlwaysInlineAttr::CreateImplicit(
- Context, {}, AlwaysInlineAttr::Keyword_forceinline));
- SmallVector<Sema::CapturedParamNameType, 2> ParamsTarget;
- if (getLangOpts().OpenMPIsTargetDevice)
- ParamsTarget.push_back(std::make_pair(StringRef("dyn_ptr"), VoidPtrTy));
- ParamsTarget.push_back(
- std::make_pair(StringRef(), QualType())); // __context with shared vars;
- // Start a captured region for 'target' with no implicit parameters.
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- ParamsTarget, /*OpenMPCaptureLevel=*/1);
-
- Sema::CapturedParamNameType ParamsTeams[] = {
- std::make_pair(".global_tid.", KmpInt32PtrTy),
- std::make_pair(".bound_tid.", KmpInt32PtrTy),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- // Start a captured region for 'target' with no implicit parameters.
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- ParamsTeams, /*OpenMPCaptureLevel=*/2);
-
- Sema::CapturedParamNameType ParamsParallel[] = {
- std::make_pair(".global_tid.", KmpInt32PtrTy),
- std::make_pair(".bound_tid.", KmpInt32PtrTy),
- std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
- std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- // Start a captured region for 'teams' or 'parallel'. Both regions have
- // the same implicit parameters.
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- ParamsParallel, /*OpenMPCaptureLevel=*/3);
+ default:
+ processCapturedRegions(SemaRef, DKind, CurScope,
+ DSAStack->getConstructLoc());
break;
}
- case OMPD_teams_loop:
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
-
- Sema::CapturedParamNameType ParamsTeams[] = {
- std::make_pair(".global_tid.", KmpInt32PtrTy),
- std::make_pair(".bound_tid.", KmpInt32PtrTy),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- // Start a captured region for 'target' with no implicit parameters.
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- ParamsTeams, /*OpenMPCaptureLevel=*/0);
-
- Sema::CapturedParamNameType ParamsParallel[] = {
- std::make_pair(".global_tid.", KmpInt32PtrTy),
- std::make_pair(".bound_tid.", KmpInt32PtrTy),
- std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
- std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- // Start a captured region for 'teams' or 'parallel'. Both regions have
- // the same implicit parameters.
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- ParamsParallel, /*OpenMPCaptureLevel=*/1);
- break;
- }
- case OMPD_target_update:
- case OMPD_target_enter_data:
- case OMPD_target_exit_data: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
- QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
- QualType Args[] = {VoidPtrTy};
- FunctionProtoType::ExtProtoInfo EPI;
- EPI.Variadic = true;
- QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
- Sema::CapturedParamNameType Params[] = {
- std::make_pair(".global_tid.", KmpInt32Ty),
- std::make_pair(".part_id.", KmpInt32PtrTy),
- std::make_pair(".privates.", VoidPtrTy),
- std::make_pair(
- ".copy_fn.",
- Context.getPointerType(CopyFnType).withConst().withRestrict()),
- std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- Params);
- // Mark this captured region as inlined, because we don't use outlined
- // function directly.
- getCurCapturedRegion()->TheCapturedDecl->addAttr(
- AlwaysInlineAttr::CreateImplicit(
- Context, {}, AlwaysInlineAttr::Keyword_forceinline));
- break;
- }
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_taskyield:
- case OMPD_error:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_cancellation_point:
- case OMPD_cancel:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_declare_simd:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_requires:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_metadirective:
- llvm_unreachable("OpenMP Directive is not allowed");
- case OMPD_unknown:
- default:
- llvm_unreachable("Unknown OpenMP directive");
- }
- DSAStack->setContext(CurContext);
- handleDeclareVariantConstructTrait(DSAStack, DKind, /* ScopeEntry */ true);
+ DSAStack->setContext(SemaRef.CurContext);
+ handleDeclareVariantConstructTrait(DSAStack, DKind, /*ScopeEntry=*/true);
}
-int Sema::getNumberOfConstructScopes(unsigned Level) const {
+int SemaOpenMP::getNumberOfConstructScopes(unsigned Level) const {
return getOpenMPCaptureLevels(DSAStack->getDirective(Level));
}
-int Sema::getOpenMPCaptureLevels(OpenMPDirectiveKind DKind) {
+int SemaOpenMP::getOpenMPCaptureLevels(OpenMPDirectiveKind DKind) {
SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
getOpenMPCaptureRegions(CaptureRegions, DKind);
return CaptureRegions.size();
@@ -4672,7 +4463,7 @@ static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
bool WithInit) {
OMPCapturedExprDecl *CD;
- if (VarDecl *VD = S.isOpenMPCapturedDecl(D))
+ if (VarDecl *VD = S.OpenMP().isOpenMPCapturedDecl(D))
CD = cast<OMPCapturedExprDecl>(VD);
else
CD = buildCaptureDecl(S, D->getIdentifier(), CaptureExpr, WithInit,
@@ -4724,7 +4515,7 @@ public:
: S(S), ErrorFound(ErrorFound), DKind(DKind) {}
~CaptureRegionUnwinderRAII() {
if (ErrorFound) {
- int ThisCaptureLevel = S.getOpenMPCaptureLevels(DKind);
+ int ThisCaptureLevel = S.OpenMP().getOpenMPCaptureLevels(DKind);
while (--ThisCaptureLevel >= 0)
S.ActOnCapturedRegionError();
}
@@ -4732,10 +4523,10 @@ public:
};
} // namespace
-void Sema::tryCaptureOpenMPLambdas(ValueDecl *V) {
+void SemaOpenMP::tryCaptureOpenMPLambdas(ValueDecl *V) {
// Capture variables captured by reference in lambdas for target-based
// directives.
- if (!CurContext->isDependentContext() &&
+ if (!SemaRef.CurContext->isDependentContext() &&
(isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) ||
isOpenMPTargetDataManagementDirective(
DSAStack->getCurrentDirective()))) {
@@ -4755,14 +4546,14 @@ void Sema::tryCaptureOpenMPLambdas(ValueDecl *V) {
if (LC.getCaptureKind() == LCK_ByRef) {
VarDecl *VD = cast<VarDecl>(LC.getCapturedVar());
DeclContext *VDC = VD->getDeclContext();
- if (!VDC->Encloses(CurContext))
+ if (!VDC->Encloses(SemaRef.CurContext))
continue;
- MarkVariableReferenced(LC.getLocation(), VD);
+ SemaRef.MarkVariableReferenced(LC.getLocation(), VD);
} else if (LC.getCaptureKind() == LCK_This) {
- QualType ThisTy = getCurrentThisType();
- if (!ThisTy.isNull() &&
- Context.typesAreCompatible(ThisTy, ThisCapture->getType()))
- CheckCXXThisCapture(LC.getLocation());
+ QualType ThisTy = SemaRef.getCurrentThisType();
+ if (!ThisTy.isNull() && getASTContext().typesAreCompatible(
+ ThisTy, ThisCapture->getType()))
+ SemaRef.CheckCXXThisCapture(LC.getLocation());
}
}
}
@@ -4802,20 +4593,16 @@ static bool checkOrderedOrderSpecified(Sema &S,
return false;
}
-StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
- ArrayRef<OMPClause *> Clauses) {
+StmtResult SemaOpenMP::ActOnOpenMPRegionEnd(StmtResult S,
+ ArrayRef<OMPClause *> Clauses) {
handleDeclareVariantConstructTrait(DSAStack, DSAStack->getCurrentDirective(),
/* ScopeEntry */ false);
- if (DSAStack->getCurrentDirective() == OMPD_atomic ||
- DSAStack->getCurrentDirective() == OMPD_critical ||
- DSAStack->getCurrentDirective() == OMPD_section ||
- DSAStack->getCurrentDirective() == OMPD_master ||
- DSAStack->getCurrentDirective() == OMPD_masked)
+ if (!isOpenMPCapturingDirective(DSAStack->getCurrentDirective()))
return S;
bool ErrorFound = false;
CaptureRegionUnwinderRAII CaptureRegionUnwinder(
- *this, ErrorFound, DSAStack->getCurrentDirective());
+ SemaRef, ErrorFound, DSAStack->getCurrentDirective());
if (!S.isUsable()) {
ErrorFound = true;
return StmtError();
@@ -4829,7 +4616,7 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
SmallVector<const OMPClauseWithPreInit *, 4> PICs;
// This is required for proper codegen.
for (OMPClause *Clause : Clauses) {
- if (!LangOpts.OpenMPSimd &&
+ if (!getLangOpts().OpenMPSimd &&
(isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) ||
DSAStack->getCurrentDirective() == OMPD_target) &&
Clause->getClauseKind() == OMPC_in_reduction) {
@@ -4838,7 +4625,7 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
auto *IRC = cast<OMPInReductionClause>(Clause);
for (Expr *E : IRC->taskgroup_descriptors())
if (E)
- MarkDeclarationsReferencedInExpr(E);
+ SemaRef.MarkDeclarationsReferencedInExpr(E);
}
if (isOpenMPPrivate(Clause->getClauseKind()) ||
Clause->getClauseKind() == OMPC_copyprivate ||
@@ -4849,21 +4636,17 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
// Mark all variables in private list clauses as used in inner region.
for (Stmt *VarRef : Clause->children()) {
if (auto *E = cast_or_null<Expr>(VarRef)) {
- MarkDeclarationsReferencedInExpr(E);
+ SemaRef.MarkDeclarationsReferencedInExpr(E);
}
}
DSAStack->setForceVarCapturing(/*V=*/false);
- } else if (isOpenMPLoopTransformationDirective(
- DSAStack->getCurrentDirective())) {
- assert(CaptureRegions.empty() &&
- "No captured regions in loop transformation directives.");
} else if (CaptureRegions.size() > 1 ||
CaptureRegions.back() != OMPD_unknown) {
if (auto *C = OMPClauseWithPreInit::get(Clause))
PICs.push_back(C);
if (auto *C = OMPClauseWithPostUpdate::get(Clause)) {
if (Expr *E = C->getPostUpdateExpr())
- MarkDeclarationsReferencedInExpr(E);
+ SemaRef.MarkDeclarationsReferencedInExpr(E);
}
}
if (Clause->getClauseKind() == OMPC_schedule)
@@ -4875,7 +4658,7 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
}
// Capture allocator expressions if used.
for (Expr *E : DSAStack->getInnerAllocators())
- MarkDeclarationsReferencedInExpr(E);
+ SemaRef.MarkDeclarationsReferencedInExpr(E);
// OpenMP, 2.7.1 Loop Construct, Restrictions
// The nonmonotonic modifier cannot be specified if an ordered clause is
// specified.
@@ -4897,7 +4680,7 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
// OpenMP 5.0, 2.9.2 Worksharing-Loop Construct, Restrictions.
// If an order(concurrent) clause is present, an ordered clause may not appear
// on the same directive.
- if (checkOrderedOrderSpecified(*this, Clauses))
+ if (checkOrderedOrderSpecified(SemaRef, Clauses))
ErrorFound = true;
if (!LCs.empty() && OC && OC->getNumForLoops()) {
for (const OMPLinearClause *C : LCs) {
@@ -4934,7 +4717,8 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
CaptureRegion == OMPD_unknown) {
if (auto *DS = cast_or_null<DeclStmt>(C->getPreInitStmt())) {
for (Decl *D : DS->decls())
- MarkVariableReferenced(D->getLocation(), cast<VarDecl>(D));
+ SemaRef.MarkVariableReferenced(D->getLocation(),
+ cast<VarDecl>(D));
}
}
}
@@ -4948,7 +4732,7 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
++I) {
OMPUsesAllocatorsClause::Data D = UAC->getAllocatorData(I);
if (Expr *E = D.AllocatorTraits)
- MarkDeclarationsReferencedInExpr(E);
+ SemaRef.MarkDeclarationsReferencedInExpr(E);
}
continue;
}
@@ -4962,17 +4746,18 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
if (RC->getModifier() != OMPC_REDUCTION_inscan)
continue;
for (Expr *E : RC->copy_array_temps())
- MarkDeclarationsReferencedInExpr(E);
+ if (E)
+ SemaRef.MarkDeclarationsReferencedInExpr(E);
}
if (auto *AC = dyn_cast<OMPAlignedClause>(C)) {
for (Expr *E : AC->varlists())
- MarkDeclarationsReferencedInExpr(E);
+ SemaRef.MarkDeclarationsReferencedInExpr(E);
}
}
}
if (++CompletedRegions == CaptureRegions.size())
DSAStack->setBodyComplete();
- SR = ActOnCapturedRegionEnd(SR.get());
+ SR = SemaRef.ActOnCapturedRegionEnd(SR.get());
}
return SR;
}
@@ -4999,295 +4784,284 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
OpenMPDirectiveKind CancelRegion,
OpenMPBindClauseKind BindKind,
SourceLocation StartLoc) {
- if (Stack->getCurScope()) {
- OpenMPDirectiveKind ParentRegion = Stack->getParentDirective();
- OpenMPDirectiveKind OffendingRegion = ParentRegion;
- bool NestingProhibited = false;
- bool CloseNesting = true;
- bool OrphanSeen = false;
- enum {
- NoRecommend,
- ShouldBeInParallelRegion,
- ShouldBeInOrderedRegion,
- ShouldBeInTargetRegion,
- ShouldBeInTeamsRegion,
- ShouldBeInLoopSimdRegion,
- } Recommend = NoRecommend;
- if (SemaRef.LangOpts.OpenMP >= 51 && Stack->isParentOrderConcurrent() &&
- CurrentRegion != OMPD_simd && CurrentRegion != OMPD_loop &&
- CurrentRegion != OMPD_parallel &&
- !isOpenMPCombinedParallelADirective(CurrentRegion)) {
- SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_order)
- << getOpenMPDirectiveName(CurrentRegion);
+ if (!Stack->getCurScope())
+ return false;
+
+ OpenMPDirectiveKind ParentRegion = Stack->getParentDirective();
+ OpenMPDirectiveKind OffendingRegion = ParentRegion;
+ bool NestingProhibited = false;
+ bool CloseNesting = true;
+ bool OrphanSeen = false;
+ enum {
+ NoRecommend,
+ ShouldBeInParallelRegion,
+ ShouldBeInOrderedRegion,
+ ShouldBeInTargetRegion,
+ ShouldBeInTeamsRegion,
+ ShouldBeInLoopSimdRegion,
+ } Recommend = NoRecommend;
+
+ SmallVector<OpenMPDirectiveKind, 4> LeafOrComposite;
+ ArrayRef<OpenMPDirectiveKind> ParentLOC =
+ getLeafOrCompositeConstructs(ParentRegion, LeafOrComposite);
+ OpenMPDirectiveKind EnclosingConstruct = ParentLOC.back();
+
+ if (SemaRef.LangOpts.OpenMP >= 51 && Stack->isParentOrderConcurrent() &&
+ CurrentRegion != OMPD_simd && CurrentRegion != OMPD_loop &&
+ CurrentRegion != OMPD_parallel &&
+ !isOpenMPCombinedParallelADirective(CurrentRegion)) {
+ SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_order)
+ << getOpenMPDirectiveName(CurrentRegion);
+ return true;
+ }
+ if (isOpenMPSimdDirective(ParentRegion) &&
+ ((SemaRef.LangOpts.OpenMP <= 45 && CurrentRegion != OMPD_ordered) ||
+ (SemaRef.LangOpts.OpenMP >= 50 && CurrentRegion != OMPD_ordered &&
+ CurrentRegion != OMPD_simd && CurrentRegion != OMPD_atomic &&
+ CurrentRegion != OMPD_scan))) {
+ // OpenMP [2.16, Nesting of Regions]
+ // OpenMP constructs may not be nested inside a simd region.
+ // OpenMP [2.8.1,simd Construct, Restrictions]
+ // An ordered construct with the simd clause is the only OpenMP
+ // construct that can appear in the simd region.
+ // Allowing a SIMD construct nested in another SIMD construct is an
+ // extension. The OpenMP 4.5 spec does not allow it. Issue a warning
+ // message.
+ // OpenMP 5.0 [2.9.3.1, simd Construct, Restrictions]
+ // The only OpenMP constructs that can be encountered during execution of
+ // a simd region are the atomic construct, the loop construct, the simd
+ // construct and the ordered construct with the simd clause.
+ SemaRef.Diag(StartLoc, (CurrentRegion != OMPD_simd)
+ ? diag::err_omp_prohibited_region_simd
+ : diag::warn_omp_nesting_simd)
+ << (SemaRef.LangOpts.OpenMP >= 50 ? 1 : 0);
+ return CurrentRegion != OMPD_simd;
+ }
+ if (EnclosingConstruct == OMPD_atomic) {
+ // OpenMP [2.16, Nesting of Regions]
+ // OpenMP constructs may not be nested inside an atomic region.
+ SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_atomic);
+ return true;
+ }
+ if (CurrentRegion == OMPD_section) {
+ // OpenMP [2.7.2, sections Construct, Restrictions]
+ // Orphaned section directives are prohibited. That is, the section
+ // directives must appear within the sections construct and must not be
+ // encountered elsewhere in the sections region.
+ if (EnclosingConstruct != OMPD_sections) {
+ SemaRef.Diag(StartLoc, diag::err_omp_orphaned_section_directive)
+ << (ParentRegion != OMPD_unknown)
+ << getOpenMPDirectiveName(ParentRegion);
return true;
}
- if (isOpenMPSimdDirective(ParentRegion) &&
- ((SemaRef.LangOpts.OpenMP <= 45 && CurrentRegion != OMPD_ordered) ||
- (SemaRef.LangOpts.OpenMP >= 50 && CurrentRegion != OMPD_ordered &&
- CurrentRegion != OMPD_simd && CurrentRegion != OMPD_atomic &&
- CurrentRegion != OMPD_scan))) {
- // OpenMP [2.16, Nesting of Regions]
- // OpenMP constructs may not be nested inside a simd region.
- // OpenMP [2.8.1,simd Construct, Restrictions]
- // An ordered construct with the simd clause is the only OpenMP
- // construct that can appear in the simd region.
- // Allowing a SIMD construct nested in another SIMD construct is an
- // extension. The OpenMP 4.5 spec does not allow it. Issue a warning
- // message.
- // OpenMP 5.0 [2.9.3.1, simd Construct, Restrictions]
- // The only OpenMP constructs that can be encountered during execution of
- // a simd region are the atomic construct, the loop construct, the simd
- // construct and the ordered construct with the simd clause.
- SemaRef.Diag(StartLoc, (CurrentRegion != OMPD_simd)
- ? diag::err_omp_prohibited_region_simd
- : diag::warn_omp_nesting_simd)
- << (SemaRef.LangOpts.OpenMP >= 50 ? 1 : 0);
- return CurrentRegion != OMPD_simd;
- }
- if (ParentRegion == OMPD_atomic) {
- // OpenMP [2.16, Nesting of Regions]
- // OpenMP constructs may not be nested inside an atomic region.
- SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_atomic);
+ return false;
+ }
+ // Allow some constructs (except teams and cancellation constructs) to be
+ // orphaned (they could be used in functions, called from OpenMP regions
+ // with the required preconditions).
+ if (ParentRegion == OMPD_unknown &&
+ !isOpenMPNestingTeamsDirective(CurrentRegion) &&
+ CurrentRegion != OMPD_cancellation_point &&
+ CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_scan)
+ return false;
+ // Checks needed for mapping "loop" construct. Please check mapLoopConstruct
+ // for a detailed explanation
+ if (SemaRef.LangOpts.OpenMP >= 50 && CurrentRegion == OMPD_loop &&
+ (BindKind == OMPC_BIND_parallel || BindKind == OMPC_BIND_teams) &&
+ (isOpenMPWorksharingDirective(ParentRegion) ||
+ EnclosingConstruct == OMPD_loop)) {
+ int ErrorMsgNumber = (BindKind == OMPC_BIND_parallel) ? 1 : 4;
+ SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
+ << true << getOpenMPDirectiveName(ParentRegion) << ErrorMsgNumber
+ << getOpenMPDirectiveName(CurrentRegion);
+ return true;
+ }
+ if (CurrentRegion == OMPD_cancellation_point ||
+ CurrentRegion == OMPD_cancel) {
+ // OpenMP [2.16, Nesting of Regions]
+ // A cancellation point construct for which construct-type-clause is
+ // taskgroup must be nested inside a task construct. A cancellation
+ // point construct for which construct-type-clause is not taskgroup must
+ // be closely nested inside an OpenMP construct that matches the type
+ // specified in construct-type-clause.
+ // A cancel construct for which construct-type-clause is taskgroup must be
+ // nested inside a task construct. A cancel construct for which
+ // construct-type-clause is not taskgroup must be closely nested inside an
+ // OpenMP construct that matches the type specified in
+ // construct-type-clause.
+ ArrayRef<OpenMPDirectiveKind> Leafs = getLeafConstructsOrSelf(ParentRegion);
+ if (CancelRegion == OMPD_taskgroup) {
+ NestingProhibited = EnclosingConstruct != OMPD_task &&
+ (SemaRef.getLangOpts().OpenMP < 50 ||
+ EnclosingConstruct != OMPD_taskloop);
+ } else if (CancelRegion == OMPD_sections) {
+ NestingProhibited = EnclosingConstruct != OMPD_section &&
+ EnclosingConstruct != OMPD_sections;
+ } else {
+ NestingProhibited = CancelRegion != Leafs.back();
+ }
+ OrphanSeen = ParentRegion == OMPD_unknown;
+ } else if (CurrentRegion == OMPD_master || CurrentRegion == OMPD_masked) {
+ // OpenMP 5.1 [2.22, Nesting of Regions]
+ // A masked region may not be closely nested inside a worksharing, loop,
+ // atomic, task, or taskloop region.
+ NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
+ isOpenMPGenericLoopDirective(ParentRegion) ||
+ isOpenMPTaskingDirective(ParentRegion);
+ } else if (CurrentRegion == OMPD_critical && CurrentName.getName()) {
+ // OpenMP [2.16, Nesting of Regions]
+ // A critical region may not be nested (closely or otherwise) inside a
+ // critical region with the same name. Note that this restriction is not
+ // sufficient to prevent deadlock.
+ SourceLocation PreviousCriticalLoc;
+ bool DeadLock = Stack->hasDirective(
+ [CurrentName, &PreviousCriticalLoc](OpenMPDirectiveKind K,
+ const DeclarationNameInfo &DNI,
+ SourceLocation Loc) {
+ if (K == OMPD_critical && DNI.getName() == CurrentName.getName()) {
+ PreviousCriticalLoc = Loc;
+ return true;
+ }
+ return false;
+ },
+ false /* skip top directive */);
+ if (DeadLock) {
+ SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_critical_same_name)
+ << CurrentName.getName();
+ if (PreviousCriticalLoc.isValid())
+ SemaRef.Diag(PreviousCriticalLoc,
+ diag::note_omp_previous_critical_region);
return true;
}
- if (CurrentRegion == OMPD_section) {
- // OpenMP [2.7.2, sections Construct, Restrictions]
- // Orphaned section directives are prohibited. That is, the section
- // directives must appear within the sections construct and must not be
- // encountered elsewhere in the sections region.
- if (ParentRegion != OMPD_sections &&
- ParentRegion != OMPD_parallel_sections) {
- SemaRef.Diag(StartLoc, diag::err_omp_orphaned_section_directive)
- << (ParentRegion != OMPD_unknown)
- << getOpenMPDirectiveName(ParentRegion);
- return true;
- }
- return false;
- }
- // Allow some constructs (except teams and cancellation constructs) to be
- // orphaned (they could be used in functions, called from OpenMP regions
- // with the required preconditions).
- if (ParentRegion == OMPD_unknown &&
- !isOpenMPNestingTeamsDirective(CurrentRegion) &&
- CurrentRegion != OMPD_cancellation_point &&
- CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_scan)
- return false;
- // Checks needed for mapping "loop" construct. Please check mapLoopConstruct
- // for a detailed explanation
- if (SemaRef.LangOpts.OpenMP >= 50 && CurrentRegion == OMPD_loop &&
- (BindKind == OMPC_BIND_parallel || BindKind == OMPC_BIND_teams) &&
- (isOpenMPWorksharingDirective(ParentRegion) ||
- ParentRegion == OMPD_loop)) {
- int ErrorMsgNumber = (BindKind == OMPC_BIND_parallel) ? 1 : 4;
+ } else if (CurrentRegion == OMPD_barrier || CurrentRegion == OMPD_scope) {
+ // OpenMP 5.1 [2.22, Nesting of Regions]
+ // A scope region may not be closely nested inside a worksharing, loop,
+ // task, taskloop, critical, ordered, atomic, or masked region.
+ // OpenMP 5.1 [2.22, Nesting of Regions]
+ // A barrier region may not be closely nested inside a worksharing, loop,
+ // task, taskloop, critical, ordered, atomic, or masked region.
+ NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
+ isOpenMPGenericLoopDirective(ParentRegion) ||
+ isOpenMPTaskingDirective(ParentRegion) ||
+ llvm::is_contained({OMPD_masked, OMPD_master,
+ OMPD_critical, OMPD_ordered},
+ EnclosingConstruct);
+ } else if (isOpenMPWorksharingDirective(CurrentRegion) &&
+ !isOpenMPParallelDirective(CurrentRegion) &&
+ !isOpenMPTeamsDirective(CurrentRegion)) {
+ // OpenMP 5.1 [2.22, Nesting of Regions]
+ // A loop region that binds to a parallel region or a worksharing region
+ // may not be closely nested inside a worksharing, loop, task, taskloop,
+ // critical, ordered, atomic, or masked region.
+ NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
+ isOpenMPGenericLoopDirective(ParentRegion) ||
+ isOpenMPTaskingDirective(ParentRegion) ||
+ llvm::is_contained({OMPD_masked, OMPD_master,
+ OMPD_critical, OMPD_ordered},
+ EnclosingConstruct);
+ Recommend = ShouldBeInParallelRegion;
+ } else if (CurrentRegion == OMPD_ordered) {
+ // OpenMP [2.16, Nesting of Regions]
+ // An ordered region may not be closely nested inside a critical,
+ // atomic, or explicit task region.
+ // An ordered region must be closely nested inside a loop region (or
+ // parallel loop region) with an ordered clause.
+ // OpenMP [2.8.1,simd Construct, Restrictions]
+ // An ordered construct with the simd clause is the only OpenMP construct
+ // that can appear in the simd region.
+ NestingProhibited = EnclosingConstruct == OMPD_critical ||
+ isOpenMPTaskingDirective(ParentRegion) ||
+ !(isOpenMPSimdDirective(ParentRegion) ||
+ Stack->isParentOrderedRegion());
+ Recommend = ShouldBeInOrderedRegion;
+ } else if (isOpenMPNestingTeamsDirective(CurrentRegion)) {
+ // OpenMP [2.16, Nesting of Regions]
+ // If specified, a teams construct must be contained within a target
+ // construct.
+ NestingProhibited =
+ (SemaRef.LangOpts.OpenMP <= 45 && EnclosingConstruct != OMPD_target) ||
+ (SemaRef.LangOpts.OpenMP >= 50 && EnclosingConstruct != OMPD_unknown &&
+ EnclosingConstruct != OMPD_target);
+ OrphanSeen = ParentRegion == OMPD_unknown;
+ Recommend = ShouldBeInTargetRegion;
+ } else if (CurrentRegion == OMPD_scan) {
+ if (SemaRef.LangOpts.OpenMP >= 50) {
+ // OpenMP spec 5.0 and 5.1 require scan to be directly enclosed by for,
+ // simd, or for simd. This has to take into account combined directives.
+ // In 5.2 this seems to be implied by the fact that the specified
+ // separated constructs are do, for, and simd.
+ NestingProhibited = !llvm::is_contained(
+ {OMPD_for, OMPD_simd, OMPD_for_simd}, EnclosingConstruct);
+ } else {
+ NestingProhibited = true;
+ }
+ OrphanSeen = ParentRegion == OMPD_unknown;
+ Recommend = ShouldBeInLoopSimdRegion;
+ }
+ if (!NestingProhibited && !isOpenMPTargetExecutionDirective(CurrentRegion) &&
+ !isOpenMPTargetDataManagementDirective(CurrentRegion) &&
+ EnclosingConstruct == OMPD_teams) {
+ // OpenMP [5.1, 2.22, Nesting of Regions]
+ // distribute, distribute simd, distribute parallel worksharing-loop,
+ // distribute parallel worksharing-loop SIMD, loop, parallel regions,
+ // including any parallel regions arising from combined constructs,
+ // omp_get_num_teams() regions, and omp_get_team_num() regions are the
+ // only OpenMP regions that may be strictly nested inside the teams
+ // region.
+ //
+ // As an extension, we permit atomic within teams as well.
+ NestingProhibited = !isOpenMPParallelDirective(CurrentRegion) &&
+ !isOpenMPDistributeDirective(CurrentRegion) &&
+ CurrentRegion != OMPD_loop &&
+ !(SemaRef.getLangOpts().OpenMPExtensions &&
+ CurrentRegion == OMPD_atomic);
+ Recommend = ShouldBeInParallelRegion;
+ }
+ if (!NestingProhibited && CurrentRegion == OMPD_loop) {
+ // OpenMP [5.1, 2.11.7, loop Construct, Restrictions]
+ // If the bind clause is present on the loop construct and binding is
+ // teams then the corresponding loop region must be strictly nested inside
+ // a teams region.
+ NestingProhibited =
+ BindKind == OMPC_BIND_teams && EnclosingConstruct != OMPD_teams;
+ Recommend = ShouldBeInTeamsRegion;
+ }
+ if (!NestingProhibited && isOpenMPNestingDistributeDirective(CurrentRegion)) {
+ // OpenMP 4.5 [2.17 Nesting of Regions]
+ // The region associated with the distribute construct must be strictly
+ // nested inside a teams region
+ NestingProhibited = EnclosingConstruct != OMPD_teams;
+ Recommend = ShouldBeInTeamsRegion;
+ }
+ if (!NestingProhibited &&
+ (isOpenMPTargetExecutionDirective(CurrentRegion) ||
+ isOpenMPTargetDataManagementDirective(CurrentRegion))) {
+ // OpenMP 4.5 [2.17 Nesting of Regions]
+ // If a target, target update, target data, target enter data, or
+ // target exit data construct is encountered during execution of a
+ // target region, the behavior is unspecified.
+ NestingProhibited = Stack->hasDirective(
+ [&OffendingRegion](OpenMPDirectiveKind K, const DeclarationNameInfo &,
+ SourceLocation) {
+ if (isOpenMPTargetExecutionDirective(K)) {
+ OffendingRegion = K;
+ return true;
+ }
+ return false;
+ },
+ false /* don't skip top directive */);
+ CloseNesting = false;
+ }
+ if (NestingProhibited) {
+ if (OrphanSeen) {
+ SemaRef.Diag(StartLoc, diag::err_omp_orphaned_device_directive)
+ << getOpenMPDirectiveName(CurrentRegion) << Recommend;
+ } else {
SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
- << true << getOpenMPDirectiveName(ParentRegion) << ErrorMsgNumber
- << getOpenMPDirectiveName(CurrentRegion);
- return true;
- }
- if (CurrentRegion == OMPD_cancellation_point ||
- CurrentRegion == OMPD_cancel) {
- // OpenMP [2.16, Nesting of Regions]
- // A cancellation point construct for which construct-type-clause is
- // taskgroup must be nested inside a task construct. A cancellation
- // point construct for which construct-type-clause is not taskgroup must
- // be closely nested inside an OpenMP construct that matches the type
- // specified in construct-type-clause.
- // A cancel construct for which construct-type-clause is taskgroup must be
- // nested inside a task construct. A cancel construct for which
- // construct-type-clause is not taskgroup must be closely nested inside an
- // OpenMP construct that matches the type specified in
- // construct-type-clause.
- NestingProhibited =
- !((CancelRegion == OMPD_parallel &&
- (ParentRegion == OMPD_parallel ||
- ParentRegion == OMPD_target_parallel)) ||
- (CancelRegion == OMPD_for &&
- (ParentRegion == OMPD_for || ParentRegion == OMPD_parallel_for ||
- ParentRegion == OMPD_target_parallel_for ||
- ParentRegion == OMPD_distribute_parallel_for ||
- ParentRegion == OMPD_teams_distribute_parallel_for ||
- ParentRegion == OMPD_target_teams_distribute_parallel_for)) ||
- (CancelRegion == OMPD_taskgroup &&
- (ParentRegion == OMPD_task ||
- (SemaRef.getLangOpts().OpenMP >= 50 &&
- (ParentRegion == OMPD_taskloop ||
- ParentRegion == OMPD_master_taskloop ||
- ParentRegion == OMPD_masked_taskloop ||
- ParentRegion == OMPD_parallel_masked_taskloop ||
- ParentRegion == OMPD_parallel_master_taskloop)))) ||
- (CancelRegion == OMPD_sections &&
- (ParentRegion == OMPD_section || ParentRegion == OMPD_sections ||
- ParentRegion == OMPD_parallel_sections)));
- OrphanSeen = ParentRegion == OMPD_unknown;
- } else if (CurrentRegion == OMPD_master || CurrentRegion == OMPD_masked) {
- // OpenMP 5.1 [2.22, Nesting of Regions]
- // A masked region may not be closely nested inside a worksharing, loop,
- // atomic, task, or taskloop region.
- NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
- isOpenMPGenericLoopDirective(ParentRegion) ||
- isOpenMPTaskingDirective(ParentRegion);
- } else if (CurrentRegion == OMPD_critical && CurrentName.getName()) {
- // OpenMP [2.16, Nesting of Regions]
- // A critical region may not be nested (closely or otherwise) inside a
- // critical region with the same name. Note that this restriction is not
- // sufficient to prevent deadlock.
- SourceLocation PreviousCriticalLoc;
- bool DeadLock = Stack->hasDirective(
- [CurrentName, &PreviousCriticalLoc](OpenMPDirectiveKind K,
- const DeclarationNameInfo &DNI,
- SourceLocation Loc) {
- if (K == OMPD_critical && DNI.getName() == CurrentName.getName()) {
- PreviousCriticalLoc = Loc;
- return true;
- }
- return false;
- },
- false /* skip top directive */);
- if (DeadLock) {
- SemaRef.Diag(StartLoc,
- diag::err_omp_prohibited_region_critical_same_name)
- << CurrentName.getName();
- if (PreviousCriticalLoc.isValid())
- SemaRef.Diag(PreviousCriticalLoc,
- diag::note_omp_previous_critical_region);
- return true;
- }
- } else if (CurrentRegion == OMPD_barrier || CurrentRegion == OMPD_scope) {
- // OpenMP 5.1 [2.22, Nesting of Regions]
- // A scope region may not be closely nested inside a worksharing, loop,
- // task, taskloop, critical, ordered, atomic, or masked region.
- // OpenMP 5.1 [2.22, Nesting of Regions]
- // A barrier region may not be closely nested inside a worksharing, loop,
- // task, taskloop, critical, ordered, atomic, or masked region.
- NestingProhibited =
- isOpenMPWorksharingDirective(ParentRegion) ||
- isOpenMPGenericLoopDirective(ParentRegion) ||
- isOpenMPTaskingDirective(ParentRegion) ||
- ParentRegion == OMPD_master || ParentRegion == OMPD_masked ||
- ParentRegion == OMPD_parallel_master ||
- ParentRegion == OMPD_parallel_masked ||
- ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered;
- } else if (isOpenMPWorksharingDirective(CurrentRegion) &&
- !isOpenMPParallelDirective(CurrentRegion) &&
- !isOpenMPTeamsDirective(CurrentRegion)) {
- // OpenMP 5.1 [2.22, Nesting of Regions]
- // A loop region that binds to a parallel region or a worksharing region
- // may not be closely nested inside a worksharing, loop, task, taskloop,
- // critical, ordered, atomic, or masked region.
- NestingProhibited =
- isOpenMPWorksharingDirective(ParentRegion) ||
- isOpenMPGenericLoopDirective(ParentRegion) ||
- isOpenMPTaskingDirective(ParentRegion) ||
- ParentRegion == OMPD_master || ParentRegion == OMPD_masked ||
- ParentRegion == OMPD_parallel_master ||
- ParentRegion == OMPD_parallel_masked ||
- ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered;
- Recommend = ShouldBeInParallelRegion;
- } else if (CurrentRegion == OMPD_ordered) {
- // OpenMP [2.16, Nesting of Regions]
- // An ordered region may not be closely nested inside a critical,
- // atomic, or explicit task region.
- // An ordered region must be closely nested inside a loop region (or
- // parallel loop region) with an ordered clause.
- // OpenMP [2.8.1,simd Construct, Restrictions]
- // An ordered construct with the simd clause is the only OpenMP construct
- // that can appear in the simd region.
- NestingProhibited = ParentRegion == OMPD_critical ||
- isOpenMPTaskingDirective(ParentRegion) ||
- !(isOpenMPSimdDirective(ParentRegion) ||
- Stack->isParentOrderedRegion());
- Recommend = ShouldBeInOrderedRegion;
- } else if (isOpenMPNestingTeamsDirective(CurrentRegion)) {
- // OpenMP [2.16, Nesting of Regions]
- // If specified, a teams construct must be contained within a target
- // construct.
- NestingProhibited =
- (SemaRef.LangOpts.OpenMP <= 45 && ParentRegion != OMPD_target) ||
- (SemaRef.LangOpts.OpenMP >= 50 && ParentRegion != OMPD_unknown &&
- ParentRegion != OMPD_target);
- OrphanSeen = ParentRegion == OMPD_unknown;
- Recommend = ShouldBeInTargetRegion;
- } else if (CurrentRegion == OMPD_scan) {
- // OpenMP [2.16, Nesting of Regions]
- // If specified, a teams construct must be contained within a target
- // construct.
- NestingProhibited =
- SemaRef.LangOpts.OpenMP < 50 ||
- (ParentRegion != OMPD_simd && ParentRegion != OMPD_for &&
- ParentRegion != OMPD_for_simd && ParentRegion != OMPD_parallel_for &&
- ParentRegion != OMPD_parallel_for_simd);
- OrphanSeen = ParentRegion == OMPD_unknown;
- Recommend = ShouldBeInLoopSimdRegion;
- }
- if (!NestingProhibited &&
- !isOpenMPTargetExecutionDirective(CurrentRegion) &&
- !isOpenMPTargetDataManagementDirective(CurrentRegion) &&
- (ParentRegion == OMPD_teams || ParentRegion == OMPD_target_teams)) {
- // OpenMP [5.1, 2.22, Nesting of Regions]
- // distribute, distribute simd, distribute parallel worksharing-loop,
- // distribute parallel worksharing-loop SIMD, loop, parallel regions,
- // including any parallel regions arising from combined constructs,
- // omp_get_num_teams() regions, and omp_get_team_num() regions are the
- // only OpenMP regions that may be strictly nested inside the teams
- // region.
- //
- // As an extension, we permit atomic within teams as well.
- NestingProhibited = !isOpenMPParallelDirective(CurrentRegion) &&
- !isOpenMPDistributeDirective(CurrentRegion) &&
- CurrentRegion != OMPD_loop &&
- !(SemaRef.getLangOpts().OpenMPExtensions &&
- CurrentRegion == OMPD_atomic);
- Recommend = ShouldBeInParallelRegion;
- }
- if (!NestingProhibited && CurrentRegion == OMPD_loop) {
- // OpenMP [5.1, 2.11.7, loop Construct, Restrictions]
- // If the bind clause is present on the loop construct and binding is
- // teams then the corresponding loop region must be strictly nested inside
- // a teams region.
- NestingProhibited = BindKind == OMPC_BIND_teams &&
- ParentRegion != OMPD_teams &&
- ParentRegion != OMPD_target_teams;
- Recommend = ShouldBeInTeamsRegion;
- }
- if (!NestingProhibited &&
- isOpenMPNestingDistributeDirective(CurrentRegion)) {
- // OpenMP 4.5 [2.17 Nesting of Regions]
- // The region associated with the distribute construct must be strictly
- // nested inside a teams region
- NestingProhibited =
- (ParentRegion != OMPD_teams && ParentRegion != OMPD_target_teams);
- Recommend = ShouldBeInTeamsRegion;
- }
- if (!NestingProhibited &&
- (isOpenMPTargetExecutionDirective(CurrentRegion) ||
- isOpenMPTargetDataManagementDirective(CurrentRegion))) {
- // OpenMP 4.5 [2.17 Nesting of Regions]
- // If a target, target update, target data, target enter data, or
- // target exit data construct is encountered during execution of a
- // target region, the behavior is unspecified.
- NestingProhibited = Stack->hasDirective(
- [&OffendingRegion](OpenMPDirectiveKind K, const DeclarationNameInfo &,
- SourceLocation) {
- if (isOpenMPTargetExecutionDirective(K)) {
- OffendingRegion = K;
- return true;
- }
- return false;
- },
- false /* don't skip top directive */);
- CloseNesting = false;
- }
- if (NestingProhibited) {
- if (OrphanSeen) {
- SemaRef.Diag(StartLoc, diag::err_omp_orphaned_device_directive)
- << getOpenMPDirectiveName(CurrentRegion) << Recommend;
- } else {
- SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
- << CloseNesting << getOpenMPDirectiveName(OffendingRegion)
- << Recommend << getOpenMPDirectiveName(CurrentRegion);
- }
- return true;
+ << CloseNesting << getOpenMPDirectiveName(OffendingRegion)
+ << Recommend << getOpenMPDirectiveName(CurrentRegion);
}
+ return true;
}
return false;
}
@@ -5398,9 +5172,9 @@ static std::pair<ValueDecl *, bool> getPrivateItem(Sema &S, Expr *&RefExpr,
Base = TempASE->getBase()->IgnoreParenImpCasts();
RefExpr = Base;
IsArrayExpr = ArraySubscript;
- } else if (auto *OASE = dyn_cast_or_null<OMPArraySectionExpr>(RefExpr)) {
+ } else if (auto *OASE = dyn_cast_or_null<ArraySectionExpr>(RefExpr)) {
Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
- while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
+ while (auto *TempOASE = dyn_cast<ArraySectionExpr>(Base))
Base = TempOASE->getBase()->IgnoreParenImpCasts();
while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
Base = TempASE->getBase()->IgnoreParenImpCasts();
@@ -5725,7 +5499,7 @@ static CapturedStmt *buildDistanceFunc(Sema &Actions, QualType LogicalTy,
// Divide by the absolute step amount. If the range is not a multiple of
// the step size, rounding-up the effective upper bound ensures that the
// last iteration is included.
- // Note that the rounding-up may cause an overflow in a temporry that
+ // Note that the rounding-up may cause an overflow in a temporary that
// could be avoided, but would have occurred in a C-style for-loop as
// well.
Expr *Divisor = BuildVarRef(NewStep);
@@ -5779,9 +5553,9 @@ static CapturedStmt *buildLoopVarFunc(Sema &Actions, QualType LoopVarTy,
// the OpenMPIRBuilder to know additional C/C++ semantics, such as how to
// invoke a copy constructor.
QualType TargetParamTy = Ctx.getLValueReferenceType(LoopVarTy);
- Sema::CapturedParamNameType Params[] = {{"LoopVar", TargetParamTy},
- {"Logical", LogicalTy},
- {StringRef(), QualType()}};
+ SemaOpenMP::CapturedParamNameType Params[] = {{"LoopVar", TargetParamTy},
+ {"Logical", LogicalTy},
+ {StringRef(), QualType()}};
Actions.ActOnCapturedRegionStart({}, nullptr, CR_Default, Params);
// Capture the initial iterator which represents the LoopVar value at the
@@ -5832,7 +5606,7 @@ static CapturedStmt *buildLoopVarFunc(Sema &Actions, QualType LoopVarTy,
AssertSuccess(Actions.ActOnCapturedRegionEnd(Body)));
}
-StmtResult Sema::ActOnOpenMPCanonicalLoop(Stmt *AStmt) {
+StmtResult SemaOpenMP::ActOnOpenMPCanonicalLoop(Stmt *AStmt) {
ASTContext &Ctx = getASTContext();
// Extract the common elements of ForStmt and CXXForRangeStmt:
@@ -5943,8 +5717,8 @@ StmtResult Sema::ActOnOpenMPCanonicalLoop(Stmt *AStmt) {
if (IncBin->getOpcode() == BO_AddAssign) {
Step = IncBin->getRHS();
} else if (IncBin->getOpcode() == BO_SubAssign) {
- Step =
- AssertSuccess(BuildUnaryOp(nullptr, {}, UO_Minus, IncBin->getRHS()));
+ Step = AssertSuccess(
+ SemaRef.BuildUnaryOp(nullptr, {}, UO_Minus, IncBin->getRHS()));
} else
llvm_unreachable("unhandled binary increment operator");
} else if (auto *CondCXXOp = dyn_cast<CXXOperatorCallExpr>(Inc)) {
@@ -5962,7 +5736,7 @@ StmtResult Sema::ActOnOpenMPCanonicalLoop(Stmt *AStmt) {
break;
case OO_MinusEqual:
Step = AssertSuccess(
- BuildUnaryOp(nullptr, {}, UO_Minus, CondCXXOp->getArg(1)));
+ SemaRef.BuildUnaryOp(nullptr, {}, UO_Minus, CondCXXOp->getArg(1)));
break;
default:
llvm_unreachable("unhandled overloaded increment operator");
@@ -5971,16 +5745,17 @@ StmtResult Sema::ActOnOpenMPCanonicalLoop(Stmt *AStmt) {
llvm_unreachable("unknown increment expression");
CapturedStmt *DistanceFunc =
- buildDistanceFunc(*this, LogicalTy, CondRel, LHS, RHS, Step);
+ buildDistanceFunc(SemaRef, LogicalTy, CondRel, LHS, RHS, Step);
CapturedStmt *LoopVarFunc = buildLoopVarFunc(
- *this, LVTy, LogicalTy, CounterRef, Step, isa<CXXForRangeStmt>(AStmt));
- DeclRefExpr *LVRef = BuildDeclRefExpr(LUVDecl, LUVDecl->getType(), VK_LValue,
- {}, nullptr, nullptr, {}, nullptr);
+ SemaRef, LVTy, LogicalTy, CounterRef, Step, isa<CXXForRangeStmt>(AStmt));
+ DeclRefExpr *LVRef =
+ SemaRef.BuildDeclRefExpr(LUVDecl, LUVDecl->getType(), VK_LValue, {},
+ nullptr, nullptr, {}, nullptr);
return OMPCanonicalLoop::create(getASTContext(), AStmt, DistanceFunc,
LoopVarFunc, LVRef);
}
-StmtResult Sema::ActOnOpenMPLoopnest(Stmt *AStmt) {
+StmtResult SemaOpenMP::ActOnOpenMPLoopnest(Stmt *AStmt) {
// Handle a literal loop.
if (isa<ForStmt>(AStmt) || isa<CXXForRangeStmt>(AStmt))
return ActOnOpenMPCanonicalLoop(AStmt);
@@ -6005,7 +5780,7 @@ static ExprResult buildUserDefinedMapperRef(Sema &SemaRef, Scope *S,
static void
processImplicitMapsWithDefaultMappers(Sema &S, DSAStackTy *Stack,
SmallVectorImpl<OMPClause *> &Clauses) {
- // Check for the deault mapper for data members.
+ // Check for the default mapper for data members.
if (S.getLangOpts().OpenMP < 50)
return;
SmallVector<OMPClause *, 4> ImplicitMaps;
@@ -6029,10 +5804,10 @@ processImplicitMapsWithDefaultMappers(Sema &S, DSAStackTy *Stack,
// Array section - need to check for the mapping of the array section
// element.
QualType CanonType = E->getType().getCanonicalType();
- if (CanonType->isSpecificBuiltinType(BuiltinType::OMPArraySection)) {
- const auto *OASE = cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts());
+ if (CanonType->isSpecificBuiltinType(BuiltinType::ArraySection)) {
+ const auto *OASE = cast<ArraySectionExpr>(E->IgnoreParenImpCasts());
QualType BaseType =
- OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
+ ArraySectionExpr::getBaseOriginalType(OASE->getBase());
QualType ElemType;
if (const auto *ATy = BaseType->getAsArrayTypeUnsafe())
ElemType = ATy->getElementType();
@@ -6125,7 +5900,7 @@ processImplicitMapsWithDefaultMappers(Sema &S, DSAStackTy *Stack,
continue;
CXXScopeSpec MapperIdScopeSpec;
DeclarationNameInfo MapperId;
- if (OMPClause *NewClause = S.ActOnOpenMPMapClause(
+ if (OMPClause *NewClause = S.OpenMP().ActOnOpenMPMapClause(
nullptr, C->getMapTypeModifiers(), C->getMapTypeModifiersLoc(),
MapperIdScopeSpec, MapperId, C->getMapType(),
/*IsMapTypeImplicit=*/true, SourceLocation(), SourceLocation(),
@@ -6134,14 +5909,84 @@ processImplicitMapsWithDefaultMappers(Sema &S, DSAStackTy *Stack,
}
}
-bool Sema::mapLoopConstruct(llvm::SmallVector<OMPClause *> &ClausesWithoutBind,
- ArrayRef<OMPClause *> Clauses,
- OpenMPBindClauseKind &BindKind,
- OpenMPDirectiveKind &Kind,
- OpenMPDirectiveKind &PrevMappedDirective,
- SourceLocation StartLoc, SourceLocation EndLoc,
- const DeclarationNameInfo &DirName,
- OpenMPDirectiveKind CancelRegion) {
+namespace {
+/// A 'teams loop' with a nested 'loop bind(parallel)' or generic function
+/// call in the associated loop-nest cannot be a 'parallel for'.
+class TeamsLoopChecker final : public ConstStmtVisitor<TeamsLoopChecker> {
+ Sema &SemaRef;
+
+public:
+ bool teamsLoopCanBeParallelFor() const { return TeamsLoopCanBeParallelFor; }
+
+ // Is there a nested OpenMP loop bind(parallel)
+ void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
+ if (D->getDirectiveKind() == llvm::omp::Directive::OMPD_loop) {
+ if (const auto *C = D->getSingleClause<OMPBindClause>())
+ if (C->getBindKind() == OMPC_BIND_parallel) {
+ TeamsLoopCanBeParallelFor = false;
+ // No need to continue visiting any more
+ return;
+ }
+ }
+ for (const Stmt *Child : D->children())
+ if (Child)
+ Visit(Child);
+ }
+
+ void VisitCallExpr(const CallExpr *C) {
+ // Function calls inhibit parallel loop translation of 'target teams loop'
+ // unless the assume-no-nested-parallelism flag has been specified.
+ // OpenMP API runtime library calls do not inhibit parallel loop
+ // translation, regardless of the assume-no-nested-parallelism.
+ bool IsOpenMPAPI = false;
+ auto *FD = dyn_cast_or_null<FunctionDecl>(C->getCalleeDecl());
+ if (FD) {
+ std::string Name = FD->getNameInfo().getAsString();
+ IsOpenMPAPI = Name.find("omp_") == 0;
+ }
+ TeamsLoopCanBeParallelFor =
+ IsOpenMPAPI || SemaRef.getLangOpts().OpenMPNoNestedParallelism;
+ if (!TeamsLoopCanBeParallelFor)
+ return;
+
+ for (const Stmt *Child : C->children())
+ if (Child)
+ Visit(Child);
+ }
+
+ void VisitCapturedStmt(const CapturedStmt *S) {
+ if (!S)
+ return;
+ Visit(S->getCapturedDecl()->getBody());
+ }
+
+ void VisitStmt(const Stmt *S) {
+ if (!S)
+ return;
+ for (const Stmt *Child : S->children())
+ if (Child)
+ Visit(Child);
+ }
+ explicit TeamsLoopChecker(Sema &SemaRef)
+ : SemaRef(SemaRef), TeamsLoopCanBeParallelFor(true) {}
+
+private:
+ bool TeamsLoopCanBeParallelFor;
+};
+} // namespace
+
+static bool teamsLoopCanBeParallelFor(Stmt *AStmt, Sema &SemaRef) {
+ TeamsLoopChecker Checker(SemaRef);
+ Checker.Visit(AStmt);
+ return Checker.teamsLoopCanBeParallelFor();
+}
+
+bool SemaOpenMP::mapLoopConstruct(
+ llvm::SmallVector<OMPClause *> &ClausesWithoutBind,
+ ArrayRef<OMPClause *> Clauses, OpenMPBindClauseKind &BindKind,
+ OpenMPDirectiveKind &Kind, OpenMPDirectiveKind &PrevMappedDirective,
+ SourceLocation StartLoc, SourceLocation EndLoc,
+ const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion) {
bool UseClausesWithoutBind = false;
@@ -6153,16 +5998,21 @@ bool Sema::mapLoopConstruct(llvm::SmallVector<OMPClause *> &ClausesWithoutBind,
if (BindKind == OMPC_BIND_unknown) {
// Setting the enclosing teams or parallel construct for the loop
// directive without bind clause.
+ // [5.0:129:25-28] If the bind clause is not present on the construct and
+ // the loop construct is closely nested inside a teams or parallel
+ // construct, the binding region is the corresponding teams or parallel
+ // region. If none of those conditions hold, the binding region is not
+ // defined.
BindKind = OMPC_BIND_thread; // Default bind(thread) if binding is unknown
+ ArrayRef<OpenMPDirectiveKind> ParentLeafs =
+ getLeafConstructsOrSelf(ParentDirective);
if (ParentDirective == OMPD_unknown) {
Diag(DSAStack->getDefaultDSALocation(),
diag::err_omp_bind_required_on_loop);
- } else if (ParentDirective == OMPD_parallel ||
- ParentDirective == OMPD_target_parallel) {
+ } else if (ParentLeafs.back() == OMPD_parallel) {
BindKind = OMPC_BIND_parallel;
- } else if (ParentDirective == OMPD_teams ||
- ParentDirective == OMPD_target_teams) {
+ } else if (ParentLeafs.back() == OMPD_teams) {
BindKind = OMPC_BIND_teams;
}
} else {
@@ -6223,11 +6073,13 @@ bool Sema::mapLoopConstruct(llvm::SmallVector<OMPClause *> &ClausesWithoutBind,
return UseClausesWithoutBind;
}
-StmtResult Sema::ActOnOpenMPExecutableDirective(
+StmtResult SemaOpenMP::ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc,
OpenMPDirectiveKind PrevMappedDirective) {
+ assert(isOpenMPExecutableDirective(Kind) && "Unexpected directive category");
+
StmtResult Res = StmtError();
OpenMPBindClauseKind BindKind = OMPC_BIND_unknown;
llvm::SmallVector<OMPClause *> ClausesWithoutBind;
@@ -6248,8 +6100,8 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
}
// First check CancelRegion which is then used in checkNestingOfRegions.
- if (checkCancelRegion(*this, Kind, CancelRegion, StartLoc) ||
- checkNestingOfRegions(*this, DSAStack, DK, DirName, CancelRegion,
+ if (checkCancelRegion(SemaRef, Kind, CancelRegion, StartLoc) ||
+ checkNestingOfRegions(SemaRef, DSAStack, DK, DirName, CancelRegion,
BindKind, StartLoc)) {
return StmtError();
}
@@ -6268,13 +6120,12 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
} else {
ClausesWithImplicit.append(Clauses.begin(), Clauses.end());
}
- if (AStmt && !CurContext->isDependentContext() && Kind != OMPD_atomic &&
- Kind != OMPD_critical && Kind != OMPD_section && Kind != OMPD_master &&
- Kind != OMPD_masked && !isOpenMPLoopTransformationDirective(Kind)) {
+ if (AStmt && !SemaRef.CurContext->isDependentContext() &&
+ isOpenMPCapturingDirective(Kind)) {
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
// Check default data sharing attributes for referenced variables.
- DSAAttrChecker DSAChecker(DSAStack, *this, cast<CapturedStmt>(AStmt));
+ DSAAttrChecker DSAChecker(DSAStack, SemaRef, cast<CapturedStmt>(AStmt));
int ThisCaptureLevel = getOpenMPCaptureLevels(Kind);
Stmt *S = AStmt;
while (--ThisCaptureLevel >= 0)
@@ -6317,14 +6168,14 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
DMC->getDefaultmapModifierLoc();
}
for (unsigned VC = 0; VC < DefaultmapKindNum; ++VC) {
- auto Kind = static_cast<OpenMPDefaultmapClauseKind>(VC);
+ auto K = static_cast<OpenMPDefaultmapClauseKind>(VC);
for (unsigned I = 0; I < OMPC_MAP_delete; ++I) {
- ArrayRef<Expr *> ImplicitMap = DSAChecker.getImplicitMap(
- Kind, static_cast<OpenMPMapClauseKind>(I));
+ ArrayRef<Expr *> ImplicitMap =
+ DSAChecker.getImplicitMap(K, static_cast<OpenMPMapClauseKind>(I));
ImplicitMaps[VC][I].append(ImplicitMap.begin(), ImplicitMap.end());
}
ArrayRef<OpenMPMapModifierKind> ImplicitModifier =
- DSAChecker.getImplicitMapModifier(Kind);
+ DSAChecker.getImplicitMapModifier(K);
ImplicitMapModifiers[VC].append(ImplicitModifier.begin(),
ImplicitModifier.end());
std::fill_n(std::back_inserter(ImplicitMapModifiersLoc[VC]),
@@ -6398,10 +6249,10 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
continue;
CXXScopeSpec MapperIdScopeSpec;
DeclarationNameInfo MapperId;
- auto Kind = static_cast<OpenMPMapClauseKind>(ClauseKindCnt);
+ auto K = static_cast<OpenMPMapClauseKind>(ClauseKindCnt);
if (OMPClause *Implicit = ActOnOpenMPMapClause(
nullptr, ImplicitMapModifiers[I], ImplicitMapModifiersLoc[I],
- MapperIdScopeSpec, MapperId, Kind, /*IsMapTypeImplicit=*/true,
+ MapperIdScopeSpec, MapperId, K, /*IsMapTypeImplicit=*/true,
SourceLocation(), SourceLocation(), ImplicitMap,
OMPVarListLocTy())) {
ClausesWithImplicit.emplace_back(Implicit);
@@ -6414,23 +6265,19 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
}
// Build expressions for implicit maps of data members with 'default'
// mappers.
- if (LangOpts.OpenMP >= 50)
- processImplicitMapsWithDefaultMappers(*this, DSAStack,
+ if (getLangOpts().OpenMP >= 50)
+ processImplicitMapsWithDefaultMappers(SemaRef, DSAStack,
ClausesWithImplicit);
}
- llvm::SmallVector<OpenMPDirectiveKind, 4> AllowedNameModifiers;
switch (Kind) {
case OMPD_parallel:
Res = ActOnOpenMPParallelDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc);
- AllowedNameModifiers.push_back(OMPD_parallel);
break;
case OMPD_simd:
Res = ActOnOpenMPSimdDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
VarsWithInheritedDSA);
- if (LangOpts.OpenMP >= 50)
- AllowedNameModifiers.push_back(OMPD_simd);
break;
case OMPD_tile:
Res =
@@ -6440,6 +6287,15 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
Res = ActOnOpenMPUnrollDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc);
break;
+ case OMPD_reverse:
+ assert(ClausesWithImplicit.empty() &&
+ "reverse directive does not support any clauses");
+ Res = ActOnOpenMPReverseDirective(AStmt, StartLoc, EndLoc);
+ break;
+ case OMPD_interchange:
+ Res = ActOnOpenMPInterchangeDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc);
+ break;
case OMPD_for:
Res = ActOnOpenMPForDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
VarsWithInheritedDSA);
@@ -6447,8 +6303,6 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPD_for_simd:
Res = ActOnOpenMPForSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc, VarsWithInheritedDSA);
- if (LangOpts.OpenMP >= 50)
- AllowedNameModifiers.push_back(OMPD_simd);
break;
case OMPD_sections:
Res = ActOnOpenMPSectionsDirective(ClausesWithImplicit, AStmt, StartLoc,
@@ -6479,14 +6333,10 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPD_parallel_for:
Res = ActOnOpenMPParallelForDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_parallel);
break;
case OMPD_parallel_for_simd:
Res = ActOnOpenMPParallelForSimdDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_parallel);
- if (LangOpts.OpenMP >= 50)
- AllowedNameModifiers.push_back(OMPD_simd);
break;
case OMPD_scope:
Res =
@@ -6495,22 +6345,18 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPD_parallel_master:
Res = ActOnOpenMPParallelMasterDirective(ClausesWithImplicit, AStmt,
StartLoc, EndLoc);
- AllowedNameModifiers.push_back(OMPD_parallel);
break;
case OMPD_parallel_masked:
Res = ActOnOpenMPParallelMaskedDirective(ClausesWithImplicit, AStmt,
StartLoc, EndLoc);
- AllowedNameModifiers.push_back(OMPD_parallel);
break;
case OMPD_parallel_sections:
Res = ActOnOpenMPParallelSectionsDirective(ClausesWithImplicit, AStmt,
StartLoc, EndLoc);
- AllowedNameModifiers.push_back(OMPD_parallel);
break;
case OMPD_task:
Res =
ActOnOpenMPTaskDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
- AllowedNameModifiers.push_back(OMPD_task);
break;
case OMPD_taskyield:
assert(ClausesWithImplicit.empty() &&
@@ -6570,19 +6416,14 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPD_target:
Res = ActOnOpenMPTargetDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc);
- AllowedNameModifiers.push_back(OMPD_target);
break;
case OMPD_target_parallel:
Res = ActOnOpenMPTargetParallelDirective(ClausesWithImplicit, AStmt,
StartLoc, EndLoc);
- AllowedNameModifiers.push_back(OMPD_target);
- AllowedNameModifiers.push_back(OMPD_parallel);
break;
case OMPD_target_parallel_for:
Res = ActOnOpenMPTargetParallelForDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_target);
- AllowedNameModifiers.push_back(OMPD_parallel);
break;
case OMPD_cancellation_point:
assert(ClausesWithImplicit.empty() &&
@@ -6596,90 +6437,58 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
"No associated statement allowed for 'omp cancel' directive");
Res = ActOnOpenMPCancelDirective(ClausesWithImplicit, StartLoc, EndLoc,
CancelRegion);
- AllowedNameModifiers.push_back(OMPD_cancel);
break;
case OMPD_target_data:
Res = ActOnOpenMPTargetDataDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc);
- AllowedNameModifiers.push_back(OMPD_target_data);
break;
case OMPD_target_enter_data:
Res = ActOnOpenMPTargetEnterDataDirective(ClausesWithImplicit, StartLoc,
EndLoc, AStmt);
- AllowedNameModifiers.push_back(OMPD_target_enter_data);
break;
case OMPD_target_exit_data:
Res = ActOnOpenMPTargetExitDataDirective(ClausesWithImplicit, StartLoc,
EndLoc, AStmt);
- AllowedNameModifiers.push_back(OMPD_target_exit_data);
break;
case OMPD_taskloop:
Res = ActOnOpenMPTaskLoopDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_taskloop);
break;
case OMPD_taskloop_simd:
Res = ActOnOpenMPTaskLoopSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_taskloop);
- if (LangOpts.OpenMP >= 50)
- AllowedNameModifiers.push_back(OMPD_simd);
break;
case OMPD_master_taskloop:
Res = ActOnOpenMPMasterTaskLoopDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_taskloop);
break;
case OMPD_masked_taskloop:
Res = ActOnOpenMPMaskedTaskLoopDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_taskloop);
break;
case OMPD_master_taskloop_simd:
Res = ActOnOpenMPMasterTaskLoopSimdDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_taskloop);
- if (LangOpts.OpenMP >= 50)
- AllowedNameModifiers.push_back(OMPD_simd);
break;
case OMPD_masked_taskloop_simd:
Res = ActOnOpenMPMaskedTaskLoopSimdDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- if (LangOpts.OpenMP >= 51) {
- AllowedNameModifiers.push_back(OMPD_taskloop);
- AllowedNameModifiers.push_back(OMPD_simd);
- }
break;
case OMPD_parallel_master_taskloop:
Res = ActOnOpenMPParallelMasterTaskLoopDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_taskloop);
- AllowedNameModifiers.push_back(OMPD_parallel);
break;
case OMPD_parallel_masked_taskloop:
Res = ActOnOpenMPParallelMaskedTaskLoopDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- if (LangOpts.OpenMP >= 51) {
- AllowedNameModifiers.push_back(OMPD_taskloop);
- AllowedNameModifiers.push_back(OMPD_parallel);
- }
break;
case OMPD_parallel_master_taskloop_simd:
Res = ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_taskloop);
- AllowedNameModifiers.push_back(OMPD_parallel);
- if (LangOpts.OpenMP >= 50)
- AllowedNameModifiers.push_back(OMPD_simd);
break;
case OMPD_parallel_masked_taskloop_simd:
Res = ActOnOpenMPParallelMaskedTaskLoopSimdDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- if (LangOpts.OpenMP >= 51) {
- AllowedNameModifiers.push_back(OMPD_taskloop);
- AllowedNameModifiers.push_back(OMPD_parallel);
- AllowedNameModifiers.push_back(OMPD_simd);
- }
break;
case OMPD_distribute:
Res = ActOnOpenMPDistributeDirective(ClausesWithImplicit, AStmt, StartLoc,
@@ -6688,40 +6497,26 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPD_target_update:
Res = ActOnOpenMPTargetUpdateDirective(ClausesWithImplicit, StartLoc,
EndLoc, AStmt);
- AllowedNameModifiers.push_back(OMPD_target_update);
break;
case OMPD_distribute_parallel_for:
Res = ActOnOpenMPDistributeParallelForDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_parallel);
break;
case OMPD_distribute_parallel_for_simd:
Res = ActOnOpenMPDistributeParallelForSimdDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_parallel);
- if (LangOpts.OpenMP >= 50)
- AllowedNameModifiers.push_back(OMPD_simd);
break;
case OMPD_distribute_simd:
Res = ActOnOpenMPDistributeSimdDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- if (LangOpts.OpenMP >= 50)
- AllowedNameModifiers.push_back(OMPD_simd);
break;
case OMPD_target_parallel_for_simd:
Res = ActOnOpenMPTargetParallelForSimdDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_target);
- AllowedNameModifiers.push_back(OMPD_parallel);
- if (LangOpts.OpenMP >= 50)
- AllowedNameModifiers.push_back(OMPD_simd);
break;
case OMPD_target_simd:
Res = ActOnOpenMPTargetSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_target);
- if (LangOpts.OpenMP >= 50)
- AllowedNameModifiers.push_back(OMPD_simd);
break;
case OMPD_teams_distribute:
Res = ActOnOpenMPTeamsDistributeDirective(
@@ -6730,51 +6525,34 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPD_teams_distribute_simd:
Res = ActOnOpenMPTeamsDistributeSimdDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- if (LangOpts.OpenMP >= 50)
- AllowedNameModifiers.push_back(OMPD_simd);
break;
case OMPD_teams_distribute_parallel_for_simd:
Res = ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_parallel);
- if (LangOpts.OpenMP >= 50)
- AllowedNameModifiers.push_back(OMPD_simd);
break;
case OMPD_teams_distribute_parallel_for:
Res = ActOnOpenMPTeamsDistributeParallelForDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_parallel);
break;
case OMPD_target_teams:
Res = ActOnOpenMPTargetTeamsDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc);
- AllowedNameModifiers.push_back(OMPD_target);
break;
case OMPD_target_teams_distribute:
Res = ActOnOpenMPTargetTeamsDistributeDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_target);
break;
case OMPD_target_teams_distribute_parallel_for:
Res = ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_target);
- AllowedNameModifiers.push_back(OMPD_parallel);
break;
case OMPD_target_teams_distribute_parallel_for_simd:
Res = ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_target);
- AllowedNameModifiers.push_back(OMPD_parallel);
- if (LangOpts.OpenMP >= 50)
- AllowedNameModifiers.push_back(OMPD_simd);
break;
case OMPD_target_teams_distribute_simd:
Res = ActOnOpenMPTargetTeamsDistributeSimdDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_target);
- if (LangOpts.OpenMP >= 50)
- AllowedNameModifiers.push_back(OMPD_simd);
break;
case OMPD_interop:
assert(AStmt == nullptr &&
@@ -6796,7 +6574,6 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPD_target_teams_loop:
Res = ActOnOpenMPTargetTeamsGenericLoopDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
- AllowedNameModifiers.push_back(OMPD_target);
break;
case OMPD_parallel_loop:
Res = ActOnOpenMPParallelGenericLoopDirective(
@@ -6830,12 +6607,12 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
if (DSAStack->getDefaultDSA() == DSA_none ||
DSAStack->getDefaultDSA() == DSA_private ||
DSAStack->getDefaultDSA() == DSA_firstprivate) {
- DSAAttrChecker DSAChecker(DSAStack, *this, nullptr);
+ DSAAttrChecker DSAChecker(DSAStack, SemaRef, nullptr);
for (OMPClause *C : Clauses) {
switch (C->getClauseKind()) {
case OMPC_num_threads:
case OMPC_dist_schedule:
- // Do not analyse if no parent teams directive.
+ // Do not analyze if no parent teams directive.
if (isOpenMPTeamsDirective(Kind))
break;
continue;
@@ -6966,14 +6743,19 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
}
}
+ llvm::SmallVector<OpenMPDirectiveKind, 4> AllowedNameModifiers;
+ for (OpenMPDirectiveKind D : getLeafConstructsOrSelf(Kind)) {
+ if (isAllowedClauseForDirective(D, OMPC_if, getLangOpts().OpenMP))
+ AllowedNameModifiers.push_back(D);
+ }
if (!AllowedNameModifiers.empty())
- ErrorFound = checkIfClauses(*this, Kind, Clauses, AllowedNameModifiers) ||
+ ErrorFound = checkIfClauses(SemaRef, Kind, Clauses, AllowedNameModifiers) ||
ErrorFound;
if (ErrorFound)
return StmtError();
- if (!CurContext->isDependentContext() &&
+ if (!SemaRef.CurContext->isDependentContext() &&
isOpenMPTargetExecutionDirective(Kind) &&
!(DSAStack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
DSAStack->hasRequiresDeclWithClause<OMPUnifiedAddressClause>() ||
@@ -6986,7 +6768,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
return Res;
}
-Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareSimdDirective(
+SemaOpenMP::DeclGroupPtrTy SemaOpenMP::ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen,
ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
@@ -7221,13 +7003,15 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareSimdDirective(
NewStep = PerformOpenMPImplicitIntegerConversion(Step->getExprLoc(), Step)
.get();
if (NewStep)
- NewStep =
- VerifyIntegerConstantExpression(NewStep, /*FIXME*/ AllowFold).get();
+ NewStep = SemaRef
+ .VerifyIntegerConstantExpression(
+ NewStep, /*FIXME*/ Sema::AllowFold)
+ .get();
}
NewSteps.push_back(NewStep);
}
auto *NewAttr = OMPDeclareSimdDeclAttr::CreateImplicit(
- Context, BS, SL.get(), const_cast<Expr **>(Uniforms.data()),
+ getASTContext(), BS, SL.get(), const_cast<Expr **>(Uniforms.data()),
Uniforms.size(), const_cast<Expr **>(Aligneds.data()), Aligneds.size(),
const_cast<Expr **>(NewAligns.data()), NewAligns.size(),
const_cast<Expr **>(Linears.data()), Linears.size(),
@@ -7260,7 +7044,7 @@ static void setPrototype(Sema &S, FunctionDecl *FD, FunctionDecl *FDWithProto,
FD->setParams(Params);
}
-void Sema::ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D) {
+void SemaOpenMP::ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D) {
if (D->isInvalidDecl())
return;
FunctionDecl *FD = nullptr;
@@ -7273,18 +7057,18 @@ void Sema::ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D) {
// If we are instantiating templates we do *not* apply scoped assumptions but
// only global ones. We apply scoped assumption to the template definition
// though.
- if (!inTemplateInstantiation()) {
- for (AssumptionAttr *AA : OMPAssumeScoped)
+ if (!SemaRef.inTemplateInstantiation()) {
+ for (OMPAssumeAttr *AA : OMPAssumeScoped)
FD->addAttr(AA);
}
- for (AssumptionAttr *AA : OMPAssumeGlobal)
+ for (OMPAssumeAttr *AA : OMPAssumeGlobal)
FD->addAttr(AA);
}
-Sema::OMPDeclareVariantScope::OMPDeclareVariantScope(OMPTraitInfo &TI)
+SemaOpenMP::OMPDeclareVariantScope::OMPDeclareVariantScope(OMPTraitInfo &TI)
: TI(&TI), NameSuffix(TI.getMangledName()) {}
-void Sema::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
+void SemaOpenMP::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists,
SmallVectorImpl<FunctionDecl *> &Bases) {
if (!D.getIdentifier())
@@ -7294,17 +7078,18 @@ void Sema::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
// Template specialization is an extension, check if we do it.
bool IsTemplated = !TemplateParamLists.empty();
- if (IsTemplated &
+ if (IsTemplated &&
!DVScope.TI->isExtensionActive(
llvm::omp::TraitProperty::implementation_extension_allow_templates))
return;
- IdentifierInfo *BaseII = D.getIdentifier();
- LookupResult Lookup(*this, DeclarationName(BaseII), D.getIdentifierLoc(),
- LookupOrdinaryName);
- LookupParsedName(Lookup, S, &D.getCXXScopeSpec());
+ const IdentifierInfo *BaseII = D.getIdentifier();
+ LookupResult Lookup(SemaRef, DeclarationName(BaseII), D.getIdentifierLoc(),
+ Sema::LookupOrdinaryName);
+ SemaRef.LookupParsedName(Lookup, S, &D.getCXXScopeSpec(),
+ /*ObjectType=*/QualType());
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
+ TypeSourceInfo *TInfo = SemaRef.GetTypeForDeclarator(D);
QualType FType = TInfo->getType();
bool IsConstexpr =
@@ -7333,7 +7118,7 @@ void Sema::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
QualType UDeclTy = UDecl->getType();
if (!UDeclTy->isDependentType()) {
- QualType NewType = Context.mergeFunctionTypes(
+ QualType NewType = getASTContext().mergeFunctionTypes(
FType, UDeclTy, /* OfBlockPointer */ false,
/* Unqualified */ false, /* AllowCXX */ true);
if (NewType.isNull())
@@ -7349,7 +7134,7 @@ void Sema::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
// If no base was found we create a declaration that we use as base.
if (Bases.empty() && UseImplicitBase) {
D.setFunctionDefinitionKind(FunctionDefinitionKind::Declaration);
- Decl *BaseD = HandleDeclarator(S, D, TemplateParamLists);
+ Decl *BaseD = SemaRef.HandleDeclarator(S, D, TemplateParamLists);
BaseD->setImplicit(true);
if (auto *BaseTemplD = dyn_cast<FunctionTemplateDecl>(BaseD))
Bases.push_back(BaseTemplD->getTemplatedDecl());
@@ -7361,18 +7146,18 @@ void Sema::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
MangledName += D.getIdentifier()->getName();
MangledName += getOpenMPVariantManglingSeparatorStr();
MangledName += DVScope.NameSuffix;
- IdentifierInfo &VariantII = Context.Idents.get(MangledName);
+ IdentifierInfo &VariantII = getASTContext().Idents.get(MangledName);
VariantII.setMangledOpenMPVariantName(true);
D.SetIdentifier(&VariantII, D.getBeginLoc());
}
-void Sema::ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
+void SemaOpenMP::ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
Decl *D, SmallVectorImpl<FunctionDecl *> &Bases) {
// Do not mark function as is used to prevent its emission if this is the
// only place where it is used.
EnterExpressionEvaluationContext Unevaluated(
- *this, Sema::ExpressionEvaluationContext::Unevaluated);
+ SemaRef, Sema::ExpressionEvaluationContext::Unevaluated);
FunctionDecl *FD = nullptr;
if (auto *UTemplDecl = dyn_cast<FunctionTemplateDecl>(D))
@@ -7380,14 +7165,14 @@ void Sema::ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
else
FD = cast<FunctionDecl>(D);
auto *VariantFuncRef = DeclRefExpr::Create(
- Context, NestedNameSpecifierLoc(), SourceLocation(), FD,
+ getASTContext(), NestedNameSpecifierLoc(), SourceLocation(), FD,
/* RefersToEnclosingVariableOrCapture */ false,
/* NameLoc */ FD->getLocation(), FD->getType(),
ExprValueKind::VK_PRValue);
OMPDeclareVariantScope &DVScope = OMPDeclareVariantScopes.back();
auto *OMPDeclareVariantA = OMPDeclareVariantAttr::CreateImplicit(
- Context, VariantFuncRef, DVScope.TI,
+ getASTContext(), VariantFuncRef, DVScope.TI,
/*NothingArgs=*/nullptr, /*NothingArgsSize=*/0,
/*NeedDevicePtrArgs=*/nullptr, /*NeedDevicePtrArgsSize=*/0,
/*AppendArgs=*/nullptr, /*AppendArgsSize=*/0);
@@ -7395,10 +7180,11 @@ void Sema::ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
BaseFD->addAttr(OMPDeclareVariantA);
}
-ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope,
- SourceLocation LParenLoc,
- MultiExprArg ArgExprs,
- SourceLocation RParenLoc, Expr *ExecConfig) {
+ExprResult SemaOpenMP::ActOnOpenMPCall(ExprResult Call, Scope *Scope,
+ SourceLocation LParenLoc,
+ MultiExprArg ArgExprs,
+ SourceLocation RParenLoc,
+ Expr *ExecConfig) {
// The common case is a regular call we do not want to specialize at all. Try
// to make that case fast by bailing early.
CallExpr *CE = dyn_cast<CallExpr>(Call.get());
@@ -7409,7 +7195,7 @@ ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope,
if (!CalleeFnDecl)
return Call;
- if (LangOpts.OpenMP >= 51 && CalleeFnDecl->getIdentifier() &&
+ if (getLangOpts().OpenMP >= 51 && CalleeFnDecl->getIdentifier() &&
CalleeFnDecl->getName().starts_with_insensitive("omp_")) {
// checking for any calls inside an Order region
if (Scope && Scope->isOpenMPOrderClauseScope())
@@ -7428,7 +7214,8 @@ ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope,
<< ISATrait;
};
TargetOMPContext OMPCtx(Context, std::move(DiagUnknownTrait),
- getCurFunctionDecl(), DSAStack->getConstructTraits());
+ SemaRef.getCurFunctionDecl(),
+ DSAStack->getConstructTraits());
QualType CalleeFnType = CalleeFnDecl->getType();
@@ -7473,7 +7260,7 @@ ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope,
// different type than the base function. This is intended and OK but if
// we cannot create a call the difference is not in the "implementation
// defined range" we allow.
- Sema::TentativeAnalysisScope Trap(*this);
+ Sema::TentativeAnalysisScope Trap(SemaRef);
if (auto *SpecializedMethod = dyn_cast<CXXMethodDecl>(BestDecl)) {
auto *MemberCall = dyn_cast<CXXMemberCallExpr>(CE);
@@ -7482,12 +7269,12 @@ ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope,
/* IsArrow */ false, SpecializedMethod, Context.BoundMemberTy,
MemberCall->getValueKind(), MemberCall->getObjectKind());
}
- NewCall = BuildCallExpr(Scope, BestExpr, LParenLoc, ArgExprs, RParenLoc,
- ExecConfig);
+ NewCall = SemaRef.BuildCallExpr(Scope, BestExpr, LParenLoc, ArgExprs,
+ RParenLoc, ExecConfig);
if (NewCall.isUsable()) {
if (CallExpr *NCE = dyn_cast<CallExpr>(NewCall.get())) {
FunctionDecl *NewCalleeFnDecl = NCE->getDirectCallee();
- QualType NewType = Context.mergeFunctionTypes(
+ QualType NewType = getASTContext().mergeFunctionTypes(
CalleeFnType, NewCalleeFnDecl->getType(),
/* OfBlockPointer */ false,
/* Unqualified */ false, /* AllowCXX */ true);
@@ -7505,14 +7292,16 @@ ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope,
if (!NewCall.isUsable())
return Call;
- return PseudoObjectExpr::Create(Context, CE, {NewCall.get()}, 0);
+ return PseudoObjectExpr::Create(getASTContext(), CE, {NewCall.get()}, 0);
}
std::optional<std::pair<FunctionDecl *, Expr *>>
-Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
- Expr *VariantRef, OMPTraitInfo &TI,
- unsigned NumAppendArgs,
- SourceRange SR) {
+SemaOpenMP::checkOpenMPDeclareVariantFunction(SemaOpenMP::DeclGroupPtrTy DG,
+ Expr *VariantRef,
+ OMPTraitInfo &TI,
+ unsigned NumAppendArgs,
+ SourceRange SR) {
+ ASTContext &Context = getASTContext();
if (!DG || DG.get().isNull())
return std::nullopt;
@@ -7555,7 +7344,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
// Check if the function was emitted already.
const FunctionDecl *Definition;
if (!FD->isThisDeclarationADefinition() && FD->isDefined(Definition) &&
- (LangOpts.EmitAllDecls || Context.DeclMustBeEmitted(Definition)))
+ (getLangOpts().EmitAllDecls || Context.DeclMustBeEmitted(Definition)))
Diag(SR.getBegin(), diag::warn_omp_declare_variant_after_emitted)
<< FD->getLocation();
@@ -7578,7 +7367,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
// Deal with non-constant score and user condition expressions.
auto HandleNonConstantScoresAndConditions = [this](Expr *&E,
bool IsScore) -> bool {
- if (!E || E->isIntegerConstantExpr(Context))
+ if (!E || E->isIntegerConstantExpr(getASTContext()))
return false;
if (IsScore) {
@@ -7610,9 +7399,9 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
// Adjust the function type to account for an extra omp_interop_t for each
// specified in the append_args clause.
const TypeDecl *TD = nullptr;
- LookupResult Result(*this, &Context.Idents.get("omp_interop_t"),
+ LookupResult Result(SemaRef, &Context.Idents.get("omp_interop_t"),
SR.getBegin(), Sema::LookupOrdinaryName);
- if (LookupName(Result, getCurScope())) {
+ if (SemaRef.LookupName(Result, SemaRef.getCurScope())) {
NamedDecl *ND = Result.getFoundDecl();
TD = dyn_cast_or_null<TypeDecl>(ND);
}
@@ -7635,7 +7424,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
// Convert VariantRef expression to the type of the original function to
// resolve possible conflicts.
ExprResult VariantRefCast = VariantRef;
- if (LangOpts.CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
QualType FnPtrType;
auto *Method = dyn_cast<CXXMethodDecl>(FD);
if (Method && !Method->isStatic()) {
@@ -7644,11 +7433,11 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
FnPtrType = Context.getMemberPointerType(AdjustedFnType, ClassType);
ExprResult ER;
{
- // Build adrr_of unary op to correctly handle type checks for member
+ // Build addr_of unary op to correctly handle type checks for member
// functions.
- Sema::TentativeAnalysisScope Trap(*this);
- ER = CreateBuiltinUnaryOp(VariantRef->getBeginLoc(), UO_AddrOf,
- VariantRef);
+ Sema::TentativeAnalysisScope Trap(SemaRef);
+ ER = SemaRef.CreateBuiltinUnaryOp(VariantRef->getBeginLoc(), UO_AddrOf,
+ VariantRef);
}
if (!ER.isUsable()) {
Diag(VariantRef->getExprLoc(), diag::err_omp_function_expected)
@@ -7661,9 +7450,9 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
}
QualType VarianPtrType = Context.getPointerType(VariantRef->getType());
if (VarianPtrType.getUnqualifiedType() != FnPtrType.getUnqualifiedType()) {
- ImplicitConversionSequence ICS = TryImplicitConversion(
+ ImplicitConversionSequence ICS = SemaRef.TryImplicitConversion(
VariantRef, FnPtrType.getUnqualifiedType(),
- /*SuppressUserConversions=*/false, AllowedExplicit::None,
+ /*SuppressUserConversions=*/false, Sema::AllowedExplicit::None,
/*InOverloadResolution=*/false,
/*CStyle=*/false,
/*AllowObjCWritebackConversion=*/false);
@@ -7675,8 +7464,8 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
<< (NumAppendArgs ? 1 : 0) << VariantRef->getSourceRange();
return std::nullopt;
}
- VariantRefCast = PerformImplicitConversion(
- VariantRef, FnPtrType.getUnqualifiedType(), AA_Converting);
+ VariantRefCast = SemaRef.PerformImplicitConversion(
+ VariantRef, FnPtrType.getUnqualifiedType(), Sema::AA_Converting);
if (!VariantRefCast.isUsable())
return std::nullopt;
}
@@ -7689,7 +7478,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
}
}
- ExprResult ER = CheckPlaceholderExpr(VariantRefCast.get());
+ ExprResult ER = SemaRef.CheckPlaceholderExpr(VariantRefCast.get());
if (!ER.isUsable() ||
!ER.get()->IgnoreParenImpCasts()->getType()->isFunctionType()) {
Diag(VariantRef->getExprLoc(), diag::err_omp_function_expected)
@@ -7719,7 +7508,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
}
// Check if function types are compatible in C.
- if (!LangOpts.CPlusPlus) {
+ if (!getLangOpts().CPlusPlus) {
QualType NewType =
Context.mergeFunctionTypes(AdjustedFnType, NewFD->getType());
if (NewType.isNull()) {
@@ -7731,9 +7520,9 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
}
if (NewType->isFunctionProtoType()) {
if (FD->getType()->isFunctionNoProtoType())
- setPrototype(*this, FD, NewFD, NewType);
+ setPrototype(SemaRef, FD, NewFD, NewType);
else if (NewFD->getType()->isFunctionNoProtoType())
- setPrototype(*this, NewFD, FD, NewType);
+ setPrototype(SemaRef, NewFD, FD, NewType);
}
}
@@ -7796,7 +7585,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
}
// Check general compatibility.
- if (areMultiversionVariantFunctionsCompatible(
+ if (SemaRef.areMultiversionVariantFunctionsCompatible(
FD, NewFD, PartialDiagnostic::NullDiagnostic(),
PartialDiagnosticAt(SourceLocation(),
PartialDiagnostic::NullDiagnostic()),
@@ -7812,7 +7601,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
return std::make_pair(FD, cast<Expr>(DRE));
}
-void Sema::ActOnOpenMPDeclareVariantDirective(
+void SemaOpenMP::ActOnOpenMPDeclareVariantDirective(
FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI,
ArrayRef<Expr *> AdjustArgsNothing,
ArrayRef<Expr *> AdjustArgsNeedDevicePtr,
@@ -7830,7 +7619,7 @@ void Sema::ActOnOpenMPDeclareVariantDirective(
if (!AllAdjustArgs.empty() || !AppendArgs.empty()) {
VariantMatchInfo VMI;
- TI.getAsVariantMatchInfo(Context, VMI);
+ TI.getAsVariantMatchInfo(getASTContext(), VMI);
if (!llvm::is_contained(
VMI.ConstructTraits,
llvm::omp::TraitProperty::construct_dispatch_dispatch)) {
@@ -7873,22 +7662,18 @@ void Sema::ActOnOpenMPDeclareVariantDirective(
}
auto *NewAttr = OMPDeclareVariantAttr::CreateImplicit(
- Context, VariantRef, &TI, const_cast<Expr **>(AdjustArgsNothing.data()),
- AdjustArgsNothing.size(),
+ getASTContext(), VariantRef, &TI,
+ const_cast<Expr **>(AdjustArgsNothing.data()), AdjustArgsNothing.size(),
const_cast<Expr **>(AdjustArgsNeedDevicePtr.data()),
AdjustArgsNeedDevicePtr.size(),
const_cast<OMPInteropInfo *>(AppendArgs.data()), AppendArgs.size(), SR);
FD->addAttr(NewAttr);
}
-StmtResult Sema::ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
- if (!AStmt)
- return StmtError();
-
- auto *CS = cast<CapturedStmt>(AStmt);
+static CapturedStmt *
+setBranchProtectedScope(Sema &SemaRef, OpenMPDirectiveKind DKind, Stmt *AStmt) {
+ auto *CS = dyn_cast<CapturedStmt>(AStmt);
+ assert(CS && "Captured statement expected");
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -7896,11 +7681,32 @@ StmtResult Sema::ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
// longjmp() and throw() must not violate the entry/exit criteria.
CS->getCapturedDecl()->setNothrow();
- setFunctionHasBranchProtectedScope();
+ for (int ThisCaptureLevel = SemaRef.OpenMP().getOpenMPCaptureLevels(DKind);
+ ThisCaptureLevel > 1; --ThisCaptureLevel) {
+ CS = cast<CapturedStmt>(CS->getCapturedStmt());
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ }
+ SemaRef.setFunctionHasBranchProtectedScope();
+ return CS;
+}
- return OMPParallelDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
- DSAStack->getTaskgroupReductionRef(),
- DSAStack->isCancelRegion());
+StmtResult
+SemaOpenMP::ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ setBranchProtectedScope(SemaRef, OMPD_parallel, AStmt);
+
+ return OMPParallelDirective::Create(
+ getASTContext(), StartLoc, EndLoc, Clauses, AStmt,
+ DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
namespace {
@@ -8150,7 +7956,7 @@ bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) {
if (!NewStep->isValueDependent()) {
// Check that the step is integer expression.
SourceLocation StepLoc = NewStep->getBeginLoc();
- ExprResult Val = SemaRef.PerformOpenMPImplicitIntegerConversion(
+ ExprResult Val = SemaRef.OpenMP().PerformOpenMPImplicitIntegerConversion(
StepLoc, getExprAsWritten(NewStep));
if (Val.isInvalid())
return true;
@@ -8186,7 +7992,7 @@ bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) {
diag::err_omp_loop_incr_not_compatible)
<< LCDecl << *TestIsLessOp << NewStep->getSourceRange();
SemaRef.Diag(ConditionLoc,
- diag::note_omp_loop_cond_requres_compatible_incr)
+ diag::note_omp_loop_cond_requires_compatible_incr)
<< *TestIsLessOp << ConditionSrcRange;
return true;
}
@@ -9172,7 +8978,7 @@ DeclRefExpr *OpenMPIterationSpaceChecker::buildCounterVar(
DSAStackTy &DSA) const {
auto *VD = dyn_cast<VarDecl>(LCDecl);
if (!VD) {
- VD = SemaRef.isOpenMPCapturedDecl(LCDecl);
+ VD = SemaRef.OpenMP().isOpenMPCapturedDecl(LCDecl);
DeclRefExpr *Ref = buildDeclRefExpr(
SemaRef, VD, VD->getType().getNonReferenceType(), DefaultLoc);
const DSAStackTy::DSAVarData Data =
@@ -9245,92 +9051,91 @@ Expr *OpenMPIterationSpaceChecker::buildOrderedLoopData(
}
} // namespace
-void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
+void SemaOpenMP::ActOnOpenMPLoopInitialization(SourceLocation ForLoc,
+ Stmt *Init) {
assert(getLangOpts().OpenMP && "OpenMP is not active.");
assert(Init && "Expected loop in canonical form.");
unsigned AssociatedLoops = DSAStack->getAssociatedLoops();
- if (AssociatedLoops > 0 &&
- isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
- DSAStack->loopStart();
- OpenMPIterationSpaceChecker ISC(*this, /*SupportsNonRectangular=*/true,
- *DSAStack, ForLoc);
- if (!ISC.checkAndSetInit(Init, /*EmitDiags=*/false)) {
- if (ValueDecl *D = ISC.getLoopDecl()) {
- auto *VD = dyn_cast<VarDecl>(D);
- DeclRefExpr *PrivateRef = nullptr;
- if (!VD) {
- if (VarDecl *Private = isOpenMPCapturedDecl(D)) {
- VD = Private;
- } else {
- PrivateRef = buildCapture(*this, D, ISC.getLoopDeclRefExpr(),
- /*WithInit=*/false);
- VD = cast<VarDecl>(PrivateRef->getDecl());
- }
- }
- DSAStack->addLoopControlVariable(D, VD);
- const Decl *LD = DSAStack->getPossiblyLoopCunter();
- if (LD != D->getCanonicalDecl()) {
- DSAStack->resetPossibleLoopCounter();
- if (auto *Var = dyn_cast_or_null<VarDecl>(LD))
- MarkDeclarationsReferencedInExpr(
- buildDeclRefExpr(*this, const_cast<VarDecl *>(Var),
- Var->getType().getNonLValueExprType(Context),
- ForLoc, /*RefersToCapture=*/true));
- }
- OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
- // OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables
- // Referenced in a Construct, C/C++]. The loop iteration variable in the
- // associated for-loop of a simd construct with just one associated
- // for-loop may be listed in a linear clause with a constant-linear-step
- // that is the increment of the associated for-loop. The loop iteration
- // variable(s) in the associated for-loop(s) of a for or parallel for
- // construct may be listed in a private or lastprivate clause.
- DSAStackTy::DSAVarData DVar =
- DSAStack->getTopDSA(D, /*FromParent=*/false);
- // If LoopVarRefExpr is nullptr it means the corresponding loop variable
- // is declared in the loop and it is predetermined as a private.
- Expr *LoopDeclRefExpr = ISC.getLoopDeclRefExpr();
- OpenMPClauseKind PredeterminedCKind =
- isOpenMPSimdDirective(DKind)
- ? (DSAStack->hasMutipleLoops() ? OMPC_lastprivate : OMPC_linear)
- : OMPC_private;
- if (((isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
- DVar.CKind != PredeterminedCKind && DVar.RefExpr &&
- (LangOpts.OpenMP <= 45 || (DVar.CKind != OMPC_lastprivate &&
- DVar.CKind != OMPC_private))) ||
- ((isOpenMPWorksharingDirective(DKind) || DKind == OMPD_taskloop ||
- DKind == OMPD_master_taskloop || DKind == OMPD_masked_taskloop ||
- DKind == OMPD_parallel_master_taskloop ||
- DKind == OMPD_parallel_masked_taskloop ||
- isOpenMPDistributeDirective(DKind)) &&
- !isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
- DVar.CKind != OMPC_private && DVar.CKind != OMPC_lastprivate)) &&
- (DVar.CKind != OMPC_private || DVar.RefExpr)) {
- Diag(Init->getBeginLoc(), diag::err_omp_loop_var_dsa)
- << getOpenMPClauseName(DVar.CKind)
- << getOpenMPDirectiveName(DKind)
- << getOpenMPClauseName(PredeterminedCKind);
- if (DVar.RefExpr == nullptr)
- DVar.CKind = PredeterminedCKind;
- reportOriginalDsa(*this, DSAStack, D, DVar,
- /*IsLoopIterVar=*/true);
- } else if (LoopDeclRefExpr) {
- // Make the loop iteration variable private (for worksharing
- // constructs), linear (for simd directives with the only one
- // associated loop) or lastprivate (for simd directives with several
- // collapsed or ordered loops).
- if (DVar.CKind == OMPC_unknown)
- DSAStack->addDSA(D, LoopDeclRefExpr, PredeterminedCKind,
- PrivateRef);
+ OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
+ if (AssociatedLoops == 0 || !isOpenMPLoopDirective(DKind))
+ return;
+
+ DSAStack->loopStart();
+ OpenMPIterationSpaceChecker ISC(SemaRef, /*SupportsNonRectangular=*/true,
+ *DSAStack, ForLoc);
+ if (!ISC.checkAndSetInit(Init, /*EmitDiags=*/false)) {
+ if (ValueDecl *D = ISC.getLoopDecl()) {
+ auto *VD = dyn_cast<VarDecl>(D);
+ DeclRefExpr *PrivateRef = nullptr;
+ if (!VD) {
+ if (VarDecl *Private = isOpenMPCapturedDecl(D)) {
+ VD = Private;
+ } else {
+ PrivateRef = buildCapture(SemaRef, D, ISC.getLoopDeclRefExpr(),
+ /*WithInit=*/false);
+ VD = cast<VarDecl>(PrivateRef->getDecl());
}
}
+ DSAStack->addLoopControlVariable(D, VD);
+ const Decl *LD = DSAStack->getPossiblyLoopCounter();
+ if (LD != D->getCanonicalDecl()) {
+ DSAStack->resetPossibleLoopCounter();
+ if (auto *Var = dyn_cast_or_null<VarDecl>(LD))
+ SemaRef.MarkDeclarationsReferencedInExpr(buildDeclRefExpr(
+ SemaRef, const_cast<VarDecl *>(Var),
+ Var->getType().getNonLValueExprType(getASTContext()), ForLoc,
+ /*RefersToCapture=*/true));
+ }
+ // OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables
+ // Referenced in a Construct, C/C++]. The loop iteration variable in the
+ // associated for-loop of a simd construct with just one associated
+ // for-loop may be listed in a linear clause with a constant-linear-step
+ // that is the increment of the associated for-loop. The loop iteration
+ // variable(s) in the associated for-loop(s) of a for or parallel for
+ // construct may be listed in a private or lastprivate clause.
+ DSAStackTy::DSAVarData DVar =
+ DSAStack->getTopDSA(D, /*FromParent=*/false);
+ // If LoopVarRefExpr is nullptr it means the corresponding loop variable
+ // is declared in the loop and it is predetermined as a private.
+ Expr *LoopDeclRefExpr = ISC.getLoopDeclRefExpr();
+ OpenMPClauseKind PredeterminedCKind =
+ isOpenMPSimdDirective(DKind)
+ ? (DSAStack->hasMutipleLoops() ? OMPC_lastprivate : OMPC_linear)
+ : OMPC_private;
+ auto IsOpenMPTaskloopDirective = [](OpenMPDirectiveKind DK) {
+ return getLeafConstructsOrSelf(DK).back() == OMPD_taskloop;
+ };
+ if (((isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
+ DVar.CKind != PredeterminedCKind && DVar.RefExpr &&
+ (getLangOpts().OpenMP <= 45 ||
+ (DVar.CKind != OMPC_lastprivate && DVar.CKind != OMPC_private))) ||
+ ((isOpenMPWorksharingDirective(DKind) ||
+ IsOpenMPTaskloopDirective(DKind) ||
+ isOpenMPDistributeDirective(DKind)) &&
+ !isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
+ DVar.CKind != OMPC_private && DVar.CKind != OMPC_lastprivate)) &&
+ (DVar.CKind != OMPC_private || DVar.RefExpr)) {
+ Diag(Init->getBeginLoc(), diag::err_omp_loop_var_dsa)
+ << getOpenMPClauseName(DVar.CKind) << getOpenMPDirectiveName(DKind)
+ << getOpenMPClauseName(PredeterminedCKind);
+ if (DVar.RefExpr == nullptr)
+ DVar.CKind = PredeterminedCKind;
+ reportOriginalDsa(SemaRef, DSAStack, D, DVar, /*IsLoopIterVar=*/true);
+ } else if (LoopDeclRefExpr) {
+ // Make the loop iteration variable private (for worksharing
+ // constructs), linear (for simd directives with the only one
+ // associated loop) or lastprivate (for simd directives with several
+ // collapsed or ordered loops).
+ if (DVar.CKind == OMPC_unknown)
+ DSAStack->addDSA(D, LoopDeclRefExpr, PredeterminedCKind, PrivateRef);
+ }
}
- DSAStack->setAssociatedLoops(AssociatedLoops - 1);
}
+ DSAStack->setAssociatedLoops(AssociatedLoops - 1);
}
namespace {
-// Utility for openmp doacross clause kind
+// Utility for OpenMP doacross clause kind
class OMPDoacrossKind {
public:
bool isSource(const OMPDoacrossClause *C) {
@@ -9352,7 +9157,7 @@ static bool checkOpenMPIterationSpace(
unsigned CurrentNestedLoopCount, unsigned NestedLoopCount,
unsigned TotalNestedLoopCount, Expr *CollapseLoopCountExpr,
Expr *OrderedLoopCountExpr,
- Sema::VarsWithInheritedDSAType &VarsWithImplicitDSA,
+ SemaOpenMP::VarsWithInheritedDSAType &VarsWithImplicitDSA,
llvm::MutableArrayRef<LoopIterationSpace> ResultIterSpaces,
llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
bool SupportsNonRectangular = !isOpenMPLoopTransformationDirective(DKind);
@@ -9701,6 +9506,25 @@ static Stmt *buildPreInits(ASTContext &Context,
return nullptr;
}
+/// Append the \p Item or the content of a CompoundStmt to the list \p
+/// TargetList.
+///
+/// A CompoundStmt is used as container in case multiple statements need to be
+/// stored in lieu of using an explicit list. Flattening is necessary because
+/// contained DeclStmts need to be visible after the execution of the list. Used
+/// for OpenMP pre-init declarations/statements.
+static void appendFlattenedStmtList(SmallVectorImpl<Stmt *> &TargetList,
+ Stmt *Item) {
+ // nullptr represents an empty list.
+ if (!Item)
+ return;
+
+ if (auto *CS = dyn_cast<CompoundStmt>(Item))
+ llvm::append_range(TargetList, CS->body());
+ else
+ TargetList.push_back(Item);
+}
+
/// Build preinits statement for the given declarations.
static Stmt *
buildPreInits(ASTContext &Context,
@@ -9714,6 +9538,17 @@ buildPreInits(ASTContext &Context,
return nullptr;
}
+/// Build pre-init statement for the given statements.
+static Stmt *buildPreInits(ASTContext &Context, ArrayRef<Stmt *> PreInits) {
+ if (PreInits.empty())
+ return nullptr;
+
+ SmallVector<Stmt *> Stmts;
+ for (Stmt *S : PreInits)
+ appendFlattenedStmtList(Stmts, S);
+ return CompoundStmt::Create(Context, PreInits, FPOptionsOverride(), {}, {});
+}
+
/// Build postupdate expression for the given list of postupdates expressions.
static Expr *buildPostUpdate(Sema &S, ArrayRef<Expr *> PostUpdates) {
Expr *PostUpdate = nullptr;
@@ -9741,7 +9576,7 @@ static unsigned
checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
Expr *OrderedLoopCountExpr, Stmt *AStmt, Sema &SemaRef,
DSAStackTy &DSA,
- Sema::VarsWithInheritedDSAType &VarsWithImplicitDSA,
+ SemaOpenMP::VarsWithInheritedDSAType &VarsWithImplicitDSA,
OMPLoopBasedDirective::HelperExprs &Built) {
unsigned NestedLoopCount = 1;
bool SupportsNonPerfectlyNested = (SemaRef.LangOpts.OpenMP >= 50) &&
@@ -9810,11 +9645,21 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
Stmt *DependentPreInits = Transform->getPreInits();
if (!DependentPreInits)
return;
- for (Decl *C : cast<DeclStmt>(DependentPreInits)->getDeclGroup()) {
- auto *D = cast<VarDecl>(C);
- DeclRefExpr *Ref = buildDeclRefExpr(SemaRef, D, D->getType(),
- Transform->getBeginLoc());
- Captures[Ref] = Ref;
+
+ // Search for pre-init declared variables that need to be captured
+ // to be referenceable inside the directive.
+ SmallVector<Stmt *> Constituents;
+ appendFlattenedStmtList(Constituents, DependentPreInits);
+ for (Stmt *S : Constituents) {
+ if (auto *DC = dyn_cast<DeclStmt>(S)) {
+ for (Decl *C : DC->decls()) {
+ auto *D = cast<VarDecl>(C);
+ DeclRefExpr *Ref = buildDeclRefExpr(
+ SemaRef, D, D->getType().getNonReferenceType(),
+ Transform->getBeginLoc());
+ Captures[Ref] = Ref;
+ }
+ }
}
}))
return 0;
@@ -10490,7 +10335,8 @@ static bool checkGenericLoopLastprivate(Sema &S, ArrayRef<OMPClause *> Clauses,
OpenMPDirectiveKind K,
DSAStackTy *Stack);
-bool Sema::checkLastPrivateForMappedDirectives(ArrayRef<OMPClause *> Clauses) {
+bool SemaOpenMP::checkLastPrivateForMappedDirectives(
+ ArrayRef<OMPClause *> Clauses) {
// Check for syntax of lastprivate
// Param of the lastprivate have different meanings in the mapped directives
@@ -10498,16 +10344,15 @@ bool Sema::checkLastPrivateForMappedDirectives(ArrayRef<OMPClause *> Clauses) {
// "omp for" lastprivate vars must be shared
if (getLangOpts().OpenMP >= 50 &&
DSAStack->getMappedDirective() == OMPD_loop &&
- checkGenericLoopLastprivate(*this, Clauses, OMPD_loop, DSAStack)) {
+ checkGenericLoopLastprivate(SemaRef, Clauses, OMPD_loop, DSAStack)) {
return false;
}
return true;
}
-StmtResult
-Sema::ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
- SourceLocation StartLoc, SourceLocation EndLoc,
- VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+StmtResult SemaOpenMP::ActOnOpenMPSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
@@ -10520,38 +10365,26 @@ Sema::ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_simd, getCollapseNumberExpr(Clauses), getOrderedNumberExpr(Clauses),
- AStmt, *this, *DSAStack, VarsWithImplicitDSA, B);
+ AStmt, SemaRef, *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
- "omp simd loop exprs were not built");
-
- if (!CurContext->isDependentContext()) {
- // Finalize the clauses that need pre-built expressions for CodeGen.
- for (OMPClause *C : Clauses) {
- if (auto *LC = dyn_cast<OMPLinearClause>(C))
- if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
- B.NumIterations, *this, CurScope,
- DSAStack))
- return StmtError();
- }
- }
+ if (finishLinearClauses(SemaRef, Clauses, B, DSAStack))
+ return StmtError();
- if (checkSimdlenSafelenSpecified(*this, Clauses))
+ if (checkSimdlenSafelenSpecified(SemaRef, Clauses))
return StmtError();
- setFunctionHasBranchProtectedScope();
+ SemaRef.setFunctionHasBranchProtectedScope();
auto *SimdDirective = OMPSimdDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
DSAStack->getMappedDirective());
return SimdDirective;
}
-StmtResult
-Sema::ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
- SourceLocation StartLoc, SourceLocation EndLoc,
- VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+StmtResult SemaOpenMP::ActOnOpenMPForDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
@@ -10564,32 +10397,21 @@ Sema::ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_for, getCollapseNumberExpr(Clauses), getOrderedNumberExpr(Clauses),
- AStmt, *this, *DSAStack, VarsWithImplicitDSA, B);
+ AStmt, SemaRef, *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
- "omp for loop exprs were not built");
-
- if (!CurContext->isDependentContext()) {
- // Finalize the clauses that need pre-built expressions for CodeGen.
- for (OMPClause *C : Clauses) {
- if (auto *LC = dyn_cast<OMPLinearClause>(C))
- if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
- B.NumIterations, *this, CurScope,
- DSAStack))
- return StmtError();
- }
- }
+ if (finishLinearClauses(SemaRef, Clauses, B, DSAStack))
+ return StmtError();
auto *ForDirective = OMPForDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion(),
DSAStack->getMappedDirective());
return ForDirective;
}
-StmtResult Sema::ActOnOpenMPForSimdDirective(
+StmtResult SemaOpenMP::ActOnOpenMPForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
@@ -10601,39 +10423,26 @@ StmtResult Sema::ActOnOpenMPForSimdDirective(
// define the nested loops number.
unsigned NestedLoopCount =
checkOpenMPLoop(OMPD_for_simd, getCollapseNumberExpr(Clauses),
- getOrderedNumberExpr(Clauses), AStmt, *this, *DSAStack,
+ getOrderedNumberExpr(Clauses), AStmt, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
- "omp for simd loop exprs were not built");
-
- if (!CurContext->isDependentContext()) {
- // Finalize the clauses that need pre-built expressions for CodeGen.
- for (OMPClause *C : Clauses) {
- if (auto *LC = dyn_cast<OMPLinearClause>(C))
- if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
- B.NumIterations, *this, CurScope,
- DSAStack))
- return StmtError();
- }
- }
+ if (finishLinearClauses(SemaRef, Clauses, B, DSAStack))
+ return StmtError();
- if (checkSimdlenSafelenSpecified(*this, Clauses))
+ if (checkSimdlenSafelenSpecified(SemaRef, Clauses))
return StmtError();
- setFunctionHasBranchProtectedScope();
- return OMPForSimdDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
- Clauses, AStmt, B);
+ SemaRef.setFunctionHasBranchProtectedScope();
+ return OMPForSimdDirective::Create(getASTContext(), StartLoc, EndLoc,
+ NestedLoopCount, Clauses, AStmt, B);
}
-StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
+static bool checkSectionsDirective(Sema &SemaRef, OpenMPDirectiveKind DKind,
+ Stmt *AStmt, DSAStackTy *Stack) {
if (!AStmt)
- return StmtError();
+ return true;
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
auto BaseStmt = AStmt;
@@ -10642,41 +10451,52 @@ StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
if (auto *C = dyn_cast_or_null<CompoundStmt>(BaseStmt)) {
auto S = C->children();
if (S.begin() == S.end())
- return StmtError();
+ return true;
// All associated statements must be '#pragma omp section' except for
// the first one.
for (Stmt *SectionStmt : llvm::drop_begin(S)) {
if (!SectionStmt || !isa<OMPSectionDirective>(SectionStmt)) {
if (SectionStmt)
- Diag(SectionStmt->getBeginLoc(),
- diag::err_omp_sections_substmt_not_section);
- return StmtError();
+ SemaRef.Diag(SectionStmt->getBeginLoc(),
+ diag::err_omp_sections_substmt_not_section)
+ << getOpenMPDirectiveName(DKind);
+ return true;
}
cast<OMPSectionDirective>(SectionStmt)
- ->setHasCancel(DSAStack->isCancelRegion());
+ ->setHasCancel(Stack->isCancelRegion());
}
} else {
- Diag(AStmt->getBeginLoc(), diag::err_omp_sections_not_compound_stmt);
- return StmtError();
+ SemaRef.Diag(AStmt->getBeginLoc(), diag::err_omp_sections_not_compound_stmt)
+ << getOpenMPDirectiveName(DKind);
+ return true;
}
+ return false;
+}
+
+StmtResult
+SemaOpenMP::ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (checkSectionsDirective(SemaRef, OMPD_sections, AStmt, DSAStack))
+ return StmtError();
- setFunctionHasBranchProtectedScope();
+ SemaRef.setFunctionHasBranchProtectedScope();
- return OMPSectionsDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
- DSAStack->getTaskgroupReductionRef(),
- DSAStack->isCancelRegion());
+ return OMPSectionsDirective::Create(
+ getASTContext(), StartLoc, EndLoc, Clauses, AStmt,
+ DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
-StmtResult Sema::ActOnOpenMPSectionDirective(Stmt *AStmt,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult SemaOpenMP::ActOnOpenMPSectionDirective(Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
if (!AStmt)
return StmtError();
- setFunctionHasBranchProtectedScope();
+ SemaRef.setFunctionHasBranchProtectedScope();
DSAStack->setParentCancelRegion(DSAStack->isCancelRegion());
- return OMPSectionDirective::Create(Context, StartLoc, EndLoc, AStmt,
+ return OMPSectionDirective::Create(getASTContext(), StartLoc, EndLoc, AStmt,
DSAStack->isCancelRegion());
}
@@ -10688,10 +10508,10 @@ static Expr *getDirectCallExpr(Expr *E) {
return nullptr;
}
-StmtResult Sema::ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult
+SemaOpenMP::ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
if (!AStmt)
return StmtError();
@@ -10704,7 +10524,7 @@ StmtResult Sema::ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation TargetCallLoc;
- if (!CurContext->isDependentContext()) {
+ if (!SemaRef.CurContext->isDependentContext()) {
Expr *TargetCall = nullptr;
auto *E = dyn_cast<Expr>(S);
@@ -10732,10 +10552,10 @@ StmtResult Sema::ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
TargetCallLoc = TargetCall->getExprLoc();
}
- setFunctionHasBranchProtectedScope();
+ SemaRef.setFunctionHasBranchProtectedScope();
- return OMPDispatchDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
- TargetCallLoc);
+ return OMPDispatchDirective::Create(getASTContext(), StartLoc, EndLoc,
+ Clauses, AStmt, TargetCallLoc);
}
static bool checkGenericLoopLastprivate(Sema &S, ArrayRef<OMPClause *> Clauses,
@@ -10763,7 +10583,7 @@ static bool checkGenericLoopLastprivate(Sema &S, ArrayRef<OMPClause *> Clauses,
return ErrorFound;
}
-StmtResult Sema::ActOnOpenMPGenericLoopDirective(
+StmtResult SemaOpenMP::ActOnOpenMPGenericLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
@@ -10772,34 +10592,27 @@ StmtResult Sema::ActOnOpenMPGenericLoopDirective(
// OpenMP 5.1 [2.11.7, loop construct, Restrictions]
// A list item may not appear in a lastprivate clause unless it is the
// loop iteration variable of a loop that is associated with the construct.
- if (checkGenericLoopLastprivate(*this, Clauses, OMPD_loop, DSAStack))
+ if (checkGenericLoopLastprivate(SemaRef, Clauses, OMPD_loop, DSAStack))
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
+ setBranchProtectedScope(SemaRef, OMPD_loop, AStmt);
OMPLoopDirective::HelperExprs B;
// In presence of clause 'collapse', it will define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_loop, getCollapseNumberExpr(Clauses), getOrderedNumberExpr(Clauses),
- AStmt, *this, *DSAStack, VarsWithImplicitDSA, B);
+ AStmt, SemaRef, *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
+ assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) &&
"omp loop exprs were not built");
- setFunctionHasBranchProtectedScope();
- return OMPGenericLoopDirective::Create(Context, StartLoc, EndLoc,
+ return OMPGenericLoopDirective::Create(getASTContext(), StartLoc, EndLoc,
NestedLoopCount, Clauses, AStmt, B);
}
-StmtResult Sema::ActOnOpenMPTeamsGenericLoopDirective(
+StmtResult SemaOpenMP::ActOnOpenMPTeamsGenericLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
@@ -10808,47 +10621,30 @@ StmtResult Sema::ActOnOpenMPTeamsGenericLoopDirective(
// OpenMP 5.1 [2.11.7, loop construct, Restrictions]
// A list item may not appear in a lastprivate clause unless it is the
// loop iteration variable of a loop that is associated with the construct.
- if (checkGenericLoopLastprivate(*this, Clauses, OMPD_teams_loop, DSAStack))
+ if (checkGenericLoopLastprivate(SemaRef, Clauses, OMPD_teams_loop, DSAStack))
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_teams_loop);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS = setBranchProtectedScope(SemaRef, OMPD_teams_loop, AStmt);
OMPLoopDirective::HelperExprs B;
// In presence of clause 'collapse', it will define the nested loops number.
unsigned NestedLoopCount =
checkOpenMPLoop(OMPD_teams_loop, getCollapseNumberExpr(Clauses),
- /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+ /*OrderedLoopCountExpr=*/nullptr, CS, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
+ assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) &&
"omp loop exprs were not built");
- setFunctionHasBranchProtectedScope();
DSAStack->setParentTeamsRegionLoc(StartLoc);
return OMPTeamsGenericLoopDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
-StmtResult Sema::ActOnOpenMPTargetTeamsGenericLoopDirective(
+StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsGenericLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
@@ -10857,47 +10653,31 @@ StmtResult Sema::ActOnOpenMPTargetTeamsGenericLoopDirective(
// OpenMP 5.1 [2.11.7, loop construct, Restrictions]
// A list item may not appear in a lastprivate clause unless it is the
// loop iteration variable of a loop that is associated with the construct.
- if (checkGenericLoopLastprivate(*this, Clauses, OMPD_target_teams_loop,
+ if (checkGenericLoopLastprivate(SemaRef, Clauses, OMPD_target_teams_loop,
DSAStack))
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_target_teams_loop);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS =
+ setBranchProtectedScope(SemaRef, OMPD_target_teams_loop, AStmt);
OMPLoopDirective::HelperExprs B;
// In presence of clause 'collapse', it will define the nested loops number.
unsigned NestedLoopCount =
checkOpenMPLoop(OMPD_target_teams_loop, getCollapseNumberExpr(Clauses),
- /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+ /*OrderedLoopCountExpr=*/nullptr, CS, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
+ assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) &&
"omp loop exprs were not built");
- setFunctionHasBranchProtectedScope();
-
return OMPTargetTeamsGenericLoopDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ teamsLoopCanBeParallelFor(AStmt, SemaRef));
}
-StmtResult Sema::ActOnOpenMPParallelGenericLoopDirective(
+StmtResult SemaOpenMP::ActOnOpenMPParallelGenericLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
@@ -10906,46 +10686,30 @@ StmtResult Sema::ActOnOpenMPParallelGenericLoopDirective(
// OpenMP 5.1 [2.11.7, loop construct, Restrictions]
// A list item may not appear in a lastprivate clause unless it is the
// loop iteration variable of a loop that is associated with the construct.
- if (checkGenericLoopLastprivate(*this, Clauses, OMPD_parallel_loop, DSAStack))
+ if (checkGenericLoopLastprivate(SemaRef, Clauses, OMPD_parallel_loop,
+ DSAStack))
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_parallel_loop);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS =
+ setBranchProtectedScope(SemaRef, OMPD_parallel_loop, AStmt);
OMPLoopDirective::HelperExprs B;
// In presence of clause 'collapse', it will define the nested loops number.
unsigned NestedLoopCount =
checkOpenMPLoop(OMPD_parallel_loop, getCollapseNumberExpr(Clauses),
- /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+ /*OrderedLoopCountExpr=*/nullptr, CS, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
+ assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) &&
"omp loop exprs were not built");
- setFunctionHasBranchProtectedScope();
-
return OMPParallelGenericLoopDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
-StmtResult Sema::ActOnOpenMPTargetParallelGenericLoopDirective(
+StmtResult SemaOpenMP::ActOnOpenMPTargetParallelGenericLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
@@ -10954,56 +10718,39 @@ StmtResult Sema::ActOnOpenMPTargetParallelGenericLoopDirective(
// OpenMP 5.1 [2.11.7, loop construct, Restrictions]
// A list item may not appear in a lastprivate clause unless it is the
// loop iteration variable of a loop that is associated with the construct.
- if (checkGenericLoopLastprivate(*this, Clauses, OMPD_target_parallel_loop,
+ if (checkGenericLoopLastprivate(SemaRef, Clauses, OMPD_target_parallel_loop,
DSAStack))
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_target_parallel_loop);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS =
+ setBranchProtectedScope(SemaRef, OMPD_target_parallel_loop, AStmt);
OMPLoopDirective::HelperExprs B;
// In presence of clause 'collapse', it will define the nested loops number.
unsigned NestedLoopCount =
checkOpenMPLoop(OMPD_target_parallel_loop, getCollapseNumberExpr(Clauses),
- /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+ /*OrderedLoopCountExpr=*/nullptr, CS, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
+ assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) &&
"omp loop exprs were not built");
- setFunctionHasBranchProtectedScope();
-
return OMPTargetParallelGenericLoopDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
-StmtResult Sema::ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult SemaOpenMP::ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
if (!AStmt)
return StmtError();
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- setFunctionHasBranchProtectedScope();
+ SemaRef.setFunctionHasBranchProtectedScope();
// OpenMP [2.7.3, single Construct, Restrictions]
// The copyprivate clause must not be used with the nowait clause.
@@ -11022,33 +10769,35 @@ StmtResult Sema::ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
}
}
- return OMPSingleDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
+ return OMPSingleDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses,
+ AStmt);
}
-StmtResult Sema::ActOnOpenMPMasterDirective(Stmt *AStmt,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult SemaOpenMP::ActOnOpenMPMasterDirective(Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
if (!AStmt)
return StmtError();
- setFunctionHasBranchProtectedScope();
+ SemaRef.setFunctionHasBranchProtectedScope();
- return OMPMasterDirective::Create(Context, StartLoc, EndLoc, AStmt);
+ return OMPMasterDirective::Create(getASTContext(), StartLoc, EndLoc, AStmt);
}
-StmtResult Sema::ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult SemaOpenMP::ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
if (!AStmt)
return StmtError();
- setFunctionHasBranchProtectedScope();
+ SemaRef.setFunctionHasBranchProtectedScope();
- return OMPMaskedDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
+ return OMPMaskedDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses,
+ AStmt);
}
-StmtResult Sema::ActOnOpenMPCriticalDirective(
+StmtResult SemaOpenMP::ActOnOpenMPCriticalDirective(
const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) {
if (!AStmt)
@@ -11069,7 +10818,7 @@ StmtResult Sema::ActOnOpenMPCriticalDirective(
E->isInstantiationDependent()) {
DependentHint = true;
} else {
- Hint = E->EvaluateKnownConstInt(Context);
+ Hint = E->EvaluateKnownConstInt(getASTContext());
HintLoc = C->getBeginLoc();
}
}
@@ -11088,7 +10837,7 @@ StmtResult Sema::ActOnOpenMPCriticalDirective(
if (const auto *C = Pair.first->getSingleClause<OMPHintClause>()) {
Diag(C->getBeginLoc(), diag::note_omp_critical_hint_here)
<< 1
- << toString(C->getHint()->EvaluateKnownConstInt(Context),
+ << toString(C->getHint()->EvaluateKnownConstInt(getASTContext()),
/*Radix=*/10, /*Signed=*/false);
} else {
Diag(Pair.first->getBeginLoc(), diag::note_omp_critical_no_hint) << 1;
@@ -11096,185 +10845,105 @@ StmtResult Sema::ActOnOpenMPCriticalDirective(
}
}
- setFunctionHasBranchProtectedScope();
+ SemaRef.setFunctionHasBranchProtectedScope();
- auto *Dir = OMPCriticalDirective::Create(Context, DirName, StartLoc, EndLoc,
- Clauses, AStmt);
+ auto *Dir = OMPCriticalDirective::Create(getASTContext(), DirName, StartLoc,
+ EndLoc, Clauses, AStmt);
if (!Pair.first && DirName.getName() && !DependentHint)
DSAStack->addCriticalWithHint(Dir, Hint);
return Dir;
}
-StmtResult Sema::ActOnOpenMPParallelForDirective(
+StmtResult SemaOpenMP::ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
+ setBranchProtectedScope(SemaRef, OMPD_parallel_for, AStmt);
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
checkOpenMPLoop(OMPD_parallel_for, getCollapseNumberExpr(Clauses),
- getOrderedNumberExpr(Clauses), AStmt, *this, *DSAStack,
+ getOrderedNumberExpr(Clauses), AStmt, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
- "omp parallel for loop exprs were not built");
-
- if (!CurContext->isDependentContext()) {
- // Finalize the clauses that need pre-built expressions for CodeGen.
- for (OMPClause *C : Clauses) {
- if (auto *LC = dyn_cast<OMPLinearClause>(C))
- if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
- B.NumIterations, *this, CurScope,
- DSAStack))
- return StmtError();
- }
- }
+ if (finishLinearClauses(SemaRef, Clauses, B, DSAStack))
+ return StmtError();
- setFunctionHasBranchProtectedScope();
return OMPParallelForDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
-StmtResult Sema::ActOnOpenMPParallelForSimdDirective(
+StmtResult SemaOpenMP::ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
+ setBranchProtectedScope(SemaRef, OMPD_parallel_for_simd, AStmt);
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
checkOpenMPLoop(OMPD_parallel_for_simd, getCollapseNumberExpr(Clauses),
- getOrderedNumberExpr(Clauses), AStmt, *this, *DSAStack,
+ getOrderedNumberExpr(Clauses), AStmt, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- if (!CurContext->isDependentContext()) {
- // Finalize the clauses that need pre-built expressions for CodeGen.
- for (OMPClause *C : Clauses) {
- if (auto *LC = dyn_cast<OMPLinearClause>(C))
- if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
- B.NumIterations, *this, CurScope,
- DSAStack))
- return StmtError();
- }
- }
+ if (finishLinearClauses(SemaRef, Clauses, B, DSAStack))
+ return StmtError();
- if (checkSimdlenSafelenSpecified(*this, Clauses))
+ if (checkSimdlenSafelenSpecified(SemaRef, Clauses))
return StmtError();
- setFunctionHasBranchProtectedScope();
return OMPParallelForSimdDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
-StmtResult
-Sema::ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult SemaOpenMP::ActOnOpenMPParallelMasterDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
if (!AStmt)
return StmtError();
- assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
-
- setFunctionHasBranchProtectedScope();
+ setBranchProtectedScope(SemaRef, OMPD_parallel_master, AStmt);
return OMPParallelMasterDirective::Create(
- Context, StartLoc, EndLoc, Clauses, AStmt,
+ getASTContext(), StartLoc, EndLoc, Clauses, AStmt,
DSAStack->getTaskgroupReductionRef());
}
-StmtResult
-Sema::ActOnOpenMPParallelMaskedDirective(ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult SemaOpenMP::ActOnOpenMPParallelMaskedDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
if (!AStmt)
return StmtError();
- assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
-
- setFunctionHasBranchProtectedScope();
+ setBranchProtectedScope(SemaRef, OMPD_parallel_masked, AStmt);
return OMPParallelMaskedDirective::Create(
- Context, StartLoc, EndLoc, Clauses, AStmt,
+ getASTContext(), StartLoc, EndLoc, Clauses, AStmt,
DSAStack->getTaskgroupReductionRef());
}
-StmtResult
-Sema::ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc) {
- if (!AStmt)
- return StmtError();
-
- assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- auto BaseStmt = AStmt;
- while (auto *CS = dyn_cast_or_null<CapturedStmt>(BaseStmt))
- BaseStmt = CS->getCapturedStmt();
- if (auto *C = dyn_cast_or_null<CompoundStmt>(BaseStmt)) {
- auto S = C->children();
- if (S.begin() == S.end())
- return StmtError();
- // All associated statements must be '#pragma omp section' except for
- // the first one.
- for (Stmt *SectionStmt : llvm::drop_begin(S)) {
- if (!SectionStmt || !isa<OMPSectionDirective>(SectionStmt)) {
- if (SectionStmt)
- Diag(SectionStmt->getBeginLoc(),
- diag::err_omp_parallel_sections_substmt_not_section);
- return StmtError();
- }
- cast<OMPSectionDirective>(SectionStmt)
- ->setHasCancel(DSAStack->isCancelRegion());
- }
- } else {
- Diag(AStmt->getBeginLoc(),
- diag::err_omp_parallel_sections_not_compound_stmt);
+StmtResult SemaOpenMP::ActOnOpenMPParallelSectionsDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (checkSectionsDirective(SemaRef, OMPD_parallel_sections, AStmt, DSAStack))
return StmtError();
- }
- setFunctionHasBranchProtectedScope();
+ SemaRef.setFunctionHasBranchProtectedScope();
return OMPParallelSectionsDirective::Create(
- Context, StartLoc, EndLoc, Clauses, AStmt,
+ getASTContext(), StartLoc, EndLoc, Clauses, AStmt,
DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
@@ -11301,47 +10970,40 @@ static bool checkMutuallyExclusiveClauses(
return ErrorFound;
}
-StmtResult Sema::ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult SemaOpenMP::ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
if (!AStmt)
return StmtError();
// OpenMP 5.0, 2.10.1 task Construct
// If a detach clause appears on the directive, then a mergeable clause cannot
// appear on the same directive.
- if (checkMutuallyExclusiveClauses(*this, Clauses,
+ if (checkMutuallyExclusiveClauses(SemaRef, Clauses,
{OMPC_detach, OMPC_mergeable}))
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
-
- setFunctionHasBranchProtectedScope();
+ setBranchProtectedScope(SemaRef, OMPD_task, AStmt);
- return OMPTaskDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
- DSAStack->isCancelRegion());
+ return OMPTaskDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses,
+ AStmt, DSAStack->isCancelRegion());
}
-StmtResult Sema::ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return OMPTaskyieldDirective::Create(Context, StartLoc, EndLoc);
+StmtResult SemaOpenMP::ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return OMPTaskyieldDirective::Create(getASTContext(), StartLoc, EndLoc);
}
-StmtResult Sema::ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return OMPBarrierDirective::Create(Context, StartLoc, EndLoc);
+StmtResult SemaOpenMP::ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return OMPBarrierDirective::Create(getASTContext(), StartLoc, EndLoc);
}
-StmtResult Sema::ActOnOpenMPErrorDirective(ArrayRef<OMPClause *> Clauses,
- SourceLocation StartLoc,
- SourceLocation EndLoc,
- bool InExContext) {
+StmtResult SemaOpenMP::ActOnOpenMPErrorDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ bool InExContext) {
const OMPAtClause *AtC =
OMPExecutableDirective::getSingleClause<OMPAtClause>(Clauses);
@@ -11366,12 +11028,13 @@ StmtResult Sema::ActOnOpenMPErrorDirective(ArrayRef<OMPClause *> Clauses,
if (!SeverityC || SeverityC->getSeverityKind() != OMPC_SEVERITY_warning)
return StmtError();
}
- return OMPErrorDirective::Create(Context, StartLoc, EndLoc, Clauses);
+ return OMPErrorDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses);
}
-StmtResult Sema::ActOnOpenMPTaskwaitDirective(ArrayRef<OMPClause *> Clauses,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult
+SemaOpenMP::ActOnOpenMPTaskwaitDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
const OMPNowaitClause *NowaitC =
OMPExecutableDirective::getSingleClause<OMPNowaitClause>(Clauses);
bool HasDependC =
@@ -11382,28 +11045,29 @@ StmtResult Sema::ActOnOpenMPTaskwaitDirective(ArrayRef<OMPClause *> Clauses,
return StmtError();
}
- return OMPTaskwaitDirective::Create(Context, StartLoc, EndLoc, Clauses);
+ return OMPTaskwaitDirective::Create(getASTContext(), StartLoc, EndLoc,
+ Clauses);
}
-StmtResult Sema::ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult
+SemaOpenMP::ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
if (!AStmt)
return StmtError();
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- setFunctionHasBranchProtectedScope();
+ SemaRef.setFunctionHasBranchProtectedScope();
- return OMPTaskgroupDirective::Create(Context, StartLoc, EndLoc, Clauses,
- AStmt,
+ return OMPTaskgroupDirective::Create(getASTContext(), StartLoc, EndLoc,
+ Clauses, AStmt,
DSAStack->getTaskgroupReductionRef());
}
-StmtResult Sema::ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult SemaOpenMP::ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
OMPFlushClause *FC = nullptr;
OMPClause *OrderClause = nullptr;
for (OMPClause *C : Clauses) {
@@ -11437,12 +11101,12 @@ StmtResult Sema::ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
<< getOpenMPClauseName(OrderClause->getClauseKind());
return StmtError();
}
- return OMPFlushDirective::Create(Context, StartLoc, EndLoc, Clauses);
+ return OMPFlushDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses);
}
-StmtResult Sema::ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult SemaOpenMP::ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
if (Clauses.empty()) {
Diag(StartLoc, diag::err_omp_depobj_expected);
return StmtError();
@@ -11459,19 +11123,19 @@ StmtResult Sema::ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
Diag(Clauses[0]->getEndLoc(), diag::err_omp_depobj_single_clause_expected);
return StmtError();
}
- return OMPDepobjDirective::Create(Context, StartLoc, EndLoc, Clauses);
+ return OMPDepobjDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses);
}
-StmtResult Sema::ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult SemaOpenMP::ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
// Check that exactly one clause is specified.
if (Clauses.size() != 1) {
Diag(Clauses.empty() ? EndLoc : Clauses[1]->getBeginLoc(),
diag::err_omp_scan_single_clause_expected);
return StmtError();
}
- // Check that scan directive is used in the scopeof the OpenMP loop body.
+ // Check that scan directive is used in the scope of the OpenMP loop body.
if (Scope *S = DSAStack->getCurScope()) {
Scope *ParentS = S->getParent();
if (!ParentS || ParentS->getParent() != ParentS->getBreakParent() ||
@@ -11489,13 +11153,13 @@ StmtResult Sema::ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
return StmtError();
}
DSAStack->setParentHasScanDirective(StartLoc);
- return OMPScanDirective::Create(Context, StartLoc, EndLoc, Clauses);
+ return OMPScanDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses);
}
-StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult
+SemaOpenMP::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
const OMPClause *DependFound = nullptr;
const OMPClause *DependSourceClause = nullptr;
const OMPClause *DependSinkClause = nullptr;
@@ -11554,7 +11218,7 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
// An ordered construct with the simd clause is the only OpenMP construct
// that can appear in the simd region.
Diag(StartLoc, diag::err_omp_prohibited_region_simd)
- << (LangOpts.OpenMP >= 50 ? 1 : 0);
+ << (getLangOpts().OpenMP >= 50 ? 1 : 0);
ErrorFound = true;
} else if ((DependFound || DoacrossFound) && (TC || SC)) {
SourceLocation Loc =
@@ -11601,10 +11265,11 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
if (AStmt) {
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- setFunctionHasBranchProtectedScope();
+ SemaRef.setFunctionHasBranchProtectedScope();
}
- return OMPOrderedDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
+ return OMPOrderedDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses,
+ AStmt);
}
namespace {
@@ -11625,7 +11290,7 @@ class OpenMPAtomicUpdateChecker {
NotAnAssignmentOp,
/// RHS part of the binary operation is not a binary expression.
NotABinaryExpression,
- /// RHS part is not additive/multiplicative/shift/biwise binary
+ /// RHS part is not additive/multiplicative/shift/bitwise binary
/// expression.
NotABinaryOperator,
/// RHS binary operation does not have reference to the updated LHS
@@ -11903,7 +11568,7 @@ public:
InvalidAssignment,
/// Not if statement
NotIfStmt,
- /// More than two statements in a compund statement.
+ /// More than two statements in a compound statement.
MoreThanTwoStmts,
/// Not a compound statement.
NotCompoundStmt,
@@ -11990,7 +11655,7 @@ protected:
return true;
}
- };
+};
bool OpenMPAtomicCompareChecker::checkCondUpdateStmt(IfStmt *S,
ErrorInfoTy &ErrorInfo) {
@@ -12662,10 +12327,11 @@ bool OpenMPAtomicCompareCaptureChecker::checkStmt(Stmt *S,
}
} // namespace
-StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult SemaOpenMP::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ ASTContext &Context = getASTContext();
// Register location of the first atomic directive.
DSAStack->addAtomicDirectiveLoc(StartLoc);
if (!AStmt)
@@ -12708,9 +12374,11 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
}
break;
}
+ case OMPC_weak:
case OMPC_fail: {
if (!EncounteredAtomicKinds.contains(OMPC_compare)) {
- Diag(C->getBeginLoc(), diag::err_omp_atomic_fail_no_compare)
+ Diag(C->getBeginLoc(), diag::err_omp_atomic_no_compare)
+ << getOpenMPClauseName(C->getClauseKind())
<< SourceRange(C->getBeginLoc(), C->getEndLoc());
return StmtError();
}
@@ -12866,7 +12534,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
<< ErrorFound << NoteRange;
return StmtError();
}
- if (CurContext->isDependentContext())
+ if (SemaRef.CurContext->isDependentContext())
V = X = nullptr;
} else if (AtomicKind == OMPC_write) {
enum {
@@ -12928,7 +12596,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
<< ErrorFound << NoteRange;
return StmtError();
}
- if (CurContext->isDependentContext())
+ if (SemaRef.CurContext->isDependentContext())
E = X = nullptr;
} else if (AtomicKind == OMPC_update || AtomicKind == OMPC_unknown) {
// If clause is update:
@@ -12939,7 +12607,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
// x binop= expr;
// x = x binop expr;
// x = expr binop x;
- OpenMPAtomicUpdateChecker Checker(*this);
+ OpenMPAtomicUpdateChecker Checker(SemaRef);
if (Checker.checkStatement(
Body,
(AtomicKind == OMPC_update)
@@ -12947,7 +12615,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
: diag::err_omp_atomic_not_expression_statement,
diag::note_omp_atomic_update))
return StmtError();
- if (!CurContext->isDependentContext()) {
+ if (!SemaRef.CurContext->isDependentContext()) {
E = Checker.getExpr();
X = Checker.getX();
UE = Checker.getUpdateExpr();
@@ -12977,7 +12645,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
if (AtomicBinOp && AtomicBinOp->getOpcode() == BO_Assign) {
V = AtomicBinOp->getLHS();
Body = AtomicBinOp->getRHS()->IgnoreParenImpCasts();
- OpenMPAtomicUpdateChecker Checker(*this);
+ OpenMPAtomicUpdateChecker Checker(SemaRef);
if (Checker.checkStatement(
Body, diag::err_omp_atomic_capture_not_expression_statement,
diag::note_omp_atomic_update))
@@ -13002,7 +12670,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Diag(NoteLoc, diag::note_omp_atomic_capture) << ErrorFound << NoteRange;
return StmtError();
}
- if (CurContext->isDependentContext())
+ if (SemaRef.CurContext->isDependentContext())
UE = V = E = X = nullptr;
} else {
// If clause is a capture:
@@ -13031,14 +12699,14 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
if (auto *EWC = dyn_cast<ExprWithCleanups>(Second))
Second = EWC->getSubExpr()->IgnoreParenImpCasts();
// Need to find what subexpression is 'v' and what is 'x'.
- OpenMPAtomicUpdateChecker Checker(*this);
+ OpenMPAtomicUpdateChecker Checker(SemaRef);
bool IsUpdateExprFound = !Checker.checkStatement(Second);
BinaryOperator *BinOp = nullptr;
if (IsUpdateExprFound) {
BinOp = dyn_cast<BinaryOperator>(First);
IsUpdateExprFound = BinOp && BinOp->getOpcode() == BO_Assign;
}
- if (IsUpdateExprFound && !CurContext->isDependentContext()) {
+ if (IsUpdateExprFound && !SemaRef.CurContext->isDependentContext()) {
// { v = x; x++; }
// { v = x; x--; }
// { v = x; ++x; }
@@ -13068,7 +12736,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
BinOp = dyn_cast<BinaryOperator>(Second);
IsUpdateExprFound = BinOp && BinOp->getOpcode() == BO_Assign;
}
- if (IsUpdateExprFound && !CurContext->isDependentContext()) {
+ if (IsUpdateExprFound &&
+ !SemaRef.CurContext->isDependentContext()) {
// { x++; v = x; }
// { x--; v = x; }
// { ++x; v = x; }
@@ -13165,12 +12834,12 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Diag(NoteLoc, diag::note_omp_atomic_capture) << ErrorFound << NoteRange;
return StmtError();
}
- if (CurContext->isDependentContext())
+ if (SemaRef.CurContext->isDependentContext())
UE = V = E = X = nullptr;
} else if (AtomicKind == OMPC_compare) {
if (IsCompareCapture) {
OpenMPAtomicCompareCaptureChecker::ErrorInfoTy ErrorInfo;
- OpenMPAtomicCompareCaptureChecker Checker(*this);
+ OpenMPAtomicCompareCaptureChecker Checker(SemaRef);
if (!Checker.checkStmt(Body, ErrorInfo)) {
Diag(ErrorInfo.ErrorLoc, diag::err_omp_atomic_compare_capture)
<< ErrorInfo.ErrorRange;
@@ -13190,54 +12859,59 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
IsPostfixUpdate = Checker.isPostfixUpdate();
} else {
OpenMPAtomicCompareChecker::ErrorInfoTy ErrorInfo;
- OpenMPAtomicCompareChecker Checker(*this);
+ OpenMPAtomicCompareChecker Checker(SemaRef);
if (!Checker.checkStmt(Body, ErrorInfo)) {
Diag(ErrorInfo.ErrorLoc, diag::err_omp_atomic_compare)
<< ErrorInfo.ErrorRange;
Diag(ErrorInfo.NoteLoc, diag::note_omp_atomic_compare)
- << ErrorInfo.Error << ErrorInfo.NoteRange;
+ << ErrorInfo.Error << ErrorInfo.NoteRange;
return StmtError();
}
X = Checker.getX();
E = Checker.getE();
D = Checker.getD();
CE = Checker.getCond();
+ // The weak clause may only appear if the resulting atomic operation is
+ // an atomic conditional update for which the comparison tests for
+ // equality. It was not possible to do this check in
+ // OpenMPAtomicCompareChecker::checkStmt() as the check for OMPC_weak
+ // could not be performed (Clauses are not available).
+ auto *It = find_if(Clauses, [](OMPClause *C) {
+ return C->getClauseKind() == llvm::omp::Clause::OMPC_weak;
+ });
+ if (It != Clauses.end()) {
+ auto *Cond = dyn_cast<BinaryOperator>(CE);
+ if (Cond->getOpcode() != BO_EQ) {
+ ErrorInfo.Error = Checker.ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = Cond->getExprLoc();
+ ErrorInfo.NoteLoc = Cond->getOperatorLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Cond->getSourceRange();
+
+ Diag(ErrorInfo.ErrorLoc, diag::err_omp_atomic_weak_no_equality)
+ << ErrorInfo.ErrorRange;
+ return StmtError();
+ }
+ }
// We reuse IsXLHSInRHSPart to tell if it is in the form 'x ordop expr'.
IsXLHSInRHSPart = Checker.isXBinopExpr();
}
}
- setFunctionHasBranchProtectedScope();
+ SemaRef.setFunctionHasBranchProtectedScope();
return OMPAtomicDirective::Create(
Context, StartLoc, EndLoc, Clauses, AStmt,
{X, V, R, E, UE, D, CE, IsXLHSInRHSPart, IsPostfixUpdate, IsFailOnly});
}
-StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult SemaOpenMP::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_target);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS = setBranchProtectedScope(SemaRef, OMPD_target, AStmt);
// OpenMP [2.16, Nesting of Regions]
// If specified, a teams construct must be contained within a target
@@ -13250,10 +12924,14 @@ StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
auto I = CS->body_begin();
while (I != CS->body_end()) {
const auto *OED = dyn_cast<OMPExecutableDirective>(*I);
- if (!OED || !isOpenMPTeamsDirective(OED->getDirectiveKind()) ||
- OMPTeamsFound) {
-
+ bool IsTeams = OED && isOpenMPTeamsDirective(OED->getDirectiveKind());
+ if (!IsTeams || I != CS->body_begin()) {
OMPTeamsFound = false;
+ if (IsTeams && I != CS->body_begin()) {
+ // This is the two teams case. Since the InnerTeamsRegionLoc will
+ // point to this second one reset the iterator to the other teams.
+ --I;
+ }
break;
}
++I;
@@ -13274,94 +12952,47 @@ StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
}
}
- setFunctionHasBranchProtectedScope();
-
- return OMPTargetDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
+ return OMPTargetDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses,
+ AStmt);
}
-StmtResult
-Sema::ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult SemaOpenMP::ActOnOpenMPTargetParallelDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_target_parallel);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
-
- setFunctionHasBranchProtectedScope();
+ setBranchProtectedScope(SemaRef, OMPD_target_parallel, AStmt);
return OMPTargetParallelDirective::Create(
- Context, StartLoc, EndLoc, Clauses, AStmt,
+ getASTContext(), StartLoc, EndLoc, Clauses, AStmt,
DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
-StmtResult Sema::ActOnOpenMPTargetParallelForDirective(
+StmtResult SemaOpenMP::ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_target_parallel_for);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS =
+ setBranchProtectedScope(SemaRef, OMPD_target_parallel_for, AStmt);
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
checkOpenMPLoop(OMPD_target_parallel_for, getCollapseNumberExpr(Clauses),
- getOrderedNumberExpr(Clauses), CS, *this, *DSAStack,
+ getOrderedNumberExpr(Clauses), CS, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
- "omp target parallel for loop exprs were not built");
-
- if (!CurContext->isDependentContext()) {
- // Finalize the clauses that need pre-built expressions for CodeGen.
- for (OMPClause *C : Clauses) {
- if (auto *LC = dyn_cast<OMPLinearClause>(C))
- if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
- B.NumIterations, *this, CurScope,
- DSAStack))
- return StmtError();
- }
- }
+ if (finishLinearClauses(SemaRef, Clauses, B, DSAStack))
+ return StmtError();
- setFunctionHasBranchProtectedScope();
return OMPTargetParallelForDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
@@ -13398,10 +13029,10 @@ static bool isClauseMappable(ArrayRef<OMPClause *> Clauses) {
return true;
}
-StmtResult Sema::ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult
+SemaOpenMP::ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
if (!AStmt)
return StmtError();
@@ -13411,9 +13042,10 @@ StmtResult Sema::ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
// At least one map, use_device_addr or use_device_ptr clause must appear on
// the directive.
if (!hasClauses(Clauses, OMPC_map, OMPC_use_device_ptr) &&
- (LangOpts.OpenMP < 50 || !hasClauses(Clauses, OMPC_use_device_addr))) {
+ (getLangOpts().OpenMP < 50 ||
+ !hasClauses(Clauses, OMPC_use_device_addr))) {
StringRef Expected;
- if (LangOpts.OpenMP < 50)
+ if (getLangOpts().OpenMP < 50)
Expected = "'map' or 'use_device_ptr'";
else
Expected = "'map', 'use_device_ptr', or 'use_device_addr'";
@@ -13422,36 +13054,19 @@ StmtResult Sema::ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
return StmtError();
}
- setFunctionHasBranchProtectedScope();
+ SemaRef.setFunctionHasBranchProtectedScope();
- return OMPTargetDataDirective::Create(Context, StartLoc, EndLoc, Clauses,
- AStmt);
+ return OMPTargetDataDirective::Create(getASTContext(), StartLoc, EndLoc,
+ Clauses, AStmt);
}
-StmtResult
-Sema::ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
- SourceLocation StartLoc,
- SourceLocation EndLoc, Stmt *AStmt) {
+StmtResult SemaOpenMP::ActOnOpenMPTargetEnterDataDirective(
+ ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc,
+ SourceLocation EndLoc, Stmt *AStmt) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_target_enter_data);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ setBranchProtectedScope(SemaRef, OMPD_target_enter_data, AStmt);
// OpenMP [2.10.2, Restrictions, p. 99]
// At least one map clause must appear on the directive.
@@ -13461,34 +13076,17 @@ Sema::ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
return StmtError();
}
- return OMPTargetEnterDataDirective::Create(Context, StartLoc, EndLoc, Clauses,
- AStmt);
+ return OMPTargetEnterDataDirective::Create(getASTContext(), StartLoc, EndLoc,
+ Clauses, AStmt);
}
-StmtResult
-Sema::ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
- SourceLocation StartLoc,
- SourceLocation EndLoc, Stmt *AStmt) {
+StmtResult SemaOpenMP::ActOnOpenMPTargetExitDataDirective(
+ ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc,
+ SourceLocation EndLoc, Stmt *AStmt) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_target_exit_data);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ setBranchProtectedScope(SemaRef, OMPD_target_exit_data, AStmt);
// OpenMP [2.10.3, Restrictions, p. 102]
// At least one map clause must appear on the directive.
@@ -13498,34 +13096,17 @@ Sema::ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
return StmtError();
}
- return OMPTargetExitDataDirective::Create(Context, StartLoc, EndLoc, Clauses,
- AStmt);
+ return OMPTargetExitDataDirective::Create(getASTContext(), StartLoc, EndLoc,
+ Clauses, AStmt);
}
-StmtResult Sema::ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
- SourceLocation StartLoc,
- SourceLocation EndLoc,
- Stmt *AStmt) {
+StmtResult SemaOpenMP::ActOnOpenMPTargetUpdateDirective(
+ ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc,
+ SourceLocation EndLoc, Stmt *AStmt) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_target_update);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ setBranchProtectedScope(SemaRef, OMPD_target_update, AStmt);
if (!hasClauses(Clauses, OMPC_to, OMPC_from)) {
Diag(StartLoc, diag::err_omp_at_least_one_motion_clause_required);
@@ -13537,13 +13118,14 @@ StmtResult Sema::ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
return StmtError();
}
- return OMPTargetUpdateDirective::Create(Context, StartLoc, EndLoc, Clauses,
- AStmt);
+ return OMPTargetUpdateDirective::Create(getASTContext(), StartLoc, EndLoc,
+ Clauses, AStmt);
}
-StmtResult Sema::ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult SemaOpenMP::ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
if (!AStmt)
return StmtError();
@@ -13551,25 +13133,17 @@ StmtResult Sema::ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
if (getLangOpts().HIP && (DSAStack->getParentDirective() == OMPD_target))
Diag(StartLoc, diag::warn_hip_omp_target_directives);
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
-
- setFunctionHasBranchProtectedScope();
+ setBranchProtectedScope(SemaRef, OMPD_teams, AStmt);
DSAStack->setParentTeamsRegionLoc(StartLoc);
- return OMPTeamsDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
+ return OMPTeamsDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses,
+ AStmt);
}
-StmtResult
-Sema::ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
- SourceLocation EndLoc,
- OpenMPDirectiveKind CancelRegion) {
+StmtResult SemaOpenMP::ActOnOpenMPCancellationPointDirective(
+ SourceLocation StartLoc, SourceLocation EndLoc,
+ OpenMPDirectiveKind CancelRegion) {
if (DSAStack->isParentNowaitRegion()) {
Diag(StartLoc, diag::err_omp_parent_cancel_region_nowait) << 0;
return StmtError();
@@ -13578,14 +13152,13 @@ Sema::ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
Diag(StartLoc, diag::err_omp_parent_cancel_region_ordered) << 0;
return StmtError();
}
- return OMPCancellationPointDirective::Create(Context, StartLoc, EndLoc,
- CancelRegion);
+ return OMPCancellationPointDirective::Create(getASTContext(), StartLoc,
+ EndLoc, CancelRegion);
}
-StmtResult Sema::ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
- SourceLocation StartLoc,
- SourceLocation EndLoc,
- OpenMPDirectiveKind CancelRegion) {
+StmtResult SemaOpenMP::ActOnOpenMPCancelDirective(
+ ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc,
+ SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion) {
if (DSAStack->isParentNowaitRegion()) {
Diag(StartLoc, diag::err_omp_parent_cancel_region_nowait) << 1;
return StmtError();
@@ -13595,7 +13168,7 @@ StmtResult Sema::ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
return StmtError();
}
DSAStack->setParentCancelRegion(/*Cancel=*/true);
- return OMPCancelDirective::Create(Context, StartLoc, EndLoc, Clauses,
+ return OMPCancelDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses,
CancelRegion);
}
@@ -13626,7 +13199,7 @@ static bool checkReductionClauseWithNogroup(Sema &S,
return false;
}
-StmtResult Sema::ActOnOpenMPTaskLoopDirective(
+StmtResult SemaOpenMP::ActOnOpenMPTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
@@ -13638,33 +13211,33 @@ StmtResult Sema::ActOnOpenMPTaskLoopDirective(
// define the nested loops number.
unsigned NestedLoopCount =
checkOpenMPLoop(OMPD_taskloop, getCollapseNumberExpr(Clauses),
- /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack,
- VarsWithImplicitDSA, B);
+ /*OrderedLoopCountExpr=*/nullptr, AStmt, SemaRef,
+ *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
+ assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) &&
"omp for loop exprs were not built");
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// The grainsize clause and num_tasks clause are mutually exclusive and may
// not appear on the same taskloop directive.
- if (checkMutuallyExclusiveClauses(*this, Clauses,
+ if (checkMutuallyExclusiveClauses(SemaRef, Clauses,
{OMPC_grainsize, OMPC_num_tasks}))
return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// If a reduction clause is present on the taskloop directive, the nogroup
// clause must not be specified.
- if (checkReductionClauseWithNogroup(*this, Clauses))
+ if (checkReductionClauseWithNogroup(SemaRef, Clauses))
return StmtError();
- setFunctionHasBranchProtectedScope();
- return OMPTaskLoopDirective::Create(Context, StartLoc, EndLoc,
+ SemaRef.setFunctionHasBranchProtectedScope();
+ return OMPTaskLoopDirective::Create(getASTContext(), StartLoc, EndLoc,
NestedLoopCount, Clauses, AStmt, B,
DSAStack->isCancelRegion());
}
-StmtResult Sema::ActOnOpenMPTaskLoopSimdDirective(
+StmtResult SemaOpenMP::ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
@@ -13676,45 +13249,34 @@ StmtResult Sema::ActOnOpenMPTaskLoopSimdDirective(
// define the nested loops number.
unsigned NestedLoopCount =
checkOpenMPLoop(OMPD_taskloop_simd, getCollapseNumberExpr(Clauses),
- /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack,
- VarsWithImplicitDSA, B);
+ /*OrderedLoopCountExpr=*/nullptr, AStmt, SemaRef,
+ *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
- "omp for loop exprs were not built");
-
- if (!CurContext->isDependentContext()) {
- // Finalize the clauses that need pre-built expressions for CodeGen.
- for (OMPClause *C : Clauses) {
- if (auto *LC = dyn_cast<OMPLinearClause>(C))
- if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
- B.NumIterations, *this, CurScope,
- DSAStack))
- return StmtError();
- }
- }
+ if (finishLinearClauses(SemaRef, Clauses, B, DSAStack))
+ return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// The grainsize clause and num_tasks clause are mutually exclusive and may
// not appear on the same taskloop directive.
- if (checkMutuallyExclusiveClauses(*this, Clauses,
+ if (checkMutuallyExclusiveClauses(SemaRef, Clauses,
{OMPC_grainsize, OMPC_num_tasks}))
return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// If a reduction clause is present on the taskloop directive, the nogroup
// clause must not be specified.
- if (checkReductionClauseWithNogroup(*this, Clauses))
+ if (checkReductionClauseWithNogroup(SemaRef, Clauses))
return StmtError();
- if (checkSimdlenSafelenSpecified(*this, Clauses))
+ if (checkSimdlenSafelenSpecified(SemaRef, Clauses))
return StmtError();
- setFunctionHasBranchProtectedScope();
- return OMPTaskLoopSimdDirective::Create(Context, StartLoc, EndLoc,
+ SemaRef.setFunctionHasBranchProtectedScope();
+ return OMPTaskLoopSimdDirective::Create(getASTContext(), StartLoc, EndLoc,
NestedLoopCount, Clauses, AStmt, B);
}
-StmtResult Sema::ActOnOpenMPMasterTaskLoopDirective(
+StmtResult SemaOpenMP::ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
@@ -13726,33 +13288,33 @@ StmtResult Sema::ActOnOpenMPMasterTaskLoopDirective(
// define the nested loops number.
unsigned NestedLoopCount =
checkOpenMPLoop(OMPD_master_taskloop, getCollapseNumberExpr(Clauses),
- /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack,
- VarsWithImplicitDSA, B);
+ /*OrderedLoopCountExpr=*/nullptr, AStmt, SemaRef,
+ *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
+ assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) &&
"omp for loop exprs were not built");
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// The grainsize clause and num_tasks clause are mutually exclusive and may
// not appear on the same taskloop directive.
- if (checkMutuallyExclusiveClauses(*this, Clauses,
+ if (checkMutuallyExclusiveClauses(SemaRef, Clauses,
{OMPC_grainsize, OMPC_num_tasks}))
return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// If a reduction clause is present on the taskloop directive, the nogroup
// clause must not be specified.
- if (checkReductionClauseWithNogroup(*this, Clauses))
+ if (checkReductionClauseWithNogroup(SemaRef, Clauses))
return StmtError();
- setFunctionHasBranchProtectedScope();
- return OMPMasterTaskLoopDirective::Create(Context, StartLoc, EndLoc,
+ SemaRef.setFunctionHasBranchProtectedScope();
+ return OMPMasterTaskLoopDirective::Create(getASTContext(), StartLoc, EndLoc,
NestedLoopCount, Clauses, AStmt, B,
DSAStack->isCancelRegion());
}
-StmtResult Sema::ActOnOpenMPMaskedTaskLoopDirective(
+StmtResult SemaOpenMP::ActOnOpenMPMaskedTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
@@ -13764,33 +13326,33 @@ StmtResult Sema::ActOnOpenMPMaskedTaskLoopDirective(
// define the nested loops number.
unsigned NestedLoopCount =
checkOpenMPLoop(OMPD_masked_taskloop, getCollapseNumberExpr(Clauses),
- /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack,
- VarsWithImplicitDSA, B);
+ /*OrderedLoopCountExpr=*/nullptr, AStmt, SemaRef,
+ *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
+ assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) &&
"omp for loop exprs were not built");
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// The grainsize clause and num_tasks clause are mutually exclusive and may
// not appear on the same taskloop directive.
- if (checkMutuallyExclusiveClauses(*this, Clauses,
+ if (checkMutuallyExclusiveClauses(SemaRef, Clauses,
{OMPC_grainsize, OMPC_num_tasks}))
return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// If a reduction clause is present on the taskloop directive, the nogroup
// clause must not be specified.
- if (checkReductionClauseWithNogroup(*this, Clauses))
+ if (checkReductionClauseWithNogroup(SemaRef, Clauses))
return StmtError();
- setFunctionHasBranchProtectedScope();
- return OMPMaskedTaskLoopDirective::Create(Context, StartLoc, EndLoc,
+ SemaRef.setFunctionHasBranchProtectedScope();
+ return OMPMaskedTaskLoopDirective::Create(getASTContext(), StartLoc, EndLoc,
NestedLoopCount, Clauses, AStmt, B,
DSAStack->isCancelRegion());
}
-StmtResult Sema::ActOnOpenMPMasterTaskLoopSimdDirective(
+StmtResult SemaOpenMP::ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
@@ -13802,45 +13364,34 @@ StmtResult Sema::ActOnOpenMPMasterTaskLoopSimdDirective(
// define the nested loops number.
unsigned NestedLoopCount =
checkOpenMPLoop(OMPD_master_taskloop_simd, getCollapseNumberExpr(Clauses),
- /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack,
- VarsWithImplicitDSA, B);
+ /*OrderedLoopCountExpr=*/nullptr, AStmt, SemaRef,
+ *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
- "omp for loop exprs were not built");
-
- if (!CurContext->isDependentContext()) {
- // Finalize the clauses that need pre-built expressions for CodeGen.
- for (OMPClause *C : Clauses) {
- if (auto *LC = dyn_cast<OMPLinearClause>(C))
- if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
- B.NumIterations, *this, CurScope,
- DSAStack))
- return StmtError();
- }
- }
+ if (finishLinearClauses(SemaRef, Clauses, B, DSAStack))
+ return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// The grainsize clause and num_tasks clause are mutually exclusive and may
// not appear on the same taskloop directive.
- if (checkMutuallyExclusiveClauses(*this, Clauses,
+ if (checkMutuallyExclusiveClauses(SemaRef, Clauses,
{OMPC_grainsize, OMPC_num_tasks}))
return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// If a reduction clause is present on the taskloop directive, the nogroup
// clause must not be specified.
- if (checkReductionClauseWithNogroup(*this, Clauses))
+ if (checkReductionClauseWithNogroup(SemaRef, Clauses))
return StmtError();
- if (checkSimdlenSafelenSpecified(*this, Clauses))
+ if (checkSimdlenSafelenSpecified(SemaRef, Clauses))
return StmtError();
- setFunctionHasBranchProtectedScope();
+ SemaRef.setFunctionHasBranchProtectedScope();
return OMPMasterTaskLoopSimdDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
-StmtResult Sema::ActOnOpenMPMaskedTaskLoopSimdDirective(
+StmtResult SemaOpenMP::ActOnOpenMPMaskedTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
@@ -13852,297 +13403,192 @@ StmtResult Sema::ActOnOpenMPMaskedTaskLoopSimdDirective(
// define the nested loops number.
unsigned NestedLoopCount =
checkOpenMPLoop(OMPD_masked_taskloop_simd, getCollapseNumberExpr(Clauses),
- /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack,
- VarsWithImplicitDSA, B);
+ /*OrderedLoopCountExpr=*/nullptr, AStmt, SemaRef,
+ *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
- "omp for loop exprs were not built");
-
- if (!CurContext->isDependentContext()) {
- // Finalize the clauses that need pre-built expressions for CodeGen.
- for (OMPClause *C : Clauses) {
- if (auto *LC = dyn_cast<OMPLinearClause>(C))
- if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
- B.NumIterations, *this, CurScope,
- DSAStack))
- return StmtError();
- }
- }
+ if (finishLinearClauses(SemaRef, Clauses, B, DSAStack))
+ return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// The grainsize clause and num_tasks clause are mutually exclusive and may
// not appear on the same taskloop directive.
- if (checkMutuallyExclusiveClauses(*this, Clauses,
+ if (checkMutuallyExclusiveClauses(SemaRef, Clauses,
{OMPC_grainsize, OMPC_num_tasks}))
return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// If a reduction clause is present on the taskloop directive, the nogroup
// clause must not be specified.
- if (checkReductionClauseWithNogroup(*this, Clauses))
+ if (checkReductionClauseWithNogroup(SemaRef, Clauses))
return StmtError();
- if (checkSimdlenSafelenSpecified(*this, Clauses))
+ if (checkSimdlenSafelenSpecified(SemaRef, Clauses))
return StmtError();
- setFunctionHasBranchProtectedScope();
+ SemaRef.setFunctionHasBranchProtectedScope();
return OMPMaskedTaskLoopSimdDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
-StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopDirective(
+StmtResult SemaOpenMP::ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel =
- getOpenMPCaptureLevels(OMPD_parallel_master_taskloop);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS =
+ setBranchProtectedScope(SemaRef, OMPD_parallel_master_taskloop, AStmt);
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_parallel_master_taskloop, getCollapseNumberExpr(Clauses),
- /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+ /*OrderedLoopCountExpr=*/nullptr, CS, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
+ assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) &&
"omp for loop exprs were not built");
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// The grainsize clause and num_tasks clause are mutually exclusive and may
// not appear on the same taskloop directive.
- if (checkMutuallyExclusiveClauses(*this, Clauses,
+ if (checkMutuallyExclusiveClauses(SemaRef, Clauses,
{OMPC_grainsize, OMPC_num_tasks}))
return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// If a reduction clause is present on the taskloop directive, the nogroup
// clause must not be specified.
- if (checkReductionClauseWithNogroup(*this, Clauses))
+ if (checkReductionClauseWithNogroup(SemaRef, Clauses))
return StmtError();
- setFunctionHasBranchProtectedScope();
return OMPParallelMasterTaskLoopDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
DSAStack->isCancelRegion());
}
-StmtResult Sema::ActOnOpenMPParallelMaskedTaskLoopDirective(
+StmtResult SemaOpenMP::ActOnOpenMPParallelMaskedTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel =
- getOpenMPCaptureLevels(OMPD_parallel_masked_taskloop);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS =
+ setBranchProtectedScope(SemaRef, OMPD_parallel_masked_taskloop, AStmt);
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_parallel_masked_taskloop, getCollapseNumberExpr(Clauses),
- /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+ /*OrderedLoopCountExpr=*/nullptr, CS, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
+ assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) &&
"omp for loop exprs were not built");
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// The grainsize clause and num_tasks clause are mutually exclusive and may
// not appear on the same taskloop directive.
- if (checkMutuallyExclusiveClauses(*this, Clauses,
+ if (checkMutuallyExclusiveClauses(SemaRef, Clauses,
{OMPC_grainsize, OMPC_num_tasks}))
return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// If a reduction clause is present on the taskloop directive, the nogroup
// clause must not be specified.
- if (checkReductionClauseWithNogroup(*this, Clauses))
+ if (checkReductionClauseWithNogroup(SemaRef, Clauses))
return StmtError();
- setFunctionHasBranchProtectedScope();
return OMPParallelMaskedTaskLoopDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
DSAStack->isCancelRegion());
}
-StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopSimdDirective(
+StmtResult SemaOpenMP::ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel =
- getOpenMPCaptureLevels(OMPD_parallel_master_taskloop_simd);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS = setBranchProtectedScope(
+ SemaRef, OMPD_parallel_master_taskloop_simd, AStmt);
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_parallel_master_taskloop_simd, getCollapseNumberExpr(Clauses),
- /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+ /*OrderedLoopCountExpr=*/nullptr, CS, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
- "omp for loop exprs were not built");
-
- if (!CurContext->isDependentContext()) {
- // Finalize the clauses that need pre-built expressions for CodeGen.
- for (OMPClause *C : Clauses) {
- if (auto *LC = dyn_cast<OMPLinearClause>(C))
- if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
- B.NumIterations, *this, CurScope,
- DSAStack))
- return StmtError();
- }
- }
+ if (finishLinearClauses(SemaRef, Clauses, B, DSAStack))
+ return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// The grainsize clause and num_tasks clause are mutually exclusive and may
// not appear on the same taskloop directive.
- if (checkMutuallyExclusiveClauses(*this, Clauses,
+ if (checkMutuallyExclusiveClauses(SemaRef, Clauses,
{OMPC_grainsize, OMPC_num_tasks}))
return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// If a reduction clause is present on the taskloop directive, the nogroup
// clause must not be specified.
- if (checkReductionClauseWithNogroup(*this, Clauses))
+ if (checkReductionClauseWithNogroup(SemaRef, Clauses))
return StmtError();
- if (checkSimdlenSafelenSpecified(*this, Clauses))
+ if (checkSimdlenSafelenSpecified(SemaRef, Clauses))
return StmtError();
- setFunctionHasBranchProtectedScope();
return OMPParallelMasterTaskLoopSimdDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
-StmtResult Sema::ActOnOpenMPParallelMaskedTaskLoopSimdDirective(
+StmtResult SemaOpenMP::ActOnOpenMPParallelMaskedTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel =
- getOpenMPCaptureLevels(OMPD_parallel_masked_taskloop_simd);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS = setBranchProtectedScope(
+ SemaRef, OMPD_parallel_masked_taskloop_simd, AStmt);
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_parallel_masked_taskloop_simd, getCollapseNumberExpr(Clauses),
- /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+ /*OrderedLoopCountExpr=*/nullptr, CS, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
- "omp for loop exprs were not built");
-
- if (!CurContext->isDependentContext()) {
- // Finalize the clauses that need pre-built expressions for CodeGen.
- for (OMPClause *C : Clauses) {
- if (auto *LC = dyn_cast<OMPLinearClause>(C))
- if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
- B.NumIterations, *this, CurScope,
- DSAStack))
- return StmtError();
- }
- }
+ if (finishLinearClauses(SemaRef, Clauses, B, DSAStack))
+ return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// The grainsize clause and num_tasks clause are mutually exclusive and may
// not appear on the same taskloop directive.
- if (checkMutuallyExclusiveClauses(*this, Clauses,
+ if (checkMutuallyExclusiveClauses(SemaRef, Clauses,
{OMPC_grainsize, OMPC_num_tasks}))
return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// If a reduction clause is present on the taskloop directive, the nogroup
// clause must not be specified.
- if (checkReductionClauseWithNogroup(*this, Clauses))
+ if (checkReductionClauseWithNogroup(SemaRef, Clauses))
return StmtError();
- if (checkSimdlenSafelenSpecified(*this, Clauses))
+ if (checkSimdlenSafelenSpecified(SemaRef, Clauses))
return StmtError();
- setFunctionHasBranchProtectedScope();
return OMPParallelMaskedTaskLoopSimdDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
-StmtResult Sema::ActOnOpenMPDistributeDirective(
+StmtResult SemaOpenMP::ActOnOpenMPDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
@@ -14158,531 +13604,289 @@ StmtResult Sema::ActOnOpenMPDistributeDirective(
unsigned NestedLoopCount =
checkOpenMPLoop(OMPD_distribute, getCollapseNumberExpr(Clauses),
nullptr /*ordered not a clause on distribute*/, AStmt,
- *this, *DSAStack, VarsWithImplicitDSA, B);
+ SemaRef, *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
+ assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) &&
"omp for loop exprs were not built");
- setFunctionHasBranchProtectedScope();
+ SemaRef.setFunctionHasBranchProtectedScope();
auto *DistributeDirective = OMPDistributeDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
DSAStack->getMappedDirective());
return DistributeDirective;
}
-StmtResult Sema::ActOnOpenMPDistributeParallelForDirective(
+StmtResult SemaOpenMP::ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel =
- getOpenMPCaptureLevels(OMPD_distribute_parallel_for);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS =
+ setBranchProtectedScope(SemaRef, OMPD_distribute_parallel_for, AStmt);
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_distribute_parallel_for, getCollapseNumberExpr(Clauses),
- nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack,
+ nullptr /*ordered not a clause on distribute*/, CS, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
+ assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) &&
"omp for loop exprs were not built");
- setFunctionHasBranchProtectedScope();
return OMPDistributeParallelForDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
-StmtResult Sema::ActOnOpenMPDistributeParallelForSimdDirective(
+StmtResult SemaOpenMP::ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel =
- getOpenMPCaptureLevels(OMPD_distribute_parallel_for_simd);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS = setBranchProtectedScope(
+ SemaRef, OMPD_distribute_parallel_for_simd, AStmt);
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_distribute_parallel_for_simd, getCollapseNumberExpr(Clauses),
- nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack,
+ nullptr /*ordered not a clause on distribute*/, CS, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
- "omp for loop exprs were not built");
-
- if (!CurContext->isDependentContext()) {
- // Finalize the clauses that need pre-built expressions for CodeGen.
- for (OMPClause *C : Clauses) {
- if (auto *LC = dyn_cast<OMPLinearClause>(C))
- if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
- B.NumIterations, *this, CurScope,
- DSAStack))
- return StmtError();
- }
- }
+ if (finishLinearClauses(SemaRef, Clauses, B, DSAStack))
+ return StmtError();
- if (checkSimdlenSafelenSpecified(*this, Clauses))
+ if (checkSimdlenSafelenSpecified(SemaRef, Clauses))
return StmtError();
- setFunctionHasBranchProtectedScope();
return OMPDistributeParallelForSimdDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
-StmtResult Sema::ActOnOpenMPDistributeSimdDirective(
+StmtResult SemaOpenMP::ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_distribute_simd);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS =
+ setBranchProtectedScope(SemaRef, OMPD_distribute_simd, AStmt);
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
checkOpenMPLoop(OMPD_distribute_simd, getCollapseNumberExpr(Clauses),
- nullptr /*ordered not a clause on distribute*/, CS, *this,
- *DSAStack, VarsWithImplicitDSA, B);
+ nullptr /*ordered not a clause on distribute*/, CS,
+ SemaRef, *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
- "omp for loop exprs were not built");
-
- if (!CurContext->isDependentContext()) {
- // Finalize the clauses that need pre-built expressions for CodeGen.
- for (OMPClause *C : Clauses) {
- if (auto *LC = dyn_cast<OMPLinearClause>(C))
- if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
- B.NumIterations, *this, CurScope,
- DSAStack))
- return StmtError();
- }
- }
+ if (finishLinearClauses(SemaRef, Clauses, B, DSAStack))
+ return StmtError();
- if (checkSimdlenSafelenSpecified(*this, Clauses))
+ if (checkSimdlenSafelenSpecified(SemaRef, Clauses))
return StmtError();
- setFunctionHasBranchProtectedScope();
- return OMPDistributeSimdDirective::Create(Context, StartLoc, EndLoc,
+ return OMPDistributeSimdDirective::Create(getASTContext(), StartLoc, EndLoc,
NestedLoopCount, Clauses, AStmt, B);
}
-StmtResult Sema::ActOnOpenMPTargetParallelForSimdDirective(
+StmtResult SemaOpenMP::ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_target_parallel_for);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS =
+ setBranchProtectedScope(SemaRef, OMPD_target_parallel_for_simd, AStmt);
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_target_parallel_for_simd, getCollapseNumberExpr(Clauses),
- getOrderedNumberExpr(Clauses), CS, *this, *DSAStack, VarsWithImplicitDSA,
- B);
+ getOrderedNumberExpr(Clauses), CS, SemaRef, *DSAStack,
+ VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
- "omp target parallel for simd loop exprs were not built");
+ if (finishLinearClauses(SemaRef, Clauses, B, DSAStack))
+ return StmtError();
- if (!CurContext->isDependentContext()) {
- // Finalize the clauses that need pre-built expressions for CodeGen.
- for (OMPClause *C : Clauses) {
- if (auto *LC = dyn_cast<OMPLinearClause>(C))
- if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
- B.NumIterations, *this, CurScope,
- DSAStack))
- return StmtError();
- }
- }
- if (checkSimdlenSafelenSpecified(*this, Clauses))
+ if (checkSimdlenSafelenSpecified(SemaRef, Clauses))
return StmtError();
- setFunctionHasBranchProtectedScope();
return OMPTargetParallelForSimdDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
-StmtResult Sema::ActOnOpenMPTargetSimdDirective(
+StmtResult SemaOpenMP::ActOnOpenMPTargetSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_target_simd);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS = setBranchProtectedScope(SemaRef, OMPD_target_simd, AStmt);
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will define the
// nested loops number.
unsigned NestedLoopCount =
checkOpenMPLoop(OMPD_target_simd, getCollapseNumberExpr(Clauses),
- getOrderedNumberExpr(Clauses), CS, *this, *DSAStack,
+ getOrderedNumberExpr(Clauses), CS, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
- "omp target simd loop exprs were not built");
-
- if (!CurContext->isDependentContext()) {
- // Finalize the clauses that need pre-built expressions for CodeGen.
- for (OMPClause *C : Clauses) {
- if (auto *LC = dyn_cast<OMPLinearClause>(C))
- if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
- B.NumIterations, *this, CurScope,
- DSAStack))
- return StmtError();
- }
- }
+ if (finishLinearClauses(SemaRef, Clauses, B, DSAStack))
+ return StmtError();
- if (checkSimdlenSafelenSpecified(*this, Clauses))
+ if (checkSimdlenSafelenSpecified(SemaRef, Clauses))
return StmtError();
- setFunctionHasBranchProtectedScope();
- return OMPTargetSimdDirective::Create(Context, StartLoc, EndLoc,
+ return OMPTargetSimdDirective::Create(getASTContext(), StartLoc, EndLoc,
NestedLoopCount, Clauses, AStmt, B);
}
-StmtResult Sema::ActOnOpenMPTeamsDistributeDirective(
+StmtResult SemaOpenMP::ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_teams_distribute);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS =
+ setBranchProtectedScope(SemaRef, OMPD_teams_distribute, AStmt);
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
checkOpenMPLoop(OMPD_teams_distribute, getCollapseNumberExpr(Clauses),
- nullptr /*ordered not a clause on distribute*/, CS, *this,
- *DSAStack, VarsWithImplicitDSA, B);
+ nullptr /*ordered not a clause on distribute*/, CS,
+ SemaRef, *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
+ assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) &&
"omp teams distribute loop exprs were not built");
- setFunctionHasBranchProtectedScope();
-
DSAStack->setParentTeamsRegionLoc(StartLoc);
return OMPTeamsDistributeDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
-StmtResult Sema::ActOnOpenMPTeamsDistributeSimdDirective(
+StmtResult SemaOpenMP::ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel =
- getOpenMPCaptureLevels(OMPD_teams_distribute_simd);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS =
+ setBranchProtectedScope(SemaRef, OMPD_teams_distribute_simd, AStmt);
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_teams_distribute_simd, getCollapseNumberExpr(Clauses),
- nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack,
+ nullptr /*ordered not a clause on distribute*/, CS, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
-
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
- "omp teams distribute simd loop exprs were not built");
-
- if (!CurContext->isDependentContext()) {
- // Finalize the clauses that need pre-built expressions for CodeGen.
- for (OMPClause *C : Clauses) {
- if (auto *LC = dyn_cast<OMPLinearClause>(C))
- if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
- B.NumIterations, *this, CurScope,
- DSAStack))
- return StmtError();
- }
- }
-
- if (checkSimdlenSafelenSpecified(*this, Clauses))
+ if (finishLinearClauses(SemaRef, Clauses, B, DSAStack))
return StmtError();
- setFunctionHasBranchProtectedScope();
+ if (checkSimdlenSafelenSpecified(SemaRef, Clauses))
+ return StmtError();
DSAStack->setParentTeamsRegionLoc(StartLoc);
return OMPTeamsDistributeSimdDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
-StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForSimdDirective(
+StmtResult SemaOpenMP::ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
-
- for (int ThisCaptureLevel =
- getOpenMPCaptureLevels(OMPD_teams_distribute_parallel_for_simd);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS = setBranchProtectedScope(
+ SemaRef, OMPD_teams_distribute_parallel_for_simd, AStmt);
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_teams_distribute_parallel_for_simd, getCollapseNumberExpr(Clauses),
- nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack,
+ nullptr /*ordered not a clause on distribute*/, CS, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
-
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
- "omp for loop exprs were not built");
-
- if (!CurContext->isDependentContext()) {
- // Finalize the clauses that need pre-built expressions for CodeGen.
- for (OMPClause *C : Clauses) {
- if (auto *LC = dyn_cast<OMPLinearClause>(C))
- if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
- B.NumIterations, *this, CurScope,
- DSAStack))
- return StmtError();
- }
- }
-
- if (checkSimdlenSafelenSpecified(*this, Clauses))
+ if (finishLinearClauses(SemaRef, Clauses, B, DSAStack))
return StmtError();
- setFunctionHasBranchProtectedScope();
+ if (checkSimdlenSafelenSpecified(SemaRef, Clauses))
+ return StmtError();
DSAStack->setParentTeamsRegionLoc(StartLoc);
return OMPTeamsDistributeParallelForSimdDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
-StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForDirective(
+StmtResult SemaOpenMP::ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
-
- for (int ThisCaptureLevel =
- getOpenMPCaptureLevels(OMPD_teams_distribute_parallel_for);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS = setBranchProtectedScope(
+ SemaRef, OMPD_teams_distribute_parallel_for, AStmt);
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_teams_distribute_parallel_for, getCollapseNumberExpr(Clauses),
- nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack,
+ nullptr /*ordered not a clause on distribute*/, CS, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
+ assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) &&
"omp for loop exprs were not built");
- setFunctionHasBranchProtectedScope();
-
DSAStack->setParentTeamsRegionLoc(StartLoc);
return OMPTeamsDistributeParallelForDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
-StmtResult Sema::ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
-
- for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_target_teams);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
- setFunctionHasBranchProtectedScope();
+ setBranchProtectedScope(SemaRef, OMPD_target_teams, AStmt);
const OMPClause *BareClause = nullptr;
bool HasThreadLimitAndNumTeamsClause = hasClauses(Clauses, OMPC_num_teams) &&
@@ -14697,132 +13901,71 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
return StmtError();
}
- return OMPTargetTeamsDirective::Create(Context, StartLoc, EndLoc, Clauses,
- AStmt);
+ return OMPTargetTeamsDirective::Create(getASTContext(), StartLoc, EndLoc,
+ Clauses, AStmt);
}
-StmtResult Sema::ActOnOpenMPTargetTeamsDistributeDirective(
+StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel =
- getOpenMPCaptureLevels(OMPD_target_teams_distribute);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS =
+ setBranchProtectedScope(SemaRef, OMPD_target_teams_distribute, AStmt);
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_target_teams_distribute, getCollapseNumberExpr(Clauses),
- nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack,
+ nullptr /*ordered not a clause on distribute*/, CS, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
+ assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) &&
"omp target teams distribute loop exprs were not built");
- setFunctionHasBranchProtectedScope();
return OMPTargetTeamsDistributeDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
-StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForDirective(
+StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel =
- getOpenMPCaptureLevels(OMPD_target_teams_distribute_parallel_for);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS = setBranchProtectedScope(
+ SemaRef, OMPD_target_teams_distribute_parallel_for, AStmt);
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_target_teams_distribute_parallel_for, getCollapseNumberExpr(Clauses),
- nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack,
+ nullptr /*ordered not a clause on distribute*/, CS, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
- "omp target teams distribute parallel for loop exprs were not built");
-
- if (!CurContext->isDependentContext()) {
- // Finalize the clauses that need pre-built expressions for CodeGen.
- for (OMPClause *C : Clauses) {
- if (auto *LC = dyn_cast<OMPLinearClause>(C))
- if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
- B.NumIterations, *this, CurScope,
- DSAStack))
- return StmtError();
- }
- }
+ if (finishLinearClauses(SemaRef, Clauses, B, DSAStack))
+ return StmtError();
- setFunctionHasBranchProtectedScope();
return OMPTargetTeamsDistributeParallelForDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
-StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
+StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel = getOpenMPCaptureLevels(
- OMPD_target_teams_distribute_parallel_for_simd);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS = setBranchProtectedScope(
+ SemaRef, OMPD_target_teams_distribute_parallel_for_simd, AStmt);
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
@@ -14830,97 +13973,54 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
unsigned NestedLoopCount =
checkOpenMPLoop(OMPD_target_teams_distribute_parallel_for_simd,
getCollapseNumberExpr(Clauses),
- nullptr /*ordered not a clause on distribute*/, CS, *this,
- *DSAStack, VarsWithImplicitDSA, B);
+ nullptr /*ordered not a clause on distribute*/, CS,
+ SemaRef, *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
- "omp target teams distribute parallel for simd loop exprs were not "
- "built");
-
- if (!CurContext->isDependentContext()) {
- // Finalize the clauses that need pre-built expressions for CodeGen.
- for (OMPClause *C : Clauses) {
- if (auto *LC = dyn_cast<OMPLinearClause>(C))
- if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
- B.NumIterations, *this, CurScope,
- DSAStack))
- return StmtError();
- }
- }
+ if (finishLinearClauses(SemaRef, Clauses, B, DSAStack))
+ return StmtError();
- if (checkSimdlenSafelenSpecified(*this, Clauses))
+ if (checkSimdlenSafelenSpecified(SemaRef, Clauses))
return StmtError();
- setFunctionHasBranchProtectedScope();
return OMPTargetTeamsDistributeParallelForSimdDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
-StmtResult Sema::ActOnOpenMPTargetTeamsDistributeSimdDirective(
+StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- auto *CS = cast<CapturedStmt>(AStmt);
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- for (int ThisCaptureLevel =
- getOpenMPCaptureLevels(OMPD_target_teams_distribute_simd);
- ThisCaptureLevel > 1; --ThisCaptureLevel) {
- CS = cast<CapturedStmt>(CS->getCapturedStmt());
- // 1.2.2 OpenMP Language Terminology
- // Structured block - An executable statement with a single entry at the
- // top and a single exit at the bottom.
- // The point of exit cannot be a branch out of the structured block.
- // longjmp() and throw() must not violate the entry/exit criteria.
- CS->getCapturedDecl()->setNothrow();
- }
+ CapturedStmt *CS = setBranchProtectedScope(
+ SemaRef, OMPD_target_teams_distribute_simd, AStmt);
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_target_teams_distribute_simd, getCollapseNumberExpr(Clauses),
- nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack,
+ nullptr /*ordered not a clause on distribute*/, CS, SemaRef, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
- assert((CurContext->isDependentContext() || B.builtAll()) &&
- "omp target teams distribute simd loop exprs were not built");
-
- if (!CurContext->isDependentContext()) {
- // Finalize the clauses that need pre-built expressions for CodeGen.
- for (OMPClause *C : Clauses) {
- if (auto *LC = dyn_cast<OMPLinearClause>(C))
- if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
- B.NumIterations, *this, CurScope,
- DSAStack))
- return StmtError();
- }
- }
+ if (finishLinearClauses(SemaRef, Clauses, B, DSAStack))
+ return StmtError();
- if (checkSimdlenSafelenSpecified(*this, Clauses))
+ if (checkSimdlenSafelenSpecified(SemaRef, Clauses))
return StmtError();
- setFunctionHasBranchProtectedScope();
return OMPTargetTeamsDistributeSimdDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+ getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
-bool Sema::checkTransformableLoopNest(
+bool SemaOpenMP::checkTransformableLoopNest(
OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops,
SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers,
- Stmt *&Body,
- SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>>
- &OriginalInits) {
+ Stmt *&Body, SmallVectorImpl<SmallVector<Stmt *, 0>> &OriginalInits) {
OriginalInits.emplace_back();
bool Result = OMPLoopBasedDirective::doForAllLoops(
AStmt->IgnoreContainers(), /*TryImperfectlyNestedLoops=*/false, NumLoops,
@@ -14928,7 +14028,7 @@ bool Sema::checkTransformableLoopNest(
Stmt *CurStmt) {
VarsWithInheritedDSAType TmpDSA;
unsigned SingleNumLoops =
- checkOpenMPLoop(Kind, nullptr, nullptr, CurStmt, *this, *DSAStack,
+ checkOpenMPLoop(Kind, nullptr, nullptr, CurStmt, SemaRef, *DSAStack,
TmpDSA, LoopHelpers[Cnt]);
if (SingleNumLoops == 0)
return true;
@@ -14952,28 +14052,88 @@ bool Sema::checkTransformableLoopNest(
DependentPreInits = Dir->getPreInits();
else if (auto *Dir = dyn_cast<OMPUnrollDirective>(Transform))
DependentPreInits = Dir->getPreInits();
+ else if (auto *Dir = dyn_cast<OMPReverseDirective>(Transform))
+ DependentPreInits = Dir->getPreInits();
+ else if (auto *Dir = dyn_cast<OMPInterchangeDirective>(Transform))
+ DependentPreInits = Dir->getPreInits();
else
llvm_unreachable("Unhandled loop transformation");
- if (!DependentPreInits)
- return;
- llvm::append_range(OriginalInits.back(),
- cast<DeclStmt>(DependentPreInits)->getDeclGroup());
+
+ appendFlattenedStmtList(OriginalInits.back(), DependentPreInits);
});
assert(OriginalInits.back().empty() && "No preinit after innermost loop");
OriginalInits.pop_back();
return Result;
}
-StmtResult Sema::ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc) {
- auto SizesClauses =
- OMPExecutableDirective::getClausesOfKind<OMPSizesClause>(Clauses);
- if (SizesClauses.empty()) {
- // A missing 'sizes' clause is already reported by the parser.
- return StmtError();
+/// Add preinit statements that need to be propageted from the selected loop.
+static void addLoopPreInits(ASTContext &Context,
+ OMPLoopBasedDirective::HelperExprs &LoopHelper,
+ Stmt *LoopStmt, ArrayRef<Stmt *> OriginalInit,
+ SmallVectorImpl<Stmt *> &PreInits) {
+
+ // For range-based for-statements, ensure that their syntactic sugar is
+ // executed by adding them as pre-init statements.
+ if (auto *CXXRangeFor = dyn_cast<CXXForRangeStmt>(LoopStmt)) {
+ Stmt *RangeInit = CXXRangeFor->getInit();
+ if (RangeInit)
+ PreInits.push_back(RangeInit);
+
+ DeclStmt *RangeStmt = CXXRangeFor->getRangeStmt();
+ PreInits.push_back(new (Context) DeclStmt(RangeStmt->getDeclGroup(),
+ RangeStmt->getBeginLoc(),
+ RangeStmt->getEndLoc()));
+
+ DeclStmt *RangeEnd = CXXRangeFor->getEndStmt();
+ PreInits.push_back(new (Context) DeclStmt(RangeEnd->getDeclGroup(),
+ RangeEnd->getBeginLoc(),
+ RangeEnd->getEndLoc()));
}
- const OMPSizesClause *SizesClause = *SizesClauses.begin();
+
+ llvm::append_range(PreInits, OriginalInit);
+
+ // List of OMPCapturedExprDecl, for __begin, __end, and NumIterations
+ if (auto *PI = cast_or_null<DeclStmt>(LoopHelper.PreInits)) {
+ PreInits.push_back(new (Context) DeclStmt(
+ PI->getDeclGroup(), PI->getBeginLoc(), PI->getEndLoc()));
+ }
+
+ // Gather declarations for the data members used as counters.
+ for (Expr *CounterRef : LoopHelper.Counters) {
+ auto *CounterDecl = cast<DeclRefExpr>(CounterRef)->getDecl();
+ if (isa<OMPCapturedExprDecl>(CounterDecl))
+ PreInits.push_back(new (Context) DeclStmt(
+ DeclGroupRef(CounterDecl), SourceLocation(), SourceLocation()));
+ }
+}
+
+/// Collect the loop statements (ForStmt or CXXRangeForStmt) of the affected
+/// loop of a construct.
+static void collectLoopStmts(Stmt *AStmt, MutableArrayRef<Stmt *> LoopStmts) {
+ size_t NumLoops = LoopStmts.size();
+ OMPLoopBasedDirective::doForAllLoops(
+ AStmt, /*TryImperfectlyNestedLoops=*/false, NumLoops,
+ [LoopStmts](unsigned Cnt, Stmt *CurStmt) {
+ assert(!LoopStmts[Cnt] && "Loop statement must not yet be assigned");
+ LoopStmts[Cnt] = CurStmt;
+ return false;
+ });
+ assert(!is_contained(LoopStmts, nullptr) &&
+ "Expecting a loop statement for each affected loop");
+}
+
+StmtResult SemaOpenMP::ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ ASTContext &Context = getASTContext();
+ Scope *CurScope = SemaRef.getCurScope();
+
+ const auto *SizesClause =
+ OMPExecutableDirective::getSingleClause<OMPSizesClause>(Clauses);
+ if (!SizesClause ||
+ llvm::any_of(SizesClause->getSizesRefs(), [](Expr *E) { return !E; }))
+ return StmtError();
unsigned NumLoops = SizesClause->getNumSizes();
// Empty statement should only be possible if there already was an error.
@@ -14983,18 +14143,29 @@ StmtResult Sema::ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
// Verify and diagnose loop nest.
SmallVector<OMPLoopBasedDirective::HelperExprs, 4> LoopHelpers(NumLoops);
Stmt *Body = nullptr;
- SmallVector<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>, 4>
- OriginalInits;
+ SmallVector<SmallVector<Stmt *, 0>, 4> OriginalInits;
if (!checkTransformableLoopNest(OMPD_tile, AStmt, NumLoops, LoopHelpers, Body,
OriginalInits))
return StmtError();
// Delay tiling to when template is completely instantiated.
- if (CurContext->isDependentContext())
+ if (SemaRef.CurContext->isDependentContext())
return OMPTileDirective::Create(Context, StartLoc, EndLoc, Clauses,
NumLoops, AStmt, nullptr, nullptr);
- SmallVector<Decl *, 4> PreInits;
+ assert(LoopHelpers.size() == NumLoops &&
+ "Expecting loop iteration space dimensionality to match number of "
+ "affected loops");
+ assert(OriginalInits.size() == NumLoops &&
+ "Expecting loop iteration space dimensionality to match number of "
+ "affected loops");
+
+ // Collect all affected loop statements.
+ SmallVector<Stmt *> LoopStmts(NumLoops, nullptr);
+ collectLoopStmts(AStmt, LoopStmts);
+
+ SmallVector<Stmt *, 4> PreInits;
+ CaptureVars CopyTransformer(SemaRef);
// Create iteration variables for the generated loops.
SmallVector<VarDecl *, 4> FloorIndVars;
@@ -15016,7 +14187,7 @@ StmtResult Sema::ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
std::string FloorCntName =
(Twine(".floor_") + llvm::utostr(I) + ".iv." + OrigVarName).str();
VarDecl *FloorCntDecl =
- buildVarDecl(*this, {}, CntTy, FloorCntName, nullptr, OrigCntVar);
+ buildVarDecl(SemaRef, {}, CntTy, FloorCntName, nullptr, OrigCntVar);
FloorIndVars[I] = FloorCntDecl;
}
@@ -15029,46 +14200,82 @@ StmtResult Sema::ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
// used by the expressions to derive the original iteration variable's
// value from the logical iteration number.
auto *TileCntDecl = cast<VarDecl>(IterVarRef->getDecl());
- TileCntDecl->setDeclName(&PP.getIdentifierTable().get(TileCntName));
+ TileCntDecl->setDeclName(
+ &SemaRef.PP.getIdentifierTable().get(TileCntName));
TileIndVars[I] = TileCntDecl;
}
- for (auto &P : OriginalInits[I]) {
- if (auto *D = P.dyn_cast<Decl *>())
- PreInits.push_back(D);
- else if (auto *PI = dyn_cast_or_null<DeclStmt>(P.dyn_cast<Stmt *>()))
- PreInits.append(PI->decl_begin(), PI->decl_end());
- }
- if (auto *PI = cast_or_null<DeclStmt>(LoopHelper.PreInits))
- PreInits.append(PI->decl_begin(), PI->decl_end());
- // Gather declarations for the data members used as counters.
- for (Expr *CounterRef : LoopHelper.Counters) {
- auto *CounterDecl = cast<DeclRefExpr>(CounterRef)->getDecl();
- if (isa<OMPCapturedExprDecl>(CounterDecl))
- PreInits.push_back(CounterDecl);
- }
+
+ addLoopPreInits(Context, LoopHelper, LoopStmts[I], OriginalInits[I],
+ PreInits);
}
// Once the original iteration values are set, append the innermost body.
Stmt *Inner = Body;
+ auto MakeDimTileSize = [&SemaRef = this->SemaRef, &CopyTransformer, &Context,
+ SizesClause, CurScope](int I) -> Expr * {
+ Expr *DimTileSizeExpr = SizesClause->getSizesRefs()[I];
+ if (isa<ConstantExpr>(DimTileSizeExpr))
+ return AssertSuccess(CopyTransformer.TransformExpr(DimTileSizeExpr));
+
+ // When the tile size is not a constant but a variable, it is possible to
+ // pass non-positive numbers. For instance:
+ // \code{c}
+ // int a = 0;
+ // #pragma omp tile sizes(a)
+ // for (int i = 0; i < 42; ++i)
+ // body(i);
+ // \endcode
+ // Although there is no meaningful interpretation of the tile size, the body
+ // should still be executed 42 times to avoid surprises. To preserve the
+ // invariant that every loop iteration is executed exactly once and not
+ // cause an infinite loop, apply a minimum tile size of one.
+ // Build expr:
+ // \code{c}
+ // (TS <= 0) ? 1 : TS
+ // \endcode
+ QualType DimTy = DimTileSizeExpr->getType();
+ uint64_t DimWidth = Context.getTypeSize(DimTy);
+ IntegerLiteral *Zero = IntegerLiteral::Create(
+ Context, llvm::APInt::getZero(DimWidth), DimTy, {});
+ IntegerLiteral *One =
+ IntegerLiteral::Create(Context, llvm::APInt(DimWidth, 1), DimTy, {});
+ Expr *Cond = AssertSuccess(SemaRef.BuildBinOp(
+ CurScope, {}, BO_LE,
+ AssertSuccess(CopyTransformer.TransformExpr(DimTileSizeExpr)), Zero));
+ Expr *MinOne = new (Context) ConditionalOperator(
+ Cond, {}, One, {},
+ AssertSuccess(CopyTransformer.TransformExpr(DimTileSizeExpr)), DimTy,
+ VK_PRValue, OK_Ordinary);
+ return MinOne;
+ };
+
// Create tile loops from the inside to the outside.
for (int I = NumLoops - 1; I >= 0; --I) {
OMPLoopBasedDirective::HelperExprs &LoopHelper = LoopHelpers[I];
Expr *NumIterations = LoopHelper.NumIterations;
auto *OrigCntVar = cast<DeclRefExpr>(LoopHelper.Counters[0]);
- QualType CntTy = OrigCntVar->getType();
- Expr *DimTileSize = SizesClause->getSizesRefs()[I];
- Scope *CurScope = getCurScope();
-
- // Commonly used variables.
- DeclRefExpr *TileIV = buildDeclRefExpr(*this, TileIndVars[I], CntTy,
- OrigCntVar->getExprLoc());
- DeclRefExpr *FloorIV = buildDeclRefExpr(*this, FloorIndVars[I], CntTy,
- OrigCntVar->getExprLoc());
+ QualType IVTy = NumIterations->getType();
+ Stmt *LoopStmt = LoopStmts[I];
+
+ // Commonly used variables. One of the constraints of an AST is that every
+ // node object must appear at most once, hence we define lamdas that create
+ // a new AST node at every use.
+ auto MakeTileIVRef = [&SemaRef = this->SemaRef, &TileIndVars, I, IVTy,
+ OrigCntVar]() {
+ return buildDeclRefExpr(SemaRef, TileIndVars[I], IVTy,
+ OrigCntVar->getExprLoc());
+ };
+ auto MakeFloorIVRef = [&SemaRef = this->SemaRef, &FloorIndVars, I, IVTy,
+ OrigCntVar]() {
+ return buildDeclRefExpr(SemaRef, FloorIndVars[I], IVTy,
+ OrigCntVar->getExprLoc());
+ };
// For init-statement: auto .tile.iv = .floor.iv
- AddInitializerToDecl(TileIndVars[I], DefaultLvalueConversion(FloorIV).get(),
- /*DirectInit=*/false);
+ SemaRef.AddInitializerToDecl(
+ TileIndVars[I], SemaRef.DefaultLvalueConversion(MakeFloorIVRef()).get(),
+ /*DirectInit=*/false);
Decl *CounterDecl = TileIndVars[I];
StmtResult InitStmt = new (Context)
DeclStmt(DeclGroupRef::Create(Context, &CounterDecl, 1),
@@ -15076,48 +14283,55 @@ StmtResult Sema::ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
if (!InitStmt.isUsable())
return StmtError();
- // For cond-expression: .tile.iv < min(.floor.iv + DimTileSize,
- // NumIterations)
- ExprResult EndOfTile = BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(),
- BO_Add, FloorIV, DimTileSize);
+ // For cond-expression:
+ // .tile.iv < min(.floor.iv + DimTileSize, NumIterations)
+ ExprResult EndOfTile =
+ SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_Add,
+ MakeFloorIVRef(), MakeDimTileSize(I));
if (!EndOfTile.isUsable())
return StmtError();
ExprResult IsPartialTile =
- BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT,
- NumIterations, EndOfTile.get());
+ SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT,
+ NumIterations, EndOfTile.get());
if (!IsPartialTile.isUsable())
return StmtError();
- ExprResult MinTileAndIterSpace = ActOnConditionalOp(
+ ExprResult MinTileAndIterSpace = SemaRef.ActOnConditionalOp(
LoopHelper.Cond->getBeginLoc(), LoopHelper.Cond->getEndLoc(),
IsPartialTile.get(), NumIterations, EndOfTile.get());
if (!MinTileAndIterSpace.isUsable())
return StmtError();
- ExprResult CondExpr = BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(),
- BO_LT, TileIV, MinTileAndIterSpace.get());
+ ExprResult CondExpr =
+ SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT,
+ MakeTileIVRef(), MinTileAndIterSpace.get());
if (!CondExpr.isUsable())
return StmtError();
// For incr-statement: ++.tile.iv
- ExprResult IncrStmt =
- BuildUnaryOp(CurScope, LoopHelper.Inc->getExprLoc(), UO_PreInc, TileIV);
+ ExprResult IncrStmt = SemaRef.BuildUnaryOp(
+ CurScope, LoopHelper.Inc->getExprLoc(), UO_PreInc, MakeTileIVRef());
if (!IncrStmt.isUsable())
return StmtError();
// Statements to set the original iteration variable's value from the
// logical iteration number.
// Generated for loop is:
+ // \code
// Original_for_init;
- // for (auto .tile.iv = .floor.iv; .tile.iv < min(.floor.iv + DimTileSize,
- // NumIterations); ++.tile.iv) {
+ // for (auto .tile.iv = .floor.iv;
+ // .tile.iv < min(.floor.iv + DimTileSize, NumIterations);
+ // ++.tile.iv) {
// Original_Body;
// Original_counter_update;
// }
+ // \endcode
// FIXME: If the innermost body is an loop itself, inserting these
// statements stops it being recognized as a perfectly nested loop (e.g.
// for applying tiling again). If this is the case, sink the expressions
// further into the inner loop.
SmallVector<Stmt *, 4> BodyParts;
BodyParts.append(LoopHelper.Updates.begin(), LoopHelper.Updates.end());
+ if (auto *SourceCXXFor = dyn_cast<CXXForRangeStmt>(LoopStmt))
+ BodyParts.push_back(SourceCXXFor->getLoopVarStmt());
BodyParts.push_back(Inner);
Inner = CompoundStmt::Create(Context, BodyParts, FPOptionsOverride(),
Inner->getBeginLoc(), Inner->getEndLoc());
@@ -15132,18 +14346,21 @@ StmtResult Sema::ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
auto &LoopHelper = LoopHelpers[I];
Expr *NumIterations = LoopHelper.NumIterations;
DeclRefExpr *OrigCntVar = cast<DeclRefExpr>(LoopHelper.Counters[0]);
- QualType CntTy = OrigCntVar->getType();
- Expr *DimTileSize = SizesClause->getSizesRefs()[I];
- Scope *CurScope = getCurScope();
-
- // Commonly used variables.
- DeclRefExpr *FloorIV = buildDeclRefExpr(*this, FloorIndVars[I], CntTy,
- OrigCntVar->getExprLoc());
+ QualType IVTy = NumIterations->getType();
+
+ // Commonly used variables. One of the constraints of an AST is that every
+ // node object must appear at most once, hence we define lamdas that create
+ // a new AST node at every use.
+ auto MakeFloorIVRef = [&SemaRef = this->SemaRef, &FloorIndVars, I, IVTy,
+ OrigCntVar]() {
+ return buildDeclRefExpr(SemaRef, FloorIndVars[I], IVTy,
+ OrigCntVar->getExprLoc());
+ };
// For init-statement: auto .floor.iv = 0
- AddInitializerToDecl(
+ SemaRef.AddInitializerToDecl(
FloorIndVars[I],
- ActOnIntegerConstant(LoopHelper.Init->getExprLoc(), 0).get(),
+ SemaRef.ActOnIntegerConstant(LoopHelper.Init->getExprLoc(), 0).get(),
/*DirectInit=*/false);
Decl *CounterDecl = FloorIndVars[I];
StmtResult InitStmt = new (Context)
@@ -15153,14 +14370,16 @@ StmtResult Sema::ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
return StmtError();
// For cond-expression: .floor.iv < NumIterations
- ExprResult CondExpr = BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(),
- BO_LT, FloorIV, NumIterations);
+ ExprResult CondExpr =
+ SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT,
+ MakeFloorIVRef(), NumIterations);
if (!CondExpr.isUsable())
return StmtError();
// For incr-statement: .floor.iv += DimTileSize
- ExprResult IncrStmt = BuildBinOp(CurScope, LoopHelper.Inc->getExprLoc(),
- BO_AddAssign, FloorIV, DimTileSize);
+ ExprResult IncrStmt =
+ SemaRef.BuildBinOp(CurScope, LoopHelper.Inc->getExprLoc(), BO_AddAssign,
+ MakeFloorIVRef(), MakeDimTileSize(I));
if (!IncrStmt.isUsable())
return StmtError();
@@ -15175,15 +14394,18 @@ StmtResult Sema::ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
buildPreInits(Context, PreInits));
}
-StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult SemaOpenMP::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ ASTContext &Context = getASTContext();
+ Scope *CurScope = SemaRef.getCurScope();
// Empty statement should only be possible if there already was an error.
if (!AStmt)
return StmtError();
- if (checkMutuallyExclusiveClauses(*this, Clauses, {OMPC_partial, OMPC_full}))
+ if (checkMutuallyExclusiveClauses(SemaRef, Clauses,
+ {OMPC_partial, OMPC_full}))
return StmtError();
const OMPFullClause *FullClause =
@@ -15197,8 +14419,7 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
Stmt *Body = nullptr;
SmallVector<OMPLoopBasedDirective::HelperExprs, NumLoops> LoopHelpers(
NumLoops);
- SmallVector<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>, NumLoops + 1>
- OriginalInits;
+ SmallVector<SmallVector<Stmt *, 0>, NumLoops + 1> OriginalInits;
if (!checkTransformableLoopNest(OMPD_unroll, AStmt, NumLoops, LoopHelpers,
Body, OriginalInits))
return StmtError();
@@ -15206,10 +14427,14 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
unsigned NumGeneratedLoops = PartialClause ? 1 : 0;
// Delay unrolling to when template is completely instantiated.
- if (CurContext->isDependentContext())
+ if (SemaRef.CurContext->isDependentContext())
return OMPUnrollDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
NumGeneratedLoops, nullptr, nullptr);
+ assert(LoopHelpers.size() == NumLoops &&
+ "Expecting a single-dimensional loop iteration space");
+ assert(OriginalInits.size() == NumLoops &&
+ "Expecting a single-dimensional loop iteration space");
OMPLoopBasedDirective::HelperExprs &LoopHelper = LoopHelpers.front();
if (FullClause) {
@@ -15273,24 +14498,13 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
// of a canonical loop nest where these PreInits are emitted before the
// outermost directive.
+ // Find the loop statement.
+ Stmt *LoopStmt = nullptr;
+ collectLoopStmts(AStmt, {LoopStmt});
+
// Determine the PreInit declarations.
- SmallVector<Decl *, 4> PreInits;
- assert(OriginalInits.size() == 1 &&
- "Expecting a single-dimensional loop iteration space");
- for (auto &P : OriginalInits[0]) {
- if (auto *D = P.dyn_cast<Decl *>())
- PreInits.push_back(D);
- else if (auto *PI = dyn_cast_or_null<DeclStmt>(P.dyn_cast<Stmt *>()))
- PreInits.append(PI->decl_begin(), PI->decl_end());
- }
- if (auto *PI = cast_or_null<DeclStmt>(LoopHelper.PreInits))
- PreInits.append(PI->decl_begin(), PI->decl_end());
- // Gather declarations for the data members used as counters.
- for (Expr *CounterRef : LoopHelper.Counters) {
- auto *CounterDecl = cast<DeclRefExpr>(CounterRef)->getDecl();
- if (isa<OMPCapturedExprDecl>(CounterDecl))
- PreInits.push_back(CounterDecl);
- }
+ SmallVector<Stmt *, 4> PreInits;
+ addLoopPreInits(Context, LoopHelper, LoopStmt, OriginalInits[0], PreInits);
auto *IterationVarRef = cast<DeclRefExpr>(LoopHelper.IterationVarRef);
QualType IVTy = IterationVarRef->getType();
@@ -15311,8 +14525,8 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
assert(Factor > 0 && "Expected positive unroll factor");
auto MakeFactorExpr = [this, Factor, IVTy, FactorLoc]() {
return IntegerLiteral::Create(
- Context, llvm::APInt(Context.getIntWidth(IVTy), Factor), IVTy,
- FactorLoc);
+ getASTContext(), llvm::APInt(getASTContext().getIntWidth(IVTy), Factor),
+ IVTy, FactorLoc);
};
// Iteration variable SourceLocations.
@@ -15329,30 +14543,31 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
// Create the iteration variable for the unrolled loop.
VarDecl *OuterIVDecl =
- buildVarDecl(*this, {}, IVTy, OuterIVName, nullptr, OrigVar);
+ buildVarDecl(SemaRef, {}, IVTy, OuterIVName, nullptr, OrigVar);
auto MakeOuterRef = [this, OuterIVDecl, IVTy, OrigVarLoc]() {
- return buildDeclRefExpr(*this, OuterIVDecl, IVTy, OrigVarLoc);
+ return buildDeclRefExpr(SemaRef, OuterIVDecl, IVTy, OrigVarLoc);
};
// Iteration variable for the inner loop: Reuse the iteration variable created
// by checkOpenMPLoop.
auto *InnerIVDecl = cast<VarDecl>(IterationVarRef->getDecl());
- InnerIVDecl->setDeclName(&PP.getIdentifierTable().get(InnerIVName));
+ InnerIVDecl->setDeclName(&SemaRef.PP.getIdentifierTable().get(InnerIVName));
auto MakeInnerRef = [this, InnerIVDecl, IVTy, OrigVarLoc]() {
- return buildDeclRefExpr(*this, InnerIVDecl, IVTy, OrigVarLoc);
+ return buildDeclRefExpr(SemaRef, InnerIVDecl, IVTy, OrigVarLoc);
};
// Make a copy of the NumIterations expression for each use: By the AST
// constraints, every expression object in a DeclContext must be unique.
- CaptureVars CopyTransformer(*this);
+ CaptureVars CopyTransformer(SemaRef);
auto MakeNumIterations = [&CopyTransformer, &LoopHelper]() -> Expr * {
return AssertSuccess(
CopyTransformer.TransformExpr(LoopHelper.NumIterations));
};
// Inner For init-statement: auto .unroll_inner.iv = .unrolled.iv
- ExprResult LValueConv = DefaultLvalueConversion(MakeOuterRef());
- AddInitializerToDecl(InnerIVDecl, LValueConv.get(), /*DirectInit=*/false);
+ ExprResult LValueConv = SemaRef.DefaultLvalueConversion(MakeOuterRef());
+ SemaRef.AddInitializerToDecl(InnerIVDecl, LValueConv.get(),
+ /*DirectInit=*/false);
StmtResult InnerInit = new (Context)
DeclStmt(DeclGroupRef(InnerIVDecl), OrigVarLocBegin, OrigVarLocEnd);
if (!InnerInit.isUsable())
@@ -15365,37 +14580,41 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
// \endcode
// This conjunction of two conditions allows ScalarEvolution to derive the
// maximum trip count of the inner loop.
- ExprResult EndOfTile = BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(),
- BO_Add, MakeOuterRef(), MakeFactorExpr());
+ ExprResult EndOfTile =
+ SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_Add,
+ MakeOuterRef(), MakeFactorExpr());
if (!EndOfTile.isUsable())
return StmtError();
- ExprResult InnerCond1 = BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(),
- BO_LT, MakeInnerRef(), EndOfTile.get());
+ ExprResult InnerCond1 =
+ SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT,
+ MakeInnerRef(), EndOfTile.get());
if (!InnerCond1.isUsable())
return StmtError();
ExprResult InnerCond2 =
- BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT, MakeInnerRef(),
- MakeNumIterations());
+ SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT,
+ MakeInnerRef(), MakeNumIterations());
if (!InnerCond2.isUsable())
return StmtError();
ExprResult InnerCond =
- BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LAnd,
- InnerCond1.get(), InnerCond2.get());
+ SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LAnd,
+ InnerCond1.get(), InnerCond2.get());
if (!InnerCond.isUsable())
return StmtError();
// Inner For incr-statement: ++.unroll_inner.iv
- ExprResult InnerIncr = BuildUnaryOp(CurScope, LoopHelper.Inc->getExprLoc(),
- UO_PreInc, MakeInnerRef());
+ ExprResult InnerIncr = SemaRef.BuildUnaryOp(
+ CurScope, LoopHelper.Inc->getExprLoc(), UO_PreInc, MakeInnerRef());
if (!InnerIncr.isUsable())
return StmtError();
// Inner For statement.
SmallVector<Stmt *> InnerBodyStmts;
InnerBodyStmts.append(LoopHelper.Updates.begin(), LoopHelper.Updates.end());
+ if (auto *CXXRangeFor = dyn_cast<CXXForRangeStmt>(LoopStmt))
+ InnerBodyStmts.push_back(CXXRangeFor->getLoopVarStmt());
InnerBodyStmts.push_back(Body);
CompoundStmt *InnerBody =
- CompoundStmt::Create(Context, InnerBodyStmts, FPOptionsOverride(),
+ CompoundStmt::Create(getASTContext(), InnerBodyStmts, FPOptionsOverride(),
Body->getBeginLoc(), Body->getEndLoc());
ForStmt *InnerFor = new (Context)
ForStmt(Context, InnerInit.get(), InnerCond.get(), nullptr,
@@ -15417,12 +14636,13 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
LoopHintAttr *UnrollHintAttr =
LoopHintAttr::CreateImplicit(Context, LoopHintAttr::UnrollCount,
LoopHintAttr::Numeric, MakeFactorExpr());
- AttributedStmt *InnerUnrolled =
- AttributedStmt::Create(Context, StartLoc, {UnrollHintAttr}, InnerFor);
+ AttributedStmt *InnerUnrolled = AttributedStmt::Create(
+ getASTContext(), StartLoc, {UnrollHintAttr}, InnerFor);
// Outer For init-statement: auto .unrolled.iv = 0
- AddInitializerToDecl(
- OuterIVDecl, ActOnIntegerConstant(LoopHelper.Init->getExprLoc(), 0).get(),
+ SemaRef.AddInitializerToDecl(
+ OuterIVDecl,
+ SemaRef.ActOnIntegerConstant(LoopHelper.Init->getExprLoc(), 0).get(),
/*DirectInit=*/false);
StmtResult OuterInit = new (Context)
DeclStmt(DeclGroupRef(OuterIVDecl), OrigVarLocBegin, OrigVarLocEnd);
@@ -15431,15 +14651,15 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
// Outer For cond-expression: .unrolled.iv < NumIterations
ExprResult OuterConde =
- BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT, MakeOuterRef(),
- MakeNumIterations());
+ SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT,
+ MakeOuterRef(), MakeNumIterations());
if (!OuterConde.isUsable())
return StmtError();
// Outer For incr-statement: .unrolled.iv += Factor
ExprResult OuterIncr =
- BuildBinOp(CurScope, LoopHelper.Inc->getExprLoc(), BO_AddAssign,
- MakeOuterRef(), MakeFactorExpr());
+ SemaRef.BuildBinOp(CurScope, LoopHelper.Inc->getExprLoc(), BO_AddAssign,
+ MakeOuterRef(), MakeFactorExpr());
if (!OuterIncr.isUsable())
return StmtError();
@@ -15454,10 +14674,350 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
buildPreInits(Context, PreInits));
}
-OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+StmtResult SemaOpenMP::ActOnOpenMPReverseDirective(Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ ASTContext &Context = getASTContext();
+ Scope *CurScope = SemaRef.getCurScope();
+
+ // Empty statement should only be possible if there already was an error.
+ if (!AStmt)
+ return StmtError();
+
+ constexpr unsigned NumLoops = 1;
+ Stmt *Body = nullptr;
+ SmallVector<OMPLoopBasedDirective::HelperExprs, NumLoops> LoopHelpers(
+ NumLoops);
+ SmallVector<SmallVector<Stmt *, 0>, NumLoops + 1> OriginalInits;
+ if (!checkTransformableLoopNest(OMPD_reverse, AStmt, NumLoops, LoopHelpers,
+ Body, OriginalInits))
+ return StmtError();
+
+ // Delay applying the transformation to when template is completely
+ // instantiated.
+ if (SemaRef.CurContext->isDependentContext())
+ return OMPReverseDirective::Create(Context, StartLoc, EndLoc, AStmt,
+ nullptr, nullptr);
+
+ assert(LoopHelpers.size() == NumLoops &&
+ "Expecting a single-dimensional loop iteration space");
+ assert(OriginalInits.size() == NumLoops &&
+ "Expecting a single-dimensional loop iteration space");
+ OMPLoopBasedDirective::HelperExprs &LoopHelper = LoopHelpers.front();
+
+ // Find the loop statement.
+ Stmt *LoopStmt = nullptr;
+ collectLoopStmts(AStmt, {LoopStmt});
+
+ // Determine the PreInit declarations.
+ SmallVector<Stmt *> PreInits;
+ addLoopPreInits(Context, LoopHelper, LoopStmt, OriginalInits[0], PreInits);
+
+ auto *IterationVarRef = cast<DeclRefExpr>(LoopHelper.IterationVarRef);
+ QualType IVTy = IterationVarRef->getType();
+ uint64_t IVWidth = Context.getTypeSize(IVTy);
+ auto *OrigVar = cast<DeclRefExpr>(LoopHelper.Counters.front());
+
+ // Iteration variable SourceLocations.
+ SourceLocation OrigVarLoc = OrigVar->getExprLoc();
+ SourceLocation OrigVarLocBegin = OrigVar->getBeginLoc();
+ SourceLocation OrigVarLocEnd = OrigVar->getEndLoc();
+
+ // Locations pointing to the transformation.
+ SourceLocation TransformLoc = StartLoc;
+ SourceLocation TransformLocBegin = StartLoc;
+ SourceLocation TransformLocEnd = EndLoc;
+
+ // Internal variable names.
+ std::string OrigVarName = OrigVar->getNameInfo().getAsString();
+ SmallString<64> ForwardIVName(".forward.iv.");
+ ForwardIVName += OrigVarName;
+ SmallString<64> ReversedIVName(".reversed.iv.");
+ ReversedIVName += OrigVarName;
+
+ // LoopHelper.Updates will read the logical iteration number from
+ // LoopHelper.IterationVarRef, compute the value of the user loop counter of
+ // that logical iteration from it, then assign it to the user loop counter
+ // variable. We cannot directly use LoopHelper.IterationVarRef as the
+ // induction variable of the generated loop because it may cause an underflow:
+ // \code{.c}
+ // for (unsigned i = 0; i < n; ++i)
+ // body(i);
+ // \endcode
+ //
+ // Naive reversal:
+ // \code{.c}
+ // for (unsigned i = n-1; i >= 0; --i)
+ // body(i);
+ // \endcode
+ //
+ // Instead, we introduce a new iteration variable representing the logical
+ // iteration counter of the original loop, convert it to the logical iteration
+ // number of the reversed loop, then let LoopHelper.Updates compute the user's
+ // loop iteration variable from it.
+ // \code{.cpp}
+ // for (auto .forward.iv = 0; .forward.iv < n; ++.forward.iv) {
+ // auto .reversed.iv = n - .forward.iv - 1;
+ // i = (.reversed.iv + 0) * 1; // LoopHelper.Updates
+ // body(i); // Body
+ // }
+ // \endcode
+
+ // Subexpressions with more than one use. One of the constraints of an AST is
+ // that every node object must appear at most once, hence we define a lambda
+ // that creates a new AST node at every use.
+ CaptureVars CopyTransformer(SemaRef);
+ auto MakeNumIterations = [&CopyTransformer, &LoopHelper]() -> Expr * {
+ return AssertSuccess(
+ CopyTransformer.TransformExpr(LoopHelper.NumIterations));
+ };
+
+ // Create the iteration variable for the forward loop (from 0 to n-1).
+ VarDecl *ForwardIVDecl =
+ buildVarDecl(SemaRef, {}, IVTy, ForwardIVName, nullptr, OrigVar);
+ auto MakeForwardRef = [&SemaRef = this->SemaRef, ForwardIVDecl, IVTy,
+ OrigVarLoc]() {
+ return buildDeclRefExpr(SemaRef, ForwardIVDecl, IVTy, OrigVarLoc);
+ };
+
+ // Iteration variable for the reversed induction variable (from n-1 downto 0):
+ // Reuse the iteration variable created by checkOpenMPLoop.
+ auto *ReversedIVDecl = cast<VarDecl>(IterationVarRef->getDecl());
+ ReversedIVDecl->setDeclName(
+ &SemaRef.PP.getIdentifierTable().get(ReversedIVName));
+
+ // For init-statement:
+ // \code{.cpp}
+ // auto .forward.iv = 0;
+ // \endcode
+ auto *Zero = IntegerLiteral::Create(Context, llvm::APInt::getZero(IVWidth),
+ ForwardIVDecl->getType(), OrigVarLoc);
+ SemaRef.AddInitializerToDecl(ForwardIVDecl, Zero, /*DirectInit=*/false);
+ StmtResult Init = new (Context)
+ DeclStmt(DeclGroupRef(ForwardIVDecl), OrigVarLocBegin, OrigVarLocEnd);
+ if (!Init.isUsable())
+ return StmtError();
+
+ // Forward iv cond-expression:
+ // \code{.cpp}
+ // .forward.iv < MakeNumIterations()
+ // \endcode
+ ExprResult Cond =
+ SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT,
+ MakeForwardRef(), MakeNumIterations());
+ if (!Cond.isUsable())
+ return StmtError();
+
+ // Forward incr-statement:
+ // \code{.c}
+ // ++.forward.iv
+ // \endcode
+ ExprResult Incr = SemaRef.BuildUnaryOp(CurScope, LoopHelper.Inc->getExprLoc(),
+ UO_PreInc, MakeForwardRef());
+ if (!Incr.isUsable())
+ return StmtError();
+
+ // Reverse the forward-iv:
+ // \code{.cpp}
+ // auto .reversed.iv = MakeNumIterations() - 1 - .forward.iv
+ // \endcode
+ auto *One = IntegerLiteral::Create(Context, llvm::APInt(IVWidth, 1), IVTy,
+ TransformLoc);
+ ExprResult Minus = SemaRef.BuildBinOp(CurScope, TransformLoc, BO_Sub,
+ MakeNumIterations(), One);
+ if (!Minus.isUsable())
+ return StmtError();
+ Minus = SemaRef.BuildBinOp(CurScope, TransformLoc, BO_Sub, Minus.get(),
+ MakeForwardRef());
+ if (!Minus.isUsable())
+ return StmtError();
+ StmtResult InitReversed = new (Context) DeclStmt(
+ DeclGroupRef(ReversedIVDecl), TransformLocBegin, TransformLocEnd);
+ if (!InitReversed.isUsable())
+ return StmtError();
+ SemaRef.AddInitializerToDecl(ReversedIVDecl, Minus.get(),
+ /*DirectInit=*/false);
+
+ // The new loop body.
+ SmallVector<Stmt *, 4> BodyStmts;
+ BodyStmts.reserve(LoopHelper.Updates.size() + 2 +
+ (isa<CXXForRangeStmt>(LoopStmt) ? 1 : 0));
+ BodyStmts.push_back(InitReversed.get());
+ llvm::append_range(BodyStmts, LoopHelper.Updates);
+ if (auto *CXXRangeFor = dyn_cast<CXXForRangeStmt>(LoopStmt))
+ BodyStmts.push_back(CXXRangeFor->getLoopVarStmt());
+ BodyStmts.push_back(Body);
+ auto *ReversedBody =
+ CompoundStmt::Create(Context, BodyStmts, FPOptionsOverride(),
+ Body->getBeginLoc(), Body->getEndLoc());
+
+ // Finally create the reversed For-statement.
+ auto *ReversedFor = new (Context)
+ ForStmt(Context, Init.get(), Cond.get(), nullptr, Incr.get(),
+ ReversedBody, LoopHelper.Init->getBeginLoc(),
+ LoopHelper.Init->getBeginLoc(), LoopHelper.Inc->getEndLoc());
+ return OMPReverseDirective::Create(Context, StartLoc, EndLoc, AStmt,
+ ReversedFor,
+ buildPreInits(Context, PreInits));
+}
+
+StmtResult SemaOpenMP::ActOnOpenMPInterchangeDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ ASTContext &Context = getASTContext();
+ DeclContext *CurContext = SemaRef.CurContext;
+ Scope *CurScope = SemaRef.getCurScope();
+
+ // Empty statement should only be possible if there already was an error.
+ if (!AStmt)
+ return StmtError();
+
+ // interchange without permutation clause swaps two loops.
+ constexpr size_t NumLoops = 2;
+
+ // Verify and diagnose loop nest.
+ SmallVector<OMPLoopBasedDirective::HelperExprs, 4> LoopHelpers(NumLoops);
+ Stmt *Body = nullptr;
+ SmallVector<SmallVector<Stmt *, 0>, 2> OriginalInits;
+ if (!checkTransformableLoopNest(OMPD_interchange, AStmt, NumLoops,
+ LoopHelpers, Body, OriginalInits))
+ return StmtError();
+
+ // Delay interchange to when template is completely instantiated.
+ if (CurContext->isDependentContext())
+ return OMPInterchangeDirective::Create(Context, StartLoc, EndLoc, Clauses,
+ NumLoops, AStmt, nullptr, nullptr);
+
+ assert(LoopHelpers.size() == NumLoops &&
+ "Expecting loop iteration space dimensionaly to match number of "
+ "affected loops");
+ assert(OriginalInits.size() == NumLoops &&
+ "Expecting loop iteration space dimensionaly to match number of "
+ "affected loops");
+
+ // Decode the permutation clause.
+ constexpr uint64_t Permutation[] = {1, 0};
+
+ // Find the affected loops.
+ SmallVector<Stmt *> LoopStmts(NumLoops, nullptr);
+ collectLoopStmts(AStmt, LoopStmts);
+
+ // Collect pre-init statements in the order before the permutation.
+ SmallVector<Stmt *> PreInits;
+ for (auto I : llvm::seq<int>(NumLoops)) {
+ OMPLoopBasedDirective::HelperExprs &LoopHelper = LoopHelpers[I];
+
+ assert(LoopHelper.Counters.size() == 1 &&
+ "Single-dimensional loop iteration space expected");
+ auto *OrigCntVar = cast<DeclRefExpr>(LoopHelper.Counters.front());
+
+ std::string OrigVarName = OrigCntVar->getNameInfo().getAsString();
+ addLoopPreInits(Context, LoopHelper, LoopStmts[I], OriginalInits[I],
+ PreInits);
+ }
+
+ SmallVector<VarDecl *> PermutedIndVars(NumLoops);
+ CaptureVars CopyTransformer(SemaRef);
+
+ // Create the permuted loops from the inside to the outside of the
+ // interchanged loop nest. Body of the innermost new loop is the original
+ // innermost body.
+ Stmt *Inner = Body;
+ for (auto TargetIdx : llvm::reverse(llvm::seq<int>(NumLoops))) {
+ // Get the original loop that belongs to this new position.
+ uint64_t SourceIdx = Permutation[TargetIdx];
+ OMPLoopBasedDirective::HelperExprs &SourceHelper = LoopHelpers[SourceIdx];
+ Stmt *SourceLoopStmt = LoopStmts[SourceIdx];
+ assert(SourceHelper.Counters.size() == 1 &&
+ "Single-dimensional loop iteration space expected");
+ auto *OrigCntVar = cast<DeclRefExpr>(SourceHelper.Counters.front());
+
+ // Normalized loop counter variable: From 0 to n-1, always an integer type.
+ DeclRefExpr *IterVarRef = cast<DeclRefExpr>(SourceHelper.IterationVarRef);
+ QualType IVTy = IterVarRef->getType();
+ assert(IVTy->isIntegerType() &&
+ "Expected the logical iteration counter to be an integer");
+
+ std::string OrigVarName = OrigCntVar->getNameInfo().getAsString();
+ SourceLocation OrigVarLoc = IterVarRef->getExprLoc();
+
+ // Make a copy of the NumIterations expression for each use: By the AST
+ // constraints, every expression object in a DeclContext must be unique.
+ auto MakeNumIterations = [&CopyTransformer, &SourceHelper]() -> Expr * {
+ return AssertSuccess(
+ CopyTransformer.TransformExpr(SourceHelper.NumIterations));
+ };
+
+ // Iteration variable for the permuted loop. Reuse the one from
+ // checkOpenMPLoop which will also be used to update the original loop
+ // variable.
+ SmallString<64> PermutedCntName(".permuted_");
+ PermutedCntName.append({llvm::utostr(TargetIdx), ".iv.", OrigVarName});
+ auto *PermutedCntDecl = cast<VarDecl>(IterVarRef->getDecl());
+ PermutedCntDecl->setDeclName(
+ &SemaRef.PP.getIdentifierTable().get(PermutedCntName));
+ PermutedIndVars[TargetIdx] = PermutedCntDecl;
+ auto MakePermutedRef = [this, PermutedCntDecl, IVTy, OrigVarLoc]() {
+ return buildDeclRefExpr(SemaRef, PermutedCntDecl, IVTy, OrigVarLoc);
+ };
+
+ // For init-statement:
+ // \code
+ // auto .permuted_{target}.iv = 0
+ // \endcode
+ ExprResult Zero = SemaRef.ActOnIntegerConstant(OrigVarLoc, 0);
+ if (!Zero.isUsable())
+ return StmtError();
+ SemaRef.AddInitializerToDecl(PermutedCntDecl, Zero.get(),
+ /*DirectInit=*/false);
+ StmtResult InitStmt = new (Context)
+ DeclStmt(DeclGroupRef(PermutedCntDecl), OrigCntVar->getBeginLoc(),
+ OrigCntVar->getEndLoc());
+ if (!InitStmt.isUsable())
+ return StmtError();
+
+ // For cond-expression:
+ // \code
+ // .permuted_{target}.iv < MakeNumIterations()
+ // \endcode
+ ExprResult CondExpr =
+ SemaRef.BuildBinOp(CurScope, SourceHelper.Cond->getExprLoc(), BO_LT,
+ MakePermutedRef(), MakeNumIterations());
+ if (!CondExpr.isUsable())
+ return StmtError();
+
+ // For incr-statement:
+ // \code
+ // ++.permuted_{target}.iv
+ // \endcode
+ ExprResult IncrStmt = SemaRef.BuildUnaryOp(
+ CurScope, SourceHelper.Inc->getExprLoc(), UO_PreInc, MakePermutedRef());
+ if (!IncrStmt.isUsable())
+ return StmtError();
+
+ SmallVector<Stmt *, 4> BodyParts(SourceHelper.Updates.begin(),
+ SourceHelper.Updates.end());
+ if (auto *SourceCXXFor = dyn_cast<CXXForRangeStmt>(SourceLoopStmt))
+ BodyParts.push_back(SourceCXXFor->getLoopVarStmt());
+ BodyParts.push_back(Inner);
+ Inner = CompoundStmt::Create(Context, BodyParts, FPOptionsOverride(),
+ Inner->getBeginLoc(), Inner->getEndLoc());
+ Inner = new (Context) ForStmt(
+ Context, InitStmt.get(), CondExpr.get(), nullptr, IncrStmt.get(), Inner,
+ SourceHelper.Init->getBeginLoc(), SourceHelper.Init->getBeginLoc(),
+ SourceHelper.Inc->getEndLoc());
+ }
+
+ return OMPInterchangeDirective::Create(Context, StartLoc, EndLoc, Clauses,
+ NumLoops, AStmt, Inner,
+ buildPreInits(Context, PreInits));
+}
+
+OMPClause *SemaOpenMP::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
+ Expr *Expr,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
OMPClause *Res = nullptr;
switch (Kind) {
case OMPC_final:
@@ -15601,959 +15161,129 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, unsigned OpenMPVersion,
OpenMPDirectiveKind NameModifier = OMPD_unknown) {
- OpenMPDirectiveKind CaptureRegion = OMPD_unknown;
+ assert(isAllowedClauseForDirective(DKind, CKind, OpenMPVersion) &&
+ "Invalid directive with CKind-clause");
+
+ // Invalid modifier will be diagnosed separately, just return OMPD_unknown.
+ if (NameModifier != OMPD_unknown &&
+ !isAllowedClauseForDirective(NameModifier, CKind, OpenMPVersion))
+ return OMPD_unknown;
+
+ ArrayRef<OpenMPDirectiveKind> Leafs = getLeafConstructsOrSelf(DKind);
+
+ // [5.2:341:24-30]
+ // If the clauses have expressions on them, such as for various clauses where
+ // the argument of the clause is an expression, or lower-bound, length, or
+ // stride expressions inside array sections (or subscript and stride
+ // expressions in subscript-triplet for Fortran), or linear-step or alignment
+ // expressions, the expressions are evaluated immediately before the construct
+ // to which the clause has been split or duplicated per the above rules
+ // (therefore inside of the outer leaf constructs). However, the expressions
+ // inside the num_teams and thread_limit clauses are always evaluated before
+ // the outermost leaf construct.
+
+ // Process special cases first.
switch (CKind) {
case OMPC_if:
switch (DKind) {
- case OMPD_target_parallel_for_simd:
- if (OpenMPVersion >= 50 &&
- (NameModifier == OMPD_unknown || NameModifier == OMPD_simd)) {
- CaptureRegion = OMPD_parallel;
- break;
- }
- [[fallthrough]];
- case OMPD_target_parallel:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_loop:
- // If this clause applies to the nested 'parallel' region, capture within
- // the 'target' region, otherwise do not capture.
- if (NameModifier == OMPD_unknown || NameModifier == OMPD_parallel)
- CaptureRegion = OMPD_target;
- break;
- case OMPD_target_teams_distribute_parallel_for_simd:
- if (OpenMPVersion >= 50 &&
- (NameModifier == OMPD_unknown || NameModifier == OMPD_simd)) {
- CaptureRegion = OMPD_parallel;
- break;
- }
- [[fallthrough]];
+ case OMPD_teams_loop:
case OMPD_target_teams_loop:
- case OMPD_target_teams_distribute_parallel_for:
- // If this clause applies to the nested 'parallel' region, capture within
- // the 'teams' region, otherwise do not capture.
- if (NameModifier == OMPD_unknown || NameModifier == OMPD_parallel)
- CaptureRegion = OMPD_teams;
- break;
- case OMPD_teams_distribute_parallel_for_simd:
- if (OpenMPVersion >= 50 &&
- (NameModifier == OMPD_unknown || NameModifier == OMPD_simd)) {
- CaptureRegion = OMPD_parallel;
- break;
- }
- [[fallthrough]];
- case OMPD_teams_distribute_parallel_for:
- CaptureRegion = OMPD_teams;
- break;
+ // For [target] teams loop, assume capture region is 'teams' so it's
+ // available for codegen later to use if/when necessary.
+ return OMPD_teams;
case OMPD_target_update:
case OMPD_target_enter_data:
case OMPD_target_exit_data:
- CaptureRegion = OMPD_task;
- break;
- case OMPD_parallel_masked_taskloop:
- if (NameModifier == OMPD_unknown || NameModifier == OMPD_taskloop)
- CaptureRegion = OMPD_parallel;
- break;
- case OMPD_parallel_master_taskloop:
- if (NameModifier == OMPD_unknown || NameModifier == OMPD_taskloop)
- CaptureRegion = OMPD_parallel;
- break;
- case OMPD_parallel_masked_taskloop_simd:
- if ((OpenMPVersion <= 45 && NameModifier == OMPD_unknown) ||
- NameModifier == OMPD_taskloop) {
- CaptureRegion = OMPD_parallel;
- break;
- }
- if (OpenMPVersion <= 45)
- break;
- if (NameModifier == OMPD_unknown || NameModifier == OMPD_simd)
- CaptureRegion = OMPD_taskloop;
- break;
- case OMPD_parallel_master_taskloop_simd:
- if ((OpenMPVersion <= 45 && NameModifier == OMPD_unknown) ||
- NameModifier == OMPD_taskloop) {
- CaptureRegion = OMPD_parallel;
- break;
- }
- if (OpenMPVersion <= 45)
- break;
- if (NameModifier == OMPD_unknown || NameModifier == OMPD_simd)
- CaptureRegion = OMPD_taskloop;
- break;
- case OMPD_parallel_for_simd:
- if (OpenMPVersion <= 45)
- break;
- if (NameModifier == OMPD_unknown || NameModifier == OMPD_simd)
- CaptureRegion = OMPD_parallel;
- break;
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop_simd:
- case OMPD_masked_taskloop_simd:
- if (OpenMPVersion <= 45)
- break;
- if (NameModifier == OMPD_unknown || NameModifier == OMPD_simd)
- CaptureRegion = OMPD_taskloop;
- break;
- case OMPD_distribute_parallel_for_simd:
- if (OpenMPVersion <= 45)
- break;
- if (NameModifier == OMPD_unknown || NameModifier == OMPD_simd)
- CaptureRegion = OMPD_parallel;
- break;
- case OMPD_target_simd:
- if (OpenMPVersion >= 50 &&
- (NameModifier == OMPD_unknown || NameModifier == OMPD_simd))
- CaptureRegion = OMPD_target;
- break;
- case OMPD_teams_distribute_simd:
- case OMPD_target_teams_distribute_simd:
- if (OpenMPVersion >= 50 &&
- (NameModifier == OMPD_unknown || NameModifier == OMPD_simd))
- CaptureRegion = OMPD_teams;
- break;
- case OMPD_cancel:
- case OMPD_parallel:
- case OMPD_parallel_master:
- case OMPD_parallel_masked:
- case OMPD_parallel_sections:
- case OMPD_parallel_for:
- case OMPD_parallel_loop:
- case OMPD_target:
- case OMPD_target_teams:
- case OMPD_target_teams_distribute:
- case OMPD_distribute_parallel_for:
- case OMPD_task:
- case OMPD_taskloop:
- case OMPD_master_taskloop:
- case OMPD_masked_taskloop:
- case OMPD_target_data:
- case OMPD_simd:
- case OMPD_for_simd:
- case OMPD_distribute_simd:
- // Do not capture if-clause expressions.
- break;
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_taskyield:
- case OMPD_error:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_cancellation_point:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_loop:
- case OMPD_teams_loop:
- case OMPD_teams:
- case OMPD_tile:
- case OMPD_unroll:
- case OMPD_for:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_masked:
- case OMPD_critical:
- case OMPD_taskgroup:
- case OMPD_distribute:
- case OMPD_ordered:
- case OMPD_atomic:
- case OMPD_teams_distribute:
- case OMPD_requires:
- case OMPD_metadirective:
- llvm_unreachable("Unexpected OpenMP directive with if-clause");
- case OMPD_unknown:
+ return OMPD_task;
default:
- llvm_unreachable("Unknown OpenMP directive");
- }
- break;
- case OMPC_num_threads:
- switch (DKind) {
- case OMPD_target_parallel:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_parallel_loop:
- CaptureRegion = OMPD_target;
- break;
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
- CaptureRegion = OMPD_teams;
break;
- case OMPD_parallel:
- case OMPD_parallel_master:
- case OMPD_parallel_masked:
- case OMPD_parallel_sections:
- case OMPD_parallel_for:
- case OMPD_parallel_for_simd:
- case OMPD_parallel_loop:
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_masked_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_parallel_masked_taskloop_simd:
- // Do not capture num_threads-clause expressions.
- break;
- case OMPD_target_data:
- case OMPD_target_enter_data:
- case OMPD_target_exit_data:
- case OMPD_target_update:
- case OMPD_target:
- case OMPD_target_simd:
- case OMPD_target_teams:
- case OMPD_target_teams_distribute:
- case OMPD_target_teams_distribute_simd:
- case OMPD_cancel:
- case OMPD_task:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_masked_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_masked_taskloop_simd:
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_taskyield:
- case OMPD_error:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_cancellation_point:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_loop:
- case OMPD_teams_loop:
- case OMPD_target_teams_loop:
- case OMPD_teams:
- case OMPD_simd:
- case OMPD_tile:
- case OMPD_unroll:
- case OMPD_for:
- case OMPD_for_simd:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_masked:
- case OMPD_critical:
- case OMPD_taskgroup:
- case OMPD_distribute:
- case OMPD_ordered:
- case OMPD_atomic:
- case OMPD_distribute_simd:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_requires:
- case OMPD_metadirective:
- llvm_unreachable("Unexpected OpenMP directive with num_threads-clause");
- case OMPD_unknown:
- default:
- llvm_unreachable("Unknown OpenMP directive");
}
break;
case OMPC_num_teams:
- switch (DKind) {
- case OMPD_target_teams:
- case OMPD_target_teams_distribute:
- case OMPD_target_teams_distribute_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
- case OMPD_target_teams_loop:
- CaptureRegion = OMPD_target;
- break;
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_teams:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_teams_loop:
- // Do not capture num_teams-clause expressions.
- break;
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_task:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_masked_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_masked_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_masked_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_parallel_masked_taskloop_simd:
- case OMPD_target_data:
- case OMPD_target_enter_data:
- case OMPD_target_exit_data:
- case OMPD_target_update:
- case OMPD_cancel:
- case OMPD_parallel:
- case OMPD_parallel_master:
- case OMPD_parallel_masked:
- case OMPD_parallel_sections:
- case OMPD_parallel_for:
- case OMPD_parallel_for_simd:
- case OMPD_parallel_loop:
- case OMPD_target:
- case OMPD_target_simd:
- case OMPD_target_parallel:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_parallel_loop:
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_taskyield:
- case OMPD_error:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_cancellation_point:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_loop:
- case OMPD_simd:
- case OMPD_tile:
- case OMPD_unroll:
- case OMPD_for:
- case OMPD_for_simd:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_masked:
- case OMPD_critical:
- case OMPD_taskgroup:
- case OMPD_distribute:
- case OMPD_ordered:
- case OMPD_atomic:
- case OMPD_distribute_simd:
- case OMPD_requires:
- case OMPD_metadirective:
- llvm_unreachable("Unexpected OpenMP directive with num_teams-clause");
- case OMPD_unknown:
- default:
- llvm_unreachable("Unknown OpenMP directive");
- }
- break;
case OMPC_thread_limit:
- switch (DKind) {
- case OMPD_target:
- case OMPD_target_teams:
- case OMPD_target_teams_distribute:
- case OMPD_target_teams_distribute_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
- case OMPD_target_teams_loop:
- case OMPD_target_simd:
- case OMPD_target_parallel:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_parallel_loop:
- CaptureRegion = OMPD_target;
- break;
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_teams:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_teams_loop:
- // Do not capture thread_limit-clause expressions.
- break;
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_task:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_masked_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_masked_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_masked_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_parallel_masked_taskloop_simd:
- case OMPD_target_data:
- case OMPD_target_enter_data:
- case OMPD_target_exit_data:
- case OMPD_target_update:
- case OMPD_cancel:
- case OMPD_parallel:
- case OMPD_parallel_master:
- case OMPD_parallel_masked:
- case OMPD_parallel_sections:
- case OMPD_parallel_for:
- case OMPD_parallel_for_simd:
- case OMPD_parallel_loop:
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_taskyield:
- case OMPD_error:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_cancellation_point:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_loop:
- case OMPD_simd:
- case OMPD_tile:
- case OMPD_unroll:
- case OMPD_for:
- case OMPD_for_simd:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_masked:
- case OMPD_critical:
- case OMPD_taskgroup:
- case OMPD_distribute:
- case OMPD_ordered:
- case OMPD_atomic:
- case OMPD_distribute_simd:
- case OMPD_requires:
- case OMPD_metadirective:
- llvm_unreachable("Unexpected OpenMP directive with thread_limit-clause");
- case OMPD_unknown:
- default:
- llvm_unreachable("Unknown OpenMP directive");
- }
- break;
- case OMPC_schedule:
- switch (DKind) {
- case OMPD_parallel_for:
- case OMPD_parallel_for_simd:
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
- CaptureRegion = OMPD_parallel;
- break;
- case OMPD_for:
- case OMPD_for_simd:
- // Do not capture schedule-clause expressions.
- break;
- case OMPD_task:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_masked_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_masked_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_masked_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_parallel_masked_taskloop_simd:
- case OMPD_target_data:
- case OMPD_target_enter_data:
- case OMPD_target_exit_data:
- case OMPD_target_update:
- case OMPD_teams:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_target_teams_distribute:
- case OMPD_target_teams_distribute_simd:
- case OMPD_target:
- case OMPD_target_simd:
- case OMPD_target_parallel:
- case OMPD_cancel:
- case OMPD_parallel:
- case OMPD_parallel_master:
- case OMPD_parallel_masked:
- case OMPD_parallel_sections:
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_taskyield:
- case OMPD_error:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_cancellation_point:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_loop:
- case OMPD_teams_loop:
- case OMPD_target_teams_loop:
- case OMPD_parallel_loop:
- case OMPD_target_parallel_loop:
- case OMPD_simd:
- case OMPD_tile:
- case OMPD_unroll:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_masked:
- case OMPD_critical:
- case OMPD_taskgroup:
- case OMPD_distribute:
- case OMPD_ordered:
- case OMPD_atomic:
- case OMPD_distribute_simd:
- case OMPD_target_teams:
- case OMPD_requires:
- case OMPD_metadirective:
- llvm_unreachable("Unexpected OpenMP directive with schedule clause");
- case OMPD_unknown:
- default:
- llvm_unreachable("Unknown OpenMP directive");
- }
- break;
- case OMPC_dist_schedule:
- switch (DKind) {
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
- case OMPD_target_teams_distribute:
- case OMPD_target_teams_distribute_simd:
- CaptureRegion = OMPD_teams;
- break;
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_distribute:
- case OMPD_distribute_simd:
- // Do not capture dist_schedule-clause expressions.
- break;
- case OMPD_parallel_for:
- case OMPD_parallel_for_simd:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_parallel_for:
- case OMPD_task:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_masked_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_masked_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_masked_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_parallel_masked_taskloop_simd:
- case OMPD_target_data:
- case OMPD_target_enter_data:
- case OMPD_target_exit_data:
- case OMPD_target_update:
- case OMPD_teams:
- case OMPD_target:
- case OMPD_target_simd:
- case OMPD_target_parallel:
- case OMPD_cancel:
- case OMPD_parallel:
- case OMPD_parallel_master:
- case OMPD_parallel_masked:
- case OMPD_parallel_sections:
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_taskyield:
- case OMPD_error:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_cancellation_point:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_loop:
- case OMPD_teams_loop:
- case OMPD_target_teams_loop:
- case OMPD_parallel_loop:
- case OMPD_target_parallel_loop:
- case OMPD_simd:
- case OMPD_tile:
- case OMPD_unroll:
- case OMPD_for:
- case OMPD_for_simd:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_masked:
- case OMPD_critical:
- case OMPD_taskgroup:
- case OMPD_ordered:
- case OMPD_atomic:
- case OMPD_target_teams:
- case OMPD_requires:
- case OMPD_metadirective:
- llvm_unreachable("Unexpected OpenMP directive with dist_schedule clause");
- case OMPD_unknown:
- default:
- llvm_unreachable("Unknown OpenMP directive");
- }
- break;
case OMPC_ompx_dyn_cgroup_mem:
- switch (DKind) {
- case OMPD_target:
- case OMPD_target_simd:
- case OMPD_target_teams:
- case OMPD_target_parallel:
- case OMPD_target_teams_distribute:
- case OMPD_target_teams_distribute_simd:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_parallel_loop:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
- case OMPD_target_teams_loop:
- CaptureRegion = OMPD_target;
- break;
- default:
- llvm_unreachable("Unknown OpenMP directive");
- }
+ if (Leafs[0] == OMPD_target)
+ return OMPD_target;
break;
case OMPC_device:
- switch (DKind) {
- case OMPD_target_update:
- case OMPD_target_enter_data:
- case OMPD_target_exit_data:
- case OMPD_target:
- case OMPD_target_simd:
- case OMPD_target_teams:
- case OMPD_target_parallel:
- case OMPD_target_teams_distribute:
- case OMPD_target_teams_distribute_simd:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_parallel_loop:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
- case OMPD_target_teams_loop:
- case OMPD_dispatch:
- CaptureRegion = OMPD_task;
- break;
- case OMPD_target_data:
- case OMPD_interop:
- // Do not capture device-clause expressions.
- break;
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_teams:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_task:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_masked_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_masked_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_masked_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_parallel_masked_taskloop_simd:
- case OMPD_cancel:
- case OMPD_parallel:
- case OMPD_parallel_master:
- case OMPD_parallel_masked:
- case OMPD_parallel_sections:
- case OMPD_parallel_for:
- case OMPD_parallel_for_simd:
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_taskyield:
- case OMPD_error:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_cancellation_point:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_loop:
- case OMPD_teams_loop:
- case OMPD_parallel_loop:
- case OMPD_simd:
- case OMPD_tile:
- case OMPD_unroll:
- case OMPD_for:
- case OMPD_for_simd:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_masked:
- case OMPD_critical:
- case OMPD_taskgroup:
- case OMPD_distribute:
- case OMPD_ordered:
- case OMPD_atomic:
- case OMPD_distribute_simd:
- case OMPD_requires:
- case OMPD_metadirective:
- llvm_unreachable("Unexpected OpenMP directive with device-clause");
- case OMPD_unknown:
- default:
- llvm_unreachable("Unknown OpenMP directive");
- }
- break;
- case OMPC_grainsize:
- case OMPC_num_tasks:
- case OMPC_final:
- case OMPC_priority:
- switch (DKind) {
- case OMPD_task:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_masked_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_masked_taskloop_simd:
- break;
- case OMPD_parallel_masked_taskloop:
- case OMPD_parallel_masked_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- CaptureRegion = OMPD_parallel;
- break;
- case OMPD_target_update:
- case OMPD_target_enter_data:
- case OMPD_target_exit_data:
- case OMPD_target:
- case OMPD_target_simd:
- case OMPD_target_teams:
- case OMPD_target_parallel:
- case OMPD_target_teams_distribute:
- case OMPD_target_teams_distribute_simd:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
- case OMPD_target_data:
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_teams:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_cancel:
- case OMPD_parallel:
- case OMPD_parallel_master:
- case OMPD_parallel_masked:
- case OMPD_parallel_sections:
- case OMPD_parallel_for:
- case OMPD_parallel_for_simd:
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_taskyield:
- case OMPD_error:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_cancellation_point:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_loop:
- case OMPD_teams_loop:
- case OMPD_target_teams_loop:
- case OMPD_parallel_loop:
- case OMPD_target_parallel_loop:
- case OMPD_simd:
- case OMPD_tile:
- case OMPD_unroll:
- case OMPD_for:
- case OMPD_for_simd:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_masked:
- case OMPD_critical:
- case OMPD_taskgroup:
- case OMPD_distribute:
- case OMPD_ordered:
- case OMPD_atomic:
- case OMPD_distribute_simd:
- case OMPD_requires:
- case OMPD_metadirective:
- llvm_unreachable("Unexpected OpenMP directive with grainsize-clause");
- case OMPD_unknown:
- default:
- llvm_unreachable("Unknown OpenMP directive");
- }
+ if (Leafs[0] == OMPD_target ||
+ llvm::is_contained({OMPD_dispatch, OMPD_target_update,
+ OMPD_target_enter_data, OMPD_target_exit_data},
+ DKind))
+ return OMPD_task;
break;
case OMPC_novariants:
case OMPC_nocontext:
- switch (DKind) {
- case OMPD_dispatch:
- CaptureRegion = OMPD_task;
- break;
- default:
- llvm_unreachable("Unexpected OpenMP directive");
- }
- break;
- case OMPC_filter:
- // Do not capture filter-clause expressions.
+ if (DKind == OMPD_dispatch)
+ return OMPD_task;
break;
case OMPC_when:
- if (DKind == OMPD_metadirective) {
- CaptureRegion = OMPD_metadirective;
- } else if (DKind == OMPD_unknown) {
- llvm_unreachable("Unknown OpenMP directive");
- } else {
- llvm_unreachable("Unexpected OpenMP directive with when clause");
- }
+ if (DKind == OMPD_metadirective)
+ return OMPD_metadirective;
break;
- case OMPC_firstprivate:
- case OMPC_lastprivate:
- case OMPC_reduction:
- case OMPC_task_reduction:
- case OMPC_in_reduction:
- case OMPC_linear:
- case OMPC_default:
- case OMPC_proc_bind:
- case OMPC_safelen:
- case OMPC_simdlen:
- case OMPC_sizes:
- case OMPC_allocator:
- case OMPC_collapse:
- case OMPC_private:
- case OMPC_shared:
- case OMPC_aligned:
- case OMPC_copyin:
- case OMPC_copyprivate:
- case OMPC_ordered:
- case OMPC_nowait:
- case OMPC_untied:
- case OMPC_mergeable:
- case OMPC_threadprivate:
- case OMPC_allocate:
- case OMPC_flush:
- case OMPC_depobj:
- case OMPC_read:
- case OMPC_write:
- case OMPC_update:
- case OMPC_capture:
- case OMPC_compare:
- case OMPC_seq_cst:
- case OMPC_acq_rel:
- case OMPC_acquire:
- case OMPC_release:
- case OMPC_relaxed:
- case OMPC_depend:
- case OMPC_threads:
- case OMPC_simd:
- case OMPC_map:
- case OMPC_nogroup:
- case OMPC_hint:
- case OMPC_defaultmap:
- case OMPC_unknown:
- case OMPC_uniform:
- case OMPC_to:
- case OMPC_from:
- case OMPC_use_device_ptr:
- case OMPC_use_device_addr:
- case OMPC_is_device_ptr:
- case OMPC_unified_address:
- case OMPC_unified_shared_memory:
- case OMPC_reverse_offload:
- case OMPC_dynamic_allocators:
- case OMPC_atomic_default_mem_order:
- case OMPC_device_type:
- case OMPC_match:
- case OMPC_nontemporal:
- case OMPC_order:
- case OMPC_at:
- case OMPC_severity:
- case OMPC_message:
- case OMPC_destroy:
- case OMPC_detach:
- case OMPC_inclusive:
- case OMPC_exclusive:
- case OMPC_uses_allocators:
- case OMPC_affinity:
- case OMPC_bind:
+ case OMPC_filter:
+ return OMPD_unknown;
default:
- llvm_unreachable("Unexpected OpenMP clause.");
+ break;
+ }
+
+ // If none of the special cases above applied, and DKind is a capturing
+ // directive, find the innermost enclosing leaf construct that allows the
+ // clause, and returns the corresponding capture region.
+
+ auto GetEnclosingRegion = [&](int EndIdx, OpenMPClauseKind Clause) {
+ // Find the index in "Leafs" of the last leaf that allows the given
+ // clause. The search will only include indexes [0, EndIdx).
+ // EndIdx may be set to the index of the NameModifier, if present.
+ int InnermostIdx = [&]() {
+ for (int I = EndIdx - 1; I >= 0; --I) {
+ if (isAllowedClauseForDirective(Leafs[I], Clause, OpenMPVersion))
+ return I;
+ }
+ return -1;
+ }();
+
+ // Find the nearest enclosing capture region.
+ SmallVector<OpenMPDirectiveKind, 2> Regions;
+ for (int I = InnermostIdx - 1; I >= 0; --I) {
+ if (!isOpenMPCapturingDirective(Leafs[I]))
+ continue;
+ Regions.clear();
+ getOpenMPCaptureRegions(Regions, Leafs[I]);
+ if (Regions[0] != OMPD_unknown)
+ return Regions.back();
+ }
+ return OMPD_unknown;
+ };
+
+ if (isOpenMPCapturingDirective(DKind)) {
+ auto GetLeafIndex = [&](OpenMPDirectiveKind Dir) {
+ for (int I = 0, E = Leafs.size(); I != E; ++I) {
+ if (Leafs[I] == Dir)
+ return I + 1;
+ }
+ return 0;
+ };
+
+ int End = NameModifier == OMPD_unknown ? Leafs.size()
+ : GetLeafIndex(NameModifier);
+ return GetEnclosingRegion(End, CKind);
}
- return CaptureRegion;
+
+ return OMPD_unknown;
}
-OMPClause *Sema::ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
- Expr *Condition, SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation NameModifierLoc,
- SourceLocation ColonLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPIfClause(
+ OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation NameModifierLoc,
+ SourceLocation ColonLoc, SourceLocation EndLoc) {
Expr *ValExpr = Condition;
Stmt *HelperValStmt = nullptr;
OpenMPDirectiveKind CaptureRegion = OMPD_unknown;
if (!Condition->isValueDependent() && !Condition->isTypeDependent() &&
!Condition->isInstantiationDependent() &&
!Condition->containsUnexpandedParameterPack()) {
- ExprResult Val = CheckBooleanCondition(StartLoc, Condition);
+ ExprResult Val = SemaRef.CheckBooleanCondition(StartLoc, Condition);
if (Val.isInvalid())
return nullptr;
@@ -16561,57 +15291,60 @@ OMPClause *Sema::ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
CaptureRegion = getOpenMPCaptureRegionForClause(
- DKind, OMPC_if, LangOpts.OpenMP, NameModifier);
- if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
- ValExpr = MakeFullExpr(ValExpr).get();
+ DKind, OMPC_if, getLangOpts().OpenMP, NameModifier);
+ if (CaptureRegion != OMPD_unknown &&
+ !SemaRef.CurContext->isDependentContext()) {
+ ValExpr = SemaRef.MakeFullExpr(ValExpr).get();
llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
- ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
- HelperValStmt = buildPreInits(Context, Captures);
+ ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get();
+ HelperValStmt = buildPreInits(getASTContext(), Captures);
}
}
- return new (Context)
+ return new (getASTContext())
OMPIfClause(NameModifier, ValExpr, HelperValStmt, CaptureRegion, StartLoc,
LParenLoc, NameModifierLoc, ColonLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPFinalClause(Expr *Condition,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPFinalClause(Expr *Condition,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
Expr *ValExpr = Condition;
Stmt *HelperValStmt = nullptr;
OpenMPDirectiveKind CaptureRegion = OMPD_unknown;
if (!Condition->isValueDependent() && !Condition->isTypeDependent() &&
!Condition->isInstantiationDependent() &&
!Condition->containsUnexpandedParameterPack()) {
- ExprResult Val = CheckBooleanCondition(StartLoc, Condition);
+ ExprResult Val = SemaRef.CheckBooleanCondition(StartLoc, Condition);
if (Val.isInvalid())
return nullptr;
- ValExpr = MakeFullExpr(Val.get()).get();
+ ValExpr = SemaRef.MakeFullExpr(Val.get()).get();
OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
- CaptureRegion =
- getOpenMPCaptureRegionForClause(DKind, OMPC_final, LangOpts.OpenMP);
- if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
- ValExpr = MakeFullExpr(ValExpr).get();
+ CaptureRegion = getOpenMPCaptureRegionForClause(DKind, OMPC_final,
+ getLangOpts().OpenMP);
+ if (CaptureRegion != OMPD_unknown &&
+ !SemaRef.CurContext->isDependentContext()) {
+ ValExpr = SemaRef.MakeFullExpr(ValExpr).get();
llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
- ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
- HelperValStmt = buildPreInits(Context, Captures);
+ ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get();
+ HelperValStmt = buildPreInits(getASTContext(), Captures);
}
}
- return new (Context) OMPFinalClause(ValExpr, HelperValStmt, CaptureRegion,
- StartLoc, LParenLoc, EndLoc);
+ return new (getASTContext()) OMPFinalClause(
+ ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc);
}
-ExprResult Sema::PerformOpenMPImplicitIntegerConversion(SourceLocation Loc,
- Expr *Op) {
+ExprResult
+SemaOpenMP::PerformOpenMPImplicitIntegerConversion(SourceLocation Loc,
+ Expr *Op) {
if (!Op)
return ExprError();
- class IntConvertDiagnoser : public ICEConvertDiagnoser {
+ class IntConvertDiagnoser : public Sema::ICEConvertDiagnoser {
public:
IntConvertDiagnoser()
: ICEConvertDiagnoser(/*AllowScopedEnumerations*/ false, false, true) {}
@@ -16647,7 +15380,7 @@ ExprResult Sema::PerformOpenMPImplicitIntegerConversion(SourceLocation Loc,
llvm_unreachable("conversion functions are permitted");
}
} ConvertDiagnoser;
- return PerformContextualImplicitConversion(Loc, Op, ConvertDiagnoser);
+ return SemaRef.PerformContextualImplicitConversion(Loc, Op, ConvertDiagnoser);
}
static bool
@@ -16660,7 +15393,7 @@ isNonNegativeIntegerValue(Expr *&ValExpr, Sema &SemaRef, OpenMPClauseKind CKind,
!ValExpr->isInstantiationDependent()) {
SourceLocation Loc = ValExpr->getExprLoc();
ExprResult Value =
- SemaRef.PerformOpenMPImplicitIntegerConversion(Loc, ValExpr);
+ SemaRef.OpenMP().PerformOpenMPImplicitIntegerConversion(Loc, ValExpr);
if (Value.isInvalid())
return false;
@@ -16692,37 +15425,37 @@ isNonNegativeIntegerValue(Expr *&ValExpr, Sema &SemaRef, OpenMPClauseKind CKind,
return true;
}
-OMPClause *Sema::ActOnOpenMPNumThreadsClause(Expr *NumThreads,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPNumThreadsClause(Expr *NumThreads,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
Expr *ValExpr = NumThreads;
Stmt *HelperValStmt = nullptr;
// OpenMP [2.5, Restrictions]
// The num_threads expression must evaluate to a positive integer value.
- if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_num_threads,
+ if (!isNonNegativeIntegerValue(ValExpr, SemaRef, OMPC_num_threads,
/*StrictlyPositive=*/true))
return nullptr;
OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
- OpenMPDirectiveKind CaptureRegion =
- getOpenMPCaptureRegionForClause(DKind, OMPC_num_threads, LangOpts.OpenMP);
- if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
- ValExpr = MakeFullExpr(ValExpr).get();
+ OpenMPDirectiveKind CaptureRegion = getOpenMPCaptureRegionForClause(
+ DKind, OMPC_num_threads, getLangOpts().OpenMP);
+ if (CaptureRegion != OMPD_unknown &&
+ !SemaRef.CurContext->isDependentContext()) {
+ ValExpr = SemaRef.MakeFullExpr(ValExpr).get();
llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
- ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
- HelperValStmt = buildPreInits(Context, Captures);
+ ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get();
+ HelperValStmt = buildPreInits(getASTContext(), Captures);
}
- return new (Context) OMPNumThreadsClause(
+ return new (getASTContext()) OMPNumThreadsClause(
ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc);
}
-ExprResult Sema::VerifyPositiveIntegerConstantInClause(Expr *E,
- OpenMPClauseKind CKind,
- bool StrictlyPositive,
- bool SuppressExprDiags) {
+ExprResult SemaOpenMP::VerifyPositiveIntegerConstantInClause(
+ Expr *E, OpenMPClauseKind CKind, bool StrictlyPositive,
+ bool SuppressExprDiags) {
if (!E)
return ExprError();
if (E->isValueDependent() || E->isTypeDependent() ||
@@ -16736,14 +15469,16 @@ ExprResult Sema::VerifyPositiveIntegerConstantInClause(Expr *E,
// expression.
struct SuppressedDiagnoser : public Sema::VerifyICEDiagnoser {
SuppressedDiagnoser() : VerifyICEDiagnoser(/*Suppress=*/true) {}
- Sema::SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
- SourceLocation Loc) override {
+ SemaBase::SemaDiagnosticBuilder
+ diagnoseNotICE(Sema &S, SourceLocation Loc) override {
llvm_unreachable("Diagnostic suppressed");
}
} Diagnoser;
- ICE = VerifyIntegerConstantExpression(E, &Result, Diagnoser, AllowFold);
+ ICE = SemaRef.VerifyIntegerConstantExpression(E, &Result, Diagnoser,
+ Sema::AllowFold);
} else {
- ICE = VerifyIntegerConstantExpression(E, &Result, /*FIXME*/ AllowFold);
+ ICE = SemaRef.VerifyIntegerConstantExpression(E, &Result,
+ /*FIXME*/ Sema::AllowFold);
}
if (ICE.isInvalid())
return ExprError();
@@ -16767,29 +15502,31 @@ ExprResult Sema::VerifyPositiveIntegerConstantInClause(Expr *E,
return ICE;
}
-OMPClause *Sema::ActOnOpenMPSafelenClause(Expr *Len, SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPSafelenClause(Expr *Len,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
// OpenMP [2.8.1, simd construct, Description]
// The parameter of the safelen clause must be a constant
// positive integer expression.
ExprResult Safelen = VerifyPositiveIntegerConstantInClause(Len, OMPC_safelen);
if (Safelen.isInvalid())
return nullptr;
- return new (Context)
+ return new (getASTContext())
OMPSafelenClause(Safelen.get(), StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPSimdlenClause(Expr *Len, SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPSimdlenClause(Expr *Len,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
// OpenMP [2.8.1, simd construct, Description]
// The parameter of the simdlen clause must be a constant
// positive integer expression.
ExprResult Simdlen = VerifyPositiveIntegerConstantInClause(Len, OMPC_simdlen);
if (Simdlen.isInvalid())
return nullptr;
- return new (Context)
+ return new (getASTContext())
OMPSimdlenClause(Simdlen.get(), StartLoc, LParenLoc, EndLoc);
}
@@ -16849,31 +15586,32 @@ static bool findOMPAllocatorHandleT(Sema &S, SourceLocation Loc,
return true;
}
-OMPClause *Sema::ActOnOpenMPAllocatorClause(Expr *A, SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPAllocatorClause(Expr *A,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
// OpenMP [2.11.3, allocate Directive, Description]
// allocator is an expression of omp_allocator_handle_t type.
- if (!findOMPAllocatorHandleT(*this, A->getExprLoc(), DSAStack))
+ if (!findOMPAllocatorHandleT(SemaRef, A->getExprLoc(), DSAStack))
return nullptr;
- ExprResult Allocator = DefaultLvalueConversion(A);
+ ExprResult Allocator = SemaRef.DefaultLvalueConversion(A);
if (Allocator.isInvalid())
return nullptr;
- Allocator = PerformImplicitConversion(Allocator.get(),
- DSAStack->getOMPAllocatorHandleT(),
- Sema::AA_Initializing,
- /*AllowExplicit=*/true);
+ Allocator = SemaRef.PerformImplicitConversion(
+ Allocator.get(), DSAStack->getOMPAllocatorHandleT(),
+ Sema::AA_Initializing,
+ /*AllowExplicit=*/true);
if (Allocator.isInvalid())
return nullptr;
- return new (Context)
+ return new (getASTContext())
OMPAllocatorClause(Allocator.get(), StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPCollapseClause(Expr *NumForLoops,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPCollapseClause(Expr *NumForLoops,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
// OpenMP [2.7.1, loop construct, Description]
// OpenMP [2.8.1, simd construct, Description]
// OpenMP [2.9.6, distribute construct, Description]
@@ -16883,14 +15621,14 @@ OMPClause *Sema::ActOnOpenMPCollapseClause(Expr *NumForLoops,
VerifyPositiveIntegerConstantInClause(NumForLoops, OMPC_collapse);
if (NumForLoopsResult.isInvalid())
return nullptr;
- return new (Context)
+ return new (getASTContext())
OMPCollapseClause(NumForLoopsResult.get(), StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPOrderedClause(SourceLocation StartLoc,
- SourceLocation EndLoc,
- SourceLocation LParenLoc,
- Expr *NumForLoops) {
+OMPClause *SemaOpenMP::ActOnOpenMPOrderedClause(SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ SourceLocation LParenLoc,
+ Expr *NumForLoops) {
// OpenMP [2.7.1, loop construct, Description]
// OpenMP [2.8.1, simd construct, Description]
// OpenMP [2.9.6, distribute construct, Description]
@@ -16905,14 +15643,15 @@ OMPClause *Sema::ActOnOpenMPOrderedClause(SourceLocation StartLoc,
} else {
NumForLoops = nullptr;
}
- auto *Clause = OMPOrderedClause::Create(
- Context, NumForLoops, NumForLoops ? DSAStack->getAssociatedLoops() : 0,
- StartLoc, LParenLoc, EndLoc);
+ auto *Clause =
+ OMPOrderedClause::Create(getASTContext(), NumForLoops,
+ NumForLoops ? DSAStack->getAssociatedLoops() : 0,
+ StartLoc, LParenLoc, EndLoc);
DSAStack->setOrderedRegion(/*IsOrdered=*/true, NumForLoops, Clause);
return Clause;
}
-OMPClause *Sema::ActOnOpenMPSimpleClause(
+OMPClause *SemaOpenMP::ActOnOpenMPSimpleClause(
OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) {
OMPClause *Res = nullptr;
@@ -16931,9 +15670,8 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
ArgumentLoc, StartLoc, LParenLoc, EndLoc);
break;
case OMPC_fail:
- Res = ActOnOpenMPFailClause(
- static_cast<OpenMPClauseKind>(Argument),
- ArgumentLoc, StartLoc, LParenLoc, EndLoc);
+ Res = ActOnOpenMPFailClause(static_cast<OpenMPClauseKind>(Argument),
+ ArgumentLoc, StartLoc, LParenLoc, EndLoc);
break;
case OMPC_update:
Res = ActOnOpenMPUpdateClause(static_cast<OpenMPDependClauseKind>(Argument),
@@ -17054,11 +15792,11 @@ getListOfPossibleValues(OpenMPClauseKind K, unsigned First, unsigned Last,
return std::string(Out.str());
}
-OMPClause *Sema::ActOnOpenMPDefaultClause(DefaultKind Kind,
- SourceLocation KindKwLoc,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPDefaultClause(DefaultKind Kind,
+ SourceLocation KindKwLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
if (Kind == OMP_DEFAULT_unknown) {
Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
<< getListOfPossibleValues(OMPC_default, /*First=*/0,
@@ -17084,39 +15822,39 @@ OMPClause *Sema::ActOnOpenMPDefaultClause(DefaultKind Kind,
llvm_unreachable("DSA unexpected in OpenMP default clause");
}
- return new (Context)
+ return new (getASTContext())
OMPDefaultClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPProcBindClause(ProcBindKind Kind,
- SourceLocation KindKwLoc,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPProcBindClause(ProcBindKind Kind,
+ SourceLocation KindKwLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
if (Kind == OMP_PROC_BIND_unknown) {
Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
<< getListOfPossibleValues(OMPC_proc_bind,
/*First=*/unsigned(OMP_PROC_BIND_master),
/*Last=*/
- unsigned(LangOpts.OpenMP > 50
+ unsigned(getLangOpts().OpenMP > 50
? OMP_PROC_BIND_primary
: OMP_PROC_BIND_spread) +
1)
<< getOpenMPClauseName(OMPC_proc_bind);
return nullptr;
}
- if (Kind == OMP_PROC_BIND_primary && LangOpts.OpenMP < 51)
+ if (Kind == OMP_PROC_BIND_primary && getLangOpts().OpenMP < 51)
Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
<< getListOfPossibleValues(OMPC_proc_bind,
/*First=*/unsigned(OMP_PROC_BIND_master),
/*Last=*/
unsigned(OMP_PROC_BIND_spread) + 1)
<< getOpenMPClauseName(OMPC_proc_bind);
- return new (Context)
+ return new (getASTContext())
OMPProcBindClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPAtomicDefaultMemOrderClause(
+OMPClause *SemaOpenMP::ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindKwLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) {
if (Kind == OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown) {
@@ -17127,15 +15865,15 @@ OMPClause *Sema::ActOnOpenMPAtomicDefaultMemOrderClause(
<< getOpenMPClauseName(OMPC_atomic_default_mem_order);
return nullptr;
}
- return new (Context) OMPAtomicDefaultMemOrderClause(Kind, KindKwLoc, StartLoc,
- LParenLoc, EndLoc);
+ return new (getASTContext()) OMPAtomicDefaultMemOrderClause(
+ Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPAtClause(OpenMPAtClauseKind Kind,
- SourceLocation KindKwLoc,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPAtClause(OpenMPAtClauseKind Kind,
+ SourceLocation KindKwLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
if (Kind == OMPC_AT_unknown) {
Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
<< getListOfPossibleValues(OMPC_at, /*First=*/0,
@@ -17143,15 +15881,15 @@ OMPClause *Sema::ActOnOpenMPAtClause(OpenMPAtClauseKind Kind,
<< getOpenMPClauseName(OMPC_at);
return nullptr;
}
- return new (Context)
+ return new (getASTContext())
OMPAtClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPSeverityClause(OpenMPSeverityClauseKind Kind,
- SourceLocation KindKwLoc,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPSeverityClause(OpenMPSeverityClauseKind Kind,
+ SourceLocation KindKwLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
if (Kind == OMPC_SEVERITY_unknown) {
Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
<< getListOfPossibleValues(OMPC_severity, /*First=*/0,
@@ -17159,28 +15897,30 @@ OMPClause *Sema::ActOnOpenMPSeverityClause(OpenMPSeverityClauseKind Kind,
<< getOpenMPClauseName(OMPC_severity);
return nullptr;
}
- return new (Context)
+ return new (getASTContext())
OMPSeverityClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPMessageClause(Expr *ME, SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPMessageClause(Expr *ME,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
assert(ME && "NULL expr in Message clause");
if (!isa<StringLiteral>(ME)) {
Diag(ME->getBeginLoc(), diag::warn_clause_expected_string)
<< getOpenMPClauseName(OMPC_message);
return nullptr;
}
- return new (Context) OMPMessageClause(ME, StartLoc, LParenLoc, EndLoc);
+ return new (getASTContext())
+ OMPMessageClause(ME, StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPOrderClause(
+OMPClause *SemaOpenMP::ActOnOpenMPOrderClause(
OpenMPOrderClauseModifier Modifier, OpenMPOrderClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc) {
if (Kind != OMPC_ORDER_concurrent ||
- (LangOpts.OpenMP < 51 && MLoc.isValid())) {
+ (getLangOpts().OpenMP < 51 && MLoc.isValid())) {
// Kind should be concurrent,
// Modifiers introduced in OpenMP 5.1
static_assert(OMPC_ORDER_unknown > 0,
@@ -17193,7 +15933,7 @@ OMPClause *Sema::ActOnOpenMPOrderClause(
<< getOpenMPClauseName(OMPC_order);
return nullptr;
}
- if (LangOpts.OpenMP >= 51) {
+ if (getLangOpts().OpenMP >= 51) {
if (Modifier == OMPC_ORDER_MODIFIER_unknown && MLoc.isValid()) {
Diag(MLoc, diag::err_omp_unexpected_clause_value)
<< getListOfPossibleValues(OMPC_order,
@@ -17210,21 +15950,21 @@ OMPClause *Sema::ActOnOpenMPOrderClause(
}
}
}
- return new (Context) OMPOrderClause(Kind, KindLoc, StartLoc, LParenLoc,
- EndLoc, Modifier, MLoc);
+ return new (getASTContext()) OMPOrderClause(
+ Kind, KindLoc, StartLoc, LParenLoc, EndLoc, Modifier, MLoc);
}
-OMPClause *Sema::ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
- SourceLocation KindKwLoc,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
+ SourceLocation KindKwLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
if (Kind == OMPC_DEPEND_unknown || Kind == OMPC_DEPEND_source ||
Kind == OMPC_DEPEND_sink || Kind == OMPC_DEPEND_depobj) {
SmallVector<unsigned> Except = {
OMPC_DEPEND_source, OMPC_DEPEND_sink, OMPC_DEPEND_depobj,
OMPC_DEPEND_outallmemory, OMPC_DEPEND_inoutallmemory};
- if (LangOpts.OpenMP < 51)
+ if (getLangOpts().OpenMP < 51)
Except.push_back(OMPC_DEPEND_inoutset);
Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
<< getListOfPossibleValues(OMPC_depend, /*First=*/0,
@@ -17232,35 +15972,72 @@ OMPClause *Sema::ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
<< getOpenMPClauseName(OMPC_update);
return nullptr;
}
- return OMPUpdateClause::Create(Context, StartLoc, LParenLoc, KindKwLoc, Kind,
- EndLoc);
+ return OMPUpdateClause::Create(getASTContext(), StartLoc, LParenLoc,
+ KindKwLoc, Kind, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
- for (Expr *SizeExpr : SizeExprs) {
- ExprResult NumForLoopsResult = VerifyPositiveIntegerConstantInClause(
- SizeExpr, OMPC_sizes, /*StrictlyPositive=*/true);
- if (!NumForLoopsResult.isUsable())
- return nullptr;
- }
+OMPClause *SemaOpenMP::ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ SmallVector<Expr *> SanitizedSizeExprs(SizeExprs);
- DSAStack->setAssociatedLoops(SizeExprs.size());
- return OMPSizesClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- SizeExprs);
-}
+ for (Expr *&SizeExpr : SanitizedSizeExprs) {
+ // Skip if already sanitized, e.g. during a partial template instantiation.
+ if (!SizeExpr)
+ continue;
+
+ bool IsValid = isNonNegativeIntegerValue(SizeExpr, SemaRef, OMPC_sizes,
+ /*StrictlyPositive=*/true);
-OMPClause *Sema::ActOnOpenMPFullClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return OMPFullClause::Create(Context, StartLoc, EndLoc);
+ // isNonNegativeIntegerValue returns true for non-integral types (but still
+ // emits error diagnostic), so check for the expected type explicitly.
+ QualType SizeTy = SizeExpr->getType();
+ if (!SizeTy->isIntegerType())
+ IsValid = false;
+
+ // Handling in templates is tricky. There are four possibilities to
+ // consider:
+ //
+ // 1a. The expression is valid and we are in a instantiated template or not
+ // in a template:
+ // Pass valid expression to be further analysed later in Sema.
+ // 1b. The expression is valid and we are in a template (including partial
+ // instantiation):
+ // isNonNegativeIntegerValue skipped any checks so there is no
+ // guarantee it will be correct after instantiation.
+ // ActOnOpenMPSizesClause will be called again at instantiation when
+ // it is not in a dependent context anymore. This may cause warnings
+ // to be emitted multiple times.
+ // 2a. The expression is invalid and we are in an instantiated template or
+ // not in a template:
+ // Invalidate the expression with a clearly wrong value (nullptr) so
+ // later in Sema we do not have to do the same validity analysis again
+ // or crash from unexpected data. Error diagnostics have already been
+ // emitted.
+ // 2b. The expression is invalid and we are in a template (including partial
+ // instantiation):
+ // Pass the invalid expression as-is, template instantiation may
+ // replace unexpected types/values with valid ones. The directives
+ // with this clause must not try to use these expressions in dependent
+ // contexts, but delay analysis until full instantiation.
+ if (!SizeExpr->isInstantiationDependent() && !IsValid)
+ SizeExpr = nullptr;
+ }
+
+ return OMPSizesClause::Create(getASTContext(), StartLoc, LParenLoc, EndLoc,
+ SanitizedSizeExprs);
+}
+
+OMPClause *SemaOpenMP::ActOnOpenMPFullClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return OMPFullClause::Create(getASTContext(), StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPPartialClause(Expr *FactorExpr,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPPartialClause(Expr *FactorExpr,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
if (FactorExpr) {
// If an argument is specified, it must be a constant (or an unevaluated
// template expression).
@@ -17271,22 +16048,22 @@ OMPClause *Sema::ActOnOpenMPPartialClause(Expr *FactorExpr,
FactorExpr = FactorResult.get();
}
- return OMPPartialClause::Create(Context, StartLoc, LParenLoc, EndLoc,
+ return OMPPartialClause::Create(getASTContext(), StartLoc, LParenLoc, EndLoc,
FactorExpr);
}
-OMPClause *Sema::ActOnOpenMPAlignClause(Expr *A, SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPAlignClause(Expr *A, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
ExprResult AlignVal;
AlignVal = VerifyPositiveIntegerConstantInClause(A, OMPC_align);
if (AlignVal.isInvalid())
return nullptr;
- return OMPAlignClause::Create(Context, AlignVal.get(), StartLoc, LParenLoc,
- EndLoc);
+ return OMPAlignClause::Create(getASTContext(), AlignVal.get(), StartLoc,
+ LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
+OMPClause *SemaOpenMP::ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Argument, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentLoc, SourceLocation DelimLoc,
@@ -17454,13 +16231,13 @@ static bool checkScheduleModifiers(Sema &S, OpenMPScheduleClauseModifier M1,
return false;
}
-OMPClause *Sema::ActOnOpenMPScheduleClause(
+OMPClause *SemaOpenMP::ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc) {
- if (checkScheduleModifiers(*this, M1, M2, M1Loc, M2Loc) ||
- checkScheduleModifiers(*this, M2, M1, M2Loc, M1Loc))
+ if (checkScheduleModifiers(SemaRef, M1, M2, M1Loc, M2Loc) ||
+ checkScheduleModifiers(SemaRef, M2, M1, M2Loc, M1Loc))
return nullptr;
// OpenMP, 2.7.1, Loop Construct, Restrictions
// Either the monotonic modifier or the nonmonotonic modifier can be specified
@@ -17494,7 +16271,7 @@ OMPClause *Sema::ActOnOpenMPScheduleClause(
// The nonmonotonic modifier can only be specified with schedule(dynamic) or
// schedule(guided).
// OpenMP 5.0 does not have this restriction.
- if (LangOpts.OpenMP < 50 &&
+ if (getLangOpts().OpenMP < 50 &&
(M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
Kind != OMPC_SCHEDULE_dynamic && Kind != OMPC_SCHEDULE_guided) {
@@ -17520,7 +16297,7 @@ OMPClause *Sema::ActOnOpenMPScheduleClause(
// chunk_size must be a loop invariant integer expression with a positive
// value.
if (std::optional<llvm::APSInt> Result =
- ValExpr->getIntegerConstantExpr(Context)) {
+ ValExpr->getIntegerConstantExpr(getASTContext())) {
if (Result->isSigned() && !Result->isStrictlyPositive()) {
Diag(ChunkSizeLoc, diag::err_omp_negative_expression_in_clause)
<< "schedule" << 1 << ChunkSize->getSourceRange();
@@ -17528,24 +16305,24 @@ OMPClause *Sema::ActOnOpenMPScheduleClause(
}
} else if (getOpenMPCaptureRegionForClause(
DSAStack->getCurrentDirective(), OMPC_schedule,
- LangOpts.OpenMP) != OMPD_unknown &&
- !CurContext->isDependentContext()) {
- ValExpr = MakeFullExpr(ValExpr).get();
+ getLangOpts().OpenMP) != OMPD_unknown &&
+ !SemaRef.CurContext->isDependentContext()) {
+ ValExpr = SemaRef.MakeFullExpr(ValExpr).get();
llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
- ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
- HelperValStmt = buildPreInits(Context, Captures);
+ ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get();
+ HelperValStmt = buildPreInits(getASTContext(), Captures);
}
}
}
- return new (Context)
+ return new (getASTContext())
OMPScheduleClause(StartLoc, LParenLoc, KindLoc, CommaLoc, EndLoc, Kind,
ValExpr, HelperValStmt, M1, M1Loc, M2, M2Loc);
}
-OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPClause(OpenMPClauseKind Kind,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
OMPClause *Res = nullptr;
switch (Kind) {
case OMPC_ordered:
@@ -17593,6 +16370,9 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_relaxed:
Res = ActOnOpenMPRelaxedClause(StartLoc, EndLoc);
break;
+ case OMPC_weak:
+ Res = ActOnOpenMPWeakClause(StartLoc, EndLoc);
+ break;
case OMPC_threads:
Res = ActOnOpenMPThreadsClause(StartLoc, EndLoc);
break;
@@ -17696,129 +16476,138 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
return Res;
}
-OMPClause *Sema::ActOnOpenMPNowaitClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPNowaitClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
DSAStack->setNowaitRegion();
- return new (Context) OMPNowaitClause(StartLoc, EndLoc);
+ return new (getASTContext()) OMPNowaitClause(StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPUntiedClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPUntiedClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
DSAStack->setUntiedRegion();
- return new (Context) OMPUntiedClause(StartLoc, EndLoc);
+ return new (getASTContext()) OMPUntiedClause(StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPMergeableClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return new (Context) OMPMergeableClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPMergeableClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (getASTContext()) OMPMergeableClause(StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPReadClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return new (Context) OMPReadClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPReadClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (getASTContext()) OMPReadClause(StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPWriteClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return new (Context) OMPWriteClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPWriteClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (getASTContext()) OMPWriteClause(StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPUpdateClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return OMPUpdateClause::Create(Context, StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPUpdateClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return OMPUpdateClause::Create(getASTContext(), StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPCaptureClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return new (Context) OMPCaptureClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPCaptureClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (getASTContext()) OMPCaptureClause(StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPCompareClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return new (Context) OMPCompareClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPCompareClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (getASTContext()) OMPCompareClause(StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPFailClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return new (Context) OMPFailClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPFailClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (getASTContext()) OMPFailClause(StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPFailClause(
- OpenMPClauseKind Parameter, SourceLocation KindLoc,
- SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPFailClause(OpenMPClauseKind Parameter,
+ SourceLocation KindLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
if (!checkFailClauseParameter(Parameter)) {
Diag(KindLoc, diag::err_omp_atomic_fail_wrong_or_no_clauses);
return nullptr;
}
- return new (Context)
+ return new (getASTContext())
OMPFailClause(Parameter, KindLoc, StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return new (Context) OMPSeqCstClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (getASTContext()) OMPSeqCstClause(StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return new (Context) OMPAcqRelClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (getASTContext()) OMPAcqRelClause(StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPAcquireClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return new (Context) OMPAcquireClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPAcquireClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (getASTContext()) OMPAcquireClause(StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPReleaseClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return new (Context) OMPReleaseClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPReleaseClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (getASTContext()) OMPReleaseClause(StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return new (Context) OMPRelaxedClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (getASTContext()) OMPRelaxedClause(StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPThreadsClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return new (Context) OMPThreadsClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPWeakClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (getASTContext()) OMPWeakClause(StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPSIMDClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return new (Context) OMPSIMDClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPThreadsClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (getASTContext()) OMPThreadsClause(StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPNogroupClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return new (Context) OMPNogroupClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPSIMDClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (getASTContext()) OMPSIMDClause(StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return new (Context) OMPUnifiedAddressClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPNogroupClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (getASTContext()) OMPNogroupClause(StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return new (Context) OMPUnifiedSharedMemoryClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (getASTContext()) OMPUnifiedAddressClause(StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
+OMPClause *
+SemaOpenMP::ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc) {
- return new (Context) OMPReverseOffloadClause(StartLoc, EndLoc);
+ return new (getASTContext()) OMPUnifiedSharedMemoryClause(StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return new (Context) OMPDynamicAllocatorsClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (getASTContext()) OMPReverseOffloadClause(StartLoc, EndLoc);
}
-StmtResult Sema::ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
+OMPClause *
+SemaOpenMP::ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (getASTContext()) OMPDynamicAllocatorsClause(StartLoc, EndLoc);
+}
+
+StmtResult
+SemaOpenMP::ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
// OpenMP 5.1 [2.15.1, interop Construct, Restrictions]
// At least one action-clause must appear on a directive.
@@ -17868,13 +16657,13 @@ StmtResult Sema::ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
if (ClauseKind == OMPC_init) {
auto *E = cast<OMPInitClause>(C)->getInteropVar();
- DeclResult = getPrivateItem(*this, E, ELoc, ERange);
+ DeclResult = getPrivateItem(SemaRef, E, ELoc, ERange);
} else if (ClauseKind == OMPC_use) {
auto *E = cast<OMPUseClause>(C)->getInteropVar();
- DeclResult = getPrivateItem(*this, E, ELoc, ERange);
+ DeclResult = getPrivateItem(SemaRef, E, ELoc, ERange);
} else if (ClauseKind == OMPC_destroy) {
auto *E = cast<OMPDestroyClause>(C)->getInteropVar();
- DeclResult = getPrivateItem(*this, E, ELoc, ERange);
+ DeclResult = getPrivateItem(SemaRef, E, ELoc, ERange);
}
if (DeclResult.first) {
@@ -17886,7 +16675,8 @@ StmtResult Sema::ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
}
}
- return OMPInteropDirective::Create(Context, StartLoc, EndLoc, Clauses);
+ return OMPInteropDirective::Create(getASTContext(), StartLoc, EndLoc,
+ Clauses);
}
static bool isValidInteropVariable(Sema &SemaRef, Expr *InteropVarExpr,
@@ -17946,12 +16736,11 @@ static bool isValidInteropVariable(Sema &SemaRef, Expr *InteropVarExpr,
return true;
}
-OMPClause *
-Sema::ActOnOpenMPInitClause(Expr *InteropVar, OMPInteropInfo &InteropInfo,
- SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation VarLoc, SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPInitClause(
+ Expr *InteropVar, OMPInteropInfo &InteropInfo, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc) {
- if (!isValidInteropVariable(*this, InteropVar, VarLoc, OMPC_init))
+ if (!isValidInteropVariable(SemaRef, InteropVar, VarLoc, OMPC_init))
return nullptr;
// Check prefer_type values. These foreign-runtime-id values are either
@@ -17960,7 +16749,7 @@ Sema::ActOnOpenMPInitClause(Expr *InteropVar, OMPInteropInfo &InteropInfo,
if (E->isValueDependent() || E->isTypeDependent() ||
E->isInstantiationDependent() || E->containsUnexpandedParameterPack())
continue;
- if (E->isIntegerConstantExpr(Context))
+ if (E->isIntegerConstantExpr(getASTContext()))
continue;
if (isa<StringLiteral>(E))
continue;
@@ -17968,28 +16757,29 @@ Sema::ActOnOpenMPInitClause(Expr *InteropVar, OMPInteropInfo &InteropInfo,
return nullptr;
}
- return OMPInitClause::Create(Context, InteropVar, InteropInfo, StartLoc,
- LParenLoc, VarLoc, EndLoc);
+ return OMPInitClause::Create(getASTContext(), InteropVar, InteropInfo,
+ StartLoc, LParenLoc, VarLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation VarLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPUseClause(Expr *InteropVar,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation VarLoc,
+ SourceLocation EndLoc) {
- if (!isValidInteropVariable(*this, InteropVar, VarLoc, OMPC_use))
+ if (!isValidInteropVariable(SemaRef, InteropVar, VarLoc, OMPC_use))
return nullptr;
- return new (Context)
+ return new (getASTContext())
OMPUseClause(InteropVar, StartLoc, LParenLoc, VarLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPDestroyClause(Expr *InteropVar,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation VarLoc,
- SourceLocation EndLoc) {
- if (!InteropVar && LangOpts.OpenMP >= 52 &&
+OMPClause *SemaOpenMP::ActOnOpenMPDestroyClause(Expr *InteropVar,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation VarLoc,
+ SourceLocation EndLoc) {
+ if (!InteropVar && getLangOpts().OpenMP >= 52 &&
DSAStack->getCurrentDirective() == OMPD_depobj) {
Diag(StartLoc, diag::err_omp_expected_clause_argument)
<< getOpenMPClauseName(OMPC_destroy)
@@ -17997,100 +16787,103 @@ OMPClause *Sema::ActOnOpenMPDestroyClause(Expr *InteropVar,
return nullptr;
}
if (InteropVar &&
- !isValidInteropVariable(*this, InteropVar, VarLoc, OMPC_destroy))
+ !isValidInteropVariable(SemaRef, InteropVar, VarLoc, OMPC_destroy))
return nullptr;
- return new (Context)
+ return new (getASTContext())
OMPDestroyClause(InteropVar, StartLoc, LParenLoc, VarLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPNovariantsClause(Expr *Condition,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPNovariantsClause(Expr *Condition,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
Expr *ValExpr = Condition;
Stmt *HelperValStmt = nullptr;
OpenMPDirectiveKind CaptureRegion = OMPD_unknown;
if (!Condition->isValueDependent() && !Condition->isTypeDependent() &&
!Condition->isInstantiationDependent() &&
!Condition->containsUnexpandedParameterPack()) {
- ExprResult Val = CheckBooleanCondition(StartLoc, Condition);
+ ExprResult Val = SemaRef.CheckBooleanCondition(StartLoc, Condition);
if (Val.isInvalid())
return nullptr;
- ValExpr = MakeFullExpr(Val.get()).get();
+ ValExpr = SemaRef.MakeFullExpr(Val.get()).get();
OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
CaptureRegion = getOpenMPCaptureRegionForClause(DKind, OMPC_novariants,
- LangOpts.OpenMP);
- if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
- ValExpr = MakeFullExpr(ValExpr).get();
+ getLangOpts().OpenMP);
+ if (CaptureRegion != OMPD_unknown &&
+ !SemaRef.CurContext->isDependentContext()) {
+ ValExpr = SemaRef.MakeFullExpr(ValExpr).get();
llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
- ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
- HelperValStmt = buildPreInits(Context, Captures);
+ ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get();
+ HelperValStmt = buildPreInits(getASTContext(), Captures);
}
}
- return new (Context) OMPNovariantsClause(
+ return new (getASTContext()) OMPNovariantsClause(
ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPNocontextClause(Expr *Condition,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPNocontextClause(Expr *Condition,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
Expr *ValExpr = Condition;
Stmt *HelperValStmt = nullptr;
OpenMPDirectiveKind CaptureRegion = OMPD_unknown;
if (!Condition->isValueDependent() && !Condition->isTypeDependent() &&
!Condition->isInstantiationDependent() &&
!Condition->containsUnexpandedParameterPack()) {
- ExprResult Val = CheckBooleanCondition(StartLoc, Condition);
+ ExprResult Val = SemaRef.CheckBooleanCondition(StartLoc, Condition);
if (Val.isInvalid())
return nullptr;
- ValExpr = MakeFullExpr(Val.get()).get();
+ ValExpr = SemaRef.MakeFullExpr(Val.get()).get();
OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
- CaptureRegion =
- getOpenMPCaptureRegionForClause(DKind, OMPC_nocontext, LangOpts.OpenMP);
- if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
- ValExpr = MakeFullExpr(ValExpr).get();
+ CaptureRegion = getOpenMPCaptureRegionForClause(DKind, OMPC_nocontext,
+ getLangOpts().OpenMP);
+ if (CaptureRegion != OMPD_unknown &&
+ !SemaRef.CurContext->isDependentContext()) {
+ ValExpr = SemaRef.MakeFullExpr(ValExpr).get();
llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
- ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
- HelperValStmt = buildPreInits(Context, Captures);
+ ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get();
+ HelperValStmt = buildPreInits(getASTContext(), Captures);
}
}
- return new (Context) OMPNocontextClause(ValExpr, HelperValStmt, CaptureRegion,
- StartLoc, LParenLoc, EndLoc);
+ return new (getASTContext()) OMPNocontextClause(
+ ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPFilterClause(Expr *ThreadID,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPFilterClause(Expr *ThreadID,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
Expr *ValExpr = ThreadID;
Stmt *HelperValStmt = nullptr;
OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
OpenMPDirectiveKind CaptureRegion =
- getOpenMPCaptureRegionForClause(DKind, OMPC_filter, LangOpts.OpenMP);
- if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
- ValExpr = MakeFullExpr(ValExpr).get();
+ getOpenMPCaptureRegionForClause(DKind, OMPC_filter, getLangOpts().OpenMP);
+ if (CaptureRegion != OMPD_unknown &&
+ !SemaRef.CurContext->isDependentContext()) {
+ ValExpr = SemaRef.MakeFullExpr(ValExpr).get();
llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
- ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
- HelperValStmt = buildPreInits(Context, Captures);
+ ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get();
+ HelperValStmt = buildPreInits(getASTContext(), Captures);
}
- return new (Context) OMPFilterClause(ValExpr, HelperValStmt, CaptureRegion,
- StartLoc, LParenLoc, EndLoc);
+ return new (getASTContext()) OMPFilterClause(
+ ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPVarListClause(OpenMPClauseKind Kind,
- ArrayRef<Expr *> VarList,
- const OMPVarListLocTy &Locs,
- OpenMPVarListDataTy &Data) {
+OMPClause *SemaOpenMP::ActOnOpenMPVarListClause(OpenMPClauseKind Kind,
+ ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs,
+ OpenMPVarListDataTy &Data) {
SourceLocation StartLoc = Locs.StartLoc;
SourceLocation LParenLoc = Locs.LParenLoc;
SourceLocation EndLoc = Locs.EndLoc;
@@ -18282,29 +17075,30 @@ OMPClause *Sema::ActOnOpenMPVarListClause(OpenMPClauseKind Kind,
return Res;
}
-ExprResult Sema::getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
- ExprObjectKind OK, SourceLocation Loc) {
- ExprResult Res = BuildDeclRefExpr(
+ExprResult SemaOpenMP::getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
+ ExprObjectKind OK,
+ SourceLocation Loc) {
+ ExprResult Res = SemaRef.BuildDeclRefExpr(
Capture, Capture->getType().getNonReferenceType(), VK_LValue, Loc);
if (!Res.isUsable())
return ExprError();
if (OK == OK_Ordinary && !getLangOpts().CPlusPlus) {
- Res = CreateBuiltinUnaryOp(Loc, UO_Deref, Res.get());
+ Res = SemaRef.CreateBuiltinUnaryOp(Loc, UO_Deref, Res.get());
if (!Res.isUsable())
return ExprError();
}
if (VK != VK_LValue && Res.get()->isGLValue()) {
- Res = DefaultLvalueConversion(Res.get());
+ Res = SemaRef.DefaultLvalueConversion(Res.get());
if (!Res.isUsable())
return ExprError();
}
return Res;
}
-OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
SmallVector<Expr *, 8> PrivateCopies;
bool IsImplicitClause =
@@ -18314,7 +17108,7 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
+ auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange);
if (Res.second) {
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -18330,7 +17124,8 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
// OpenMP [2.9.3.3, Restrictions, C/C++, p.3]
// A variable that appears in a private clause must not have an incomplete
// type or a reference type.
- if (RequireCompleteType(ELoc, Type, diag::err_omp_private_incomplete_type))
+ if (SemaRef.RequireCompleteType(ELoc, Type,
+ diag::err_omp_private_incomplete_type))
continue;
Type = Type.getNonReferenceType();
@@ -18342,7 +17137,7 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
// OpenMP 3.1 [2.9.3.3, private clause, Restrictions]
// A variable that appears in a private clause must not have a
// const-qualified type unless it is of class type with a mutable member.
- if (rejectConstNotMutableType(*this, D, Type, OMPC_private, ELoc))
+ if (rejectConstNotMutableType(SemaRef, D, Type, OMPC_private, ELoc))
continue;
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
@@ -18356,7 +17151,7 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
if (DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_private) {
Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind)
<< getOpenMPClauseName(OMPC_private);
- reportOriginalDsa(*this, DSAStack, D, DVar);
+ reportOriginalDsa(SemaRef, DSAStack, D, DVar);
continue;
}
@@ -18367,7 +17162,7 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
Diag(ELoc, diag::err_omp_variably_modified_type_not_supported)
<< getOpenMPClauseName(OMPC_private) << Type
<< getOpenMPDirectiveName(CurrDir);
- bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
+ bool IsDecl = !VD || VD->isThisDeclarationADefinition(getASTContext()) ==
VarDecl::DeclarationOnly;
Diag(D->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
@@ -18383,7 +17178,8 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
// A list item cannot appear in both a map clause and a data-sharing
// attribute clause on the same construct unless the construct is a
// combined construct.
- if ((LangOpts.OpenMP <= 45 && isOpenMPTargetExecutionDirective(CurrDir)) ||
+ if ((getLangOpts().OpenMP <= 45 &&
+ isOpenMPTargetExecutionDirective(CurrDir)) ||
CurrDir == OMPD_target) {
OpenMPClauseKind ConflictKind;
if (DSAStack->checkMappableExprComponentListsForDecl(
@@ -18397,7 +17193,7 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
<< getOpenMPClauseName(OMPC_private)
<< getOpenMPClauseName(ConflictKind)
<< getOpenMPDirectiveName(CurrDir);
- reportOriginalDsa(*this, DSAStack, D, DVar);
+ reportOriginalDsa(SemaRef, DSAStack, D, DVar);
continue;
}
}
@@ -18413,28 +17209,28 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
// proper diagnostics.
Type = Type.getUnqualifiedType();
VarDecl *VDPrivate =
- buildVarDecl(*this, ELoc, Type, D->getName(),
+ buildVarDecl(SemaRef, ELoc, Type, D->getName(),
D->hasAttrs() ? &D->getAttrs() : nullptr,
VD ? cast<DeclRefExpr>(SimpleRefExpr) : nullptr);
- ActOnUninitializedDecl(VDPrivate);
+ SemaRef.ActOnUninitializedDecl(VDPrivate);
if (VDPrivate->isInvalidDecl())
continue;
DeclRefExpr *VDPrivateRefExpr = buildDeclRefExpr(
- *this, VDPrivate, RefExpr->getType().getUnqualifiedType(), ELoc);
+ SemaRef, VDPrivate, RefExpr->getType().getUnqualifiedType(), ELoc);
DeclRefExpr *Ref = nullptr;
- if (!VD && !CurContext->isDependentContext()) {
+ if (!VD && !SemaRef.CurContext->isDependentContext()) {
auto *FD = dyn_cast<FieldDecl>(D);
VarDecl *VD = FD ? DSAStack->getImplicitFDCapExprDecl(FD) : nullptr;
if (VD)
- Ref = buildDeclRefExpr(*this, VD, VD->getType().getNonReferenceType(),
+ Ref = buildDeclRefExpr(SemaRef, VD, VD->getType().getNonReferenceType(),
RefExpr->getExprLoc());
else
- Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/false);
+ Ref = buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/false);
}
if (!IsImplicitClause)
DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_private, Ref);
- Vars.push_back((VD || CurContext->isDependentContext())
+ Vars.push_back((VD || SemaRef.CurContext->isDependentContext())
? RefExpr->IgnoreParens()
: Ref);
PrivateCopies.push_back(VDPrivateRefExpr);
@@ -18443,14 +17239,14 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
if (Vars.empty())
return nullptr;
- return OMPPrivateClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars,
- PrivateCopies);
+ return OMPPrivateClause::Create(getASTContext(), StartLoc, LParenLoc, EndLoc,
+ Vars, PrivateCopies);
}
-OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
SmallVector<Expr *, 8> PrivateCopies;
SmallVector<Expr *, 8> Inits;
@@ -18464,7 +17260,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
+ auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange);
if (Res.second) {
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -18482,8 +17278,8 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
// OpenMP [2.9.3.3, Restrictions, C/C++, p.3]
// A variable that appears in a private clause must not have an incomplete
// type or a reference type.
- if (RequireCompleteType(ELoc, Type,
- diag::err_omp_firstprivate_incomplete_type))
+ if (SemaRef.RequireCompleteType(ELoc, Type,
+ diag::err_omp_firstprivate_incomplete_type))
continue;
Type = Type.getNonReferenceType();
@@ -18491,7 +17287,8 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
// A variable of class type (or array thereof) that appears in a private
// clause requires an accessible, unambiguous copy constructor for the
// class type.
- QualType ElemType = Context.getBaseElementType(Type).getNonReferenceType();
+ QualType ElemType =
+ getASTContext().getBaseElementType(Type).getNonReferenceType();
// If an implicit firstprivate variable found it was checked already.
DSAStackTy::DSAVarData TopDVar;
@@ -18500,7 +17297,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
DSAStack->getTopDSA(D, /*FromParent=*/false);
TopDVar = DVar;
OpenMPDirectiveKind CurrDir = DSAStack->getCurrentDirective();
- bool IsConstant = ElemType.isConstant(Context);
+ bool IsConstant = ElemType.isConstant(getASTContext());
// OpenMP [2.4.13, Data-sharing Attribute Clauses]
// A list item that specifies a given variable may not appear in more
// than one clause on the same directive, except that a variable may be
@@ -18515,7 +17312,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
Diag(ELoc, diag::err_omp_wrong_dsa)
<< getOpenMPClauseName(DVar.CKind)
<< getOpenMPClauseName(OMPC_firstprivate);
- reportOriginalDsa(*this, DSAStack, D, DVar);
+ reportOriginalDsa(SemaRef, DSAStack, D, DVar);
continue;
}
@@ -18535,7 +17332,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
Diag(ELoc, diag::err_omp_wrong_dsa)
<< getOpenMPClauseName(DVar.CKind)
<< getOpenMPClauseName(OMPC_firstprivate);
- reportOriginalDsa(*this, DSAStack, D, DVar);
+ reportOriginalDsa(SemaRef, DSAStack, D, DVar);
continue;
}
@@ -18566,7 +17363,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
Diag(ELoc, diag::err_omp_required_access)
<< getOpenMPClauseName(OMPC_firstprivate)
<< getOpenMPClauseName(OMPC_shared);
- reportOriginalDsa(*this, DSAStack, D, DVar);
+ reportOriginalDsa(SemaRef, DSAStack, D, DVar);
continue;
}
}
@@ -18599,7 +17396,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
isOpenMPTeamsDirective(DVar.DKind))) {
Diag(ELoc, diag::err_omp_parallel_reduction_in_task_firstprivate)
<< getOpenMPDirectiveName(DVar.DKind);
- reportOriginalDsa(*this, DSAStack, D, DVar);
+ reportOriginalDsa(SemaRef, DSAStack, D, DVar);
continue;
}
}
@@ -18612,7 +17409,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
// A list item cannot appear in both a map clause and a data-sharing
// attribute clause on the same construct unless the construct is a
// combined construct.
- if ((LangOpts.OpenMP <= 45 &&
+ if ((getLangOpts().OpenMP <= 45 &&
isOpenMPTargetExecutionDirective(CurrDir)) ||
CurrDir == OMPD_target) {
OpenMPClauseKind ConflictKind;
@@ -18628,7 +17425,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
<< getOpenMPClauseName(OMPC_firstprivate)
<< getOpenMPClauseName(ConflictKind)
<< getOpenMPDirectiveName(DSAStack->getCurrentDirective());
- reportOriginalDsa(*this, DSAStack, D, DVar);
+ reportOriginalDsa(SemaRef, DSAStack, D, DVar);
continue;
}
}
@@ -18640,7 +17437,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
Diag(ELoc, diag::err_omp_variably_modified_type_not_supported)
<< getOpenMPClauseName(OMPC_firstprivate) << Type
<< getOpenMPDirectiveName(DSAStack->getCurrentDirective());
- bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
+ bool IsDecl = !VD || VD->isThisDeclarationADefinition(getASTContext()) ==
VarDecl::DeclarationOnly;
Diag(D->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
@@ -18650,7 +17447,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
Type = Type.getUnqualifiedType();
VarDecl *VDPrivate =
- buildVarDecl(*this, ELoc, Type, D->getName(),
+ buildVarDecl(SemaRef, ELoc, Type, D->getName(),
D->hasAttrs() ? &D->getAttrs() : nullptr,
VD ? cast<DeclRefExpr>(SimpleRefExpr) : nullptr);
// Generate helper private variable and initialize it with the value of the
@@ -18663,32 +17460,32 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
// original array element in CodeGen.
if (Type->isArrayType()) {
VarDecl *VDInit =
- buildVarDecl(*this, RefExpr->getExprLoc(), ElemType, D->getName());
- VDInitRefExpr = buildDeclRefExpr(*this, VDInit, ElemType, ELoc);
- Expr *Init = DefaultLvalueConversion(VDInitRefExpr).get();
+ buildVarDecl(SemaRef, RefExpr->getExprLoc(), ElemType, D->getName());
+ VDInitRefExpr = buildDeclRefExpr(SemaRef, VDInit, ElemType, ELoc);
+ Expr *Init = SemaRef.DefaultLvalueConversion(VDInitRefExpr).get();
ElemType = ElemType.getUnqualifiedType();
- VarDecl *VDInitTemp = buildVarDecl(*this, RefExpr->getExprLoc(), ElemType,
- ".firstprivate.temp");
+ VarDecl *VDInitTemp = buildVarDecl(SemaRef, RefExpr->getExprLoc(),
+ ElemType, ".firstprivate.temp");
InitializedEntity Entity =
InitializedEntity::InitializeVariable(VDInitTemp);
InitializationKind Kind = InitializationKind::CreateCopy(ELoc, ELoc);
- InitializationSequence InitSeq(*this, Entity, Kind, Init);
- ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Init);
+ InitializationSequence InitSeq(SemaRef, Entity, Kind, Init);
+ ExprResult Result = InitSeq.Perform(SemaRef, Entity, Kind, Init);
if (Result.isInvalid())
VDPrivate->setInvalidDecl();
else
VDPrivate->setInit(Result.getAs<Expr>());
// Remove temp variable declaration.
- Context.Deallocate(VDInitTemp);
+ getASTContext().Deallocate(VDInitTemp);
} else {
- VarDecl *VDInit = buildVarDecl(*this, RefExpr->getExprLoc(), Type,
+ VarDecl *VDInit = buildVarDecl(SemaRef, RefExpr->getExprLoc(), Type,
".firstprivate.temp");
- VDInitRefExpr = buildDeclRefExpr(*this, VDInit, RefExpr->getType(),
+ VDInitRefExpr = buildDeclRefExpr(SemaRef, VDInit, RefExpr->getType(),
RefExpr->getExprLoc());
- AddInitializerToDecl(VDPrivate,
- DefaultLvalueConversion(VDInitRefExpr).get(),
- /*DirectInit=*/false);
+ SemaRef.AddInitializerToDecl(
+ VDPrivate, SemaRef.DefaultLvalueConversion(VDInitRefExpr).get(),
+ /*DirectInit=*/false);
}
if (VDPrivate->isInvalidDecl()) {
if (IsImplicitClause) {
@@ -18697,29 +17494,30 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
}
continue;
}
- CurContext->addDecl(VDPrivate);
+ SemaRef.CurContext->addDecl(VDPrivate);
DeclRefExpr *VDPrivateRefExpr = buildDeclRefExpr(
- *this, VDPrivate, RefExpr->getType().getUnqualifiedType(),
+ SemaRef, VDPrivate, RefExpr->getType().getUnqualifiedType(),
RefExpr->getExprLoc());
DeclRefExpr *Ref = nullptr;
- if (!VD && !CurContext->isDependentContext()) {
+ if (!VD && !SemaRef.CurContext->isDependentContext()) {
if (TopDVar.CKind == OMPC_lastprivate) {
Ref = TopDVar.PrivateCopy;
} else {
auto *FD = dyn_cast<FieldDecl>(D);
VarDecl *VD = FD ? DSAStack->getImplicitFDCapExprDecl(FD) : nullptr;
if (VD)
- Ref = buildDeclRefExpr(*this, VD, VD->getType().getNonReferenceType(),
- RefExpr->getExprLoc());
+ Ref =
+ buildDeclRefExpr(SemaRef, VD, VD->getType().getNonReferenceType(),
+ RefExpr->getExprLoc());
else
- Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);
+ Ref = buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/true);
if (VD || !isOpenMPCapturedDecl(D))
ExprCaptures.push_back(Ref->getDecl());
}
}
if (!IsImplicitClause)
DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_firstprivate, Ref);
- Vars.push_back((VD || CurContext->isDependentContext())
+ Vars.push_back((VD || SemaRef.CurContext->isDependentContext())
? RefExpr->IgnoreParens()
: Ref);
PrivateCopies.push_back(VDPrivateRefExpr);
@@ -18729,12 +17527,12 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
if (Vars.empty())
return nullptr;
- return OMPFirstprivateClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- Vars, PrivateCopies, Inits,
- buildPreInits(Context, ExprCaptures));
+ return OMPFirstprivateClause::Create(
+ getASTContext(), StartLoc, LParenLoc, EndLoc, Vars, PrivateCopies, Inits,
+ buildPreInits(getASTContext(), ExprCaptures));
}
-OMPClause *Sema::ActOnOpenMPLastprivateClause(
+OMPClause *SemaOpenMP::ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc) {
@@ -18758,7 +17556,7 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
+ auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange);
if (Res.second) {
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -18776,8 +17574,8 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(
// OpenMP [2.14.3.5, Restrictions, C/C++, p.2]
// A variable that appears in a lastprivate clause must not have an
// incomplete type or a reference type.
- if (RequireCompleteType(ELoc, Type,
- diag::err_omp_lastprivate_incomplete_type))
+ if (SemaRef.RequireCompleteType(ELoc, Type,
+ diag::err_omp_lastprivate_incomplete_type))
continue;
Type = Type.getNonReferenceType();
@@ -18789,7 +17587,7 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(
// OpenMP 3.1 [2.9.3.5, lastprivate clause, Restrictions]
// A variable that appears in a lastprivate clause must not have a
// const-qualified type unless it is of class type with a mutable member.
- if (rejectConstNotMutableType(*this, D, Type, OMPC_lastprivate, ELoc))
+ if (rejectConstNotMutableType(SemaRef, D, Type, OMPC_lastprivate, ELoc))
continue;
// OpenMP 5.0 [2.19.4.5 lastprivate Clause, Restrictions]
@@ -18797,7 +17595,7 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(
// modifier must be a scalar variable.
if (LPKind == OMPC_LASTPRIVATE_conditional && !Type->isScalarType()) {
Diag(ELoc, diag::err_omp_lastprivate_conditional_non_scalar);
- bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
+ bool IsDecl = !VD || VD->isThisDeclarationADefinition(getASTContext()) ==
VarDecl::DeclarationOnly;
Diag(D->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
@@ -18822,7 +17620,7 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(
Diag(ELoc, diag::err_omp_wrong_dsa)
<< getOpenMPClauseName(DVar.CKind)
<< getOpenMPClauseName(OMPC_lastprivate);
- reportOriginalDsa(*this, DSAStack, D, DVar);
+ reportOriginalDsa(SemaRef, DSAStack, D, DVar);
continue;
}
@@ -18841,7 +17639,7 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(
Diag(ELoc, diag::err_omp_required_access)
<< getOpenMPClauseName(OMPC_lastprivate)
<< getOpenMPClauseName(OMPC_shared);
- reportOriginalDsa(*this, DSAStack, D, DVar);
+ reportOriginalDsa(SemaRef, DSAStack, D, DVar);
continue;
}
}
@@ -18854,53 +17652,53 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(
// A variable of class type (or array thereof) that appears in a
// lastprivate clause requires an accessible, unambiguous copy assignment
// operator for the class type.
- Type = Context.getBaseElementType(Type).getNonReferenceType();
- VarDecl *SrcVD = buildVarDecl(*this, ERange.getBegin(),
+ Type = getASTContext().getBaseElementType(Type).getNonReferenceType();
+ VarDecl *SrcVD = buildVarDecl(SemaRef, ERange.getBegin(),
Type.getUnqualifiedType(), ".lastprivate.src",
D->hasAttrs() ? &D->getAttrs() : nullptr);
DeclRefExpr *PseudoSrcExpr =
- buildDeclRefExpr(*this, SrcVD, Type.getUnqualifiedType(), ELoc);
+ buildDeclRefExpr(SemaRef, SrcVD, Type.getUnqualifiedType(), ELoc);
VarDecl *DstVD =
- buildVarDecl(*this, ERange.getBegin(), Type, ".lastprivate.dst",
+ buildVarDecl(SemaRef, ERange.getBegin(), Type, ".lastprivate.dst",
D->hasAttrs() ? &D->getAttrs() : nullptr);
- DeclRefExpr *PseudoDstExpr = buildDeclRefExpr(*this, DstVD, Type, ELoc);
+ DeclRefExpr *PseudoDstExpr = buildDeclRefExpr(SemaRef, DstVD, Type, ELoc);
// For arrays generate assignment operation for single element and replace
// it by the original array element in CodeGen.
- ExprResult AssignmentOp = BuildBinOp(/*S=*/nullptr, ELoc, BO_Assign,
- PseudoDstExpr, PseudoSrcExpr);
+ ExprResult AssignmentOp = SemaRef.BuildBinOp(/*S=*/nullptr, ELoc, BO_Assign,
+ PseudoDstExpr, PseudoSrcExpr);
if (AssignmentOp.isInvalid())
continue;
- AssignmentOp =
- ActOnFinishFullExpr(AssignmentOp.get(), ELoc, /*DiscardedValue*/ false);
+ AssignmentOp = SemaRef.ActOnFinishFullExpr(AssignmentOp.get(), ELoc,
+ /*DiscardedValue*/ false);
if (AssignmentOp.isInvalid())
continue;
DeclRefExpr *Ref = nullptr;
- if (!VD && !CurContext->isDependentContext()) {
+ if (!VD && !SemaRef.CurContext->isDependentContext()) {
if (TopDVar.CKind == OMPC_firstprivate) {
Ref = TopDVar.PrivateCopy;
} else {
- Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/false);
+ Ref = buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/false);
if (!isOpenMPCapturedDecl(D))
ExprCaptures.push_back(Ref->getDecl());
}
if ((TopDVar.CKind == OMPC_firstprivate && !TopDVar.PrivateCopy) ||
(!isOpenMPCapturedDecl(D) &&
Ref->getDecl()->hasAttr<OMPCaptureNoInitAttr>())) {
- ExprResult RefRes = DefaultLvalueConversion(Ref);
+ ExprResult RefRes = SemaRef.DefaultLvalueConversion(Ref);
if (!RefRes.isUsable())
continue;
ExprResult PostUpdateRes =
- BuildBinOp(DSAStack->getCurScope(), ELoc, BO_Assign, SimpleRefExpr,
- RefRes.get());
+ SemaRef.BuildBinOp(DSAStack->getCurScope(), ELoc, BO_Assign,
+ SimpleRefExpr, RefRes.get());
if (!PostUpdateRes.isUsable())
continue;
ExprPostUpdates.push_back(
- IgnoredValueConversions(PostUpdateRes.get()).get());
+ SemaRef.IgnoredValueConversions(PostUpdateRes.get()).get());
}
}
DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_lastprivate, Ref);
- Vars.push_back((VD || CurContext->isDependentContext())
+ Vars.push_back((VD || SemaRef.CurContext->isDependentContext())
? RefExpr->IgnoreParens()
: Ref);
SrcExprs.push_back(PseudoSrcExpr);
@@ -18911,24 +17709,24 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(
if (Vars.empty())
return nullptr;
- return OMPLastprivateClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- Vars, SrcExprs, DstExprs, AssignmentOps,
- LPKind, LPKindLoc, ColonLoc,
- buildPreInits(Context, ExprCaptures),
- buildPostUpdate(*this, ExprPostUpdates));
+ return OMPLastprivateClause::Create(
+ getASTContext(), StartLoc, LParenLoc, EndLoc, Vars, SrcExprs, DstExprs,
+ AssignmentOps, LPKind, LPKindLoc, ColonLoc,
+ buildPreInits(getASTContext(), ExprCaptures),
+ buildPostUpdate(SemaRef, ExprPostUpdates));
}
-OMPClause *Sema::ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
for (Expr *RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP lastprivate clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
+ auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange);
if (Res.second) {
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -18950,15 +17748,16 @@ OMPClause *Sema::ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
DVar.RefExpr) {
Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind)
<< getOpenMPClauseName(OMPC_shared);
- reportOriginalDsa(*this, DSAStack, D, DVar);
+ reportOriginalDsa(SemaRef, DSAStack, D, DVar);
continue;
}
DeclRefExpr *Ref = nullptr;
- if (!VD && isOpenMPCapturedDecl(D) && !CurContext->isDependentContext())
- Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);
+ if (!VD && isOpenMPCapturedDecl(D) &&
+ !SemaRef.CurContext->isDependentContext())
+ Ref = buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/true);
DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_shared, Ref);
- Vars.push_back((VD || !Ref || CurContext->isDependentContext())
+ Vars.push_back((VD || !Ref || SemaRef.CurContext->isDependentContext())
? RefExpr->IgnoreParens()
: Ref);
}
@@ -18966,7 +17765,8 @@ OMPClause *Sema::ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
if (Vars.empty())
return nullptr;
- return OMPSharedClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars);
+ return OMPSharedClause::Create(getASTContext(), StartLoc, LParenLoc, EndLoc,
+ Vars);
}
namespace {
@@ -19124,7 +17924,8 @@ buildDeclareReductionRef(Sema &SemaRef, SourceLocation Loc, SourceRange Range,
if (S) {
LookupResult Lookup(SemaRef, ReductionId, Sema::LookupOMPReductionName);
Lookup.suppressDiagnostics();
- while (S && SemaRef.LookupParsedName(Lookup, S, &ReductionIdScopeSpec)) {
+ while (S && SemaRef.LookupParsedName(Lookup, S, &ReductionIdScopeSpec,
+ /*ObjectType=*/QualType())) {
NamedDecl *D = Lookup.getRepresentativeDecl();
do {
S = S->getParent();
@@ -19167,7 +17968,8 @@ buildDeclareReductionRef(Sema &SemaRef, SourceLocation Loc, SourceRange Range,
return UnresolvedLookupExpr::Create(
SemaRef.Context, /*NamingClass=*/nullptr,
ReductionIdScopeSpec.getWithLocInContext(SemaRef.Context), ReductionId,
- /*ADL=*/true, /*Overloaded=*/true, ResSet.begin(), ResSet.end());
+ /*ADL=*/true, ResSet.begin(), ResSet.end(), /*KnownDependent=*/false,
+ /*KnownInstantiationDependent=*/false);
}
// Lookup inside the classes.
// C++ [over.match.oper]p3:
@@ -19326,7 +18128,7 @@ struct ReductionData {
} // namespace
static bool checkOMPArraySectionConstantForReduction(
- ASTContext &Context, const OMPArraySectionExpr *OASE, bool &SingleElement,
+ ASTContext &Context, const ArraySectionExpr *OASE, bool &SingleElement,
SmallVectorImpl<llvm::APSInt> &ArraySizes) {
const Expr *Length = OASE->getLength();
if (Length == nullptr) {
@@ -19353,7 +18155,7 @@ static bool checkOMPArraySectionConstantForReduction(
// We require length = 1 for all array sections except the right-most to
// guarantee that the memory region is contiguous and has no holes in it.
- while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base)) {
+ while (const auto *TempOASE = dyn_cast<ArraySectionExpr>(Base)) {
Length = TempOASE->getLength();
if (Length == nullptr) {
// For array sections of the form [1:] or [:], we would need to analyze
@@ -19558,12 +18360,12 @@ static bool actOnOMPReductionKindClause(
Expr *TaskgroupDescriptor = nullptr;
QualType Type;
auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr->IgnoreParens());
- auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr->IgnoreParens());
+ auto *OASE = dyn_cast<ArraySectionExpr>(RefExpr->IgnoreParens());
if (ASE) {
Type = ASE->getType().getNonReferenceType();
} else if (OASE) {
QualType BaseType =
- OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
+ ArraySectionExpr::getBaseOriginalType(OASE->getBase());
if (const auto *ATy = BaseType->getAsArrayTypeUnsafe())
Type = ATy->getElementType();
else
@@ -20087,7 +18889,7 @@ static bool actOnOMPReductionKindClause(
} else {
VarsExpr = Ref = buildCapture(S, D, SimpleRefExpr, /*WithInit=*/false);
}
- if (!S.isOpenMPCapturedDecl(D)) {
+ if (!S.OpenMP().isOpenMPCapturedDecl(D)) {
RD.ExprCaptures.emplace_back(Ref->getDecl());
if (Ref->getDecl()->hasAttr<OMPCaptureNoInitAttr>()) {
ExprResult RefRes = S.DefaultLvalueConversion(Ref);
@@ -20137,7 +18939,7 @@ static bool actOnOMPReductionKindClause(
return RD.Vars.empty();
}
-OMPClause *Sema::ActOnOpenMPReductionClause(
+OMPClause *SemaOpenMP::ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
@@ -20166,77 +18968,80 @@ OMPClause *Sema::ActOnOpenMPReductionClause(
}
ReductionData RD(VarList.size(), Modifier);
- if (actOnOMPReductionKindClause(*this, DSAStack, OMPC_reduction, VarList,
+ if (actOnOMPReductionKindClause(SemaRef, DSAStack, OMPC_reduction, VarList,
StartLoc, LParenLoc, ColonLoc, EndLoc,
ReductionIdScopeSpec, ReductionId,
UnresolvedReductions, RD))
return nullptr;
return OMPReductionClause::Create(
- Context, StartLoc, LParenLoc, ModifierLoc, ColonLoc, EndLoc, Modifier,
- RD.Vars, ReductionIdScopeSpec.getWithLocInContext(Context), ReductionId,
+ getASTContext(), StartLoc, LParenLoc, ModifierLoc, ColonLoc, EndLoc,
+ Modifier, RD.Vars,
+ ReductionIdScopeSpec.getWithLocInContext(getASTContext()), ReductionId,
RD.Privates, RD.LHSs, RD.RHSs, RD.ReductionOps, RD.InscanCopyOps,
RD.InscanCopyArrayTemps, RD.InscanCopyArrayElems,
- buildPreInits(Context, RD.ExprCaptures),
- buildPostUpdate(*this, RD.ExprPostUpdates));
+ buildPreInits(getASTContext(), RD.ExprCaptures),
+ buildPostUpdate(SemaRef, RD.ExprPostUpdates));
}
-OMPClause *Sema::ActOnOpenMPTaskReductionClause(
+OMPClause *SemaOpenMP::ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions) {
ReductionData RD(VarList.size());
- if (actOnOMPReductionKindClause(*this, DSAStack, OMPC_task_reduction, VarList,
- StartLoc, LParenLoc, ColonLoc, EndLoc,
- ReductionIdScopeSpec, ReductionId,
+ if (actOnOMPReductionKindClause(SemaRef, DSAStack, OMPC_task_reduction,
+ VarList, StartLoc, LParenLoc, ColonLoc,
+ EndLoc, ReductionIdScopeSpec, ReductionId,
UnresolvedReductions, RD))
return nullptr;
return OMPTaskReductionClause::Create(
- Context, StartLoc, LParenLoc, ColonLoc, EndLoc, RD.Vars,
- ReductionIdScopeSpec.getWithLocInContext(Context), ReductionId,
+ getASTContext(), StartLoc, LParenLoc, ColonLoc, EndLoc, RD.Vars,
+ ReductionIdScopeSpec.getWithLocInContext(getASTContext()), ReductionId,
RD.Privates, RD.LHSs, RD.RHSs, RD.ReductionOps,
- buildPreInits(Context, RD.ExprCaptures),
- buildPostUpdate(*this, RD.ExprPostUpdates));
+ buildPreInits(getASTContext(), RD.ExprCaptures),
+ buildPostUpdate(SemaRef, RD.ExprPostUpdates));
}
-OMPClause *Sema::ActOnOpenMPInReductionClause(
+OMPClause *SemaOpenMP::ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions) {
ReductionData RD(VarList.size());
- if (actOnOMPReductionKindClause(*this, DSAStack, OMPC_in_reduction, VarList,
+ if (actOnOMPReductionKindClause(SemaRef, DSAStack, OMPC_in_reduction, VarList,
StartLoc, LParenLoc, ColonLoc, EndLoc,
ReductionIdScopeSpec, ReductionId,
UnresolvedReductions, RD))
return nullptr;
return OMPInReductionClause::Create(
- Context, StartLoc, LParenLoc, ColonLoc, EndLoc, RD.Vars,
- ReductionIdScopeSpec.getWithLocInContext(Context), ReductionId,
+ getASTContext(), StartLoc, LParenLoc, ColonLoc, EndLoc, RD.Vars,
+ ReductionIdScopeSpec.getWithLocInContext(getASTContext()), ReductionId,
RD.Privates, RD.LHSs, RD.RHSs, RD.ReductionOps, RD.TaskgroupDescriptors,
- buildPreInits(Context, RD.ExprCaptures),
- buildPostUpdate(*this, RD.ExprPostUpdates));
+ buildPreInits(getASTContext(), RD.ExprCaptures),
+ buildPostUpdate(SemaRef, RD.ExprPostUpdates));
}
-bool Sema::CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
- SourceLocation LinLoc) {
- if ((!LangOpts.CPlusPlus && LinKind != OMPC_LINEAR_val) ||
+bool SemaOpenMP::CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
+ SourceLocation LinLoc) {
+ if ((!getLangOpts().CPlusPlus && LinKind != OMPC_LINEAR_val) ||
LinKind == OMPC_LINEAR_unknown || LinKind == OMPC_LINEAR_step) {
- Diag(LinLoc, diag::err_omp_wrong_linear_modifier) << LangOpts.CPlusPlus;
+ Diag(LinLoc, diag::err_omp_wrong_linear_modifier)
+ << getLangOpts().CPlusPlus;
return true;
}
return false;
}
-bool Sema::CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
- OpenMPLinearClauseKind LinKind, QualType Type,
- bool IsDeclareSimd) {
+bool SemaOpenMP::CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
+ OpenMPLinearClauseKind LinKind,
+ QualType Type, bool IsDeclareSimd) {
const auto *VD = dyn_cast_or_null<VarDecl>(D);
// A variable must not have an incomplete type or a reference type.
- if (RequireCompleteType(ELoc, Type, diag::err_omp_linear_incomplete_type))
+ if (SemaRef.RequireCompleteType(ELoc, Type,
+ diag::err_omp_linear_incomplete_type))
return true;
if ((LinKind == OMPC_LINEAR_uval || LinKind == OMPC_LINEAR_ref) &&
!Type->isReferenceType()) {
@@ -20252,17 +19057,17 @@ bool Sema::CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
// not apply to the firstprivate clause, nor to the linear clause on
// declarative directives (like declare simd).
if (!IsDeclareSimd &&
- rejectConstNotMutableType(*this, D, Type, OMPC_linear, ELoc))
+ rejectConstNotMutableType(SemaRef, D, Type, OMPC_linear, ELoc))
return true;
// A list item must be of integral or pointer type.
Type = Type.getUnqualifiedType().getCanonicalType();
const auto *Ty = Type.getTypePtrOrNull();
if (!Ty || (LinKind != OMPC_LINEAR_ref && !Ty->isDependentType() &&
- !Ty->isIntegralType(Context) && !Ty->isPointerType())) {
+ !Ty->isIntegralType(getASTContext()) && !Ty->isPointerType())) {
Diag(ELoc, diag::err_omp_linear_expected_int_or_ptr) << Type;
if (D) {
- bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
+ bool IsDecl = !VD || VD->isThisDeclarationADefinition(getASTContext()) ==
VarDecl::DeclarationOnly;
Diag(D->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
@@ -20273,7 +19078,7 @@ bool Sema::CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
return false;
}
-OMPClause *Sema::ActOnOpenMPLinearClause(
+OMPClause *SemaOpenMP::ActOnOpenMPLinearClause(
ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc,
SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc, SourceLocation ColonLoc,
@@ -20296,7 +19101,7 @@ OMPClause *Sema::ActOnOpenMPLinearClause(
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
+ auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange);
if (Res.second) {
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -20318,7 +19123,7 @@ OMPClause *Sema::ActOnOpenMPLinearClause(
if (DVar.RefExpr) {
Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind)
<< getOpenMPClauseName(OMPC_linear);
- reportOriginalDsa(*this, DSAStack, D, DVar);
+ reportOriginalDsa(SemaRef, DSAStack, D, DVar);
continue;
}
@@ -20328,29 +19133,29 @@ OMPClause *Sema::ActOnOpenMPLinearClause(
// Build private copy of original var.
VarDecl *Private =
- buildVarDecl(*this, ELoc, Type, D->getName(),
+ buildVarDecl(SemaRef, ELoc, Type, D->getName(),
D->hasAttrs() ? &D->getAttrs() : nullptr,
VD ? cast<DeclRefExpr>(SimpleRefExpr) : nullptr);
- DeclRefExpr *PrivateRef = buildDeclRefExpr(*this, Private, Type, ELoc);
+ DeclRefExpr *PrivateRef = buildDeclRefExpr(SemaRef, Private, Type, ELoc);
// Build var to save initial value.
- VarDecl *Init = buildVarDecl(*this, ELoc, Type, ".linear.start");
+ VarDecl *Init = buildVarDecl(SemaRef, ELoc, Type, ".linear.start");
Expr *InitExpr;
DeclRefExpr *Ref = nullptr;
- if (!VD && !CurContext->isDependentContext()) {
- Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/false);
+ if (!VD && !SemaRef.CurContext->isDependentContext()) {
+ Ref = buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/false);
if (!isOpenMPCapturedDecl(D)) {
ExprCaptures.push_back(Ref->getDecl());
if (Ref->getDecl()->hasAttr<OMPCaptureNoInitAttr>()) {
- ExprResult RefRes = DefaultLvalueConversion(Ref);
+ ExprResult RefRes = SemaRef.DefaultLvalueConversion(Ref);
if (!RefRes.isUsable())
continue;
ExprResult PostUpdateRes =
- BuildBinOp(DSAStack->getCurScope(), ELoc, BO_Assign,
- SimpleRefExpr, RefRes.get());
+ SemaRef.BuildBinOp(DSAStack->getCurScope(), ELoc, BO_Assign,
+ SimpleRefExpr, RefRes.get());
if (!PostUpdateRes.isUsable())
continue;
ExprPostUpdates.push_back(
- IgnoredValueConversions(PostUpdateRes.get()).get());
+ SemaRef.IgnoredValueConversions(PostUpdateRes.get()).get());
}
}
}
@@ -20358,12 +19163,13 @@ OMPClause *Sema::ActOnOpenMPLinearClause(
InitExpr = VD ? VD->getInit() : SimpleRefExpr;
else
InitExpr = VD ? SimpleRefExpr : Ref;
- AddInitializerToDecl(Init, DefaultLvalueConversion(InitExpr).get(),
- /*DirectInit=*/false);
- DeclRefExpr *InitRef = buildDeclRefExpr(*this, Init, Type, ELoc);
+ SemaRef.AddInitializerToDecl(
+ Init, SemaRef.DefaultLvalueConversion(InitExpr).get(),
+ /*DirectInit=*/false);
+ DeclRefExpr *InitRef = buildDeclRefExpr(SemaRef, Init, Type, ELoc);
DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_linear, Ref);
- Vars.push_back((VD || CurContext->isDependentContext())
+ Vars.push_back((VD || SemaRef.CurContext->isDependentContext())
? RefExpr->IgnoreParens()
: Ref);
Privates.push_back(PrivateRef);
@@ -20386,17 +19192,18 @@ OMPClause *Sema::ActOnOpenMPLinearClause(
// Build var to save the step value.
VarDecl *SaveVar =
- buildVarDecl(*this, StepLoc, StepExpr->getType(), ".linear.step");
+ buildVarDecl(SemaRef, StepLoc, StepExpr->getType(), ".linear.step");
ExprResult SaveRef =
- buildDeclRefExpr(*this, SaveVar, StepExpr->getType(), StepLoc);
- ExprResult CalcStep =
- BuildBinOp(CurScope, StepLoc, BO_Assign, SaveRef.get(), StepExpr);
- CalcStep = ActOnFinishFullExpr(CalcStep.get(), /*DiscardedValue*/ false);
+ buildDeclRefExpr(SemaRef, SaveVar, StepExpr->getType(), StepLoc);
+ ExprResult CalcStep = SemaRef.BuildBinOp(
+ SemaRef.getCurScope(), StepLoc, BO_Assign, SaveRef.get(), StepExpr);
+ CalcStep =
+ SemaRef.ActOnFinishFullExpr(CalcStep.get(), /*DiscardedValue*/ false);
// Warn about zero linear step (it would be probably better specified as
// making corresponding variables 'const').
if (std::optional<llvm::APSInt> Result =
- StepExpr->getIntegerConstantExpr(Context)) {
+ StepExpr->getIntegerConstantExpr(getASTContext())) {
if (!Result->isNegative() && !Result->isStrictlyPositive())
Diag(StepLoc, diag::warn_omp_linear_step_zero)
<< Vars[0] << (Vars.size() > 1);
@@ -20407,11 +19214,11 @@ OMPClause *Sema::ActOnOpenMPLinearClause(
}
}
- return OMPLinearClause::Create(Context, StartLoc, LParenLoc, LinKind, LinLoc,
- ColonLoc, StepModifierLoc, EndLoc, Vars,
- Privates, Inits, StepExpr, CalcStepExpr,
- buildPreInits(Context, ExprCaptures),
- buildPostUpdate(*this, ExprPostUpdates));
+ return OMPLinearClause::Create(getASTContext(), StartLoc, LParenLoc, LinKind,
+ LinLoc, ColonLoc, StepModifierLoc, EndLoc,
+ Vars, Privates, Inits, StepExpr, CalcStepExpr,
+ buildPreInits(getASTContext(), ExprCaptures),
+ buildPostUpdate(SemaRef, ExprPostUpdates));
}
static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
@@ -20517,7 +19324,7 @@ static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
return HasErrors;
}
-OMPClause *Sema::ActOnOpenMPAlignedClause(
+OMPClause *SemaOpenMP::ActOnOpenMPAlignedClause(
ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
@@ -20526,7 +19333,7 @@ OMPClause *Sema::ActOnOpenMPAlignedClause(
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
+ auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange);
if (Res.second) {
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -20546,7 +19353,7 @@ OMPClause *Sema::ActOnOpenMPAlignedClause(
if (!Ty || (!Ty->isArrayType() && !Ty->isPointerType())) {
Diag(ELoc, diag::err_omp_aligned_expected_array_or_ptr)
<< QType << getLangOpts().CPlusPlus << ERange;
- bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
+ bool IsDecl = !VD || VD->isThisDeclarationADefinition(getASTContext()) ==
VarDecl::DeclarationOnly;
Diag(D->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
@@ -20566,9 +19373,10 @@ OMPClause *Sema::ActOnOpenMPAlignedClause(
DeclRefExpr *Ref = nullptr;
if (!VD && isOpenMPCapturedDecl(D))
- Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);
- Vars.push_back(DefaultFunctionArrayConversion(
- (VD || !Ref) ? RefExpr->IgnoreParens() : Ref)
+ Ref = buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/true);
+ Vars.push_back(SemaRef
+ .DefaultFunctionArrayConversion(
+ (VD || !Ref) ? RefExpr->IgnoreParens() : Ref)
.get());
}
@@ -20587,14 +19395,14 @@ OMPClause *Sema::ActOnOpenMPAlignedClause(
if (Vars.empty())
return nullptr;
- return OMPAlignedClause::Create(Context, StartLoc, LParenLoc, ColonLoc,
- EndLoc, Vars, Alignment);
+ return OMPAlignedClause::Create(getASTContext(), StartLoc, LParenLoc,
+ ColonLoc, EndLoc, Vars, Alignment);
}
-OMPClause *Sema::ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
SmallVector<Expr *, 8> SrcExprs;
SmallVector<Expr *, 8> DstExprs;
@@ -20648,26 +19456,28 @@ OMPClause *Sema::ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
// A variable of class type (or array thereof) that appears in a
// copyin clause requires an accessible, unambiguous copy assignment
// operator for the class type.
- QualType ElemType = Context.getBaseElementType(Type).getNonReferenceType();
+ QualType ElemType =
+ getASTContext().getBaseElementType(Type).getNonReferenceType();
VarDecl *SrcVD =
- buildVarDecl(*this, DE->getBeginLoc(), ElemType.getUnqualifiedType(),
+ buildVarDecl(SemaRef, DE->getBeginLoc(), ElemType.getUnqualifiedType(),
".copyin.src", VD->hasAttrs() ? &VD->getAttrs() : nullptr);
DeclRefExpr *PseudoSrcExpr = buildDeclRefExpr(
- *this, SrcVD, ElemType.getUnqualifiedType(), DE->getExprLoc());
+ SemaRef, SrcVD, ElemType.getUnqualifiedType(), DE->getExprLoc());
VarDecl *DstVD =
- buildVarDecl(*this, DE->getBeginLoc(), ElemType, ".copyin.dst",
+ buildVarDecl(SemaRef, DE->getBeginLoc(), ElemType, ".copyin.dst",
VD->hasAttrs() ? &VD->getAttrs() : nullptr);
DeclRefExpr *PseudoDstExpr =
- buildDeclRefExpr(*this, DstVD, ElemType, DE->getExprLoc());
+ buildDeclRefExpr(SemaRef, DstVD, ElemType, DE->getExprLoc());
// For arrays generate assignment operation for single element and replace
// it by the original array element in CodeGen.
ExprResult AssignmentOp =
- BuildBinOp(/*S=*/nullptr, DE->getExprLoc(), BO_Assign, PseudoDstExpr,
- PseudoSrcExpr);
+ SemaRef.BuildBinOp(/*S=*/nullptr, DE->getExprLoc(), BO_Assign,
+ PseudoDstExpr, PseudoSrcExpr);
if (AssignmentOp.isInvalid())
continue;
- AssignmentOp = ActOnFinishFullExpr(AssignmentOp.get(), DE->getExprLoc(),
- /*DiscardedValue*/ false);
+ AssignmentOp =
+ SemaRef.ActOnFinishFullExpr(AssignmentOp.get(), DE->getExprLoc(),
+ /*DiscardedValue*/ false);
if (AssignmentOp.isInvalid())
continue;
@@ -20681,14 +19491,14 @@ OMPClause *Sema::ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
if (Vars.empty())
return nullptr;
- return OMPCopyinClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars,
- SrcExprs, DstExprs, AssignmentOps);
+ return OMPCopyinClause::Create(getASTContext(), StartLoc, LParenLoc, EndLoc,
+ Vars, SrcExprs, DstExprs, AssignmentOps);
}
-OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
SmallVector<Expr *, 8> SrcExprs;
SmallVector<Expr *, 8> DstExprs;
@@ -20698,7 +19508,7 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
+ auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange);
if (Res.second) {
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -20724,7 +19534,7 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
Diag(ELoc, diag::err_omp_wrong_dsa)
<< getOpenMPClauseName(DVar.CKind)
<< getOpenMPClauseName(OMPC_copyprivate);
- reportOriginalDsa(*this, DSAStack, D, DVar);
+ reportOriginalDsa(SemaRef, DSAStack, D, DVar);
continue;
}
@@ -20737,7 +19547,7 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
Diag(ELoc, diag::err_omp_required_access)
<< getOpenMPClauseName(OMPC_copyprivate)
<< "threadprivate or private in the enclosing context";
- reportOriginalDsa(*this, DSAStack, D, DVar);
+ reportOriginalDsa(SemaRef, DSAStack, D, DVar);
continue;
}
}
@@ -20748,7 +19558,7 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
Diag(ELoc, diag::err_omp_variably_modified_type_not_supported)
<< getOpenMPClauseName(OMPC_copyprivate) << Type
<< getOpenMPDirectiveName(DSAStack->getCurrentDirective());
- bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
+ bool IsDecl = !VD || VD->isThisDeclarationADefinition(getASTContext()) ==
VarDecl::DeclarationOnly;
Diag(D->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
@@ -20760,22 +19570,23 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
// A variable of class type (or array thereof) that appears in a
// copyin clause requires an accessible, unambiguous copy assignment
// operator for the class type.
- Type = Context.getBaseElementType(Type.getNonReferenceType())
+ Type = getASTContext()
+ .getBaseElementType(Type.getNonReferenceType())
.getUnqualifiedType();
VarDecl *SrcVD =
- buildVarDecl(*this, RefExpr->getBeginLoc(), Type, ".copyprivate.src",
+ buildVarDecl(SemaRef, RefExpr->getBeginLoc(), Type, ".copyprivate.src",
D->hasAttrs() ? &D->getAttrs() : nullptr);
- DeclRefExpr *PseudoSrcExpr = buildDeclRefExpr(*this, SrcVD, Type, ELoc);
+ DeclRefExpr *PseudoSrcExpr = buildDeclRefExpr(SemaRef, SrcVD, Type, ELoc);
VarDecl *DstVD =
- buildVarDecl(*this, RefExpr->getBeginLoc(), Type, ".copyprivate.dst",
+ buildVarDecl(SemaRef, RefExpr->getBeginLoc(), Type, ".copyprivate.dst",
D->hasAttrs() ? &D->getAttrs() : nullptr);
- DeclRefExpr *PseudoDstExpr = buildDeclRefExpr(*this, DstVD, Type, ELoc);
- ExprResult AssignmentOp = BuildBinOp(
+ DeclRefExpr *PseudoDstExpr = buildDeclRefExpr(SemaRef, DstVD, Type, ELoc);
+ ExprResult AssignmentOp = SemaRef.BuildBinOp(
DSAStack->getCurScope(), ELoc, BO_Assign, PseudoDstExpr, PseudoSrcExpr);
if (AssignmentOp.isInvalid())
continue;
- AssignmentOp =
- ActOnFinishFullExpr(AssignmentOp.get(), ELoc, /*DiscardedValue*/ false);
+ AssignmentOp = SemaRef.ActOnFinishFullExpr(AssignmentOp.get(), ELoc,
+ /*DiscardedValue*/ false);
if (AssignmentOp.isInvalid())
continue;
@@ -20784,7 +19595,7 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
assert(VD || isOpenMPCapturedDecl(D));
Vars.push_back(
VD ? RefExpr->IgnoreParens()
- : buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/false));
+ : buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/false));
SrcExprs.push_back(PseudoSrcExpr);
DstExprs.push_back(PseudoDstExpr);
AssignmentOps.push_back(AssignmentOp.get());
@@ -20793,18 +19604,20 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
if (Vars.empty())
return nullptr;
- return OMPCopyprivateClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- Vars, SrcExprs, DstExprs, AssignmentOps);
+ return OMPCopyprivateClause::Create(getASTContext(), StartLoc, LParenLoc,
+ EndLoc, Vars, SrcExprs, DstExprs,
+ AssignmentOps);
}
-OMPClause *Sema::ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
if (VarList.empty())
return nullptr;
- return OMPFlushClause::Create(Context, StartLoc, LParenLoc, EndLoc, VarList);
+ return OMPFlushClause::Create(getASTContext(), StartLoc, LParenLoc, EndLoc,
+ VarList);
}
/// Tries to find omp_depend_t. type.
@@ -20824,22 +19637,23 @@ static bool findOMPDependT(Sema &S, SourceLocation Loc, DSAStackTy *Stack,
return true;
}
-OMPClause *Sema::ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPDepobjClause(Expr *Depobj,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
if (!Depobj)
return nullptr;
- bool OMPDependTFound = findOMPDependT(*this, StartLoc, DSAStack);
+ bool OMPDependTFound = findOMPDependT(SemaRef, StartLoc, DSAStack);
// OpenMP 5.0, 2.17.10.1 depobj Construct
// depobj is an lvalue expression of type omp_depend_t.
if (!Depobj->isTypeDependent() && !Depobj->isValueDependent() &&
!Depobj->isInstantiationDependent() &&
!Depobj->containsUnexpandedParameterPack() &&
- (OMPDependTFound &&
- !Context.typesAreCompatible(DSAStack->getOMPDependT(), Depobj->getType(),
- /*CompareUnqualified=*/true))) {
+ (OMPDependTFound && !getASTContext().typesAreCompatible(
+ DSAStack->getOMPDependT(), Depobj->getType(),
+ /*CompareUnqualified=*/true))) {
Diag(Depobj->getExprLoc(), diag::err_omp_expected_omp_depend_t_lvalue)
<< 0 << Depobj->getType() << Depobj->getSourceRange();
}
@@ -20849,7 +19663,8 @@ OMPClause *Sema::ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
<< 1 << Depobj->getSourceRange();
}
- return OMPDepobjClause::Create(Context, StartLoc, LParenLoc, EndLoc, Depobj);
+ return OMPDepobjClause::Create(getASTContext(), StartLoc, LParenLoc, EndLoc,
+ Depobj);
}
namespace {
@@ -20949,8 +19764,9 @@ ProcessOpenMPDoacrossClauseCommon(Sema &SemaRef, bool IsSource,
continue;
}
if (RHS) {
- ExprResult RHSRes = SemaRef.VerifyPositiveIntegerConstantInClause(
- RHS, OMPC_depend, /*StrictlyPositive=*/false);
+ ExprResult RHSRes =
+ SemaRef.OpenMP().VerifyPositiveIntegerConstantInClause(
+ RHS, OMPC_depend, /*StrictlyPositive=*/false);
if (RHSRes.isInvalid())
continue;
}
@@ -20981,11 +19797,10 @@ ProcessOpenMPDoacrossClauseCommon(Sema &SemaRef, bool IsSource,
return {Vars, OpsOffs, TotalDepCount};
}
-OMPClause *
-Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data,
- Expr *DepModifier, ArrayRef<Expr *> VarList,
- SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPDependClause(
+ const OMPDependClause::DependDataTy &Data, Expr *DepModifier,
+ ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
OpenMPDependClauseKind DepKind = Data.DepKind;
SourceLocation DepLoc = Data.DepLoc;
if (DSAStack->getCurrentDirective() == OMPD_ordered &&
@@ -21003,17 +19818,18 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data,
DSAStack->getCurrentDirective() == OMPD_depobj) &&
(DepKind == OMPC_DEPEND_unknown || DepKind == OMPC_DEPEND_source ||
DepKind == OMPC_DEPEND_sink ||
- ((LangOpts.OpenMP < 50 ||
+ ((getLangOpts().OpenMP < 50 ||
DSAStack->getCurrentDirective() == OMPD_depobj) &&
DepKind == OMPC_DEPEND_depobj))) {
SmallVector<unsigned, 6> Except = {OMPC_DEPEND_source, OMPC_DEPEND_sink,
OMPC_DEPEND_outallmemory,
OMPC_DEPEND_inoutallmemory};
- if (LangOpts.OpenMP < 50 || DSAStack->getCurrentDirective() == OMPD_depobj)
+ if (getLangOpts().OpenMP < 50 ||
+ DSAStack->getCurrentDirective() == OMPD_depobj)
Except.push_back(OMPC_DEPEND_depobj);
- if (LangOpts.OpenMP < 51)
+ if (getLangOpts().OpenMP < 51)
Except.push_back(OMPC_DEPEND_inoutset);
- std::string Expected = (LangOpts.OpenMP >= 50 && !DepModifier)
+ std::string Expected = (getLangOpts().OpenMP >= 50 && !DepModifier)
? "depend modifier(iterator) or "
: "";
Diag(DepLoc, diag::err_omp_unexpected_clause_value)
@@ -21039,7 +19855,7 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data,
if (DepKind == OMPC_DEPEND_sink || DepKind == OMPC_DEPEND_source) {
DoacrossDataInfoTy VarOffset = ProcessOpenMPDoacrossClauseCommon(
- *this, DepKind == OMPC_DEPEND_source, VarList, DSAStack, EndLoc);
+ SemaRef, DepKind == OMPC_DEPEND_source, VarList, DSAStack, EndLoc);
Vars = VarOffset.Vars;
OpsOffs = VarOffset.OpsOffs;
TotalDepCount = VarOffset.TotalDepCount;
@@ -21055,9 +19871,9 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data,
SourceLocation ELoc = RefExpr->getExprLoc();
Expr *SimpleExpr = RefExpr->IgnoreParenCasts();
if (DepKind != OMPC_DEPEND_sink && DepKind != OMPC_DEPEND_source) {
- bool OMPDependTFound = LangOpts.OpenMP >= 50;
+ bool OMPDependTFound = getLangOpts().OpenMP >= 50;
if (OMPDependTFound)
- OMPDependTFound = findOMPDependT(*this, StartLoc, DSAStack,
+ OMPDependTFound = findOMPDependT(SemaRef, StartLoc, DSAStack,
DepKind == OMPC_DEPEND_depobj);
if (DepKind == OMPC_DEPEND_depobj) {
// OpenMP 5.0, 2.17.11 depend Clause, Restrictions, C/C++
@@ -21067,8 +19883,8 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data,
!RefExpr->isInstantiationDependent() &&
!RefExpr->containsUnexpandedParameterPack() &&
(OMPDependTFound &&
- !Context.hasSameUnqualifiedType(DSAStack->getOMPDependT(),
- RefExpr->getType()))) {
+ !getASTContext().hasSameUnqualifiedType(
+ DSAStack->getOMPDependT(), RefExpr->getType()))) {
Diag(ELoc, diag::err_omp_expected_omp_depend_t_lvalue)
<< 0 << RefExpr->getType() << RefExpr->getSourceRange();
continue;
@@ -21083,21 +19899,23 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data,
// List items used in depend clauses cannot be zero-length array
// sections.
QualType ExprTy = RefExpr->getType().getNonReferenceType();
- const auto *OASE = dyn_cast<OMPArraySectionExpr>(SimpleExpr);
+ const auto *OASE = dyn_cast<ArraySectionExpr>(SimpleExpr);
if (OASE) {
QualType BaseType =
- OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
+ ArraySectionExpr::getBaseOriginalType(OASE->getBase());
if (BaseType.isNull())
return nullptr;
if (const auto *ATy = BaseType->getAsArrayTypeUnsafe())
ExprTy = ATy->getElementType();
else
ExprTy = BaseType->getPointeeType();
+ if (BaseType.isNull() || ExprTy.isNull())
+ return nullptr;
ExprTy = ExprTy.getNonReferenceType();
const Expr *Length = OASE->getLength();
Expr::EvalResult Result;
if (Length && !Length->isValueDependent() &&
- Length->EvaluateAsInt(Result, Context) &&
+ Length->EvaluateAsInt(Result, getASTContext()) &&
Result.Val.getInt().isZero()) {
Diag(ELoc,
diag::err_omp_depend_zero_length_array_section_not_allowed)
@@ -21117,8 +19935,9 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data,
(OMPDependTFound && DSAStack->getOMPDependT().getTypePtr() ==
ExprTy.getTypePtr()))) {
Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
- << (LangOpts.OpenMP >= 50 ? 1 : 0)
- << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange();
+ << (getLangOpts().OpenMP >= 50 ? 1 : 0)
+ << (getLangOpts().OpenMP >= 50 ? 1 : 0)
+ << RefExpr->getSourceRange();
continue;
}
@@ -21130,22 +19949,24 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data,
->isPointerType() &&
!ASE->getBase()->getType().getNonReferenceType()->isArrayType()) {
Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
- << (LangOpts.OpenMP >= 50 ? 1 : 0)
- << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange();
+ << (getLangOpts().OpenMP >= 50 ? 1 : 0)
+ << (getLangOpts().OpenMP >= 50 ? 1 : 0)
+ << RefExpr->getSourceRange();
continue;
}
ExprResult Res;
{
- Sema::TentativeAnalysisScope Trap(*this);
- Res = CreateBuiltinUnaryOp(ELoc, UO_AddrOf,
- RefExpr->IgnoreParenImpCasts());
+ Sema::TentativeAnalysisScope Trap(SemaRef);
+ Res = SemaRef.CreateBuiltinUnaryOp(ELoc, UO_AddrOf,
+ RefExpr->IgnoreParenImpCasts());
}
- if (!Res.isUsable() && !isa<OMPArraySectionExpr>(SimpleExpr) &&
+ if (!Res.isUsable() && !isa<ArraySectionExpr>(SimpleExpr) &&
!isa<OMPArrayShapingExpr>(SimpleExpr)) {
Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
- << (LangOpts.OpenMP >= 50 ? 1 : 0)
- << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange();
+ << (getLangOpts().OpenMP >= 50 ? 1 : 0)
+ << (getLangOpts().OpenMP >= 50 ? 1 : 0)
+ << RefExpr->getSourceRange();
continue;
}
}
@@ -21160,7 +19981,7 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data,
return nullptr;
auto *C = OMPDependClause::Create(
- Context, StartLoc, LParenLoc, EndLoc,
+ getASTContext(), StartLoc, LParenLoc, EndLoc,
{DepKind, DepLoc, Data.ColonLoc, Data.OmpAllMemoryLoc}, DepModifier, Vars,
TotalDepCount.getZExtValue());
if ((DepKind == OMPC_DEPEND_sink || DepKind == OMPC_DEPEND_source) &&
@@ -21169,12 +19990,11 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data,
return C;
}
-OMPClause *Sema::ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
- Expr *Device, SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation ModifierLoc,
- SourceLocation EndLoc) {
- assert((ModifierLoc.isInvalid() || LangOpts.OpenMP >= 50) &&
+OMPClause *SemaOpenMP::ActOnOpenMPDeviceClause(
+ OpenMPDeviceClauseModifier Modifier, Expr *Device, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation ModifierLoc,
+ SourceLocation EndLoc) {
+ assert((ModifierLoc.isInvalid() || getLangOpts().OpenMP >= 50) &&
"Unexpected device modifier in OpenMP < 50.");
bool ErrorFound = false;
@@ -21191,7 +20011,7 @@ OMPClause *Sema::ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
// OpenMP [2.9.1, Restrictions]
// The device expression must evaluate to a non-negative integer value.
- ErrorFound = !isNonNegativeIntegerValue(ValExpr, *this, OMPC_device,
+ ErrorFound = !isNonNegativeIntegerValue(ValExpr, SemaRef, OMPC_device,
/*StrictlyPositive=*/false) ||
ErrorFound;
if (ErrorFound)
@@ -21202,7 +20022,7 @@ OMPClause *Sema::ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
// the reverse_offload clause must be specified.
if (Modifier == OMPC_DEVICE_ancestor) {
if (!DSAStack->hasRequiresDeclWithClause<OMPReverseOffloadClause>()) {
- targetDiag(
+ SemaRef.targetDiag(
StartLoc,
diag::err_omp_device_ancestor_without_requires_reverse_offload);
ErrorFound = true;
@@ -21211,15 +20031,16 @@ OMPClause *Sema::ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
OpenMPDirectiveKind CaptureRegion =
- getOpenMPCaptureRegionForClause(DKind, OMPC_device, LangOpts.OpenMP);
- if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
- ValExpr = MakeFullExpr(ValExpr).get();
+ getOpenMPCaptureRegionForClause(DKind, OMPC_device, getLangOpts().OpenMP);
+ if (CaptureRegion != OMPD_unknown &&
+ !SemaRef.CurContext->isDependentContext()) {
+ ValExpr = SemaRef.MakeFullExpr(ValExpr).get();
llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
- ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
- HelperValStmt = buildPreInits(Context, Captures);
+ ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get();
+ HelperValStmt = buildPreInits(getASTContext(), Captures);
}
- return new (Context)
+ return new (getASTContext())
OMPDeviceClause(Modifier, ValExpr, HelperValStmt, CaptureRegion, StartLoc,
LParenLoc, ModifierLoc, EndLoc);
}
@@ -21241,7 +20062,7 @@ static bool checkTypeMappable(SourceLocation SL, SourceRange SR, Sema &SemaRef,
static bool checkArrayExpressionDoesNotReferToWholeSize(Sema &SemaRef,
const Expr *E,
QualType BaseQTy) {
- const auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
+ const auto *OASE = dyn_cast<ArraySectionExpr>(E);
// If this is an array subscript, it refers to the whole size if the size of
// the dimension is constant and equals 1. Also, an array section assumes the
@@ -21249,7 +20070,7 @@ static bool checkArrayExpressionDoesNotReferToWholeSize(Sema &SemaRef,
if (isa<ArraySubscriptExpr>(E) ||
(OASE && OASE->getColonLocFirst().isInvalid())) {
if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
- return ATy->getSize().getSExtValue() != 1;
+ return ATy->getSExtSize() != 1;
// Size can't be evaluated statically.
return false;
}
@@ -21290,7 +20111,7 @@ static bool checkArrayExpressionDoesNotReferToWholeSize(Sema &SemaRef,
return false; // Can't get the integer value as a constant.
llvm::APSInt ConstLength = Result.Val.getInt();
- return CATy->getSize().getSExtValue() != ConstLength.getSExtValue();
+ return CATy->getSExtSize() != ConstLength.getSExtValue();
}
// Return true if it can be proven that the provided array expression (array
@@ -21299,7 +20120,7 @@ static bool checkArrayExpressionDoesNotReferToWholeSize(Sema &SemaRef,
static bool checkArrayExpressionDoesNotReferToUnitySize(Sema &SemaRef,
const Expr *E,
QualType BaseQTy) {
- const auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
+ const auto *OASE = dyn_cast<ArraySectionExpr>(E);
// An array subscript always refer to a single element. Also, an array section
// assumes the format of an array subscript if no colon is used.
@@ -21315,7 +20136,7 @@ static bool checkArrayExpressionDoesNotReferToUnitySize(Sema &SemaRef,
// is pointer.
if (!Length) {
if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
- return ATy->getSize().getSExtValue() != 1;
+ return ATy->getSExtSize() != 1;
// We cannot assume anything.
return false;
}
@@ -21514,14 +20335,14 @@ public:
return RelevantExpr || Visit(E);
}
- bool VisitOMPArraySectionExpr(OMPArraySectionExpr *OASE) {
+ bool VisitArraySectionExpr(ArraySectionExpr *OASE) {
// After OMP 5.0 Array section in reduction clause will be implicitly
// mapped
assert(!(SemaRef.getLangOpts().OpenMP < 50 && NoDiagnose) &&
"Array sections cannot be implicitly mapped.");
Expr *E = OASE->getBase()->IgnoreParenImpCasts();
QualType CurType =
- OMPArraySectionExpr::getBaseOriginalType(E).getCanonicalType();
+ ArraySectionExpr::getBaseOriginalType(E).getCanonicalType();
// OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C++, p.1]
// If the type of a list item is a reference to a type T then the type
@@ -21694,7 +20515,7 @@ static const Expr *checkMapClauseExpressionBase(
auto CE = CurComponents.rend();
for (; CI != CE; ++CI) {
const auto *OASE =
- dyn_cast<OMPArraySectionExpr>(CI->getAssociatedExpression());
+ dyn_cast<ArraySectionExpr>(CI->getAssociatedExpression());
if (!OASE)
continue;
if (OASE && OASE->getLength())
@@ -21764,10 +20585,10 @@ static bool checkMapConflicts(
// variable in map clauses of the same construct.
if (CurrentRegionOnly &&
(isa<ArraySubscriptExpr>(CI->getAssociatedExpression()) ||
- isa<OMPArraySectionExpr>(CI->getAssociatedExpression()) ||
+ isa<ArraySectionExpr>(CI->getAssociatedExpression()) ||
isa<OMPArrayShapingExpr>(CI->getAssociatedExpression())) &&
(isa<ArraySubscriptExpr>(SI->getAssociatedExpression()) ||
- isa<OMPArraySectionExpr>(SI->getAssociatedExpression()) ||
+ isa<ArraySectionExpr>(SI->getAssociatedExpression()) ||
isa<OMPArrayShapingExpr>(SI->getAssociatedExpression()))) {
SemaRef.Diag(CI->getAssociatedExpression()->getExprLoc(),
diag::err_omp_multiple_array_items_in_map_clause)
@@ -21795,11 +20616,10 @@ static bool checkMapConflicts(
if (const auto *ASE =
dyn_cast<ArraySubscriptExpr>(SI->getAssociatedExpression())) {
Type = ASE->getBase()->IgnoreParenImpCasts()->getType();
- } else if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(
+ } else if (const auto *OASE = dyn_cast<ArraySectionExpr>(
SI->getAssociatedExpression())) {
const Expr *E = OASE->getBase()->IgnoreParenImpCasts();
- Type =
- OMPArraySectionExpr::getBaseOriginalType(E).getCanonicalType();
+ Type = ArraySectionExpr::getBaseOriginalType(E).getCanonicalType();
} else if (const auto *OASE = dyn_cast<OMPArrayShapingExpr>(
SI->getAssociatedExpression())) {
Type = OASE->getBase()->getType()->getPointeeType();
@@ -21975,7 +20795,8 @@ static ExprResult buildUserDefinedMapperRef(Sema &SemaRef, Scope *S,
LookupResult Lookup(SemaRef, MapperId, Sema::LookupOMPMapperName);
Lookup.suppressDiagnostics();
if (S) {
- while (S && SemaRef.LookupParsedName(Lookup, S, &MapperIdScopeSpec)) {
+ while (S && SemaRef.LookupParsedName(Lookup, S, &MapperIdScopeSpec,
+ /*ObjectType=*/QualType())) {
NamedDecl *D = Lookup.getRepresentativeDecl();
while (S && !S->isDeclScope(D))
S = S->getParent();
@@ -22014,7 +20835,8 @@ static ExprResult buildUserDefinedMapperRef(Sema &SemaRef, Scope *S,
return UnresolvedLookupExpr::Create(
SemaRef.Context, /*NamingClass=*/nullptr,
MapperIdScopeSpec.getWithLocInContext(SemaRef.Context), MapperId,
- /*ADL=*/false, /*Overloaded=*/true, URS.begin(), URS.end());
+ /*ADL=*/false, URS.begin(), URS.end(), /*KnownDependent=*/false,
+ /*KnownInstantiationDependent=*/false);
}
SourceLocation Loc = MapperId.getLoc();
// [OpenMP 5.0], 2.19.7.3 declare mapper Directive, Restrictions
@@ -22274,13 +21096,13 @@ static void checkMappableExpressionList(
(void)I;
QualType Type;
auto *ASE = dyn_cast<ArraySubscriptExpr>(VE->IgnoreParens());
- auto *OASE = dyn_cast<OMPArraySectionExpr>(VE->IgnoreParens());
+ auto *OASE = dyn_cast<ArraySectionExpr>(VE->IgnoreParens());
auto *OAShE = dyn_cast<OMPArrayShapingExpr>(VE->IgnoreParens());
if (ASE) {
Type = ASE->getType().getNonReferenceType();
} else if (OASE) {
QualType BaseType =
- OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
+ ArraySectionExpr::getBaseOriginalType(OASE->getBase());
if (const auto *ATy = BaseType->getAsArrayTypeUnsafe())
Type = ATy->getElementType();
else
@@ -22412,7 +21234,7 @@ static void checkMappableExpressionList(
}
}
-OMPClause *Sema::ActOnOpenMPMapClause(
+OMPClause *SemaOpenMP::ActOnOpenMPMapClause(
Expr *IteratorModifier, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId,
@@ -22447,7 +21269,7 @@ OMPClause *Sema::ActOnOpenMPMapClause(
}
MappableVarListInfo MVLI(VarList);
- checkMappableExpressionList(*this, DSAStack, OMPC_map, MVLI, Locs.StartLoc,
+ checkMappableExpressionList(SemaRef, DSAStack, OMPC_map, MVLI, Locs.StartLoc,
MapperIdScopeSpec, MapperId, UnresolvedMappers,
MapType, Modifiers, IsMapTypeImplicit,
NoDiagnose);
@@ -22455,17 +21277,17 @@ OMPClause *Sema::ActOnOpenMPMapClause(
// We need to produce a map clause even if we don't have variables so that
// other diagnostics related with non-existing map clauses are accurate.
return OMPMapClause::Create(
- Context, Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations,
+ getASTContext(), Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations,
MVLI.VarComponents, MVLI.UDMapperList, IteratorModifier, Modifiers,
- ModifiersLoc, MapperIdScopeSpec.getWithLocInContext(Context), MapperId,
- MapType, IsMapTypeImplicit, MapLoc);
+ ModifiersLoc, MapperIdScopeSpec.getWithLocInContext(getASTContext()),
+ MapperId, MapType, IsMapTypeImplicit, MapLoc);
}
-QualType Sema::ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
- TypeResult ParsedType) {
+QualType SemaOpenMP::ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
+ TypeResult ParsedType) {
assert(ParsedType.isUsable());
- QualType ReductionType = GetTypeFromParser(ParsedType.get());
+ QualType ReductionType = SemaRef.GetTypeFromParser(ParsedType.get());
if (ReductionType.isNull())
return QualType();
@@ -22493,15 +21315,17 @@ QualType Sema::ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
return ReductionType;
}
-Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareReductionDirectiveStart(
+SemaOpenMP::DeclGroupPtrTy
+SemaOpenMP::ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope) {
SmallVector<Decl *, 8> Decls;
Decls.reserve(ReductionTypes.size());
- LookupResult Lookup(*this, Name, SourceLocation(), LookupOMPReductionName,
- forRedeclarationInCurContext());
+ LookupResult Lookup(SemaRef, Name, SourceLocation(),
+ Sema::LookupOMPReductionName,
+ SemaRef.forRedeclarationInCurContext());
// [OpenMP 4.0], 2.15 declare reduction Directive, Restrictions
// A reduction-identifier may not be re-declared in the current scope for the
// same type or for a type that is compatible according to the base language
@@ -22512,12 +21336,12 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareReductionDirectiveStart(
if (S != nullptr) {
// Find previous declaration with the same name not referenced in other
// declarations.
- FunctionScopeInfo *ParentFn = getEnclosingFunction();
+ FunctionScopeInfo *ParentFn = SemaRef.getEnclosingFunction();
InCompoundScope =
(ParentFn != nullptr) && !ParentFn->CompoundScopes.empty();
- LookupName(Lookup, S);
- FilterLookupForScope(Lookup, DC, S, /*ConsiderLinkage=*/false,
- /*AllowInlineNamespace=*/false);
+ SemaRef.LookupName(Lookup, S);
+ SemaRef.FilterLookupForScope(Lookup, DC, S, /*ConsiderLinkage=*/false,
+ /*AllowInlineNamespace=*/false);
llvm::DenseMap<OMPDeclareReductionDecl *, bool> UsedAsPrevious;
LookupResult::Filter Filter = Lookup.makeFilter();
while (Filter.hasNext()) {
@@ -22560,8 +21384,8 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareReductionDirectiveStart(
Invalid = true;
}
PreviousRedeclTypes[TyData.first.getCanonicalType()] = TyData.second;
- auto *DRD = OMPDeclareReductionDecl::Create(Context, DC, TyData.second,
- Name, TyData.first, PrevDRD);
+ auto *DRD = OMPDeclareReductionDecl::Create(
+ getASTContext(), DC, TyData.second, Name, TyData.first, PrevDRD);
DC->addDecl(DRD);
DRD->setAccess(AS);
Decls.push_back(DRD);
@@ -22572,24 +21396,24 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareReductionDirectiveStart(
}
return DeclGroupPtrTy::make(
- DeclGroupRef::Create(Context, Decls.begin(), Decls.size()));
+ DeclGroupRef::Create(getASTContext(), Decls.begin(), Decls.size()));
}
-void Sema::ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D) {
+void SemaOpenMP::ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D) {
auto *DRD = cast<OMPDeclareReductionDecl>(D);
// Enter new function scope.
- PushFunctionScope();
- setFunctionHasBranchProtectedScope();
- getCurFunction()->setHasOMPDeclareReductionCombiner();
+ SemaRef.PushFunctionScope();
+ SemaRef.setFunctionHasBranchProtectedScope();
+ SemaRef.getCurFunction()->setHasOMPDeclareReductionCombiner();
if (S != nullptr)
- PushDeclContext(S, DRD);
+ SemaRef.PushDeclContext(S, DRD);
else
- CurContext = DRD;
+ SemaRef.CurContext = DRD;
- PushExpressionEvaluationContext(
- ExpressionEvaluationContext::PotentiallyEvaluated);
+ SemaRef.PushExpressionEvaluationContext(
+ Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
QualType ReductionType = DRD->getType();
// Create 'T* omp_parm;T omp_in;'. All references to 'omp_in' will
@@ -22599,7 +21423,7 @@ void Sema::ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D) {
// pointers.
// Create 'T omp_in;' variable.
VarDecl *OmpInParm =
- buildVarDecl(*this, D->getLocation(), ReductionType, "omp_in");
+ buildVarDecl(SemaRef, D->getLocation(), ReductionType, "omp_in");
// Create 'T* omp_parm;T omp_out;'. All references to 'omp_out' will
// be replaced by '*omp_parm' during codegen. This required because 'omp_out'
// uses semantics of argument handles by value, but it should be passed by
@@ -22607,28 +21431,29 @@ void Sema::ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D) {
// pointers.
// Create 'T omp_out;' variable.
VarDecl *OmpOutParm =
- buildVarDecl(*this, D->getLocation(), ReductionType, "omp_out");
+ buildVarDecl(SemaRef, D->getLocation(), ReductionType, "omp_out");
if (S != nullptr) {
- PushOnScopeChains(OmpInParm, S);
- PushOnScopeChains(OmpOutParm, S);
+ SemaRef.PushOnScopeChains(OmpInParm, S);
+ SemaRef.PushOnScopeChains(OmpOutParm, S);
} else {
DRD->addDecl(OmpInParm);
DRD->addDecl(OmpOutParm);
}
Expr *InE =
- ::buildDeclRefExpr(*this, OmpInParm, ReductionType, D->getLocation());
+ ::buildDeclRefExpr(SemaRef, OmpInParm, ReductionType, D->getLocation());
Expr *OutE =
- ::buildDeclRefExpr(*this, OmpOutParm, ReductionType, D->getLocation());
+ ::buildDeclRefExpr(SemaRef, OmpOutParm, ReductionType, D->getLocation());
DRD->setCombinerData(InE, OutE);
}
-void Sema::ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner) {
+void SemaOpenMP::ActOnOpenMPDeclareReductionCombinerEnd(Decl *D,
+ Expr *Combiner) {
auto *DRD = cast<OMPDeclareReductionDecl>(D);
- DiscardCleanupsInEvaluationContext();
- PopExpressionEvaluationContext();
+ SemaRef.DiscardCleanupsInEvaluationContext();
+ SemaRef.PopExpressionEvaluationContext();
- PopDeclContext();
- PopFunctionScopeInfo();
+ SemaRef.PopDeclContext();
+ SemaRef.PopFunctionScopeInfo();
if (Combiner != nullptr)
DRD->setCombiner(Combiner);
@@ -22636,20 +21461,21 @@ void Sema::ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner) {
DRD->setInvalidDecl();
}
-VarDecl *Sema::ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D) {
+VarDecl *SemaOpenMP::ActOnOpenMPDeclareReductionInitializerStart(Scope *S,
+ Decl *D) {
auto *DRD = cast<OMPDeclareReductionDecl>(D);
// Enter new function scope.
- PushFunctionScope();
- setFunctionHasBranchProtectedScope();
+ SemaRef.PushFunctionScope();
+ SemaRef.setFunctionHasBranchProtectedScope();
if (S != nullptr)
- PushDeclContext(S, DRD);
+ SemaRef.PushDeclContext(S, DRD);
else
- CurContext = DRD;
+ SemaRef.CurContext = DRD;
- PushExpressionEvaluationContext(
- ExpressionEvaluationContext::PotentiallyEvaluated);
+ SemaRef.PushExpressionEvaluationContext(
+ Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
QualType ReductionType = DRD->getType();
// Create 'T* omp_parm;T omp_priv;'. All references to 'omp_priv' will
@@ -22659,7 +21485,7 @@ VarDecl *Sema::ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D) {
// pointers.
// Create 'T omp_priv;' variable.
VarDecl *OmpPrivParm =
- buildVarDecl(*this, D->getLocation(), ReductionType, "omp_priv");
+ buildVarDecl(SemaRef, D->getLocation(), ReductionType, "omp_priv");
// Create 'T* omp_parm;T omp_orig;'. All references to 'omp_orig' will
// be replaced by '*omp_parm' during codegen. This required because 'omp_orig'
// uses semantics of argument handles by value, but it should be passed by
@@ -22667,30 +21493,30 @@ VarDecl *Sema::ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D) {
// pointers.
// Create 'T omp_orig;' variable.
VarDecl *OmpOrigParm =
- buildVarDecl(*this, D->getLocation(), ReductionType, "omp_orig");
+ buildVarDecl(SemaRef, D->getLocation(), ReductionType, "omp_orig");
if (S != nullptr) {
- PushOnScopeChains(OmpPrivParm, S);
- PushOnScopeChains(OmpOrigParm, S);
+ SemaRef.PushOnScopeChains(OmpPrivParm, S);
+ SemaRef.PushOnScopeChains(OmpOrigParm, S);
} else {
DRD->addDecl(OmpPrivParm);
DRD->addDecl(OmpOrigParm);
}
Expr *OrigE =
- ::buildDeclRefExpr(*this, OmpOrigParm, ReductionType, D->getLocation());
+ ::buildDeclRefExpr(SemaRef, OmpOrigParm, ReductionType, D->getLocation());
Expr *PrivE =
- ::buildDeclRefExpr(*this, OmpPrivParm, ReductionType, D->getLocation());
+ ::buildDeclRefExpr(SemaRef, OmpPrivParm, ReductionType, D->getLocation());
DRD->setInitializerData(OrigE, PrivE);
return OmpPrivParm;
}
-void Sema::ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
- VarDecl *OmpPrivParm) {
+void SemaOpenMP::ActOnOpenMPDeclareReductionInitializerEnd(
+ Decl *D, Expr *Initializer, VarDecl *OmpPrivParm) {
auto *DRD = cast<OMPDeclareReductionDecl>(D);
- DiscardCleanupsInEvaluationContext();
- PopExpressionEvaluationContext();
+ SemaRef.DiscardCleanupsInEvaluationContext();
+ SemaRef.PopExpressionEvaluationContext();
- PopDeclContext();
- PopFunctionScopeInfo();
+ SemaRef.PopDeclContext();
+ SemaRef.PopFunctionScopeInfo();
if (Initializer != nullptr) {
DRD->setInitializer(Initializer, OMPDeclareReductionInitKind::Call);
@@ -22704,13 +21530,13 @@ void Sema::ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
}
}
-Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareReductionDirectiveEnd(
+SemaOpenMP::DeclGroupPtrTy SemaOpenMP::ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid) {
for (Decl *D : DeclReductions.get()) {
if (IsValid) {
if (S)
- PushOnScopeChains(cast<OMPDeclareReductionDecl>(D), S,
- /*AddToContext=*/false);
+ SemaRef.PushOnScopeChains(cast<OMPDeclareReductionDecl>(D), S,
+ /*AddToContext=*/false);
} else {
D->setInvalidDecl();
}
@@ -22718,25 +21544,26 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareReductionDirectiveEnd(
return DeclReductions;
}
-TypeResult Sema::ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D) {
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
+TypeResult SemaOpenMP::ActOnOpenMPDeclareMapperVarDecl(Scope *S,
+ Declarator &D) {
+ TypeSourceInfo *TInfo = SemaRef.GetTypeForDeclarator(D);
QualType T = TInfo->getType();
if (D.isInvalidType())
return true;
if (getLangOpts().CPlusPlus) {
// Check that there are no default arguments (C++ only).
- CheckExtraCXXDefaultArguments(D);
+ SemaRef.CheckExtraCXXDefaultArguments(D);
}
- return CreateParsedType(T, TInfo);
+ return SemaRef.CreateParsedType(T, TInfo);
}
-QualType Sema::ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
- TypeResult ParsedType) {
+QualType SemaOpenMP::ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
+ TypeResult ParsedType) {
assert(ParsedType.isUsable() && "Expect usable parsed mapper type");
- QualType MapperType = GetTypeFromParser(ParsedType.get());
+ QualType MapperType = SemaRef.GetTypeFromParser(ParsedType.get());
assert(!MapperType.isNull() && "Expect valid mapper type");
// [OpenMP 5.0], 2.19.7.3 declare mapper Directive, Restrictions
@@ -22748,12 +21575,13 @@ QualType Sema::ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
return MapperType;
}
-Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareMapperDirective(
+SemaOpenMP::DeclGroupPtrTy SemaOpenMP::ActOnOpenMPDeclareMapperDirective(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses, Decl *PrevDeclInScope) {
- LookupResult Lookup(*this, Name, SourceLocation(), LookupOMPMapperName,
- forRedeclarationInCurContext());
+ LookupResult Lookup(SemaRef, Name, SourceLocation(),
+ Sema::LookupOMPMapperName,
+ SemaRef.forRedeclarationInCurContext());
// [OpenMP 5.0], 2.19.7.3 declare mapper Directive, Restrictions
// A mapper-identifier may not be redeclared in the current scope for the
// same type or for a type that is compatible according to the base language
@@ -22764,12 +21592,12 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareMapperDirective(
if (S != nullptr) {
// Find previous declaration with the same name not referenced in other
// declarations.
- FunctionScopeInfo *ParentFn = getEnclosingFunction();
+ FunctionScopeInfo *ParentFn = SemaRef.getEnclosingFunction();
InCompoundScope =
(ParentFn != nullptr) && !ParentFn->CompoundScopes.empty();
- LookupName(Lookup, S);
- FilterLookupForScope(Lookup, DC, S, /*ConsiderLinkage=*/false,
- /*AllowInlineNamespace=*/false);
+ SemaRef.LookupName(Lookup, S);
+ SemaRef.FilterLookupForScope(Lookup, DC, S, /*ConsiderLinkage=*/false,
+ /*AllowInlineNamespace=*/false);
llvm::DenseMap<OMPDeclareMapperDecl *, bool> UsedAsPrevious;
LookupResult::Filter Filter = Lookup.makeFilter();
while (Filter.hasNext()) {
@@ -22814,13 +21642,14 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareMapperDirective(
// mappers.
SmallVector<OMPClause *, 4> ClausesWithImplicit(Clauses.begin(),
Clauses.end());
- if (LangOpts.OpenMP >= 50)
- processImplicitMapsWithDefaultMappers(*this, DSAStack, ClausesWithImplicit);
- auto *DMD =
- OMPDeclareMapperDecl::Create(Context, DC, StartLoc, Name, MapperType, VN,
- ClausesWithImplicit, PrevDMD);
+ if (getLangOpts().OpenMP >= 50)
+ processImplicitMapsWithDefaultMappers(SemaRef, DSAStack,
+ ClausesWithImplicit);
+ auto *DMD = OMPDeclareMapperDecl::Create(getASTContext(), DC, StartLoc, Name,
+ MapperType, VN, ClausesWithImplicit,
+ PrevDMD);
if (S)
- PushOnScopeChains(DMD, S);
+ SemaRef.PushOnScopeChains(DMD, S);
else
DC->addDecl(DMD);
DMD->setAccess(AS);
@@ -22836,105 +21665,106 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareMapperDirective(
return DeclGroupPtrTy::make(DeclGroupRef(DMD));
}
-ExprResult
-Sema::ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, QualType MapperType,
- SourceLocation StartLoc,
- DeclarationName VN) {
+ExprResult SemaOpenMP::ActOnOpenMPDeclareMapperDirectiveVarDecl(
+ Scope *S, QualType MapperType, SourceLocation StartLoc,
+ DeclarationName VN) {
TypeSourceInfo *TInfo =
- Context.getTrivialTypeSourceInfo(MapperType, StartLoc);
- auto *VD = VarDecl::Create(Context, Context.getTranslationUnitDecl(),
- StartLoc, StartLoc, VN.getAsIdentifierInfo(),
- MapperType, TInfo, SC_None);
+ getASTContext().getTrivialTypeSourceInfo(MapperType, StartLoc);
+ auto *VD = VarDecl::Create(
+ getASTContext(), getASTContext().getTranslationUnitDecl(), StartLoc,
+ StartLoc, VN.getAsIdentifierInfo(), MapperType, TInfo, SC_None);
if (S)
- PushOnScopeChains(VD, S, /*AddToContext=*/false);
- Expr *E = buildDeclRefExpr(*this, VD, MapperType, StartLoc);
+ SemaRef.PushOnScopeChains(VD, S, /*AddToContext=*/false);
+ Expr *E = buildDeclRefExpr(SemaRef, VD, MapperType, StartLoc);
DSAStack->addDeclareMapperVarRef(E);
return E;
}
-void Sema::ActOnOpenMPIteratorVarDecl(VarDecl *VD) {
+void SemaOpenMP::ActOnOpenMPIteratorVarDecl(VarDecl *VD) {
if (DSAStack->getDeclareMapperVarRef())
DSAStack->addIteratorVarDecl(VD);
}
-bool Sema::isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const {
- assert(LangOpts.OpenMP && "Expected OpenMP mode.");
+bool SemaOpenMP::isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const {
+ assert(getLangOpts().OpenMP && "Expected OpenMP mode.");
const Expr *Ref = DSAStack->getDeclareMapperVarRef();
if (const auto *DRE = cast_or_null<DeclRefExpr>(Ref)) {
if (VD->getCanonicalDecl() == DRE->getDecl()->getCanonicalDecl())
return true;
- if (VD->isUsableInConstantExpressions(Context))
+ if (VD->isUsableInConstantExpressions(getASTContext()))
return true;
- if (LangOpts.OpenMP >= 52 && DSAStack->isIteratorVarDecl(VD))
+ if (getLangOpts().OpenMP >= 52 && DSAStack->isIteratorVarDecl(VD))
return true;
return false;
}
return true;
}
-const ValueDecl *Sema::getOpenMPDeclareMapperVarName() const {
- assert(LangOpts.OpenMP && "Expected OpenMP mode.");
+const ValueDecl *SemaOpenMP::getOpenMPDeclareMapperVarName() const {
+ assert(getLangOpts().OpenMP && "Expected OpenMP mode.");
return cast<DeclRefExpr>(DSAStack->getDeclareMapperVarRef())->getDecl();
}
-OMPClause *Sema::ActOnOpenMPNumTeamsClause(Expr *NumTeams,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPNumTeamsClause(Expr *NumTeams,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
Expr *ValExpr = NumTeams;
Stmt *HelperValStmt = nullptr;
// OpenMP [teams Constrcut, Restrictions]
// The num_teams expression must evaluate to a positive integer value.
- if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_num_teams,
+ if (!isNonNegativeIntegerValue(ValExpr, SemaRef, OMPC_num_teams,
/*StrictlyPositive=*/true))
return nullptr;
OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
- OpenMPDirectiveKind CaptureRegion =
- getOpenMPCaptureRegionForClause(DKind, OMPC_num_teams, LangOpts.OpenMP);
- if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
- ValExpr = MakeFullExpr(ValExpr).get();
+ OpenMPDirectiveKind CaptureRegion = getOpenMPCaptureRegionForClause(
+ DKind, OMPC_num_teams, getLangOpts().OpenMP);
+ if (CaptureRegion != OMPD_unknown &&
+ !SemaRef.CurContext->isDependentContext()) {
+ ValExpr = SemaRef.MakeFullExpr(ValExpr).get();
llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
- ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
- HelperValStmt = buildPreInits(Context, Captures);
+ ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get();
+ HelperValStmt = buildPreInits(getASTContext(), Captures);
}
- return new (Context) OMPNumTeamsClause(ValExpr, HelperValStmt, CaptureRegion,
- StartLoc, LParenLoc, EndLoc);
+ return new (getASTContext()) OMPNumTeamsClause(
+ ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
Expr *ValExpr = ThreadLimit;
Stmt *HelperValStmt = nullptr;
// OpenMP [teams Constrcut, Restrictions]
// The thread_limit expression must evaluate to a positive integer value.
- if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_thread_limit,
+ if (!isNonNegativeIntegerValue(ValExpr, SemaRef, OMPC_thread_limit,
/*StrictlyPositive=*/true))
return nullptr;
OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
OpenMPDirectiveKind CaptureRegion = getOpenMPCaptureRegionForClause(
- DKind, OMPC_thread_limit, LangOpts.OpenMP);
- if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
- ValExpr = MakeFullExpr(ValExpr).get();
+ DKind, OMPC_thread_limit, getLangOpts().OpenMP);
+ if (CaptureRegion != OMPD_unknown &&
+ !SemaRef.CurContext->isDependentContext()) {
+ ValExpr = SemaRef.MakeFullExpr(ValExpr).get();
llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
- ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
- HelperValStmt = buildPreInits(Context, Captures);
+ ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get();
+ HelperValStmt = buildPreInits(getASTContext(), Captures);
}
- return new (Context) OMPThreadLimitClause(
+ return new (getASTContext()) OMPThreadLimitClause(
ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPPriorityClause(Expr *Priority,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPPriorityClause(Expr *Priority,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
Expr *ValExpr = Priority;
Stmt *HelperValStmt = nullptr;
OpenMPDirectiveKind CaptureRegion = OMPD_unknown;
@@ -22942,20 +21772,20 @@ OMPClause *Sema::ActOnOpenMPPriorityClause(Expr *Priority,
// OpenMP [2.9.1, task Constrcut]
// The priority-value is a non-negative numerical scalar expression.
if (!isNonNegativeIntegerValue(
- ValExpr, *this, OMPC_priority,
+ ValExpr, SemaRef, OMPC_priority,
/*StrictlyPositive=*/false, /*BuildCapture=*/true,
DSAStack->getCurrentDirective(), &CaptureRegion, &HelperValStmt))
return nullptr;
- return new (Context) OMPPriorityClause(ValExpr, HelperValStmt, CaptureRegion,
- StartLoc, LParenLoc, EndLoc);
+ return new (getASTContext()) OMPPriorityClause(
+ ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPGrainsizeClause(
+OMPClause *SemaOpenMP::ActOnOpenMPGrainsizeClause(
OpenMPGrainsizeClauseModifier Modifier, Expr *Grainsize,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation EndLoc) {
- assert((ModifierLoc.isInvalid() || LangOpts.OpenMP >= 51) &&
+ assert((ModifierLoc.isInvalid() || getLangOpts().OpenMP >= 51) &&
"Unexpected grainsize modifier in OpenMP < 51.");
if (ModifierLoc.isValid() && Modifier == OMPC_GRAINSIZE_unknown) {
@@ -22973,23 +21803,23 @@ OMPClause *Sema::ActOnOpenMPGrainsizeClause(
// OpenMP [2.9.2, taskloop Constrcut]
// The parameter of the grainsize clause must be a positive integer
// expression.
- if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_grainsize,
+ if (!isNonNegativeIntegerValue(ValExpr, SemaRef, OMPC_grainsize,
/*StrictlyPositive=*/true,
/*BuildCapture=*/true,
DSAStack->getCurrentDirective(),
&CaptureRegion, &HelperValStmt))
return nullptr;
- return new (Context)
+ return new (getASTContext())
OMPGrainsizeClause(Modifier, ValExpr, HelperValStmt, CaptureRegion,
StartLoc, LParenLoc, ModifierLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPNumTasksClause(
+OMPClause *SemaOpenMP::ActOnOpenMPNumTasksClause(
OpenMPNumTasksClauseModifier Modifier, Expr *NumTasks,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation EndLoc) {
- assert((ModifierLoc.isInvalid() || LangOpts.OpenMP >= 51) &&
+ assert((ModifierLoc.isInvalid() || getLangOpts().OpenMP >= 51) &&
"Unexpected num_tasks modifier in OpenMP < 51.");
if (ModifierLoc.isValid() && Modifier == OMPC_NUMTASKS_unknown) {
@@ -23008,19 +21838,20 @@ OMPClause *Sema::ActOnOpenMPNumTasksClause(
// The parameter of the num_tasks clause must be a positive integer
// expression.
if (!isNonNegativeIntegerValue(
- ValExpr, *this, OMPC_num_tasks,
+ ValExpr, SemaRef, OMPC_num_tasks,
/*StrictlyPositive=*/true, /*BuildCapture=*/true,
DSAStack->getCurrentDirective(), &CaptureRegion, &HelperValStmt))
return nullptr;
- return new (Context)
+ return new (getASTContext())
OMPNumTasksClause(Modifier, ValExpr, HelperValStmt, CaptureRegion,
StartLoc, LParenLoc, ModifierLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPHintClause(Expr *Hint,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
// OpenMP [2.13.2, critical construct, Description]
// ... where hint-expression is an integer constant expression that evaluates
// to a valid lock hint.
@@ -23028,7 +21859,7 @@ OMPClause *Sema::ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
VerifyPositiveIntegerConstantInClause(Hint, OMPC_hint, false);
if (HintExpr.isInvalid())
return nullptr;
- return new (Context)
+ return new (getASTContext())
OMPHintClause(HintExpr.get(), StartLoc, LParenLoc, EndLoc);
}
@@ -23048,13 +21879,14 @@ static bool findOMPEventHandleT(Sema &S, SourceLocation Loc,
return true;
}
-OMPClause *Sema::ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPDetachClause(Expr *Evt,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
if (!Evt->isValueDependent() && !Evt->isTypeDependent() &&
!Evt->isInstantiationDependent() &&
!Evt->containsUnexpandedParameterPack()) {
- if (!findOMPEventHandleT(*this, Evt->getExprLoc(), DSAStack))
+ if (!findOMPEventHandleT(SemaRef, Evt->getExprLoc(), DSAStack))
return nullptr;
// OpenMP 5.0, 2.10.1 task Construct.
// event-handle is a variable of the omp_event_handle_t type.
@@ -23070,9 +21902,9 @@ OMPClause *Sema::ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
<< "omp_event_handle_t" << 0 << Evt->getSourceRange();
return nullptr;
}
- if (!Context.hasSameUnqualifiedType(DSAStack->getOMPEventHandleT(),
- VD->getType()) ||
- VD->getType().isConstant(Context)) {
+ if (!getASTContext().hasSameUnqualifiedType(DSAStack->getOMPEventHandleT(),
+ VD->getType()) ||
+ VD->getType().isConstant(getASTContext())) {
Diag(Evt->getExprLoc(), diag::err_omp_var_expected)
<< "omp_event_handle_t" << 1 << VD->getType()
<< Evt->getSourceRange();
@@ -23087,15 +21919,16 @@ OMPClause *Sema::ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
Diag(Evt->getExprLoc(), diag::err_omp_wrong_dsa)
<< getOpenMPClauseName(DVar.CKind)
<< getOpenMPClauseName(OMPC_firstprivate);
- reportOriginalDsa(*this, DSAStack, VD, DVar);
+ reportOriginalDsa(SemaRef, DSAStack, VD, DVar);
return nullptr;
}
}
- return new (Context) OMPDetachClause(Evt, StartLoc, LParenLoc, EndLoc);
+ return new (getASTContext())
+ OMPDetachClause(Evt, StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPDistScheduleClause(
+OMPClause *SemaOpenMP::ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc,
SourceLocation EndLoc) {
@@ -23126,7 +21959,7 @@ OMPClause *Sema::ActOnOpenMPDistScheduleClause(
// chunk_size must be a loop invariant integer expression with a positive
// value.
if (std::optional<llvm::APSInt> Result =
- ValExpr->getIntegerConstantExpr(Context)) {
+ ValExpr->getIntegerConstantExpr(getASTContext())) {
if (Result->isSigned() && !Result->isStrictlyPositive()) {
Diag(ChunkSizeLoc, diag::err_omp_negative_expression_in_clause)
<< "dist_schedule" << ChunkSize->getSourceRange();
@@ -23134,22 +21967,22 @@ OMPClause *Sema::ActOnOpenMPDistScheduleClause(
}
} else if (getOpenMPCaptureRegionForClause(
DSAStack->getCurrentDirective(), OMPC_dist_schedule,
- LangOpts.OpenMP) != OMPD_unknown &&
- !CurContext->isDependentContext()) {
- ValExpr = MakeFullExpr(ValExpr).get();
+ getLangOpts().OpenMP) != OMPD_unknown &&
+ !SemaRef.CurContext->isDependentContext()) {
+ ValExpr = SemaRef.MakeFullExpr(ValExpr).get();
llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
- ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
- HelperValStmt = buildPreInits(Context, Captures);
+ ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get();
+ HelperValStmt = buildPreInits(getASTContext(), Captures);
}
}
}
- return new (Context)
+ return new (getASTContext())
OMPDistScheduleClause(StartLoc, LParenLoc, KindLoc, CommaLoc, EndLoc,
Kind, ValExpr, HelperValStmt);
}
-OMPClause *Sema::ActOnOpenMPDefaultmapClause(
+OMPClause *SemaOpenMP::ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc) {
@@ -23176,10 +22009,10 @@ OMPClause *Sema::ActOnOpenMPDefaultmapClause(
} else {
bool isDefaultmapModifier = (M != OMPC_DEFAULTMAP_MODIFIER_unknown);
bool isDefaultmapKind = (Kind != OMPC_DEFAULTMAP_unknown) ||
- (LangOpts.OpenMP >= 50 && KindLoc.isInvalid());
+ (getLangOpts().OpenMP >= 50 && KindLoc.isInvalid());
if (!isDefaultmapKind || !isDefaultmapModifier) {
StringRef KindValue = "'scalar', 'aggregate', 'pointer'";
- if (LangOpts.OpenMP == 50) {
+ if (getLangOpts().OpenMP == 50) {
StringRef ModifierValue = "'alloc', 'from', 'to', 'tofrom', "
"'firstprivate', 'none', 'default'";
if (!isDefaultmapKind && isDefaultmapModifier) {
@@ -23231,13 +22064,13 @@ OMPClause *Sema::ActOnOpenMPDefaultmapClause(
DSAStack->setDefaultDMAAttr(M, Kind, StartLoc);
}
- return new (Context)
+ return new (getASTContext())
OMPDefaultmapClause(StartLoc, LParenLoc, MLoc, KindLoc, EndLoc, Kind, M);
}
-bool Sema::ActOnStartOpenMPDeclareTargetContext(
+bool SemaOpenMP::ActOnStartOpenMPDeclareTargetContext(
DeclareTargetContextInfo &DTCI) {
- DeclContext *CurLexicalContext = getCurLexicalContext();
+ DeclContext *CurLexicalContext = SemaRef.getCurLexicalContext();
if (!CurLexicalContext->isFileContext() &&
!CurLexicalContext->isExternCContext() &&
!CurLexicalContext->isExternCXXContext() &&
@@ -23257,20 +22090,20 @@ bool Sema::ActOnStartOpenMPDeclareTargetContext(
return true;
}
-const Sema::DeclareTargetContextInfo
-Sema::ActOnOpenMPEndDeclareTargetDirective() {
+const SemaOpenMP::DeclareTargetContextInfo
+SemaOpenMP::ActOnOpenMPEndDeclareTargetDirective() {
assert(!DeclareTargetNesting.empty() &&
"check isInOpenMPDeclareTargetContext() first!");
return DeclareTargetNesting.pop_back_val();
}
-void Sema::ActOnFinishedOpenMPDeclareTargetContext(
+void SemaOpenMP::ActOnFinishedOpenMPDeclareTargetContext(
DeclareTargetContextInfo &DTCI) {
for (auto &It : DTCI.ExplicitlyMapped)
ActOnOpenMPDeclareTargetName(It.first, It.second.Loc, It.second.MT, DTCI);
}
-void Sema::DiagnoseUnterminatedOpenMPDeclareTarget() {
+void SemaOpenMP::DiagnoseUnterminatedOpenMPDeclareTarget() {
if (DeclareTargetNesting.empty())
return;
DeclareTargetContextInfo &DTCI = DeclareTargetNesting.back();
@@ -23278,23 +22111,25 @@ void Sema::DiagnoseUnterminatedOpenMPDeclareTarget() {
<< getOpenMPDirectiveName(DTCI.Kind);
}
-NamedDecl *Sema::lookupOpenMPDeclareTargetName(Scope *CurScope,
- CXXScopeSpec &ScopeSpec,
- const DeclarationNameInfo &Id) {
- LookupResult Lookup(*this, Id, LookupOrdinaryName);
- LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
+NamedDecl *SemaOpenMP::lookupOpenMPDeclareTargetName(
+ Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id) {
+ LookupResult Lookup(SemaRef, Id, Sema::LookupOrdinaryName);
+ SemaRef.LookupParsedName(Lookup, CurScope, &ScopeSpec,
+ /*ObjectType=*/QualType(),
+ /*AllowBuiltinCreation=*/true);
if (Lookup.isAmbiguous())
return nullptr;
Lookup.suppressDiagnostics();
if (!Lookup.isSingleResult()) {
- VarOrFuncDeclFilterCCC CCC(*this);
+ VarOrFuncDeclFilterCCC CCC(SemaRef);
if (TypoCorrection Corrected =
- CorrectTypo(Id, LookupOrdinaryName, CurScope, nullptr, CCC,
- CTK_ErrorRecovery)) {
- diagnoseTypo(Corrected, PDiag(diag::err_undeclared_var_use_suggest)
- << Id.getName());
+ SemaRef.CorrectTypo(Id, Sema::LookupOrdinaryName, CurScope, nullptr,
+ CCC, Sema::CTK_ErrorRecovery)) {
+ SemaRef.diagnoseTypo(Corrected,
+ PDiag(diag::err_undeclared_var_use_suggest)
+ << Id.getName());
checkDeclIsAllowedInOpenMPTarget(nullptr, Corrected.getCorrectionDecl());
return nullptr;
}
@@ -23312,16 +22147,25 @@ NamedDecl *Sema::lookupOpenMPDeclareTargetName(Scope *CurScope,
return ND;
}
-void Sema::ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
- OMPDeclareTargetDeclAttr::MapTypeTy MT,
- DeclareTargetContextInfo &DTCI) {
+void SemaOpenMP::ActOnOpenMPDeclareTargetName(
+ NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT,
+ DeclareTargetContextInfo &DTCI) {
assert((isa<VarDecl>(ND) || isa<FunctionDecl>(ND) ||
isa<FunctionTemplateDecl>(ND)) &&
"Expected variable, function or function template.");
+ if (auto *VD = dyn_cast<VarDecl>(ND)) {
+ // Only global variables can be marked as declare target.
+ if (!VD->isFileVarDecl() && !VD->isStaticLocal() &&
+ !VD->isStaticDataMember()) {
+ Diag(Loc, diag::err_omp_declare_target_has_local_vars)
+ << VD->getNameAsString();
+ return;
+ }
+ }
// Diagnose marking after use as it may lead to incorrect diagnosis and
// codegen.
- if (LangOpts.OpenMP >= 50 &&
+ if (getLangOpts().OpenMP >= 50 &&
(ND->isUsed(/*CheckUsedAttr=*/false) || ND->isReferenced()))
Diag(Loc, diag::warn_omp_declare_target_after_first_use);
@@ -23360,14 +22204,14 @@ void Sema::ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
IsIndirect = true;
}
auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(
- Context, MT, DTCI.DT, IndirectE, IsIndirect, Level,
+ getASTContext(), MT, DTCI.DT, IndirectE, IsIndirect, Level,
SourceRange(Loc, Loc));
ND->addAttr(A);
- if (ASTMutationListener *ML = Context.getASTMutationListener())
+ if (ASTMutationListener *ML = getASTContext().getASTMutationListener())
ML->DeclarationMarkedOpenMPDeclareTarget(ND, A);
checkDeclIsAllowedInOpenMPTarget(nullptr, ND, Loc);
if (auto *VD = dyn_cast<VarDecl>(ND);
- LangOpts.OpenMP && VD && VD->hasAttr<OMPDeclareTargetDeclAttr>() &&
+ getLangOpts().OpenMP && VD && VD->hasAttr<OMPDeclareTargetDeclAttr>() &&
VD->hasGlobalStorage())
ActOnOpenMPDeclareTargetInitializer(ND);
}
@@ -23411,8 +22255,8 @@ static bool checkValueDeclInTarget(SourceLocation SL, SourceRange SR,
/*FullCheck=*/false);
}
-void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
- SourceLocation IdLoc) {
+void SemaOpenMP::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
+ SourceLocation IdLoc) {
if (!D || D->isInvalidDecl())
return;
SourceRange SR = E ? E->getSourceRange() : D->getSourceRange();
@@ -23426,7 +22270,7 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
// directive.
if (DSAStack->isThreadPrivate(VD)) {
Diag(SL, diag::err_omp_threadprivate_in_target);
- reportOriginalDsa(*this, DSAStack, VD, DSAStack->getTopDSA(VD, false));
+ reportOriginalDsa(SemaRef, DSAStack, VD, DSAStack->getTopDSA(VD, false));
return;
}
}
@@ -23445,7 +22289,7 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
// Problem if any with var declared with incomplete type will be reported
// as normal, so no need to check it here.
if ((E || !VD->getType()->isIncompleteType()) &&
- !checkValueDeclInTarget(SL, SR, *this, DSAStack, VD))
+ !checkValueDeclInTarget(SL, SR, SemaRef, DSAStack, VD))
return;
if (!E && isInOpenMPDeclareTargetContext()) {
// Checking declaration inside declare target region.
@@ -23465,13 +22309,13 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
IsIndirect = true;
}
auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(
- Context,
+ getASTContext(),
getLangOpts().OpenMP >= 52 ? OMPDeclareTargetDeclAttr::MT_Enter
: OMPDeclareTargetDeclAttr::MT_To,
DTCI.DT, IndirectE, IsIndirect, Level,
SourceRange(DTCI.Loc, DTCI.Loc));
D->addAttr(A);
- if (ASTMutationListener *ML = Context.getASTMutationListener())
+ if (ASTMutationListener *ML = getASTContext().getASTMutationListener())
ML->DeclarationMarkedOpenMPDeclareTarget(D, A);
}
return;
@@ -23479,13 +22323,12 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
}
if (!E)
return;
- checkDeclInTargetContext(E->getExprLoc(), E->getSourceRange(), *this, D);
+ checkDeclInTargetContext(E->getExprLoc(), E->getSourceRange(), SemaRef, D);
}
/// This class visits every VarDecl that the initializer references and adds
/// OMPDeclareTargetDeclAttr to each of them.
-class GlobalDeclRefChecker final
- : public StmtVisitor<GlobalDeclRefChecker> {
+class GlobalDeclRefChecker final : public StmtVisitor<GlobalDeclRefChecker> {
SmallVector<VarDecl *> DeclVector;
Attr *A;
@@ -23525,13 +22368,13 @@ public:
/// Adding OMPDeclareTargetDeclAttr to variables with static storage
/// duration that are referenced in the initializer expression list of
/// variables with static storage duration in declare target directive.
-void Sema::ActOnOpenMPDeclareTargetInitializer(Decl *TargetDecl) {
+void SemaOpenMP::ActOnOpenMPDeclareTargetInitializer(Decl *TargetDecl) {
GlobalDeclRefChecker Checker;
if (isa<VarDecl>(TargetDecl))
Checker.declareTargetInitializer(TargetDecl);
}
-OMPClause *Sema::ActOnOpenMPToClause(
+OMPClause *SemaOpenMP::ActOnOpenMPToClause(
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId,
@@ -23557,18 +22400,18 @@ OMPClause *Sema::ActOnOpenMPToClause(
}
MappableVarListInfo MVLI(VarList);
- checkMappableExpressionList(*this, DSAStack, OMPC_to, MVLI, Locs.StartLoc,
+ checkMappableExpressionList(SemaRef, DSAStack, OMPC_to, MVLI, Locs.StartLoc,
MapperIdScopeSpec, MapperId, UnresolvedMappers);
if (MVLI.ProcessedVarList.empty())
return nullptr;
return OMPToClause::Create(
- Context, Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations,
+ getASTContext(), Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations,
MVLI.VarComponents, MVLI.UDMapperList, Modifiers, ModifiersLoc,
- MapperIdScopeSpec.getWithLocInContext(Context), MapperId);
+ MapperIdScopeSpec.getWithLocInContext(getASTContext()), MapperId);
}
-OMPClause *Sema::ActOnOpenMPFromClause(
+OMPClause *SemaOpenMP::ActOnOpenMPFromClause(
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId,
@@ -23594,19 +22437,20 @@ OMPClause *Sema::ActOnOpenMPFromClause(
}
MappableVarListInfo MVLI(VarList);
- checkMappableExpressionList(*this, DSAStack, OMPC_from, MVLI, Locs.StartLoc,
+ checkMappableExpressionList(SemaRef, DSAStack, OMPC_from, MVLI, Locs.StartLoc,
MapperIdScopeSpec, MapperId, UnresolvedMappers);
if (MVLI.ProcessedVarList.empty())
return nullptr;
return OMPFromClause::Create(
- Context, Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations,
+ getASTContext(), Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations,
MVLI.VarComponents, MVLI.UDMapperList, Modifiers, ModifiersLoc,
- MapperIdScopeSpec.getWithLocInContext(Context), MapperId);
+ MapperIdScopeSpec.getWithLocInContext(getASTContext()), MapperId);
}
-OMPClause *Sema::ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
- const OMPVarListLocTy &Locs) {
+OMPClause *
+SemaOpenMP::ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs) {
MappableVarListInfo MVLI(VarList);
SmallVector<Expr *, 8> PrivateCopies;
SmallVector<Expr *, 8> Inits;
@@ -23616,7 +22460,7 @@ OMPClause *Sema::ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
+ auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange);
if (Res.second) {
// It will be analyzed later.
MVLI.ProcessedVarList.push_back(RefExpr);
@@ -23641,30 +22485,30 @@ OMPClause *Sema::ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
// Build the private variable and the expression that refers to it.
auto VDPrivate =
- buildVarDecl(*this, ELoc, Type, D->getName(),
+ buildVarDecl(SemaRef, ELoc, Type, D->getName(),
D->hasAttrs() ? &D->getAttrs() : nullptr,
VD ? cast<DeclRefExpr>(SimpleRefExpr) : nullptr);
if (VDPrivate->isInvalidDecl())
continue;
- CurContext->addDecl(VDPrivate);
+ SemaRef.CurContext->addDecl(VDPrivate);
DeclRefExpr *VDPrivateRefExpr = buildDeclRefExpr(
- *this, VDPrivate, RefExpr->getType().getUnqualifiedType(), ELoc);
+ SemaRef, VDPrivate, RefExpr->getType().getUnqualifiedType(), ELoc);
// Add temporary variable to initialize the private copy of the pointer.
VarDecl *VDInit =
- buildVarDecl(*this, RefExpr->getExprLoc(), Type, ".devptr.temp");
+ buildVarDecl(SemaRef, RefExpr->getExprLoc(), Type, ".devptr.temp");
DeclRefExpr *VDInitRefExpr = buildDeclRefExpr(
- *this, VDInit, RefExpr->getType(), RefExpr->getExprLoc());
- AddInitializerToDecl(VDPrivate,
- DefaultLvalueConversion(VDInitRefExpr).get(),
- /*DirectInit=*/false);
+ SemaRef, VDInit, RefExpr->getType(), RefExpr->getExprLoc());
+ SemaRef.AddInitializerToDecl(
+ VDPrivate, SemaRef.DefaultLvalueConversion(VDInitRefExpr).get(),
+ /*DirectInit=*/false);
// If required, build a capture to implement the privatization initialized
// with the current list item value.
DeclRefExpr *Ref = nullptr;
if (!VD)
- Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);
+ Ref = buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/true);
MVLI.ProcessedVarList.push_back(VD ? RefExpr->IgnoreParens() : Ref);
PrivateCopies.push_back(VDPrivateRefExpr);
Inits.push_back(VDInitRefExpr);
@@ -23686,12 +22530,13 @@ OMPClause *Sema::ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
return nullptr;
return OMPUseDevicePtrClause::Create(
- Context, Locs, MVLI.ProcessedVarList, PrivateCopies, Inits,
+ getASTContext(), Locs, MVLI.ProcessedVarList, PrivateCopies, Inits,
MVLI.VarBaseDeclarations, MVLI.VarComponents);
}
-OMPClause *Sema::ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
- const OMPVarListLocTy &Locs) {
+OMPClause *
+SemaOpenMP::ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs) {
MappableVarListInfo MVLI(VarList);
for (Expr *RefExpr : VarList) {
@@ -23699,7 +22544,7 @@ OMPClause *Sema::ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange,
+ auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange,
/*AllowArraySection=*/true);
if (Res.second) {
// It will be analyzed later.
@@ -23714,7 +22559,7 @@ OMPClause *Sema::ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
// with the current list item value.
DeclRefExpr *Ref = nullptr;
if (!VD)
- Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);
+ Ref = buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/true);
MVLI.ProcessedVarList.push_back(VD ? RefExpr->IgnoreParens() : Ref);
// We need to add a data sharing attribute for this variable to make sure it
@@ -23727,9 +22572,10 @@ OMPClause *Sema::ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
MVLI.VarBaseDeclarations.push_back(D);
MVLI.VarComponents.emplace_back();
Expr *Component = SimpleRefExpr;
- if (VD && (isa<OMPArraySectionExpr>(RefExpr->IgnoreParenImpCasts()) ||
+ if (VD && (isa<ArraySectionExpr>(RefExpr->IgnoreParenImpCasts()) ||
isa<ArraySubscriptExpr>(RefExpr->IgnoreParenImpCasts())))
- Component = DefaultFunctionArrayLvalueConversion(SimpleRefExpr).get();
+ Component =
+ SemaRef.DefaultFunctionArrayLvalueConversion(SimpleRefExpr).get();
MVLI.VarComponents.back().emplace_back(Component, D,
/*IsNonContiguous=*/false);
}
@@ -23737,20 +22583,21 @@ OMPClause *Sema::ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
if (MVLI.ProcessedVarList.empty())
return nullptr;
- return OMPUseDeviceAddrClause::Create(Context, Locs, MVLI.ProcessedVarList,
- MVLI.VarBaseDeclarations,
- MVLI.VarComponents);
+ return OMPUseDeviceAddrClause::Create(
+ getASTContext(), Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations,
+ MVLI.VarComponents);
}
-OMPClause *Sema::ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
- const OMPVarListLocTy &Locs) {
+OMPClause *
+SemaOpenMP::ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs) {
MappableVarListInfo MVLI(VarList);
for (Expr *RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP is_device_ptr clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
+ auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange);
if (Res.second) {
// It will be analyzed later.
MVLI.ProcessedVarList.push_back(RefExpr);
@@ -23776,7 +22623,7 @@ OMPClause *Sema::ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
<< getOpenMPClauseName(DVar.CKind)
<< getOpenMPClauseName(OMPC_is_device_ptr)
<< getOpenMPDirectiveName(DSAStack->getCurrentDirective());
- reportOriginalDsa(*this, DSAStack, D, DVar);
+ reportOriginalDsa(SemaRef, DSAStack, D, DVar);
continue;
}
@@ -23820,20 +22667,21 @@ OMPClause *Sema::ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
if (MVLI.ProcessedVarList.empty())
return nullptr;
- return OMPIsDevicePtrClause::Create(Context, Locs, MVLI.ProcessedVarList,
- MVLI.VarBaseDeclarations,
- MVLI.VarComponents);
+ return OMPIsDevicePtrClause::Create(
+ getASTContext(), Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations,
+ MVLI.VarComponents);
}
-OMPClause *Sema::ActOnOpenMPHasDeviceAddrClause(ArrayRef<Expr *> VarList,
- const OMPVarListLocTy &Locs) {
+OMPClause *
+SemaOpenMP::ActOnOpenMPHasDeviceAddrClause(ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs) {
MappableVarListInfo MVLI(VarList);
for (Expr *RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP has_device_addr clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange,
+ auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange,
/*AllowArraySection=*/true);
if (Res.second) {
// It will be analyzed later.
@@ -23851,7 +22699,7 @@ OMPClause *Sema::ActOnOpenMPHasDeviceAddrClause(ArrayRef<Expr *> VarList,
<< getOpenMPClauseName(DVar.CKind)
<< getOpenMPClauseName(OMPC_has_device_addr)
<< getOpenMPDirectiveName(DSAStack->getCurrentDirective());
- reportOriginalDsa(*this, DSAStack, D, DVar);
+ reportOriginalDsa(SemaRef, DSAStack, D, DVar);
continue;
}
@@ -23874,18 +22722,19 @@ OMPClause *Sema::ActOnOpenMPHasDeviceAddrClause(ArrayRef<Expr *> VarList,
// against other clauses later on.
Expr *Component = SimpleRefExpr;
auto *VD = dyn_cast<VarDecl>(D);
- if (VD && (isa<OMPArraySectionExpr>(RefExpr->IgnoreParenImpCasts()) ||
+ if (VD && (isa<ArraySectionExpr>(RefExpr->IgnoreParenImpCasts()) ||
isa<ArraySubscriptExpr>(RefExpr->IgnoreParenImpCasts())))
- Component = DefaultFunctionArrayLvalueConversion(SimpleRefExpr).get();
+ Component =
+ SemaRef.DefaultFunctionArrayLvalueConversion(SimpleRefExpr).get();
OMPClauseMappableExprCommon::MappableComponent MC(
Component, D, /*IsNonContiguous=*/false);
DSAStack->addMappableExpressionComponents(
D, MC, /*WhereFoundClauseKind=*/OMPC_has_device_addr);
// Record the expression we've just processed.
- if (!VD && !CurContext->isDependentContext()) {
+ if (!VD && !SemaRef.CurContext->isDependentContext()) {
DeclRefExpr *Ref =
- buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);
+ buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/true);
assert(Ref && "has_device_addr capture failed");
MVLI.ProcessedVarList.push_back(Ref);
} else
@@ -23906,27 +22755,27 @@ OMPClause *Sema::ActOnOpenMPHasDeviceAddrClause(ArrayRef<Expr *> VarList,
if (MVLI.ProcessedVarList.empty())
return nullptr;
- return OMPHasDeviceAddrClause::Create(Context, Locs, MVLI.ProcessedVarList,
- MVLI.VarBaseDeclarations,
- MVLI.VarComponents);
+ return OMPHasDeviceAddrClause::Create(
+ getASTContext(), Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations,
+ MVLI.VarComponents);
}
-OMPClause *Sema::ActOnOpenMPAllocateClause(
+OMPClause *SemaOpenMP::ActOnOpenMPAllocateClause(
Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc,
- SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc) {
+ SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc) {
if (Allocator) {
// OpenMP [2.11.4 allocate Clause, Description]
// allocator is an expression of omp_allocator_handle_t type.
- if (!findOMPAllocatorHandleT(*this, Allocator->getExprLoc(), DSAStack))
+ if (!findOMPAllocatorHandleT(SemaRef, Allocator->getExprLoc(), DSAStack))
return nullptr;
- ExprResult AllocatorRes = DefaultLvalueConversion(Allocator);
+ ExprResult AllocatorRes = SemaRef.DefaultLvalueConversion(Allocator);
if (AllocatorRes.isInvalid())
return nullptr;
- AllocatorRes = PerformImplicitConversion(AllocatorRes.get(),
- DSAStack->getOMPAllocatorHandleT(),
- Sema::AA_Initializing,
- /*AllowExplicit=*/true);
+ AllocatorRes = SemaRef.PerformImplicitConversion(
+ AllocatorRes.get(), DSAStack->getOMPAllocatorHandleT(),
+ Sema::AA_Initializing,
+ /*AllowExplicit=*/true);
if (AllocatorRes.isInvalid())
return nullptr;
Allocator = AllocatorRes.get();
@@ -23936,9 +22785,9 @@ OMPClause *Sema::ActOnOpenMPAllocateClause(
// target region must specify an allocator expression unless a requires
// directive with the dynamic_allocators clause is present in the same
// compilation unit.
- if (LangOpts.OpenMPIsTargetDevice &&
+ if (getLangOpts().OpenMPIsTargetDevice &&
!DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())
- targetDiag(StartLoc, diag::err_expected_allocator_expression);
+ SemaRef.targetDiag(StartLoc, diag::err_expected_allocator_expression);
}
// Analyze and build list of variables.
SmallVector<Expr *, 8> Vars;
@@ -23947,7 +22796,7 @@ OMPClause *Sema::ActOnOpenMPAllocateClause(
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
+ auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange);
if (Res.second) {
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -23958,9 +22807,9 @@ OMPClause *Sema::ActOnOpenMPAllocateClause(
auto *VD = dyn_cast<VarDecl>(D);
DeclRefExpr *Ref = nullptr;
- if (!VD && !CurContext->isDependentContext())
- Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/false);
- Vars.push_back((VD || CurContext->isDependentContext())
+ if (!VD && !SemaRef.CurContext->isDependentContext())
+ Ref = buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/false);
+ Vars.push_back((VD || SemaRef.CurContext->isDependentContext())
? RefExpr->IgnoreParens()
: Ref);
}
@@ -23970,21 +22819,21 @@ OMPClause *Sema::ActOnOpenMPAllocateClause(
if (Allocator)
DSAStack->addInnerAllocatorExpr(Allocator);
- return OMPAllocateClause::Create(Context, StartLoc, LParenLoc, Allocator,
- ColonLoc, EndLoc, Vars);
+ return OMPAllocateClause::Create(getASTContext(), StartLoc, LParenLoc,
+ Allocator, ColonLoc, EndLoc, Vars);
}
-OMPClause *Sema::ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
for (Expr *RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP nontemporal clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
+ auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange);
if (Res.second)
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -24009,32 +22858,34 @@ OMPClause *Sema::ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
if (Vars.empty())
return nullptr;
- return OMPNontemporalClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- Vars);
+ return OMPNontemporalClause::Create(getASTContext(), StartLoc, LParenLoc,
+ EndLoc, Vars);
}
-StmtResult Sema::ActOnOpenMPScopeDirective(ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc) {
+StmtResult SemaOpenMP::ActOnOpenMPScopeDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
if (!AStmt)
return StmtError();
- setFunctionHasBranchProtectedScope();
+ SemaRef.setFunctionHasBranchProtectedScope();
- return OMPScopeDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
+ return OMPScopeDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses,
+ AStmt);
}
-OMPClause *Sema::ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
for (Expr *RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP nontemporal clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange,
+ auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange,
/*AllowArraySection=*/true);
if (Res.second)
// It will be analyzed later.
@@ -24061,20 +22912,21 @@ OMPClause *Sema::ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
if (Vars.empty())
return nullptr;
- return OMPInclusiveClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars);
+ return OMPInclusiveClause::Create(getASTContext(), StartLoc, LParenLoc,
+ EndLoc, Vars);
}
-OMPClause *Sema::ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
for (Expr *RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP nontemporal clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange,
+ auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange,
/*AllowArraySection=*/true);
if (Res.second)
// It will be analyzed later.
@@ -24104,7 +22956,8 @@ OMPClause *Sema::ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
if (Vars.empty())
return nullptr;
- return OMPExclusiveClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars);
+ return OMPExclusiveClause::Create(getASTContext(), StartLoc, LParenLoc,
+ EndLoc, Vars);
}
/// Tries to find omp_alloctrait_t type.
@@ -24122,19 +22975,20 @@ static bool findOMPAlloctraitT(Sema &S, SourceLocation Loc, DSAStackTy *Stack) {
return true;
}
-OMPClause *Sema::ActOnOpenMPUsesAllocatorClause(
+OMPClause *SemaOpenMP::ActOnOpenMPUsesAllocatorClause(
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<UsesAllocatorsData> Data) {
+ ASTContext &Context = getASTContext();
// OpenMP [2.12.5, target Construct]
// allocator is an identifier of omp_allocator_handle_t type.
- if (!findOMPAllocatorHandleT(*this, StartLoc, DSAStack))
+ if (!findOMPAllocatorHandleT(SemaRef, StartLoc, DSAStack))
return nullptr;
// OpenMP [2.12.5, target Construct]
// allocator-traits-array is an identifier of const omp_alloctrait_t * type.
if (llvm::any_of(
Data,
[](const UsesAllocatorsData &D) { return D.AllocatorTraits; }) &&
- !findOMPAlloctraitT(*this, StartLoc, DSAStack))
+ !findOMPAlloctraitT(SemaRef, StartLoc, DSAStack))
return nullptr;
llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> PredefinedAllocators;
for (int I = 0; I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
@@ -24142,8 +22996,8 @@ OMPClause *Sema::ActOnOpenMPUsesAllocatorClause(
StringRef Allocator =
OMPAllocateDeclAttr::ConvertAllocatorTypeTyToStr(AllocatorKind);
DeclarationName AllocatorName = &Context.Idents.get(Allocator);
- PredefinedAllocators.insert(LookupSingleName(
- TUScope, AllocatorName, StartLoc, Sema::LookupAnyName));
+ PredefinedAllocators.insert(SemaRef.LookupSingleName(
+ SemaRef.TUScope, AllocatorName, StartLoc, Sema::LookupAnyName));
}
SmallVector<OMPUsesAllocatorsClause::Data, 4> NewData;
@@ -24160,7 +23014,7 @@ OMPClause *Sema::ActOnOpenMPUsesAllocatorClause(
bool IsPredefinedAllocator = false;
if (DRE) {
OMPAllocateDeclAttr::AllocatorTypeTy AllocatorTy =
- getAllocatorKind(*this, DSAStack, AllocatorExpr);
+ getAllocatorKind(SemaRef, DSAStack, AllocatorExpr);
IsPredefinedAllocator =
AllocatorTy !=
OMPAllocateDeclAttr::AllocatorTypeTy::OMPUserDefinedMemAlloc;
@@ -24205,7 +23059,7 @@ OMPClause *Sema::ActOnOpenMPUsesAllocatorClause(
}
// No allocator traits - just convert it to rvalue.
if (!D.AllocatorTraits)
- AllocatorExpr = DefaultLvalueConversion(AllocatorExpr).get();
+ AllocatorExpr = SemaRef.DefaultLvalueConversion(AllocatorExpr).get();
DSAStack->addUsesAllocatorsDecl(
DRE->getDecl(),
IsPredefinedAllocator
@@ -24252,11 +23106,11 @@ OMPClause *Sema::ActOnOpenMPUsesAllocatorClause(
NewD.LParenLoc = D.LParenLoc;
NewD.RParenLoc = D.RParenLoc;
}
- return OMPUsesAllocatorsClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- NewData);
+ return OMPUsesAllocatorsClause::Create(getASTContext(), StartLoc, LParenLoc,
+ EndLoc, NewData);
}
-OMPClause *Sema::ActOnOpenMPAffinityClause(
+OMPClause *SemaOpenMP::ActOnOpenMPAffinityClause(
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators) {
SmallVector<Expr *, 8> Vars;
@@ -24279,10 +23133,10 @@ OMPClause *Sema::ActOnOpenMPAffinityClause(
ExprResult Res;
{
- Sema::TentativeAnalysisScope Trap(*this);
- Res = CreateBuiltinUnaryOp(ELoc, UO_AddrOf, SimpleExpr);
+ Sema::TentativeAnalysisScope Trap(SemaRef);
+ Res = SemaRef.CreateBuiltinUnaryOp(ELoc, UO_AddrOf, SimpleExpr);
}
- if (!Res.isUsable() && !isa<OMPArraySectionExpr>(SimpleExpr) &&
+ if (!Res.isUsable() && !isa<ArraySectionExpr>(SimpleExpr) &&
!isa<OMPArrayShapingExpr>(SimpleExpr)) {
Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
<< 1 << 0 << RefExpr->getSourceRange();
@@ -24291,15 +23145,15 @@ OMPClause *Sema::ActOnOpenMPAffinityClause(
Vars.push_back(SimpleExpr);
}
- return OMPAffinityClause::Create(Context, StartLoc, LParenLoc, ColonLoc,
- EndLoc, Modifier, Vars);
+ return OMPAffinityClause::Create(getASTContext(), StartLoc, LParenLoc,
+ ColonLoc, EndLoc, Modifier, Vars);
}
-OMPClause *Sema::ActOnOpenMPBindClause(OpenMPBindClauseKind Kind,
- SourceLocation KindLoc,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPBindClause(OpenMPBindClauseKind Kind,
+ SourceLocation KindLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
if (Kind == OMPC_BIND_unknown) {
Diag(KindLoc, diag::err_omp_unexpected_clause_value)
<< getListOfPossibleValues(OMPC_bind, /*First=*/0,
@@ -24308,39 +23162,40 @@ OMPClause *Sema::ActOnOpenMPBindClause(OpenMPBindClauseKind Kind,
return nullptr;
}
- return OMPBindClause::Create(Context, Kind, KindLoc, StartLoc, LParenLoc,
- EndLoc);
+ return OMPBindClause::Create(getASTContext(), Kind, KindLoc, StartLoc,
+ LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPXDynCGroupMemClause(Expr *Size,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPXDynCGroupMemClause(Expr *Size,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
Expr *ValExpr = Size;
Stmt *HelperValStmt = nullptr;
// OpenMP [2.5, Restrictions]
// The ompx_dyn_cgroup_mem expression must evaluate to a positive integer
// value.
- if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_ompx_dyn_cgroup_mem,
+ if (!isNonNegativeIntegerValue(ValExpr, SemaRef, OMPC_ompx_dyn_cgroup_mem,
/*StrictlyPositive=*/false))
return nullptr;
OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
OpenMPDirectiveKind CaptureRegion = getOpenMPCaptureRegionForClause(
- DKind, OMPC_ompx_dyn_cgroup_mem, LangOpts.OpenMP);
- if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
- ValExpr = MakeFullExpr(ValExpr).get();
+ DKind, OMPC_ompx_dyn_cgroup_mem, getLangOpts().OpenMP);
+ if (CaptureRegion != OMPD_unknown &&
+ !SemaRef.CurContext->isDependentContext()) {
+ ValExpr = SemaRef.MakeFullExpr(ValExpr).get();
llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
- ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
- HelperValStmt = buildPreInits(Context, Captures);
+ ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get();
+ HelperValStmt = buildPreInits(getASTContext(), Captures);
}
- return new (Context) OMPXDynCGroupMemClause(
+ return new (getASTContext()) OMPXDynCGroupMemClause(
ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPDoacrossClause(
+OMPClause *SemaOpenMP::ActOnOpenMPDoacrossClause(
OpenMPDoacrossClauseModifier DepType, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc) {
@@ -24359,7 +23214,7 @@ OMPClause *Sema::ActOnOpenMPDoacrossClause(
DSAStackTy::OperatorOffsetTy OpsOffs;
llvm::APSInt TotalDepCount(/*BitWidth=*/32);
DoacrossDataInfoTy VarOffset = ProcessOpenMPDoacrossClauseCommon(
- *this,
+ SemaRef,
DepType == OMPC_DOACROSS_source ||
DepType == OMPC_DOACROSS_source_omp_cur_iteration ||
DepType == OMPC_DOACROSS_sink_omp_cur_iteration,
@@ -24367,22 +23222,624 @@ OMPClause *Sema::ActOnOpenMPDoacrossClause(
Vars = VarOffset.Vars;
OpsOffs = VarOffset.OpsOffs;
TotalDepCount = VarOffset.TotalDepCount;
- auto *C = OMPDoacrossClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- DepType, DepLoc, ColonLoc, Vars,
+ auto *C = OMPDoacrossClause::Create(getASTContext(), StartLoc, LParenLoc,
+ EndLoc, DepType, DepLoc, ColonLoc, Vars,
TotalDepCount.getZExtValue());
if (DSAStack->isParentOrderedRegion())
DSAStack->addDoacrossDependClause(C, OpsOffs);
return C;
}
-OMPClause *Sema::ActOnOpenMPXAttributeClause(ArrayRef<const Attr *> Attrs,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
- return new (Context) OMPXAttributeClause(Attrs, StartLoc, LParenLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPXAttributeClause(ArrayRef<const Attr *> Attrs,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return new (getASTContext())
+ OMPXAttributeClause(Attrs, StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPXBareClause(SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return new (Context) OMPXBareClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPXBareClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (getASTContext()) OMPXBareClause(StartLoc, EndLoc);
+}
+
+ExprResult SemaOpenMP::ActOnOMPArraySectionExpr(
+ Expr *Base, SourceLocation LBLoc, Expr *LowerBound,
+ SourceLocation ColonLocFirst, SourceLocation ColonLocSecond, Expr *Length,
+ Expr *Stride, SourceLocation RBLoc) {
+ ASTContext &Context = getASTContext();
+ if (Base->hasPlaceholderType() &&
+ !Base->hasPlaceholderType(BuiltinType::ArraySection)) {
+ ExprResult Result = SemaRef.CheckPlaceholderExpr(Base);
+ if (Result.isInvalid())
+ return ExprError();
+ Base = Result.get();
+ }
+ if (LowerBound && LowerBound->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult Result = SemaRef.CheckPlaceholderExpr(LowerBound);
+ if (Result.isInvalid())
+ return ExprError();
+ Result = SemaRef.DefaultLvalueConversion(Result.get());
+ if (Result.isInvalid())
+ return ExprError();
+ LowerBound = Result.get();
+ }
+ if (Length && Length->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult Result = SemaRef.CheckPlaceholderExpr(Length);
+ if (Result.isInvalid())
+ return ExprError();
+ Result = SemaRef.DefaultLvalueConversion(Result.get());
+ if (Result.isInvalid())
+ return ExprError();
+ Length = Result.get();
+ }
+ if (Stride && Stride->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult Result = SemaRef.CheckPlaceholderExpr(Stride);
+ if (Result.isInvalid())
+ return ExprError();
+ Result = SemaRef.DefaultLvalueConversion(Result.get());
+ if (Result.isInvalid())
+ return ExprError();
+ Stride = Result.get();
+ }
+
+ // Build an unanalyzed expression if either operand is type-dependent.
+ if (Base->isTypeDependent() ||
+ (LowerBound &&
+ (LowerBound->isTypeDependent() || LowerBound->isValueDependent())) ||
+ (Length && (Length->isTypeDependent() || Length->isValueDependent())) ||
+ (Stride && (Stride->isTypeDependent() || Stride->isValueDependent()))) {
+ return new (Context) ArraySectionExpr(
+ Base, LowerBound, Length, Stride, Context.DependentTy, VK_LValue,
+ OK_Ordinary, ColonLocFirst, ColonLocSecond, RBLoc);
+ }
+
+ // Perform default conversions.
+ QualType OriginalTy = ArraySectionExpr::getBaseOriginalType(Base);
+ QualType ResultTy;
+ if (OriginalTy->isAnyPointerType()) {
+ ResultTy = OriginalTy->getPointeeType();
+ } else if (OriginalTy->isArrayType()) {
+ ResultTy = OriginalTy->getAsArrayTypeUnsafe()->getElementType();
+ } else {
+ return ExprError(
+ Diag(Base->getExprLoc(), diag::err_omp_typecheck_section_value)
+ << Base->getSourceRange());
+ }
+ // C99 6.5.2.1p1
+ if (LowerBound) {
+ auto Res = PerformOpenMPImplicitIntegerConversion(LowerBound->getExprLoc(),
+ LowerBound);
+ if (Res.isInvalid())
+ return ExprError(Diag(LowerBound->getExprLoc(),
+ diag::err_omp_typecheck_section_not_integer)
+ << 0 << LowerBound->getSourceRange());
+ LowerBound = Res.get();
+
+ if (LowerBound->getType()->isSpecificBuiltinType(BuiltinType::Char_S) ||
+ LowerBound->getType()->isSpecificBuiltinType(BuiltinType::Char_U))
+ Diag(LowerBound->getExprLoc(), diag::warn_omp_section_is_char)
+ << 0 << LowerBound->getSourceRange();
+ }
+ if (Length) {
+ auto Res =
+ PerformOpenMPImplicitIntegerConversion(Length->getExprLoc(), Length);
+ if (Res.isInvalid())
+ return ExprError(Diag(Length->getExprLoc(),
+ diag::err_omp_typecheck_section_not_integer)
+ << 1 << Length->getSourceRange());
+ Length = Res.get();
+
+ if (Length->getType()->isSpecificBuiltinType(BuiltinType::Char_S) ||
+ Length->getType()->isSpecificBuiltinType(BuiltinType::Char_U))
+ Diag(Length->getExprLoc(), diag::warn_omp_section_is_char)
+ << 1 << Length->getSourceRange();
+ }
+ if (Stride) {
+ ExprResult Res =
+ PerformOpenMPImplicitIntegerConversion(Stride->getExprLoc(), Stride);
+ if (Res.isInvalid())
+ return ExprError(Diag(Stride->getExprLoc(),
+ diag::err_omp_typecheck_section_not_integer)
+ << 1 << Stride->getSourceRange());
+ Stride = Res.get();
+
+ if (Stride->getType()->isSpecificBuiltinType(BuiltinType::Char_S) ||
+ Stride->getType()->isSpecificBuiltinType(BuiltinType::Char_U))
+ Diag(Stride->getExprLoc(), diag::warn_omp_section_is_char)
+ << 1 << Stride->getSourceRange();
+ }
+
+ // C99 6.5.2.1p1: "shall have type "pointer to *object* type". Similarly,
+ // C++ [expr.sub]p1: The type "T" shall be a completely-defined object
+ // type. Note that functions are not objects, and that (in C99 parlance)
+ // incomplete types are not object types.
+ if (ResultTy->isFunctionType()) {
+ Diag(Base->getExprLoc(), diag::err_omp_section_function_type)
+ << ResultTy << Base->getSourceRange();
+ return ExprError();
+ }
+
+ if (SemaRef.RequireCompleteType(Base->getExprLoc(), ResultTy,
+ diag::err_omp_section_incomplete_type, Base))
+ return ExprError();
+
+ if (LowerBound && !OriginalTy->isAnyPointerType()) {
+ Expr::EvalResult Result;
+ if (LowerBound->EvaluateAsInt(Result, Context)) {
+ // OpenMP 5.0, [2.1.5 Array Sections]
+ // The array section must be a subset of the original array.
+ llvm::APSInt LowerBoundValue = Result.Val.getInt();
+ if (LowerBoundValue.isNegative()) {
+ Diag(LowerBound->getExprLoc(),
+ diag::err_omp_section_not_subset_of_array)
+ << LowerBound->getSourceRange();
+ return ExprError();
+ }
+ }
+ }
+
+ if (Length) {
+ Expr::EvalResult Result;
+ if (Length->EvaluateAsInt(Result, Context)) {
+ // OpenMP 5.0, [2.1.5 Array Sections]
+ // The length must evaluate to non-negative integers.
+ llvm::APSInt LengthValue = Result.Val.getInt();
+ if (LengthValue.isNegative()) {
+ Diag(Length->getExprLoc(), diag::err_omp_section_length_negative)
+ << toString(LengthValue, /*Radix=*/10, /*Signed=*/true)
+ << Length->getSourceRange();
+ return ExprError();
+ }
+ }
+ } else if (ColonLocFirst.isValid() &&
+ (OriginalTy.isNull() || (!OriginalTy->isConstantArrayType() &&
+ !OriginalTy->isVariableArrayType()))) {
+ // OpenMP 5.0, [2.1.5 Array Sections]
+ // When the size of the array dimension is not known, the length must be
+ // specified explicitly.
+ Diag(ColonLocFirst, diag::err_omp_section_length_undefined)
+ << (!OriginalTy.isNull() && OriginalTy->isArrayType());
+ return ExprError();
+ }
+
+ if (Stride) {
+ Expr::EvalResult Result;
+ if (Stride->EvaluateAsInt(Result, Context)) {
+ // OpenMP 5.0, [2.1.5 Array Sections]
+ // The stride must evaluate to a positive integer.
+ llvm::APSInt StrideValue = Result.Val.getInt();
+ if (!StrideValue.isStrictlyPositive()) {
+ Diag(Stride->getExprLoc(), diag::err_omp_section_stride_non_positive)
+ << toString(StrideValue, /*Radix=*/10, /*Signed=*/true)
+ << Stride->getSourceRange();
+ return ExprError();
+ }
+ }
+ }
+
+ if (!Base->hasPlaceholderType(BuiltinType::ArraySection)) {
+ ExprResult Result = SemaRef.DefaultFunctionArrayLvalueConversion(Base);
+ if (Result.isInvalid())
+ return ExprError();
+ Base = Result.get();
+ }
+ return new (Context) ArraySectionExpr(
+ Base, LowerBound, Length, Stride, Context.ArraySectionTy, VK_LValue,
+ OK_Ordinary, ColonLocFirst, ColonLocSecond, RBLoc);
+}
+
+ExprResult SemaOpenMP::ActOnOMPArrayShapingExpr(
+ Expr *Base, SourceLocation LParenLoc, SourceLocation RParenLoc,
+ ArrayRef<Expr *> Dims, ArrayRef<SourceRange> Brackets) {
+ ASTContext &Context = getASTContext();
+ if (Base->hasPlaceholderType()) {
+ ExprResult Result = SemaRef.CheckPlaceholderExpr(Base);
+ if (Result.isInvalid())
+ return ExprError();
+ Result = SemaRef.DefaultLvalueConversion(Result.get());
+ if (Result.isInvalid())
+ return ExprError();
+ Base = Result.get();
+ }
+ QualType BaseTy = Base->getType();
+ // Delay analysis of the types/expressions if instantiation/specialization is
+ // required.
+ if (!BaseTy->isPointerType() && Base->isTypeDependent())
+ return OMPArrayShapingExpr::Create(Context, Context.DependentTy, Base,
+ LParenLoc, RParenLoc, Dims, Brackets);
+ if (!BaseTy->isPointerType() ||
+ (!Base->isTypeDependent() &&
+ BaseTy->getPointeeType()->isIncompleteType()))
+ return ExprError(Diag(Base->getExprLoc(),
+ diag::err_omp_non_pointer_type_array_shaping_base)
+ << Base->getSourceRange());
+
+ SmallVector<Expr *, 4> NewDims;
+ bool ErrorFound = false;
+ for (Expr *Dim : Dims) {
+ if (Dim->hasPlaceholderType()) {
+ ExprResult Result = SemaRef.CheckPlaceholderExpr(Dim);
+ if (Result.isInvalid()) {
+ ErrorFound = true;
+ continue;
+ }
+ Result = SemaRef.DefaultLvalueConversion(Result.get());
+ if (Result.isInvalid()) {
+ ErrorFound = true;
+ continue;
+ }
+ Dim = Result.get();
+ }
+ if (!Dim->isTypeDependent()) {
+ ExprResult Result =
+ PerformOpenMPImplicitIntegerConversion(Dim->getExprLoc(), Dim);
+ if (Result.isInvalid()) {
+ ErrorFound = true;
+ Diag(Dim->getExprLoc(), diag::err_omp_typecheck_shaping_not_integer)
+ << Dim->getSourceRange();
+ continue;
+ }
+ Dim = Result.get();
+ Expr::EvalResult EvResult;
+ if (!Dim->isValueDependent() && Dim->EvaluateAsInt(EvResult, Context)) {
+ // OpenMP 5.0, [2.1.4 Array Shaping]
+ // Each si is an integral type expression that must evaluate to a
+ // positive integer.
+ llvm::APSInt Value = EvResult.Val.getInt();
+ if (!Value.isStrictlyPositive()) {
+ Diag(Dim->getExprLoc(), diag::err_omp_shaping_dimension_not_positive)
+ << toString(Value, /*Radix=*/10, /*Signed=*/true)
+ << Dim->getSourceRange();
+ ErrorFound = true;
+ continue;
+ }
+ }
+ }
+ NewDims.push_back(Dim);
+ }
+ if (ErrorFound)
+ return ExprError();
+ return OMPArrayShapingExpr::Create(Context, Context.OMPArrayShapingTy, Base,
+ LParenLoc, RParenLoc, NewDims, Brackets);
+}
+
+ExprResult SemaOpenMP::ActOnOMPIteratorExpr(Scope *S,
+ SourceLocation IteratorKwLoc,
+ SourceLocation LLoc,
+ SourceLocation RLoc,
+ ArrayRef<OMPIteratorData> Data) {
+ ASTContext &Context = getASTContext();
+ SmallVector<OMPIteratorExpr::IteratorDefinition, 4> ID;
+ bool IsCorrect = true;
+ for (const OMPIteratorData &D : Data) {
+ TypeSourceInfo *TInfo = nullptr;
+ SourceLocation StartLoc;
+ QualType DeclTy;
+ if (!D.Type.getAsOpaquePtr()) {
+ // OpenMP 5.0, 2.1.6 Iterators
+ // In an iterator-specifier, if the iterator-type is not specified then
+ // the type of that iterator is of int type.
+ DeclTy = Context.IntTy;
+ StartLoc = D.DeclIdentLoc;
+ } else {
+ DeclTy = Sema::GetTypeFromParser(D.Type, &TInfo);
+ StartLoc = TInfo->getTypeLoc().getBeginLoc();
+ }
+
+ bool IsDeclTyDependent = DeclTy->isDependentType() ||
+ DeclTy->containsUnexpandedParameterPack() ||
+ DeclTy->isInstantiationDependentType();
+ if (!IsDeclTyDependent) {
+ if (!DeclTy->isIntegralType(Context) && !DeclTy->isAnyPointerType()) {
+ // OpenMP 5.0, 2.1.6 Iterators, Restrictions, C/C++
+ // The iterator-type must be an integral or pointer type.
+ Diag(StartLoc, diag::err_omp_iterator_not_integral_or_pointer)
+ << DeclTy;
+ IsCorrect = false;
+ continue;
+ }
+ if (DeclTy.isConstant(Context)) {
+ // OpenMP 5.0, 2.1.6 Iterators, Restrictions, C/C++
+ // The iterator-type must not be const qualified.
+ Diag(StartLoc, diag::err_omp_iterator_not_integral_or_pointer)
+ << DeclTy;
+ IsCorrect = false;
+ continue;
+ }
+ }
+
+ // Iterator declaration.
+ assert(D.DeclIdent && "Identifier expected.");
+ // Always try to create iterator declarator to avoid extra error messages
+ // about unknown declarations use.
+ auto *VD =
+ VarDecl::Create(Context, SemaRef.CurContext, StartLoc, D.DeclIdentLoc,
+ D.DeclIdent, DeclTy, TInfo, SC_None);
+ VD->setImplicit();
+ if (S) {
+ // Check for conflicting previous declaration.
+ DeclarationNameInfo NameInfo(VD->getDeclName(), D.DeclIdentLoc);
+ LookupResult Previous(SemaRef, NameInfo, Sema::LookupOrdinaryName,
+ RedeclarationKind::ForVisibleRedeclaration);
+ Previous.suppressDiagnostics();
+ SemaRef.LookupName(Previous, S);
+
+ SemaRef.FilterLookupForScope(Previous, SemaRef.CurContext, S,
+ /*ConsiderLinkage=*/false,
+ /*AllowInlineNamespace=*/false);
+ if (!Previous.empty()) {
+ NamedDecl *Old = Previous.getRepresentativeDecl();
+ Diag(D.DeclIdentLoc, diag::err_redefinition) << VD->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ } else {
+ SemaRef.PushOnScopeChains(VD, S);
+ }
+ } else {
+ SemaRef.CurContext->addDecl(VD);
+ }
+
+ /// Act on the iterator variable declaration.
+ ActOnOpenMPIteratorVarDecl(VD);
+
+ Expr *Begin = D.Range.Begin;
+ if (!IsDeclTyDependent && Begin && !Begin->isTypeDependent()) {
+ ExprResult BeginRes =
+ SemaRef.PerformImplicitConversion(Begin, DeclTy, Sema::AA_Converting);
+ Begin = BeginRes.get();
+ }
+ Expr *End = D.Range.End;
+ if (!IsDeclTyDependent && End && !End->isTypeDependent()) {
+ ExprResult EndRes =
+ SemaRef.PerformImplicitConversion(End, DeclTy, Sema::AA_Converting);
+ End = EndRes.get();
+ }
+ Expr *Step = D.Range.Step;
+ if (!IsDeclTyDependent && Step && !Step->isTypeDependent()) {
+ if (!Step->getType()->isIntegralType(Context)) {
+ Diag(Step->getExprLoc(), diag::err_omp_iterator_step_not_integral)
+ << Step << Step->getSourceRange();
+ IsCorrect = false;
+ continue;
+ }
+ std::optional<llvm::APSInt> Result =
+ Step->getIntegerConstantExpr(Context);
+ // OpenMP 5.0, 2.1.6 Iterators, Restrictions
+ // If the step expression of a range-specification equals zero, the
+ // behavior is unspecified.
+ if (Result && Result->isZero()) {
+ Diag(Step->getExprLoc(), diag::err_omp_iterator_step_constant_zero)
+ << Step << Step->getSourceRange();
+ IsCorrect = false;
+ continue;
+ }
+ }
+ if (!Begin || !End || !IsCorrect) {
+ IsCorrect = false;
+ continue;
+ }
+ OMPIteratorExpr::IteratorDefinition &IDElem = ID.emplace_back();
+ IDElem.IteratorDecl = VD;
+ IDElem.AssignmentLoc = D.AssignLoc;
+ IDElem.Range.Begin = Begin;
+ IDElem.Range.End = End;
+ IDElem.Range.Step = Step;
+ IDElem.ColonLoc = D.ColonLoc;
+ IDElem.SecondColonLoc = D.SecColonLoc;
+ }
+ if (!IsCorrect) {
+ // Invalidate all created iterator declarations if error is found.
+ for (const OMPIteratorExpr::IteratorDefinition &D : ID) {
+ if (Decl *ID = D.IteratorDecl)
+ ID->setInvalidDecl();
+ }
+ return ExprError();
+ }
+ SmallVector<OMPIteratorHelperData, 4> Helpers;
+ if (!SemaRef.CurContext->isDependentContext()) {
+    // Build number of iterations for each iteration range.
+ // Ni = ((Stepi > 0) ? ((Endi + Stepi -1 - Begini)/Stepi) :
+ // ((Begini-Stepi-1-Endi) / -Stepi);
+ for (OMPIteratorExpr::IteratorDefinition &D : ID) {
+ // (Endi - Begini)
+ ExprResult Res = SemaRef.CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub,
+ D.Range.End, D.Range.Begin);
+ if (!Res.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ ExprResult St, St1;
+ if (D.Range.Step) {
+ St = D.Range.Step;
+ // (Endi - Begini) + Stepi
+ Res = SemaRef.CreateBuiltinBinOp(D.AssignmentLoc, BO_Add, Res.get(),
+ St.get());
+ if (!Res.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ // (Endi - Begini) + Stepi - 1
+ Res = SemaRef.CreateBuiltinBinOp(
+ D.AssignmentLoc, BO_Sub, Res.get(),
+ SemaRef.ActOnIntegerConstant(D.AssignmentLoc, 1).get());
+ if (!Res.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ // ((Endi - Begini) + Stepi - 1) / Stepi
+ Res = SemaRef.CreateBuiltinBinOp(D.AssignmentLoc, BO_Div, Res.get(),
+ St.get());
+ if (!Res.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ St1 = SemaRef.CreateBuiltinUnaryOp(D.AssignmentLoc, UO_Minus,
+ D.Range.Step);
+ // (Begini - Endi)
+ ExprResult Res1 = SemaRef.CreateBuiltinBinOp(
+ D.AssignmentLoc, BO_Sub, D.Range.Begin, D.Range.End);
+ if (!Res1.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ // (Begini - Endi) - Stepi
+ Res1 = SemaRef.CreateBuiltinBinOp(D.AssignmentLoc, BO_Add, Res1.get(),
+ St1.get());
+ if (!Res1.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ // (Begini - Endi) - Stepi - 1
+ Res1 = SemaRef.CreateBuiltinBinOp(
+ D.AssignmentLoc, BO_Sub, Res1.get(),
+ SemaRef.ActOnIntegerConstant(D.AssignmentLoc, 1).get());
+ if (!Res1.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ // ((Begini - Endi) - Stepi - 1) / (-Stepi)
+ Res1 = SemaRef.CreateBuiltinBinOp(D.AssignmentLoc, BO_Div, Res1.get(),
+ St1.get());
+ if (!Res1.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ // Stepi > 0.
+ ExprResult CmpRes = SemaRef.CreateBuiltinBinOp(
+ D.AssignmentLoc, BO_GT, D.Range.Step,
+ SemaRef.ActOnIntegerConstant(D.AssignmentLoc, 0).get());
+ if (!CmpRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ Res = SemaRef.ActOnConditionalOp(D.AssignmentLoc, D.AssignmentLoc,
+ CmpRes.get(), Res.get(), Res1.get());
+ if (!Res.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ }
+ Res = SemaRef.ActOnFinishFullExpr(Res.get(), /*DiscardedValue=*/false);
+ if (!Res.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+
+ // Build counter update.
+ // Build counter.
+ auto *CounterVD = VarDecl::Create(Context, SemaRef.CurContext,
+ D.IteratorDecl->getBeginLoc(),
+ D.IteratorDecl->getBeginLoc(), nullptr,
+ Res.get()->getType(), nullptr, SC_None);
+ CounterVD->setImplicit();
+ ExprResult RefRes =
+ SemaRef.BuildDeclRefExpr(CounterVD, CounterVD->getType(), VK_LValue,
+ D.IteratorDecl->getBeginLoc());
+ // Build counter update.
+ // I = Begini + counter * Stepi;
+ ExprResult UpdateRes;
+ if (D.Range.Step) {
+ UpdateRes = SemaRef.CreateBuiltinBinOp(
+ D.AssignmentLoc, BO_Mul,
+ SemaRef.DefaultLvalueConversion(RefRes.get()).get(), St.get());
+ } else {
+ UpdateRes = SemaRef.DefaultLvalueConversion(RefRes.get());
+ }
+ if (!UpdateRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ UpdateRes = SemaRef.CreateBuiltinBinOp(D.AssignmentLoc, BO_Add,
+ D.Range.Begin, UpdateRes.get());
+ if (!UpdateRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ ExprResult VDRes =
+ SemaRef.BuildDeclRefExpr(cast<VarDecl>(D.IteratorDecl),
+ cast<VarDecl>(D.IteratorDecl)->getType(),
+ VK_LValue, D.IteratorDecl->getBeginLoc());
+ UpdateRes = SemaRef.CreateBuiltinBinOp(D.AssignmentLoc, BO_Assign,
+ VDRes.get(), UpdateRes.get());
+ if (!UpdateRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ UpdateRes =
+ SemaRef.ActOnFinishFullExpr(UpdateRes.get(), /*DiscardedValue=*/true);
+ if (!UpdateRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ ExprResult CounterUpdateRes = SemaRef.CreateBuiltinUnaryOp(
+ D.AssignmentLoc, UO_PreInc, RefRes.get());
+ if (!CounterUpdateRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ CounterUpdateRes = SemaRef.ActOnFinishFullExpr(CounterUpdateRes.get(),
+ /*DiscardedValue=*/true);
+ if (!CounterUpdateRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ OMPIteratorHelperData &HD = Helpers.emplace_back();
+ HD.CounterVD = CounterVD;
+ HD.Upper = Res.get();
+ HD.Update = UpdateRes.get();
+ HD.CounterUpdate = CounterUpdateRes.get();
+ }
+ } else {
+ Helpers.assign(ID.size(), {});
+ }
+ if (!IsCorrect) {
+ // Invalidate all created iterator declarations if error is found.
+ for (const OMPIteratorExpr::IteratorDefinition &D : ID) {
+ if (Decl *ID = D.IteratorDecl)
+ ID->setInvalidDecl();
+ }
+ return ExprError();
+ }
+ return OMPIteratorExpr::Create(Context, Context.OMPIteratorTy, IteratorKwLoc,
+ LLoc, RLoc, ID, Helpers);
}
+
+/// Check if \p AssumptionStr is a known assumption and warn if not.
+static void checkOMPAssumeAttr(Sema &S, SourceLocation Loc,
+ StringRef AssumptionStr) {
+ if (llvm::KnownAssumptionStrings.count(AssumptionStr))
+ return;
+
+ unsigned BestEditDistance = 3;
+ StringRef Suggestion;
+ for (const auto &KnownAssumptionIt : llvm::KnownAssumptionStrings) {
+ unsigned EditDistance =
+ AssumptionStr.edit_distance(KnownAssumptionIt.getKey());
+ if (EditDistance < BestEditDistance) {
+ Suggestion = KnownAssumptionIt.getKey();
+ BestEditDistance = EditDistance;
+ }
+ }
+
+ if (!Suggestion.empty())
+ S.Diag(Loc, diag::warn_omp_assume_attribute_string_unknown_suggested)
+ << AssumptionStr << Suggestion;
+ else
+ S.Diag(Loc, diag::warn_omp_assume_attribute_string_unknown)
+ << AssumptionStr;
+}
+
+void SemaOpenMP::handleOMPAssumeAttr(Decl *D, const ParsedAttr &AL) {
+ // Handle the case where the attribute has a text message.
+ StringRef Str;
+ SourceLocation AttrStrLoc;
+ if (!SemaRef.checkStringLiteralArgumentAttr(AL, 0, Str, &AttrStrLoc))
+ return;
+
+ checkOMPAssumeAttr(SemaRef, AttrStrLoc, Str);
+
+ D->addAttr(::new (getASTContext()) OMPAssumeAttr(getASTContext(), AL, Str));
+}
+
+SemaOpenMP::SemaOpenMP(Sema &S)
+ : SemaBase(S), VarDataSharingAttributesStack(nullptr) {}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp b/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
index b708272ebe7d..28fd3b06156b 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
@@ -10,9 +10,11 @@
//
//===----------------------------------------------------------------------===//
+#include "CheckExprLifetime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DependenceFlags.h"
@@ -31,11 +33,15 @@
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Overload.h"
+#include "clang/Sema/SemaCUDA.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaObjC.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/STLForwardCompat.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
@@ -123,83 +129,107 @@ CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
/// GetConversionRank - Retrieve the implicit conversion rank
/// corresponding to the given implicit conversion kind.
ImplicitConversionRank clang::GetConversionRank(ImplicitConversionKind Kind) {
- static const ImplicitConversionRank
- Rank[] = {
- ICR_Exact_Match,
- ICR_Exact_Match,
- ICR_Exact_Match,
- ICR_Exact_Match,
- ICR_Exact_Match,
- ICR_Exact_Match,
- ICR_Promotion,
- ICR_Promotion,
- ICR_Promotion,
- ICR_Conversion,
- ICR_Conversion,
- ICR_Conversion,
- ICR_Conversion,
- ICR_Conversion,
- ICR_Conversion,
- ICR_Conversion,
- ICR_Conversion,
- ICR_Conversion,
- ICR_Conversion,
- ICR_Conversion,
- ICR_Conversion,
- ICR_OCL_Scalar_Widening,
- ICR_Complex_Real_Conversion,
- ICR_Conversion,
- ICR_Conversion,
- ICR_Writeback_Conversion,
- ICR_Exact_Match, // NOTE(gbiv): This may not be completely right --
- // it was omitted by the patch that added
- // ICK_Zero_Event_Conversion
- ICR_Exact_Match, // NOTE(ctopper): This may not be completely right --
- // it was omitted by the patch that added
- // ICK_Zero_Queue_Conversion
- ICR_C_Conversion,
- ICR_C_Conversion_Extension,
- ICR_Conversion,
+ static const ImplicitConversionRank Rank[] = {
+ ICR_Exact_Match,
+ ICR_Exact_Match,
+ ICR_Exact_Match,
+ ICR_Exact_Match,
+ ICR_Exact_Match,
+ ICR_Exact_Match,
+ ICR_Promotion,
+ ICR_Promotion,
+ ICR_Promotion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_OCL_Scalar_Widening,
+ ICR_Complex_Real_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Writeback_Conversion,
+ ICR_Exact_Match, // NOTE(gbiv): This may not be completely right --
+ // it was omitted by the patch that added
+ // ICK_Zero_Event_Conversion
+ ICR_Exact_Match, // NOTE(ctopper): This may not be completely right --
+ // it was omitted by the patch that added
+ // ICK_Zero_Queue_Conversion
+ ICR_C_Conversion,
+ ICR_C_Conversion_Extension,
+ ICR_Conversion,
+ ICR_HLSL_Dimension_Reduction,
+ ICR_Conversion,
+ ICR_HLSL_Scalar_Widening,
};
static_assert(std::size(Rank) == (int)ICK_Num_Conversion_Kinds);
return Rank[(int)Kind];
}
+ImplicitConversionRank
+clang::GetDimensionConversionRank(ImplicitConversionRank Base,
+ ImplicitConversionKind Dimension) {
+ ImplicitConversionRank Rank = GetConversionRank(Dimension);
+ if (Rank == ICR_HLSL_Scalar_Widening) {
+ if (Base == ICR_Promotion)
+ return ICR_HLSL_Scalar_Widening_Promotion;
+ if (Base == ICR_Conversion)
+ return ICR_HLSL_Scalar_Widening_Conversion;
+ }
+ if (Rank == ICR_HLSL_Dimension_Reduction) {
+ if (Base == ICR_Promotion)
+ return ICR_HLSL_Dimension_Reduction_Promotion;
+ if (Base == ICR_Conversion)
+ return ICR_HLSL_Dimension_Reduction_Conversion;
+ }
+ return Rank;
+}
+
/// GetImplicitConversionName - Return the name of this kind of
/// implicit conversion.
-static const char* GetImplicitConversionName(ImplicitConversionKind Kind) {
- static const char* const Name[] = {
- "No conversion",
- "Lvalue-to-rvalue",
- "Array-to-pointer",
- "Function-to-pointer",
- "Function pointer conversion",
- "Qualification",
- "Integral promotion",
- "Floating point promotion",
- "Complex promotion",
- "Integral conversion",
- "Floating conversion",
- "Complex conversion",
- "Floating-integral conversion",
- "Pointer conversion",
- "Pointer-to-member conversion",
- "Boolean conversion",
- "Compatible-types conversion",
- "Derived-to-base conversion",
- "Vector conversion",
- "SVE Vector conversion",
- "RVV Vector conversion",
- "Vector splat",
- "Complex-real conversion",
- "Block Pointer conversion",
- "Transparent Union Conversion",
- "Writeback conversion",
- "OpenCL Zero Event Conversion",
- "OpenCL Zero Queue Conversion",
- "C specific type conversion",
- "Incompatible pointer conversion",
- "Fixed point conversion",
+static const char *GetImplicitConversionName(ImplicitConversionKind Kind) {
+ static const char *const Name[] = {
+ "No conversion",
+ "Lvalue-to-rvalue",
+ "Array-to-pointer",
+ "Function-to-pointer",
+ "Function pointer conversion",
+ "Qualification",
+ "Integral promotion",
+ "Floating point promotion",
+ "Complex promotion",
+ "Integral conversion",
+ "Floating conversion",
+ "Complex conversion",
+ "Floating-integral conversion",
+ "Pointer conversion",
+ "Pointer-to-member conversion",
+ "Boolean conversion",
+ "Compatible-types conversion",
+ "Derived-to-base conversion",
+ "Vector conversion",
+ "SVE Vector conversion",
+ "RVV Vector conversion",
+ "Vector splat",
+ "Complex-real conversion",
+ "Block Pointer conversion",
+ "Transparent Union Conversion",
+ "Writeback conversion",
+ "OpenCL Zero Event Conversion",
+ "OpenCL Zero Queue Conversion",
+ "C specific type conversion",
+ "Incompatible pointer conversion",
+ "Fixed point conversion",
+ "HLSL vector truncation",
+ "Non-decaying array conversion",
+ "HLSL vector splat",
};
static_assert(std::size(Name) == (int)ICK_Num_Conversion_Kinds);
return Name[Kind];
@@ -210,6 +240,7 @@ static const char* GetImplicitConversionName(ImplicitConversionKind Kind) {
void StandardConversionSequence::setAsIdentityConversion() {
First = ICK_Identity;
Second = ICK_Identity;
+ Dimension = ICK_Identity;
Third = ICK_Identity;
DeprecatedStringLiteralToCharPtr = false;
QualificationIncludesObjCLifetime = false;
@@ -228,11 +259,13 @@ void StandardConversionSequence::setAsIdentityConversion() {
/// implicit conversions.
ImplicitConversionRank StandardConversionSequence::getRank() const {
ImplicitConversionRank Rank = ICR_Exact_Match;
- if (GetConversionRank(First) > Rank)
+ if (GetConversionRank(First) > Rank)
Rank = GetConversionRank(First);
- if (GetConversionRank(Second) > Rank)
+ if (GetConversionRank(Second) > Rank)
Rank = GetConversionRank(Second);
- if (GetConversionRank(Third) > Rank)
+ if (GetDimensionConversionRank(Rank, Dimension) > Rank)
+ Rank = GetDimensionConversionRank(Rank, Dimension);
+ if (GetConversionRank(Third) > Rank)
Rank = GetConversionRank(Third);
return Rank;
}
@@ -328,7 +361,8 @@ static const Expr *IgnoreNarrowingConversion(ASTContext &Ctx,
NarrowingKind StandardConversionSequence::getNarrowingKind(
ASTContext &Ctx, const Expr *Converted, APValue &ConstantValue,
QualType &ConstantType, bool IgnoreFloatToIntegralConversion) const {
- assert(Ctx.getLangOpts().CPlusPlus && "narrowing check outside C++");
+ assert((Ctx.getLangOpts().CPlusPlus || Ctx.getLangOpts().C23) &&
+ "narrowing check outside C++");
// C++11 [dcl.init.list]p7:
// A narrowing conversion is an implicit conversion ...
@@ -410,20 +444,41 @@ NarrowingKind StandardConversionSequence::getNarrowingKind(
if (Initializer->isValueDependent())
return NK_Dependent_Narrowing;
- if (Initializer->isCXX11ConstantExpr(Ctx, &ConstantValue)) {
+ Expr::EvalResult R;
+ if ((Ctx.getLangOpts().C23 && Initializer->EvaluateAsRValue(R, Ctx)) ||
+ Initializer->isCXX11ConstantExpr(Ctx, &ConstantValue)) {
// Constant!
+ if (Ctx.getLangOpts().C23)
+ ConstantValue = R.Val;
assert(ConstantValue.isFloat());
llvm::APFloat FloatVal = ConstantValue.getFloat();
// Convert the source value into the target type.
bool ignored;
- llvm::APFloat::opStatus ConvertStatus = FloatVal.convert(
- Ctx.getFloatTypeSemantics(ToType),
- llvm::APFloat::rmNearestTiesToEven, &ignored);
- // If there was no overflow, the source value is within the range of
- // values that can be represented.
- if (ConvertStatus & llvm::APFloat::opOverflow) {
- ConstantType = Initializer->getType();
- return NK_Constant_Narrowing;
+ llvm::APFloat Converted = FloatVal;
+ llvm::APFloat::opStatus ConvertStatus =
+ Converted.convert(Ctx.getFloatTypeSemantics(ToType),
+ llvm::APFloat::rmNearestTiesToEven, &ignored);
+ Converted.convert(Ctx.getFloatTypeSemantics(FromType),
+ llvm::APFloat::rmNearestTiesToEven, &ignored);
+ if (Ctx.getLangOpts().C23) {
+ if (FloatVal.isNaN() && Converted.isNaN() &&
+ !FloatVal.isSignaling() && !Converted.isSignaling()) {
+ // Quiet NaNs are considered the same value, regardless of
+ // payloads.
+ return NK_Not_Narrowing;
+ }
+ // For normal values, check exact equality.
+ if (!Converted.bitwiseIsEqual(FloatVal)) {
+ ConstantType = Initializer->getType();
+ return NK_Constant_Narrowing;
+ }
+ } else {
+ // If there was no overflow, the source value is within the range of
+ // values that can be represented.
+ if (ConvertStatus & llvm::APFloat::opOverflow) {
+ ConstantType = Initializer->getType();
+ return NK_Constant_Narrowing;
+ }
}
} else {
return NK_Variable_Narrowing;
@@ -490,7 +545,30 @@ NarrowingKind StandardConversionSequence::getNarrowingKind(
}
return NK_Not_Narrowing;
}
+ case ICK_Complex_Real:
+ if (FromType->isComplexType() && !ToType->isComplexType())
+ return NK_Type_Narrowing;
+ return NK_Not_Narrowing;
+ case ICK_Floating_Promotion:
+ if (Ctx.getLangOpts().C23) {
+ const Expr *Initializer = IgnoreNarrowingConversion(Ctx, Converted);
+ Expr::EvalResult R;
+ if (Initializer->EvaluateAsRValue(R, Ctx)) {
+ ConstantValue = R.Val;
+ assert(ConstantValue.isFloat());
+ llvm::APFloat FloatVal = ConstantValue.getFloat();
+ // C23 6.7.3p6 If the initializer has real type and a signaling NaN
+ // value, the unqualified versions of the type of the initializer and
+ // the corresponding real type of the object declared shall be
+ // compatible.
+ if (FloatVal.isNaN() && FloatVal.isSignaling()) {
+ ConstantType = Initializer->getType();
+ return NK_Constant_Narrowing;
+ }
+ }
+ }
+ return NK_Not_Narrowing;
default:
// Other kinds of conversions are not narrowings.
return NK_Not_Narrowing;
@@ -628,28 +706,28 @@ namespace {
/// to the form used in overload-candidate information.
DeductionFailureInfo
clang::MakeDeductionFailureInfo(ASTContext &Context,
- Sema::TemplateDeductionResult TDK,
+ TemplateDeductionResult TDK,
TemplateDeductionInfo &Info) {
DeductionFailureInfo Result;
Result.Result = static_cast<unsigned>(TDK);
Result.HasDiagnostic = false;
switch (TDK) {
- case Sema::TDK_Invalid:
- case Sema::TDK_InstantiationDepth:
- case Sema::TDK_TooManyArguments:
- case Sema::TDK_TooFewArguments:
- case Sema::TDK_MiscellaneousDeductionFailure:
- case Sema::TDK_CUDATargetMismatch:
+ case TemplateDeductionResult::Invalid:
+ case TemplateDeductionResult::InstantiationDepth:
+ case TemplateDeductionResult::TooManyArguments:
+ case TemplateDeductionResult::TooFewArguments:
+ case TemplateDeductionResult::MiscellaneousDeductionFailure:
+ case TemplateDeductionResult::CUDATargetMismatch:
Result.Data = nullptr;
break;
- case Sema::TDK_Incomplete:
- case Sema::TDK_InvalidExplicitArguments:
+ case TemplateDeductionResult::Incomplete:
+ case TemplateDeductionResult::InvalidExplicitArguments:
Result.Data = Info.Param.getOpaqueValue();
break;
- case Sema::TDK_DeducedMismatch:
- case Sema::TDK_DeducedMismatchNested: {
+ case TemplateDeductionResult::DeducedMismatch:
+ case TemplateDeductionResult::DeducedMismatchNested: {
// FIXME: Should allocate from normal heap so that we can free this later.
auto *Saved = new (Context) DFIDeducedMismatchArgs;
Saved->FirstArg = Info.FirstArg;
@@ -660,7 +738,7 @@ clang::MakeDeductionFailureInfo(ASTContext &Context,
break;
}
- case Sema::TDK_NonDeducedMismatch: {
+ case TemplateDeductionResult::NonDeducedMismatch: {
// FIXME: Should allocate from normal heap so that we can free this later.
DFIArguments *Saved = new (Context) DFIArguments;
Saved->FirstArg = Info.FirstArg;
@@ -669,10 +747,10 @@ clang::MakeDeductionFailureInfo(ASTContext &Context,
break;
}
- case Sema::TDK_IncompletePack:
+ case TemplateDeductionResult::IncompletePack:
// FIXME: It's slightly wasteful to allocate two TemplateArguments for this.
- case Sema::TDK_Inconsistent:
- case Sema::TDK_Underqualified: {
+ case TemplateDeductionResult::Inconsistent:
+ case TemplateDeductionResult::Underqualified: {
// FIXME: Should allocate from normal heap so that we can free this later.
DFIParamWithArguments *Saved = new (Context) DFIParamWithArguments;
Saved->Param = Info.Param;
@@ -682,7 +760,7 @@ clang::MakeDeductionFailureInfo(ASTContext &Context,
break;
}
- case Sema::TDK_SubstitutionFailure:
+ case TemplateDeductionResult::SubstitutionFailure:
Result.Data = Info.takeSugared();
if (Info.hasSFINAEDiagnostic()) {
PartialDiagnosticAt *Diag = new (Result.Diagnostic) PartialDiagnosticAt(
@@ -692,7 +770,7 @@ clang::MakeDeductionFailureInfo(ASTContext &Context,
}
break;
- case Sema::TDK_ConstraintsNotSatisfied: {
+ case TemplateDeductionResult::ConstraintsNotSatisfied: {
CNSInfo *Saved = new (Context) CNSInfo;
Saved->TemplateArgs = Info.takeSugared();
Saved->Satisfaction = Info.AssociatedConstraintsSatisfaction;
@@ -700,9 +778,9 @@ clang::MakeDeductionFailureInfo(ASTContext &Context,
break;
}
- case Sema::TDK_Success:
- case Sema::TDK_NonDependentConversionFailure:
- case Sema::TDK_AlreadyDiagnosed:
+ case TemplateDeductionResult::Success:
+ case TemplateDeductionResult::NonDependentConversionFailure:
+ case TemplateDeductionResult::AlreadyDiagnosed:
llvm_unreachable("not a deduction failure");
}
@@ -710,29 +788,29 @@ clang::MakeDeductionFailureInfo(ASTContext &Context,
}
void DeductionFailureInfo::Destroy() {
- switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
- case Sema::TDK_Success:
- case Sema::TDK_Invalid:
- case Sema::TDK_InstantiationDepth:
- case Sema::TDK_Incomplete:
- case Sema::TDK_TooManyArguments:
- case Sema::TDK_TooFewArguments:
- case Sema::TDK_InvalidExplicitArguments:
- case Sema::TDK_CUDATargetMismatch:
- case Sema::TDK_NonDependentConversionFailure:
+ switch (static_cast<TemplateDeductionResult>(Result)) {
+ case TemplateDeductionResult::Success:
+ case TemplateDeductionResult::Invalid:
+ case TemplateDeductionResult::InstantiationDepth:
+ case TemplateDeductionResult::Incomplete:
+ case TemplateDeductionResult::TooManyArguments:
+ case TemplateDeductionResult::TooFewArguments:
+ case TemplateDeductionResult::InvalidExplicitArguments:
+ case TemplateDeductionResult::CUDATargetMismatch:
+ case TemplateDeductionResult::NonDependentConversionFailure:
break;
- case Sema::TDK_IncompletePack:
- case Sema::TDK_Inconsistent:
- case Sema::TDK_Underqualified:
- case Sema::TDK_DeducedMismatch:
- case Sema::TDK_DeducedMismatchNested:
- case Sema::TDK_NonDeducedMismatch:
+ case TemplateDeductionResult::IncompletePack:
+ case TemplateDeductionResult::Inconsistent:
+ case TemplateDeductionResult::Underqualified:
+ case TemplateDeductionResult::DeducedMismatch:
+ case TemplateDeductionResult::DeducedMismatchNested:
+ case TemplateDeductionResult::NonDeducedMismatch:
// FIXME: Destroy the data?
Data = nullptr;
break;
- case Sema::TDK_SubstitutionFailure:
+ case TemplateDeductionResult::SubstitutionFailure:
// FIXME: Destroy the template argument list?
Data = nullptr;
if (PartialDiagnosticAt *Diag = getSFINAEDiagnostic()) {
@@ -741,7 +819,7 @@ void DeductionFailureInfo::Destroy() {
}
break;
- case Sema::TDK_ConstraintsNotSatisfied:
+ case TemplateDeductionResult::ConstraintsNotSatisfied:
// FIXME: Destroy the template argument list?
Data = nullptr;
if (PartialDiagnosticAt *Diag = getSFINAEDiagnostic()) {
@@ -751,8 +829,8 @@ void DeductionFailureInfo::Destroy() {
break;
// Unhandled
- case Sema::TDK_MiscellaneousDeductionFailure:
- case Sema::TDK_AlreadyDiagnosed:
+ case TemplateDeductionResult::MiscellaneousDeductionFailure:
+ case TemplateDeductionResult::AlreadyDiagnosed:
break;
}
}
@@ -764,33 +842,33 @@ PartialDiagnosticAt *DeductionFailureInfo::getSFINAEDiagnostic() {
}
TemplateParameter DeductionFailureInfo::getTemplateParameter() {
- switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
- case Sema::TDK_Success:
- case Sema::TDK_Invalid:
- case Sema::TDK_InstantiationDepth:
- case Sema::TDK_TooManyArguments:
- case Sema::TDK_TooFewArguments:
- case Sema::TDK_SubstitutionFailure:
- case Sema::TDK_DeducedMismatch:
- case Sema::TDK_DeducedMismatchNested:
- case Sema::TDK_NonDeducedMismatch:
- case Sema::TDK_CUDATargetMismatch:
- case Sema::TDK_NonDependentConversionFailure:
- case Sema::TDK_ConstraintsNotSatisfied:
+ switch (static_cast<TemplateDeductionResult>(Result)) {
+ case TemplateDeductionResult::Success:
+ case TemplateDeductionResult::Invalid:
+ case TemplateDeductionResult::InstantiationDepth:
+ case TemplateDeductionResult::TooManyArguments:
+ case TemplateDeductionResult::TooFewArguments:
+ case TemplateDeductionResult::SubstitutionFailure:
+ case TemplateDeductionResult::DeducedMismatch:
+ case TemplateDeductionResult::DeducedMismatchNested:
+ case TemplateDeductionResult::NonDeducedMismatch:
+ case TemplateDeductionResult::CUDATargetMismatch:
+ case TemplateDeductionResult::NonDependentConversionFailure:
+ case TemplateDeductionResult::ConstraintsNotSatisfied:
return TemplateParameter();
- case Sema::TDK_Incomplete:
- case Sema::TDK_InvalidExplicitArguments:
+ case TemplateDeductionResult::Incomplete:
+ case TemplateDeductionResult::InvalidExplicitArguments:
return TemplateParameter::getFromOpaqueValue(Data);
- case Sema::TDK_IncompletePack:
- case Sema::TDK_Inconsistent:
- case Sema::TDK_Underqualified:
+ case TemplateDeductionResult::IncompletePack:
+ case TemplateDeductionResult::Inconsistent:
+ case TemplateDeductionResult::Underqualified:
return static_cast<DFIParamWithArguments*>(Data)->Param;
// Unhandled
- case Sema::TDK_MiscellaneousDeductionFailure:
- case Sema::TDK_AlreadyDiagnosed:
+ case TemplateDeductionResult::MiscellaneousDeductionFailure:
+ case TemplateDeductionResult::AlreadyDiagnosed:
break;
}
@@ -798,35 +876,35 @@ TemplateParameter DeductionFailureInfo::getTemplateParameter() {
}
TemplateArgumentList *DeductionFailureInfo::getTemplateArgumentList() {
- switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
- case Sema::TDK_Success:
- case Sema::TDK_Invalid:
- case Sema::TDK_InstantiationDepth:
- case Sema::TDK_TooManyArguments:
- case Sema::TDK_TooFewArguments:
- case Sema::TDK_Incomplete:
- case Sema::TDK_IncompletePack:
- case Sema::TDK_InvalidExplicitArguments:
- case Sema::TDK_Inconsistent:
- case Sema::TDK_Underqualified:
- case Sema::TDK_NonDeducedMismatch:
- case Sema::TDK_CUDATargetMismatch:
- case Sema::TDK_NonDependentConversionFailure:
+ switch (static_cast<TemplateDeductionResult>(Result)) {
+ case TemplateDeductionResult::Success:
+ case TemplateDeductionResult::Invalid:
+ case TemplateDeductionResult::InstantiationDepth:
+ case TemplateDeductionResult::TooManyArguments:
+ case TemplateDeductionResult::TooFewArguments:
+ case TemplateDeductionResult::Incomplete:
+ case TemplateDeductionResult::IncompletePack:
+ case TemplateDeductionResult::InvalidExplicitArguments:
+ case TemplateDeductionResult::Inconsistent:
+ case TemplateDeductionResult::Underqualified:
+ case TemplateDeductionResult::NonDeducedMismatch:
+ case TemplateDeductionResult::CUDATargetMismatch:
+ case TemplateDeductionResult::NonDependentConversionFailure:
return nullptr;
- case Sema::TDK_DeducedMismatch:
- case Sema::TDK_DeducedMismatchNested:
+ case TemplateDeductionResult::DeducedMismatch:
+ case TemplateDeductionResult::DeducedMismatchNested:
return static_cast<DFIDeducedMismatchArgs*>(Data)->TemplateArgs;
- case Sema::TDK_SubstitutionFailure:
+ case TemplateDeductionResult::SubstitutionFailure:
return static_cast<TemplateArgumentList*>(Data);
- case Sema::TDK_ConstraintsNotSatisfied:
+ case TemplateDeductionResult::ConstraintsNotSatisfied:
return static_cast<CNSInfo*>(Data)->TemplateArgs;
// Unhandled
- case Sema::TDK_MiscellaneousDeductionFailure:
- case Sema::TDK_AlreadyDiagnosed:
+ case TemplateDeductionResult::MiscellaneousDeductionFailure:
+ case TemplateDeductionResult::AlreadyDiagnosed:
break;
}
@@ -834,31 +912,31 @@ TemplateArgumentList *DeductionFailureInfo::getTemplateArgumentList() {
}
const TemplateArgument *DeductionFailureInfo::getFirstArg() {
- switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
- case Sema::TDK_Success:
- case Sema::TDK_Invalid:
- case Sema::TDK_InstantiationDepth:
- case Sema::TDK_Incomplete:
- case Sema::TDK_TooManyArguments:
- case Sema::TDK_TooFewArguments:
- case Sema::TDK_InvalidExplicitArguments:
- case Sema::TDK_SubstitutionFailure:
- case Sema::TDK_CUDATargetMismatch:
- case Sema::TDK_NonDependentConversionFailure:
- case Sema::TDK_ConstraintsNotSatisfied:
+ switch (static_cast<TemplateDeductionResult>(Result)) {
+ case TemplateDeductionResult::Success:
+ case TemplateDeductionResult::Invalid:
+ case TemplateDeductionResult::InstantiationDepth:
+ case TemplateDeductionResult::Incomplete:
+ case TemplateDeductionResult::TooManyArguments:
+ case TemplateDeductionResult::TooFewArguments:
+ case TemplateDeductionResult::InvalidExplicitArguments:
+ case TemplateDeductionResult::SubstitutionFailure:
+ case TemplateDeductionResult::CUDATargetMismatch:
+ case TemplateDeductionResult::NonDependentConversionFailure:
+ case TemplateDeductionResult::ConstraintsNotSatisfied:
return nullptr;
- case Sema::TDK_IncompletePack:
- case Sema::TDK_Inconsistent:
- case Sema::TDK_Underqualified:
- case Sema::TDK_DeducedMismatch:
- case Sema::TDK_DeducedMismatchNested:
- case Sema::TDK_NonDeducedMismatch:
+ case TemplateDeductionResult::IncompletePack:
+ case TemplateDeductionResult::Inconsistent:
+ case TemplateDeductionResult::Underqualified:
+ case TemplateDeductionResult::DeducedMismatch:
+ case TemplateDeductionResult::DeducedMismatchNested:
+ case TemplateDeductionResult::NonDeducedMismatch:
return &static_cast<DFIArguments*>(Data)->FirstArg;
// Unhandled
- case Sema::TDK_MiscellaneousDeductionFailure:
- case Sema::TDK_AlreadyDiagnosed:
+ case TemplateDeductionResult::MiscellaneousDeductionFailure:
+ case TemplateDeductionResult::AlreadyDiagnosed:
break;
}
@@ -866,31 +944,31 @@ const TemplateArgument *DeductionFailureInfo::getFirstArg() {
}
const TemplateArgument *DeductionFailureInfo::getSecondArg() {
- switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
- case Sema::TDK_Success:
- case Sema::TDK_Invalid:
- case Sema::TDK_InstantiationDepth:
- case Sema::TDK_Incomplete:
- case Sema::TDK_IncompletePack:
- case Sema::TDK_TooManyArguments:
- case Sema::TDK_TooFewArguments:
- case Sema::TDK_InvalidExplicitArguments:
- case Sema::TDK_SubstitutionFailure:
- case Sema::TDK_CUDATargetMismatch:
- case Sema::TDK_NonDependentConversionFailure:
- case Sema::TDK_ConstraintsNotSatisfied:
+ switch (static_cast<TemplateDeductionResult>(Result)) {
+ case TemplateDeductionResult::Success:
+ case TemplateDeductionResult::Invalid:
+ case TemplateDeductionResult::InstantiationDepth:
+ case TemplateDeductionResult::Incomplete:
+ case TemplateDeductionResult::IncompletePack:
+ case TemplateDeductionResult::TooManyArguments:
+ case TemplateDeductionResult::TooFewArguments:
+ case TemplateDeductionResult::InvalidExplicitArguments:
+ case TemplateDeductionResult::SubstitutionFailure:
+ case TemplateDeductionResult::CUDATargetMismatch:
+ case TemplateDeductionResult::NonDependentConversionFailure:
+ case TemplateDeductionResult::ConstraintsNotSatisfied:
return nullptr;
- case Sema::TDK_Inconsistent:
- case Sema::TDK_Underqualified:
- case Sema::TDK_DeducedMismatch:
- case Sema::TDK_DeducedMismatchNested:
- case Sema::TDK_NonDeducedMismatch:
+ case TemplateDeductionResult::Inconsistent:
+ case TemplateDeductionResult::Underqualified:
+ case TemplateDeductionResult::DeducedMismatch:
+ case TemplateDeductionResult::DeducedMismatchNested:
+ case TemplateDeductionResult::NonDeducedMismatch:
return &static_cast<DFIArguments*>(Data)->SecondArg;
// Unhandled
- case Sema::TDK_MiscellaneousDeductionFailure:
- case Sema::TDK_AlreadyDiagnosed:
+ case TemplateDeductionResult::MiscellaneousDeductionFailure:
+ case TemplateDeductionResult::AlreadyDiagnosed:
break;
}
@@ -898,9 +976,9 @@ const TemplateArgument *DeductionFailureInfo::getSecondArg() {
}
std::optional<unsigned> DeductionFailureInfo::getCallArgIndex() {
- switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
- case Sema::TDK_DeducedMismatch:
- case Sema::TDK_DeducedMismatchNested:
+ switch (static_cast<TemplateDeductionResult>(Result)) {
+ case TemplateDeductionResult::DeducedMismatch:
+ case TemplateDeductionResult::DeducedMismatchNested:
return static_cast<DFIDeducedMismatchArgs*>(Data)->CallArgIndex;
default:
@@ -1033,7 +1111,7 @@ namespace {
assert(E->hasPlaceholderType(BuiltinType::ARCUnbridgedCast));
Entry entry = { &E, E };
Entries.push_back(entry);
- E = S.stripARCUnbridgedCast(E);
+ E = S.ObjC().stripARCUnbridgedCast(E);
}
void restore() {
@@ -1091,39 +1169,6 @@ static bool checkArgPlaceholdersForOverload(Sema &S, MultiExprArg Args,
return false;
}
-/// Determine whether the given New declaration is an overload of the
-/// declarations in Old. This routine returns Ovl_Match or Ovl_NonFunction if
-/// New and Old cannot be overloaded, e.g., if New has the same signature as
-/// some function in Old (C++ 1.3.10) or if the Old declarations aren't
-/// functions (or function templates) at all. When it does return Ovl_Match or
-/// Ovl_NonFunction, MatchedDecl will point to the decl that New cannot be
-/// overloaded with. This decl may be a UsingShadowDecl on top of the underlying
-/// declaration.
-///
-/// Example: Given the following input:
-///
-/// void f(int, float); // #1
-/// void f(int, int); // #2
-/// int f(int, int); // #3
-///
-/// When we process #1, there is no previous declaration of "f", so IsOverload
-/// will not be used.
-///
-/// When we process #2, Old contains only the FunctionDecl for #1. By comparing
-/// the parameter types, we see that #1 and #2 are overloaded (since they have
-/// different signatures), so this routine returns Ovl_Overload; MatchedDecl is
-/// unchanged.
-///
-/// When we process #3, Old is an overload set containing #1 and #2. We compare
-/// the signatures of #3 to #1 (they're overloaded, so we do nothing) and then
-/// #3 to #2. Since the signatures of #3 and #2 are identical (return types of
-/// functions are not part of the signature), IsOverload returns Ovl_Match and
-/// MatchedDecl will be set to point to the FunctionDecl for #2.
-///
-/// 'NewIsUsingShadowDecl' indicates that 'New' is being introduced into a class
-/// by a using declaration. The rules for whether to hide shadow declarations
-/// ignore some properties which otherwise figure into a function template's
-/// signature.
Sema::OverloadKind
Sema::CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &Old,
NamedDecl *&Match, bool NewIsUsingDecl) {
@@ -1250,6 +1295,8 @@ static bool IsOverloadOrOverrideImpl(Sema &SemaRef, FunctionDecl *New,
if (New->isMSVCRTEntryPoint())
return false;
+ NamedDecl *OldDecl = Old;
+ NamedDecl *NewDecl = New;
FunctionTemplateDecl *OldTemplate = Old->getDescribedFunctionTemplate();
FunctionTemplateDecl *NewTemplate = New->getDescribedFunctionTemplate();
@@ -1294,6 +1341,8 @@ static bool IsOverloadOrOverrideImpl(Sema &SemaRef, FunctionDecl *New,
// references to non-instantiated entities during constraint substitution.
// GH78101.
if (NewTemplate) {
+ OldDecl = OldTemplate;
+ NewDecl = NewTemplate;
// C++ [temp.over.link]p4:
// The signature of a function template consists of its function
// signature, its return type and its template parameter list. The names
@@ -1423,7 +1472,7 @@ static bool IsOverloadOrOverrideImpl(Sema &SemaRef, FunctionDecl *New,
}
if (OldMethod && NewMethod && !OldMethod->isStatic() &&
- !OldMethod->isStatic()) {
+ !NewMethod->isStatic()) {
bool HaveCorrespondingObjectParameters = [&](const CXXMethodDecl *Old,
const CXXMethodDecl *New) {
auto NewObjectType = New->getFunctionObjectParameterReferenceType();
@@ -1453,13 +1502,14 @@ static bool IsOverloadOrOverrideImpl(Sema &SemaRef, FunctionDecl *New,
}
}
- if (!UseOverrideRules) {
+ if (!UseOverrideRules &&
+ New->getTemplateSpecializationKind() != TSK_ExplicitSpecialization) {
Expr *NewRC = New->getTrailingRequiresClause(),
*OldRC = Old->getTrailingRequiresClause();
if ((NewRC != nullptr) != (OldRC != nullptr))
return true;
-
- if (NewRC && !SemaRef.AreConstraintExpressionsEqual(Old, OldRC, New, NewRC))
+ if (NewRC &&
+ !SemaRef.AreConstraintExpressionsEqual(OldDecl, OldRC, NewDecl, NewRC))
return true;
}
@@ -1497,10 +1547,10 @@ static bool IsOverloadOrOverrideImpl(Sema &SemaRef, FunctionDecl *New,
// Don't allow overloading of destructors. (In theory we could, but it
// would be a giant change to clang.)
if (!isa<CXXDestructorDecl>(New)) {
- Sema::CUDAFunctionTarget NewTarget = SemaRef.IdentifyCUDATarget(New),
- OldTarget = SemaRef.IdentifyCUDATarget(Old);
- if (NewTarget != Sema::CFT_InvalidTarget) {
- assert((OldTarget != Sema::CFT_InvalidTarget) &&
+ CUDAFunctionTarget NewTarget = SemaRef.CUDA().IdentifyTarget(New),
+ OldTarget = SemaRef.CUDA().IdentifyTarget(Old);
+ if (NewTarget != CUDAFunctionTarget::InvalidTarget) {
+ assert((OldTarget != CUDAFunctionTarget::InvalidTarget) &&
"Unexpected invalid target.");
// Allow overloading of functions with same signature and different CUDA
@@ -1704,11 +1754,6 @@ Sema::TryImplicitConversion(Expr *From, QualType ToType,
/*AllowObjCConversionOnExplicit=*/false);
}
-/// PerformImplicitConversion - Perform an implicit conversion of the
-/// expression From to the type ToType. Returns the
-/// converted expression. Flavor is the kind of conversion we're
-/// performing, used in the error message. If @p AllowExplicit,
-/// explicit user-defined conversions are permitted.
ExprResult Sema::PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit) {
@@ -1720,8 +1765,8 @@ ExprResult Sema::PerformImplicitConversion(Expr *From, QualType ToType,
= getLangOpts().ObjCAutoRefCount &&
(Action == AA_Passing || Action == AA_Sending);
if (getLangOpts().ObjC)
- CheckObjCBridgeRelatedConversions(From->getBeginLoc(), ToType,
- From->getType(), From);
+ ObjC().CheckObjCBridgeRelatedConversions(From->getBeginLoc(), ToType,
+ From->getType(), From);
ImplicitConversionSequence ICS = ::TryImplicitConversion(
*this, From, ToType,
/*SuppressUserConversions=*/false,
@@ -1732,9 +1777,6 @@ ExprResult Sema::PerformImplicitConversion(Expr *From, QualType ToType,
return PerformImplicitConversion(From, ToType, ICS, Action);
}
-/// Determine whether the conversion from FromType to ToType is a valid
-/// conversion that strips "noexcept" or "noreturn" off the nested function
-/// type.
bool Sema::IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy) {
if (Context.hasSameUnqualifiedType(FromType, ToType))
@@ -1817,6 +1859,27 @@ bool Sema::IsFunctionConversion(QualType FromType, QualType ToType,
FromFn = QT->getAs<FunctionType>();
Changed = true;
}
+
+ // For C, when called from checkPointerTypesForAssignment,
+ // we need to not alter FromFn, or else even an innocuous cast
+ // like dropping effects will fail. In C++ however we do want to
+ // alter FromFn (because of the way PerformImplicitConversion works).
+ if (Context.hasAnyFunctionEffects() && getLangOpts().CPlusPlus) {
+ FromFPT = cast<FunctionProtoType>(FromFn); // in case FromFn changed above
+
+ // Transparently add/drop effects; here we are concerned with
+ // language rules/canonicalization. Adding/dropping effects is a warning.
+ const auto FromFX = FromFPT->getFunctionEffects();
+ const auto ToFX = ToFPT->getFunctionEffects();
+ if (FromFX != ToFX) {
+ FunctionProtoType::ExtProtoInfo ExtInfo = FromFPT->getExtProtoInfo();
+ ExtInfo.FunctionEffects = ToFX;
+ QualType QT = Context.getFunctionType(
+ FromFPT->getReturnType(), FromFPT->getParamTypes(), ExtInfo);
+ FromFn = QT->getAs<FunctionType>();
+ Changed = true;
+ }
+ }
}
if (!Changed)
@@ -1830,12 +1893,85 @@ bool Sema::IsFunctionConversion(QualType FromType, QualType ToType,
}
/// Determine whether the conversion from FromType to ToType is a valid
+/// floating point conversion.
+///
+static bool IsFloatingPointConversion(Sema &S, QualType FromType,
+ QualType ToType) {
+ if (!FromType->isRealFloatingType() || !ToType->isRealFloatingType())
+ return false;
+ // FIXME: disable conversions between long double, __ibm128 and __float128
+ // if their representation is different until there is back end support
+ // We of course allow this conversion if long double is really double.
+
+ // Conversions between bfloat16 and float16 are currently not supported.
+ if ((FromType->isBFloat16Type() &&
+ (ToType->isFloat16Type() || ToType->isHalfType())) ||
+ (ToType->isBFloat16Type() &&
+ (FromType->isFloat16Type() || FromType->isHalfType())))
+ return false;
+
+ // Conversions between IEEE-quad and IBM-extended semantics are not
+ // permitted.
+ const llvm::fltSemantics &FromSem = S.Context.getFloatTypeSemantics(FromType);
+ const llvm::fltSemantics &ToSem = S.Context.getFloatTypeSemantics(ToType);
+ if ((&FromSem == &llvm::APFloat::PPCDoubleDouble() &&
+ &ToSem == &llvm::APFloat::IEEEquad()) ||
+ (&FromSem == &llvm::APFloat::IEEEquad() &&
+ &ToSem == &llvm::APFloat::PPCDoubleDouble()))
+ return false;
+ return true;
+}
+
+static bool IsVectorElementConversion(Sema &S, QualType FromType,
+ QualType ToType,
+ ImplicitConversionKind &ICK, Expr *From) {
+ if (S.Context.hasSameUnqualifiedType(FromType, ToType))
+ return true;
+
+ if (S.IsFloatingPointPromotion(FromType, ToType)) {
+ ICK = ICK_Floating_Promotion;
+ return true;
+ }
+
+ if (IsFloatingPointConversion(S, FromType, ToType)) {
+ ICK = ICK_Floating_Conversion;
+ return true;
+ }
+
+ if (ToType->isBooleanType() && FromType->isArithmeticType()) {
+ ICK = ICK_Boolean_Conversion;
+ return true;
+ }
+
+ if ((FromType->isRealFloatingType() && ToType->isIntegralType(S.Context)) ||
+ (FromType->isIntegralOrUnscopedEnumerationType() &&
+ ToType->isRealFloatingType())) {
+ ICK = ICK_Floating_Integral;
+ return true;
+ }
+
+ if (S.IsIntegralPromotion(From, FromType, ToType)) {
+ ICK = ICK_Integral_Promotion;
+ return true;
+ }
+
+ if (FromType->isIntegralOrUnscopedEnumerationType() &&
+ ToType->isIntegralType(S.Context)) {
+ ICK = ICK_Integral_Conversion;
+ return true;
+ }
+
+ return false;
+}
+
+/// Determine whether the conversion from FromType to ToType is a valid
/// vector conversion.
///
/// \param ICK Will be set to the vector conversion kind, if this is a vector
/// conversion.
static bool IsVectorConversion(Sema &S, QualType FromType, QualType ToType,
- ImplicitConversionKind &ICK, Expr *From,
+ ImplicitConversionKind &ICK,
+ ImplicitConversionKind &ElConv, Expr *From,
bool InOverloadResolution, bool CStyle) {
// We need at least one of these types to be a vector type to have a vector
// conversion.
@@ -1847,14 +1983,37 @@ static bool IsVectorConversion(Sema &S, QualType FromType, QualType ToType,
return false;
// There are no conversions between extended vector types, only identity.
- if (ToType->isExtVectorType()) {
- // There are no conversions between extended vector types other than the
- // identity conversion.
- if (FromType->isExtVectorType())
+ if (auto *ToExtType = ToType->getAs<ExtVectorType>()) {
+ if (auto *FromExtType = FromType->getAs<ExtVectorType>()) {
+ // HLSL allows implicit truncation of vector types.
+ if (S.getLangOpts().HLSL) {
+ unsigned FromElts = FromExtType->getNumElements();
+ unsigned ToElts = ToExtType->getNumElements();
+ if (FromElts < ToElts)
+ return false;
+ if (FromElts == ToElts)
+ ElConv = ICK_Identity;
+ else
+ ElConv = ICK_HLSL_Vector_Truncation;
+
+ QualType FromElTy = FromExtType->getElementType();
+ QualType ToElTy = ToExtType->getElementType();
+ if (S.Context.hasSameUnqualifiedType(FromElTy, ToElTy))
+ return true;
+ return IsVectorElementConversion(S, FromElTy, ToElTy, ICK, From);
+ }
+ // There are no conversions between extended vector types other than the
+ // identity conversion.
return false;
+ }
// Vector splat from any arithmetic type to a vector.
if (FromType->isArithmeticType()) {
+ if (S.getLangOpts().HLSL) {
+ ElConv = ICK_HLSL_Vector_Splat;
+ QualType ToElTy = ToExtType->getElementType();
+ return IsVectorElementConversion(S, FromType, ToElTy, ICK, From);
+ }
ICK = ICK_Vector_Splat;
return true;
}
@@ -1991,8 +2150,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
// A glvalue (3.10) of a non-function, non-array type T can
// be converted to a prvalue.
bool argIsLValue = From->isGLValue();
- if (argIsLValue &&
- !FromType->isFunctionType() && !FromType->isArrayType() &&
+ if (argIsLValue && !FromType->canDecayToPointerType() &&
S.Context.getCanonicalType(FromType) != S.Context.OverloadTy) {
SCS.First = ICK_Lvalue_To_Rvalue;
@@ -2007,6 +2165,19 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
// is T (C++ 4.1p1). C++ can't get here with class types; in C, we
// just strip the qualifiers because they don't matter.
FromType = FromType.getUnqualifiedType();
+ } else if (S.getLangOpts().HLSL && FromType->isConstantArrayType() &&
+ ToType->isArrayParameterType()) {
+ // HLSL constant array parameters do not decay, so if the argument is a
+ // constant array and the parameter is an ArrayParameterType we have special
+ // handling here.
+ FromType = S.Context.getArrayParameterType(FromType);
+ if (S.Context.getCanonicalType(FromType) !=
+ S.Context.getCanonicalType(ToType))
+ return false;
+
+ SCS.First = ICK_HLSL_Array_RValue;
+ SCS.setAllToTypes(ToType);
+ return true;
} else if (FromType->isArrayType()) {
// Array-to-pointer conversion (C++ 4.2)
SCS.First = ICK_Array_To_Pointer;
@@ -2057,6 +2228,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
// conversion.
bool IncompatibleObjC = false;
ImplicitConversionKind SecondICK = ICK_Identity;
+ ImplicitConversionKind DimensionICK = ICK_Identity;
if (S.Context.hasSameUnqualifiedType(FromType, ToType)) {
// The unqualified versions of the types are the same: there's no
// conversion to do.
@@ -2095,29 +2267,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
// Complex-real conversions (C99 6.3.1.7)
SCS.Second = ICK_Complex_Real;
FromType = ToType.getUnqualifiedType();
- } else if (FromType->isRealFloatingType() && ToType->isRealFloatingType()) {
- // FIXME: disable conversions between long double, __ibm128 and __float128
- // if their representation is different until there is back end support
- // We of course allow this conversion if long double is really double.
-
- // Conversions between bfloat16 and float16 are currently not supported.
- if ((FromType->isBFloat16Type() &&
- (ToType->isFloat16Type() || ToType->isHalfType())) ||
- (ToType->isBFloat16Type() &&
- (FromType->isFloat16Type() || FromType->isHalfType())))
- return false;
-
- // Conversions between IEEE-quad and IBM-extended semantics are not
- // permitted.
- const llvm::fltSemantics &FromSem =
- S.Context.getFloatTypeSemantics(FromType);
- const llvm::fltSemantics &ToSem = S.Context.getFloatTypeSemantics(ToType);
- if ((&FromSem == &llvm::APFloat::PPCDoubleDouble() &&
- &ToSem == &llvm::APFloat::IEEEquad()) ||
- (&FromSem == &llvm::APFloat::IEEEquad() &&
- &ToSem == &llvm::APFloat::PPCDoubleDouble()))
- return false;
-
+ } else if (IsFloatingPointConversion(S, FromType, ToType)) {
// Floating point conversions (C++ 4.8).
SCS.Second = ICK_Floating_Conversion;
FromType = ToType.getUnqualifiedType();
@@ -2132,7 +2282,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
} else if (S.IsBlockPointerConversion(FromType, ToType, FromType)) {
SCS.Second = ICK_Block_Pointer_Conversion;
} else if (AllowObjCWritebackConversion &&
- S.isObjCWritebackConversion(FromType, ToType, FromType)) {
+ S.ObjC().isObjCWritebackConversion(FromType, ToType, FromType)) {
SCS.Second = ICK_Writeback_Conversion;
} else if (S.IsPointerConversion(From, FromType, ToType, InOverloadResolution,
FromType, IncompatibleObjC)) {
@@ -2144,18 +2294,18 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
InOverloadResolution, FromType)) {
// Pointer to member conversions (4.11).
SCS.Second = ICK_Pointer_Member;
- } else if (IsVectorConversion(S, FromType, ToType, SecondICK, From,
- InOverloadResolution, CStyle)) {
+ } else if (IsVectorConversion(S, FromType, ToType, SecondICK, DimensionICK,
+ From, InOverloadResolution, CStyle)) {
SCS.Second = SecondICK;
+ SCS.Dimension = DimensionICK;
FromType = ToType.getUnqualifiedType();
} else if (!S.getLangOpts().CPlusPlus &&
S.Context.typesAreCompatible(ToType, FromType)) {
// Compatible conversions (Clang extension for C function overloading)
SCS.Second = ICK_Compatible_Conversion;
FromType = ToType.getUnqualifiedType();
- } else if (IsTransparentUnionStandardConversion(S, From, ToType,
- InOverloadResolution,
- SCS, CStyle)) {
+ } else if (IsTransparentUnionStandardConversion(
+ S, From, ToType, InOverloadResolution, SCS, CStyle)) {
SCS.Second = ICK_TransparentUnionConversion;
FromType = ToType;
} else if (tryAtomicConversion(S, From, ToType, InOverloadResolution, SCS,
@@ -2177,7 +2327,10 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
From->isIntegerConstantExpr(S.getASTContext())) {
SCS.Second = ICK_Compatible_Conversion;
FromType = ToType;
- } else if (ToType->isFixedPointType() || FromType->isFixedPointType()) {
+ } else if ((ToType->isFixedPointType() &&
+ FromType->isConvertibleToFixedPointType()) ||
+ (FromType->isFixedPointType() &&
+ ToType->isConvertibleToFixedPointType())) {
SCS.Second = ICK_Fixed_Point_Conversion;
FromType = ToType;
} else {
@@ -2287,10 +2440,6 @@ IsTransparentUnionStandardConversion(Sema &S, Expr* From,
return false;
}
-/// IsIntegralPromotion - Determines whether the conversion from the
-/// expression From (whose potentially-adjusted type is FromType) to
-/// ToType is an integral promotion (C++ 4.5). If so, returns true and
-/// sets PromotedType to the promoted type.
bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
const BuiltinType *To = ToType->getAs<BuiltinType>();
// All integers are built-in.
@@ -2447,12 +2596,15 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
return true;
}
+ // In HLSL an rvalue of integral type can be promoted to an rvalue of a larger
+ // integral type.
+ if (Context.getLangOpts().HLSL && FromType->isIntegerType() &&
+ ToType->isIntegerType())
+ return Context.getTypeSize(FromType) < Context.getTypeSize(ToType);
+
return false;
}
-/// IsFloatingPointPromotion - Determines whether the conversion from
-/// FromType to ToType is a floating point promotion (C++ 4.6). If so,
-/// returns true and sets PromotedType to the promoted type.
bool Sema::IsFloatingPointPromotion(QualType FromType, QualType ToType) {
if (const BuiltinType *FromBuiltin = FromType->getAs<BuiltinType>())
if (const BuiltinType *ToBuiltin = ToType->getAs<BuiltinType>()) {
@@ -2473,6 +2625,13 @@ bool Sema::IsFloatingPointPromotion(QualType FromType, QualType ToType) {
ToBuiltin->getKind() == BuiltinType::Ibm128))
return true;
+ // In HLSL, `half` promotes to `float` or `double`, regardless of whether
+ // or not native half types are enabled.
+ if (getLangOpts().HLSL && FromBuiltin->getKind() == BuiltinType::Half &&
+ (ToBuiltin->getKind() == BuiltinType::Float ||
+ ToBuiltin->getKind() == BuiltinType::Double))
+ return true;
+
// Half can be promoted to float.
if (!getLangOpts().NativeHalfType &&
FromBuiltin->getKind() == BuiltinType::Half &&
@@ -2483,11 +2642,6 @@ bool Sema::IsFloatingPointPromotion(QualType FromType, QualType ToType) {
return false;
}
-/// Determine if a conversion is a complex promotion.
-///
-/// A complex promotion is defined as a complex -> complex conversion
-/// where the conversion between the underlying real types is a
-/// floating-point or integral promotion.
bool Sema::IsComplexPromotion(QualType FromType, QualType ToType) {
const ComplexType *FromComplex = FromType->getAs<ComplexType>();
if (!FromComplex)
@@ -2566,22 +2720,6 @@ static bool isNullPointerConstantForConversion(Expr *Expr,
: Expr::NPC_ValueDependentIsNull);
}
-/// IsPointerConversion - Determines whether the conversion of the
-/// expression From, which has the (possibly adjusted) type FromType,
-/// can be converted to the type ToType via a pointer conversion (C++
-/// 4.10). If so, returns true and places the converted type (that
-/// might differ from ToType in its cv-qualifiers at some level) into
-/// ConvertedType.
-///
-/// This routine also supports conversions to and from block pointers
-/// and conversions with Objective-C's 'id', 'id<protocols...>', and
-/// pointers to interfaces. FIXME: Once we've determined the
-/// appropriate overloading rules for Objective-C, we may want to
-/// split the Objective-C checks into a different routine; however,
-/// GCC seems to consider all of these conversions to be pointer
-/// conversions, so for now they live here. IncompatibleObjC will be
-/// set if the conversion is an allowed Objective-C conversion that
-/// should result in a warning.
bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType,
@@ -2730,9 +2868,6 @@ static QualType AdoptQualifiers(ASTContext &Context, QualType T, Qualifiers Qs){
return Context.getQualifiedType(T.getUnqualifiedType(), Qs);
}
-/// isObjCPointerConversion - Determines whether this is an
-/// Objective-C pointer conversion. Subroutine of IsPointerConversion,
-/// with the same arguments and return values.
bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType,
bool &IncompatibleObjC) {
@@ -2904,73 +3039,6 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
return false;
}
-/// Determine whether this is an Objective-C writeback conversion,
-/// used for parameter passing when performing automatic reference counting.
-///
-/// \param FromType The type we're converting form.
-///
-/// \param ToType The type we're converting to.
-///
-/// \param ConvertedType The type that will be produced after applying
-/// this conversion.
-bool Sema::isObjCWritebackConversion(QualType FromType, QualType ToType,
- QualType &ConvertedType) {
- if (!getLangOpts().ObjCAutoRefCount ||
- Context.hasSameUnqualifiedType(FromType, ToType))
- return false;
-
- // Parameter must be a pointer to __autoreleasing (with no other qualifiers).
- QualType ToPointee;
- if (const PointerType *ToPointer = ToType->getAs<PointerType>())
- ToPointee = ToPointer->getPointeeType();
- else
- return false;
-
- Qualifiers ToQuals = ToPointee.getQualifiers();
- if (!ToPointee->isObjCLifetimeType() ||
- ToQuals.getObjCLifetime() != Qualifiers::OCL_Autoreleasing ||
- !ToQuals.withoutObjCLifetime().empty())
- return false;
-
- // Argument must be a pointer to __strong to __weak.
- QualType FromPointee;
- if (const PointerType *FromPointer = FromType->getAs<PointerType>())
- FromPointee = FromPointer->getPointeeType();
- else
- return false;
-
- Qualifiers FromQuals = FromPointee.getQualifiers();
- if (!FromPointee->isObjCLifetimeType() ||
- (FromQuals.getObjCLifetime() != Qualifiers::OCL_Strong &&
- FromQuals.getObjCLifetime() != Qualifiers::OCL_Weak))
- return false;
-
- // Make sure that we have compatible qualifiers.
- FromQuals.setObjCLifetime(Qualifiers::OCL_Autoreleasing);
- if (!ToQuals.compatiblyIncludes(FromQuals))
- return false;
-
- // Remove qualifiers from the pointee type we're converting from; they
- // aren't used in the compatibility check belong, and we'll be adding back
- // qualifiers (with __autoreleasing) if the compatibility check succeeds.
- FromPointee = FromPointee.getUnqualifiedType();
-
- // The unqualified form of the pointee types must be compatible.
- ToPointee = ToPointee.getUnqualifiedType();
- bool IncompatibleObjC;
- if (Context.typesAreCompatible(FromPointee, ToPointee))
- FromPointee = ToPointee;
- else if (!isObjCPointerConversion(FromPointee, ToPointee, FromPointee,
- IncompatibleObjC))
- return false;
-
- /// Construct the type we're converting to, which is a pointer to
- /// __autoreleasing pointee.
- FromPointee = Context.getQualifiedType(FromPointee, FromQuals);
- ConvertedType = Context.getPointerType(FromPointee);
- return true;
-}
-
bool Sema::IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType) {
QualType ToPointeeType;
@@ -3086,9 +3154,6 @@ static const FunctionProtoType *tryGetFunctionProtoType(QualType FromType) {
return nullptr;
}
-/// HandleFunctionTypeMismatch - Gives diagnostic information for differeing
-/// function types. Catches different number of parameter, mismatch in
-/// parameter types, and different return types.
void Sema::HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType) {
// If either type is not valid, include no extra info.
@@ -3184,13 +3249,6 @@ void Sema::HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
PDiag << ft_default;
}
-/// FunctionParamTypesAreEqual - This routine checks two function proto types
-/// for equality of their parameter types. Caller has already checked that
-/// they have same number of parameters. If the parameters are different,
-/// ArgPos will have the parameter index of the first different parameter.
-/// If `Reversed` is true, the parameters of `NewType` will be compared in
-/// reverse order. That's useful if one of the functions is being used as a C++20
-/// synthesized operator overload with a reversed parameter order.
bool Sema::FunctionParamTypesAreEqual(ArrayRef<QualType> Old,
ArrayRef<QualType> New, unsigned *ArgPos,
bool Reversed) {
@@ -3247,12 +3305,6 @@ bool Sema::FunctionNonObjectParamTypesAreEqual(const FunctionDecl *OldFunction,
ArgPos, Reversed);
}
-/// CheckPointerConversion - Check the pointer conversion from the
-/// expression From to the type ToType. This routine checks for
-/// ambiguous or inaccessible derived-to-base pointer
-/// conversions for which IsPointerConversion has already returned
-/// true. It returns true and produces a diagnostic if there was an
-/// error, or returns false otherwise.
bool Sema::CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
@@ -3334,11 +3386,6 @@ bool Sema::CheckPointerConversion(Expr *From, QualType ToType,
return false;
}
-/// IsMemberPointerConversion - Determines whether the conversion of the
-/// expression From, which has the (possibly adjusted) type FromType, can be
-/// converted to the type ToType via a member pointer conversion (C++ 4.11).
-/// If so, returns true and places the converted type (that might differ from
-/// ToType in its cv-qualifiers at some level) into ConvertedType.
bool Sema::IsMemberPointerConversion(Expr *From, QualType FromType,
QualType ToType,
bool InOverloadResolution,
@@ -3375,12 +3422,6 @@ bool Sema::IsMemberPointerConversion(Expr *From, QualType FromType,
return false;
}
-/// CheckMemberPointerConversion - Check the member pointer conversion from the
-/// expression From to the type ToType. This routine checks for ambiguous or
-/// virtual or inaccessible base-to-derived member pointer conversions
-/// for which IsMemberPointerConversion has already returned true. It returns
-/// true and produces a diagnostic if there was an error, or returns false
-/// otherwise.
bool Sema::CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
@@ -3533,13 +3574,6 @@ static bool isQualificationConversionStep(QualType FromType, QualType ToType,
return true;
}
-/// IsQualificationConversion - Determines whether the conversion from
-/// an rvalue of type FromType to ToType is a qualification conversion
-/// (C++ 4.4).
-///
-/// \param ObjCLifetimeConversion Output parameter that will be set to indicate
-/// when the qualification conversion involves a change in the Objective-C
-/// object lifetime.
bool
Sema::IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion) {
@@ -4490,7 +4524,6 @@ CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
? ImplicitConversionSequence::Better
: ImplicitConversionSequence::Worse;
}
-
return ImplicitConversionSequence::Indistinguishable;
}
@@ -4780,12 +4813,6 @@ static QualType withoutUnaligned(ASTContext &Ctx, QualType T) {
return Ctx.getQualifiedType(T, Q);
}
-/// CompareReferenceRelationship - Compare the two types T1 and T2 to
-/// determine whether they are reference-compatible,
-/// reference-related, or incompatible, for use in C++ initialization by
-/// reference (C++ [dcl.ref.init]p4). Neither type can be a reference
-/// type, and the first type (T1) is the pointee type of the reference
-/// type being initialized.
Sema::ReferenceCompareResult
Sema::CompareReferenceRelationship(SourceLocation Loc,
QualType OrigT1, QualType OrigT2,
@@ -5040,6 +5067,7 @@ TryReferenceInit(Sema &S, Expr *Init, QualType DeclType,
: (RefConv & Sema::ReferenceConversions::ObjC)
? ICK_Compatible_Conversion
: ICK_Identity;
+ ICS.Standard.Dimension = ICK_Identity;
// FIXME: As a speculative fix to a defect introduced by CWG2352, we rank
// a reference binding that performs a non-top-level qualification
// conversion as a qualification conversion, not as an identity conversion.
@@ -5899,8 +5927,6 @@ TryContextuallyConvertToBool(Sema &S, Expr *From) {
/*AllowObjCConversionOnExplicit=*/false);
}
-/// PerformContextuallyConvertToBool - Perform a contextual conversion
-/// of the expression From to bool (C++0x [conv]p3).
ExprResult Sema::PerformContextuallyConvertToBool(Expr *From) {
if (checkPlaceholderForOverload(*this, From))
return ExprError();
@@ -5957,6 +5983,7 @@ static bool CheckConvertedConstantConversions(Sema &S,
case ICK_Vector_Conversion:
case ICK_SVE_Vector_Conversion:
case ICK_RVV_Vector_Conversion:
+ case ICK_HLSL_Vector_Splat:
case ICK_Vector_Splat:
case ICK_Complex_Real:
case ICK_Block_Pointer_Conversion:
@@ -5966,11 +5993,13 @@ static bool CheckConvertedConstantConversions(Sema &S,
case ICK_C_Only_Conversion:
case ICK_Incompatible_Pointer_Conversion:
case ICK_Fixed_Point_Conversion:
+ case ICK_HLSL_Vector_Truncation:
return false;
case ICK_Lvalue_To_Rvalue:
case ICK_Array_To_Pointer:
case ICK_Function_To_Pointer:
+ case ICK_HLSL_Array_RValue:
llvm_unreachable("found a first conversion kind in Second");
case ICK_Function_Conversion:
@@ -6172,9 +6201,6 @@ ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
return R;
}
-/// EvaluateConvertedConstantExpression - Evaluate an Expression
-/// That is a converted constant expression
-/// (which was built with BuildConvertedConstantExpression)
ExprResult
Sema::EvaluateConvertedConstantExpression(Expr *E, QualType T, APValue &Value,
Sema::CCEKind CCE, bool RequireInt,
@@ -6210,6 +6236,7 @@ Sema::EvaluateConvertedConstantExpression(Expr *E, QualType T, APValue &Value,
// by this point.
assert(CE->getResultStorageKind() != ConstantResultStorageKind::None &&
"ConstantExpr has no value associated with it");
+ (void)CE;
} else {
E = ConstantExpr::Create(Context, Result.get(), Value);
}
@@ -6243,6 +6270,7 @@ Sema::EvaluateConvertedConstantExpression(Expr *E, QualType T, APValue &Value,
static void dropPointerConversion(StandardConversionSequence &SCS) {
if (SCS.Second == ICK_Pointer_Conversion) {
SCS.Second = ICK_Identity;
+ SCS.Dimension = ICK_Identity;
SCS.Third = ICK_Identity;
SCS.ToTypePtrs[2] = SCS.ToTypePtrs[1] = SCS.ToTypePtrs[0];
}
@@ -6284,9 +6312,6 @@ TryContextuallyConvertToObjCPointer(Sema &S, Expr *From) {
return ICS;
}
-/// PerformContextuallyConvertToObjCPointer - Perform a contextual
-/// conversion of the expression From to an Objective-C pointer type.
-/// Returns a valid but null ExprResult if no conversion sequence exists.
ExprResult Sema::PerformContextuallyConvertToObjCPointer(Expr *From) {
if (checkPlaceholderForOverload(*this, From))
return ExprError();
@@ -6344,17 +6369,20 @@ ExprResult Sema::InitializeExplicitObjectArgument(Sema &S, Expr *Obj,
Obj->getExprLoc(), Obj);
}
-static void PrepareExplicitObjectArgument(Sema &S, CXXMethodDecl *Method,
+static bool PrepareExplicitObjectArgument(Sema &S, CXXMethodDecl *Method,
Expr *Object, MultiExprArg &Args,
SmallVectorImpl<Expr *> &NewArgs) {
assert(Method->isExplicitObjectMemberFunction() &&
"Method is not an explicit member function");
assert(NewArgs.empty() && "NewArgs should be empty");
+
NewArgs.reserve(Args.size() + 1);
Expr *This = GetExplicitObjectExpr(S, Object, Method);
NewArgs.push_back(This);
NewArgs.append(Args.begin(), Args.end());
Args = NewArgs;
+ return S.DiagnoseInvalidExplicitObjectParameterInLambda(
+ Method, Object->getBeginLoc());
}
/// Determine whether the provided type is an integral type, or an enumeration
@@ -6415,11 +6443,14 @@ diagnoseNoViableConversion(Sema &SemaRef, SourceLocation Loc, Expr *&From,
HadMultipleCandidates);
if (Result.isInvalid())
return true;
- // Record usage of conversion in an implicit cast.
- From = ImplicitCastExpr::Create(SemaRef.Context, Result.get()->getType(),
- CK_UserDefinedConversion, Result.get(),
- nullptr, Result.get()->getValueKind(),
- SemaRef.CurFPFeatureOverrides());
+
+ // Replace the conversion with a RecoveryExpr, so we don't try to
+ // instantiate it later, but can further diagnose here.
+ Result = SemaRef.CreateRecoveryExpr(From->getBeginLoc(), From->getEndLoc(),
+ From, Result.get()->getType());
+ if (Result.isInvalid())
+ return true;
+ From = Result.get();
}
return false;
}
@@ -6735,14 +6766,32 @@ static bool IsAcceptableNonMemberOperatorCandidate(ASTContext &Context,
return false;
}
-/// AddOverloadCandidate - Adds the given function to the set of
-/// candidate functions, using the given function call arguments. If
-/// @p SuppressUserConversions, then don't allow user-defined
-/// conversions via constructors or conversion operators.
-///
-/// \param PartialOverloading true if we are performing "partial" overloading
-/// based on an incomplete set of function arguments. This feature is used by
-/// code completion.
+static bool isNonViableMultiVersionOverload(FunctionDecl *FD) {
+ if (FD->isTargetMultiVersionDefault())
+ return false;
+
+ if (!FD->getASTContext().getTargetInfo().getTriple().isAArch64())
+ return FD->isTargetMultiVersion();
+
+ if (!FD->isMultiVersion())
+ return false;
+
+ // Among multiple target versions consider either the default,
+ // or the first non-default in the absence of default version.
+ unsigned SeenAt = 0;
+ unsigned I = 0;
+ bool HasDefault = false;
+ FD->getASTContext().forEachMultiversionedFunctionVersion(
+ FD, [&](const FunctionDecl *CurFD) {
+ if (FD == CurFD)
+ SeenAt = I;
+ else if (CurFD->isTargetMultiVersionDefault())
+ HasDefault = true;
+ ++I;
+ });
+ return HasDefault || SeenAt != 0;
+}
+
void Sema::AddOverloadCandidate(
FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions,
@@ -6808,9 +6857,7 @@ void Sema::AddOverloadCandidate(
Candidate.Viable = true;
Candidate.RewriteKind =
CandidateSet.getRewriteInfo().getRewriteKind(Function, PO);
- Candidate.IsSurrogate = false;
Candidate.IsADLCandidate = IsADLCandidate;
- Candidate.IgnoreObjectArgument = false;
Candidate.ExplicitCallArguments = Args.size();
// Explicit functions are not actually candidates at all if we're not
@@ -6840,11 +6887,7 @@ void Sema::AddOverloadCandidate(
}
}
- if (Function->isMultiVersion() &&
- ((Function->hasAttr<TargetAttr>() &&
- !Function->getAttr<TargetAttr>()->isDefaultVersion()) ||
- (Function->hasAttr<TargetVersionAttr>() &&
- !Function->getAttr<TargetVersionAttr>()->isDefaultVersion()))) {
+ if (isNonViableMultiVersionOverload(Function)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_non_default_multiversion_function;
return;
@@ -6931,7 +6974,7 @@ void Sema::AddOverloadCandidate(
// inferred for the member automatically, based on the bases and fields of
// the class.
if (!(Caller && Caller->isImplicit()) &&
- !IsAllowedCUDACall(Caller, Function)) {
+ !CUDA().IsAllowedCall(Caller, Function)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_target;
return;
@@ -7022,7 +7065,7 @@ Sema::SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance,
// a consumed argument.
if (argExpr->hasPlaceholderType(BuiltinType::ARCUnbridgedCast) &&
!param->hasAttr<CFConsumedAttr>())
- argExpr = stripARCUnbridgedCast(argExpr);
+ argExpr = ObjC().stripARCUnbridgedCast(argExpr);
// If the parameter is __unknown_anytype, move on to the next method.
if (param->getType() == Context.UnknownAnyTy) {
@@ -7245,8 +7288,6 @@ bool Sema::diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
});
}
-/// Add all of the function declarations in the given function set to
-/// the overload candidate set.
void Sema::AddFunctionCandidates(const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
@@ -7315,8 +7356,6 @@ void Sema::AddFunctionCandidates(const UnresolvedSetImpl &Fns,
}
}
-/// AddMethodCandidate - Adds a named decl (which is some kind of
-/// method) as a method candidate to the given overload set.
void Sema::AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
@@ -7343,13 +7382,6 @@ void Sema::AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType,
}
}
-/// AddMethodCandidate - Adds the given C++ member function to the set
-/// of candidate functions, using the given function call arguments
-/// and the object argument (@c Object). For example, in a call
-/// @c o.f(a1,a2), @c Object will contain @c o and @c Args will contain
-/// both @c a1 and @c a2. If @p SuppressUserConversions, then don't
-/// allow user-defined conversions via constructors or conversion
-/// operators.
void
Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
@@ -7387,12 +7419,24 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
Candidate.Function = Method;
Candidate.RewriteKind =
CandidateSet.getRewriteInfo().getRewriteKind(Method, PO);
- Candidate.IsSurrogate = false;
- Candidate.IgnoreObjectArgument = false;
+ Candidate.TookAddressOfOverload =
+ CandidateSet.getKind() == OverloadCandidateSet::CSK_AddressOfOverloadSet;
Candidate.ExplicitCallArguments = Args.size();
- unsigned NumParams = Method->getNumExplicitParams();
- unsigned ExplicitOffset = Method->isExplicitObjectMemberFunction() ? 1 : 0;
+ bool IgnoreExplicitObject =
+ (Method->isExplicitObjectMemberFunction() &&
+ CandidateSet.getKind() ==
+ OverloadCandidateSet::CSK_AddressOfOverloadSet);
+ bool ImplicitObjectMethodTreatedAsStatic =
+ CandidateSet.getKind() ==
+ OverloadCandidateSet::CSK_AddressOfOverloadSet &&
+ Method->isImplicitObjectMemberFunction();
+
+ unsigned ExplicitOffset =
+ !IgnoreExplicitObject && Method->isExplicitObjectMemberFunction() ? 1 : 0;
+
+ unsigned NumParams = Method->getNumParams() - ExplicitOffset +
+ int(ImplicitObjectMethodTreatedAsStatic);
// (C++ 13.3.2p2): A candidate function having fewer than m
// parameters is viable only if it has an ellipsis in its parameter
@@ -7410,7 +7454,10 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
// (8.3.6). For the purposes of overload resolution, the
// parameter list is truncated on the right, so that there are
// exactly m parameters.
- unsigned MinRequiredArgs = Method->getMinRequiredExplicitArguments();
+ unsigned MinRequiredArgs = Method->getMinRequiredArguments() -
+ ExplicitOffset +
+ int(ImplicitObjectMethodTreatedAsStatic);
+
if (Args.size() < MinRequiredArgs && !PartialOverloading) {
// Not enough arguments.
Candidate.Viable = false;
@@ -7449,7 +7496,8 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
// (CUDA B.1): Check for invalid calls between targets.
if (getLangOpts().CUDA)
- if (!IsAllowedCUDACall(getCurFunctionDecl(/*AllowLambda=*/true), Method)) {
+ if (!CUDA().IsAllowedCall(getCurFunctionDecl(/*AllowLambda=*/true),
+ Method)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_target;
return;
@@ -7479,7 +7527,14 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
// exist for each argument an implicit conversion sequence
// (13.3.3.1) that converts that argument to the corresponding
// parameter of F.
- QualType ParamType = Proto->getParamType(ArgIdx + ExplicitOffset);
+ QualType ParamType;
+ if (ImplicitObjectMethodTreatedAsStatic) {
+ ParamType = ArgIdx == 0
+ ? Method->getFunctionObjectParameterReferenceType()
+ : Proto->getParamType(ArgIdx - 1);
+ } else {
+ ParamType = Proto->getParamType(ArgIdx + ExplicitOffset);
+ }
Candidate.Conversions[ConvIdx]
= TryCopyInitialization(*this, Args[ArgIdx], ParamType,
SuppressUserConversions,
@@ -7507,19 +7562,12 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
return;
}
- if (Method->isMultiVersion() &&
- ((Method->hasAttr<TargetAttr>() &&
- !Method->getAttr<TargetAttr>()->isDefaultVersion()) ||
- (Method->hasAttr<TargetVersionAttr>() &&
- !Method->getAttr<TargetVersionAttr>()->isDefaultVersion()))) {
+ if (isNonViableMultiVersionOverload(Method)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_non_default_multiversion_function;
}
}
-/// Add a C++ member function template as a candidate to the candidate
-/// set, using template argument deduction to produce an appropriate member
-/// function template specialization.
void Sema::AddMethodTemplateCandidate(
FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
@@ -7545,12 +7593,14 @@ void Sema::AddMethodTemplateCandidate(
if (TemplateDeductionResult Result = DeduceTemplateArguments(
MethodTmpl, ExplicitTemplateArgs, Args, Specialization, Info,
PartialOverloading, /*AggregateDeductionCandidate=*/false, ObjectType,
- ObjectClassification, [&](ArrayRef<QualType> ParamTypes) {
+ ObjectClassification,
+ [&](ArrayRef<QualType> ParamTypes) {
return CheckNonDependentConversions(
MethodTmpl, ParamTypes, Args, CandidateSet, Conversions,
SuppressUserConversions, ActingContext, ObjectType,
ObjectClassification, PO);
- })) {
+ });
+ Result != TemplateDeductionResult::Success) {
OverloadCandidate &Candidate =
CandidateSet.addCandidate(Conversions.size(), Conversions);
Candidate.FoundDecl = FoundDecl;
@@ -7563,7 +7613,7 @@ void Sema::AddMethodTemplateCandidate(
cast<CXXMethodDecl>(Candidate.Function)->isStatic() ||
ObjectType.isNull();
Candidate.ExplicitCallArguments = Args.size();
- if (Result == TDK_NonDependentConversionFailure)
+ if (Result == TemplateDeductionResult::NonDependentConversionFailure)
Candidate.FailureKind = ovl_fail_bad_conversion;
else {
Candidate.FailureKind = ovl_fail_bad_deduction;
@@ -7590,9 +7640,6 @@ static bool isNonDependentlyExplicit(FunctionTemplateDecl *FTD) {
return ExplicitSpecifier::getFromDecl(FTD->getTemplatedDecl()).isExplicit();
}
-/// Add a C++ function template specialization as a candidate
-/// in the candidate set, using template argument deduction to produce
-/// an appropriate function template specialization.
void Sema::AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
@@ -7623,7 +7670,8 @@ void Sema::AddTemplateOverloadCandidate(
// functions. In such a case, the candidate functions generated from each
// function template are combined with the set of non-template candidate
// functions.
- TemplateDeductionInfo Info(CandidateSet.getLocation());
+ TemplateDeductionInfo Info(CandidateSet.getLocation(),
+ FunctionTemplate->getTemplateDepth());
FunctionDecl *Specialization = nullptr;
ConversionSequenceList Conversions;
if (TemplateDeductionResult Result = DeduceTemplateArguments(
@@ -7635,7 +7683,8 @@ void Sema::AddTemplateOverloadCandidate(
return CheckNonDependentConversions(
FunctionTemplate, ParamTypes, Args, CandidateSet, Conversions,
SuppressUserConversions, nullptr, QualType(), {}, PO);
- })) {
+ });
+ Result != TemplateDeductionResult::Success) {
OverloadCandidate &Candidate =
CandidateSet.addCandidate(Conversions.size(), Conversions);
Candidate.FoundDecl = FoundDecl;
@@ -7651,7 +7700,7 @@ void Sema::AddTemplateOverloadCandidate(
isa<CXXMethodDecl>(Candidate.Function) &&
!isa<CXXConstructorDecl>(Candidate.Function);
Candidate.ExplicitCallArguments = Args.size();
- if (Result == TDK_NonDependentConversionFailure)
+ if (Result == TemplateDeductionResult::NonDependentConversionFailure)
Candidate.FailureKind = ovl_fail_bad_conversion;
else {
Candidate.FailureKind = ovl_fail_bad_deduction;
@@ -7671,9 +7720,6 @@ void Sema::AddTemplateOverloadCandidate(
Info.AggregateDeductionCandidateHasMismatchedArity);
}
-/// Check that implicit conversion sequences can be formed for each argument
-/// whose corresponding parameter has a non-dependent type, per DR1391's
-/// [temp.deduct.call]p10.
bool Sema::CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
@@ -7718,8 +7764,8 @@ bool Sema::CheckNonDependentConversions(
unsigned Offset =
Method && Method->hasCXXExplicitFunctionObjectParameter() ? 1 : 0;
- for (unsigned I = 0, N = std::min(ParamTypes.size(), Args.size()); I != N;
- ++I) {
+ for (unsigned I = 0, N = std::min(ParamTypes.size() - Offset, Args.size());
+ I != N; ++I) {
QualType ParamType = ParamTypes[I + Offset];
if (!ParamType->isDependentType()) {
unsigned ConvIdx;
@@ -7789,12 +7835,6 @@ static bool isAllowableExplicitConversion(Sema &S,
IncompatibleObjC);
}
-/// AddConversionCandidate - Add a C++ conversion function as a
-/// candidate in the candidate set (C++ [over.match.conv],
-/// C++ [over.match.copy]). From is the expression we're converting from,
-/// and ToType is the type that we're eventually trying to convert to
-/// (which may or may not be the same type as the type that the
-/// conversion function produces).
void Sema::AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
@@ -7839,8 +7879,6 @@ void Sema::AddConversionCandidate(
OverloadCandidate &Candidate = CandidateSet.addCandidate(1);
Candidate.FoundDecl = FoundDecl;
Candidate.Function = Conversion;
- Candidate.IsSurrogate = false;
- Candidate.IgnoreObjectArgument = false;
Candidate.FinalConversion.setAsIdentityConversion();
Candidate.FinalConversion.setFromType(ConvType);
Candidate.FinalConversion.setAllToTypes(ToType);
@@ -7993,21 +8031,12 @@ void Sema::AddConversionCandidate(
return;
}
- if (Conversion->isMultiVersion() &&
- ((Conversion->hasAttr<TargetAttr>() &&
- !Conversion->getAttr<TargetAttr>()->isDefaultVersion()) ||
- (Conversion->hasAttr<TargetVersionAttr>() &&
- !Conversion->getAttr<TargetVersionAttr>()->isDefaultVersion()))) {
+ if (isNonViableMultiVersionOverload(Conversion)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_non_default_multiversion_function;
}
}
-/// Adds a conversion function template specialization
-/// candidate to the overload set, using template argument deduction
-/// to deduce the template arguments of the conversion function
-/// template from the type that we are converting to (C++
-/// [temp.deduct.conv]).
void Sema::AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingDC, Expr *From, QualType ToType,
@@ -8038,14 +8067,13 @@ void Sema::AddTemplateConversionCandidate(
CXXConversionDecl *Specialization = nullptr;
if (TemplateDeductionResult Result = DeduceTemplateArguments(
FunctionTemplate, ObjectType, ObjectClassification, ToType,
- Specialization, Info)) {
+ Specialization, Info);
+ Result != TemplateDeductionResult::Success) {
OverloadCandidate &Candidate = CandidateSet.addCandidate();
Candidate.FoundDecl = FoundDecl;
Candidate.Function = FunctionTemplate->getTemplatedDecl();
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_deduction;
- Candidate.IsSurrogate = false;
- Candidate.IgnoreObjectArgument = false;
Candidate.ExplicitCallArguments = 1;
Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result,
Info);
@@ -8060,11 +8088,6 @@ void Sema::AddTemplateConversionCandidate(
AllowExplicit, AllowResultConversion);
}
-/// AddSurrogateCandidate - Adds a "surrogate" candidate function that
-/// converts the given @c Object to a function pointer via the
-/// conversion function @c Conversion, and then attempts to call it
-/// with the given arguments (C++ [over.call.object]p2-4). Proto is
-/// the type of function that we'll eventually be calling.
void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
@@ -8083,9 +8106,8 @@ void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
Candidate.FoundDecl = FoundDecl;
Candidate.Function = nullptr;
Candidate.Surrogate = Conversion;
- Candidate.Viable = true;
Candidate.IsSurrogate = true;
- Candidate.IgnoreObjectArgument = false;
+ Candidate.Viable = true;
Candidate.ExplicitCallArguments = Args.size();
// Determine the implicit conversion sequence for the implicit
@@ -8191,8 +8213,6 @@ void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
}
}
-/// Add all of the non-member operator function declarations in the given
-/// function set to the overload candidate set.
void Sema::AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
@@ -8233,14 +8253,6 @@ void Sema::AddNonMemberOperatorCandidates(
}
}
-/// Add overload candidates for overloaded operators that are
-/// member functions.
-///
-/// Add the overloaded operator candidates that are member functions
-/// for the operator Op that was used in an operator expression such
-/// as "x Op y". , Args/NumArgs provides the operator arguments, and
-/// CandidateSet will store the added overload candidates. (C++
-/// [over.match.oper]).
void Sema::AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc,
ArrayRef<Expr *> Args,
@@ -8289,14 +8301,6 @@ void Sema::AddMemberOperatorCandidates(OverloadedOperatorKind Op,
}
}
-/// AddBuiltinCandidate - Add a candidate for a built-in
-/// operator. ResultTy and ParamTys are the result and parameter types
-/// of the built-in candidate, respectively. Args and NumArgs are the
-/// arguments being passed to the candidate. IsAssignmentOperator
-/// should be true when this built-in candidate is an assignment
-/// operator. NumContextualBoolArguments is the number of arguments
-/// (at the beginning of the argument list) that will be contextually
-/// converted to bool.
void Sema::AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator,
@@ -8309,8 +8313,6 @@ void Sema::AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidate &Candidate = CandidateSet.addCandidate(Args.size());
Candidate.FoundDecl = DeclAccessPair::make(nullptr, AS_none);
Candidate.Function = nullptr;
- Candidate.IsSurrogate = false;
- Candidate.IgnoreObjectArgument = false;
std::copy(ParamTys, ParamTys + Args.size(), Candidate.BuiltinParamTypes);
// Determine the implicit conversion sequences for each of the
@@ -8381,6 +8383,9 @@ class BuiltinCandidateTypeSet {
/// candidates.
TypeSet MatrixTypes;
+ /// The set of _BitInt types that will be used in the built-in candidates.
+ TypeSet BitIntTypes;
+
/// A flag indicating non-record types are viable candidates
bool HasNonRecordTypes;
@@ -8429,6 +8434,7 @@ public:
}
llvm::iterator_range<iterator> vector_types() { return VectorTypes; }
llvm::iterator_range<iterator> matrix_types() { return MatrixTypes; }
+ llvm::iterator_range<iterator> bitint_types() { return BitIntTypes; }
bool containsMatrixType(QualType Ty) const { return MatrixTypes.count(Ty); }
bool hasNonRecordTypes() { return HasNonRecordTypes; }
@@ -8600,6 +8606,9 @@ BuiltinCandidateTypeSet::AddTypesConvertedFrom(QualType Ty,
} else if (Ty->isEnumeralType()) {
HasArithmeticOrEnumeralTypes = true;
EnumerationTypes.insert(Ty);
+ } else if (Ty->isBitIntType()) {
+ HasArithmeticOrEnumeralTypes = true;
+ BitIntTypes.insert(Ty);
} else if (Ty->isVectorType()) {
// We treat vector types as arithmetic types in many contexts as an
// extension.
@@ -8778,7 +8787,7 @@ class BuiltinOperatorOverloadBuilder {
SmallVectorImpl<BuiltinCandidateTypeSet> &CandidateTypes;
OverloadCandidateSet &CandidateSet;
- static constexpr int ArithmeticTypesCap = 24;
+ static constexpr int ArithmeticTypesCap = 26;
SmallVector<CanQualType, ArithmeticTypesCap> ArithmeticTypes;
// Define some indices used to iterate over the arithmetic types in
@@ -8820,6 +8829,20 @@ class BuiltinOperatorOverloadBuilder {
(S.Context.getAuxTargetInfo() &&
S.Context.getAuxTargetInfo()->hasInt128Type()))
ArithmeticTypes.push_back(S.Context.UnsignedInt128Ty);
+
+ /// We add candidates for the unique, unqualified _BitInt types present in
+ /// the candidate type set. The candidate set already handled ensuring the
+ /// type is unqualified and canonical, but because we're adding from N
+ /// different sets, we need to do some extra work to unique things. Insert
+ /// the candidates into a unique set, then move from that set into the list
+ /// of arithmetic types.
+ llvm::SmallSetVector<CanQualType, 2> BitIntCandidates;
+ llvm::for_each(CandidateTypes, [&BitIntCandidates](
+ BuiltinCandidateTypeSet &Candidate) {
+ for (QualType BitTy : Candidate.bitint_types())
+ BitIntCandidates.insert(CanQualType::CreateUnsafe(BitTy));
+ });
+ llvm::move(BitIntCandidates, std::back_inserter(ArithmeticTypes));
LastPromotedIntegralType = ArithmeticTypes.size();
LastPromotedArithmeticType = ArithmeticTypes.size();
// End of promoted types.
@@ -8840,7 +8863,11 @@ class BuiltinOperatorOverloadBuilder {
// End of integral types.
// FIXME: What about complex? What about half?
- assert(ArithmeticTypes.size() <= ArithmeticTypesCap &&
+ // We don't know for sure how many bit-precise candidates were involved, so
+ // we subtract those from the total when testing whether we're under the
+ // cap or not.
+ assert(ArithmeticTypes.size() - BitIntCandidates.size() <=
+ ArithmeticTypesCap &&
"Enough inline storage for all arithmetic types.");
}
@@ -9729,11 +9756,6 @@ public:
} // end anonymous namespace
-/// AddBuiltinOperatorCandidates - Add the appropriate built-in
-/// operator overloads to the candidate set (C++ [over.built]), based
-/// on the operator @p Op and the arguments given. For example, if the
-/// operator is a binary '+', this routine might add "int
-/// operator+(int, int)" to cover integer addition.
void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc,
ArrayRef<Expr *> Args,
@@ -9931,13 +9953,6 @@ void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
}
}
-/// Add function candidates found via argument-dependent lookup
-/// to the set of overloading candidates.
-///
-/// This routine performs argument-dependent name lookup based on the
-/// given function name (which may also be an operator name) and adds
-/// all of the overload candidates found by ADL to the overload
-/// candidate set (C++ [basic.lookup.argdep]).
void
Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
@@ -10187,7 +10202,7 @@ static bool sameFunctionParameterTypeLists(Sema &S,
FunctionDecl *Fn1 = Cand1.Function;
FunctionDecl *Fn2 = Cand2.Function;
- if (Fn1->isVariadic() != Fn1->isVariadic())
+ if (Fn1->isVariadic() != Fn2->isVariadic())
return false;
if (!S.FunctionNonObjectParamTypesAreEqual(
@@ -10249,7 +10264,7 @@ bool clang::isBetterOverloadCandidate(
// If other rules cannot determine which is better, CUDA preference will be
// used again to determine which is better.
//
- // TODO: Currently IdentifyCUDAPreference does not return correct values
+ // TODO: Currently IdentifyPreference does not return correct values
// for functions called in global variable initializers due to missing
// correct context about device/host. Therefore we can only enforce this
// rule when there is a caller. We should enforce this rule for functions
@@ -10261,14 +10276,14 @@ bool clang::isBetterOverloadCandidate(
if (S.getLangOpts().CUDA && Cand1.Function && Cand2.Function &&
S.getLangOpts().GPUExcludeWrongSideOverloads) {
if (FunctionDecl *Caller = S.getCurFunctionDecl(/*AllowLambda=*/true)) {
- bool IsCallerImplicitHD = Sema::isCUDAImplicitHostDeviceFunction(Caller);
+ bool IsCallerImplicitHD = SemaCUDA::isImplicitHostDeviceFunction(Caller);
bool IsCand1ImplicitHD =
- Sema::isCUDAImplicitHostDeviceFunction(Cand1.Function);
+ SemaCUDA::isImplicitHostDeviceFunction(Cand1.Function);
bool IsCand2ImplicitHD =
- Sema::isCUDAImplicitHostDeviceFunction(Cand2.Function);
- auto P1 = S.IdentifyCUDAPreference(Caller, Cand1.Function);
- auto P2 = S.IdentifyCUDAPreference(Caller, Cand2.Function);
- assert(P1 != Sema::CFP_Never && P2 != Sema::CFP_Never);
+ SemaCUDA::isImplicitHostDeviceFunction(Cand2.Function);
+ auto P1 = S.CUDA().IdentifyPreference(Caller, Cand1.Function);
+ auto P2 = S.CUDA().IdentifyPreference(Caller, Cand2.Function);
+ assert(P1 != SemaCUDA::CFP_Never && P2 != SemaCUDA::CFP_Never);
// The implicit HD function may be a function in a system header which
// is forced by pragma. In device compilation, if we prefer HD candidates
// over wrong-sided candidates, overloading resolution may change, which
@@ -10282,8 +10297,8 @@ bool clang::isBetterOverloadCandidate(
auto EmitThreshold =
(S.getLangOpts().CUDAIsDevice && IsCallerImplicitHD &&
(IsCand1ImplicitHD || IsCand2ImplicitHD))
- ? Sema::CFP_Never
- : Sema::CFP_WrongSide;
+ ? SemaCUDA::CFP_Never
+ : SemaCUDA::CFP_WrongSide;
auto Cand1Emittable = P1 > EmitThreshold;
auto Cand2Emittable = P2 > EmitThreshold;
if (Cand1Emittable && !Cand2Emittable)
@@ -10436,42 +10451,32 @@ bool clang::isBetterOverloadCandidate(
// according to the partial ordering rules described in 14.5.5.2, or,
// if not that,
if (Cand1IsSpecialization && Cand2IsSpecialization) {
+ const auto *Obj1Context =
+ dyn_cast<CXXRecordDecl>(Cand1.FoundDecl->getDeclContext());
+ const auto *Obj2Context =
+ dyn_cast<CXXRecordDecl>(Cand2.FoundDecl->getDeclContext());
if (FunctionTemplateDecl *BetterTemplate = S.getMoreSpecializedTemplate(
Cand1.Function->getPrimaryTemplate(),
Cand2.Function->getPrimaryTemplate(), Loc,
isa<CXXConversionDecl>(Cand1.Function) ? TPOC_Conversion
: TPOC_Call,
- Cand1.ExplicitCallArguments, Cand2.ExplicitCallArguments,
- Cand1.isReversed() ^ Cand2.isReversed()))
+ Cand1.ExplicitCallArguments,
+ Obj1Context ? QualType(Obj1Context->getTypeForDecl(), 0)
+ : QualType{},
+ Obj2Context ? QualType(Obj2Context->getTypeForDecl(), 0)
+ : QualType{},
+ Cand1.isReversed() ^ Cand2.isReversed())) {
return BetterTemplate == Cand1.Function->getPrimaryTemplate();
+ }
}
// -— F1 and F2 are non-template functions with the same
// parameter-type-lists, and F1 is more constrained than F2 [...],
if (!Cand1IsSpecialization && !Cand2IsSpecialization &&
- sameFunctionParameterTypeLists(S, Cand1, Cand2)) {
- FunctionDecl *Function1 = Cand1.Function;
- FunctionDecl *Function2 = Cand2.Function;
- if (FunctionDecl *MF = Function1->getInstantiatedFromMemberFunction())
- Function1 = MF;
- if (FunctionDecl *MF = Function2->getInstantiatedFromMemberFunction())
- Function2 = MF;
-
- const Expr *RC1 = Function1->getTrailingRequiresClause();
- const Expr *RC2 = Function2->getTrailingRequiresClause();
- if (RC1 && RC2) {
- bool AtLeastAsConstrained1, AtLeastAsConstrained2;
- if (S.IsAtLeastAsConstrained(Function1, RC1, Function2, RC2,
- AtLeastAsConstrained1) ||
- S.IsAtLeastAsConstrained(Function2, RC2, Function1, RC1,
- AtLeastAsConstrained2))
- return false;
- if (AtLeastAsConstrained1 != AtLeastAsConstrained2)
- return AtLeastAsConstrained1;
- } else if (RC1 || RC2) {
- return RC1 != nullptr;
- }
- }
+ sameFunctionParameterTypeLists(S, Cand1, Cand2) &&
+ S.getMoreConstrainedFunction(Cand1.Function, Cand2.Function) ==
+ Cand1.Function)
+ return true;
// -- F1 is a constructor for a class D, F2 is a constructor for a base
// class B of D, and for all arguments the corresponding parameters of
@@ -10558,8 +10563,8 @@ bool clang::isBetterOverloadCandidate(
// to determine which is better.
if (S.getLangOpts().CUDA && Cand1.Function && Cand2.Function) {
FunctionDecl *Caller = S.getCurFunctionDecl(/*AllowLambda=*/true);
- return S.IdentifyCUDAPreference(Caller, Cand1.Function) >
- S.IdentifyCUDAPreference(Caller, Cand2.Function);
+ return S.CUDA().IdentifyPreference(Caller, Cand1.Function) >
+ S.CUDA().IdentifyPreference(Caller, Cand2.Function);
}
// General member function overloading is handled above, so this only handles
@@ -10651,7 +10656,8 @@ void Sema::diagnoseEquivalentInternalLinkageDeclarations(
bool OverloadCandidate::NotValidBecauseConstraintExprHasError() const {
return FailureKind == ovl_fail_bad_deduction &&
- DeductionFailure.Result == Sema::TDK_ConstraintsNotSatisfied &&
+ static_cast<TemplateDeductionResult>(DeductionFailure.Result) ==
+ TemplateDeductionResult::ConstraintsNotSatisfied &&
static_cast<CNSInfo *>(DeductionFailure.Data)
->Satisfaction.ContainsErrors;
}
@@ -10690,15 +10696,15 @@ OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc,
llvm::any_of(Candidates, [&](OverloadCandidate *Cand) {
// Check viable function only.
return Cand->Viable && Cand->Function &&
- S.IdentifyCUDAPreference(Caller, Cand->Function) ==
- Sema::CFP_SameSide;
+ S.CUDA().IdentifyPreference(Caller, Cand->Function) ==
+ SemaCUDA::CFP_SameSide;
});
if (ContainsSameSideCandidate) {
auto IsWrongSideCandidate = [&](OverloadCandidate *Cand) {
// Check viable function only to avoid unnecessary data copying/moving.
return Cand->Viable && Cand->Function &&
- S.IdentifyCUDAPreference(Caller, Cand->Function) ==
- Sema::CFP_WrongSide;
+ S.CUDA().IdentifyPreference(Caller, Cand->Function) ==
+ SemaCUDA::CFP_WrongSide;
};
llvm::erase_if(Candidates, IsWrongSideCandidate);
}
@@ -10759,6 +10765,12 @@ OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc,
if (Best->Function && Best->Function->isDeleted())
return OR_Deleted;
+ if (auto *M = dyn_cast_or_null<CXXMethodDecl>(Best->Function);
+ Kind == CSK_AddressOfOverloadSet && M &&
+ M->isImplicitObjectMemberFunction()) {
+ return OR_No_Viable_Function;
+ }
+
if (!EquivalentCands.empty())
S.diagnoseEquivalentInternalLinkageDeclarations(Loc, Best->Function,
EquivalentCands);
@@ -11131,8 +11143,16 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
Expr *FromExpr = Conv.Bad.FromExpr;
QualType FromTy = Conv.Bad.getFromType();
QualType ToTy = Conv.Bad.getToType();
- SourceRange ToParamRange =
- !isObjectArgument ? Fn->getParamDecl(I)->getSourceRange() : SourceRange();
+ SourceRange ToParamRange;
+
+ // FIXME: In presence of parameter packs we can't determine parameter range
+ // reliably, as we don't have access to instantiation.
+ bool HasParamPack =
+ llvm::any_of(Fn->parameters().take_front(I), [](const ParmVarDecl *Parm) {
+ return Parm->isParameterPack();
+ });
+ if (!isObjectArgument && !HasParamPack)
+ ToParamRange = Fn->getParamDecl(I)->getSourceRange();
if (FromTy == S.Context.OverloadTy) {
assert(FromExpr && "overload set argument came from implicit argument?");
@@ -11338,9 +11358,10 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
/// candidates. This is not covered by the more general DiagnoseArityMismatch()
/// over a candidate in any candidate set.
static bool CheckArityMismatch(Sema &S, OverloadCandidate *Cand,
- unsigned NumArgs) {
+ unsigned NumArgs, bool IsAddressOf = false) {
FunctionDecl *Fn = Cand->Function;
- unsigned MinParams = Fn->getMinRequiredArguments();
+ unsigned MinParams = Fn->getMinRequiredExplicitArguments() +
+ ((IsAddressOf && !Fn->isStatic()) ? 1 : 0);
// With invalid overloaded operators, it's possible that we think we
// have an arity mismatch when in fact it looks like we have the
@@ -11354,11 +11375,13 @@ static bool CheckArityMismatch(Sema &S, OverloadCandidate *Cand,
if (NumArgs < MinParams) {
assert((Cand->FailureKind == ovl_fail_too_few_arguments) ||
(Cand->FailureKind == ovl_fail_bad_deduction &&
- Cand->DeductionFailure.Result == Sema::TDK_TooFewArguments));
+ Cand->DeductionFailure.getResult() ==
+ TemplateDeductionResult::TooFewArguments));
} else {
assert((Cand->FailureKind == ovl_fail_too_many_arguments) ||
(Cand->FailureKind == ovl_fail_bad_deduction &&
- Cand->DeductionFailure.Result == Sema::TDK_TooManyArguments));
+ Cand->DeductionFailure.getResult() ==
+ TemplateDeductionResult::TooManyArguments));
}
return false;
@@ -11366,7 +11389,8 @@ static bool CheckArityMismatch(Sema &S, OverloadCandidate *Cand,
/// General arity mismatch diagnosis over a candidate in a candidate set.
static void DiagnoseArityMismatch(Sema &S, NamedDecl *Found, Decl *D,
- unsigned NumFormalArgs) {
+ unsigned NumFormalArgs,
+ bool IsAddressOf = false) {
assert(isa<FunctionDecl>(D) &&
"The templated declaration should at least be a function"
" when diagnosing bad template argument deduction due to too many"
@@ -11376,12 +11400,17 @@ static void DiagnoseArityMismatch(Sema &S, NamedDecl *Found, Decl *D,
// TODO: treat calls to a missing default constructor as a special case
const auto *FnTy = Fn->getType()->castAs<FunctionProtoType>();
- unsigned MinParams = Fn->getMinRequiredExplicitArguments();
+ unsigned MinParams = Fn->getMinRequiredExplicitArguments() +
+ ((IsAddressOf && !Fn->isStatic()) ? 1 : 0);
// at least / at most / exactly
- bool HasExplicitObjectParam = Fn->hasCXXExplicitFunctionObjectParameter();
- unsigned ParamCount = FnTy->getNumParams() - (HasExplicitObjectParam ? 1 : 0);
+ bool HasExplicitObjectParam =
+ !IsAddressOf && Fn->hasCXXExplicitFunctionObjectParameter();
+
+ unsigned ParamCount =
+ Fn->getNumNonObjectParams() + ((IsAddressOf && !Fn->isStatic()) ? 1 : 0);
unsigned mode, modeCount;
+
if (NumFormalArgs < MinParams) {
if (MinParams != ParamCount || FnTy->isVariadic() ||
FnTy->isTemplateVariadic())
@@ -11401,7 +11430,7 @@ static void DiagnoseArityMismatch(Sema &S, NamedDecl *Found, Decl *D,
std::pair<OverloadCandidateKind, OverloadCandidateSelect> FnKindPair =
ClassifyOverloadCandidate(S, Found, Fn, CRK_None, Description);
- if (modeCount == 1 &&
+ if (modeCount == 1 && !IsAddressOf &&
Fn->getParamDecl(HasExplicitObjectParam ? 1 : 0)->getDeclName())
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_arity_one)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second
@@ -11420,8 +11449,9 @@ static void DiagnoseArityMismatch(Sema &S, NamedDecl *Found, Decl *D,
/// Arity mismatch diagnosis specific to a function overload candidate.
static void DiagnoseArityMismatch(Sema &S, OverloadCandidate *Cand,
unsigned NumFormalArgs) {
- if (!CheckArityMismatch(S, Cand, NumFormalArgs))
- DiagnoseArityMismatch(S, Cand->FoundDecl, Cand->Function, NumFormalArgs);
+ if (!CheckArityMismatch(S, Cand, NumFormalArgs, Cand->TookAddressOfOverload))
+ DiagnoseArityMismatch(S, Cand->FoundDecl, Cand->Function, NumFormalArgs,
+ Cand->TookAddressOfOverload);
}
static TemplateDecl *getDescribedTemplate(Decl *Templated) {
@@ -11441,11 +11471,18 @@ static void DiagnoseBadDeduction(Sema &S, NamedDecl *Found, Decl *Templated,
(ParamD = Param.dyn_cast<TemplateTypeParmDecl*>()) ||
(ParamD = Param.dyn_cast<NonTypeTemplateParmDecl*>()) ||
(ParamD = Param.dyn_cast<TemplateTemplateParmDecl*>());
- switch (DeductionFailure.Result) {
- case Sema::TDK_Success:
- llvm_unreachable("TDK_success while diagnosing bad deduction");
+ switch (DeductionFailure.getResult()) {
+ case TemplateDeductionResult::Success:
+ llvm_unreachable(
+ "TemplateDeductionResult::Success while diagnosing bad deduction");
+ case TemplateDeductionResult::NonDependentConversionFailure:
+ llvm_unreachable("TemplateDeductionResult::NonDependentConversionFailure "
+ "while diagnosing bad deduction");
+ case TemplateDeductionResult::Invalid:
+ case TemplateDeductionResult::AlreadyDiagnosed:
+ return;
- case Sema::TDK_Incomplete: {
+ case TemplateDeductionResult::Incomplete: {
assert(ParamD && "no parameter found for incomplete deduction result");
S.Diag(Templated->getLocation(),
diag::note_ovl_candidate_incomplete_deduction)
@@ -11454,7 +11491,7 @@ static void DiagnoseBadDeduction(Sema &S, NamedDecl *Found, Decl *Templated,
return;
}
- case Sema::TDK_IncompletePack: {
+ case TemplateDeductionResult::IncompletePack: {
assert(ParamD && "no parameter found for incomplete deduction result");
S.Diag(Templated->getLocation(),
diag::note_ovl_candidate_incomplete_deduction_pack)
@@ -11465,7 +11502,7 @@ static void DiagnoseBadDeduction(Sema &S, NamedDecl *Found, Decl *Templated,
return;
}
- case Sema::TDK_Underqualified: {
+ case TemplateDeductionResult::Underqualified: {
assert(ParamD && "no parameter found for bad qualifiers deduction result");
TemplateTypeParmDecl *TParam = cast<TemplateTypeParmDecl>(ParamD);
@@ -11490,7 +11527,7 @@ static void DiagnoseBadDeduction(Sema &S, NamedDecl *Found, Decl *Templated,
return;
}
- case Sema::TDK_Inconsistent: {
+ case TemplateDeductionResult::Inconsistent: {
assert(ParamD && "no parameter found for inconsistent deduction result");
int which = 0;
if (isa<TemplateTypeParmDecl>(ParamD))
@@ -11535,7 +11572,7 @@ static void DiagnoseBadDeduction(Sema &S, NamedDecl *Found, Decl *Templated,
return;
}
- case Sema::TDK_InvalidExplicitArguments:
+ case TemplateDeductionResult::InvalidExplicitArguments:
assert(ParamD && "no parameter found for invalid explicit arguments");
if (ParamD->getDeclName())
S.Diag(Templated->getLocation(),
@@ -11557,7 +11594,7 @@ static void DiagnoseBadDeduction(Sema &S, NamedDecl *Found, Decl *Templated,
MaybeEmitInheritedConstructorNote(S, Found);
return;
- case Sema::TDK_ConstraintsNotSatisfied: {
+ case TemplateDeductionResult::ConstraintsNotSatisfied: {
// Format the template argument list into the argument string.
SmallString<128> TemplateArgString;
TemplateArgumentList *Args = DeductionFailure.getTemplateArgumentList();
@@ -11574,18 +11611,18 @@ static void DiagnoseBadDeduction(Sema &S, NamedDecl *Found, Decl *Templated,
static_cast<CNSInfo*>(DeductionFailure.Data)->Satisfaction);
return;
}
- case Sema::TDK_TooManyArguments:
- case Sema::TDK_TooFewArguments:
+ case TemplateDeductionResult::TooManyArguments:
+ case TemplateDeductionResult::TooFewArguments:
DiagnoseArityMismatch(S, Found, Templated, NumArgs);
return;
- case Sema::TDK_InstantiationDepth:
+ case TemplateDeductionResult::InstantiationDepth:
S.Diag(Templated->getLocation(),
diag::note_ovl_candidate_instantiation_depth);
MaybeEmitInheritedConstructorNote(S, Found);
return;
- case Sema::TDK_SubstitutionFailure: {
+ case TemplateDeductionResult::SubstitutionFailure: {
// Format the template argument list into the argument string.
SmallString<128> TemplateArgString;
if (TemplateArgumentList *Args =
@@ -11635,8 +11672,8 @@ static void DiagnoseBadDeduction(Sema &S, NamedDecl *Found, Decl *Templated,
return;
}
- case Sema::TDK_DeducedMismatch:
- case Sema::TDK_DeducedMismatchNested: {
+ case TemplateDeductionResult::DeducedMismatch:
+ case TemplateDeductionResult::DeducedMismatchNested: {
// Format the template argument list into the argument string.
SmallString<128> TemplateArgString;
if (TemplateArgumentList *Args =
@@ -11652,11 +11689,12 @@ static void DiagnoseBadDeduction(Sema &S, NamedDecl *Found, Decl *Templated,
<< (*DeductionFailure.getCallArgIndex() + 1)
<< *DeductionFailure.getFirstArg() << *DeductionFailure.getSecondArg()
<< TemplateArgString
- << (DeductionFailure.Result == Sema::TDK_DeducedMismatchNested);
+ << (DeductionFailure.getResult() ==
+ TemplateDeductionResult::DeducedMismatchNested);
break;
}
- case Sema::TDK_NonDeducedMismatch: {
+ case TemplateDeductionResult::NonDeducedMismatch: {
// FIXME: Provide a source location to indicate what we couldn't match.
TemplateArgument FirstTA = *DeductionFailure.getFirstArg();
TemplateArgument SecondTA = *DeductionFailure.getSecondArg();
@@ -11697,11 +11735,11 @@ static void DiagnoseBadDeduction(Sema &S, NamedDecl *Found, Decl *Templated,
}
// TODO: diagnose these individually, then kill off
// note_ovl_candidate_bad_deduction, which is uselessly vague.
- case Sema::TDK_MiscellaneousDeductionFailure:
+ case TemplateDeductionResult::MiscellaneousDeductionFailure:
S.Diag(Templated->getLocation(), diag::note_ovl_candidate_bad_deduction);
MaybeEmitInheritedConstructorNote(S, Found);
return;
- case Sema::TDK_CUDATargetMismatch:
+ case TemplateDeductionResult::CUDATargetMismatch:
S.Diag(Templated->getLocation(),
diag::note_cuda_ovl_candidate_target_mismatch);
return;
@@ -11712,8 +11750,9 @@ static void DiagnoseBadDeduction(Sema &S, NamedDecl *Found, Decl *Templated,
static void DiagnoseBadDeduction(Sema &S, OverloadCandidate *Cand,
unsigned NumArgs,
bool TakingCandidateAddress) {
- unsigned TDK = Cand->DeductionFailure.Result;
- if (TDK == Sema::TDK_TooFewArguments || TDK == Sema::TDK_TooManyArguments) {
+ TemplateDeductionResult TDK = Cand->DeductionFailure.getResult();
+ if (TDK == TemplateDeductionResult::TooFewArguments ||
+ TDK == TemplateDeductionResult::TooManyArguments) {
if (CheckArityMismatch(S, Cand, NumArgs))
return;
}
@@ -11726,8 +11765,8 @@ static void DiagnoseBadTarget(Sema &S, OverloadCandidate *Cand) {
FunctionDecl *Caller = S.getCurFunctionDecl(/*AllowLambda=*/true);
FunctionDecl *Callee = Cand->Function;
- Sema::CUDAFunctionTarget CallerTarget = S.IdentifyCUDATarget(Caller),
- CalleeTarget = S.IdentifyCUDATarget(Callee);
+ CUDAFunctionTarget CallerTarget = S.CUDA().IdentifyTarget(Caller),
+ CalleeTarget = S.CUDA().IdentifyTarget(Callee);
std::string FnDesc;
std::pair<OverloadCandidateKind, OverloadCandidateSelect> FnKindPair =
@@ -11737,32 +11776,32 @@ static void DiagnoseBadTarget(Sema &S, OverloadCandidate *Cand) {
S.Diag(Callee->getLocation(), diag::note_ovl_candidate_bad_target)
<< (unsigned)FnKindPair.first << (unsigned)ocs_non_template
<< FnDesc /* Ignored */
- << CalleeTarget << CallerTarget;
+ << llvm::to_underlying(CalleeTarget) << llvm::to_underlying(CallerTarget);
// This could be an implicit constructor for which we could not infer the
// target due to a collsion. Diagnose that case.
CXXMethodDecl *Meth = dyn_cast<CXXMethodDecl>(Callee);
if (Meth != nullptr && Meth->isImplicit()) {
CXXRecordDecl *ParentClass = Meth->getParent();
- Sema::CXXSpecialMember CSM;
+ CXXSpecialMemberKind CSM;
switch (FnKindPair.first) {
default:
return;
case oc_implicit_default_constructor:
- CSM = Sema::CXXDefaultConstructor;
+ CSM = CXXSpecialMemberKind::DefaultConstructor;
break;
case oc_implicit_copy_constructor:
- CSM = Sema::CXXCopyConstructor;
+ CSM = CXXSpecialMemberKind::CopyConstructor;
break;
case oc_implicit_move_constructor:
- CSM = Sema::CXXMoveConstructor;
+ CSM = CXXSpecialMemberKind::MoveConstructor;
break;
case oc_implicit_copy_assignment:
- CSM = Sema::CXXCopyAssignment;
+ CSM = CXXSpecialMemberKind::CopyAssignment;
break;
case oc_implicit_move_assignment:
- CSM = Sema::CXXMoveAssignment;
+ CSM = CXXSpecialMemberKind::MoveAssignment;
break;
};
@@ -11774,9 +11813,9 @@ static void DiagnoseBadTarget(Sema &S, OverloadCandidate *Cand) {
}
}
- S.inferCUDATargetForImplicitSpecialMember(ParentClass, CSM, Meth,
- /* ConstRHS */ ConstRHS,
- /* Diagnose */ true);
+ S.CUDA().inferTargetForImplicitSpecialMember(ParentClass, CSM, Meth,
+ /* ConstRHS */ ConstRHS,
+ /* Diagnose */ true);
}
}
@@ -11822,6 +11861,46 @@ static void DiagnoseFailedExplicitSpec(Sema &S, OverloadCandidate *Cand) {
<< (ES.getExpr() ? ES.getExpr()->getSourceRange() : SourceRange());
}
+static void NoteImplicitDeductionGuide(Sema &S, FunctionDecl *Fn) {
+ auto *DG = dyn_cast<CXXDeductionGuideDecl>(Fn);
+ if (!DG)
+ return;
+ TemplateDecl *OriginTemplate =
+ DG->getDeclName().getCXXDeductionGuideTemplate();
+ // We want to always print synthesized deduction guides for type aliases.
+ // They would retain the explicit bit of the corresponding constructor.
+ if (!(DG->isImplicit() || (OriginTemplate && OriginTemplate->isTypeAlias())))
+ return;
+ std::string FunctionProto;
+ llvm::raw_string_ostream OS(FunctionProto);
+ FunctionTemplateDecl *Template = DG->getDescribedFunctionTemplate();
+ if (!Template) {
+ // This also could be an instantiation. Find out the primary template.
+ FunctionDecl *Pattern =
+ DG->getTemplateInstantiationPattern(/*ForDefinition=*/false);
+ if (!Pattern) {
+ // The implicit deduction guide is built on an explicit non-template
+ // deduction guide. Currently, this might be the case only for type
+ // aliases.
+ // FIXME: Add a test once https://github.com/llvm/llvm-project/pull/96686
+ // gets merged.
+ assert(OriginTemplate->isTypeAlias() &&
+ "Non-template implicit deduction guides are only possible for "
+ "type aliases");
+ DG->print(OS);
+ S.Diag(DG->getLocation(), diag::note_implicit_deduction_guide)
+ << FunctionProto;
+ return;
+ }
+ Template = Pattern->getDescribedFunctionTemplate();
+ assert(Template && "Cannot find the associated function template of "
+ "CXXDeductionGuideDecl?");
+ }
+ Template->print(OS);
+ S.Diag(DG->getLocation(), diag::note_implicit_deduction_guide)
+ << FunctionProto;
+}
+
/// Generates a 'note' diagnostic for an overload candidate. We've
/// already generated a primary error at the call site.
///
@@ -11852,6 +11931,13 @@ static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
Cand->FailureKind != ovl_fail_bad_conversion)
return;
+ // Skip implicit member functions when trying to resolve
+ // the address of a an overload set for a function pointer.
+ if (Cand->TookAddressOfOverload &&
+ !Cand->Function->hasCXXExplicitFunctionObjectParameter() &&
+ !Cand->Function->isStatic())
+ return;
+
// Note deleted candidates, but only if they're viable.
if (Cand->Viable) {
if (Fn->isDeleted()) {
@@ -11872,6 +11958,17 @@ static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
return;
}
+ // If this is a synthesized deduction guide we're deducing against, add a note
+ // for it. These deduction guides are not explicitly spelled in the source
+ // code, so simply printing a deduction failure note mentioning synthesized
+ // template parameters or pointing to the header of the surrounding RecordDecl
+ // would be confusing.
+ //
+ // We prefer adding such notes at the end of the deduction failure because
+ // duplicate code snippets appearing in the diagnostic would likely become
+ // noisy.
+ auto _ = llvm::make_scope_exit([&] { NoteImplicitDeductionGuide(S, Fn); });
+
switch (Cand->FailureKind) {
case ovl_fail_too_many_arguments:
case ovl_fail_too_few_arguments:
@@ -11906,7 +12003,7 @@ static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
case ovl_fail_bad_conversion: {
unsigned I = (Cand->IgnoreObjectArgument ? 1 : 0);
for (unsigned N = Cand->Conversions.size(); I != N; ++I)
- if (Cand->Conversions[I].isBad())
+ if (Cand->Conversions[I].isInitialized() && Cand->Conversions[I].isBad())
return DiagnoseBadConversion(S, Cand, I, TakingCandidateAddress);
// FIXME: this currently happens when we're called from SemaInit
@@ -12047,38 +12144,38 @@ static SourceLocation GetLocationForCandidate(const OverloadCandidate *Cand) {
}
static unsigned RankDeductionFailure(const DeductionFailureInfo &DFI) {
- switch ((Sema::TemplateDeductionResult)DFI.Result) {
- case Sema::TDK_Success:
- case Sema::TDK_NonDependentConversionFailure:
- case Sema::TDK_AlreadyDiagnosed:
+ switch (static_cast<TemplateDeductionResult>(DFI.Result)) {
+ case TemplateDeductionResult::Success:
+ case TemplateDeductionResult::NonDependentConversionFailure:
+ case TemplateDeductionResult::AlreadyDiagnosed:
llvm_unreachable("non-deduction failure while diagnosing bad deduction");
- case Sema::TDK_Invalid:
- case Sema::TDK_Incomplete:
- case Sema::TDK_IncompletePack:
+ case TemplateDeductionResult::Invalid:
+ case TemplateDeductionResult::Incomplete:
+ case TemplateDeductionResult::IncompletePack:
return 1;
- case Sema::TDK_Underqualified:
- case Sema::TDK_Inconsistent:
+ case TemplateDeductionResult::Underqualified:
+ case TemplateDeductionResult::Inconsistent:
return 2;
- case Sema::TDK_SubstitutionFailure:
- case Sema::TDK_DeducedMismatch:
- case Sema::TDK_ConstraintsNotSatisfied:
- case Sema::TDK_DeducedMismatchNested:
- case Sema::TDK_NonDeducedMismatch:
- case Sema::TDK_MiscellaneousDeductionFailure:
- case Sema::TDK_CUDATargetMismatch:
+ case TemplateDeductionResult::SubstitutionFailure:
+ case TemplateDeductionResult::DeducedMismatch:
+ case TemplateDeductionResult::ConstraintsNotSatisfied:
+ case TemplateDeductionResult::DeducedMismatchNested:
+ case TemplateDeductionResult::NonDeducedMismatch:
+ case TemplateDeductionResult::MiscellaneousDeductionFailure:
+ case TemplateDeductionResult::CUDATargetMismatch:
return 3;
- case Sema::TDK_InstantiationDepth:
+ case TemplateDeductionResult::InstantiationDepth:
return 4;
- case Sema::TDK_InvalidExplicitArguments:
+ case TemplateDeductionResult::InvalidExplicitArguments:
return 5;
- case Sema::TDK_TooManyArguments:
- case Sema::TDK_TooFewArguments:
+ case TemplateDeductionResult::TooManyArguments:
+ case TemplateDeductionResult::TooFewArguments:
return 6;
}
llvm_unreachable("Unhandled deduction result");
@@ -12806,11 +12903,10 @@ private:
// overloaded functions considered.
FunctionDecl *Specialization = nullptr;
TemplateDeductionInfo Info(FailedCandidates.getLocation());
- if (Sema::TemplateDeductionResult Result
- = S.DeduceTemplateArguments(FunctionTemplate,
- &OvlExplicitTemplateArgs,
- TargetFunctionType, Specialization,
- Info, /*IsAddressOfFunction*/true)) {
+ if (TemplateDeductionResult Result = S.DeduceTemplateArguments(
+ FunctionTemplate, &OvlExplicitTemplateArgs, TargetFunctionType,
+ Specialization, Info, /*IsAddressOfFunction*/ true);
+ Result != TemplateDeductionResult::Success) {
// Make a note of the failed deduction for diagnostics.
FailedCandidates.addCandidate()
.set(CurAccessFunPair, FunctionTemplate->getTemplatedDecl(),
@@ -12849,7 +12945,7 @@ private:
if (S.getLangOpts().CUDA) {
FunctionDecl *Caller = S.getCurFunctionDecl(/*AllowLambda=*/true);
if (!(Caller && Caller->isImplicit()) &&
- !S.IsAllowedCUDACall(Caller, FunDecl))
+ !S.CUDA().IsAllowedCall(Caller, FunDecl))
return false;
}
if (FunDecl->isMultiVersion()) {
@@ -12969,8 +13065,8 @@ private:
}
void EliminateSuboptimalCudaMatches() {
- S.EraseUnwantedCUDAMatches(S.getCurFunctionDecl(/*AllowLambda=*/true),
- Matches);
+ S.CUDA().EraseUnwantedMatches(S.getCurFunctionDecl(/*AllowLambda=*/true),
+ Matches);
}
public:
@@ -13050,21 +13146,6 @@ public:
};
}
-/// ResolveAddressOfOverloadedFunction - Try to resolve the address of
-/// an overloaded function (C++ [over.over]), where @p From is an
-/// expression with overloaded function type and @p ToType is the type
-/// we're trying to resolve to. For example:
-///
-/// @code
-/// int f(double);
-/// int f(int);
-///
-/// int (*pfd)(double) = f; // selects f(double)
-/// @endcode
-///
-/// This routine returns the resulting FunctionDecl if it could be
-/// resolved, and NULL otherwise. When @p Complain is true, this
-/// routine will emit diagnostics if there is an error.
FunctionDecl *
Sema::ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
@@ -13105,13 +13186,6 @@ Sema::ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
return Fn;
}
-/// Given an expression that refers to an overloaded function, try to
-/// resolve that function to a single function that can have its address taken.
-/// This will modify `Pair` iff it returns non-null.
-///
-/// This routine can only succeed if from all of the candidates in the overload
-/// set for SrcExpr that can have their addresses taken, there is one candidate
-/// that is more constrained than the rest.
FunctionDecl *
Sema::resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &Pair) {
OverloadExpr::FindResult R = OverloadExpr::find(E);
@@ -13124,27 +13198,8 @@ Sema::resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &Pair) {
// Return positive for better, negative for worse, 0 for equal preference.
auto CheckCUDAPreference = [&](FunctionDecl *FD1, FunctionDecl *FD2) {
FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
- return static_cast<int>(IdentifyCUDAPreference(Caller, FD1)) -
- static_cast<int>(IdentifyCUDAPreference(Caller, FD2));
- };
-
- auto CheckMoreConstrained = [&](FunctionDecl *FD1,
- FunctionDecl *FD2) -> std::optional<bool> {
- if (FunctionDecl *MF = FD1->getInstantiatedFromMemberFunction())
- FD1 = MF;
- if (FunctionDecl *MF = FD2->getInstantiatedFromMemberFunction())
- FD2 = MF;
- SmallVector<const Expr *, 1> AC1, AC2;
- FD1->getAssociatedConstraints(AC1);
- FD2->getAssociatedConstraints(AC2);
- bool AtLeastAsConstrained1, AtLeastAsConstrained2;
- if (IsAtLeastAsConstrained(FD1, AC1, FD2, AC2, AtLeastAsConstrained1))
- return std::nullopt;
- if (IsAtLeastAsConstrained(FD2, AC2, FD1, AC1, AtLeastAsConstrained2))
- return std::nullopt;
- if (AtLeastAsConstrained1 == AtLeastAsConstrained2)
- return std::nullopt;
- return AtLeastAsConstrained1;
+ return static_cast<int>(CUDA().IdentifyPreference(Caller, FD1)) -
+ static_cast<int>(CUDA().IdentifyPreference(Caller, FD2));
};
// Don't use the AddressOfResolver because we're specifically looking for
@@ -13183,15 +13238,14 @@ Sema::resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &Pair) {
}
// FD has the same CUDA prefernece than Result. Continue check
// constraints.
- std::optional<bool> MoreConstrainedThanPrevious =
- CheckMoreConstrained(FD, Result);
- if (!MoreConstrainedThanPrevious) {
- IsResultAmbiguous = true;
- AmbiguousDecls.push_back(FD);
+ FunctionDecl *MoreConstrained = getMoreConstrainedFunction(FD, Result);
+ if (MoreConstrained != FD) {
+ if (!MoreConstrained) {
+ IsResultAmbiguous = true;
+ AmbiguousDecls.push_back(FD);
+ }
continue;
}
- if (!*MoreConstrainedThanPrevious)
- continue;
// FD is more constrained - replace Result with it.
}
FoundBetter();
@@ -13210,7 +13264,7 @@ Sema::resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &Pair) {
// constraints.
if (getLangOpts().CUDA && CheckCUDAPreference(Skipped, Result) != 0)
continue;
- if (!CheckMoreConstrained(Skipped, Result))
+ if (!getMoreConstrainedFunction(Skipped, Result))
return nullptr;
}
Pair = DAP;
@@ -13218,13 +13272,6 @@ Sema::resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &Pair) {
return Result;
}
-/// Given an overloaded function, tries to turn it into a non-overloaded
-/// function reference using resolveAddressOfSingleOverloadCandidate. This
-/// will perform access checks, diagnose the use of the resultant decl, and, if
-/// requested, potentially perform a function-to-pointer decay.
-///
-/// Returns false if resolveAddressOfSingleOverloadCandidate fails.
-/// Otherwise, returns true. This may emit diagnostics and return true.
bool Sema::resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion) {
Expr *E = SrcExpr.get();
@@ -13252,16 +13299,6 @@ bool Sema::resolveAndFixAddressOfSingleOverloadCandidate(
return true;
}
-/// Given an expression that refers to an overloaded function, try to
-/// resolve that overloaded function expression down to a single function.
-///
-/// This routine can only resolve template-ids that refer to a single function
-/// template, where that template-id refers to a single template whose template
-/// arguments are either provided by the template-id or have defaults,
-/// as described in C++0x [temp.arg.explicit]p3.
-///
-/// If no template-ids are found, no diagnostics are emitted and NULL is
-/// returned.
FunctionDecl *Sema::ResolveSingleFunctionTemplateSpecialization(
OverloadExpr *ovl, bool Complain, DeclAccessPair *FoundResult,
TemplateSpecCandidateSet *FailedTSC) {
@@ -13301,10 +13338,10 @@ FunctionDecl *Sema::ResolveSingleFunctionTemplateSpecialization(
// overloaded functions considered.
FunctionDecl *Specialization = nullptr;
TemplateDeductionInfo Info(ovl->getNameLoc());
- if (TemplateDeductionResult Result
- = DeduceTemplateArguments(FunctionTemplate, &ExplicitTemplateArgs,
- Specialization, Info,
- /*IsAddressOfFunction*/true)) {
+ if (TemplateDeductionResult Result = DeduceTemplateArguments(
+ FunctionTemplate, &ExplicitTemplateArgs, Specialization, Info,
+ /*IsAddressOfFunction*/ true);
+ Result != TemplateDeductionResult::Success) {
// Make a note of the failed deduction for diagnostics.
if (FailedTSC)
FailedTSC->addCandidate().set(
@@ -13336,14 +13373,6 @@ FunctionDecl *Sema::ResolveSingleFunctionTemplateSpecialization(
return Matched;
}
-// Resolve and fix an overloaded expression that can be resolved
-// because it identifies a single function template specialization.
-//
-// Last three arguments should only be supplied if Complain = true
-//
-// Return true if it was logically possible to so resolve the
-// expression, regardless of whether or not it succeeded. Always
-// returns true if 'complain' is set.
bool Sema::ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr, bool doFunctionPointerConversion, bool complain,
SourceRange OpRangeForComplaining, QualType DestTypeForComplaining,
@@ -13457,8 +13486,6 @@ static void AddOverloadedCallCandidate(Sema &S,
assert(!KnownValid && "unhandled case in overloaded call candidate");
}
-/// Add the overload candidates named by callee and/or found by argument
-/// dependent lookup to the given overload set.
void Sema::AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
@@ -13513,8 +13540,6 @@ void Sema::AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
CandidateSet, PartialOverloading);
}
-/// Add the call candidates from the given set of lookup results to the given
-/// overload set. Non-function lookup results are ignored.
void Sema::AddOverloadedCallCandidates(
LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet) {
@@ -13778,9 +13803,6 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
RParenLoc);
}
-/// Constructs and populates an OverloadedCandidateSet from
-/// the given function.
-/// \returns true when an the ExprResult output parameter has been set.
bool Sema::buildOverloadedCallSet(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
MultiExprArg Args,
@@ -13916,6 +13938,21 @@ static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
}
case OR_No_Viable_Function: {
+ if (*Best != CandidateSet->end() &&
+ CandidateSet->getKind() ==
+ clang::OverloadCandidateSet::CSK_AddressOfOverloadSet) {
+ if (CXXMethodDecl *M =
+ dyn_cast_if_present<CXXMethodDecl>((*Best)->Function);
+ M && M->isImplicitObjectMemberFunction()) {
+ CandidateSet->NoteCandidates(
+ PartialDiagnosticAt(
+ Fn->getBeginLoc(),
+ SemaRef.PDiag(diag::err_member_call_without_object) << 0 << M),
+ SemaRef, OCD_AmbiguousCandidates, Args);
+ return ExprError();
+ }
+ }
+
// Try to recover by looking for viable functions which the user might
// have meant to call.
ExprResult Recovery = BuildRecoveryCallExpr(SemaRef, S, Fn, ULE, LParenLoc,
@@ -13958,15 +13995,13 @@ static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
break;
case OR_Deleted: {
- CandidateSet->NoteCandidates(
- PartialDiagnosticAt(Fn->getBeginLoc(),
- SemaRef.PDiag(diag::err_ovl_deleted_call)
- << ULE->getName() << Fn->getSourceRange()),
- SemaRef, OCD_AllCandidates, Args);
+ FunctionDecl *FDecl = (*Best)->Function;
+ SemaRef.DiagnoseUseOfDeletedFunction(Fn->getBeginLoc(),
+ Fn->getSourceRange(), ULE->getName(),
+ *CandidateSet, FDecl, Args);
// We emitted an error for the unavailable/deleted function call but keep
// the call in the AST.
- FunctionDecl *FDecl = (*Best)->Function;
ExprResult Res =
SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl);
if (Res.isInvalid())
@@ -13995,12 +14030,6 @@ static void markUnaddressableCandidatesUnviable(Sema &S,
}
}
-/// BuildOverloadedCallExpr - Given the call expression that calls Fn
-/// (which eventually refers to the declaration Func) and the call
-/// arguments Args/NumArgs, attempt to resolve the function call down
-/// to a specific function. If overload resolution succeeds, returns
-/// the call expression produced by overload resolution.
-/// Otherwise, emits diagnostics and returns ExprError.
ExprResult Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
@@ -14009,8 +14038,10 @@ ExprResult Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn,
Expr *ExecConfig,
bool AllowTypoCorrection,
bool CalleesAddressIsTaken) {
- OverloadCandidateSet CandidateSet(Fn->getExprLoc(),
- OverloadCandidateSet::CSK_Normal);
+ OverloadCandidateSet CandidateSet(
+ Fn->getExprLoc(), CalleesAddressIsTaken
+ ? OverloadCandidateSet::CSK_AddressOfOverloadSet
+ : OverloadCandidateSet::CSK_Normal);
ExprResult result;
if (buildOverloadedCallSet(S, Fn, ULE, Args, LParenLoc, &CandidateSet,
@@ -14047,20 +14078,14 @@ ExprResult Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn,
OverloadResult, AllowTypoCorrection);
}
-static bool IsOverloaded(const UnresolvedSetImpl &Functions) {
- return Functions.size() > 1 ||
- (Functions.size() == 1 &&
- isa<FunctionTemplateDecl>((*Functions.begin())->getUnderlyingDecl()));
-}
-
ExprResult Sema::CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc NNSLoc,
DeclarationNameInfo DNI,
const UnresolvedSetImpl &Fns,
bool PerformADL) {
- return UnresolvedLookupExpr::Create(Context, NamingClass, NNSLoc, DNI,
- PerformADL, IsOverloaded(Fns),
- Fns.begin(), Fns.end());
+ return UnresolvedLookupExpr::Create(
+ Context, NamingClass, NNSLoc, DNI, PerformADL, Fns.begin(), Fns.end(),
+ /*KnownDependent=*/false, /*KnownInstantiationDependent=*/false);
}
ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
@@ -14140,21 +14165,6 @@ ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
return CheckForImmediateInvocation(CE, CE->getDirectCallee());
}
-/// Create a unary operation that may resolve to an overloaded
-/// operator.
-///
-/// \param OpLoc The location of the operator itself (e.g., '*').
-///
-/// \param Opc The UnaryOperatorKind that describes this operator.
-///
-/// \param Fns The set of non-member functions that will be
-/// considered by overload resolution. The caller needs to build this
-/// set based on the context using, e.g.,
-/// LookupOverloadedOperatorName() and ArgumentDependentLookup(). This
-/// set should not contain any member functions; those will be added
-/// by CreateOverloadedUnaryOp().
-///
-/// \param Input The input argument.
ExprResult
Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
@@ -14184,9 +14194,16 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
ArrayRef<Expr *> ArgsArray(Args, NumArgs);
if (Input->isTypeDependent()) {
+ ExprValueKind VK = ExprValueKind::VK_PRValue;
+ // [C++26][expr.unary.op][expr.pre.incr]
+ // The * operator yields an lvalue of type
+ // The pre/post increment operators yied an lvalue.
+ if (Opc == UO_PreDec || Opc == UO_PreInc || Opc == UO_Deref)
+ VK = VK_LValue;
+
if (Fns.empty())
- return UnaryOperator::Create(Context, Input, Opc, Context.DependentTy,
- VK_PRValue, OK_Ordinary, OpLoc, false,
+ return UnaryOperator::Create(Context, Input, Opc, Context.DependentTy, VK,
+ OK_Ordinary, OpLoc, false,
CurFPFeatureOverrides());
CXXRecordDecl *NamingClass = nullptr; // lookup ignores member operators
@@ -14288,7 +14305,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
// operator node.
ExprResult InputRes = PerformImplicitConversion(
Input, Best->BuiltinParamTypes[0], Best->Conversions[0], AA_Passing,
- CCK_ForBuiltinOverloadedOp);
+ CheckedConversionKind::ForBuiltinOverloadedOp);
if (InputRes.isInvalid())
return ExprError();
Input = InputRes.get();
@@ -14318,20 +14335,24 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
UnaryOperator::getOpcodeStr(Opc), OpLoc);
return ExprError();
- case OR_Deleted:
+ case OR_Deleted: {
// CreateOverloadedUnaryOp fills the first element of ArgsArray with the
// object whose method was called. Later in NoteCandidates size of ArgsArray
// is passed further and it eventually ends up compared to number of
// function candidate parameters which never includes the object parameter,
// so slice ArgsArray to make sure apples are compared to apples.
+ StringLiteral *Msg = Best->Function->getDeletedMessage();
CandidateSet.NoteCandidates(
PartialDiagnosticAt(OpLoc, PDiag(diag::err_ovl_deleted_oper)
<< UnaryOperator::getOpcodeStr(Opc)
+ << (Msg != nullptr)
+ << (Msg ? Msg->getString() : StringRef())
<< Input->getSourceRange()),
*this, OCD_AllCandidates, ArgsArray.drop_front(),
UnaryOperator::getOpcodeStr(Opc), OpLoc);
return ExprError();
}
+ }
// Either we found no viable overloaded operator or we matched a
// built-in operator. In either case, fall through to trying to
@@ -14339,7 +14360,6 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
return CreateBuiltinUnaryOp(OpLoc, Opc, Input);
}
-/// Perform lookup for an overloaded binary operator.
void Sema::LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
@@ -14403,29 +14423,6 @@ void Sema::LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
AddBuiltinOperatorCandidates(Op, OpLoc, Args, CandidateSet);
}
-/// Create a binary operation that may resolve to an overloaded
-/// operator.
-///
-/// \param OpLoc The location of the operator itself (e.g., '+').
-///
-/// \param Opc The BinaryOperatorKind that describes this operator.
-///
-/// \param Fns The set of non-member functions that will be
-/// considered by overload resolution. The caller needs to build this
-/// set based on the context using, e.g.,
-/// LookupOverloadedOperatorName() and ArgumentDependentLookup(). This
-/// set should not contain any member functions; those will be added
-/// by CreateOverloadedBinOp().
-///
-/// \param LHS Left-hand argument.
-/// \param RHS Right-hand argument.
-/// \param PerformADL Whether to consider operator candidates found by ADL.
-/// \param AllowRewrittenCandidates Whether to consider candidates found by
-/// C++20 operator rewrites.
-/// \param DefaultedFn If we are synthesizing a defaulted operator function,
-/// the function in question. Such a function is never a candidate in
-/// our overload resolution. This also enables synthesizing a three-way
-/// comparison from < and == as described in C++20 [class.spaceship]p1.
ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns, Expr *LHS,
@@ -14617,6 +14614,13 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
}
}
+ // Check for nonnull = nullable.
+ // This won't be caught in the arg's initialization: the parameter to
+ // the assignment operator is not marked nonnull.
+ if (Op == OO_Equal)
+ diagnoseNullableToNonnullConversion(Args[0]->getType(),
+ Args[1]->getType(), OpLoc);
+
// Convert the arguments.
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FnDecl)) {
// Best->Access is only meaningful for class members.
@@ -14693,10 +14697,12 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
FnDecl))
return ExprError();
- // Check for a self move.
- if (Op == OO_Equal)
+ if (Op == OO_Equal) {
+ // Check for a self move.
DiagnoseSelfMove(Args[0], Args[1], OpLoc);
-
+ // lifetime check.
+ checkExprLifetime(*this, AssignedEntity{Args[0]}, Args[1]);
+ }
if (ImplicitThis) {
QualType ThisType = Context.getPointerType(ImplicitThis->getType());
QualType ThisTypeFromDecl = Context.getPointerType(
@@ -14760,14 +14766,14 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// operator node.
ExprResult ArgsRes0 = PerformImplicitConversion(
Args[0], Best->BuiltinParamTypes[0], Best->Conversions[0],
- AA_Passing, CCK_ForBuiltinOverloadedOp);
+ AA_Passing, CheckedConversionKind::ForBuiltinOverloadedOp);
if (ArgsRes0.isInvalid())
return ExprError();
Args[0] = ArgsRes0.get();
ExprResult ArgsRes1 = PerformImplicitConversion(
Args[1], Best->BuiltinParamTypes[1], Best->Conversions[1],
- AA_Passing, CCK_ForBuiltinOverloadedOp);
+ AA_Passing, CheckedConversionKind::ForBuiltinOverloadedOp);
if (ArgsRes1.isInvalid())
return ExprError();
Args[1] = ArgsRes1.get();
@@ -14841,13 +14847,14 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
OpLoc);
return ExprError();
- case OR_Deleted:
+ case OR_Deleted: {
if (isImplicitlyDeleted(Best->Function)) {
FunctionDecl *DeletedFD = Best->Function;
DefaultedFunctionKind DFK = getDefaultedFunctionKind(DeletedFD);
if (DFK.isSpecialMember()) {
Diag(OpLoc, diag::err_ovl_deleted_special_oper)
- << Args[0]->getType() << DFK.asSpecialMember();
+ << Args[0]->getType()
+ << llvm::to_underlying(DFK.asSpecialMember());
} else {
assert(DFK.isComparison());
Diag(OpLoc, diag::err_ovl_deleted_comparison)
@@ -14859,16 +14866,20 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
NoteDeletedFunction(DeletedFD);
return ExprError();
}
+
+ StringLiteral *Msg = Best->Function->getDeletedMessage();
CandidateSet.NoteCandidates(
PartialDiagnosticAt(
- OpLoc, PDiag(diag::err_ovl_deleted_oper)
- << getOperatorSpelling(Best->Function->getDeclName()
- .getCXXOverloadedOperator())
- << Args[0]->getSourceRange()
- << Args[1]->getSourceRange()),
+ OpLoc,
+ PDiag(diag::err_ovl_deleted_oper)
+ << getOperatorSpelling(Best->Function->getDeclName()
+ .getCXXOverloadedOperator())
+ << (Msg != nullptr) << (Msg ? Msg->getString() : StringRef())
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange()),
*this, OCD_AllCandidates, Args, BinaryOperator::getOpcodeStr(Opc),
OpLoc);
return ExprError();
+ }
}
// We matched a built-in operator; build it.
@@ -15133,14 +15144,14 @@ ExprResult Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
// operator node.
ExprResult ArgsRes0 = PerformImplicitConversion(
Args[0], Best->BuiltinParamTypes[0], Best->Conversions[0],
- AA_Passing, CCK_ForBuiltinOverloadedOp);
+ AA_Passing, CheckedConversionKind::ForBuiltinOverloadedOp);
if (ArgsRes0.isInvalid())
return ExprError();
Args[0] = ArgsRes0.get();
ExprResult ArgsRes1 = PerformImplicitConversion(
Args[1], Best->BuiltinParamTypes[1], Best->Conversions[1],
- AA_Passing, CCK_ForBuiltinOverloadedOp);
+ AA_Passing, CheckedConversionKind::ForBuiltinOverloadedOp);
if (ArgsRes1.isInvalid())
return ExprError();
Args[1] = ArgsRes1.get();
@@ -15180,26 +15191,23 @@ ExprResult Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
}
return ExprError();
- case OR_Deleted:
+ case OR_Deleted: {
+ StringLiteral *Msg = Best->Function->getDeletedMessage();
CandidateSet.NoteCandidates(
- PartialDiagnosticAt(LLoc, PDiag(diag::err_ovl_deleted_oper)
- << "[]" << Args[0]->getSourceRange()
- << Range),
+ PartialDiagnosticAt(LLoc,
+ PDiag(diag::err_ovl_deleted_oper)
+ << "[]" << (Msg != nullptr)
+ << (Msg ? Msg->getString() : StringRef())
+ << Args[0]->getSourceRange() << Range),
*this, OCD_AllCandidates, Args, "[]", LLoc);
return ExprError();
}
+ }
// We matched a built-in operator; build it.
return CreateBuiltinArraySubscriptExpr(Args[0], LLoc, Args[1], RLoc);
}
-/// BuildCallToMemberFunction - Build a call to a member
-/// function. MemExpr is the expression that refers to the member
-/// function (and includes the object parameter), Args/NumArgs are the
-/// arguments to the function call (not including the object
-/// parameter). The caller needs to validate that the member
-/// expression refers to a non-static member function or an overloaded
-/// member function.
ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
SourceLocation LParenLoc,
MultiExprArg Args,
@@ -15401,11 +15409,9 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
*this, OCD_AmbiguousCandidates, Args);
break;
case OR_Deleted:
- CandidateSet.NoteCandidates(
- PartialDiagnosticAt(UnresExpr->getMemberLoc(),
- PDiag(diag::err_ovl_deleted_member_call)
- << DeclName << MemExprE->getSourceRange()),
- *this, OCD_AllCandidates, Args);
+ DiagnoseUseOfDeletedFunction(
+ UnresExpr->getMemberLoc(), MemExprE->getSourceRange(), DeclName,
+ CandidateSet, Best->Function, Args, /*IsMember=*/true);
break;
}
// Overload resolution fails, try to recover.
@@ -15438,8 +15444,10 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
CallExpr *TheCall = nullptr;
llvm::SmallVector<Expr *, 8> NewArgs;
if (Method->isExplicitObjectMemberFunction()) {
- PrepareExplicitObjectArgument(*this, Method, MemExpr->getBase(), Args,
- NewArgs);
+ if (PrepareExplicitObjectArgument(*this, Method, MemExpr->getBase(), Args,
+ NewArgs))
+ return ExprError();
+
// Build the actual expression node.
ExprResult FnExpr =
CreateFunctionRefExpr(*this, Method, FoundDecl, MemExpr,
@@ -15525,10 +15533,6 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
TheCall->getDirectCallee());
}
-/// BuildCallToObjectOfClassType - Build a call to an object of class
-/// type (C++ [over.call.object]), which can end up invoking an
-/// overloaded function call operator (@c operator()) or performing a
-/// user-defined conversion on the object argument.
ExprResult
Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
SourceLocation LParenLoc,
@@ -15669,15 +15673,21 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
*this, OCD_AmbiguousCandidates, Args);
break;
- case OR_Deleted:
+ case OR_Deleted: {
+ // FIXME: Is this diagnostic here really necessary? It seems that
+ // 1. we don't have any tests for this diagnostic, and
+ // 2. we already issue err_deleted_function_use for this later on anyway.
+ StringLiteral *Msg = Best->Function->getDeletedMessage();
CandidateSet.NoteCandidates(
PartialDiagnosticAt(Object.get()->getBeginLoc(),
PDiag(diag::err_ovl_deleted_object_call)
- << Object.get()->getType()
+ << Object.get()->getType() << (Msg != nullptr)
+ << (Msg ? Msg->getString() : StringRef())
<< Object.get()->getSourceRange()),
*this, OCD_AllCandidates, Args);
break;
}
+ }
if (Best == CandidateSet.end())
return true;
@@ -15747,9 +15757,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
// Initialize the object parameter.
llvm::SmallVector<Expr *, 8> NewArgs;
if (Method->isExplicitObjectMemberFunction()) {
- // FIXME: we should do that during the definition of the lambda when we can.
- DiagnoseInvalidExplicitObjectParameterInLambda(Method);
- PrepareExplicitObjectArgument(*this, Method, Obj, Args, NewArgs);
+ IsError |= PrepareExplicitObjectArgument(*this, Method, Obj, Args, NewArgs);
} else {
ExprResult ObjRes = PerformImplicitObjectArgumentInitialization(
Object.get(), /*Qualifier=*/nullptr, Best->FoundDecl, Method);
@@ -15797,9 +15805,6 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
return CheckForImmediateInvocation(MaybeBindToTemporary(TheCall), Method);
}
-/// BuildOverloadedArrowExpr - Build a call to an overloaded @c operator->
-/// (if one exists), where @c Base is an expression of class type and
-/// @c Member is the name of the member we're trying to find.
ExprResult
Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
bool *NoArrowOperatorFound) {
@@ -15876,13 +15881,17 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
*this, OCD_AmbiguousCandidates, Base);
return ExprError();
- case OR_Deleted:
+ case OR_Deleted: {
+ StringLiteral *Msg = Best->Function->getDeletedMessage();
CandidateSet.NoteCandidates(
PartialDiagnosticAt(OpLoc, PDiag(diag::err_ovl_deleted_oper)
- << "->" << Base->getSourceRange()),
+ << "->" << (Msg != nullptr)
+ << (Msg ? Msg->getString() : StringRef())
+ << Base->getSourceRange()),
*this, OCD_AllCandidates, Base);
return ExprError();
}
+ }
CheckMemberOperatorAccess(OpLoc, Base, nullptr, Best->FoundDecl);
@@ -15926,8 +15935,6 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
return CheckForImmediateInvocation(MaybeBindToTemporary(TheCall), Method);
}
-/// BuildLiteralOperatorCall - Build a UserDefinedLiteral by creating a call to
-/// a literal operator described by the provided lookup results.
ExprResult Sema::BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr*> Args,
@@ -16003,13 +16010,6 @@ ExprResult Sema::BuildLiteralOperatorCall(LookupResult &R,
return CheckForImmediateInvocation(MaybeBindToTemporary(UDL), FD);
}
-/// Build a call to 'begin' or 'end' for a C++11 for-range statement. If the
-/// given LookupResult is non-empty, it is assumed to describe a member which
-/// will be invoked. Otherwise, the function will be found via argument
-/// dependent lookup.
-/// CallExpr is set to a valid expression and FRS_Success returned on success,
-/// otherwise CallExpr is set to ExprError() and some non-success value
-/// is returned.
Sema::ForRangeStatus
Sema::BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
@@ -16072,12 +16072,6 @@ Sema::BuildForRangeBeginEndCall(SourceLocation Loc,
return FRS_Success;
}
-
-/// FixOverloadedFunctionReference - E is an expression that refers to
-/// a C++ overloaded function (possibly with some parentheses and
-/// perhaps a '&' around it). We have resolved the overloaded function
-/// to the function declaration Fn, so patch up the expression E to
-/// refer (possibly indirectly) to Fn. Returns the new expr.
ExprResult Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
FunctionDecl *Fn) {
if (ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
@@ -16146,9 +16140,9 @@ ExprResult Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
assert(UnOp->getOpcode() == UO_AddrOf &&
"Can only take the address of an overloaded function");
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn)) {
- if (Method->isStatic()) {
- // Do nothing: static member functions aren't any different
- // from non-member functions.
+ if (!Method->isImplicitObjectMemberFunction()) {
+ // Do nothing: the address of static and
+ // explicit object member functions is a (non-member) function pointer.
} else {
// Fix the subexpression, which really has to be an
// UnresolvedLookupExpr holding an overloaded member function
@@ -16206,7 +16200,10 @@ ExprResult Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
}
QualType Type = Fn->getType();
- ExprValueKind ValueKind = getLangOpts().CPlusPlus ? VK_LValue : VK_PRValue;
+ ExprValueKind ValueKind =
+ getLangOpts().CPlusPlus && !Fn->hasCXXExplicitFunctionObjectParameter()
+ ? VK_LValue
+ : VK_PRValue;
// FIXME: Duplicated from BuildDeclarationNameExpr.
if (unsigned BID = Fn->getBuiltinID()) {
@@ -16296,3 +16293,17 @@ bool clang::shouldEnforceArgLimit(bool PartialOverloading,
return false;
return true;
}
+
+void Sema::DiagnoseUseOfDeletedFunction(SourceLocation Loc, SourceRange Range,
+ DeclarationName Name,
+ OverloadCandidateSet &CandidateSet,
+ FunctionDecl *Fn, MultiExprArg Args,
+ bool IsMember) {
+ StringLiteral *Msg = Fn->getDeletedMessage();
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(Loc, PDiag(diag::err_ovl_deleted_call)
+ << IsMember << Name << (Msg != nullptr)
+ << (Msg ? Msg->getString() : StringRef())
+ << Range),
+ *this, OCD_AllCandidates, Args);
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaPPC.cpp b/contrib/llvm-project/clang/lib/Sema/SemaPPC.cpp
new file mode 100644
index 000000000000..99f46b12e696
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaPPC.cpp
@@ -0,0 +1,439 @@
+//===------ SemaPPC.cpp ------ PowerPC target-specific routines -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to PowerPC.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaPPC.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/ADT/APSInt.h"
+
+namespace clang {
+
+SemaPPC::SemaPPC(Sema &S) : SemaBase(S) {}
+
+void SemaPPC::checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg) {
+ const auto *ICE = dyn_cast<ImplicitCastExpr>(Arg->IgnoreParens());
+ if (!ICE)
+ return;
+
+ const auto *DR = dyn_cast<DeclRefExpr>(ICE->getSubExpr());
+ if (!DR)
+ return;
+
+ const auto *PD = dyn_cast<ParmVarDecl>(DR->getDecl());
+ if (!PD || !PD->getType()->isRecordType())
+ return;
+
+ QualType ArgType = Arg->getType();
+ for (const FieldDecl *FD :
+ ArgType->castAs<RecordType>()->getDecl()->fields()) {
+ if (const auto *AA = FD->getAttr<AlignedAttr>()) {
+ CharUnits Alignment = getASTContext().toCharUnitsFromBits(
+ AA->getAlignment(getASTContext()));
+ if (Alignment.getQuantity() == 16) {
+ Diag(FD->getLocation(), diag::warn_not_xl_compatible) << FD;
+ Diag(Loc, diag::note_misaligned_member_used_here) << PD;
+ }
+ }
+ }
+}
+
+static bool isPPC_64Builtin(unsigned BuiltinID) {
+ // These builtins only work on PPC 64bit targets.
+ switch (BuiltinID) {
+ case PPC::BI__builtin_divde:
+ case PPC::BI__builtin_divdeu:
+ case PPC::BI__builtin_bpermd:
+ case PPC::BI__builtin_pdepd:
+ case PPC::BI__builtin_pextd:
+ case PPC::BI__builtin_ppc_ldarx:
+ case PPC::BI__builtin_ppc_stdcx:
+ case PPC::BI__builtin_ppc_tdw:
+ case PPC::BI__builtin_ppc_trapd:
+ case PPC::BI__builtin_ppc_cmpeqb:
+ case PPC::BI__builtin_ppc_setb:
+ case PPC::BI__builtin_ppc_mulhd:
+ case PPC::BI__builtin_ppc_mulhdu:
+ case PPC::BI__builtin_ppc_maddhd:
+ case PPC::BI__builtin_ppc_maddhdu:
+ case PPC::BI__builtin_ppc_maddld:
+ case PPC::BI__builtin_ppc_load8r:
+ case PPC::BI__builtin_ppc_store8r:
+ case PPC::BI__builtin_ppc_insert_exp:
+ case PPC::BI__builtin_ppc_extract_sig:
+ case PPC::BI__builtin_ppc_addex:
+ case PPC::BI__builtin_darn:
+ case PPC::BI__builtin_darn_raw:
+ case PPC::BI__builtin_ppc_compare_and_swaplp:
+ case PPC::BI__builtin_ppc_fetch_and_addlp:
+ case PPC::BI__builtin_ppc_fetch_and_andlp:
+ case PPC::BI__builtin_ppc_fetch_and_orlp:
+ case PPC::BI__builtin_ppc_fetch_and_swaplp:
+ return true;
+ }
+ return false;
+}
+
+bool SemaPPC::CheckPPCBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ ASTContext &Context = getASTContext();
+ unsigned i = 0, l = 0, u = 0;
+ bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
+ llvm::APSInt Result;
+
+ if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit)
+ return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
+ << TheCall->getSourceRange();
+
+ switch (BuiltinID) {
+ default:
+ return false;
+ case PPC::BI__builtin_altivec_crypto_vshasigmaw:
+ case PPC::BI__builtin_altivec_crypto_vshasigmad:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 15);
+ case PPC::BI__builtin_altivec_dss:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3);
+ case PPC::BI__builtin_tbegin:
+ case PPC::BI__builtin_tend:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 1);
+ case PPC::BI__builtin_tsr:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 7);
+ case PPC::BI__builtin_tabortwc:
+ case PPC::BI__builtin_tabortdc:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 31);
+ case PPC::BI__builtin_tabortwci:
+ case PPC::BI__builtin_tabortdci:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 31) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 31);
+ // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05',
+ // __builtin_(un)pack_longdouble are available only if long double uses IBM
+ // extended double representation.
+ case PPC::BI__builtin_unpack_longdouble:
+ if (SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1))
+ return true;
+ [[fallthrough]];
+ case PPC::BI__builtin_pack_longdouble:
+ if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble())
+ return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi)
+ << "ibmlongdouble";
+ return false;
+ case PPC::BI__builtin_altivec_dst:
+ case PPC::BI__builtin_altivec_dstt:
+ case PPC::BI__builtin_altivec_dstst:
+ case PPC::BI__builtin_altivec_dststt:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3);
+ case PPC::BI__builtin_vsx_xxpermdi:
+ case PPC::BI__builtin_vsx_xxsldwi:
+ return BuiltinVSX(TheCall);
+ case PPC::BI__builtin_unpack_vector_int128:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1);
+ case PPC::BI__builtin_altivec_vgnb:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 2, 7);
+ case PPC::BI__builtin_vsx_xxeval:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 255);
+ case PPC::BI__builtin_altivec_vsldbi:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 7);
+ case PPC::BI__builtin_altivec_vsrdbi:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 7);
+ case PPC::BI__builtin_vsx_xxpermx:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 7);
+ case PPC::BI__builtin_ppc_tw:
+ case PPC::BI__builtin_ppc_tdw:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 1, 31);
+ case PPC::BI__builtin_ppc_cmprb:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 1);
+ // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must
+ // be a constant that represents a contiguous bit field.
+ case PPC::BI__builtin_ppc_rlwnm:
+ return SemaRef.ValueIsRunOfOnes(TheCall, 2);
+ case PPC::BI__builtin_ppc_rlwimi:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 31) ||
+ SemaRef.ValueIsRunOfOnes(TheCall, 3);
+ case PPC::BI__builtin_ppc_rldimi:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 63) ||
+ SemaRef.ValueIsRunOfOnes(TheCall, 3);
+ case PPC::BI__builtin_ppc_addex: {
+ if (SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3))
+ return true;
+ // Output warning for reserved values 1 to 3.
+ int ArgValue =
+ TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue();
+ if (ArgValue != 0)
+ Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour)
+ << ArgValue;
+ return false;
+ }
+ case PPC::BI__builtin_ppc_mtfsb0:
+ case PPC::BI__builtin_ppc_mtfsb1:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 31);
+ case PPC::BI__builtin_ppc_mtfsf:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 255);
+ case PPC::BI__builtin_ppc_mtfsfi:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 7) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15);
+ case PPC::BI__builtin_ppc_alignx:
+ return SemaRef.BuiltinConstantArgPower2(TheCall, 0);
+ case PPC::BI__builtin_ppc_rdlam:
+ return SemaRef.ValueIsRunOfOnes(TheCall, 2);
+ case PPC::BI__builtin_vsx_ldrmb:
+ case PPC::BI__builtin_vsx_strmb:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 1, 16);
+ case PPC::BI__builtin_altivec_vcntmbb:
+ case PPC::BI__builtin_altivec_vcntmbh:
+ case PPC::BI__builtin_altivec_vcntmbw:
+ case PPC::BI__builtin_altivec_vcntmbd:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1);
+ case PPC::BI__builtin_vsx_xxgenpcvbm:
+ case PPC::BI__builtin_vsx_xxgenpcvhm:
+ case PPC::BI__builtin_vsx_xxgenpcvwm:
+ case PPC::BI__builtin_vsx_xxgenpcvdm:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 3);
+ case PPC::BI__builtin_ppc_test_data_class: {
+ // Check if the first argument of the __builtin_ppc_test_data_class call is
+ // valid. The argument must be 'float' or 'double' or '__float128'.
+ QualType ArgType = TheCall->getArg(0)->getType();
+ if (ArgType != QualType(Context.FloatTy) &&
+ ArgType != QualType(Context.DoubleTy) &&
+ ArgType != QualType(Context.Float128Ty))
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_ppc_invalid_test_data_class_type);
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 127);
+ }
+ case PPC::BI__builtin_ppc_maxfe:
+ case PPC::BI__builtin_ppc_minfe:
+ case PPC::BI__builtin_ppc_maxfl:
+ case PPC::BI__builtin_ppc_minfl:
+ case PPC::BI__builtin_ppc_maxfs:
+ case PPC::BI__builtin_ppc_minfs: {
+ if (Context.getTargetInfo().getTriple().isOSAIX() &&
+ (BuiltinID == PPC::BI__builtin_ppc_maxfe ||
+ BuiltinID == PPC::BI__builtin_ppc_minfe))
+ return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type)
+ << "builtin" << true << 128 << QualType(Context.LongDoubleTy)
+ << false << Context.getTargetInfo().getTriple().str();
+ // Argument type should be exact.
+ QualType ArgType = QualType(Context.LongDoubleTy);
+ if (BuiltinID == PPC::BI__builtin_ppc_maxfl ||
+ BuiltinID == PPC::BI__builtin_ppc_minfl)
+ ArgType = QualType(Context.DoubleTy);
+ else if (BuiltinID == PPC::BI__builtin_ppc_maxfs ||
+ BuiltinID == PPC::BI__builtin_ppc_minfs)
+ ArgType = QualType(Context.FloatTy);
+ for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I)
+ if (TheCall->getArg(I)->getType() != ArgType)
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_typecheck_convert_incompatible)
+ << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0;
+ return false;
+ }
+#define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \
+ case PPC::BI__builtin_##Name: \
+ return BuiltinPPCMMACall(TheCall, BuiltinID, Types);
+#include "clang/Basic/BuiltinsPPC.def"
+ }
+ return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u);
+}
+
+// Check if the given type is a non-pointer PPC MMA type. This function is used
+// in Sema to prevent invalid uses of restricted PPC MMA types.
+bool SemaPPC::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
+ ASTContext &Context = getASTContext();
+ if (Type->isPointerType() || Type->isArrayType())
+ return false;
+
+ QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
+#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
+ if (false
+#include "clang/Basic/PPCTypes.def"
+ ) {
+ Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
+ return true;
+ }
+ return false;
+}
+
+/// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str,
+/// advancing the pointer over the consumed characters. The decoded type is
+/// returned. If the decoded type represents a constant integer with a
+/// constraint on its value then Mask is set to that value. The type descriptors
+/// used in Str are specific to PPC MMA builtins and are documented in the file
+/// defining the PPC builtins.
+static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
+ unsigned &Mask) {
+ bool RequireICE = false;
+ ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
+ switch (*Str++) {
+ case 'V':
+ return Context.getVectorType(Context.UnsignedCharTy, 16,
+ VectorKind::AltiVecVector);
+ case 'i': {
+ char *End;
+ unsigned size = strtoul(Str, &End, 10);
+ assert(End != Str && "Missing constant parameter constraint");
+ Str = End;
+ Mask = size;
+ return Context.IntTy;
+ }
+ case 'W': {
+ char *End;
+ unsigned size = strtoul(Str, &End, 10);
+ assert(End != Str && "Missing PowerPC MMA type size");
+ Str = End;
+ QualType Type;
+ switch (size) {
+#define PPC_VECTOR_TYPE(typeName, Id, size) \
+ case size: \
+ Type = Context.Id##Ty; \
+ break;
+#include "clang/Basic/PPCTypes.def"
+ default:
+ llvm_unreachable("Invalid PowerPC MMA vector type");
+ }
+ bool CheckVectorArgs = false;
+ while (!CheckVectorArgs) {
+ switch (*Str++) {
+ case '*':
+ Type = Context.getPointerType(Type);
+ break;
+ case 'C':
+ Type = Type.withConst();
+ break;
+ default:
+ CheckVectorArgs = true;
+ --Str;
+ break;
+ }
+ }
+ return Type;
+ }
+ default:
+ return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true);
+ }
+}
+
+bool SemaPPC::BuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
+ const char *TypeStr) {
+
+ assert((TypeStr[0] != '\0') &&
+ "Invalid types in PPC MMA builtin declaration");
+
+ ASTContext &Context = getASTContext();
+ unsigned Mask = 0;
+ unsigned ArgNum = 0;
+
+ // The first type in TypeStr is the type of the value returned by the
+ // builtin. So we first read that type and change the type of TheCall.
+ QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
+ TheCall->setType(type);
+
+ while (*TypeStr != '\0') {
+ Mask = 0;
+ QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
+ if (ArgNum >= TheCall->getNumArgs()) {
+ ArgNum++;
+ break;
+ }
+
+ Expr *Arg = TheCall->getArg(ArgNum);
+ QualType PassedType = Arg->getType();
+ QualType StrippedRVType = PassedType.getCanonicalType();
+
+ // Strip Restrict/Volatile qualifiers.
+ if (StrippedRVType.isRestrictQualified() ||
+ StrippedRVType.isVolatileQualified())
+ StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType();
+
+ // The only case where the argument type and expected type are allowed to
+ // mismatch is if the argument type is a non-void pointer (or array) and
+ // expected type is a void pointer.
+ if (StrippedRVType != ExpectedType)
+ if (!(ExpectedType->isVoidPointerType() &&
+ (StrippedRVType->isPointerType() || StrippedRVType->isArrayType())))
+ return Diag(Arg->getBeginLoc(),
+ diag::err_typecheck_convert_incompatible)
+ << PassedType << ExpectedType << 1 << 0 << 0;
+
+ // If the value of the Mask is not 0, we have a constraint in the size of
+ // the integer argument so here we ensure the argument is a constant that
+ // is in the valid range.
+ if (Mask != 0 &&
+ SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true))
+ return true;
+
+ ArgNum++;
+ }
+
+ // In case we exited early from the previous loop, there are other types to
+ // read from TypeStr. So we need to read them all to ensure we have the right
+ // number of arguments in TheCall and if it is not the case, to display a
+ // better error message.
+ while (*TypeStr != '\0') {
+ (void)DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
+ ArgNum++;
+ }
+ if (SemaRef.checkArgCount(TheCall, ArgNum))
+ return true;
+
+ return false;
+}
+
+bool SemaPPC::BuiltinVSX(CallExpr *TheCall) {
+ unsigned ExpectedNumArgs = 3;
+ if (SemaRef.checkArgCount(TheCall, ExpectedNumArgs))
+ return true;
+
+ // Check the third argument is a compile time constant
+ if (!TheCall->getArg(2)->isIntegerConstantExpr(getASTContext()))
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_vsx_builtin_nonconstant_argument)
+ << 3 /* argument index */ << TheCall->getDirectCallee()
+ << SourceRange(TheCall->getArg(2)->getBeginLoc(),
+ TheCall->getArg(2)->getEndLoc());
+
+ QualType Arg1Ty = TheCall->getArg(0)->getType();
+ QualType Arg2Ty = TheCall->getArg(1)->getType();
+
+ // Check the type of argument 1 and argument 2 are vectors.
+ SourceLocation BuiltinLoc = TheCall->getBeginLoc();
+ if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) ||
+ (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) {
+ return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
+ << TheCall->getDirectCallee() << /*isMorethantwoArgs*/ false
+ << SourceRange(TheCall->getArg(0)->getBeginLoc(),
+ TheCall->getArg(1)->getEndLoc());
+ }
+
+ // Check the first two arguments are the same type.
+ if (!getASTContext().hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) {
+ return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
+ << TheCall->getDirectCallee() << /*isMorethantwoArgs*/ false
+ << SourceRange(TheCall->getArg(0)->getBeginLoc(),
+ TheCall->getArg(1)->getEndLoc());
+ }
+
+ // When default clang type checking is turned off and the customized type
+ // checking is used, the returning type of the function must be explicitly
+ // set. Otherwise it is _Bool by default.
+ TheCall->setType(Arg1Ty);
+
+ return false;
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaPseudoObject.cpp b/contrib/llvm-project/clang/lib/Sema/SemaPseudoObject.cpp
index 528c261c4a29..fdb584ceb810 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaPseudoObject.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaPseudoObject.cpp
@@ -29,13 +29,15 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaPseudoObject.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaObjC.h"
#include "llvm/ADT/SmallString.h"
using namespace clang;
@@ -557,30 +559,31 @@ static ObjCMethodDecl *LookupMethodInReceiverType(Sema &S, Selector sel,
// Special case for 'self' in class method implementations.
if (PT->isObjCClassType() &&
- S.isSelfExpr(const_cast<Expr*>(PRE->getBase()))) {
+ S.ObjC().isSelfExpr(const_cast<Expr *>(PRE->getBase()))) {
// This cast is safe because isSelfExpr is only true within
// methods.
ObjCMethodDecl *method =
cast<ObjCMethodDecl>(S.CurContext->getNonClosureAncestor());
- return S.LookupMethodInObjectType(sel,
- S.Context.getObjCInterfaceType(method->getClassInterface()),
- /*instance*/ false);
+ return S.ObjC().LookupMethodInObjectType(
+ sel, S.Context.getObjCInterfaceType(method->getClassInterface()),
+ /*instance*/ false);
}
- return S.LookupMethodInObjectType(sel, PT->getPointeeType(), true);
+ return S.ObjC().LookupMethodInObjectType(sel, PT->getPointeeType(), true);
}
if (PRE->isSuperReceiver()) {
if (const ObjCObjectPointerType *PT =
PRE->getSuperReceiverType()->getAs<ObjCObjectPointerType>())
- return S.LookupMethodInObjectType(sel, PT->getPointeeType(), true);
+ return S.ObjC().LookupMethodInObjectType(sel, PT->getPointeeType(), true);
- return S.LookupMethodInObjectType(sel, PRE->getSuperReceiverType(), false);
+ return S.ObjC().LookupMethodInObjectType(sel, PRE->getSuperReceiverType(),
+ false);
}
assert(PRE->isClassReceiver() && "Invalid expression");
QualType IT = S.Context.getObjCInterfaceType(PRE->getClassReceiver());
- return S.LookupMethodInObjectType(sel, IT, false);
+ return S.ObjC().LookupMethodInObjectType(sel, IT, false);
}
bool ObjCPropertyOpBuilder::isWeakProperty() const {
@@ -613,9 +616,9 @@ bool ObjCPropertyOpBuilder::findGetter() {
// Must build the getter selector the hard way.
ObjCMethodDecl *setter = RefExpr->getImplicitPropertySetter();
assert(setter && "both setter and getter are null - cannot happen");
- IdentifierInfo *setterName =
- setter->getSelector().getIdentifierInfoForSlot(0);
- IdentifierInfo *getterName =
+ const IdentifierInfo *setterName =
+ setter->getSelector().getIdentifierInfoForSlot(0);
+ const IdentifierInfo *getterName =
&S.Context.Idents.get(setterName->getName().substr(3));
GetterSelector =
S.PP.getSelectorTable().getNullarySelector(getterName);
@@ -640,9 +643,9 @@ bool ObjCPropertyOpBuilder::findSetter(bool warn) {
SetterSelector = setter->getSelector();
return true;
} else {
- IdentifierInfo *getterName =
- RefExpr->getImplicitPropertyGetter()->getSelector()
- .getIdentifierInfoForSlot(0);
+ const IdentifierInfo *getterName = RefExpr->getImplicitPropertyGetter()
+ ->getSelector()
+ .getIdentifierInfoForSlot(0);
SetterSelector =
SelectorTable::constructSetterSelector(S.PP.getIdentifierTable(),
S.PP.getSelectorTable(),
@@ -667,7 +670,8 @@ bool ObjCPropertyOpBuilder::findSetter(bool warn) {
front = isLowercase(front) ? toUppercase(front) : toLowercase(front);
SmallString<100> PropertyName = thisPropertyName;
PropertyName[0] = front;
- IdentifierInfo *AltMember = &S.PP.getIdentifierTable().get(PropertyName);
+ const IdentifierInfo *AltMember =
+ &S.PP.getIdentifierTable().get(PropertyName);
if (ObjCPropertyDecl *prop1 = IFace->FindPropertyDeclaration(
AltMember, prop->getQueryKind()))
if (prop != prop1 && (prop1->getSetterMethodDecl() == setter)) {
@@ -740,13 +744,13 @@ ExprResult ObjCPropertyOpBuilder::buildGet() {
if ((Getter->isInstanceMethod() && !RefExpr->isClassReceiver()) ||
RefExpr->isObjectReceiver()) {
assert(InstanceReceiver || RefExpr->isSuperReceiver());
- msg = S.BuildInstanceMessageImplicit(InstanceReceiver, receiverType,
- GenericLoc, Getter->getSelector(),
- Getter, std::nullopt);
+ msg = S.ObjC().BuildInstanceMessageImplicit(
+ InstanceReceiver, receiverType, GenericLoc, Getter->getSelector(),
+ Getter, std::nullopt);
} else {
- msg = S.BuildClassMessageImplicit(receiverType, RefExpr->isSuperReceiver(),
- GenericLoc, Getter->getSelector(), Getter,
- std::nullopt);
+ msg = S.ObjC().BuildClassMessageImplicit(
+ receiverType, RefExpr->isSuperReceiver(), GenericLoc,
+ Getter->getSelector(), Getter, std::nullopt);
}
return msg;
}
@@ -800,14 +804,13 @@ ExprResult ObjCPropertyOpBuilder::buildSet(Expr *op, SourceLocation opcLoc,
S.DiagnoseUseOfDecl(Setter, GenericLoc, nullptr, true);
if ((Setter->isInstanceMethod() && !RefExpr->isClassReceiver()) ||
RefExpr->isObjectReceiver()) {
- msg = S.BuildInstanceMessageImplicit(InstanceReceiver, receiverType,
- GenericLoc, SetterSelector, Setter,
- MultiExprArg(args, 1));
+ msg = S.ObjC().BuildInstanceMessageImplicit(InstanceReceiver, receiverType,
+ GenericLoc, SetterSelector,
+ Setter, MultiExprArg(args, 1));
} else {
- msg = S.BuildClassMessageImplicit(receiverType, RefExpr->isSuperReceiver(),
- GenericLoc,
- SetterSelector, Setter,
- MultiExprArg(args, 1));
+ msg = S.ObjC().BuildClassMessageImplicit(
+ receiverType, RefExpr->isSuperReceiver(), GenericLoc, SetterSelector,
+ Setter, MultiExprArg(args, 1));
}
if (!msg.isInvalid() && captureSetValueAsResult) {
@@ -835,8 +838,8 @@ ExprResult ObjCPropertyOpBuilder::buildRValueOperation(Expr *op) {
if (result.isInvalid()) return ExprError();
if (RefExpr->isExplicitProperty() && !Getter->hasRelatedResultType())
- S.DiagnosePropertyAccessorMismatch(RefExpr->getExplicitProperty(),
- Getter, RefExpr->getLocation());
+ S.ObjC().DiagnosePropertyAccessorMismatch(RefExpr->getExplicitProperty(),
+ Getter, RefExpr->getLocation());
// As a special case, if the method returns 'id', try to get
// a better type from the property.
@@ -924,7 +927,7 @@ ObjCPropertyOpBuilder::buildAssignmentOperation(Scope *Sc,
// Various warnings about property assignments in ARC.
if (S.getLangOpts().ObjCAutoRefCount && InstanceReceiver) {
- S.checkRetainCycles(InstanceReceiver->getSourceExpr(), RHS);
+ S.ObjC().checkRetainCycles(InstanceReceiver->getSourceExpr(), RHS);
S.checkUnsafeExprAssigns(opcLoc, LHS, RHS);
}
@@ -1013,7 +1016,7 @@ ObjCSubscriptOpBuilder::buildAssignmentOperation(Scope *Sc,
// Various warnings about objc Index'ed assignments in ARC.
if (S.getLangOpts().ObjCAutoRefCount && InstanceBase) {
- S.checkRetainCycles(InstanceBase->getSourceExpr(), RHS);
+ S.ObjC().checkRetainCycles(InstanceBase->getSourceExpr(), RHS);
S.checkUnsafeExprAssigns(opcLoc, LHS, RHS);
}
@@ -1044,80 +1047,6 @@ Expr *ObjCSubscriptOpBuilder::rebuildAndCaptureObject(Expr *syntacticBase) {
return syntacticBase;
}
-/// CheckSubscriptingKind - This routine decide what type
-/// of indexing represented by "FromE" is being done.
-Sema::ObjCSubscriptKind
- Sema::CheckSubscriptingKind(Expr *FromE) {
- // If the expression already has integral or enumeration type, we're golden.
- QualType T = FromE->getType();
- if (T->isIntegralOrEnumerationType())
- return OS_Array;
-
- // If we don't have a class type in C++, there's no way we can get an
- // expression of integral or enumeration type.
- const RecordType *RecordTy = T->getAs<RecordType>();
- if (!RecordTy &&
- (T->isObjCObjectPointerType() || T->isVoidPointerType()))
- // All other scalar cases are assumed to be dictionary indexing which
- // caller handles, with diagnostics if needed.
- return OS_Dictionary;
- if (!getLangOpts().CPlusPlus ||
- !RecordTy || RecordTy->isIncompleteType()) {
- // No indexing can be done. Issue diagnostics and quit.
- const Expr *IndexExpr = FromE->IgnoreParenImpCasts();
- if (isa<StringLiteral>(IndexExpr))
- Diag(FromE->getExprLoc(), diag::err_objc_subscript_pointer)
- << T << FixItHint::CreateInsertion(FromE->getExprLoc(), "@");
- else
- Diag(FromE->getExprLoc(), diag::err_objc_subscript_type_conversion)
- << T;
- return OS_Error;
- }
-
- // We must have a complete class type.
- if (RequireCompleteType(FromE->getExprLoc(), T,
- diag::err_objc_index_incomplete_class_type, FromE))
- return OS_Error;
-
- // Look for a conversion to an integral, enumeration type, or
- // objective-C pointer type.
- int NoIntegrals=0, NoObjCIdPointers=0;
- SmallVector<CXXConversionDecl *, 4> ConversionDecls;
-
- for (NamedDecl *D : cast<CXXRecordDecl>(RecordTy->getDecl())
- ->getVisibleConversionFunctions()) {
- if (CXXConversionDecl *Conversion =
- dyn_cast<CXXConversionDecl>(D->getUnderlyingDecl())) {
- QualType CT = Conversion->getConversionType().getNonReferenceType();
- if (CT->isIntegralOrEnumerationType()) {
- ++NoIntegrals;
- ConversionDecls.push_back(Conversion);
- }
- else if (CT->isObjCIdType() ||CT->isBlockPointerType()) {
- ++NoObjCIdPointers;
- ConversionDecls.push_back(Conversion);
- }
- }
- }
- if (NoIntegrals ==1 && NoObjCIdPointers == 0)
- return OS_Array;
- if (NoIntegrals == 0 && NoObjCIdPointers == 1)
- return OS_Dictionary;
- if (NoIntegrals == 0 && NoObjCIdPointers == 0) {
- // No conversion function was found. Issue diagnostic and return.
- Diag(FromE->getExprLoc(), diag::err_objc_subscript_type_conversion)
- << FromE->getType();
- return OS_Error;
- }
- Diag(FromE->getExprLoc(), diag::err_objc_multiple_subscript_type_conversion)
- << FromE->getType();
- for (unsigned int i = 0; i < ConversionDecls.size(); i++)
- Diag(ConversionDecls[i]->getLocation(),
- diag::note_conv_function_declared_at);
-
- return OS_Error;
-}
-
/// CheckKeyForObjCARCConversion - This routine suggests bridge casting of CF
/// objects used as dictionary subscript key objects.
static void CheckKeyForObjCARCConversion(Sema &S, QualType ContainerT,
@@ -1126,17 +1055,16 @@ static void CheckKeyForObjCARCConversion(Sema &S, QualType ContainerT,
return;
// dictionary subscripting.
// - (id)objectForKeyedSubscript:(id)key;
- IdentifierInfo *KeyIdents[] = {
- &S.Context.Idents.get("objectForKeyedSubscript")
- };
+ const IdentifierInfo *KeyIdents[] = {
+ &S.Context.Idents.get("objectForKeyedSubscript")};
Selector GetterSelector = S.Context.Selectors.getSelector(1, KeyIdents);
- ObjCMethodDecl *Getter = S.LookupMethodInObjectType(GetterSelector, ContainerT,
- true /*instance*/);
+ ObjCMethodDecl *Getter = S.ObjC().LookupMethodInObjectType(
+ GetterSelector, ContainerT, true /*instance*/);
if (!Getter)
return;
QualType T = Getter->parameters()[0]->getType();
- S.CheckObjCConversion(Key->getSourceRange(), T, Key,
- Sema::CCK_ImplicitConversion);
+ S.ObjC().CheckObjCConversion(Key->getSourceRange(), T, Key,
+ CheckedConversionKind::Implicit);
}
bool ObjCSubscriptOpBuilder::findAtIndexGetter() {
@@ -1151,15 +1079,15 @@ bool ObjCSubscriptOpBuilder::findAtIndexGetter() {
BaseT->getAs<ObjCObjectPointerType>()) {
ResultType = PTy->getPointeeType();
}
- Sema::ObjCSubscriptKind Res =
- S.CheckSubscriptingKind(RefExpr->getKeyExpr());
- if (Res == Sema::OS_Error) {
+ SemaObjC::ObjCSubscriptKind Res =
+ S.ObjC().CheckSubscriptingKind(RefExpr->getKeyExpr());
+ if (Res == SemaObjC::OS_Error) {
if (S.getLangOpts().ObjCAutoRefCount)
CheckKeyForObjCARCConversion(S, ResultType,
RefExpr->getKeyExpr());
return false;
}
- bool arrayRef = (Res == Sema::OS_Array);
+ bool arrayRef = (Res == SemaObjC::OS_Array);
if (ResultType.isNull()) {
S.Diag(BaseExpr->getExprLoc(), diag::err_objc_subscript_base_type)
@@ -1169,22 +1097,20 @@ bool ObjCSubscriptOpBuilder::findAtIndexGetter() {
if (!arrayRef) {
// dictionary subscripting.
// - (id)objectForKeyedSubscript:(id)key;
- IdentifierInfo *KeyIdents[] = {
- &S.Context.Idents.get("objectForKeyedSubscript")
- };
+ const IdentifierInfo *KeyIdents[] = {
+ &S.Context.Idents.get("objectForKeyedSubscript")};
AtIndexGetterSelector = S.Context.Selectors.getSelector(1, KeyIdents);
}
else {
// - (id)objectAtIndexedSubscript:(size_t)index;
- IdentifierInfo *KeyIdents[] = {
- &S.Context.Idents.get("objectAtIndexedSubscript")
- };
+ const IdentifierInfo *KeyIdents[] = {
+ &S.Context.Idents.get("objectAtIndexedSubscript")};
AtIndexGetterSelector = S.Context.Selectors.getSelector(1, KeyIdents);
}
- AtIndexGetter = S.LookupMethodInObjectType(AtIndexGetterSelector, ResultType,
- true /*instance*/);
+ AtIndexGetter = S.ObjC().LookupMethodInObjectType(
+ AtIndexGetterSelector, ResultType, true /*instance*/);
if (!AtIndexGetter && S.getLangOpts().DebuggerObjCLiteral) {
AtIndexGetter = ObjCMethodDecl::Create(
@@ -1214,10 +1140,8 @@ bool ObjCSubscriptOpBuilder::findAtIndexGetter() {
<< BaseExpr->getType() << 0 << arrayRef;
return false;
}
- AtIndexGetter =
- S.LookupInstanceMethodInGlobalPool(AtIndexGetterSelector,
- RefExpr->getSourceRange(),
- true);
+ AtIndexGetter = S.ObjC().LookupInstanceMethodInGlobalPool(
+ AtIndexGetterSelector, RefExpr->getSourceRange(), true);
}
if (AtIndexGetter) {
@@ -1255,15 +1179,15 @@ bool ObjCSubscriptOpBuilder::findAtIndexSetter() {
ResultType = PTy->getPointeeType();
}
- Sema::ObjCSubscriptKind Res =
- S.CheckSubscriptingKind(RefExpr->getKeyExpr());
- if (Res == Sema::OS_Error) {
+ SemaObjC::ObjCSubscriptKind Res =
+ S.ObjC().CheckSubscriptingKind(RefExpr->getKeyExpr());
+ if (Res == SemaObjC::OS_Error) {
if (S.getLangOpts().ObjCAutoRefCount)
CheckKeyForObjCARCConversion(S, ResultType,
RefExpr->getKeyExpr());
return false;
}
- bool arrayRef = (Res == Sema::OS_Array);
+ bool arrayRef = (Res == SemaObjC::OS_Array);
if (ResultType.isNull()) {
S.Diag(BaseExpr->getExprLoc(), diag::err_objc_subscript_base_type)
@@ -1274,22 +1198,20 @@ bool ObjCSubscriptOpBuilder::findAtIndexSetter() {
if (!arrayRef) {
// dictionary subscripting.
// - (void)setObject:(id)object forKeyedSubscript:(id)key;
- IdentifierInfo *KeyIdents[] = {
- &S.Context.Idents.get("setObject"),
- &S.Context.Idents.get("forKeyedSubscript")
- };
+ const IdentifierInfo *KeyIdents[] = {
+ &S.Context.Idents.get("setObject"),
+ &S.Context.Idents.get("forKeyedSubscript")};
AtIndexSetterSelector = S.Context.Selectors.getSelector(2, KeyIdents);
}
else {
// - (void)setObject:(id)object atIndexedSubscript:(NSInteger)index;
- IdentifierInfo *KeyIdents[] = {
- &S.Context.Idents.get("setObject"),
- &S.Context.Idents.get("atIndexedSubscript")
- };
+ const IdentifierInfo *KeyIdents[] = {
+ &S.Context.Idents.get("setObject"),
+ &S.Context.Idents.get("atIndexedSubscript")};
AtIndexSetterSelector = S.Context.Selectors.getSelector(2, KeyIdents);
}
- AtIndexSetter = S.LookupMethodInObjectType(AtIndexSetterSelector, ResultType,
- true /*instance*/);
+ AtIndexSetter = S.ObjC().LookupMethodInObjectType(
+ AtIndexSetterSelector, ResultType, true /*instance*/);
if (!AtIndexSetter && S.getLangOpts().DebuggerObjCLiteral) {
TypeSourceInfo *ReturnTInfo = nullptr;
@@ -1331,10 +1253,8 @@ bool ObjCSubscriptOpBuilder::findAtIndexSetter() {
<< BaseExpr->getType() << 1 << arrayRef;
return false;
}
- AtIndexSetter =
- S.LookupInstanceMethodInGlobalPool(AtIndexSetterSelector,
- RefExpr->getSourceRange(),
- true);
+ AtIndexSetter = S.ObjC().LookupInstanceMethodInGlobalPool(
+ AtIndexSetterSelector, RefExpr->getSourceRange(), true);
}
bool err = false;
@@ -1392,10 +1312,9 @@ ExprResult ObjCSubscriptOpBuilder::buildGet() {
assert(InstanceBase);
if (AtIndexGetter)
S.DiagnoseUseOfDecl(AtIndexGetter, GenericLoc);
- msg = S.BuildInstanceMessageImplicit(InstanceBase, receiverType,
- GenericLoc,
- AtIndexGetterSelector, AtIndexGetter,
- MultiExprArg(args, 1));
+ msg = S.ObjC().BuildInstanceMessageImplicit(
+ InstanceBase, receiverType, GenericLoc, AtIndexGetterSelector,
+ AtIndexGetter, MultiExprArg(args, 1));
return msg;
}
@@ -1417,11 +1336,9 @@ ExprResult ObjCSubscriptOpBuilder::buildSet(Expr *op, SourceLocation opcLoc,
Expr *args[] = { op, Index };
// Build a message-send.
- ExprResult msg = S.BuildInstanceMessageImplicit(InstanceBase, receiverType,
- GenericLoc,
- AtIndexSetterSelector,
- AtIndexSetter,
- MultiExprArg(args, 2));
+ ExprResult msg = S.ObjC().BuildInstanceMessageImplicit(
+ InstanceBase, receiverType, GenericLoc, AtIndexSetterSelector,
+ AtIndexSetter, MultiExprArg(args, 2));
if (!msg.isInvalid() && captureSetValueAsResult) {
ObjCMessageExpr *msgExpr =
@@ -1474,7 +1391,7 @@ ExprResult MSPropertyOpBuilder::buildGet() {
}
UnqualifiedId GetterName;
- IdentifierInfo *II = RefExpr->getPropertyDecl()->getGetterId();
+ const IdentifierInfo *II = RefExpr->getPropertyDecl()->getGetterId();
GetterName.setIdentifier(II, RefExpr->getMemberLoc());
CXXScopeSpec SS;
SS.Adopt(RefExpr->getQualifierLoc());
@@ -1503,7 +1420,7 @@ ExprResult MSPropertyOpBuilder::buildSet(Expr *op, SourceLocation sl,
}
UnqualifiedId SetterName;
- IdentifierInfo *II = RefExpr->getPropertyDecl()->getSetterId();
+ const IdentifierInfo *II = RefExpr->getPropertyDecl()->getSetterId();
SetterName.setIdentifier(II, RefExpr->getMemberLoc());
CXXScopeSpec SS;
SS.Adopt(RefExpr->getQualifierLoc());
@@ -1530,24 +1447,24 @@ ExprResult MSPropertyOpBuilder::buildSet(Expr *op, SourceLocation sl,
// General Sema routines.
//===----------------------------------------------------------------------===//
-ExprResult Sema::checkPseudoObjectRValue(Expr *E) {
+ExprResult SemaPseudoObject::checkRValue(Expr *E) {
Expr *opaqueRef = E->IgnoreParens();
if (ObjCPropertyRefExpr *refExpr
= dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
- ObjCPropertyOpBuilder builder(*this, refExpr, true);
+ ObjCPropertyOpBuilder builder(SemaRef, refExpr, true);
return builder.buildRValueOperation(E);
}
else if (ObjCSubscriptRefExpr *refExpr
= dyn_cast<ObjCSubscriptRefExpr>(opaqueRef)) {
- ObjCSubscriptOpBuilder builder(*this, refExpr, true);
+ ObjCSubscriptOpBuilder builder(SemaRef, refExpr, true);
return builder.buildRValueOperation(E);
} else if (MSPropertyRefExpr *refExpr
= dyn_cast<MSPropertyRefExpr>(opaqueRef)) {
- MSPropertyOpBuilder builder(*this, refExpr, true);
+ MSPropertyOpBuilder builder(SemaRef, refExpr, true);
return builder.buildRValueOperation(E);
} else if (MSPropertySubscriptExpr *RefExpr =
dyn_cast<MSPropertySubscriptExpr>(opaqueRef)) {
- MSPropertyOpBuilder Builder(*this, RefExpr, true);
+ MSPropertyOpBuilder Builder(SemaRef, RefExpr, true);
return Builder.buildRValueOperation(E);
} else {
llvm_unreachable("unknown pseudo-object kind!");
@@ -1555,48 +1472,48 @@ ExprResult Sema::checkPseudoObjectRValue(Expr *E) {
}
/// Check an increment or decrement of a pseudo-object expression.
-ExprResult Sema::checkPseudoObjectIncDec(Scope *Sc, SourceLocation opcLoc,
+ExprResult SemaPseudoObject::checkIncDec(Scope *Sc, SourceLocation opcLoc,
UnaryOperatorKind opcode, Expr *op) {
// Do nothing if the operand is dependent.
if (op->isTypeDependent())
- return UnaryOperator::Create(Context, op, opcode, Context.DependentTy,
- VK_PRValue, OK_Ordinary, opcLoc, false,
- CurFPFeatureOverrides());
+ return UnaryOperator::Create(
+ SemaRef.Context, op, opcode, SemaRef.Context.DependentTy, VK_PRValue,
+ OK_Ordinary, opcLoc, false, SemaRef.CurFPFeatureOverrides());
assert(UnaryOperator::isIncrementDecrementOp(opcode));
Expr *opaqueRef = op->IgnoreParens();
if (ObjCPropertyRefExpr *refExpr
= dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
- ObjCPropertyOpBuilder builder(*this, refExpr, false);
+ ObjCPropertyOpBuilder builder(SemaRef, refExpr, false);
return builder.buildIncDecOperation(Sc, opcLoc, opcode, op);
} else if (isa<ObjCSubscriptRefExpr>(opaqueRef)) {
Diag(opcLoc, diag::err_illegal_container_subscripting_op);
return ExprError();
} else if (MSPropertyRefExpr *refExpr
= dyn_cast<MSPropertyRefExpr>(opaqueRef)) {
- MSPropertyOpBuilder builder(*this, refExpr, false);
+ MSPropertyOpBuilder builder(SemaRef, refExpr, false);
return builder.buildIncDecOperation(Sc, opcLoc, opcode, op);
} else if (MSPropertySubscriptExpr *RefExpr
= dyn_cast<MSPropertySubscriptExpr>(opaqueRef)) {
- MSPropertyOpBuilder Builder(*this, RefExpr, false);
+ MSPropertyOpBuilder Builder(SemaRef, RefExpr, false);
return Builder.buildIncDecOperation(Sc, opcLoc, opcode, op);
} else {
llvm_unreachable("unknown pseudo-object kind!");
}
}
-ExprResult Sema::checkPseudoObjectAssignment(Scope *S, SourceLocation opcLoc,
+ExprResult SemaPseudoObject::checkAssignment(Scope *S, SourceLocation opcLoc,
BinaryOperatorKind opcode,
Expr *LHS, Expr *RHS) {
// Do nothing if either argument is dependent.
if (LHS->isTypeDependent() || RHS->isTypeDependent())
- return BinaryOperator::Create(Context, LHS, RHS, opcode,
- Context.DependentTy, VK_PRValue, OK_Ordinary,
- opcLoc, CurFPFeatureOverrides());
+ return BinaryOperator::Create(
+ SemaRef.Context, LHS, RHS, opcode, SemaRef.Context.DependentTy,
+ VK_PRValue, OK_Ordinary, opcLoc, SemaRef.CurFPFeatureOverrides());
// Filter out non-overload placeholder types in the RHS.
if (RHS->getType()->isNonOverloadPlaceholderType()) {
- ExprResult result = CheckPlaceholderExpr(RHS);
+ ExprResult result = SemaRef.CheckPlaceholderExpr(RHS);
if (result.isInvalid()) return ExprError();
RHS = result.get();
}
@@ -1605,20 +1522,20 @@ ExprResult Sema::checkPseudoObjectAssignment(Scope *S, SourceLocation opcLoc,
Expr *opaqueRef = LHS->IgnoreParens();
if (ObjCPropertyRefExpr *refExpr
= dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
- ObjCPropertyOpBuilder builder(*this, refExpr, IsSimpleAssign);
+ ObjCPropertyOpBuilder builder(SemaRef, refExpr, IsSimpleAssign);
return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
} else if (ObjCSubscriptRefExpr *refExpr
= dyn_cast<ObjCSubscriptRefExpr>(opaqueRef)) {
- ObjCSubscriptOpBuilder builder(*this, refExpr, IsSimpleAssign);
+ ObjCSubscriptOpBuilder builder(SemaRef, refExpr, IsSimpleAssign);
return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
} else if (MSPropertyRefExpr *refExpr
= dyn_cast<MSPropertyRefExpr>(opaqueRef)) {
- MSPropertyOpBuilder builder(*this, refExpr, IsSimpleAssign);
- return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
+ MSPropertyOpBuilder builder(SemaRef, refExpr, IsSimpleAssign);
+ return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
} else if (MSPropertySubscriptExpr *RefExpr
= dyn_cast<MSPropertySubscriptExpr>(opaqueRef)) {
- MSPropertyOpBuilder Builder(*this, RefExpr, IsSimpleAssign);
- return Builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
+ MSPropertyOpBuilder Builder(SemaRef, RefExpr, IsSimpleAssign);
+ return Builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
} else {
llvm_unreachable("unknown pseudo-object kind!");
}
@@ -1641,36 +1558,38 @@ static Expr *stripOpaqueValuesFromPseudoObjectRef(Sema &S, Expr *E) {
/// This is a hack which should be removed when TreeTransform is
/// capable of rebuilding a tree without stripping implicit
/// operations.
-Expr *Sema::recreateSyntacticForm(PseudoObjectExpr *E) {
+Expr *SemaPseudoObject::recreateSyntacticForm(PseudoObjectExpr *E) {
Expr *syntax = E->getSyntacticForm();
if (UnaryOperator *uop = dyn_cast<UnaryOperator>(syntax)) {
- Expr *op = stripOpaqueValuesFromPseudoObjectRef(*this, uop->getSubExpr());
- return UnaryOperator::Create(Context, op, uop->getOpcode(), uop->getType(),
- uop->getValueKind(), uop->getObjectKind(),
- uop->getOperatorLoc(), uop->canOverflow(),
- CurFPFeatureOverrides());
+ Expr *op = stripOpaqueValuesFromPseudoObjectRef(SemaRef, uop->getSubExpr());
+ return UnaryOperator::Create(
+ SemaRef.Context, op, uop->getOpcode(), uop->getType(),
+ uop->getValueKind(), uop->getObjectKind(), uop->getOperatorLoc(),
+ uop->canOverflow(), SemaRef.CurFPFeatureOverrides());
} else if (CompoundAssignOperator *cop
= dyn_cast<CompoundAssignOperator>(syntax)) {
- Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, cop->getLHS());
+ Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(SemaRef, cop->getLHS());
Expr *rhs = cast<OpaqueValueExpr>(cop->getRHS())->getSourceExpr();
return CompoundAssignOperator::Create(
- Context, lhs, rhs, cop->getOpcode(), cop->getType(),
+ SemaRef.Context, lhs, rhs, cop->getOpcode(), cop->getType(),
cop->getValueKind(), cop->getObjectKind(), cop->getOperatorLoc(),
- CurFPFeatureOverrides(), cop->getComputationLHSType(),
+ SemaRef.CurFPFeatureOverrides(), cop->getComputationLHSType(),
cop->getComputationResultType());
} else if (BinaryOperator *bop = dyn_cast<BinaryOperator>(syntax)) {
- Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, bop->getLHS());
+ Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(SemaRef, bop->getLHS());
Expr *rhs = cast<OpaqueValueExpr>(bop->getRHS())->getSourceExpr();
- return BinaryOperator::Create(Context, lhs, rhs, bop->getOpcode(),
+ return BinaryOperator::Create(SemaRef.Context, lhs, rhs, bop->getOpcode(),
bop->getType(), bop->getValueKind(),
bop->getObjectKind(), bop->getOperatorLoc(),
- CurFPFeatureOverrides());
+ SemaRef.CurFPFeatureOverrides());
} else if (isa<CallExpr>(syntax)) {
return syntax;
} else {
assert(syntax->hasPlaceholderType(BuiltinType::PseudoObject));
- return stripOpaqueValuesFromPseudoObjectRef(*this, syntax);
+ return stripOpaqueValuesFromPseudoObjectRef(SemaRef, syntax);
}
}
+
+SemaPseudoObject::SemaPseudoObject(Sema &S) : SemaBase(S) {}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaRISCV.cpp b/contrib/llvm-project/clang/lib/Sema/SemaRISCV.cpp
new file mode 100644
index 000000000000..f1c7c0516e67
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaRISCV.cpp
@@ -0,0 +1,1491 @@
+//===------ SemaRISCV.cpp ------- RISC-V target-specific routines ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to RISC-V.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaRISCV.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Attr.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ParsedAttr.h"
+#include "clang/Sema/RISCVIntrinsicManager.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Support/RISCVVIntrinsicUtils.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/TargetParser/RISCVTargetParser.h"
+#include <optional>
+#include <string>
+#include <vector>
+
+using namespace llvm;
+using namespace clang;
+using namespace clang::RISCV;
+
+using IntrinsicKind = sema::RISCVIntrinsicManager::IntrinsicKind;
+
+namespace {
+
+// Function definition of a RVV intrinsic: everything needed to materialize a
+// FunctionDecl for one concrete instantiation (type/LMUL/policy variant).
+struct RVVIntrinsicDef {
+  /// Mapping to which clang built-in function, e.g. __builtin_rvv_vadd.
+  std::string BuiltinName;
+
+  /// Function signature, first element is return type.
+  RVVTypes Signature;
+};
+
+// All concrete intrinsics that share one overloaded (short) name.
+struct RVVOverloadIntrinsicDef {
+  // Indexes of RISCVIntrinsicManagerImpl::IntrinsicList.
+  SmallVector<uint16_t, 8> Indexes;
+};
+
+} // namespace
+
+// Signature table for the standard "V" vector intrinsics; the contents are
+// generated into riscv_vector_builtin_sema.inc (kept in sync with
+// RISCVVEmitter.cpp).
+static const PrototypeDescriptor RVVSignatureTable[] = {
+#define DECL_SIGNATURE_TABLE
+#include "clang/Basic/riscv_vector_builtin_sema.inc"
+#undef DECL_SIGNATURE_TABLE
+};
+
+// Signature table for the SiFive vendor vector intrinsics.
+static const PrototypeDescriptor RVSiFiveVectorSignatureTable[] = {
+#define DECL_SIGNATURE_TABLE
+#include "clang/Basic/riscv_sifive_vector_builtin_sema.inc"
+#undef DECL_SIGNATURE_TABLE
+};
+
+// Intrinsic records for the standard "V" vector intrinsics; each record
+// holds (index, length) references into RVVSignatureTable.
+static const RVVIntrinsicRecord RVVIntrinsicRecords[] = {
+#define DECL_INTRINSIC_RECORDS
+#include "clang/Basic/riscv_vector_builtin_sema.inc"
+#undef DECL_INTRINSIC_RECORDS
+};
+
+// Intrinsic records for the SiFive vendor vector intrinsics; each record
+// holds (index, length) references into RVSiFiveVectorSignatureTable.
+static const RVVIntrinsicRecord RVSiFiveVectorIntrinsicRecords[] = {
+#define DECL_INTRINSIC_RECORDS
+#include "clang/Basic/riscv_sifive_vector_builtin_sema.inc"
+#undef DECL_INTRINSIC_RECORDS
+};
+
+// Get subsequence of signature table.
+//
+// Returns the [Index, Index + Length) slice of the signature table that
+// belongs to intrinsic kind K (standard RVV table vs. SiFive vendor table).
+static ArrayRef<PrototypeDescriptor>
+ProtoSeq2ArrayRef(IntrinsicKind K, uint16_t Index, uint8_t Length) {
+  switch (K) {
+  case IntrinsicKind::RVV:
+    return ArrayRef(&RVVSignatureTable[Index], Length);
+  case IntrinsicKind::SIFIVE_VECTOR:
+    return ArrayRef(&RVSiFiveVectorSignatureTable[Index], Length);
+  }
+  llvm_unreachable("Unhandled IntrinsicKind");
+}
+
+// Map an RVVType (the table-generated type description) to the corresponding
+// clang QualType in Context. Scalar kind is resolved first, then the vector,
+// const, and pointer wrappers are applied, pointer last.
+static QualType RVVType2Qual(ASTContext &Context, const RVVType *Type) {
+  QualType QT;
+  switch (Type->getScalarType()) {
+  case ScalarTypeKind::Void:
+    QT = Context.VoidTy;
+    break;
+  case ScalarTypeKind::Size_t:
+    QT = Context.getSizeType();
+    break;
+  case ScalarTypeKind::Ptrdiff_t:
+    QT = Context.getPointerDiffType();
+    break;
+  case ScalarTypeKind::UnsignedLong:
+    QT = Context.UnsignedLongTy;
+    break;
+  case ScalarTypeKind::SignedLong:
+    QT = Context.LongTy;
+    break;
+  case ScalarTypeKind::Boolean:
+    QT = Context.BoolTy;
+    break;
+  case ScalarTypeKind::SignedInteger:
+    QT = Context.getIntTypeForBitwidth(Type->getElementBitwidth(), true);
+    break;
+  case ScalarTypeKind::UnsignedInteger:
+    QT = Context.getIntTypeForBitwidth(Type->getElementBitwidth(), false);
+    break;
+  case ScalarTypeKind::BFloat:
+    QT = Context.BFloat16Ty;
+    break;
+  case ScalarTypeKind::Float:
+    switch (Type->getElementBitwidth()) {
+    case 64:
+      QT = Context.DoubleTy;
+      break;
+    case 32:
+      QT = Context.FloatTy;
+      break;
+    case 16:
+      QT = Context.Float16Ty;
+      break;
+    default:
+      llvm_unreachable("Unsupported floating point width.");
+    }
+    break;
+  case Invalid:
+  case Undefined:
+    llvm_unreachable("Unhandled type.");
+  }
+  // Wrap the scalar into a scalable vector; tuple types carry an NF (number
+  // of fields) component.
+  if (Type->isVector()) {
+    if (Type->isTuple())
+      QT = Context.getScalableVectorType(QT, *Type->getScale(), Type->getNF());
+    else
+      QT = Context.getScalableVectorType(QT, *Type->getScale());
+  }
+
+  if (Type->isConstant())
+    QT = Context.getConstType(QT);
+
+  // Transform the type to a pointer as the last step, if necessary.
+  if (Type->isPointer())
+    QT = Context.getPointerType(QT);
+
+  return QT;
+}
+
+namespace {
+// Builds and caches the set of RVV intrinsic definitions so that name lookup
+// (CreateIntrinsicIfFound) can materialize FunctionDecls on demand instead of
+// declaring every RVV builtin eagerly.
+class RISCVIntrinsicManagerImpl : public sema::RISCVIntrinsicManager {
+private:
+  Sema &S;
+  ASTContext &Context;
+  RVVTypeCache TypeCache;
+  // One-shot guards so each record table is expanded at most once.
+  bool ConstructedRISCVVBuiltins;
+  bool ConstructedRISCVSiFiveVectorBuiltins;
+
+  // List of all RVV intrinsic.
+  std::vector<RVVIntrinsicDef> IntrinsicList;
+  // Mapping function name to index of IntrinsicList.
+  StringMap<uint16_t> Intrinsics;
+  // Mapping function name to RVVOverloadIntrinsicDef.
+  StringMap<RVVOverloadIntrinsicDef> OverloadIntrinsics;
+
+  // Create RVVIntrinsicDef and register it in the three maps above.
+  // NOTE(review): the out-of-line definition names this parameter IsMasked;
+  // consider renaming IsMask here for consistency.
+  void InitRVVIntrinsic(const RVVIntrinsicRecord &Record, StringRef SuffixStr,
+                        StringRef OverloadedSuffixStr, bool IsMask,
+                        RVVTypes &Types, bool HasPolicy, Policy PolicyAttrs);
+
+  // Create FunctionDecl for a vector intrinsic.
+  void CreateRVVIntrinsicDecl(LookupResult &LR, IdentifierInfo *II,
+                              Preprocessor &PP, uint32_t Index,
+                              bool IsOverload);
+
+  // Expand one record table (standard RVV or SiFive) into IntrinsicList.
+  void ConstructRVVIntrinsics(ArrayRef<RVVIntrinsicRecord> Recs,
+                              IntrinsicKind K);
+
+public:
+  RISCVIntrinsicManagerImpl(clang::Sema &S) : S(S), Context(S.Context) {
+    ConstructedRISCVVBuiltins = false;
+    ConstructedRISCVSiFiveVectorBuiltins = false;
+  }
+
+  // Initialize IntrinsicList
+  void InitIntrinsicList() override;
+
+  // Create RISC-V vector intrinsic and insert into symbol table if found, and
+  // return true, otherwise return false.
+  bool CreateIntrinsicIfFound(LookupResult &LR, IdentifierInfo *II,
+                              Preprocessor &PP) override;
+};
+} // namespace
+
+// Expand every record in Recs into concrete intrinsic definitions, filtered
+// by the current target's features, and register each instantiation via
+// InitRVVIntrinsic. Records are expanded across element type, LMUL, masking,
+// and policy variants.
+void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
+    ArrayRef<RVVIntrinsicRecord> Recs, IntrinsicKind K) {
+  const TargetInfo &TI = Context.getTargetInfo();
+  // Target-feature name for each RVV requirement bit; a record is skipped
+  // when a required bit is set but the matching feature is disabled.
+  static const std::pair<const char *, RVVRequire> FeatureCheckList[] = {
+      {"64bit", RVV_REQ_RV64},
+      {"xsfvcp", RVV_REQ_Xsfvcp},
+      {"xsfvfnrclipxfqf", RVV_REQ_Xsfvfnrclipxfqf},
+      {"xsfvfwmaccqqq", RVV_REQ_Xsfvfwmaccqqq},
+      {"xsfvqmaccdod", RVV_REQ_Xsfvqmaccdod},
+      {"xsfvqmaccqoq", RVV_REQ_Xsfvqmaccqoq},
+      {"zvbb", RVV_REQ_Zvbb},
+      {"zvbc", RVV_REQ_Zvbc},
+      {"zvkb", RVV_REQ_Zvkb},
+      {"zvkg", RVV_REQ_Zvkg},
+      {"zvkned", RVV_REQ_Zvkned},
+      {"zvknha", RVV_REQ_Zvknha},
+      {"zvknhb", RVV_REQ_Zvknhb},
+      {"zvksed", RVV_REQ_Zvksed},
+      {"zvksh", RVV_REQ_Zvksh},
+      {"zvfbfwma", RVV_REQ_Zvfbfwma},
+      {"zvfbfmin", RVV_REQ_Zvfbfmin},
+      {"experimental", RVV_REQ_Experimental}};
+
+  // Construction of RVVIntrinsicRecords need to sync with createRVVIntrinsics
+  // in RISCVVEmitter.cpp.
+  for (auto &Record : Recs) {
+    // Check requirements.
+    if (llvm::any_of(FeatureCheckList, [&](const auto &Item) {
+          return (Record.RequiredExtensions & Item.second) == Item.second &&
+                 !TI.hasFeature(Item.first);
+        }))
+      continue;
+
+    // Create Intrinsics for each type and LMUL.
+    BasicType BaseType = BasicType::Unknown;
+    ArrayRef<PrototypeDescriptor> BasicProtoSeq =
+        ProtoSeq2ArrayRef(K, Record.PrototypeIndex, Record.PrototypeLength);
+    ArrayRef<PrototypeDescriptor> SuffixProto =
+        ProtoSeq2ArrayRef(K, Record.SuffixIndex, Record.SuffixLength);
+    ArrayRef<PrototypeDescriptor> OverloadedSuffixProto = ProtoSeq2ArrayRef(
+        K, Record.OverloadedSuffixIndex, Record.OverloadedSuffixSize);
+
+    PolicyScheme UnMaskedPolicyScheme =
+        static_cast<PolicyScheme>(Record.UnMaskedPolicyScheme);
+    PolicyScheme MaskedPolicyScheme =
+        static_cast<PolicyScheme>(Record.MaskedPolicyScheme);
+
+    const Policy DefaultPolicy;
+
+    // Prototype for the plain (unmasked, default-policy) form.
+    llvm::SmallVector<PrototypeDescriptor> ProtoSeq =
+        RVVIntrinsic::computeBuiltinTypes(
+            BasicProtoSeq, /*IsMasked=*/false,
+            /*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
+            UnMaskedPolicyScheme, DefaultPolicy, Record.IsTuple);
+
+    // Prototype for the masked form, if the record has one.
+    llvm::SmallVector<PrototypeDescriptor> ProtoMaskSeq;
+    if (Record.HasMasked)
+      ProtoMaskSeq = RVVIntrinsic::computeBuiltinTypes(
+          BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
+          Record.HasVL, Record.NF, MaskedPolicyScheme, DefaultPolicy,
+          Record.IsTuple);
+
+    bool UnMaskedHasPolicy = UnMaskedPolicyScheme != PolicyScheme::SchemeNone;
+    bool MaskedHasPolicy = MaskedPolicyScheme != PolicyScheme::SchemeNone;
+    SmallVector<Policy> SupportedUnMaskedPolicies =
+        RVVIntrinsic::getSupportedUnMaskedPolicies();
+    SmallVector<Policy> SupportedMaskedPolicies =
+        RVVIntrinsic::getSupportedMaskedPolicies(Record.HasTailPolicy,
+                                                 Record.HasMaskPolicy);
+
+    // TypeRangeMask is a bitmask of supported BasicType values; walk each bit.
+    for (unsigned int TypeRangeMaskShift = 0;
+         TypeRangeMaskShift <= static_cast<unsigned int>(BasicType::MaxOffset);
+         ++TypeRangeMaskShift) {
+      unsigned int BaseTypeI = 1 << TypeRangeMaskShift;
+      BaseType = static_cast<BasicType>(BaseTypeI);
+
+      if ((BaseTypeI & Record.TypeRangeMask) != BaseTypeI)
+        continue;
+
+      // Float16 intrinsics need zvfhmin (when the record permits it) or zvfh.
+      if (BaseType == BasicType::Float16) {
+        if ((Record.RequiredExtensions & RVV_REQ_Zvfhmin) == RVV_REQ_Zvfhmin) {
+          if (!TI.hasFeature("zvfhmin"))
+            continue;
+        } else if (!TI.hasFeature("zvfh")) {
+          continue;
+        }
+      }
+
+      // Expanded with different LMUL. Log2LMUL in [-3, 3] covers mf8..m8.
+      for (int Log2LMUL = -3; Log2LMUL <= 3; Log2LMUL++) {
+        if (!(Record.Log2LMULMask & (1 << (Log2LMUL + 3))))
+          continue;
+
+        std::optional<RVVTypes> Types =
+            TypeCache.computeTypes(BaseType, Log2LMUL, Record.NF, ProtoSeq);
+
+        // Ignored to create new intrinsic if there are any illegal types.
+        if (!Types.has_value())
+          continue;
+
+        std::string SuffixStr = RVVIntrinsic::getSuffixStr(
+            TypeCache, BaseType, Log2LMUL, SuffixProto);
+        std::string OverloadedSuffixStr = RVVIntrinsic::getSuffixStr(
+            TypeCache, BaseType, Log2LMUL, OverloadedSuffixProto);
+
+        // Create non-masked intrinsic.
+        InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr, false, *Types,
+                         UnMaskedHasPolicy, DefaultPolicy);
+
+        // Create non-masked policy intrinsic.
+        if (Record.UnMaskedPolicyScheme != PolicyScheme::SchemeNone) {
+          for (auto P : SupportedUnMaskedPolicies) {
+            llvm::SmallVector<PrototypeDescriptor> PolicyPrototype =
+                RVVIntrinsic::computeBuiltinTypes(
+                    BasicProtoSeq, /*IsMasked=*/false,
+                    /*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
+                    UnMaskedPolicyScheme, P, Record.IsTuple);
+            std::optional<RVVTypes> PolicyTypes = TypeCache.computeTypes(
+                BaseType, Log2LMUL, Record.NF, PolicyPrototype);
+            InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
+                             /*IsMask=*/false, *PolicyTypes, UnMaskedHasPolicy,
+                             P);
+          }
+        }
+        if (!Record.HasMasked)
+          continue;
+        // Create masked intrinsic.
+        std::optional<RVVTypes> MaskTypes =
+            TypeCache.computeTypes(BaseType, Log2LMUL, Record.NF, ProtoMaskSeq);
+        InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr, true,
+                         *MaskTypes, MaskedHasPolicy, DefaultPolicy);
+        if (Record.MaskedPolicyScheme == PolicyScheme::SchemeNone)
+          continue;
+        // Create masked policy intrinsic.
+        for (auto P : SupportedMaskedPolicies) {
+          llvm::SmallVector<PrototypeDescriptor> PolicyPrototype =
+              RVVIntrinsic::computeBuiltinTypes(
+                  BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
+                  Record.HasVL, Record.NF, MaskedPolicyScheme, P,
+                  Record.IsTuple);
+          std::optional<RVVTypes> PolicyTypes = TypeCache.computeTypes(
+              BaseType, Log2LMUL, Record.NF, PolicyPrototype);
+          InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
+                           /*IsMask=*/true, *PolicyTypes, MaskedHasPolicy, P);
+        }
+      } // End for different LMUL
+    } // End for different TypeRange
+  }
+}
+
+// Populate IntrinsicList from the record tables. Each table is expanded only
+// if its SemaRISCV flag is set, and at most once per manager instance.
+void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
+
+  if (S.RISCV().DeclareRVVBuiltins && !ConstructedRISCVVBuiltins) {
+    ConstructedRISCVVBuiltins = true;
+    ConstructRVVIntrinsics(RVVIntrinsicRecords, IntrinsicKind::RVV);
+  }
+  if (S.RISCV().DeclareSiFiveVectorBuiltins &&
+      !ConstructedRISCVSiFiveVectorBuiltins) {
+    ConstructedRISCVSiFiveVectorBuiltins = true;
+    ConstructRVVIntrinsics(RVSiFiveVectorIntrinsicRecords,
+                           IntrinsicKind::SIFIVE_VECTOR);
+  }
+}
+
+// Compute name and signatures for intrinsic with practical types.
+//
+// Builds the concrete, overloaded, and builtin names for one instantiation
+// and records it in IntrinsicList, Intrinsics, and OverloadIntrinsics.
+void RISCVIntrinsicManagerImpl::InitRVVIntrinsic(
+    const RVVIntrinsicRecord &Record, StringRef SuffixStr,
+    StringRef OverloadedSuffixStr, bool IsMasked, RVVTypes &Signature,
+    bool HasPolicy, Policy PolicyAttrs) {
+  // Function name, e.g. vadd_vv_i32m1.
+  std::string Name = Record.Name;
+  if (!SuffixStr.empty())
+    Name += "_" + SuffixStr.str();
+
+  // Overloaded function name, e.g. vadd.
+  std::string OverloadedName;
+  if (!Record.OverloadedName)
+    // No explicit overloaded name: fall back to the prefix before the first
+    // '_' of the record name.
+    OverloadedName = StringRef(Record.Name).split("_").first.str();
+  else
+    OverloadedName = Record.OverloadedName;
+  if (!OverloadedSuffixStr.empty())
+    OverloadedName += "_" + OverloadedSuffixStr.str();
+
+  // clang built-in function name, e.g. __builtin_rvv_vadd.
+  std::string BuiltinName = std::string(Record.Name);
+
+  RVVIntrinsic::updateNamesAndPolicy(IsMasked, HasPolicy, Name, BuiltinName,
+                                     OverloadedName, PolicyAttrs,
+                                     Record.HasFRMRoundModeOp);
+
+  // Put into IntrinsicList. Indices are stored as uint16_t, so verify the
+  // narrowing from size_t was lossless.
+  uint16_t Index = IntrinsicList.size();
+  assert(IntrinsicList.size() == (size_t)Index &&
+         "Intrinsics indices overflow.");
+  IntrinsicList.push_back({BuiltinName, Signature});
+
+  // Map the full (non-overloaded) name to its index.
+  Intrinsics.insert({Name, Index});
+
+  // Get (or default-create) the RVVOverloadIntrinsicDef for this overloaded
+  // name.
+  RVVOverloadIntrinsicDef &OverloadIntrinsicDef =
+      OverloadIntrinsics[OverloadedName];
+
+  // Record this instantiation's index under its overloaded name.
+  OverloadIntrinsicDef.Indexes.push_back(Index);
+}
+
+// Materialize a FunctionDecl for IntrinsicList[Index] under the name II and
+// add it to the lookup result LR. The decl is aliased to the corresponding
+// __builtin_rvv_* builtin via BuiltinAliasAttr.
+void RISCVIntrinsicManagerImpl::CreateRVVIntrinsicDecl(LookupResult &LR,
+                                                       IdentifierInfo *II,
+                                                       Preprocessor &PP,
+                                                       uint32_t Index,
+                                                       bool IsOverload) {
+  ASTContext &Context = S.Context;
+  RVVIntrinsicDef &IDef = IntrinsicList[Index];
+  // NOTE(review): this copies the signature vector; a const reference would
+  // avoid the copy.
+  RVVTypes Sigs = IDef.Signature;
+  size_t SigLength = Sigs.size();
+  RVVType *ReturnType = Sigs[0];
+  QualType RetType = RVVType2Qual(Context, ReturnType);
+  SmallVector<QualType, 8> ArgTypes;
+  QualType BuiltinFuncType;
+
+  // Skip return type, and convert RVVType to QualType for arguments.
+  for (size_t i = 1; i < SigLength; ++i)
+    ArgTypes.push_back(RVVType2Qual(Context, Sigs[i]));
+
+  FunctionProtoType::ExtProtoInfo PI(
+      Context.getDefaultCallingConvention(false, false, true));
+
+  PI.Variadic = false;
+
+  SourceLocation Loc = LR.getNameLoc();
+  BuiltinFuncType = Context.getFunctionType(RetType, ArgTypes, PI);
+  DeclContext *Parent = Context.getTranslationUnitDecl();
+
+  FunctionDecl *RVVIntrinsicDecl = FunctionDecl::Create(
+      Context, Parent, Loc, Loc, II, BuiltinFuncType, /*TInfo=*/nullptr,
+      SC_Extern, S.getCurFPFeatures().isFPConstrained(),
+      /*isInlineSpecified*/ false,
+      /*hasWrittenPrototype*/ true);
+
+  // Create Decl objects for each parameter, adding them to the
+  // FunctionDecl.
+  const auto *FP = cast<FunctionProtoType>(BuiltinFuncType);
+  SmallVector<ParmVarDecl *, 8> ParmList;
+  for (unsigned IParm = 0, E = FP->getNumParams(); IParm != E; ++IParm) {
+    ParmVarDecl *Parm =
+        ParmVarDecl::Create(Context, RVVIntrinsicDecl, Loc, Loc, nullptr,
+                            FP->getParamType(IParm), nullptr, SC_None, nullptr);
+    Parm->setScopeInfo(0, IParm);
+    ParmList.push_back(Parm);
+  }
+  RVVIntrinsicDecl->setParams(ParmList);
+
+  // Add function attributes. Overloaded variants need the overloadable
+  // attribute so several decls can share one name.
+  if (IsOverload)
+    RVVIntrinsicDecl->addAttr(OverloadableAttr::CreateImplicit(Context));
+
+  // Setup alias to __builtin_rvv_*
+  IdentifierInfo &IntrinsicII =
+      PP.getIdentifierTable().get("__builtin_rvv_" + IDef.BuiltinName);
+  RVVIntrinsicDecl->addAttr(
+      BuiltinAliasAttr::CreateImplicit(S.Context, &IntrinsicII));
+
+  // Add to symbol table.
+  LR.addDecl(RVVIntrinsicDecl);
+}
+
+// Create RISC-V vector intrinsic and insert into symbol table if found, and
+// return true, otherwise return false.
+bool RISCVIntrinsicManagerImpl::CreateIntrinsicIfFound(LookupResult &LR,
+                                                       IdentifierInfo *II,
+                                                       Preprocessor &PP) {
+  // All user-facing intrinsics are spelled __riscv_<name>.
+  StringRef Name = II->getName();
+  if (!Name.consume_front("__riscv_"))
+    return false;
+
+  // Lookup the function name from the overload intrinsics first.
+  auto OvIItr = OverloadIntrinsics.find(Name);
+  if (OvIItr != OverloadIntrinsics.end()) {
+    const RVVOverloadIntrinsicDef &OvIntrinsicDef = OvIItr->second;
+    // Declare every concrete instantiation that shares this overloaded name.
+    for (auto Index : OvIntrinsicDef.Indexes)
+      CreateRVVIntrinsicDecl(LR, II, PP, Index,
+                             /*IsOverload*/ true);
+
+    // If we added overloads, need to resolve the lookup result.
+    LR.resolveKind();
+    return true;
+  }
+
+  // Lookup the function name from the intrinsics.
+  auto Itr = Intrinsics.find(Name);
+  if (Itr != Intrinsics.end()) {
+    CreateRVVIntrinsicDecl(LR, II, PP, Itr->second,
+                           /*IsOverload*/ false);
+    return true;
+  }
+
+  // It's not an RVV intrinsic.
+  return false;
+}
+
+namespace clang {
+// Factory used by Sema to create the (lazily populated) RISC-V intrinsic
+// manager.
+std::unique_ptr<clang::sema::RISCVIntrinsicManager>
+CreateRISCVIntrinsicManager(Sema &S) {
+  return std::make_unique<RISCVIntrinsicManagerImpl>(S);
+}
+
+// Verify that argument ArgNum of TheCall is an integer constant in the set
+// {0..3, 5..7} — presumably the vtype LMUL encodings, with 4 being the
+// reserved value. Returns true (with a diagnostic) on error, false otherwise.
+bool SemaRISCV::CheckLMUL(CallExpr *TheCall, unsigned ArgNum) {
+  llvm::APSInt Result;
+
+  // We can't check the value of a dependent argument.
+  Expr *Arg = TheCall->getArg(ArgNum);
+  if (Arg->isTypeDependent() || Arg->isValueDependent())
+    return false;
+
+  // Check constant-ness first.
+  if (SemaRef.BuiltinConstantArg(TheCall, ArgNum, Result))
+    return true;
+
+  int64_t Val = Result.getSExtValue();
+  if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
+    return false;
+
+  return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
+         << Arg->getSourceRange();
+}
+
+// Diagnose operands whose element group width (EGW) cannot be satisfied by
+// the target's minimum VLEN: enforces LMUL * VLEN >= EGW by computing the
+// vscale factor the type needs and checking that the matching zvl<N>b
+// feature is enabled. Returns true (with a diagnostic) on error.
+static bool CheckInvalidVLENandLMUL(const TargetInfo &TI, CallExpr *TheCall,
+                                    Sema &S, QualType Type, int EGW) {
+  assert((EGW == 128 || EGW == 256) && "EGW can only be 128 or 256 bits");
+
+  // LMUL * VLEN >= EGW
+  ASTContext::BuiltinVectorTypeInfo Info =
+      S.Context.getBuiltinVectorTypeInfo(Type->castAs<BuiltinType>());
+  unsigned ElemSize = S.Context.getTypeSize(Info.ElementType);
+  unsigned MinElemCount = Info.EC.getKnownMinValue();
+
+  unsigned EGS = EGW / ElemSize;
+  // If EGS is less than or equal to the minimum number of elements, then the
+  // type is valid.
+  if (EGS <= MinElemCount)
+    return false;
+
+  // Otherwise, we need vscale to be at least EGS / MinElemCont.
+  assert(EGS % MinElemCount == 0);
+  unsigned VScaleFactor = EGS / MinElemCount;
+  // Vscale is VLEN/RVVBitsPerBlock.
+  unsigned MinRequiredVLEN = VScaleFactor * llvm::RISCV::RVVBitsPerBlock;
+  std::string RequiredExt = "zvl" + std::to_string(MinRequiredVLEN) + "b";
+  if (!TI.hasFeature(RequiredExt))
+    return S.Diag(TheCall->getBeginLoc(),
+                  diag::err_riscv_type_requires_extension)
+           << Type << RequiredExt;
+
+  return false;
+}
+
+bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ ASTContext &Context = getASTContext();
+ // vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx,
+ // vsmul.vv, vsmul.vx are not included for EEW=64 in Zve64*.
+ switch (BuiltinID) {
+ default:
+ break;
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_m:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_m:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_m:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_m:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_m:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_m:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu: {
+ ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(
+ TheCall->getType()->castAs<BuiltinType>());
+
+ if (Context.getTypeSize(Info.ElementType) == 64 && !TI.hasFeature("v"))
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_riscv_builtin_requires_extension)
+ << /* IsExtension */ true << TheCall->getSourceRange() << "v";
+
+ break;
+ }
+ }
+
+ switch (BuiltinID) {
+ case RISCVVector::BI__builtin_rvv_vsetvli:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 3) ||
+ CheckLMUL(TheCall, 2);
+ case RISCVVector::BI__builtin_rvv_vsetvlimax:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ CheckLMUL(TheCall, 1);
+ case RISCVVector::BI__builtin_rvv_vget_v: {
+ ASTContext::BuiltinVectorTypeInfo ResVecInfo =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
+ TheCall->getType().getCanonicalType().getTypePtr()));
+ ASTContext::BuiltinVectorTypeInfo VecInfo =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
+ TheCall->getArg(0)->getType().getCanonicalType().getTypePtr()));
+ unsigned MaxIndex;
+ if (VecInfo.NumVectors != 1) // vget for tuple type
+ MaxIndex = VecInfo.NumVectors;
+ else // vget for non-tuple type
+ MaxIndex = (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) /
+ (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors);
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
+ }
+ case RISCVVector::BI__builtin_rvv_vset_v: {
+ ASTContext::BuiltinVectorTypeInfo ResVecInfo =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
+ TheCall->getType().getCanonicalType().getTypePtr()));
+ ASTContext::BuiltinVectorTypeInfo VecInfo =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
+ TheCall->getArg(2)->getType().getCanonicalType().getTypePtr()));
+ unsigned MaxIndex;
+ if (ResVecInfo.NumVectors != 1) // vset for tuple type
+ MaxIndex = ResVecInfo.NumVectors;
+    else // vset for non-tuple type
+ MaxIndex = (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) /
+ (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors);
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
+ }
+ // Vector Crypto
+ case RISCVVector::BI__builtin_rvv_vaeskf1_vi_tu:
+ case RISCVVector::BI__builtin_rvv_vaeskf2_vi_tu:
+ case RISCVVector::BI__builtin_rvv_vaeskf2_vi:
+ case RISCVVector::BI__builtin_rvv_vsm4k_vi_tu: {
+ QualType Op1Type = TheCall->getArg(0)->getType();
+ QualType Op2Type = TheCall->getArg(1)->getType();
+ return CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op1Type, 128) ||
+ CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op2Type, 128) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 31);
+ }
+ case RISCVVector::BI__builtin_rvv_vsm3c_vi_tu:
+ case RISCVVector::BI__builtin_rvv_vsm3c_vi: {
+ QualType Op1Type = TheCall->getArg(0)->getType();
+ return CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op1Type, 256) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 31);
+ }
+ case RISCVVector::BI__builtin_rvv_vaeskf1_vi:
+ case RISCVVector::BI__builtin_rvv_vsm4k_vi: {
+ QualType Op1Type = TheCall->getArg(0)->getType();
+ return CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op1Type, 128) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31);
+ }
+ case RISCVVector::BI__builtin_rvv_vaesdf_vv:
+ case RISCVVector::BI__builtin_rvv_vaesdf_vs:
+ case RISCVVector::BI__builtin_rvv_vaesdm_vv:
+ case RISCVVector::BI__builtin_rvv_vaesdm_vs:
+ case RISCVVector::BI__builtin_rvv_vaesef_vv:
+ case RISCVVector::BI__builtin_rvv_vaesef_vs:
+ case RISCVVector::BI__builtin_rvv_vaesem_vv:
+ case RISCVVector::BI__builtin_rvv_vaesem_vs:
+ case RISCVVector::BI__builtin_rvv_vaesz_vs:
+ case RISCVVector::BI__builtin_rvv_vsm4r_vv:
+ case RISCVVector::BI__builtin_rvv_vsm4r_vs:
+ case RISCVVector::BI__builtin_rvv_vaesdf_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaesdf_vs_tu:
+ case RISCVVector::BI__builtin_rvv_vaesdm_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaesdm_vs_tu:
+ case RISCVVector::BI__builtin_rvv_vaesef_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaesef_vs_tu:
+ case RISCVVector::BI__builtin_rvv_vaesem_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaesem_vs_tu:
+ case RISCVVector::BI__builtin_rvv_vaesz_vs_tu:
+ case RISCVVector::BI__builtin_rvv_vsm4r_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsm4r_vs_tu: {
+ QualType Op1Type = TheCall->getArg(0)->getType();
+ QualType Op2Type = TheCall->getArg(1)->getType();
+ return CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op1Type, 128) ||
+ CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op2Type, 128);
+ }
+ case RISCVVector::BI__builtin_rvv_vsha2ch_vv:
+ case RISCVVector::BI__builtin_rvv_vsha2cl_vv:
+ case RISCVVector::BI__builtin_rvv_vsha2ms_vv:
+ case RISCVVector::BI__builtin_rvv_vsha2ch_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsha2cl_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsha2ms_vv_tu: {
+ QualType Op1Type = TheCall->getArg(0)->getType();
+ QualType Op2Type = TheCall->getArg(1)->getType();
+ QualType Op3Type = TheCall->getArg(2)->getType();
+ ASTContext::BuiltinVectorTypeInfo Info =
+ Context.getBuiltinVectorTypeInfo(Op1Type->castAs<BuiltinType>());
+ uint64_t ElemSize = Context.getTypeSize(Info.ElementType);
+ if (ElemSize == 64 && !TI.hasFeature("zvknhb"))
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_riscv_builtin_requires_extension)
+ << /* IsExtension */ true << TheCall->getSourceRange() << "zvknb";
+
+ return CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op1Type,
+ ElemSize * 4) ||
+ CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op2Type,
+ ElemSize * 4) ||
+ CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op3Type, ElemSize * 4);
+ }
+
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se:
+ // bit_27_26, bit_24_20, bit_11_7, simm5, sew, log2lmul
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 31) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, -16, 15) ||
+ CheckLMUL(TheCall, 5);
+ case RISCVVector::BI__builtin_rvv_sf_vc_iv_se:
+ // bit_27_26, bit_11_7, vs2, simm5
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_i:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_i_se:
+ // bit_27_26, bit_24_20, simm5
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_iv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_iv_se:
+ // bit_27_26, vs2, simm5
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_ivv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_ivw_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw_se:
+ // bit_27_26, vd, vs2, simm5
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se:
+ // bit_27_26, bit_24_20, bit_11_7, xs1, sew, log2lmul
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 31) ||
+ CheckLMUL(TheCall, 5);
+ case RISCVVector::BI__builtin_rvv_sf_vc_xv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_vv_se:
+ // bit_27_26, bit_11_7, vs2, xs1/vs1
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_x:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_x_se:
+ // bit_27_26, bit_24-20, xs1
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case RISCVVector::BI__builtin_rvv_sf_vc_vvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_xvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_vvw_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_xvw_se:
+ // bit_27_26, vd, vs2, xs1
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vv_se:
+ // bit_27_26, vs2, xs1/vs1
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw_se:
+ // bit_27_26, vd, vs2, xs1/vs1
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3);
+ case RISCVVector::BI__builtin_rvv_sf_vc_fv_se:
+ // bit_26, bit_11_7, vs2, fs1
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 1) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case RISCVVector::BI__builtin_rvv_sf_vc_fvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_fvw_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw_se:
+ // bit_26, vd, vs2, fs1
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fv_se:
+ // bit_26, vs2, fs1
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 1);
+ // Check if byteselect is in [0, 3]
+ case RISCV::BI__builtin_riscv_aes32dsi:
+ case RISCV::BI__builtin_riscv_aes32dsmi:
+ case RISCV::BI__builtin_riscv_aes32esi:
+ case RISCV::BI__builtin_riscv_aes32esmi:
+ case RISCV::BI__builtin_riscv_sm4ks:
+ case RISCV::BI__builtin_riscv_sm4ed:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3);
+ // Check if rnum is in [0, 10]
+ case RISCV::BI__builtin_riscv_aes64ks1i:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 10);
+ // Check if value range for vxrm is in [0, 3]
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx:
+ case RISCVVector::BI__builtin_rvv_vasub_vv:
+ case RISCVVector::BI__builtin_rvv_vasub_vx:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx:
+ case RISCVVector::BI__builtin_rvv_vssra_vv:
+ case RISCVVector::BI__builtin_rvv_vssra_vx:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3);
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_tu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_tu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_tu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_tu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_m:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_m:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_m:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_m:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_m:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_m:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_m:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_m:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_m:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_m:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_m:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_m:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_m:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_m:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_m:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_m:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 3);
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_mu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_mu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_mu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_tum:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_tum:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_tum:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_tumu:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 4, 0, 3);
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 4);
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm:
+ case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm:
+ case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm:
+ case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_m:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 4);
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_mu:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 4);
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_mu:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 4, 0, 4);
+ case RISCV::BI__builtin_riscv_ntl_load:
+ case RISCV::BI__builtin_riscv_ntl_store:
+ DeclRefExpr *DRE =
+ cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+ assert((BuiltinID == RISCV::BI__builtin_riscv_ntl_store ||
+ BuiltinID == RISCV::BI__builtin_riscv_ntl_load) &&
+ "Unexpected RISC-V nontemporal load/store builtin!");
+ bool IsStore = BuiltinID == RISCV::BI__builtin_riscv_ntl_store;
+ unsigned NumArgs = IsStore ? 3 : 2;
+
+ if (SemaRef.checkArgCountAtLeast(TheCall, NumArgs - 1))
+ return true;
+
+ if (SemaRef.checkArgCountAtMost(TheCall, NumArgs))
+ return true;
+
+ // Domain value should be compile-time constant.
+ // 2 <= domain <= 5
+ if (TheCall->getNumArgs() == NumArgs &&
+ SemaRef.BuiltinConstantArgRange(TheCall, NumArgs - 1, 2, 5))
+ return true;
+
+ Expr *PointerArg = TheCall->getArg(0);
+ ExprResult PointerArgResult =
+ SemaRef.DefaultFunctionArrayLvalueConversion(PointerArg);
+
+ if (PointerArgResult.isInvalid())
+ return true;
+ PointerArg = PointerArgResult.get();
+
+ const PointerType *PtrType = PointerArg->getType()->getAs<PointerType>();
+ if (!PtrType) {
+ Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return true;
+ }
+
+ QualType ValType = PtrType->getPointeeType();
+ ValType = ValType.getUnqualifiedType();
+ if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
+ !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
+ !ValType->isVectorType() && !ValType->isRVVSizelessBuiltinType()) {
+ Diag(DRE->getBeginLoc(),
+ diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return true;
+ }
+
+ if (!IsStore) {
+ TheCall->setType(ValType);
+ return false;
+ }
+
+ ExprResult ValArg = TheCall->getArg(1);
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(
+ Context, ValType, /*consume*/ false);
+ ValArg =
+ SemaRef.PerformCopyInitialization(Entity, SourceLocation(), ValArg);
+ if (ValArg.isInvalid())
+ return true;
+
+ TheCall->setArg(1, ValArg.get());
+ TheCall->setType(Context.VoidTy);
+ return false;
+ }
+
+ return false;
+}
+
+// Diagnose use of an RVV builtin vector type `Ty` whose element type or
+// minimum element count requires a vector sub-extension that is not enabled
+// in `FeatureMap`. At most one diagnostic is emitted (if/else-if chain),
+// naming the extension that would make the type legal. `D` is passed to
+// Diag() alongside the diagnostic ID — presumably the deferred-diagnostic
+// form of Diag tied to the enclosing declaration; confirm against SemaBase.
+void SemaRISCV::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
+ const llvm::StringMap<bool> &FeatureMap) {
+ ASTContext::BuiltinVectorTypeInfo Info =
+ SemaRef.Context.getBuiltinVectorTypeInfo(Ty->castAs<BuiltinType>());
+ // Element size (bits) and the known-minimum scalable element count.
+ unsigned EltSize = SemaRef.Context.getTypeSize(Info.ElementType);
+ unsigned MinElts = Info.EC.getKnownMinValue();
+
+ // 64-bit floating-point (double) elements require zve64d.
+ if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Double) &&
+ !FeatureMap.lookup("zve64d"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64d";
+ // (ELEN, LMUL) pairs of (8, mf8), (16, mf4), (32, mf2), (64, m1) requires at
+ // least zve64x
+ else if (((EltSize == 64 && Info.ElementType->isIntegerType()) ||
+ MinElts == 1) &&
+ !FeatureMap.lookup("zve64x"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64x";
+ // _Float16 elements are usable under either zvfh or the min-subset zvfhmin.
+ else if (Info.ElementType->isFloat16Type() && !FeatureMap.lookup("zvfh") &&
+ !FeatureMap.lookup("zvfhmin"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D)
+ << Ty << "zvfh or zvfhmin";
+ // __bf16 elements require zvfbfmin.
+ else if (Info.ElementType->isBFloat16Type() && !FeatureMap.lookup("zvfbfmin"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zvfbfmin";
+ // 32-bit float elements require zve32f.
+ else if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Float) &&
+ !FeatureMap.lookup("zve32f"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32f";
+ // Given that caller already checked isRVVType() before calling this function,
+ // if we don't have at least zve32x supported, then we need to emit error.
+ else if (!FeatureMap.lookup("zve32x"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32x";
+}
+
+/// Are the two types RVV-bitcast-compatible types? I.e. is bitcasting from the
+/// first RVV type (e.g. an RVV scalable type) to the second type (e.g. an RVV
+/// VLS type) allowed?
+///
+/// This will also return false if the two given types do not make sense from
+/// the perspective of RVV bitcasts.
+bool SemaRISCV::isValidRVVBitcast(QualType srcTy, QualType destTy) {
+ assert(srcTy->isVectorType() || destTy->isVectorType());
+
+ // One direction of the check: the first type must be an RVV sizeless
+ // (scalable) builtin type, and the second a fixed-length RVV data vector.
+ auto ValidScalableConversion = [](QualType FirstType, QualType SecondType) {
+ if (!FirstType->isRVVSizelessBuiltinType())
+ return false;
+
+ const auto *VecTy = SecondType->getAs<VectorType>();
+ return VecTy && VecTy->getVectorKind() == VectorKind::RVVFixedLengthData;
+ };
+
+ // The bitcast is valid in either direction (scalable -> VLS or VLS ->
+ // scalable).
+ return ValidScalableConversion(srcTy, destTy) ||
+ ValidScalableConversion(destTy, srcTy);
+}
+
+// Process the RISC-V 'interrupt' attribute on declaration `D`: warn and bail
+// on a repeated attribute, validate the (optional) string argument and the
+// function's shape (no parameters, void return), then attach a
+// RISCVInterruptAttr of the corresponding kind.
+void SemaRISCV::handleInterruptAttr(Decl *D, const ParsedAttr &AL) {
+ // Warn about repeated attributes.
+ if (const auto *A = D->getAttr<RISCVInterruptAttr>()) {
+ Diag(AL.getRange().getBegin(),
+ diag::warn_riscv_repeated_interrupt_attribute);
+ Diag(A->getLocation(), diag::note_riscv_repeated_interrupt_attribute);
+ return;
+ }
+
+ // Check the attribute argument. Argument is optional.
+ if (!AL.checkAtMostNumArgs(SemaRef, 1))
+ return;
+
+ StringRef Str;
+ SourceLocation ArgLoc;
+
+ // 'machine' is the default interrupt mode.
+ if (AL.getNumArgs() == 0)
+ Str = "machine";
+ else if (!SemaRef.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
+ return;
+
+ // Semantic checks for a function with the 'interrupt' attribute:
+ // - Must be a function.
+ // - Must have no parameters.
+ // - Must have the 'void' return type.
+ // - The attribute itself must either have no argument or one of the
+ // valid interrupt types, see [RISCVInterruptDocs].
+
+ if (D->getFunctionType() == nullptr) {
+ Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
+ return;
+ }
+
+ // An interrupt handler cannot take arguments.
+ if (hasFunctionProto(D) && getFunctionOrMethodNumParams(D) != 0) {
+ Diag(D->getLocation(), diag::warn_interrupt_attribute_invalid)
+ << /*RISC-V*/ 2 << 0;
+ return;
+ }
+
+ // ...and must return void.
+ if (!getFunctionOrMethodResultType(D)->isVoidType()) {
+ Diag(D->getLocation(), diag::warn_interrupt_attribute_invalid)
+ << /*RISC-V*/ 2 << 1;
+ return;
+ }
+
+ // Map the string argument to the enum kind; reject unknown spellings.
+ RISCVInterruptAttr::InterruptType Kind;
+ if (!RISCVInterruptAttr::ConvertStrToInterruptType(Str, Kind)) {
+ Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
+ << AL << Str << ArgLoc;
+ return;
+ }
+
+ D->addAttr(::new (getASTContext())
+ RISCVInterruptAttr(getASTContext(), AL, Kind));
+}
+
+// Whether `BuiltinID` may be the target of a builtin alias: accepted only when
+// it falls within the RVV builtin ID range. `AliasName` is part of the hook's
+// signature but is not consulted here — presumably callers validate the name
+// separately; confirm at the call site.
+bool SemaRISCV::isAliasValid(unsigned BuiltinID, StringRef AliasName) {
+ return BuiltinID >= RISCV::FirstRVVBuiltin &&
+ BuiltinID <= RISCV::LastRVVBuiltin;
+}
+
+// Trivial constructor: the RISC-V Sema helper only wires itself to the owning
+// Sema instance via its SemaBase subobject; it holds no state of its own here.
+SemaRISCV::SemaRISCV(Sema &S) : SemaBase(S) {}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp b/contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp
deleted file mode 100644
index 00a5ea65f3f4..000000000000
--- a/contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp
+++ /dev/null
@@ -1,497 +0,0 @@
-//==- SemaRISCVVectorLookup.cpp - Name Lookup for RISC-V Vector Intrinsic -==//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements name lookup for RISC-V vector intrinsic.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/AST/ASTContext.h"
-#include "clang/AST/Decl.h"
-#include "clang/Basic/Builtins.h"
-#include "clang/Basic/TargetInfo.h"
-#include "clang/Lex/Preprocessor.h"
-#include "clang/Sema/Lookup.h"
-#include "clang/Sema/RISCVIntrinsicManager.h"
-#include "clang/Sema/Sema.h"
-#include "clang/Support/RISCVVIntrinsicUtils.h"
-#include "llvm/ADT/SmallVector.h"
-#include <optional>
-#include <string>
-#include <vector>
-
-using namespace llvm;
-using namespace clang;
-using namespace clang::RISCV;
-
-using IntrinsicKind = sema::RISCVIntrinsicManager::IntrinsicKind;
-
-namespace {
-
-// Function definition of a RVV intrinsic.
-struct RVVIntrinsicDef {
- /// Mapping to which clang built-in function, e.g. __builtin_rvv_vadd.
- std::string BuiltinName;
-
- /// Function signature, first element is return type.
- RVVTypes Signature;
-};
-
-struct RVVOverloadIntrinsicDef {
- // Indexes of RISCVIntrinsicManagerImpl::IntrinsicList.
- SmallVector<uint32_t, 8> Indexes;
-};
-
-} // namespace
-
-static const PrototypeDescriptor RVVSignatureTable[] = {
-#define DECL_SIGNATURE_TABLE
-#include "clang/Basic/riscv_vector_builtin_sema.inc"
-#undef DECL_SIGNATURE_TABLE
-};
-
-static const PrototypeDescriptor RVSiFiveVectorSignatureTable[] = {
-#define DECL_SIGNATURE_TABLE
-#include "clang/Basic/riscv_sifive_vector_builtin_sema.inc"
-#undef DECL_SIGNATURE_TABLE
-};
-
-static const RVVIntrinsicRecord RVVIntrinsicRecords[] = {
-#define DECL_INTRINSIC_RECORDS
-#include "clang/Basic/riscv_vector_builtin_sema.inc"
-#undef DECL_INTRINSIC_RECORDS
-};
-
-static const RVVIntrinsicRecord RVSiFiveVectorIntrinsicRecords[] = {
-#define DECL_INTRINSIC_RECORDS
-#include "clang/Basic/riscv_sifive_vector_builtin_sema.inc"
-#undef DECL_INTRINSIC_RECORDS
-};
-
-// Get subsequence of signature table.
-static ArrayRef<PrototypeDescriptor>
-ProtoSeq2ArrayRef(IntrinsicKind K, uint16_t Index, uint8_t Length) {
- switch (K) {
- case IntrinsicKind::RVV:
- return ArrayRef(&RVVSignatureTable[Index], Length);
- case IntrinsicKind::SIFIVE_VECTOR:
- return ArrayRef(&RVSiFiveVectorSignatureTable[Index], Length);
- }
- llvm_unreachable("Unhandled IntrinsicKind");
-}
-
-static QualType RVVType2Qual(ASTContext &Context, const RVVType *Type) {
- QualType QT;
- switch (Type->getScalarType()) {
- case ScalarTypeKind::Void:
- QT = Context.VoidTy;
- break;
- case ScalarTypeKind::Size_t:
- QT = Context.getSizeType();
- break;
- case ScalarTypeKind::Ptrdiff_t:
- QT = Context.getPointerDiffType();
- break;
- case ScalarTypeKind::UnsignedLong:
- QT = Context.UnsignedLongTy;
- break;
- case ScalarTypeKind::SignedLong:
- QT = Context.LongTy;
- break;
- case ScalarTypeKind::Boolean:
- QT = Context.BoolTy;
- break;
- case ScalarTypeKind::SignedInteger:
- QT = Context.getIntTypeForBitwidth(Type->getElementBitwidth(), true);
- break;
- case ScalarTypeKind::UnsignedInteger:
- QT = Context.getIntTypeForBitwidth(Type->getElementBitwidth(), false);
- break;
- case ScalarTypeKind::BFloat:
- QT = Context.BFloat16Ty;
- break;
- case ScalarTypeKind::Float:
- switch (Type->getElementBitwidth()) {
- case 64:
- QT = Context.DoubleTy;
- break;
- case 32:
- QT = Context.FloatTy;
- break;
- case 16:
- QT = Context.Float16Ty;
- break;
- default:
- llvm_unreachable("Unsupported floating point width.");
- }
- break;
- case Invalid:
- case Undefined:
- llvm_unreachable("Unhandled type.");
- }
- if (Type->isVector()) {
- if (Type->isTuple())
- QT = Context.getScalableVectorType(QT, *Type->getScale(), Type->getNF());
- else
- QT = Context.getScalableVectorType(QT, *Type->getScale());
- }
-
- if (Type->isConstant())
- QT = Context.getConstType(QT);
-
- // Transform the type to a pointer as the last step, if necessary.
- if (Type->isPointer())
- QT = Context.getPointerType(QT);
-
- return QT;
-}
-
-namespace {
-class RISCVIntrinsicManagerImpl : public sema::RISCVIntrinsicManager {
-private:
- Sema &S;
- ASTContext &Context;
- RVVTypeCache TypeCache;
- bool ConstructedRISCVVBuiltins;
- bool ConstructedRISCVSiFiveVectorBuiltins;
-
- // List of all RVV intrinsic.
- std::vector<RVVIntrinsicDef> IntrinsicList;
- // Mapping function name to index of IntrinsicList.
- StringMap<uint32_t> Intrinsics;
- // Mapping function name to RVVOverloadIntrinsicDef.
- StringMap<RVVOverloadIntrinsicDef> OverloadIntrinsics;
-
-
- // Create RVVIntrinsicDef.
- void InitRVVIntrinsic(const RVVIntrinsicRecord &Record, StringRef SuffixStr,
- StringRef OverloadedSuffixStr, bool IsMask,
- RVVTypes &Types, bool HasPolicy, Policy PolicyAttrs);
-
- // Create FunctionDecl for a vector intrinsic.
- void CreateRVVIntrinsicDecl(LookupResult &LR, IdentifierInfo *II,
- Preprocessor &PP, uint32_t Index,
- bool IsOverload);
-
- void ConstructRVVIntrinsics(ArrayRef<RVVIntrinsicRecord> Recs,
- IntrinsicKind K);
-
-public:
- RISCVIntrinsicManagerImpl(clang::Sema &S) : S(S), Context(S.Context) {
- ConstructedRISCVVBuiltins = false;
- ConstructedRISCVSiFiveVectorBuiltins = false;
- }
-
- // Initialize IntrinsicList
- void InitIntrinsicList() override;
-
- // Create RISC-V vector intrinsic and insert into symbol table if found, and
- // return true, otherwise return false.
- bool CreateIntrinsicIfFound(LookupResult &LR, IdentifierInfo *II,
- Preprocessor &PP) override;
-};
-} // namespace
-
-void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
- ArrayRef<RVVIntrinsicRecord> Recs, IntrinsicKind K) {
- const TargetInfo &TI = Context.getTargetInfo();
- static const std::pair<const char *, RVVRequire> FeatureCheckList[] = {
- {"64bit", RVV_REQ_RV64},
- {"xsfvcp", RVV_REQ_Xsfvcp},
- {"xsfvfnrclipxfqf", RVV_REQ_Xsfvfnrclipxfqf},
- {"xsfvfwmaccqqq", RVV_REQ_Xsfvfwmaccqqq},
- {"xsfvqmaccdod", RVV_REQ_Xsfvqmaccdod},
- {"xsfvqmaccqoq", RVV_REQ_Xsfvqmaccqoq},
- {"zvbb", RVV_REQ_Zvbb},
- {"zvbc", RVV_REQ_Zvbc},
- {"zvkb", RVV_REQ_Zvkb},
- {"zvkg", RVV_REQ_Zvkg},
- {"zvkned", RVV_REQ_Zvkned},
- {"zvknha", RVV_REQ_Zvknha},
- {"zvknhb", RVV_REQ_Zvknhb},
- {"zvksed", RVV_REQ_Zvksed},
- {"zvksh", RVV_REQ_Zvksh},
- {"experimental", RVV_REQ_Experimental}};
-
- // Construction of RVVIntrinsicRecords need to sync with createRVVIntrinsics
- // in RISCVVEmitter.cpp.
- for (auto &Record : Recs) {
- // Check requirements.
- if (llvm::any_of(FeatureCheckList, [&](const auto &Item) {
- return (Record.RequiredExtensions & Item.second) == Item.second &&
- !TI.hasFeature(Item.first);
- }))
- continue;
-
- // Create Intrinsics for each type and LMUL.
- BasicType BaseType = BasicType::Unknown;
- ArrayRef<PrototypeDescriptor> BasicProtoSeq =
- ProtoSeq2ArrayRef(K, Record.PrototypeIndex, Record.PrototypeLength);
- ArrayRef<PrototypeDescriptor> SuffixProto =
- ProtoSeq2ArrayRef(K, Record.SuffixIndex, Record.SuffixLength);
- ArrayRef<PrototypeDescriptor> OverloadedSuffixProto = ProtoSeq2ArrayRef(
- K, Record.OverloadedSuffixIndex, Record.OverloadedSuffixSize);
-
- PolicyScheme UnMaskedPolicyScheme =
- static_cast<PolicyScheme>(Record.UnMaskedPolicyScheme);
- PolicyScheme MaskedPolicyScheme =
- static_cast<PolicyScheme>(Record.MaskedPolicyScheme);
-
- const Policy DefaultPolicy;
-
- llvm::SmallVector<PrototypeDescriptor> ProtoSeq =
- RVVIntrinsic::computeBuiltinTypes(
- BasicProtoSeq, /*IsMasked=*/false,
- /*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
- UnMaskedPolicyScheme, DefaultPolicy, Record.IsTuple);
-
- llvm::SmallVector<PrototypeDescriptor> ProtoMaskSeq;
- if (Record.HasMasked)
- ProtoMaskSeq = RVVIntrinsic::computeBuiltinTypes(
- BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
- Record.HasVL, Record.NF, MaskedPolicyScheme, DefaultPolicy,
- Record.IsTuple);
-
- bool UnMaskedHasPolicy = UnMaskedPolicyScheme != PolicyScheme::SchemeNone;
- bool MaskedHasPolicy = MaskedPolicyScheme != PolicyScheme::SchemeNone;
- SmallVector<Policy> SupportedUnMaskedPolicies =
- RVVIntrinsic::getSupportedUnMaskedPolicies();
- SmallVector<Policy> SupportedMaskedPolicies =
- RVVIntrinsic::getSupportedMaskedPolicies(Record.HasTailPolicy,
- Record.HasMaskPolicy);
-
- for (unsigned int TypeRangeMaskShift = 0;
- TypeRangeMaskShift <= static_cast<unsigned int>(BasicType::MaxOffset);
- ++TypeRangeMaskShift) {
- unsigned int BaseTypeI = 1 << TypeRangeMaskShift;
- BaseType = static_cast<BasicType>(BaseTypeI);
-
- if ((BaseTypeI & Record.TypeRangeMask) != BaseTypeI)
- continue;
-
- if (BaseType == BasicType::Float16) {
- if ((Record.RequiredExtensions & RVV_REQ_Zvfhmin) == RVV_REQ_Zvfhmin) {
- if (!TI.hasFeature("zvfhmin"))
- continue;
- } else if (!TI.hasFeature("zvfh")) {
- continue;
- }
- }
-
- // Expanded with different LMUL.
- for (int Log2LMUL = -3; Log2LMUL <= 3; Log2LMUL++) {
- if (!(Record.Log2LMULMask & (1 << (Log2LMUL + 3))))
- continue;
-
- std::optional<RVVTypes> Types =
- TypeCache.computeTypes(BaseType, Log2LMUL, Record.NF, ProtoSeq);
-
- // Ignored to create new intrinsic if there are any illegal types.
- if (!Types.has_value())
- continue;
-
- std::string SuffixStr = RVVIntrinsic::getSuffixStr(
- TypeCache, BaseType, Log2LMUL, SuffixProto);
- std::string OverloadedSuffixStr = RVVIntrinsic::getSuffixStr(
- TypeCache, BaseType, Log2LMUL, OverloadedSuffixProto);
-
- // Create non-masked intrinsic.
- InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr, false, *Types,
- UnMaskedHasPolicy, DefaultPolicy);
-
- // Create non-masked policy intrinsic.
- if (Record.UnMaskedPolicyScheme != PolicyScheme::SchemeNone) {
- for (auto P : SupportedUnMaskedPolicies) {
- llvm::SmallVector<PrototypeDescriptor> PolicyPrototype =
- RVVIntrinsic::computeBuiltinTypes(
- BasicProtoSeq, /*IsMasked=*/false,
- /*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
- UnMaskedPolicyScheme, P, Record.IsTuple);
- std::optional<RVVTypes> PolicyTypes = TypeCache.computeTypes(
- BaseType, Log2LMUL, Record.NF, PolicyPrototype);
- InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
- /*IsMask=*/false, *PolicyTypes, UnMaskedHasPolicy,
- P);
- }
- }
- if (!Record.HasMasked)
- continue;
- // Create masked intrinsic.
- std::optional<RVVTypes> MaskTypes =
- TypeCache.computeTypes(BaseType, Log2LMUL, Record.NF, ProtoMaskSeq);
- InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr, true,
- *MaskTypes, MaskedHasPolicy, DefaultPolicy);
- if (Record.MaskedPolicyScheme == PolicyScheme::SchemeNone)
- continue;
- // Create masked policy intrinsic.
- for (auto P : SupportedMaskedPolicies) {
- llvm::SmallVector<PrototypeDescriptor> PolicyPrototype =
- RVVIntrinsic::computeBuiltinTypes(
- BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
- Record.HasVL, Record.NF, MaskedPolicyScheme, P,
- Record.IsTuple);
- std::optional<RVVTypes> PolicyTypes = TypeCache.computeTypes(
- BaseType, Log2LMUL, Record.NF, PolicyPrototype);
- InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
- /*IsMask=*/true, *PolicyTypes, MaskedHasPolicy, P);
- }
- } // End for different LMUL
- } // End for different TypeRange
- }
-}
-
-void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
-
- if (S.DeclareRISCVVBuiltins && !ConstructedRISCVVBuiltins) {
- ConstructedRISCVVBuiltins = true;
- ConstructRVVIntrinsics(RVVIntrinsicRecords,
- IntrinsicKind::RVV);
- }
- if (S.DeclareRISCVSiFiveVectorBuiltins &&
- !ConstructedRISCVSiFiveVectorBuiltins) {
- ConstructedRISCVSiFiveVectorBuiltins = true;
- ConstructRVVIntrinsics(RVSiFiveVectorIntrinsicRecords,
- IntrinsicKind::SIFIVE_VECTOR);
- }
-}
-
-// Compute name and signatures for intrinsic with practical types.
-void RISCVIntrinsicManagerImpl::InitRVVIntrinsic(
- const RVVIntrinsicRecord &Record, StringRef SuffixStr,
- StringRef OverloadedSuffixStr, bool IsMasked, RVVTypes &Signature,
- bool HasPolicy, Policy PolicyAttrs) {
- // Function name, e.g. vadd_vv_i32m1.
- std::string Name = Record.Name;
- if (!SuffixStr.empty())
- Name += "_" + SuffixStr.str();
-
- // Overloaded function name, e.g. vadd.
- std::string OverloadedName;
- if (!Record.OverloadedName)
- OverloadedName = StringRef(Record.Name).split("_").first.str();
- else
- OverloadedName = Record.OverloadedName;
- if (!OverloadedSuffixStr.empty())
- OverloadedName += "_" + OverloadedSuffixStr.str();
-
- // clang built-in function name, e.g. __builtin_rvv_vadd.
- std::string BuiltinName = "__builtin_rvv_" + std::string(Record.Name);
-
- RVVIntrinsic::updateNamesAndPolicy(IsMasked, HasPolicy, Name, BuiltinName,
- OverloadedName, PolicyAttrs,
- Record.HasFRMRoundModeOp);
-
- // Put into IntrinsicList.
- uint32_t Index = IntrinsicList.size();
- IntrinsicList.push_back({BuiltinName, Signature});
-
- // Creating mapping to Intrinsics.
- Intrinsics.insert({Name, Index});
-
- // Get the RVVOverloadIntrinsicDef.
- RVVOverloadIntrinsicDef &OverloadIntrinsicDef =
- OverloadIntrinsics[OverloadedName];
-
- // And added the index.
- OverloadIntrinsicDef.Indexes.push_back(Index);
-}
-
-void RISCVIntrinsicManagerImpl::CreateRVVIntrinsicDecl(LookupResult &LR,
- IdentifierInfo *II,
- Preprocessor &PP,
- uint32_t Index,
- bool IsOverload) {
- ASTContext &Context = S.Context;
- RVVIntrinsicDef &IDef = IntrinsicList[Index];
- RVVTypes Sigs = IDef.Signature;
- size_t SigLength = Sigs.size();
- RVVType *ReturnType = Sigs[0];
- QualType RetType = RVVType2Qual(Context, ReturnType);
- SmallVector<QualType, 8> ArgTypes;
- QualType BuiltinFuncType;
-
- // Skip return type, and convert RVVType to QualType for arguments.
- for (size_t i = 1; i < SigLength; ++i)
- ArgTypes.push_back(RVVType2Qual(Context, Sigs[i]));
-
- FunctionProtoType::ExtProtoInfo PI(
- Context.getDefaultCallingConvention(false, false, true));
-
- PI.Variadic = false;
-
- SourceLocation Loc = LR.getNameLoc();
- BuiltinFuncType = Context.getFunctionType(RetType, ArgTypes, PI);
- DeclContext *Parent = Context.getTranslationUnitDecl();
-
- FunctionDecl *RVVIntrinsicDecl = FunctionDecl::Create(
- Context, Parent, Loc, Loc, II, BuiltinFuncType, /*TInfo=*/nullptr,
- SC_Extern, S.getCurFPFeatures().isFPConstrained(),
- /*isInlineSpecified*/ false,
- /*hasWrittenPrototype*/ true);
-
- // Create Decl objects for each parameter, adding them to the
- // FunctionDecl.
- const auto *FP = cast<FunctionProtoType>(BuiltinFuncType);
- SmallVector<ParmVarDecl *, 8> ParmList;
- for (unsigned IParm = 0, E = FP->getNumParams(); IParm != E; ++IParm) {
- ParmVarDecl *Parm =
- ParmVarDecl::Create(Context, RVVIntrinsicDecl, Loc, Loc, nullptr,
- FP->getParamType(IParm), nullptr, SC_None, nullptr);
- Parm->setScopeInfo(0, IParm);
- ParmList.push_back(Parm);
- }
- RVVIntrinsicDecl->setParams(ParmList);
-
- // Add function attributes.
- if (IsOverload)
- RVVIntrinsicDecl->addAttr(OverloadableAttr::CreateImplicit(Context));
-
- // Setup alias to __builtin_rvv_*
- IdentifierInfo &IntrinsicII = PP.getIdentifierTable().get(IDef.BuiltinName);
- RVVIntrinsicDecl->addAttr(
- BuiltinAliasAttr::CreateImplicit(S.Context, &IntrinsicII));
-
- // Add to symbol table.
- LR.addDecl(RVVIntrinsicDecl);
-}
-
-bool RISCVIntrinsicManagerImpl::CreateIntrinsicIfFound(LookupResult &LR,
- IdentifierInfo *II,
- Preprocessor &PP) {
- StringRef Name = II->getName();
-
- // Lookup the function name from the overload intrinsics first.
- auto OvIItr = OverloadIntrinsics.find(Name);
- if (OvIItr != OverloadIntrinsics.end()) {
- const RVVOverloadIntrinsicDef &OvIntrinsicDef = OvIItr->second;
- for (auto Index : OvIntrinsicDef.Indexes)
- CreateRVVIntrinsicDecl(LR, II, PP, Index,
- /*IsOverload*/ true);
-
- // If we added overloads, need to resolve the lookup result.
- LR.resolveKind();
- return true;
- }
-
- // Lookup the function name from the intrinsics.
- auto Itr = Intrinsics.find(Name);
- if (Itr != Intrinsics.end()) {
- CreateRVVIntrinsicDecl(LR, II, PP, Itr->second,
- /*IsOverload*/ false);
- return true;
- }
-
- // It's not an RVV intrinsics.
- return false;
-}
-
-namespace clang {
-std::unique_ptr<clang::sema::RISCVIntrinsicManager>
-CreateRISCVIntrinsicManager(Sema &S) {
- return std::make_unique<RISCVIntrinsicManagerImpl>(S);
-}
-} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp b/contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp
index ca0254d29e7f..2b55c598d55c 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp
@@ -8,7 +8,10 @@
// This implements Semantic Analysis for SYCL constructs.
//===----------------------------------------------------------------------===//
+#include "clang/Sema/SemaSYCL.h"
#include "clang/AST/Mangle.h"
+#include "clang/Sema/Attr.h"
+#include "clang/Sema/ParsedAttr.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaDiagnostic.h"
@@ -18,28 +21,30 @@ using namespace clang;
// SYCL device specific diagnostics implementation
// -----------------------------------------------------------------------------
-Sema::SemaDiagnosticBuilder Sema::SYCLDiagIfDeviceCode(SourceLocation Loc,
+SemaSYCL::SemaSYCL(Sema &S) : SemaBase(S) {}
+
+Sema::SemaDiagnosticBuilder SemaSYCL::DiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID) {
assert(getLangOpts().SYCLIsDevice &&
"Should only be called during SYCL compilation");
- FunctionDecl *FD = dyn_cast<FunctionDecl>(getCurLexicalContext());
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(SemaRef.getCurLexicalContext());
SemaDiagnosticBuilder::Kind DiagKind = [this, FD] {
if (!FD)
return SemaDiagnosticBuilder::K_Nop;
- if (getEmissionStatus(FD) == Sema::FunctionEmissionStatus::Emitted)
+ if (SemaRef.getEmissionStatus(FD) == Sema::FunctionEmissionStatus::Emitted)
return SemaDiagnosticBuilder::K_ImmediateWithCallStack;
return SemaDiagnosticBuilder::K_Deferred;
}();
- return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, FD, *this);
+ return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, FD, SemaRef);
}
-static bool isZeroSizedArray(Sema &SemaRef, QualType Ty) {
- if (const auto *CAT = SemaRef.getASTContext().getAsConstantArrayType(Ty))
- return CAT->getSize() == 0;
+static bool isZeroSizedArray(SemaSYCL &S, QualType Ty) {
+ if (const auto *CAT = S.getASTContext().getAsConstantArrayType(Ty))
+ return CAT->isZeroSize();
return false;
}
-void Sema::deepTypeCheckForSYCLDevice(SourceLocation UsedAt,
+void SemaSYCL::deepTypeCheckForDevice(SourceLocation UsedAt,
llvm::DenseSet<QualType> Visited,
ValueDecl *DeclToCheck) {
assert(getLangOpts().SYCLIsDevice &&
@@ -51,18 +56,18 @@ void Sema::deepTypeCheckForSYCLDevice(SourceLocation UsedAt,
auto Check = [&](QualType TypeToCheck, const ValueDecl *D) {
bool ErrorFound = false;
if (isZeroSizedArray(*this, TypeToCheck)) {
- SYCLDiagIfDeviceCode(UsedAt, diag::err_typecheck_zero_array_size) << 1;
+ DiagIfDeviceCode(UsedAt, diag::err_typecheck_zero_array_size) << 1;
ErrorFound = true;
}
// Checks for other types can also be done here.
if (ErrorFound) {
if (NeedToEmitNotes) {
if (auto *FD = dyn_cast<FieldDecl>(D))
- SYCLDiagIfDeviceCode(FD->getLocation(),
- diag::note_illegal_field_declared_here)
+ DiagIfDeviceCode(FD->getLocation(),
+ diag::note_illegal_field_declared_here)
<< FD->getType()->isPointerType() << FD->getType();
else
- SYCLDiagIfDeviceCode(D->getLocation(), diag::note_declared_at);
+ DiagIfDeviceCode(D->getLocation(), diag::note_declared_at);
}
}
@@ -93,8 +98,8 @@ void Sema::deepTypeCheckForSYCLDevice(SourceLocation UsedAt,
auto EmitHistory = [&]() {
// The first element is always nullptr.
for (uint64_t Index = 1; Index < History.size(); ++Index) {
- SYCLDiagIfDeviceCode(History[Index]->getLocation(),
- diag::note_within_field_of_type)
+ DiagIfDeviceCode(History[Index]->getLocation(),
+ diag::note_within_field_of_type)
<< History[Index]->getType();
}
};
@@ -130,3 +135,65 @@ void Sema::deepTypeCheckForSYCLDevice(SourceLocation UsedAt,
}
} while (!StackForRecursion.empty());
}
+
+ExprResult SemaSYCL::BuildUniqueStableNameExpr(SourceLocation OpLoc,
+ SourceLocation LParen,
+ SourceLocation RParen,
+ TypeSourceInfo *TSI) {
+ return SYCLUniqueStableNameExpr::Create(getASTContext(), OpLoc, LParen,
+ RParen, TSI);
+}
+
+ExprResult SemaSYCL::ActOnUniqueStableNameExpr(SourceLocation OpLoc,
+ SourceLocation LParen,
+ SourceLocation RParen,
+ ParsedType ParsedTy) {
+ TypeSourceInfo *TSI = nullptr;
+ QualType Ty = SemaRef.GetTypeFromParser(ParsedTy, &TSI);
+
+ if (Ty.isNull())
+ return ExprError();
+ if (!TSI)
+ TSI = getASTContext().getTrivialTypeSourceInfo(Ty, LParen);
+
+ return BuildUniqueStableNameExpr(OpLoc, LParen, RParen, TSI);
+}
+
+void SemaSYCL::handleKernelAttr(Decl *D, const ParsedAttr &AL) {
+ // The 'sycl_kernel' attribute applies only to function templates.
+ const auto *FD = cast<FunctionDecl>(D);
+ const FunctionTemplateDecl *FT = FD->getDescribedFunctionTemplate();
+ assert(FT && "Function template is expected");
+
+ // Function template must have at least two template parameters.
+ const TemplateParameterList *TL = FT->getTemplateParameters();
+ if (TL->size() < 2) {
+ Diag(FT->getLocation(), diag::warn_sycl_kernel_num_of_template_params);
+ return;
+ }
+
+ // Template parameters must be typenames.
+ for (unsigned I = 0; I < 2; ++I) {
+ const NamedDecl *TParam = TL->getParam(I);
+ if (isa<NonTypeTemplateParmDecl>(TParam)) {
+ Diag(FT->getLocation(),
+ diag::warn_sycl_kernel_invalid_template_param_type);
+ return;
+ }
+ }
+
+ // Function must have at least one argument.
+ if (getFunctionOrMethodNumParams(D) != 1) {
+ Diag(FT->getLocation(), diag::warn_sycl_kernel_num_of_function_params);
+ return;
+ }
+
+ // Function must return void.
+ QualType RetTy = getFunctionOrMethodResultType(D);
+ if (!RetTy->isVoidType()) {
+ Diag(FT->getLocation(), diag::warn_sycl_kernel_return_type);
+ return;
+ }
+
+ handleSimpleAttribute<SYCLKernelAttr>(*this, D, AL);
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp b/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
index 9e7c8c7e4e8c..34d2d398f244 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
@@ -27,15 +27,20 @@
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaCUDA.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaOpenMP.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
@@ -526,13 +531,19 @@ Sema::ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHSVal,
return StmtError();
}
+ if (LangOpts.OpenACC &&
+ getCurScope()->isInOpenACCComputeConstructScope(Scope::SwitchScope)) {
+ Diag(CaseLoc, diag::err_acc_branch_in_out_compute_construct)
+ << /*branch*/ 0 << /*into*/ 1;
+ return StmtError();
+ }
+
auto *CS = CaseStmt::Create(Context, LHSVal.get(), RHSVal.get(),
CaseLoc, DotDotDotLoc, ColonLoc);
getCurFunction()->SwitchStack.back().getPointer()->addSwitchCase(CS);
return CS;
}
-/// ActOnCaseStmtBody - This installs a statement as the body of a case.
void Sema::ActOnCaseStmtBody(Stmt *S, Stmt *SubStmt) {
cast<CaseStmt>(S)->setSubStmt(SubStmt);
}
@@ -545,6 +556,13 @@ Sema::ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc,
return SubStmt;
}
+ if (LangOpts.OpenACC &&
+ getCurScope()->isInOpenACCComputeConstructScope(Scope::SwitchScope)) {
+ Diag(DefaultLoc, diag::err_acc_branch_in_out_compute_construct)
+ << /*branch*/ 0 << /*into*/ 1;
+ return StmtError();
+ }
+
DefaultStmt *DS = new (Context) DefaultStmt(DefaultLoc, ColonLoc, SubStmt);
getCurFunction()->SwitchStack.back().getPointer()->addSwitchCase(DS);
return DS;
@@ -566,6 +584,11 @@ Sema::ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
Diag(IdentLoc, diag::warn_reserved_extern_symbol)
<< TheDecl << static_cast<int>(Status);
+ // If this label is in a compute construct scope, we need to make sure we
+ // check gotos in/out.
+ if (getCurScope()->isInOpenACCComputeConstructScope())
+ setFunctionHasBranchProtectedScope();
+
// Otherwise, things are good. Fill in the declaration and return it.
LabelStmt *LS = new (Context) LabelStmt(IdentLoc, TheDecl, SubStmt);
TheDecl->setStmt(LS);
@@ -2196,10 +2219,6 @@ StmtResult Sema::ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
Body, ForLoc, LParenLoc, RParenLoc);
}
-/// In an Objective C collection iteration statement:
-/// for (x in y)
-/// x can be an arbitrary l-value expression. Bind it up as a
-/// full-expression.
StmtResult Sema::ActOnForEachLValueExpr(Expr *E) {
// Reduce placeholder expressions here. Note that this rejects the
// use of pseudo-object l-values in this position.
@@ -2213,166 +2232,6 @@ StmtResult Sema::ActOnForEachLValueExpr(Expr *E) {
return StmtResult(static_cast<Stmt*>(FullExpr.get()));
}
-ExprResult
-Sema::CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection) {
- if (!collection)
- return ExprError();
-
- ExprResult result = CorrectDelayedTyposInExpr(collection);
- if (!result.isUsable())
- return ExprError();
- collection = result.get();
-
- // Bail out early if we've got a type-dependent expression.
- if (collection->isTypeDependent()) return collection;
-
- // Perform normal l-value conversion.
- result = DefaultFunctionArrayLvalueConversion(collection);
- if (result.isInvalid())
- return ExprError();
- collection = result.get();
-
- // The operand needs to have object-pointer type.
- // TODO: should we do a contextual conversion?
- const ObjCObjectPointerType *pointerType =
- collection->getType()->getAs<ObjCObjectPointerType>();
- if (!pointerType)
- return Diag(forLoc, diag::err_collection_expr_type)
- << collection->getType() << collection->getSourceRange();
-
- // Check that the operand provides
- // - countByEnumeratingWithState:objects:count:
- const ObjCObjectType *objectType = pointerType->getObjectType();
- ObjCInterfaceDecl *iface = objectType->getInterface();
-
- // If we have a forward-declared type, we can't do this check.
- // Under ARC, it is an error not to have a forward-declared class.
- if (iface &&
- (getLangOpts().ObjCAutoRefCount
- ? RequireCompleteType(forLoc, QualType(objectType, 0),
- diag::err_arc_collection_forward, collection)
- : !isCompleteType(forLoc, QualType(objectType, 0)))) {
- // Otherwise, if we have any useful type information, check that
- // the type declares the appropriate method.
- } else if (iface || !objectType->qual_empty()) {
- IdentifierInfo *selectorIdents[] = {
- &Context.Idents.get("countByEnumeratingWithState"),
- &Context.Idents.get("objects"),
- &Context.Idents.get("count")
- };
- Selector selector = Context.Selectors.getSelector(3, &selectorIdents[0]);
-
- ObjCMethodDecl *method = nullptr;
-
- // If there's an interface, look in both the public and private APIs.
- if (iface) {
- method = iface->lookupInstanceMethod(selector);
- if (!method) method = iface->lookupPrivateMethod(selector);
- }
-
- // Also check protocol qualifiers.
- if (!method)
- method = LookupMethodInQualifiedType(selector, pointerType,
- /*instance*/ true);
-
- // If we didn't find it anywhere, give up.
- if (!method) {
- Diag(forLoc, diag::warn_collection_expr_type)
- << collection->getType() << selector << collection->getSourceRange();
- }
-
- // TODO: check for an incompatible signature?
- }
-
- // Wrap up any cleanups in the expression.
- return collection;
-}
-
-StmtResult
-Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
- Stmt *First, Expr *collection,
- SourceLocation RParenLoc) {
- setFunctionHasBranchProtectedScope();
-
- ExprResult CollectionExprResult =
- CheckObjCForCollectionOperand(ForLoc, collection);
-
- if (First) {
- QualType FirstType;
- if (DeclStmt *DS = dyn_cast<DeclStmt>(First)) {
- if (!DS->isSingleDecl())
- return StmtError(Diag((*DS->decl_begin())->getLocation(),
- diag::err_toomany_element_decls));
-
- VarDecl *D = dyn_cast<VarDecl>(DS->getSingleDecl());
- if (!D || D->isInvalidDecl())
- return StmtError();
-
- FirstType = D->getType();
- // C99 6.8.5p3: The declaration part of a 'for' statement shall only
- // declare identifiers for objects having storage class 'auto' or
- // 'register'.
- if (!D->hasLocalStorage())
- return StmtError(Diag(D->getLocation(),
- diag::err_non_local_variable_decl_in_for));
-
- // If the type contained 'auto', deduce the 'auto' to 'id'.
- if (FirstType->getContainedAutoType()) {
- SourceLocation Loc = D->getLocation();
- OpaqueValueExpr OpaqueId(Loc, Context.getObjCIdType(), VK_PRValue);
- Expr *DeducedInit = &OpaqueId;
- TemplateDeductionInfo Info(Loc);
- FirstType = QualType();
- TemplateDeductionResult Result = DeduceAutoType(
- D->getTypeSourceInfo()->getTypeLoc(), DeducedInit, FirstType, Info);
- if (Result != TDK_Success && Result != TDK_AlreadyDiagnosed)
- DiagnoseAutoDeductionFailure(D, DeducedInit);
- if (FirstType.isNull()) {
- D->setInvalidDecl();
- return StmtError();
- }
-
- D->setType(FirstType);
-
- if (!inTemplateInstantiation()) {
- SourceLocation Loc =
- D->getTypeSourceInfo()->getTypeLoc().getBeginLoc();
- Diag(Loc, diag::warn_auto_var_is_id)
- << D->getDeclName();
- }
- }
-
- } else {
- Expr *FirstE = cast<Expr>(First);
- if (!FirstE->isTypeDependent() && !FirstE->isLValue())
- return StmtError(
- Diag(First->getBeginLoc(), diag::err_selector_element_not_lvalue)
- << First->getSourceRange());
-
- FirstType = static_cast<Expr*>(First)->getType();
- if (FirstType.isConstQualified())
- Diag(ForLoc, diag::err_selector_element_const_type)
- << FirstType << First->getSourceRange();
- }
- if (!FirstType->isDependentType() &&
- !FirstType->isObjCObjectPointerType() &&
- !FirstType->isBlockPointerType())
- return StmtError(Diag(ForLoc, diag::err_selector_element_type)
- << FirstType << First->getSourceRange());
- }
-
- if (CollectionExprResult.isInvalid())
- return StmtError();
-
- CollectionExprResult =
- ActOnFinishFullExpr(CollectionExprResult.get(), /*DiscardedValue*/ false);
- if (CollectionExprResult.isInvalid())
- return StmtError();
-
- return new (Context) ObjCForCollectionStmt(First, CollectionExprResult.get(),
- nullptr, ForLoc, RParenLoc);
-}
-
/// Finish building a variable declaration for a for-range statement.
/// \return true if an error occurs.
static bool FinishForRangeVarDecl(Sema &SemaRef, VarDecl *Decl, Expr *Init,
@@ -2393,9 +2252,10 @@ static bool FinishForRangeVarDecl(Sema &SemaRef, VarDecl *Decl, Expr *Init,
SemaRef.Diag(Loc, DiagID) << Init->getType();
} else {
TemplateDeductionInfo Info(Init->getExprLoc());
- Sema::TemplateDeductionResult Result = SemaRef.DeduceAutoType(
+ TemplateDeductionResult Result = SemaRef.DeduceAutoType(
Decl->getTypeSourceInfo()->getTypeLoc(), Init, InitType, Info);
- if (Result != Sema::TDK_Success && Result != Sema::TDK_AlreadyDiagnosed)
+ if (Result != TemplateDeductionResult::Success &&
+ Result != TemplateDeductionResult::AlreadyDiagnosed)
SemaRef.Diag(Loc, DiagID) << Init->getType();
}
@@ -2409,7 +2269,7 @@ static bool FinishForRangeVarDecl(Sema &SemaRef, VarDecl *Decl, Expr *Init,
// FIXME: ARC may want to turn this into 'const __unsafe_unretained' if
// we're doing the equivalent of fast iteration.
if (SemaRef.getLangOpts().ObjCAutoRefCount &&
- SemaRef.inferObjCARCLifetime(Decl))
+ SemaRef.ObjC().inferObjCARCLifetime(Decl))
Decl->setInvalidDecl();
SemaRef.AddInitializerToDecl(Decl, Init, /*DirectInit=*/false);
@@ -2471,29 +2331,11 @@ static bool ObjCEnumerationCollection(Expr *Collection) {
&& Collection->getType()->getAs<ObjCObjectPointerType>() != nullptr;
}
-/// ActOnCXXForRangeStmt - Check and build a C++11 for-range statement.
-///
-/// C++11 [stmt.ranged]:
-/// A range-based for statement is equivalent to
-///
-/// {
-/// auto && __range = range-init;
-/// for ( auto __begin = begin-expr,
-/// __end = end-expr;
-/// __begin != __end;
-/// ++__begin ) {
-/// for-range-declaration = *__begin;
-/// statement
-/// }
-/// }
-///
-/// The body of the loop is not available yet, since it cannot be analysed until
-/// we have determined the type of the for-range-declaration.
-StmtResult Sema::ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
- SourceLocation CoawaitLoc, Stmt *InitStmt,
- Stmt *First, SourceLocation ColonLoc,
- Expr *Range, SourceLocation RParenLoc,
- BuildForRangeKind Kind) {
+StmtResult Sema::ActOnCXXForRangeStmt(
+ Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt,
+ Stmt *First, SourceLocation ColonLoc, Expr *Range, SourceLocation RParenLoc,
+ BuildForRangeKind Kind,
+ ArrayRef<MaterializeTemporaryExpr *> LifetimeExtendTemps) {
// FIXME: recover in order to allow the body to be parsed.
if (!First)
return StmtError();
@@ -2503,7 +2345,7 @@ StmtResult Sema::ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
if (InitStmt)
return Diag(InitStmt->getBeginLoc(), diag::err_objc_for_range_init_stmt)
<< InitStmt->getSourceRange();
- return ActOnObjCForCollectionStmt(ForLoc, First, Range, RParenLoc);
+ return ObjC().ActOnObjCForCollectionStmt(ForLoc, First, Range, RParenLoc);
}
DeclStmt *DS = dyn_cast<DeclStmt>(First);
@@ -2557,7 +2399,8 @@ StmtResult Sema::ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
StmtResult R = BuildCXXForRangeStmt(
ForLoc, CoawaitLoc, InitStmt, ColonLoc, RangeDecl.get(),
/*BeginStmt=*/nullptr, /*EndStmt=*/nullptr,
- /*Cond=*/nullptr, /*Inc=*/nullptr, DS, RParenLoc, Kind);
+ /*Cond=*/nullptr, /*Inc=*/nullptr, DS, RParenLoc, Kind,
+ LifetimeExtendTemps);
if (R.isInvalid()) {
ActOnInitializerError(LoopVar);
return StmtError();
@@ -2746,14 +2589,12 @@ static StmtResult RebuildForRangeWithDereference(Sema &SemaRef, Scope *S,
AdjustedRange.get(), RParenLoc, Sema::BFRK_Rebuild);
}
-/// BuildCXXForRangeStmt - Build or instantiate a C++11 for-range statement.
-StmtResult Sema::BuildCXXForRangeStmt(SourceLocation ForLoc,
- SourceLocation CoawaitLoc, Stmt *InitStmt,
- SourceLocation ColonLoc, Stmt *RangeDecl,
- Stmt *Begin, Stmt *End, Expr *Cond,
- Expr *Inc, Stmt *LoopVarDecl,
- SourceLocation RParenLoc,
- BuildForRangeKind Kind) {
+StmtResult Sema::BuildCXXForRangeStmt(
+ SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt,
+ SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End,
+ Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc,
+ BuildForRangeKind Kind,
+ ArrayRef<MaterializeTemporaryExpr *> LifetimeExtendTemps) {
// FIXME: This should not be used during template instantiation. We should
// pick up the set of unqualified lookup results for the != and + operators
// in the initial parse.
@@ -2813,6 +2654,14 @@ StmtResult Sema::BuildCXXForRangeStmt(SourceLocation ForLoc,
diag::err_for_range_incomplete_type))
return StmtError();
+ // P2718R0 - Lifetime extension in range-based for loops.
+ if (getLangOpts().CPlusPlus23 && !LifetimeExtendTemps.empty()) {
+ InitializedEntity Entity =
+ InitializedEntity::InitializeVariable(RangeVar);
+ for (auto *MTE : LifetimeExtendTemps)
+ MTE->setExtendingDecl(RangeVar, Entity.allocateManglingNumber());
+ }
+
// Build auto __begin = begin-expr, __end = end-expr.
// Divide by 2, since the variables are in the inner scope (loop body).
const auto DepthStr = std::to_string(S->getDepth() / 2);
@@ -3067,7 +2916,7 @@ StmtResult Sema::BuildCXXForRangeStmt(SourceLocation ForLoc,
// In OpenMP loop region loop control variable must be private. Perform
// analysis of first part (if any).
if (getLangOpts().OpenMP >= 50 && BeginDeclStmt.isUsable())
- ActOnOpenMPLoopInitialization(ForLoc, BeginDeclStmt.get());
+ OpenMP().ActOnOpenMPLoopInitialization(ForLoc, BeginDeclStmt.get());
return new (Context) CXXForRangeStmt(
InitStmt, RangeDS, cast_or_null<DeclStmt>(BeginDeclStmt.get()),
@@ -3076,17 +2925,6 @@ StmtResult Sema::BuildCXXForRangeStmt(SourceLocation ForLoc,
ColonLoc, RParenLoc);
}
-/// FinishObjCForCollectionStmt - Attach the body to a objective-C foreach
-/// statement.
-StmtResult Sema::FinishObjCForCollectionStmt(Stmt *S, Stmt *B) {
- if (!S || !B)
- return StmtError();
- ObjCForCollectionStmt * ForStmt = cast<ObjCForCollectionStmt>(S);
-
- ForStmt->setBody(B);
- return S;
-}
-
// Warn when the loop variable is a const reference that creates a copy.
// Suggest using the non-reference type for copies. If a copy can be prevented
// suggest the const reference type that would do so.
@@ -3267,16 +3105,12 @@ static void DiagnoseForRangeVariableCopies(Sema &SemaRef,
}
}
-/// FinishCXXForRangeStmt - Attach the body to a C++0x for-range statement.
-/// This is a separate step from ActOnCXXForRangeStmt because analysis of the
-/// body cannot be performed until after the type of the range variable is
-/// determined.
StmtResult Sema::FinishCXXForRangeStmt(Stmt *S, Stmt *B) {
if (!S || !B)
return StmtError();
if (isa<ObjCForCollectionStmt>(S))
- return FinishObjCForCollectionStmt(S, B);
+ return ObjC().FinishObjCForCollectionStmt(S, B);
CXXForRangeStmt *ForStmt = cast<CXXForRangeStmt>(S);
ForStmt->setBody(B);
@@ -3293,6 +3127,12 @@ StmtResult Sema::ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl) {
setFunctionHasBranchIntoScope();
+
+ // If this goto is in a compute construct scope, we need to make sure we check
+ // gotos in/out.
+ if (getCurScope()->isInOpenACCComputeConstructScope())
+ setFunctionHasBranchProtectedScope();
+
TheDecl->markUsed(Context);
return new (Context) GotoStmt(TheDecl, GotoLoc, LabelLoc);
}
@@ -3321,6 +3161,11 @@ Sema::ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc,
setFunctionHasIndirectGoto();
+ // If this goto is in a compute construct scope, we need to make sure we
+ // check gotos in/out.
+ if (getCurScope()->isInOpenACCComputeConstructScope())
+ setFunctionHasBranchProtectedScope();
+
return new (Context) IndirectGotoStmt(GotoLoc, StarLoc, E);
}
@@ -3345,6 +3190,15 @@ Sema::ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope) {
// initialization of that variable.
return StmtError(Diag(ContinueLoc, diag::err_continue_from_cond_var_init));
}
+
+ // A 'continue' that would normally have execution continue on a block outside
+ // of a compute construct counts as 'branching out of' the compute construct,
+ // so diagnose here.
+ if (S->isOpenACCComputeConstructScope())
+ return StmtError(
+ Diag(ContinueLoc, diag::err_acc_branch_in_out_compute_construct)
+ << /*branch*/ 0 << /*out of */ 0);
+
CheckJumpOutOfSEHFinally(*this, ContinueLoc, *S);
return new (Context) ContinueStmt(ContinueLoc);
@@ -3360,25 +3214,26 @@ Sema::ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope) {
if (S->isOpenMPLoopScope())
return StmtError(Diag(BreakLoc, diag::err_omp_loop_cannot_use_stmt)
<< "break");
+
+ // OpenACC doesn't allow 'break'ing from a compute construct, so diagnose if
+ // we are trying to do so. This can come in 2 flavors: 1-the break'able thing
+ // (besides the compute construct) 'contains' the compute construct, at which
+ // point the 'break' scope will be the compute construct. Else it could be a
+ // loop of some sort that has a direct parent of the compute construct.
+ // However, a 'break' in a 'switch' marked as a compute construct doesn't
+ // count as 'branch out of' the compute construct.
+ if (S->isOpenACCComputeConstructScope() ||
+ (S->isLoopScope() && S->getParent() &&
+ S->getParent()->isOpenACCComputeConstructScope()))
+ return StmtError(
+ Diag(BreakLoc, diag::err_acc_branch_in_out_compute_construct)
+ << /*branch*/ 0 << /*out of */ 0);
+
CheckJumpOutOfSEHFinally(*this, BreakLoc, *S);
return new (Context) BreakStmt(BreakLoc);
}
-/// Determine whether the given expression might be move-eligible or
-/// copy-elidable in either a (co_)return statement or throw expression,
-/// without considering function return type, if applicable.
-///
-/// \param E The expression being returned from the function or block,
-/// being thrown, or being co_returned from a coroutine. This expression
-/// might be modified by the implementation.
-///
-/// \param Mode Overrides detection of current language mode
-/// and uses the rules for C++23.
-///
-/// \returns An aggregate which contains the Candidate and isMoveEligible
-/// and isCopyElidable methods. If Candidate is non-null, it means
-/// isMoveEligible() would be true under the most permissive language standard.
Sema::NamedReturnInfo Sema::getNamedReturnInfo(Expr *&E,
SimplerImplicitMoveMode Mode) {
if (!E)
@@ -3391,6 +3246,8 @@ Sema::NamedReturnInfo Sema::getNamedReturnInfo(Expr *&E,
const auto *VD = dyn_cast<VarDecl>(DR->getDecl());
if (!VD)
return NamedReturnInfo();
+ if (VD->getInit() && VD->getInit()->containsErrors())
+ return NamedReturnInfo();
NamedReturnInfo Res = getNamedReturnInfo(VD);
if (Res.Candidate && !E->isXValue() &&
(Mode == SimplerImplicitMoveMode::ForceOn ||
@@ -3403,14 +3260,6 @@ Sema::NamedReturnInfo Sema::getNamedReturnInfo(Expr *&E,
return Res;
}
-/// Determine whether the given NRVO candidate variable is move-eligible or
-/// copy-elidable, without considering function return type.
-///
-/// \param VD The NRVO candidate variable.
-///
-/// \returns An aggregate which contains the Candidate and isMoveEligible
-/// and isCopyElidable methods. If Candidate is non-null, it means
-/// isMoveEligible() would be true under the most permissive language standard.
Sema::NamedReturnInfo Sema::getNamedReturnInfo(const VarDecl *VD) {
NamedReturnInfo Info{VD, NamedReturnInfo::MoveEligibleAndCopyElidable};
@@ -3456,22 +3305,13 @@ Sema::NamedReturnInfo Sema::getNamedReturnInfo(const VarDecl *VD) {
// Variables with higher required alignment than their type's ABI
// alignment cannot use NRVO.
- if (!VD->hasDependentAlignment() &&
+ if (!VD->hasDependentAlignment() && !VDType->isIncompleteType() &&
Context.getDeclAlign(VD) > Context.getTypeAlignInChars(VDType))
Info.S = NamedReturnInfo::MoveEligible;
return Info;
}
-/// Updates given NamedReturnInfo's move-eligible and
-/// copy-elidable statuses, considering the function
-/// return type criteria as applicable to return statements.
-///
-/// \param Info The NamedReturnInfo object to update.
-///
-/// \param ReturnType This is the return type of the function.
-/// \returns The copy elision candidate, in case the initial return expression
-/// was copy elidable, or nullptr otherwise.
const VarDecl *Sema::getCopyElisionCandidate(NamedReturnInfo &Info,
QualType ReturnType) {
if (!Info.Candidate)
@@ -3529,12 +3369,6 @@ VerifyInitializationSequenceCXX98(const Sema &S,
return true;
}
-/// Perform the initialization of a potentially-movable value, which
-/// is the result of return value.
-///
-/// This routine implements C++20 [class.copy.elision]p3, which attempts to
-/// treat returned lvalues as rvalues in certain cases (to prefer move
-/// construction), then falls back to treating them as lvalues if that failed.
ExprResult Sema::PerformMoveOrCopyInitialization(
const InitializedEntity &Entity, const NamedReturnInfo &NRInfo, Expr *Value,
bool SupressSimplerImplicitMoves) {
@@ -3575,9 +3409,6 @@ static bool hasDeducedReturnType(FunctionDecl *FD) {
return FPT->getReturnType()->isUndeducedType();
}
-/// ActOnCapScopeReturnStmt - Utility routine to type-check return statements
-/// for capturing scopes.
-///
StmtResult Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc,
Expr *RetValExp,
NamedReturnInfo &NRInfo,
@@ -3791,8 +3622,6 @@ TypeLoc Sema::getReturnTypeLoc(FunctionDecl *FD) const {
.getReturnLoc();
}
-/// Deduce the return type for a function from a returned expression, per
-/// C++1y [dcl.spec.auto]p6.
bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *RetExpr, const AutoType *AT) {
@@ -3802,7 +3631,7 @@ bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
if (isLambdaConversionOperator(FD))
return false;
- if (RetExpr && isa<InitListExpr>(RetExpr)) {
+ if (isa_and_nonnull<InitListExpr>(RetExpr)) {
// If the deduction is for a return statement and the initializer is
// a braced-init-list, the program is ill-formed.
Diag(RetExpr->getExprLoc(),
@@ -3854,14 +3683,14 @@ bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
TemplateDeductionResult Res = DeduceAutoType(
OrigResultType, RetExpr, Deduced, Info, /*DependentDeduction=*/false,
/*IgnoreConstraints=*/false, &FailedTSC);
- if (Res != TDK_Success && FD->isInvalidDecl())
+ if (Res != TemplateDeductionResult::Success && FD->isInvalidDecl())
return true;
switch (Res) {
- case TDK_Success:
+ case TemplateDeductionResult::Success:
break;
- case TDK_AlreadyDiagnosed:
+ case TemplateDeductionResult::AlreadyDiagnosed:
return true;
- case TDK_Inconsistent: {
+ case TemplateDeductionResult::Inconsistent: {
// If a function with a declared return type that contains a placeholder
// type has multiple return statements, the return type is deduced for
// each return statement. [...] if the type deduced is not the same in
@@ -3912,6 +3741,12 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
RetValExp, nullptr, /*RecoverUncorrectedTypos=*/true);
if (RetVal.isInvalid())
return StmtError();
+
+ if (getCurScope()->isInOpenACCComputeConstructScope())
+ return StmtError(
+ Diag(ReturnLoc, diag::err_acc_branch_in_out_compute_construct)
+ << /*return*/ 1 << /*out of */ 0);
+
StmtResult R =
BuildReturnStmt(ReturnLoc, RetVal.get(), /*AllowRecovery=*/true);
if (R.isInvalid() || ExprEvalContexts.back().isDiscardedStatementContext())
@@ -4228,132 +4063,6 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
}
StmtResult
-Sema::ActOnObjCAtCatchStmt(SourceLocation AtLoc,
- SourceLocation RParen, Decl *Parm,
- Stmt *Body) {
- VarDecl *Var = cast_or_null<VarDecl>(Parm);
- if (Var && Var->isInvalidDecl())
- return StmtError();
-
- return new (Context) ObjCAtCatchStmt(AtLoc, RParen, Var, Body);
-}
-
-StmtResult
-Sema::ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body) {
- return new (Context) ObjCAtFinallyStmt(AtLoc, Body);
-}
-
-StmtResult
-Sema::ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
- MultiStmtArg CatchStmts, Stmt *Finally) {
- if (!getLangOpts().ObjCExceptions)
- Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@try";
-
- // Objective-C try is incompatible with SEH __try.
- sema::FunctionScopeInfo *FSI = getCurFunction();
- if (FSI->FirstSEHTryLoc.isValid()) {
- Diag(AtLoc, diag::err_mixing_cxx_try_seh_try) << 1;
- Diag(FSI->FirstSEHTryLoc, diag::note_conflicting_try_here) << "'__try'";
- }
-
- FSI->setHasObjCTry(AtLoc);
- unsigned NumCatchStmts = CatchStmts.size();
- return ObjCAtTryStmt::Create(Context, AtLoc, Try, CatchStmts.data(),
- NumCatchStmts, Finally);
-}
-
-StmtResult Sema::BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw) {
- if (Throw) {
- ExprResult Result = DefaultLvalueConversion(Throw);
- if (Result.isInvalid())
- return StmtError();
-
- Result = ActOnFinishFullExpr(Result.get(), /*DiscardedValue*/ false);
- if (Result.isInvalid())
- return StmtError();
- Throw = Result.get();
-
- QualType ThrowType = Throw->getType();
- // Make sure the expression type is an ObjC pointer or "void *".
- if (!ThrowType->isDependentType() &&
- !ThrowType->isObjCObjectPointerType()) {
- const PointerType *PT = ThrowType->getAs<PointerType>();
- if (!PT || !PT->getPointeeType()->isVoidType())
- return StmtError(Diag(AtLoc, diag::err_objc_throw_expects_object)
- << Throw->getType() << Throw->getSourceRange());
- }
- }
-
- return new (Context) ObjCAtThrowStmt(AtLoc, Throw);
-}
-
-StmtResult
-Sema::ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
- Scope *CurScope) {
- if (!getLangOpts().ObjCExceptions)
- Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@throw";
-
- if (!Throw) {
- // @throw without an expression designates a rethrow (which must occur
- // in the context of an @catch clause).
- Scope *AtCatchParent = CurScope;
- while (AtCatchParent && !AtCatchParent->isAtCatchScope())
- AtCatchParent = AtCatchParent->getParent();
- if (!AtCatchParent)
- return StmtError(Diag(AtLoc, diag::err_rethrow_used_outside_catch));
- }
- return BuildObjCAtThrowStmt(AtLoc, Throw);
-}
-
-ExprResult
-Sema::ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand) {
- ExprResult result = DefaultLvalueConversion(operand);
- if (result.isInvalid())
- return ExprError();
- operand = result.get();
-
- // Make sure the expression type is an ObjC pointer or "void *".
- QualType type = operand->getType();
- if (!type->isDependentType() &&
- !type->isObjCObjectPointerType()) {
- const PointerType *pointerType = type->getAs<PointerType>();
- if (!pointerType || !pointerType->getPointeeType()->isVoidType()) {
- if (getLangOpts().CPlusPlus) {
- if (RequireCompleteType(atLoc, type,
- diag::err_incomplete_receiver_type))
- return Diag(atLoc, diag::err_objc_synchronized_expects_object)
- << type << operand->getSourceRange();
-
- ExprResult result = PerformContextuallyConvertToObjCPointer(operand);
- if (result.isInvalid())
- return ExprError();
- if (!result.isUsable())
- return Diag(atLoc, diag::err_objc_synchronized_expects_object)
- << type << operand->getSourceRange();
-
- operand = result.get();
- } else {
- return Diag(atLoc, diag::err_objc_synchronized_expects_object)
- << type << operand->getSourceRange();
- }
- }
- }
-
- // The operand to @synchronized is a full-expression.
- return ActOnFinishFullExpr(operand, /*DiscardedValue*/ false);
-}
-
-StmtResult
-Sema::ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SyncExpr,
- Stmt *SyncBody) {
- // We can't jump into or indirect-jump out of a @synchronized block.
- setFunctionHasBranchProtectedScope();
- return new (Context) ObjCAtSynchronizedStmt(AtLoc, SyncExpr, SyncBody);
-}
-
-/// ActOnCXXCatchBlock - Takes an exception declaration and a handler block
-/// and creates a proper catch handler from them.
-StmtResult
Sema::ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl,
Stmt *HandlerBlock) {
// There's nothing to test that ActOnExceptionDecl didn't already test.
@@ -4361,15 +4070,10 @@ Sema::ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl,
CXXCatchStmt(CatchLoc, cast_or_null<VarDecl>(ExDecl), HandlerBlock);
}
-StmtResult
-Sema::ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body) {
- setFunctionHasBranchProtectedScope();
- return new (Context) ObjCAutoreleasePoolStmt(AtLoc, Body);
-}
-
namespace {
class CatchHandlerType {
QualType QT;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsPointer : 1;
// This is a special constructor to be used only with DenseMapInfo's
@@ -4479,8 +4183,6 @@ public:
};
}
-/// ActOnCXXTryBlock - Takes a try compound-statement and a number of
-/// handlers and creates a try statement from them.
StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers) {
const llvm::Triple &T = Context.getTargetInfo().getTriple();
@@ -4501,8 +4203,8 @@ StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
// Exceptions aren't allowed in CUDA device code.
if (getLangOpts().CUDA)
- CUDADiagIfDeviceCode(TryLoc, diag::err_cuda_device_exceptions)
- << "try" << CurrentCUDATarget();
+ CUDA().DiagIfDeviceCode(TryLoc, diag::err_cuda_device_exceptions)
+ << "try" << llvm::to_underlying(CUDA().CurrentTarget());
if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope())
Diag(TryLoc, diag::err_omp_simd_region_cannot_use_stmt) << "try";
@@ -4748,7 +4450,8 @@ buildCapturedStmtCaptureList(Sema &S, CapturedRegionScopeInfo *RSI,
assert(Cap.isVariableCapture() && "unknown kind of capture");
if (S.getLangOpts().OpenMP && RSI->CapRegionKind == CR_OpenMP)
- S.setOpenMPCaptureKind(Field, Cap.getVariable(), RSI->OpenMPLevel);
+ S.OpenMP().setOpenMPCaptureKind(Field, Cap.getVariable(),
+ RSI->OpenMPLevel);
Captures.push_back(CapturedStmt::Capture(
Cap.getLocation(),
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp b/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp
index 83351b703c15..32d42f3c3f3b 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp
@@ -829,7 +829,7 @@ bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member,
NamedDecl *FoundDecl = nullptr;
// MS InlineAsm uses 'this' as a base
- if (getLangOpts().CPlusPlus && Base.equals("this")) {
+ if (getLangOpts().CPlusPlus && Base == "this") {
if (const Type *PT = getCurrentThisType().getTypePtrOrNull())
FoundDecl = PT->getPointeeType()->getAsTagDecl();
} else {
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp
index e6a4d3e63e4a..7f452d177c16 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp
@@ -16,6 +16,7 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ParsedAttr.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/StringExtras.h"
@@ -109,9 +110,16 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
SetHints(LoopHintAttr::Unroll, LoopHintAttr::Disable);
} else if (PragmaName == "unroll") {
// #pragma unroll N
- if (ValueExpr)
- SetHints(LoopHintAttr::UnrollCount, LoopHintAttr::Numeric);
- else
+ if (ValueExpr) {
+ if (!ValueExpr->isValueDependent()) {
+ auto Value = ValueExpr->EvaluateKnownConstInt(S.getASTContext());
+ if (Value.isZero() || Value.isOne())
+ SetHints(LoopHintAttr::Unroll, LoopHintAttr::Disable);
+ else
+ SetHints(LoopHintAttr::UnrollCount, LoopHintAttr::Numeric);
+ } else
+ SetHints(LoopHintAttr::UnrollCount, LoopHintAttr::Numeric);
+ } else
SetHints(LoopHintAttr::Unroll, LoopHintAttr::Enable);
} else if (PragmaName == "nounroll_and_jam") {
SetHints(LoopHintAttr::UnrollAndJam, LoopHintAttr::Disable);
@@ -142,7 +150,8 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
if (Option == LoopHintAttr::VectorizeWidth) {
assert((ValueExpr || (StateLoc && StateLoc->Ident)) &&
"Attribute must have a valid value expression or argument.");
- if (ValueExpr && S.CheckLoopHintExpr(ValueExpr, St->getBeginLoc()))
+ if (ValueExpr && S.CheckLoopHintExpr(ValueExpr, St->getBeginLoc(),
+ /*AllowZero=*/false))
return nullptr;
if (StateLoc && StateLoc->Ident && StateLoc->Ident->isStr("scalable"))
State = LoopHintAttr::ScalableWidth;
@@ -152,7 +161,8 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
Option == LoopHintAttr::UnrollCount ||
Option == LoopHintAttr::PipelineInitiationInterval) {
assert(ValueExpr && "Attribute must have a valid value expression.");
- if (S.CheckLoopHintExpr(ValueExpr, St->getBeginLoc()))
+ if (S.CheckLoopHintExpr(ValueExpr, St->getBeginLoc(),
+ /*AllowZero=*/false))
return nullptr;
State = LoopHintAttr::Numeric;
} else if (Option == LoopHintAttr::Vectorize ||
@@ -276,7 +286,7 @@ bool Sema::CheckAlwaysInlineAttr(const Stmt *OrigSt, const Stmt *CurSt,
static Attr *handleNoInlineAttr(Sema &S, Stmt *St, const ParsedAttr &A,
SourceRange Range) {
NoInlineAttr NIA(S.Context, A);
- if (!NIA.isClangNoInline()) {
+ if (!NIA.isStmtNoInline()) {
S.Diag(St->getBeginLoc(), diag::warn_function_attribute_ignored_in_stmt)
<< "[[clang::noinline]]";
return nullptr;
@@ -303,6 +313,15 @@ static Attr *handleAlwaysInlineAttr(Sema &S, Stmt *St, const ParsedAttr &A,
return ::new (S.Context) AlwaysInlineAttr(S.Context, A);
}
+static Attr *handleCXXAssumeAttr(Sema &S, Stmt *St, const ParsedAttr &A,
+ SourceRange Range) {
+ ExprResult Res = S.ActOnCXXAssumeAttr(St, A, Range);
+ if (!Res.isUsable())
+ return nullptr;
+
+ return ::new (S.Context) CXXAssumeAttr(S.Context, A, Res.get());
+}
+
static Attr *handleMustTailAttr(Sema &S, Stmt *St, const ParsedAttr &A,
SourceRange Range) {
// Validation is in Sema::ActOnAttributedStmt().
@@ -397,8 +416,8 @@ static void CheckForDuplicateLoopAttrs(Sema &S, ArrayRef<const Attr *> Attrs) {
<< *FirstItr;
S.Diag((*FirstItr)->getLocation(), diag::note_previous_attribute);
}
- return;
}
+ return;
}
static Attr *handleMSConstexprAttr(Sema &S, Stmt *St, const ParsedAttr &A,
@@ -566,6 +585,39 @@ static Attr *handleOpenCLUnrollHint(Sema &S, Stmt *St, const ParsedAttr &A,
return ::new (S.Context) OpenCLUnrollHintAttr(S.Context, A, UnrollFactor);
}
+static Attr *handleHLSLLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
+ SourceRange Range) {
+
+ if (A.getSemanticSpelling() == HLSLLoopHintAttr::Spelling::Microsoft_loop &&
+ !A.checkAtMostNumArgs(S, 0))
+ return nullptr;
+
+ unsigned UnrollFactor = 0;
+ if (A.getNumArgs() == 1) {
+
+ if (A.isArgIdent(0)) {
+ S.Diag(A.getLoc(), diag::err_attribute_argument_type)
+ << A << AANT_ArgumentIntegerConstant << A.getRange();
+ return nullptr;
+ }
+
+ Expr *E = A.getArgAsExpr(0);
+
+ if (S.CheckLoopHintExpr(E, St->getBeginLoc(),
+ /*AllowZero=*/false))
+ return nullptr;
+
+ std::optional<llvm::APSInt> ArgVal = E->getIntegerConstantExpr(S.Context);
+ // CheckLoopHintExpr handles non int const cases
+ assert(ArgVal != std::nullopt && "ArgVal should be an integer constant.");
+ int Val = ArgVal->getSExtValue();
+ // CheckLoopHintExpr handles negative and zero cases
+ assert(Val > 0 && "Val should be a positive integer greater than zero.");
+ UnrollFactor = static_cast<unsigned>(Val);
+ }
+ return ::new (S.Context) HLSLLoopHintAttr(S.Context, A, UnrollFactor);
+}
+
static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
SourceRange Range) {
if (A.isInvalid() || A.getKind() == ParsedAttr::IgnoredAttribute)
@@ -594,10 +646,14 @@ static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
switch (A.getKind()) {
case ParsedAttr::AT_AlwaysInline:
return handleAlwaysInlineAttr(S, St, A, Range);
+ case ParsedAttr::AT_CXXAssume:
+ return handleCXXAssumeAttr(S, St, A, Range);
case ParsedAttr::AT_FallThrough:
return handleFallThroughAttr(S, St, A, Range);
case ParsedAttr::AT_LoopHint:
return handleLoopHintAttr(S, St, A, Range);
+ case ParsedAttr::AT_HLSLLoopHint:
+ return handleHLSLLoopHintAttr(S, St, A, Range);
case ParsedAttr::AT_OpenCLUnrollHint:
return handleOpenCLUnrollHint(S, St, A, Range);
case ParsedAttr::AT_Suppress:
@@ -641,3 +697,54 @@ bool Sema::CheckRebuiltStmtAttributes(ArrayRef<const Attr *> Attrs) {
CheckForDuplicateLoopAttrs<CodeAlignAttr>(*this, Attrs);
return false;
}
+
+ExprResult Sema::ActOnCXXAssumeAttr(Stmt *St, const ParsedAttr &A,
+ SourceRange Range) {
+ if (A.getNumArgs() != 1 || !A.getArgAsExpr(0)) {
+ Diag(A.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << A.getAttrName() << 1 << Range;
+ return ExprError();
+ }
+
+ auto *Assumption = A.getArgAsExpr(0);
+
+ if (DiagnoseUnexpandedParameterPack(Assumption)) {
+ return ExprError();
+ }
+
+ if (Assumption->getDependence() == ExprDependence::None) {
+ ExprResult Res = BuildCXXAssumeExpr(Assumption, A.getAttrName(), Range);
+ if (Res.isInvalid())
+ return ExprError();
+ Assumption = Res.get();
+ }
+
+ if (!getLangOpts().CPlusPlus23 &&
+ A.getSyntax() == AttributeCommonInfo::AS_CXX11)
+ Diag(A.getLoc(), diag::ext_cxx23_attr) << A << Range;
+
+ return Assumption;
+}
+
+ExprResult Sema::BuildCXXAssumeExpr(Expr *Assumption,
+ const IdentifierInfo *AttrName,
+ SourceRange Range) {
+ ExprResult Res = CorrectDelayedTyposInExpr(Assumption);
+ if (Res.isInvalid())
+ return ExprError();
+
+ Res = CheckPlaceholderExpr(Res.get());
+ if (Res.isInvalid())
+ return ExprError();
+
+ Res = PerformContextuallyConvertToBool(Res.get());
+ if (Res.isInvalid())
+ return ExprError();
+
+ Assumption = Res.get();
+ if (Assumption->HasSideEffects(Context))
+ Diag(Assumption->getBeginLoc(), diag::warn_assume_side_effects)
+ << AttrName << Range;
+
+ return Assumption;
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaSwift.cpp b/contrib/llvm-project/clang/lib/Sema/SemaSwift.cpp
new file mode 100644
index 000000000000..bf56ae8ac76d
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaSwift.cpp
@@ -0,0 +1,765 @@
+//===------ SemaSwift.cpp ------ Swift language-specific routines ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to Swift.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaSwift.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/Basic/AttributeCommonInfo.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/Sema/Attr.h"
+#include "clang/Sema/ParsedAttr.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaObjC.h"
+
+namespace clang {
+SemaSwift::SemaSwift(Sema &S) : SemaBase(S) {}
+
+SwiftNameAttr *SemaSwift::mergeNameAttr(Decl *D, const SwiftNameAttr &SNA,
+ StringRef Name) {
+ if (const auto *PrevSNA = D->getAttr<SwiftNameAttr>()) {
+ if (PrevSNA->getName() != Name && !PrevSNA->isImplicit()) {
+ Diag(PrevSNA->getLocation(), diag::err_attributes_are_not_compatible)
+ << PrevSNA << &SNA
+ << (PrevSNA->isRegularKeywordAttribute() ||
+ SNA.isRegularKeywordAttribute());
+ Diag(SNA.getLoc(), diag::note_conflicting_attribute);
+ }
+
+ D->dropAttr<SwiftNameAttr>();
+ }
+ return ::new (getASTContext()) SwiftNameAttr(getASTContext(), SNA, Name);
+}
+
+/// Pointer-like types in the default address space.
+static bool isValidSwiftContextType(QualType Ty) {
+ if (!Ty->hasPointerRepresentation())
+ return Ty->isDependentType();
+ return Ty->getPointeeType().getAddressSpace() == LangAS::Default;
+}
+
+/// Pointers and references in the default address space.
+static bool isValidSwiftIndirectResultType(QualType Ty) {
+ if (const auto *PtrType = Ty->getAs<PointerType>()) {
+ Ty = PtrType->getPointeeType();
+ } else if (const auto *RefType = Ty->getAs<ReferenceType>()) {
+ Ty = RefType->getPointeeType();
+ } else {
+ return Ty->isDependentType();
+ }
+ return Ty.getAddressSpace() == LangAS::Default;
+}
+
+/// Pointers and references to pointers in the default address space.
+static bool isValidSwiftErrorResultType(QualType Ty) {
+ if (const auto *PtrType = Ty->getAs<PointerType>()) {
+ Ty = PtrType->getPointeeType();
+ } else if (const auto *RefType = Ty->getAs<ReferenceType>()) {
+ Ty = RefType->getPointeeType();
+ } else {
+ return Ty->isDependentType();
+ }
+ if (!Ty.getQualifiers().empty())
+ return false;
+ return isValidSwiftContextType(Ty);
+}
+
+void SemaSwift::handleAttrAttr(Decl *D, const ParsedAttr &AL) {
+ // Make sure that there is a string literal as the annotation's single
+ // argument.
+ StringRef Str;
+ if (!SemaRef.checkStringLiteralArgumentAttr(AL, 0, Str))
+ return;
+
+ D->addAttr(::new (getASTContext()) SwiftAttrAttr(getASTContext(), AL, Str));
+}
+
+void SemaSwift::handleBridge(Decl *D, const ParsedAttr &AL) {
+ // Make sure that there is a string literal as the annotation's single
+ // argument.
+ StringRef BT;
+ if (!SemaRef.checkStringLiteralArgumentAttr(AL, 0, BT))
+ return;
+
+ // Warn about duplicate attributes if they have different arguments, but drop
+ // any duplicate attributes regardless.
+ if (const auto *Other = D->getAttr<SwiftBridgeAttr>()) {
+ if (Other->getSwiftType() != BT)
+ Diag(AL.getLoc(), diag::warn_duplicate_attribute) << AL;
+ return;
+ }
+
+ D->addAttr(::new (getASTContext()) SwiftBridgeAttr(getASTContext(), AL, BT));
+}
+
+static bool isErrorParameter(Sema &S, QualType QT) {
+ const auto *PT = QT->getAs<PointerType>();
+ if (!PT)
+ return false;
+
+ QualType Pointee = PT->getPointeeType();
+
+ // Check for NSError**.
+ if (const auto *OPT = Pointee->getAs<ObjCObjectPointerType>())
+ if (const auto *ID = OPT->getInterfaceDecl())
+ if (ID->getIdentifier() == S.ObjC().getNSErrorIdent())
+ return true;
+
+ // Check for CFError**.
+ if (const auto *PT = Pointee->getAs<PointerType>())
+ if (const auto *RT = PT->getPointeeType()->getAs<RecordType>())
+ if (S.ObjC().isCFError(RT->getDecl()))
+ return true;
+
+ return false;
+}
+
+void SemaSwift::handleError(Decl *D, const ParsedAttr &AL) {
+ auto hasErrorParameter = [](Sema &S, Decl *D, const ParsedAttr &AL) -> bool {
+ for (unsigned I = 0, E = getFunctionOrMethodNumParams(D); I != E; ++I) {
+ if (isErrorParameter(S, getFunctionOrMethodParamType(D, I)))
+ return true;
+ }
+
+ S.Diag(AL.getLoc(), diag::err_attr_swift_error_no_error_parameter)
+ << AL << isa<ObjCMethodDecl>(D);
+ return false;
+ };
+
+ auto hasPointerResult = [](Sema &S, Decl *D, const ParsedAttr &AL) -> bool {
+ // - C, ObjC, and block pointers are definitely okay.
+ // - References are definitely not okay.
+ // - nullptr_t is weird, but acceptable.
+ QualType RT = getFunctionOrMethodResultType(D);
+ if (RT->hasPointerRepresentation() && !RT->isReferenceType())
+ return true;
+
+ S.Diag(AL.getLoc(), diag::err_attr_swift_error_return_type)
+ << AL << AL.getArgAsIdent(0)->Ident->getName() << isa<ObjCMethodDecl>(D)
+ << /*pointer*/ 1;
+ return false;
+ };
+
+ auto hasIntegerResult = [](Sema &S, Decl *D, const ParsedAttr &AL) -> bool {
+ QualType RT = getFunctionOrMethodResultType(D);
+ if (RT->isIntegralType(S.Context))
+ return true;
+
+ S.Diag(AL.getLoc(), diag::err_attr_swift_error_return_type)
+ << AL << AL.getArgAsIdent(0)->Ident->getName() << isa<ObjCMethodDecl>(D)
+ << /*integral*/ 0;
+ return false;
+ };
+
+ if (D->isInvalidDecl())
+ return;
+
+ IdentifierLoc *Loc = AL.getArgAsIdent(0);
+ SwiftErrorAttr::ConventionKind Convention;
+ if (!SwiftErrorAttr::ConvertStrToConventionKind(Loc->Ident->getName(),
+ Convention)) {
+ Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
+ << AL << Loc->Ident;
+ return;
+ }
+
+ switch (Convention) {
+ case SwiftErrorAttr::None:
+ // No additional validation required.
+ break;
+
+ case SwiftErrorAttr::NonNullError:
+ if (!hasErrorParameter(SemaRef, D, AL))
+ return;
+ break;
+
+ case SwiftErrorAttr::NullResult:
+ if (!hasErrorParameter(SemaRef, D, AL) || !hasPointerResult(SemaRef, D, AL))
+ return;
+ break;
+
+ case SwiftErrorAttr::NonZeroResult:
+ case SwiftErrorAttr::ZeroResult:
+ if (!hasErrorParameter(SemaRef, D, AL) || !hasIntegerResult(SemaRef, D, AL))
+ return;
+ break;
+ }
+
+ D->addAttr(::new (getASTContext())
+ SwiftErrorAttr(getASTContext(), AL, Convention));
+}
+
+static void checkSwiftAsyncErrorBlock(Sema &S, Decl *D,
+ const SwiftAsyncErrorAttr *ErrorAttr,
+ const SwiftAsyncAttr *AsyncAttr) {
+ if (AsyncAttr->getKind() == SwiftAsyncAttr::None) {
+ if (ErrorAttr->getConvention() != SwiftAsyncErrorAttr::None) {
+ S.Diag(AsyncAttr->getLocation(),
+ diag::err_swift_async_error_without_swift_async)
+ << AsyncAttr << isa<ObjCMethodDecl>(D);
+ }
+ return;
+ }
+
+ const ParmVarDecl *HandlerParam = getFunctionOrMethodParam(
+ D, AsyncAttr->getCompletionHandlerIndex().getASTIndex());
+ // handleSwiftAsyncAttr already verified the type is correct, so no need to
+ // double-check it here.
+ const auto *FuncTy = HandlerParam->getType()
+ ->castAs<BlockPointerType>()
+ ->getPointeeType()
+ ->getAs<FunctionProtoType>();
+ ArrayRef<QualType> BlockParams;
+ if (FuncTy)
+ BlockParams = FuncTy->getParamTypes();
+
+ switch (ErrorAttr->getConvention()) {
+ case SwiftAsyncErrorAttr::ZeroArgument:
+ case SwiftAsyncErrorAttr::NonZeroArgument: {
+ uint32_t ParamIdx = ErrorAttr->getHandlerParamIdx();
+ if (ParamIdx == 0 || ParamIdx > BlockParams.size()) {
+ S.Diag(ErrorAttr->getLocation(),
+ diag::err_attribute_argument_out_of_bounds)
+ << ErrorAttr << 2;
+ return;
+ }
+ QualType ErrorParam = BlockParams[ParamIdx - 1];
+ if (!ErrorParam->isIntegralType(S.Context)) {
+ StringRef ConvStr =
+ ErrorAttr->getConvention() == SwiftAsyncErrorAttr::ZeroArgument
+ ? "zero_argument"
+ : "nonzero_argument";
+ S.Diag(ErrorAttr->getLocation(), diag::err_swift_async_error_non_integral)
+ << ErrorAttr << ConvStr << ParamIdx << ErrorParam;
+ return;
+ }
+ break;
+ }
+ case SwiftAsyncErrorAttr::NonNullError: {
+ bool AnyErrorParams = false;
+ for (QualType Param : BlockParams) {
+ // Check for NSError *.
+ if (const auto *ObjCPtrTy = Param->getAs<ObjCObjectPointerType>()) {
+ if (const auto *ID = ObjCPtrTy->getInterfaceDecl()) {
+ if (ID->getIdentifier() == S.ObjC().getNSErrorIdent()) {
+ AnyErrorParams = true;
+ break;
+ }
+ }
+ }
+ // Check for CFError *.
+ if (const auto *PtrTy = Param->getAs<PointerType>()) {
+ if (const auto *RT = PtrTy->getPointeeType()->getAs<RecordType>()) {
+ if (S.ObjC().isCFError(RT->getDecl())) {
+ AnyErrorParams = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!AnyErrorParams) {
+ S.Diag(ErrorAttr->getLocation(),
+ diag::err_swift_async_error_no_error_parameter)
+ << ErrorAttr << isa<ObjCMethodDecl>(D);
+ return;
+ }
+ break;
+ }
+ case SwiftAsyncErrorAttr::None:
+ break;
+ }
+}
+
+void SemaSwift::handleAsyncError(Decl *D, const ParsedAttr &AL) {
+ IdentifierLoc *IDLoc = AL.getArgAsIdent(0);
+ SwiftAsyncErrorAttr::ConventionKind ConvKind;
+ if (!SwiftAsyncErrorAttr::ConvertStrToConventionKind(IDLoc->Ident->getName(),
+ ConvKind)) {
+ Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
+ << AL << IDLoc->Ident;
+ return;
+ }
+
+ uint32_t ParamIdx = 0;
+ switch (ConvKind) {
+ case SwiftAsyncErrorAttr::ZeroArgument:
+ case SwiftAsyncErrorAttr::NonZeroArgument: {
+ if (!AL.checkExactlyNumArgs(SemaRef, 2))
+ return;
+
+ Expr *IdxExpr = AL.getArgAsExpr(1);
+ if (!SemaRef.checkUInt32Argument(AL, IdxExpr, ParamIdx))
+ return;
+ break;
+ }
+ case SwiftAsyncErrorAttr::NonNullError:
+ case SwiftAsyncErrorAttr::None: {
+ if (!AL.checkExactlyNumArgs(SemaRef, 1))
+ return;
+ break;
+ }
+ }
+
+ auto *ErrorAttr = ::new (getASTContext())
+ SwiftAsyncErrorAttr(getASTContext(), AL, ConvKind, ParamIdx);
+ D->addAttr(ErrorAttr);
+
+ if (auto *AsyncAttr = D->getAttr<SwiftAsyncAttr>())
+ checkSwiftAsyncErrorBlock(SemaRef, D, ErrorAttr, AsyncAttr);
+}
+
+// For a function, this will validate a compound Swift name, e.g.
+// <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>, and
+// the function will output the number of parameter names, and whether this is a
+// single-arg initializer.
+//
+// For a type, enum constant, property, or variable declaration, this will
+// validate either a simple identifier, or a qualified
+// <code>context.identifier</code> name.
+static bool validateSwiftFunctionName(Sema &S, const ParsedAttr &AL,
+ SourceLocation Loc, StringRef Name,
+ unsigned &SwiftParamCount,
+ bool &IsSingleParamInit) {
+ SwiftParamCount = 0;
+ IsSingleParamInit = false;
+
+ // Check whether this will be mapped to a getter or setter of a property.
+ bool IsGetter = false, IsSetter = false;
+ if (Name.consume_front("getter:"))
+ IsGetter = true;
+ else if (Name.consume_front("setter:"))
+ IsSetter = true;
+
+ if (Name.back() != ')') {
+ S.Diag(Loc, diag::warn_attr_swift_name_function) << AL;
+ return false;
+ }
+
+ bool IsMember = false;
+ StringRef ContextName, BaseName, Parameters;
+
+ std::tie(BaseName, Parameters) = Name.split('(');
+
+ // Split at the first '.', if it exists, which separates the context name
+ // from the base name.
+ std::tie(ContextName, BaseName) = BaseName.split('.');
+ if (BaseName.empty()) {
+ BaseName = ContextName;
+ ContextName = StringRef();
+ } else if (ContextName.empty() || !isValidAsciiIdentifier(ContextName)) {
+ S.Diag(Loc, diag::warn_attr_swift_name_invalid_identifier)
+ << AL << /*context*/ 1;
+ return false;
+ } else {
+ IsMember = true;
+ }
+
+ if (!isValidAsciiIdentifier(BaseName) || BaseName == "_") {
+ S.Diag(Loc, diag::warn_attr_swift_name_invalid_identifier)
+ << AL << /*basename*/ 0;
+ return false;
+ }
+
+ bool IsSubscript = BaseName == "subscript";
+ // A subscript accessor must be a getter or setter.
+ if (IsSubscript && !IsGetter && !IsSetter) {
+ S.Diag(Loc, diag::warn_attr_swift_name_subscript_invalid_parameter)
+ << AL << /* getter or setter */ 0;
+ return false;
+ }
+
+ if (Parameters.empty()) {
+ S.Diag(Loc, diag::warn_attr_swift_name_missing_parameters) << AL;
+ return false;
+ }
+
+ assert(Parameters.back() == ')' && "expected ')'");
+ Parameters = Parameters.drop_back(); // ')'
+
+ if (Parameters.empty()) {
+ // Setters and subscripts must have at least one parameter.
+ if (IsSubscript) {
+ S.Diag(Loc, diag::warn_attr_swift_name_subscript_invalid_parameter)
+ << AL << /* have at least one parameter */ 1;
+ return false;
+ }
+
+ if (IsSetter) {
+ S.Diag(Loc, diag::warn_attr_swift_name_setter_parameters) << AL;
+ return false;
+ }
+
+ return true;
+ }
+
+ if (Parameters.back() != ':') {
+ S.Diag(Loc, diag::warn_attr_swift_name_function) << AL;
+ return false;
+ }
+
+ StringRef CurrentParam;
+ std::optional<unsigned> SelfLocation;
+ unsigned NewValueCount = 0;
+ std::optional<unsigned> NewValueLocation;
+ do {
+ std::tie(CurrentParam, Parameters) = Parameters.split(':');
+
+ if (!isValidAsciiIdentifier(CurrentParam)) {
+ S.Diag(Loc, diag::warn_attr_swift_name_invalid_identifier)
+ << AL << /*parameter*/ 2;
+ return false;
+ }
+
+ if (IsMember && CurrentParam == "self") {
+ // "self" indicates the "self" argument for a member.
+
+ // More than one "self"?
+ if (SelfLocation) {
+ S.Diag(Loc, diag::warn_attr_swift_name_multiple_selfs) << AL;
+ return false;
+ }
+
+ // The "self" location is the current parameter.
+ SelfLocation = SwiftParamCount;
+ } else if (CurrentParam == "newValue") {
+ // "newValue" indicates the "newValue" argument for a setter.
+
+ // There should only be one 'newValue', but it's only significant for
+ // subscript accessors, so don't error right away.
+ ++NewValueCount;
+
+ NewValueLocation = SwiftParamCount;
+ }
+
+ ++SwiftParamCount;
+ } while (!Parameters.empty());
+
+ // Only instance subscripts are currently supported.
+ if (IsSubscript && !SelfLocation) {
+ S.Diag(Loc, diag::warn_attr_swift_name_subscript_invalid_parameter)
+ << AL << /*have a 'self:' parameter*/ 2;
+ return false;
+ }
+
+ IsSingleParamInit =
+ SwiftParamCount == 1 && BaseName == "init" && CurrentParam != "_";
+
+ // Check the number of parameters for a getter/setter.
+ if (IsGetter || IsSetter) {
+ // Setters have one parameter for the new value.
+ unsigned NumExpectedParams = IsGetter ? 0 : 1;
+ unsigned ParamDiag = IsGetter
+ ? diag::warn_attr_swift_name_getter_parameters
+ : diag::warn_attr_swift_name_setter_parameters;
+
+ // Instance methods have one parameter for "self".
+ if (SelfLocation)
+ ++NumExpectedParams;
+
+ // Subscripts may have additional parameters beyond the expected params for
+ // the index.
+ if (IsSubscript) {
+ if (SwiftParamCount < NumExpectedParams) {
+ S.Diag(Loc, ParamDiag) << AL;
+ return false;
+ }
+
+ // A subscript setter must explicitly label its newValue parameter to
+ // distinguish it from index parameters.
+ if (IsSetter) {
+ if (!NewValueLocation) {
+ S.Diag(Loc, diag::warn_attr_swift_name_subscript_setter_no_newValue)
+ << AL;
+ return false;
+ }
+ if (NewValueCount > 1) {
+ S.Diag(Loc,
+ diag::warn_attr_swift_name_subscript_setter_multiple_newValues)
+ << AL;
+ return false;
+ }
+ } else {
+ // Subscript getters should have no 'newValue:' parameter.
+ if (NewValueLocation) {
+ S.Diag(Loc, diag::warn_attr_swift_name_subscript_getter_newValue)
+ << AL;
+ return false;
+ }
+ }
+ } else {
+ // Property accessors must have exactly the number of expected params.
+ if (SwiftParamCount != NumExpectedParams) {
+ S.Diag(Loc, ParamDiag) << AL;
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+bool SemaSwift::DiagnoseName(Decl *D, StringRef Name, SourceLocation Loc,
+ const ParsedAttr &AL, bool IsAsync) {
+ if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
+ ArrayRef<ParmVarDecl *> Params;
+ unsigned ParamCount;
+
+ if (const auto *Method = dyn_cast<ObjCMethodDecl>(D)) {
+ ParamCount = Method->getSelector().getNumArgs();
+ Params = Method->parameters().slice(0, ParamCount);
+ } else {
+ const auto *F = cast<FunctionDecl>(D);
+
+ ParamCount = F->getNumParams();
+ Params = F->parameters();
+
+ if (!F->hasWrittenPrototype()) {
+ Diag(Loc, diag::warn_attribute_wrong_decl_type)
+ << AL << AL.isRegularKeywordAttribute()
+ << ExpectedFunctionWithProtoType;
+ return false;
+ }
+ }
+
+ // The async name drops the last callback parameter.
+ if (IsAsync) {
+ if (ParamCount == 0) {
+ Diag(Loc, diag::warn_attr_swift_name_decl_missing_params)
+ << AL << isa<ObjCMethodDecl>(D);
+ return false;
+ }
+ ParamCount -= 1;
+ }
+
+ unsigned SwiftParamCount;
+ bool IsSingleParamInit;
+ if (!validateSwiftFunctionName(SemaRef, AL, Loc, Name, SwiftParamCount,
+ IsSingleParamInit))
+ return false;
+
+ bool ParamCountValid;
+ if (SwiftParamCount == ParamCount) {
+ ParamCountValid = true;
+ } else if (SwiftParamCount > ParamCount) {
+ ParamCountValid = IsSingleParamInit && ParamCount == 0;
+ } else {
+ // We have fewer Swift parameters than Objective-C parameters, but that
+ // might be because we've transformed some of them. Check for potential
+ // "out" parameters and err on the side of not warning.
+ unsigned MaybeOutParamCount =
+ llvm::count_if(Params, [](const ParmVarDecl *Param) -> bool {
+ QualType ParamTy = Param->getType();
+ if (ParamTy->isReferenceType() || ParamTy->isPointerType())
+ return !ParamTy->getPointeeType().isConstQualified();
+ return false;
+ });
+
+ ParamCountValid = SwiftParamCount + MaybeOutParamCount >= ParamCount;
+ }
+
+ if (!ParamCountValid) {
+ Diag(Loc, diag::warn_attr_swift_name_num_params)
+ << (SwiftParamCount > ParamCount) << AL << ParamCount
+ << SwiftParamCount;
+ return false;
+ }
+ } else if ((isa<EnumConstantDecl>(D) || isa<ObjCProtocolDecl>(D) ||
+ isa<ObjCInterfaceDecl>(D) || isa<ObjCPropertyDecl>(D) ||
+ isa<VarDecl>(D) || isa<TypedefNameDecl>(D) || isa<TagDecl>(D) ||
+ isa<IndirectFieldDecl>(D) || isa<FieldDecl>(D)) &&
+ !IsAsync) {
+ StringRef ContextName, BaseName;
+
+ std::tie(ContextName, BaseName) = Name.split('.');
+ if (BaseName.empty()) {
+ BaseName = ContextName;
+ ContextName = StringRef();
+ } else if (!isValidAsciiIdentifier(ContextName)) {
+ Diag(Loc, diag::warn_attr_swift_name_invalid_identifier)
+ << AL << /*context*/ 1;
+ return false;
+ }
+
+ if (!isValidAsciiIdentifier(BaseName)) {
+ Diag(Loc, diag::warn_attr_swift_name_invalid_identifier)
+ << AL << /*basename*/ 0;
+ return false;
+ }
+ } else {
+ Diag(Loc, diag::warn_attr_swift_name_decl_kind) << AL;
+ return false;
+ }
+ return true;
+}
+
+void SemaSwift::handleName(Decl *D, const ParsedAttr &AL) {
+ StringRef Name;
+ SourceLocation Loc;
+ if (!SemaRef.checkStringLiteralArgumentAttr(AL, 0, Name, &Loc))
+ return;
+
+ if (!DiagnoseName(D, Name, Loc, AL, /*IsAsync=*/false))
+ return;
+
+ D->addAttr(::new (getASTContext()) SwiftNameAttr(getASTContext(), AL, Name));
+}
+
+void SemaSwift::handleAsyncName(Decl *D, const ParsedAttr &AL) {
+ StringRef Name;
+ SourceLocation Loc;
+ if (!SemaRef.checkStringLiteralArgumentAttr(AL, 0, Name, &Loc))
+ return;
+
+ if (!DiagnoseName(D, Name, Loc, AL, /*IsAsync=*/true))
+ return;
+
+ D->addAttr(::new (getASTContext())
+ SwiftAsyncNameAttr(getASTContext(), AL, Name));
+}
+
+void SemaSwift::handleNewType(Decl *D, const ParsedAttr &AL) {
+ // Make sure that there is an identifier as the annotation's single argument.
+ if (!AL.checkExactlyNumArgs(SemaRef, 1))
+ return;
+
+ if (!AL.isArgIdent(0)) {
+ Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ SwiftNewTypeAttr::NewtypeKind Kind;
+ IdentifierInfo *II = AL.getArgAsIdent(0)->Ident;
+ if (!SwiftNewTypeAttr::ConvertStrToNewtypeKind(II->getName(), Kind)) {
+ Diag(AL.getLoc(), diag::warn_attribute_type_not_supported) << AL << II;
+ return;
+ }
+
+ if (!isa<TypedefNameDecl>(D)) {
+ Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type_str)
+ << AL << AL.isRegularKeywordAttribute() << "typedefs";
+ return;
+ }
+
+ D->addAttr(::new (getASTContext())
+ SwiftNewTypeAttr(getASTContext(), AL, Kind));
+}
+
+void SemaSwift::handleAsyncAttr(Decl *D, const ParsedAttr &AL) {
+ if (!AL.isArgIdent(0)) {
+ Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
+ << AL << 1 << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ SwiftAsyncAttr::Kind Kind;
+ IdentifierInfo *II = AL.getArgAsIdent(0)->Ident;
+ if (!SwiftAsyncAttr::ConvertStrToKind(II->getName(), Kind)) {
+ Diag(AL.getLoc(), diag::err_swift_async_no_access) << AL << II;
+ return;
+ }
+
+ ParamIdx Idx;
+ if (Kind == SwiftAsyncAttr::None) {
+ // If this is 'none', then there shouldn't be any additional arguments.
+ if (!AL.checkExactlyNumArgs(SemaRef, 1))
+ return;
+ } else {
+ // Non-none swift_async requires a completion handler index argument.
+ if (!AL.checkExactlyNumArgs(SemaRef, 2))
+ return;
+
+ Expr *HandlerIdx = AL.getArgAsExpr(1);
+ if (!SemaRef.checkFunctionOrMethodParameterIndex(D, AL, 2, HandlerIdx, Idx))
+ return;
+
+ const ParmVarDecl *CompletionBlock =
+ getFunctionOrMethodParam(D, Idx.getASTIndex());
+ QualType CompletionBlockType = CompletionBlock->getType();
+ if (!CompletionBlockType->isBlockPointerType()) {
+ Diag(CompletionBlock->getLocation(), diag::err_swift_async_bad_block_type)
+ << CompletionBlock->getType();
+ return;
+ }
+ QualType BlockTy =
+ CompletionBlockType->castAs<BlockPointerType>()->getPointeeType();
+ if (!BlockTy->castAs<FunctionType>()->getReturnType()->isVoidType()) {
+ Diag(CompletionBlock->getLocation(), diag::err_swift_async_bad_block_type)
+ << CompletionBlock->getType();
+ return;
+ }
+ }
+
+ auto *AsyncAttr =
+ ::new (getASTContext()) SwiftAsyncAttr(getASTContext(), AL, Kind, Idx);
+ D->addAttr(AsyncAttr);
+
+ if (auto *ErrorAttr = D->getAttr<SwiftAsyncErrorAttr>())
+ checkSwiftAsyncErrorBlock(SemaRef, D, ErrorAttr, AsyncAttr);
+}
+
+void SemaSwift::AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
+ ParameterABI abi) {
+ ASTContext &Context = getASTContext();
+ QualType type = cast<ParmVarDecl>(D)->getType();
+
+ if (auto existingAttr = D->getAttr<ParameterABIAttr>()) {
+ if (existingAttr->getABI() != abi) {
+ Diag(CI.getLoc(), diag::err_attributes_are_not_compatible)
+ << getParameterABISpelling(abi) << existingAttr
+ << (CI.isRegularKeywordAttribute() ||
+ existingAttr->isRegularKeywordAttribute());
+ Diag(existingAttr->getLocation(), diag::note_conflicting_attribute);
+ return;
+ }
+ }
+
+ switch (abi) {
+ case ParameterABI::Ordinary:
+ llvm_unreachable("explicit attribute for ordinary parameter ABI?");
+
+ case ParameterABI::SwiftContext:
+ if (!isValidSwiftContextType(type)) {
+ Diag(CI.getLoc(), diag::err_swift_abi_parameter_wrong_type)
+ << getParameterABISpelling(abi) << /*pointer to pointer */ 0 << type;
+ }
+ D->addAttr(::new (Context) SwiftContextAttr(Context, CI));
+ return;
+
+ case ParameterABI::SwiftAsyncContext:
+ if (!isValidSwiftContextType(type)) {
+ Diag(CI.getLoc(), diag::err_swift_abi_parameter_wrong_type)
+ << getParameterABISpelling(abi) << /*pointer to pointer */ 0 << type;
+ }
+ D->addAttr(::new (Context) SwiftAsyncContextAttr(Context, CI));
+ return;
+
+ case ParameterABI::SwiftErrorResult:
+ if (!isValidSwiftErrorResultType(type)) {
+ Diag(CI.getLoc(), diag::err_swift_abi_parameter_wrong_type)
+ << getParameterABISpelling(abi) << /*pointer to pointer */ 1 << type;
+ }
+ D->addAttr(::new (Context) SwiftErrorResultAttr(Context, CI));
+ return;
+
+ case ParameterABI::SwiftIndirectResult:
+ if (!isValidSwiftIndirectResultType(type)) {
+ Diag(CI.getLoc(), diag::err_swift_abi_parameter_wrong_type)
+ << getParameterABISpelling(abi) << /*pointer*/ 0 << type;
+ }
+ D->addAttr(::new (Context) SwiftIndirectResultAttr(Context, CI));
+ return;
+ }
+ llvm_unreachable("bad parameter ABI attribute");
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaSystemZ.cpp b/contrib/llvm-project/clang/lib/Sema/SemaSystemZ.cpp
new file mode 100644
index 000000000000..7e836adbee65
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaSystemZ.cpp
@@ -0,0 +1,94 @@
+//===------ SemaSystemZ.cpp ------ SystemZ target-specific routines -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to SystemZ.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaSystemZ.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/ADT/APSInt.h"
+#include <optional>
+
+namespace clang {
+
+SemaSystemZ::SemaSystemZ(Sema &S) : SemaBase(S) {}
+
+bool SemaSystemZ::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
+ CallExpr *TheCall) {
+ if (BuiltinID == SystemZ::BI__builtin_tabort) {
+ Expr *Arg = TheCall->getArg(0);
+ if (std::optional<llvm::APSInt> AbortCode =
+ Arg->getIntegerConstantExpr(getASTContext()))
+ if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256)
+ return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
+ << Arg->getSourceRange();
+ }
+
+ // For intrinsics which take an immediate value as part of the instruction,
+ // range check them here.
+ unsigned i = 0, l = 0, u = 0;
+ switch (BuiltinID) {
+ default: return false;
+ case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_verimb:
+ case SystemZ::BI__builtin_s390_verimh:
+ case SystemZ::BI__builtin_s390_verimf:
+ case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
+ case SystemZ::BI__builtin_s390_vfaeb:
+ case SystemZ::BI__builtin_s390_vfaeh:
+ case SystemZ::BI__builtin_s390_vfaef:
+ case SystemZ::BI__builtin_s390_vfaebs:
+ case SystemZ::BI__builtin_s390_vfaehs:
+ case SystemZ::BI__builtin_s390_vfaefs:
+ case SystemZ::BI__builtin_s390_vfaezb:
+ case SystemZ::BI__builtin_s390_vfaezh:
+ case SystemZ::BI__builtin_s390_vfaezf:
+ case SystemZ::BI__builtin_s390_vfaezbs:
+ case SystemZ::BI__builtin_s390_vfaezhs:
+ case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vfisb:
+ case SystemZ::BI__builtin_s390_vfidb:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 15);
+ case SystemZ::BI__builtin_s390_vftcisb:
+ case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
+ case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vstrcb:
+ case SystemZ::BI__builtin_s390_vstrch:
+ case SystemZ::BI__builtin_s390_vstrcf:
+ case SystemZ::BI__builtin_s390_vstrczb:
+ case SystemZ::BI__builtin_s390_vstrczh:
+ case SystemZ::BI__builtin_s390_vstrczf:
+ case SystemZ::BI__builtin_s390_vstrcbs:
+ case SystemZ::BI__builtin_s390_vstrchs:
+ case SystemZ::BI__builtin_s390_vstrcfs:
+ case SystemZ::BI__builtin_s390_vstrczbs:
+ case SystemZ::BI__builtin_s390_vstrczhs:
+ case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vfminsb:
+ case SystemZ::BI__builtin_s390_vfmaxsb:
+ case SystemZ::BI__builtin_s390_vfmindb:
+ case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
+ case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
+ case SystemZ::BI__builtin_s390_vclfnhs:
+ case SystemZ::BI__builtin_s390_vclfnls:
+ case SystemZ::BI__builtin_s390_vcfn:
+ case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break;
+ }
+ return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u);
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
index b619f5d729e8..ca71542d886f 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
@@ -33,9 +33,11 @@
#include "clang/Sema/Overload.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaCUDA.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
@@ -209,10 +211,11 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
AssumedTemplateKind AssumedTemplate;
LookupResult R(*this, TName, Name.getBeginLoc(), LookupOrdinaryName);
if (LookupTemplateName(R, S, SS, ObjectType, EnteringContext,
- MemberOfUnknownSpecialization, SourceLocation(),
+ /*RequiredTemplate=*/SourceLocation(),
&AssumedTemplate,
/*AllowTypoCorrection=*/!Disambiguation))
return TNK_Non_template;
+ MemberOfUnknownSpecialization = R.wasNotFoundInCurrentInstantiation();
if (AssumedTemplate != AssumedTemplateKind::None) {
TemplateResult = TemplateTy::make(Context.getAssumedTemplateName(TName));
@@ -290,7 +293,7 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
Template =
FoundUsingShadow ? TemplateName(FoundUsingShadow) : TemplateName(TD);
assert(!FoundUsingShadow || FoundUsingShadow->getTargetDecl() == TD);
- if (SS.isSet() && !SS.isInvalid()) {
+ if (!SS.isInvalid()) {
NestedNameSpecifier *Qualifier = SS.getScopeRep();
Template = Context.getQualifiedTemplateName(Qualifier, hasTemplateKeyword,
Template);
@@ -319,15 +322,12 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
bool Sema::isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc, CXXScopeSpec &SS,
ParsedTemplateTy *Template /*=nullptr*/) {
- bool MemberOfUnknownSpecialization = false;
-
// We could use redeclaration lookup here, but we don't need to: the
// syntactic form of a deduction guide is enough to identify it even
// if we can't look up the template name at all.
LookupResult R(*this, DeclarationName(&Name), NameLoc, LookupOrdinaryName);
if (LookupTemplateName(R, S, SS, /*ObjectType*/ QualType(),
- /*EnteringContext*/ false,
- MemberOfUnknownSpecialization))
+ /*EnteringContext*/ false))
return false;
if (R.empty()) return false;
@@ -343,8 +343,11 @@ bool Sema::isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
if (!TD || !getAsTypeTemplateDecl(TD))
return false;
- if (Template)
- *Template = TemplateTy::make(TemplateName(TD));
+ if (Template) {
+ TemplateName Name = Context.getQualifiedTemplateName(
+ SS.getScopeRep(), /*TemplateKeyword=*/false, TemplateName(TD));
+ *Template = TemplateTy::make(Name);
+ }
return true;
}
@@ -373,11 +376,8 @@ bool Sema::DiagnoseUnknownTemplateName(const IdentifierInfo &II,
return true;
}
-bool Sema::LookupTemplateName(LookupResult &Found,
- Scope *S, CXXScopeSpec &SS,
- QualType ObjectType,
- bool EnteringContext,
- bool &MemberOfUnknownSpecialization,
+bool Sema::LookupTemplateName(LookupResult &Found, Scope *S, CXXScopeSpec &SS,
+ QualType ObjectType, bool EnteringContext,
RequiredTemplateKind RequiredTemplate,
AssumedTemplateKind *ATK,
bool AllowTypoCorrection) {
@@ -390,7 +390,6 @@ bool Sema::LookupTemplateName(LookupResult &Found,
Found.setTemplateNameLookup(true);
// Determine where to perform name lookup
- MemberOfUnknownSpecialization = false;
DeclContext *LookupCtx = nullptr;
bool IsDependent = false;
if (!ObjectType.isNull()) {
@@ -547,7 +546,7 @@ bool Sema::LookupTemplateName(LookupResult &Found,
FilterAcceptableTemplateNames(Found, AllowFunctionTemplatesInLookup);
if (Found.empty()) {
if (IsDependent) {
- MemberOfUnknownSpecialization = true;
+ Found.setNotFoundInCurrentInstantiation();
return false;
}
@@ -722,52 +721,28 @@ void Sema::diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
Diag(Found->getLocation(), diag::note_non_template_in_template_id_found);
}
-/// ActOnDependentIdExpression - Handle a dependent id-expression that
-/// was just parsed. This is only possible with an explicit scope
-/// specifier naming a dependent type.
ExprResult
Sema::ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs) {
- DeclContext *DC = getFunctionLevelDeclContext();
-
- // C++11 [expr.prim.general]p12:
- // An id-expression that denotes a non-static data member or non-static
- // member function of a class can only be used:
- // (...)
- // - if that id-expression denotes a non-static data member and it
- // appears in an unevaluated operand.
- //
- // If this might be the case, form a DependentScopeDeclRefExpr instead of a
- // CXXDependentScopeMemberExpr. The former can instantiate to either
- // DeclRefExpr or MemberExpr depending on lookup results, while the latter is
- // always a MemberExpr.
- bool MightBeCxx11UnevalField =
- getLangOpts().CPlusPlus11 && isUnevaluatedContext();
-
- // Check if the nested name specifier is an enum type.
- bool IsEnum = false;
- if (NestedNameSpecifier *NNS = SS.getScopeRep())
- IsEnum = isa_and_nonnull<EnumType>(NNS->getAsType());
-
- if (!MightBeCxx11UnevalField && !isAddressOfOperand && !IsEnum &&
- isa<CXXMethodDecl>(DC) &&
- cast<CXXMethodDecl>(DC)->isImplicitObjectMemberFunction()) {
- QualType ThisType = cast<CXXMethodDecl>(DC)->getThisType().getNonReferenceType();
-
- // Since the 'this' expression is synthesized, we don't need to
- // perform the double-lookup check.
- NamedDecl *FirstQualifierInScope = nullptr;
+ if (SS.isEmpty()) {
+ // FIXME: This codepath is only used by dependent unqualified names
+ // (e.g. a dependent conversion-function-id, or operator= once we support
+ // it). It doesn't quite do the right thing, and it will silently fail if
+ // getCurrentThisType() returns null.
+ QualType ThisType = getCurrentThisType();
+ if (ThisType.isNull())
+ return ExprError();
return CXXDependentScopeMemberExpr::Create(
- Context, /*This=*/nullptr, ThisType,
+ Context, /*Base=*/nullptr, ThisType,
/*IsArrow=*/!Context.getLangOpts().HLSL,
- /*Op=*/SourceLocation(), SS.getWithLocInContext(Context), TemplateKWLoc,
- FirstQualifierInScope, NameInfo, TemplateArgs);
+ /*OperatorLoc=*/SourceLocation(),
+ /*QualifierLoc=*/NestedNameSpecifierLoc(), TemplateKWLoc,
+ /*FirstQualifierFoundInScope=*/nullptr, NameInfo, TemplateArgs);
}
-
return BuildDependentDeclRefExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs);
}
@@ -776,18 +751,17 @@ Sema::BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs) {
- // DependentScopeDeclRefExpr::Create requires a valid QualifierLoc
- NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
- if (!QualifierLoc)
- return ExprError();
+ // DependentScopeDeclRefExpr::Create requires a valid NestedNameSpecifierLoc
+ if (!SS.isValid())
+ return CreateRecoveryExpr(
+ SS.getBeginLoc(),
+ TemplateArgs ? TemplateArgs->getRAngleLoc() : NameInfo.getEndLoc(), {});
return DependentScopeDeclRefExpr::Create(
- Context, QualifierLoc, TemplateKWLoc, NameInfo, TemplateArgs);
+ Context, SS.getWithLocInContext(Context), TemplateKWLoc, NameInfo,
+ TemplateArgs);
}
-
-/// Determine whether we would be unable to instantiate this template (because
-/// it either has no definition, or is in the process of being instantiated).
bool Sema::DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
@@ -881,28 +855,28 @@ bool Sema::DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
return true;
}
-/// DiagnoseTemplateParameterShadow - Produce a diagnostic complaining
-/// that the template parameter 'PrevDecl' is being shadowed by a new
-/// declaration at location Loc. Returns true to indicate that this is
-/// an error, and false otherwise.
-void Sema::DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl) {
+void Sema::DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl,
+ bool SupportedForCompatibility) {
assert(PrevDecl->isTemplateParameter() && "Not a template parameter");
- // C++ [temp.local]p4:
- // A template-parameter shall not be redeclared within its
- // scope (including nested scopes).
+ // C++23 [temp.local]p6:
+ // The name of a template-parameter shall not be bound to any following
+ // declaration whose locus is contained by the scope to which the
+ // template-parameter belongs.
//
- // Make this a warning when MSVC compatibility is requested.
- unsigned DiagId = getLangOpts().MSVCCompat ? diag::ext_template_param_shadow
- : diag::err_template_param_shadow;
+ // When MSVC compatibility is enabled, the diagnostic is always a warning
+ // by default. Otherwise, it is an error unless SupportedForCompatibility is
+ // true, in which case it is a default-to-error warning.
+ unsigned DiagId =
+ getLangOpts().MSVCCompat
+ ? diag::ext_template_param_shadow
+ : (SupportedForCompatibility ? diag::ext_compat_template_param_shadow
+ : diag::err_template_param_shadow);
const auto *ND = cast<NamedDecl>(PrevDecl);
Diag(Loc, DiagId) << ND->getDeclName();
NoteTemplateParameterLocation(*ND);
}
-/// AdjustDeclIfTemplate - If the given decl happens to be a template, reset
-/// the parameter D to reference the templated declaration and return a pointer
-/// to the template declaration. Otherwise, do nothing to D and return null.
TemplateDecl *Sema::AdjustDeclIfTemplate(Decl *&D) {
if (TemplateDecl *Temp = dyn_cast_or_null<TemplateDecl>(D)) {
D = Temp->getTemplatedDecl();
@@ -956,8 +930,6 @@ static TemplateArgumentLoc translateTemplateArgument(Sema &SemaRef,
llvm_unreachable("Unhandled parsed template argument");
}
-/// Translates template arguments as provided by the parser
-/// into template arguments used by semantic analysis.
void Sema::translateTemplateArguments(const ASTTemplateArgsPtr &TemplateArgsIn,
TemplateArgumentListInfo &TemplateArgs) {
for (unsigned I = 0, Last = TemplateArgsIn.size(); I != Last; ++I)
@@ -967,17 +939,14 @@ void Sema::translateTemplateArguments(const ASTTemplateArgsPtr &TemplateArgsIn,
static void maybeDiagnoseTemplateParameterShadow(Sema &SemaRef, Scope *S,
SourceLocation Loc,
- IdentifierInfo *Name) {
- NamedDecl *PrevDecl = SemaRef.LookupSingleName(
- S, Name, Loc, Sema::LookupOrdinaryName, Sema::ForVisibleRedeclaration);
+ const IdentifierInfo *Name) {
+ NamedDecl *PrevDecl =
+ SemaRef.LookupSingleName(S, Name, Loc, Sema::LookupOrdinaryName,
+ RedeclarationKind::ForVisibleRedeclaration);
if (PrevDecl && PrevDecl->isTemplateParameter())
SemaRef.DiagnoseTemplateParameterShadow(Loc, PrevDecl);
}
-/// Convert a parsed type into a parsed template argument. This is mostly
-/// trivial, except that we may have parsed a C++17 deduced class template
-/// specialization type, in which case we should form a template template
-/// argument instead of a type template argument.
ParsedTemplateArgument Sema::ActOnTemplateTypeArgument(TypeResult ParsedType) {
TypeSourceInfo *TInfo;
QualType T = GetTypeFromParser(ParsedType.get(), &TInfo);
@@ -1003,10 +972,6 @@ ParsedTemplateArgument Sema::ActOnTemplateTypeArgument(TypeResult ParsedType) {
if (auto DTST = TL.getAs<DeducedTemplateSpecializationTypeLoc>()) {
TemplateName Name = DTST.getTypePtr()->getTemplateName();
- if (SS.isSet())
- Name = Context.getQualifiedTemplateName(SS.getScopeRep(),
- /*HasTemplateKeyword=*/false,
- Name);
ParsedTemplateArgument Result(SS, TemplateTy::make(Name),
DTST.getTemplateNameLoc());
if (EllipsisLoc.isValid())
@@ -1024,15 +989,6 @@ ParsedTemplateArgument Sema::ActOnTemplateTypeArgument(TypeResult ParsedType) {
TInfo->getTypeLoc().getBeginLoc());
}
-/// ActOnTypeParameter - Called when a C++ template type parameter
-/// (e.g., "typename T") has been parsed. Typename specifies whether
-/// the keyword "typename" was used to declare the type parameter
-/// (otherwise, "class" was used), and KeyLoc is the location of the
-/// "class" or "typename" keyword. ParamName is the name of the
-/// parameter (NULL indicates an unnamed template parameter) and
-/// ParamNameLoc is the location of the parameter name (if any).
-/// If the type parameter has a default argument, it will be added
-/// later via ActOnTypeParameterDefault.
NamedDecl *Sema::ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
@@ -1091,7 +1047,8 @@ NamedDecl *Sema::ActOnTypeParameter(Scope *S, bool Typename,
return Param;
}
- Param->setDefaultArgument(DefaultTInfo);
+ Param->setDefaultArgument(
+ Context, TemplateArgumentLoc(DefaultTInfo->getType(), DefaultTInfo));
}
return Param;
@@ -1153,6 +1110,7 @@ bool Sema::BuildTypeConstraint(const CXXScopeSpec &SS,
TemplateName TN = TypeConstr->Template.get();
ConceptDecl *CD = cast<ConceptDecl>(TN.getAsTemplateDecl());
+ UsingShadowDecl *USD = TN.getAsUsingShadowDecl();
DeclarationNameInfo ConceptName(DeclarationName(TypeConstr->Name),
TypeConstr->TemplateNameLoc);
@@ -1171,15 +1129,15 @@ bool Sema::BuildTypeConstraint(const CXXScopeSpec &SS,
}
return AttachTypeConstraint(
SS.isSet() ? SS.getWithLocInContext(Context) : NestedNameSpecifierLoc(),
- ConceptName, CD,
+ ConceptName, CD, /*FoundDecl=*/USD ? cast<NamedDecl>(USD) : CD,
TypeConstr->LAngleLoc.isValid() ? &TemplateArgs : nullptr,
ConstrainedParameter, EllipsisLoc);
}
-template<typename ArgumentLocAppender>
+template <typename ArgumentLocAppender>
static ExprResult formImmediatelyDeclaredConstraint(
Sema &S, NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo,
- ConceptDecl *NamedConcept, SourceLocation LAngleLoc,
+ ConceptDecl *NamedConcept, NamedDecl *FoundDecl, SourceLocation LAngleLoc,
SourceLocation RAngleLoc, QualType ConstrainedType,
SourceLocation ParamNameLoc, ArgumentLocAppender Appender,
SourceLocation EllipsisLoc) {
@@ -1200,7 +1158,8 @@ static ExprResult formImmediatelyDeclaredConstraint(
SS.Adopt(NS);
ExprResult ImmediatelyDeclaredConstraint = S.CheckConceptTemplateId(
SS, /*TemplateKWLoc=*/SourceLocation(), NameInfo,
- /*FoundDecl=*/NamedConcept, NamedConcept, &ConstraintArgs);
+ /*FoundDecl=*/FoundDecl ? FoundDecl : NamedConcept, NamedConcept,
+ &ConstraintArgs);
if (ImmediatelyDeclaredConstraint.isInvalid() || !EllipsisLoc.isValid())
return ImmediatelyDeclaredConstraint;
@@ -1224,13 +1183,9 @@ static ExprResult formImmediatelyDeclaredConstraint(
/*NumExpansions=*/std::nullopt);
}
-/// Attach a type-constraint to a template parameter.
-/// \returns true if an error occurred. This can happen if the
-/// immediately-declared constraint could not be formed (e.g. incorrect number
-/// of arguments for the named concept).
bool Sema::AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
- ConceptDecl *NamedConcept,
+ ConceptDecl *NamedConcept, NamedDecl *FoundDecl,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc) {
@@ -1243,24 +1198,24 @@ bool Sema::AttachTypeConstraint(NestedNameSpecifierLoc NS,
QualType ParamAsArgument(ConstrainedParameter->getTypeForDecl(), 0);
- ExprResult ImmediatelyDeclaredConstraint =
- formImmediatelyDeclaredConstraint(
- *this, NS, NameInfo, NamedConcept,
- TemplateArgs ? TemplateArgs->getLAngleLoc() : SourceLocation(),
- TemplateArgs ? TemplateArgs->getRAngleLoc() : SourceLocation(),
- ParamAsArgument, ConstrainedParameter->getLocation(),
- [&] (TemplateArgumentListInfo &ConstraintArgs) {
- if (TemplateArgs)
- for (const auto &ArgLoc : TemplateArgs->arguments())
- ConstraintArgs.addArgument(ArgLoc);
- }, EllipsisLoc);
+ ExprResult ImmediatelyDeclaredConstraint = formImmediatelyDeclaredConstraint(
+ *this, NS, NameInfo, NamedConcept, FoundDecl,
+ TemplateArgs ? TemplateArgs->getLAngleLoc() : SourceLocation(),
+ TemplateArgs ? TemplateArgs->getRAngleLoc() : SourceLocation(),
+ ParamAsArgument, ConstrainedParameter->getLocation(),
+ [&](TemplateArgumentListInfo &ConstraintArgs) {
+ if (TemplateArgs)
+ for (const auto &ArgLoc : TemplateArgs->arguments())
+ ConstraintArgs.addArgument(ArgLoc);
+ },
+ EllipsisLoc);
if (ImmediatelyDeclaredConstraint.isInvalid())
return true;
auto *CL = ConceptReference::Create(Context, /*NNS=*/NS,
/*TemplateKWLoc=*/SourceLocation{},
/*ConceptNameInfo=*/NameInfo,
- /*FoundDecl=*/NamedConcept,
+ /*FoundDecl=*/FoundDecl,
/*NamedConcept=*/NamedConcept,
/*ArgsWritten=*/ArgsAsWritten);
ConstrainedParameter->setTypeConstraint(CL,
@@ -1290,8 +1245,9 @@ bool Sema::AttachTypeConstraint(AutoTypeLoc TL,
return true;
ExprResult ImmediatelyDeclaredConstraint = formImmediatelyDeclaredConstraint(
*this, TL.getNestedNameSpecifierLoc(), TL.getConceptNameInfo(),
- TL.getNamedConcept(), TL.getLAngleLoc(), TL.getRAngleLoc(),
- BuildDecltypeType(Ref), OrigConstrainedParm->getLocation(),
+ TL.getNamedConcept(), /*FoundDecl=*/TL.getFoundDecl(), TL.getLAngleLoc(),
+ TL.getRAngleLoc(), BuildDecltypeType(Ref),
+ OrigConstrainedParm->getLocation(),
[&](TemplateArgumentListInfo &ConstraintArgs) {
for (unsigned I = 0, C = TL.getNumArgs(); I != C; ++I)
ConstraintArgs.addArgument(TL.getArgLoc(I));
@@ -1306,11 +1262,6 @@ bool Sema::AttachTypeConstraint(AutoTypeLoc TL,
return false;
}
-/// Check that the type of a non-type template parameter is
-/// well-formed.
-///
-/// \returns the (possibly-promoted) parameter type if valid;
-/// otherwise, produces a diagnostic and returns a NULL type.
QualType Sema::CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc) {
if (TSI->getType()->isUndeducedType()) {
@@ -1325,9 +1276,6 @@ QualType Sema::CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
return CheckNonTypeTemplateParameterType(TSI->getType(), Loc);
}
-/// Require the given type to be a structural type, and diagnose if it is not.
-///
-/// \return \c true if an error was produced.
bool Sema::RequireStructuralType(QualType T, SourceLocation Loc) {
if (T->isDependentType())
return false;
@@ -1572,7 +1520,7 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
CheckFunctionOrTemplateParamDeclarator(S, D);
- IdentifierInfo *ParamName = D.getIdentifier();
+ const IdentifierInfo *ParamName = D.getIdentifier();
bool IsParameterPack = D.hasEllipsis();
NonTypeTemplateParmDecl *Param = NonTypeTemplateParmDecl::Create(
Context, Context.getTranslationUnitDecl(), D.getBeginLoc(),
@@ -1615,35 +1563,28 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
if (DiagnoseUnexpandedParameterPack(Default, UPPC_DefaultArgument))
return Param;
- Param->setDefaultArgument(Default);
+ Param->setDefaultArgument(
+ Context, getTrivialTemplateArgumentLoc(TemplateArgument(Default),
+ QualType(), SourceLocation()));
}
return Param;
}
-/// ActOnTemplateTemplateParameter - Called when a C++ template template
-/// parameter (e.g. T in template <template \<typename> class T> class array)
-/// has been parsed. S is the current scope.
-NamedDecl *Sema::ActOnTemplateTemplateParameter(Scope* S,
- SourceLocation TmpLoc,
- TemplateParameterList *Params,
- SourceLocation EllipsisLoc,
- IdentifierInfo *Name,
- SourceLocation NameLoc,
- unsigned Depth,
- unsigned Position,
- SourceLocation EqualLoc,
- ParsedTemplateArgument Default) {
+NamedDecl *Sema::ActOnTemplateTemplateParameter(
+ Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params,
+ bool Typename, SourceLocation EllipsisLoc, IdentifierInfo *Name,
+ SourceLocation NameLoc, unsigned Depth, unsigned Position,
+ SourceLocation EqualLoc, ParsedTemplateArgument Default) {
assert(S->isTemplateParamScope() &&
"Template template parameter not in template parameter scope!");
// Construct the parameter object.
bool IsParameterPack = EllipsisLoc.isValid();
- TemplateTemplateParmDecl *Param =
- TemplateTemplateParmDecl::Create(Context, Context.getTranslationUnitDecl(),
- NameLoc.isInvalid()? TmpLoc : NameLoc,
- Depth, Position, IsParameterPack,
- Name, Params);
+ TemplateTemplateParmDecl *Param = TemplateTemplateParmDecl::Create(
+ Context, Context.getTranslationUnitDecl(),
+ NameLoc.isInvalid() ? TmpLoc : NameLoc, Depth, Position, IsParameterPack,
+ Name, Typename, Params);
Param->setAccess(AS_public);
if (Param->isParameterPack())
@@ -1796,9 +1737,6 @@ bool Sema::ConstraintExpressionDependsOnEnclosingTemplate(
return Checker.getResult();
}
-/// ActOnTemplateParameterList - Builds a TemplateParameterList, optionally
-/// constrained by RequiresClause, that contains the template parameters in
-/// Params.
TemplateParameterList *
Sema::ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
@@ -1826,12 +1764,12 @@ static void SetNestedNameSpecifier(Sema &S, TagDecl *T,
// Returns the template parameter list with all default template argument
// information.
-static TemplateParameterList *GetTemplateParameterList(TemplateDecl *TD) {
+TemplateParameterList *Sema::GetTemplateParameterList(TemplateDecl *TD) {
// Make sure we get the template parameter list from the most
// recent declaration, since that is the only one that is guaranteed to
// have all the default template argument information.
Decl *D = TD->getMostRecentDecl();
- // C++11 [temp.param]p12:
+ // C++11 N3337 [temp.param]p12:
// A default template argument shall not be specified in a friend class
// template declaration.
//
@@ -1862,7 +1800,8 @@ DeclResult Sema::CheckClassTemplate(
TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody) {
assert(TemplateParams && TemplateParams->size() > 0 &&
"No template parameters");
- assert(TUK != TUK_Reference && "Can only declare or define class templates");
+ assert(TUK != TagUseKind::Reference &&
+ "Can only declare or define class templates");
bool Invalid = false;
// Check that we can declare a template here.
@@ -1884,8 +1823,9 @@ DeclResult Sema::CheckClassTemplate(
// C++11 [basic.lookup.elab]p2).
DeclContext *SemanticContext;
LookupResult Previous(*this, Name, NameLoc,
- (SS.isEmpty() && TUK == TUK_Friend)
- ? LookupTagName : LookupOrdinaryName,
+ (SS.isEmpty() && TUK == TagUseKind::Friend)
+ ? LookupTagName
+ : LookupOrdinaryName,
forRedeclarationInCurContext());
if (SS.isNotEmpty() && !SS.isInvalid()) {
SemanticContext = computeDeclContext(SS, true);
@@ -1893,11 +1833,11 @@ DeclResult Sema::CheckClassTemplate(
// FIXME: Horrible, horrible hack! We can't currently represent this
// in the AST, and historically we have just ignored such friend
// class templates, so don't complain here.
- Diag(NameLoc, TUK == TUK_Friend
+ Diag(NameLoc, TUK == TagUseKind::Friend
? diag::warn_template_qualified_friend_ignored
: diag::err_template_qualified_declarator_no_match)
<< SS.getScopeRep() << SS.getRange();
- return TUK != TUK_Friend;
+ return TUK != TagUseKind::Friend;
}
if (RequireCompleteDeclContext(SS, SemanticContext))
@@ -1910,8 +1850,12 @@ DeclResult Sema::CheckClassTemplate(
ContextRAII SavedContext(*this, SemanticContext);
if (RebuildTemplateParamsInCurrentInstantiation(TemplateParams))
Invalid = true;
- } else if (TUK != TUK_Friend && TUK != TUK_Reference)
- diagnoseQualifiedDeclaration(SS, SemanticContext, Name, NameLoc, false);
+ }
+
+ if (TUK != TagUseKind::Friend && TUK != TagUseKind::Reference)
+ diagnoseQualifiedDeclaration(SS, SemanticContext, Name, NameLoc,
+ /*TemplateId=*/nullptr,
+ /*IsMemberSpecialization*/ false);
LookupQualifiedName(Previous, SemanticContext);
} else {
@@ -1921,7 +1865,7 @@ DeclResult Sema::CheckClassTemplate(
// If T is the name of a class, then each of the following shall have a
// name different from T:
// -- every member template of class T
- if (TUK != TUK_Friend &&
+ if (TUK != TagUseKind::Friend &&
DiagnoseClassNameShadow(SemanticContext,
DeclarationNameInfo(Name, NameLoc)))
return true;
@@ -1951,7 +1895,7 @@ DeclResult Sema::CheckClassTemplate(
// We may have found the injected-class-name of a class template,
// class template partial specialization, or class template specialization.
// In these cases, grab the template that is being defined or specialized.
- if (!PrevClassTemplate && PrevDecl && isa<CXXRecordDecl>(PrevDecl) &&
+ if (!PrevClassTemplate && isa_and_nonnull<CXXRecordDecl>(PrevDecl) &&
cast<CXXRecordDecl>(PrevDecl)->isInjectedClassName()) {
PrevDecl = cast<CXXRecordDecl>(PrevDecl->getDeclContext());
PrevClassTemplate
@@ -1963,7 +1907,7 @@ DeclResult Sema::CheckClassTemplate(
}
}
- if (TUK == TUK_Friend) {
+ if (TUK == TagUseKind::Friend) {
// C++ [namespace.memdef]p3:
// [...] When looking for a prior declaration of a class or a function
// declared as a friend, and when the name of the friend class or
@@ -2000,9 +1944,8 @@ DeclResult Sema::CheckClassTemplate(
PrevDecl = (*Previous.begin())->getUnderlyingDecl();
}
}
- } else if (PrevDecl &&
- !isDeclInScope(Previous.getRepresentativeDecl(), SemanticContext,
- S, SS.isValid()))
+ } else if (PrevDecl && !isDeclInScope(Previous.getRepresentativeDecl(),
+ SemanticContext, S, SS.isValid()))
PrevDecl = PrevClassTemplate = nullptr;
if (auto *Shadow = dyn_cast_or_null<UsingShadowDecl>(
@@ -2024,7 +1967,7 @@ DeclResult Sema::CheckClassTemplate(
// Ensure that the template parameter lists are compatible. Skip this check
// for a friend in a dependent context: the template parameter list itself
// could be dependent.
- if (!(TUK == TUK_Friend && CurContext->isDependentContext()) &&
+ if (!(TUK == TagUseKind::Friend && CurContext->isDependentContext()) &&
!TemplateParameterListsAreEqual(
TemplateCompareNewDeclInfo(SemanticContext ? SemanticContext
: CurContext,
@@ -2040,8 +1983,8 @@ DeclResult Sema::CheckClassTemplate(
// the class-key shall agree in kind with the original class
// template declaration (7.1.5.3).
RecordDecl *PrevRecordDecl = PrevClassTemplate->getTemplatedDecl();
- if (!isAcceptableTagRedeclaration(PrevRecordDecl, Kind,
- TUK == TUK_Definition, KWLoc, Name)) {
+ if (!isAcceptableTagRedeclaration(
+ PrevRecordDecl, Kind, TUK == TagUseKind::Definition, KWLoc, Name)) {
Diag(KWLoc, diag::err_use_with_wrong_tag)
<< Name
<< FixItHint::CreateReplacement(KWLoc, PrevRecordDecl->getKindName());
@@ -2050,7 +1993,7 @@ DeclResult Sema::CheckClassTemplate(
}
// Check for redefinition of this class template.
- if (TUK == TUK_Definition) {
+ if (TUK == TagUseKind::Definition) {
if (TagDecl *Def = PrevRecordDecl->getDefinition()) {
// If we have a prior definition that is not visible, treat this as
// simply making that previous definition visible.
@@ -2087,7 +2030,7 @@ DeclResult Sema::CheckClassTemplate(
// merging in the template parameter list from the previous class
// template declaration. Skip this check for a friend in a dependent
// context, because the template parameter list might be dependent.
- if (!(TUK == TUK_Friend && CurContext->isDependentContext()) &&
+ if (!(TUK == TagUseKind::Friend && CurContext->isDependentContext()) &&
CheckTemplateParameterList(
TemplateParams,
PrevClassTemplate ? GetTemplateParameterList(PrevClassTemplate)
@@ -2095,8 +2038,8 @@ DeclResult Sema::CheckClassTemplate(
(SS.isSet() && SemanticContext && SemanticContext->isRecord() &&
SemanticContext->isDependentContext())
? TPC_ClassTemplateMember
- : TUK == TUK_Friend ? TPC_FriendClassTemplate
- : TPC_ClassTemplate,
+ : TUK == TagUseKind::Friend ? TPC_FriendClassTemplate
+ : TPC_ClassTemplate,
SkipBody))
Invalid = true;
@@ -2104,9 +2047,10 @@ DeclResult Sema::CheckClassTemplate(
// If the name of the template was qualified, we must be defining the
// template out-of-line.
if (!SS.isInvalid() && !Invalid && !PrevClassTemplate) {
- Diag(NameLoc, TUK == TUK_Friend ? diag::err_friend_decl_does_not_match
- : diag::err_member_decl_does_not_match)
- << Name << SemanticContext << /*IsDefinition*/true << SS.getRange();
+ Diag(NameLoc, TUK == TagUseKind::Friend
+ ? diag::err_friend_decl_does_not_match
+ : diag::err_member_decl_does_not_match)
+ << Name << SemanticContext << /*IsDefinition*/ true << SS.getRange();
Invalid = true;
}
}
@@ -2116,8 +2060,8 @@ DeclResult Sema::CheckClassTemplate(
// recent declaration tricking the template instantiator to make substitutions
// there.
// FIXME: Figure out how to combine with shouldLinkDependentDeclWithPrevious
- bool ShouldAddRedecl
- = !(TUK == TUK_Friend && CurContext->isDependentContext());
+ bool ShouldAddRedecl =
+ !(TUK == TagUseKind::Friend && CurContext->isDependentContext());
CXXRecordDecl *NewClass =
CXXRecordDecl::Create(Context, Kind, SemanticContext, KWLoc, NameLoc, Name,
@@ -2132,7 +2076,7 @@ DeclResult Sema::CheckClassTemplate(
// Add alignment attributes if necessary; these attributes are checked when
// the ASTContext lays out the structure.
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
+ if (TUK == TagUseKind::Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
AddAlignmentAttributesForRecord(NewClass);
AddMsStructLayoutForRecord(NewClass);
}
@@ -2163,25 +2107,28 @@ DeclResult Sema::CheckClassTemplate(
PrevClassTemplate->setMemberSpecialization();
// Set the access specifier.
- if (!Invalid && TUK != TUK_Friend && NewTemplate->getDeclContext()->isRecord())
+ if (!Invalid && TUK != TagUseKind::Friend &&
+ NewTemplate->getDeclContext()->isRecord())
SetMemberAccessSpecifier(NewTemplate, PrevClassTemplate, AS);
// Set the lexical context of these templates
NewClass->setLexicalDeclContext(CurContext);
NewTemplate->setLexicalDeclContext(CurContext);
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip))
+ if (TUK == TagUseKind::Definition && (!SkipBody || !SkipBody->ShouldSkip))
NewClass->startDefinition();
ProcessDeclAttributeList(S, NewClass, Attr);
+ ProcessAPINotes(NewClass);
if (PrevClassTemplate)
mergeDeclAttributes(NewClass, PrevClassTemplate->getTemplatedDecl());
AddPushedVisibilityAttribute(NewClass);
inferGslOwnerPointerAttribute(NewClass);
+ inferNullableClassAttribute(NewClass);
- if (TUK != TUK_Friend) {
+ if (TUK != TagUseKind::Friend) {
// Per C++ [basic.scope.temp]p2, skip the template parameter scopes.
Scope *Outer = S;
while ((Outer->getFlags() & Scope::TemplateParamScope) != 0)
@@ -2226,581 +2173,6 @@ DeclResult Sema::CheckClassTemplate(
return NewTemplate;
}
-namespace {
-/// Tree transform to "extract" a transformed type from a class template's
-/// constructor to a deduction guide.
-class ExtractTypeForDeductionGuide
- : public TreeTransform<ExtractTypeForDeductionGuide> {
- llvm::SmallVectorImpl<TypedefNameDecl *> &MaterializedTypedefs;
-
-public:
- typedef TreeTransform<ExtractTypeForDeductionGuide> Base;
- ExtractTypeForDeductionGuide(
- Sema &SemaRef,
- llvm::SmallVectorImpl<TypedefNameDecl *> &MaterializedTypedefs)
- : Base(SemaRef), MaterializedTypedefs(MaterializedTypedefs) {}
-
- TypeSourceInfo *transform(TypeSourceInfo *TSI) { return TransformType(TSI); }
-
- QualType TransformTypedefType(TypeLocBuilder &TLB, TypedefTypeLoc TL) {
- ASTContext &Context = SemaRef.getASTContext();
- TypedefNameDecl *OrigDecl = TL.getTypedefNameDecl();
- TypedefNameDecl *Decl = OrigDecl;
- // Transform the underlying type of the typedef and clone the Decl only if
- // the typedef has a dependent context.
- if (OrigDecl->getDeclContext()->isDependentContext()) {
- TypeLocBuilder InnerTLB;
- QualType Transformed =
- TransformType(InnerTLB, OrigDecl->getTypeSourceInfo()->getTypeLoc());
- TypeSourceInfo *TSI = InnerTLB.getTypeSourceInfo(Context, Transformed);
- if (isa<TypeAliasDecl>(OrigDecl))
- Decl = TypeAliasDecl::Create(
- Context, Context.getTranslationUnitDecl(), OrigDecl->getBeginLoc(),
- OrigDecl->getLocation(), OrigDecl->getIdentifier(), TSI);
- else {
- assert(isa<TypedefDecl>(OrigDecl) && "Not a Type alias or typedef");
- Decl = TypedefDecl::Create(
- Context, Context.getTranslationUnitDecl(), OrigDecl->getBeginLoc(),
- OrigDecl->getLocation(), OrigDecl->getIdentifier(), TSI);
- }
- MaterializedTypedefs.push_back(Decl);
- }
-
- QualType TDTy = Context.getTypedefType(Decl);
- TypedefTypeLoc TypedefTL = TLB.push<TypedefTypeLoc>(TDTy);
- TypedefTL.setNameLoc(TL.getNameLoc());
-
- return TDTy;
- }
-};
-
-/// Transform to convert portions of a constructor declaration into the
-/// corresponding deduction guide, per C++1z [over.match.class.deduct]p1.
-struct ConvertConstructorToDeductionGuideTransform {
- ConvertConstructorToDeductionGuideTransform(Sema &S,
- ClassTemplateDecl *Template)
- : SemaRef(S), Template(Template) {
- // If the template is nested, then we need to use the original
- // pattern to iterate over the constructors.
- ClassTemplateDecl *Pattern = Template;
- while (Pattern->getInstantiatedFromMemberTemplate()) {
- if (Pattern->isMemberSpecialization())
- break;
- Pattern = Pattern->getInstantiatedFromMemberTemplate();
- NestedPattern = Pattern;
- }
-
- if (NestedPattern)
- OuterInstantiationArgs = SemaRef.getTemplateInstantiationArgs(Template);
- }
-
- Sema &SemaRef;
- ClassTemplateDecl *Template;
- ClassTemplateDecl *NestedPattern = nullptr;
-
- DeclContext *DC = Template->getDeclContext();
- CXXRecordDecl *Primary = Template->getTemplatedDecl();
- DeclarationName DeductionGuideName =
- SemaRef.Context.DeclarationNames.getCXXDeductionGuideName(Template);
-
- QualType DeducedType = SemaRef.Context.getTypeDeclType(Primary);
-
- // Index adjustment to apply to convert depth-1 template parameters into
- // depth-0 template parameters.
- unsigned Depth1IndexAdjustment = Template->getTemplateParameters()->size();
-
- // Instantiation arguments for the outermost depth-1 templates
- // when the template is nested
- MultiLevelTemplateArgumentList OuterInstantiationArgs;
-
- /// Transform a constructor declaration into a deduction guide.
- NamedDecl *transformConstructor(FunctionTemplateDecl *FTD,
- CXXConstructorDecl *CD) {
- SmallVector<TemplateArgument, 16> SubstArgs;
-
- LocalInstantiationScope Scope(SemaRef);
-
- // C++ [over.match.class.deduct]p1:
- // -- For each constructor of the class template designated by the
- // template-name, a function template with the following properties:
-
- // -- The template parameters are the template parameters of the class
- // template followed by the template parameters (including default
- // template arguments) of the constructor, if any.
- TemplateParameterList *TemplateParams = GetTemplateParameterList(Template);
- if (FTD) {
- TemplateParameterList *InnerParams = FTD->getTemplateParameters();
- SmallVector<NamedDecl *, 16> AllParams;
- SmallVector<TemplateArgument, 16> Depth1Args;
- AllParams.reserve(TemplateParams->size() + InnerParams->size());
- AllParams.insert(AllParams.begin(),
- TemplateParams->begin(), TemplateParams->end());
- SubstArgs.reserve(InnerParams->size());
- Depth1Args.reserve(InnerParams->size());
-
- // Later template parameters could refer to earlier ones, so build up
- // a list of substituted template arguments as we go.
- for (NamedDecl *Param : *InnerParams) {
- MultiLevelTemplateArgumentList Args;
- Args.setKind(TemplateSubstitutionKind::Rewrite);
- Args.addOuterTemplateArguments(Depth1Args);
- Args.addOuterRetainedLevel();
- if (NestedPattern)
- Args.addOuterRetainedLevels(NestedPattern->getTemplateDepth());
- NamedDecl *NewParam = transformTemplateParameter(Param, Args);
- if (!NewParam)
- return nullptr;
-
- // Constraints require that we substitute depth-1 arguments
- // to match depths when substituted for evaluation later
- Depth1Args.push_back(SemaRef.Context.getCanonicalTemplateArgument(
- SemaRef.Context.getInjectedTemplateArg(NewParam)));
-
- if (NestedPattern) {
- TemplateDeclInstantiator Instantiator(SemaRef, DC,
- OuterInstantiationArgs);
- Instantiator.setEvaluateConstraints(false);
- SemaRef.runWithSufficientStackSpace(NewParam->getLocation(), [&] {
- NewParam = cast<NamedDecl>(Instantiator.Visit(NewParam));
- });
- }
-
- assert(NewParam->getTemplateDepth() == 0 &&
- "Unexpected template parameter depth");
-
- AllParams.push_back(NewParam);
- SubstArgs.push_back(SemaRef.Context.getCanonicalTemplateArgument(
- SemaRef.Context.getInjectedTemplateArg(NewParam)));
- }
-
- // Substitute new template parameters into requires-clause if present.
- Expr *RequiresClause = nullptr;
- if (Expr *InnerRC = InnerParams->getRequiresClause()) {
- MultiLevelTemplateArgumentList Args;
- Args.setKind(TemplateSubstitutionKind::Rewrite);
- Args.addOuterTemplateArguments(Depth1Args);
- Args.addOuterRetainedLevel();
- if (NestedPattern)
- Args.addOuterRetainedLevels(NestedPattern->getTemplateDepth());
- ExprResult E = SemaRef.SubstExpr(InnerRC, Args);
- if (E.isInvalid())
- return nullptr;
- RequiresClause = E.getAs<Expr>();
- }
-
- TemplateParams = TemplateParameterList::Create(
- SemaRef.Context, InnerParams->getTemplateLoc(),
- InnerParams->getLAngleLoc(), AllParams, InnerParams->getRAngleLoc(),
- RequiresClause);
- }
-
- // If we built a new template-parameter-list, track that we need to
- // substitute references to the old parameters into references to the
- // new ones.
- MultiLevelTemplateArgumentList Args;
- Args.setKind(TemplateSubstitutionKind::Rewrite);
- if (FTD) {
- Args.addOuterTemplateArguments(SubstArgs);
- Args.addOuterRetainedLevel();
- }
-
- if (NestedPattern)
- Args.addOuterRetainedLevels(NestedPattern->getTemplateDepth());
-
- FunctionProtoTypeLoc FPTL = CD->getTypeSourceInfo()->getTypeLoc()
- .getAsAdjusted<FunctionProtoTypeLoc>();
- assert(FPTL && "no prototype for constructor declaration");
-
- // Transform the type of the function, adjusting the return type and
- // replacing references to the old parameters with references to the
- // new ones.
- TypeLocBuilder TLB;
- SmallVector<ParmVarDecl*, 8> Params;
- SmallVector<TypedefNameDecl *, 4> MaterializedTypedefs;
- QualType NewType = transformFunctionProtoType(TLB, FPTL, Params, Args,
- MaterializedTypedefs);
- if (NewType.isNull())
- return nullptr;
- TypeSourceInfo *NewTInfo = TLB.getTypeSourceInfo(SemaRef.Context, NewType);
-
- return buildDeductionGuide(TemplateParams, CD, CD->getExplicitSpecifier(),
- NewTInfo, CD->getBeginLoc(), CD->getLocation(),
- CD->getEndLoc(), MaterializedTypedefs);
- }
-
- /// Build a deduction guide with the specified parameter types.
- NamedDecl *buildSimpleDeductionGuide(MutableArrayRef<QualType> ParamTypes) {
- SourceLocation Loc = Template->getLocation();
-
- // Build the requested type.
- FunctionProtoType::ExtProtoInfo EPI;
- EPI.HasTrailingReturn = true;
- QualType Result = SemaRef.BuildFunctionType(DeducedType, ParamTypes, Loc,
- DeductionGuideName, EPI);
- TypeSourceInfo *TSI = SemaRef.Context.getTrivialTypeSourceInfo(Result, Loc);
- if (NestedPattern)
- TSI = SemaRef.SubstType(TSI, OuterInstantiationArgs, Loc,
- DeductionGuideName);
-
- FunctionProtoTypeLoc FPTL =
- TSI->getTypeLoc().castAs<FunctionProtoTypeLoc>();
-
- // Build the parameters, needed during deduction / substitution.
- SmallVector<ParmVarDecl*, 4> Params;
- for (auto T : ParamTypes) {
- auto *TSI = SemaRef.Context.getTrivialTypeSourceInfo(T, Loc);
- if (NestedPattern)
- TSI = SemaRef.SubstType(TSI, OuterInstantiationArgs, Loc,
- DeclarationName());
- ParmVarDecl *NewParam =
- ParmVarDecl::Create(SemaRef.Context, DC, Loc, Loc, nullptr,
- TSI->getType(), TSI, SC_None, nullptr);
- NewParam->setScopeInfo(0, Params.size());
- FPTL.setParam(Params.size(), NewParam);
- Params.push_back(NewParam);
- }
-
- return buildDeductionGuide(GetTemplateParameterList(Template), nullptr,
- ExplicitSpecifier(), TSI, Loc, Loc, Loc);
- }
-
-private:
- /// Transform a constructor template parameter into a deduction guide template
- /// parameter, rebuilding any internal references to earlier parameters and
- /// renumbering as we go.
- NamedDecl *transformTemplateParameter(NamedDecl *TemplateParam,
- MultiLevelTemplateArgumentList &Args) {
- if (auto *TTP = dyn_cast<TemplateTypeParmDecl>(TemplateParam)) {
- // TemplateTypeParmDecl's index cannot be changed after creation, so
- // substitute it directly.
- auto *NewTTP = TemplateTypeParmDecl::Create(
- SemaRef.Context, DC, TTP->getBeginLoc(), TTP->getLocation(),
- TTP->getDepth() - 1, Depth1IndexAdjustment + TTP->getIndex(),
- TTP->getIdentifier(), TTP->wasDeclaredWithTypename(),
- TTP->isParameterPack(), TTP->hasTypeConstraint(),
- TTP->isExpandedParameterPack()
- ? std::optional<unsigned>(TTP->getNumExpansionParameters())
- : std::nullopt);
- if (const auto *TC = TTP->getTypeConstraint())
- SemaRef.SubstTypeConstraint(NewTTP, TC, Args,
- /*EvaluateConstraint*/ true);
- if (TTP->hasDefaultArgument()) {
- TypeSourceInfo *InstantiatedDefaultArg =
- SemaRef.SubstType(TTP->getDefaultArgumentInfo(), Args,
- TTP->getDefaultArgumentLoc(), TTP->getDeclName());
- if (InstantiatedDefaultArg)
- NewTTP->setDefaultArgument(InstantiatedDefaultArg);
- }
- SemaRef.CurrentInstantiationScope->InstantiatedLocal(TemplateParam,
- NewTTP);
- return NewTTP;
- }
-
- if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(TemplateParam))
- return transformTemplateParameterImpl(TTP, Args);
-
- return transformTemplateParameterImpl(
- cast<NonTypeTemplateParmDecl>(TemplateParam), Args);
- }
- template<typename TemplateParmDecl>
- TemplateParmDecl *
- transformTemplateParameterImpl(TemplateParmDecl *OldParam,
- MultiLevelTemplateArgumentList &Args) {
- // Ask the template instantiator to do the heavy lifting for us, then adjust
- // the index of the parameter once it's done.
- auto *NewParam =
- cast<TemplateParmDecl>(SemaRef.SubstDecl(OldParam, DC, Args));
- assert(NewParam->getDepth() == OldParam->getDepth() - 1 &&
- "unexpected template param depth");
- NewParam->setPosition(NewParam->getPosition() + Depth1IndexAdjustment);
- return NewParam;
- }
-
- QualType transformFunctionProtoType(
- TypeLocBuilder &TLB, FunctionProtoTypeLoc TL,
- SmallVectorImpl<ParmVarDecl *> &Params,
- MultiLevelTemplateArgumentList &Args,
- SmallVectorImpl<TypedefNameDecl *> &MaterializedTypedefs) {
- SmallVector<QualType, 4> ParamTypes;
- const FunctionProtoType *T = TL.getTypePtr();
-
- // -- The types of the function parameters are those of the constructor.
- for (auto *OldParam : TL.getParams()) {
- ParmVarDecl *NewParam =
- transformFunctionTypeParam(OldParam, Args, MaterializedTypedefs);
- if (NestedPattern && NewParam)
- NewParam = transformFunctionTypeParam(NewParam, OuterInstantiationArgs,
- MaterializedTypedefs);
- if (!NewParam)
- return QualType();
- ParamTypes.push_back(NewParam->getType());
- Params.push_back(NewParam);
- }
-
- // -- The return type is the class template specialization designated by
- // the template-name and template arguments corresponding to the
- // template parameters obtained from the class template.
- //
- // We use the injected-class-name type of the primary template instead.
- // This has the convenient property that it is different from any type that
- // the user can write in a deduction-guide (because they cannot enter the
- // context of the template), so implicit deduction guides can never collide
- // with explicit ones.
- QualType ReturnType = DeducedType;
- TLB.pushTypeSpec(ReturnType).setNameLoc(Primary->getLocation());
-
- // Resolving a wording defect, we also inherit the variadicness of the
- // constructor.
- FunctionProtoType::ExtProtoInfo EPI;
- EPI.Variadic = T->isVariadic();
- EPI.HasTrailingReturn = true;
-
- QualType Result = SemaRef.BuildFunctionType(
- ReturnType, ParamTypes, TL.getBeginLoc(), DeductionGuideName, EPI);
- if (Result.isNull())
- return QualType();
-
- FunctionProtoTypeLoc NewTL = TLB.push<FunctionProtoTypeLoc>(Result);
- NewTL.setLocalRangeBegin(TL.getLocalRangeBegin());
- NewTL.setLParenLoc(TL.getLParenLoc());
- NewTL.setRParenLoc(TL.getRParenLoc());
- NewTL.setExceptionSpecRange(SourceRange());
- NewTL.setLocalRangeEnd(TL.getLocalRangeEnd());
- for (unsigned I = 0, E = NewTL.getNumParams(); I != E; ++I)
- NewTL.setParam(I, Params[I]);
-
- return Result;
- }
-
- ParmVarDecl *transformFunctionTypeParam(
- ParmVarDecl *OldParam, MultiLevelTemplateArgumentList &Args,
- llvm::SmallVectorImpl<TypedefNameDecl *> &MaterializedTypedefs) {
- TypeSourceInfo *OldDI = OldParam->getTypeSourceInfo();
- TypeSourceInfo *NewDI;
- if (auto PackTL = OldDI->getTypeLoc().getAs<PackExpansionTypeLoc>()) {
- // Expand out the one and only element in each inner pack.
- Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, 0);
- NewDI =
- SemaRef.SubstType(PackTL.getPatternLoc(), Args,
- OldParam->getLocation(), OldParam->getDeclName());
- if (!NewDI) return nullptr;
- NewDI =
- SemaRef.CheckPackExpansion(NewDI, PackTL.getEllipsisLoc(),
- PackTL.getTypePtr()->getNumExpansions());
- } else
- NewDI = SemaRef.SubstType(OldDI, Args, OldParam->getLocation(),
- OldParam->getDeclName());
- if (!NewDI)
- return nullptr;
-
- // Extract the type. This (for instance) replaces references to typedef
- // members of the current instantiations with the definitions of those
- // typedefs, avoiding triggering instantiation of the deduced type during
- // deduction.
- NewDI = ExtractTypeForDeductionGuide(SemaRef, MaterializedTypedefs)
- .transform(NewDI);
-
- // Resolving a wording defect, we also inherit default arguments from the
- // constructor.
- ExprResult NewDefArg;
- if (OldParam->hasDefaultArg()) {
- // We don't care what the value is (we won't use it); just create a
- // placeholder to indicate there is a default argument.
- QualType ParamTy = NewDI->getType();
- NewDefArg = new (SemaRef.Context)
- OpaqueValueExpr(OldParam->getDefaultArg()->getBeginLoc(),
- ParamTy.getNonLValueExprType(SemaRef.Context),
- ParamTy->isLValueReferenceType() ? VK_LValue
- : ParamTy->isRValueReferenceType() ? VK_XValue
- : VK_PRValue);
- }
- // Handle arrays and functions decay.
- auto NewType = NewDI->getType();
- if (NewType->isArrayType() || NewType->isFunctionType())
- NewType = SemaRef.Context.getDecayedType(NewType);
-
- ParmVarDecl *NewParam = ParmVarDecl::Create(
- SemaRef.Context, DC, OldParam->getInnerLocStart(),
- OldParam->getLocation(), OldParam->getIdentifier(), NewType, NewDI,
- OldParam->getStorageClass(), NewDefArg.get());
- NewParam->setScopeInfo(OldParam->getFunctionScopeDepth(),
- OldParam->getFunctionScopeIndex());
- SemaRef.CurrentInstantiationScope->InstantiatedLocal(OldParam, NewParam);
- return NewParam;
- }
-
- FunctionTemplateDecl *buildDeductionGuide(
- TemplateParameterList *TemplateParams, CXXConstructorDecl *Ctor,
- ExplicitSpecifier ES, TypeSourceInfo *TInfo, SourceLocation LocStart,
- SourceLocation Loc, SourceLocation LocEnd,
- llvm::ArrayRef<TypedefNameDecl *> MaterializedTypedefs = {}) {
- DeclarationNameInfo Name(DeductionGuideName, Loc);
- ArrayRef<ParmVarDecl *> Params =
- TInfo->getTypeLoc().castAs<FunctionProtoTypeLoc>().getParams();
-
- // Build the implicit deduction guide template.
- auto *Guide =
- CXXDeductionGuideDecl::Create(SemaRef.Context, DC, LocStart, ES, Name,
- TInfo->getType(), TInfo, LocEnd, Ctor);
- Guide->setImplicit();
- Guide->setParams(Params);
-
- for (auto *Param : Params)
- Param->setDeclContext(Guide);
- for (auto *TD : MaterializedTypedefs)
- TD->setDeclContext(Guide);
-
- auto *GuideTemplate = FunctionTemplateDecl::Create(
- SemaRef.Context, DC, Loc, DeductionGuideName, TemplateParams, Guide);
- GuideTemplate->setImplicit();
- Guide->setDescribedFunctionTemplate(GuideTemplate);
-
- if (isa<CXXRecordDecl>(DC)) {
- Guide->setAccess(AS_public);
- GuideTemplate->setAccess(AS_public);
- }
-
- DC->addDecl(GuideTemplate);
- return GuideTemplate;
- }
-};
-}
-
-FunctionTemplateDecl *Sema::DeclareImplicitDeductionGuideFromInitList(
- TemplateDecl *Template, MutableArrayRef<QualType> ParamTypes,
- SourceLocation Loc) {
- if (CXXRecordDecl *DefRecord =
- cast<CXXRecordDecl>(Template->getTemplatedDecl())->getDefinition()) {
- if (TemplateDecl *DescribedTemplate =
- DefRecord->getDescribedClassTemplate())
- Template = DescribedTemplate;
- }
-
- DeclContext *DC = Template->getDeclContext();
- if (DC->isDependentContext())
- return nullptr;
-
- ConvertConstructorToDeductionGuideTransform Transform(
- *this, cast<ClassTemplateDecl>(Template));
- if (!isCompleteType(Loc, Transform.DeducedType))
- return nullptr;
-
- // In case we were expanding a pack when we attempted to declare deduction
- // guides, turn off pack expansion for everything we're about to do.
- ArgumentPackSubstitutionIndexRAII SubstIndex(*this,
- /*NewSubstitutionIndex=*/-1);
- // Create a template instantiation record to track the "instantiation" of
- // constructors into deduction guides.
- InstantiatingTemplate BuildingDeductionGuides(
- *this, Loc, Template,
- Sema::InstantiatingTemplate::BuildingDeductionGuidesTag{});
- if (BuildingDeductionGuides.isInvalid())
- return nullptr;
-
- ClassTemplateDecl *Pattern =
- Transform.NestedPattern ? Transform.NestedPattern : Transform.Template;
- ContextRAII SavedContext(*this, Pattern->getTemplatedDecl());
-
- auto *DG = cast<FunctionTemplateDecl>(
- Transform.buildSimpleDeductionGuide(ParamTypes));
- SavedContext.pop();
- return DG;
-}
-
-void Sema::DeclareImplicitDeductionGuides(TemplateDecl *Template,
- SourceLocation Loc) {
- if (CXXRecordDecl *DefRecord =
- cast<CXXRecordDecl>(Template->getTemplatedDecl())->getDefinition()) {
- if (TemplateDecl *DescribedTemplate = DefRecord->getDescribedClassTemplate())
- Template = DescribedTemplate;
- }
-
- DeclContext *DC = Template->getDeclContext();
- if (DC->isDependentContext())
- return;
-
- ConvertConstructorToDeductionGuideTransform Transform(
- *this, cast<ClassTemplateDecl>(Template));
- if (!isCompleteType(Loc, Transform.DeducedType))
- return;
-
- // Check whether we've already declared deduction guides for this template.
- // FIXME: Consider storing a flag on the template to indicate this.
- auto Existing = DC->lookup(Transform.DeductionGuideName);
- for (auto *D : Existing)
- if (D->isImplicit())
- return;
-
- // In case we were expanding a pack when we attempted to declare deduction
- // guides, turn off pack expansion for everything we're about to do.
- ArgumentPackSubstitutionIndexRAII SubstIndex(*this, -1);
- // Create a template instantiation record to track the "instantiation" of
- // constructors into deduction guides.
- InstantiatingTemplate BuildingDeductionGuides(
- *this, Loc, Template,
- Sema::InstantiatingTemplate::BuildingDeductionGuidesTag{});
- if (BuildingDeductionGuides.isInvalid())
- return;
-
- // Convert declared constructors into deduction guide templates.
- // FIXME: Skip constructors for which deduction must necessarily fail (those
- // for which some class template parameter without a default argument never
- // appears in a deduced context).
- ClassTemplateDecl *Pattern =
- Transform.NestedPattern ? Transform.NestedPattern : Transform.Template;
- ContextRAII SavedContext(*this, Pattern->getTemplatedDecl());
- llvm::SmallPtrSet<NamedDecl *, 8> ProcessedCtors;
- bool AddedAny = false;
- for (NamedDecl *D : LookupConstructors(Pattern->getTemplatedDecl())) {
- D = D->getUnderlyingDecl();
- if (D->isInvalidDecl() || D->isImplicit())
- continue;
-
- D = cast<NamedDecl>(D->getCanonicalDecl());
-
- // Within C++20 modules, we may have multiple same constructors in
- // multiple same RecordDecls. And it doesn't make sense to create
- // duplicated deduction guides for the duplicated constructors.
- if (ProcessedCtors.count(D))
- continue;
-
- auto *FTD = dyn_cast<FunctionTemplateDecl>(D);
- auto *CD =
- dyn_cast_or_null<CXXConstructorDecl>(FTD ? FTD->getTemplatedDecl() : D);
- // Class-scope explicit specializations (MS extension) do not result in
- // deduction guides.
- if (!CD || (!FTD && CD->isFunctionTemplateSpecialization()))
- continue;
-
- // Cannot make a deduction guide when unparsed arguments are present.
- if (llvm::any_of(CD->parameters(), [](ParmVarDecl *P) {
- return !P || P->hasUnparsedDefaultArg();
- }))
- continue;
-
- ProcessedCtors.insert(D);
- Transform.transformConstructor(FTD, CD);
- AddedAny = true;
- }
-
- // C++17 [over.match.class.deduct]
- // -- If C is not defined or does not declare any constructors, an
- // additional function template derived as above from a hypothetical
- // constructor C().
- if (!AddedAny)
- Transform.buildSimpleDeductionGuide(std::nullopt);
-
- // -- An additional function template derived as above from a hypothetical
- // constructor C(C), called the copy deduction candidate.
- cast<CXXDeductionGuideDecl>(
- cast<FunctionTemplateDecl>(
- Transform.buildSimpleDeductionGuide(Transform.DeducedType))
- ->getTemplatedDecl())
- ->setDeductionCandidateKind(DeductionCandidate::Copy);
-
- SavedContext.pop();
-}
-
/// Diagnose the presence of a default template argument on a
/// template parameter, which is ill-formed in certain contexts.
///
@@ -2899,34 +2271,6 @@ static bool DiagnoseUnexpandedParameterPacks(Sema &S,
return false;
}
-/// Checks the validity of a template parameter list, possibly
-/// considering the template parameter list from a previous
-/// declaration.
-///
-/// If an "old" template parameter list is provided, it must be
-/// equivalent (per TemplateParameterListsAreEqual) to the "new"
-/// template parameter list.
-///
-/// \param NewParams Template parameter list for a new template
-/// declaration. This template parameter list will be updated with any
-/// default arguments that are carried through from the previous
-/// template parameter list.
-///
-/// \param OldParams If provided, template parameter list from a
-/// previous declaration of the same template. Default template
-/// arguments will be merged from the old template parameter list to
-/// the new template parameter list.
-///
-/// \param TPC Describes the context in which we are checking the given
-/// template parameter list.
-///
-/// \param SkipBody If we might have already made a prior merged definition
-/// of this template visible, the corresponding body-skipping information.
-/// Default argument redefinition is not an error when skipping such a body,
-/// because (under the ODR) we can assume the default arguments are the same
-/// as the prior merged definition.
-///
-/// \returns true if an error occurred, false otherwise.
bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
@@ -2973,10 +2317,9 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
= dyn_cast<TemplateTypeParmDecl>(*NewParam)) {
// Check the presence of a default argument here.
if (NewTypeParm->hasDefaultArgument() &&
- DiagnoseDefaultTemplateArgument(*this, TPC,
- NewTypeParm->getLocation(),
- NewTypeParm->getDefaultArgumentInfo()->getTypeLoc()
- .getSourceRange()))
+ DiagnoseDefaultTemplateArgument(
+ *this, TPC, NewTypeParm->getLocation(),
+ NewTypeParm->getDefaultArgument().getSourceRange()))
NewTypeParm->removeDefaultArgument();
// Merge default arguments for template type parameters.
@@ -3025,9 +2368,9 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
// Check the presence of a default argument here.
if (NewNonTypeParm->hasDefaultArgument() &&
- DiagnoseDefaultTemplateArgument(*this, TPC,
- NewNonTypeParm->getLocation(),
- NewNonTypeParm->getDefaultArgument()->getSourceRange())) {
+ DiagnoseDefaultTemplateArgument(
+ *this, TPC, NewNonTypeParm->getLocation(),
+ NewNonTypeParm->getDefaultArgument().getSourceRange())) {
NewNonTypeParm->removeDefaultArgument();
}
@@ -3156,12 +2499,14 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
diag::note_template_param_prev_default_arg_in_other_module)
<< PrevModuleName;
Invalid = true;
- } else if (MissingDefaultArg && TPC != TPC_FunctionTemplate) {
- // C++ [temp.param]p11:
- // If a template-parameter of a class template has a default
- // template-argument, each subsequent template-parameter shall either
- // have a default template-argument supplied or be a template parameter
- // pack.
+ } else if (MissingDefaultArg &&
+ (TPC == TPC_ClassTemplate || TPC == TPC_FriendClassTemplate ||
+ TPC == TPC_VarTemplate || TPC == TPC_TypeAliasTemplate)) {
+ // C++ 23[temp.param]p14:
+ // If a template-parameter of a class template, variable template, or
+ // alias template has a default template argument, each subsequent
+ // template-parameter shall either have a default template argument
+ // supplied or be a template parameter pack.
Diag((*NewParam)->getLocation(),
diag::err_template_param_default_arg_missing);
Diag(PreviousDefaultArgLoc, diag::note_template_param_prev_default_arg);
@@ -3327,39 +2672,6 @@ static SourceRange getRangeOfTypeInNestedNameSpecifier(ASTContext &Context,
return SourceRange();
}
-/// Match the given template parameter lists to the given scope
-/// specifier, returning the template parameter list that applies to the
-/// name.
-///
-/// \param DeclStartLoc the start of the declaration that has a scope
-/// specifier or a template parameter list.
-///
-/// \param DeclLoc The location of the declaration itself.
-///
-/// \param SS the scope specifier that will be matched to the given template
-/// parameter lists. This scope specifier precedes a qualified name that is
-/// being declared.
-///
-/// \param TemplateId The template-id following the scope specifier, if there
-/// is one. Used to check for a missing 'template<>'.
-///
-/// \param ParamLists the template parameter lists, from the outermost to the
-/// innermost template parameter lists.
-///
-/// \param IsFriend Whether to apply the slightly different rules for
-/// matching template parameters to scope specifiers in friend
-/// declarations.
-///
-/// \param IsMemberSpecialization will be set true if the scope specifier
-/// denotes a fully-specialized type, and therefore this is a declaration of
-/// a member specialization.
-///
-/// \returns the template parameter list, if any, that corresponds to the
-/// name that is preceded by the scope specifier @p SS. This template
-/// parameter list may have template parameters (if we're declaring a
-/// template) or may have no template parameters (if we're declaring a
-/// template specialization), or may be NULL (if what we're declaring isn't
-/// itself a template).
TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS,
TemplateIdAnnotation *TemplateId,
@@ -3565,10 +2877,10 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
// C++ [temp.expl.spec]p16:
// In an explicit specialization declaration for a member of a class
- // template or a member template that ap- pears in namespace scope, the
+ // template or a member template that appears in namespace scope, the
// member template and some of its enclosing class templates may remain
// unspecialized, except that the declaration shall not explicitly
- // specialize a class member template if its en- closing class templates
+ // specialize a class member template if its enclosing class templates
// are not explicitly specialized as well.
if (ParamIdx < ParamLists.size()) {
if (ParamLists[ParamIdx]->size() == 0) {
@@ -3683,7 +2995,7 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
if (!SuppressDiagnostic)
Diag(ParamLists[ParamIdx]->getTemplateLoc(),
- AllExplicitSpecHeaders ? diag::warn_template_spec_extra_headers
+ AllExplicitSpecHeaders ? diag::ext_template_spec_extra_headers
: diag::err_template_spec_extra_headers)
<< SourceRange(ParamLists[ParamIdx]->getTemplateLoc(),
ParamLists[ParamLists.size() - 2]->getRAngleLoc());
@@ -3834,8 +3146,8 @@ checkBuiltinTemplateIdType(Sema &SemaRef, BuiltinTemplateDecl *BTD,
/// Determine whether this alias template is "enable_if_t".
/// libc++ >=14 uses "__enable_if_t" in C++11 mode.
static bool isEnableIfAliasTemplate(TypeAliasTemplateDecl *AliasTemplate) {
- return AliasTemplate->getName().equals("enable_if_t") ||
- AliasTemplate->getName().equals("__enable_if_t");
+ return AliasTemplate->getName() == "enable_if_t" ||
+ AliasTemplate->getName() == "__enable_if_t";
}
/// Collect all of the separable terms in the given condition, which
@@ -4028,13 +3340,24 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
AliasTemplate->getTemplateParameters()->getDepth());
LocalInstantiationScope Scope(*this);
- InstantiatingTemplate Inst(*this, TemplateLoc, Template);
+ InstantiatingTemplate Inst(
+ *this, /*PointOfInstantiation=*/TemplateLoc,
+ /*Entity=*/AliasTemplate,
+ /*TemplateArgs=*/TemplateArgLists.getInnermost());
+
+ // Diagnose uses of this alias.
+ (void)DiagnoseUseOfDecl(AliasTemplate, TemplateLoc);
+
if (Inst.isInvalid())
return QualType();
- CanonType = SubstType(Pattern->getUnderlyingType(),
- TemplateArgLists, AliasTemplate->getLocation(),
- AliasTemplate->getDeclName());
+ std::optional<ContextRAII> SavedContext;
+ if (!AliasTemplate->getDeclContext()->isFileContext())
+ SavedContext.emplace(*this, AliasTemplate->getDeclContext());
+
+ CanonType =
+ SubstType(Pattern->getUnderlyingType(), TemplateArgLists,
+ AliasTemplate->getLocation(), AliasTemplate->getDeclName());
if (CanonType.isNull()) {
// If this was enable_if and we failed to find the nested type
// within enable_if in a SFINAE context, dig out the specific
@@ -4229,7 +3552,7 @@ bool Sema::resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
TypeResult Sema::ActOnTemplateIdType(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
- TemplateTy TemplateD, IdentifierInfo *TemplateII,
+ TemplateTy TemplateD, const IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc,
bool IsCtorOrDtorName, bool IsClassName,
@@ -4404,7 +3727,7 @@ TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
IdentifierInfo *Id = D->getIdentifier();
assert(Id && "templated class must have an identifier");
- if (!isAcceptableTagRedeclaration(D, TagKind, TUK == TUK_Definition,
+ if (!isAcceptableTagRedeclaration(D, TagKind, TUK == TagUseKind::Definition,
TagLoc, Id)) {
Diag(TagLoc, diag::err_use_with_wrong_tag)
<< Result
@@ -4621,9 +3944,9 @@ void Sema::CheckDeductionGuideTemplate(FunctionTemplateDecl *TD) {
}
DeclResult Sema::ActOnVarTemplateSpecialization(
- Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc,
- TemplateParameterList *TemplateParams, StorageClass SC,
- bool IsPartialSpecialization) {
+ Scope *S, Declarator &D, TypeSourceInfo *DI, LookupResult &Previous,
+ SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
+ StorageClass SC, bool IsPartialSpecialization) {
// D must be variable template id.
assert(D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId &&
"Variable template specialization is declared with a template id.");
@@ -4739,7 +4062,8 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
VarTemplatePartialSpecializationDecl::Create(
Context, VarTemplate->getDeclContext(), TemplateKWLoc,
TemplateNameLoc, TemplateParams, VarTemplate, DI->getType(), DI, SC,
- CanonicalConverted, TemplateArgs);
+ CanonicalConverted);
+ Partial->setTemplateArgsAsWritten(TemplateArgs);
if (!PrevPartial)
VarTemplate->AddPartialSpecialization(Partial, InsertPos);
@@ -4757,7 +4081,7 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
Specialization = VarTemplateSpecializationDecl::Create(
Context, VarTemplate->getDeclContext(), TemplateKWLoc, TemplateNameLoc,
VarTemplate, DI->getType(), DI, SC, CanonicalConverted);
- Specialization->setTemplateArgsInfo(TemplateArgs);
+ Specialization->setTemplateArgsAsWritten(TemplateArgs);
if (!PrevDecl)
VarTemplate->AddSpecialization(Specialization, InsertPos);
@@ -4792,7 +4116,6 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
}
}
- Specialization->setTemplateKeywordLoc(TemplateKWLoc);
Specialization->setLexicalDeclContext(CurContext);
// Add the specialization into its lexical context, so that it can
@@ -4803,17 +4126,12 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
// Note that this is an explicit specialization.
Specialization->setSpecializationKind(TSK_ExplicitSpecialization);
- if (PrevDecl) {
- // Check that this isn't a redefinition of this specialization,
- // merging with previous declarations.
- LookupResult PrevSpec(*this, GetNameForDeclarator(D), LookupOrdinaryName,
- forRedeclarationInCurContext());
- PrevSpec.addDecl(PrevDecl);
- D.setRedeclaration(CheckVariableDeclaration(Specialization, PrevSpec));
- } else if (Specialization->isStaticDataMember() &&
- Specialization->isOutOfLine()) {
+ Previous.clear();
+ if (PrevDecl)
+ Previous.addDecl(PrevDecl);
+ else if (Specialization->isStaticDataMember() &&
+ Specialization->isOutOfLine())
Specialization->setAccess(VarTemplate->getAccess());
- }
return Specialization;
}
@@ -4863,9 +4181,7 @@ Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc,
// the set of specializations, based on the closest partial specialization
// that it represents. That is,
VarDecl *InstantiationPattern = Template->getTemplatedDecl();
- TemplateArgumentList TemplateArgList(TemplateArgumentList::OnStack,
- CanonicalConverted);
- TemplateArgumentList *InstantiationArgs = &TemplateArgList;
+ const TemplateArgumentList *PartialSpecArgs = nullptr;
bool AmbiguousPartialSpec = false;
typedef PartialSpecMatchResult MatchResult;
SmallVector<MatchResult, 4> Matched;
@@ -4886,7 +4202,8 @@ Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc,
TemplateDeductionInfo Info(FailedCandidates.getLocation());
if (TemplateDeductionResult Result =
- DeduceTemplateArguments(Partial, TemplateArgList, Info)) {
+ DeduceTemplateArguments(Partial, CanonicalConverted, Info);
+ Result != TemplateDeductionResult::Success) {
// Store the failed-deduction information for use in diagnostics, later.
// TODO: Actually use the failed-deduction info?
FailedCandidates.addCandidate().set(
@@ -4939,7 +4256,7 @@ Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc,
// Instantiate using the best variable template partial specialization.
InstantiationPattern = Best->Partial;
- InstantiationArgs = Best->Args;
+ PartialSpecArgs = Best->Args;
} else {
// -- If no match is found, the instantiation is generated
// from the primary template.
@@ -4951,7 +4268,7 @@ Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc,
// in DoMarkVarDeclReferenced().
// FIXME: LateAttrs et al.?
VarTemplateSpecializationDecl *Decl = BuildVarTemplateInstantiation(
- Template, InstantiationPattern, *InstantiationArgs, TemplateArgs,
+ Template, InstantiationPattern, PartialSpecArgs, TemplateArgs,
CanonicalConverted, TemplateNameLoc /*, LateAttrs, StartingScope*/);
if (!Decl)
return true;
@@ -4972,7 +4289,7 @@ Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc,
if (VarTemplatePartialSpecializationDecl *D =
dyn_cast<VarTemplatePartialSpecializationDecl>(InstantiationPattern))
- Decl->setInstantiationOf(D, InstantiationArgs);
+ Decl->setInstantiationOf(D, PartialSpecArgs);
checkSpecializationReachability(TemplateNameLoc, Decl);
@@ -4980,11 +4297,10 @@ Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc,
return Decl;
}
-ExprResult
-Sema::CheckVarTemplateId(const CXXScopeSpec &SS,
- const DeclarationNameInfo &NameInfo,
- VarTemplateDecl *Template, SourceLocation TemplateLoc,
- const TemplateArgumentListInfo *TemplateArgs) {
+ExprResult Sema::CheckVarTemplateId(
+ const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
+ VarTemplateDecl *Template, NamedDecl *FoundD, SourceLocation TemplateLoc,
+ const TemplateArgumentListInfo *TemplateArgs) {
DeclResult Decl = CheckVarTemplateId(Template, TemplateLoc, NameInfo.getLoc(),
*TemplateArgs);
@@ -5000,8 +4316,7 @@ Sema::CheckVarTemplateId(const CXXScopeSpec &SS,
NameInfo.getLoc());
// Build an ordinary singleton decl ref.
- return BuildDeclarationNameExpr(SS, NameInfo, Var,
- /*FoundD=*/nullptr, TemplateArgs);
+ return BuildDeclarationNameExpr(SS, NameInfo, Var, FoundD, TemplateArgs);
}
void Sema::diagnoseMissingTemplateArguments(TemplateName Name,
@@ -5013,6 +4328,15 @@ void Sema::diagnoseMissingTemplateArguments(TemplateName Name,
}
}
+void Sema::diagnoseMissingTemplateArguments(const CXXScopeSpec &SS,
+ bool TemplateKeyword,
+ TemplateDecl *TD,
+ SourceLocation Loc) {
+ TemplateName Name = Context.getQualifiedTemplateName(
+ SS.getScopeRep(), TemplateKeyword, TemplateName(TD));
+ diagnoseMissingTemplateArguments(Name, Loc);
+}
+
ExprResult
Sema::CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
@@ -5030,6 +4354,8 @@ Sema::CheckConceptTemplateId(const CXXScopeSpec &SS,
/*UpdateArgsWithConversions=*/false))
return ExprError();
+ DiagnoseUseOfDecl(NamedConcept, ConceptNameInfo.getLoc());
+
auto *CSD = ImplicitConceptSpecializationDecl::Create(
Context, NamedConcept->getDeclContext(), NamedConcept->getLocation(),
CanonicalConverted);
@@ -5042,7 +4368,7 @@ Sema::CheckConceptTemplateId(const CXXScopeSpec &SS,
LocalInstantiationScope Scope(*this);
EnterExpressionEvaluationContext EECtx{
- *this, ExpressionEvaluationContext::ConstantEvaluated, CSD};
+ *this, ExpressionEvaluationContext::Unevaluated, CSD};
if (!AreArgsDependent &&
CheckConstraintSatisfaction(
@@ -5081,25 +4407,26 @@ ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
// Non-function templates require a template argument list.
if (auto *TD = R.getAsSingle<TemplateDecl>()) {
if (!TemplateArgs && !isa<FunctionTemplateDecl>(TD)) {
- diagnoseMissingTemplateArguments(TemplateName(TD), R.getNameLoc());
+ diagnoseMissingTemplateArguments(
+ SS, /*TemplateKeyword=*/TemplateKWLoc.isValid(), TD, R.getNameLoc());
return ExprError();
}
}
bool KnownDependent = false;
// In C++1y, check variable template ids.
if (R.getAsSingle<VarTemplateDecl>()) {
- ExprResult Res = CheckVarTemplateId(SS, R.getLookupNameInfo(),
- R.getAsSingle<VarTemplateDecl>(),
- TemplateKWLoc, TemplateArgs);
+ ExprResult Res = CheckVarTemplateId(
+ SS, R.getLookupNameInfo(), R.getAsSingle<VarTemplateDecl>(),
+ R.getRepresentativeDecl(), TemplateKWLoc, TemplateArgs);
if (Res.isInvalid() || Res.isUsable())
return Res;
- // Result is dependent. Carry on to build an UnresolvedLookupEpxr.
+ // Result is dependent. Carry on to build an UnresolvedLookupExpr.
KnownDependent = true;
}
if (R.getAsSingle<ConceptDecl>()) {
return CheckConceptTemplateId(SS, TemplateKWLoc, R.getLookupNameInfo(),
- R.getFoundDecl(),
+ R.getRepresentativeDecl(),
R.getAsSingle<ConceptDecl>(), TemplateArgs);
}
@@ -5109,73 +4436,51 @@ ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
UnresolvedLookupExpr *ULE = UnresolvedLookupExpr::Create(
Context, R.getNamingClass(), SS.getWithLocInContext(Context),
TemplateKWLoc, R.getLookupNameInfo(), RequiresADL, TemplateArgs,
- R.begin(), R.end(), KnownDependent);
+ R.begin(), R.end(), KnownDependent,
+ /*KnownInstantiationDependent=*/false);
+
+ // Model the templates with UnresolvedTemplateTy. The expression should then
+ // either be transformed in an instantiation or be diagnosed in
+ // CheckPlaceholderExpr.
+ if (ULE->getType() == Context.OverloadTy && R.isSingleResult() &&
+ !R.getFoundDecl()->getAsFunction())
+ ULE->setType(Context.UnresolvedTemplateTy);
return ULE;
}
-// We actually only call this from template instantiation.
-ExprResult
-Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
- SourceLocation TemplateKWLoc,
- const DeclarationNameInfo &NameInfo,
- const TemplateArgumentListInfo *TemplateArgs) {
-
+ExprResult Sema::BuildQualifiedTemplateIdExpr(
+ CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs, bool IsAddressOfOperand) {
assert(TemplateArgs || TemplateKWLoc.isValid());
- DeclContext *DC;
- if (!(DC = computeDeclContext(SS, false)) ||
- DC->isDependentContext() ||
- RequireCompleteDeclContext(SS, DC))
- return BuildDependentDeclRefExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs);
- bool MemberOfUnknownSpecialization;
LookupResult R(*this, NameInfo, LookupOrdinaryName);
- if (LookupTemplateName(R, (Scope *)nullptr, SS, QualType(),
- /*Entering*/false, MemberOfUnknownSpecialization,
- TemplateKWLoc))
+ if (LookupTemplateName(R, /*S=*/nullptr, SS, /*ObjectType=*/QualType(),
+ /*EnteringContext=*/false, TemplateKWLoc))
return ExprError();
if (R.isAmbiguous())
return ExprError();
+ if (R.wasNotFoundInCurrentInstantiation() || SS.isInvalid())
+ return BuildDependentDeclRefExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs);
+
if (R.empty()) {
+ DeclContext *DC = computeDeclContext(SS);
Diag(NameInfo.getLoc(), diag::err_no_member)
<< NameInfo.getName() << DC << SS.getRange();
return ExprError();
}
- auto DiagnoseTypeTemplateDecl = [&](TemplateDecl *Temp,
- bool isTypeAliasTemplateDecl) {
- Diag(NameInfo.getLoc(), diag::err_template_kw_refers_to_type_template)
- << SS.getScopeRep() << NameInfo.getName().getAsString() << SS.getRange()
- << isTypeAliasTemplateDecl;
- Diag(Temp->getLocation(), diag::note_referenced_type_template) << 0;
- return ExprError();
- };
-
- if (ClassTemplateDecl *Temp = R.getAsSingle<ClassTemplateDecl>())
- return DiagnoseTypeTemplateDecl(Temp, false);
+ // If necessary, build an implicit class member access.
+ if (isPotentialImplicitMemberAccess(SS, R, IsAddressOfOperand))
+ return BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs,
+ /*S=*/nullptr);
- if (TypeAliasTemplateDecl *Temp = R.getAsSingle<TypeAliasTemplateDecl>())
- return DiagnoseTypeTemplateDecl(Temp, true);
-
- return BuildTemplateIdExpr(SS, TemplateKWLoc, R, /*ADL*/ false, TemplateArgs);
+ return BuildTemplateIdExpr(SS, TemplateKWLoc, R, /*ADL=*/false, TemplateArgs);
}
-/// Form a template name from a name that is syntactically required to name a
-/// template, either due to use of the 'template' keyword or because a name in
-/// this syntactic context is assumed to name a template (C++ [temp.names]p2-4).
-///
-/// This action forms a template name given the name of the template and its
-/// optional scope specifier. This is used when the 'template' keyword is used
-/// or when the parsing context unambiguously treats a following '<' as
-/// introducing a template argument list. Note that this may produce a
-/// non-dependent template name if we can perform the lookup now and identify
-/// the named template.
-///
-/// For example, given "x.MetaFun::template apply", the scope specifier
-/// \p SS will be "MetaFun::", \p TemplateKWLoc contains the location
-/// of the "template" keyword, and "apply" is the \p Name.
TemplateNameKind Sema::ActOnTemplateName(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
@@ -5253,14 +4558,13 @@ TemplateNameKind Sema::ActOnTemplateName(Scope *S,
DeclarationNameInfo DNI = GetNameFromUnqualifiedId(Name);
LookupResult R(*this, DNI.getName(), Name.getBeginLoc(),
LookupOrdinaryName);
- bool MOUS;
// Tell LookupTemplateName that we require a template so that it diagnoses
// cases where it finds a non-template.
RequiredTemplateKind RTK = TemplateKWLoc.isValid()
? RequiredTemplateKind(TemplateKWLoc)
: TemplateNameIsRequired;
- if (!LookupTemplateName(R, S, SS, ObjectType.get(), EnteringContext, MOUS,
- RTK, nullptr, /*AllowTypoCorrection=*/false) &&
+ if (!LookupTemplateName(R, S, SS, ObjectType.get(), EnteringContext, RTK,
+ /*ATK=*/nullptr, /*AllowTypoCorrection=*/false) &&
!R.isAmbiguous()) {
if (LookupCtx)
Diag(Name.getBeginLoc(), diag::err_no_member)
@@ -5349,11 +4653,10 @@ bool Sema::CheckTemplateTypeArgument(
if (auto *II = NameInfo.getName().getAsIdentifierInfo()) {
LookupResult Result(*this, NameInfo, LookupOrdinaryName);
- LookupParsedName(Result, CurScope, &SS);
+ LookupParsedName(Result, CurScope, &SS, /*ObjectType=*/QualType());
if (Result.getAsSingle<TypeDecl>() ||
- Result.getResultKind() ==
- LookupResult::NotFoundInCurrentInstantiation) {
+ Result.wasNotFoundInCurrentInstantiation()) {
assert(SS.getScopeRep() && "dependent scope expr must has a scope!");
// Suggest that the user add 'typename' before the NNS.
SourceLocation Loc = AL.getSourceRange().getBegin();
@@ -5386,6 +4689,15 @@ bool Sema::CheckTemplateTypeArgument(
[[fallthrough]];
}
default: {
+ // We allow instantiating a template with template argument packs when
+ // building deduction guides.
+ if (Arg.getKind() == TemplateArgument::Pack &&
+ CodeSynthesisContexts.back().Kind ==
+ Sema::CodeSynthesisContext::BuildingDeductionGuides) {
+ SugaredConverted.push_back(Arg);
+ CanonicalConverted.push_back(Arg);
+ return false;
+ }
// We have a template type parameter but the template argument
// is not a type.
SourceRange SR = AL.getSourceRange();
@@ -5436,22 +4748,26 @@ bool Sema::CheckTemplateTypeArgument(
///
/// \param Converted the list of template arguments provided for template
/// parameters that precede \p Param in the template parameter list.
-/// \returns the substituted template argument, or NULL if an error occurred.
-static TypeSourceInfo *SubstDefaultTemplateArgument(
+///
+/// \param Output the resulting substituted template argument.
+///
+/// \returns true if an error occurred.
+static bool SubstDefaultTemplateArgument(
Sema &SemaRef, TemplateDecl *Template, SourceLocation TemplateLoc,
SourceLocation RAngleLoc, TemplateTypeParmDecl *Param,
ArrayRef<TemplateArgument> SugaredConverted,
- ArrayRef<TemplateArgument> CanonicalConverted) {
- TypeSourceInfo *ArgType = Param->getDefaultArgumentInfo();
+ ArrayRef<TemplateArgument> CanonicalConverted,
+ TemplateArgumentLoc &Output) {
+ Output = Param->getDefaultArgument();
// If the argument type is dependent, instantiate it now based
// on the previously-computed template arguments.
- if (ArgType->getType()->isInstantiationDependentType()) {
+ if (Output.getArgument().isInstantiationDependent()) {
Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc, Param, Template,
SugaredConverted,
SourceRange(TemplateLoc, RAngleLoc));
if (Inst.isInvalid())
- return nullptr;
+ return true;
// Only substitute for the innermost template argument list.
MultiLevelTemplateArgumentList TemplateArgLists(Template, SugaredConverted,
@@ -5464,12 +4780,14 @@ static TypeSourceInfo *SubstDefaultTemplateArgument(
ForLambdaCallOperator = Rec->isLambda();
Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext(),
!ForLambdaCallOperator);
- ArgType =
- SemaRef.SubstType(ArgType, TemplateArgLists,
- Param->getDefaultArgumentLoc(), Param->getDeclName());
+
+ if (SemaRef.SubstTemplateArgument(Output, TemplateArgLists, Output,
+ Param->getDefaultArgumentLoc(),
+ Param->getDeclName()))
+ return true;
}
- return ArgType;
+ return false;
}
/// Substitute template arguments into the default template argument for
@@ -5494,16 +4812,17 @@ static TypeSourceInfo *SubstDefaultTemplateArgument(
/// parameters that precede \p Param in the template parameter list.
///
/// \returns the substituted template argument, or NULL if an error occurred.
-static ExprResult SubstDefaultTemplateArgument(
+static bool SubstDefaultTemplateArgument(
Sema &SemaRef, TemplateDecl *Template, SourceLocation TemplateLoc,
SourceLocation RAngleLoc, NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> SugaredConverted,
- ArrayRef<TemplateArgument> CanonicalConverted) {
+ ArrayRef<TemplateArgument> CanonicalConverted,
+ TemplateArgumentLoc &Output) {
Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc, Param, Template,
SugaredConverted,
SourceRange(TemplateLoc, RAngleLoc));
if (Inst.isInvalid())
- return ExprError();
+ return true;
// Only substitute for the innermost template argument list.
MultiLevelTemplateArgumentList TemplateArgLists(Template, SugaredConverted,
@@ -5514,7 +4833,8 @@ static ExprResult SubstDefaultTemplateArgument(
Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext());
EnterExpressionEvaluationContext ConstantEvaluated(
SemaRef, Sema::ExpressionEvaluationContext::ConstantEvaluated);
- return SemaRef.SubstExpr(Param->getDefaultArgument(), TemplateArgLists);
+ return SemaRef.SubstTemplateArgument(Param->getDefaultArgument(),
+ TemplateArgLists, Output);
}
/// Substitute template arguments into the default template argument for
@@ -5577,9 +4897,6 @@ static TemplateName SubstDefaultTemplateArgument(
TemplateArgLists);
}
-/// If the given template parameter has a default template
-/// argument, substitute into that default template argument and
-/// return the corresponding template argument.
TemplateArgumentLoc Sema::SubstDefaultTemplateArgumentIfAvailable(
TemplateDecl *Template, SourceLocation TemplateLoc,
SourceLocation RAngleLoc, Decl *Param,
@@ -5592,13 +4909,12 @@ TemplateArgumentLoc Sema::SubstDefaultTemplateArgumentIfAvailable(
return TemplateArgumentLoc();
HasDefaultArg = true;
- TypeSourceInfo *DI = SubstDefaultTemplateArgument(
- *this, Template, TemplateLoc, RAngleLoc, TypeParm, SugaredConverted,
- CanonicalConverted);
- if (DI)
- return TemplateArgumentLoc(TemplateArgument(DI->getType()), DI);
-
- return TemplateArgumentLoc();
+ TemplateArgumentLoc Output;
+ if (SubstDefaultTemplateArgument(*this, Template, TemplateLoc, RAngleLoc,
+ TypeParm, SugaredConverted,
+ CanonicalConverted, Output))
+ return TemplateArgumentLoc();
+ return Output;
}
if (NonTypeTemplateParmDecl *NonTypeParm
@@ -5607,14 +4923,12 @@ TemplateArgumentLoc Sema::SubstDefaultTemplateArgumentIfAvailable(
return TemplateArgumentLoc();
HasDefaultArg = true;
- ExprResult Arg = SubstDefaultTemplateArgument(
- *this, Template, TemplateLoc, RAngleLoc, NonTypeParm, SugaredConverted,
- CanonicalConverted);
- if (Arg.isInvalid())
+ TemplateArgumentLoc Output;
+ if (SubstDefaultTemplateArgument(*this, Template, TemplateLoc, RAngleLoc,
+ NonTypeParm, SugaredConverted,
+ CanonicalConverted, Output))
return TemplateArgumentLoc();
-
- Expr *ArgE = Arg.getAs<Expr>();
- return TemplateArgumentLoc(TemplateArgument(ArgE), ArgE);
+ return Output;
}
TemplateTemplateParmDecl *TempTempParm
@@ -5670,32 +4984,6 @@ convertTypeTemplateArgumentToTemplate(ASTContext &Context, TypeLoc TLoc) {
return TemplateArgumentLoc();
}
-/// Check that the given template argument corresponds to the given
-/// template parameter.
-///
-/// \param Param The template parameter against which the argument will be
-/// checked.
-///
-/// \param Arg The template argument, which may be updated due to conversions.
-///
-/// \param Template The template in which the template argument resides.
-///
-/// \param TemplateLoc The location of the template name for the template
-/// whose argument list we're matching.
-///
-/// \param RAngleLoc The location of the right angle bracket ('>') that closes
-/// the template argument list.
-///
-/// \param ArgumentPackIndex The index into the argument pack where this
-/// argument will be placed. Only valid if the parameter is a parameter pack.
-///
-/// \param Converted The checked, converted argument will be added to the
-/// end of this small vector.
-///
-/// \param CTAK Describes how we arrived at this particular template argument:
-/// explicitly written, deduced, etc.
-///
-/// \returns true on error, false otherwise.
bool Sema::CheckTemplateArgument(
NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template,
SourceLocation TemplateLoc, SourceLocation RAngleLoc,
@@ -5917,7 +5205,8 @@ bool Sema::CheckTemplateArgument(
case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion:
- if (CheckTemplateTemplateArgument(TempParm, Params, Arg))
+ if (CheckTemplateTemplateArgument(TempParm, Params, Arg,
+ /*IsDeduced=*/CTAK != CTAK_Specified))
return true;
SugaredConverted.push_back(Arg.getArgument());
@@ -5989,7 +5278,8 @@ bool Sema::CheckTemplateArgumentList(
TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &SugaredConverted,
SmallVectorImpl<TemplateArgument> &CanonicalConverted,
- bool UpdateArgsWithConversions, bool *ConstraintsNotSatisfied) {
+ bool UpdateArgsWithConversions, bool *ConstraintsNotSatisfied,
+ bool PartialOrderingTTP) {
if (ConstraintsNotSatisfied)
*ConstraintsNotSatisfied = false;
@@ -6060,9 +5350,14 @@ bool Sema::CheckTemplateArgumentList(
bool PackExpansionIntoNonPack =
NewArgs[ArgIdx].getArgument().isPackExpansion() &&
(!(*Param)->isTemplateParameterPack() || getExpandedPackSize(*Param));
- if (PackExpansionIntoNonPack && (isa<TypeAliasTemplateDecl>(Template) ||
- isa<ConceptDecl>(Template))) {
- // Core issue 1430: we have a pack expansion as an argument to an
+ // CWG1430: Don't diagnose this pack expansion when partial
+ // ordering template template parameters. Some uses of the template could
+ // be valid, and invalid uses will be diagnosed later during
+ // instantiation.
+ if (PackExpansionIntoNonPack && !PartialOrderingTTP &&
+ (isa<TypeAliasTemplateDecl>(Template) ||
+ isa<ConceptDecl>(Template))) {
+ // CWG1430: we have a pack expansion as an argument to an
// alias template, and it's not part of a parameter pack. This
// can't be canonicalized, so reject it now.
// As for concepts - we cannot normalize constraints where this
@@ -6174,28 +5469,20 @@ bool Sema::CheckTemplateArgumentList(
return diagnoseMissingArgument(*this, TemplateLoc, Template, TTP,
NewArgs);
- TypeSourceInfo *ArgType = SubstDefaultTemplateArgument(
- *this, Template, TemplateLoc, RAngleLoc, TTP, SugaredConverted,
- CanonicalConverted);
- if (!ArgType)
+ if (SubstDefaultTemplateArgument(*this, Template, TemplateLoc, RAngleLoc,
+ TTP, SugaredConverted,
+ CanonicalConverted, Arg))
return true;
-
- Arg = TemplateArgumentLoc(TemplateArgument(ArgType->getType()),
- ArgType);
} else if (NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
if (!hasReachableDefaultArgument(NTTP))
return diagnoseMissingArgument(*this, TemplateLoc, Template, NTTP,
NewArgs);
- ExprResult E = SubstDefaultTemplateArgument(
- *this, Template, TemplateLoc, RAngleLoc, NTTP, SugaredConverted,
- CanonicalConverted);
- if (E.isInvalid())
+ if (SubstDefaultTemplateArgument(*this, Template, TemplateLoc, RAngleLoc,
+ NTTP, SugaredConverted,
+ CanonicalConverted, Arg))
return true;
-
- Expr *Ex = E.getAs<Expr>();
- Arg = TemplateArgumentLoc(TemplateArgument(Ex), Ex);
} else {
TemplateTemplateParmDecl *TempParm
= cast<TemplateTemplateParmDecl>(*Param);
@@ -6277,8 +5564,6 @@ bool Sema::CheckTemplateArgumentList(
TemplateArgs = std::move(NewArgs);
if (!PartialTemplateArgs) {
- TemplateArgumentList StackTemplateArgs(TemplateArgumentList::OnStack,
- CanonicalConverted);
// Setup the context/ThisScope for the case where we are needing to
// re-instantiate constraints outside of normal instantiation.
DeclContext *NewContext = Template->getDeclContext();
@@ -6298,7 +5583,7 @@ bool Sema::CheckTemplateArgumentList(
CXXThisScopeRAII(*this, RD, ThisQuals, RD != nullptr);
MultiLevelTemplateArgumentList MLTAL = getTemplateInstantiationArgs(
- Template, NewContext, /*Final=*/false, &StackTemplateArgs,
+ Template, NewContext, /*Final=*/false, CanonicalConverted,
/*RelativeToPrimary=*/true,
/*Pattern=*/nullptr,
/*ForConceptInstantiation=*/true);
@@ -6460,6 +5745,11 @@ bool UnnamedLocalNoLinkageFinder::VisitDecltypeType(const DecltypeType*) {
return false;
}
+bool UnnamedLocalNoLinkageFinder::VisitPackIndexingType(
+ const PackIndexingType *) {
+ return false;
+}
+
bool UnnamedLocalNoLinkageFinder::VisitUnaryTransformType(
const UnaryTransformType*) {
return false;
@@ -6545,6 +5835,11 @@ bool UnnamedLocalNoLinkageFinder::VisitBitIntType(const BitIntType *T) {
return false;
}
+bool UnnamedLocalNoLinkageFinder::VisitArrayParameterType(
+ const ArrayParameterType *T) {
+ return VisitConstantArrayType(T);
+}
+
bool UnnamedLocalNoLinkageFinder::VisitDependentBitIntType(
const DependentBitIntType *T) {
return false;
@@ -6593,11 +5888,6 @@ bool UnnamedLocalNoLinkageFinder::VisitNestedNameSpecifier(
llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}
-/// Check a template argument against its corresponding
-/// template type parameter.
-///
-/// This routine implements the semantics of C++ [temp.arg.type]. It
-/// returns true if an error occurred, and false otherwise.
bool Sema::CheckTemplateArgument(TypeSourceInfo *ArgInfo) {
assert(ArgInfo && "invalid TypeSourceInfo");
QualType Arg = ArgInfo->getType();
@@ -7200,13 +6490,6 @@ CheckTemplateArgumentPointerToMember(Sema &S, NonTypeTemplateParmDecl *Param,
return true;
}
-/// Check a template argument against its corresponding
-/// non-type template parameter.
-///
-/// This routine implements the semantics of C++ [temp.arg.nontype].
-/// If an error occurred, it returns ExprError(); otherwise, it
-/// returns the converted template argument. \p ParamType is the
-/// type of the non-type template parameter after it has been instantiated.
ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType ParamType, Expr *Arg,
TemplateArgument &SugaredConverted,
@@ -7222,7 +6505,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// FIXME: The language rules don't say what happens in this case.
// FIXME: We get an opaque dependent type out of decltype(auto) if the
// expression is merely instantiation-dependent; is this enough?
- if (CTAK == CTAK_Deduced && Arg->isTypeDependent()) {
+ if (Arg->isTypeDependent()) {
auto *AT = dyn_cast<AutoType>(DeducedT);
if (AT && AT->isDecltypeAuto()) {
SugaredConverted = TemplateArgument(Arg);
@@ -7263,10 +6546,10 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// along with the other associated constraints after
// checking the template argument list.
/*IgnoreConstraints=*/true);
- if (Result == TDK_AlreadyDiagnosed) {
+ if (Result == TemplateDeductionResult::AlreadyDiagnosed) {
if (ParamType.isNull())
return ExprError();
- } else if (Result != TDK_Success) {
+ } else if (Result != TemplateDeductionResult::Success) {
Diag(Arg->getExprLoc(),
diag::err_non_type_template_parm_type_deduction_failure)
<< Param->getDeclName() << Param->getType() << Arg->getType()
@@ -7814,14 +7097,10 @@ static void DiagnoseTemplateParameterListArityMismatch(
Sema &S, TemplateParameterList *New, TemplateParameterList *Old,
Sema::TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc);
-/// Check a template argument against its corresponding
-/// template template parameter.
-///
-/// This routine implements the semantics of C++ [temp.arg.template].
-/// It returns true if an error occurred, and false otherwise.
bool Sema::CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
- TemplateArgumentLoc &Arg) {
+ TemplateArgumentLoc &Arg,
+ bool IsDeduced) {
TemplateName Name = Arg.getArgument().getAsTemplateOrTemplatePattern();
TemplateDecl *Template = Name.getAsTemplateDecl();
if (!Template) {
@@ -7859,9 +7138,6 @@ bool Sema::CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
// C++1z [temp.arg.template]p3: (DR 150)
// A template-argument matches a template template-parameter P when P
// is at least as specialized as the template-argument A.
- // FIXME: We should enable RelaxedTemplateTemplateArgs by default as it is a
- // defect report resolution from C++17 and shouldn't be introduced by
- // concepts.
if (getLangOpts().RelaxedTemplateTemplateArgs) {
// Quick check for the common case:
// If P contains a parameter pack, then A [...] matches P if each of A's
@@ -7876,8 +7152,8 @@ bool Sema::CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
!Template->hasAssociatedConstraints())
return false;
- if (isTemplateTemplateParameterAtLeastAsSpecializedAs(Params, Template,
- Arg.getLocation())) {
+ if (isTemplateTemplateParameterAtLeastAsSpecializedAs(
+ Params, Template, Arg.getLocation(), IsDeduced)) {
// P2113
// C++20[temp.func.order]p2
// [...] If both deductions succeed, the partial ordering selects the
@@ -7950,14 +7226,9 @@ void Sema::NoteTemplateParameterLocation(const NamedDecl &Decl) {
diag::note_template_param_external);
}
-/// Given a non-type template argument that refers to a
-/// declaration and the type of its corresponding non-type template
-/// parameter, produce an expression that properly refers to that
-/// declaration.
-ExprResult
-Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
- QualType ParamType,
- SourceLocation Loc) {
+ExprResult Sema::BuildExpressionFromDeclTemplateArgument(
+ const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc,
+ NamedDecl *TemplateParam) {
// C++ [temp.param]p8:
//
// A non-type template-parameter of type "array of T" or
@@ -8024,6 +7295,18 @@ Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
} else {
assert(ParamType->isReferenceType() &&
"unexpected type for decl template argument");
+ if (NonTypeTemplateParmDecl *NTTP =
+ dyn_cast_if_present<NonTypeTemplateParmDecl>(TemplateParam)) {
+ QualType TemplateParamType = NTTP->getType();
+ const AutoType *AT = TemplateParamType->getAs<AutoType>();
+ if (AT && AT->isDecltypeAuto()) {
+ RefExpr = new (getASTContext()) SubstNonTypeTemplateParmExpr(
+ ParamType->getPointeeType(), RefExpr.get()->getValueKind(),
+ RefExpr.get()->getExprLoc(), RefExpr.get(), VD, NTTP->getIndex(),
+ /*PackIndex=*/std::nullopt,
+ /*RefParam=*/true);
+ }
+ }
}
// At this point we should have the right value category.
@@ -8390,29 +7673,6 @@ void DiagnoseTemplateParameterListArityMismatch(Sema &S,
<< SourceRange(Old->getTemplateLoc(), Old->getRAngleLoc());
}
-/// Determine whether the given template parameter lists are
-/// equivalent.
-///
-/// \param New The new template parameter list, typically written in the
-/// source code as part of a new template declaration.
-///
-/// \param Old The old template parameter list, typically found via
-/// name lookup of the template declared with this template parameter
-/// list.
-///
-/// \param Complain If true, this routine will produce a diagnostic if
-/// the template parameter lists are not equivalent.
-///
-/// \param Kind describes how we are to match the template parameter lists.
-///
-/// \param TemplateArgLoc If this source location is valid, then we
-/// are actually checking the template parameter list of a template
-/// argument (New) against the template parameter list of its
-/// corresponding template template parameter (Old). We produce
-/// slightly different diagnostics in this scenario.
-///
-/// \returns True if the template parameter lists are equal, false
-/// otherwise.
bool Sema::TemplateParameterListsAreEqual(
const TemplateCompareNewDeclInfo &NewInstFrom, TemplateParameterList *New,
const NamedDecl *OldInstFrom, TemplateParameterList *Old, bool Complain,
@@ -8510,19 +7770,13 @@ bool Sema::TemplateParameterListsAreEqual(
return true;
}
-/// Check whether a template can be declared within this scope.
-///
-/// If the template declaration is valid in this scope, returns
-/// false. Otherwise, issues a diagnostic and returns true.
bool
Sema::CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams) {
if (!S)
return false;
// Find the nearest enclosing declaration scope.
- while ((S->getFlags() & Scope::DeclScope) == 0 ||
- (S->getFlags() & Scope::TemplateParamScope) != 0)
- S = S->getParent();
+ S = S->getDeclParent();
// C++ [temp.pre]p6: [P2096]
// A template, explicit specialization, or partial specialization shall not
@@ -8784,17 +8038,6 @@ static bool CheckNonTypeTemplatePartialSpecializationArgs(
return false;
}
-/// Check the non-type template arguments of a class template
-/// partial specialization according to C++ [temp.class.spec]p9.
-///
-/// \param TemplateNameLoc the location of the template name.
-/// \param PrimaryTemplate the template parameters of the primary class
-/// template.
-/// \param NumExplicit the number of explicitly-specified template arguments.
-/// \param TemplateArgs the template arguments of the class template
-/// partial specialization.
-///
-/// \returns \c true if there was an error, \c false otherwise.
bool Sema::CheckTemplatePartialSpecializationArgs(
SourceLocation TemplateNameLoc, TemplateDecl *PrimaryTemplate,
unsigned NumExplicit, ArrayRef<TemplateArgument> TemplateArgs) {
@@ -8825,12 +8068,8 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody) {
- assert(TUK != TUK_Reference && "References are not specializations");
+ assert(TUK != TagUseKind::Reference && "References are not specializations");
- // NOTE: KWLoc is the location of the tag keyword. This will instead
- // store the location of the outermost template keyword in the declaration.
- SourceLocation TemplateKWLoc = TemplateParameterLists.size() > 0
- ? TemplateParameterLists[0]->getTemplateLoc() : KWLoc;
SourceLocation TemplateNameLoc = TemplateId.TemplateNameLoc;
SourceLocation LAngleLoc = TemplateId.LAngleLoc;
SourceLocation RAngleLoc = TemplateId.RAngleLoc;
@@ -8850,6 +8089,15 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
bool isMemberSpecialization = false;
bool isPartialSpecialization = false;
+ if (SS.isSet()) {
+ if (TUK != TagUseKind::Reference && TUK != TagUseKind::Friend &&
+ diagnoseQualifiedDeclaration(SS, ClassTemplate->getDeclContext(),
+ ClassTemplate->getDeclName(),
+ TemplateNameLoc, &TemplateId,
+ /*IsMemberSpecialization=*/false))
+ return true;
+ }
+
// Check the validity of the template headers that introduce this
// template.
// FIXME: We probably shouldn't complain about these headers for
@@ -8857,9 +8105,8 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
bool Invalid = false;
TemplateParameterList *TemplateParams =
MatchTemplateParametersToScopeSpecifier(
- KWLoc, TemplateNameLoc, SS, &TemplateId,
- TemplateParameterLists, TUK == TUK_Friend, isMemberSpecialization,
- Invalid);
+ KWLoc, TemplateNameLoc, SS, &TemplateId, TemplateParameterLists,
+ TUK == TagUseKind::Friend, isMemberSpecialization, Invalid);
if (Invalid)
return true;
@@ -8870,7 +8117,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
if (TemplateParams && TemplateParams->size() > 0) {
isPartialSpecialization = true;
- if (TUK == TUK_Friend) {
+ if (TUK == TagUseKind::Friend) {
Diag(KWLoc, diag::err_partial_specialization_friend)
<< SourceRange(LAngleLoc, RAngleLoc);
return true;
@@ -8889,10 +8136,10 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
}
} else if (NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(Param)) {
- if (Expr *DefArg = NTTP->getDefaultArgument()) {
+ if (NTTP->hasDefaultArgument()) {
Diag(NTTP->getDefaultArgumentLoc(),
diag::err_default_arg_in_partial_spec)
- << DefArg->getSourceRange();
+ << NTTP->getDefaultArgument().getSourceRange();
NTTP->removeDefaultArgument();
}
} else {
@@ -8906,14 +8153,15 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
}
}
} else if (TemplateParams) {
- if (TUK == TUK_Friend)
+ if (TUK == TagUseKind::Friend)
Diag(KWLoc, diag::err_template_spec_friend)
<< FixItHint::CreateRemoval(
SourceRange(TemplateParams->getTemplateLoc(),
TemplateParams->getRAngleLoc()))
<< SourceRange(LAngleLoc, RAngleLoc);
} else {
- assert(TUK == TUK_Friend && "should have a 'template<>' for this decl");
+ assert(TUK == TagUseKind::Friend &&
+ "should have a 'template<>' for this decl");
}
// Check that the specialization uses the same tag kind as the
@@ -8921,8 +8169,8 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
assert(Kind != TagTypeKind::Enum &&
"Invalid enum tag in class template spec!");
- if (!isAcceptableTagRedeclaration(ClassTemplate->getTemplatedDecl(),
- Kind, TUK == TUK_Definition, KWLoc,
+ if (!isAcceptableTagRedeclaration(ClassTemplate->getTemplatedDecl(), Kind,
+ TUK == TagUseKind::Definition, KWLoc,
ClassTemplate->getIdentifier())) {
Diag(KWLoc, diag::err_use_with_wrong_tag)
<< ClassTemplate
@@ -8969,6 +8217,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
Diag(TemplateNameLoc, diag::err_partial_spec_fully_specialized)
<< ClassTemplate->getDeclName();
isPartialSpecialization = false;
+ Invalid = true;
}
}
@@ -8985,7 +8234,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
// Check whether we can declare a class template specialization in
// the current scope.
- if (TUK != TUK_Friend &&
+ if (TUK != TagUseKind::Friend &&
CheckTemplateSpecializationScope(*this, ClassTemplate, PrevDecl,
TemplateNameLoc,
isPartialSpecialization))
@@ -9012,8 +8261,8 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
// This rule has since been removed, because it's redundant given DR1495,
// but we keep it because it produces better diagnostics and recovery.
Diag(TemplateNameLoc, diag::err_partial_spec_args_match_primary_template)
- << /*class template*/0 << (TUK == TUK_Definition)
- << FixItHint::CreateRemoval(SourceRange(LAngleLoc, RAngleLoc));
+ << /*class template*/ 0 << (TUK == TagUseKind::Definition)
+ << FixItHint::CreateRemoval(SourceRange(LAngleLoc, RAngleLoc));
return CheckClassTemplate(S, TagSpec, TUK, KWLoc, SS,
ClassTemplate->getIdentifier(),
TemplateNameLoc,
@@ -9032,7 +8281,8 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
ClassTemplatePartialSpecializationDecl::Create(
Context, Kind, ClassTemplate->getDeclContext(), KWLoc,
TemplateNameLoc, TemplateParams, ClassTemplate, CanonicalConverted,
- TemplateArgs, CanonType, PrevPartial);
+ CanonType, PrevPartial);
+ Partial->setTemplateArgsAsWritten(TemplateArgs);
SetNestedNameSpecifier(*this, Partial, SS);
if (TemplateParameterLists.size() > 1 && SS.isSet()) {
Partial->setTemplateParameterListsInfo(
@@ -9055,6 +8305,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
Specialization = ClassTemplateSpecializationDecl::Create(
Context, Kind, ClassTemplate->getDeclContext(), KWLoc, TemplateNameLoc,
ClassTemplate, CanonicalConverted, PrevDecl);
+ Specialization->setTemplateArgsAsWritten(TemplateArgs);
SetNestedNameSpecifier(*this, Specialization, SS);
if (TemplateParameterLists.size() > 0) {
Specialization->setTemplateParameterListsInfo(Context,
@@ -9103,11 +8354,11 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
}
// If this is not a friend, note that this is an explicit specialization.
- if (TUK != TUK_Friend)
+ if (TUK != TagUseKind::Friend)
Specialization->setSpecializationKind(TSK_ExplicitSpecialization);
// Check that this isn't a redefinition of this specialization.
- if (TUK == TUK_Definition) {
+ if (TUK == TagUseKind::Definition) {
RecordDecl *Def = Specialization->getDefinition();
NamedDecl *Hidden = nullptr;
if (Def && SkipBody && !hasVisibleDefinition(Def, &Hidden)) {
@@ -9124,10 +8375,11 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
}
ProcessDeclAttributeList(S, Specialization, Attr);
+ ProcessAPINotes(Specialization);
// Add alignment attributes if necessary; these attributes are checked when
// the ASTContext lays out the structure.
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
+ if (TUK == TagUseKind::Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
AddAlignmentAttributesForRecord(Specialization);
AddMsStructLayoutForRecord(Specialization);
}
@@ -9137,21 +8389,6 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
<< (isPartialSpecialization? 1 : 0)
<< FixItHint::CreateRemoval(ModulePrivateLoc);
- // Build the fully-sugared type for this class template
- // specialization as the user wrote in the specialization
- // itself. This means that we'll pretty-print the type retrieved
- // from the specialization's declaration the way that the user
- // actually wrote the specialization, rather than formatting the
- // name based on the "canonical" representation used to store the
- // template arguments in the specialization.
- TypeSourceInfo *WrittenTy
- = Context.getTemplateSpecializationTypeInfo(Name, TemplateNameLoc,
- TemplateArgs, CanonType);
- if (TUK != TUK_Friend) {
- Specialization->setTypeAsWritten(WrittenTy);
- Specialization->setTemplateKeywordLoc(TemplateKWLoc);
- }
-
// C++ [temp.expl.spec]p9:
// A template explicit specialization is in the scope of the
// namespace in which the template was defined.
@@ -9163,10 +8400,19 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
Specialization->setLexicalDeclContext(CurContext);
// We may be starting the definition of this specialization.
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip))
+ if (TUK == TagUseKind::Definition && (!SkipBody || !SkipBody->ShouldSkip))
Specialization->startDefinition();
- if (TUK == TUK_Friend) {
+ if (TUK == TagUseKind::Friend) {
+ // Build the fully-sugared type for this class template
+ // specialization as the user wrote in the specialization
+ // itself. This means that we'll pretty-print the type retrieved
+ // from the specialization's declaration the way that the user
+ // actually wrote the specialization, rather than formatting the
+ // name based on the "canonical" representation used to store the
+ // template arguments in the specialization.
+ TypeSourceInfo *WrittenTy = Context.getTemplateSpecializationTypeInfo(
+ Name, TemplateNameLoc, TemplateArgs, CanonType);
FriendDecl *Friend = FriendDecl::Create(Context, CurContext,
TemplateNameLoc,
WrittenTy,
@@ -9183,6 +8429,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
if (SkipBody && SkipBody->ShouldSkip)
return SkipBody->Previous;
+ Specialization->setInvalidDecl(Invalid);
return Specialization;
}
@@ -9194,10 +8441,10 @@ Decl *Sema::ActOnTemplateDeclarator(Scope *S,
return NewDecl;
}
-Decl *Sema::ActOnConceptDefinition(Scope *S,
- MultiTemplateParamsArg TemplateParameterLists,
- IdentifierInfo *Name, SourceLocation NameLoc,
- Expr *ConstraintExpr) {
+Decl *Sema::ActOnConceptDefinition(
+ Scope *S, MultiTemplateParamsArg TemplateParameterLists,
+ const IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr,
+ const ParsedAttributesView &Attrs) {
DeclContext *DC = CurContext;
if (!DC->getRedeclContext()->isFileContext()) {
@@ -9259,6 +8506,9 @@ Decl *Sema::ActOnConceptDefinition(Scope *S,
ActOnDocumentableDecl(NewDecl);
if (AddToScope)
PushOnScopeChains(NewDecl, S);
+
+ ProcessDeclAttributeList(S, NewDecl, Attrs);
+
return NewDecl;
}
@@ -9332,28 +8582,6 @@ static SourceLocation DiagLocForExplicitInstantiation(
return PrevDiagLoc;
}
-/// Diagnose cases where we have an explicit template specialization
-/// before/after an explicit template instantiation, producing diagnostics
-/// for those cases where they are required and determining whether the
-/// new specialization/instantiation will have any effect.
-///
-/// \param NewLoc the location of the new explicit specialization or
-/// instantiation.
-///
-/// \param NewTSK the kind of the new explicit specialization or instantiation.
-///
-/// \param PrevDecl the previous declaration of the entity.
-///
-/// \param PrevTSK the kind of the old explicit specialization or instantiatin.
-///
-/// \param PrevPointOfInstantiation if valid, indicates where the previous
-/// declaration was instantiated (either implicitly or explicitly).
-///
-/// \param HasNoEffect will be set to true to indicate that the new
-/// specialization or instantiation has no effect and should be ignored.
-///
-/// \returns true if there was an error that should prevent the introduction of
-/// the new declaration into the AST, false otherwise.
bool
Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
@@ -9520,21 +8748,6 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
llvm_unreachable("Missing specialization/instantiation case?");
}
-/// Perform semantic analysis for the given dependent function
-/// template specialization.
-///
-/// The only possible way to get a dependent function template specialization
-/// is with a friend declaration, like so:
-///
-/// \code
-/// template \<class T> void foo(T);
-/// template \<class T> class A {
-/// friend void foo<>(T);
-/// };
-/// \endcode
-///
-/// There really isn't any useful analysis we can do here, so we
-/// just store the information.
bool Sema::CheckDependentFunctionTemplateSpecialization(
FunctionDecl *FD, const TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous) {
@@ -9577,28 +8790,6 @@ bool Sema::CheckDependentFunctionTemplateSpecialization(
return false;
}
-/// Perform semantic analysis for the given function template
-/// specialization.
-///
-/// This routine performs all of the semantic analysis required for an
-/// explicit function template specialization. On successful completion,
-/// the function declaration \p FD will become a function template
-/// specialization.
-///
-/// \param FD the function declaration, which will be updated to become a
-/// function template specialization.
-///
-/// \param ExplicitTemplateArgs the explicitly-provided template arguments,
-/// if any. Note that this may be valid info even when 0 arguments are
-/// explicitly provided as in, e.g., \c void sort<>(char*, char*);
-/// as it anyway contains info on the angle brackets locations.
-///
-/// \param Previous the set of declarations that may be specialized by
-/// this function specialization.
-///
-/// \param QualifiedFriend whether this is a lookup for a qualified friend
-/// declaration with no explicit template argument list that might be
-/// befriending a function template specialization.
bool Sema::CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend) {
@@ -9622,15 +8813,20 @@ bool Sema::CheckFunctionTemplateSpecialization(
Ovl->getDeclContext()->getRedeclContext()))
continue;
+ QualType FT = FD->getType();
+ // C++11 [dcl.constexpr]p8:
+ // A constexpr specifier for a non-static member function that is not
+ // a constructor declares that member function to be const.
+ //
// When matching a constexpr member function template specialization
// against the primary template, we don't yet know whether the
// specialization has an implicit 'const' (because we don't know whether
// it will be a static member function until we know which template it
- // specializes), so adjust it now assuming it specializes this template.
- QualType FT = FD->getType();
- if (FD->isConstexpr()) {
- CXXMethodDecl *OldMD =
- dyn_cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl());
+ // specializes). This rule was removed in C++14.
+ if (auto *NewMD = dyn_cast<CXXMethodDecl>(FD);
+ !getLangOpts().CPlusPlus14 && NewMD && NewMD->isConstexpr() &&
+ !isa<CXXConstructorDecl, CXXDestructorDecl>(NewMD)) {
+ auto *OldMD = dyn_cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl());
if (OldMD && OldMD->isConst()) {
const FunctionProtoType *FPT = FT->castAs<FunctionProtoType>();
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
@@ -9655,8 +8851,8 @@ bool Sema::CheckFunctionTemplateSpecialization(
FunctionDecl *Specialization = nullptr;
if (TemplateDeductionResult TDK = DeduceTemplateArguments(
cast<FunctionTemplateDecl>(FunTmpl->getFirstDecl()),
- ExplicitTemplateArgs ? &Args : nullptr, FT, Specialization,
- Info)) {
+ ExplicitTemplateArgs ? &Args : nullptr, FT, Specialization, Info);
+ TDK != TemplateDeductionResult::Success) {
// Template argument deduction failed; record why it failed, so
// that we can provide nifty diagnostics.
FailedCandidates.addCandidate().set(
@@ -9672,12 +8868,13 @@ bool Sema::CheckFunctionTemplateSpecialization(
// take target attributes into account, we reject candidates
// here that have a different target.
if (LangOpts.CUDA &&
- IdentifyCUDATarget(Specialization,
- /* IgnoreImplicitHDAttr = */ true) !=
- IdentifyCUDATarget(FD, /* IgnoreImplicitHDAttr = */ true)) {
+ CUDA().IdentifyTarget(Specialization,
+ /* IgnoreImplicitHDAttr = */ true) !=
+ CUDA().IdentifyTarget(FD, /* IgnoreImplicitHDAttr = */ true)) {
FailedCandidates.addCandidate().set(
I.getPair(), FunTmpl->getTemplatedDecl(),
- MakeDeductionFailureInfo(Context, TDK_CUDATargetMismatch, Info));
+ MakeDeductionFailureInfo(
+ Context, TemplateDeductionResult::CUDATargetMismatch, Info));
continue;
}
@@ -9718,6 +8915,40 @@ bool Sema::CheckFunctionTemplateSpecialization(
// Ignore access information; it doesn't figure into redeclaration checking.
FunctionDecl *Specialization = cast<FunctionDecl>(*Result);
+ // C++23 [except.spec]p13:
+ // An exception specification is considered to be needed when:
+ // - [...]
+ // - the exception specification is compared to that of another declaration
+ // (e.g., an explicit specialization or an overriding virtual function);
+ // - [...]
+ //
+ // The exception specification of a defaulted function is evaluated as
+ // described above only when needed; similarly, the noexcept-specifier of a
+ // specialization of a function template or member function of a class
+ // template is instantiated only when needed.
+ //
+ // The standard doesn't specify what the "comparison with another declaration"
+ // entails, nor the exact circumstances in which it occurs. Moreover, it does
+ // not state which properties of an explicit specialization must match the
+ // primary template.
+ //
+ // We assume that an explicit specialization must correspond with (per
+ // [basic.scope.scope]p4) and declare the same entity as (per [basic.link]p8)
+ // the declaration produced by substitution into the function template.
+ //
+ // Since the determination whether two function declarations correspond does
+ // not consider exception specification, we only need to instantiate it once
+ // we determine the primary template when comparing types per
+ // [basic.link]p11.1.
+ auto *SpecializationFPT =
+ Specialization->getType()->castAs<FunctionProtoType>();
+ // If the function has a dependent exception specification, resolve it after
+ // we have selected the primary template so we can check whether it matches.
+ if (getLangOpts().CPlusPlus17 &&
+ isUnresolvedExceptionSpec(SpecializationFPT->getExceptionSpecType()) &&
+ !ResolveExceptionSpec(FD->getLocation(), SpecializationFPT))
+ return true;
+
FunctionTemplateSpecializationInfo *SpecInfo
= Specialization->getTemplateSpecializationInfo();
assert(SpecInfo && "Function template specialization info missing?");
@@ -9797,8 +9028,8 @@ bool Sema::CheckFunctionTemplateSpecialization(
// specialization, with the template arguments from the previous
// specialization.
// Take copies of (semantic and syntactic) template argument lists.
- const TemplateArgumentList* TemplArgs = new (Context)
- TemplateArgumentList(Specialization->getTemplateSpecializationArgs());
+ TemplateArgumentList *TemplArgs = TemplateArgumentList::CreateCopy(
+ Context, Specialization->getTemplateSpecializationArgs()->asArray());
FD->setFunctionTemplateSpecialization(
Specialization->getPrimaryTemplate(), TemplArgs, /*InsertPos=*/nullptr,
SpecInfo->getTemplateSpecializationKind(),
@@ -9810,7 +9041,7 @@ bool Sema::CheckFunctionTemplateSpecialization(
// virtue e.g. of being constexpr, and it passes these implicit
// attributes on to its specializations.)
if (LangOpts.CUDA)
- inheritCUDATargetAttrs(FD, *Specialization->getPrimaryTemplate());
+ CUDA().inheritTargetAttrs(FD, *Specialization->getPrimaryTemplate());
// The "previous declaration" for this function template specialization is
// the prior function template specialization.
@@ -9819,20 +9050,6 @@ bool Sema::CheckFunctionTemplateSpecialization(
return false;
}
-/// Perform semantic analysis for the given non-template member
-/// specialization.
-///
-/// This routine performs all of the semantic analysis required for an
-/// explicit member function specialization. On successful completion,
-/// the function declaration \p FD will become a member function
-/// specialization.
-///
-/// \param Member the member declaration, which will be updated to become a
-/// specialization.
-///
-/// \param Previous the set of declarations, one of which may be specialized
-/// by this function specialization; the set will be modified to contain the
-/// redeclared member.
bool
Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
assert(!isa<TemplateDecl>(Member) && "Only for non-template members");
@@ -9846,24 +9063,53 @@ Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
if (Previous.empty()) {
// Nowhere to look anyway.
} else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Member)) {
+ SmallVector<FunctionDecl *> Candidates;
+ bool Ambiguous = false;
for (LookupResult::iterator I = Previous.begin(), E = Previous.end();
I != E; ++I) {
- NamedDecl *D = (*I)->getUnderlyingDecl();
- if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
- QualType Adjusted = Function->getType();
- if (!hasExplicitCallingConv(Adjusted))
- Adjusted = adjustCCAndNoReturn(Adjusted, Method->getType());
- // This doesn't handle deduced return types, but both function
- // declarations should be undeduced at this point.
- if (Context.hasSameType(Adjusted, Method->getType())) {
- FoundInstantiation = *I;
- Instantiation = Method;
- InstantiatedFrom = Method->getInstantiatedFromMemberFunction();
- MSInfo = Method->getMemberSpecializationInfo();
- break;
- }
+ CXXMethodDecl *Method =
+ dyn_cast<CXXMethodDecl>((*I)->getUnderlyingDecl());
+ if (!Method)
+ continue;
+ QualType Adjusted = Function->getType();
+ if (!hasExplicitCallingConv(Adjusted))
+ Adjusted = adjustCCAndNoReturn(Adjusted, Method->getType());
+ // This doesn't handle deduced return types, but both function
+ // declarations should be undeduced at this point.
+ if (!Context.hasSameType(Adjusted, Method->getType()))
+ continue;
+ if (ConstraintSatisfaction Satisfaction;
+ Method->getTrailingRequiresClause() &&
+ (CheckFunctionConstraints(Method, Satisfaction,
+ /*UsageLoc=*/Member->getLocation(),
+ /*ForOverloadResolution=*/true) ||
+ !Satisfaction.IsSatisfied))
+ continue;
+ Candidates.push_back(Method);
+ FunctionDecl *MoreConstrained =
+ Instantiation ? getMoreConstrainedFunction(
+ Method, cast<FunctionDecl>(Instantiation))
+ : Method;
+ if (!MoreConstrained) {
+ Ambiguous = true;
+ continue;
+ }
+ if (MoreConstrained == Method) {
+ Ambiguous = false;
+ FoundInstantiation = *I;
+ Instantiation = Method;
+ InstantiatedFrom = Method->getInstantiatedFromMemberFunction();
+ MSInfo = Method->getMemberSpecializationInfo();
}
}
+ if (Ambiguous) {
+ Diag(Member->getLocation(), diag::err_function_member_spec_ambiguous)
+ << Member << (InstantiatedFrom ? InstantiatedFrom : Instantiation);
+ for (FunctionDecl *Candidate : Candidates)
+ Diag(Candidate->getLocation(), diag::note_function_member_spec_matched)
+ << Candidate;
+ return true;
+ }
} else if (isa<VarDecl>(Member)) {
VarDecl *PrevVar;
if (Previous.isSingleResult() &&
@@ -10153,7 +9399,6 @@ static void dllExportImportClassTemplateSpecialization(
S.referenceDLLExportedClassMethods();
}
-// Explicit instantiation of a class template specialization
DeclResult Sema::ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
@@ -10337,26 +9582,16 @@ DeclResult Sema::ActOnExplicitInstantiation(
}
}
- // Build the fully-sugared type for this explicit instantiation as
- // the user wrote in the explicit instantiation itself. This means
- // that we'll pretty-print the type retrieved from the
- // specialization's declaration the way that the user actually wrote
- // the explicit instantiation, rather than formatting the name based
- // on the "canonical" representation used to store the template
- // arguments in the specialization.
- TypeSourceInfo *WrittenTy
- = Context.getTemplateSpecializationTypeInfo(Name, TemplateNameLoc,
- TemplateArgs,
- Context.getTypeDeclType(Specialization));
- Specialization->setTypeAsWritten(WrittenTy);
+ Specialization->setTemplateArgsAsWritten(TemplateArgs);
// Set source locations for keywords.
- Specialization->setExternLoc(ExternLoc);
+ Specialization->setExternKeywordLoc(ExternLoc);
Specialization->setTemplateKeywordLoc(TemplateLoc);
Specialization->setBraceRange(SourceRange());
bool PreviouslyDLLExported = Specialization->hasAttr<DLLExportAttr>();
ProcessDeclAttributeList(S, Specialization, Attr);
+ ProcessAPINotes(Specialization);
// Add the explicit instantiation into its lexical context. However,
// since explicit instantiations are never found by name lookup, we
@@ -10402,8 +9637,7 @@ DeclResult Sema::ActOnExplicitInstantiation(
Def->setTemplateSpecializationKind(TSK);
if (!getDLLAttr(Def) && getDLLAttr(Specialization) &&
- (Context.getTargetInfo().shouldDLLImportComdatSymbols() &&
- !Context.getTargetInfo().getTriple().isPS())) {
+ Context.getTargetInfo().shouldDLLImportComdatSymbols()) {
// An explicit instantiation definition can add a dll attribute to a
// template with a previous instantiation declaration. MinGW doesn't
// allow this.
@@ -10420,8 +9654,7 @@ DeclResult Sema::ActOnExplicitInstantiation(
bool NewlyDLLExported =
!PreviouslyDLLExported && Specialization->hasAttr<DLLExportAttr>();
if (Old_TSK == TSK_ImplicitInstantiation && NewlyDLLExported &&
- (Context.getTargetInfo().shouldDLLImportComdatSymbols() &&
- !Context.getTargetInfo().getTriple().isPS())) {
+ Context.getTargetInfo().shouldDLLImportComdatSymbols()) {
// An explicit instantiation definition can add a dll attribute to a
// template with a previous implicit instantiation. MinGW doesn't allow
// this. We limit clang to only adding dllexport, to avoid potentially
@@ -10460,7 +9693,6 @@ DeclResult Sema::ActOnExplicitInstantiation(
return Specialization;
}
-// Explicit instantiation of a member class of a class template.
DeclResult
Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc, unsigned TagSpec,
@@ -10470,11 +9702,13 @@ Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
bool Owned = false;
bool IsDependent = false;
- Decl *TagD = ActOnTag(S, TagSpec, Sema::TUK_Reference, KWLoc, SS, Name,
- NameLoc, Attr, AS_none, /*ModulePrivateLoc=*/SourceLocation(),
+ Decl *TagD =
+ ActOnTag(S, TagSpec, TagUseKind::Reference, KWLoc, SS, Name, NameLoc,
+ Attr, AS_none, /*ModulePrivateLoc=*/SourceLocation(),
MultiTemplateParamsArg(), Owned, IsDependent, SourceLocation(),
false, TypeResult(), /*IsTypeSpecifier*/ false,
- /*IsTemplateParamOrArg*/ false, /*OOK=*/OOK_Outside).get();
+ /*IsTemplateParamOrArg*/ false, /*OOK=*/OOK_Outside)
+ .get();
assert(!IsDependent && "explicit instantiation of dependent name not yet handled");
if (!TagD)
@@ -10591,11 +9825,8 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
return true;
}
- // The scope passed in may not be a decl scope. Zip up the scope tree until
- // we find one that is.
- while ((S->getFlags() & Scope::DeclScope) == 0 ||
- (S->getFlags() & Scope::TemplateParamScope) != 0)
- S = S->getParent();
+ // Get the innermost enclosing declaration scope.
+ S = S->getDeclParent();
// Determine the type of the declaration.
TypeSourceInfo *T = GetTypeForDeclarator(D);
@@ -10653,7 +9884,8 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
: TSK_ExplicitInstantiationDeclaration;
LookupResult Previous(*this, NameInfo, LookupOrdinaryName);
- LookupParsedName(Previous, S, &D.getCXXScopeSpec());
+ LookupParsedName(Previous, S, &D.getCXXScopeSpec(),
+ /*ObjectType=*/QualType());
if (!R->isFunctionType()) {
// C++ [temp.explicit]p1:
@@ -10765,8 +9997,16 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
if (!HasNoEffect) {
// Instantiate static data member or variable template.
Prev->setTemplateSpecializationKind(TSK, D.getIdentifierLoc());
+ if (auto *VTSD = dyn_cast<VarTemplatePartialSpecializationDecl>(Prev)) {
+ VTSD->setExternKeywordLoc(ExternLoc);
+ VTSD->setTemplateKeywordLoc(TemplateLoc);
+ }
+
// Merge attributes.
ProcessDeclAttributeList(S, Prev, D.getDeclSpec().getAttributes());
+ if (PrevTemplate)
+ ProcessAPINotes(Prev);
+
if (TSK == TSK_ExplicitInstantiationDefinition)
InstantiateVariableDefinition(D.getIdentifierLoc(), Prev);
}
@@ -10827,11 +10067,10 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
TemplateDeductionInfo Info(FailedCandidates.getLocation());
FunctionDecl *Specialization = nullptr;
- if (TemplateDeductionResult TDK
- = DeduceTemplateArguments(FunTmpl,
- (HasExplicitTemplateArgs ? &TemplateArgs
- : nullptr),
- R, Specialization, Info)) {
+ if (TemplateDeductionResult TDK = DeduceTemplateArguments(
+ FunTmpl, (HasExplicitTemplateArgs ? &TemplateArgs : nullptr), R,
+ Specialization, Info);
+ TDK != TemplateDeductionResult::Success) {
// Keep track of almost-matches.
FailedCandidates.addCandidate()
.set(P.getPair(), FunTmpl->getTemplatedDecl(),
@@ -10846,12 +10085,13 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
// target attributes into account, we reject candidates here that
// have a different target.
if (LangOpts.CUDA &&
- IdentifyCUDATarget(Specialization,
- /* IgnoreImplicitHDAttr = */ true) !=
- IdentifyCUDATarget(D.getDeclSpec().getAttributes())) {
+ CUDA().IdentifyTarget(Specialization,
+ /* IgnoreImplicitHDAttr = */ true) !=
+ CUDA().IdentifyTarget(D.getDeclSpec().getAttributes())) {
FailedCandidates.addCandidate().set(
P.getPair(), FunTmpl->getTemplatedDecl(),
- MakeDeductionFailureInfo(Context, TDK_CUDATargetMismatch, Info));
+ MakeDeductionFailureInfo(
+ Context, TemplateDeductionResult::CUDATargetMismatch, Info));
continue;
}
@@ -10942,6 +10182,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
}
ProcessDeclAttributeList(S, Specialization, D.getDeclSpec().getAttributes());
+ ProcessAPINotes(Specialization);
// In MSVC mode, dllimported explicit instantiation definitions are treated as
// instantiation declarations.
@@ -10984,10 +10225,11 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
return (Decl*) nullptr;
}
-TypeResult
-Sema::ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
- const CXXScopeSpec &SS, IdentifierInfo *Name,
- SourceLocation TagLoc, SourceLocation NameLoc) {
+TypeResult Sema::ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
+ const CXXScopeSpec &SS,
+ const IdentifierInfo *Name,
+ SourceLocation TagLoc,
+ SourceLocation NameLoc) {
// This has to hold, because SS is expected to be defined.
assert(Name && "Expected a name in a dependent tag");
@@ -10997,9 +10239,9 @@ Sema::ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
- if (TUK == TUK_Declaration || TUK == TUK_Definition) {
+ if (TUK == TagUseKind::Declaration || TUK == TagUseKind::Definition) {
Diag(NameLoc, diag::err_dependent_tag_decl)
- << (TUK == TUK_Definition) << llvm::to_underlying(Kind)
+ << (TUK == TagUseKind::Definition) << llvm::to_underlying(Kind)
<< SS.getRange();
return true;
}
@@ -11047,14 +10289,10 @@ TypeResult Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
}
TypeResult
-Sema::ActOnTypenameType(Scope *S,
- SourceLocation TypenameLoc,
- const CXXScopeSpec &SS,
- SourceLocation TemplateKWLoc,
- TemplateTy TemplateIn,
- IdentifierInfo *TemplateII,
- SourceLocation TemplateIILoc,
- SourceLocation LAngleLoc,
+Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
+ const CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
+ TemplateTy TemplateIn, const IdentifierInfo *TemplateII,
+ SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc) {
if (TypenameLoc.isValid() && S && !S->getTemplateParamParent())
@@ -11086,6 +10324,13 @@ Sema::ActOnTypenameType(Scope *S,
// Construct a dependent template specialization type.
assert(DTN && "dependent template has non-dependent name?");
assert(DTN->getQualifier() == SS.getScopeRep());
+
+ if (!DTN->isIdentifier()) {
+ Diag(TemplateIILoc, diag::err_template_id_not_a_type) << Template;
+ NoteAllFoundTemplates(Template);
+ return true;
+ }
+
QualType T = Context.getDependentTemplateSpecializationType(
ElaboratedTypeKeyword::Typename, DTN->getQualifier(),
DTN->getIdentifier(), TemplateArgs.arguments());
@@ -11130,7 +10375,6 @@ Sema::ActOnTypenameType(Scope *S,
return CreateParsedType(T, TSI);
}
-
/// Determine whether this failed name lookup should be treated as being
/// disabled by a usage of std::enable_if.
static bool isEnableIf(NestedNameSpecifierLoc NNS, const IdentifierInfo &II,
@@ -11441,32 +10685,6 @@ namespace {
};
} // end anonymous namespace
-/// Rebuilds a type within the context of the current instantiation.
-///
-/// The type \p T is part of the type of an out-of-line member definition of
-/// a class template (or class template partial specialization) that was parsed
-/// and constructed before we entered the scope of the class template (or
-/// partial specialization thereof). This routine will rebuild that type now
-/// that we have entered the declarator's scope, which may produce different
-/// canonical types, e.g.,
-///
-/// \code
-/// template<typename T>
-/// struct X {
-/// typedef T* pointer;
-/// pointer data();
-/// };
-///
-/// template<typename T>
-/// typename X<T>::pointer X<T>::data() { ... }
-/// \endcode
-///
-/// Here, the type "typename X<T>::pointer" will be created as a DependentNameType,
-/// since we do not know that we can look into X<T> when we parsed the type.
-/// This function will rebuild the type, performing the lookup of "pointer"
-/// in X<T> and returning an ElaboratedType whose canonical type is the same
-/// as the canonical type of T*, allowing the return types of the out-of-line
-/// definition and the declaration to match.
TypeSourceInfo *Sema::RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name) {
@@ -11499,8 +10717,6 @@ bool Sema::RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS) {
return false;
}
-/// Rebuild the template parameters now that we know we're in a current
-/// instantiation.
bool Sema::RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params) {
for (unsigned I = 0, N = Params->size(); I != N; ++I) {
@@ -11547,8 +10763,6 @@ bool Sema::RebuildTemplateParamsInCurrentInstantiation(
return false;
}
-/// Produces a formatted string that describes the binding of
-/// template parameters to template arguments.
std::string
Sema::getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args) {
@@ -11794,10 +11008,6 @@ void Sema::checkSpecializationReachability(SourceLocation Loc,
.check(Spec);
}
-/// Returns the top most location responsible for the definition of \p N.
-/// If \p N is a a template specialization, this is the location
-/// of the top of the instantiation stack.
-/// Otherwise, the location of \p N is returned.
SourceLocation Sema::getTopMostPointOfInstantiation(const NamedDecl *N) const {
if (!getLangOpts().CPlusPlus || CodeSynthesisContexts.empty())
return N->getLocation();
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
index e9e7ab5bb669..b7b857ebf804 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -133,19 +133,21 @@ static bool hasSameExtendedValue(llvm::APSInt X, llvm::APSInt Y) {
return X == Y;
}
-static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
+static TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
Sema &S, TemplateParameterList *TemplateParams, QualType Param,
QualType Arg, TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned TDF,
bool PartialOrdering = false, bool DeducedFromArrayBound = false);
-static Sema::TemplateDeductionResult
+enum class PackFold { ParameterToArgument, ArgumentToParameter };
+static TemplateDeductionResult
DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
ArrayRef<TemplateArgument> Ps,
ArrayRef<TemplateArgument> As,
TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
- bool NumberOfArgumentsMustMatch);
+ bool NumberOfArgumentsMustMatch,
+ PackFold PackFold = PackFold::ParameterToArgument);
static void MarkUsedTemplateParameters(ASTContext &Ctx,
const TemplateArgument &TemplateArg,
@@ -393,10 +395,11 @@ checkDeducedTemplateArguments(ASTContext &Context,
/// Deduce the value of the given non-type template parameter
/// as the given deduced template argument. All non-type template parameter
/// deduction is funneled through here.
-static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
+static TemplateDeductionResult DeduceNonTypeTemplateArgument(
Sema &S, TemplateParameterList *TemplateParams,
- const NonTypeTemplateParmDecl *NTTP, const DeducedTemplateArgument &NewDeduced,
- QualType ValueType, TemplateDeductionInfo &Info,
+ const NonTypeTemplateParmDecl *NTTP,
+ const DeducedTemplateArgument &NewDeduced, QualType ValueType,
+ TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
assert(NTTP->getDepth() == Info.getDeducedDepth() &&
"deducing non-type template argument with wrong depth");
@@ -407,19 +410,19 @@ static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
Info.Param = const_cast<NonTypeTemplateParmDecl*>(NTTP);
Info.FirstArg = Deduced[NTTP->getIndex()];
Info.SecondArg = NewDeduced;
- return Sema::TDK_Inconsistent;
+ return TemplateDeductionResult::Inconsistent;
}
Deduced[NTTP->getIndex()] = Result;
if (!S.getLangOpts().CPlusPlus17)
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
if (NTTP->isExpandedParameterPack())
// FIXME: We may still need to deduce parts of the type here! But we
// don't have any way to find which slice of the type to use, and the
// type stored on the NTTP itself is nonsense. Perhaps the type of an
// expanded NTTP should be a pack expansion type?
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
// Get the type of the parameter for deduction. If it's a (dependent) array
// or function type, we will not have decayed it yet, so do that now.
@@ -446,7 +449,7 @@ static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
/// Deduce the value of the given non-type template parameter
/// from the given integral constant.
-static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
+static TemplateDeductionResult DeduceNonTypeTemplateArgument(
Sema &S, TemplateParameterList *TemplateParams,
const NonTypeTemplateParmDecl *NTTP, const llvm::APSInt &Value,
QualType ValueType, bool DeducedFromArrayBound, TemplateDeductionInfo &Info,
@@ -460,7 +463,7 @@ static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
/// Deduce the value of the given non-type template parameter
/// from the given null pointer template argument type.
-static Sema::TemplateDeductionResult DeduceNullPtrTemplateArgument(
+static TemplateDeductionResult DeduceNullPtrTemplateArgument(
Sema &S, TemplateParameterList *TemplateParams,
const NonTypeTemplateParmDecl *NTTP, QualType NullPtrType,
TemplateDeductionInfo &Info,
@@ -481,9 +484,10 @@ static Sema::TemplateDeductionResult DeduceNullPtrTemplateArgument(
/// from the given type- or value-dependent expression.
///
/// \returns true if deduction succeeded, false otherwise.
-static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
+static TemplateDeductionResult DeduceNonTypeTemplateArgument(
Sema &S, TemplateParameterList *TemplateParams,
- const NonTypeTemplateParmDecl *NTTP, Expr *Value, TemplateDeductionInfo &Info,
+ const NonTypeTemplateParmDecl *NTTP, Expr *Value,
+ TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
DeducedTemplateArgument(Value),
@@ -494,7 +498,7 @@ static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
/// from the given declaration.
///
/// \returns true if deduction succeeded, false otherwise.
-static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
+static TemplateDeductionResult DeduceNonTypeTemplateArgument(
Sema &S, TemplateParameterList *TemplateParams,
const NonTypeTemplateParmDecl *NTTP, ValueDecl *D, QualType T,
TemplateDeductionInfo &Info,
@@ -505,27 +509,112 @@ static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
S, TemplateParams, NTTP, DeducedTemplateArgument(New), T, Info, Deduced);
}
-static Sema::TemplateDeductionResult
-DeduceTemplateArguments(Sema &S,
- TemplateParameterList *TemplateParams,
- TemplateName Param,
- TemplateName Arg,
+/// Create a shallow copy of a given template parameter declaration, with
+/// empty source locations and using the given TemplateArgument as it's
+/// default argument.
+///
+/// \returns The new template parameter declaration.
+static NamedDecl *getTemplateParameterWithDefault(Sema &S, NamedDecl *A,
+ TemplateArgument Default) {
+ switch (A->getKind()) {
+ case Decl::TemplateTypeParm: {
+ auto *T = cast<TemplateTypeParmDecl>(A);
+ auto *R = TemplateTypeParmDecl::Create(
+ S.Context, A->getDeclContext(), SourceLocation(), SourceLocation(),
+ T->getDepth(), T->getIndex(), T->getIdentifier(),
+ T->wasDeclaredWithTypename(), T->isParameterPack(),
+ T->hasTypeConstraint());
+ R->setDefaultArgument(
+ S.Context,
+ S.getTrivialTemplateArgumentLoc(Default, QualType(), SourceLocation()));
+ if (R->hasTypeConstraint()) {
+ auto *C = R->getTypeConstraint();
+ R->setTypeConstraint(C->getConceptReference(),
+ C->getImmediatelyDeclaredConstraint());
+ }
+ return R;
+ }
+ case Decl::NonTypeTemplateParm: {
+ auto *T = cast<NonTypeTemplateParmDecl>(A);
+ auto *R = NonTypeTemplateParmDecl::Create(
+ S.Context, A->getDeclContext(), SourceLocation(), SourceLocation(),
+ T->getDepth(), T->getIndex(), T->getIdentifier(), T->getType(),
+ T->isParameterPack(), T->getTypeSourceInfo());
+ R->setDefaultArgument(S.Context,
+ S.getTrivialTemplateArgumentLoc(
+ Default, Default.getNonTypeTemplateArgumentType(),
+ SourceLocation()));
+ if (auto *PTC = T->getPlaceholderTypeConstraint())
+ R->setPlaceholderTypeConstraint(PTC);
+ return R;
+ }
+ case Decl::TemplateTemplateParm: {
+ auto *T = cast<TemplateTemplateParmDecl>(A);
+ auto *R = TemplateTemplateParmDecl::Create(
+ S.Context, A->getDeclContext(), SourceLocation(), T->getDepth(),
+ T->getIndex(), T->isParameterPack(), T->getIdentifier(),
+ T->wasDeclaredWithTypename(), T->getTemplateParameters());
+ R->setDefaultArgument(
+ S.Context,
+ S.getTrivialTemplateArgumentLoc(Default, QualType(), SourceLocation()));
+ return R;
+ }
+ default:
+ llvm_unreachable("Unexpected Decl Kind");
+ }
+}
+
+static TemplateDeductionResult
+DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
+ TemplateName Param, TemplateName Arg,
TemplateDeductionInfo &Info,
+ ArrayRef<TemplateArgument> DefaultArguments,
SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
TemplateDecl *ParamDecl = Param.getAsTemplateDecl();
if (!ParamDecl) {
// The parameter type is dependent and is not a template template parameter,
// so there is nothing that we can deduce.
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
}
- if (TemplateTemplateParmDecl *TempParam
- = dyn_cast<TemplateTemplateParmDecl>(ParamDecl)) {
+ if (auto *TempParam = dyn_cast<TemplateTemplateParmDecl>(ParamDecl)) {
// If we're not deducing at this depth, there's nothing to deduce.
if (TempParam->getDepth() != Info.getDeducedDepth())
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
+
+ auto NewDeduced = DeducedTemplateArgument(Arg);
+ // Provisional resolution for CWG2398: If Arg is also a template template
+ // param, and it names a template specialization, then we deduce a
+ // synthesized template template parameter based on A, but using the TS's
+ // arguments as defaults.
+ if (auto *TempArg = dyn_cast_or_null<TemplateTemplateParmDecl>(
+ Arg.getAsTemplateDecl())) {
+ assert(!TempArg->isExpandedParameterPack());
+
+ TemplateParameterList *As = TempArg->getTemplateParameters();
+ if (DefaultArguments.size() != 0) {
+ assert(DefaultArguments.size() <= As->size());
+ SmallVector<NamedDecl *, 4> Params(As->size());
+ for (unsigned I = 0; I < DefaultArguments.size(); ++I)
+ Params[I] = getTemplateParameterWithDefault(S, As->getParam(I),
+ DefaultArguments[I]);
+ for (unsigned I = DefaultArguments.size(); I < As->size(); ++I)
+ Params[I] = As->getParam(I);
+ // FIXME: We could unique these, and also the parameters, but we don't
+ // expect programs to contain a large enough amount of these deductions
+ // for that to be worthwhile.
+ auto *TPL = TemplateParameterList::Create(
+ S.Context, SourceLocation(), SourceLocation(), Params,
+ SourceLocation(), As->getRequiresClause());
+ NewDeduced = DeducedTemplateArgument(
+ TemplateName(TemplateTemplateParmDecl::Create(
+ S.Context, TempArg->getDeclContext(), SourceLocation(),
+ TempArg->getDepth(), TempArg->getPosition(),
+ TempArg->isParameterPack(), TempArg->getIdentifier(),
+ TempArg->wasDeclaredWithTypename(), TPL)));
+ }
+ }
- DeducedTemplateArgument NewDeduced(S.Context.getCanonicalTemplateName(Arg));
DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
Deduced[TempParam->getIndex()],
NewDeduced);
@@ -533,21 +622,21 @@ DeduceTemplateArguments(Sema &S,
Info.Param = TempParam;
Info.FirstArg = Deduced[TempParam->getIndex()];
Info.SecondArg = NewDeduced;
- return Sema::TDK_Inconsistent;
+ return TemplateDeductionResult::Inconsistent;
}
Deduced[TempParam->getIndex()] = Result;
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
}
// Verify that the two template names are equivalent.
if (S.Context.hasSameTemplateName(Param, Arg))
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
// Mismatch of non-dependent template parameter to argument.
Info.FirstArg = TemplateArgument(Param);
Info.SecondArg = TemplateArgument(Arg);
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
}
/// Deduce the template arguments by comparing the template parameter
@@ -568,7 +657,19 @@ DeduceTemplateArguments(Sema &S,
/// \returns the result of template argument deduction so far. Note that a
/// "success" result means that template argument deduction has not yet failed,
/// but it may still fail, later, for other reasons.
-static Sema::TemplateDeductionResult
+
+static const TemplateSpecializationType *getLastTemplateSpecType(QualType QT) {
+ for (const Type *T = QT.getTypePtr(); /**/; /**/) {
+ const TemplateSpecializationType *TST =
+ T->getAs<TemplateSpecializationType>();
+ assert(TST && "Expected a TemplateSpecializationType");
+ if (!TST->isSugared())
+ return TST;
+ T = TST->desugar().getTypePtr();
+ }
+}
+
+static TemplateDeductionResult
DeduceTemplateSpecArguments(Sema &S, TemplateParameterList *TemplateParams,
const QualType P, QualType A,
TemplateDeductionInfo &Info,
@@ -576,41 +677,59 @@ DeduceTemplateSpecArguments(Sema &S, TemplateParameterList *TemplateParams,
QualType UP = P;
if (const auto *IP = P->getAs<InjectedClassNameType>())
UP = IP->getInjectedSpecializationType();
- // FIXME: Try to preserve type sugar here, which is hard
- // because of the unresolved template arguments.
- const auto *TP = UP.getCanonicalType()->castAs<TemplateSpecializationType>();
+
+ assert(isa<TemplateSpecializationType>(UP.getCanonicalType()));
+ const TemplateSpecializationType *TP = ::getLastTemplateSpecType(UP);
TemplateName TNP = TP->getTemplateName();
// If the parameter is an alias template, there is nothing to deduce.
if (const auto *TD = TNP.getAsTemplateDecl(); TD && TD->isTypeAlias())
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
- ArrayRef<TemplateArgument> PResolved = TP->template_arguments();
+ // FIXME: To preserve sugar, the TST needs to carry sugared resolved
+ // arguments.
+ ArrayRef<TemplateArgument> PResolved =
+ TP->getCanonicalTypeInternal()
+ ->castAs<TemplateSpecializationType>()
+ ->template_arguments();
QualType UA = A;
+ std::optional<NestedNameSpecifier *> NNS;
// Treat an injected-class-name as its underlying template-id.
- if (const auto *Injected = A->getAs<InjectedClassNameType>())
+ if (const auto *Elaborated = A->getAs<ElaboratedType>()) {
+ NNS = Elaborated->getQualifier();
+ } else if (const auto *Injected = A->getAs<InjectedClassNameType>()) {
UA = Injected->getInjectedSpecializationType();
+ NNS = nullptr;
+ }
// Check whether the template argument is a dependent template-id.
- // FIXME: Should not lose sugar here.
- if (const auto *SA =
- dyn_cast<TemplateSpecializationType>(UA.getCanonicalType())) {
+ if (isa<TemplateSpecializationType>(UA.getCanonicalType())) {
+ const TemplateSpecializationType *SA = ::getLastTemplateSpecType(UA);
TemplateName TNA = SA->getTemplateName();
// If the argument is an alias template, there is nothing to deduce.
if (const auto *TD = TNA.getAsTemplateDecl(); TD && TD->isTypeAlias())
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
+
+ // FIXME: To preserve sugar, the TST needs to carry sugared resolved
+ // arguments.
+ ArrayRef<TemplateArgument> AResolved =
+ SA->getCanonicalTypeInternal()
+ ->castAs<TemplateSpecializationType>()
+ ->template_arguments();
// Perform template argument deduction for the template name.
- if (auto Result =
- DeduceTemplateArguments(S, TemplateParams, TNP, TNA, Info, Deduced))
+ if (auto Result = DeduceTemplateArguments(S, TemplateParams, TNP, TNA, Info,
+ AResolved, Deduced);
+ Result != TemplateDeductionResult::Success)
return Result;
+
// Perform template argument deduction on each template
// argument. Ignore any missing/extra arguments, since they could be
// filled in by default arguments.
- return DeduceTemplateArguments(S, TemplateParams, PResolved,
- SA->template_arguments(), Info, Deduced,
+ return DeduceTemplateArguments(S, TemplateParams, PResolved, AResolved,
+ Info, Deduced,
/*NumberOfArgumentsMustMatch=*/false);
}
@@ -623,13 +742,19 @@ DeduceTemplateSpecArguments(Sema &S, TemplateParameterList *TemplateParams,
if (!SA) {
Info.FirstArg = TemplateArgument(P);
Info.SecondArg = TemplateArgument(A);
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
}
+ TemplateName TNA = TemplateName(SA->getSpecializedTemplate());
+ if (NNS)
+ TNA = S.Context.getQualifiedTemplateName(
+ *NNS, false, TemplateName(SA->getSpecializedTemplate()));
+
// Perform template argument deduction for the template name.
- if (auto Result = DeduceTemplateArguments(
- S, TemplateParams, TP->getTemplateName(),
- TemplateName(SA->getSpecializedTemplate()), Info, Deduced))
+ if (auto Result =
+ DeduceTemplateArguments(S, TemplateParams, TNP, TNA, Info,
+ SA->getTemplateArgs().asArray(), Deduced);
+ Result != TemplateDeductionResult::Success)
return Result;
// Perform template argument deduction for the template arguments.
@@ -646,8 +771,10 @@ static bool IsPossiblyOpaquelyQualifiedTypeInternal(const Type *T) {
case Type::TypeOf:
case Type::DependentName:
case Type::Decltype:
+ case Type::PackIndexing:
case Type::UnresolvedUsing:
case Type::TemplateTypeParm:
+ case Type::Auto:
return true;
case Type::ConstantArray:
@@ -730,6 +857,7 @@ private:
void addPack(unsigned Index) {
// Save the deduced template argument for the parameter pack expanded
// by this pack expansion, then clear out the deduction.
+ DeducedFromEarlierParameter = !Deduced[Index].isNull();
DeducedPack Pack(Index);
Pack.Saved = Deduced[Index];
Deduced[Index] = TemplateArgument();
@@ -858,6 +986,23 @@ public:
Info.PendingDeducedPacks[Pack.Index] = Pack.Outer;
}
+ // Return the size of the saved packs if all of them has the same size.
+ std::optional<unsigned> getSavedPackSizeIfAllEqual() const {
+ unsigned PackSize = Packs[0].Saved.pack_size();
+
+ if (std::all_of(Packs.begin() + 1, Packs.end(), [&PackSize](const auto &P) {
+ return P.Saved.pack_size() == PackSize;
+ }))
+ return PackSize;
+ return {};
+ }
+
+ /// Determine whether this pack has already been deduced from a previous
+ /// argument.
+ bool isDeducedFromEarlierParameter() const {
+ return DeducedFromEarlierParameter;
+ }
+
/// Determine whether this pack has already been partially expanded into a
/// sequence of (prior) function parameters / template arguments.
bool isPartiallyExpanded() { return IsPartiallyExpanded; }
@@ -899,7 +1044,7 @@ public:
/// Finish template argument deduction for a set of argument packs,
/// producing the argument packs and checking for consistency with prior
/// deductions.
- Sema::TemplateDeductionResult finish() {
+ TemplateDeductionResult finish() {
// Build argument packs for each of the parameter packs expanded by this
// pack expansion.
for (auto &Pack : Packs) {
@@ -976,7 +1121,7 @@ public:
Info.Param = makeTemplateParameter(Param);
Info.FirstArg = OldPack;
Info.SecondArg = NewPack;
- return Sema::TDK_Inconsistent;
+ return TemplateDeductionResult::Inconsistent;
}
// If we have a pre-expanded pack and we didn't deduce enough elements
@@ -985,14 +1130,14 @@ public:
if (*Expansions != PackElements) {
Info.Param = makeTemplateParameter(Param);
Info.FirstArg = Result;
- return Sema::TDK_IncompletePack;
+ return TemplateDeductionResult::IncompletePack;
}
}
*Loc = Result;
}
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
}
private:
@@ -1003,6 +1148,7 @@ private:
unsigned PackElements = 0;
bool IsPartiallyExpanded = false;
bool DeducePackIfNotAlreadyDeduced = false;
+ bool DeducedFromEarlierParameter = false;
/// The number of expansions, if we have a fully-expanded pack in this scope.
std::optional<unsigned> FixedNumExpansions;
@@ -1041,15 +1187,13 @@ private:
/// \returns the result of template argument deduction so far. Note that a
/// "success" result means that template argument deduction has not yet failed,
/// but it may still fail, later, for other reasons.
-static Sema::TemplateDeductionResult
-DeduceTemplateArguments(Sema &S,
- TemplateParameterList *TemplateParams,
+static TemplateDeductionResult
+DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
const QualType *Params, unsigned NumParams,
const QualType *Args, unsigned NumArgs,
TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
- unsigned TDF,
- bool PartialOrdering = false) {
+ unsigned TDF, bool PartialOrdering = false) {
// C++0x [temp.deduct.type]p10:
// Similarly, if P has a form that contains (T), then each parameter type
// Pi of the respective parameter-type- list of P is compared with the
@@ -1065,22 +1209,22 @@ DeduceTemplateArguments(Sema &S,
// Make sure we have an argument.
if (ArgIdx >= NumArgs)
- return Sema::TDK_MiscellaneousDeductionFailure;
+ return TemplateDeductionResult::MiscellaneousDeductionFailure;
if (isa<PackExpansionType>(Args[ArgIdx])) {
// C++0x [temp.deduct.type]p22:
// If the original function parameter associated with A is a function
// parameter pack and the function parameter associated with P is not
// a function parameter pack, then template argument deduction fails.
- return Sema::TDK_MiscellaneousDeductionFailure;
+ return TemplateDeductionResult::MiscellaneousDeductionFailure;
}
- if (Sema::TemplateDeductionResult Result =
- DeduceTemplateArgumentsByTypeMatch(
- S, TemplateParams, Params[ParamIdx].getUnqualifiedType(),
- Args[ArgIdx].getUnqualifiedType(), Info, Deduced, TDF,
- PartialOrdering,
- /*DeducedFromArrayBound=*/false))
+ if (TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, Params[ParamIdx].getUnqualifiedType(),
+ Args[ArgIdx].getUnqualifiedType(), Info, Deduced, TDF,
+ PartialOrdering,
+ /*DeducedFromArrayBound=*/false);
+ Result != TemplateDeductionResult::Success)
return Result;
++ArgIdx;
@@ -1102,11 +1246,11 @@ DeduceTemplateArguments(Sema &S,
if (ParamIdx + 1 == NumParams || PackScope.hasFixedArity()) {
for (; ArgIdx < NumArgs && PackScope.hasNextElement(); ++ArgIdx) {
// Deduce template arguments from the pattern.
- if (Sema::TemplateDeductionResult Result =
- DeduceTemplateArgumentsByTypeMatch(
- S, TemplateParams, Pattern.getUnqualifiedType(),
- Args[ArgIdx].getUnqualifiedType(), Info, Deduced, TDF,
- PartialOrdering, /*DeducedFromArrayBound=*/false))
+ if (TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, Pattern.getUnqualifiedType(),
+ Args[ArgIdx].getUnqualifiedType(), Info, Deduced, TDF,
+ PartialOrdering, /*DeducedFromArrayBound=*/false);
+ Result != TemplateDeductionResult::Success)
return Result;
PackScope.nextPackElement();
@@ -1139,7 +1283,8 @@ DeduceTemplateArguments(Sema &S,
// Build argument packs for each of the parameter packs expanded by this
// pack expansion.
- if (auto Result = PackScope.finish())
+ if (auto Result = PackScope.finish();
+ Result != TemplateDeductionResult::Success)
return Result;
}
@@ -1151,13 +1296,13 @@ DeduceTemplateArguments(Sema &S,
// Ai is ignored;
if (PartialOrdering && ArgIdx + 1 == NumArgs &&
isa<PackExpansionType>(Args[ArgIdx]))
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
// Make sure we don't have any extra arguments.
if (ArgIdx < NumArgs)
- return Sema::TDK_MiscellaneousDeductionFailure;
+ return TemplateDeductionResult::MiscellaneousDeductionFailure;
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
}
/// Determine whether the parameter has qualifiers that the argument
@@ -1191,13 +1336,6 @@ static bool hasInconsistentOrSupersetQualifiersOf(QualType ParamType,
return (ParamQs.getCVRQualifiers() & ~ArgQs.getCVRQualifiers()) != 0;
}
-/// Compare types for equality with respect to possibly compatible
-/// function types (noreturn adjustment, implicit calling conventions). If any
-/// of parameter and argument is not a function, just perform type comparison.
-///
-/// \param P the template parameter type.
-///
-/// \param A the argument type.
bool Sema::isSameOrCompatibleFunctionType(QualType P, QualType A) {
const FunctionType *PF = P->getAs<FunctionType>(),
*AF = A->getAs<FunctionType>();
@@ -1207,13 +1345,11 @@ bool Sema::isSameOrCompatibleFunctionType(QualType P, QualType A) {
return Context.hasSameType(P, A);
// Noreturn and noexcept adjustment.
- QualType AdjustedParam;
- if (IsFunctionConversion(P, A, AdjustedParam))
- return Context.hasSameType(AdjustedParam, A);
+ if (QualType AdjustedParam; IsFunctionConversion(P, A, AdjustedParam))
+ P = AdjustedParam;
// FIXME: Compatible calling conventions.
-
- return Context.hasSameType(P, A);
+ return Context.hasSameFunctionTypeIgnoringExceptionSpec(P, A);
}
/// Get the index of the first template parameter that was originally from the
@@ -1265,7 +1401,7 @@ static CXXRecordDecl *getCanonicalRD(QualType T) {
/// \returns the result of template argument deduction with the bases. "invalid"
/// means no matches, "success" found a single item, and the
/// "MiscellaneousDeductionFailure" result happens when the match is ambiguous.
-static Sema::TemplateDeductionResult
+static TemplateDeductionResult
DeduceTemplateBases(Sema &S, const CXXRecordDecl *RD,
TemplateParameterList *TemplateParams, QualType P,
TemplateDeductionInfo &Info,
@@ -1317,13 +1453,13 @@ DeduceTemplateBases(Sema &S, const CXXRecordDecl *RD,
SmallVector<DeducedTemplateArgument, 8> DeducedCopy(Deduced.begin(),
Deduced.end());
TemplateDeductionInfo BaseInfo(TemplateDeductionInfo::ForBase, Info);
- Sema::TemplateDeductionResult BaseResult = DeduceTemplateSpecArguments(
+ TemplateDeductionResult BaseResult = DeduceTemplateSpecArguments(
S, TemplateParams, P, NextT, BaseInfo, DeducedCopy);
// If this was a successful deduction, add it to the list of matches,
// otherwise we need to continue searching its bases.
const CXXRecordDecl *RD = ::getCanonicalRD(NextT);
- if (BaseResult == Sema::TDK_Success)
+ if (BaseResult == TemplateDeductionResult::Success)
Matches.insert({RD, DeducedCopy});
else
AddBases(RD);
@@ -1353,12 +1489,12 @@ DeduceTemplateBases(Sema &S, const CXXRecordDecl *RD,
}
if (Matches.empty())
- return Sema::TDK_Invalid;
+ return TemplateDeductionResult::Invalid;
if (Matches.size() > 1)
- return Sema::TDK_MiscellaneousDeductionFailure;
+ return TemplateDeductionResult::MiscellaneousDeductionFailure;
std::swap(Matches.front().second, Deduced);
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
}
/// Deduce the template arguments by comparing the parameter type and
@@ -1385,7 +1521,7 @@ DeduceTemplateBases(Sema &S, const CXXRecordDecl *RD,
/// \returns the result of template argument deduction so far. Note that a
/// "success" result means that template argument deduction has not yet failed,
/// but it may still fail, later, for other reasons.
-static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
+static TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
Sema &S, TemplateParameterList *TemplateParams, QualType P, QualType A,
TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned TDF,
@@ -1439,7 +1575,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
PQuals.withoutObjCLifetime() == AQuals.withoutObjCLifetime())) {
Info.FirstArg = TemplateArgument(P);
Info.SecondArg = TemplateArgument(A);
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
}
}
Qualifiers DiscardedQuals;
@@ -1493,7 +1629,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
// Just skip any attempts to deduce from a placeholder type or a parameter
// at a different depth.
if (A->isPlaceholderType() || Info.getDeducedDepth() != TTP->getDepth())
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
unsigned Index = TTP->getIndex();
@@ -1513,13 +1649,13 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
Info.Param = cast<TemplateTypeParmDecl>(TemplateParams->getParam(Index));
Info.FirstArg = TemplateArgument(P);
Info.SecondArg = TemplateArgument(A);
- return Sema::TDK_Underqualified;
+ return TemplateDeductionResult::Underqualified;
}
// Do not match a function type with a cv-qualified type.
// http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#1584
if (A->isFunctionType() && P.hasQualifiers())
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
assert(TTP->getDepth() == Info.getDeducedDepth() &&
"saw template type parameter with wrong depth");
@@ -1547,7 +1683,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
Info.Param = cast<TemplateTypeParmDecl>(TemplateParams->getParam(Index));
Info.FirstArg = TemplateArgument(P);
Info.SecondArg = TemplateArgument(A);
- return Sema::TDK_Underqualified;
+ return TemplateDeductionResult::Underqualified;
}
// Objective-C ARC:
@@ -1567,11 +1703,11 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
Info.Param = cast<TemplateTypeParmDecl>(TemplateParams->getParam(Index));
Info.FirstArg = Deduced[Index];
Info.SecondArg = NewDeduced;
- return Sema::TDK_Inconsistent;
+ return TemplateDeductionResult::Inconsistent;
}
Deduced[Index] = Result;
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
}
// Set up the template argument deduction information for a failure.
@@ -1583,19 +1719,19 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
// at, so we have to wait until all of the parameter packs in this
// expansion have arguments.
if (P->getAs<SubstTemplateTypeParmPackType>())
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
// Check the cv-qualifiers on the parameter and argument types.
if (!(TDF & TDF_IgnoreQualifiers)) {
if (TDF & TDF_ParamWithReferenceType) {
if (hasInconsistentOrSupersetQualifiersOf(P, A))
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
} else if (TDF & TDF_ArgWithReferenceType) {
// C++ [temp.deduct.conv]p4:
// If the original A is a reference type, A can be more cv-qualified
// than the deduced A
if (!A.getQualifiers().compatiblyIncludes(P.getQualifiers()))
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
// Strip out all extra qualifiers from the argument to figure out the
// type we're converting to, prior to the qualification conversion.
@@ -1604,22 +1740,22 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
A = S.Context.getQualifiedType(A, P.getQualifiers());
} else if (!IsPossiblyOpaquelyQualifiedType(P)) {
if (P.getCVRQualifiers() != A.getCVRQualifiers())
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
}
}
// If the parameter type is not dependent, there is nothing to deduce.
if (!P->isDependentType()) {
if (TDF & TDF_SkipNonDependent)
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
if ((TDF & TDF_IgnoreQualifiers) ? S.Context.hasSameUnqualifiedType(P, A)
: S.Context.hasSameType(P, A))
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
if (TDF & TDF_AllowCompatibleFunctionType &&
S.isSameOrCompatibleFunctionType(P, A))
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
if (!(TDF & TDF_IgnoreQualifiers))
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
// Otherwise, when ignoring qualifiers, the types not having the same
// unqualified type does not mean they do not match, so in this case we
// must keep going and analyze with a non-dependent parameter type.
@@ -1643,7 +1779,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
// There's no corresponding wording for [temp.deduct.decl], but we treat
// it the same to match other compilers.
if (P->isDependentType())
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
[[fallthrough]];
case Type::Builtin:
case Type::VariableArray:
@@ -1659,14 +1795,14 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
((TDF & TDF_IgnoreQualifiers)
? S.Context.hasSameUnqualifiedType(P, A)
: S.Context.hasSameType(P, A))
- ? Sema::TDK_Success
- : Sema::TDK_NonDeducedMismatch;
+ ? TemplateDeductionResult::Success
+ : TemplateDeductionResult::NonDeducedMismatch;
// _Complex T [placeholder extension]
case Type::Complex: {
const auto *CP = P->castAs<ComplexType>(), *CA = A->getAs<ComplexType>();
if (!CA)
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
return DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, CP->getElementType(), CA->getElementType(), Info,
Deduced, TDF);
@@ -1676,7 +1812,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
case Type::Atomic: {
const auto *PA = P->castAs<AtomicType>(), *AA = A->getAs<AtomicType>();
if (!AA)
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
return DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, PA->getValueType(), AA->getValueType(), Info,
Deduced, TDF);
@@ -1690,7 +1826,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
} else if (const auto *PA = A->getAs<ObjCObjectPointerType>()) {
PointeeType = PA->getPointeeType();
} else {
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
}
return DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, P->castAs<PointerType>()->getPointeeType(),
@@ -1703,7 +1839,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
const auto *RP = P->castAs<LValueReferenceType>(),
*RA = A->getAs<LValueReferenceType>();
if (!RA)
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
return DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, RP->getPointeeType(), RA->getPointeeType(), Info,
@@ -1715,7 +1851,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
const auto *RP = P->castAs<RValueReferenceType>(),
*RA = A->getAs<RValueReferenceType>();
if (!RA)
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
return DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, RP->getPointeeType(), RA->getPointeeType(), Info,
@@ -1726,7 +1862,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
case Type::IncompleteArray: {
const auto *IAA = S.Context.getAsIncompleteArrayType(A);
if (!IAA)
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
const auto *IAP = S.Context.getAsIncompleteArrayType(P);
assert(IAP && "Template parameter not of incomplete array type");
@@ -1742,7 +1878,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
*CAP = S.Context.getAsConstantArrayType(P);
assert(CAP);
if (!CAA || CAA->getSize() != CAP->getSize())
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
return DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, CAP->getElementType(), CAA->getElementType(), Info,
@@ -1753,21 +1889,22 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
case Type::DependentSizedArray: {
const auto *AA = S.Context.getAsArrayType(A);
if (!AA)
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
// Check the element type of the arrays
const auto *DAP = S.Context.getAsDependentSizedArrayType(P);
assert(DAP);
if (auto Result = DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, DAP->getElementType(), AA->getElementType(),
- Info, Deduced, TDF & TDF_IgnoreQualifiers))
+ Info, Deduced, TDF & TDF_IgnoreQualifiers);
+ Result != TemplateDeductionResult::Success)
return Result;
// Determine the array bound is something we can deduce.
const NonTypeTemplateParmDecl *NTTP =
getDeducedParameterFromExpr(Info, DAP->getSizeExpr());
if (!NTTP)
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
// We can perform template argument deduction for the given non-type
// template parameter.
@@ -1785,7 +1922,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, NTTP, DAA->getSizeExpr(), Info, Deduced);
// Incomplete type does not match a dependently-sized array type
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
}
// type(*)(T)
@@ -1795,30 +1932,32 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
const auto *FPP = P->castAs<FunctionProtoType>(),
*FPA = A->getAs<FunctionProtoType>();
if (!FPA)
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
if (FPP->getMethodQuals() != FPA->getMethodQuals() ||
FPP->getRefQualifier() != FPA->getRefQualifier() ||
FPP->isVariadic() != FPA->isVariadic())
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
// Check return types.
if (auto Result = DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, FPP->getReturnType(), FPA->getReturnType(),
Info, Deduced, 0,
/*PartialOrdering=*/false,
- /*DeducedFromArrayBound=*/false))
+ /*DeducedFromArrayBound=*/false);
+ Result != TemplateDeductionResult::Success)
return Result;
// Check parameter types.
if (auto Result = DeduceTemplateArguments(
S, TemplateParams, FPP->param_type_begin(), FPP->getNumParams(),
FPA->param_type_begin(), FPA->getNumParams(), Info, Deduced,
- TDF & TDF_TopLevelParameterTypeList, PartialOrdering))
+ TDF & TDF_TopLevelParameterTypeList, PartialOrdering);
+ Result != TemplateDeductionResult::Success)
return Result;
if (TDF & TDF_AllowCompatibleFunctionType)
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
// FIXME: Per core-2016/10/1019 (no corresponding core issue yet), permit
// deducing through the noexcept-specifier if it's part of the canonical
@@ -1856,7 +1995,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
// Careful about [temp.deduct.call] and [temp.deduct.conv], which allow
// top-level differences in noexcept-specifications.
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
}
case Type::InjectedClassName:
@@ -1880,7 +2019,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
auto Result =
DeduceTemplateSpecArguments(S, TemplateParams, P, A, Info, Deduced);
- if (Result == Sema::TDK_Success)
+ if (Result == TemplateDeductionResult::Success)
return Result;
// We cannot inspect base classes as part of deduction when the type
@@ -1889,13 +2028,17 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
if (!S.isCompleteType(Info.getLocation(), A))
return Result;
+ if (getCanonicalRD(A)->isInvalidDecl())
+ return Result;
+
// Reset the incorrectly deduced argument from above.
Deduced = DeducedOrig;
// Check bases according to C++14 [temp.deduct.call] p4b3:
auto BaseResult = DeduceTemplateBases(S, getCanonicalRD(A),
TemplateParams, P, Info, Deduced);
- return BaseResult != Sema::TDK_Invalid ? BaseResult : Result;
+ return BaseResult != TemplateDeductionResult::Invalid ? BaseResult
+ : Result;
}
// T type::*
@@ -1911,7 +2054,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
const auto *MPP = P->castAs<MemberPointerType>(),
*MPA = A->getAs<MemberPointerType>();
if (!MPA)
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
QualType PPT = MPP->getPointeeType();
if (PPT->isFunctionType())
@@ -1924,7 +2067,8 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
unsigned SubTDF = TDF & TDF_IgnoreQualifiers;
if (auto Result = DeduceTemplateArgumentsByTypeMatch(
- S, TemplateParams, PPT, APT, Info, Deduced, SubTDF))
+ S, TemplateParams, PPT, APT, Info, Deduced, SubTDF);
+ Result != TemplateDeductionResult::Success)
return Result;
return DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, QualType(MPP->getClass(), 0),
@@ -1940,7 +2084,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
const auto *BPP = P->castAs<BlockPointerType>(),
*BPA = A->getAs<BlockPointerType>();
if (!BPA)
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
return DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, BPP->getPointeeType(), BPA->getPointeeType(), Info,
Deduced, 0);
@@ -1955,7 +2099,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
if (const auto *VA = A->getAs<ExtVectorType>()) {
// Make sure that the vectors have the same number of elements.
if (VP->getNumElements() != VA->getNumElements())
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
ElementType = VA->getElementType();
} else if (const auto *VA = A->getAs<DependentSizedExtVectorType>()) {
// We can't check the number of elements, since the argument has a
@@ -1963,7 +2107,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
// ordering.
ElementType = VA->getElementType();
} else {
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
}
// Perform deduction on the element types.
return DeduceTemplateArgumentsByTypeMatch(
@@ -1978,14 +2122,15 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
// Perform deduction on the element types.
if (auto Result = DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, VP->getElementType(), VA->getElementType(),
- Info, Deduced, TDF))
+ Info, Deduced, TDF);
+ Result != TemplateDeductionResult::Success)
return Result;
// Perform deduction on the vector size, if we can.
const NonTypeTemplateParmDecl *NTTP =
getDeducedParameterFromExpr(Info, VP->getSizeExpr());
if (!NTTP)
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
llvm::APSInt ArgSize(S.Context.getTypeSize(S.Context.IntTy), false);
ArgSize = VA->getNumElements();
@@ -2001,20 +2146,21 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
// Perform deduction on the element types.
if (auto Result = DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, VP->getElementType(), VA->getElementType(),
- Info, Deduced, TDF))
+ Info, Deduced, TDF);
+ Result != TemplateDeductionResult::Success)
return Result;
// Perform deduction on the vector size, if we can.
const NonTypeTemplateParmDecl *NTTP =
getDeducedParameterFromExpr(Info, VP->getSizeExpr());
if (!NTTP)
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
VA->getSizeExpr(), Info, Deduced);
}
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
}
// (clang extension)
@@ -2027,14 +2173,15 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
// Perform deduction on the element types.
if (auto Result = DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, VP->getElementType(), VA->getElementType(),
- Info, Deduced, TDF))
+ Info, Deduced, TDF);
+ Result != TemplateDeductionResult::Success)
return Result;
// Perform deduction on the vector size, if we can.
const NonTypeTemplateParmDecl *NTTP =
getDeducedParameterFromExpr(Info, VP->getSizeExpr());
if (!NTTP)
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
llvm::APSInt ArgSize(S.Context.getTypeSize(S.Context.IntTy), false);
ArgSize = VA->getNumElements();
@@ -2050,20 +2197,21 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
// Perform deduction on the element types.
if (auto Result = DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, VP->getElementType(), VA->getElementType(),
- Info, Deduced, TDF))
+ Info, Deduced, TDF);
+ Result != TemplateDeductionResult::Success)
return Result;
// Perform deduction on the vector size, if we can.
const NonTypeTemplateParmDecl *NTTP =
getDeducedParameterFromExpr(Info, VP->getSizeExpr());
if (!NTTP)
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
VA->getSizeExpr(), Info, Deduced);
}
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
}
// (clang extension)
@@ -2074,12 +2222,12 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
const auto *MP = P->castAs<ConstantMatrixType>(),
*MA = A->getAs<ConstantMatrixType>();
if (!MA)
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
// Check that the dimensions are the same
if (MP->getNumRows() != MA->getNumRows() ||
MP->getNumColumns() != MA->getNumColumns()) {
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
}
// Perform deduction on element types.
return DeduceTemplateArgumentsByTypeMatch(
@@ -2091,12 +2239,13 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
const auto *MP = P->castAs<DependentSizedMatrixType>();
const auto *MA = A->getAs<MatrixType>();
if (!MA)
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
// Check the element type of the matrixes.
if (auto Result = DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, MP->getElementType(), MA->getElementType(),
- Info, Deduced, TDF))
+ Info, Deduced, TDF);
+ Result != TemplateDeductionResult::Success)
return Result;
// Try to deduce a matrix dimension.
@@ -2111,26 +2260,26 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
std::optional<llvm::APSInt> ParamConst =
ParamExpr->getIntegerConstantExpr(S.Context);
if (!ParamConst)
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
if (ACM) {
if ((ACM->*GetArgDimension)() == *ParamConst)
- return Sema::TDK_Success;
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::Success;
+ return TemplateDeductionResult::NonDeducedMismatch;
}
Expr *ArgExpr = (ADM->*GetArgDimensionExpr)();
if (std::optional<llvm::APSInt> ArgConst =
ArgExpr->getIntegerConstantExpr(S.Context))
if (*ArgConst == *ParamConst)
- return Sema::TDK_Success;
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::Success;
+ return TemplateDeductionResult::NonDeducedMismatch;
}
const NonTypeTemplateParmDecl *NTTP =
getDeducedParameterFromExpr(Info, ParamExpr);
if (!NTTP)
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
if (ACM) {
llvm::APSInt ArgConst(
@@ -2148,7 +2297,8 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
if (auto Result = DeduceMatrixArg(MP->getRowExpr(), MA,
&ConstantMatrixType::getNumRows,
- &DependentSizedMatrixType::getRowExpr))
+ &DependentSizedMatrixType::getRowExpr);
+ Result != TemplateDeductionResult::Success)
return Result;
return DeduceMatrixArg(MP->getColumnExpr(), MA,
@@ -2166,14 +2316,15 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
// Perform deduction on the pointer type.
if (auto Result = DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, ASP->getPointeeType(), ASA->getPointeeType(),
- Info, Deduced, TDF))
+ Info, Deduced, TDF);
+ Result != TemplateDeductionResult::Success)
return Result;
// Perform deduction on the address space, if we can.
const NonTypeTemplateParmDecl *NTTP =
getDeducedParameterFromExpr(Info, ASP->getAddrSpaceExpr());
if (!NTTP)
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
return DeduceNonTypeTemplateArgument(
S, TemplateParams, NTTP, ASA->getAddrSpaceExpr(), Info, Deduced);
@@ -2187,33 +2338,34 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
// Perform deduction on the pointer types.
if (auto Result = DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, ASP->getPointeeType(),
- S.Context.removeAddrSpaceQualType(A), Info, Deduced, TDF))
+ S.Context.removeAddrSpaceQualType(A), Info, Deduced, TDF);
+ Result != TemplateDeductionResult::Success)
return Result;
// Perform deduction on the address space, if we can.
const NonTypeTemplateParmDecl *NTTP =
getDeducedParameterFromExpr(Info, ASP->getAddrSpaceExpr());
if (!NTTP)
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
ArgAddressSpace, S.Context.IntTy,
true, Info, Deduced);
}
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
}
case Type::DependentBitInt: {
const auto *IP = P->castAs<DependentBitIntType>();
if (const auto *IA = A->getAs<BitIntType>()) {
if (IP->isUnsigned() != IA->isUnsigned())
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
const NonTypeTemplateParmDecl *NTTP =
getDeducedParameterFromExpr(Info, IP->getNumBitsExpr());
if (!NTTP)
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
llvm::APSInt ArgSize(S.Context.getTypeSize(S.Context.IntTy), false);
ArgSize = IA->getNumBits();
@@ -2225,11 +2377,11 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
if (const auto *IA = A->getAs<DependentBitIntType>()) {
if (IP->isUnsigned() != IA->isUnsigned())
- return Sema::TDK_NonDeducedMismatch;
- return Sema::TDK_Success;
+ return TemplateDeductionResult::NonDeducedMismatch;
+ return TemplateDeductionResult::Success;
}
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
}
case Type::TypeOfExpr:
@@ -2242,14 +2394,24 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
case Type::DependentTemplateSpecialization:
case Type::PackExpansion:
case Type::Pipe:
+ case Type::ArrayParameter:
// No template argument deduction for these types
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
+
+ case Type::PackIndexing: {
+ const PackIndexingType *PIT = P->getAs<PackIndexingType>();
+ if (PIT->hasSelectedType()) {
+ return DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, PIT->getSelectedType(), A, Info, Deduced, TDF);
+ }
+ return TemplateDeductionResult::IncompletePack;
+ }
}
llvm_unreachable("Invalid Type Class!");
}
-static Sema::TemplateDeductionResult
+static TemplateDeductionResult
DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
const TemplateArgument &P, TemplateArgument A,
TemplateDeductionInfo &Info,
@@ -2270,15 +2432,16 @@ DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
S, TemplateParams, P.getAsType(), A.getAsType(), Info, Deduced, 0);
Info.FirstArg = P;
Info.SecondArg = A;
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
case TemplateArgument::Template:
if (A.getKind() == TemplateArgument::Template)
return DeduceTemplateArguments(S, TemplateParams, P.getAsTemplate(),
- A.getAsTemplate(), Info, Deduced);
+ A.getAsTemplate(), Info,
+ /*DefaultArguments=*/{}, Deduced);
Info.FirstArg = P;
Info.SecondArg = A;
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
case TemplateArgument::TemplateExpansion:
llvm_unreachable("caller should handle pack expansions");
@@ -2286,38 +2449,38 @@ DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
case TemplateArgument::Declaration:
if (A.getKind() == TemplateArgument::Declaration &&
isSameDeclaration(P.getAsDecl(), A.getAsDecl()))
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
Info.FirstArg = P;
Info.SecondArg = A;
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
case TemplateArgument::NullPtr:
if (A.getKind() == TemplateArgument::NullPtr &&
S.Context.hasSameType(P.getNullPtrType(), A.getNullPtrType()))
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
Info.FirstArg = P;
Info.SecondArg = A;
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
case TemplateArgument::Integral:
if (A.getKind() == TemplateArgument::Integral) {
if (hasSameExtendedValue(P.getAsIntegral(), A.getAsIntegral()))
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
}
Info.FirstArg = P;
Info.SecondArg = A;
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
case TemplateArgument::StructuralValue:
if (A.getKind() == TemplateArgument::StructuralValue &&
A.structurallyEquals(P))
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
Info.FirstArg = P;
Info.SecondArg = A;
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
case TemplateArgument::Expression:
if (const NonTypeTemplateParmDecl *NTTP =
@@ -2346,13 +2509,13 @@ DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
case TemplateArgument::Pack:
Info.FirstArg = P;
Info.SecondArg = A;
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
}
llvm_unreachable("Unknown template argument kind");
}
// Can't deduce anything, but that's okay.
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
case TemplateArgument::Pack:
llvm_unreachable("Argument packs should be expanded by the caller!");
}
@@ -2403,19 +2566,21 @@ static bool hasPackExpansionBeforeEnd(ArrayRef<TemplateArgument> Args) {
return false;
}
-static Sema::TemplateDeductionResult
+static TemplateDeductionResult
DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
ArrayRef<TemplateArgument> Ps,
ArrayRef<TemplateArgument> As,
TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
- bool NumberOfArgumentsMustMatch) {
+ bool NumberOfArgumentsMustMatch, PackFold PackFold) {
+ if (PackFold == PackFold::ArgumentToParameter)
+ std::swap(Ps, As);
// C++0x [temp.deduct.type]p9:
// If the template argument list of P contains a pack expansion that is not
// the last template argument, the entire template argument list is a
// non-deduced context.
if (hasPackExpansionBeforeEnd(Ps))
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
// C++0x [temp.deduct.type]p9:
// If P has a form that contains <T> or <i>, then each argument Pi of the
@@ -2430,18 +2595,22 @@ DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
// Check whether we have enough arguments.
if (!hasTemplateArgumentForDeduction(As, ArgIdx))
return NumberOfArgumentsMustMatch
- ? Sema::TDK_MiscellaneousDeductionFailure
- : Sema::TDK_Success;
+ ? TemplateDeductionResult::MiscellaneousDeductionFailure
+ : TemplateDeductionResult::Success;
// C++1z [temp.deduct.type]p9:
// During partial ordering, if Ai was originally a pack expansion [and]
// Pi is not a pack expansion, template argument deduction fails.
if (As[ArgIdx].isPackExpansion())
- return Sema::TDK_MiscellaneousDeductionFailure;
+ return TemplateDeductionResult::MiscellaneousDeductionFailure;
// Perform deduction for this Pi/Ai pair.
- if (auto Result = DeduceTemplateArguments(S, TemplateParams, P,
- As[ArgIdx], Info, Deduced))
+ TemplateArgument Pi = P, Ai = As[ArgIdx];
+ if (PackFold == PackFold::ArgumentToParameter)
+ std::swap(Pi, Ai);
+ if (auto Result =
+ DeduceTemplateArguments(S, TemplateParams, Pi, Ai, Info, Deduced);
+ Result != TemplateDeductionResult::Success)
return Result;
// Move to the next argument.
@@ -2467,9 +2636,13 @@ DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
for (; hasTemplateArgumentForDeduction(As, ArgIdx) &&
PackScope.hasNextElement();
++ArgIdx) {
+ TemplateArgument Pi = Pattern, Ai = As[ArgIdx];
+ if (PackFold == PackFold::ArgumentToParameter)
+ std::swap(Pi, Ai);
// Deduce template arguments from the pattern.
- if (auto Result = DeduceTemplateArguments(S, TemplateParams, Pattern,
- As[ArgIdx], Info, Deduced))
+ if (auto Result =
+ DeduceTemplateArguments(S, TemplateParams, Pi, Ai, Info, Deduced);
+ Result != TemplateDeductionResult::Success)
return Result;
PackScope.nextPackElement();
@@ -2477,22 +2650,21 @@ DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
// Build argument packs for each of the parameter packs expanded by this
// pack expansion.
- if (auto Result = PackScope.finish())
+ if (auto Result = PackScope.finish();
+ Result != TemplateDeductionResult::Success)
return Result;
}
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
}
-static Sema::TemplateDeductionResult
-DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
- const TemplateArgumentList &ParamList,
- const TemplateArgumentList &ArgList,
- TemplateDeductionInfo &Info,
- SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
- return DeduceTemplateArguments(S, TemplateParams, ParamList.asArray(),
- ArgList.asArray(), Info, Deduced,
- /*NumberOfArgumentsMustMatch=*/false);
+TemplateDeductionResult Sema::DeduceTemplateArguments(
+ TemplateParameterList *TemplateParams, ArrayRef<TemplateArgument> Ps,
+ ArrayRef<TemplateArgument> As, sema::TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ bool NumberOfArgumentsMustMatch) {
+ return ::DeduceTemplateArguments(*this, TemplateParams, Ps, As, Info, Deduced,
+ NumberOfArgumentsMustMatch);
}
/// Determine whether two template arguments are the same.
@@ -2575,22 +2747,10 @@ static bool isSameTemplateArg(ASTContext &Context,
llvm_unreachable("Invalid TemplateArgument Kind!");
}
-/// Allocate a TemplateArgumentLoc where all locations have
-/// been initialized to the given location.
-///
-/// \param Arg The template argument we are producing template argument
-/// location information for.
-///
-/// \param NTTPType For a declaration template argument, the type of
-/// the non-type template parameter that corresponds to this template
-/// argument. Can be null if no type sugar is available to add to the
-/// type from the template argument.
-///
-/// \param Loc The source location to use for the resulting template
-/// argument.
TemplateArgumentLoc
Sema::getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
- QualType NTTPType, SourceLocation Loc) {
+ QualType NTTPType, SourceLocation Loc,
+ NamedDecl *TemplateParam) {
switch (Arg.getKind()) {
case TemplateArgument::Null:
llvm_unreachable("Can't get a NULL template argument here");
@@ -2602,7 +2762,8 @@ Sema::getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
case TemplateArgument::Declaration: {
if (NTTPType.isNull())
NTTPType = Arg.getParamTypeForDecl();
- Expr *E = BuildExpressionFromDeclTemplateArgument(Arg, NTTPType, Loc)
+ Expr *E = BuildExpressionFromDeclTemplateArgument(Arg, NTTPType, Loc,
+ TemplateParam)
.getAs<Expr>();
return TemplateArgumentLoc(TemplateArgument(E), E);
}
@@ -2669,8 +2830,8 @@ static bool ConvertDeducedTemplateArgument(
// Convert the deduced template argument into a template
// argument that we can check, almost as if the user had written
// the template argument explicitly.
- TemplateArgumentLoc ArgLoc =
- S.getTrivialTemplateArgumentLoc(Arg, QualType(), Info.getLocation());
+ TemplateArgumentLoc ArgLoc = S.getTrivialTemplateArgumentLoc(
+ Arg, QualType(), Info.getLocation(), Param);
// Check the template argument, converting it as necessary.
return S.CheckTemplateArgument(
@@ -2754,7 +2915,7 @@ static bool ConvertDeducedTemplateArgument(
// ClassTemplatePartialSpecializationDecl sadly does not derive from
// TemplateDecl.
template <typename TemplateDeclT>
-static Sema::TemplateDeductionResult ConvertDeducedTemplateArguments(
+static TemplateDeductionResult ConvertDeducedTemplateArguments(
Sema &S, TemplateDeclT *Template, bool IsDeduced,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
TemplateDeductionInfo &Info,
@@ -2773,7 +2934,8 @@ static Sema::TemplateDeductionResult ConvertDeducedTemplateArguments(
// FIXME: Where did the word "trailing" come from?
if (Deduced[I].isNull() && Param->isTemplateParameterPack()) {
if (auto Result =
- PackDeductionScope(S, TemplateParams, Deduced, Info, I).finish())
+ PackDeductionScope(S, TemplateParams, Deduced, Info, I).finish();
+ Result != TemplateDeductionResult::Success)
return Result;
}
@@ -2810,7 +2972,7 @@ static Sema::TemplateDeductionResult ConvertDeducedTemplateArguments(
Info.reset(
TemplateArgumentList::CreateCopy(S.Context, SugaredBuilder),
TemplateArgumentList::CreateCopy(S.Context, CanonicalBuilder));
- return Sema::TDK_SubstitutionFailure;
+ return TemplateDeductionResult::SubstitutionFailure;
}
continue;
@@ -2822,7 +2984,7 @@ static Sema::TemplateDeductionResult ConvertDeducedTemplateArguments(
if (!TD) {
assert(isa<ClassTemplatePartialSpecializationDecl>(Template) ||
isa<VarTemplatePartialSpecializationDecl>(Template));
- return Sema::TDK_Incomplete;
+ return TemplateDeductionResult::Incomplete;
}
TemplateArgumentLoc DefArg;
@@ -2852,8 +3014,8 @@ static Sema::TemplateDeductionResult ConvertDeducedTemplateArguments(
TemplateArgumentList::CreateCopy(S.Context, CanonicalBuilder));
if (PartialOverloading) break;
- return HasDefaultArg ? Sema::TDK_SubstitutionFailure
- : Sema::TDK_Incomplete;
+ return HasDefaultArg ? TemplateDeductionResult::SubstitutionFailure
+ : TemplateDeductionResult::Incomplete;
}
// Check whether we can actually use the default argument.
@@ -2865,13 +3027,13 @@ static Sema::TemplateDeductionResult ConvertDeducedTemplateArguments(
// FIXME: These template arguments are temporary. Free them!
Info.reset(TemplateArgumentList::CreateCopy(S.Context, SugaredBuilder),
TemplateArgumentList::CreateCopy(S.Context, CanonicalBuilder));
- return Sema::TDK_SubstitutionFailure;
+ return TemplateDeductionResult::SubstitutionFailure;
}
// If we get here, we successfully used the default template argument.
}
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
}
static DeclContext *getAsDeclContextOrEnclosing(Decl *D) {
@@ -2907,7 +3069,7 @@ bool DeducedArgsNeedReplacement<ClassTemplatePartialSpecializationDecl>(
}
template <typename TemplateDeclT>
-static Sema::TemplateDeductionResult
+static TemplateDeductionResult
CheckDeducedArgumentConstraints(Sema &S, TemplateDeclT *Template,
ArrayRef<TemplateArgument> SugaredDeducedArgs,
ArrayRef<TemplateArgument> CanonicalDeducedArgs,
@@ -2915,13 +3077,14 @@ CheckDeducedArgumentConstraints(Sema &S, TemplateDeclT *Template,
llvm::SmallVector<const Expr *, 3> AssociatedConstraints;
Template->getAssociatedConstraints(AssociatedConstraints);
- bool NeedsReplacement = DeducedArgsNeedReplacement(Template);
- TemplateArgumentList DeducedTAL{TemplateArgumentList::OnStack,
- CanonicalDeducedArgs};
+ std::optional<ArrayRef<TemplateArgument>> Innermost;
+ // If we don't need to replace the deduced template arguments,
+ // we can add them immediately as the inner-most argument list.
+ if (!DeducedArgsNeedReplacement(Template))
+ Innermost = CanonicalDeducedArgs;
MultiLevelTemplateArgumentList MLTAL = S.getTemplateInstantiationArgs(
- Template, Template->getDeclContext(), /*Final=*/false,
- /*InnerMost=*/NeedsReplacement ? nullptr : &DeducedTAL,
+ Template, Template->getDeclContext(), /*Final=*/false, Innermost,
/*RelativeToPrimary=*/true, /*Pattern=*/
nullptr, /*ForConstraintInstantiation=*/true);
@@ -2929,7 +3092,7 @@ CheckDeducedArgumentConstraints(Sema &S, TemplateDeclT *Template,
// template args when this is a variable template partial specialization and
// not class-scope explicit specialization, so replace with Deduced Args
// instead of adding to inner-most.
- if (NeedsReplacement)
+ if (!Innermost)
MLTAL.replaceInnermostTemplateArguments(Template, CanonicalDeducedArgs);
if (S.CheckConstraintSatisfaction(Template, AssociatedConstraints, MLTAL,
@@ -2939,18 +3102,18 @@ CheckDeducedArgumentConstraints(Sema &S, TemplateDeclT *Template,
Info.reset(
TemplateArgumentList::CreateCopy(S.Context, SugaredDeducedArgs),
TemplateArgumentList::CreateCopy(S.Context, CanonicalDeducedArgs));
- return Sema::TDK_ConstraintsNotSatisfied;
+ return TemplateDeductionResult::ConstraintsNotSatisfied;
}
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
}
/// Complete template argument deduction for a partial specialization.
template <typename T>
static std::enable_if_t<IsPartialSpecialization<T>::value,
- Sema::TemplateDeductionResult>
+ TemplateDeductionResult>
FinishTemplateArgumentDeduction(
Sema &S, T *Partial, bool IsPartialOrdering,
- const TemplateArgumentList &TemplateArgs,
+ ArrayRef<TemplateArgument> TemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
TemplateDeductionInfo &Info) {
// Unevaluated SFINAE context.
@@ -2966,7 +3129,8 @@ FinishTemplateArgumentDeduction(
SmallVector<TemplateArgument, 4> SugaredBuilder, CanonicalBuilder;
if (auto Result = ConvertDeducedTemplateArguments(
S, Partial, IsPartialOrdering, Deduced, Info, SugaredBuilder,
- CanonicalBuilder))
+ CanonicalBuilder);
+ Result != TemplateDeductionResult::Success)
return Result;
// Form the template argument list from the deduced template arguments.
@@ -3003,7 +3167,7 @@ FinishTemplateArgumentDeduction(
Partial->getTemplateParameters()->getParam(ParamIdx));
Info.Param = makeTemplateParameter(Param);
Info.FirstArg = (*PartialTemplArgInfo)[ArgIdx].getArgument();
- return Sema::TDK_SubstitutionFailure;
+ return TemplateDeductionResult::SubstitutionFailure;
}
bool ConstraintsNotSatisfied;
@@ -3013,8 +3177,9 @@ FinishTemplateArgumentDeduction(
Template, Partial->getLocation(), InstArgs, false,
SugaredConvertedInstArgs, CanonicalConvertedInstArgs,
/*UpdateArgsWithConversions=*/true, &ConstraintsNotSatisfied))
- return ConstraintsNotSatisfied ? Sema::TDK_ConstraintsNotSatisfied
- : Sema::TDK_SubstitutionFailure;
+ return ConstraintsNotSatisfied
+ ? TemplateDeductionResult::ConstraintsNotSatisfied
+ : TemplateDeductionResult::SubstitutionFailure;
TemplateParameterList *TemplateParams = Template->getTemplateParameters();
for (unsigned I = 0, E = TemplateParams->size(); I != E; ++I) {
@@ -3024,26 +3189,27 @@ FinishTemplateArgumentDeduction(
Info.Param = makeTemplateParameter(TemplateParams->getParam(I));
Info.FirstArg = TemplateArgs[I];
Info.SecondArg = InstArg;
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
}
}
if (Trap.hasErrorOccurred())
- return Sema::TDK_SubstitutionFailure;
+ return TemplateDeductionResult::SubstitutionFailure;
if (auto Result = CheckDeducedArgumentConstraints(S, Partial, SugaredBuilder,
- CanonicalBuilder, Info))
+ CanonicalBuilder, Info);
+ Result != TemplateDeductionResult::Success)
return Result;
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
}
/// Complete template argument deduction for a class or variable template,
/// when partial ordering against a partial specialization.
// FIXME: Factor out duplication with partial specialization version above.
-static Sema::TemplateDeductionResult FinishTemplateArgumentDeduction(
+static TemplateDeductionResult FinishTemplateArgumentDeduction(
Sema &S, TemplateDecl *Template, bool PartialOrdering,
- const TemplateArgumentList &TemplateArgs,
+ ArrayRef<TemplateArgument> TemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
TemplateDeductionInfo &Info) {
// Unevaluated SFINAE context.
@@ -3061,7 +3227,8 @@ static Sema::TemplateDeductionResult FinishTemplateArgumentDeduction(
S, Template, /*IsDeduced*/ PartialOrdering, Deduced, Info,
SugaredBuilder, CanonicalBuilder,
/*CurrentInstantiationScope=*/nullptr,
- /*NumAlreadyConverted=*/0U, /*PartialOverloading=*/false))
+ /*NumAlreadyConverted=*/0U, /*PartialOverloading=*/false);
+ Result != TemplateDeductionResult::Success)
return Result;
// Check that we produced the correct argument list.
@@ -3073,29 +3240,66 @@ static Sema::TemplateDeductionResult FinishTemplateArgumentDeduction(
Info.Param = makeTemplateParameter(TemplateParams->getParam(I));
Info.FirstArg = TemplateArgs[I];
Info.SecondArg = InstArg;
- return Sema::TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
}
}
if (Trap.hasErrorOccurred())
- return Sema::TDK_SubstitutionFailure;
+ return TemplateDeductionResult::SubstitutionFailure;
if (auto Result = CheckDeducedArgumentConstraints(S, Template, SugaredBuilder,
- CanonicalBuilder, Info))
+ CanonicalBuilder, Info);
+ Result != TemplateDeductionResult::Success)
return Result;
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
}
+/// Complete template argument deduction for DeduceTemplateArgumentsFromType.
+/// FIXME: this is mostly duplicated with the above two versions. Deduplicate
+/// the three implementations.
+static TemplateDeductionResult FinishTemplateArgumentDeduction(
+ Sema &S, TemplateDecl *TD,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ TemplateDeductionInfo &Info) {
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(
+ S, Sema::ExpressionEvaluationContext::Unevaluated);
+ Sema::SFINAETrap Trap(S);
-/// Perform template argument deduction to determine whether
-/// the given template arguments match the given class template
-/// partial specialization per C++ [temp.class.spec.match].
-Sema::TemplateDeductionResult
-Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
- const TemplateArgumentList &TemplateArgs,
- TemplateDeductionInfo &Info) {
+ Sema::ContextRAII SavedContext(S, getAsDeclContextOrEnclosing(TD));
+
+ // C++ [temp.deduct.type]p2:
+ // [...] or if any template argument remains neither deduced nor
+ // explicitly specified, template argument deduction fails.
+ SmallVector<TemplateArgument, 4> SugaredBuilder, CanonicalBuilder;
+ if (auto Result = ConvertDeducedTemplateArguments(
+ S, TD, /*IsPartialOrdering=*/false, Deduced, Info, SugaredBuilder,
+ CanonicalBuilder);
+ Result != TemplateDeductionResult::Success)
+ return Result;
+
+ if (Trap.hasErrorOccurred())
+ return TemplateDeductionResult::SubstitutionFailure;
+
+ if (auto Result = CheckDeducedArgumentConstraints(S, TD, SugaredBuilder,
+ CanonicalBuilder, Info);
+ Result != TemplateDeductionResult::Success)
+ return Result;
+
+ return TemplateDeductionResult::Success;
+}
+
+/// Perform template argument deduction to determine whether the given template
+/// arguments match the given class or variable template partial specialization
+/// per C++ [temp.class.spec.match].
+template <typename T>
+static std::enable_if_t<IsPartialSpecialization<T>::value,
+ TemplateDeductionResult>
+DeduceTemplateArguments(Sema &S, T *Partial,
+ ArrayRef<TemplateArgument> TemplateArgs,
+ TemplateDeductionInfo &Info) {
if (Partial->isInvalidDecl())
- return TDK_Invalid;
+ return TemplateDeductionResult::Invalid;
// C++ [temp.class.spec.match]p2:
// A partial specialization matches a given actual template
@@ -3105,55 +3309,70 @@ Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
// Unevaluated SFINAE context.
EnterExpressionEvaluationContext Unevaluated(
- *this, Sema::ExpressionEvaluationContext::Unevaluated);
- SFINAETrap Trap(*this);
+ S, Sema::ExpressionEvaluationContext::Unevaluated);
+ Sema::SFINAETrap Trap(S);
// This deduction has no relation to any outer instantiation we might be
// performing.
- LocalInstantiationScope InstantiationScope(*this);
+ LocalInstantiationScope InstantiationScope(S);
SmallVector<DeducedTemplateArgument, 4> Deduced;
Deduced.resize(Partial->getTemplateParameters()->size());
- if (TemplateDeductionResult Result
- = ::DeduceTemplateArguments(*this,
- Partial->getTemplateParameters(),
- Partial->getTemplateArgs(),
- TemplateArgs, Info, Deduced))
+ if (TemplateDeductionResult Result = ::DeduceTemplateArguments(
+ S, Partial->getTemplateParameters(),
+ Partial->getTemplateArgs().asArray(), TemplateArgs, Info, Deduced,
+ /*NumberOfArgumentsMustMatch=*/false);
+ Result != TemplateDeductionResult::Success)
return Result;
SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end());
- InstantiatingTemplate Inst(*this, Info.getLocation(), Partial, DeducedArgs,
- Info);
+ Sema::InstantiatingTemplate Inst(S, Info.getLocation(), Partial, DeducedArgs,
+ Info);
if (Inst.isInvalid())
- return TDK_InstantiationDepth;
+ return TemplateDeductionResult::InstantiationDepth;
if (Trap.hasErrorOccurred())
- return Sema::TDK_SubstitutionFailure;
+ return TemplateDeductionResult::SubstitutionFailure;
TemplateDeductionResult Result;
- runWithSufficientStackSpace(Info.getLocation(), [&] {
- Result = ::FinishTemplateArgumentDeduction(*this, Partial,
+ S.runWithSufficientStackSpace(Info.getLocation(), [&] {
+ Result = ::FinishTemplateArgumentDeduction(S, Partial,
/*IsPartialOrdering=*/false,
TemplateArgs, Deduced, Info);
});
return Result;
}
-/// Perform template argument deduction to determine whether
-/// the given template arguments match the given variable template
-/// partial specialization per C++ [temp.class.spec.match].
-Sema::TemplateDeductionResult
+TemplateDeductionResult
+Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
+ ArrayRef<TemplateArgument> TemplateArgs,
+ TemplateDeductionInfo &Info) {
+ return ::DeduceTemplateArguments(*this, Partial, TemplateArgs, Info);
+}
+TemplateDeductionResult
Sema::DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
- const TemplateArgumentList &TemplateArgs,
+ ArrayRef<TemplateArgument> TemplateArgs,
TemplateDeductionInfo &Info) {
- if (Partial->isInvalidDecl())
- return TDK_Invalid;
+ return ::DeduceTemplateArguments(*this, Partial, TemplateArgs, Info);
+}
- // C++ [temp.class.spec.match]p2:
- // A partial specialization matches a given actual template
- // argument list if the template arguments of the partial
- // specialization can be deduced from the actual template argument
- // list (14.8.2).
+TemplateDeductionResult
+Sema::DeduceTemplateArgumentsFromType(TemplateDecl *TD, QualType FromType,
+ sema::TemplateDeductionInfo &Info) {
+ if (TD->isInvalidDecl())
+ return TemplateDeductionResult::Invalid;
+
+ QualType PType;
+ if (const auto *CTD = dyn_cast<ClassTemplateDecl>(TD)) {
+ // Use the InjectedClassNameType.
+ PType = Context.getTypeDeclType(CTD->getTemplatedDecl());
+ } else if (const auto *AliasTemplate = dyn_cast<TypeAliasTemplateDecl>(TD)) {
+ PType = AliasTemplate->getTemplatedDecl()
+ ->getUnderlyingType()
+ .getCanonicalType();
+ } else {
+ assert(false && "Expected a class or alias template");
+ }
// Unevaluated SFINAE context.
EnterExpressionEvaluationContext Unevaluated(
@@ -3164,27 +3383,27 @@ Sema::DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
// performing.
LocalInstantiationScope InstantiationScope(*this);
- SmallVector<DeducedTemplateArgument, 4> Deduced;
- Deduced.resize(Partial->getTemplateParameters()->size());
- if (TemplateDeductionResult Result = ::DeduceTemplateArguments(
- *this, Partial->getTemplateParameters(), Partial->getTemplateArgs(),
- TemplateArgs, Info, Deduced))
- return Result;
+ SmallVector<DeducedTemplateArgument> Deduced(
+ TD->getTemplateParameters()->size());
+ SmallVector<TemplateArgument> PArgs = {TemplateArgument(PType)};
+ SmallVector<TemplateArgument> AArgs = {TemplateArgument(FromType)};
+ if (auto DeducedResult = DeduceTemplateArguments(
+ TD->getTemplateParameters(), PArgs, AArgs, Info, Deduced, false);
+ DeducedResult != TemplateDeductionResult::Success) {
+ return DeducedResult;
+ }
SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end());
- InstantiatingTemplate Inst(*this, Info.getLocation(), Partial, DeducedArgs,
- Info);
+ InstantiatingTemplate Inst(*this, Info.getLocation(), TD, DeducedArgs, Info);
if (Inst.isInvalid())
- return TDK_InstantiationDepth;
+ return TemplateDeductionResult::InstantiationDepth;
if (Trap.hasErrorOccurred())
- return Sema::TDK_SubstitutionFailure;
+ return TemplateDeductionResult::SubstitutionFailure;
TemplateDeductionResult Result;
runWithSufficientStackSpace(Info.getLocation(), [&] {
- Result = ::FinishTemplateArgumentDeduction(*this, Partial,
- /*IsPartialOrdering=*/false,
- TemplateArgs, Deduced, Info);
+ Result = ::FinishTemplateArgumentDeduction(*this, TD, Deduced, Info);
});
return Result;
}
@@ -3209,31 +3428,7 @@ static bool isSimpleTemplateIdType(QualType T) {
return false;
}
-/// Substitute the explicitly-provided template arguments into the
-/// given function template according to C++ [temp.arg.explicit].
-///
-/// \param FunctionTemplate the function template into which the explicit
-/// template arguments will be substituted.
-///
-/// \param ExplicitTemplateArgs the explicitly-specified template
-/// arguments.
-///
-/// \param Deduced the deduced template arguments, which will be populated
-/// with the converted and checked explicit template arguments.
-///
-/// \param ParamTypes will be populated with the instantiated function
-/// parameters.
-///
-/// \param FunctionType if non-NULL, the result type of the function template
-/// will also be instantiated and the pointed-to value will be updated with
-/// the instantiated function type.
-///
-/// \param Info if substitution fails for any reason, this object will be
-/// populated with more information about the failure.
-///
-/// \returns TDK_Success if substitution was successful, or some failure
-/// condition.
-Sema::TemplateDeductionResult Sema::SubstituteExplicitTemplateArguments(
+TemplateDeductionResult Sema::SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
@@ -3251,7 +3446,7 @@ Sema::TemplateDeductionResult Sema::SubstituteExplicitTemplateArguments(
if (FunctionType)
*FunctionType = Function->getType();
- return TDK_Success;
+ return TemplateDeductionResult::Success;
}
// Unevaluated SFINAE context.
@@ -3274,7 +3469,7 @@ Sema::TemplateDeductionResult Sema::SubstituteExplicitTemplateArguments(
*this, Info.getLocation(), FunctionTemplate, DeducedArgs,
CodeSynthesisContext::ExplicitTemplateArgumentSubstitution, Info);
if (Inst.isInvalid())
- return TDK_InstantiationDepth;
+ return TemplateDeductionResult::InstantiationDepth;
if (CheckTemplateArgumentList(FunctionTemplate, SourceLocation(),
ExplicitTemplateArgs, true, SugaredBuilder,
@@ -3283,9 +3478,9 @@ Sema::TemplateDeductionResult Sema::SubstituteExplicitTemplateArguments(
Trap.hasErrorOccurred()) {
unsigned Index = SugaredBuilder.size();
if (Index >= TemplateParams->size())
- return TDK_SubstitutionFailure;
+ return TemplateDeductionResult::SubstitutionFailure;
Info.Param = makeTemplateParameter(TemplateParams->getParam(Index));
- return TDK_InvalidExplicitArguments;
+ return TemplateDeductionResult::InvalidExplicitArguments;
}
// Form the template argument list from the explicitly-specified
@@ -3344,7 +3539,7 @@ Sema::TemplateDeductionResult Sema::SubstituteExplicitTemplateArguments(
if (SubstParmTypes(Function->getLocation(), Function->parameters(),
Proto->getExtParameterInfosOrNull(), MLTAL, ParamTypes,
/*params=*/nullptr, ExtParamInfos))
- return TDK_SubstitutionFailure;
+ return TemplateDeductionResult::SubstitutionFailure;
}
// Instantiate the return type.
@@ -3370,13 +3565,13 @@ Sema::TemplateDeductionResult Sema::SubstituteExplicitTemplateArguments(
SubstType(Proto->getReturnType(), MLTAL,
Function->getTypeSpecStartLoc(), Function->getDeclName());
if (ResultType.isNull() || Trap.hasErrorOccurred())
- return TDK_SubstitutionFailure;
+ return TemplateDeductionResult::SubstitutionFailure;
// CUDA: Kernel function must have 'void' return type.
if (getLangOpts().CUDA)
if (Function->hasAttr<CUDAGlobalAttr>() && !ResultType->isVoidType()) {
Diag(Function->getLocation(), diag::err_kern_type_not_void_return)
<< Function->getType() << Function->getSourceRange();
- return TDK_SubstitutionFailure;
+ return TemplateDeductionResult::SubstitutionFailure;
}
}
@@ -3386,34 +3581,17 @@ Sema::TemplateDeductionResult Sema::SubstituteExplicitTemplateArguments(
SubstParmTypes(Function->getLocation(), Function->parameters(),
Proto->getExtParameterInfosOrNull(), MLTAL, ParamTypes,
/*params*/ nullptr, ExtParamInfos))
- return TDK_SubstitutionFailure;
+ return TemplateDeductionResult::SubstitutionFailure;
if (FunctionType) {
auto EPI = Proto->getExtProtoInfo();
EPI.ExtParameterInfos = ExtParamInfos.getPointerOrNull(ParamTypes.size());
-
- // In C++1z onwards, exception specifications are part of the function type,
- // so substitution into the type must also substitute into the exception
- // specification.
- SmallVector<QualType, 4> ExceptionStorage;
- if (getLangOpts().CPlusPlus17 &&
- SubstExceptionSpec(Function->getLocation(), EPI.ExceptionSpec,
- ExceptionStorage,
- getTemplateInstantiationArgs(
- FunctionTemplate, nullptr, /*Final=*/true,
- /*Innermost=*/SugaredExplicitArgumentList,
- /*RelativeToPrimary=*/false,
- /*Pattern=*/nullptr,
- /*ForConstraintInstantiation=*/false,
- /*SkipForSpecialization=*/true)))
- return TDK_SubstitutionFailure;
-
*FunctionType = BuildFunctionType(ResultType, ParamTypes,
Function->getLocation(),
Function->getDeclName(),
EPI);
if (FunctionType->isNull() || Trap.hasErrorOccurred())
- return TDK_SubstitutionFailure;
+ return TemplateDeductionResult::SubstitutionFailure;
}
// C++ [temp.arg.explicit]p2:
@@ -3435,23 +3613,24 @@ Sema::TemplateDeductionResult Sema::SubstituteExplicitTemplateArguments(
Deduced.push_back(Arg);
}
- return TDK_Success;
+ return TemplateDeductionResult::Success;
}
/// Check whether the deduced argument type for a call to a function
/// template matches the actual argument type per C++ [temp.deduct.call]p4.
-static Sema::TemplateDeductionResult
+static TemplateDeductionResult
CheckOriginalCallArgDeduction(Sema &S, TemplateDeductionInfo &Info,
Sema::OriginalCallArg OriginalArg,
QualType DeducedA) {
ASTContext &Context = S.Context;
- auto Failed = [&]() -> Sema::TemplateDeductionResult {
+ auto Failed = [&]() -> TemplateDeductionResult {
Info.FirstArg = TemplateArgument(DeducedA);
Info.SecondArg = TemplateArgument(OriginalArg.OriginalArgType);
Info.CallArgIndex = OriginalArg.ArgIdx;
- return OriginalArg.DecomposedParam ? Sema::TDK_DeducedMismatchNested
- : Sema::TDK_DeducedMismatch;
+ return OriginalArg.DecomposedParam
+ ? TemplateDeductionResult::DeducedMismatchNested
+ : TemplateDeductionResult::DeducedMismatch;
};
QualType A = OriginalArg.OriginalArgType;
@@ -3459,7 +3638,7 @@ CheckOriginalCallArgDeduction(Sema &S, TemplateDeductionInfo &Info,
// Check for type equality (top-level cv-qualifiers are ignored).
if (Context.hasSameUnqualifiedType(A, DeducedA))
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
// Strip off references on the argument types; they aren't needed for
// the following checks.
@@ -3483,7 +3662,7 @@ CheckOriginalCallArgDeduction(Sema &S, TemplateDeductionInfo &Info,
// the deduced A can be F.
QualType Tmp;
if (A->isFunctionType() && S.IsFunctionConversion(A, DeducedA, Tmp))
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
Qualifiers AQuals = A.getQualifiers();
Qualifiers DeducedAQuals = DeducedA.getQualifiers();
@@ -3524,7 +3703,7 @@ CheckOriginalCallArgDeduction(Sema &S, TemplateDeductionInfo &Info,
(S.IsQualificationConversion(A, DeducedA, false,
ObjCLifetimeConversion) ||
S.IsFunctionConversion(A, DeducedA, ResultTy)))
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
// - If P is a class and P has the form simple-template-id, then the
// transformed A can be a derived class of the deduced A. [...]
@@ -3545,11 +3724,11 @@ CheckOriginalCallArgDeduction(Sema &S, TemplateDeductionInfo &Info,
}
if (Context.hasSameUnqualifiedType(A, DeducedA))
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
if (A->isRecordType() && isSimpleTemplateIdType(OriginalParamType) &&
S.IsDerivedFrom(Info.getLocation(), A, DeducedA))
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
return Failed();
}
@@ -3587,7 +3766,7 @@ static unsigned getPackIndexForParam(Sema &S,
// if `Specialization` is a `CXXConstructorDecl` or `CXXConversionDecl`,
// we'll try to instantiate and update its explicit specifier after constraint
// checking.
-static Sema::TemplateDeductionResult instantiateExplicitSpecifierDeferred(
+static TemplateDeductionResult instantiateExplicitSpecifierDeferred(
Sema &S, FunctionDecl *Specialization,
const MultiLevelTemplateArgumentList &SubstArgs,
TemplateDeductionInfo &Info, FunctionTemplateDecl *FunctionTemplate,
@@ -3606,33 +3785,27 @@ static Sema::TemplateDeductionResult instantiateExplicitSpecifierDeferred(
ExplicitSpecifier ES = GetExplicitSpecifier(Specialization);
Expr *ExplicitExpr = ES.getExpr();
if (!ExplicitExpr)
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
if (!ExplicitExpr->isValueDependent())
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
Sema::InstantiatingTemplate Inst(
S, Info.getLocation(), FunctionTemplate, DeducedArgs,
Sema::CodeSynthesisContext::DeducedTemplateArgumentSubstitution, Info);
if (Inst.isInvalid())
- return Sema::TDK_InstantiationDepth;
+ return TemplateDeductionResult::InstantiationDepth;
Sema::SFINAETrap Trap(S);
const ExplicitSpecifier InstantiatedES =
S.instantiateExplicitSpecifier(SubstArgs, ES);
if (InstantiatedES.isInvalid() || Trap.hasErrorOccurred()) {
Specialization->setInvalidDecl(true);
- return Sema::TDK_SubstitutionFailure;
+ return TemplateDeductionResult::SubstitutionFailure;
}
SetExplicitSpecifier(Specialization, InstantiatedES);
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
}
-/// Finish template argument deduction for a function template,
-/// checking the deduced template arguments for completeness and forming
-/// the function template specialization.
-///
-/// \param OriginalCallArgs If non-NULL, the original call arguments against
-/// which the deduced argument types should be compared.
-Sema::TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
+TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
@@ -3651,7 +3824,7 @@ Sema::TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
*this, Info.getLocation(), FunctionTemplate, DeducedArgs,
CodeSynthesisContext::DeducedTemplateArgumentSubstitution, Info);
if (Inst.isInvalid())
- return TDK_InstantiationDepth;
+ return TemplateDeductionResult::InstantiationDepth;
ContextRAII SavedContext(*this, FunctionTemplate->getTemplatedDecl());
@@ -3662,7 +3835,8 @@ Sema::TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
if (auto Result = ConvertDeducedTemplateArguments(
*this, FunctionTemplate, /*IsDeduced*/ true, Deduced, Info,
SugaredBuilder, CanonicalBuilder, CurrentInstantiationScope,
- NumExplicitlySpecified, PartialOverloading))
+ NumExplicitlySpecified, PartialOverloading);
+ Result != TemplateDeductionResult::Success)
return Result;
// C++ [temp.deduct.call]p10: [DR1391]
@@ -3675,7 +3849,7 @@ Sema::TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
// explicitly-specified template arguments, if the corresponding argument
// A cannot be implicitly converted to P, deduction fails.
if (CheckNonDependent())
- return TDK_NonDependentConversionFailure;
+ return TemplateDeductionResult::NonDependentConversionFailure;
// Form the template argument list from the deduced template arguments.
TemplateArgumentList *SugaredDeducedArgumentList =
@@ -3712,7 +3886,7 @@ Sema::TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
Specialization = cast_or_null<FunctionDecl>(
SubstDecl(FD, Owner, SubstArgs));
if (!Specialization || Specialization->isInvalidDecl())
- return TDK_SubstitutionFailure;
+ return TemplateDeductionResult::SubstitutionFailure;
assert(Specialization->getPrimaryTemplate()->getCanonicalDecl() ==
FunctionTemplate->getCanonicalDecl());
@@ -3729,7 +3903,7 @@ Sema::TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
// failure.
if (Trap.hasErrorOccurred()) {
Specialization->setInvalidDecl(true);
- return TDK_SubstitutionFailure;
+ return TemplateDeductionResult::SubstitutionFailure;
}
// C++2a [temp.deduct]p5
@@ -3746,12 +3920,12 @@ Sema::TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
if (CheckInstantiatedFunctionTemplateConstraints(
Info.getLocation(), Specialization, CanonicalBuilder,
Info.AssociatedConstraintsSatisfaction))
- return TDK_MiscellaneousDeductionFailure;
+ return TemplateDeductionResult::MiscellaneousDeductionFailure;
if (!Info.AssociatedConstraintsSatisfaction.IsSatisfied) {
Info.reset(Info.takeSugared(),
TemplateArgumentList::CreateCopy(Context, CanonicalBuilder));
- return TDK_ConstraintsNotSatisfied;
+ return TemplateDeductionResult::ConstraintsNotSatisfied;
}
}
@@ -3759,10 +3933,11 @@ Sema::TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
// substitution of `FD` before. So, we try to instantiate it back if
// `Specialization` is either a constructor or a conversion function.
if (isa<CXXConstructorDecl, CXXConversionDecl>(Specialization)) {
- if (TDK_Success != instantiateExplicitSpecifierDeferred(
- *this, Specialization, SubstArgs, Info,
- FunctionTemplate, DeducedArgs)) {
- return TDK_SubstitutionFailure;
+ if (TemplateDeductionResult::Success !=
+ instantiateExplicitSpecifierDeferred(*this, Specialization, SubstArgs,
+ Info, FunctionTemplate,
+ DeducedArgs)) {
+ return TemplateDeductionResult::SubstitutionFailure;
}
}
@@ -3809,7 +3984,8 @@ Sema::TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
}
if (auto TDK =
- CheckOriginalCallArgDeduction(*this, Info, OriginalArg, DeducedA))
+ CheckOriginalCallArgDeduction(*this, Info, OriginalArg, DeducedA);
+ TDK != TemplateDeductionResult::Success)
return TDK;
}
}
@@ -3826,7 +4002,7 @@ Sema::TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
.append(Info.diag_begin(), Info.diag_end());
}
- return TDK_Success;
+ return TemplateDeductionResult::Success;
}
/// Gets the type of a function for template-argument-deducton
@@ -3918,7 +4094,8 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
FunctionDecl *Specialization = nullptr;
TemplateDeductionInfo Info(Ovl->getNameLoc());
if (S.DeduceTemplateArguments(FunTmpl, &ExplicitTemplateArgs,
- Specialization, Info))
+ Specialization,
+ Info) != TemplateDeductionResult::Success)
continue;
D = Specialization;
@@ -3948,10 +4125,10 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
SmallVector<DeducedTemplateArgument, 8>
Deduced(TemplateParams->size());
TemplateDeductionInfo Info(Ovl->getNameLoc());
- Sema::TemplateDeductionResult Result
- = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, ParamType,
- ArgType, Info, Deduced, TDF);
- if (Result) continue;
+ TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, ParamType, ArgType, Info, Deduced, TDF);
+ if (Result != TemplateDeductionResult::Success)
+ continue;
if (!Match.isNull())
return {};
Match = ArgType;
@@ -4064,7 +4241,7 @@ static bool
hasDeducibleTemplateParameters(Sema &S, FunctionTemplateDecl *FunctionTemplate,
QualType T);
-static Sema::TemplateDeductionResult DeduceTemplateArgumentsFromCallArgument(
+static TemplateDeductionResult DeduceTemplateArgumentsFromCallArgument(
Sema &S, TemplateParameterList *TemplateParams, unsigned FirstInnerIndex,
QualType ParamType, QualType ArgType,
Expr::Classification ArgClassification, Expr *Arg,
@@ -4076,7 +4253,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsFromCallArgument(
/// Attempt template argument deduction from an initializer list
/// deemed to be an argument in a function call.
-static Sema::TemplateDeductionResult DeduceFromInitializerList(
+static TemplateDeductionResult DeduceFromInitializerList(
Sema &S, TemplateParameterList *TemplateParams, QualType AdjustedParamType,
InitListExpr *ILE, TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
@@ -4091,7 +4268,7 @@ static Sema::TemplateDeductionResult DeduceFromInitializerList(
//
// We've already removed references and cv-qualifiers here.
if (!ILE->getNumInits())
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
QualType ElTy;
auto *ArrTy = S.Context.getAsArrayType(AdjustedParamType);
@@ -4100,14 +4277,14 @@ static Sema::TemplateDeductionResult DeduceFromInitializerList(
else if (!S.isStdInitializerList(AdjustedParamType, &ElTy)) {
// Otherwise, an initializer list argument causes the parameter to be
// considered a non-deduced context
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
}
// Resolving a core issue: a braced-init-list containing any designators is
// a non-deduced context.
for (Expr *E : ILE->inits())
if (isa<DesignatedInitExpr>(E))
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
// Deduction only needs to be done for dependent types.
if (ElTy->isDependentType()) {
@@ -4115,7 +4292,8 @@ static Sema::TemplateDeductionResult DeduceFromInitializerList(
if (auto Result = DeduceTemplateArgumentsFromCallArgument(
S, TemplateParams, 0, ElTy, E->getType(),
E->Classify(S.getASTContext()), E, Info, Deduced,
- OriginalCallArgs, true, ArgIdx, TDF))
+ OriginalCallArgs, true, ArgIdx, TDF);
+ Result != TemplateDeductionResult::Success)
return Result;
}
}
@@ -4134,17 +4312,18 @@ static Sema::TemplateDeductionResult DeduceFromInitializerList(
llvm::APInt Size(S.Context.getIntWidth(T), ILE->getNumInits());
if (auto Result = DeduceNonTypeTemplateArgument(
S, TemplateParams, NTTP, llvm::APSInt(Size), T,
- /*ArrayBound=*/true, Info, Deduced))
+ /*ArrayBound=*/true, Info, Deduced);
+ Result != TemplateDeductionResult::Success)
return Result;
}
}
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
}
/// Perform template argument deduction per [temp.deduct.call] for a
/// single parameter / argument pair.
-static Sema::TemplateDeductionResult DeduceTemplateArgumentsFromCallArgument(
+static TemplateDeductionResult DeduceTemplateArgumentsFromCallArgument(
Sema &S, TemplateParameterList *TemplateParams, unsigned FirstInnerIndex,
QualType ParamType, QualType ArgType,
Expr::Classification ArgClassification, Expr *Arg,
@@ -4161,7 +4340,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsFromCallArgument(
if (AdjustFunctionParmAndArgTypesForDeduction(
S, TemplateParams, FirstInnerIndex, ParamType, ArgType,
ArgClassification, Arg, TDF, FailedTSC))
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
// If [...] the argument is a non-empty initializer list [...]
if (InitListExpr *ILE = dyn_cast_if_present<InitListExpr>(Arg))
@@ -4180,32 +4359,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsFromCallArgument(
ArgType, Info, Deduced, TDF);
}
-/// Perform template argument deduction from a function call
-/// (C++ [temp.deduct.call]).
-///
-/// \param FunctionTemplate the function template for which we are performing
-/// template argument deduction.
-///
-/// \param ExplicitTemplateArgs the explicit template arguments provided
-/// for this call.
-///
-/// \param Args the function call arguments
-///
-/// \param Specialization if template argument deduction was successful,
-/// this will be set to the function template specialization produced by
-/// template argument deduction.
-///
-/// \param Info the argument will be updated to provide additional information
-/// about template argument deduction.
-///
-/// \param CheckNonDependent A callback to invoke to check conversions for
-/// non-dependent parameters, between deduction and substitution, per DR1391.
-/// If this returns true, substitution will be skipped and we return
-/// TDK_NonDependentConversionFailure. The callback is passed the parameter
-/// types (after substituting explicit template arguments).
-///
-/// \returns the result of template argument deduction.
-Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
+TemplateDeductionResult Sema::DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, TemplateDeductionInfo &Info,
@@ -4213,7 +4367,7 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
QualType ObjectType, Expr::Classification ObjectClassification,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent) {
if (FunctionTemplate->isInvalidDecl())
- return TDK_Invalid;
+ return TemplateDeductionResult::Invalid;
FunctionDecl *Function = FunctionTemplate->getTemplatedDecl();
unsigned NumParams = Function->getNumParams();
@@ -4232,14 +4386,14 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
// of the call (call it A) as described below.
if (Args.size() < Function->getMinRequiredExplicitArguments() &&
!PartialOverloading)
- return TDK_TooFewArguments;
+ return TemplateDeductionResult::TooFewArguments;
else if (TooManyArguments(NumParams, Args.size() + ExplicitObjectOffset,
PartialOverloading)) {
const auto *Proto = Function->getType()->castAs<FunctionProtoType>();
if (Proto->isTemplateVariadic())
/* Do nothing */;
else if (!Proto->isVariadic())
- return TDK_TooManyArguments;
+ return TemplateDeductionResult::TooManyArguments;
}
// The types of the parameters from which we will perform template argument
@@ -4257,7 +4411,7 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
FunctionTemplate, *ExplicitTemplateArgs, Deduced, ParamTypes, nullptr,
Info);
});
- if (Result)
+ if (Result != TemplateDeductionResult::Success)
return Result;
NumExplicitlySpecified = Deduced.size();
@@ -4271,15 +4425,15 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
// Deduce an argument of type ParamType from an expression with index ArgIdx.
auto DeduceCallArgument = [&](QualType ParamType, unsigned ArgIdx,
- bool ExplicitObjetArgument) {
+ bool ExplicitObjectArgument) {
// C++ [demp.deduct.call]p1: (DR1391)
// Template argument deduction is done by comparing each function template
// parameter that contains template-parameters that participate in
// template argument deduction ...
if (!hasDeducibleTemplateParameters(*this, FunctionTemplate, ParamType))
- return Sema::TDK_Success;
+ return TemplateDeductionResult::Success;
- if (ExplicitObjetArgument) {
+ if (ExplicitObjectArgument) {
// ... with the type of the corresponding argument
return DeduceTemplateArgumentsFromCallArgument(
*this, TemplateParams, FirstInnerIndex, ParamType, ObjectType,
@@ -4314,13 +4468,15 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
if (ParamIdx == 0 && HasExplicitObject) {
if (auto Result = DeduceCallArgument(ParamType, 0,
- /*ExplicitObjetArgument=*/true))
+ /*ExplicitObjectArgument=*/true);
+ Result != TemplateDeductionResult::Success)
return Result;
continue;
}
if (auto Result = DeduceCallArgument(ParamType, ArgIdx++,
- /*ExplicitObjetArgument=*/false))
+ /*ExplicitObjectArgument=*/false);
+ Result != TemplateDeductionResult::Success)
return Result;
continue;
@@ -4354,7 +4510,8 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
PackScope.nextPackElement(), ++ArgIdx) {
ParamTypesForArgChecking.push_back(ParamPattern);
if (auto Result = DeduceCallArgument(ParamPattern, ArgIdx,
- /*ExplicitObjetArgument=*/false))
+ /*ExplicitObjectArgument=*/false);
+ Result != TemplateDeductionResult::Success)
return Result;
}
} else {
@@ -4371,12 +4528,43 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
// corresponding argument is a list?
PackScope.nextPackElement();
}
+ } else if (!IsTrailingPack && !PackScope.isPartiallyExpanded() &&
+ PackScope.isDeducedFromEarlierParameter()) {
+ // [temp.deduct.general#3]
+ // When all template arguments have been deduced
+ // or obtained from default template arguments, all uses of template
+ // parameters in the template parameter list of the template are
+ // replaced with the corresponding deduced or default argument values
+ //
+ // If we have a trailing parameter pack, that has been deduced
+ // previously we substitute the pack here in a similar fashion as
+ // above with the trailing parameter packs. The main difference here is
+ // that, in this case we are not processing all of the remaining
+ // arguments. We are only process as many arguments as we have in
+ // the already deduced parameter.
+ std::optional<unsigned> ArgPosAfterSubstitution =
+ PackScope.getSavedPackSizeIfAllEqual();
+ if (!ArgPosAfterSubstitution)
+ continue;
+
+ unsigned PackArgEnd = ArgIdx + *ArgPosAfterSubstitution;
+ for (; ArgIdx < PackArgEnd && ArgIdx < Args.size(); ArgIdx++) {
+ ParamTypesForArgChecking.push_back(ParamPattern);
+ if (auto Result =
+ DeduceCallArgument(ParamPattern, ArgIdx,
+ /*ExplicitObjectArgument=*/false);
+ Result != TemplateDeductionResult::Success)
+ return Result;
+
+ PackScope.nextPackElement();
+ }
}
}
// Build argument packs for each of the parameter packs expanded by this
// pack expansion.
- if (auto Result = PackScope.finish())
+ if (auto Result = PackScope.finish();
+ Result != TemplateDeductionResult::Success)
return Result;
}
@@ -4432,41 +4620,13 @@ QualType Sema::adjustCCAndNoReturn(QualType ArgFunctionType,
ArgFunctionTypeP->getParamTypes(), EPI);
}
-/// Deduce template arguments when taking the address of a function
-/// template (C++ [temp.deduct.funcaddr]) or matching a specialization to
-/// a template.
-///
-/// \param FunctionTemplate the function template for which we are performing
-/// template argument deduction.
-///
-/// \param ExplicitTemplateArgs the explicitly-specified template
-/// arguments.
-///
-/// \param ArgFunctionType the function type that will be used as the
-/// "argument" type (A) when performing template argument deduction from the
-/// function template's function type. This type may be NULL, if there is no
-/// argument type to compare against, in C++0x [temp.arg.explicit]p3.
-///
-/// \param Specialization if template argument deduction was successful,
-/// this will be set to the function template specialization produced by
-/// template argument deduction.
-///
-/// \param Info the argument will be updated to provide additional information
-/// about template argument deduction.
-///
-/// \param IsAddressOfFunction If \c true, we are deducing as part of taking
-/// the address of a function template per [temp.deduct.funcaddr] and
-/// [over.over]. If \c false, we are looking up a function template
-/// specialization based on its signature, per [temp.deduct.decl].
-///
-/// \returns the result of template argument deduction.
-Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
+TemplateDeductionResult Sema::DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType,
FunctionDecl *&Specialization, TemplateDeductionInfo &Info,
bool IsAddressOfFunction) {
if (FunctionTemplate->isInvalidDecl())
- return TDK_Invalid;
+ return TemplateDeductionResult::Invalid;
FunctionDecl *Function = FunctionTemplate->getTemplatedDecl();
TemplateParameterList *TemplateParams
@@ -4485,7 +4645,7 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
FunctionTemplate, *ExplicitTemplateArgs, Deduced, ParamTypes,
&FunctionType, Info);
});
- if (Result)
+ if (Result != TemplateDeductionResult::Success)
return Result;
NumExplicitlySpecified = Deduced.size();
@@ -4518,10 +4678,10 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
unsigned TDF =
TDF_TopLevelParameterTypeList | TDF_AllowCompatibleFunctionType;
// Deduce template arguments from the function type.
- if (TemplateDeductionResult Result
- = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams,
- FunctionType, ArgFunctionType,
- Info, Deduced, TDF))
+ if (TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(
+ *this, TemplateParams, FunctionType, ArgFunctionType, Info, Deduced,
+ TDF);
+ Result != TemplateDeductionResult::Success)
return Result;
}
@@ -4531,7 +4691,7 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
NumExplicitlySpecified,
Specialization, Info);
});
- if (Result)
+ if (Result != TemplateDeductionResult::Success)
return Result;
// If the function has a deduced return type, deduce it now, so we can check
@@ -4539,22 +4699,18 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
if (HasDeducedReturnType && IsAddressOfFunction &&
Specialization->getReturnType()->isUndeducedType() &&
DeduceReturnType(Specialization, Info.getLocation(), false))
- return TDK_MiscellaneousDeductionFailure;
+ return TemplateDeductionResult::MiscellaneousDeductionFailure;
+ // [C++26][expr.const]/p17
+ // An expression or conversion is immediate-escalating if it is not initially
+ // in an immediate function context and it is [...]
+ // a potentially-evaluated id-expression that denotes an immediate function.
if (IsAddressOfFunction && getLangOpts().CPlusPlus20 &&
Specialization->isImmediateEscalating() &&
+ parentEvaluationContext().isPotentiallyEvaluated() &&
CheckIfFunctionSpecializationIsImmediate(Specialization,
Info.getLocation()))
- return TDK_MiscellaneousDeductionFailure;
-
- // If the function has a dependent exception specification, resolve it now,
- // so we can check that the exception specification matches.
- auto *SpecializationFPT =
- Specialization->getType()->castAs<FunctionProtoType>();
- if (getLangOpts().CPlusPlus17 &&
- isUnresolvedExceptionSpec(SpecializationFPT->getExceptionSpecType()) &&
- !ResolveExceptionSpec(Info.getLocation(), SpecializationFPT))
- return TDK_MiscellaneousDeductionFailure;
+ return TemplateDeductionResult::MiscellaneousDeductionFailure;
// Adjust the exception specification of the argument to match the
// substituted and resolved type we just formed. (Calling convention and
@@ -4577,29 +4733,26 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
// specialization with respect to arguments of compatible pointer to function
// types, template argument deduction fails.
if (!ArgFunctionType.isNull()) {
- if (IsAddressOfFunction
- ? !isSameOrCompatibleFunctionType(
- Context.getCanonicalType(SpecializationType),
- Context.getCanonicalType(ArgFunctionType))
- : !Context.hasSameType(SpecializationType, ArgFunctionType)) {
+ if (IsAddressOfFunction ? !isSameOrCompatibleFunctionType(
+ Context.getCanonicalType(SpecializationType),
+ Context.getCanonicalType(ArgFunctionType))
+ : !Context.hasSameFunctionTypeIgnoringExceptionSpec(
+ SpecializationType, ArgFunctionType)) {
Info.FirstArg = TemplateArgument(SpecializationType);
Info.SecondArg = TemplateArgument(ArgFunctionType);
- return TDK_NonDeducedMismatch;
+ return TemplateDeductionResult::NonDeducedMismatch;
}
}
- return TDK_Success;
+ return TemplateDeductionResult::Success;
}
-/// Deduce template arguments for a templated conversion
-/// function (C++ [temp.deduct.conv]) and, if successful, produce a
-/// conversion function template specialization.
-Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
+TemplateDeductionResult Sema::DeduceTemplateArguments(
FunctionTemplateDecl *ConversionTemplate, QualType ObjectType,
Expr::Classification ObjectClassification, QualType ToType,
CXXConversionDecl *&Specialization, TemplateDeductionInfo &Info) {
if (ConversionTemplate->isInvalidDecl())
- return TDK_Invalid;
+ return TemplateDeductionResult::Invalid;
CXXConversionDecl *ConversionGeneric
= cast<CXXConversionDecl>(ConversionTemplate->getTemplatedDecl());
@@ -4701,13 +4854,14 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
*this, TemplateParams, getFirstInnerIndex(ConversionTemplate),
ParamType, ObjectType, ObjectClassification,
/*Arg=*/nullptr, Info, Deduced, OriginalCallArgs,
- /*Decomposed*/ false, 0, /*TDF*/ 0))
+ /*Decomposed*/ false, 0, /*TDF*/ 0);
+ Result != TemplateDeductionResult::Success)
return Result;
}
- if (TemplateDeductionResult Result
- = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams,
- P, A, Info, Deduced, TDF))
+ if (TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(
+ *this, TemplateParams, P, A, Info, Deduced, TDF);
+ Result != TemplateDeductionResult::Success)
return Result;
// Create an Instantiation Scope for finalizing the operator.
@@ -4724,35 +4878,12 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
return Result;
}
-/// Deduce template arguments for a function template when there is
-/// nothing to deduce against (C++0x [temp.arg.explicit]p3).
-///
-/// \param FunctionTemplate the function template for which we are performing
-/// template argument deduction.
-///
-/// \param ExplicitTemplateArgs the explicitly-specified template
-/// arguments.
-///
-/// \param Specialization if template argument deduction was successful,
-/// this will be set to the function template specialization produced by
-/// template argument deduction.
-///
-/// \param Info the argument will be updated to provide additional information
-/// about template argument deduction.
-///
-/// \param IsAddressOfFunction If \c true, we are deducing as part of taking
-/// the address of a function template in a context where we do not have a
-/// target type, per [over.over]. If \c false, we are looking up a function
-/// template specialization based on its signature, which only happens when
-/// deducing a function parameter type from an argument that is a template-id
-/// naming a function template specialization.
-///
-/// \returns the result of template argument deduction.
-Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
- FunctionTemplateDecl *FunctionTemplate,
- TemplateArgumentListInfo *ExplicitTemplateArgs,
- FunctionDecl *&Specialization, TemplateDeductionInfo &Info,
- bool IsAddressOfFunction) {
+TemplateDeductionResult
+Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ FunctionDecl *&Specialization,
+ TemplateDeductionInfo &Info,
+ bool IsAddressOfFunction) {
return DeduceTemplateArguments(FunctionTemplate, ExplicitTemplateArgs,
QualType(), Specialization, Info,
IsAddressOfFunction);
@@ -4872,6 +5003,20 @@ static bool CheckDeducedPlaceholderConstraints(Sema &S, const AutoType &Type,
return true;
MultiLevelTemplateArgumentList MLTAL(Concept, CanonicalConverted,
/*Final=*/false);
+ // Build up an EvaluationContext with an ImplicitConceptSpecializationDecl so
+ // that the template arguments of the constraint can be preserved. For
+ // example:
+ //
+ // template <class T>
+ // concept C = []<D U = void>() { return true; }();
+ //
+ // We need the argument for T while evaluating type constraint D in
+ // building the CallExpr to the lambda.
+ EnterExpressionEvaluationContext EECtx(
+ S, Sema::ExpressionEvaluationContext::Unevaluated,
+ ImplicitConceptSpecializationDecl::Create(
+ S.getASTContext(), Concept->getDeclContext(), Concept->getLocation(),
+ CanonicalConverted));
if (S.CheckConstraintSatisfaction(Concept, {Concept->getConstraintExpr()},
MLTAL, TypeLoc.getLocalSourceRange(),
Satisfaction))
@@ -4896,32 +5041,14 @@ static bool CheckDeducedPlaceholderConstraints(Sema &S, const AutoType &Type,
return false;
}
-/// Deduce the type for an auto type-specifier (C++11 [dcl.spec.auto]p6)
-///
-/// Note that this is done even if the initializer is dependent. (This is
-/// necessary to support partial ordering of templates using 'auto'.)
-/// A dependent type will be produced when deducing from a dependent type.
-///
-/// \param Type the type pattern using the auto type-specifier.
-/// \param Init the initializer for the variable whose type is to be deduced.
-/// \param Result if type deduction was successful, this will be set to the
-/// deduced type.
-/// \param Info the argument will be updated to provide additional information
-/// about template argument deduction.
-/// \param DependentDeduction Set if we should permit deduction in
-/// dependent cases. This is necessary for template partial ordering with
-/// 'auto' template parameters. The template parameter depth to be used
-/// should be specified in the 'Info' parameter.
-/// \param IgnoreConstraints Set if we should not fail if the deduced type does
-/// not satisfy the type-constraint in the auto type.
-Sema::TemplateDeductionResult
+TemplateDeductionResult
Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result,
TemplateDeductionInfo &Info, bool DependentDeduction,
bool IgnoreConstraints,
TemplateSpecCandidateSet *FailedTSC) {
assert(DependentDeduction || Info.getDeducedDepth() == 0);
if (Init->containsErrors())
- return TDK_AlreadyDiagnosed;
+ return TemplateDeductionResult::AlreadyDiagnosed;
const AutoType *AT = Type.getType()->getContainedAutoType();
assert(AT);
@@ -4929,7 +5056,7 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result,
if (Init->getType()->isNonOverloadPlaceholderType() || AT->isDecltypeAuto()) {
ExprResult NonPlaceholder = CheckPlaceholderExpr(Init);
if (NonPlaceholder.isInvalid())
- return TDK_AlreadyDiagnosed;
+ return TemplateDeductionResult::AlreadyDiagnosed;
Init = NonPlaceholder.get();
}
@@ -4941,7 +5068,7 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result,
Init->containsUnexpandedParameterPack())) {
Result = SubstituteDeducedTypeTransform(*this, DependentResult).Apply(Type);
assert(!Result.isNull() && "substituting DependentTy can't fail");
- return TDK_Success;
+ return TemplateDeductionResult::Success;
}
// Make sure that we treat 'char[]' equaly as 'char*' in C23 mode.
@@ -4951,7 +5078,7 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result,
TypeLoc TL = TypeLoc(Init->getType(), Type.getOpaqueData());
Result = SubstituteDeducedTypeTransform(*this, DependentResult).Apply(TL);
assert(!Result.isNull() && "substituting DependentTy can't fail");
- return TDK_Success;
+ return TemplateDeductionResult::Success;
}
// Emit a warning if 'auto*' is used in pedantic and in C23 mode.
@@ -4963,7 +5090,7 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result,
if (!getLangOpts().CPlusPlus && InitList) {
Diag(Init->getBeginLoc(), diag::err_auto_init_list_from_c)
<< (int)AT->getKeyword() << getLangOpts().C23;
- return TDK_AlreadyDiagnosed;
+ return TemplateDeductionResult::AlreadyDiagnosed;
}
// Deduce type of TemplParam in Func(Init)
@@ -4977,7 +5104,7 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result,
Result =
SubstituteDeducedTypeTransform(*this, DependentResult).Apply(Type);
assert(!Result.isNull() && "substituting DependentTy can't fail");
- return TDK_Success;
+ return TemplateDeductionResult::Success;
}
return TDK;
};
@@ -4989,7 +5116,7 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result,
if (AT->isDecltypeAuto()) {
if (InitList) {
Diag(Init->getBeginLoc(), diag::err_decltype_auto_initializer_list);
- return TDK_AlreadyDiagnosed;
+ return TemplateDeductionResult::AlreadyDiagnosed;
}
DeducedType = getDecltypeForExpr(Init);
@@ -5012,24 +5139,25 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result,
// deduce against that. Such deduction only succeeds if removing
// cv-qualifiers and references results in std::initializer_list<T>.
if (!Type.getType().getNonReferenceType()->getAs<AutoType>())
- return TDK_Invalid;
+ return TemplateDeductionResult::Invalid;
SourceRange DeducedFromInitRange;
for (Expr *Init : InitList->inits()) {
// Resolving a core issue: a braced-init-list containing any designators
// is a non-deduced context.
if (isa<DesignatedInitExpr>(Init))
- return TDK_Invalid;
+ return TemplateDeductionResult::Invalid;
if (auto TDK = DeduceTemplateArgumentsFromCallArgument(
*this, TemplateParamsSt.get(), 0, TemplArg, Init->getType(),
Init->Classify(getASTContext()), Init, Info, Deduced,
OriginalCallArgs, /*Decomposed=*/true,
- /*ArgIdx=*/0, /*TDF=*/0)) {
- if (TDK == TDK_Inconsistent) {
+ /*ArgIdx=*/0, /*TDF=*/0);
+ TDK != TemplateDeductionResult::Success) {
+ if (TDK == TemplateDeductionResult::Inconsistent) {
Diag(Info.getLocation(), diag::err_auto_inconsistent_deduction)
<< Info.FirstArg << Info.SecondArg << DeducedFromInitRange
<< Init->getSourceRange();
- return DeductionFailed(TDK_AlreadyDiagnosed);
+ return DeductionFailed(TemplateDeductionResult::AlreadyDiagnosed);
}
return DeductionFailed(TDK);
}
@@ -5041,7 +5169,7 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result,
} else {
if (!getLangOpts().CPlusPlus && Init->refersToBitField()) {
Diag(Loc, diag::err_auto_bitfield);
- return TDK_AlreadyDiagnosed;
+ return TemplateDeductionResult::AlreadyDiagnosed;
}
QualType FuncParam =
SubstituteDeducedTypeTransform(*this, TemplArg).Apply(Type);
@@ -5051,19 +5179,20 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result,
*this, TemplateParamsSt.get(), 0, FuncParam, Init->getType(),
Init->Classify(getASTContext()), Init, Info, Deduced,
OriginalCallArgs, /*Decomposed=*/false, /*ArgIdx=*/0, /*TDF=*/0,
- FailedTSC))
+ FailedTSC);
+ TDK != TemplateDeductionResult::Success)
return DeductionFailed(TDK);
}
// Could be null if somehow 'auto' appears in a non-deduced context.
if (Deduced[0].getKind() != TemplateArgument::Type)
- return DeductionFailed(TDK_Incomplete);
+ return DeductionFailed(TemplateDeductionResult::Incomplete);
DeducedType = Deduced[0].getAsType();
if (InitList) {
DeducedType = BuildStdInitializerList(DeducedType, Loc);
if (DeducedType.isNull())
- return TDK_AlreadyDiagnosed;
+ return TemplateDeductionResult::AlreadyDiagnosed;
}
}
@@ -5071,7 +5200,7 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result,
if (!Context.hasSameType(DeducedType, Result)) {
Info.FirstArg = Result;
Info.SecondArg = DeducedType;
- return DeductionFailed(TDK_Inconsistent);
+ return DeductionFailed(TemplateDeductionResult::Inconsistent);
}
DeducedType = Context.getCommonSugaredType(Result, DeducedType);
}
@@ -5079,11 +5208,11 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result,
if (AT->isConstrained() && !IgnoreConstraints &&
CheckDeducedPlaceholderConstraints(
*this, *AT, Type.getContainedAutoTypeLoc(), DeducedType))
- return TDK_AlreadyDiagnosed;
+ return TemplateDeductionResult::AlreadyDiagnosed;
Result = SubstituteDeducedTypeTransform(*this, DeducedType).Apply(Type);
if (Result.isNull())
- return TDK_AlreadyDiagnosed;
+ return TemplateDeductionResult::AlreadyDiagnosed;
// Check that the deduced argument type is compatible with the original
// argument type per C++ [temp.deduct.call]p4.
@@ -5092,13 +5221,14 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result,
assert((bool)InitList == OriginalArg.DecomposedParam &&
"decomposed non-init-list in auto deduction?");
if (auto TDK =
- CheckOriginalCallArgDeduction(*this, Info, OriginalArg, DeducedA)) {
+ CheckOriginalCallArgDeduction(*this, Info, OriginalArg, DeducedA);
+ TDK != TemplateDeductionResult::Success) {
Result = QualType();
return DeductionFailed(TDK);
}
}
- return TDK_Success;
+ return TemplateDeductionResult::Success;
}
QualType Sema::SubstAutoType(QualType TypeWithAuto,
@@ -5140,7 +5270,8 @@ TypeSourceInfo *Sema::ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
.TransformType(TypeWithAuto);
}
-void Sema::DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init) {
+void Sema::DiagnoseAutoDeductionFailure(const VarDecl *VDecl,
+ const Expr *Init) {
if (isa<InitListExpr>(Init))
Diag(VDecl->getLocation(),
VDecl->isInitCapture()
@@ -5245,38 +5376,38 @@ bool Sema::CheckIfFunctionSpecializationIsImmediate(FunctionDecl *FD,
return false;
}
-/// If this is a non-static member function,
-static void
-AddImplicitObjectParameterType(ASTContext &Context,
- CXXMethodDecl *Method,
- SmallVectorImpl<QualType> &ArgTypes) {
- // C++11 [temp.func.order]p3:
- // [...] The new parameter is of type "reference to cv A," where cv are
- // the cv-qualifiers of the function template (if any) and A is
- // the class of which the function template is a member.
+static QualType GetImplicitObjectParameterType(ASTContext &Context,
+ const CXXMethodDecl *Method,
+ QualType RawType,
+ bool IsOtherRvr) {
+ // C++20 [temp.func.order]p3.1, p3.2:
+ // - The type X(M) is "rvalue reference to cv A" if the optional
+ // ref-qualifier of M is && or if M has no ref-qualifier and the
+ // positionally-corresponding parameter of the other transformed template
+ // has rvalue reference type; if this determination depends recursively
+ // upon whether X(M) is an rvalue reference type, it is not considered to
+ // have rvalue reference type.
//
- // The standard doesn't say explicitly, but we pick the appropriate kind of
- // reference type based on [over.match.funcs]p4.
- assert(Method && Method->isImplicitObjectMemberFunction() &&
- "expected an implicit objet function");
- QualType ArgTy = Context.getTypeDeclType(Method->getParent());
- ArgTy = Context.getQualifiedType(ArgTy, Method->getMethodQualifiers());
- if (Method->getRefQualifier() == RQ_RValue)
- ArgTy = Context.getRValueReferenceType(ArgTy);
- else
- ArgTy = Context.getLValueReferenceType(ArgTy);
- ArgTypes.push_back(ArgTy);
+ // - Otherwise, X(M) is "lvalue reference to cv A".
+ assert(Method && !Method->isExplicitObjectMemberFunction() &&
+ "expected a member function with no explicit object parameter");
+
+ RawType = Context.getQualifiedType(RawType, Method->getMethodQualifiers());
+ if (Method->getRefQualifier() == RQ_RValue ||
+ (IsOtherRvr && Method->getRefQualifier() == RQ_None))
+ return Context.getRValueReferenceType(RawType);
+ return Context.getLValueReferenceType(RawType);
}
/// Determine whether the function template \p FT1 is at least as
/// specialized as \p FT2.
-static bool isAtLeastAsSpecializedAs(Sema &S,
- SourceLocation Loc,
- FunctionTemplateDecl *FT1,
- FunctionTemplateDecl *FT2,
+static bool isAtLeastAsSpecializedAs(Sema &S, SourceLocation Loc,
+ const FunctionTemplateDecl *FT1,
+ const FunctionTemplateDecl *FT2,
TemplatePartialOrderingContext TPOC,
- unsigned NumCallArguments1,
- bool Reversed) {
+ bool Reversed,
+ const SmallVector<QualType> &Args1,
+ const SmallVector<QualType> &Args2) {
assert(!Reversed || TPOC == TPOC_Call);
FunctionDecl *FD1 = FT1->getTemplatedDecl();
@@ -5293,73 +5424,15 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
// The types used to determine the ordering depend on the context in which
// the partial ordering is done:
TemplateDeductionInfo Info(Loc);
- SmallVector<QualType, 4> Args2;
switch (TPOC) {
- case TPOC_Call: {
- // - In the context of a function call, the function parameter types are
- // used.
- CXXMethodDecl *Method1 = dyn_cast<CXXMethodDecl>(FD1);
- CXXMethodDecl *Method2 = dyn_cast<CXXMethodDecl>(FD2);
-
- // C++11 [temp.func.order]p3:
- // [...] If only one of the function templates is a non-static
- // member, that function template is considered to have a new
- // first parameter inserted in its function parameter list. The
- // new parameter is of type "reference to cv A," where cv are
- // the cv-qualifiers of the function template (if any) and A is
- // the class of which the function template is a member.
- //
- // Note that we interpret this to mean "if one of the function
- // templates is a non-static member and the other is a non-member";
- // otherwise, the ordering rules for static functions against non-static
- // functions don't make any sense.
- //
- // C++98/03 doesn't have this provision but we've extended DR532 to cover
- // it as wording was broken prior to it.
- SmallVector<QualType, 4> Args1;
-
- unsigned NumComparedArguments = NumCallArguments1;
-
- if (!Method2 && Method1 && Method1->isImplicitObjectMemberFunction()) {
- // Compare 'this' from Method1 against first parameter from Method2.
- AddImplicitObjectParameterType(S.Context, Method1, Args1);
- ++NumComparedArguments;
- } else if (!Method1 && Method2 &&
- Method2->isImplicitObjectMemberFunction()) {
- // Compare 'this' from Method2 against first parameter from Method1.
- AddImplicitObjectParameterType(S.Context, Method2, Args2);
- } else if (Method1 && Method2 && Reversed &&
- Method1->isImplicitObjectMemberFunction() &&
- Method2->isImplicitObjectMemberFunction()) {
- // Compare 'this' from Method1 against second parameter from Method2
- // and 'this' from Method2 against second parameter from Method1.
- AddImplicitObjectParameterType(S.Context, Method1, Args1);
- AddImplicitObjectParameterType(S.Context, Method2, Args2);
- ++NumComparedArguments;
- }
-
- Args1.insert(Args1.end(), Proto1->param_type_begin(),
- Proto1->param_type_end());
- Args2.insert(Args2.end(), Proto2->param_type_begin(),
- Proto2->param_type_end());
-
- // C++ [temp.func.order]p5:
- // The presence of unused ellipsis and default arguments has no effect on
- // the partial ordering of function templates.
- if (Args1.size() > NumComparedArguments)
- Args1.resize(NumComparedArguments);
- if (Args2.size() > NumComparedArguments)
- Args2.resize(NumComparedArguments);
- if (Reversed)
- std::reverse(Args2.begin(), Args2.end());
-
+ case TPOC_Call:
if (DeduceTemplateArguments(S, TemplateParams, Args2.data(), Args2.size(),
Args1.data(), Args1.size(), Info, Deduced,
- TDF_None, /*PartialOrdering=*/true))
+ TDF_None, /*PartialOrdering=*/true) !=
+ TemplateDeductionResult::Success)
return false;
break;
- }
case TPOC_Conversion:
// - In the context of a call to a conversion operator, the return types
@@ -5367,17 +5440,17 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
if (DeduceTemplateArgumentsByTypeMatch(
S, TemplateParams, Proto2->getReturnType(), Proto1->getReturnType(),
Info, Deduced, TDF_None,
- /*PartialOrdering=*/true))
+ /*PartialOrdering=*/true) != TemplateDeductionResult::Success)
return false;
break;
case TPOC_Other:
// - In other contexts (14.6.6.2) the function template's function type
// is used.
- if (DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
- FD2->getType(), FD1->getType(),
- Info, Deduced, TDF_None,
- /*PartialOrdering=*/true))
+ if (DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, FD2->getType(), FD1->getType(), Info, Deduced,
+ TDF_AllowCompatibleFunctionType,
+ /*PartialOrdering=*/true) != TemplateDeductionResult::Success)
return false;
break;
}
@@ -5408,20 +5481,40 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
switch (TPOC) {
case TPOC_Call:
for (unsigned I = 0, N = Args2.size(); I != N; ++I)
- ::MarkUsedTemplateParameters(S.Context, Args2[I], false,
- TemplateParams->getDepth(),
- UsedParameters);
+ ::MarkUsedTemplateParameters(S.Context, Args2[I], /*OnlyDeduced=*/false,
+ TemplateParams->getDepth(), UsedParameters);
break;
case TPOC_Conversion:
- ::MarkUsedTemplateParameters(S.Context, Proto2->getReturnType(), false,
+ ::MarkUsedTemplateParameters(S.Context, Proto2->getReturnType(),
+ /*OnlyDeduced=*/false,
TemplateParams->getDepth(), UsedParameters);
break;
case TPOC_Other:
- ::MarkUsedTemplateParameters(S.Context, FD2->getType(), false,
- TemplateParams->getDepth(),
- UsedParameters);
+ // We do not deduce template arguments from the exception specification
+ // when determining the primary template of a function template
+ // specialization or when taking the address of a function template.
+ // Therefore, we do not mark template parameters in the exception
+ // specification as used during partial ordering to prevent the following
+ // from being ambiguous:
+ //
+ // template<typename T, typename U>
+ // void f(U) noexcept(noexcept(T())); // #1
+ //
+ // template<typename T>
+ // void f(T*) noexcept; // #2
+ //
+ // template<>
+ // void f<int>(int*) noexcept; // explicit specialization of #2
+ //
+ // Although there is no corresponding wording in the standard, this seems
+ // to be the intended behavior given the definition of
+ // 'deduction substitution loci' in [temp.deduct].
+ ::MarkUsedTemplateParameters(
+ S.Context,
+ S.Context.getFunctionTypeWithExceptionSpec(FD2->getType(), EST_None),
+ /*OnlyDeduced=*/false, TemplateParams->getDepth(), UsedParameters);
break;
}
@@ -5434,38 +5527,83 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
return true;
}
-/// Returns the more specialized function template according
-/// to the rules of function template partial ordering (C++ [temp.func.order]).
-///
-/// \param FT1 the first function template
-///
-/// \param FT2 the second function template
-///
-/// \param TPOC the context in which we are performing partial ordering of
-/// function templates.
-///
-/// \param NumCallArguments1 The number of arguments in the call to FT1, used
-/// only when \c TPOC is \c TPOC_Call.
-///
-/// \param NumCallArguments2 The number of arguments in the call to FT2, used
-/// only when \c TPOC is \c TPOC_Call.
-///
-/// \param Reversed If \c true, exactly one of FT1 and FT2 is an overload
-/// candidate with a reversed parameter order. In this case, the corresponding
-/// P/A pairs between FT1 and FT2 are reversed.
-///
-/// \returns the more specialized function template. If neither
-/// template is more specialized, returns NULL.
FunctionTemplateDecl *Sema::getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
- unsigned NumCallArguments2, bool Reversed) {
+ QualType RawObj1Ty, QualType RawObj2Ty, bool Reversed) {
+ SmallVector<QualType> Args1;
+ SmallVector<QualType> Args2;
+ const FunctionDecl *FD1 = FT1->getTemplatedDecl();
+ const FunctionDecl *FD2 = FT2->getTemplatedDecl();
+ bool ShouldConvert1 = false;
+ bool ShouldConvert2 = false;
+ QualType Obj1Ty;
+ QualType Obj2Ty;
+ if (TPOC == TPOC_Call) {
+ const FunctionProtoType *Proto1 =
+ FD1->getType()->castAs<FunctionProtoType>();
+ const FunctionProtoType *Proto2 =
+ FD2->getType()->castAs<FunctionProtoType>();
- bool Better1 = isAtLeastAsSpecializedAs(*this, Loc, FT1, FT2, TPOC,
- NumCallArguments1, Reversed);
- bool Better2 = isAtLeastAsSpecializedAs(*this, Loc, FT2, FT1, TPOC,
- NumCallArguments2, Reversed);
+ // - In the context of a function call, the function parameter types are
+ // used.
+ const CXXMethodDecl *Method1 = dyn_cast<CXXMethodDecl>(FD1);
+ const CXXMethodDecl *Method2 = dyn_cast<CXXMethodDecl>(FD2);
+ // C++20 [temp.func.order]p3
+ // [...] Each function template M that is a member function is
+ // considered to have a new first parameter of type
+ // X(M), described below, inserted in its function parameter list.
+ //
+ // Note that we interpret "that is a member function" as
+ // "that is a member function with no expicit object argument".
+ // Otherwise the ordering rules for methods with expicit objet arguments
+ // against anything else make no sense.
+ ShouldConvert1 = Method1 && !Method1->isExplicitObjectMemberFunction();
+ ShouldConvert2 = Method2 && !Method2->isExplicitObjectMemberFunction();
+ if (ShouldConvert1) {
+ bool IsRValRef2 =
+ ShouldConvert2
+ ? Method2->getRefQualifier() == RQ_RValue
+ : Proto2->param_type_begin()[0]->isRValueReferenceType();
+ // Compare 'this' from Method1 against first parameter from Method2.
+ Obj1Ty = GetImplicitObjectParameterType(this->Context, Method1, RawObj1Ty,
+ IsRValRef2);
+ Args1.push_back(Obj1Ty);
+ }
+ if (ShouldConvert2) {
+ bool IsRValRef1 =
+ ShouldConvert1
+ ? Method1->getRefQualifier() == RQ_RValue
+ : Proto1->param_type_begin()[0]->isRValueReferenceType();
+ // Compare 'this' from Method2 against first parameter from Method1.
+ Obj2Ty = GetImplicitObjectParameterType(this->Context, Method2, RawObj2Ty,
+ IsRValRef1);
+ Args2.push_back(Obj2Ty);
+ }
+ size_t NumComparedArguments = NumCallArguments1;
+ // Either added an argument above or the prototype includes an explicit
+ // object argument we need to count
+ if (Method1)
+ ++NumComparedArguments;
+
+ Args1.insert(Args1.end(), Proto1->param_type_begin(),
+ Proto1->param_type_end());
+ Args2.insert(Args2.end(), Proto2->param_type_begin(),
+ Proto2->param_type_end());
+
+ // C++ [temp.func.order]p5:
+ // The presence of unused ellipsis and default arguments has no effect on
+ // the partial ordering of function templates.
+ Args1.resize(std::min(Args1.size(), NumComparedArguments));
+ Args2.resize(std::min(Args2.size(), NumComparedArguments));
+ if (Reversed)
+ std::reverse(Args2.begin(), Args2.end());
+ }
+ bool Better1 = isAtLeastAsSpecializedAs(*this, Loc, FT1, FT2, TPOC, Reversed,
+ Args1, Args2);
+ bool Better2 = isAtLeastAsSpecializedAs(*this, Loc, FT2, FT1, TPOC, Reversed,
+ Args2, Args1);
// C++ [temp.deduct.partial]p10:
// F is more specialized than G if F is at least as specialized as G and G
// is not at least as specialized as F.
@@ -5479,12 +5617,28 @@ FunctionTemplateDecl *Sema::getMoreSpecializedTemplate(
// ... and if G has a trailing function parameter pack for which F does not
// have a corresponding parameter, and if F does not have a trailing
// function parameter pack, then F is more specialized than G.
- FunctionDecl *FD1 = FT1->getTemplatedDecl();
- FunctionDecl *FD2 = FT2->getTemplatedDecl();
- unsigned NumParams1 = FD1->getNumParams();
- unsigned NumParams2 = FD2->getNumParams();
- bool Variadic1 = NumParams1 && FD1->parameters().back()->isParameterPack();
- bool Variadic2 = NumParams2 && FD2->parameters().back()->isParameterPack();
+
+ SmallVector<QualType> Param1;
+ Param1.reserve(FD1->param_size() + ShouldConvert1);
+ if (ShouldConvert1)
+ Param1.push_back(Obj1Ty);
+ for (const auto &P : FD1->parameters())
+ Param1.push_back(P->getType());
+
+ SmallVector<QualType> Param2;
+ Param2.reserve(FD2->param_size() + ShouldConvert2);
+ if (ShouldConvert2)
+ Param2.push_back(Obj2Ty);
+ for (const auto &P : FD2->parameters())
+ Param2.push_back(P->getType());
+
+ unsigned NumParams1 = Param1.size();
+ unsigned NumParams2 = Param2.size();
+
+ bool Variadic1 =
+ FD1->param_size() && FD1->parameters().back()->isParameterPack();
+ bool Variadic2 =
+ FD2->param_size() && FD2->parameters().back()->isParameterPack();
if (Variadic1 != Variadic2) {
if (Variadic1 && NumParams1 > NumParams2)
return FT2;
@@ -5495,8 +5649,8 @@ FunctionTemplateDecl *Sema::getMoreSpecializedTemplate(
// This a speculative fix for CWG1432 (Similar to the fix for CWG1395) that
// there is no wording or even resolution for this issue.
for (int i = 0, e = std::min(NumParams1, NumParams2); i < e; ++i) {
- QualType T1 = FD1->getParamDecl(i)->getType().getCanonicalType();
- QualType T2 = FD2->getParamDecl(i)->getType().getCanonicalType();
+ QualType T1 = Param1[i].getCanonicalType();
+ QualType T2 = Param2[i].getCanonicalType();
auto *TST1 = dyn_cast<TemplateSpecializationType>(T1);
auto *TST2 = dyn_cast<TemplateSpecializationType>(T2);
if (!TST1 || !TST2)
@@ -5551,9 +5705,11 @@ FunctionTemplateDecl *Sema::getMoreSpecializedTemplate(
Sema::TPL_TemplateParamsEquivalent))
return nullptr;
+ // [dcl.fct]p5:
+ // Any top-level cv-qualifiers modifying a parameter type are deleted when
+ // forming the function type.
for (unsigned i = 0; i < NumParams1; ++i)
- if (!Context.hasSameType(FD1->getParamDecl(i)->getType(),
- FD2->getParamDecl(i)->getType()))
+ if (!Context.hasSameUnqualifiedType(Param1[i], Param2[i]))
return nullptr;
// C++20 [temp.func.order]p6.3:
@@ -5589,31 +5745,6 @@ static bool isSameTemplate(TemplateDecl *T1, TemplateDecl *T2) {
return T1->getCanonicalDecl() == T2->getCanonicalDecl();
}
-/// Retrieve the most specialized of the given function template
-/// specializations.
-///
-/// \param SpecBegin the start iterator of the function template
-/// specializations that we will be comparing.
-///
-/// \param SpecEnd the end iterator of the function template
-/// specializations, paired with \p SpecBegin.
-///
-/// \param Loc the location where the ambiguity or no-specializations
-/// diagnostic should occur.
-///
-/// \param NoneDiag partial diagnostic used to diagnose cases where there are
-/// no matching candidates.
-///
-/// \param AmbigDiag partial diagnostic used to diagnose an ambiguity, if one
-/// occurs.
-///
-/// \param CandidateDiag partial diagnostic used for each function template
-/// specialization that is a candidate in the ambiguous ordering. One parameter
-/// in this diagnostic should be unbound, which will correspond to the string
-/// describing the template arguments for the function template specialization.
-///
-/// \returns the most specialized function template specialization, if
-/// found. Otherwise, returns SpecEnd.
UnresolvedSetIterator Sema::getMostSpecialized(
UnresolvedSetIterator SpecBegin, UnresolvedSetIterator SpecEnd,
TemplateSpecCandidateSet &FailedCandidates,
@@ -5641,8 +5772,8 @@ UnresolvedSetIterator Sema::getMostSpecialized(
FunctionTemplateDecl *Challenger
= cast<FunctionDecl>(*I)->getPrimaryTemplate();
assert(Challenger && "Not a function template specialization?");
- if (isSameTemplate(getMoreSpecializedTemplate(BestTemplate, Challenger,
- Loc, TPOC_Other, 0, 0),
+ if (isSameTemplate(getMoreSpecializedTemplate(BestTemplate, Challenger, Loc,
+ TPOC_Other, 0),
Challenger)) {
Best = I;
BestTemplate = Challenger;
@@ -5657,7 +5788,7 @@ UnresolvedSetIterator Sema::getMostSpecialized(
= cast<FunctionDecl>(*I)->getPrimaryTemplate();
if (I != Best &&
!isSameTemplate(getMoreSpecializedTemplate(BestTemplate, Challenger,
- Loc, TPOC_Other, 0, 0),
+ Loc, TPOC_Other, 0),
BestTemplate)) {
Ambiguous = true;
break;
@@ -5689,6 +5820,29 @@ UnresolvedSetIterator Sema::getMostSpecialized(
return SpecEnd;
}
+FunctionDecl *Sema::getMoreConstrainedFunction(FunctionDecl *FD1,
+ FunctionDecl *FD2) {
+ assert(!FD1->getDescribedTemplate() && !FD2->getDescribedTemplate() &&
+ "not for function templates");
+ FunctionDecl *F1 = FD1;
+ if (FunctionDecl *MF = FD1->getInstantiatedFromMemberFunction())
+ F1 = MF;
+ FunctionDecl *F2 = FD2;
+ if (FunctionDecl *MF = FD2->getInstantiatedFromMemberFunction())
+ F2 = MF;
+ llvm::SmallVector<const Expr *, 1> AC1, AC2;
+ F1->getAssociatedConstraints(AC1);
+ F2->getAssociatedConstraints(AC2);
+ bool AtLeastAsConstrained1, AtLeastAsConstrained2;
+ if (IsAtLeastAsConstrained(F1, AC1, F2, AC2, AtLeastAsConstrained1))
+ return nullptr;
+ if (IsAtLeastAsConstrained(F2, AC2, F1, AC1, AtLeastAsConstrained2))
+ return nullptr;
+ if (AtLeastAsConstrained1 == AtLeastAsConstrained2)
+ return nullptr;
+ return AtLeastAsConstrained1 ? FD1 : FD2;
+}
+
/// Determine whether one partial specialization, P1, is at least as
/// specialized than another, P2.
///
@@ -5728,9 +5882,9 @@ static bool isAtLeastAsSpecializedAs(Sema &S, QualType T1, QualType T2,
// Determine whether P1 is at least as specialized as P2.
Deduced.resize(P2->getTemplateParameters()->size());
- if (DeduceTemplateArgumentsByTypeMatch(S, P2->getTemplateParameters(),
- T2, T1, Info, Deduced, TDF_None,
- /*PartialOrdering=*/true))
+ if (DeduceTemplateArgumentsByTypeMatch(
+ S, P2->getTemplateParameters(), T2, T1, Info, Deduced, TDF_None,
+ /*PartialOrdering=*/true) != TemplateDeductionResult::Success)
return false;
SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(),
@@ -5743,11 +5897,10 @@ static bool isAtLeastAsSpecializedAs(Sema &S, QualType T1, QualType T2,
const auto *TST1 = cast<TemplateSpecializationType>(T1);
bool AtLeastAsSpecialized;
S.runWithSufficientStackSpace(Info.getLocation(), [&] {
- AtLeastAsSpecialized = !FinishTemplateArgumentDeduction(
- S, P2, /*IsPartialOrdering=*/true,
- TemplateArgumentList(TemplateArgumentList::OnStack,
- TST1->template_arguments()),
- Deduced, Info);
+ AtLeastAsSpecialized =
+ FinishTemplateArgumentDeduction(
+ S, P2, /*IsPartialOrdering=*/true, TST1->template_arguments(),
+ Deduced, Info) == TemplateDeductionResult::Success;
});
return AtLeastAsSpecialized;
}
@@ -5926,16 +6079,6 @@ getMoreSpecialized(Sema &S, QualType T1, QualType T2, TemplateLikeDecl *P1,
return AtLeastAsConstrained1 ? P1 : GetP2()(P1, P2);
}
-/// Returns the more specialized class template partial specialization
-/// according to the rules of partial ordering of class template partial
-/// specializations (C++ [temp.class.order]).
-///
-/// \param PS1 the first class template partial specialization
-///
-/// \param PS2 the second class template partial specialization
-///
-/// \returns the more specialized class template partial specialization. If
-/// neither partial specialization is more specialized, returns NULL.
ClassTemplatePartialSpecializationDecl *
Sema::getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
@@ -5999,7 +6142,8 @@ bool Sema::isMoreSpecializedThanPrimary(
}
bool Sema::isTemplateTemplateParameterAtLeastAsSpecializedAs(
- TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc) {
+ TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc,
+ bool IsDeduced) {
// C++1z [temp.arg.template]p4: (DR 150)
// A template template-parameter P is at least as specialized as a
// template template-argument A if, given the following rewrite to two
@@ -6009,11 +6153,10 @@ bool Sema::isTemplateTemplateParameterAtLeastAsSpecializedAs(
// equivalent partial ordering by performing deduction directly on
// the template parameter lists of the template template parameters.
//
- // Given an invented class template X with the template parameter list of
- // A (including default arguments):
- TemplateName X = Context.getCanonicalTemplateName(TemplateName(AArg));
TemplateParameterList *A = AArg->getTemplateParameters();
+ // Given an invented class template X with the template parameter list of
+ // A (including default arguments):
// - Each function template has a single function parameter whose type is
// a specialization of X with template arguments corresponding to the
// template parameters from the respective function template
@@ -6049,19 +6192,50 @@ bool Sema::isTemplateTemplateParameterAtLeastAsSpecializedAs(
// specialized as A.
SmallVector<TemplateArgument, 4> SugaredPArgs;
if (CheckTemplateArgumentList(AArg, Loc, PArgList, false, SugaredPArgs,
- PArgs) ||
+ PArgs, /*UpdateArgsWithConversions=*/true,
+ /*ConstraintsNotSatisfied=*/nullptr,
+ /*PartialOrderTTP=*/true) ||
Trap.hasErrorOccurred())
return false;
}
- QualType AType = Context.getCanonicalTemplateSpecializationType(X, AArgs);
- QualType PType = Context.getCanonicalTemplateSpecializationType(X, PArgs);
+ // Determine whether P1 is at least as specialized as P2.
+ TemplateDeductionInfo Info(Loc, A->getDepth());
+ SmallVector<DeducedTemplateArgument, 4> Deduced;
+ Deduced.resize(A->size());
// ... the function template corresponding to P is at least as specialized
// as the function template corresponding to A according to the partial
// ordering rules for function templates.
- TemplateDeductionInfo Info(Loc, A->getDepth());
- return isAtLeastAsSpecializedAs(*this, PType, AType, AArg, Info);
+
+ // Provisional resolution for CWG2398: Regarding temp.arg.template]p4, when
+ // applying the partial ordering rules for function templates on
+ // the rewritten template template parameters:
+ // - In a deduced context, the matching of packs versus fixed-size needs to
+ // be inverted between Ps and As. On non-deduced context, matching needs to
+ // happen both ways, according to [temp.arg.template]p3, but this is
+ // currently implemented as a special case elsewhere.
+ if (::DeduceTemplateArguments(*this, A, AArgs, PArgs, Info, Deduced,
+ /*NumberOfArgumentsMustMatch=*/false,
+ IsDeduced ? PackFold::ArgumentToParameter
+ : PackFold::ParameterToArgument) !=
+ TemplateDeductionResult::Success)
+ return false;
+
+ SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end());
+ Sema::InstantiatingTemplate Inst(*this, Info.getLocation(), AArg, DeducedArgs,
+ Info);
+ if (Inst.isInvalid())
+ return false;
+
+ bool AtLeastAsSpecialized;
+ runWithSufficientStackSpace(Info.getLocation(), [&] {
+ AtLeastAsSpecialized =
+ ::FinishTemplateArgumentDeduction(
+ *this, AArg, /*IsPartialOrdering=*/true, PArgs, Deduced, Info) ==
+ TemplateDeductionResult::Success;
+ });
+ return AtLeastAsSpecialized;
}
namespace {
@@ -6231,11 +6405,11 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
case Type::ConstantArray:
case Type::IncompleteArray:
+ case Type::ArrayParameter:
MarkUsedTemplateParameters(Ctx,
cast<ArrayType>(T)->getElementType(),
OnlyDeduced, Depth, Used);
break;
-
case Type::Vector:
case Type::ExtVector:
MarkUsedTemplateParameters(Ctx,
@@ -6422,6 +6596,15 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
OnlyDeduced, Depth, Used);
break;
+ case Type::PackIndexing:
+ if (!OnlyDeduced) {
+ MarkUsedTemplateParameters(Ctx, cast<PackIndexingType>(T)->getPattern(),
+ OnlyDeduced, Depth, Used);
+ MarkUsedTemplateParameters(Ctx, cast<PackIndexingType>(T)->getIndexExpr(),
+ OnlyDeduced, Depth, Used);
+ }
+ break;
+
case Type::UnaryTransform:
if (!OnlyDeduced)
MarkUsedTemplateParameters(Ctx,
@@ -6508,13 +6691,6 @@ MarkUsedTemplateParameters(ASTContext &Ctx,
}
}
-/// Mark which template parameters are used in a given expression.
-///
-/// \param E the expression from which template parameters will be deduced.
-///
-/// \param Used a bit vector whose elements will be set to \c true
-/// to indicate when the corresponding template parameter will be
-/// deduced.
void
Sema::MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth,
@@ -6522,15 +6698,6 @@ Sema::MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
::MarkUsedTemplateParameters(Context, E, OnlyDeduced, Depth, Used);
}
-/// Mark which template parameters can be deduced from a given
-/// template argument list.
-///
-/// \param TemplateArgs the template argument list from which template
-/// parameters will be deduced.
-///
-/// \param Used a bit vector whose elements will be set to \c true
-/// to indicate when the corresponding template parameter will be
-/// deduced.
void
Sema::MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced, unsigned Depth,
@@ -6548,8 +6715,6 @@ Sema::MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
Depth, Used);
}
-/// Marks all of the template parameters that will be deduced by a
-/// call to the given function template.
void Sema::MarkDeducedTemplateParameters(
ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeductionGuide.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeductionGuide.cpp
new file mode 100644
index 000000000000..1bf82b31def9
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeductionGuide.cpp
@@ -0,0 +1,1450 @@
+//===- SemaTemplateDeductionGude.cpp - Template Argument Deduction---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements deduction guides for C++ class template argument
+// deduction.
+//
+//===----------------------------------------------------------------------===//
+
+#include "TreeTransform.h"
+#include "TypeLocBuilder.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/TemplateName.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/TypeTraits.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Overload.h"
+#include "clang/Sema/Ownership.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/Template.h"
+#include "clang/Sema/TemplateDeduction.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <optional>
+#include <utility>
+
+using namespace clang;
+using namespace sema;
+
+namespace {
+/// Tree transform to "extract" a transformed type from a class template's
+/// constructor to a deduction guide.
+class ExtractTypeForDeductionGuide
+ : public TreeTransform<ExtractTypeForDeductionGuide> {
+ llvm::SmallVectorImpl<TypedefNameDecl *> &MaterializedTypedefs;
+ ClassTemplateDecl *NestedPattern;
+ const MultiLevelTemplateArgumentList *OuterInstantiationArgs;
+ std::optional<TemplateDeclInstantiator> TypedefNameInstantiator;
+
+public:
+ typedef TreeTransform<ExtractTypeForDeductionGuide> Base;
+ ExtractTypeForDeductionGuide(
+ Sema &SemaRef,
+ llvm::SmallVectorImpl<TypedefNameDecl *> &MaterializedTypedefs,
+ ClassTemplateDecl *NestedPattern = nullptr,
+ const MultiLevelTemplateArgumentList *OuterInstantiationArgs = nullptr)
+ : Base(SemaRef), MaterializedTypedefs(MaterializedTypedefs),
+ NestedPattern(NestedPattern),
+ OuterInstantiationArgs(OuterInstantiationArgs) {
+ if (OuterInstantiationArgs)
+ TypedefNameInstantiator.emplace(
+ SemaRef, SemaRef.getASTContext().getTranslationUnitDecl(),
+ *OuterInstantiationArgs);
+ }
+
+ TypeSourceInfo *transform(TypeSourceInfo *TSI) { return TransformType(TSI); }
+
+ /// Returns true if it's safe to substitute \p Typedef with
+ /// \p OuterInstantiationArgs.
+ bool mightReferToOuterTemplateParameters(TypedefNameDecl *Typedef) {
+ if (!NestedPattern)
+ return false;
+
+ static auto WalkUp = [](DeclContext *DC, DeclContext *TargetDC) {
+ if (DC->Equals(TargetDC))
+ return true;
+ while (DC->isRecord()) {
+ if (DC->Equals(TargetDC))
+ return true;
+ DC = DC->getParent();
+ }
+ return false;
+ };
+
+ if (WalkUp(Typedef->getDeclContext(), NestedPattern->getTemplatedDecl()))
+ return true;
+ if (WalkUp(NestedPattern->getTemplatedDecl(), Typedef->getDeclContext()))
+ return true;
+ return false;
+ }
+
+ QualType
+ RebuildTemplateSpecializationType(TemplateName Template,
+ SourceLocation TemplateNameLoc,
+ TemplateArgumentListInfo &TemplateArgs) {
+ if (!OuterInstantiationArgs ||
+ !isa_and_present<TypeAliasTemplateDecl>(Template.getAsTemplateDecl()))
+ return Base::RebuildTemplateSpecializationType(Template, TemplateNameLoc,
+ TemplateArgs);
+
+ auto *TATD = cast<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
+ auto *Pattern = TATD;
+ while (Pattern->getInstantiatedFromMemberTemplate())
+ Pattern = Pattern->getInstantiatedFromMemberTemplate();
+ if (!mightReferToOuterTemplateParameters(Pattern->getTemplatedDecl()))
+ return Base::RebuildTemplateSpecializationType(Template, TemplateNameLoc,
+ TemplateArgs);
+
+ Decl *NewD =
+ TypedefNameInstantiator->InstantiateTypeAliasTemplateDecl(TATD);
+ if (!NewD)
+ return QualType();
+
+ auto *NewTATD = cast<TypeAliasTemplateDecl>(NewD);
+ MaterializedTypedefs.push_back(NewTATD->getTemplatedDecl());
+
+ return Base::RebuildTemplateSpecializationType(
+ TemplateName(NewTATD), TemplateNameLoc, TemplateArgs);
+ }
+
+ QualType TransformTypedefType(TypeLocBuilder &TLB, TypedefTypeLoc TL) {
+ ASTContext &Context = SemaRef.getASTContext();
+ TypedefNameDecl *OrigDecl = TL.getTypedefNameDecl();
+ TypedefNameDecl *Decl = OrigDecl;
+ // Transform the underlying type of the typedef and clone the Decl only if
+ // the typedef has a dependent context.
+ bool InDependentContext = OrigDecl->getDeclContext()->isDependentContext();
+
+ // A typedef/alias Decl within the NestedPattern may reference the outer
+ // template parameters. They're substituted with corresponding instantiation
+ // arguments here and in RebuildTemplateSpecializationType() above.
+ // Otherwise, we would have a CTAD guide with "dangling" template
+ // parameters.
+ // For example,
+ // template <class T> struct Outer {
+ // using Alias = S<T>;
+ // template <class U> struct Inner {
+ // Inner(Alias);
+ // };
+ // };
+ if (OuterInstantiationArgs && InDependentContext &&
+ TL.getTypePtr()->isInstantiationDependentType()) {
+ Decl = cast_if_present<TypedefNameDecl>(
+ TypedefNameInstantiator->InstantiateTypedefNameDecl(
+ OrigDecl, /*IsTypeAlias=*/isa<TypeAliasDecl>(OrigDecl)));
+ if (!Decl)
+ return QualType();
+ MaterializedTypedefs.push_back(Decl);
+ } else if (InDependentContext) {
+ TypeLocBuilder InnerTLB;
+ QualType Transformed =
+ TransformType(InnerTLB, OrigDecl->getTypeSourceInfo()->getTypeLoc());
+ TypeSourceInfo *TSI = InnerTLB.getTypeSourceInfo(Context, Transformed);
+ if (isa<TypeAliasDecl>(OrigDecl))
+ Decl = TypeAliasDecl::Create(
+ Context, Context.getTranslationUnitDecl(), OrigDecl->getBeginLoc(),
+ OrigDecl->getLocation(), OrigDecl->getIdentifier(), TSI);
+ else {
+ assert(isa<TypedefDecl>(OrigDecl) && "Not a Type alias or typedef");
+ Decl = TypedefDecl::Create(
+ Context, Context.getTranslationUnitDecl(), OrigDecl->getBeginLoc(),
+ OrigDecl->getLocation(), OrigDecl->getIdentifier(), TSI);
+ }
+ MaterializedTypedefs.push_back(Decl);
+ }
+
+ QualType TDTy = Context.getTypedefType(Decl);
+ TypedefTypeLoc TypedefTL = TLB.push<TypedefTypeLoc>(TDTy);
+ TypedefTL.setNameLoc(TL.getNameLoc());
+
+ return TDTy;
+ }
+};
+
+// Build a deduction guide using the provided information.
+//
+// A deduction guide can be either a template or a non-template function
+// declaration. If \p TemplateParams is null, a non-template function
+// declaration will be created.
+NamedDecl *buildDeductionGuide(
+ Sema &SemaRef, TemplateDecl *OriginalTemplate,
+ TemplateParameterList *TemplateParams, CXXConstructorDecl *Ctor,
+ ExplicitSpecifier ES, TypeSourceInfo *TInfo, SourceLocation LocStart,
+ SourceLocation Loc, SourceLocation LocEnd, bool IsImplicit,
+ llvm::ArrayRef<TypedefNameDecl *> MaterializedTypedefs = {}) {
+ DeclContext *DC = OriginalTemplate->getDeclContext();
+ auto DeductionGuideName =
+ SemaRef.Context.DeclarationNames.getCXXDeductionGuideName(
+ OriginalTemplate);
+
+ DeclarationNameInfo Name(DeductionGuideName, Loc);
+ ArrayRef<ParmVarDecl *> Params =
+ TInfo->getTypeLoc().castAs<FunctionProtoTypeLoc>().getParams();
+
+ // Build the implicit deduction guide template.
+ auto *Guide =
+ CXXDeductionGuideDecl::Create(SemaRef.Context, DC, LocStart, ES, Name,
+ TInfo->getType(), TInfo, LocEnd, Ctor);
+ Guide->setImplicit(IsImplicit);
+ Guide->setParams(Params);
+
+ for (auto *Param : Params)
+ Param->setDeclContext(Guide);
+ for (auto *TD : MaterializedTypedefs)
+ TD->setDeclContext(Guide);
+ if (isa<CXXRecordDecl>(DC))
+ Guide->setAccess(AS_public);
+
+ if (!TemplateParams) {
+ DC->addDecl(Guide);
+ return Guide;
+ }
+
+ auto *GuideTemplate = FunctionTemplateDecl::Create(
+ SemaRef.Context, DC, Loc, DeductionGuideName, TemplateParams, Guide);
+ GuideTemplate->setImplicit(IsImplicit);
+ Guide->setDescribedFunctionTemplate(GuideTemplate);
+
+ if (isa<CXXRecordDecl>(DC))
+ GuideTemplate->setAccess(AS_public);
+
+ DC->addDecl(GuideTemplate);
+ return GuideTemplate;
+}
+
+// Transform a given template type parameter `TTP`.
+TemplateTypeParmDecl *
+transformTemplateTypeParam(Sema &SemaRef, DeclContext *DC,
+ TemplateTypeParmDecl *TTP,
+ MultiLevelTemplateArgumentList &Args,
+ unsigned NewDepth, unsigned NewIndex) {
+ // TemplateTypeParmDecl's index cannot be changed after creation, so
+ // substitute it directly.
+ auto *NewTTP = TemplateTypeParmDecl::Create(
+ SemaRef.Context, DC, TTP->getBeginLoc(), TTP->getLocation(), NewDepth,
+ NewIndex, TTP->getIdentifier(), TTP->wasDeclaredWithTypename(),
+ TTP->isParameterPack(), TTP->hasTypeConstraint(),
+ TTP->isExpandedParameterPack()
+ ? std::optional<unsigned>(TTP->getNumExpansionParameters())
+ : std::nullopt);
+ if (const auto *TC = TTP->getTypeConstraint())
+ SemaRef.SubstTypeConstraint(NewTTP, TC, Args,
+ /*EvaluateConstraint=*/true);
+ if (TTP->hasDefaultArgument()) {
+ TemplateArgumentLoc InstantiatedDefaultArg;
+ if (!SemaRef.SubstTemplateArgument(
+ TTP->getDefaultArgument(), Args, InstantiatedDefaultArg,
+ TTP->getDefaultArgumentLoc(), TTP->getDeclName()))
+ NewTTP->setDefaultArgument(SemaRef.Context, InstantiatedDefaultArg);
+ }
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(TTP, NewTTP);
+ return NewTTP;
+}
+// Similar to above, but for non-type template or template template parameters.
+template <typename NonTypeTemplateOrTemplateTemplateParmDecl>
+NonTypeTemplateOrTemplateTemplateParmDecl *
+transformTemplateParam(Sema &SemaRef, DeclContext *DC,
+ NonTypeTemplateOrTemplateTemplateParmDecl *OldParam,
+ MultiLevelTemplateArgumentList &Args, unsigned NewIndex,
+ unsigned NewDepth) {
+ // Ask the template instantiator to do the heavy lifting for us, then adjust
+ // the index of the parameter once it's done.
+ auto *NewParam = cast<NonTypeTemplateOrTemplateTemplateParmDecl>(
+ SemaRef.SubstDecl(OldParam, DC, Args));
+ NewParam->setPosition(NewIndex);
+ NewParam->setDepth(NewDepth);
+ return NewParam;
+}
+
/// Transform to convert portions of a constructor declaration into the
/// corresponding deduction guide, per C++1z [over.match.class.deduct]p1.
struct ConvertConstructorToDeductionGuideTransform {
  ConvertConstructorToDeductionGuideTransform(Sema &S,
                                              ClassTemplateDecl *Template)
      : SemaRef(S), Template(Template) {
    // If the template is nested, then we need to use the original
    // pattern to iterate over the constructors.
    ClassTemplateDecl *Pattern = Template;
    while (Pattern->getInstantiatedFromMemberTemplate()) {
      // A member specialization provides its own definition; stop walking up.
      if (Pattern->isMemberSpecialization())
        break;
      Pattern = Pattern->getInstantiatedFromMemberTemplate();
      NestedPattern = Pattern;
    }

    if (NestedPattern)
      OuterInstantiationArgs = SemaRef.getTemplateInstantiationArgs(Template);
  }

  Sema &SemaRef;
  ClassTemplateDecl *Template;
  // Outermost uninstantiated pattern when Template is a member of another
  // class template; remains null for non-nested templates.
  ClassTemplateDecl *NestedPattern = nullptr;

  DeclContext *DC = Template->getDeclContext();
  CXXRecordDecl *Primary = Template->getTemplatedDecl();
  DeclarationName DeductionGuideName =
      SemaRef.Context.DeclarationNames.getCXXDeductionGuideName(Template);

  // Injected-class-name type of the primary template; used as the guide's
  // (trailing) return type below.
  QualType DeducedType = SemaRef.Context.getTypeDeclType(Primary);

  // Index adjustment to apply to convert depth-1 template parameters into
  // depth-0 template parameters.
  unsigned Depth1IndexAdjustment = Template->getTemplateParameters()->size();

  // Instantiation arguments for the outermost depth-1 templates
  // when the template is nested
  MultiLevelTemplateArgumentList OuterInstantiationArgs;

  /// Transform a constructor declaration into a deduction guide.
  /// \param FTD the constructor's function template, or null for a
  ///        non-template constructor.
  /// \param CD the constructor itself.
  /// \returns the deduction guide declaration, or null on error.
  NamedDecl *transformConstructor(FunctionTemplateDecl *FTD,
                                  CXXConstructorDecl *CD) {
    SmallVector<TemplateArgument, 16> SubstArgs;

    LocalInstantiationScope Scope(SemaRef);

    // C++ [over.match.class.deduct]p1:
    // -- For each constructor of the class template designated by the
    //    template-name, a function template with the following properties:

    // -- The template parameters are the template parameters of the class
    //    template followed by the template parameters (including default
    //    template arguments) of the constructor, if any.
    TemplateParameterList *TemplateParams =
        SemaRef.GetTemplateParameterList(Template);
    if (FTD) {
      TemplateParameterList *InnerParams = FTD->getTemplateParameters();
      SmallVector<NamedDecl *, 16> AllParams;
      SmallVector<TemplateArgument, 16> Depth1Args;
      AllParams.reserve(TemplateParams->size() + InnerParams->size());
      AllParams.insert(AllParams.begin(), TemplateParams->begin(),
                       TemplateParams->end());
      SubstArgs.reserve(InnerParams->size());
      Depth1Args.reserve(InnerParams->size());

      // Later template parameters could refer to earlier ones, so build up
      // a list of substituted template arguments as we go.
      for (NamedDecl *Param : *InnerParams) {
        MultiLevelTemplateArgumentList Args;
        Args.setKind(TemplateSubstitutionKind::Rewrite);
        Args.addOuterTemplateArguments(Depth1Args);
        Args.addOuterRetainedLevel();
        if (NestedPattern)
          Args.addOuterRetainedLevels(NestedPattern->getTemplateDepth());
        NamedDecl *NewParam = transformTemplateParameter(Param, Args);
        if (!NewParam)
          return nullptr;
        // Constraints require that we substitute depth-1 arguments
        // to match depths when substituted for evaluation later
        Depth1Args.push_back(SemaRef.Context.getInjectedTemplateArg(NewParam));

        if (NestedPattern) {
          // For nested templates, the rebuilt parameter still references the
          // outer template's parameters; instantiate those away here.
          TemplateDeclInstantiator Instantiator(SemaRef, DC,
                                                OuterInstantiationArgs);
          Instantiator.setEvaluateConstraints(false);
          SemaRef.runWithSufficientStackSpace(NewParam->getLocation(), [&] {
            NewParam = cast<NamedDecl>(Instantiator.Visit(NewParam));
          });
        }

        assert(NewParam->getTemplateDepth() == 0 &&
               "Unexpected template parameter depth");

        AllParams.push_back(NewParam);
        SubstArgs.push_back(SemaRef.Context.getInjectedTemplateArg(NewParam));
      }

      // Substitute new template parameters into requires-clause if present.
      Expr *RequiresClause = nullptr;
      if (Expr *InnerRC = InnerParams->getRequiresClause()) {
        MultiLevelTemplateArgumentList Args;
        Args.setKind(TemplateSubstitutionKind::Rewrite);
        Args.addOuterTemplateArguments(Depth1Args);
        Args.addOuterRetainedLevel();
        if (NestedPattern)
          Args.addOuterRetainedLevels(NestedPattern->getTemplateDepth());
        ExprResult E = SemaRef.SubstExpr(InnerRC, Args);
        if (E.isInvalid())
          return nullptr;
        RequiresClause = E.getAs<Expr>();
      }

      TemplateParams = TemplateParameterList::Create(
          SemaRef.Context, InnerParams->getTemplateLoc(),
          InnerParams->getLAngleLoc(), AllParams, InnerParams->getRAngleLoc(),
          RequiresClause);
    }

    // If we built a new template-parameter-list, track that we need to
    // substitute references to the old parameters into references to the
    // new ones.
    MultiLevelTemplateArgumentList Args;
    Args.setKind(TemplateSubstitutionKind::Rewrite);
    if (FTD) {
      Args.addOuterTemplateArguments(SubstArgs);
      Args.addOuterRetainedLevel();
    }

    FunctionProtoTypeLoc FPTL = CD->getTypeSourceInfo()
                                    ->getTypeLoc()
                                    .getAsAdjusted<FunctionProtoTypeLoc>();
    assert(FPTL && "no prototype for constructor declaration");

    // Transform the type of the function, adjusting the return type and
    // replacing references to the old parameters with references to the
    // new ones.
    TypeLocBuilder TLB;
    SmallVector<ParmVarDecl *, 8> Params;
    SmallVector<TypedefNameDecl *, 4> MaterializedTypedefs;
    QualType NewType = transformFunctionProtoType(TLB, FPTL, Params, Args,
                                                  MaterializedTypedefs);
    if (NewType.isNull())
      return nullptr;
    TypeSourceInfo *NewTInfo = TLB.getTypeSourceInfo(SemaRef.Context, NewType);

    return buildDeductionGuide(
        SemaRef, Template, TemplateParams, CD, CD->getExplicitSpecifier(),
        NewTInfo, CD->getBeginLoc(), CD->getLocation(), CD->getEndLoc(),
        /*IsImplicit=*/true, MaterializedTypedefs);
  }

  /// Build a deduction guide with the specified parameter types.
  NamedDecl *buildSimpleDeductionGuide(MutableArrayRef<QualType> ParamTypes) {
    SourceLocation Loc = Template->getLocation();

    // Build the requested type.
    FunctionProtoType::ExtProtoInfo EPI;
    EPI.HasTrailingReturn = true;
    QualType Result = SemaRef.BuildFunctionType(DeducedType, ParamTypes, Loc,
                                                DeductionGuideName, EPI);
    TypeSourceInfo *TSI = SemaRef.Context.getTrivialTypeSourceInfo(Result, Loc);
    if (NestedPattern)
      TSI = SemaRef.SubstType(TSI, OuterInstantiationArgs, Loc,
                              DeductionGuideName);

    // SubstType may fail; bail out rather than build a guide with no type.
    if (!TSI)
      return nullptr;

    FunctionProtoTypeLoc FPTL =
        TSI->getTypeLoc().castAs<FunctionProtoTypeLoc>();

    // Build the parameters, needed during deduction / substitution.
    SmallVector<ParmVarDecl *, 4> Params;
    for (auto T : ParamTypes) {
      auto *TSI = SemaRef.Context.getTrivialTypeSourceInfo(T, Loc);
      if (NestedPattern)
        TSI = SemaRef.SubstType(TSI, OuterInstantiationArgs, Loc,
                                DeclarationName());
      if (!TSI)
        return nullptr;

      ParmVarDecl *NewParam =
          ParmVarDecl::Create(SemaRef.Context, DC, Loc, Loc, nullptr,
                              TSI->getType(), TSI, SC_None, nullptr);
      NewParam->setScopeInfo(0, Params.size());
      FPTL.setParam(Params.size(), NewParam);
      Params.push_back(NewParam);
    }

    return buildDeductionGuide(
        SemaRef, Template, SemaRef.GetTemplateParameterList(Template), nullptr,
        ExplicitSpecifier(), TSI, Loc, Loc, Loc, /*IsImplicit=*/true);
  }

private:
  /// Transform a constructor template parameter into a deduction guide template
  /// parameter, rebuilding any internal references to earlier parameters and
  /// renumbering as we go.
  NamedDecl *transformTemplateParameter(NamedDecl *TemplateParam,
                                        MultiLevelTemplateArgumentList &Args) {
    if (auto *TTP = dyn_cast<TemplateTypeParmDecl>(TemplateParam))
      return transformTemplateTypeParam(
          SemaRef, DC, TTP, Args, TTP->getDepth() - 1,
          Depth1IndexAdjustment + TTP->getIndex());
    if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(TemplateParam))
      return transformTemplateParam(SemaRef, DC, TTP, Args,
                                    Depth1IndexAdjustment + TTP->getIndex(),
                                    TTP->getDepth() - 1);
    auto *NTTP = cast<NonTypeTemplateParmDecl>(TemplateParam);
    return transformTemplateParam(SemaRef, DC, NTTP, Args,
                                  Depth1IndexAdjustment + NTTP->getIndex(),
                                  NTTP->getDepth() - 1);
  }

  /// Build the deduction guide's function type from the constructor's
  /// prototype: same parameter types (substituted), injected-class-name
  /// return type, inherited variadicness.
  QualType transformFunctionProtoType(
      TypeLocBuilder &TLB, FunctionProtoTypeLoc TL,
      SmallVectorImpl<ParmVarDecl *> &Params,
      MultiLevelTemplateArgumentList &Args,
      SmallVectorImpl<TypedefNameDecl *> &MaterializedTypedefs) {
    SmallVector<QualType, 4> ParamTypes;
    const FunctionProtoType *T = TL.getTypePtr();

    // -- The types of the function parameters are those of the constructor.
    for (auto *OldParam : TL.getParams()) {
      ParmVarDecl *NewParam = OldParam;
      // Given
      //   template <class T> struct C {
      //     template <class U> struct D {
      //       template <class V> D(U, V);
      //     };
      //   };
      // First, transform all the references to template parameters that are
      // defined outside of the surrounding class template. That is T in the
      // above example.
      if (NestedPattern) {
        NewParam = transformFunctionTypeParam(
            NewParam, OuterInstantiationArgs, MaterializedTypedefs,
            /*TransformingOuterPatterns=*/true);
        if (!NewParam)
          return QualType();
      }
      // Then, transform all the references to template parameters that are
      // defined at the class template and the constructor. In this example,
      // they're U and V, respectively.
      NewParam =
          transformFunctionTypeParam(NewParam, Args, MaterializedTypedefs,
                                     /*TransformingOuterPatterns=*/false);
      if (!NewParam)
        return QualType();
      ParamTypes.push_back(NewParam->getType());
      Params.push_back(NewParam);
    }

    // -- The return type is the class template specialization designated by
    //    the template-name and template arguments corresponding to the
    //    template parameters obtained from the class template.
    //
    // We use the injected-class-name type of the primary template instead.
    // This has the convenient property that it is different from any type that
    // the user can write in a deduction-guide (because they cannot enter the
    // context of the template), so implicit deduction guides can never collide
    // with explicit ones.
    QualType ReturnType = DeducedType;
    TLB.pushTypeSpec(ReturnType).setNameLoc(Primary->getLocation());

    // Resolving a wording defect, we also inherit the variadicness of the
    // constructor.
    FunctionProtoType::ExtProtoInfo EPI;
    EPI.Variadic = T->isVariadic();
    EPI.HasTrailingReturn = true;

    QualType Result = SemaRef.BuildFunctionType(
        ReturnType, ParamTypes, TL.getBeginLoc(), DeductionGuideName, EPI);
    if (Result.isNull())
      return QualType();

    // Fill in source locations for the new function type; the exception spec
    // range is intentionally cleared since guides have none.
    FunctionProtoTypeLoc NewTL = TLB.push<FunctionProtoTypeLoc>(Result);
    NewTL.setLocalRangeBegin(TL.getLocalRangeBegin());
    NewTL.setLParenLoc(TL.getLParenLoc());
    NewTL.setRParenLoc(TL.getRParenLoc());
    NewTL.setExceptionSpecRange(SourceRange());
    NewTL.setLocalRangeEnd(TL.getLocalRangeEnd());
    for (unsigned I = 0, E = NewTL.getNumParams(); I != E; ++I)
      NewTL.setParam(I, Params[I]);

    return Result;
  }

  /// Substitute one constructor parameter for the deduction guide,
  /// expanding single-element packs and materializing typedefs as needed.
  ParmVarDecl *transformFunctionTypeParam(
      ParmVarDecl *OldParam, MultiLevelTemplateArgumentList &Args,
      llvm::SmallVectorImpl<TypedefNameDecl *> &MaterializedTypedefs,
      bool TransformingOuterPatterns) {
    TypeSourceInfo *OldDI = OldParam->getTypeSourceInfo();
    TypeSourceInfo *NewDI;
    if (auto PackTL = OldDI->getTypeLoc().getAs<PackExpansionTypeLoc>()) {
      // Expand out the one and only element in each inner pack.
      Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, 0);
      NewDI =
          SemaRef.SubstType(PackTL.getPatternLoc(), Args,
                            OldParam->getLocation(), OldParam->getDeclName());
      if (!NewDI)
        return nullptr;
      NewDI =
          SemaRef.CheckPackExpansion(NewDI, PackTL.getEllipsisLoc(),
                                     PackTL.getTypePtr()->getNumExpansions());
    } else
      NewDI = SemaRef.SubstType(OldDI, Args, OldParam->getLocation(),
                                OldParam->getDeclName());
    if (!NewDI)
      return nullptr;

    // Extract the type. This (for instance) replaces references to typedef
    // members of the current instantiations with the definitions of those
    // typedefs, avoiding triggering instantiation of the deduced type during
    // deduction.
    NewDI = ExtractTypeForDeductionGuide(
                SemaRef, MaterializedTypedefs, NestedPattern,
                TransformingOuterPatterns ? &Args : nullptr)
                .transform(NewDI);

    // Resolving a wording defect, we also inherit default arguments from the
    // constructor.
    ExprResult NewDefArg;
    if (OldParam->hasDefaultArg()) {
      // We don't care what the value is (we won't use it); just create a
      // placeholder to indicate there is a default argument.
      QualType ParamTy = NewDI->getType();
      NewDefArg = new (SemaRef.Context)
          OpaqueValueExpr(OldParam->getDefaultArgRange().getBegin(),
                          ParamTy.getNonLValueExprType(SemaRef.Context),
                          ParamTy->isLValueReferenceType()   ? VK_LValue
                          : ParamTy->isRValueReferenceType() ? VK_XValue
                                                             : VK_PRValue);
    }
    // Handle arrays and functions decay.
    auto NewType = NewDI->getType();
    if (NewType->isArrayType() || NewType->isFunctionType())
      NewType = SemaRef.Context.getDecayedType(NewType);

    ParmVarDecl *NewParam = ParmVarDecl::Create(
        SemaRef.Context, DC, OldParam->getInnerLocStart(),
        OldParam->getLocation(), OldParam->getIdentifier(), NewType, NewDI,
        OldParam->getStorageClass(), NewDefArg.get());
    NewParam->setScopeInfo(OldParam->getFunctionScopeDepth(),
                           OldParam->getFunctionScopeIndex());
    SemaRef.CurrentInstantiationScope->InstantiatedLocal(OldParam, NewParam);
    return NewParam;
  }
};
+
+unsigned getTemplateParameterDepth(NamedDecl *TemplateParam) {
+ if (auto *TTP = dyn_cast<TemplateTypeParmDecl>(TemplateParam))
+ return TTP->getDepth();
+ if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(TemplateParam))
+ return TTP->getDepth();
+ if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(TemplateParam))
+ return NTTP->getDepth();
+ llvm_unreachable("Unhandled template parameter types");
+}
+
+unsigned getTemplateParameterIndex(NamedDecl *TemplateParam) {
+ if (auto *TTP = dyn_cast<TemplateTypeParmDecl>(TemplateParam))
+ return TTP->getIndex();
+ if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(TemplateParam))
+ return TTP->getIndex();
+ if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(TemplateParam))
+ return NTTP->getIndex();
+ llvm_unreachable("Unhandled template parameter types");
+}
+
// Find all template parameters that appear in the given DeducedArgs.
// Return the indices of the template parameters in the TemplateParams.
SmallVector<unsigned> TemplateParamsReferencedInTemplateArgumentList(
    const TemplateParameterList *TemplateParamsList,
    ArrayRef<TemplateArgument> DeducedArgs) {
  // AST visitor that records which parameters of TemplateParamList are
  // referenced, keyed by (depth, index) so sugared references still match.
  struct TemplateParamsReferencedFinder
      : public RecursiveASTVisitor<TemplateParamsReferencedFinder> {
    const TemplateParameterList *TemplateParamList;
    // One bit per parameter in TemplateParamList; set when referenced.
    llvm::BitVector ReferencedTemplateParams;

    TemplateParamsReferencedFinder(
        const TemplateParameterList *TemplateParamList)
        : TemplateParamList(TemplateParamList),
          ReferencedTemplateParams(TemplateParamList->size()) {}

    bool VisitTemplateTypeParmType(TemplateTypeParmType *TTP) {
      // We use the index and depth to retrieve the corresponding template
      // parameter from the parameter list, which is more robust.
      Mark(TTP->getDepth(), TTP->getIndex());
      return true;
    }

    bool VisitDeclRefExpr(DeclRefExpr *DRE) {
      // Catches references to non-type template parameters.
      MarkAppeared(DRE->getFoundDecl());
      return true;
    }

    bool TraverseTemplateName(TemplateName Template) {
      // Catches references to template template parameters.
      if (auto *TD = Template.getAsTemplateDecl())
        MarkAppeared(TD);
      return RecursiveASTVisitor::TraverseTemplateName(Template);
    }

    void MarkAppeared(NamedDecl *ND) {
      if (llvm::isa<NonTypeTemplateParmDecl, TemplateTypeParmDecl,
                    TemplateTemplateParmDecl>(ND))
        Mark(getTemplateParameterDepth(ND), getTemplateParameterIndex(ND));
    }
    void Mark(unsigned Depth, unsigned Index) {
      // Only record parameters that actually belong to TemplateParamList;
      // the depth check filters out parameters of enclosing templates.
      if (Index < TemplateParamList->size() &&
          TemplateParamList->getParam(Index)->getTemplateDepth() == Depth)
        ReferencedTemplateParams.set(Index);
    }
  };
  TemplateParamsReferencedFinder Finder(TemplateParamsList);
  Finder.TraverseTemplateArguments(DeducedArgs);

  // Collect the set bits into an ordered index list.
  SmallVector<unsigned> Results;
  for (unsigned Index = 0; Index < TemplateParamsList->size(); ++Index) {
    if (Finder.ReferencedTemplateParams[Index])
      Results.push_back(Index);
  }
  return Results;
}
+
+bool hasDeclaredDeductionGuides(DeclarationName Name, DeclContext *DC) {
+ // Check whether we've already declared deduction guides for this template.
+ // FIXME: Consider storing a flag on the template to indicate this.
+ assert(Name.getNameKind() ==
+ DeclarationName::NameKind::CXXDeductionGuideName &&
+ "name must be a deduction guide name");
+ auto Existing = DC->lookup(Name);
+ for (auto *D : Existing)
+ if (D->isImplicit())
+ return true;
+ return false;
+}
+
+NamedDecl *transformTemplateParameter(Sema &SemaRef, DeclContext *DC,
+ NamedDecl *TemplateParam,
+ MultiLevelTemplateArgumentList &Args,
+ unsigned NewIndex, unsigned NewDepth) {
+ if (auto *TTP = dyn_cast<TemplateTypeParmDecl>(TemplateParam))
+ return transformTemplateTypeParam(SemaRef, DC, TTP, Args, NewDepth,
+ NewIndex);
+ if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(TemplateParam))
+ return transformTemplateParam(SemaRef, DC, TTP, Args, NewIndex, NewDepth);
+ if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(TemplateParam))
+ return transformTemplateParam(SemaRef, DC, NTTP, Args, NewIndex, NewDepth);
+ llvm_unreachable("Unhandled template parameter types");
+}
+
// Build the associated constraints for the alias deduction guides.
// C++ [over.match.class.deduct]p3.3:
//   The associated constraints ([temp.constr.decl]) are the conjunction of the
//   associated constraints of g and a constraint that is satisfied if and only
//   if the arguments of A are deducible (see below) from the return type.
//
// The return result is expected to be the require-clause for the synthesized
// alias deduction guide.
Expr *
buildAssociatedConstraints(Sema &SemaRef, FunctionTemplateDecl *F,
                           TypeAliasTemplateDecl *AliasTemplate,
                           ArrayRef<DeducedTemplateArgument> DeduceResults,
                           unsigned FirstUndeducedParamIdx, Expr *IsDeducible) {
  Expr *RC = F->getTemplateParameters()->getRequiresClause();
  // No requires-clause on the underlying guide: the conjunction degenerates
  // to the is_deducible constraint alone.
  if (!RC)
    return IsDeducible;

  ASTContext &Context = SemaRef.Context;
  LocalInstantiationScope Scope(SemaRef);

  // In the clang AST, constraint nodes are deliberately not instantiated unless
  // they are actively being evaluated. Consequently, occurrences of template
  // parameters in the require-clause expression have a subtle "depth"
  // difference compared to normal occurrences in places, such as function
  // parameters. When transforming the require-clause, we must take this
  // distinction into account:
  //
  //   1) In the transformed require-clause, occurrences of template parameters
  //   must use the "uninstantiated" depth;
  //   2) When substituting on the require-clause expr of the underlying
  //   deduction guide, we must use the entire set of template argument lists;
  //
  // It's important to note that we're performing this transformation on an
  // *instantiated* AliasTemplate.

  // For 1), if the alias template is nested within a class template, we
  // calculate the 'uninstantiated' depth by adding the substitution level back.
  unsigned AdjustDepth = 0;
  if (auto *PrimaryTemplate =
          AliasTemplate->getInstantiatedFromMemberTemplate())
    AdjustDepth = PrimaryTemplate->getTemplateDepth();

  // We rebuild all template parameters with the uninstantiated depth, and
  // build template arguments that refer to them.
  SmallVector<TemplateArgument> AdjustedAliasTemplateArgs;

  for (auto *TP : *AliasTemplate->getTemplateParameters()) {
    // Rebuild any internal references to earlier parameters and reindex
    // as we go.
    MultiLevelTemplateArgumentList Args;
    Args.setKind(TemplateSubstitutionKind::Rewrite);
    Args.addOuterTemplateArguments(AdjustedAliasTemplateArgs);
    NamedDecl *NewParam = transformTemplateParameter(
        SemaRef, AliasTemplate->getDeclContext(), TP, Args,
        /*NewIndex=*/AdjustedAliasTemplateArgs.size(),
        getTemplateParameterDepth(TP) + AdjustDepth);

    TemplateArgument NewTemplateArgument =
        Context.getInjectedTemplateArg(NewParam);
    AdjustedAliasTemplateArgs.push_back(NewTemplateArgument);
  }
  // Template arguments used to transform the template arguments in
  // DeducedResults.
  SmallVector<TemplateArgument> TemplateArgsForBuildingRC(
      F->getTemplateParameters()->size());
  // Transform the transformed template args
  MultiLevelTemplateArgumentList Args;
  Args.setKind(TemplateSubstitutionKind::Rewrite);
  Args.addOuterTemplateArguments(AdjustedAliasTemplateArgs);

  for (unsigned Index = 0; Index < DeduceResults.size(); ++Index) {
    const auto &D = DeduceResults[Index];
    if (D.isNull()) { // non-deduced template parameters of f
      NamedDecl *TP = F->getTemplateParameters()->getParam(Index);
      MultiLevelTemplateArgumentList Args;
      Args.setKind(TemplateSubstitutionKind::Rewrite);
      Args.addOuterTemplateArguments(TemplateArgsForBuildingRC);
      // Rebuild the template parameter with updated depth and index.
      NamedDecl *NewParam = transformTemplateParameter(
          SemaRef, F->getDeclContext(), TP, Args,
          /*NewIndex=*/FirstUndeducedParamIdx,
          getTemplateParameterDepth(TP) + AdjustDepth);
      FirstUndeducedParamIdx += 1;
      assert(TemplateArgsForBuildingRC[Index].isNull());
      TemplateArgsForBuildingRC[Index] =
          Context.getInjectedTemplateArg(NewParam);
      continue;
    }
    // Deduced: rewrite the deduced argument in terms of the adjusted alias
    // template parameters built above.
    TemplateArgumentLoc Input =
        SemaRef.getTrivialTemplateArgumentLoc(D, QualType(), SourceLocation{});
    TemplateArgumentLoc Output;
    if (!SemaRef.SubstTemplateArgument(Input, Args, Output)) {
      assert(TemplateArgsForBuildingRC[Index].isNull() &&
             "InstantiatedArgs must be null before setting");
      TemplateArgsForBuildingRC[Index] = Output.getArgument();
    }
  }

  // A list of template arguments for transforming the require-clause of F.
  // It must contain the entire set of template argument lists.
  MultiLevelTemplateArgumentList ArgsForBuildingRC;
  ArgsForBuildingRC.setKind(clang::TemplateSubstitutionKind::Rewrite);
  ArgsForBuildingRC.addOuterTemplateArguments(TemplateArgsForBuildingRC);
  // For 2), if the underlying deduction guide F is nested in a class template,
  // we need the entire template argument list, as the constraint AST in the
  // require-clause of F remains completely uninstantiated.
  //
  // For example:
  //   template <typename T> // depth 0
  //   struct Outer {
  //     template <typename U>
  //     struct Foo { Foo(U); };
  //
  //     template <typename U> // depth 1
  //     requires C<U>
  //     Foo(U) -> Foo<int>;
  //   };
  //   template <typename U>
  //   using AFoo = Outer<int>::Foo<U>;
  //
  // In this scenario, the deduction guide for `Foo` inside `Outer<int>`:
  //   - The occurrence of U in the require-expression is [depth:1, index:0]
  //   - The occurrence of U in the function parameter is [depth:0, index:0]
  //   - The template parameter of U is [depth:0, index:0]
  //
  // We add the outer template arguments which is [int] to the multi-level arg
  // list to ensure that the occurrence U in `C<U>` will be replaced with int
  // during the substitution.
  //
  // NOTE: The underlying deduction guide F is instantiated -- either from an
  // explicitly-written deduction guide member, or from a constructor.
  // getInstantiatedFromMemberTemplate() can only handle the former case, so we
  // check the DeclContext kind.
  if (F->getLexicalDeclContext()->getDeclKind() ==
      clang::Decl::ClassTemplateSpecialization) {
    auto OuterLevelArgs = SemaRef.getTemplateInstantiationArgs(
        F, F->getLexicalDeclContext(),
        /*Final=*/false, /*Innermost=*/std::nullopt,
        /*RelativeToPrimary=*/true,
        /*Pattern=*/nullptr,
        /*ForConstraintInstantiation=*/true);
    for (auto It : OuterLevelArgs)
      ArgsForBuildingRC.addOuterTemplateArguments(It.Args);
  }

  ExprResult E = SemaRef.SubstExpr(RC, ArgsForBuildingRC);
  if (E.isInvalid())
    return nullptr;

  // Conjoin the transformed requires-clause with the is_deducible constraint.
  auto Conjunction =
      SemaRef.BuildBinOp(SemaRef.getCurScope(), SourceLocation{},
                         BinaryOperatorKind::BO_LAnd, E.get(), IsDeducible);
  if (Conjunction.isInvalid())
    return nullptr;
  return Conjunction.getAs<Expr>();
}
+// Build the is_deducible constraint for the alias deduction guides.
+// [over.match.class.deduct]p3.3:
+// ... and a constraint that is satisfied if and only if the arguments
+// of A are deducible (see below) from the return type.
+Expr *buildIsDeducibleConstraint(Sema &SemaRef,
+ TypeAliasTemplateDecl *AliasTemplate,
+ QualType ReturnType,
+ SmallVector<NamedDecl *> TemplateParams) {
+ ASTContext &Context = SemaRef.Context;
+ // Constraint AST nodes must use uninstantiated depth.
+ if (auto *PrimaryTemplate =
+ AliasTemplate->getInstantiatedFromMemberTemplate();
+ PrimaryTemplate && TemplateParams.size() > 0) {
+ LocalInstantiationScope Scope(SemaRef);
+
+ // Adjust the depth for TemplateParams.
+ unsigned AdjustDepth = PrimaryTemplate->getTemplateDepth();
+ SmallVector<TemplateArgument> TransformedTemplateArgs;
+ for (auto *TP : TemplateParams) {
+ // Rebuild any internal references to earlier parameters and reindex
+ // as we go.
+ MultiLevelTemplateArgumentList Args;
+ Args.setKind(TemplateSubstitutionKind::Rewrite);
+ Args.addOuterTemplateArguments(TransformedTemplateArgs);
+ NamedDecl *NewParam = transformTemplateParameter(
+ SemaRef, AliasTemplate->getDeclContext(), TP, Args,
+ /*NewIndex=*/TransformedTemplateArgs.size(),
+ getTemplateParameterDepth(TP) + AdjustDepth);
+
+ TemplateArgument NewTemplateArgument =
+ Context.getInjectedTemplateArg(NewParam);
+ TransformedTemplateArgs.push_back(NewTemplateArgument);
+ }
+ // Transformed the ReturnType to restore the uninstantiated depth.
+ MultiLevelTemplateArgumentList Args;
+ Args.setKind(TemplateSubstitutionKind::Rewrite);
+ Args.addOuterTemplateArguments(TransformedTemplateArgs);
+ ReturnType = SemaRef.SubstType(
+ ReturnType, Args, AliasTemplate->getLocation(),
+ Context.DeclarationNames.getCXXDeductionGuideName(AliasTemplate));
+ };
+
+ SmallVector<TypeSourceInfo *> IsDeducibleTypeTraitArgs = {
+ Context.getTrivialTypeSourceInfo(
+ Context.getDeducedTemplateSpecializationType(
+ TemplateName(AliasTemplate), /*DeducedType=*/QualType(),
+ /*IsDependent=*/true)), // template specialization type whose
+ // arguments will be deduced.
+ Context.getTrivialTypeSourceInfo(
+ ReturnType), // type from which template arguments are deduced.
+ };
+ return TypeTraitExpr::Create(
+ Context, Context.getLogicalOperationType(), AliasTemplate->getLocation(),
+ TypeTrait::BTT_IsDeducible, IsDeducibleTypeTraitArgs,
+ AliasTemplate->getLocation(), /*Value*/ false);
+}
+
+std::pair<TemplateDecl *, llvm::ArrayRef<TemplateArgument>>
+getRHSTemplateDeclAndArgs(Sema &SemaRef, TypeAliasTemplateDecl *AliasTemplate) {
+ // Unwrap the sugared ElaboratedType.
+ auto RhsType = AliasTemplate->getTemplatedDecl()
+ ->getUnderlyingType()
+ .getSingleStepDesugaredType(SemaRef.Context);
+ TemplateDecl *Template = nullptr;
+ llvm::ArrayRef<TemplateArgument> AliasRhsTemplateArgs;
+ if (const auto *TST = RhsType->getAs<TemplateSpecializationType>()) {
+ // Cases where the RHS of the alias is dependent. e.g.
+ // template<typename T>
+ // using AliasFoo1 = Foo<T>; // a class/type alias template specialization
+ Template = TST->getTemplateName().getAsTemplateDecl();
+ AliasRhsTemplateArgs = TST->template_arguments();
+ } else if (const auto *RT = RhsType->getAs<RecordType>()) {
+ // Cases where template arguments in the RHS of the alias are not
+ // dependent. e.g.
+ // using AliasFoo = Foo<bool>;
+ if (const auto *CTSD = llvm::dyn_cast<ClassTemplateSpecializationDecl>(
+ RT->getAsCXXRecordDecl())) {
+ Template = CTSD->getSpecializedTemplate();
+ AliasRhsTemplateArgs = CTSD->getTemplateArgs().asArray();
+ }
+ } else {
+ assert(false && "unhandled RHS type of the alias");
+ }
+ return {Template, AliasRhsTemplateArgs};
+}
+
+// Build deduction guides for a type alias template from the given underlying
+// deduction guide F.
+FunctionTemplateDecl *
+BuildDeductionGuideForTypeAlias(Sema &SemaRef,
+ TypeAliasTemplateDecl *AliasTemplate,
+ FunctionTemplateDecl *F, SourceLocation Loc) {
+ LocalInstantiationScope Scope(SemaRef);
+ Sema::InstantiatingTemplate BuildingDeductionGuides(
+ SemaRef, AliasTemplate->getLocation(), F,
+ Sema::InstantiatingTemplate::BuildingDeductionGuidesTag{});
+ if (BuildingDeductionGuides.isInvalid())
+ return nullptr;
+
+ auto &Context = SemaRef.Context;
+ auto [Template, AliasRhsTemplateArgs] =
+ getRHSTemplateDeclAndArgs(SemaRef, AliasTemplate);
+
+ auto RType = F->getTemplatedDecl()->getReturnType();
+ // The (trailing) return type of the deduction guide.
+ const TemplateSpecializationType *FReturnType =
+ RType->getAs<TemplateSpecializationType>();
+ if (const auto *InjectedCNT = RType->getAs<InjectedClassNameType>())
+ // implicitly-generated deduction guide.
+ FReturnType = InjectedCNT->getInjectedTST();
+ else if (const auto *ET = RType->getAs<ElaboratedType>())
+ // explicit deduction guide.
+ FReturnType = ET->getNamedType()->getAs<TemplateSpecializationType>();
+ assert(FReturnType && "expected to see a return type");
+ // Deduce template arguments of the deduction guide f from the RHS of
+ // the alias.
+ //
+ // C++ [over.match.class.deduct]p3: ...For each function or function
+ // template f in the guides of the template named by the
+ // simple-template-id of the defining-type-id, the template arguments
+ // of the return type of f are deduced from the defining-type-id of A
+ // according to the process in [temp.deduct.type] with the exception
+ // that deduction does not fail if not all template arguments are
+ // deduced.
+ //
+ //
+ // template<typename X, typename Y>
+ // f(X, Y) -> f<Y, X>;
+ //
+ // template<typename U>
+ // using alias = f<int, U>;
+ //
+ // The RHS of alias is f<int, U>, we deduced the template arguments of
+ // the return type of the deduction guide from it: Y->int, X->U
+ sema::TemplateDeductionInfo TDeduceInfo(Loc);
+ // Must initialize n elements, this is required by DeduceTemplateArguments.
+ SmallVector<DeducedTemplateArgument> DeduceResults(
+ F->getTemplateParameters()->size());
+
+ // FIXME: DeduceTemplateArguments stops immediately at the first
+ // non-deducible template argument. However, this doesn't seem to casue
+ // issues for practice cases, we probably need to extend it to continue
+ // performing deduction for rest of arguments to align with the C++
+ // standard.
+ SemaRef.DeduceTemplateArguments(
+ F->getTemplateParameters(), FReturnType->template_arguments(),
+ AliasRhsTemplateArgs, TDeduceInfo, DeduceResults,
+ /*NumberOfArgumentsMustMatch=*/false);
+
+ SmallVector<TemplateArgument> DeducedArgs;
+ SmallVector<unsigned> NonDeducedTemplateParamsInFIndex;
+ // !!NOTE: DeduceResults respects the sequence of template parameters of
+ // the deduction guide f.
+ for (unsigned Index = 0; Index < DeduceResults.size(); ++Index) {
+ if (const auto &D = DeduceResults[Index]; !D.isNull()) // Deduced
+ DeducedArgs.push_back(D);
+ else
+ NonDeducedTemplateParamsInFIndex.push_back(Index);
+ }
+ auto DeducedAliasTemplateParams =
+ TemplateParamsReferencedInTemplateArgumentList(
+ AliasTemplate->getTemplateParameters(), DeducedArgs);
+ // All template arguments null by default.
+ SmallVector<TemplateArgument> TemplateArgsForBuildingFPrime(
+ F->getTemplateParameters()->size());
+
+ // Create a template parameter list for the synthesized deduction guide f'.
+ //
+ // C++ [over.match.class.deduct]p3.2:
+ // If f is a function template, f' is a function template whose template
+ // parameter list consists of all the template parameters of A
+ // (including their default template arguments) that appear in the above
+ // deductions or (recursively) in their default template arguments
+ SmallVector<NamedDecl *> FPrimeTemplateParams;
+ // Store template arguments that refer to the newly-created template
+ // parameters, used for building `TemplateArgsForBuildingFPrime`.
+ SmallVector<TemplateArgument, 16> TransformedDeducedAliasArgs(
+ AliasTemplate->getTemplateParameters()->size());
+
+ for (unsigned AliasTemplateParamIdx : DeducedAliasTemplateParams) {
+ auto *TP =
+ AliasTemplate->getTemplateParameters()->getParam(AliasTemplateParamIdx);
+ // Rebuild any internal references to earlier parameters and reindex as
+ // we go.
+ MultiLevelTemplateArgumentList Args;
+ Args.setKind(TemplateSubstitutionKind::Rewrite);
+ Args.addOuterTemplateArguments(TransformedDeducedAliasArgs);
+ NamedDecl *NewParam = transformTemplateParameter(
+ SemaRef, AliasTemplate->getDeclContext(), TP, Args,
+ /*NewIndex=*/FPrimeTemplateParams.size(),
+ getTemplateParameterDepth(TP));
+ FPrimeTemplateParams.push_back(NewParam);
+
+ TemplateArgument NewTemplateArgument =
+ Context.getInjectedTemplateArg(NewParam);
+ TransformedDeducedAliasArgs[AliasTemplateParamIdx] = NewTemplateArgument;
+ }
+ unsigned FirstUndeducedParamIdx = FPrimeTemplateParams.size();
+ // ...followed by the template parameters of f that were not deduced
+ // (including their default template arguments)
+ for (unsigned FTemplateParamIdx : NonDeducedTemplateParamsInFIndex) {
+ auto *TP = F->getTemplateParameters()->getParam(FTemplateParamIdx);
+ MultiLevelTemplateArgumentList Args;
+ Args.setKind(TemplateSubstitutionKind::Rewrite);
+    // We take a shortcut here; it is OK to reuse the
+    // TemplateArgsForBuildingFPrime.
+ Args.addOuterTemplateArguments(TemplateArgsForBuildingFPrime);
+ NamedDecl *NewParam = transformTemplateParameter(
+ SemaRef, F->getDeclContext(), TP, Args, FPrimeTemplateParams.size(),
+ getTemplateParameterDepth(TP));
+ FPrimeTemplateParams.push_back(NewParam);
+
+ assert(TemplateArgsForBuildingFPrime[FTemplateParamIdx].isNull() &&
+ "The argument must be null before setting");
+ TemplateArgsForBuildingFPrime[FTemplateParamIdx] =
+ Context.getInjectedTemplateArg(NewParam);
+ }
+
+ // To form a deduction guide f' from f, we leverage clang's instantiation
+ // mechanism, we construct a template argument list where the template
+ // arguments refer to the newly-created template parameters of f', and
+ // then apply instantiation on this template argument list to instantiate
+ // f, this ensures all template parameter occurrences are updated
+ // correctly.
+ //
+  // The template argument list is formed from the `DeducedArgs` in two parts:
+  // 1) template parameters of the alias that appeared: transform the deduced
+  // template argument;
+  // 2) non-deduced template parameters of f: rebuild a
+  // template argument;
+ //
+ // 2) has been built already (when rebuilding the new template
+ // parameters), we now perform 1).
+ MultiLevelTemplateArgumentList Args;
+ Args.setKind(TemplateSubstitutionKind::Rewrite);
+ Args.addOuterTemplateArguments(TransformedDeducedAliasArgs);
+ for (unsigned Index = 0; Index < DeduceResults.size(); ++Index) {
+ const auto &D = DeduceResults[Index];
+ if (D.isNull()) {
+ // 2): Non-deduced template parameter has been built already.
+ assert(!TemplateArgsForBuildingFPrime[Index].isNull() &&
+ "template arguments for non-deduced template parameters should "
+ "be been set!");
+ continue;
+ }
+ TemplateArgumentLoc Input =
+ SemaRef.getTrivialTemplateArgumentLoc(D, QualType(), SourceLocation{});
+ TemplateArgumentLoc Output;
+ if (!SemaRef.SubstTemplateArgument(Input, Args, Output)) {
+ assert(TemplateArgsForBuildingFPrime[Index].isNull() &&
+ "InstantiatedArgs must be null before setting");
+ TemplateArgsForBuildingFPrime[Index] = Output.getArgument();
+ }
+ }
+
+ auto *TemplateArgListForBuildingFPrime =
+ TemplateArgumentList::CreateCopy(Context, TemplateArgsForBuildingFPrime);
+ // Form the f' by substituting the template arguments into f.
+ if (auto *FPrime = SemaRef.InstantiateFunctionDeclaration(
+ F, TemplateArgListForBuildingFPrime, AliasTemplate->getLocation(),
+ Sema::CodeSynthesisContext::BuildingDeductionGuides)) {
+ auto *GG = cast<CXXDeductionGuideDecl>(FPrime);
+
+ Expr *IsDeducible = buildIsDeducibleConstraint(
+ SemaRef, AliasTemplate, FPrime->getReturnType(), FPrimeTemplateParams);
+ Expr *RequiresClause =
+ buildAssociatedConstraints(SemaRef, F, AliasTemplate, DeduceResults,
+ FirstUndeducedParamIdx, IsDeducible);
+
+ auto *FPrimeTemplateParamList = TemplateParameterList::Create(
+ Context, AliasTemplate->getTemplateParameters()->getTemplateLoc(),
+ AliasTemplate->getTemplateParameters()->getLAngleLoc(),
+ FPrimeTemplateParams,
+ AliasTemplate->getTemplateParameters()->getRAngleLoc(),
+ /*RequiresClause=*/RequiresClause);
+ auto *Result = cast<FunctionTemplateDecl>(buildDeductionGuide(
+ SemaRef, AliasTemplate, FPrimeTemplateParamList,
+ GG->getCorrespondingConstructor(), GG->getExplicitSpecifier(),
+ GG->getTypeSourceInfo(), AliasTemplate->getBeginLoc(),
+ AliasTemplate->getLocation(), AliasTemplate->getEndLoc(),
+ F->isImplicit()));
+ cast<CXXDeductionGuideDecl>(Result->getTemplatedDecl())
+ ->setDeductionCandidateKind(GG->getDeductionCandidateKind());
+ return Result;
+ }
+ return nullptr;
+}
+
+void DeclareImplicitDeductionGuidesForTypeAlias(
+ Sema &SemaRef, TypeAliasTemplateDecl *AliasTemplate, SourceLocation Loc) {
+ if (AliasTemplate->isInvalidDecl())
+ return;
+ auto &Context = SemaRef.Context;
+  // FIXME: if an explicit deduction guide is declared after the first use of
+  // the type alias, we will not cover this explicit deduction guide. Fix this
+  // case.
+ if (hasDeclaredDeductionGuides(
+ Context.DeclarationNames.getCXXDeductionGuideName(AliasTemplate),
+ AliasTemplate->getDeclContext()))
+ return;
+ auto [Template, AliasRhsTemplateArgs] =
+ getRHSTemplateDeclAndArgs(SemaRef, AliasTemplate);
+ if (!Template)
+ return;
+ DeclarationNameInfo NameInfo(
+ Context.DeclarationNames.getCXXDeductionGuideName(Template), Loc);
+ LookupResult Guides(SemaRef, NameInfo, clang::Sema::LookupOrdinaryName);
+ SemaRef.LookupQualifiedName(Guides, Template->getDeclContext());
+ Guides.suppressDiagnostics();
+
+ for (auto *G : Guides) {
+ if (auto *DG = dyn_cast<CXXDeductionGuideDecl>(G)) {
+      // The deduction guide is a non-template function decl; we just clone it.
+ auto *FunctionType =
+ SemaRef.Context.getTrivialTypeSourceInfo(DG->getType());
+ FunctionProtoTypeLoc FPTL =
+ FunctionType->getTypeLoc().castAs<FunctionProtoTypeLoc>();
+
+ // Clone the parameters.
+ for (unsigned I = 0, N = DG->getNumParams(); I != N; ++I) {
+ const auto *P = DG->getParamDecl(I);
+ auto *TSI = SemaRef.Context.getTrivialTypeSourceInfo(P->getType());
+ ParmVarDecl *NewParam = ParmVarDecl::Create(
+ SemaRef.Context, G->getDeclContext(),
+ DG->getParamDecl(I)->getBeginLoc(), P->getLocation(), nullptr,
+ TSI->getType(), TSI, SC_None, nullptr);
+ NewParam->setScopeInfo(0, I);
+ FPTL.setParam(I, NewParam);
+ }
+ auto *Transformed = cast<FunctionDecl>(buildDeductionGuide(
+ SemaRef, AliasTemplate, /*TemplateParams=*/nullptr,
+ /*Constructor=*/nullptr, DG->getExplicitSpecifier(), FunctionType,
+ AliasTemplate->getBeginLoc(), AliasTemplate->getLocation(),
+ AliasTemplate->getEndLoc(), DG->isImplicit()));
+
+ // FIXME: Here the synthesized deduction guide is not a templated
+ // function. Per [dcl.decl]p4, the requires-clause shall be present only
+      // if the declarator declares a templated function; a bug in the standard?
+ auto *Constraint = buildIsDeducibleConstraint(
+ SemaRef, AliasTemplate, Transformed->getReturnType(), {});
+ if (auto *RC = DG->getTrailingRequiresClause()) {
+ auto Conjunction =
+ SemaRef.BuildBinOp(SemaRef.getCurScope(), SourceLocation{},
+ BinaryOperatorKind::BO_LAnd, RC, Constraint);
+ if (!Conjunction.isInvalid())
+ Constraint = Conjunction.getAs<Expr>();
+ }
+ Transformed->setTrailingRequiresClause(Constraint);
+ }
+ FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(G);
+ if (!F)
+ continue;
+ // The **aggregate** deduction guides are handled in a different code path
+ // (DeclareAggregateDeductionGuideFromInitList), which involves the tricky
+ // cache.
+ if (cast<CXXDeductionGuideDecl>(F->getTemplatedDecl())
+ ->getDeductionCandidateKind() == DeductionCandidate::Aggregate)
+ continue;
+
+ BuildDeductionGuideForTypeAlias(SemaRef, AliasTemplate, F, Loc);
+ }
+}
+
+// Build an aggregate deduction guide for a type alias template.
+FunctionTemplateDecl *DeclareAggregateDeductionGuideForTypeAlias(
+ Sema &SemaRef, TypeAliasTemplateDecl *AliasTemplate,
+ MutableArrayRef<QualType> ParamTypes, SourceLocation Loc) {
+ TemplateDecl *RHSTemplate =
+ getRHSTemplateDeclAndArgs(SemaRef, AliasTemplate).first;
+ if (!RHSTemplate)
+ return nullptr;
+
+ llvm::SmallVector<TypedefNameDecl *> TypedefDecls;
+ llvm::SmallVector<QualType> NewParamTypes;
+ ExtractTypeForDeductionGuide TypeAliasTransformer(SemaRef, TypedefDecls);
+ for (QualType P : ParamTypes) {
+ QualType Type = TypeAliasTransformer.TransformType(P);
+ if (Type.isNull())
+ return nullptr;
+ NewParamTypes.push_back(Type);
+ }
+
+ auto *RHSDeductionGuide = SemaRef.DeclareAggregateDeductionGuideFromInitList(
+ RHSTemplate, NewParamTypes, Loc);
+ if (!RHSDeductionGuide)
+ return nullptr;
+
+ for (TypedefNameDecl *TD : TypedefDecls)
+ TD->setDeclContext(RHSDeductionGuide->getTemplatedDecl());
+
+ return BuildDeductionGuideForTypeAlias(SemaRef, AliasTemplate,
+ RHSDeductionGuide, Loc);
+}
+
+} // namespace
+
+FunctionTemplateDecl *Sema::DeclareAggregateDeductionGuideFromInitList(
+ TemplateDecl *Template, MutableArrayRef<QualType> ParamTypes,
+ SourceLocation Loc) {
+ llvm::FoldingSetNodeID ID;
+ ID.AddPointer(Template);
+ for (auto &T : ParamTypes)
+ T.getCanonicalType().Profile(ID);
+ unsigned Hash = ID.ComputeHash();
+
+ auto Found = AggregateDeductionCandidates.find(Hash);
+ if (Found != AggregateDeductionCandidates.end()) {
+ CXXDeductionGuideDecl *GD = Found->getSecond();
+ return GD->getDescribedFunctionTemplate();
+ }
+
+ if (auto *AliasTemplate = llvm::dyn_cast<TypeAliasTemplateDecl>(Template)) {
+ if (auto *FTD = DeclareAggregateDeductionGuideForTypeAlias(
+ *this, AliasTemplate, ParamTypes, Loc)) {
+ auto *GD = cast<CXXDeductionGuideDecl>(FTD->getTemplatedDecl());
+ GD->setDeductionCandidateKind(DeductionCandidate::Aggregate);
+ AggregateDeductionCandidates[Hash] = GD;
+ return FTD;
+ }
+ }
+
+ if (CXXRecordDecl *DefRecord =
+ cast<CXXRecordDecl>(Template->getTemplatedDecl())->getDefinition()) {
+ if (TemplateDecl *DescribedTemplate =
+ DefRecord->getDescribedClassTemplate())
+ Template = DescribedTemplate;
+ }
+
+ DeclContext *DC = Template->getDeclContext();
+ if (DC->isDependentContext())
+ return nullptr;
+
+ ConvertConstructorToDeductionGuideTransform Transform(
+ *this, cast<ClassTemplateDecl>(Template));
+ if (!isCompleteType(Loc, Transform.DeducedType))
+ return nullptr;
+
+ // In case we were expanding a pack when we attempted to declare deduction
+ // guides, turn off pack expansion for everything we're about to do.
+ ArgumentPackSubstitutionIndexRAII SubstIndex(*this,
+ /*NewSubstitutionIndex=*/-1);
+ // Create a template instantiation record to track the "instantiation" of
+ // constructors into deduction guides.
+ InstantiatingTemplate BuildingDeductionGuides(
+ *this, Loc, Template,
+ Sema::InstantiatingTemplate::BuildingDeductionGuidesTag{});
+ if (BuildingDeductionGuides.isInvalid())
+ return nullptr;
+
+ ClassTemplateDecl *Pattern =
+ Transform.NestedPattern ? Transform.NestedPattern : Transform.Template;
+ ContextRAII SavedContext(*this, Pattern->getTemplatedDecl());
+
+ auto *FTD = cast<FunctionTemplateDecl>(
+ Transform.buildSimpleDeductionGuide(ParamTypes));
+ SavedContext.pop();
+ auto *GD = cast<CXXDeductionGuideDecl>(FTD->getTemplatedDecl());
+ GD->setDeductionCandidateKind(DeductionCandidate::Aggregate);
+ AggregateDeductionCandidates[Hash] = GD;
+ return FTD;
+}
+
+void Sema::DeclareImplicitDeductionGuides(TemplateDecl *Template,
+ SourceLocation Loc) {
+ if (auto *AliasTemplate = llvm::dyn_cast<TypeAliasTemplateDecl>(Template)) {
+ DeclareImplicitDeductionGuidesForTypeAlias(*this, AliasTemplate, Loc);
+ return;
+ }
+ if (CXXRecordDecl *DefRecord =
+ cast<CXXRecordDecl>(Template->getTemplatedDecl())->getDefinition()) {
+ if (TemplateDecl *DescribedTemplate =
+ DefRecord->getDescribedClassTemplate())
+ Template = DescribedTemplate;
+ }
+
+ DeclContext *DC = Template->getDeclContext();
+ if (DC->isDependentContext())
+ return;
+
+ ConvertConstructorToDeductionGuideTransform Transform(
+ *this, cast<ClassTemplateDecl>(Template));
+ if (!isCompleteType(Loc, Transform.DeducedType))
+ return;
+
+ if (hasDeclaredDeductionGuides(Transform.DeductionGuideName, DC))
+ return;
+
+ // In case we were expanding a pack when we attempted to declare deduction
+ // guides, turn off pack expansion for everything we're about to do.
+ ArgumentPackSubstitutionIndexRAII SubstIndex(*this, -1);
+ // Create a template instantiation record to track the "instantiation" of
+ // constructors into deduction guides.
+ InstantiatingTemplate BuildingDeductionGuides(
+ *this, Loc, Template,
+ Sema::InstantiatingTemplate::BuildingDeductionGuidesTag{});
+ if (BuildingDeductionGuides.isInvalid())
+ return;
+
+ // Convert declared constructors into deduction guide templates.
+ // FIXME: Skip constructors for which deduction must necessarily fail (those
+ // for which some class template parameter without a default argument never
+ // appears in a deduced context).
+ ClassTemplateDecl *Pattern =
+ Transform.NestedPattern ? Transform.NestedPattern : Transform.Template;
+ ContextRAII SavedContext(*this, Pattern->getTemplatedDecl());
+ llvm::SmallPtrSet<NamedDecl *, 8> ProcessedCtors;
+ bool AddedAny = false;
+ for (NamedDecl *D : LookupConstructors(Pattern->getTemplatedDecl())) {
+ D = D->getUnderlyingDecl();
+ if (D->isInvalidDecl() || D->isImplicit())
+ continue;
+
+ D = cast<NamedDecl>(D->getCanonicalDecl());
+
+ // Within C++20 modules, we may have multiple same constructors in
+ // multiple same RecordDecls. And it doesn't make sense to create
+ // duplicated deduction guides for the duplicated constructors.
+ if (ProcessedCtors.count(D))
+ continue;
+
+ auto *FTD = dyn_cast<FunctionTemplateDecl>(D);
+ auto *CD =
+ dyn_cast_or_null<CXXConstructorDecl>(FTD ? FTD->getTemplatedDecl() : D);
+ // Class-scope explicit specializations (MS extension) do not result in
+ // deduction guides.
+ if (!CD || (!FTD && CD->isFunctionTemplateSpecialization()))
+ continue;
+
+ // Cannot make a deduction guide when unparsed arguments are present.
+ if (llvm::any_of(CD->parameters(), [](ParmVarDecl *P) {
+ return !P || P->hasUnparsedDefaultArg();
+ }))
+ continue;
+
+ ProcessedCtors.insert(D);
+ Transform.transformConstructor(FTD, CD);
+ AddedAny = true;
+ }
+
+ // C++17 [over.match.class.deduct]
+ // -- If C is not defined or does not declare any constructors, an
+ // additional function template derived as above from a hypothetical
+ // constructor C().
+ if (!AddedAny)
+ Transform.buildSimpleDeductionGuide(std::nullopt);
+
+ // -- An additional function template derived as above from a hypothetical
+ // constructor C(C), called the copy deduction candidate.
+ cast<CXXDeductionGuideDecl>(
+ cast<FunctionTemplateDecl>(
+ Transform.buildSimpleDeductionGuide(Transform.DeducedType))
+ ->getTemplatedDecl())
+ ->setDeductionCandidateKind(DeductionCandidate::Copy);
+
+ SavedContext.pop();
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp
index e12186d7d82f..a09e3be83c45 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -20,7 +20,9 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/PrettyDeclStackTrace.h"
+#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Stack.h"
@@ -35,6 +37,7 @@
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TemplateInstCallback.h"
+#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TimeProfiler.h"
@@ -79,6 +82,94 @@ struct Response {
return R;
}
};
+
+// Retrieve the primary template for a lambda call operator. It's
+// unfortunate that we only have the mappings of call operators rather
+// than lambda classes.
+const FunctionDecl *
+getPrimaryTemplateOfGenericLambda(const FunctionDecl *LambdaCallOperator) {
+ if (!isLambdaCallOperator(LambdaCallOperator))
+ return LambdaCallOperator;
+ while (true) {
+ if (auto *FTD = dyn_cast_if_present<FunctionTemplateDecl>(
+ LambdaCallOperator->getDescribedTemplate());
+ FTD && FTD->getInstantiatedFromMemberTemplate()) {
+ LambdaCallOperator =
+ FTD->getInstantiatedFromMemberTemplate()->getTemplatedDecl();
+ } else if (LambdaCallOperator->getPrimaryTemplate()) {
+ // Cases where the lambda operator is instantiated in
+ // TemplateDeclInstantiator::VisitCXXMethodDecl.
+ LambdaCallOperator =
+ LambdaCallOperator->getPrimaryTemplate()->getTemplatedDecl();
+ } else if (auto *Prev = cast<CXXMethodDecl>(LambdaCallOperator)
+ ->getInstantiatedFromMemberFunction())
+ LambdaCallOperator = Prev;
+ else
+ break;
+ }
+ return LambdaCallOperator;
+}
+
+struct EnclosingTypeAliasTemplateDetails {
+ TypeAliasTemplateDecl *Template = nullptr;
+ TypeAliasTemplateDecl *PrimaryTypeAliasDecl = nullptr;
+ ArrayRef<TemplateArgument> AssociatedTemplateArguments;
+
+ explicit operator bool() noexcept { return Template; }
+};
+
+// Find the enclosing type alias template Decl from CodeSynthesisContexts, as
+// well as its primary template and instantiating template arguments.
+EnclosingTypeAliasTemplateDetails
+getEnclosingTypeAliasTemplateDecl(Sema &SemaRef) {
+ for (auto &CSC : llvm::reverse(SemaRef.CodeSynthesisContexts)) {
+ if (CSC.Kind != Sema::CodeSynthesisContext::SynthesisKind::
+ TypeAliasTemplateInstantiation)
+ continue;
+ EnclosingTypeAliasTemplateDetails Result;
+ auto *TATD = cast<TypeAliasTemplateDecl>(CSC.Entity),
+ *Next = TATD->getInstantiatedFromMemberTemplate();
+ Result = {
+ /*Template=*/TATD,
+ /*PrimaryTypeAliasDecl=*/TATD,
+ /*AssociatedTemplateArguments=*/CSC.template_arguments(),
+ };
+ while (Next) {
+ Result.PrimaryTypeAliasDecl = Next;
+ Next = Next->getInstantiatedFromMemberTemplate();
+ }
+ return Result;
+ }
+ return {};
+}
+
+// Check if we are currently inside of a lambda expression that is
+// surrounded by a using alias declaration. e.g.
+// template <class> using type = decltype([](auto) { ^ }());
+// We have to do so since a TypeAliasTemplateDecl (or a TypeAliasDecl) is never
+// a DeclContext, nor does it have an associated specialization Decl from which
+// we could collect these template arguments.
+bool isLambdaEnclosedByTypeAliasDecl(
+ const FunctionDecl *LambdaCallOperator,
+ const TypeAliasTemplateDecl *PrimaryTypeAliasDecl) {
+ struct Visitor : RecursiveASTVisitor<Visitor> {
+ Visitor(const FunctionDecl *CallOperator) : CallOperator(CallOperator) {}
+ bool VisitLambdaExpr(const LambdaExpr *LE) {
+      // Returning false bails out of the traversal, implying the Decl
+      // contains the lambda we are looking for.
+ return getPrimaryTemplateOfGenericLambda(LE->getCallOperator()) !=
+ CallOperator;
+ }
+ const FunctionDecl *CallOperator;
+ };
+
+ QualType Underlying =
+ PrimaryTypeAliasDecl->getTemplatedDecl()->getUnderlyingType();
+
+ return !Visitor(getPrimaryTemplateOfGenericLambda(LambdaCallOperator))
+ .TraverseType(Underlying);
+}
+
// Add template arguments from a variable template instantiation.
Response
HandleVarTemplateSpec(const VarTemplateSpecializationDecl *VarTemplSpec,
@@ -175,7 +266,7 @@ HandleClassTemplateSpec(const ClassTemplateSpecializationDecl *ClassTemplSpec,
return Response::UseNextDecl(ClassTemplSpec);
}
-Response HandleFunction(const FunctionDecl *Function,
+Response HandleFunction(Sema &SemaRef, const FunctionDecl *Function,
MultiLevelTemplateArgumentList &Result,
const FunctionDecl *Pattern, bool RelativeToPrimary,
bool ForConstraintInstantiation) {
@@ -198,6 +289,13 @@ Response HandleFunction(const FunctionDecl *Function,
TemplateArgs->asArray(),
/*Final=*/false);
+ if (RelativeToPrimary &&
+ (Function->getTemplateSpecializationKind() ==
+ TSK_ExplicitSpecialization ||
+ (Function->getFriendObjectKind() &&
+ !Function->getPrimaryTemplate()->getFriendObjectKind())))
+ return Response::UseNextDecl(Function);
+
// If this function was instantiated from a specialized member that is
// a function template, we're done.
assert(Function->getPrimaryTemplate() && "No function template?");
@@ -241,10 +339,38 @@ Response HandleFunctionTemplateDecl(const FunctionTemplateDecl *FTD,
while (const Type *Ty = NNS ? NNS->getAsType() : nullptr) {
if (NNS->isInstantiationDependent()) {
- if (const auto *TSTy = Ty->getAs<TemplateSpecializationType>())
+ if (const auto *TSTy = Ty->getAs<TemplateSpecializationType>()) {
+ ArrayRef<TemplateArgument> Arguments = TSTy->template_arguments();
+ // Prefer template arguments from the injected-class-type if possible.
+ // For example,
+ // ```cpp
+ // template <class... Pack> struct S {
+ // template <class T> void foo();
+ // };
+ // template <class... Pack> template <class T>
+ // ^^^^^^^^^^^^^ InjectedTemplateArgs
+ // They're of kind TemplateArgument::Pack, not of
+ // TemplateArgument::Type.
+ // void S<Pack...>::foo() {}
+ // ^^^^^^^
+ // TSTy->template_arguments() (which are of PackExpansionType)
+ // ```
+ // This meets the contract in
+ // TreeTransform::TryExpandParameterPacks that the template arguments
+ // for unexpanded parameters should be of a Pack kind.
+ if (TSTy->isCurrentInstantiation()) {
+ auto *RD = TSTy->getCanonicalTypeInternal()->getAsCXXRecordDecl();
+ if (ClassTemplateDecl *CTD = RD->getDescribedClassTemplate())
+ Arguments = CTD->getInjectedTemplateArgs();
+ else if (auto *Specialization =
+ dyn_cast<ClassTemplateSpecializationDecl>(RD))
+ Arguments =
+ Specialization->getTemplateInstantiationArgs().asArray();
+ }
Result.addOuterTemplateArguments(
- const_cast<FunctionTemplateDecl *>(FTD), TSTy->template_arguments(),
+ const_cast<FunctionTemplateDecl *>(FTD), Arguments,
/*Final=*/false);
+ }
}
NNS = NNS->getPrefix();
@@ -254,7 +380,7 @@ Response HandleFunctionTemplateDecl(const FunctionTemplateDecl *FTD,
return Response::ChangeDecl(FTD->getLexicalDeclContext());
}
-Response HandleRecordDecl(const CXXRecordDecl *Rec,
+Response HandleRecordDecl(Sema &SemaRef, const CXXRecordDecl *Rec,
MultiLevelTemplateArgumentList &Result,
ASTContext &Context,
bool ForConstraintInstantiation) {
@@ -283,11 +409,38 @@ Response HandleRecordDecl(const CXXRecordDecl *Rec,
return Response::ChangeDecl(Rec->getLexicalDeclContext());
}
- // This is to make sure we pick up the VarTemplateSpecializationDecl that this
- // lambda is defined inside of.
- if (Rec->isLambda())
+ // This is to make sure we pick up the VarTemplateSpecializationDecl or the
+ // TypeAliasTemplateDecl that this lambda is defined inside of.
+ if (Rec->isLambda()) {
if (const Decl *LCD = Rec->getLambdaContextDecl())
return Response::ChangeDecl(LCD);
+ // Retrieve the template arguments for a using alias declaration.
+ // This is necessary for constraint checking, since we always keep
+ // constraints relative to the primary template.
+ if (auto TypeAlias = getEnclosingTypeAliasTemplateDecl(SemaRef);
+ ForConstraintInstantiation && TypeAlias) {
+ if (isLambdaEnclosedByTypeAliasDecl(Rec->getLambdaCallOperator(),
+ TypeAlias.PrimaryTypeAliasDecl)) {
+ Result.addOuterTemplateArguments(TypeAlias.Template,
+ TypeAlias.AssociatedTemplateArguments,
+ /*Final=*/false);
+ // Visit the parent of the current type alias declaration rather than
+ // the lambda thereof.
+ // E.g., in the following example:
+ // struct S {
+ // template <class> using T = decltype([]<Concept> {} ());
+ // };
+ // void foo() {
+ // S::T var;
+ // }
+ // The instantiated lambda expression (which we're visiting at 'var')
+ // has a function DeclContext 'foo' rather than the Record DeclContext
+      // S. This seems to be an oversight; we may want to set a
+ // Sema Context from the CXXScopeSpec before substituting into T.
+ return Response::ChangeDecl(TypeAlias.Template->getDeclContext());
+ }
+ }
+ }
return Response::UseNextDecl(Rec);
}
@@ -308,37 +461,9 @@ Response HandleGenericDeclContext(const Decl *CurDecl) {
} // namespace TemplateInstArgsHelpers
} // namespace
-/// Retrieve the template argument list(s) that should be used to
-/// instantiate the definition of the given declaration.
-///
-/// \param ND the declaration for which we are computing template instantiation
-/// arguments.
-///
-/// \param DC In the event we don't HAVE a declaration yet, we instead provide
-/// the decl context where it will be created. In this case, the `Innermost`
-/// should likely be provided. If ND is non-null, this is ignored.
-///
-/// \param Innermost if non-NULL, specifies a template argument list for the
-/// template declaration passed as ND.
-///
-/// \param RelativeToPrimary true if we should get the template
-/// arguments relative to the primary template, even when we're
-/// dealing with a specialization. This is only relevant for function
-/// template specializations.
-///
-/// \param Pattern If non-NULL, indicates the pattern from which we will be
-/// instantiating the definition of the given declaration, \p ND. This is
-/// used to determine the proper set of template instantiation arguments for
-/// friend function template specializations.
-///
-/// \param ForConstraintInstantiation when collecting arguments,
-/// ForConstraintInstantiation indicates we should continue looking when
-/// encountering a lambda generic call operator, and continue looking for
-/// arguments on an enclosing class template.
-
MultiLevelTemplateArgumentList Sema::getTemplateInstantiationArgs(
const NamedDecl *ND, const DeclContext *DC, bool Final,
- const TemplateArgumentList *Innermost, bool RelativeToPrimary,
+ std::optional<ArrayRef<TemplateArgument>> Innermost, bool RelativeToPrimary,
const FunctionDecl *Pattern, bool ForConstraintInstantiation,
bool SkipForSpecialization) {
assert((ND || DC) && "Can't find arguments for a decl if one isn't provided");
@@ -352,8 +477,8 @@ MultiLevelTemplateArgumentList Sema::getTemplateInstantiationArgs(
CurDecl = Decl::castFromDeclContext(DC);
if (Innermost) {
- Result.addOuterTemplateArguments(const_cast<NamedDecl *>(ND),
- Innermost->asArray(), Final);
+ Result.addOuterTemplateArguments(const_cast<NamedDecl *>(ND), *Innermost,
+ Final);
// Populate placeholder template arguments for TemplateTemplateParmDecls.
// This is essential for the case e.g.
//
@@ -381,10 +506,11 @@ MultiLevelTemplateArgumentList Sema::getTemplateInstantiationArgs(
R = HandleClassTemplateSpec(ClassTemplSpec, Result,
SkipForSpecialization);
} else if (const auto *Function = dyn_cast<FunctionDecl>(CurDecl)) {
- R = HandleFunction(Function, Result, Pattern, RelativeToPrimary,
+ R = HandleFunction(*this, Function, Result, Pattern, RelativeToPrimary,
ForConstraintInstantiation);
} else if (const auto *Rec = dyn_cast<CXXRecordDecl>(CurDecl)) {
- R = HandleRecordDecl(Rec, Result, Context, ForConstraintInstantiation);
+ R = HandleRecordDecl(*this, Rec, Result, Context,
+ ForConstraintInstantiation);
} else if (const auto *CSD =
dyn_cast<ImplicitConceptSpecializationDecl>(CurDecl)) {
R = HandleImplicitConceptSpecializationDecl(CSD, Result);
@@ -441,6 +567,7 @@ bool Sema::CodeSynthesisContext::isInstantiationRecord() const {
case BuildingBuiltinDumpStructCall:
case LambdaExpressionSubstitution:
case BuildingDeductionGuides:
+ case TypeAliasTemplateInstantiation:
return false;
// This function should never be called when Kind's value is Memoization.
@@ -519,9 +646,9 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(
: InstantiatingTemplate(SemaRef, Kind, PointOfInstantiation,
InstantiationRange, FunctionTemplate, nullptr,
TemplateArgs, &DeductionInfo) {
- assert(
- Kind == CodeSynthesisContext::ExplicitTemplateArgumentSubstitution ||
- Kind == CodeSynthesisContext::DeducedTemplateArgumentSubstitution);
+ assert(Kind == CodeSynthesisContext::ExplicitTemplateArgumentSubstitution ||
+ Kind == CodeSynthesisContext::DeducedTemplateArgumentSubstitution ||
+ Kind == CodeSynthesisContext::BuildingDeductionGuides);
}
Sema::InstantiatingTemplate::InstantiatingTemplate(
@@ -587,6 +714,15 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(
TemplateArgs) {}
Sema::InstantiatingTemplate::InstantiatingTemplate(
+ Sema &SemaRef, SourceLocation PointOfInstantiation,
+ TypeAliasTemplateDecl *Entity, ArrayRef<TemplateArgument> TemplateArgs,
+ SourceRange InstantiationRange)
+ : InstantiatingTemplate(
+ SemaRef, CodeSynthesisContext::TypeAliasTemplateInstantiation,
+ PointOfInstantiation, InstantiationRange, /*Entity=*/Entity,
+ /*Template=*/nullptr, TemplateArgs) {}
+
+Sema::InstantiatingTemplate::InstantiatingTemplate(
Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template,
NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange)
@@ -758,8 +894,6 @@ bool Sema::InstantiatingTemplate::CheckInstantiationDepth(
return true;
}
-/// Prints the current instantiation stack through a series of
-/// notes.
void Sema::PrintInstantiationStack() {
// Determine which template instantiations to skip, if any.
unsigned SkipStart = CodeSynthesisContexts.size(), SkipEnd = SkipStart;
@@ -825,11 +959,6 @@ void Sema::PrintInstantiationStack() {
Diags.Report(Active->PointOfInstantiation,
diag::note_template_class_instantiation_here)
<< CTD << Active->InstantiationRange;
- } else {
- Diags.Report(Active->PointOfInstantiation,
- diag::note_template_type_alias_instantiation_here)
- << cast<TypeAliasTemplateDecl>(D)
- << Active->InstantiationRange;
}
break;
}
@@ -989,7 +1118,8 @@ void Sema::PrintInstantiationStack() {
case CodeSynthesisContext::DeclaringSpecialMember:
Diags.Report(Active->PointOfInstantiation,
diag::note_in_declaration_of_implicit_special_member)
- << cast<CXXRecordDecl>(Active->Entity) << Active->SpecialMember;
+ << cast<CXXRecordDecl>(Active->Entity)
+ << llvm::to_underlying(Active->SpecialMember);
break;
case CodeSynthesisContext::DeclaringImplicitEqualityComparison:
@@ -1007,7 +1137,8 @@ void Sema::PrintInstantiationStack() {
auto *MD = cast<CXXMethodDecl>(FD);
Diags.Report(Active->PointOfInstantiation,
diag::note_member_synthesized_at)
- << MD->isExplicitlyDefaulted() << DFK.asSpecialMember()
+ << MD->isExplicitlyDefaulted()
+ << llvm::to_underlying(DFK.asSpecialMember())
<< Context.getTagDeclType(MD->getParent());
} else if (DFK.isComparison()) {
QualType RecordType = FD->getParamDecl(0)
@@ -1103,6 +1234,12 @@ void Sema::PrintInstantiationStack() {
Diags.Report(Active->PointOfInstantiation,
diag::note_building_deduction_guide_here);
break;
+ case CodeSynthesisContext::TypeAliasTemplateInstantiation:
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_template_type_alias_instantiation_here)
+ << cast<TypeAliasTemplateDecl>(Active->Entity)
+ << Active->InstantiationRange;
+ break;
}
}
}
@@ -1118,12 +1255,13 @@ std::optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
++Active)
{
switch (Active->Kind) {
- case CodeSynthesisContext::TemplateInstantiation:
+ case CodeSynthesisContext::TypeAliasTemplateInstantiation:
// An instantiation of an alias template may or may not be a SFINAE
// context, depending on what else is on the stack.
if (isa<TypeAliasTemplateDecl>(Active->Entity))
break;
[[fallthrough]];
+ case CodeSynthesisContext::TemplateInstantiation:
case CodeSynthesisContext::DefaultFunctionArgumentInstantiation:
case CodeSynthesisContext::ExceptionSpecInstantiation:
case CodeSynthesisContext::ConstraintsCheck:
@@ -1382,6 +1520,7 @@ namespace {
NamedDecl *FirstQualifierInScope = nullptr,
bool AllowInjectedClassName = false);
+ const CXXAssumeAttr *TransformCXXAssumeAttr(const CXXAssumeAttr *AA);
const LoopHintAttr *TransformLoopHintAttr(const LoopHintAttr *LH);
const NoInlineAttr *TransformStmtNoInlineAttr(const Stmt *OrigS,
const Stmt *InstS,
@@ -1418,6 +1557,54 @@ namespace {
return inherited::TransformFunctionProtoType(TLB, TL);
}
+ QualType TransformInjectedClassNameType(TypeLocBuilder &TLB,
+ InjectedClassNameTypeLoc TL) {
+ auto Type = inherited::TransformInjectedClassNameType(TLB, TL);
+ // Special case for transforming a deduction guide, we return a
+ // transformed TemplateSpecializationType.
+ if (Type.isNull() &&
+ SemaRef.CodeSynthesisContexts.back().Kind ==
+ Sema::CodeSynthesisContext::BuildingDeductionGuides) {
+ // Return a TemplateSpecializationType for transforming a deduction
+ // guide.
+ if (auto *ICT = TL.getType()->getAs<InjectedClassNameType>()) {
+ auto Type =
+ inherited::TransformType(ICT->getInjectedSpecializationType());
+ TLB.pushTrivial(SemaRef.Context, Type, TL.getNameLoc());
+ return Type;
+ }
+ }
+ return Type;
+ }
+ // Override the default version to handle a rewrite-template-arg-pack case
+ // for building a deduction guide.
+ bool TransformTemplateArgument(const TemplateArgumentLoc &Input,
+ TemplateArgumentLoc &Output,
+ bool Uneval = false) {
+ const TemplateArgument &Arg = Input.getArgument();
+ std::vector<TemplateArgument> TArgs;
+ switch (Arg.getKind()) {
+ case TemplateArgument::Pack:
+ // Literally rewrite the template argument pack, instead of unpacking
+ // it.
+ for (auto &pack : Arg.getPackAsArray()) {
+ TemplateArgumentLoc Input = SemaRef.getTrivialTemplateArgumentLoc(
+ pack, QualType(), SourceLocation{});
+ TemplateArgumentLoc Output;
+ if (SemaRef.SubstTemplateArgument(Input, TemplateArgs, Output))
+ return true; // fails
+ TArgs.push_back(Output.getArgument());
+ }
+ Output = SemaRef.getTrivialTemplateArgumentLoc(
+ TemplateArgument(llvm::ArrayRef(TArgs).copy(SemaRef.Context)),
+ QualType(), SourceLocation{});
+ return false;
+ default:
+ break;
+ }
+ return inherited::TransformTemplateArgument(Input, Output, Uneval);
+ }
+
template<typename Fn>
QualType TransformFunctionProtoType(TypeLocBuilder &TLB,
FunctionProtoTypeLoc TL,
@@ -1451,6 +1638,23 @@ namespace {
SubstTemplateTypeParmPackTypeLoc TL,
bool SuppressObjCLifetime);
+ CXXRecordDecl::LambdaDependencyKind
+ ComputeLambdaDependency(LambdaScopeInfo *LSI) {
+ if (auto TypeAlias =
+ TemplateInstArgsHelpers::getEnclosingTypeAliasTemplateDecl(
+ getSema());
+ TypeAlias && TemplateInstArgsHelpers::isLambdaEnclosedByTypeAliasDecl(
+ LSI->CallOperator, TypeAlias.PrimaryTypeAliasDecl)) {
+ unsigned TypeAliasDeclDepth = TypeAlias.Template->getTemplateDepth();
+ if (TypeAliasDeclDepth >= TemplateArgs.getNumSubstitutedLevels())
+ return CXXRecordDecl::LambdaDependencyKind::LDK_AlwaysDependent;
+ for (const TemplateArgument &TA : TypeAlias.AssociatedTemplateArguments)
+ if (TA.isDependent())
+ return CXXRecordDecl::LambdaDependencyKind::LDK_AlwaysDependent;
+ }
+ return inherited::ComputeLambdaDependency(LSI);
+ }
+
ExprResult TransformLambdaExpr(LambdaExpr *E) {
LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true);
Sema::ConstraintEvalRAII<TemplateInstantiator> RAII(*this);
@@ -1625,7 +1829,7 @@ Decl *TemplateInstantiator::TransformDecl(SourceLocation Loc, Decl *D) {
Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
}
- TemplateName Template = Arg.getAsTemplate().getNameToSubstitute();
+ TemplateName Template = Arg.getAsTemplate();
assert(!Template.isNull() && Template.getAsTemplateDecl() &&
"Wrong kind of template template argument");
return Template.getAsTemplateDecl();
@@ -1798,10 +2002,8 @@ TemplateName TemplateInstantiator::TransformTemplateName(
Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
}
- TemplateName Template = Arg.getAsTemplate().getNameToSubstitute();
+ TemplateName Template = Arg.getAsTemplate();
assert(!Template.isNull() && "Null template template argument");
- assert(!Template.getAsQualifiedTemplateName() &&
- "template decl to substitute is qualified?");
if (Final)
return Template;
@@ -1821,8 +2023,8 @@ TemplateName TemplateInstantiator::TransformTemplateName(
if (SubstPack->getFinal())
return Template;
return getSema().Context.getSubstTemplateTemplateParm(
- Template.getNameToSubstitute(), SubstPack->getAssociatedDecl(),
- SubstPack->getIndex(), getPackIndex(Pack));
+ Template, SubstPack->getAssociatedDecl(), SubstPack->getIndex(),
+ getPackIndex(Pack));
}
return inherited::TransformTemplateName(SS, Name, NameLoc, ObjectType,
@@ -1898,6 +2100,21 @@ TemplateInstantiator::TransformTemplateParmRefExpr(DeclRefExpr *E,
Arg, PackIndex);
}
+const CXXAssumeAttr *
+TemplateInstantiator::TransformCXXAssumeAttr(const CXXAssumeAttr *AA) {
+ ExprResult Res = getDerived().TransformExpr(AA->getAssumption());
+ if (!Res.isUsable())
+ return AA;
+
+ Res = getSema().BuildCXXAssumeExpr(Res.get(), AA->getAttrName(),
+ AA->getRange());
+ if (!Res.isUsable())
+ return AA;
+
+ return CXXAssumeAttr::CreateImplicit(getSema().Context, Res.get(),
+ AA->getRange());
+}
+
const LoopHintAttr *
TemplateInstantiator::TransformLoopHintAttr(const LoopHintAttr *LH) {
Expr *TransformedExpr = getDerived().TransformExpr(LH->getValue()).get();
@@ -1906,13 +2123,26 @@ TemplateInstantiator::TransformLoopHintAttr(const LoopHintAttr *LH) {
return LH;
// Generate error if there is a problem with the value.
- if (getSema().CheckLoopHintExpr(TransformedExpr, LH->getLocation()))
+ if (getSema().CheckLoopHintExpr(TransformedExpr, LH->getLocation(),
+ LH->getSemanticSpelling() ==
+ LoopHintAttr::Pragma_unroll))
return LH;
+ LoopHintAttr::OptionType Option = LH->getOption();
+ LoopHintAttr::LoopHintState State = LH->getState();
+
+ llvm::APSInt ValueAPS =
+ TransformedExpr->EvaluateKnownConstInt(getSema().getASTContext());
+ // The values of 0 and 1 block any unrolling of the loop.
+ if (ValueAPS.isZero() || ValueAPS.isOne()) {
+ Option = LoopHintAttr::Unroll;
+ State = LoopHintAttr::Disable;
+ }
+
// Create new LoopHintValueAttr with integral expression in place of the
// non-type template parameter.
- return LoopHintAttr::CreateImplicit(getSema().Context, LH->getOption(),
- LH->getState(), TransformedExpr, *LH);
+ return LoopHintAttr::CreateImplicit(getSema().Context, Option, State,
+ TransformedExpr, *LH);
}
const NoInlineAttr *TemplateInstantiator::TransformStmtNoInlineAttr(
const Stmt *OrigS, const Stmt *InstS, const NoInlineAttr *A) {
@@ -2257,10 +2487,7 @@ TemplateInstantiator::TransformTemplateTypeParmType(TypeLocBuilder &TLB,
assert(Arg.getKind() == TemplateArgument::Type &&
"unexpected nontype template argument kind in template rewrite");
QualType NewT = Arg.getAsType();
- assert(isa<TemplateTypeParmType>(NewT) &&
- "type parm not rewritten to type parm");
- auto NewTL = TLB.push<TemplateTypeParmTypeLoc>(NewT);
- NewTL.setNameLoc(TL.getNameLoc());
+ TLB.pushTrivial(SemaRef.Context, NewT, TL.getNameLoc());
return NewT;
}
@@ -2352,16 +2579,12 @@ createSubstDiag(Sema &S, TemplateDeductionInfo &Info,
} else {
ErrorLoc = Info.getLocation();
}
- char *MessageBuf = new (S.Context) char[Message.size()];
- std::copy(Message.begin(), Message.end(), MessageBuf);
SmallString<128> Entity;
llvm::raw_svector_ostream OS(Entity);
Printer(OS);
- char *EntityBuf = new (S.Context) char[Entity.size()];
- std::copy(Entity.begin(), Entity.end(), EntityBuf);
- return new (S.Context) concepts::Requirement::SubstitutionDiagnostic{
- StringRef(EntityBuf, Entity.size()), ErrorLoc,
- StringRef(MessageBuf, Message.size())};
+ const ASTContext &C = S.Context;
+ return new (C) concepts::Requirement::SubstitutionDiagnostic{
+ C.backupStr(Entity), ErrorLoc, C.backupStr(Message)};
}
concepts::Requirement::SubstitutionDiagnostic *
@@ -2370,10 +2593,9 @@ concepts::createSubstDiagAt(Sema &S, SourceLocation Location,
SmallString<128> Entity;
llvm::raw_svector_ostream OS(Entity);
Printer(OS);
- char *EntityBuf = new (S.Context) char[Entity.size()];
- llvm::copy(Entity, EntityBuf);
- return new (S.Context) concepts::Requirement::SubstitutionDiagnostic{
- /*SubstitutedEntity=*/StringRef(EntityBuf, Entity.size()),
+ const ASTContext &C = S.Context;
+ return new (C) concepts::Requirement::SubstitutionDiagnostic{
+ /*SubstitutedEntity=*/C.backupStr(Entity),
/*DiagLoc=*/Location, /*DiagMessage=*/StringRef()};
}
@@ -2481,7 +2703,7 @@ TemplateInstantiator::TransformExprRequirement(concepts::ExprRequirement *Req) {
if (TPLInst.isInvalid())
return nullptr;
TemplateParameterList *TPL = TransformTemplateParameterList(OrigTPL);
- if (!TPL)
+ if (!TPL || Trap.hasErrorOccurred())
TransRetReq.emplace(createSubstDiag(SemaRef, Info,
[&] (llvm::raw_ostream& OS) {
RetReq.getTypeConstraint()->getImmediatelyDeclaredConstraint()
@@ -2549,56 +2771,23 @@ TemplateInstantiator::TransformNestedRequirement(
assert(!Trap.hasErrorOccurred() && "Substitution failures must be handled "
"by CheckConstraintSatisfaction.");
}
+ ASTContext &C = SemaRef.Context;
if (TransConstraint.isUsable() &&
TransConstraint.get()->isInstantiationDependent())
- return new (SemaRef.Context)
- concepts::NestedRequirement(TransConstraint.get());
+ return new (C) concepts::NestedRequirement(TransConstraint.get());
if (TransConstraint.isInvalid() || !TransConstraint.get() ||
Satisfaction.HasSubstitutionFailure()) {
SmallString<128> Entity;
llvm::raw_svector_ostream OS(Entity);
Req->getConstraintExpr()->printPretty(OS, nullptr,
SemaRef.getPrintingPolicy());
- char *EntityBuf = new (SemaRef.Context) char[Entity.size()];
- std::copy(Entity.begin(), Entity.end(), EntityBuf);
- return new (SemaRef.Context) concepts::NestedRequirement(
- SemaRef.Context, StringRef(EntityBuf, Entity.size()), Satisfaction);
+ return new (C) concepts::NestedRequirement(
+ SemaRef.Context, C.backupStr(Entity), Satisfaction);
}
- return new (SemaRef.Context) concepts::NestedRequirement(
- SemaRef.Context, TransConstraint.get(), Satisfaction);
+ return new (C)
+ concepts::NestedRequirement(C, TransConstraint.get(), Satisfaction);
}
-
-/// Perform substitution on the type T with a given set of template
-/// arguments.
-///
-/// This routine substitutes the given template arguments into the
-/// type T and produces the instantiated type.
-///
-/// \param T the type into which the template arguments will be
-/// substituted. If this type is not dependent, it will be returned
-/// immediately.
-///
-/// \param Args the template arguments that will be
-/// substituted for the top-level template parameters within T.
-///
-/// \param Loc the location in the source code where this substitution
-/// is being performed. It will typically be the location of the
-/// declarator (if we're instantiating the type of some declaration)
-/// or the location of the type in the source code (if, e.g., we're
-/// instantiating the type of a cast expression).
-///
-/// \param Entity the name of the entity associated with a declaration
-/// being instantiated (if any). May be empty to indicate that there
-/// is no such entity (if, e.g., this is a type that occurs as part of
-/// a cast expression) or that the entity has no name (e.g., an
-/// unnamed function parameter).
-///
-/// \param AllowDeducedTST Whether a DeducedTemplateSpecializationType is
-/// acceptable as the top level type of the result.
-///
-/// \returns If the instantiation succeeds, the instantiated
-/// type. Otherwise, produces diagnostics and returns a NULL type.
TypeSourceInfo *Sema::SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &Args,
SourceLocation Loc,
@@ -2686,10 +2875,6 @@ static bool NeedsInstantiationAsFunctionType(TypeSourceInfo *T) {
return false;
}
-/// A form of SubstType intended specifically for instantiating the
-/// type of a FunctionDecl. Its purpose is solely to force the
-/// instantiation of default-argument expressions and to avoid
-/// instantiating an exception-specification.
TypeSourceInfo *Sema::SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &Args,
SourceLocation Loc,
@@ -2872,7 +3057,8 @@ bool Sema::SubstTypeConstraint(
}
return AttachTypeConstraint(
TC->getNestedNameSpecifierLoc(), TC->getConceptNameInfo(),
- TC->getNamedConcept(), &InstArgs, Inst,
+ TC->getNamedConcept(),
+ /*FoundDecl=*/TC->getConceptReference()->getFoundDecl(), &InstArgs, Inst,
Inst->isParameterPack()
? cast<CXXFoldExpr>(TC->getImmediatelyDeclaredConstraint())
->getEllipsisLoc()
@@ -3000,9 +3186,6 @@ ParmVarDecl *Sema::SubstParmVarDecl(
return NewParm;
}
-/// Substitute the given template arguments into the given set of
-/// parameters, producing the set of parameter types that would be generated
-/// from such a substitution.
bool Sema::SubstParmTypes(
SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
@@ -3020,7 +3203,6 @@ bool Sema::SubstParmTypes(
Loc, Params, nullptr, ExtParamInfos, ParamTypes, OutParams, ParamInfos);
}
-/// Substitute the given template arguments into the default argument.
bool Sema::SubstDefaultArgument(
SourceLocation Loc,
ParmVarDecl *Param,
@@ -3049,6 +3231,7 @@ bool Sema::SubstDefaultArgument(
// default argument expression appears.
ContextRAII SavedContext(*this, FD);
std::unique_ptr<LocalInstantiationScope> LIS;
+ MultiLevelTemplateArgumentList NewTemplateArgs = TemplateArgs;
if (ForCallExpr) {
// When instantiating a default argument due to use in a call expression,
@@ -3061,11 +3244,20 @@ bool Sema::SubstDefaultArgument(
/*ForDefinition*/ false);
if (addInstantiatedParametersToScope(FD, PatternFD, *LIS, TemplateArgs))
return true;
+ const FunctionTemplateDecl *PrimaryTemplate = FD->getPrimaryTemplate();
+ if (PrimaryTemplate && PrimaryTemplate->isOutOfLine()) {
+ TemplateArgumentList *CurrentTemplateArgumentList =
+ TemplateArgumentList::CreateCopy(getASTContext(),
+ TemplateArgs.getInnermost());
+ NewTemplateArgs = getTemplateInstantiationArgs(
+ FD, FD->getDeclContext(), /*Final=*/false,
+ CurrentTemplateArgumentList->asArray(), /*RelativeToPrimary=*/true);
+ }
}
runWithSufficientStackSpace(Loc, [&] {
- Result = SubstInitializer(PatternExpr, TemplateArgs,
- /*DirectInit*/false);
+ Result = SubstInitializer(PatternExpr, NewTemplateArgs,
+ /*DirectInit*/ false);
});
}
if (Result.isInvalid())
@@ -3102,12 +3294,6 @@ bool Sema::SubstDefaultArgument(
return false;
}
-/// Perform substitution on the base class specifiers of the
-/// given class template specialization.
-///
-/// Produces a diagnostic and returns true on error, returns false and
-/// attaches the instantiated base classes to the class template
-/// specialization if successful.
bool
Sema::SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
@@ -3222,28 +3408,6 @@ namespace clang {
}
}
-/// Instantiate the definition of a class from a given pattern.
-///
-/// \param PointOfInstantiation The point of instantiation within the
-/// source code.
-///
-/// \param Instantiation is the declaration whose definition is being
-/// instantiated. This will be either a class template specialization
-/// or a member class of a class template specialization.
-///
-/// \param Pattern is the pattern from which the instantiation
-/// occurs. This will be either the declaration of a class template or
-/// the declaration of a member class of a class template.
-///
-/// \param TemplateArgs The template arguments to be substituted into
-/// the pattern.
-///
-/// \param TSK the kind of implicit or explicit instantiation to perform.
-///
-/// \param Complain whether to complain if the class cannot be instantiated due
-/// to the lack of a definition.
-///
-/// \returns true if an error occurred, false otherwise.
bool
Sema::InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
@@ -3258,11 +3422,16 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
return true;
llvm::TimeTraceScope TimeScope("InstantiateClass", [&]() {
- std::string Name;
- llvm::raw_string_ostream OS(Name);
+ llvm::TimeTraceMetadata M;
+ llvm::raw_string_ostream OS(M.Detail);
Instantiation->getNameForDiagnostic(OS, getPrintingPolicy(),
/*Qualified=*/true);
- return Name;
+ if (llvm::isTimeTraceVerbose()) {
+ auto Loc = SourceMgr.getExpansionLoc(Instantiation->getLocation());
+ M.File = SourceMgr.getFilename(Loc);
+ M.Line = SourceMgr.getExpansionLineNumber(Loc);
+ }
+ return M;
});
Pattern = PatternDef;
@@ -3489,21 +3658,6 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
return Instantiation->isInvalidDecl();
}
-/// Instantiate the definition of an enum from a given pattern.
-///
-/// \param PointOfInstantiation The point of instantiation within the
-/// source code.
-/// \param Instantiation is the declaration whose definition is being
-/// instantiated. This will be a member enumeration of a class
-/// temploid specialization, or a local enumeration within a
-/// function temploid specialization.
-/// \param Pattern The templated declaration from which the instantiation
-/// occurs.
-/// \param TemplateArgs The template arguments to be substituted into
-/// the pattern.
-/// \param TSK The kind of implicit or explicit instantiation to perform.
-///
-/// \return \c true if an error occurred, \c false otherwise.
bool Sema::InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
@@ -3554,21 +3708,6 @@ bool Sema::InstantiateEnum(SourceLocation PointOfInstantiation,
return Instantiation->isInvalidDecl();
}
-
-/// Instantiate the definition of a field from the given pattern.
-///
-/// \param PointOfInstantiation The point of instantiation within the
-/// source code.
-/// \param Instantiation is the declaration whose definition is being
-/// instantiated. This will be a class of a class temploid
-/// specialization, or a local enumeration within a function temploid
-/// specialization.
-/// \param Pattern The templated declaration from which the instantiation
-/// occurs.
-/// \param TemplateArgs The template arguments to be substituted into
-/// the pattern.
-///
-/// \return \c true if an error occurred, \c false otherwise.
bool Sema::InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs) {
@@ -3655,8 +3794,9 @@ bool Sema::usesPartialOrExplicitSpecialization(
->getPartialSpecializations(PartialSpecs);
for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I) {
TemplateDeductionInfo Info(Loc);
- if (!DeduceTemplateArguments(PartialSpecs[I],
- ClassTemplateSpec->getTemplateArgs(), Info))
+ if (DeduceTemplateArguments(PartialSpecs[I],
+ ClassTemplateSpec->getTemplateArgs().asArray(),
+ Info) == TemplateDeductionResult::Success)
return true;
}
@@ -3700,8 +3840,9 @@ getPatternForClassTemplateSpecialization(
for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I) {
ClassTemplatePartialSpecializationDecl *Partial = PartialSpecs[I];
TemplateDeductionInfo Info(FailedCandidates.getLocation());
- if (Sema::TemplateDeductionResult Result = S.DeduceTemplateArguments(
- Partial, ClassTemplateSpec->getTemplateArgs(), Info)) {
+ if (TemplateDeductionResult Result = S.DeduceTemplateArguments(
+ Partial, ClassTemplateSpec->getTemplateArgs().asArray(), Info);
+ Result != TemplateDeductionResult::Success) {
// Store the failed-deduction information for use in diagnostics, later.
// TODO: Actually use the failed-deduction info?
FailedCandidates.addCandidate().set(
@@ -3834,9 +3975,6 @@ bool Sema::InstantiateClassTemplateSpecialization(
getTemplateInstantiationArgs(ClassTemplateSpec), TSK, Complain);
}
-/// Instantiates the definitions of all of the member
-/// of the given class, which is an instantiation of a class template
-/// or a member class of a template.
void
Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
@@ -4066,9 +4204,6 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
}
}
-/// Instantiate the definitions of all of the members of the
-/// given class template specialization, which was named as part of an
-/// explicit instantiation.
void
Sema::InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
@@ -4098,6 +4233,15 @@ Sema::SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs) {
return Instantiator.TransformStmt(S);
}
+bool Sema::SubstTemplateArgument(
+ const TemplateArgumentLoc &Input,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateArgumentLoc &Output, SourceLocation Loc,
+ const DeclarationName &Entity) {
+ TemplateInstantiator Instantiator(*this, TemplateArgs, Loc, Entity);
+ return Instantiator.TransformTemplateArgument(Input, Output);
+}
+
bool Sema::SubstTemplateArguments(
ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
@@ -4169,7 +4313,6 @@ Sema::SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
return Instantiator.TransformNestedNameSpecifierLoc(NNS);
}
-/// Do template substitution on declaration name info.
DeclarationNameInfo
Sema::SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs) {
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index fbc8572ea0e0..a12d2eff1d2c 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -26,7 +26,12 @@
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaAMDGPU.h"
+#include "clang/Sema/SemaCUDA.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaOpenMP.h"
+#include "clang/Sema/SemaSwift.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateInstCallback.h"
#include "llvm/Support/TimeProfiler.h"
@@ -398,7 +403,7 @@ static void instantiateOMPDeclareSimdDeclAttr(
++SI;
}
LinModifiers.append(Attr.modifiers_begin(), Attr.modifiers_end());
- (void)S.ActOnOpenMPDeclareSimdDirective(
+ (void)S.OpenMP().ActOnOpenMPDeclareSimdDirective(
S.ConvertDeclToDeclGroup(New), Attr.getBranchState(), Simdlen.get(),
Uniforms, Aligneds, Alignments, Linears, LinModifiers, Steps,
Attr.getRange());
@@ -475,9 +480,9 @@ static void instantiateOMPDeclareVariantAttr(
// Check function/variant ref for `omp declare variant` but not for `omp
// begin declare variant` (which use implicit attributes).
std::optional<std::pair<FunctionDecl *, Expr *>> DeclVarData =
- S.checkOpenMPDeclareVariantFunction(S.ConvertDeclToDeclGroup(New), E, TI,
- Attr.appendArgs_size(),
- Attr.getRange());
+ S.OpenMP().checkOpenMPDeclareVariantFunction(
+ S.ConvertDeclToDeclGroup(New), E, TI, Attr.appendArgs_size(),
+ Attr.getRange());
if (!DeclVarData)
return;
@@ -538,7 +543,7 @@ static void instantiateOMPDeclareVariantAttr(
AppendArgs.emplace_back(II.IsTarget, II.IsTargetSync);
}
- S.ActOnOpenMPDeclareVariantDirective(
+ S.OpenMP().ActOnOpenMPDeclareVariantDirective(
FD, E, TI, NothingExprs, NeedDevicePtrExprs, AppendArgs, SourceLocation(),
SourceLocation(), Attr.getRange());
}
@@ -560,7 +565,7 @@ static void instantiateDependentAMDGPUFlatWorkGroupSizeAttr(
return;
Expr *MaxExpr = Result.getAs<Expr>();
- S.addAMDGPUFlatWorkGroupSizeAttr(New, Attr, MinExpr, MaxExpr);
+ S.AMDGPU().addAMDGPUFlatWorkGroupSizeAttr(New, Attr, MinExpr, MaxExpr);
}
ExplicitSpecifier Sema::instantiateExplicitSpecifier(
@@ -604,7 +609,30 @@ static void instantiateDependentAMDGPUWavesPerEUAttr(
MaxExpr = Result.getAs<Expr>();
}
- S.addAMDGPUWavesPerEUAttr(New, Attr, MinExpr, MaxExpr);
+ S.AMDGPU().addAMDGPUWavesPerEUAttr(New, Attr, MinExpr, MaxExpr);
+}
+
+static void instantiateDependentAMDGPUMaxNumWorkGroupsAttr(
+ Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
+ const AMDGPUMaxNumWorkGroupsAttr &Attr, Decl *New) {
+ EnterExpressionEvaluationContext Unevaluated(
+ S, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+
+ ExprResult ResultX = S.SubstExpr(Attr.getMaxNumWorkGroupsX(), TemplateArgs);
+ if (!ResultX.isUsable())
+ return;
+ ExprResult ResultY = S.SubstExpr(Attr.getMaxNumWorkGroupsY(), TemplateArgs);
+ if (!ResultY.isUsable())
+ return;
+ ExprResult ResultZ = S.SubstExpr(Attr.getMaxNumWorkGroupsZ(), TemplateArgs);
+ if (!ResultZ.isUsable())
+ return;
+
+ Expr *XExpr = ResultX.getAs<Expr>();
+ Expr *YExpr = ResultY.getAs<Expr>();
+ Expr *ZExpr = ResultZ.getAs<Expr>();
+
+ S.AMDGPU().addAMDGPUMaxNumWorkGroupsAttr(New, Attr, XExpr, YExpr, ZExpr);
}
// This doesn't take any template parameters, but we have a custom action that
@@ -792,6 +820,12 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
*AMDGPUFlatWorkGroupSize, New);
}
+ if (const auto *AMDGPUMaxNumWorkGroups =
+ dyn_cast<AMDGPUMaxNumWorkGroupsAttr>(TmplAttr)) {
+ instantiateDependentAMDGPUMaxNumWorkGroupsAttr(
+ *this, TemplateArgs, *AMDGPUMaxNumWorkGroups, New);
+ }
+
if (const auto *ParamAttr = dyn_cast<HLSLParamModifierAttr>(TmplAttr)) {
instantiateDependentHLSLParamModifierAttr(*this, TemplateArgs, ParamAttr,
New);
@@ -807,14 +841,15 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
}
if (const auto *ABIAttr = dyn_cast<ParameterABIAttr>(TmplAttr)) {
- AddParameterABIAttr(New, *ABIAttr, ABIAttr->getABI());
+ Swift().AddParameterABIAttr(New, *ABIAttr, ABIAttr->getABI());
continue;
}
if (isa<NSConsumedAttr>(TmplAttr) || isa<OSConsumedAttr>(TmplAttr) ||
isa<CFConsumedAttr>(TmplAttr)) {
- AddXConsumedAttr(New, *TmplAttr, attrToRetainOwnershipKind(TmplAttr),
- /*template instantiation=*/true);
+ ObjC().AddXConsumedAttr(New, *TmplAttr,
+ attrToRetainOwnershipKind(TmplAttr),
+ /*template instantiation=*/true);
continue;
}
@@ -858,12 +893,6 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
}
}
-/// Update instantiation attributes after template was late parsed.
-///
-/// Some attributes are evaluated based on the body of template. If it is
-/// late parsed, such attributes cannot be evaluated when declaration is
-/// instantiated. This function is used to update instantiation attributes when
-/// template definition is ready.
void Sema::updateAttrsForLateParsedTemplate(const Decl *Pattern, Decl *Inst) {
for (const auto *Attr : Pattern->attrs()) {
if (auto *A = dyn_cast<StrictFPAttr>(Attr)) {
@@ -874,10 +903,6 @@ void Sema::updateAttrsForLateParsedTemplate(const Decl *Pattern, Decl *Inst) {
}
}
-/// In the MS ABI, we need to instantiate default arguments of dllexported
-/// default constructors along with the constructor definition. This allows IR
-/// gen to emit a constructor closure which calls the default constructor with
-/// its default arguments.
void Sema::InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor) {
assert(Context.getTargetInfo().getCXXABI().isMicrosoft() &&
Ctor->isDefaultConstructor());
@@ -1071,8 +1096,8 @@ Decl *TemplateDeclInstantiator::VisitTypeAliasDecl(TypeAliasDecl *D) {
return Typedef;
}
-Decl *
-TemplateDeclInstantiator::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
+Decl *TemplateDeclInstantiator::InstantiateTypeAliasTemplateDecl(
+ TypeAliasTemplateDecl *D) {
// Create a local instantiation scope for this type alias template, which
// will contain the instantiations of the template parameters.
LocalInstantiationScope Scope(SemaRef);
@@ -1083,6 +1108,15 @@ TemplateDeclInstantiator::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
return nullptr;
TypeAliasDecl *Pattern = D->getTemplatedDecl();
+ Sema::InstantiatingTemplate InstTemplate(
+ SemaRef, D->getBeginLoc(), D,
+ D->getTemplateDepth() >= TemplateArgs.getNumLevels()
+ ? ArrayRef<TemplateArgument>()
+ : (TemplateArgs.begin() + TemplateArgs.getNumLevels() - 1 -
+ D->getTemplateDepth())
+ ->Args);
+ if (InstTemplate.isInvalid())
+ return nullptr;
TypeAliasTemplateDecl *PrevAliasTemplate = nullptr;
if (getPreviousDeclForInstantiation<TypedefNameDecl>(Pattern)) {
@@ -1109,7 +1143,14 @@ TemplateDeclInstantiator::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
if (!PrevAliasTemplate)
Inst->setInstantiatedFromMemberTemplate(D);
- Owner->addDecl(Inst);
+ return Inst;
+}
+
+Decl *
+TemplateDeclInstantiator::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
+ Decl *Inst = InstantiateTypeAliasTemplateDecl(D);
+ if (Inst)
+ Owner->addDecl(Inst);
return Inst;
}
@@ -1177,7 +1218,7 @@ Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D,
// In ARC, infer 'retaining' for variables of retainable type.
if (SemaRef.getLangOpts().ObjCAutoRefCount &&
- SemaRef.inferObjCARCLifetime(Var))
+ SemaRef.ObjC().inferObjCARCLifetime(Var))
Var->setInvalidDecl();
if (SemaRef.getLangOpts().OpenCL)
@@ -1407,11 +1448,8 @@ Decl *TemplateDeclInstantiator::VisitFriendDecl(FriendDecl *D) {
if (!InstTy)
return nullptr;
- FriendDecl *FD = SemaRef.CheckFriendTypeDecl(D->getBeginLoc(),
- D->getFriendLoc(), InstTy);
- if (!FD)
- return nullptr;
-
+ FriendDecl *FD = FriendDecl::Create(
+ SemaRef.Context, Owner, D->getLocation(), InstTy, D->getFriendLoc());
FD->setAccess(AS_public);
FD->setUnsupportedFriend(D->isUnsupportedFriend());
Owner->addDecl(FD);
@@ -1701,6 +1739,7 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
assert(!Owner->isDependentContext());
Inst->setLexicalDeclContext(Owner);
RecordInst->setLexicalDeclContext(Owner);
+ Inst->setObjectOfFriendDecl();
if (PrevClassTemplate) {
Inst->setCommonPtr(PrevClassTemplate->getCommonPtr());
@@ -2222,23 +2261,27 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
FunctionTemplate->setInstantiatedFromMemberTemplate(
D->getDescribedFunctionTemplate());
}
- } else if (FunctionTemplate) {
+ } else if (FunctionTemplate &&
+ SemaRef.CodeSynthesisContexts.back().Kind !=
+ Sema::CodeSynthesisContext::BuildingDeductionGuides) {
// Record this function template specialization.
ArrayRef<TemplateArgument> Innermost = TemplateArgs.getInnermost();
Function->setFunctionTemplateSpecialization(FunctionTemplate,
TemplateArgumentList::CreateCopy(SemaRef.Context,
Innermost),
/*InsertPos=*/nullptr);
- } else if (isFriend && D->isThisDeclarationADefinition()) {
- // Do not connect the friend to the template unless it's actually a
- // definition. We don't want non-template functions to be marked as being
- // template instantiations.
- Function->setInstantiationOfMemberFunction(D, TSK_ImplicitInstantiation);
- } else if (!isFriend) {
- // If this is not a function template, and this is not a friend (that is,
- // this is a locally declared function), save the instantiation relationship
- // for the purposes of constraint instantiation.
- Function->setInstantiatedFromDecl(D);
+ } else if (FunctionRewriteKind == RewriteKind::None) {
+ if (isFriend && D->isThisDeclarationADefinition()) {
+ // Do not connect the friend to the template unless it's actually a
+ // definition. We don't want non-template functions to be marked as being
+ // template instantiations.
+ Function->setInstantiationOfMemberFunction(D, TSK_ImplicitInstantiation);
+ } else if (!isFriend) {
+ // If this is not a function template, and this is not a friend (that is,
+ // this is a locally declared function), save the instantiation
+ // relationship for the purposes of constraint instantiation.
+ Function->setInstantiatedFromDecl(D);
+ }
}
if (isFriend) {
@@ -2256,7 +2299,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
SemaRef, Function->getDeclName(), SourceLocation(),
D->isLocalExternDecl() ? Sema::LookupRedeclarationWithLinkage
: Sema::LookupOrdinaryName,
- D->isLocalExternDecl() ? Sema::ForExternalRedeclaration
+ D->isLocalExternDecl() ? RedeclarationKind::ForExternalRedeclaration
: SemaRef.forRedeclarationInCurContext());
if (DependentFunctionTemplateSpecializationInfo *DFTSI =
@@ -2400,7 +2443,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
return nullptr;
}
if (D->isDeleted())
- SemaRef.SetDeclDeleted(Function, D->getLocation());
+ SemaRef.SetDeclDeleted(Function, D->getLocation(), D->getDeletedMessage());
NamedDecl *PrincipalDecl =
(TemplateParams ? cast<NamedDecl>(FunctionTemplate) : Function);
@@ -2629,7 +2672,7 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
TemplateArgumentList::CreateCopy(SemaRef.Context,
Innermost),
/*InsertPos=*/nullptr);
- } else if (!isFriend) {
+ } else if (!isFriend && FunctionRewriteKind == RewriteKind::None) {
// Record that this is an instantiation of a member function.
Method->setInstantiationOfMemberFunction(D, TSK_ImplicitInstantiation);
}
@@ -2657,7 +2700,7 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
Method->setInvalidDecl();
LookupResult Previous(SemaRef, NameInfo, Sema::LookupOrdinaryName,
- Sema::ForExternalRedeclaration);
+ RedeclarationKind::ForExternalRedeclaration);
bool IsExplicitSpecialization = false;
@@ -2776,7 +2819,8 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
return nullptr;
}
if (D->isDeletedAsWritten())
- SemaRef.SetDeclDeleted(Method, Method->getLocation());
+ SemaRef.SetDeclDeleted(Method, Method->getLocation(),
+ D->getDeletedMessage());
// If this is an explicit specialization, mark the implicitly-instantiated
// template specialization as being an explicit specialization too.
@@ -2912,11 +2956,10 @@ Decl *TemplateDeclInstantiator::VisitTemplateTypeParmDecl(
}
}
if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited()) {
- TypeSourceInfo *InstantiatedDefaultArg =
- SemaRef.SubstType(D->getDefaultArgumentInfo(), TemplateArgs,
- D->getDefaultArgumentLoc(), D->getDeclName());
- if (InstantiatedDefaultArg)
- Inst->setDefaultArgument(InstantiatedDefaultArg);
+ TemplateArgumentLoc Output;
+ if (!SemaRef.SubstTemplateArgument(D->getDefaultArgument(), TemplateArgs,
+ Output))
+ Inst->setDefaultArgument(SemaRef.getASTContext(), Output);
}
// Introduce this template parameter's instantiation into the instantiation
@@ -3080,9 +3123,10 @@ Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl(
if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited()) {
EnterExpressionEvaluationContext ConstantEvaluated(
SemaRef, Sema::ExpressionEvaluationContext::ConstantEvaluated);
- ExprResult Value = SemaRef.SubstExpr(D->getDefaultArgument(), TemplateArgs);
- if (!Value.isInvalid())
- Param->setDefaultArgument(Value.get());
+ TemplateArgumentLoc Result;
+ if (!SemaRef.SubstTemplateArgument(D->getDefaultArgument(), TemplateArgs,
+ Result))
+ Param->setDefaultArgument(SemaRef.Context, Result);
}
// Introduce this template parameter's instantiation into the instantiation
@@ -3195,12 +3239,14 @@ TemplateDeclInstantiator::VisitTemplateTemplateParmDecl(
Param = TemplateTemplateParmDecl::Create(
SemaRef.Context, Owner, D->getLocation(),
D->getDepth() - TemplateArgs.getNumSubstitutedLevels(),
- D->getPosition(), D->getIdentifier(), InstParams, ExpandedParams);
+ D->getPosition(), D->getIdentifier(), D->wasDeclaredWithTypename(),
+ InstParams, ExpandedParams);
else
Param = TemplateTemplateParmDecl::Create(
SemaRef.Context, Owner, D->getLocation(),
D->getDepth() - TemplateArgs.getNumSubstitutedLevels(),
- D->getPosition(), D->isParameterPack(), D->getIdentifier(), InstParams);
+ D->getPosition(), D->isParameterPack(), D->getIdentifier(),
+ D->wasDeclaredWithTypename(), InstParams);
if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited()) {
NestedNameSpecifierLoc QualifierLoc =
D->getDefaultArgument().getTemplateQualifierLoc();
@@ -3322,7 +3368,7 @@ Decl *TemplateDeclInstantiator::VisitUsingDecl(UsingDecl *D) {
// fact, it's not really even possible in non-class scopes).
bool CheckRedeclaration = Owner->isRecord();
LookupResult Prev(SemaRef, NameInfo, Sema::LookupUsingDeclName,
- Sema::ForVisibleRedeclaration);
+ RedeclarationKind::ForVisibleRedeclaration);
UsingDecl *NewUD = UsingDecl::Create(SemaRef.Context, Owner,
D->getUsingLoc(),
@@ -3374,6 +3420,10 @@ Decl *TemplateDeclInstantiator::VisitUsingEnumDecl(UsingEnumDecl *D) {
TypeSourceInfo *TSI = SemaRef.SubstType(D->getEnumType(), TemplateArgs,
D->getLocation(), D->getDeclName());
+
+ if (!TSI)
+ return nullptr;
+
UsingEnumDecl *NewUD =
UsingEnumDecl::Create(SemaRef.Context, Owner, D->getUsingLoc(),
D->getEnumLoc(), D->getLocation(), TSI);
@@ -3545,7 +3595,7 @@ Decl *TemplateDeclInstantiator::VisitOMPThreadPrivateDecl(
}
OMPThreadPrivateDecl *TD =
- SemaRef.CheckOMPThreadPrivateDecl(D->getLocation(), Vars);
+ SemaRef.OpenMP().CheckOMPThreadPrivateDecl(D->getLocation(), Vars);
TD->setAccess(AS_public);
Owner->addDecl(TD);
@@ -3568,14 +3618,14 @@ Decl *TemplateDeclInstantiator::VisitOMPAllocateDecl(OMPAllocateDecl *D) {
ExprResult NewE = SemaRef.SubstExpr(AC->getAllocator(), TemplateArgs);
if (!NewE.isUsable())
continue;
- IC = SemaRef.ActOnOpenMPAllocatorClause(
+ IC = SemaRef.OpenMP().ActOnOpenMPAllocatorClause(
NewE.get(), AC->getBeginLoc(), AC->getLParenLoc(), AC->getEndLoc());
} else if (auto *AC = dyn_cast<OMPAlignClause>(C)) {
ExprResult NewE = SemaRef.SubstExpr(AC->getAlignment(), TemplateArgs);
if (!NewE.isUsable())
continue;
- IC = SemaRef.ActOnOpenMPAlignClause(NewE.get(), AC->getBeginLoc(),
- AC->getLParenLoc(), AC->getEndLoc());
+ IC = SemaRef.OpenMP().ActOnOpenMPAlignClause(
+ NewE.get(), AC->getBeginLoc(), AC->getLParenLoc(), AC->getEndLoc());
// If align clause value ends up being invalid, this can end up null.
if (!IC)
continue;
@@ -3583,7 +3633,7 @@ Decl *TemplateDeclInstantiator::VisitOMPAllocateDecl(OMPAllocateDecl *D) {
Clauses.push_back(IC);
}
- Sema::DeclGroupPtrTy Res = SemaRef.ActOnOpenMPAllocateDirective(
+ Sema::DeclGroupPtrTy Res = SemaRef.OpenMP().ActOnOpenMPAllocateDirective(
D->getLocation(), Vars, Clauses, Owner);
if (Res.get().isNull())
return nullptr;
@@ -3604,7 +3654,7 @@ Decl *TemplateDeclInstantiator::VisitOMPDeclareReductionDecl(
D->getType()->containsUnexpandedParameterPack();
QualType SubstReductionType;
if (RequiresInstantiation) {
- SubstReductionType = SemaRef.ActOnOpenMPDeclareReductionType(
+ SubstReductionType = SemaRef.OpenMP().ActOnOpenMPDeclareReductionType(
D->getLocation(),
ParsedType::make(SemaRef.SubstType(
D->getType(), TemplateArgs, D->getLocation(), DeclarationName())));
@@ -3625,7 +3675,7 @@ Decl *TemplateDeclInstantiator::VisitOMPDeclareReductionDecl(
SemaRef.CurrentInstantiationScope->findInstantiationOf(PrevDeclInScope)
->get<Decl *>());
}
- auto DRD = SemaRef.ActOnOpenMPDeclareReductionDirectiveStart(
+ auto DRD = SemaRef.OpenMP().ActOnOpenMPDeclareReductionDirectiveStart(
/*S=*/nullptr, Owner, D->getDeclName(), ReductionTypes, D->getAccess(),
PrevDeclInScope);
auto *NewDRD = cast<OMPDeclareReductionDecl>(DRD.get().getSingleDecl());
@@ -3634,7 +3684,7 @@ Decl *TemplateDeclInstantiator::VisitOMPDeclareReductionDecl(
Expr *SubstInitializer = nullptr;
// Combiners instantiation sequence.
if (Combiner) {
- SemaRef.ActOnOpenMPDeclareReductionCombinerStart(
+ SemaRef.OpenMP().ActOnOpenMPDeclareReductionCombinerStart(
/*S=*/nullptr, NewDRD);
SemaRef.CurrentInstantiationScope->InstantiatedLocal(
cast<DeclRefExpr>(D->getCombinerIn())->getDecl(),
@@ -3646,12 +3696,14 @@ Decl *TemplateDeclInstantiator::VisitOMPDeclareReductionDecl(
Sema::CXXThisScopeRAII ThisScope(SemaRef, ThisContext, Qualifiers(),
ThisContext);
SubstCombiner = SemaRef.SubstExpr(Combiner, TemplateArgs).get();
- SemaRef.ActOnOpenMPDeclareReductionCombinerEnd(NewDRD, SubstCombiner);
+ SemaRef.OpenMP().ActOnOpenMPDeclareReductionCombinerEnd(NewDRD,
+ SubstCombiner);
}
// Initializers instantiation sequence.
if (Init) {
- VarDecl *OmpPrivParm = SemaRef.ActOnOpenMPDeclareReductionInitializerStart(
- /*S=*/nullptr, NewDRD);
+ VarDecl *OmpPrivParm =
+ SemaRef.OpenMP().ActOnOpenMPDeclareReductionInitializerStart(
+ /*S=*/nullptr, NewDRD);
SemaRef.CurrentInstantiationScope->InstantiatedLocal(
cast<DeclRefExpr>(D->getInitOrig())->getDecl(),
cast<DeclRefExpr>(NewDRD->getInitOrig())->getDecl());
@@ -3668,8 +3720,8 @@ Decl *TemplateDeclInstantiator::VisitOMPDeclareReductionDecl(
SemaRef.InstantiateVariableInitializer(OmpPrivParm, OldPrivParm,
TemplateArgs);
}
- SemaRef.ActOnOpenMPDeclareReductionInitializerEnd(NewDRD, SubstInitializer,
- OmpPrivParm);
+ SemaRef.OpenMP().ActOnOpenMPDeclareReductionInitializerEnd(
+ NewDRD, SubstInitializer, OmpPrivParm);
}
IsCorrect = IsCorrect && SubstCombiner &&
(!Init ||
@@ -3678,7 +3730,7 @@ Decl *TemplateDeclInstantiator::VisitOMPDeclareReductionDecl(
(D->getInitializerKind() != OMPDeclareReductionInitKind::Call &&
!SubstInitializer));
- (void)SemaRef.ActOnOpenMPDeclareReductionDirectiveEnd(
+ (void)SemaRef.OpenMP().ActOnOpenMPDeclareReductionDirectiveEnd(
/*S=*/nullptr, DRD, IsCorrect && !D->isInvalidDecl());
return NewDRD;
@@ -3694,7 +3746,7 @@ TemplateDeclInstantiator::VisitOMPDeclareMapperDecl(OMPDeclareMapperDecl *D) {
QualType SubstMapperTy;
DeclarationName VN = D->getVarName();
if (RequiresInstantiation) {
- SubstMapperTy = SemaRef.ActOnOpenMPDeclareMapperType(
+ SubstMapperTy = SemaRef.OpenMP().ActOnOpenMPDeclareMapperType(
D->getLocation(),
ParsedType::make(SemaRef.SubstType(D->getType(), TemplateArgs,
D->getLocation(), VN)));
@@ -3714,11 +3766,12 @@ TemplateDeclInstantiator::VisitOMPDeclareMapperDecl(OMPDeclareMapperDecl *D) {
SmallVector<OMPClause *, 6> Clauses;
// Instantiate the mapper variable.
DeclarationNameInfo DirName;
- SemaRef.StartOpenMPDSABlock(llvm::omp::OMPD_declare_mapper, DirName,
- /*S=*/nullptr,
- (*D->clauselist_begin())->getBeginLoc());
- ExprResult MapperVarRef = SemaRef.ActOnOpenMPDeclareMapperDirectiveVarDecl(
- /*S=*/nullptr, SubstMapperTy, D->getLocation(), VN);
+ SemaRef.OpenMP().StartOpenMPDSABlock(llvm::omp::OMPD_declare_mapper, DirName,
+ /*S=*/nullptr,
+ (*D->clauselist_begin())->getBeginLoc());
+ ExprResult MapperVarRef =
+ SemaRef.OpenMP().ActOnOpenMPDeclareMapperDirectiveVarDecl(
+ /*S=*/nullptr, SubstMapperTy, D->getLocation(), VN);
SemaRef.CurrentInstantiationScope->InstantiatedLocal(
cast<DeclRefExpr>(D->getMapperVarRef())->getDecl(),
cast<DeclRefExpr>(MapperVarRef.get())->getDecl());
@@ -3748,17 +3801,17 @@ TemplateDeclInstantiator::VisitOMPDeclareMapperDecl(OMPDeclareMapperDecl *D) {
SemaRef.SubstDeclarationNameInfo(OldC->getMapperIdInfo(), TemplateArgs);
OMPVarListLocTy Locs(OldC->getBeginLoc(), OldC->getLParenLoc(),
OldC->getEndLoc());
- OMPClause *NewC = SemaRef.ActOnOpenMPMapClause(
+ OMPClause *NewC = SemaRef.OpenMP().ActOnOpenMPMapClause(
OldC->getIteratorModifier(), OldC->getMapTypeModifiers(),
OldC->getMapTypeModifiersLoc(), SS, NewNameInfo, OldC->getMapType(),
OldC->isImplicitMapType(), OldC->getMapLoc(), OldC->getColonLoc(),
NewVars, Locs);
Clauses.push_back(NewC);
}
- SemaRef.EndOpenMPDSABlock(nullptr);
+ SemaRef.OpenMP().EndOpenMPDSABlock(nullptr);
if (!IsCorrect)
return nullptr;
- Sema::DeclGroupPtrTy DG = SemaRef.ActOnOpenMPDeclareMapperDirective(
+ Sema::DeclGroupPtrTy DG = SemaRef.OpenMP().ActOnOpenMPDeclareMapperDirective(
/*S=*/nullptr, Owner, D->getDeclName(), SubstMapperTy, D->getLocation(),
VN, D->getAccess(), MapperVarRef.get(), Clauses, PrevDeclInScope);
Decl *NewDMD = DG.get().getSingleDecl();
@@ -3812,15 +3865,16 @@ TemplateDeclInstantiator::VisitClassTemplateSpecializationDecl(
// Substitute into the template arguments of the class template explicit
// specialization.
- TemplateSpecializationTypeLoc Loc = D->getTypeAsWritten()->getTypeLoc().
- castAs<TemplateSpecializationTypeLoc>();
- TemplateArgumentListInfo InstTemplateArgs(Loc.getLAngleLoc(),
- Loc.getRAngleLoc());
- SmallVector<TemplateArgumentLoc, 4> ArgLocs;
- for (unsigned I = 0; I != Loc.getNumArgs(); ++I)
- ArgLocs.push_back(Loc.getArgLoc(I));
- if (SemaRef.SubstTemplateArguments(ArgLocs, TemplateArgs, InstTemplateArgs))
- return nullptr;
+ TemplateArgumentListInfo InstTemplateArgs;
+ if (const ASTTemplateArgumentListInfo *TemplateArgsInfo =
+ D->getTemplateArgsAsWritten()) {
+ InstTemplateArgs.setLAngleLoc(TemplateArgsInfo->getLAngleLoc());
+ InstTemplateArgs.setRAngleLoc(TemplateArgsInfo->getRAngleLoc());
+
+ if (SemaRef.SubstTemplateArguments(TemplateArgsInfo->arguments(),
+ TemplateArgs, InstTemplateArgs))
+ return nullptr;
+ }
// Check that the template argument list is well-formed for this
// class template.
@@ -3874,6 +3928,7 @@ TemplateDeclInstantiator::VisitClassTemplateSpecializationDecl(
ClassTemplateSpecializationDecl::Create(
SemaRef.Context, D->getTagKind(), Owner, D->getBeginLoc(),
D->getLocation(), InstClassTemplate, CanonicalConverted, PrevDecl);
+ InstD->setTemplateArgsAsWritten(InstTemplateArgs);
// Add this partial specialization to the set of class template partial
// specializations.
@@ -3884,28 +3939,10 @@ TemplateDeclInstantiator::VisitClassTemplateSpecializationDecl(
if (SubstQualifier(D, InstD))
return nullptr;
- // Build the canonical type that describes the converted template
- // arguments of the class template explicit specialization.
- QualType CanonType = SemaRef.Context.getTemplateSpecializationType(
- TemplateName(InstClassTemplate), CanonicalConverted,
- SemaRef.Context.getRecordType(InstD));
-
- // Build the fully-sugared type for this class template
- // specialization as the user wrote in the specialization
- // itself. This means that we'll pretty-print the type retrieved
- // from the specialization's declaration the way that the user
- // actually wrote the specialization, rather than formatting the
- // name based on the "canonical" representation used to store the
- // template arguments in the specialization.
- TypeSourceInfo *WrittenTy = SemaRef.Context.getTemplateSpecializationTypeInfo(
- TemplateName(InstClassTemplate), D->getLocation(), InstTemplateArgs,
- CanonType);
-
InstD->setAccess(D->getAccess());
InstD->setInstantiationOfMemberClass(D, TSK_ImplicitInstantiation);
InstD->setSpecializationKind(D->getSpecializationKind());
- InstD->setTypeAsWritten(WrittenTy);
- InstD->setExternLoc(D->getExternLoc());
+ InstD->setExternKeywordLoc(D->getExternKeywordLoc());
InstD->setTemplateKeywordLoc(D->getTemplateKeywordLoc());
Owner->addDecl(InstD);
@@ -3939,7 +3976,7 @@ Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
// Substitute the current template arguments.
if (const ASTTemplateArgumentListInfo *TemplateArgsInfo =
- D->getTemplateArgsInfo()) {
+ D->getTemplateArgsAsWritten()) {
VarTemplateArgsInfo.setLAngleLoc(TemplateArgsInfo->getLAngleLoc());
VarTemplateArgsInfo.setRAngleLoc(TemplateArgsInfo->getRAngleLoc());
@@ -3997,7 +4034,7 @@ Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *Var = VarTemplateSpecializationDecl::Create(
SemaRef.Context, Owner, D->getInnerLocStart(), D->getLocation(),
VarTemplate, DI->getType(), DI, D->getStorageClass(), Converted);
- Var->setTemplateArgsInfo(TemplateArgsInfo);
+ Var->setTemplateArgsAsWritten(TemplateArgsInfo);
if (!PrevDecl) {
void *InsertPos = nullptr;
VarTemplate->findSpecialization(Converted, InsertPos);
@@ -4239,19 +4276,21 @@ TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
QualType CanonType = SemaRef.Context.getTemplateSpecializationType(
TemplateName(ClassTemplate), CanonicalConverted);
- // Build the fully-sugared type for this class template
- // specialization as the user wrote in the specialization
- // itself. This means that we'll pretty-print the type retrieved
- // from the specialization's declaration the way that the user
- // actually wrote the specialization, rather than formatting the
- // name based on the "canonical" representation used to store the
- // template arguments in the specialization.
- TypeSourceInfo *WrittenTy
- = SemaRef.Context.getTemplateSpecializationTypeInfo(
- TemplateName(ClassTemplate),
- PartialSpec->getLocation(),
- InstTemplateArgs,
- CanonType);
+ // Create the class template partial specialization declaration.
+ ClassTemplatePartialSpecializationDecl *InstPartialSpec =
+ ClassTemplatePartialSpecializationDecl::Create(
+ SemaRef.Context, PartialSpec->getTagKind(), Owner,
+ PartialSpec->getBeginLoc(), PartialSpec->getLocation(), InstParams,
+ ClassTemplate, CanonicalConverted, CanonType,
+ /*PrevDecl=*/nullptr);
+
+ InstPartialSpec->setTemplateArgsAsWritten(InstTemplateArgs);
+
+ // Substitute the nested name specifier, if any.
+ if (SubstQualifier(PartialSpec, InstPartialSpec))
+ return nullptr;
+
+ InstPartialSpec->setInstantiatedFromMember(PartialSpec);
if (PrevDecl) {
// We've already seen a partial specialization with the same template
@@ -4269,28 +4308,14 @@ TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
//
// Outer<int, int> outer; // error: the partial specializations of Inner
// // have the same signature.
- SemaRef.Diag(PartialSpec->getLocation(), diag::err_partial_spec_redeclared)
- << WrittenTy->getType();
+ SemaRef.Diag(InstPartialSpec->getLocation(),
+ diag::err_partial_spec_redeclared)
+ << InstPartialSpec;
SemaRef.Diag(PrevDecl->getLocation(), diag::note_prev_partial_spec_here)
<< SemaRef.Context.getTypeDeclType(PrevDecl);
return nullptr;
}
-
- // Create the class template partial specialization declaration.
- ClassTemplatePartialSpecializationDecl *InstPartialSpec =
- ClassTemplatePartialSpecializationDecl::Create(
- SemaRef.Context, PartialSpec->getTagKind(), Owner,
- PartialSpec->getBeginLoc(), PartialSpec->getLocation(), InstParams,
- ClassTemplate, CanonicalConverted, InstTemplateArgs, CanonType,
- nullptr);
- // Substitute the nested name specifier, if any.
- if (SubstQualifier(PartialSpec, InstPartialSpec))
- return nullptr;
-
- InstPartialSpec->setInstantiatedFromMember(PartialSpec);
- InstPartialSpec->setTypeAsWritten(WrittenTy);
-
// Check the completed partial specialization.
SemaRef.CheckTemplatePartialSpecialization(InstPartialSpec);
@@ -4359,46 +4384,6 @@ TemplateDeclInstantiator::InstantiateVarTemplatePartialSpecialization(
VarTemplate->findPartialSpecialization(CanonicalConverted, InstParams,
InsertPos);
- // Build the canonical type that describes the converted template
- // arguments of the variable template partial specialization.
- QualType CanonType = SemaRef.Context.getTemplateSpecializationType(
- TemplateName(VarTemplate), CanonicalConverted);
-
- // Build the fully-sugared type for this variable template
- // specialization as the user wrote in the specialization
- // itself. This means that we'll pretty-print the type retrieved
- // from the specialization's declaration the way that the user
- // actually wrote the specialization, rather than formatting the
- // name based on the "canonical" representation used to store the
- // template arguments in the specialization.
- TypeSourceInfo *WrittenTy = SemaRef.Context.getTemplateSpecializationTypeInfo(
- TemplateName(VarTemplate), PartialSpec->getLocation(), InstTemplateArgs,
- CanonType);
-
- if (PrevDecl) {
- // We've already seen a partial specialization with the same template
- // parameters and template arguments. This can happen, for example, when
- // substituting the outer template arguments ends up causing two
- // variable template partial specializations of a member variable template
- // to have identical forms, e.g.,
- //
- // template<typename T, typename U>
- // struct Outer {
- // template<typename X, typename Y> pair<X,Y> p;
- // template<typename Y> pair<T, Y> p;
- // template<typename Y> pair<U, Y> p;
- // };
- //
- // Outer<int, int> outer; // error: the partial specializations of Inner
- // // have the same signature.
- SemaRef.Diag(PartialSpec->getLocation(),
- diag::err_var_partial_spec_redeclared)
- << WrittenTy->getType();
- SemaRef.Diag(PrevDecl->getLocation(),
- diag::note_var_prev_partial_spec_here);
- return nullptr;
- }
-
// Do substitution on the type of the declaration
TypeSourceInfo *DI = SemaRef.SubstType(
PartialSpec->getTypeSourceInfo(), TemplateArgs,
@@ -4418,16 +4403,39 @@ TemplateDeclInstantiator::InstantiateVarTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl::Create(
SemaRef.Context, Owner, PartialSpec->getInnerLocStart(),
PartialSpec->getLocation(), InstParams, VarTemplate, DI->getType(),
- DI, PartialSpec->getStorageClass(), CanonicalConverted,
- InstTemplateArgs);
+ DI, PartialSpec->getStorageClass(), CanonicalConverted);
+
+ InstPartialSpec->setTemplateArgsAsWritten(InstTemplateArgs);
// Substitute the nested name specifier, if any.
if (SubstQualifier(PartialSpec, InstPartialSpec))
return nullptr;
InstPartialSpec->setInstantiatedFromMember(PartialSpec);
- InstPartialSpec->setTypeAsWritten(WrittenTy);
+ if (PrevDecl) {
+ // We've already seen a partial specialization with the same template
+ // parameters and template arguments. This can happen, for example, when
+ // substituting the outer template arguments ends up causing two
+ // variable template partial specializations of a member variable template
+ // to have identical forms, e.g.,
+ //
+ // template<typename T, typename U>
+ // struct Outer {
+ // template<typename X, typename Y> pair<X,Y> p;
+ // template<typename Y> pair<T, Y> p;
+ // template<typename Y> pair<U, Y> p;
+ // };
+ //
+ // Outer<int, int> outer; // error: the partial specializations of Inner
+ // // have the same signature.
+ SemaRef.Diag(PartialSpec->getLocation(),
+ diag::err_var_partial_spec_redeclared)
+ << InstPartialSpec;
+ SemaRef.Diag(PrevDecl->getLocation(),
+ diag::note_var_prev_partial_spec_here);
+ return nullptr;
+ }
// Check the completed partial specialization.
SemaRef.CheckTemplatePartialSpecialization(InstPartialSpec);
@@ -4541,8 +4549,6 @@ TemplateDeclInstantiator::SubstFunctionType(FunctionDecl *D,
return NewTInfo;
}
-/// Introduce the instantiated local variables into the local
-/// instantiation scope.
void Sema::addInstantiatedLocalVarsToScope(FunctionDecl *Function,
const FunctionDecl *PatternDecl,
LocalInstantiationScope &Scope) {
@@ -4571,9 +4577,6 @@ void Sema::addInstantiatedLocalVarsToScope(FunctionDecl *Function,
}
}
-/// Introduce the instantiated function parameters into the local
-/// instantiation scope, and set the parameter names to those used
-/// in the template.
bool Sema::addInstantiatedParametersToScope(
FunctionDecl *Function, const FunctionDecl *PatternDecl,
LocalInstantiationScope &Scope,
@@ -4656,9 +4659,10 @@ bool Sema::InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
//
// template<typename T>
// A<T> Foo(int a = A<T>::FooImpl());
- MultiLevelTemplateArgumentList TemplateArgs = getTemplateInstantiationArgs(
- FD, FD->getLexicalDeclContext(), /*Final=*/false, nullptr,
- /*RelativeToPrimary=*/true);
+ MultiLevelTemplateArgumentList TemplateArgs =
+ getTemplateInstantiationArgs(FD, FD->getLexicalDeclContext(),
+ /*Final=*/false, /*Innermost=*/std::nullopt,
+ /*RelativeToPrimary=*/true);
if (SubstDefaultArgument(CallLoc, Param, TemplateArgs, /*ForCallExpr*/ true))
return true;
@@ -4696,9 +4700,10 @@ void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
Sema::ContextRAII savedContext(*this, Decl);
LocalInstantiationScope Scope(*this);
- MultiLevelTemplateArgumentList TemplateArgs = getTemplateInstantiationArgs(
- Decl, Decl->getLexicalDeclContext(), /*Final=*/false, nullptr,
- /*RelativeToPrimary*/ true);
+ MultiLevelTemplateArgumentList TemplateArgs =
+ getTemplateInstantiationArgs(Decl, Decl->getLexicalDeclContext(),
+ /*Final=*/false, /*Innermost=*/std::nullopt,
+ /*RelativeToPrimary*/ true);
// FIXME: We can't use getTemplateInstantiationPattern(false) in general
// here, because for a non-defining friend declaration in a class template,
@@ -4710,6 +4715,12 @@ void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
return;
}
+ // The noexcept specification could reference any lambda captures. Ensure
+ // those are added to the LocalInstantiationScope.
+ LambdaScopeForCallOperatorInstantiationRAII PushLambdaCaptures(
+ *this, Decl, TemplateArgs, Scope,
+ /*ShouldAddDeclsFromParentScope=*/false);
+
SubstExceptionSpec(Decl, Template->getType()->castAs<FunctionProtoType>(),
TemplateArgs);
}
@@ -4824,7 +4835,7 @@ TemplateDeclInstantiator::InitMethodInstantiation(CXXMethodDecl *New,
bool TemplateDeclInstantiator::SubstDefaultedFunction(FunctionDecl *New,
FunctionDecl *Tmpl) {
// Transfer across any unqualified lookups.
- if (auto *DFI = Tmpl->getDefaultedFunctionInfo()) {
+ if (auto *DFI = Tmpl->getDefalutedOrDeletedInfo()) {
SmallVector<DeclAccessPair, 32> Lookups;
Lookups.reserve(DFI->getUnqualifiedLookups().size());
bool AnyChanged = false;
@@ -4839,8 +4850,8 @@ bool TemplateDeclInstantiator::SubstDefaultedFunction(FunctionDecl *New,
// It's unlikely that substitution will change any declarations. Don't
// store an unnecessary copy in that case.
- New->setDefaultedFunctionInfo(
- AnyChanged ? FunctionDecl::DefaultedFunctionInfo::Create(
+ New->setDefaultedOrDeletedInfo(
+ AnyChanged ? FunctionDecl::DefaultedOrDeletedFunctionInfo::Create(
SemaRef.Context, Lookups)
: DFI);
}
@@ -4849,21 +4860,13 @@ bool TemplateDeclInstantiator::SubstDefaultedFunction(FunctionDecl *New,
return false;
}
-/// Instantiate (or find existing instantiation of) a function template with a
-/// given set of template arguments.
-///
-/// Usually this should not be used, and template argument deduction should be
-/// used in its place.
-FunctionDecl *
-Sema::InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
- const TemplateArgumentList *Args,
- SourceLocation Loc) {
+FunctionDecl *Sema::InstantiateFunctionDeclaration(
+ FunctionTemplateDecl *FTD, const TemplateArgumentList *Args,
+ SourceLocation Loc, CodeSynthesisContext::SynthesisKind CSC) {
FunctionDecl *FD = FTD->getTemplatedDecl();
sema::TemplateDeductionInfo Info(Loc);
- InstantiatingTemplate Inst(
- *this, Loc, FTD, Args->asArray(),
- CodeSynthesisContext::ExplicitTemplateArgumentSubstitution, Info);
+ InstantiatingTemplate Inst(*this, Loc, FTD, Args->asArray(), CSC, Info);
if (Inst.isInvalid())
return nullptr;
@@ -4874,23 +4877,6 @@ Sema::InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
return cast_or_null<FunctionDecl>(SubstDecl(FD, FD->getParent(), MArgs));
}
-/// Instantiate the definition of the given function from its
-/// template.
-///
-/// \param PointOfInstantiation the point at which the instantiation was
-/// required. Note that this is not precisely a "point of instantiation"
-/// for the function, but it's close.
-///
-/// \param Function the already-instantiated declaration of a
-/// function template specialization or member function of a class template
-/// specialization.
-///
-/// \param Recursive if true, recursively instantiates any functions that
-/// are required by this instantiation.
-///
-/// \param DefinitionRequired if true, then we are performing an explicit
-/// instantiation where the body of the function is required. Complain if
-/// there is no such body.
void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive,
@@ -4980,11 +4966,16 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
}
llvm::TimeTraceScope TimeScope("InstantiateFunction", [&]() {
- std::string Name;
- llvm::raw_string_ostream OS(Name);
+ llvm::TimeTraceMetadata M;
+ llvm::raw_string_ostream OS(M.Detail);
Function->getNameForDiagnostic(OS, getPrintingPolicy(),
/*Qualified=*/true);
- return Name;
+ if (llvm::isTimeTraceVerbose()) {
+ auto Loc = SourceMgr.getExpansionLoc(Function->getLocation());
+ M.File = SourceMgr.getFilename(Loc);
+ M.Line = SourceMgr.getExpansionLineNumber(Loc);
+ }
+ return M;
});
// If we're performing recursive template instantiation, create our own
@@ -5052,10 +5043,19 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
Function->setLocation(PatternDecl->getLocation());
Function->setInnerLocStart(PatternDecl->getInnerLocStart());
Function->setRangeEnd(PatternDecl->getEndLoc());
+ Function->setDeclarationNameLoc(PatternDecl->getNameInfo().getInfo());
EnterExpressionEvaluationContext EvalContext(
*this, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
+ Qualifiers ThisTypeQuals;
+ CXXRecordDecl *ThisContext = nullptr;
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Function)) {
+ ThisContext = Method->getParent();
+ ThisTypeQuals = Method->getMethodQualifiers();
+ }
+ CXXThisScopeRAII ThisScope(*this, ThisContext, ThisTypeQuals);
+
// Introduce a new scope where local variable instantiations will be
// recorded, unless we're actually a member function within a local
// class, in which case we need to merge our results with the parent
@@ -5076,10 +5076,10 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
assert(PatternDecl->isDefaulted() &&
"Special member needs to be defaulted");
auto PatternSM = getDefaultedFunctionKind(PatternDecl).asSpecialMember();
- if (!(PatternSM == Sema::CXXCopyConstructor ||
- PatternSM == Sema::CXXCopyAssignment ||
- PatternSM == Sema::CXXMoveConstructor ||
- PatternSM == Sema::CXXMoveAssignment))
+ if (!(PatternSM == CXXSpecialMemberKind::CopyConstructor ||
+ PatternSM == CXXSpecialMemberKind::CopyAssignment ||
+ PatternSM == CXXSpecialMemberKind::MoveConstructor ||
+ PatternSM == CXXSpecialMemberKind::MoveAssignment))
return;
auto *NewRec = dyn_cast<CXXRecordDecl>(Function->getDeclContext());
@@ -5131,6 +5131,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
ParmVarDecl *Parm = Function->getParamDecl(0);
TypeSourceInfo *NewParmSI = IR.TransformType(Parm->getTypeSourceInfo());
+ assert(NewParmSI && "Type transformation failed.");
Parm->setType(NewParmSI->getType());
Parm->setTypeSourceInfo(NewParmSI);
};
@@ -5140,8 +5141,8 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
SetDeclDefaulted(Function, PatternDecl->getLocation());
} else {
MultiLevelTemplateArgumentList TemplateArgs = getTemplateInstantiationArgs(
- Function, Function->getLexicalDeclContext(), /*Final=*/false, nullptr,
- false, PatternDecl);
+ Function, Function->getLexicalDeclContext(), /*Final=*/false,
+ /*Innermost=*/std::nullopt, false, PatternDecl);
// Substitute into the qualifier; we can get a substitution failure here
// through evil use of alias templates.
@@ -5211,7 +5212,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
VarTemplateSpecializationDecl *Sema::BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
- const TemplateArgumentList &TemplateArgList,
+ const TemplateArgumentList *PartialSpecArgs,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, LateInstantiatedAttrVec *LateAttrs,
@@ -5236,14 +5237,15 @@ VarTemplateSpecializationDecl *Sema::BuildVarTemplateInstantiation(
MultiLevelTemplateArgumentList MultiLevelList;
if (auto *PartialSpec =
dyn_cast<VarTemplatePartialSpecializationDecl>(FromVar)) {
+ assert(PartialSpecArgs);
IsMemberSpec = PartialSpec->isMemberSpecialization();
MultiLevelList.addOuterTemplateArguments(
- PartialSpec, TemplateArgList.asArray(), /*Final=*/false);
+ PartialSpec, PartialSpecArgs->asArray(), /*Final=*/false);
} else {
assert(VarTemplate == FromVar->getDescribedVarTemplate());
IsMemberSpec = VarTemplate->isMemberSpecialization();
- MultiLevelList.addOuterTemplateArguments(
- VarTemplate, TemplateArgList.asArray(), /*Final=*/false);
+ MultiLevelList.addOuterTemplateArguments(VarTemplate, Converted,
+ /*Final=*/false);
}
if (!IsMemberSpec)
FromVar = FromVar->getFirstDecl();
@@ -5258,8 +5260,6 @@ VarTemplateSpecializationDecl *Sema::BuildVarTemplateInstantiation(
VarTemplate, FromVar, TemplateArgsInfo, Converted));
}
-/// Instantiates a variable template specialization by completing it
-/// with appropriate type information and initializer.
VarTemplateSpecializationDecl *Sema::CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs) {
@@ -5288,9 +5288,6 @@ VarTemplateSpecializationDecl *Sema::CompleteVarTemplateSpecializationDecl(
return VarSpec;
}
-/// BuildVariableInstantiation - Used after a new variable has been created.
-/// Sets basic variable data and decides whether to postpone the
-/// variable instantiation.
void Sema::BuildVariableInstantiation(
VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
@@ -5342,7 +5339,7 @@ void Sema::BuildVariableInstantiation(
*this, NewVar->getDeclName(), NewVar->getLocation(),
NewVar->isLocalExternDecl() ? Sema::LookupRedeclarationWithLinkage
: Sema::LookupOrdinaryName,
- NewVar->isLocalExternDecl() ? Sema::ForExternalRedeclaration
+ NewVar->isLocalExternDecl() ? RedeclarationKind::ForExternalRedeclaration
: forRedeclarationInCurContext());
if (NewVar->isLocalExternDecl() && OldVar->getPreviousDecl() &&
@@ -5421,7 +5418,6 @@ void Sema::BuildVariableInstantiation(
DiagnoseUnusedDecl(NewVar);
}
-/// Instantiate the initializer of a variable.
void Sema::InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs) {
@@ -5440,6 +5436,7 @@ void Sema::InstantiateVariableInitializer(
EnterExpressionEvaluationContext Evaluated(
*this, Sema::ExpressionEvaluationContext::PotentiallyEvaluated, Var);
+ keepInLifetimeExtendingContext();
// Instantiate the initializer.
ExprResult Init;
@@ -5487,24 +5484,9 @@ void Sema::InstantiateVariableInitializer(
}
if (getLangOpts().CUDA)
- checkAllowedCUDAInitializer(Var);
+ CUDA().checkAllowedInitializer(Var);
}
-/// Instantiate the definition of the given variable from its
-/// template.
-///
-/// \param PointOfInstantiation the point at which the instantiation was
-/// required. Note that this is not precisely a "point of instantiation"
-/// for the variable, but it's close.
-///
-/// \param Var the already-instantiated declaration of a templated variable.
-///
-/// \param Recursive if true, recursively instantiates any functions that
-/// are required by this instantiation.
-///
-/// \param DefinitionRequired if true, then we are performing an explicit
-/// instantiation where a definition of the variable is required. Complain
-/// if there is no such definition.
void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive,
bool DefinitionRequired, bool AtEndOfTU) {
@@ -5679,7 +5661,7 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
TemplateArgumentListInfo TemplateArgInfo;
if (const ASTTemplateArgumentListInfo *ArgInfo =
- VarSpec->getTemplateArgsInfo()) {
+ VarSpec->getTemplateArgsAsWritten()) {
TemplateArgInfo.setLAngleLoc(ArgInfo->getLAngleLoc());
TemplateArgInfo.setRAngleLoc(ArgInfo->getRAngleLoc());
for (const TemplateArgumentLoc &Arg : ArgInfo->arguments())
@@ -6071,10 +6053,6 @@ static NamedDecl *findInstantiationOf(ASTContext &Ctx,
return nullptr;
}
-/// Finds the instantiation of the given declaration context
-/// within the current instantiation.
-///
-/// \returns NULL if there was an error
DeclContext *Sema::FindInstantiatedContext(SourceLocation Loc, DeclContext* DC,
const MultiLevelTemplateArgumentList &TemplateArgs) {
if (NamedDecl *D = dyn_cast<NamedDecl>(DC)) {
@@ -6097,32 +6075,6 @@ static bool isDependentContextAtLevel(DeclContext *DC, unsigned Level) {
return cast<Decl>(DC)->getTemplateDepth() > Level;
}
-/// Find the instantiation of the given declaration within the
-/// current instantiation.
-///
-/// This routine is intended to be used when \p D is a declaration
-/// referenced from within a template, that needs to mapped into the
-/// corresponding declaration within an instantiation. For example,
-/// given:
-///
-/// \code
-/// template<typename T>
-/// struct X {
-/// enum Kind {
-/// KnownValue = sizeof(T)
-/// };
-///
-/// bool getKind() const { return KnownValue; }
-/// };
-///
-/// template struct X<int>;
-/// \endcode
-///
-/// In the instantiation of X<int>::getKind(), we need to map the \p
-/// EnumConstantDecl for \p KnownValue (which refers to
-/// X<T>::<Kind>::KnownValue) to its instantiation (X<int>::<Kind>::KnownValue).
-/// \p FindInstantiatedDecl performs this mapping from within the instantiation
-/// of X<int>.
NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext) {
@@ -6282,10 +6234,25 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
getTrivialTemplateArgumentLoc(UnpackedArg, QualType(), Loc));
}
QualType T = CheckTemplateIdType(TemplateName(TD), Loc, Args);
- if (T.isNull())
+ // We may get a non-null type with errors, in which case
+ // `getAsCXXRecordDecl` will return `nullptr`. For instance, this
+ // happens when one of the template arguments is an invalid
+ // expression. We return early to avoid triggering the assertion
+ // about the `CodeSynthesisContext`.
+ if (T.isNull() || T->containsErrors())
return nullptr;
- auto *SubstRecord = T->getAsCXXRecordDecl();
- assert(SubstRecord && "class template id not a class type?");
+ CXXRecordDecl *SubstRecord = T->getAsCXXRecordDecl();
+
+ if (!SubstRecord) {
+ // T can be a dependent TemplateSpecializationType when performing a
+ // substitution for building a deduction guide.
+ assert(CodeSynthesisContexts.back().Kind ==
+ CodeSynthesisContext::BuildingDeductionGuides);
+ // Return a nullptr as a sentinel value, we handle it properly in
+ // the TemplateInstantiator::TransformInjectedClassNameType
+ // override, which we transform it to a TemplateSpecializationType.
+ return nullptr;
+ }
// Check that this template-id names the primary template and not a
// partial or explicit specialization. (In the latter cases, it's
// meaningless to attempt to find an instantiation of D within the
@@ -6412,8 +6379,6 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
return D;
}
-/// Performs template instantiation for all implicit template
-/// instantiations we have seen until this point.
void Sema::PerformPendingInstantiations(bool LocalOnly) {
std::deque<PendingImplicitInstantiation> delayedPCHInstantiations;
while (!PendingLocalImplicitInstantiations.empty() ||
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
index 4a7872b2cc73..3d4ccaf68c70 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
@@ -66,6 +66,9 @@ namespace {
bool shouldWalkTypesOfTypeLocs() const { return false; }
+ // We need this so we can find e.g. attributes on lambdas.
+ bool shouldVisitImplicitCode() const { return true; }
+
//------------------------------------------------------------------------
// Recording occurrences of (unexpanded) parameter packs.
//------------------------------------------------------------------------
@@ -184,6 +187,15 @@ namespace {
bool TraversePackExpansionTypeLoc(PackExpansionTypeLoc TL) { return true; }
bool TraversePackExpansionExpr(PackExpansionExpr *E) { return true; }
bool TraverseCXXFoldExpr(CXXFoldExpr *E) { return true; }
+ bool TraversePackIndexingExpr(PackIndexingExpr *E) {
+ return inherited::TraverseStmt(E->getIndexExpr());
+ }
+ bool TraversePackIndexingType(PackIndexingType *E) {
+ return inherited::TraverseStmt(E->getIndexExpr());
+ }
+ bool TraversePackIndexingTypeLoc(PackIndexingTypeLoc TL) {
+ return inherited::TraverseStmt(TL.getIndexExpr());
+ }
///@}
@@ -554,6 +566,10 @@ void Sema::collectUnexpandedParameterPacks(
.TraverseDeclarationNameInfo(NameInfo);
}
+void Sema::collectUnexpandedParameterPacks(
+ Expr *E, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseStmt(E);
+}
ParsedTemplateArgument
Sema::ActOnPackExpansion(const ParsedTemplateArgument &Arg,
@@ -865,6 +881,7 @@ std::optional<unsigned> Sema::getNumArgumentsInExpansion(
bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
const DeclSpec &DS = D.getDeclSpec();
switch (DS.getTypeSpecType()) {
+ case TST_typename_pack_indexing:
case TST_typename:
case TST_typeof_unqualType:
case TST_typeofType:
@@ -997,20 +1014,6 @@ class ParameterPackValidatorCCC final : public CorrectionCandidateCallback {
}
-/// Called when an expression computing the size of a parameter pack
-/// is parsed.
-///
-/// \code
-/// template<typename ...Types> struct count {
-/// static const unsigned value = sizeof...(Types);
-/// };
-/// \endcode
-///
-//
-/// \param OpLoc The location of the "sizeof" keyword.
-/// \param Name The name of the parameter pack whose size will be determined.
-/// \param NameLoc The source location of the name of the parameter pack.
-/// \param RParenLoc The location of the closing parentheses.
ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
@@ -1050,8 +1053,7 @@ ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S,
}
if (!ParameterPack || !ParameterPack->isParameterPack()) {
- Diag(NameLoc, diag::err_sizeof_pack_no_pack_name)
- << &Name;
+ Diag(NameLoc, diag::err_expected_name_of_pack) << &Name;
return ExprError();
}
@@ -1061,6 +1063,67 @@ ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S,
RParenLoc);
}
+static bool isParameterPack(Expr *PackExpression) {
+ if (auto *D = dyn_cast<DeclRefExpr>(PackExpression); D) {
+ ValueDecl *VD = D->getDecl();
+ return VD->isParameterPack();
+ }
+ return false;
+}
+
+ExprResult Sema::ActOnPackIndexingExpr(Scope *S, Expr *PackExpression,
+ SourceLocation EllipsisLoc,
+ SourceLocation LSquareLoc,
+ Expr *IndexExpr,
+ SourceLocation RSquareLoc) {
+ bool isParameterPack = ::isParameterPack(PackExpression);
+ if (!isParameterPack) {
+ if (!PackExpression->containsErrors()) {
+ CorrectDelayedTyposInExpr(IndexExpr);
+ Diag(PackExpression->getBeginLoc(), diag::err_expected_name_of_pack)
+ << PackExpression;
+ }
+ return ExprError();
+ }
+ ExprResult Res =
+ BuildPackIndexingExpr(PackExpression, EllipsisLoc, IndexExpr, RSquareLoc);
+ if (!Res.isInvalid())
+ Diag(Res.get()->getBeginLoc(), getLangOpts().CPlusPlus26
+ ? diag::warn_cxx23_pack_indexing
+ : diag::ext_pack_indexing);
+ return Res;
+}
+
+ExprResult
+Sema::BuildPackIndexingExpr(Expr *PackExpression, SourceLocation EllipsisLoc,
+ Expr *IndexExpr, SourceLocation RSquareLoc,
+ ArrayRef<Expr *> ExpandedExprs, bool EmptyPack) {
+
+ std::optional<int64_t> Index;
+ if (!IndexExpr->isInstantiationDependent()) {
+ llvm::APSInt Value(Context.getIntWidth(Context.getSizeType()));
+
+ ExprResult Res = CheckConvertedConstantExpression(
+ IndexExpr, Context.getSizeType(), Value, CCEK_ArrayBound);
+ if (!Res.isUsable())
+ return ExprError();
+ Index = Value.getExtValue();
+ IndexExpr = Res.get();
+ }
+
+ if (Index && (!ExpandedExprs.empty() || EmptyPack)) {
+ if (*Index < 0 || EmptyPack || *Index >= int64_t(ExpandedExprs.size())) {
+ Diag(PackExpression->getBeginLoc(), diag::err_pack_index_out_of_bound)
+ << *Index << PackExpression << ExpandedExprs.size();
+ return ExprError();
+ }
+ }
+
+ return PackIndexingExpr::Create(getASTContext(), EllipsisLoc, RSquareLoc,
+ PackExpression, IndexExpr, Index,
+ ExpandedExprs, EmptyPack);
+}
+
TemplateArgumentLoc Sema::getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis,
std::optional<unsigned> &NumExpansions) const {
@@ -1175,6 +1238,17 @@ std::optional<unsigned> Sema::getFullyPackExpandedSize(TemplateArgument Arg) {
// expanded this pack expansion into the enclosing pack if we could.
if (Elem.isPackExpansion())
return std::nullopt;
+ // Don't guess the size of unexpanded packs. The pack within a template
+ // argument may have yet to be of a PackExpansion type before we see the
+ // ellipsis in the annotation stage.
+ //
+ // This doesn't mean we would invalidate the optimization: Arg can be an
+ // unexpanded pack regardless of Elem's dependence. For instance,
+ // A TemplateArgument that contains either a SubstTemplateTypeParmPackType
+ // or SubstNonTypeTemplateParmPackExpr is always considered Unexpanded, but
+ // the underlying TemplateArgument thereof may not.
+ if (Elem.containsUnexpandedParameterPack())
+ return std::nullopt;
}
return Pack.pack_size();
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaType.cpp b/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
index 92086d7277fd..6fa39cdccef2 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
@@ -31,12 +31,17 @@
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ParsedAttr.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaCUDA.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaOpenMP.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateInstCallback.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
@@ -137,12 +142,18 @@ static void diagnoseBadTypeAttribute(Sema &S, const ParsedAttr &attr,
case ParsedAttr::AT_IntelOclBicc: \
case ParsedAttr::AT_PreserveMost: \
case ParsedAttr::AT_PreserveAll: \
- case ParsedAttr::AT_M68kRTD
+ case ParsedAttr::AT_M68kRTD: \
+ case ParsedAttr::AT_PreserveNone: \
+ case ParsedAttr::AT_RISCVVectorCC
// Function type attributes.
#define FUNCTION_TYPE_ATTRS_CASELIST \
case ParsedAttr::AT_NSReturnsRetained: \
case ParsedAttr::AT_NoReturn: \
+ case ParsedAttr::AT_NonBlocking: \
+ case ParsedAttr::AT_NonAllocating: \
+ case ParsedAttr::AT_Blocking: \
+ case ParsedAttr::AT_Allocating: \
case ParsedAttr::AT_Regparm: \
case ParsedAttr::AT_CmseNSCall: \
case ParsedAttr::AT_ArmStreaming: \
@@ -374,11 +385,10 @@ enum TypeAttrLocation {
static void
processTypeAttrs(TypeProcessingState &state, QualType &type,
TypeAttrLocation TAL, const ParsedAttributesView &attrs,
- Sema::CUDAFunctionTarget CFT = Sema::CFT_HostDevice);
+ CUDAFunctionTarget CFT = CUDAFunctionTarget::HostDevice);
static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
- QualType &type,
- Sema::CUDAFunctionTarget CFT);
+ QualType &type, CUDAFunctionTarget CFT);
static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &state,
ParsedAttr &attr, QualType &type);
@@ -625,7 +635,7 @@ static void distributeFunctionTypeAttr(TypeProcessingState &state,
static bool distributeFunctionTypeAttrToInnermost(
TypeProcessingState &state, ParsedAttr &attr,
ParsedAttributesView &attrList, QualType &declSpecType,
- Sema::CUDAFunctionTarget CFT) {
+ CUDAFunctionTarget CFT) {
Declarator &declarator = state.getDeclarator();
// Put it on the innermost function chunk, if there is one.
@@ -642,10 +652,10 @@ static bool distributeFunctionTypeAttrToInnermost(
/// A function type attribute was written in the decl spec. Try to
/// apply it somewhere.
-static void
-distributeFunctionTypeAttrFromDeclSpec(TypeProcessingState &state,
- ParsedAttr &attr, QualType &declSpecType,
- Sema::CUDAFunctionTarget CFT) {
+static void distributeFunctionTypeAttrFromDeclSpec(TypeProcessingState &state,
+ ParsedAttr &attr,
+ QualType &declSpecType,
+ CUDAFunctionTarget CFT) {
state.saveDeclSpecAttrs();
// Try to distribute to the innermost.
@@ -662,9 +672,10 @@ distributeFunctionTypeAttrFromDeclSpec(TypeProcessingState &state,
/// Try to apply it somewhere.
/// `Attrs` is the attribute list containing the declaration (either of the
/// declarator or the declaration).
-static void distributeFunctionTypeAttrFromDeclarator(
- TypeProcessingState &state, ParsedAttr &attr, QualType &declSpecType,
- Sema::CUDAFunctionTarget CFT) {
+static void distributeFunctionTypeAttrFromDeclarator(TypeProcessingState &state,
+ ParsedAttr &attr,
+ QualType &declSpecType,
+ CUDAFunctionTarget CFT) {
Declarator &declarator = state.getDeclarator();
// Try to distribute to the innermost.
@@ -692,7 +703,7 @@ static void distributeFunctionTypeAttrFromDeclarator(
/// declarator or the declaration).
static void distributeTypeAttrsFromDeclarator(TypeProcessingState &state,
QualType &declSpecType,
- Sema::CUDAFunctionTarget CFT) {
+ CUDAFunctionTarget CFT) {
// The called functions in this loop actually remove things from the current
// list, so iterating over the existing list isn't possible. Instead, make a
// non-owning copy and iterate over that.
@@ -844,419 +855,6 @@ static bool checkOmittedBlockReturnType(Sema &S, Declarator &declarator,
return true;
}
-/// Apply Objective-C type arguments to the given type.
-static QualType applyObjCTypeArgs(Sema &S, SourceLocation loc, QualType type,
- ArrayRef<TypeSourceInfo *> typeArgs,
- SourceRange typeArgsRange, bool failOnError,
- bool rebuilding) {
- // We can only apply type arguments to an Objective-C class type.
- const auto *objcObjectType = type->getAs<ObjCObjectType>();
- if (!objcObjectType || !objcObjectType->getInterface()) {
- S.Diag(loc, diag::err_objc_type_args_non_class)
- << type
- << typeArgsRange;
-
- if (failOnError)
- return QualType();
- return type;
- }
-
- // The class type must be parameterized.
- ObjCInterfaceDecl *objcClass = objcObjectType->getInterface();
- ObjCTypeParamList *typeParams = objcClass->getTypeParamList();
- if (!typeParams) {
- S.Diag(loc, diag::err_objc_type_args_non_parameterized_class)
- << objcClass->getDeclName()
- << FixItHint::CreateRemoval(typeArgsRange);
-
- if (failOnError)
- return QualType();
-
- return type;
- }
-
- // The type must not already be specialized.
- if (objcObjectType->isSpecialized()) {
- S.Diag(loc, diag::err_objc_type_args_specialized_class)
- << type
- << FixItHint::CreateRemoval(typeArgsRange);
-
- if (failOnError)
- return QualType();
-
- return type;
- }
-
- // Check the type arguments.
- SmallVector<QualType, 4> finalTypeArgs;
- unsigned numTypeParams = typeParams->size();
- bool anyPackExpansions = false;
- for (unsigned i = 0, n = typeArgs.size(); i != n; ++i) {
- TypeSourceInfo *typeArgInfo = typeArgs[i];
- QualType typeArg = typeArgInfo->getType();
-
- // Type arguments cannot have explicit qualifiers or nullability.
- // We ignore indirect sources of these, e.g. behind typedefs or
- // template arguments.
- if (TypeLoc qual = typeArgInfo->getTypeLoc().findExplicitQualifierLoc()) {
- bool diagnosed = false;
- SourceRange rangeToRemove;
- if (auto attr = qual.getAs<AttributedTypeLoc>()) {
- rangeToRemove = attr.getLocalSourceRange();
- if (attr.getTypePtr()->getImmediateNullability()) {
- typeArg = attr.getTypePtr()->getModifiedType();
- S.Diag(attr.getBeginLoc(),
- diag::err_objc_type_arg_explicit_nullability)
- << typeArg << FixItHint::CreateRemoval(rangeToRemove);
- diagnosed = true;
- }
- }
-
- // When rebuilding, qualifiers might have gotten here through a
- // final substitution.
- if (!rebuilding && !diagnosed) {
- S.Diag(qual.getBeginLoc(), diag::err_objc_type_arg_qualified)
- << typeArg << typeArg.getQualifiers().getAsString()
- << FixItHint::CreateRemoval(rangeToRemove);
- }
- }
-
- // Remove qualifiers even if they're non-local.
- typeArg = typeArg.getUnqualifiedType();
-
- finalTypeArgs.push_back(typeArg);
-
- if (typeArg->getAs<PackExpansionType>())
- anyPackExpansions = true;
-
- // Find the corresponding type parameter, if there is one.
- ObjCTypeParamDecl *typeParam = nullptr;
- if (!anyPackExpansions) {
- if (i < numTypeParams) {
- typeParam = typeParams->begin()[i];
- } else {
- // Too many arguments.
- S.Diag(loc, diag::err_objc_type_args_wrong_arity)
- << false
- << objcClass->getDeclName()
- << (unsigned)typeArgs.size()
- << numTypeParams;
- S.Diag(objcClass->getLocation(), diag::note_previous_decl)
- << objcClass;
-
- if (failOnError)
- return QualType();
-
- return type;
- }
- }
-
- // Objective-C object pointer types must be substitutable for the bounds.
- if (const auto *typeArgObjC = typeArg->getAs<ObjCObjectPointerType>()) {
- // If we don't have a type parameter to match against, assume
- // everything is fine. There was a prior pack expansion that
- // means we won't be able to match anything.
- if (!typeParam) {
- assert(anyPackExpansions && "Too many arguments?");
- continue;
- }
-
- // Retrieve the bound.
- QualType bound = typeParam->getUnderlyingType();
- const auto *boundObjC = bound->castAs<ObjCObjectPointerType>();
-
- // Determine whether the type argument is substitutable for the bound.
- if (typeArgObjC->isObjCIdType()) {
- // When the type argument is 'id', the only acceptable type
- // parameter bound is 'id'.
- if (boundObjC->isObjCIdType())
- continue;
- } else if (S.Context.canAssignObjCInterfaces(boundObjC, typeArgObjC)) {
- // Otherwise, we follow the assignability rules.
- continue;
- }
-
- // Diagnose the mismatch.
- S.Diag(typeArgInfo->getTypeLoc().getBeginLoc(),
- diag::err_objc_type_arg_does_not_match_bound)
- << typeArg << bound << typeParam->getDeclName();
- S.Diag(typeParam->getLocation(), diag::note_objc_type_param_here)
- << typeParam->getDeclName();
-
- if (failOnError)
- return QualType();
-
- return type;
- }
-
- // Block pointer types are permitted for unqualified 'id' bounds.
- if (typeArg->isBlockPointerType()) {
- // If we don't have a type parameter to match against, assume
- // everything is fine. There was a prior pack expansion that
- // means we won't be able to match anything.
- if (!typeParam) {
- assert(anyPackExpansions && "Too many arguments?");
- continue;
- }
-
- // Retrieve the bound.
- QualType bound = typeParam->getUnderlyingType();
- if (bound->isBlockCompatibleObjCPointerType(S.Context))
- continue;
-
- // Diagnose the mismatch.
- S.Diag(typeArgInfo->getTypeLoc().getBeginLoc(),
- diag::err_objc_type_arg_does_not_match_bound)
- << typeArg << bound << typeParam->getDeclName();
- S.Diag(typeParam->getLocation(), diag::note_objc_type_param_here)
- << typeParam->getDeclName();
-
- if (failOnError)
- return QualType();
-
- return type;
- }
-
- // Dependent types will be checked at instantiation time.
- if (typeArg->isDependentType()) {
- continue;
- }
-
- // Diagnose non-id-compatible type arguments.
- S.Diag(typeArgInfo->getTypeLoc().getBeginLoc(),
- diag::err_objc_type_arg_not_id_compatible)
- << typeArg << typeArgInfo->getTypeLoc().getSourceRange();
-
- if (failOnError)
- return QualType();
-
- return type;
- }
-
- // Make sure we didn't have the wrong number of arguments.
- if (!anyPackExpansions && finalTypeArgs.size() != numTypeParams) {
- S.Diag(loc, diag::err_objc_type_args_wrong_arity)
- << (typeArgs.size() < typeParams->size())
- << objcClass->getDeclName()
- << (unsigned)finalTypeArgs.size()
- << (unsigned)numTypeParams;
- S.Diag(objcClass->getLocation(), diag::note_previous_decl)
- << objcClass;
-
- if (failOnError)
- return QualType();
-
- return type;
- }
-
- // Success. Form the specialized type.
- return S.Context.getObjCObjectType(type, finalTypeArgs, { }, false);
-}
-
-QualType Sema::BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
- SourceLocation ProtocolLAngleLoc,
- ArrayRef<ObjCProtocolDecl *> Protocols,
- ArrayRef<SourceLocation> ProtocolLocs,
- SourceLocation ProtocolRAngleLoc,
- bool FailOnError) {
- QualType Result = QualType(Decl->getTypeForDecl(), 0);
- if (!Protocols.empty()) {
- bool HasError;
- Result = Context.applyObjCProtocolQualifiers(Result, Protocols,
- HasError);
- if (HasError) {
- Diag(SourceLocation(), diag::err_invalid_protocol_qualifiers)
- << SourceRange(ProtocolLAngleLoc, ProtocolRAngleLoc);
- if (FailOnError) Result = QualType();
- }
- if (FailOnError && Result.isNull())
- return QualType();
- }
-
- return Result;
-}
-
-QualType Sema::BuildObjCObjectType(
- QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc,
- ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc,
- SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols,
- ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc,
- bool FailOnError, bool Rebuilding) {
- QualType Result = BaseType;
- if (!TypeArgs.empty()) {
- Result =
- applyObjCTypeArgs(*this, Loc, Result, TypeArgs,
- SourceRange(TypeArgsLAngleLoc, TypeArgsRAngleLoc),
- FailOnError, Rebuilding);
- if (FailOnError && Result.isNull())
- return QualType();
- }
-
- if (!Protocols.empty()) {
- bool HasError;
- Result = Context.applyObjCProtocolQualifiers(Result, Protocols,
- HasError);
- if (HasError) {
- Diag(Loc, diag::err_invalid_protocol_qualifiers)
- << SourceRange(ProtocolLAngleLoc, ProtocolRAngleLoc);
- if (FailOnError) Result = QualType();
- }
- if (FailOnError && Result.isNull())
- return QualType();
- }
-
- return Result;
-}
-
-TypeResult Sema::actOnObjCProtocolQualifierType(
- SourceLocation lAngleLoc,
- ArrayRef<Decl *> protocols,
- ArrayRef<SourceLocation> protocolLocs,
- SourceLocation rAngleLoc) {
- // Form id<protocol-list>.
- QualType Result = Context.getObjCObjectType(
- Context.ObjCBuiltinIdTy, {},
- llvm::ArrayRef((ObjCProtocolDecl *const *)protocols.data(),
- protocols.size()),
- false);
- Result = Context.getObjCObjectPointerType(Result);
-
- TypeSourceInfo *ResultTInfo = Context.CreateTypeSourceInfo(Result);
- TypeLoc ResultTL = ResultTInfo->getTypeLoc();
-
- auto ObjCObjectPointerTL = ResultTL.castAs<ObjCObjectPointerTypeLoc>();
- ObjCObjectPointerTL.setStarLoc(SourceLocation()); // implicit
-
- auto ObjCObjectTL = ObjCObjectPointerTL.getPointeeLoc()
- .castAs<ObjCObjectTypeLoc>();
- ObjCObjectTL.setHasBaseTypeAsWritten(false);
- ObjCObjectTL.getBaseLoc().initialize(Context, SourceLocation());
-
- // No type arguments.
- ObjCObjectTL.setTypeArgsLAngleLoc(SourceLocation());
- ObjCObjectTL.setTypeArgsRAngleLoc(SourceLocation());
-
- // Fill in protocol qualifiers.
- ObjCObjectTL.setProtocolLAngleLoc(lAngleLoc);
- ObjCObjectTL.setProtocolRAngleLoc(rAngleLoc);
- for (unsigned i = 0, n = protocols.size(); i != n; ++i)
- ObjCObjectTL.setProtocolLoc(i, protocolLocs[i]);
-
- // We're done. Return the completed type to the parser.
- return CreateParsedType(Result, ResultTInfo);
-}
-
-TypeResult Sema::actOnObjCTypeArgsAndProtocolQualifiers(
- Scope *S,
- SourceLocation Loc,
- ParsedType BaseType,
- SourceLocation TypeArgsLAngleLoc,
- ArrayRef<ParsedType> TypeArgs,
- SourceLocation TypeArgsRAngleLoc,
- SourceLocation ProtocolLAngleLoc,
- ArrayRef<Decl *> Protocols,
- ArrayRef<SourceLocation> ProtocolLocs,
- SourceLocation ProtocolRAngleLoc) {
- TypeSourceInfo *BaseTypeInfo = nullptr;
- QualType T = GetTypeFromParser(BaseType, &BaseTypeInfo);
- if (T.isNull())
- return true;
-
- // Handle missing type-source info.
- if (!BaseTypeInfo)
- BaseTypeInfo = Context.getTrivialTypeSourceInfo(T, Loc);
-
- // Extract type arguments.
- SmallVector<TypeSourceInfo *, 4> ActualTypeArgInfos;
- for (unsigned i = 0, n = TypeArgs.size(); i != n; ++i) {
- TypeSourceInfo *TypeArgInfo = nullptr;
- QualType TypeArg = GetTypeFromParser(TypeArgs[i], &TypeArgInfo);
- if (TypeArg.isNull()) {
- ActualTypeArgInfos.clear();
- break;
- }
-
- assert(TypeArgInfo && "No type source info?");
- ActualTypeArgInfos.push_back(TypeArgInfo);
- }
-
- // Build the object type.
- QualType Result = BuildObjCObjectType(
- T, BaseTypeInfo->getTypeLoc().getSourceRange().getBegin(),
- TypeArgsLAngleLoc, ActualTypeArgInfos, TypeArgsRAngleLoc,
- ProtocolLAngleLoc,
- llvm::ArrayRef((ObjCProtocolDecl *const *)Protocols.data(),
- Protocols.size()),
- ProtocolLocs, ProtocolRAngleLoc,
- /*FailOnError=*/false,
- /*Rebuilding=*/false);
-
- if (Result == T)
- return BaseType;
-
- // Create source information for this type.
- TypeSourceInfo *ResultTInfo = Context.CreateTypeSourceInfo(Result);
- TypeLoc ResultTL = ResultTInfo->getTypeLoc();
-
- // For id<Proto1, Proto2> or Class<Proto1, Proto2>, we'll have an
- // object pointer type. Fill in source information for it.
- if (auto ObjCObjectPointerTL = ResultTL.getAs<ObjCObjectPointerTypeLoc>()) {
- // The '*' is implicit.
- ObjCObjectPointerTL.setStarLoc(SourceLocation());
- ResultTL = ObjCObjectPointerTL.getPointeeLoc();
- }
-
- if (auto OTPTL = ResultTL.getAs<ObjCTypeParamTypeLoc>()) {
- // Protocol qualifier information.
- if (OTPTL.getNumProtocols() > 0) {
- assert(OTPTL.getNumProtocols() == Protocols.size());
- OTPTL.setProtocolLAngleLoc(ProtocolLAngleLoc);
- OTPTL.setProtocolRAngleLoc(ProtocolRAngleLoc);
- for (unsigned i = 0, n = Protocols.size(); i != n; ++i)
- OTPTL.setProtocolLoc(i, ProtocolLocs[i]);
- }
-
- // We're done. Return the completed type to the parser.
- return CreateParsedType(Result, ResultTInfo);
- }
-
- auto ObjCObjectTL = ResultTL.castAs<ObjCObjectTypeLoc>();
-
- // Type argument information.
- if (ObjCObjectTL.getNumTypeArgs() > 0) {
- assert(ObjCObjectTL.getNumTypeArgs() == ActualTypeArgInfos.size());
- ObjCObjectTL.setTypeArgsLAngleLoc(TypeArgsLAngleLoc);
- ObjCObjectTL.setTypeArgsRAngleLoc(TypeArgsRAngleLoc);
- for (unsigned i = 0, n = ActualTypeArgInfos.size(); i != n; ++i)
- ObjCObjectTL.setTypeArgTInfo(i, ActualTypeArgInfos[i]);
- } else {
- ObjCObjectTL.setTypeArgsLAngleLoc(SourceLocation());
- ObjCObjectTL.setTypeArgsRAngleLoc(SourceLocation());
- }
-
- // Protocol qualifier information.
- if (ObjCObjectTL.getNumProtocols() > 0) {
- assert(ObjCObjectTL.getNumProtocols() == Protocols.size());
- ObjCObjectTL.setProtocolLAngleLoc(ProtocolLAngleLoc);
- ObjCObjectTL.setProtocolRAngleLoc(ProtocolRAngleLoc);
- for (unsigned i = 0, n = Protocols.size(); i != n; ++i)
- ObjCObjectTL.setProtocolLoc(i, ProtocolLocs[i]);
- } else {
- ObjCObjectTL.setProtocolLAngleLoc(SourceLocation());
- ObjCObjectTL.setProtocolRAngleLoc(SourceLocation());
- }
-
- // Base type.
- ObjCObjectTL.setHasBaseTypeAsWritten(true);
- if (ObjCObjectTL.getType() == T)
- ObjCObjectTL.getBaseLoc().initializeFullCopy(BaseTypeInfo->getTypeLoc());
- else
- ObjCObjectTL.getBaseLoc().initialize(Context, Loc);
-
- // We're done. Return the completed type to the parser.
- return CreateParsedType(Result, ResultTInfo);
-}
-
static OpenCLAccessAttr::Spelling
getImageAccess(const ParsedAttributesView &Attrs) {
for (const ParsedAttr &AL : Attrs)
@@ -1560,7 +1158,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
break;
case DeclSpec::TST_float128:
if (!S.Context.getTargetInfo().hasFloat128Type() &&
- !S.getLangOpts().SYCLIsDevice &&
+ !S.getLangOpts().SYCLIsDevice && !S.getLangOpts().CUDAIsDevice &&
!(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsTargetDevice))
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
<< "__float128";
@@ -1667,6 +1265,19 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
}
break;
}
+ case DeclSpec::TST_typename_pack_indexing: {
+ Expr *E = DS.getPackIndexingExpr();
+ assert(E && "Didn't get an expression for pack indexing");
+ QualType Pattern = S.GetTypeFromParser(DS.getRepAsType());
+ Result = S.BuildPackIndexingType(Pattern, E, DS.getBeginLoc(),
+ DS.getEllipsisLoc());
+ if (Result.isNull()) {
+ declarator.setInvalidType(true);
+ Result = Context.IntTy;
+ }
+ break;
+ }
+
#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) case DeclSpec::TST_##Trait:
#include "clang/Basic/TransformTypeTraits.def"
Result = S.GetTypeFromParser(DS.getRepAsType());
@@ -1812,7 +1423,9 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
Result = Context.getVectorType(Result, 128/typeSize, VecKind);
}
- // FIXME: Imaginary.
+ // _Imaginary was a feature of C99 through C23 but was never supported in
+ // Clang. The feature was removed in C2y, but we retain the unsupported
+ // diagnostic for an improved user experience.
if (DS.getTypeSpecComplex() == DeclSpec::TSC_imaginary)
S.Diag(DS.getTypeSpecComplexLoc(), diag::err_imaginary_not_supported);
@@ -2038,7 +1651,6 @@ QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
return BuildQualifiedType(T, Loc, Q, DS);
}
-/// Build a paren type including \p T.
QualType Sema::BuildParenType(QualType T) {
return Context.getParenType(T);
}
@@ -2169,19 +1781,6 @@ static QualType deduceOpenCLPointeeAddrSpace(Sema &S, QualType PointeeType) {
return PointeeType;
}
-/// Build a pointer type.
-///
-/// \param T The type to which we'll be building a pointer.
-///
-/// \param Loc The location of the entity whose type involves this
-/// pointer type or, if there is no such entity, the location of the
-/// type that will have pointer type.
-///
-/// \param Entity The name of the entity that involves the pointer
-/// type, if known.
-///
-/// \returns A suitable pointer type, if there are no
-/// errors. Otherwise, returns a NULL type.
QualType Sema::BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity) {
if (T->isReferenceType()) {
@@ -2234,19 +1833,6 @@ QualType Sema::BuildPointerType(QualType T,
return Context.getPointerType(T);
}
-/// Build a reference type.
-///
-/// \param T The type to which we'll be building a reference.
-///
-/// \param Loc The location of the entity whose type involves this
-/// reference type or, if there is no such entity, the location of the
-/// type that will have reference type.
-///
-/// \param Entity The name of the entity that involves the reference
-/// type, if known.
-///
-/// \returns A suitable reference type, if there are no
-/// errors. Otherwise, returns a NULL type.
QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
SourceLocation Loc,
DeclarationName Entity) {
@@ -2322,38 +1908,14 @@ QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
return Context.getRValueReferenceType(T);
}
-/// Build a Read-only Pipe type.
-///
-/// \param T The type to which we'll be building a Pipe.
-///
-/// \param Loc We do not use it for now.
-///
-/// \returns A suitable pipe type, if there are no errors. Otherwise, returns a
-/// NULL type.
QualType Sema::BuildReadPipeType(QualType T, SourceLocation Loc) {
return Context.getReadPipeType(T);
}
-/// Build a Write-only Pipe type.
-///
-/// \param T The type to which we'll be building a Pipe.
-///
-/// \param Loc We do not use it for now.
-///
-/// \returns A suitable pipe type, if there are no errors. Otherwise, returns a
-/// NULL type.
QualType Sema::BuildWritePipeType(QualType T, SourceLocation Loc) {
return Context.getWritePipeType(T);
}
-/// Build a bit-precise integer type.
-///
-/// \param IsUnsigned Boolean representing the signedness of the type.
-///
-/// \param BitWidth Size of this int type in bits, or an expression representing
-/// that.
-///
-/// \param Loc Location of the keyword.
QualType Sema::BuildBitIntType(bool IsUnsigned, Expr *BitWidth,
SourceLocation Loc) {
if (BitWidth->isInstantiationDependent())
@@ -2462,21 +2024,6 @@ bool Sema::checkArrayElementAlignment(QualType EltTy, SourceLocation Loc) {
return false;
}
-/// Build an array type.
-///
-/// \param T The type of each element in the array.
-///
-/// \param ASM C99 array size modifier (e.g., '*', 'static').
-///
-/// \param ArraySize Expression describing the size of the array.
-///
-/// \param Brackets The range from the opening '[' to the closing ']'.
-///
-/// \param Entity The name of the entity that involves the array
-/// type, if known.
-///
-/// \returns A suitable array type, if there are no errors. Otherwise,
-/// returns a NULL type.
QualType Sema::BuildArrayType(QualType T, ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity) {
@@ -2618,7 +2165,7 @@ QualType Sema::BuildArrayType(QualType T, ArraySizeModifier ASM,
} else if (isSFINAEContext()) {
VLADiag = diag::err_vla_in_sfinae;
VLAIsError = true;
- } else if (getLangOpts().OpenMP && isInOpenMPTaskUntiedContext()) {
+ } else if (getLangOpts().OpenMP && OpenMP().isInOpenMPTaskUntiedContext()) {
VLADiag = diag::err_openmp_vla_in_task_untied;
VLAIsError = true;
} else if (getLangOpts().CPlusPlus) {
@@ -2714,7 +2261,7 @@ QualType Sema::BuildArrayType(QualType T, ArraySizeModifier ASM,
bool IsCUDADevice = (getLangOpts().CUDA && getLangOpts().CUDAIsDevice);
targetDiag(Loc,
IsCUDADevice ? diag::err_cuda_vla : diag::err_vla_unsupported)
- << (IsCUDADevice ? CurrentCUDATarget() : 0);
+ << (IsCUDADevice ? llvm::to_underlying(CUDA().CurrentTarget()) : 0);
} else if (sema::FunctionScopeInfo *FSI = getCurFunction()) {
// VLAs are supported on this target, but we may need to do delayed
// checking that the VLA is not being used within a coroutine.
@@ -2816,9 +2363,6 @@ QualType Sema::BuildVectorType(QualType CurType, Expr *SizeExpr,
VectorKind::Generic);
}
-/// Build an ext-vector type.
-///
-/// Run the required checks for the extended vector type.
QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc) {
// Unlike gcc's vector_size attribute, we do not allow vectors to be defined
@@ -2829,7 +2373,7 @@ QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize,
// on bitvectors, and we have no well-defined ABI for bitvectors, so vectors
// of bool aren't allowed.
//
- // We explictly allow bool elements in ext_vector_type for C/C++.
+ // We explicitly allow bool elements in ext_vector_type for C/C++.
bool IsNoBoolVecLang = getLangOpts().OpenCL || getLangOpts().OpenCLCPlusPlus;
if ((!T->isDependentType() && !T->isIntegerType() &&
!T->isRealFloatingType()) ||
@@ -3098,7 +2642,7 @@ QualType Sema::BuildFunctionType(QualType T,
if (EPI.ExtInfo.getProducesResult()) {
// This is just a warning, so we can't fail to build if we see it.
- checkNSReturnsRetainedReturnType(Loc, T);
+ ObjC().checkNSReturnsRetainedReturnType(Loc, T);
}
if (Invalid)
@@ -3107,15 +2651,6 @@ QualType Sema::BuildFunctionType(QualType T,
return Context.getFunctionType(T, ParamTypes, EPI);
}
-/// Build a member pointer type \c T Class::*.
-///
-/// \param T the type to which the member pointer refers.
-/// \param Class the class type into which the member pointer points.
-/// \param Loc the location where this type begins
-/// \param Entity the name of the entity that will have this member pointer type
-///
-/// \returns a member pointer type, if successful, or a NULL type if there was
-/// an error.
QualType Sema::BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity) {
@@ -3168,17 +2703,6 @@ QualType Sema::BuildMemberPointerType(QualType T, QualType Class,
return Context.getMemberPointerType(T, Class.getTypePtr());
}
-/// Build a block pointer type.
-///
-/// \param T The type to which we'll be building a block pointer.
-///
-/// \param Loc The source location, used for diagnostics.
-///
-/// \param Entity The name of the entity that involves the block pointer
-/// type, if known.
-///
-/// \returns A suitable block pointer type, if there are no
-/// errors. Otherwise, returns a NULL type.
QualType Sema::BuildBlockPointerType(QualType T,
SourceLocation Loc,
DeclarationName Entity) {
@@ -3485,7 +3009,7 @@ InventTemplateParameter(TypeProcessingState &state, QualType T,
if (!Invalid) {
S.AttachTypeConstraint(
AutoLoc.getNestedNameSpecifierLoc(), AutoLoc.getConceptNameInfo(),
- AutoLoc.getNamedConcept(),
+ AutoLoc.getNamedConcept(), /*FoundDecl=*/AutoLoc.getFoundDecl(),
AutoLoc.hasExplicitTemplateArgs() ? &TAL : nullptr,
InventedTemplateParam, D.getEllipsisLoc());
}
@@ -3493,7 +3017,8 @@ InventTemplateParameter(TypeProcessingState &state, QualType T,
// The 'auto' appears in the decl-specifiers; we've not finished forming
// TypeSourceInfo for it yet.
TemplateIdAnnotation *TemplateId = D.getDeclSpec().getRepAsTemplateId();
- TemplateArgumentListInfo TemplateArgsInfo;
+ TemplateArgumentListInfo TemplateArgsInfo(TemplateId->LAngleLoc,
+ TemplateId->RAngleLoc);
bool Invalid = false;
if (TemplateId->LAngleLoc.isValid()) {
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
@@ -3511,11 +3036,17 @@ InventTemplateParameter(TypeProcessingState &state, QualType T,
}
}
if (!Invalid) {
+ UsingShadowDecl *USD =
+ TemplateId->Template.get().getAsUsingShadowDecl();
+ auto *CD =
+ cast<ConceptDecl>(TemplateId->Template.get().getAsTemplateDecl());
S.AttachTypeConstraint(
D.getDeclSpec().getTypeSpecScope().getWithLocInContext(S.Context),
DeclarationNameInfo(DeclarationName(TemplateId->Name),
TemplateId->TemplateNameLoc),
- cast<ConceptDecl>(TemplateId->Template.get().getAsTemplateDecl()),
+ CD,
+ /*FoundDecl=*/
+ USD ? cast<NamedDecl>(USD) : CD,
TemplateId->LAngleLoc.isValid() ? &TemplateArgsInfo : nullptr,
InventedTemplateParam, D.getEllipsisLoc());
}
@@ -3591,7 +3122,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
// D.getDeclarationAttributes()) because those are always C++11 attributes,
// and those don't get distributed.
distributeTypeAttrsFromDeclarator(
- state, T, SemaRef.IdentifyCUDATarget(D.getAttributes()));
+ state, T, SemaRef.CUDA().IdentifyTarget(D.getAttributes()));
// Find the deduced type in this type. Look in the trailing return type if we
// have one, otherwise in the DeclSpec type.
@@ -3644,9 +3175,13 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
Info = &SemaRef.InventedParameterInfos.back();
} else {
// In C++14, generic lambdas allow 'auto' in their parameters.
- if (!SemaRef.getLangOpts().CPlusPlus14 || !Auto ||
- Auto->getKeyword() != AutoTypeKeyword::Auto) {
- Error = 16;
+ if (!SemaRef.getLangOpts().CPlusPlus14 && Auto &&
+ Auto->getKeyword() == AutoTypeKeyword::Auto) {
+ Error = 25; // auto not allowed in lambda parameter (before C++14)
+ break;
+ } else if (!Auto || Auto->getKeyword() != AutoTypeKeyword::Auto) {
+ Error = 16; // __auto_type or decltype(auto) not allowed in lambda
+ // parameter
break;
}
Info = SemaRef.getCurLambda();
@@ -3661,8 +3196,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
break;
}
case DeclaratorContext::Member: {
- if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static ||
- D.isFunctionDeclarator())
+ if (D.isStaticMember() || D.isFunctionDeclarator())
break;
bool Cxx = SemaRef.getLangOpts().CPlusPlus;
if (isa<ObjCContainerDecl>(SemaRef.CurContext)) {
@@ -4112,7 +3646,7 @@ static CallingConv getCCForDeclaratorChunk(
// handleFunctionTypeAttr.
CallingConv CC;
if (!S.CheckCallingConvAttr(AL, CC, /*FunctionDecl=*/nullptr,
- S.IdentifyCUDATarget(D.getAttributes())) &&
+ S.CUDA().IdentifyTarget(D.getAttributes())) &&
(!FTI.isVariadic || supportsVariadicCall(CC))) {
return CC;
}
@@ -4230,14 +3764,6 @@ IdentifierInfo *Sema::getNullabilityKeyword(NullabilityKind nullability) {
llvm_unreachable("Unknown nullability kind.");
}
-/// Retrieve the identifier "NSError".
-IdentifierInfo *Sema::getNSErrorIdent() {
- if (!Ident_NSError)
- Ident_NSError = PP.getIdentifierInfo("NSError");
-
- return Ident_NSError;
-}
-
/// Check whether there is a nullability attribute of any kind in the given
/// attribute list.
static bool hasNullabilityAttr(const ParsedAttributesView &attrs) {
@@ -4363,7 +3889,7 @@ classifyPointerDeclarator(Sema &S, QualType type, Declarator &declarator,
// If this is NSError**, report that.
if (auto objcClassDecl = objcObjectPtr->getInterfaceDecl()) {
- if (objcClassDecl->getIdentifier() == S.getNSErrorIdent() &&
+ if (objcClassDecl->getIdentifier() == S.ObjC().getNSErrorIdent() &&
numNormalPointers == 2 && numTypeSpecifierPointers < 2) {
return PointerDeclaratorKind::NSErrorPointerPointer;
}
@@ -4374,7 +3900,8 @@ classifyPointerDeclarator(Sema &S, QualType type, Declarator &declarator,
// Look at Objective-C class types.
if (auto objcClass = type->getAs<ObjCInterfaceType>()) {
- if (objcClass->getInterface()->getIdentifier() == S.getNSErrorIdent()) {
+ if (objcClass->getInterface()->getIdentifier() ==
+ S.ObjC().getNSErrorIdent()) {
if (numNormalPointers == 2 && numTypeSpecifierPointers < 2)
return PointerDeclaratorKind::NSErrorPointerPointer;
}
@@ -4391,7 +3918,7 @@ classifyPointerDeclarator(Sema &S, QualType type, Declarator &declarator,
// If this is CFErrorRef*, report it as such.
if (numNormalPointers == 2 && numTypeSpecifierPointers < 2 &&
- S.isCFError(recordDecl)) {
+ S.ObjC().isCFError(recordDecl)) {
return PointerDeclaratorKind::CFErrorRefPointer;
}
break;
@@ -4415,31 +3942,6 @@ classifyPointerDeclarator(Sema &S, QualType type, Declarator &declarator,
}
}
-bool Sema::isCFError(RecordDecl *RD) {
- // If we already know about CFError, test it directly.
- if (CFError)
- return CFError == RD;
-
- // Check whether this is CFError, which we identify based on its bridge to
- // NSError. CFErrorRef used to be declared with "objc_bridge" but is now
- // declared with "objc_bridge_mutable", so look for either one of the two
- // attributes.
- if (RD->getTagKind() == TagTypeKind::Struct) {
- IdentifierInfo *bridgedType = nullptr;
- if (auto bridgeAttr = RD->getAttr<ObjCBridgeAttr>())
- bridgedType = bridgeAttr->getBridgedType();
- else if (auto bridgeAttr = RD->getAttr<ObjCBridgeMutableAttr>())
- bridgedType = bridgeAttr->getBridgedType();
-
- if (bridgedType == getNSErrorIdent()) {
- CFError = RD;
- return true;
- }
- }
-
- return false;
-}
-
static FileID getNullabilityCompletenessCheckFileID(Sema &S,
SourceLocation loc) {
// If we're anywhere in a function, method, or closure context, don't perform
@@ -4691,6 +4193,19 @@ static bool DiagnoseMultipleAddrSpaceAttributes(Sema &S, LangAS ASOld,
return false;
}
+// Whether this is a type broadly expected to have nullability attached.
+// These types are affected by `#pragma assume_nonnull`, and missing nullability
+// will be diagnosed with -Wnullability-completeness.
+static bool shouldHaveNullability(QualType T) {
+ return T->canHaveNullability(/*ResultIfUnknown=*/false) &&
+ // For now, do not infer/require nullability on C++ smart pointers.
+ // It's unclear whether the pragma's behavior is useful for C++.
+ // e.g. treating type-aliases and template-type-parameters differently
+ // from types of declarations can be surprising.
+ !isa<RecordType, TemplateSpecializationType>(
+ T->getCanonicalTypeInternal());
+}
+
static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
QualType declSpecType,
TypeSourceInfo *TInfo) {
@@ -4809,8 +4324,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// inner pointers.
complainAboutMissingNullability = CAMN_InnerPointers;
- if (T->canHaveNullability(/*ResultIfUnknown*/ false) &&
- !T->getNullability()) {
+ if (shouldHaveNullability(T) && !T->getNullability()) {
// Note that we allow but don't require nullability on dependent types.
++NumPointersRemaining;
}
@@ -5033,8 +4547,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// If the type itself could have nullability but does not, infer pointer
// nullability and perform consistency checking.
if (S.CodeSynthesisContexts.empty()) {
- if (T->canHaveNullability(/*ResultIfUnknown*/ false) &&
- !T->getNullability()) {
+ if (shouldHaveNullability(T) && !T->getNullability()) {
if (isVaList(T)) {
// Record that we've seen a pointer, but do nothing else.
if (NumPointersRemaining > 0)
@@ -5249,6 +4762,61 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Check for auto functions and trailing return type and adjust the
// return type accordingly.
if (!D.isInvalidType()) {
+ auto IsClassType = [&](CXXScopeSpec &SS) {
+      // If there already was a problem with the scope, don't issue another
+      // error about the explicit object parameter.
+ return SS.isInvalid() ||
+ isa_and_present<CXXRecordDecl>(S.computeDeclContext(SS));
+ };
+
+ // C++23 [dcl.fct]p6:
+ //
+ // An explicit-object-parameter-declaration is a parameter-declaration
+ // with a this specifier. An explicit-object-parameter-declaration shall
+ // appear only as the first parameter-declaration of a
+ // parameter-declaration-list of one of:
+ //
+ // - a declaration of a member function or member function template
+ // ([class.mem]), or
+ //
+ // - an explicit instantiation ([temp.explicit]) or explicit
+ // specialization ([temp.expl.spec]) of a templated member function,
+ // or
+ //
+ // - a lambda-declarator [expr.prim.lambda].
+ DeclaratorContext C = D.getContext();
+ ParmVarDecl *First =
+ FTI.NumParams
+ ? dyn_cast_if_present<ParmVarDecl>(FTI.Params[0].Param)
+ : nullptr;
+
+ bool IsFunctionDecl = D.getInnermostNonParenChunk() == &DeclType;
+ if (First && First->isExplicitObjectParameter() &&
+ C != DeclaratorContext::LambdaExpr &&
+
+ // Either not a member or nested declarator in a member.
+ //
+ // Note that e.g. 'static' or 'friend' declarations are accepted
+ // here; we diagnose them later when we build the member function
+ // because it's easier that way.
+ (C != DeclaratorContext::Member || !IsFunctionDecl) &&
+
+ // Allow out-of-line definitions of member functions.
+ !IsClassType(D.getCXXScopeSpec())) {
+ if (IsFunctionDecl)
+ S.Diag(First->getBeginLoc(),
+ diag::err_explicit_object_parameter_nonmember)
+ << /*non-member*/ 2 << /*function*/ 0
+ << First->getSourceRange();
+ else
+ S.Diag(First->getBeginLoc(),
+ diag::err_explicit_object_parameter_invalid)
+ << First->getSourceRange();
+
+ D.setInvalidType();
+ AreDeclaratorChunksValid = false;
+ }
+
// trailing-return-type is only required if we're declaring a function,
// and not, for instance, a pointer to a function.
if (D.getDeclSpec().hasAutoTypeSpec() &&
@@ -5788,7 +5356,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// See if there are any attributes on this declarator chunk.
processTypeAttrs(state, T, TAL_DeclChunk, DeclType.getAttrs(),
- S.IdentifyCUDATarget(D.getAttributes()));
+ S.CUDA().IdentifyTarget(D.getAttributes()));
if (DeclType.Kind != DeclaratorChunk::Paren) {
if (ExpectNoDerefChunk && !IsNoDerefableChunk(DeclType))
@@ -5894,6 +5462,11 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// - the type-id in the default argument of a type-parameter, or
// - the type-id of a template-argument for a type-parameter
//
+ // C++23 [dcl.fct]p6 (P0847R7)
+ // ... A member-declarator with an explicit-object-parameter-declaration
+ // shall not include a ref-qualifier or a cv-qualifier-seq and shall not be
+ // declared static or virtual ...
+ //
// FIXME: Checking this here is insufficient. We accept-invalid on:
//
// template<typename T> struct S { void f(T); };
@@ -5901,8 +5474,12 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
//
// ... for instance.
if (IsQualifiedFunction &&
- !(Kind == Member && !D.isExplicitObjectMemberFunction() &&
- D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static) &&
+      // Check for a non-static member function that is not an
+      // explicit-object-parameter-declaration.
+ (Kind != Member || D.isExplicitObjectMemberFunction() ||
+ D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static ||
+ (D.getContext() == clang::DeclaratorContext::Member &&
+ D.isStaticMember())) &&
!IsTypedefName && D.getContext() != DeclaratorContext::TemplateArg &&
D.getContext() != DeclaratorContext::TemplateTypeArg) {
SourceLocation Loc = D.getBeginLoc();
@@ -6065,11 +5642,6 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
return GetTypeSourceInfoForDeclarator(state, T, TInfo);
}
-/// GetTypeForDeclarator - Convert the type for the specified
-/// declarator to Type instances.
-///
-/// The result of this call will never be null, but the associated
-/// type may be a null type if there's an unrecoverable error.
TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D) {
// Determine the type of the declarator. Not all forms of declarator
// have a type.
@@ -6312,6 +5884,10 @@ namespace {
TL.setDecltypeLoc(DS.getTypeSpecTypeLoc());
TL.setRParenLoc(DS.getTypeofParensRange().getEnd());
}
+ void VisitPackIndexingTypeLoc(PackIndexingTypeLoc TL) {
+ assert(DS.getTypeSpecType() == DeclSpec::TST_typename_pack_indexing);
+ TL.setEllipsisLoc(DS.getEllipsisLoc());
+ }
void VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
assert(DS.isTransformTypeTrait(DS.getTypeSpecType()));
TL.setKWLoc(DS.getTypeSpecTypeLoc());
@@ -6396,9 +5972,16 @@ namespace {
DeclarationNameInfo DNI = DeclarationNameInfo(
TL.getTypePtr()->getTypeConstraintConcept()->getDeclName(),
TemplateId->TemplateNameLoc);
+
+ NamedDecl *FoundDecl;
+ if (auto TN = TemplateId->Template.get();
+ UsingShadowDecl *USD = TN.getAsUsingShadowDecl())
+ FoundDecl = cast<NamedDecl>(USD);
+ else
+ FoundDecl = cast_if_present<NamedDecl>(TN.getAsTemplateDecl());
+
auto *CR = ConceptReference::Create(
- Context, NNS, TemplateId->TemplateKWLoc, DNI,
- /*FoundDecl=*/nullptr,
+ Context, NNS, TemplateId->TemplateKWLoc, DNI, FoundDecl,
/*NamedDecl=*/TL.getTypePtr()->getTypeConstraintConcept(),
ASTTemplateArgumentListInfo::Create(Context, TemplateArgsInfo));
TL.setConceptReference(CR);
@@ -6463,10 +6046,16 @@ namespace {
void VisitDecayedTypeLoc(DecayedTypeLoc TL) {
llvm_unreachable("decayed type locs not expected here!");
}
+ void VisitArrayParameterTypeLoc(ArrayParameterTypeLoc TL) {
+ llvm_unreachable("array parameter type locs not expected here!");
+ }
void VisitAttributedTypeLoc(AttributedTypeLoc TL) {
fillAttributedTypeLoc(TL, State);
}
+ void VisitCountAttributedTypeLoc(CountAttributedTypeLoc TL) {
+ // nothing
+ }
void VisitBTFTagAttributedTypeLoc(BTFTagAttributedTypeLoc TL) {
// nothing
}
@@ -6774,12 +6363,10 @@ TypeResult Sema::ActOnTypeName(Declarator &D) {
CheckExtraCXXDefaultArguments(D);
}
- return CreateParsedType(T, TInfo);
-}
-
-ParsedType Sema::ActOnObjCInstanceType(SourceLocation Loc) {
- QualType T = Context.getObjCInstanceType();
- TypeSourceInfo *TInfo = Context.getTrivialTypeSourceInfo(T, Loc);
+ if (AutoTypeLoc TL = TInfo->getTypeLoc().getContainedAutoTypeLoc()) {
+ const AutoType *AT = TL.getTypePtr();
+ CheckConstrainedAuto(AT, TL.getConceptNameLoc());
+ }
return CreateParsedType(T, TInfo);
}
@@ -6834,10 +6421,6 @@ static bool BuildAddressSpaceIndex(Sema &S, LangAS &ASIdx,
return true;
}
-/// BuildAddressSpaceAttr - Builds a DependentAddressSpaceType if an expression
-/// is uninstantiated. If instantiated it will apply the appropriate address
-/// space to the type. This function allows dependent template variables to be
-/// used in conjunction with the address_space attribute
QualType Sema::BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc) {
if (!AddrSpace->isValueDependent()) {
@@ -7893,10 +7476,119 @@ static Attr *getCCTypeAttr(ASTContext &Ctx, ParsedAttr &Attr) {
return createSimpleAttr<PreserveAllAttr>(Ctx, Attr);
case ParsedAttr::AT_M68kRTD:
return createSimpleAttr<M68kRTDAttr>(Ctx, Attr);
+ case ParsedAttr::AT_PreserveNone:
+ return createSimpleAttr<PreserveNoneAttr>(Ctx, Attr);
+ case ParsedAttr::AT_RISCVVectorCC:
+ return createSimpleAttr<RISCVVectorCCAttr>(Ctx, Attr);
}
llvm_unreachable("unexpected attribute kind!");
}
+std::optional<FunctionEffectMode>
+Sema::ActOnEffectExpression(Expr *CondExpr, StringRef AttributeName) {
+ if (CondExpr->isTypeDependent() || CondExpr->isValueDependent())
+ return FunctionEffectMode::Dependent;
+
+ std::optional<llvm::APSInt> ConditionValue =
+ CondExpr->getIntegerConstantExpr(Context);
+ if (!ConditionValue) {
+ // FIXME: err_attribute_argument_type doesn't quote the attribute
+ // name but needs to; users are inconsistent.
+ Diag(CondExpr->getExprLoc(), diag::err_attribute_argument_type)
+ << AttributeName << AANT_ArgumentIntegerConstant
+ << CondExpr->getSourceRange();
+ return std::nullopt;
+ }
+ return !ConditionValue->isZero() ? FunctionEffectMode::True
+ : FunctionEffectMode::False;
+}
+
+static bool
+handleNonBlockingNonAllocatingTypeAttr(TypeProcessingState &TPState,
+ ParsedAttr &PAttr, QualType &QT,
+ FunctionTypeUnwrapper &Unwrapped) {
+ // Delay if this is not a function type.
+ if (!Unwrapped.isFunctionType())
+ return false;
+
+ Sema &S = TPState.getSema();
+
+ // Require FunctionProtoType.
+ auto *FPT = Unwrapped.get()->getAs<FunctionProtoType>();
+ if (FPT == nullptr) {
+ S.Diag(PAttr.getLoc(), diag::err_func_with_effects_no_prototype)
+ << PAttr.getAttrName()->getName();
+ return true;
+ }
+
+ // Parse the new attribute.
+ // non/blocking or non/allocating? Or conditional (computed)?
+ bool IsNonBlocking = PAttr.getKind() == ParsedAttr::AT_NonBlocking ||
+ PAttr.getKind() == ParsedAttr::AT_Blocking;
+
+ FunctionEffectMode NewMode = FunctionEffectMode::None;
+ Expr *CondExpr = nullptr; // only valid if dependent
+
+ if (PAttr.getKind() == ParsedAttr::AT_NonBlocking ||
+ PAttr.getKind() == ParsedAttr::AT_NonAllocating) {
+ if (!PAttr.checkAtMostNumArgs(S, 1)) {
+ PAttr.setInvalid();
+ return true;
+ }
+
+ // Parse the condition, if any.
+ if (PAttr.getNumArgs() == 1) {
+ CondExpr = PAttr.getArgAsExpr(0);
+ std::optional<FunctionEffectMode> MaybeMode =
+ S.ActOnEffectExpression(CondExpr, PAttr.getAttrName()->getName());
+ if (!MaybeMode) {
+ PAttr.setInvalid();
+ return true;
+ }
+ NewMode = *MaybeMode;
+ if (NewMode != FunctionEffectMode::Dependent)
+ CondExpr = nullptr;
+ } else {
+ NewMode = FunctionEffectMode::True;
+ }
+ } else {
+ // This is the `blocking` or `allocating` attribute.
+ if (S.CheckAttrNoArgs(PAttr)) {
+ // The attribute has been marked invalid.
+ return true;
+ }
+ NewMode = FunctionEffectMode::False;
+ }
+
+ const FunctionEffect::Kind FEKind =
+ (NewMode == FunctionEffectMode::False)
+ ? (IsNonBlocking ? FunctionEffect::Kind::Blocking
+ : FunctionEffect::Kind::Allocating)
+ : (IsNonBlocking ? FunctionEffect::Kind::NonBlocking
+ : FunctionEffect::Kind::NonAllocating);
+ const FunctionEffectWithCondition NewEC{FunctionEffect(FEKind),
+ EffectConditionExpr(CondExpr)};
+
+ if (S.diagnoseConflictingFunctionEffect(FPT->getFunctionEffects(), NewEC,
+ PAttr.getLoc())) {
+ PAttr.setInvalid();
+ return true;
+ }
+
+ // Add the effect to the FunctionProtoType.
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ FunctionEffectSet FX(EPI.FunctionEffects);
+ FunctionEffectSet::Conflicts Errs;
+ [[maybe_unused]] bool Success = FX.insert(NewEC, Errs);
+ assert(Success && "effect conflicts should have been diagnosed above");
+ EPI.FunctionEffects = FunctionEffectsRef(FX);
+
+ QualType NewType = S.Context.getFunctionType(FPT->getReturnType(),
+ FPT->getParamTypes(), EPI);
+ QT = Unwrapped.wrap(S, NewType->getAs<FunctionType>());
+ return true;
+}
+
static bool checkMutualExclusion(TypeProcessingState &state,
const FunctionProtoType::ExtProtoInfo &EPI,
ParsedAttr &Attr,
@@ -7966,8 +7658,7 @@ static bool handleArmStateAttribute(Sema &S,
/// Process an individual function attribute. Returns true to
/// indicate that the attribute was handled, false if it wasn't.
static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
- QualType &type,
- Sema::CUDAFunctionTarget CFT) {
+ QualType &type, CUDAFunctionTarget CFT) {
Sema &S = state.getSema();
FunctionTypeUnwrapper unwrapped(S, type);
@@ -8015,8 +7706,8 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
return false;
// Check whether the return type is reasonable.
- if (S.checkNSReturnsRetainedReturnType(attr.getLoc(),
- unwrapped.get()->getReturnType()))
+ if (S.ObjC().checkNSReturnsRetainedReturnType(
+ attr.getLoc(), unwrapped.get()->getReturnType()))
return true;
// Only actually change the underlying type in ARC builds.
@@ -8210,6 +7901,13 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
return true;
}
+ if (attr.getKind() == ParsedAttr::AT_NonBlocking ||
+ attr.getKind() == ParsedAttr::AT_NonAllocating ||
+ attr.getKind() == ParsedAttr::AT_Blocking ||
+ attr.getKind() == ParsedAttr::AT_Allocating) {
+ return handleNonBlockingNonAllocatingTypeAttr(state, attr, type, unwrapped);
+ }
+
// Delay if the type didn't work out to a function.
if (!unwrapped.isFunctionType()) return false;
@@ -8462,23 +8160,19 @@ static void HandleNeonVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
// Target must have NEON (or MVE, whose vectors are similar enough
// not to need a separate attribute)
- if (!(S.Context.getTargetInfo().hasFeature("neon") ||
- S.Context.getTargetInfo().hasFeature("mve") ||
- S.Context.getTargetInfo().hasFeature("sve") ||
- S.Context.getTargetInfo().hasFeature("sme") ||
- IsTargetCUDAAndHostARM) &&
- VecKind == VectorKind::Neon) {
- S.Diag(Attr.getLoc(), diag::err_attribute_unsupported)
- << Attr << "'neon', 'mve', 'sve' or 'sme'";
+ if (!S.Context.getTargetInfo().hasFeature("mve") &&
+ VecKind == VectorKind::Neon &&
+ S.Context.getTargetInfo().getTriple().isArmMClass()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_unsupported_m_profile)
+ << Attr << "'mve'";
Attr.setInvalid();
return;
}
- if (!(S.Context.getTargetInfo().hasFeature("neon") ||
- S.Context.getTargetInfo().hasFeature("mve") ||
- IsTargetCUDAAndHostARM) &&
- VecKind == VectorKind::NeonPoly) {
- S.Diag(Attr.getLoc(), diag::err_attribute_unsupported)
- << Attr << "'neon' or 'mve'";
+ if (!S.Context.getTargetInfo().hasFeature("mve") &&
+ VecKind == VectorKind::NeonPoly &&
+ S.Context.getTargetInfo().getTriple().isArmMClass()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_unsupported_m_profile)
+ << Attr << "'mve'";
Attr.setInvalid();
return;
}
@@ -8801,7 +8495,7 @@ static void HandleHLSLParamModifierAttr(QualType &CurType,
static void processTypeAttrs(TypeProcessingState &state, QualType &type,
TypeAttrLocation TAL,
const ParsedAttributesView &attrs,
- Sema::CUDAFunctionTarget CFT) {
+ CUDAFunctionTarget CFT) {
state.setParsedNoDeref(false);
if (attrs.empty())
@@ -9147,6 +8841,20 @@ void Sema::completeExprArrayBound(Expr *E) {
}
}
}
+ if (const auto CastE = dyn_cast<ExplicitCastExpr>(E)) {
+ QualType DestType = CastE->getTypeAsWritten();
+ if (const auto *IAT = Context.getAsIncompleteArrayType(DestType)) {
+ // C++20 [expr.static.cast]p.4: ... If T is array of unknown bound,
+ // this direct-initialization defines the type of the expression
+ // as U[1]
+ QualType ResultType = Context.getConstantArrayType(
+ IAT->getElementType(),
+ llvm::APInt(Context.getTypeSize(Context.getSizeType()), 1),
+ /*SizeExpr=*/nullptr, ArraySizeModifier::Normal,
+ /*IndexTypeQuals=*/0);
+ E->setType(ResultType);
+ }
+ }
}
QualType Sema::getCompletedType(Expr *E) {
@@ -9163,21 +8871,6 @@ QualType Sema::getCompletedType(Expr *E) {
return E->getType();
}
-/// Ensure that the type of the given expression is complete.
-///
-/// This routine checks whether the expression \p E has a complete type. If the
-/// expression refers to an instantiable construct, that instantiation is
-/// performed as needed to complete its type. Furthermore
-/// Sema::RequireCompleteType is called for the expression's type (or in the
-/// case of a reference type, the referred-to type).
-///
-/// \param E The expression whose type is required to be complete.
-/// \param Kind Selects which completeness rules should be applied.
-/// \param Diagnoser The object that will emit a diagnostic if the type is
-/// incomplete.
-///
-/// \returns \c true if the type of \p E is incomplete and diagnosed, \c false
-/// otherwise.
bool Sema::RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(E->getExprLoc(), getCompletedType(E), Kind,
@@ -9189,25 +8882,6 @@ bool Sema::RequireCompleteExprType(Expr *E, unsigned DiagID) {
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
-/// Ensure that the type T is a complete type.
-///
-/// This routine checks whether the type @p T is complete in any
-/// context where a complete type is required. If @p T is a complete
-/// type, returns false. If @p T is a class template specialization,
-/// this routine then attempts to perform class template
-/// instantiation. If instantiation fails, or if @p T is incomplete
-/// and cannot be completed, issues the diagnostic @p diag (giving it
-/// the type @p T) and returns true.
-///
-/// @param Loc The location in the source that the incomplete type
-/// diagnostic should refer to.
-///
-/// @param T The type that this routine is examining for completeness.
-///
-/// @param Kind Selects which completeness rules should be applied.
-///
-/// @returns @c true if @p T is incomplete and a diagnostic was emitted,
-/// @c false otherwise.
bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser) {
@@ -9384,7 +9058,6 @@ static void assignInheritanceModel(Sema &S, CXXRecordDecl *RD) {
}
}
-/// The implementation of RequireCompleteType
bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind,
TypeDiagnoser *Diagnoser) {
@@ -9576,24 +9249,6 @@ static unsigned getLiteralDiagFromTagKind(TagTypeKind Tag) {
}
}
-/// Ensure that the type T is a literal type.
-///
-/// This routine checks whether the type @p T is a literal type. If @p T is an
-/// incomplete type, an attempt is made to complete it. If @p T is a literal
-/// type, or @p AllowIncompleteType is true and @p T is an incomplete type,
-/// returns false. Otherwise, this routine issues the diagnostic @p PD (giving
-/// it the type @p T), along with notes explaining why the type is not a
-/// literal type, and returns true.
-///
-/// @param Loc The location in the source that the non-literal type
-/// diagnostic should refer to.
-///
-/// @param T The type that this routine is examining for literalness.
-///
-/// @param Diagnoser Emits a diagnostic if T is not a literal type.
-///
-/// @returns @c true if @p T is not a literal type and a diagnostic was emitted,
-/// @c false otherwise.
bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
assert(!T->isDependentType() && "type should not be dependent");
@@ -9676,7 +9331,8 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
: diag::note_non_literal_nontrivial_dtor)
<< RD;
if (!Dtor->isUserProvided())
- SpecialMemberIsTrivial(Dtor, CXXDestructor, TAH_IgnoreTrivialABI,
+ SpecialMemberIsTrivial(Dtor, CXXSpecialMemberKind::Destructor,
+ TAH_IgnoreTrivialABI,
/*Diagnose*/ true);
}
}
@@ -9689,9 +9345,6 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireLiteralType(Loc, T, Diagnoser);
}
-/// Retrieve a version of the type 'T' that is elaborated by Keyword, qualified
-/// by the nested-name-specifier contained in SS, and that is (re)declared by
-/// OwnedTagDecl, which is nullptr if this is not a (re)declaration.
QualType Sema::getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl) {
@@ -9716,17 +9369,47 @@ QualType Sema::BuildTypeofExprType(Expr *E, TypeOfKind Kind) {
return Context.getTypeOfExprType(E, Kind);
}
+static void
+BuildTypeCoupledDecls(Expr *E,
+ llvm::SmallVectorImpl<TypeCoupledDeclRefInfo> &Decls) {
+ // Currently, 'counted_by' only allows direct DeclRefExpr to FieldDecl.
+ auto *CountDecl = cast<DeclRefExpr>(E)->getDecl();
+ Decls.push_back(TypeCoupledDeclRefInfo(CountDecl, /*IsDref*/ false));
+}
+
+QualType Sema::BuildCountAttributedArrayOrPointerType(QualType WrappedTy,
+ Expr *CountExpr,
+ bool CountInBytes,
+ bool OrNull) {
+ assert(WrappedTy->isIncompleteArrayType() || WrappedTy->isPointerType());
+
+ llvm::SmallVector<TypeCoupledDeclRefInfo, 1> Decls;
+ BuildTypeCoupledDecls(CountExpr, Decls);
+ /// When the resulting expression is invalid, we still create the AST using
+ /// the original count expression for the sake of AST dump.
+ return Context.getCountAttributedType(WrappedTy, CountExpr, CountInBytes,
+ OrNull, Decls);
+}
+
/// getDecltypeForExpr - Given an expr, will return the decltype for
/// that expression, according to the rules in C++11
/// [dcl.type.simple]p4 and C++11 [expr.lambda.prim]p18.
QualType Sema::getDecltypeForExpr(Expr *E) {
- if (E->isTypeDependent())
- return Context.DependentTy;
Expr *IDExpr = E;
if (auto *ImplCastExpr = dyn_cast<ImplicitCastExpr>(E))
IDExpr = ImplCastExpr->getSubExpr();
+ if (auto *PackExpr = dyn_cast<PackIndexingExpr>(E)) {
+ if (E->isInstantiationDependent())
+ IDExpr = PackExpr->getPackIdExpression();
+ else
+ IDExpr = PackExpr->getSelectedExpr();
+ }
+
+ if (E->isTypeDependent())
+ return Context.DependentTy;
+
// C++11 [dcl.type.simple]p4:
// The type denoted by decltype(e) is defined as follows:
@@ -9798,6 +9481,54 @@ QualType Sema::BuildDecltypeType(Expr *E, bool AsUnevaluated) {
return Context.getDecltypeType(E, getDecltypeForExpr(E));
}
+QualType Sema::ActOnPackIndexingType(QualType Pattern, Expr *IndexExpr,
+ SourceLocation Loc,
+ SourceLocation EllipsisLoc) {
+ if (!IndexExpr)
+ return QualType();
+
+ // Diagnose unexpanded packs but continue to improve recovery.
+ if (!Pattern->containsUnexpandedParameterPack())
+ Diag(Loc, diag::err_expected_name_of_pack) << Pattern;
+
+ QualType Type = BuildPackIndexingType(Pattern, IndexExpr, Loc, EllipsisLoc);
+
+ if (!Type.isNull())
+ Diag(Loc, getLangOpts().CPlusPlus26 ? diag::warn_cxx23_pack_indexing
+ : diag::ext_pack_indexing);
+ return Type;
+}
+
+QualType Sema::BuildPackIndexingType(QualType Pattern, Expr *IndexExpr,
+ SourceLocation Loc,
+ SourceLocation EllipsisLoc,
+ bool FullySubstituted,
+ ArrayRef<QualType> Expansions) {
+
+ std::optional<int64_t> Index;
+ if (FullySubstituted && !IndexExpr->isValueDependent() &&
+ !IndexExpr->isTypeDependent()) {
+ llvm::APSInt Value(Context.getIntWidth(Context.getSizeType()));
+ ExprResult Res = CheckConvertedConstantExpression(
+ IndexExpr, Context.getSizeType(), Value, CCEK_ArrayBound);
+ if (!Res.isUsable())
+ return QualType();
+ Index = Value.getExtValue();
+ IndexExpr = Res.get();
+ }
+
+ if (FullySubstituted && Index) {
+ if (*Index < 0 || *Index >= int64_t(Expansions.size())) {
+ Diag(IndexExpr->getBeginLoc(), diag::err_pack_index_out_of_bound)
+ << *Index << Pattern << Expansions.size();
+ return QualType();
+ }
+ }
+
+ return Context.getPackIndexingType(Pattern, IndexExpr, FullySubstituted,
+ Expansions, Index.value_or(-1));
+}
+
static QualType GetEnumUnderlyingType(Sema &S, QualType BaseType,
SourceLocation Loc) {
assert(BaseType->isEnumeralType());
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaWasm.cpp b/contrib/llvm-project/clang/lib/Sema/SemaWasm.cpp
new file mode 100644
index 000000000000..c0fa05bc1760
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaWasm.cpp
@@ -0,0 +1,341 @@
+//===------ SemaWasm.cpp ---- WebAssembly target-specific routines --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to WebAssembly.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaWasm.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/AddressSpaces.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Attr.h"
+#include "clang/Sema/Sema.h"
+
+namespace clang {
+
+SemaWasm::SemaWasm(Sema &S) : SemaBase(S) {}
+
+/// Checks the argument at the given index is a WebAssembly table and if it
+/// is, sets ElTy to the element type.
+static bool CheckWasmBuiltinArgIsTable(Sema &S, CallExpr *E, unsigned ArgIndex,
+ QualType &ElTy) {
+ Expr *ArgExpr = E->getArg(ArgIndex);
+ const auto *ATy = dyn_cast<ArrayType>(ArgExpr->getType());
+ if (!ATy || !ATy->getElementType().isWebAssemblyReferenceType()) {
+ return S.Diag(ArgExpr->getBeginLoc(),
+ diag::err_wasm_builtin_arg_must_be_table_type)
+ << ArgIndex + 1 << ArgExpr->getSourceRange();
+ }
+ ElTy = ATy->getElementType();
+ return false;
+}
+
+/// Checks the argument at the given index is an integer.
+static bool CheckWasmBuiltinArgIsInteger(Sema &S, CallExpr *E,
+ unsigned ArgIndex) {
+ Expr *ArgExpr = E->getArg(ArgIndex);
+ if (!ArgExpr->getType()->isIntegerType()) {
+ return S.Diag(ArgExpr->getBeginLoc(),
+ diag::err_wasm_builtin_arg_must_be_integer_type)
+ << ArgIndex + 1 << ArgExpr->getSourceRange();
+ }
+ return false;
+}
+
+bool SemaWasm::BuiltinWasmRefNullExtern(CallExpr *TheCall) {
+ if (TheCall->getNumArgs() != 0)
+ return true;
+
+ TheCall->setType(getASTContext().getWebAssemblyExternrefType());
+
+ return false;
+}
+
+bool SemaWasm::BuiltinWasmRefNullFunc(CallExpr *TheCall) {
+ ASTContext &Context = getASTContext();
+ if (TheCall->getNumArgs() != 0) {
+ Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_many_args)
+ << 0 /*function call*/ << /*expected*/ 0 << TheCall->getNumArgs()
+ << /*is non object*/ 0;
+ return true;
+ }
+
+ // This custom type checking code ensures that the nodes are as expected
+ // in order to later on generate the necessary builtin.
+ QualType Pointee = Context.getFunctionType(Context.VoidTy, {}, {});
+ QualType Type = Context.getPointerType(Pointee);
+ Pointee = Context.getAddrSpaceQualType(Pointee, LangAS::wasm_funcref);
+ Type = Context.getAttributedType(attr::WebAssemblyFuncref, Type,
+ Context.getPointerType(Pointee));
+ TheCall->setType(Type);
+
+ return false;
+}
+
+/// Check that the first argument is a WebAssembly table, and the second
+/// is an index to use as index into the table.
+bool SemaWasm::BuiltinWasmTableGet(CallExpr *TheCall) {
+ if (SemaRef.checkArgCount(TheCall, 2))
+ return true;
+
+ QualType ElTy;
+ if (CheckWasmBuiltinArgIsTable(SemaRef, TheCall, 0, ElTy))
+ return true;
+
+ if (CheckWasmBuiltinArgIsInteger(SemaRef, TheCall, 1))
+ return true;
+
+ // If all is well, we set the type of TheCall to be the type of the
+ // element of the table.
+ // i.e. a table.get on an externref table has type externref,
+ // or whatever the type of the table element is.
+ TheCall->setType(ElTy);
+
+ return false;
+}
+
+/// Check that the first argumnet is a WebAssembly table, the second is
+/// an index to use as index into the table and the third is the reference
+/// type to set into the table.
+bool SemaWasm::BuiltinWasmTableSet(CallExpr *TheCall) {
+ if (SemaRef.checkArgCount(TheCall, 3))
+ return true;
+
+ QualType ElTy;
+ if (CheckWasmBuiltinArgIsTable(SemaRef, TheCall, 0, ElTy))
+ return true;
+
+ if (CheckWasmBuiltinArgIsInteger(SemaRef, TheCall, 1))
+ return true;
+
+ if (!getASTContext().hasSameType(ElTy, TheCall->getArg(2)->getType()))
+ return true;
+
+ return false;
+}
+
+/// Check that the argument is a WebAssembly table.
+bool SemaWasm::BuiltinWasmTableSize(CallExpr *TheCall) {
+ if (SemaRef.checkArgCount(TheCall, 1))
+ return true;
+
+ QualType ElTy;
+ if (CheckWasmBuiltinArgIsTable(SemaRef, TheCall, 0, ElTy))
+ return true;
+
+ return false;
+}
+
+/// Check that the first argument is a WebAssembly table, the second is the
+/// value to use for new elements (of a type matching the table type), the
+/// third value is an integer.
+bool SemaWasm::BuiltinWasmTableGrow(CallExpr *TheCall) {
+ if (SemaRef.checkArgCount(TheCall, 3))
+ return true;
+
+ QualType ElTy;
+ if (CheckWasmBuiltinArgIsTable(SemaRef, TheCall, 0, ElTy))
+ return true;
+
+ Expr *NewElemArg = TheCall->getArg(1);
+ if (!getASTContext().hasSameType(ElTy, NewElemArg->getType())) {
+ return Diag(NewElemArg->getBeginLoc(),
+ diag::err_wasm_builtin_arg_must_match_table_element_type)
+ << 2 << 1 << NewElemArg->getSourceRange();
+ }
+
+ if (CheckWasmBuiltinArgIsInteger(SemaRef, TheCall, 2))
+ return true;
+
+ return false;
+}
+
+/// Check that the first argument is a WebAssembly table, the second is an
+/// integer, the third is the value to use to fill the table (of a type
+/// matching the table type), and the fourth is an integer.
+bool SemaWasm::BuiltinWasmTableFill(CallExpr *TheCall) {
+ if (SemaRef.checkArgCount(TheCall, 4))
+ return true;
+
+ QualType ElTy;
+ if (CheckWasmBuiltinArgIsTable(SemaRef, TheCall, 0, ElTy))
+ return true;
+
+ if (CheckWasmBuiltinArgIsInteger(SemaRef, TheCall, 1))
+ return true;
+
+ Expr *NewElemArg = TheCall->getArg(2);
+ if (!getASTContext().hasSameType(ElTy, NewElemArg->getType())) {
+ return Diag(NewElemArg->getBeginLoc(),
+ diag::err_wasm_builtin_arg_must_match_table_element_type)
+ << 3 << 1 << NewElemArg->getSourceRange();
+ }
+
+ if (CheckWasmBuiltinArgIsInteger(SemaRef, TheCall, 3))
+ return true;
+
+ return false;
+}
+
+/// Check that the first argument is a WebAssembly table, the second is also a
+/// WebAssembly table (of the same element type), and the third to fifth
+/// arguments are integers.
+bool SemaWasm::BuiltinWasmTableCopy(CallExpr *TheCall) {
+ if (SemaRef.checkArgCount(TheCall, 5))
+ return true;
+
+ QualType XElTy;
+ if (CheckWasmBuiltinArgIsTable(SemaRef, TheCall, 0, XElTy))
+ return true;
+
+ QualType YElTy;
+ if (CheckWasmBuiltinArgIsTable(SemaRef, TheCall, 1, YElTy))
+ return true;
+
+ Expr *TableYArg = TheCall->getArg(1);
+ if (!getASTContext().hasSameType(XElTy, YElTy)) {
+ return Diag(TableYArg->getBeginLoc(),
+ diag::err_wasm_builtin_arg_must_match_table_element_type)
+ << 2 << 1 << TableYArg->getSourceRange();
+ }
+
+ for (int I = 2; I <= 4; I++) {
+ if (CheckWasmBuiltinArgIsInteger(SemaRef, TheCall, I))
+ return true;
+ }
+
+ return false;
+}
+
+bool SemaWasm::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_ref_null_extern:
+ return BuiltinWasmRefNullExtern(TheCall);
+ case WebAssembly::BI__builtin_wasm_ref_null_func:
+ return BuiltinWasmRefNullFunc(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_get:
+ return BuiltinWasmTableGet(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_set:
+ return BuiltinWasmTableSet(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_size:
+ return BuiltinWasmTableSize(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_grow:
+ return BuiltinWasmTableGrow(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_fill:
+ return BuiltinWasmTableFill(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_copy:
+ return BuiltinWasmTableCopy(TheCall);
+ }
+
+ return false;
+}
+
+WebAssemblyImportModuleAttr *
+SemaWasm::mergeImportModuleAttr(Decl *D,
+ const WebAssemblyImportModuleAttr &AL) {
+ auto *FD = cast<FunctionDecl>(D);
+
+ if (const auto *ExistingAttr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
+ if (ExistingAttr->getImportModule() == AL.getImportModule())
+ return nullptr;
+ Diag(ExistingAttr->getLocation(), diag::warn_mismatched_import)
+ << 0 << ExistingAttr->getImportModule() << AL.getImportModule();
+ Diag(AL.getLoc(), diag::note_previous_attribute);
+ return nullptr;
+ }
+ if (FD->hasBody()) {
+ Diag(AL.getLoc(), diag::warn_import_on_definition) << 0;
+ return nullptr;
+ }
+ return ::new (getASTContext())
+ WebAssemblyImportModuleAttr(getASTContext(), AL, AL.getImportModule());
+}
+
+WebAssemblyImportNameAttr *
+SemaWasm::mergeImportNameAttr(Decl *D, const WebAssemblyImportNameAttr &AL) {
+ auto *FD = cast<FunctionDecl>(D);
+
+ if (const auto *ExistingAttr = FD->getAttr<WebAssemblyImportNameAttr>()) {
+ if (ExistingAttr->getImportName() == AL.getImportName())
+ return nullptr;
+ Diag(ExistingAttr->getLocation(), diag::warn_mismatched_import)
+ << 1 << ExistingAttr->getImportName() << AL.getImportName();
+ Diag(AL.getLoc(), diag::note_previous_attribute);
+ return nullptr;
+ }
+ if (FD->hasBody()) {
+ Diag(AL.getLoc(), diag::warn_import_on_definition) << 1;
+ return nullptr;
+ }
+ return ::new (getASTContext())
+ WebAssemblyImportNameAttr(getASTContext(), AL, AL.getImportName());
+}
+
+void SemaWasm::handleWebAssemblyImportModuleAttr(Decl *D,
+ const ParsedAttr &AL) {
+ auto *FD = cast<FunctionDecl>(D);
+
+ StringRef Str;
+ SourceLocation ArgLoc;
+ if (!SemaRef.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
+ return;
+ if (FD->hasBody()) {
+ Diag(AL.getLoc(), diag::warn_import_on_definition) << 0;
+ return;
+ }
+
+ FD->addAttr(::new (getASTContext())
+ WebAssemblyImportModuleAttr(getASTContext(), AL, Str));
+}
+
+void SemaWasm::handleWebAssemblyImportNameAttr(Decl *D, const ParsedAttr &AL) {
+ auto *FD = cast<FunctionDecl>(D);
+
+ StringRef Str;
+ SourceLocation ArgLoc;
+ if (!SemaRef.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
+ return;
+ if (FD->hasBody()) {
+ Diag(AL.getLoc(), diag::warn_import_on_definition) << 1;
+ return;
+ }
+
+ FD->addAttr(::new (getASTContext())
+ WebAssemblyImportNameAttr(getASTContext(), AL, Str));
+}
+
+void SemaWasm::handleWebAssemblyExportNameAttr(Decl *D, const ParsedAttr &AL) {
+ ASTContext &Context = getASTContext();
+ if (!isFuncOrMethodForAttrSubject(D)) {
+ Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
+ return;
+ }
+
+ auto *FD = cast<FunctionDecl>(D);
+ if (FD->isThisDeclarationADefinition()) {
+ Diag(D->getLocation(), diag::err_alias_is_definition) << FD << 0;
+ return;
+ }
+
+ StringRef Str;
+ SourceLocation ArgLoc;
+ if (!SemaRef.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
+ return;
+
+ D->addAttr(::new (Context) WebAssemblyExportNameAttr(Context, AL, Str));
+ D->addAttr(UsedAttr::CreateImplicit(Context));
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaX86.cpp b/contrib/llvm-project/clang/lib/Sema/SemaX86.cpp
new file mode 100644
index 000000000000..be26454ce909
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaX86.cpp
@@ -0,0 +1,972 @@
+//===------ SemaX86.cpp ---------- X86 target-specific routines -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to X86.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaX86.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Attr.h"
+#include "clang/Sema/ParsedAttr.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/TargetParser/Triple.h"
+#include <bitset>
+
+namespace clang {
+
+SemaX86::SemaX86(Sema &S) : SemaBase(S) {}
+
+// Check if the rounding mode is legal.
+bool SemaX86::CheckBuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
+ // Indicates if this instruction has rounding control or just SAE.
+ bool HasRC = false;
+
+ unsigned ArgNum = 0;
+ switch (BuiltinID) {
+ default:
+ return false;
+ case X86::BI__builtin_ia32_vcvttsd2si32:
+ case X86::BI__builtin_ia32_vcvttsd2si64:
+ case X86::BI__builtin_ia32_vcvttsd2usi32:
+ case X86::BI__builtin_ia32_vcvttsd2usi64:
+ case X86::BI__builtin_ia32_vcvttss2si32:
+ case X86::BI__builtin_ia32_vcvttss2si64:
+ case X86::BI__builtin_ia32_vcvttss2usi32:
+ case X86::BI__builtin_ia32_vcvttss2usi64:
+ case X86::BI__builtin_ia32_vcvttsh2si32:
+ case X86::BI__builtin_ia32_vcvttsh2si64:
+ case X86::BI__builtin_ia32_vcvttsh2usi32:
+ case X86::BI__builtin_ia32_vcvttsh2usi64:
+ ArgNum = 1;
+ break;
+ case X86::BI__builtin_ia32_maxpd512:
+ case X86::BI__builtin_ia32_maxps512:
+ case X86::BI__builtin_ia32_minpd512:
+ case X86::BI__builtin_ia32_minps512:
+ case X86::BI__builtin_ia32_maxph512:
+ case X86::BI__builtin_ia32_minph512:
+ ArgNum = 2;
+ break;
+ case X86::BI__builtin_ia32_vcvtph2pd512_mask:
+ case X86::BI__builtin_ia32_vcvtph2psx512_mask:
+ case X86::BI__builtin_ia32_cvtps2pd512_mask:
+ case X86::BI__builtin_ia32_cvttpd2dq512_mask:
+ case X86::BI__builtin_ia32_cvttpd2qq512_mask:
+ case X86::BI__builtin_ia32_cvttpd2udq512_mask:
+ case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
+ case X86::BI__builtin_ia32_cvttps2dq512_mask:
+ case X86::BI__builtin_ia32_cvttps2qq512_mask:
+ case X86::BI__builtin_ia32_cvttps2udq512_mask:
+ case X86::BI__builtin_ia32_cvttps2uqq512_mask:
+ case X86::BI__builtin_ia32_vcvttph2w512_mask:
+ case X86::BI__builtin_ia32_vcvttph2uw512_mask:
+ case X86::BI__builtin_ia32_vcvttph2dq512_mask:
+ case X86::BI__builtin_ia32_vcvttph2udq512_mask:
+ case X86::BI__builtin_ia32_vcvttph2qq512_mask:
+ case X86::BI__builtin_ia32_vcvttph2uqq512_mask:
+ case X86::BI__builtin_ia32_getexppd512_mask:
+ case X86::BI__builtin_ia32_getexpps512_mask:
+ case X86::BI__builtin_ia32_getexpph512_mask:
+ case X86::BI__builtin_ia32_vcomisd:
+ case X86::BI__builtin_ia32_vcomiss:
+ case X86::BI__builtin_ia32_vcomish:
+ case X86::BI__builtin_ia32_vcvtph2ps512_mask:
+ ArgNum = 3;
+ break;
+ case X86::BI__builtin_ia32_cmppd512_mask:
+ case X86::BI__builtin_ia32_cmpps512_mask:
+ case X86::BI__builtin_ia32_cmpsd_mask:
+ case X86::BI__builtin_ia32_cmpss_mask:
+ case X86::BI__builtin_ia32_cmpsh_mask:
+ case X86::BI__builtin_ia32_vcvtsh2sd_round_mask:
+ case X86::BI__builtin_ia32_vcvtsh2ss_round_mask:
+ case X86::BI__builtin_ia32_cvtss2sd_round_mask:
+ case X86::BI__builtin_ia32_getexpsd128_round_mask:
+ case X86::BI__builtin_ia32_getexpss128_round_mask:
+ case X86::BI__builtin_ia32_getexpsh128_round_mask:
+ case X86::BI__builtin_ia32_getmantpd512_mask:
+ case X86::BI__builtin_ia32_getmantps512_mask:
+ case X86::BI__builtin_ia32_getmantph512_mask:
+ case X86::BI__builtin_ia32_maxsd_round_mask:
+ case X86::BI__builtin_ia32_maxss_round_mask:
+ case X86::BI__builtin_ia32_maxsh_round_mask:
+ case X86::BI__builtin_ia32_minsd_round_mask:
+ case X86::BI__builtin_ia32_minss_round_mask:
+ case X86::BI__builtin_ia32_minsh_round_mask:
+ case X86::BI__builtin_ia32_reducepd512_mask:
+ case X86::BI__builtin_ia32_reduceps512_mask:
+ case X86::BI__builtin_ia32_reduceph512_mask:
+ case X86::BI__builtin_ia32_rndscalepd_mask:
+ case X86::BI__builtin_ia32_rndscaleps_mask:
+ case X86::BI__builtin_ia32_rndscaleph_mask:
+ ArgNum = 4;
+ break;
+ case X86::BI__builtin_ia32_fixupimmpd512_mask:
+ case X86::BI__builtin_ia32_fixupimmpd512_maskz:
+ case X86::BI__builtin_ia32_fixupimmps512_mask:
+ case X86::BI__builtin_ia32_fixupimmps512_maskz:
+ case X86::BI__builtin_ia32_fixupimmsd_mask:
+ case X86::BI__builtin_ia32_fixupimmsd_maskz:
+ case X86::BI__builtin_ia32_fixupimmss_mask:
+ case X86::BI__builtin_ia32_fixupimmss_maskz:
+ case X86::BI__builtin_ia32_getmantsd_round_mask:
+ case X86::BI__builtin_ia32_getmantss_round_mask:
+ case X86::BI__builtin_ia32_getmantsh_round_mask:
+ case X86::BI__builtin_ia32_rangepd512_mask:
+ case X86::BI__builtin_ia32_rangeps512_mask:
+ case X86::BI__builtin_ia32_rangesd128_round_mask:
+ case X86::BI__builtin_ia32_rangess128_round_mask:
+ case X86::BI__builtin_ia32_reducesd_mask:
+ case X86::BI__builtin_ia32_reducess_mask:
+ case X86::BI__builtin_ia32_reducesh_mask:
+ case X86::BI__builtin_ia32_rndscalesd_round_mask:
+ case X86::BI__builtin_ia32_rndscaless_round_mask:
+ case X86::BI__builtin_ia32_rndscalesh_round_mask:
+ ArgNum = 5;
+ break;
+ case X86::BI__builtin_ia32_vcvtsd2si64:
+ case X86::BI__builtin_ia32_vcvtsd2si32:
+ case X86::BI__builtin_ia32_vcvtsd2usi32:
+ case X86::BI__builtin_ia32_vcvtsd2usi64:
+ case X86::BI__builtin_ia32_vcvtss2si32:
+ case X86::BI__builtin_ia32_vcvtss2si64:
+ case X86::BI__builtin_ia32_vcvtss2usi32:
+ case X86::BI__builtin_ia32_vcvtss2usi64:
+ case X86::BI__builtin_ia32_vcvtsh2si32:
+ case X86::BI__builtin_ia32_vcvtsh2si64:
+ case X86::BI__builtin_ia32_vcvtsh2usi32:
+ case X86::BI__builtin_ia32_vcvtsh2usi64:
+ case X86::BI__builtin_ia32_sqrtpd512:
+ case X86::BI__builtin_ia32_sqrtps512:
+ case X86::BI__builtin_ia32_sqrtph512:
+ ArgNum = 1;
+ HasRC = true;
+ break;
+ case X86::BI__builtin_ia32_addph512:
+ case X86::BI__builtin_ia32_divph512:
+ case X86::BI__builtin_ia32_mulph512:
+ case X86::BI__builtin_ia32_subph512:
+ case X86::BI__builtin_ia32_addpd512:
+ case X86::BI__builtin_ia32_addps512:
+ case X86::BI__builtin_ia32_divpd512:
+ case X86::BI__builtin_ia32_divps512:
+ case X86::BI__builtin_ia32_mulpd512:
+ case X86::BI__builtin_ia32_mulps512:
+ case X86::BI__builtin_ia32_subpd512:
+ case X86::BI__builtin_ia32_subps512:
+ case X86::BI__builtin_ia32_cvtsi2sd64:
+ case X86::BI__builtin_ia32_cvtsi2ss32:
+ case X86::BI__builtin_ia32_cvtsi2ss64:
+ case X86::BI__builtin_ia32_cvtusi2sd64:
+ case X86::BI__builtin_ia32_cvtusi2ss32:
+ case X86::BI__builtin_ia32_cvtusi2ss64:
+ case X86::BI__builtin_ia32_vcvtusi2sh:
+ case X86::BI__builtin_ia32_vcvtusi642sh:
+ case X86::BI__builtin_ia32_vcvtsi2sh:
+ case X86::BI__builtin_ia32_vcvtsi642sh:
+ ArgNum = 2;
+ HasRC = true;
+ break;
+ case X86::BI__builtin_ia32_cvtdq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtudq2ps512_mask:
+ case X86::BI__builtin_ia32_vcvtpd2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtps2phx512_mask:
+ case X86::BI__builtin_ia32_cvtpd2ps512_mask:
+ case X86::BI__builtin_ia32_cvtpd2dq512_mask:
+ case X86::BI__builtin_ia32_cvtpd2qq512_mask:
+ case X86::BI__builtin_ia32_cvtpd2udq512_mask:
+ case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
+ case X86::BI__builtin_ia32_cvtps2dq512_mask:
+ case X86::BI__builtin_ia32_cvtps2qq512_mask:
+ case X86::BI__builtin_ia32_cvtps2udq512_mask:
+ case X86::BI__builtin_ia32_cvtps2uqq512_mask:
+ case X86::BI__builtin_ia32_cvtqq2pd512_mask:
+ case X86::BI__builtin_ia32_cvtqq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
+ case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
+ case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtw2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtph2w512_mask:
+ case X86::BI__builtin_ia32_vcvtph2uw512_mask:
+ case X86::BI__builtin_ia32_vcvtph2dq512_mask:
+ case X86::BI__builtin_ia32_vcvtph2udq512_mask:
+ case X86::BI__builtin_ia32_vcvtph2qq512_mask:
+ case X86::BI__builtin_ia32_vcvtph2uqq512_mask:
+ case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
+ ArgNum = 3;
+ HasRC = true;
+ break;
+ case X86::BI__builtin_ia32_addsh_round_mask:
+ case X86::BI__builtin_ia32_addss_round_mask:
+ case X86::BI__builtin_ia32_addsd_round_mask:
+ case X86::BI__builtin_ia32_divsh_round_mask:
+ case X86::BI__builtin_ia32_divss_round_mask:
+ case X86::BI__builtin_ia32_divsd_round_mask:
+ case X86::BI__builtin_ia32_mulsh_round_mask:
+ case X86::BI__builtin_ia32_mulss_round_mask:
+ case X86::BI__builtin_ia32_mulsd_round_mask:
+ case X86::BI__builtin_ia32_subsh_round_mask:
+ case X86::BI__builtin_ia32_subss_round_mask:
+ case X86::BI__builtin_ia32_subsd_round_mask:
+ case X86::BI__builtin_ia32_scalefph512_mask:
+ case X86::BI__builtin_ia32_scalefpd512_mask:
+ case X86::BI__builtin_ia32_scalefps512_mask:
+ case X86::BI__builtin_ia32_scalefsd_round_mask:
+ case X86::BI__builtin_ia32_scalefss_round_mask:
+ case X86::BI__builtin_ia32_scalefsh_round_mask:
+ case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
+ case X86::BI__builtin_ia32_vcvtss2sh_round_mask:
+ case X86::BI__builtin_ia32_vcvtsd2sh_round_mask:
+ case X86::BI__builtin_ia32_sqrtsd_round_mask:
+ case X86::BI__builtin_ia32_sqrtss_round_mask:
+ case X86::BI__builtin_ia32_sqrtsh_round_mask:
+ case X86::BI__builtin_ia32_vfmaddsd3_mask:
+ case X86::BI__builtin_ia32_vfmaddsd3_maskz:
+ case X86::BI__builtin_ia32_vfmaddsd3_mask3:
+ case X86::BI__builtin_ia32_vfmaddss3_mask:
+ case X86::BI__builtin_ia32_vfmaddss3_maskz:
+ case X86::BI__builtin_ia32_vfmaddss3_mask3:
+ case X86::BI__builtin_ia32_vfmaddsh3_mask:
+ case X86::BI__builtin_ia32_vfmaddsh3_maskz:
+ case X86::BI__builtin_ia32_vfmaddsh3_mask3:
+ case X86::BI__builtin_ia32_vfmaddpd512_mask:
+ case X86::BI__builtin_ia32_vfmaddpd512_maskz:
+ case X86::BI__builtin_ia32_vfmaddpd512_mask3:
+ case X86::BI__builtin_ia32_vfmsubpd512_mask3:
+ case X86::BI__builtin_ia32_vfmaddps512_mask:
+ case X86::BI__builtin_ia32_vfmaddps512_maskz:
+ case X86::BI__builtin_ia32_vfmaddps512_mask3:
+ case X86::BI__builtin_ia32_vfmsubps512_mask3:
+ case X86::BI__builtin_ia32_vfmaddph512_mask:
+ case X86::BI__builtin_ia32_vfmaddph512_maskz:
+ case X86::BI__builtin_ia32_vfmaddph512_mask3:
+ case X86::BI__builtin_ia32_vfmsubph512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsubps512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsubph512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
+ case X86::BI__builtin_ia32_vfmaddcsh_mask:
+ case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
+ case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
+ case X86::BI__builtin_ia32_vfmaddcph512_mask:
+ case X86::BI__builtin_ia32_vfmaddcph512_maskz:
+ case X86::BI__builtin_ia32_vfmaddcph512_mask3:
+ case X86::BI__builtin_ia32_vfcmaddcsh_mask:
+ case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
+ case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
+ case X86::BI__builtin_ia32_vfcmaddcph512_mask:
+ case X86::BI__builtin_ia32_vfcmaddcph512_maskz:
+ case X86::BI__builtin_ia32_vfcmaddcph512_mask3:
+ case X86::BI__builtin_ia32_vfmulcsh_mask:
+ case X86::BI__builtin_ia32_vfmulcph512_mask:
+ case X86::BI__builtin_ia32_vfcmulcsh_mask:
+ case X86::BI__builtin_ia32_vfcmulcph512_mask:
+ ArgNum = 4;
+ HasRC = true;
+ break;
+ }
+
+ llvm::APSInt Result;
+
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check constant-ness first.
+ if (SemaRef.BuiltinConstantArg(TheCall, ArgNum, Result))
+ return true;
+
+ // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit
+ // is set. If the intrinsic has rounding control(bits 1:0), make sure its only
+ // combined with ROUND_NO_EXC. If the intrinsic does not have rounding
+ // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
+ if (Result == 4 /*ROUND_CUR_DIRECTION*/ || Result == 8 /*ROUND_NO_EXC*/ ||
+ (!HasRC && Result == 12 /*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
+ (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
+ return false;
+
+ return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
+ << Arg->getSourceRange();
+}
+
+// Check if the gather/scatter scale is legal.
+bool SemaX86::CheckBuiltinGatherScatterScale(unsigned BuiltinID,
+ CallExpr *TheCall) {
+ unsigned ArgNum = 0;
+ switch (BuiltinID) {
+ default:
+ return false;
+ case X86::BI__builtin_ia32_gatherd_pd:
+ case X86::BI__builtin_ia32_gatherd_pd256:
+ case X86::BI__builtin_ia32_gatherq_pd:
+ case X86::BI__builtin_ia32_gatherq_pd256:
+ case X86::BI__builtin_ia32_gatherd_ps:
+ case X86::BI__builtin_ia32_gatherd_ps256:
+ case X86::BI__builtin_ia32_gatherq_ps:
+ case X86::BI__builtin_ia32_gatherq_ps256:
+ case X86::BI__builtin_ia32_gatherd_q:
+ case X86::BI__builtin_ia32_gatherd_q256:
+ case X86::BI__builtin_ia32_gatherq_q:
+ case X86::BI__builtin_ia32_gatherq_q256:
+ case X86::BI__builtin_ia32_gatherd_d:
+ case X86::BI__builtin_ia32_gatherd_d256:
+ case X86::BI__builtin_ia32_gatherq_d:
+ case X86::BI__builtin_ia32_gatherq_d256:
+ case X86::BI__builtin_ia32_gather3div2df:
+ case X86::BI__builtin_ia32_gather3div2di:
+ case X86::BI__builtin_ia32_gather3div4df:
+ case X86::BI__builtin_ia32_gather3div4di:
+ case X86::BI__builtin_ia32_gather3div4sf:
+ case X86::BI__builtin_ia32_gather3div4si:
+ case X86::BI__builtin_ia32_gather3div8sf:
+ case X86::BI__builtin_ia32_gather3div8si:
+ case X86::BI__builtin_ia32_gather3siv2df:
+ case X86::BI__builtin_ia32_gather3siv2di:
+ case X86::BI__builtin_ia32_gather3siv4df:
+ case X86::BI__builtin_ia32_gather3siv4di:
+ case X86::BI__builtin_ia32_gather3siv4sf:
+ case X86::BI__builtin_ia32_gather3siv4si:
+ case X86::BI__builtin_ia32_gather3siv8sf:
+ case X86::BI__builtin_ia32_gather3siv8si:
+ case X86::BI__builtin_ia32_gathersiv8df:
+ case X86::BI__builtin_ia32_gathersiv16sf:
+ case X86::BI__builtin_ia32_gatherdiv8df:
+ case X86::BI__builtin_ia32_gatherdiv16sf:
+ case X86::BI__builtin_ia32_gathersiv8di:
+ case X86::BI__builtin_ia32_gathersiv16si:
+ case X86::BI__builtin_ia32_gatherdiv8di:
+ case X86::BI__builtin_ia32_gatherdiv16si:
+ case X86::BI__builtin_ia32_scatterdiv2df:
+ case X86::BI__builtin_ia32_scatterdiv2di:
+ case X86::BI__builtin_ia32_scatterdiv4df:
+ case X86::BI__builtin_ia32_scatterdiv4di:
+ case X86::BI__builtin_ia32_scatterdiv4sf:
+ case X86::BI__builtin_ia32_scatterdiv4si:
+ case X86::BI__builtin_ia32_scatterdiv8sf:
+ case X86::BI__builtin_ia32_scatterdiv8si:
+ case X86::BI__builtin_ia32_scattersiv2df:
+ case X86::BI__builtin_ia32_scattersiv2di:
+ case X86::BI__builtin_ia32_scattersiv4df:
+ case X86::BI__builtin_ia32_scattersiv4di:
+ case X86::BI__builtin_ia32_scattersiv4sf:
+ case X86::BI__builtin_ia32_scattersiv4si:
+ case X86::BI__builtin_ia32_scattersiv8sf:
+ case X86::BI__builtin_ia32_scattersiv8si:
+ case X86::BI__builtin_ia32_scattersiv8df:
+ case X86::BI__builtin_ia32_scattersiv16sf:
+ case X86::BI__builtin_ia32_scatterdiv8df:
+ case X86::BI__builtin_ia32_scatterdiv16sf:
+ case X86::BI__builtin_ia32_scattersiv8di:
+ case X86::BI__builtin_ia32_scattersiv16si:
+ case X86::BI__builtin_ia32_scatterdiv8di:
+ case X86::BI__builtin_ia32_scatterdiv16si:
+ ArgNum = 4;
+ break;
+ }
+
+ llvm::APSInt Result;
+
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check constant-ness first.
+ if (SemaRef.BuiltinConstantArg(TheCall, ArgNum, Result))
+ return true;
+
+ if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
+ return false;
+
+ return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
+ << Arg->getSourceRange();
+}
+
+enum { TileRegLow = 0, TileRegHigh = 7 };
+
+bool SemaX86::CheckBuiltinTileArgumentsRange(CallExpr *TheCall,
+ ArrayRef<int> ArgNums) {
+ for (int ArgNum : ArgNums) {
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, TileRegLow,
+ TileRegHigh))
+ return true;
+ }
+ return false;
+}
+
+bool SemaX86::CheckBuiltinTileDuplicate(CallExpr *TheCall,
+ ArrayRef<int> ArgNums) {
+  // The maximum number of tile registers is TileRegHigh + 1, so we use one
+  // bit per register in the bitset to track which registers are used.
+ std::bitset<TileRegHigh + 1> ArgValues;
+ for (int ArgNum : ArgNums) {
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ continue;
+
+ llvm::APSInt Result;
+ if (SemaRef.BuiltinConstantArg(TheCall, ArgNum, Result))
+ return true;
+ int ArgExtValue = Result.getExtValue();
+ assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
+ "Incorrect tile register num.");
+ if (ArgValues.test(ArgExtValue))
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_x86_builtin_tile_arg_duplicate)
+ << TheCall->getArg(ArgNum)->getSourceRange();
+ ArgValues.set(ArgExtValue);
+ }
+ return false;
+}
+
+bool SemaX86::CheckBuiltinTileRangeAndDuplicate(CallExpr *TheCall,
+ ArrayRef<int> ArgNums) {
+ return CheckBuiltinTileArgumentsRange(TheCall, ArgNums) ||
+ CheckBuiltinTileDuplicate(TheCall, ArgNums);
+}
+
+bool SemaX86::CheckBuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
+ switch (BuiltinID) {
+ default:
+ return false;
+ case X86::BI__builtin_ia32_tileloadd64:
+ case X86::BI__builtin_ia32_tileloaddt164:
+ case X86::BI__builtin_ia32_tilestored64:
+ case X86::BI__builtin_ia32_tilezero:
+ return CheckBuiltinTileArgumentsRange(TheCall, 0);
+ case X86::BI__builtin_ia32_tdpbssd:
+ case X86::BI__builtin_ia32_tdpbsud:
+ case X86::BI__builtin_ia32_tdpbusd:
+ case X86::BI__builtin_ia32_tdpbuud:
+ case X86::BI__builtin_ia32_tdpbf16ps:
+ case X86::BI__builtin_ia32_tdpfp16ps:
+ case X86::BI__builtin_ia32_tcmmimfp16ps:
+ case X86::BI__builtin_ia32_tcmmrlfp16ps:
+ return CheckBuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
+ }
+}
+static bool isX86_32Builtin(unsigned BuiltinID) {
+ // These builtins only work on x86-32 targets.
+ switch (BuiltinID) {
+ case X86::BI__builtin_ia32_readeflags_u32:
+ case X86::BI__builtin_ia32_writeeflags_u32:
+ return true;
+ }
+
+ return false;
+}
+
+bool SemaX86::CheckBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall) {
+ // Check for 32-bit only builtins on a 64-bit target.
+ const llvm::Triple &TT = TI.getTriple();
+ if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
+ return Diag(TheCall->getCallee()->getBeginLoc(),
+ diag::err_32_bit_builtin_64_bit_tgt);
+
+  // If the intrinsic has rounding or SAE, make sure it's valid.
+ if (CheckBuiltinRoundingOrSAE(BuiltinID, TheCall))
+ return true;
+
+  // If the intrinsic has a gather/scatter scale immediate, make sure it's valid.
+ if (CheckBuiltinGatherScatterScale(BuiltinID, TheCall))
+ return true;
+
+  // If the intrinsic has tile arguments, make sure they are valid.
+ if (CheckBuiltinTileArguments(BuiltinID, TheCall))
+ return true;
+
+ // For intrinsics which take an immediate value as part of the instruction,
+ // range check them here.
+ int i = 0, l = 0, u = 0;
+ switch (BuiltinID) {
+ default:
+ return false;
+ case X86::BI__builtin_ia32_vec_ext_v2si:
+ case X86::BI__builtin_ia32_vec_ext_v2di:
+ case X86::BI__builtin_ia32_vextractf128_pd256:
+ case X86::BI__builtin_ia32_vextractf128_ps256:
+ case X86::BI__builtin_ia32_vextractf128_si256:
+ case X86::BI__builtin_ia32_extract128i256:
+ case X86::BI__builtin_ia32_extractf64x4_mask:
+ case X86::BI__builtin_ia32_extracti64x4_mask:
+ case X86::BI__builtin_ia32_extractf32x8_mask:
+ case X86::BI__builtin_ia32_extracti32x8_mask:
+ case X86::BI__builtin_ia32_extractf64x2_256_mask:
+ case X86::BI__builtin_ia32_extracti64x2_256_mask:
+ case X86::BI__builtin_ia32_extractf32x4_256_mask:
+ case X86::BI__builtin_ia32_extracti32x4_256_mask:
+ i = 1;
+ l = 0;
+ u = 1;
+ break;
+ case X86::BI__builtin_ia32_vec_set_v2di:
+ case X86::BI__builtin_ia32_vinsertf128_pd256:
+ case X86::BI__builtin_ia32_vinsertf128_ps256:
+ case X86::BI__builtin_ia32_vinsertf128_si256:
+ case X86::BI__builtin_ia32_insert128i256:
+ case X86::BI__builtin_ia32_insertf32x8:
+ case X86::BI__builtin_ia32_inserti32x8:
+ case X86::BI__builtin_ia32_insertf64x4:
+ case X86::BI__builtin_ia32_inserti64x4:
+ case X86::BI__builtin_ia32_insertf64x2_256:
+ case X86::BI__builtin_ia32_inserti64x2_256:
+ case X86::BI__builtin_ia32_insertf32x4_256:
+ case X86::BI__builtin_ia32_inserti32x4_256:
+ i = 2;
+ l = 0;
+ u = 1;
+ break;
+ case X86::BI__builtin_ia32_vpermilpd:
+ case X86::BI__builtin_ia32_vec_ext_v4hi:
+ case X86::BI__builtin_ia32_vec_ext_v4si:
+ case X86::BI__builtin_ia32_vec_ext_v4sf:
+ case X86::BI__builtin_ia32_vec_ext_v4di:
+ case X86::BI__builtin_ia32_extractf32x4_mask:
+ case X86::BI__builtin_ia32_extracti32x4_mask:
+ case X86::BI__builtin_ia32_extractf64x2_512_mask:
+ case X86::BI__builtin_ia32_extracti64x2_512_mask:
+ i = 1;
+ l = 0;
+ u = 3;
+ break;
+ case X86::BI_mm_prefetch:
+ case X86::BI__builtin_ia32_vec_ext_v8hi:
+ case X86::BI__builtin_ia32_vec_ext_v8si:
+ i = 1;
+ l = 0;
+ u = 7;
+ break;
+ case X86::BI__builtin_ia32_sha1rnds4:
+ case X86::BI__builtin_ia32_blendpd:
+ case X86::BI__builtin_ia32_shufpd:
+ case X86::BI__builtin_ia32_vec_set_v4hi:
+ case X86::BI__builtin_ia32_vec_set_v4si:
+ case X86::BI__builtin_ia32_vec_set_v4di:
+ case X86::BI__builtin_ia32_shuf_f32x4_256:
+ case X86::BI__builtin_ia32_shuf_f64x2_256:
+ case X86::BI__builtin_ia32_shuf_i32x4_256:
+ case X86::BI__builtin_ia32_shuf_i64x2_256:
+ case X86::BI__builtin_ia32_insertf64x2_512:
+ case X86::BI__builtin_ia32_inserti64x2_512:
+ case X86::BI__builtin_ia32_insertf32x4:
+ case X86::BI__builtin_ia32_inserti32x4:
+ i = 2;
+ l = 0;
+ u = 3;
+ break;
+ case X86::BI__builtin_ia32_vpermil2pd:
+ case X86::BI__builtin_ia32_vpermil2pd256:
+ case X86::BI__builtin_ia32_vpermil2ps:
+ case X86::BI__builtin_ia32_vpermil2ps256:
+ i = 3;
+ l = 0;
+ u = 3;
+ break;
+ case X86::BI__builtin_ia32_cmpb128_mask:
+ case X86::BI__builtin_ia32_cmpw128_mask:
+ case X86::BI__builtin_ia32_cmpd128_mask:
+ case X86::BI__builtin_ia32_cmpq128_mask:
+ case X86::BI__builtin_ia32_cmpb256_mask:
+ case X86::BI__builtin_ia32_cmpw256_mask:
+ case X86::BI__builtin_ia32_cmpd256_mask:
+ case X86::BI__builtin_ia32_cmpq256_mask:
+ case X86::BI__builtin_ia32_cmpb512_mask:
+ case X86::BI__builtin_ia32_cmpw512_mask:
+ case X86::BI__builtin_ia32_cmpd512_mask:
+ case X86::BI__builtin_ia32_cmpq512_mask:
+ case X86::BI__builtin_ia32_ucmpb128_mask:
+ case X86::BI__builtin_ia32_ucmpw128_mask:
+ case X86::BI__builtin_ia32_ucmpd128_mask:
+ case X86::BI__builtin_ia32_ucmpq128_mask:
+ case X86::BI__builtin_ia32_ucmpb256_mask:
+ case X86::BI__builtin_ia32_ucmpw256_mask:
+ case X86::BI__builtin_ia32_ucmpd256_mask:
+ case X86::BI__builtin_ia32_ucmpq256_mask:
+ case X86::BI__builtin_ia32_ucmpb512_mask:
+ case X86::BI__builtin_ia32_ucmpw512_mask:
+ case X86::BI__builtin_ia32_ucmpd512_mask:
+ case X86::BI__builtin_ia32_ucmpq512_mask:
+ case X86::BI__builtin_ia32_vpcomub:
+ case X86::BI__builtin_ia32_vpcomuw:
+ case X86::BI__builtin_ia32_vpcomud:
+ case X86::BI__builtin_ia32_vpcomuq:
+ case X86::BI__builtin_ia32_vpcomb:
+ case X86::BI__builtin_ia32_vpcomw:
+ case X86::BI__builtin_ia32_vpcomd:
+ case X86::BI__builtin_ia32_vpcomq:
+ case X86::BI__builtin_ia32_vec_set_v8hi:
+ case X86::BI__builtin_ia32_vec_set_v8si:
+ i = 2;
+ l = 0;
+ u = 7;
+ break;
+ case X86::BI__builtin_ia32_vpermilpd256:
+ case X86::BI__builtin_ia32_roundps:
+ case X86::BI__builtin_ia32_roundpd:
+ case X86::BI__builtin_ia32_roundps256:
+ case X86::BI__builtin_ia32_roundpd256:
+ case X86::BI__builtin_ia32_getmantpd128_mask:
+ case X86::BI__builtin_ia32_getmantpd256_mask:
+ case X86::BI__builtin_ia32_getmantps128_mask:
+ case X86::BI__builtin_ia32_getmantps256_mask:
+ case X86::BI__builtin_ia32_getmantpd512_mask:
+ case X86::BI__builtin_ia32_getmantps512_mask:
+ case X86::BI__builtin_ia32_getmantph128_mask:
+ case X86::BI__builtin_ia32_getmantph256_mask:
+ case X86::BI__builtin_ia32_getmantph512_mask:
+ case X86::BI__builtin_ia32_vec_ext_v16qi:
+ case X86::BI__builtin_ia32_vec_ext_v16hi:
+ i = 1;
+ l = 0;
+ u = 15;
+ break;
+ case X86::BI__builtin_ia32_pblendd128:
+ case X86::BI__builtin_ia32_blendps:
+ case X86::BI__builtin_ia32_blendpd256:
+ case X86::BI__builtin_ia32_shufpd256:
+ case X86::BI__builtin_ia32_roundss:
+ case X86::BI__builtin_ia32_roundsd:
+ case X86::BI__builtin_ia32_rangepd128_mask:
+ case X86::BI__builtin_ia32_rangepd256_mask:
+ case X86::BI__builtin_ia32_rangepd512_mask:
+ case X86::BI__builtin_ia32_rangeps128_mask:
+ case X86::BI__builtin_ia32_rangeps256_mask:
+ case X86::BI__builtin_ia32_rangeps512_mask:
+ case X86::BI__builtin_ia32_getmantsd_round_mask:
+ case X86::BI__builtin_ia32_getmantss_round_mask:
+ case X86::BI__builtin_ia32_getmantsh_round_mask:
+ case X86::BI__builtin_ia32_vec_set_v16qi:
+ case X86::BI__builtin_ia32_vec_set_v16hi:
+ i = 2;
+ l = 0;
+ u = 15;
+ break;
+ case X86::BI__builtin_ia32_vec_ext_v32qi:
+ i = 1;
+ l = 0;
+ u = 31;
+ break;
+ case X86::BI__builtin_ia32_cmpps:
+ case X86::BI__builtin_ia32_cmpss:
+ case X86::BI__builtin_ia32_cmppd:
+ case X86::BI__builtin_ia32_cmpsd:
+ case X86::BI__builtin_ia32_cmpps256:
+ case X86::BI__builtin_ia32_cmppd256:
+ case X86::BI__builtin_ia32_cmpps128_mask:
+ case X86::BI__builtin_ia32_cmppd128_mask:
+ case X86::BI__builtin_ia32_cmpps256_mask:
+ case X86::BI__builtin_ia32_cmppd256_mask:
+ case X86::BI__builtin_ia32_cmpps512_mask:
+ case X86::BI__builtin_ia32_cmppd512_mask:
+ case X86::BI__builtin_ia32_cmpsd_mask:
+ case X86::BI__builtin_ia32_cmpss_mask:
+ case X86::BI__builtin_ia32_vec_set_v32qi:
+ i = 2;
+ l = 0;
+ u = 31;
+ break;
+ case X86::BI__builtin_ia32_permdf256:
+ case X86::BI__builtin_ia32_permdi256:
+ case X86::BI__builtin_ia32_permdf512:
+ case X86::BI__builtin_ia32_permdi512:
+ case X86::BI__builtin_ia32_vpermilps:
+ case X86::BI__builtin_ia32_vpermilps256:
+ case X86::BI__builtin_ia32_vpermilpd512:
+ case X86::BI__builtin_ia32_vpermilps512:
+ case X86::BI__builtin_ia32_pshufd:
+ case X86::BI__builtin_ia32_pshufd256:
+ case X86::BI__builtin_ia32_pshufd512:
+ case X86::BI__builtin_ia32_pshufhw:
+ case X86::BI__builtin_ia32_pshufhw256:
+ case X86::BI__builtin_ia32_pshufhw512:
+ case X86::BI__builtin_ia32_pshuflw:
+ case X86::BI__builtin_ia32_pshuflw256:
+ case X86::BI__builtin_ia32_pshuflw512:
+ case X86::BI__builtin_ia32_vcvtps2ph:
+ case X86::BI__builtin_ia32_vcvtps2ph_mask:
+ case X86::BI__builtin_ia32_vcvtps2ph256:
+ case X86::BI__builtin_ia32_vcvtps2ph256_mask:
+ case X86::BI__builtin_ia32_vcvtps2ph512_mask:
+ case X86::BI__builtin_ia32_rndscaleps_128_mask:
+ case X86::BI__builtin_ia32_rndscalepd_128_mask:
+ case X86::BI__builtin_ia32_rndscaleps_256_mask:
+ case X86::BI__builtin_ia32_rndscalepd_256_mask:
+ case X86::BI__builtin_ia32_rndscaleps_mask:
+ case X86::BI__builtin_ia32_rndscalepd_mask:
+ case X86::BI__builtin_ia32_rndscaleph_mask:
+ case X86::BI__builtin_ia32_reducepd128_mask:
+ case X86::BI__builtin_ia32_reducepd256_mask:
+ case X86::BI__builtin_ia32_reducepd512_mask:
+ case X86::BI__builtin_ia32_reduceps128_mask:
+ case X86::BI__builtin_ia32_reduceps256_mask:
+ case X86::BI__builtin_ia32_reduceps512_mask:
+ case X86::BI__builtin_ia32_reduceph128_mask:
+ case X86::BI__builtin_ia32_reduceph256_mask:
+ case X86::BI__builtin_ia32_reduceph512_mask:
+ case X86::BI__builtin_ia32_prold512:
+ case X86::BI__builtin_ia32_prolq512:
+ case X86::BI__builtin_ia32_prold128:
+ case X86::BI__builtin_ia32_prold256:
+ case X86::BI__builtin_ia32_prolq128:
+ case X86::BI__builtin_ia32_prolq256:
+ case X86::BI__builtin_ia32_prord512:
+ case X86::BI__builtin_ia32_prorq512:
+ case X86::BI__builtin_ia32_prord128:
+ case X86::BI__builtin_ia32_prord256:
+ case X86::BI__builtin_ia32_prorq128:
+ case X86::BI__builtin_ia32_prorq256:
+ case X86::BI__builtin_ia32_fpclasspd128_mask:
+ case X86::BI__builtin_ia32_fpclasspd256_mask:
+ case X86::BI__builtin_ia32_fpclassps128_mask:
+ case X86::BI__builtin_ia32_fpclassps256_mask:
+ case X86::BI__builtin_ia32_fpclassps512_mask:
+ case X86::BI__builtin_ia32_fpclasspd512_mask:
+ case X86::BI__builtin_ia32_fpclassph128_mask:
+ case X86::BI__builtin_ia32_fpclassph256_mask:
+ case X86::BI__builtin_ia32_fpclassph512_mask:
+ case X86::BI__builtin_ia32_fpclasssd_mask:
+ case X86::BI__builtin_ia32_fpclassss_mask:
+ case X86::BI__builtin_ia32_fpclasssh_mask:
+ case X86::BI__builtin_ia32_pslldqi128_byteshift:
+ case X86::BI__builtin_ia32_pslldqi256_byteshift:
+ case X86::BI__builtin_ia32_pslldqi512_byteshift:
+ case X86::BI__builtin_ia32_psrldqi128_byteshift:
+ case X86::BI__builtin_ia32_psrldqi256_byteshift:
+ case X86::BI__builtin_ia32_psrldqi512_byteshift:
+ case X86::BI__builtin_ia32_kshiftliqi:
+ case X86::BI__builtin_ia32_kshiftlihi:
+ case X86::BI__builtin_ia32_kshiftlisi:
+ case X86::BI__builtin_ia32_kshiftlidi:
+ case X86::BI__builtin_ia32_kshiftriqi:
+ case X86::BI__builtin_ia32_kshiftrihi:
+ case X86::BI__builtin_ia32_kshiftrisi:
+ case X86::BI__builtin_ia32_kshiftridi:
+ i = 1;
+ l = 0;
+ u = 255;
+ break;
+ case X86::BI__builtin_ia32_vperm2f128_pd256:
+ case X86::BI__builtin_ia32_vperm2f128_ps256:
+ case X86::BI__builtin_ia32_vperm2f128_si256:
+ case X86::BI__builtin_ia32_permti256:
+ case X86::BI__builtin_ia32_pblendw128:
+ case X86::BI__builtin_ia32_pblendw256:
+ case X86::BI__builtin_ia32_blendps256:
+ case X86::BI__builtin_ia32_pblendd256:
+ case X86::BI__builtin_ia32_palignr128:
+ case X86::BI__builtin_ia32_palignr256:
+ case X86::BI__builtin_ia32_palignr512:
+ case X86::BI__builtin_ia32_alignq512:
+ case X86::BI__builtin_ia32_alignd512:
+ case X86::BI__builtin_ia32_alignd128:
+ case X86::BI__builtin_ia32_alignd256:
+ case X86::BI__builtin_ia32_alignq128:
+ case X86::BI__builtin_ia32_alignq256:
+ case X86::BI__builtin_ia32_vcomisd:
+ case X86::BI__builtin_ia32_vcomiss:
+ case X86::BI__builtin_ia32_shuf_f32x4:
+ case X86::BI__builtin_ia32_shuf_f64x2:
+ case X86::BI__builtin_ia32_shuf_i32x4:
+ case X86::BI__builtin_ia32_shuf_i64x2:
+ case X86::BI__builtin_ia32_shufpd512:
+ case X86::BI__builtin_ia32_shufps:
+ case X86::BI__builtin_ia32_shufps256:
+ case X86::BI__builtin_ia32_shufps512:
+ case X86::BI__builtin_ia32_dbpsadbw128:
+ case X86::BI__builtin_ia32_dbpsadbw256:
+ case X86::BI__builtin_ia32_dbpsadbw512:
+ case X86::BI__builtin_ia32_vpshldd128:
+ case X86::BI__builtin_ia32_vpshldd256:
+ case X86::BI__builtin_ia32_vpshldd512:
+ case X86::BI__builtin_ia32_vpshldq128:
+ case X86::BI__builtin_ia32_vpshldq256:
+ case X86::BI__builtin_ia32_vpshldq512:
+ case X86::BI__builtin_ia32_vpshldw128:
+ case X86::BI__builtin_ia32_vpshldw256:
+ case X86::BI__builtin_ia32_vpshldw512:
+ case X86::BI__builtin_ia32_vpshrdd128:
+ case X86::BI__builtin_ia32_vpshrdd256:
+ case X86::BI__builtin_ia32_vpshrdd512:
+ case X86::BI__builtin_ia32_vpshrdq128:
+ case X86::BI__builtin_ia32_vpshrdq256:
+ case X86::BI__builtin_ia32_vpshrdq512:
+ case X86::BI__builtin_ia32_vpshrdw128:
+ case X86::BI__builtin_ia32_vpshrdw256:
+ case X86::BI__builtin_ia32_vpshrdw512:
+ i = 2;
+ l = 0;
+ u = 255;
+ break;
+ case X86::BI__builtin_ia32_fixupimmpd512_mask:
+ case X86::BI__builtin_ia32_fixupimmpd512_maskz:
+ case X86::BI__builtin_ia32_fixupimmps512_mask:
+ case X86::BI__builtin_ia32_fixupimmps512_maskz:
+ case X86::BI__builtin_ia32_fixupimmsd_mask:
+ case X86::BI__builtin_ia32_fixupimmsd_maskz:
+ case X86::BI__builtin_ia32_fixupimmss_mask:
+ case X86::BI__builtin_ia32_fixupimmss_maskz:
+ case X86::BI__builtin_ia32_fixupimmpd128_mask:
+ case X86::BI__builtin_ia32_fixupimmpd128_maskz:
+ case X86::BI__builtin_ia32_fixupimmpd256_mask:
+ case X86::BI__builtin_ia32_fixupimmpd256_maskz:
+ case X86::BI__builtin_ia32_fixupimmps128_mask:
+ case X86::BI__builtin_ia32_fixupimmps128_maskz:
+ case X86::BI__builtin_ia32_fixupimmps256_mask:
+ case X86::BI__builtin_ia32_fixupimmps256_maskz:
+ case X86::BI__builtin_ia32_pternlogd512_mask:
+ case X86::BI__builtin_ia32_pternlogd512_maskz:
+ case X86::BI__builtin_ia32_pternlogq512_mask:
+ case X86::BI__builtin_ia32_pternlogq512_maskz:
+ case X86::BI__builtin_ia32_pternlogd128_mask:
+ case X86::BI__builtin_ia32_pternlogd128_maskz:
+ case X86::BI__builtin_ia32_pternlogd256_mask:
+ case X86::BI__builtin_ia32_pternlogd256_maskz:
+ case X86::BI__builtin_ia32_pternlogq128_mask:
+ case X86::BI__builtin_ia32_pternlogq128_maskz:
+ case X86::BI__builtin_ia32_pternlogq256_mask:
+ case X86::BI__builtin_ia32_pternlogq256_maskz:
+ case X86::BI__builtin_ia32_vsm3rnds2:
+ i = 3;
+ l = 0;
+ u = 255;
+ break;
+ case X86::BI__builtin_ia32_reducesd_mask:
+ case X86::BI__builtin_ia32_reducess_mask:
+ case X86::BI__builtin_ia32_rndscalesd_round_mask:
+ case X86::BI__builtin_ia32_rndscaless_round_mask:
+ case X86::BI__builtin_ia32_rndscalesh_round_mask:
+ case X86::BI__builtin_ia32_reducesh_mask:
+ i = 4;
+ l = 0;
+ u = 255;
+ break;
+ case X86::BI__builtin_ia32_cmpccxadd32:
+ case X86::BI__builtin_ia32_cmpccxadd64:
+ i = 3;
+ l = 0;
+ u = 15;
+ break;
+ }
+
+ // Note that we don't force a hard error on the range check here, allowing
+ // template-generated or macro-generated dead code to potentially have out-of-
+ // range values. These need to code generate, but don't need to necessarily
+ // make any sense. We use a warning that defaults to an error.
+ return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u,
+ /*RangeIsError*/ false);
+}
+
+void SemaX86::handleAnyInterruptAttr(Decl *D, const ParsedAttr &AL) {
+ // Semantic checks for a function with the 'interrupt' attribute.
+ // a) Must be a function.
+ // b) Must have the 'void' return type.
+ // c) Must take 1 or 2 arguments.
+ // d) The 1st argument must be a pointer.
+ // e) The 2nd argument (if any) must be an unsigned integer.
+ ASTContext &Context = getASTContext();
+
+ if (!isFuncOrMethodForAttrSubject(D) || !hasFunctionProto(D) ||
+ isInstanceMethod(D) ||
+ CXXMethodDecl::isStaticOverloadedOperator(
+ cast<NamedDecl>(D)->getDeclName().getCXXOverloadedOperator())) {
+ Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << AL << AL.isRegularKeywordAttribute()
+ << ExpectedFunctionWithProtoType;
+ return;
+ }
+ // Interrupt handler must have void return type.
+ if (!getFunctionOrMethodResultType(D)->isVoidType()) {
+ Diag(getFunctionOrMethodResultSourceRange(D).getBegin(),
+ diag::err_anyx86_interrupt_attribute)
+ << (SemaRef.Context.getTargetInfo().getTriple().getArch() ==
+ llvm::Triple::x86
+ ? 0
+ : 1)
+ << 0;
+ return;
+ }
+ // Interrupt handler must have 1 or 2 parameters.
+ unsigned NumParams = getFunctionOrMethodNumParams(D);
+ if (NumParams < 1 || NumParams > 2) {
+ Diag(D->getBeginLoc(), diag::err_anyx86_interrupt_attribute)
+ << (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86
+ ? 0
+ : 1)
+ << 1;
+ return;
+ }
+ // The first argument must be a pointer.
+ if (!getFunctionOrMethodParamType(D, 0)->isPointerType()) {
+ Diag(getFunctionOrMethodParamRange(D, 0).getBegin(),
+ diag::err_anyx86_interrupt_attribute)
+ << (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86
+ ? 0
+ : 1)
+ << 2;
+ return;
+ }
+ // The second argument, if present, must be an unsigned integer.
+ unsigned TypeSize =
+ Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86_64
+ ? 64
+ : 32;
+ if (NumParams == 2 &&
+ (!getFunctionOrMethodParamType(D, 1)->isUnsignedIntegerType() ||
+ Context.getTypeSize(getFunctionOrMethodParamType(D, 1)) != TypeSize)) {
+ Diag(getFunctionOrMethodParamRange(D, 1).getBegin(),
+ diag::err_anyx86_interrupt_attribute)
+ << (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86
+ ? 0
+ : 1)
+ << 3 << Context.getIntTypeForBitwidth(TypeSize, /*Signed=*/false);
+ return;
+ }
+ D->addAttr(::new (Context) AnyX86InterruptAttr(Context, AL));
+ D->addAttr(UsedAttr::CreateImplicit(Context));
+}
+
+void SemaX86::handleForceAlignArgPointerAttr(Decl *D, const ParsedAttr &AL) {
+ // If we try to apply it to a function pointer, don't warn, but don't
+ // do anything, either. It doesn't matter anyway, because there's nothing
+ // special about calling a force_align_arg_pointer function.
+ const auto *VD = dyn_cast<ValueDecl>(D);
+ if (VD && VD->getType()->isFunctionPointerType())
+ return;
+ // Also don't warn on function pointer typedefs.
+ const auto *TD = dyn_cast<TypedefNameDecl>(D);
+ if (TD && (TD->getUnderlyingType()->isFunctionPointerType() ||
+ TD->getUnderlyingType()->isFunctionType()))
+ return;
+ // Attribute can only be applied to function types.
+ if (!isa<FunctionDecl>(D)) {
+ Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
+ return;
+ }
+
+ D->addAttr(::new (getASTContext())
+ X86ForceAlignArgPointerAttr(getASTContext(), AL));
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/TreeTransform.h b/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
index 2f012cade6b9..0ae393524fe0 100644
--- a/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
+++ b/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
@@ -27,6 +27,7 @@
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtOpenACC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/Basic/DiagnosticParse.h"
#include "clang/Basic/OpenMPKinds.h"
@@ -38,6 +39,11 @@
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaOpenACC.h"
+#include "clang/Sema/SemaOpenMP.h"
+#include "clang/Sema/SemaPseudoObject.h"
+#include "clang/Sema/SemaSYCL.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
@@ -766,6 +772,12 @@ public:
/// the body.
StmtResult SkipLambdaBody(LambdaExpr *E, Stmt *Body);
+ CXXRecordDecl::LambdaDependencyKind
+ ComputeLambdaDependency(LambdaScopeInfo *LSI) {
+ return static_cast<CXXRecordDecl::LambdaDependencyKind>(
+ LSI->Lambda->getLambdaDependencyKind());
+ }
+
QualType TransformReferenceType(TypeLocBuilder &TLB, ReferenceTypeLoc TL);
StmtResult TransformCompoundStmt(CompoundStmt *S, bool IsStmtExpr);
@@ -786,6 +798,9 @@ public:
ParenExpr *PE, DependentScopeDeclRefExpr *DRE, bool IsAddressOfOperand,
TypeSourceInfo **RecoveryTSI);
+ ExprResult TransformUnresolvedLookupExpr(UnresolvedLookupExpr *E,
+ bool IsAddressOfOperand);
+
StmtResult TransformOMPExecutableDirective(OMPExecutableDirective *S);
// FIXME: We use LLVM_ATTRIBUTE_NOINLINE because inlining causes a ridiculous
@@ -1047,6 +1062,12 @@ public:
/// Subclasses may override this routine to provide different behavior.
QualType RebuildDecltypeType(Expr *Underlying, SourceLocation Loc);
+ QualType RebuildPackIndexingType(QualType Pattern, Expr *IndexExpr,
+ SourceLocation Loc,
+ SourceLocation EllipsisLoc,
+ bool FullySubstituted,
+ ArrayRef<QualType> Expansions = {});
+
/// Build a new C++11 auto type.
///
/// By default, builds a new AutoType with the given deduced type.
@@ -1590,8 +1611,8 @@ public:
Stmt *TryBody,
MultiStmtArg CatchStmts,
Stmt *Finally) {
- return getSema().ActOnObjCAtTryStmt(AtLoc, TryBody, CatchStmts,
- Finally);
+ return getSema().ObjC().ActOnObjCAtTryStmt(AtLoc, TryBody, CatchStmts,
+ Finally);
}
/// Rebuild an Objective-C exception declaration.
@@ -1600,10 +1621,9 @@ public:
/// Subclasses may override this routine to provide different behavior.
VarDecl *RebuildObjCExceptionDecl(VarDecl *ExceptionDecl,
TypeSourceInfo *TInfo, QualType T) {
- return getSema().BuildObjCExceptionDecl(TInfo, T,
- ExceptionDecl->getInnerLocStart(),
- ExceptionDecl->getLocation(),
- ExceptionDecl->getIdentifier());
+ return getSema().ObjC().BuildObjCExceptionDecl(
+ TInfo, T, ExceptionDecl->getInnerLocStart(),
+ ExceptionDecl->getLocation(), ExceptionDecl->getIdentifier());
}
/// Build a new Objective-C \@catch statement.
@@ -1614,8 +1634,7 @@ public:
SourceLocation RParenLoc,
VarDecl *Var,
Stmt *Body) {
- return getSema().ActOnObjCAtCatchStmt(AtLoc, RParenLoc,
- Var, Body);
+ return getSema().ObjC().ActOnObjCAtCatchStmt(AtLoc, RParenLoc, Var, Body);
}
/// Build a new Objective-C \@finally statement.
@@ -1624,7 +1643,7 @@ public:
/// Subclasses may override this routine to provide different behavior.
StmtResult RebuildObjCAtFinallyStmt(SourceLocation AtLoc,
Stmt *Body) {
- return getSema().ActOnObjCAtFinallyStmt(AtLoc, Body);
+ return getSema().ObjC().ActOnObjCAtFinallyStmt(AtLoc, Body);
}
/// Build a new Objective-C \@throw statement.
@@ -1633,7 +1652,7 @@ public:
/// Subclasses may override this routine to provide different behavior.
StmtResult RebuildObjCAtThrowStmt(SourceLocation AtLoc,
Expr *Operand) {
- return getSema().BuildObjCAtThrowStmt(AtLoc, Operand);
+ return getSema().ObjC().BuildObjCAtThrowStmt(AtLoc, Operand);
}
/// Build a new OpenMP Canonical loop.
@@ -1641,7 +1660,7 @@ public:
/// Ensures that the outermost loop in @p LoopStmt is wrapped by a
/// OMPCanonicalLoop.
StmtResult RebuildOMPCanonicalLoop(Stmt *LoopStmt) {
- return getSema().ActOnOpenMPCanonicalLoop(LoopStmt);
+ return getSema().OpenMP().ActOnOpenMPCanonicalLoop(LoopStmt);
}
/// Build a new OpenMP executable directive.
@@ -1654,7 +1673,7 @@ public:
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc,
OpenMPDirectiveKind PrevMappedDirective = OMPD_unknown) {
- return getSema().ActOnOpenMPExecutableDirective(
+ return getSema().OpenMP().ActOnOpenMPExecutableDirective(
Kind, DirName, CancelRegion, Clauses, AStmt, StartLoc, EndLoc,
PrevMappedDirective);
}
@@ -1669,9 +1688,9 @@ public:
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPIfClause(NameModifier, Condition, StartLoc,
- LParenLoc, NameModifierLoc, ColonLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPIfClause(
+ NameModifier, Condition, StartLoc, LParenLoc, NameModifierLoc, ColonLoc,
+ EndLoc);
}
/// Build a new OpenMP 'final' clause.
@@ -1681,8 +1700,8 @@ public:
OMPClause *RebuildOMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPFinalClause(Condition, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPFinalClause(Condition, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'num_threads' clause.
@@ -1693,8 +1712,8 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPNumThreadsClause(NumThreads, StartLoc,
- LParenLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPNumThreadsClause(NumThreads, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'safelen' clause.
@@ -1704,7 +1723,8 @@ public:
OMPClause *RebuildOMPSafelenClause(Expr *Len, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPSafelenClause(Len, StartLoc, LParenLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPSafelenClause(Len, StartLoc, LParenLoc,
+ EndLoc);
}
/// Build a new OpenMP 'simdlen' clause.
@@ -1714,28 +1734,30 @@ public:
OMPClause *RebuildOMPSimdlenClause(Expr *Len, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPSimdlenClause(Len, StartLoc, LParenLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPSimdlenClause(Len, StartLoc, LParenLoc,
+ EndLoc);
}
OMPClause *RebuildOMPSizesClause(ArrayRef<Expr *> Sizes,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPSizesClause(Sizes, StartLoc, LParenLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPSizesClause(Sizes, StartLoc, LParenLoc,
+ EndLoc);
}
/// Build a new OpenMP 'full' clause.
OMPClause *RebuildOMPFullClause(SourceLocation StartLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPFullClause(StartLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPFullClause(StartLoc, EndLoc);
}
/// Build a new OpenMP 'partial' clause.
OMPClause *RebuildOMPPartialClause(Expr *Factor, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPPartialClause(Factor, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPPartialClause(Factor, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'allocator' clause.
@@ -1745,7 +1767,8 @@ public:
OMPClause *RebuildOMPAllocatorClause(Expr *A, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPAllocatorClause(A, StartLoc, LParenLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPAllocatorClause(A, StartLoc, LParenLoc,
+ EndLoc);
}
/// Build a new OpenMP 'collapse' clause.
@@ -1755,8 +1778,8 @@ public:
OMPClause *RebuildOMPCollapseClause(Expr *Num, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPCollapseClause(Num, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPCollapseClause(Num, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'default' clause.
@@ -1767,8 +1790,8 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPDefaultClause(Kind, KindKwLoc,
- StartLoc, LParenLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPDefaultClause(
+ Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
}
/// Build a new OpenMP 'proc_bind' clause.
@@ -1780,8 +1803,8 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPProcBindClause(Kind, KindKwLoc,
- StartLoc, LParenLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPProcBindClause(
+ Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
}
/// Build a new OpenMP 'schedule' clause.
@@ -1793,7 +1816,7 @@ public:
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc) {
- return getSema().ActOnOpenMPScheduleClause(
+ return getSema().OpenMP().ActOnOpenMPScheduleClause(
M1, M2, Kind, ChunkSize, StartLoc, LParenLoc, M1Loc, M2Loc, KindLoc,
CommaLoc, EndLoc);
}
@@ -1805,7 +1828,8 @@ public:
OMPClause *RebuildOMPOrderedClause(SourceLocation StartLoc,
SourceLocation EndLoc,
SourceLocation LParenLoc, Expr *Num) {
- return getSema().ActOnOpenMPOrderedClause(StartLoc, EndLoc, LParenLoc, Num);
+ return getSema().OpenMP().ActOnOpenMPOrderedClause(StartLoc, EndLoc,
+ LParenLoc, Num);
}
/// Build a new OpenMP 'private' clause.
@@ -1816,8 +1840,8 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPPrivateClause(VarList, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPPrivateClause(VarList, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'firstprivate' clause.
@@ -1828,8 +1852,8 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPFirstprivateClause(VarList, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPFirstprivateClause(VarList, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'lastprivate' clause.
@@ -1843,7 +1867,7 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPLastprivateClause(
+ return getSema().OpenMP().ActOnOpenMPLastprivateClause(
VarList, LPKind, LPKindLoc, ColonLoc, StartLoc, LParenLoc, EndLoc);
}
@@ -1855,8 +1879,8 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPSharedClause(VarList, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPSharedClause(VarList, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'reduction' clause.
@@ -1870,7 +1894,7 @@ public:
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions) {
- return getSema().ActOnOpenMPReductionClause(
+ return getSema().OpenMP().ActOnOpenMPReductionClause(
VarList, Modifier, StartLoc, LParenLoc, ModifierLoc, ColonLoc, EndLoc,
ReductionIdScopeSpec, ReductionId, UnresolvedReductions);
}
@@ -1885,7 +1909,7 @@ public:
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions) {
- return getSema().ActOnOpenMPTaskReductionClause(
+ return getSema().OpenMP().ActOnOpenMPTaskReductionClause(
VarList, StartLoc, LParenLoc, ColonLoc, EndLoc, ReductionIdScopeSpec,
ReductionId, UnresolvedReductions);
}
@@ -1901,7 +1925,7 @@ public:
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions) {
- return getSema().ActOnOpenMPInReductionClause(
+ return getSema().OpenMP().ActOnOpenMPInReductionClause(
VarList, StartLoc, LParenLoc, ColonLoc, EndLoc, ReductionIdScopeSpec,
ReductionId, UnresolvedReductions);
}
@@ -1915,9 +1939,9 @@ public:
SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation StepModifierLoc, SourceLocation EndLoc) {
- return getSema().ActOnOpenMPLinearClause(VarList, Step, StartLoc, LParenLoc,
- Modifier, ModifierLoc, ColonLoc,
- StepModifierLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPLinearClause(
+ VarList, Step, StartLoc, LParenLoc, Modifier, ModifierLoc, ColonLoc,
+ StepModifierLoc, EndLoc);
}
/// Build a new OpenMP 'aligned' clause.
@@ -1929,8 +1953,8 @@ public:
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPAlignedClause(VarList, Alignment, StartLoc,
- LParenLoc, ColonLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPAlignedClause(
+ VarList, Alignment, StartLoc, LParenLoc, ColonLoc, EndLoc);
}
/// Build a new OpenMP 'copyin' clause.
@@ -1941,8 +1965,8 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPCopyinClause(VarList, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPCopyinClause(VarList, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'copyprivate' clause.
@@ -1953,8 +1977,8 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPCopyprivateClause(VarList, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPCopyprivateClause(VarList, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'flush' pseudo clause.
@@ -1965,8 +1989,8 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPFlushClause(VarList, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPFlushClause(VarList, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'depobj' pseudo clause.
@@ -1976,8 +2000,8 @@ public:
OMPClause *RebuildOMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPDepobjClause(Depobj, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPDepobjClause(Depobj, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'depend' pseudo clause.
@@ -1989,8 +2013,8 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPDependClause(Data, DepModifier, VarList,
- StartLoc, LParenLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPDependClause(
+ Data, DepModifier, VarList, StartLoc, LParenLoc, EndLoc);
}
/// Build a new OpenMP 'device' clause.
@@ -2002,8 +2026,8 @@ public:
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPDeviceClause(Modifier, Device, StartLoc,
- LParenLoc, ModifierLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPDeviceClause(
+ Modifier, Device, StartLoc, LParenLoc, ModifierLoc, EndLoc);
}
/// Build a new OpenMP 'map' clause.
@@ -2017,7 +2041,7 @@ public:
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers) {
- return getSema().ActOnOpenMPMapClause(
+ return getSema().OpenMP().ActOnOpenMPMapClause(
IteratorModifier, MapTypeModifiers, MapTypeModifiersLoc,
MapperIdScopeSpec, MapperId, MapType, IsMapTypeImplicit, MapLoc,
ColonLoc, VarList, Locs,
@@ -2033,8 +2057,8 @@ public:
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPAllocateClause(Allocate, VarList, StartLoc,
- LParenLoc, ColonLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPAllocateClause(
+ Allocate, VarList, StartLoc, LParenLoc, ColonLoc, EndLoc);
}
/// Build a new OpenMP 'num_teams' clause.
@@ -2044,8 +2068,8 @@ public:
OMPClause *RebuildOMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPNumTeamsClause(NumTeams, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPNumTeamsClause(NumTeams, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'thread_limit' clause.
@@ -2056,8 +2080,8 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPThreadLimitClause(ThreadLimit, StartLoc,
- LParenLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPThreadLimitClause(
+ ThreadLimit, StartLoc, LParenLoc, EndLoc);
}
/// Build a new OpenMP 'priority' clause.
@@ -2067,8 +2091,8 @@ public:
OMPClause *RebuildOMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPPriorityClause(Priority, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPPriorityClause(Priority, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'grainsize' clause.
@@ -2080,8 +2104,8 @@ public:
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPGrainsizeClause(Modifier, Device, StartLoc,
- LParenLoc, ModifierLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPGrainsizeClause(
+ Modifier, Device, StartLoc, LParenLoc, ModifierLoc, EndLoc);
}
/// Build a new OpenMP 'num_tasks' clause.
@@ -2093,8 +2117,8 @@ public:
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPNumTasksClause(Modifier, NumTasks, StartLoc,
- LParenLoc, ModifierLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPNumTasksClause(
+ Modifier, NumTasks, StartLoc, LParenLoc, ModifierLoc, EndLoc);
}
/// Build a new OpenMP 'hint' clause.
@@ -2104,7 +2128,8 @@ public:
OMPClause *RebuildOMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPHintClause(Hint, StartLoc, LParenLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPHintClause(Hint, StartLoc, LParenLoc,
+ EndLoc);
}
/// Build a new OpenMP 'detach' clause.
@@ -2114,7 +2139,8 @@ public:
OMPClause *RebuildOMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPDetachClause(Evt, StartLoc, LParenLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPDetachClause(Evt, StartLoc, LParenLoc,
+ EndLoc);
}
/// Build a new OpenMP 'dist_schedule' clause.
@@ -2126,7 +2152,7 @@ public:
Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc) {
- return getSema().ActOnOpenMPDistScheduleClause(
+ return getSema().OpenMP().ActOnOpenMPDistScheduleClause(
Kind, ChunkSize, StartLoc, LParenLoc, KindLoc, CommaLoc, EndLoc);
}
@@ -2141,9 +2167,9 @@ public:
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers) {
- return getSema().ActOnOpenMPToClause(MotionModifiers, MotionModifiersLoc,
- MapperIdScopeSpec, MapperId, ColonLoc,
- VarList, Locs, UnresolvedMappers);
+ return getSema().OpenMP().ActOnOpenMPToClause(
+ MotionModifiers, MotionModifiersLoc, MapperIdScopeSpec, MapperId,
+ ColonLoc, VarList, Locs, UnresolvedMappers);
}
/// Build a new OpenMP 'from' clause.
@@ -2157,7 +2183,7 @@ public:
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers) {
- return getSema().ActOnOpenMPFromClause(
+ return getSema().OpenMP().ActOnOpenMPFromClause(
MotionModifiers, MotionModifiersLoc, MapperIdScopeSpec, MapperId,
ColonLoc, VarList, Locs, UnresolvedMappers);
}
@@ -2168,7 +2194,7 @@ public:
/// Subclasses may override this routine to provide different behavior.
OMPClause *RebuildOMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs) {
- return getSema().ActOnOpenMPUseDevicePtrClause(VarList, Locs);
+ return getSema().OpenMP().ActOnOpenMPUseDevicePtrClause(VarList, Locs);
}
/// Build a new OpenMP 'use_device_addr' clause.
@@ -2177,7 +2203,7 @@ public:
/// Subclasses may override this routine to provide different behavior.
OMPClause *RebuildOMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs) {
- return getSema().ActOnOpenMPUseDeviceAddrClause(VarList, Locs);
+ return getSema().OpenMP().ActOnOpenMPUseDeviceAddrClause(VarList, Locs);
}
/// Build a new OpenMP 'is_device_ptr' clause.
@@ -2186,7 +2212,7 @@ public:
/// Subclasses may override this routine to provide different behavior.
OMPClause *RebuildOMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs) {
- return getSema().ActOnOpenMPIsDevicePtrClause(VarList, Locs);
+ return getSema().OpenMP().ActOnOpenMPIsDevicePtrClause(VarList, Locs);
}
/// Build a new OpenMP 'has_device_addr' clause.
@@ -2195,7 +2221,7 @@ public:
/// Subclasses may override this routine to provide different behavior.
OMPClause *RebuildOMPHasDeviceAddrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs) {
- return getSema().ActOnOpenMPHasDeviceAddrClause(VarList, Locs);
+ return getSema().OpenMP().ActOnOpenMPHasDeviceAddrClause(VarList, Locs);
}
/// Build a new OpenMP 'defaultmap' clause.
@@ -2209,8 +2235,8 @@ public:
SourceLocation MLoc,
SourceLocation KindLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPDefaultmapClause(M, Kind, StartLoc, LParenLoc,
- MLoc, KindLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPDefaultmapClause(
+ M, Kind, StartLoc, LParenLoc, MLoc, KindLoc, EndLoc);
}
/// Build a new OpenMP 'nontemporal' clause.
@@ -2221,8 +2247,8 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPNontemporalClause(VarList, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPNontemporalClause(VarList, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'inclusive' clause.
@@ -2233,8 +2259,8 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPInclusiveClause(VarList, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPInclusiveClause(VarList, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'exclusive' clause.
@@ -2245,8 +2271,8 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPExclusiveClause(VarList, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPExclusiveClause(VarList, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'uses_allocators' clause.
@@ -2254,10 +2280,10 @@ public:
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
OMPClause *RebuildOMPUsesAllocatorsClause(
- ArrayRef<Sema::UsesAllocatorsData> Data, SourceLocation StartLoc,
+ ArrayRef<SemaOpenMP::UsesAllocatorsData> Data, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc) {
- return getSema().ActOnOpenMPUsesAllocatorClause(StartLoc, LParenLoc, EndLoc,
- Data);
+ return getSema().OpenMP().ActOnOpenMPUsesAllocatorClause(
+ StartLoc, LParenLoc, EndLoc, Data);
}
/// Build a new OpenMP 'affinity' clause.
@@ -2269,8 +2295,8 @@ public:
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators) {
- return getSema().ActOnOpenMPAffinityClause(StartLoc, LParenLoc, ColonLoc,
- EndLoc, Modifier, Locators);
+ return getSema().OpenMP().ActOnOpenMPAffinityClause(
+ StartLoc, LParenLoc, ColonLoc, EndLoc, Modifier, Locators);
}
/// Build a new OpenMP 'order' clause.
@@ -2281,8 +2307,8 @@ public:
OpenMPOrderClauseKind Kind, SourceLocation KindKwLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc,
OpenMPOrderClauseModifier Modifier, SourceLocation ModifierKwLoc) {
- return getSema().ActOnOpenMPOrderClause(Modifier, Kind, StartLoc, LParenLoc,
- ModifierKwLoc, KindKwLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPOrderClause(
+ Modifier, Kind, StartLoc, LParenLoc, ModifierKwLoc, KindKwLoc, EndLoc);
}
/// Build a new OpenMP 'init' clause.
@@ -2294,8 +2320,8 @@ public:
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPInitClause(InteropVar, InteropInfo, StartLoc,
- LParenLoc, VarLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPInitClause(
+ InteropVar, InteropInfo, StartLoc, LParenLoc, VarLoc, EndLoc);
}
/// Build a new OpenMP 'use' clause.
@@ -2305,8 +2331,8 @@ public:
OMPClause *RebuildOMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc, SourceLocation EndLoc) {
- return getSema().ActOnOpenMPUseClause(InteropVar, StartLoc, LParenLoc,
- VarLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPUseClause(InteropVar, StartLoc,
+ LParenLoc, VarLoc, EndLoc);
}
/// Build a new OpenMP 'destroy' clause.
@@ -2317,8 +2343,8 @@ public:
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPDestroyClause(InteropVar, StartLoc, LParenLoc,
- VarLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPDestroyClause(
+ InteropVar, StartLoc, LParenLoc, VarLoc, EndLoc);
}
/// Build a new OpenMP 'novariants' clause.
@@ -2329,8 +2355,8 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPNovariantsClause(Condition, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPNovariantsClause(Condition, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'nocontext' clause.
@@ -2340,8 +2366,8 @@ public:
OMPClause *RebuildOMPNocontextClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPNocontextClause(Condition, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPNocontextClause(Condition, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'filter' clause.
@@ -2351,8 +2377,8 @@ public:
OMPClause *RebuildOMPFilterClause(Expr *ThreadID, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPFilterClause(ThreadID, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPFilterClause(ThreadID, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'bind' clause.
@@ -2364,8 +2390,8 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPBindClause(Kind, KindLoc, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPBindClause(Kind, KindLoc, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'ompx_dyn_cgroup_mem' clause.
@@ -2375,8 +2401,8 @@ public:
OMPClause *RebuildOMPXDynCGroupMemClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPXDynCGroupMemClause(Size, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPXDynCGroupMemClause(Size, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'ompx_attribute' clause.
@@ -2387,8 +2413,8 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPXAttributeClause(Attrs, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPXAttributeClause(Attrs, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'ompx_bare' clause.
@@ -2397,7 +2423,7 @@ public:
/// Subclasses may override this routine to provide different behavior.
OMPClause *RebuildOMPXBareClause(SourceLocation StartLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPXBareClause(StartLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPXBareClause(StartLoc, EndLoc);
}
/// Build a new OpenMP 'align' clause.
@@ -2407,7 +2433,8 @@ public:
OMPClause *RebuildOMPAlignClause(Expr *A, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPAlignClause(A, StartLoc, LParenLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPAlignClause(A, StartLoc, LParenLoc,
+ EndLoc);
}
/// Build a new OpenMP 'at' clause.
@@ -2418,8 +2445,8 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPAtClause(Kind, KwLoc, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPAtClause(Kind, KwLoc, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'severity' clause.
@@ -2431,8 +2458,8 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPSeverityClause(Kind, KwLoc, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().OpenMP().ActOnOpenMPSeverityClause(Kind, KwLoc, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'message' clause.
@@ -2442,7 +2469,8 @@ public:
OMPClause *RebuildOMPMessageClause(Expr *MS, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPMessageClause(MS, StartLoc, LParenLoc, EndLoc);
+ return getSema().OpenMP().ActOnOpenMPMessageClause(MS, StartLoc, LParenLoc,
+ EndLoc);
}
/// Build a new OpenMP 'doacross' clause.
@@ -2454,7 +2482,7 @@ public:
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc) {
- return getSema().ActOnOpenMPDoacrossClause(
+ return getSema().OpenMP().ActOnOpenMPDoacrossClause(
DepType, DepLoc, ColonLoc, VarList, StartLoc, LParenLoc, EndLoc);
}
@@ -2464,7 +2492,7 @@ public:
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *object) {
- return getSema().ActOnObjCAtSynchronizedOperand(atLoc, object);
+ return getSema().ObjC().ActOnObjCAtSynchronizedOperand(atLoc, object);
}
/// Build a new Objective-C \@synchronized statement.
@@ -2473,7 +2501,7 @@ public:
/// Subclasses may override this routine to provide different behavior.
StmtResult RebuildObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *Object, Stmt *Body) {
- return getSema().ActOnObjCAtSynchronizedStmt(AtLoc, Object, Body);
+ return getSema().ObjC().ActOnObjCAtSynchronizedStmt(AtLoc, Object, Body);
}
/// Build a new Objective-C \@autoreleasepool statement.
@@ -2482,7 +2510,7 @@ public:
/// Subclasses may override this routine to provide different behavior.
StmtResult RebuildObjCAutoreleasePoolStmt(SourceLocation AtLoc,
Stmt *Body) {
- return getSema().ActOnObjCAutoreleasePoolStmt(AtLoc, Body);
+ return getSema().ObjC().ActOnObjCAutoreleasePoolStmt(AtLoc, Body);
}
/// Build a new Objective-C fast enumeration statement.
@@ -2494,14 +2522,13 @@ public:
Expr *Collection,
SourceLocation RParenLoc,
Stmt *Body) {
- StmtResult ForEachStmt = getSema().ActOnObjCForCollectionStmt(ForLoc,
- Element,
- Collection,
- RParenLoc);
+ StmtResult ForEachStmt = getSema().ObjC().ActOnObjCForCollectionStmt(
+ ForLoc, Element, Collection, RParenLoc);
if (ForEachStmt.isInvalid())
return StmtError();
- return getSema().FinishObjCForCollectionStmt(ForEachStmt.get(), Body);
+ return getSema().ObjC().FinishObjCForCollectionStmt(ForEachStmt.get(),
+ Body);
}
/// Build a new C++ exception declaration.
@@ -2544,12 +2571,11 @@ public:
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- StmtResult RebuildCXXForRangeStmt(SourceLocation ForLoc,
- SourceLocation CoawaitLoc, Stmt *Init,
- SourceLocation ColonLoc, Stmt *Range,
- Stmt *Begin, Stmt *End, Expr *Cond,
- Expr *Inc, Stmt *LoopVar,
- SourceLocation RParenLoc) {
+ StmtResult RebuildCXXForRangeStmt(
+ SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *Init,
+ SourceLocation ColonLoc, Stmt *Range, Stmt *Begin, Stmt *End, Expr *Cond,
+ Expr *Inc, Stmt *LoopVar, SourceLocation RParenLoc,
+ ArrayRef<MaterializeTemporaryExpr *> LifetimeExtendTemps) {
// If we've just learned that the range is actually an Objective-C
// collection, treat this as an Objective-C fast enumeration loop.
if (DeclStmt *RangeStmt = dyn_cast<DeclStmt>(Range)) {
@@ -2568,16 +2594,16 @@ public:
diag::err_objc_for_range_init_stmt)
<< Init->getSourceRange();
}
- return getSema().ActOnObjCForCollectionStmt(ForLoc, LoopVar,
- RangeExpr, RParenLoc);
+ return getSema().ObjC().ActOnObjCForCollectionStmt(
+ ForLoc, LoopVar, RangeExpr, RParenLoc);
}
}
}
}
- return getSema().BuildCXXForRangeStmt(ForLoc, CoawaitLoc, Init, ColonLoc,
- Range, Begin, End, Cond, Inc, LoopVar,
- RParenLoc, Sema::BFRK_Rebuild);
+ return getSema().BuildCXXForRangeStmt(
+ ForLoc, CoawaitLoc, Init, ColonLoc, Range, Begin, End, Cond, Inc,
+ LoopVar, RParenLoc, Sema::BFRK_Rebuild, LifetimeExtendTemps);
}
/// Build a new C++0x range-based for statement.
@@ -2619,7 +2645,8 @@ public:
SourceLocation LParen,
SourceLocation RParen,
TypeSourceInfo *TSI) {
- return getSema().BuildSYCLUniqueStableNameExpr(OpLoc, LParen, RParen, TSI);
+ return getSema().SYCL().BuildUniqueStableNameExpr(OpLoc, LParen, RParen,
+ TSI);
}
/// Build a new predefined expression.
@@ -2756,15 +2783,23 @@ public:
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildOMPArraySectionExpr(Expr *Base, SourceLocation LBracketLoc,
- Expr *LowerBound,
- SourceLocation ColonLocFirst,
- SourceLocation ColonLocSecond,
- Expr *Length, Expr *Stride,
- SourceLocation RBracketLoc) {
- return getSema().ActOnOMPArraySectionExpr(Base, LBracketLoc, LowerBound,
- ColonLocFirst, ColonLocSecond,
- Length, Stride, RBracketLoc);
+ ExprResult RebuildArraySectionExpr(bool IsOMPArraySection, Expr *Base,
+ SourceLocation LBracketLoc,
+ Expr *LowerBound,
+ SourceLocation ColonLocFirst,
+ SourceLocation ColonLocSecond,
+ Expr *Length, Expr *Stride,
+ SourceLocation RBracketLoc) {
+ if (IsOMPArraySection)
+ return getSema().OpenMP().ActOnOMPArraySectionExpr(
+ Base, LBracketLoc, LowerBound, ColonLocFirst, ColonLocSecond, Length,
+ Stride, RBracketLoc);
+
+ assert(Stride == nullptr && !ColonLocSecond.isValid() &&
+ "Stride/second colon not allowed for OpenACC");
+
+ return getSema().OpenACC().ActOnArraySectionExpr(
+ Base, LBracketLoc, LowerBound, ColonLocFirst, Length, RBracketLoc);
}
/// Build a new array shaping expression.
@@ -2775,19 +2810,20 @@ public:
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> BracketsRanges) {
- return getSema().ActOnOMPArrayShapingExpr(Base, LParenLoc, RParenLoc, Dims,
- BracketsRanges);
+ return getSema().OpenMP().ActOnOMPArrayShapingExpr(
+ Base, LParenLoc, RParenLoc, Dims, BracketsRanges);
}
/// Build a new iterator expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildOMPIteratorExpr(
- SourceLocation IteratorKwLoc, SourceLocation LLoc, SourceLocation RLoc,
- ArrayRef<Sema::OMPIteratorData> Data) {
- return getSema().ActOnOMPIteratorExpr(/*Scope=*/nullptr, IteratorKwLoc,
- LLoc, RLoc, Data);
+ ExprResult
+ RebuildOMPIteratorExpr(SourceLocation IteratorKwLoc, SourceLocation LLoc,
+ SourceLocation RLoc,
+ ArrayRef<SemaOpenMP::OMPIteratorData> Data) {
+ return getSema().OpenMP().ActOnOMPIteratorExpr(
+ /*Scope=*/nullptr, IteratorKwLoc, LLoc, RLoc, Data);
}
/// Build a new call expression.
@@ -2839,16 +2875,30 @@ public:
return ExprError();
Base = BaseResult.get();
+ // `TranformMaterializeTemporaryExpr()` removes materialized temporaries
+ // from the AST, so we need to re-insert them if needed (since
+ // `BuildFieldRefereneExpr()` doesn't do this).
+ if (!isArrow && Base->isPRValue()) {
+ BaseResult = getSema().TemporaryMaterializationConversion(Base);
+ if (BaseResult.isInvalid())
+ return ExprError();
+ Base = BaseResult.get();
+ }
+
CXXScopeSpec EmptySS;
return getSema().BuildFieldReferenceExpr(
Base, isArrow, OpLoc, EmptySS, cast<FieldDecl>(Member),
- DeclAccessPair::make(FoundDecl, FoundDecl->getAccess()), MemberNameInfo);
+ DeclAccessPair::make(FoundDecl, FoundDecl->getAccess()),
+ MemberNameInfo);
}
CXXScopeSpec SS;
SS.Adopt(QualifierLoc);
Base = BaseResult.get();
+ if (Base->containsErrors())
+ return ExprError();
+
QualType BaseType = Base->getType();
if (isArrow && !BaseType->isPointerType())
@@ -3294,12 +3344,13 @@ public:
/// Build a new C++ "this" expression.
///
- /// By default, builds a new "this" expression without performing any
- /// semantic analysis. Subclasses may override this routine to provide
- /// different behavior.
+ /// By default, performs semantic analysis to build a new "this" expression.
+ /// Subclasses may override this routine to provide different behavior.
ExprResult RebuildCXXThisExpr(SourceLocation ThisLoc,
QualType ThisType,
bool isImplicit) {
+ if (getSema().CheckCXXThisType(ThisLoc, ThisType))
+ return ExprError();
return getSema().BuildCXXThisExpr(ThisLoc, ThisType, isImplicit);
}
@@ -3431,11 +3482,11 @@ public:
SS.Adopt(QualifierLoc);
if (TemplateArgs || TemplateKWLoc.isValid())
- return getSema().BuildQualifiedTemplateIdExpr(SS, TemplateKWLoc, NameInfo,
- TemplateArgs);
+ return getSema().BuildQualifiedTemplateIdExpr(
+ SS, TemplateKWLoc, NameInfo, TemplateArgs, IsAddressOfOperand);
return getSema().BuildQualifiedDeclarationNameExpr(
- SS, NameInfo, IsAddressOfOperand, /*S*/nullptr, RecoveryTSI);
+ SS, NameInfo, IsAddressOfOperand, RecoveryTSI);
}
/// Build a new template-id expression.
@@ -3582,6 +3633,16 @@ public:
RParenLoc, Length, PartialArgs);
}
+ ExprResult RebuildPackIndexingExpr(SourceLocation EllipsisLoc,
+ SourceLocation RSquareLoc,
+ Expr *PackIdExpression, Expr *IndexExpr,
+ ArrayRef<Expr *> ExpandedExprs,
+ bool EmptyPack = false) {
+ return getSema().BuildPackIndexingExpr(PackIdExpression, EllipsisLoc,
+ IndexExpr, RSquareLoc, ExpandedExprs,
+ EmptyPack);
+ }
+
/// Build a new expression representing a call to a source location
/// builtin.
///
@@ -3672,7 +3733,7 @@ public:
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
- return getSema().BuildObjCBoxedExpr(SR, ValueExpr);
+ return getSema().ObjC().BuildObjCBoxedExpr(SR, ValueExpr);
}
/// Build a new Objective-C array literal.
@@ -3681,16 +3742,16 @@ public:
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildObjCArrayLiteral(SourceRange Range,
Expr **Elements, unsigned NumElements) {
- return getSema().BuildObjCArrayLiteral(Range,
- MultiExprArg(Elements, NumElements));
+ return getSema().ObjC().BuildObjCArrayLiteral(
+ Range, MultiExprArg(Elements, NumElements));
}
ExprResult RebuildObjCSubscriptRefExpr(SourceLocation RB,
Expr *Base, Expr *Key,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod) {
- return getSema().BuildObjCSubscriptExpression(RB, Base, Key,
- getterMethod, setterMethod);
+ return getSema().ObjC().BuildObjCSubscriptExpression(
+ RB, Base, Key, getterMethod, setterMethod);
}
/// Build a new Objective-C dictionary literal.
@@ -3699,7 +3760,7 @@ public:
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildObjCDictionaryLiteral(SourceRange Range,
MutableArrayRef<ObjCDictionaryElement> Elements) {
- return getSema().BuildObjCDictionaryLiteral(Range, Elements);
+ return getSema().ObjC().BuildObjCDictionaryLiteral(Range, Elements);
}
/// Build a new Objective-C \@encode expression.
@@ -3709,7 +3770,8 @@ public:
ExprResult RebuildObjCEncodeExpr(SourceLocation AtLoc,
TypeSourceInfo *EncodeTypeInfo,
SourceLocation RParenLoc) {
- return SemaRef.BuildObjCEncodeExpression(AtLoc, EncodeTypeInfo, RParenLoc);
+ return SemaRef.ObjC().BuildObjCEncodeExpression(AtLoc, EncodeTypeInfo,
+ RParenLoc);
}
/// Build a new Objective-C class message.
@@ -3720,11 +3782,10 @@ public:
SourceLocation LBracLoc,
MultiExprArg Args,
SourceLocation RBracLoc) {
- return SemaRef.BuildClassMessage(ReceiverTypeInfo,
- ReceiverTypeInfo->getType(),
- /*SuperLoc=*/SourceLocation(),
- Sel, Method, LBracLoc, SelectorLocs,
- RBracLoc, Args);
+ return SemaRef.ObjC().BuildClassMessage(
+ ReceiverTypeInfo, ReceiverTypeInfo->getType(),
+ /*SuperLoc=*/SourceLocation(), Sel, Method, LBracLoc, SelectorLocs,
+ RBracLoc, Args);
}
/// Build a new Objective-C instance message.
@@ -3735,11 +3796,10 @@ public:
SourceLocation LBracLoc,
MultiExprArg Args,
SourceLocation RBracLoc) {
- return SemaRef.BuildInstanceMessage(Receiver,
- Receiver->getType(),
- /*SuperLoc=*/SourceLocation(),
- Sel, Method, LBracLoc, SelectorLocs,
- RBracLoc, Args);
+ return SemaRef.ObjC().BuildInstanceMessage(Receiver, Receiver->getType(),
+ /*SuperLoc=*/SourceLocation(),
+ Sel, Method, LBracLoc,
+ SelectorLocs, RBracLoc, Args);
}
/// Build a new Objective-C instance/class message to 'super'.
@@ -3751,18 +3811,13 @@ public:
SourceLocation LBracLoc,
MultiExprArg Args,
SourceLocation RBracLoc) {
- return Method->isInstanceMethod() ? SemaRef.BuildInstanceMessage(nullptr,
- SuperType,
- SuperLoc,
- Sel, Method, LBracLoc, SelectorLocs,
- RBracLoc, Args)
- : SemaRef.BuildClassMessage(nullptr,
- SuperType,
- SuperLoc,
- Sel, Method, LBracLoc, SelectorLocs,
- RBracLoc, Args);
-
-
+ return Method->isInstanceMethod()
+ ? SemaRef.ObjC().BuildInstanceMessage(
+ nullptr, SuperType, SuperLoc, Sel, Method, LBracLoc,
+ SelectorLocs, RBracLoc, Args)
+ : SemaRef.ObjC().BuildClassMessage(nullptr, SuperType, SuperLoc,
+ Sel, Method, LBracLoc,
+ SelectorLocs, RBracLoc, Args);
}
/// Build a new Objective-C ivar reference expression.
@@ -3867,15 +3922,14 @@ public:
FPOptionsOverride());
// Type-check the __builtin_shufflevector expression.
- return SemaRef.SemaBuiltinShuffleVector(cast<CallExpr>(TheCall.get()));
+ return SemaRef.BuiltinShuffleVector(cast<CallExpr>(TheCall.get()));
}
/// Build a new convert vector expression.
ExprResult RebuildConvertVectorExpr(SourceLocation BuiltinLoc,
Expr *SrcExpr, TypeSourceInfo *DstTInfo,
SourceLocation RParenLoc) {
- return SemaRef.SemaConvertVectorExpr(SrcExpr, DstTInfo,
- BuiltinLoc, RParenLoc);
+ return SemaRef.ConvertVectorExpr(SrcExpr, DstTInfo, BuiltinLoc, RParenLoc);
}
/// Build a new template argument pack expansion.
@@ -3980,6 +4034,25 @@ public:
return getSema().CreateRecoveryExpr(BeginLoc, EndLoc, SubExprs, Type);
}
+ StmtResult RebuildOpenACCComputeConstruct(OpenACCDirectiveKind K,
+ SourceLocation BeginLoc,
+ SourceLocation DirLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OpenACCClause *> Clauses,
+ StmtResult StrBlock) {
+ return getSema().OpenACC().ActOnEndStmtDirective(K, BeginLoc, DirLoc,
+ EndLoc, Clauses, StrBlock);
+ }
+
+ StmtResult RebuildOpenACCLoopConstruct(SourceLocation BeginLoc,
+ SourceLocation DirLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OpenACCClause *> Clauses,
+ StmtResult Loop) {
+ return getSema().OpenACC().ActOnEndStmtDirective(
+ OpenACCDirectiveKind::Loop, BeginLoc, DirLoc, EndLoc, Clauses, Loop);
+ }
+
private:
TypeLoc TransformTypeInObjectScope(TypeLoc TL,
QualType ObjectType,
@@ -3998,6 +4071,15 @@ private:
QualType TransformDependentNameType(TypeLocBuilder &TLB,
DependentNameTypeLoc TL,
bool DeducibleTSTContext);
+
+ llvm::SmallVector<OpenACCClause *>
+ TransformOpenACCClauseList(OpenACCDirectiveKind DirKind,
+ ArrayRef<const OpenACCClause *> OldClauses);
+
+ OpenACCClause *
+ TransformOpenACCClause(ArrayRef<const OpenACCClause *> ExistingClauses,
+ OpenACCDirectiveKind DirKind,
+ const OpenACCClause *OldClause);
};
template <typename Derived>
@@ -4135,6 +4217,7 @@ ExprResult TreeTransform<Derived>::TransformInitializer(Expr *Init,
getSema(), EnterExpressionEvaluationContext::InitList,
Construct->isListInitialization());
+ getSema().keepInLifetimeExtendingContext();
SmallVector<Expr*, 8> NewArgs;
bool ArgChanged = false;
if (getDerived().TransformExprs(Construct->getArgs(), Construct->getNumArgs(),
@@ -4377,7 +4460,8 @@ NestedNameSpecifierLoc TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
SS.Adopt(ETL.getQualifierLoc());
TL = ETL.getNamedTypeLoc();
}
- SS.Extend(SemaRef.Context, /*FIXME:*/ SourceLocation(), TL,
+
+ SS.Extend(SemaRef.Context, TL.getTemplateKeywordLoc(), TL,
Q.getLocalEndLoc());
break;
}
@@ -4533,6 +4617,7 @@ TreeTransform<Derived>::TransformTemplateName(CXXScopeSpec &SS,
ObjectType, AllowInjectedClassName);
}
+ // FIXME: Try to preserve more of the TemplateName.
if (TemplateDecl *Template = Name.getAsTemplateDecl()) {
TemplateDecl *TransTemplate
= cast_or_null<TemplateDecl>(getDerived().TransformDecl(NameLoc,
@@ -4540,11 +4625,8 @@ TreeTransform<Derived>::TransformTemplateName(CXXScopeSpec &SS,
if (!TransTemplate)
return TemplateName();
- if (!getDerived().AlwaysRebuild() &&
- TransTemplate == Template)
- return Name;
-
- return TemplateName(TransTemplate);
+ return getDerived().RebuildTemplateName(SS, /*TemplateKeyword=*/false,
+ TransTemplate);
}
if (SubstTemplateTemplateParmPackStorage *SubstPack
@@ -4704,8 +4786,6 @@ public:
const TemplateArgumentLoc *operator->() const { return &Arg; }
};
- TemplateArgumentLocInventIterator() { }
-
explicit TemplateArgumentLocInventIterator(TreeTransform<Derived> &Self,
InputIterator Iter)
: Self(Self), Iter(Iter) { }
@@ -5200,6 +5280,23 @@ QualType TreeTransform<Derived>::TransformDecayedType(TypeLocBuilder &TLB,
return Result;
}
+template <typename Derived>
+QualType
+TreeTransform<Derived>::TransformArrayParameterType(TypeLocBuilder &TLB,
+ ArrayParameterTypeLoc TL) {
+ QualType OriginalType = getDerived().TransformType(TLB, TL.getElementLoc());
+ if (OriginalType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ OriginalType != TL.getElementLoc().getType())
+ Result = SemaRef.Context.getArrayParameterType(OriginalType);
+ TLB.push<ArrayParameterTypeLoc>(Result);
+ // Nothing to set for ArrayParameterTypeLoc.
+ return Result;
+}
+
template<typename Derived>
QualType TreeTransform<Derived>::TransformPointerType(TypeLocBuilder &TLB,
PointerTypeLoc TL) {
@@ -6131,7 +6228,9 @@ QualType TreeTransform<Derived>::TransformFunctionProtoType(
// "pointer to cv-qualifier-seq X" between the optional cv-qualifer-seq
// and the end of the function-definition, member-declarator, or
// declarator.
- Sema::CXXThisScopeRAII ThisScope(SemaRef, ThisContext, ThisTypeQuals);
+ auto *RD = dyn_cast<CXXRecordDecl>(SemaRef.getCurLexicalContext());
+ Sema::CXXThisScopeRAII ThisScope(
+ SemaRef, !ThisContext && RD ? RD : ThisContext, ThisTypeQuals);
ResultType = getDerived().TransformType(TLB, TL.getReturnLoc());
if (ResultType.isNull())
@@ -6171,6 +6270,55 @@ QualType TreeTransform<Derived>::TransformFunctionProtoType(
EPI.ExtParameterInfos = nullptr;
}
+ // Transform any function effects with unevaluated conditions.
+ // Hold this set in a local for the rest of this function, since EPI
+ // may need to hold a FunctionEffectsRef pointing into it.
+ std::optional<FunctionEffectSet> NewFX;
+ if (ArrayRef FXConds = EPI.FunctionEffects.conditions(); !FXConds.empty()) {
+ NewFX.emplace();
+ EnterExpressionEvaluationContext Unevaluated(
+ getSema(), Sema::ExpressionEvaluationContext::ConstantEvaluated);
+
+ for (const FunctionEffectWithCondition &PrevEC : EPI.FunctionEffects) {
+ FunctionEffectWithCondition NewEC = PrevEC;
+ if (Expr *CondExpr = PrevEC.Cond.getCondition()) {
+ ExprResult NewExpr = getDerived().TransformExpr(CondExpr);
+ if (NewExpr.isInvalid())
+ return QualType();
+ std::optional<FunctionEffectMode> Mode =
+ SemaRef.ActOnEffectExpression(NewExpr.get(), PrevEC.Effect.name());
+ if (!Mode)
+ return QualType();
+
+ // The condition expression has been transformed, and re-evaluated.
+ // It may or may not have become constant.
+ switch (*Mode) {
+ case FunctionEffectMode::True:
+ NewEC.Cond = {};
+ break;
+ case FunctionEffectMode::False:
+ NewEC.Effect = FunctionEffect(PrevEC.Effect.oppositeKind());
+ NewEC.Cond = {};
+ break;
+ case FunctionEffectMode::Dependent:
+ NewEC.Cond = EffectConditionExpr(NewExpr.get());
+ break;
+ case FunctionEffectMode::None:
+ llvm_unreachable(
+ "FunctionEffectMode::None shouldn't be possible here");
+ }
+ }
+ if (!SemaRef.diagnoseConflictingFunctionEffect(*NewFX, NewEC,
+ TL.getBeginLoc())) {
+ FunctionEffectSet::Conflicts Errs;
+ NewFX->insert(NewEC, Errs);
+ assert(Errs.empty());
+ }
+ }
+ EPI.FunctionEffects = *NewFX;
+ EPIChanged = true;
+ }
+
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() || ResultType != T->getReturnType() ||
T->getParamTypes() != llvm::ArrayRef(ParamTypes) || EPIChanged) {
@@ -6406,7 +6554,7 @@ QualType TreeTransform<Derived>::TransformTypeOfExprType(TypeLocBuilder &TLB,
return QualType();
QualType Result = TL.getType();
- TypeOfKind Kind = Result->getAs<TypeOfExprType>()->getKind();
+ TypeOfKind Kind = Result->castAs<TypeOfExprType>()->getKind();
if (getDerived().AlwaysRebuild() || E.get() != TL.getUnderlyingExpr()) {
Result =
getDerived().RebuildTypeOfExprType(E.get(), TL.getTypeofLoc(), Kind);
@@ -6431,7 +6579,7 @@ QualType TreeTransform<Derived>::TransformTypeOfType(TypeLocBuilder &TLB,
return QualType();
QualType Result = TL.getType();
- TypeOfKind Kind = Result->getAs<TypeOfType>()->getKind();
+ TypeOfKind Kind = Result->castAs<TypeOfType>()->getKind();
if (getDerived().AlwaysRebuild() || New_Under_TI != Old_Under_TI) {
Result = getDerived().RebuildTypeOfType(New_Under_TI->getType(), Kind);
if (Result.isNull())
@@ -6480,6 +6628,108 @@ QualType TreeTransform<Derived>::TransformDecltypeType(TypeLocBuilder &TLB,
return Result;
}
+template <typename Derived>
+QualType
+TreeTransform<Derived>::TransformPackIndexingType(TypeLocBuilder &TLB,
+ PackIndexingTypeLoc TL) {
+ // Transform the index
+ ExprResult IndexExpr = getDerived().TransformExpr(TL.getIndexExpr());
+ if (IndexExpr.isInvalid())
+ return QualType();
+ QualType Pattern = TL.getPattern();
+
+ const PackIndexingType *PIT = TL.getTypePtr();
+ SmallVector<QualType, 5> SubtitutedTypes;
+ llvm::ArrayRef<QualType> Types = PIT->getExpansions();
+
+ bool NotYetExpanded = Types.empty();
+ bool FullySubstituted = true;
+
+ if (Types.empty())
+ Types = llvm::ArrayRef<QualType>(&Pattern, 1);
+
+ for (const QualType &T : Types) {
+ if (!T->containsUnexpandedParameterPack()) {
+ QualType Transformed = getDerived().TransformType(T);
+ if (Transformed.isNull())
+ return QualType();
+ SubtitutedTypes.push_back(Transformed);
+ continue;
+ }
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ getSema().collectUnexpandedParameterPacks(T, Unexpanded);
+ assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
+ // Determine whether the set of unexpanded parameter packs can and should
+ // be expanded.
+ bool ShouldExpand = true;
+ bool RetainExpansion = false;
+ std::optional<unsigned> OrigNumExpansions;
+ std::optional<unsigned> NumExpansions = OrigNumExpansions;
+ if (getDerived().TryExpandParameterPacks(TL.getEllipsisLoc(), SourceRange(),
+ Unexpanded, ShouldExpand,
+ RetainExpansion, NumExpansions))
+ return QualType();
+ if (!ShouldExpand) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ // FIXME: should we keep TypeLoc for individual expansions in
+ // PackIndexingTypeLoc?
+ TypeSourceInfo *TI =
+ SemaRef.getASTContext().getTrivialTypeSourceInfo(T, TL.getBeginLoc());
+ QualType Pack = getDerived().TransformType(TLB, TI->getTypeLoc());
+ if (Pack.isNull())
+ return QualType();
+ if (NotYetExpanded) {
+ FullySubstituted = false;
+ QualType Out = getDerived().RebuildPackIndexingType(
+ Pack, IndexExpr.get(), SourceLocation(), TL.getEllipsisLoc(),
+ FullySubstituted);
+ if (Out.isNull())
+ return QualType();
+
+ PackIndexingTypeLoc Loc = TLB.push<PackIndexingTypeLoc>(Out);
+ Loc.setEllipsisLoc(TL.getEllipsisLoc());
+ return Out;
+ }
+ SubtitutedTypes.push_back(Pack);
+ continue;
+ }
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+ QualType Out = getDerived().TransformType(T);
+ if (Out.isNull())
+ return QualType();
+ SubtitutedTypes.push_back(Out);
+ }
+ // If we're supposed to retain a pack expansion, do so by temporarily
+ // forgetting the partially-substituted parameter pack.
+ if (RetainExpansion) {
+ FullySubstituted = false;
+ ForgetPartiallySubstitutedPackRAII Forget(getDerived());
+ QualType Out = getDerived().TransformType(T);
+ if (Out.isNull())
+ return QualType();
+ SubtitutedTypes.push_back(Out);
+ }
+ }
+
+ // A pack indexing type can appear in a larger pack expansion,
+ // e.g. `Pack...[pack_of_indexes]...`
+ // so we need to temporarily disable substitution of pack elements
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ QualType Result = getDerived().TransformType(TLB, TL.getPatternLoc());
+
+ QualType Out = getDerived().RebuildPackIndexingType(
+ Result, IndexExpr.get(), SourceLocation(), TL.getEllipsisLoc(),
+ FullySubstituted, SubtitutedTypes);
+ if (Out.isNull())
+ return Out;
+
+ PackIndexingTypeLoc Loc = TLB.push<PackIndexingTypeLoc>(Out);
+ Loc.setEllipsisLoc(TL.getEllipsisLoc());
+ return Out;
+}
+
template<typename Derived>
QualType TreeTransform<Derived>::TransformUnaryTransformType(
TypeLocBuilder &TLB,
@@ -6487,8 +6737,13 @@ QualType TreeTransform<Derived>::TransformUnaryTransformType(
QualType Result = TL.getType();
if (Result->isDependentType()) {
const UnaryTransformType *T = TL.getTypePtr();
- QualType NewBase =
- getDerived().TransformType(TL.getUnderlyingTInfo())->getType();
+
+ TypeSourceInfo *NewBaseTSI =
+ getDerived().TransformType(TL.getUnderlyingTInfo());
+ if (!NewBaseTSI)
+ return QualType();
+ QualType NewBase = NewBaseTSI->getType();
+
Result = getDerived().RebuildUnaryTransformType(NewBase,
T->getUTTKind(),
TL.getKWLoc());
@@ -7088,10 +7343,10 @@ QualType TreeTransform<Derived>::TransformAttributedType(
// FIXME: dependent operand expressions?
if (getDerived().AlwaysRebuild() ||
modifiedType != oldType->getModifiedType()) {
- // TODO: this is really lame; we should really be rebuilding the
- // equivalent type from first principles.
- QualType equivalentType
- = getDerived().TransformType(oldType->getEquivalentType());
+ TypeLocBuilder AuxiliaryTLB;
+ AuxiliaryTLB.reserve(TL.getFullDataSize());
+ QualType equivalentType =
+ getDerived().TransformType(AuxiliaryTLB, TL.getEquivalentTypeLoc());
if (equivalentType.isNull())
return QualType();
@@ -7127,6 +7382,35 @@ QualType TreeTransform<Derived>::TransformAttributedType(TypeLocBuilder &TLB,
}
template <typename Derived>
+QualType TreeTransform<Derived>::TransformCountAttributedType(
+ TypeLocBuilder &TLB, CountAttributedTypeLoc TL) {
+ const CountAttributedType *OldTy = TL.getTypePtr();
+ QualType InnerTy = getDerived().TransformType(TLB, TL.getInnerLoc());
+ if (InnerTy.isNull())
+ return QualType();
+
+ Expr *OldCount = TL.getCountExpr();
+ Expr *NewCount = nullptr;
+ if (OldCount) {
+ ExprResult CountResult = getDerived().TransformExpr(OldCount);
+ if (CountResult.isInvalid())
+ return QualType();
+ NewCount = CountResult.get();
+ }
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() || InnerTy != OldTy->desugar() ||
+ OldCount != NewCount) {
+ // Currently, CountAttributedType can only wrap incomplete array types.
+ Result = SemaRef.BuildCountAttributedArrayOrPointerType(
+ InnerTy, NewCount, OldTy->isCountInBytes(), OldTy->isOrNull());
+ }
+
+ TLB.push<CountAttributedTypeLoc>(Result);
+ return Result;
+}
+
+template <typename Derived>
QualType TreeTransform<Derived>::TransformBTFTagAttributedType(
TypeLocBuilder &TLB, BTFTagAttributedTypeLoc TL) {
// The BTFTagAttributedType is available for C only.
@@ -7742,6 +8026,11 @@ TreeTransform<Derived>::TransformIfStmt(IfStmt *S) {
// Transform the "then" branch.
StmtResult Then;
if (!ConstexprConditionValue || *ConstexprConditionValue) {
+ EnterExpressionEvaluationContext Ctx(
+ getSema(), Sema::ExpressionEvaluationContext::ImmediateFunctionContext,
+ nullptr, Sema::ExpressionEvaluationContextRecord::EK_Other,
+ S->isNonNegatedConsteval());
+
Then = getDerived().TransformStmt(S->getThen());
if (Then.isInvalid())
return StmtError();
@@ -7756,6 +8045,11 @@ TreeTransform<Derived>::TransformIfStmt(IfStmt *S) {
// Transform the "else" branch.
StmtResult Else;
if (!ConstexprConditionValue || !*ConstexprConditionValue) {
+ EnterExpressionEvaluationContext Ctx(
+ getSema(), Sema::ExpressionEvaluationContext::ImmediateFunctionContext,
+ nullptr, Sema::ExpressionEvaluationContextRecord::EK_Other,
+ S->isNegatedConsteval());
+
Else = getDerived().TransformStmt(S->getElse());
if (Else.isInvalid())
return StmtError();
@@ -7863,7 +8157,7 @@ template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformForStmt(ForStmt *S) {
if (getSema().getLangOpts().OpenMP)
- getSema().startOpenMPLoop();
+ getSema().OpenMP().startOpenMPLoop();
// Transform the initialization statement
StmtResult Init = getDerived().TransformStmt(S->getInit());
@@ -7873,7 +8167,8 @@ TreeTransform<Derived>::TransformForStmt(ForStmt *S) {
// In OpenMP loop region loop control variable must be captured and be
// private. Perform analysis of first part (if any).
if (getSema().getLangOpts().OpenMP && Init.isUsable())
- getSema().ActOnOpenMPLoopInitialization(S->getForLoc(), Init.get());
+ getSema().OpenMP().ActOnOpenMPLoopInitialization(S->getForLoc(),
+ Init.get());
// Transform the condition
Sema::ConditionResult Cond = getDerived().TransformCondition(
@@ -8544,6 +8839,17 @@ StmtResult TreeTransform<Derived>::TransformCXXTryStmt(CXXTryStmt *S) {
template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformCXXForRangeStmt(CXXForRangeStmt *S) {
+ EnterExpressionEvaluationContext ForRangeInitContext(
+ getSema(), Sema::ExpressionEvaluationContext::PotentiallyEvaluated,
+ /*LambdaContextDecl=*/nullptr,
+ Sema::ExpressionEvaluationContextRecord::EK_Other,
+ getSema().getLangOpts().CPlusPlus23);
+
+ // P2718R0 - Lifetime extension in range-based for loops.
+ if (getSema().getLangOpts().CPlusPlus23) {
+ auto &LastRecord = getSema().ExprEvalContexts.back();
+ LastRecord.InLifetimeExtendingContext = true;
+ }
StmtResult Init =
S->getInit() ? getDerived().TransformStmt(S->getInit()) : StmtResult();
if (Init.isInvalid())
@@ -8553,6 +8859,12 @@ TreeTransform<Derived>::TransformCXXForRangeStmt(CXXForRangeStmt *S) {
if (Range.isInvalid())
return StmtError();
+ // Before c++23, ForRangeLifetimeExtendTemps should be empty.
+ assert(getSema().getLangOpts().CPlusPlus23 ||
+ getSema().ExprEvalContexts.back().ForRangeLifetimeExtendTemps.empty());
+ auto ForRangeLifetimeExtendTemps =
+ getSema().ExprEvalContexts.back().ForRangeLifetimeExtendTemps;
+
StmtResult Begin = getDerived().TransformStmt(S->getBeginStmt());
if (Begin.isInvalid())
return StmtError();
@@ -8589,13 +8901,10 @@ TreeTransform<Derived>::TransformCXXForRangeStmt(CXXForRangeStmt *S) {
Cond.get() != S->getCond() ||
Inc.get() != S->getInc() ||
LoopVar.get() != S->getLoopVarStmt()) {
- NewStmt = getDerived().RebuildCXXForRangeStmt(S->getForLoc(),
- S->getCoawaitLoc(), Init.get(),
- S->getColonLoc(), Range.get(),
- Begin.get(), End.get(),
- Cond.get(),
- Inc.get(), LoopVar.get(),
- S->getRParenLoc());
+ NewStmt = getDerived().RebuildCXXForRangeStmt(
+ S->getForLoc(), S->getCoawaitLoc(), Init.get(), S->getColonLoc(),
+ Range.get(), Begin.get(), End.get(), Cond.get(), Inc.get(),
+ LoopVar.get(), S->getRParenLoc(), ForRangeLifetimeExtendTemps);
if (NewStmt.isInvalid() && LoopVar.get() != S->getLoopVarStmt()) {
// Might not have attached any initializer to the loop variable.
getSema().ActOnInitializerError(
@@ -8611,13 +8920,10 @@ TreeTransform<Derived>::TransformCXXForRangeStmt(CXXForRangeStmt *S) {
// Body has changed but we didn't rebuild the for-range statement. Rebuild
// it now so we have a new statement to attach the body to.
if (Body.get() != S->getBody() && NewStmt.get() == S) {
- NewStmt = getDerived().RebuildCXXForRangeStmt(S->getForLoc(),
- S->getCoawaitLoc(), Init.get(),
- S->getColonLoc(), Range.get(),
- Begin.get(), End.get(),
- Cond.get(),
- Inc.get(), LoopVar.get(),
- S->getRParenLoc());
+ NewStmt = getDerived().RebuildCXXForRangeStmt(
+ S->getForLoc(), S->getCoawaitLoc(), Init.get(), S->getColonLoc(),
+ Range.get(), Begin.get(), End.get(), Cond.get(), Inc.get(),
+ LoopVar.get(), S->getRParenLoc(), ForRangeLifetimeExtendTemps);
if (NewStmt.isInvalid())
return StmtError();
}
@@ -8821,9 +9127,9 @@ StmtResult TreeTransform<Derived>::TransformOMPExecutableDirective(
for (ArrayRef<OMPClause *>::iterator I = Clauses.begin(), E = Clauses.end();
I != E; ++I) {
if (*I) {
- getDerived().getSema().StartOpenMPClause((*I)->getClauseKind());
+ getDerived().getSema().OpenMP().StartOpenMPClause((*I)->getClauseKind());
OMPClause *Clause = getDerived().TransformOMPClause(*I);
- getDerived().getSema().EndOpenMPClause();
+ getDerived().getSema().OpenMP().EndOpenMPClause();
if (Clause)
TClauses.push_back(Clause);
} else {
@@ -8832,8 +9138,9 @@ StmtResult TreeTransform<Derived>::TransformOMPExecutableDirective(
}
StmtResult AssociatedStmt;
if (D->hasAssociatedStmt() && D->getAssociatedStmt()) {
- getDerived().getSema().ActOnOpenMPRegionStart(D->getDirectiveKind(),
- /*CurScope=*/nullptr);
+ getDerived().getSema().OpenMP().ActOnOpenMPRegionStart(
+ D->getDirectiveKind(),
+ /*CurScope=*/nullptr);
StmtResult Body;
{
Sema::CompoundScopeRAII CompoundScope(getSema());
@@ -8851,7 +9158,7 @@ StmtResult TreeTransform<Derived>::TransformOMPExecutableDirective(
Body = getDerived().RebuildOMPCanonicalLoop(Body.get());
}
AssociatedStmt =
- getDerived().getSema().ActOnOpenMPRegionEnd(Body, TClauses);
+ getDerived().getSema().OpenMP().ActOnOpenMPRegionEnd(Body, TClauses);
if (AssociatedStmt.isInvalid()) {
return StmtError();
}
@@ -8892,10 +9199,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPParallelDirective(OMPParallelDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_parallel, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -8903,10 +9210,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPSimdDirective(OMPSimdDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_simd, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_simd, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -8914,10 +9221,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPTileDirective(OMPTileDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(D->getDirectiveKind(), DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ D->getDirectiveKind(), DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -8925,10 +9232,32 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPUnrollDirective(OMPUnrollDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(D->getDirectiveKind(), DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ D->getDirectiveKind(), DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPReverseDirective(OMPReverseDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ D->getDirectiveKind(), DirName, nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPInterchangeDirective(
+ OMPInterchangeDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ D->getDirectiveKind(), DirName, nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -8936,10 +9265,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPForDirective(OMPForDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_for, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_for, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -8947,10 +9276,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPForSimdDirective(OMPForSimdDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_for_simd, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_for_simd, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -8958,10 +9287,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPSectionsDirective(OMPSectionsDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_sections, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_sections, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -8969,10 +9298,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPSectionDirective(OMPSectionDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_section, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_section, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -8980,10 +9309,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPScopeDirective(OMPScopeDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_scope, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_scope, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -8991,10 +9320,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPSingleDirective(OMPSingleDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_single, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_single, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9002,20 +9331,20 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPMasterDirective(OMPMasterDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_master, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_master, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPCriticalDirective(OMPCriticalDirective *D) {
- getDerived().getSema().StartOpenMPDSABlock(
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
OMPD_critical, D->getDirectiveName(), nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9023,10 +9352,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPParallelForDirective(
OMPParallelForDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_for, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_parallel_for, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9034,10 +9363,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPParallelForSimdDirective(
OMPParallelForSimdDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_for_simd, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_parallel_for_simd, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9045,10 +9374,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPParallelMasterDirective(
OMPParallelMasterDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_master, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_parallel_master, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9056,10 +9385,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPParallelMaskedDirective(
OMPParallelMaskedDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_masked, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_parallel_masked, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9067,10 +9396,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPParallelSectionsDirective(
OMPParallelSectionsDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_sections, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_parallel_sections, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9078,10 +9407,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPTaskDirective(OMPTaskDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_task, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_task, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9089,10 +9418,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTaskyieldDirective(
OMPTaskyieldDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_taskyield, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_taskyield, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9100,10 +9429,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPBarrierDirective(OMPBarrierDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_barrier, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_barrier, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9111,10 +9440,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPTaskwaitDirective(OMPTaskwaitDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_taskwait, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_taskwait, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9122,10 +9451,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPErrorDirective(OMPErrorDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_error, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_error, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9133,10 +9462,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTaskgroupDirective(
OMPTaskgroupDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_taskgroup, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_taskgroup, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9144,10 +9473,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPFlushDirective(OMPFlushDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_flush, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_flush, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9155,10 +9484,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPDepobjDirective(OMPDepobjDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_depobj, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_depobj, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9166,10 +9495,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPScanDirective(OMPScanDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_scan, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_scan, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9177,10 +9506,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPOrderedDirective(OMPOrderedDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_ordered, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_ordered, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9188,10 +9517,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPAtomicDirective(OMPAtomicDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_atomic, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_atomic, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9199,10 +9528,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPTargetDirective(OMPTargetDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_target, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_target, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9210,10 +9539,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTargetDataDirective(
OMPTargetDataDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_target_data, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_target_data, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9221,10 +9550,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTargetEnterDataDirective(
OMPTargetEnterDataDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_target_enter_data, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_target_enter_data, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9232,10 +9561,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTargetExitDataDirective(
OMPTargetExitDataDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_target_exit_data, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_target_exit_data, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9243,10 +9572,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTargetParallelDirective(
OMPTargetParallelDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_target_parallel, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_target_parallel, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9254,10 +9583,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTargetParallelForDirective(
OMPTargetParallelForDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_target_parallel_for, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_target_parallel_for, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9265,10 +9594,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTargetUpdateDirective(
OMPTargetUpdateDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_target_update, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_target_update, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9276,10 +9605,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPTeamsDirective(OMPTeamsDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_teams, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_teams, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9287,10 +9616,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPCancellationPointDirective(
OMPCancellationPointDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_cancellation_point, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_cancellation_point, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9298,10 +9627,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPCancelDirective(OMPCancelDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_cancel, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_cancel, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9309,10 +9638,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPTaskLoopDirective(OMPTaskLoopDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_taskloop, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_taskloop, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9320,10 +9649,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTaskLoopSimdDirective(
OMPTaskLoopSimdDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_taskloop_simd, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_taskloop_simd, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9331,10 +9660,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPMasterTaskLoopDirective(
OMPMasterTaskLoopDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_master_taskloop, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_master_taskloop, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9342,10 +9671,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPMaskedTaskLoopDirective(
OMPMaskedTaskLoopDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_masked_taskloop, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_masked_taskloop, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9353,10 +9682,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPMasterTaskLoopSimdDirective(
OMPMasterTaskLoopSimdDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_master_taskloop_simd, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_master_taskloop_simd, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9364,10 +9693,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPMaskedTaskLoopSimdDirective(
OMPMaskedTaskLoopSimdDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_masked_taskloop_simd, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_masked_taskloop_simd, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9375,10 +9704,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPParallelMasterTaskLoopDirective(
OMPParallelMasterTaskLoopDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
OMPD_parallel_master_taskloop, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9386,10 +9715,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPParallelMaskedTaskLoopDirective(
OMPParallelMaskedTaskLoopDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
OMPD_parallel_masked_taskloop, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9398,10 +9727,10 @@ StmtResult
TreeTransform<Derived>::TransformOMPParallelMasterTaskLoopSimdDirective(
OMPParallelMasterTaskLoopSimdDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
OMPD_parallel_master_taskloop_simd, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9410,10 +9739,10 @@ StmtResult
TreeTransform<Derived>::TransformOMPParallelMaskedTaskLoopSimdDirective(
OMPParallelMaskedTaskLoopSimdDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
OMPD_parallel_masked_taskloop_simd, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9421,10 +9750,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPDistributeDirective(
OMPDistributeDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_distribute, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_distribute, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9432,10 +9761,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPDistributeParallelForDirective(
OMPDistributeParallelForDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
OMPD_distribute_parallel_for, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9444,10 +9773,10 @@ StmtResult
TreeTransform<Derived>::TransformOMPDistributeParallelForSimdDirective(
OMPDistributeParallelForSimdDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
OMPD_distribute_parallel_for_simd, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9455,10 +9784,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPDistributeSimdDirective(
OMPDistributeSimdDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_distribute_simd, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_distribute_simd, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9466,10 +9795,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTargetParallelForSimdDirective(
OMPTargetParallelForSimdDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
OMPD_target_parallel_for_simd, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9477,10 +9806,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTargetSimdDirective(
OMPTargetSimdDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_target_simd, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_target_simd, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9488,10 +9817,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTeamsDistributeDirective(
OMPTeamsDistributeDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_teams_distribute, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_teams_distribute, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9499,10 +9828,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTeamsDistributeSimdDirective(
OMPTeamsDistributeSimdDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
OMPD_teams_distribute_simd, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9510,11 +9839,11 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTeamsDistributeParallelForSimdDirective(
OMPTeamsDistributeParallelForSimdDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
OMPD_teams_distribute_parallel_for_simd, DirName, nullptr,
D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9522,10 +9851,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTeamsDistributeParallelForDirective(
OMPTeamsDistributeParallelForDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
OMPD_teams_distribute_parallel_for, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9533,10 +9862,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTargetTeamsDirective(
OMPTargetTeamsDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_target_teams, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_target_teams, DirName, nullptr, D->getBeginLoc());
auto Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9544,10 +9873,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTargetTeamsDistributeDirective(
OMPTargetTeamsDistributeDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
OMPD_target_teams_distribute, DirName, nullptr, D->getBeginLoc());
auto Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9556,11 +9885,11 @@ StmtResult
TreeTransform<Derived>::TransformOMPTargetTeamsDistributeParallelForDirective(
OMPTargetTeamsDistributeParallelForDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
OMPD_target_teams_distribute_parallel_for, DirName, nullptr,
D->getBeginLoc());
auto Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9569,11 +9898,11 @@ StmtResult TreeTransform<Derived>::
TransformOMPTargetTeamsDistributeParallelForSimdDirective(
OMPTargetTeamsDistributeParallelForSimdDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
OMPD_target_teams_distribute_parallel_for_simd, DirName, nullptr,
D->getBeginLoc());
auto Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9582,10 +9911,10 @@ StmtResult
TreeTransform<Derived>::TransformOMPTargetTeamsDistributeSimdDirective(
OMPTargetTeamsDistributeSimdDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
OMPD_target_teams_distribute_simd, DirName, nullptr, D->getBeginLoc());
auto Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9593,10 +9922,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPInteropDirective(OMPInteropDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_interop, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_interop, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9604,10 +9933,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPDispatchDirective(OMPDispatchDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_dispatch, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_dispatch, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9615,10 +9944,10 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPMaskedDirective(OMPMaskedDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_masked, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_masked, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9626,10 +9955,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPGenericLoopDirective(
OMPGenericLoopDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_loop, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_loop, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9637,10 +9966,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTeamsGenericLoopDirective(
OMPTeamsGenericLoopDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_teams_loop, DirName, nullptr,
- D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_teams_loop, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9648,10 +9977,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTargetTeamsGenericLoopDirective(
OMPTargetTeamsGenericLoopDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_target_teams_loop, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_target_teams_loop, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9659,10 +9988,10 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPParallelGenericLoopDirective(
OMPParallelGenericLoopDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_loop, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_parallel_loop, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9671,10 +10000,10 @@ StmtResult
TreeTransform<Derived>::TransformOMPTargetParallelGenericLoopDirective(
OMPTargetParallelGenericLoopDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_target_parallel_loop, DirName,
- nullptr, D->getBeginLoc());
+ getDerived().getSema().OpenMP().StartOpenMPDSABlock(
+ OMPD_target_parallel_loop, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
- getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get());
return Res;
}
@@ -9948,6 +10277,12 @@ TreeTransform<Derived>::TransformOMPRelaxedClause(OMPRelaxedClause *C) {
}
template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPWeakClause(OMPWeakClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPThreadsClause(OMPThreadsClause *C) {
// No need to rebuild this clause, no template-dependent parameters.
@@ -10202,12 +10537,11 @@ TreeTransform<Derived>::TransformOMPReductionClause(OMPReductionClause *C) {
cast<NamedDecl>(getDerived().TransformDecl(E->getExprLoc(), D));
Decls.addDecl(InstD, InstD->getAccess());
}
- UnresolvedReductions.push_back(
- UnresolvedLookupExpr::Create(
+ UnresolvedReductions.push_back(UnresolvedLookupExpr::Create(
SemaRef.Context, /*NamingClass=*/nullptr,
- ReductionIdScopeSpec.getWithLocInContext(SemaRef.Context),
- NameInfo, /*ADL=*/true, ULE->isOverloaded(),
- Decls.begin(), Decls.end()));
+ ReductionIdScopeSpec.getWithLocInContext(SemaRef.Context), NameInfo,
+ /*ADL=*/true, Decls.begin(), Decls.end(),
+ /*KnownDependent=*/false, /*KnownInstantiationDependent=*/false));
} else
UnresolvedReductions.push_back(nullptr);
}
@@ -10253,7 +10587,8 @@ OMPClause *TreeTransform<Derived>::TransformOMPTaskReductionClause(
UnresolvedReductions.push_back(UnresolvedLookupExpr::Create(
SemaRef.Context, /*NamingClass=*/nullptr,
ReductionIdScopeSpec.getWithLocInContext(SemaRef.Context), NameInfo,
- /*ADL=*/true, ULE->isOverloaded(), Decls.begin(), Decls.end()));
+ /*ADL=*/true, Decls.begin(), Decls.end(),
+ /*KnownDependent=*/false, /*KnownInstantiationDependent=*/false));
} else
UnresolvedReductions.push_back(nullptr);
}
@@ -10298,7 +10633,8 @@ TreeTransform<Derived>::TransformOMPInReductionClause(OMPInReductionClause *C) {
UnresolvedReductions.push_back(UnresolvedLookupExpr::Create(
SemaRef.Context, /*NamingClass=*/nullptr,
ReductionIdScopeSpec.getWithLocInContext(SemaRef.Context), NameInfo,
- /*ADL=*/true, ULE->isOverloaded(), Decls.begin(), Decls.end()));
+ /*ADL=*/true, Decls.begin(), Decls.end(),
+ /*KnownDependent=*/false, /*KnownInstantiationDependent=*/false));
} else
UnresolvedReductions.push_back(nullptr);
}
@@ -10479,8 +10815,8 @@ bool transformOMPMappableExprListClause(
UnresolvedMappers.push_back(UnresolvedLookupExpr::Create(
TT.getSema().Context, /*NamingClass=*/nullptr,
MapperIdScopeSpec.getWithLocInContext(TT.getSema().Context),
- MapperIdInfo, /*ADL=*/true, ULE->isOverloaded(), Decls.begin(),
- Decls.end()));
+ MapperIdInfo, /*ADL=*/true, Decls.begin(), Decls.end(),
+ /*KnownDependent=*/false, /*KnownInstantiationDependent=*/false));
} else {
UnresolvedMappers.push_back(nullptr);
}
@@ -10758,7 +11094,7 @@ TreeTransform<Derived>::TransformOMPExclusiveClause(OMPExclusiveClause *C) {
template <typename Derived>
OMPClause *TreeTransform<Derived>::TransformOMPUsesAllocatorsClause(
OMPUsesAllocatorsClause *C) {
- SmallVector<Sema::UsesAllocatorsData, 16> Data;
+ SmallVector<SemaOpenMP::UsesAllocatorsData, 16> Data;
Data.reserve(C->getNumberOfAllocators());
for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
@@ -10771,7 +11107,7 @@ OMPClause *TreeTransform<Derived>::TransformOMPUsesAllocatorsClause(
if (AllocatorTraits.isInvalid())
continue;
}
- Sema::UsesAllocatorsData &NewD = Data.emplace_back();
+ SemaOpenMP::UsesAllocatorsData &NewD = Data.emplace_back();
NewD.Allocator = Allocator.get();
NewD.AllocatorTraits = AllocatorTraits.get();
NewD.LParenLoc = D.LParenLoc;
@@ -10859,6 +11195,513 @@ OMPClause *TreeTransform<Derived>::TransformOMPXBareClause(OMPXBareClause *C) {
}
//===----------------------------------------------------------------------===//
+// OpenACC transformation
+//===----------------------------------------------------------------------===//
+namespace {
+template <typename Derived>
+class OpenACCClauseTransform final
+ : public OpenACCClauseVisitor<OpenACCClauseTransform<Derived>> {
+ TreeTransform<Derived> &Self;
+ ArrayRef<const OpenACCClause *> ExistingClauses;
+ SemaOpenACC::OpenACCParsedClause &ParsedClause;
+ OpenACCClause *NewClause = nullptr;
+
+ llvm::SmallVector<Expr *> VisitVarList(ArrayRef<Expr *> VarList) {
+ llvm::SmallVector<Expr *> InstantiatedVarList;
+ for (Expr *CurVar : VarList) {
+ ExprResult Res = Self.TransformExpr(CurVar);
+
+ if (!Res.isUsable())
+ continue;
+
+ Res = Self.getSema().OpenACC().ActOnVar(ParsedClause.getClauseKind(),
+ Res.get());
+
+ if (Res.isUsable())
+ InstantiatedVarList.push_back(Res.get());
+ }
+
+ return InstantiatedVarList;
+ }
+
+public:
+ OpenACCClauseTransform(TreeTransform<Derived> &Self,
+ ArrayRef<const OpenACCClause *> ExistingClauses,
+ SemaOpenACC::OpenACCParsedClause &PC)
+ : Self(Self), ExistingClauses(ExistingClauses), ParsedClause(PC) {}
+
+ OpenACCClause *CreatedClause() const { return NewClause; }
+
+#define VISIT_CLAUSE(CLAUSE_NAME) \
+ void Visit##CLAUSE_NAME##Clause(const OpenACC##CLAUSE_NAME##Clause &Clause);
+#include "clang/Basic/OpenACCClauses.def"
+};
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitDefaultClause(
+ const OpenACCDefaultClause &C) {
+ ParsedClause.setDefaultDetails(C.getDefaultClauseKind());
+
+ NewClause = OpenACCDefaultClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getDefaultClauseKind(),
+ ParsedClause.getBeginLoc(), ParsedClause.getLParenLoc(),
+ ParsedClause.getEndLoc());
+}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitIfClause(const OpenACCIfClause &C) {
+ Expr *Cond = const_cast<Expr *>(C.getConditionExpr());
+ assert(Cond && "If constructed with invalid Condition");
+ Sema::ConditionResult Res = Self.TransformCondition(
+ Cond->getExprLoc(), /*Var=*/nullptr, Cond, Sema::ConditionKind::Boolean);
+
+ if (Res.isInvalid() || !Res.get().second)
+ return;
+
+ ParsedClause.setConditionDetails(Res.get().second);
+
+ NewClause = OpenACCIfClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getBeginLoc(),
+ ParsedClause.getLParenLoc(), ParsedClause.getConditionExpr(),
+ ParsedClause.getEndLoc());
+}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitSelfClause(
+ const OpenACCSelfClause &C) {
+
+ if (C.hasConditionExpr()) {
+ Expr *Cond = const_cast<Expr *>(C.getConditionExpr());
+ Sema::ConditionResult Res =
+ Self.TransformCondition(Cond->getExprLoc(), /*Var=*/nullptr, Cond,
+ Sema::ConditionKind::Boolean);
+
+ if (Res.isInvalid() || !Res.get().second)
+ return;
+
+ ParsedClause.setConditionDetails(Res.get().second);
+ }
+
+ NewClause = OpenACCSelfClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getBeginLoc(),
+ ParsedClause.getLParenLoc(), ParsedClause.getConditionExpr(),
+ ParsedClause.getEndLoc());
+}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitNumGangsClause(
+ const OpenACCNumGangsClause &C) {
+ llvm::SmallVector<Expr *> InstantiatedIntExprs;
+
+ for (Expr *CurIntExpr : C.getIntExprs()) {
+ ExprResult Res = Self.TransformExpr(CurIntExpr);
+
+ if (!Res.isUsable())
+ return;
+
+ Res = Self.getSema().OpenACC().ActOnIntExpr(OpenACCDirectiveKind::Invalid,
+ C.getClauseKind(),
+ C.getBeginLoc(), Res.get());
+ if (!Res.isUsable())
+ return;
+
+ InstantiatedIntExprs.push_back(Res.get());
+ }
+
+ ParsedClause.setIntExprDetails(InstantiatedIntExprs);
+ NewClause = OpenACCNumGangsClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getBeginLoc(),
+ ParsedClause.getLParenLoc(), ParsedClause.getIntExprs(),
+ ParsedClause.getEndLoc());
+}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitPrivateClause(
+ const OpenACCPrivateClause &C) {
+ ParsedClause.setVarListDetails(VisitVarList(C.getVarList()),
+ /*IsReadOnly=*/false, /*IsZero=*/false);
+
+ NewClause = OpenACCPrivateClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getBeginLoc(),
+ ParsedClause.getLParenLoc(), ParsedClause.getVarList(),
+ ParsedClause.getEndLoc());
+}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitFirstPrivateClause(
+ const OpenACCFirstPrivateClause &C) {
+ ParsedClause.setVarListDetails(VisitVarList(C.getVarList()),
+ /*IsReadOnly=*/false, /*IsZero=*/false);
+
+ NewClause = OpenACCFirstPrivateClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getBeginLoc(),
+ ParsedClause.getLParenLoc(), ParsedClause.getVarList(),
+ ParsedClause.getEndLoc());
+}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitNoCreateClause(
+ const OpenACCNoCreateClause &C) {
+ ParsedClause.setVarListDetails(VisitVarList(C.getVarList()),
+ /*IsReadOnly=*/false, /*IsZero=*/false);
+
+ NewClause = OpenACCNoCreateClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getBeginLoc(),
+ ParsedClause.getLParenLoc(), ParsedClause.getVarList(),
+ ParsedClause.getEndLoc());
+}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitPresentClause(
+ const OpenACCPresentClause &C) {
+ ParsedClause.setVarListDetails(VisitVarList(C.getVarList()),
+ /*IsReadOnly=*/false, /*IsZero=*/false);
+
+ NewClause = OpenACCPresentClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getBeginLoc(),
+ ParsedClause.getLParenLoc(), ParsedClause.getVarList(),
+ ParsedClause.getEndLoc());
+}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitCopyClause(
+ const OpenACCCopyClause &C) {
+ ParsedClause.setVarListDetails(VisitVarList(C.getVarList()),
+ /*IsReadOnly=*/false, /*IsZero=*/false);
+
+ NewClause = OpenACCCopyClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getClauseKind(),
+ ParsedClause.getBeginLoc(), ParsedClause.getLParenLoc(),
+ ParsedClause.getVarList(), ParsedClause.getEndLoc());
+}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitCopyInClause(
+ const OpenACCCopyInClause &C) {
+ ParsedClause.setVarListDetails(VisitVarList(C.getVarList()), C.isReadOnly(),
+ /*IsZero=*/false);
+
+ NewClause = OpenACCCopyInClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getClauseKind(),
+ ParsedClause.getBeginLoc(), ParsedClause.getLParenLoc(),
+ ParsedClause.isReadOnly(), ParsedClause.getVarList(),
+ ParsedClause.getEndLoc());
+}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitCopyOutClause(
+ const OpenACCCopyOutClause &C) {
+ ParsedClause.setVarListDetails(VisitVarList(C.getVarList()),
+ /*IsReadOnly=*/false, C.isZero());
+
+ NewClause = OpenACCCopyOutClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getClauseKind(),
+ ParsedClause.getBeginLoc(), ParsedClause.getLParenLoc(),
+ ParsedClause.isZero(), ParsedClause.getVarList(),
+ ParsedClause.getEndLoc());
+}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitCreateClause(
+ const OpenACCCreateClause &C) {
+ ParsedClause.setVarListDetails(VisitVarList(C.getVarList()),
+ /*IsReadOnly=*/false, C.isZero());
+
+ NewClause = OpenACCCreateClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getClauseKind(),
+ ParsedClause.getBeginLoc(), ParsedClause.getLParenLoc(),
+ ParsedClause.isZero(), ParsedClause.getVarList(),
+ ParsedClause.getEndLoc());
+}
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitAttachClause(
+ const OpenACCAttachClause &C) {
+ llvm::SmallVector<Expr *> VarList = VisitVarList(C.getVarList());
+
+ // Ensure each var is a pointer type.
+ VarList.erase(std::remove_if(VarList.begin(), VarList.end(), [&](Expr *E) {
+ return Self.getSema().OpenACC().CheckVarIsPointerType(
+ OpenACCClauseKind::Attach, E);
+ }), VarList.end());
+
+ ParsedClause.setVarListDetails(VarList,
+ /*IsReadOnly=*/false, /*IsZero=*/false);
+ NewClause = OpenACCAttachClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getBeginLoc(),
+ ParsedClause.getLParenLoc(), ParsedClause.getVarList(),
+ ParsedClause.getEndLoc());
+}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitDevicePtrClause(
+ const OpenACCDevicePtrClause &C) {
+ llvm::SmallVector<Expr *> VarList = VisitVarList(C.getVarList());
+
+ // Ensure each var is a pointer type.
+ VarList.erase(std::remove_if(VarList.begin(), VarList.end(), [&](Expr *E) {
+ return Self.getSema().OpenACC().CheckVarIsPointerType(
+ OpenACCClauseKind::DevicePtr, E);
+ }), VarList.end());
+
+ ParsedClause.setVarListDetails(VarList,
+ /*IsReadOnly=*/false, /*IsZero=*/false);
+ NewClause = OpenACCDevicePtrClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getBeginLoc(),
+ ParsedClause.getLParenLoc(), ParsedClause.getVarList(),
+ ParsedClause.getEndLoc());
+}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitNumWorkersClause(
+ const OpenACCNumWorkersClause &C) {
+ Expr *IntExpr = const_cast<Expr *>(C.getIntExpr());
+ assert(IntExpr && "num_workers clause constructed with invalid int expr");
+
+ ExprResult Res = Self.TransformExpr(IntExpr);
+ if (!Res.isUsable())
+ return;
+
+ Res = Self.getSema().OpenACC().ActOnIntExpr(OpenACCDirectiveKind::Invalid,
+ C.getClauseKind(),
+ C.getBeginLoc(), Res.get());
+ if (!Res.isUsable())
+ return;
+
+ ParsedClause.setIntExprDetails(Res.get());
+ NewClause = OpenACCNumWorkersClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getBeginLoc(),
+ ParsedClause.getLParenLoc(), ParsedClause.getIntExprs()[0],
+ ParsedClause.getEndLoc());
+}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitVectorLengthClause(
+ const OpenACCVectorLengthClause &C) {
+ Expr *IntExpr = const_cast<Expr *>(C.getIntExpr());
+ assert(IntExpr && "vector_length clause constructed with invalid int expr");
+
+ ExprResult Res = Self.TransformExpr(IntExpr);
+ if (!Res.isUsable())
+ return;
+
+ Res = Self.getSema().OpenACC().ActOnIntExpr(OpenACCDirectiveKind::Invalid,
+ C.getClauseKind(),
+ C.getBeginLoc(), Res.get());
+ if (!Res.isUsable())
+ return;
+
+ ParsedClause.setIntExprDetails(Res.get());
+ NewClause = OpenACCVectorLengthClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getBeginLoc(),
+ ParsedClause.getLParenLoc(), ParsedClause.getIntExprs()[0],
+ ParsedClause.getEndLoc());
+}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitAsyncClause(
+ const OpenACCAsyncClause &C) {
+ if (C.hasIntExpr()) {
+ ExprResult Res = Self.TransformExpr(const_cast<Expr *>(C.getIntExpr()));
+ if (!Res.isUsable())
+ return;
+
+ Res = Self.getSema().OpenACC().ActOnIntExpr(OpenACCDirectiveKind::Invalid,
+ C.getClauseKind(),
+ C.getBeginLoc(), Res.get());
+ if (!Res.isUsable())
+ return;
+ ParsedClause.setIntExprDetails(Res.get());
+ }
+
+ NewClause = OpenACCAsyncClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getBeginLoc(),
+ ParsedClause.getLParenLoc(),
+ ParsedClause.getNumIntExprs() != 0 ? ParsedClause.getIntExprs()[0]
+ : nullptr,
+ ParsedClause.getEndLoc());
+}
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitWaitClause(
+ const OpenACCWaitClause &C) {
+ if (!C.getLParenLoc().isInvalid()) {
+ Expr *DevNumExpr = nullptr;
+ llvm::SmallVector<Expr *> InstantiatedQueueIdExprs;
+
+ // Instantiate devnum expr if it exists.
+ if (C.getDevNumExpr()) {
+ ExprResult Res = Self.TransformExpr(C.getDevNumExpr());
+ if (!Res.isUsable())
+ return;
+ Res = Self.getSema().OpenACC().ActOnIntExpr(OpenACCDirectiveKind::Invalid,
+ C.getClauseKind(),
+ C.getBeginLoc(), Res.get());
+ if (!Res.isUsable())
+ return;
+
+ DevNumExpr = Res.get();
+ }
+
+ // Instantiate queue ids.
+ for (Expr *CurQueueIdExpr : C.getQueueIdExprs()) {
+ ExprResult Res = Self.TransformExpr(CurQueueIdExpr);
+ if (!Res.isUsable())
+ return;
+ Res = Self.getSema().OpenACC().ActOnIntExpr(OpenACCDirectiveKind::Invalid,
+ C.getClauseKind(),
+ C.getBeginLoc(), Res.get());
+ if (!Res.isUsable())
+ return;
+
+ InstantiatedQueueIdExprs.push_back(Res.get());
+ }
+
+ ParsedClause.setWaitDetails(DevNumExpr, C.getQueuesLoc(),
+ std::move(InstantiatedQueueIdExprs));
+ }
+
+ NewClause = OpenACCWaitClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getBeginLoc(),
+ ParsedClause.getLParenLoc(), ParsedClause.getDevNumExpr(),
+ ParsedClause.getQueuesLoc(), ParsedClause.getQueueIdExprs(),
+ ParsedClause.getEndLoc());
+}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitDeviceTypeClause(
+ const OpenACCDeviceTypeClause &C) {
+ // Nothing to transform here, just create a new version of 'C'.
+ NewClause = OpenACCDeviceTypeClause::Create(
+ Self.getSema().getASTContext(), C.getClauseKind(),
+ ParsedClause.getBeginLoc(), ParsedClause.getLParenLoc(),
+ C.getArchitectures(), ParsedClause.getEndLoc());
+}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitAutoClause(
+ const OpenACCAutoClause &C) {
+ // Nothing to do, so just create a new node.
+ NewClause = OpenACCAutoClause::Create(Self.getSema().getASTContext(),
+ ParsedClause.getBeginLoc(),
+ ParsedClause.getEndLoc());
+}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitIndependentClause(
+ const OpenACCIndependentClause &C) {
+ NewClause = OpenACCIndependentClause::Create(Self.getSema().getASTContext(),
+ ParsedClause.getBeginLoc(),
+ ParsedClause.getEndLoc());
+}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitSeqClause(
+ const OpenACCSeqClause &C) {
+ NewClause = OpenACCSeqClause::Create(Self.getSema().getASTContext(),
+ ParsedClause.getBeginLoc(),
+ ParsedClause.getEndLoc());
+}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitReductionClause(
+ const OpenACCReductionClause &C) {
+ SmallVector<Expr *> TransformedVars = VisitVarList(C.getVarList());
+ SmallVector<Expr *> ValidVars;
+
+ for (Expr *Var : TransformedVars) {
+ ExprResult Res = Self.getSema().OpenACC().CheckReductionVar(Var);
+ if (Res.isUsable())
+ ValidVars.push_back(Res.get());
+ }
+
+ NewClause = OpenACCReductionClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getBeginLoc(),
+ ParsedClause.getLParenLoc(), C.getReductionOp(), ValidVars,
+ ParsedClause.getEndLoc());
+}
+} // namespace
+template <typename Derived>
+OpenACCClause *TreeTransform<Derived>::TransformOpenACCClause(
+ ArrayRef<const OpenACCClause *> ExistingClauses,
+ OpenACCDirectiveKind DirKind, const OpenACCClause *OldClause) {
+
+ SemaOpenACC::OpenACCParsedClause ParsedClause(
+ DirKind, OldClause->getClauseKind(), OldClause->getBeginLoc());
+ ParsedClause.setEndLoc(OldClause->getEndLoc());
+
+ if (const auto *WithParms = dyn_cast<OpenACCClauseWithParams>(OldClause))
+ ParsedClause.setLParenLoc(WithParms->getLParenLoc());
+
+ OpenACCClauseTransform<Derived> Transform{*this, ExistingClauses,
+ ParsedClause};
+ Transform.Visit(OldClause);
+
+ return Transform.CreatedClause();
+}
+
+template <typename Derived>
+llvm::SmallVector<OpenACCClause *>
+TreeTransform<Derived>::TransformOpenACCClauseList(
+ OpenACCDirectiveKind DirKind, ArrayRef<const OpenACCClause *> OldClauses) {
+ llvm::SmallVector<OpenACCClause *> TransformedClauses;
+ for (const auto *Clause : OldClauses) {
+ if (OpenACCClause *TransformedClause = getDerived().TransformOpenACCClause(
+ TransformedClauses, DirKind, Clause))
+ TransformedClauses.push_back(TransformedClause);
+ }
+ return TransformedClauses;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOpenACCComputeConstruct(
+ OpenACCComputeConstruct *C) {
+ getSema().OpenACC().ActOnConstruct(C->getDirectiveKind(), C->getBeginLoc());
+
+ if (getSema().OpenACC().ActOnStartStmtDirective(C->getDirectiveKind(),
+ C->getBeginLoc()))
+ return StmtError();
+
+ llvm::SmallVector<OpenACCClause *> TransformedClauses =
+ getDerived().TransformOpenACCClauseList(C->getDirectiveKind(),
+ C->clauses());
+ // Transform Structured Block.
+ SemaOpenACC::AssociatedStmtRAII AssocStmtRAII(getSema().OpenACC(),
+ C->getDirectiveKind());
+ StmtResult StrBlock = getDerived().TransformStmt(C->getStructuredBlock());
+ StrBlock = getSema().OpenACC().ActOnAssociatedStmt(
+ C->getBeginLoc(), C->getDirectiveKind(), StrBlock);
+
+ return getDerived().RebuildOpenACCComputeConstruct(
+ C->getDirectiveKind(), C->getBeginLoc(), C->getDirectiveLoc(),
+ C->getEndLoc(), TransformedClauses, StrBlock);
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOpenACCLoopConstruct(OpenACCLoopConstruct *C) {
+
+ getSema().OpenACC().ActOnConstruct(C->getDirectiveKind(), C->getBeginLoc());
+
+ if (getSema().OpenACC().ActOnStartStmtDirective(C->getDirectiveKind(),
+ C->getBeginLoc()))
+ return StmtError();
+
+ llvm::SmallVector<OpenACCClause *> TransformedClauses =
+ getDerived().TransformOpenACCClauseList(C->getDirectiveKind(),
+ C->clauses());
+
+ // Transform Loop.
+ SemaOpenACC::AssociatedStmtRAII AssocStmtRAII(getSema().OpenACC(),
+ C->getDirectiveKind());
+ StmtResult Loop = getDerived().TransformStmt(C->getLoop());
+ Loop = getSema().OpenACC().ActOnAssociatedStmt(C->getBeginLoc(),
+ C->getDirectiveKind(), Loop);
+
+ return getDerived().RebuildOpenACCLoopConstruct(
+ C->getBeginLoc(), C->getDirectiveLoc(), C->getEndLoc(),
+ TransformedClauses, Loop);
+}
+
+//===----------------------------------------------------------------------===//
// Expression transformation
//===----------------------------------------------------------------------===//
template<typename Derived>
@@ -10928,8 +11771,8 @@ TreeTransform<Derived>::TransformDeclRefExpr(DeclRefExpr *E) {
}
if (!getDerived().AlwaysRebuild() &&
- QualifierLoc == E->getQualifierLoc() &&
- ND == E->getDecl() &&
+ !E->isCapturedByCopyInLambdaWithExplicitObjectParameter() &&
+ QualifierLoc == E->getQualifierLoc() && ND == E->getDecl() &&
Found == E->getFoundDecl() &&
NameInfo.getName() == E->getDecl()->getDeclName() &&
!E->hasExplicitTemplateArgs()) {
@@ -11064,7 +11907,11 @@ template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformAddressOfOperand(Expr *E) {
if (DependentScopeDeclRefExpr *DRE = dyn_cast<DependentScopeDeclRefExpr>(E))
- return getDerived().TransformDependentScopeDeclRefExpr(DRE, true, nullptr);
+ return getDerived().TransformDependentScopeDeclRefExpr(
+ DRE, /*IsAddressOfOperand=*/true, nullptr);
+ else if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(E))
+ return getDerived().TransformUnresolvedLookupExpr(
+ ULE, /*IsAddressOfOperand=*/true);
else
return getDerived().TransformExpr(E);
}
@@ -11193,7 +12040,7 @@ TreeTransform<Derived>::TransformPseudoObjectExpr(PseudoObjectExpr *E) {
// better solution (rebuilding the semantic expressions and
// rebinding OVEs as necessary) doesn't work; we'd need
// TreeTransform to not strip away implicit conversions.
- Expr *newSyntacticForm = SemaRef.recreateSyntacticForm(E);
+ Expr *newSyntacticForm = SemaRef.PseudoObject().recreateSyntacticForm(E);
ExprResult result = getDerived().TransformExpr(newSyntacticForm);
if (result.isInvalid()) return ExprError();
@@ -11201,7 +12048,7 @@ TreeTransform<Derived>::TransformPseudoObjectExpr(PseudoObjectExpr *E) {
// expression must have been an lvalue-to-rvalue conversion which we
// should reapply.
if (result.get()->hasPlaceholderType(BuiltinType::PseudoObject))
- result = SemaRef.checkPseudoObjectRValue(result.get());
+ result = SemaRef.PseudoObject().checkRValue(result.get());
return result;
}
@@ -11306,7 +12153,7 @@ TreeTransform<Derived>::TransformMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
template <typename Derived>
ExprResult
-TreeTransform<Derived>::TransformOMPArraySectionExpr(OMPArraySectionExpr *E) {
+TreeTransform<Derived>::TransformArraySectionExpr(ArraySectionExpr *E) {
ExprResult Base = getDerived().TransformExpr(E->getBase());
if (Base.isInvalid())
return ExprError();
@@ -11326,20 +12173,25 @@ TreeTransform<Derived>::TransformOMPArraySectionExpr(OMPArraySectionExpr *E) {
}
ExprResult Stride;
- if (Expr *Str = E->getStride()) {
- Stride = getDerived().TransformExpr(Str);
- if (Stride.isInvalid())
- return ExprError();
+ if (E->isOMPArraySection()) {
+ if (Expr *Str = E->getStride()) {
+ Stride = getDerived().TransformExpr(Str);
+ if (Stride.isInvalid())
+ return ExprError();
+ }
}
if (!getDerived().AlwaysRebuild() && Base.get() == E->getBase() &&
- LowerBound.get() == E->getLowerBound() && Length.get() == E->getLength())
+ LowerBound.get() == E->getLowerBound() &&
+ Length.get() == E->getLength() &&
+ (E->isOpenACCArraySection() || Stride.get() == E->getStride()))
return E;
- return getDerived().RebuildOMPArraySectionExpr(
- Base.get(), E->getBase()->getEndLoc(), LowerBound.get(),
- E->getColonLocFirst(), E->getColonLocSecond(), Length.get(), Stride.get(),
- E->getRBracketLoc());
+ return getDerived().RebuildArraySectionExpr(
+ E->isOMPArraySection(), Base.get(), E->getBase()->getEndLoc(),
+ LowerBound.get(), E->getColonLocFirst(),
+ E->isOMPArraySection() ? E->getColonLocSecond() : SourceLocation{},
+ Length.get(), Stride.get(), E->getRBracketLoc());
}
template <typename Derived>
@@ -11371,7 +12223,7 @@ template <typename Derived>
ExprResult
TreeTransform<Derived>::TransformOMPIteratorExpr(OMPIteratorExpr *E) {
unsigned NumIterators = E->numOfIterators();
- SmallVector<Sema::OMPIteratorData, 4> Data(NumIterators);
+ SmallVector<SemaOpenMP::OMPIteratorData, 4> Data(NumIterators);
bool ErrorFound = false;
bool NeedToRebuild = getDerived().AlwaysRebuild();
@@ -11506,7 +12358,8 @@ TreeTransform<Derived>::TransformMemberExpr(MemberExpr *E) {
// Skip for member expression of (this->f), rebuilt thisi->f is needed
// for Openmp where the field need to be privatizized in the case.
if (!(isa<CXXThisExpr>(E->getBase()) &&
- getSema().isOpenMPRebuildMemberExpr(cast<ValueDecl>(Member)))) {
+ getSema().OpenMP().isOpenMPRebuildMemberExpr(
+ cast<ValueDecl>(Member)))) {
// Mark it referenced in the new context regardless.
// FIXME: this is a bit instantiation-specific.
SemaRef.MarkMemberReferenced(E);
@@ -12097,7 +12950,7 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
}
ExprResult First;
- if (E->getOperator() == OO_Amp)
+ if (E->getNumArgs() == 1 && E->getOperator() == OO_Amp)
First = getDerived().TransformAddressOfOperand(E->getArg(0));
else
First = getDerived().TransformExpr(E->getArg(0));
@@ -12166,6 +13019,11 @@ ExprResult TreeTransform<Derived>::TransformSourceLocExpr(SourceLocExpr *E) {
getSema().CurContext);
}
+template <typename Derived>
+ExprResult TreeTransform<Derived>::TransformEmbedExpr(EmbedExpr *E) {
+ return E;
+}
+
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
@@ -12392,9 +13250,17 @@ TreeTransform<Derived>::TransformCXXThisExpr(CXXThisExpr *E) {
//
// In other contexts, the type of `this` may be overrided
// for type deduction, so we need to recompute it.
- QualType T = getSema().getCurLambda() ?
- getDerived().TransformType(E->getType())
- : getSema().getCurrentThisType();
+ //
+ // Always recompute the type if we're in the body of a lambda, and
+ // 'this' is dependent on a lambda's explicit object parameter.
+ QualType T = [&]() {
+ auto &S = getSema();
+ if (E->isCapturedByCopyInLambdaWithExplicitObjectParameter())
+ return S.getCurrentThisType();
+ if (S.getCurLambda())
+ return getDerived().TransformType(E->getType());
+ return S.getCurrentThisType();
+ }();
if (!getDerived().AlwaysRebuild() && T == E->getType()) {
// Mark it referenced in the new context regardless.
@@ -12742,7 +13608,7 @@ bool TreeTransform<Derived>::TransformOverloadExprDecls(OverloadExpr *Old,
}
AllEmptyPacks &= Decls.empty();
- };
+ }
// C++ [temp.res]/8.4.2:
// The program is ill-formed, no diagnostic required, if [...] lookup for
@@ -12759,13 +13625,39 @@ bool TreeTransform<Derived>::TransformOverloadExprDecls(OverloadExpr *Old,
// Resolve a kind, but don't do any further analysis. If it's
// ambiguous, the callee needs to deal with it.
R.resolveKind();
+
+ if (Old->hasTemplateKeyword() && !R.empty()) {
+ NamedDecl *FoundDecl = R.getRepresentativeDecl()->getUnderlyingDecl();
+ getSema().FilterAcceptableTemplateNames(R,
+ /*AllowFunctionTemplates=*/true,
+ /*AllowDependent=*/true);
+ if (R.empty()) {
+ // If a 'template' keyword was used, a lookup that finds only non-template
+ // names is an error.
+ getSema().Diag(R.getNameLoc(),
+ diag::err_template_kw_refers_to_non_template)
+ << R.getLookupName() << Old->getQualifierLoc().getSourceRange()
+ << Old->hasTemplateKeyword() << Old->getTemplateKeywordLoc();
+ getSema().Diag(FoundDecl->getLocation(),
+ diag::note_template_kw_refers_to_non_template)
+ << R.getLookupName();
+ return true;
+ }
+ }
+
return false;
}
-template<typename Derived>
+template <typename Derived>
+ExprResult TreeTransform<Derived>::TransformUnresolvedLookupExpr(
+ UnresolvedLookupExpr *Old) {
+ return TransformUnresolvedLookupExpr(Old, /*IsAddressOfOperand=*/false);
+}
+
+template <typename Derived>
ExprResult
-TreeTransform<Derived>::TransformUnresolvedLookupExpr(
- UnresolvedLookupExpr *Old) {
+TreeTransform<Derived>::TransformUnresolvedLookupExpr(UnresolvedLookupExpr *Old,
+ bool IsAddressOfOperand) {
LookupResult R(SemaRef, Old->getName(), Old->getNameLoc(),
Sema::LookupOrdinaryName);
@@ -12797,26 +13689,8 @@ TreeTransform<Derived>::TransformUnresolvedLookupExpr(
R.setNamingClass(NamingClass);
}
+ // Rebuild the template arguments, if any.
SourceLocation TemplateKWLoc = Old->getTemplateKeywordLoc();
-
- // If we have neither explicit template arguments, nor the template keyword,
- // it's a normal declaration name or member reference.
- if (!Old->hasExplicitTemplateArgs() && !TemplateKWLoc.isValid()) {
- NamedDecl *D = R.getAsSingle<NamedDecl>();
- // In a C++11 unevaluated context, an UnresolvedLookupExpr might refer to an
- // instance member. In other contexts, BuildPossibleImplicitMemberExpr will
- // give a good diagnostic.
- if (D && D->isCXXInstanceMember()) {
- return SemaRef.BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc, R,
- /*TemplateArgs=*/nullptr,
- /*Scope=*/nullptr);
- }
-
- return getDerived().RebuildDeclarationNameExpr(SS, R, Old->requiresADL());
- }
-
- // If we have template arguments, rebuild them, then rebuild the
- // templateid expression.
TemplateArgumentListInfo TransArgs(Old->getLAngleLoc(), Old->getRAngleLoc());
if (Old->hasExplicitTemplateArgs() &&
getDerived().TransformTemplateArguments(Old->getTemplateArgs(),
@@ -12826,6 +13700,23 @@ TreeTransform<Derived>::TransformUnresolvedLookupExpr(
return ExprError();
}
+ // An UnresolvedLookupExpr can refer to a class member. This occurs e.g. when
+ // a non-static data member is named in an unevaluated operand, or when
+ // a member is named in a dependent class scope function template explicit
+ // specialization that is neither declared static nor with an explicit object
+ // parameter.
+ if (SemaRef.isPotentialImplicitMemberAccess(SS, R, IsAddressOfOperand))
+ return SemaRef.BuildPossibleImplicitMemberExpr(
+ SS, TemplateKWLoc, R,
+ Old->hasExplicitTemplateArgs() ? &TransArgs : nullptr,
+ /*S=*/nullptr);
+
+ // If we have neither explicit template arguments, nor the template keyword,
+ // it's a normal declaration name or member reference.
+ if (!Old->hasExplicitTemplateArgs() && !TemplateKWLoc.isValid())
+ return getDerived().RebuildDeclarationNameExpr(SS, R, Old->requiresADL());
+
+ // If we have template arguments, then rebuild the template-id expression.
return getDerived().RebuildTemplateIdExpr(SS, TemplateKWLoc, R,
Old->requiresADL(), &TransArgs);
}
@@ -13478,10 +14369,29 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
// use evaluation contexts to distinguish the function parameter case.
CXXRecordDecl::LambdaDependencyKind DependencyKind =
CXXRecordDecl::LDK_Unknown;
+ DeclContext *DC = getSema().CurContext;
+ // A RequiresExprBodyDecl is not interesting for dependencies.
+ // For the following case,
+ //
+ // template <typename>
+ // concept C = requires { [] {}; };
+ //
+ // template <class F>
+ // struct Widget;
+ //
+ // template <C F>
+ // struct Widget<F> {};
+ //
+ // While we are substituting Widget<F>, the parent of DC would be
+ // the template specialization itself. Thus, the lambda expression
+ // will be deemed as dependent even if there are no dependent template
+ // arguments.
+ // (A ClassTemplateSpecializationDecl is always a dependent context.)
+ while (DC->isRequiresExprBody())
+ DC = DC->getParent();
if ((getSema().isUnevaluatedContext() ||
getSema().isConstantEvaluatedContext()) &&
- (getSema().CurContext->isFileContext() ||
- !getSema().CurContext->getParent()->isDependentContext()))
+ (DC->isFileContext() || !DC->getParent()->isDependentContext()))
DependencyKind = CXXRecordDecl::LDK_NeverDependent;
CXXRecordDecl *OldClass = E->getLambdaClass();
@@ -13717,7 +14627,12 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
// FIXME: Sema's lambda-building mechanism expects us to push an expression
// evaluation context even if we're not transforming the function body.
getSema().PushExpressionEvaluationContext(
+ E->getCallOperator()->isConsteval() ?
+ Sema::ExpressionEvaluationContext::ImmediateFunctionContext :
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
+ getSema().currentEvaluationContext().InImmediateEscalatingFunctionContext =
+ getSema().getLangOpts().CPlusPlus20 &&
+ E->getCallOperator()->isImmediateEscalating();
Sema::CodeSynthesisContext C;
C.Kind = clang::Sema::CodeSynthesisContext::LambdaExpressionSubstitution;
@@ -13748,6 +14663,46 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
/*IsInstantiation*/ true);
SavedContext.pop();
+ // Recompute the dependency of the lambda so that we can defer the lambda call
+ // construction until after we have all the necessary template arguments. For
+ // example, given
+ //
+ // template <class> struct S {
+ // template <class U>
+ // using Type = decltype([](U){}(42.0));
+ // };
+ // void foo() {
+ // using T = S<int>::Type<float>;
+ // ^~~~~~
+ // }
+ //
+ // We would end up here from instantiating S<int> when ensuring its
+ // completeness. That would transform the lambda call expression regardless of
+ // the absence of the corresponding argument for U.
+ //
+ // Going ahead with unsubstituted type U makes things worse: we would soon
+ // compare the argument type (which is float) against the parameter U
+ // somewhere in Sema::BuildCallExpr. Then we would quickly run into a bogus
+ // error suggesting unmatched types 'U' and 'float'!
+ //
+ // That said, everything will be fine if we defer that semantic checking.
+ // Fortunately, we have such a mechanism that bypasses it if the CallExpr is
+ // dependent. Since the CallExpr's dependency boils down to the lambda's
+ // dependency in this case, we can harness that by recomputing the dependency
+ // from the instantiation arguments.
+ //
+ // FIXME: Creating the type of a lambda requires us to have a dependency
+ // value, which happens before its substitution. We update its dependency
+ // *after* the substitution in case we can't decide the dependency
+ // so early, e.g. because we want to see if any of the *substituted*
+ // parameters are dependent.
+ DependencyKind = getDerived().ComputeLambdaDependency(&LSICopy);
+ Class->setLambdaDependencyKind(DependencyKind);
+ // Clean up the type cache created previously. Then, we re-create a type for
+ // such Decl with the new DependencyKind.
+ Class->setTypeForDecl(nullptr);
+ getSema().Context.getTypeDeclType(Class);
+
return getSema().BuildLambdaExpr(E->getBeginLoc(), Body.get()->getEndLoc(),
&LSICopy);
}
@@ -14166,6 +15121,85 @@ TreeTransform<Derived>::TransformSizeOfPackExpr(SizeOfPackExpr *E) {
Args.size(), std::nullopt);
}
+template <typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformPackIndexingExpr(PackIndexingExpr *E) {
+ if (!E->isValueDependent())
+ return E;
+
+ // Transform the index
+ ExprResult IndexExpr = getDerived().TransformExpr(E->getIndexExpr());
+ if (IndexExpr.isInvalid())
+ return ExprError();
+
+ SmallVector<Expr *, 5> ExpandedExprs;
+ if (!E->expandsToEmptyPack() && E->getExpressions().empty()) {
+ Expr *Pattern = E->getPackIdExpression();
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ getSema().collectUnexpandedParameterPacks(E->getPackIdExpression(),
+ Unexpanded);
+ assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
+
+ // Determine whether the set of unexpanded parameter packs can and should
+ // be expanded.
+ bool ShouldExpand = true;
+ bool RetainExpansion = false;
+ std::optional<unsigned> OrigNumExpansions;
+ std::optional<unsigned> NumExpansions = OrigNumExpansions;
+ if (getDerived().TryExpandParameterPacks(
+ E->getEllipsisLoc(), Pattern->getSourceRange(), Unexpanded,
+ ShouldExpand, RetainExpansion, NumExpansions))
+ return true;
+ if (!ShouldExpand) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ ExprResult Pack = getDerived().TransformExpr(Pattern);
+ if (Pack.isInvalid())
+ return ExprError();
+ return getDerived().RebuildPackIndexingExpr(
+ E->getEllipsisLoc(), E->getRSquareLoc(), Pack.get(), IndexExpr.get(),
+ std::nullopt);
+ }
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+ ExprResult Out = getDerived().TransformExpr(Pattern);
+ if (Out.isInvalid())
+ return true;
+ if (Out.get()->containsUnexpandedParameterPack()) {
+ Out = getDerived().RebuildPackExpansion(Out.get(), E->getEllipsisLoc(),
+ OrigNumExpansions);
+ if (Out.isInvalid())
+ return true;
+ }
+ ExpandedExprs.push_back(Out.get());
+ }
+ // If we're supposed to retain a pack expansion, do so by temporarily
+ // forgetting the partially-substituted parameter pack.
+ if (RetainExpansion) {
+ ForgetPartiallySubstitutedPackRAII Forget(getDerived());
+
+ ExprResult Out = getDerived().TransformExpr(Pattern);
+ if (Out.isInvalid())
+ return true;
+
+ Out = getDerived().RebuildPackExpansion(Out.get(), E->getEllipsisLoc(),
+ OrigNumExpansions);
+ if (Out.isInvalid())
+ return true;
+ ExpandedExprs.push_back(Out.get());
+ }
+ } else if (!E->expandsToEmptyPack()) {
+ if (getDerived().TransformExprs(E->getExpressions().data(),
+ E->getExpressions().size(), false,
+ ExpandedExprs))
+ return ExprError();
+ }
+
+ return getDerived().RebuildPackIndexingExpr(
+ E->getEllipsisLoc(), E->getRSquareLoc(), E->getPackIdExpression(),
+ IndexExpr.get(), ExpandedExprs,
+ /*EmptyPack=*/ExpandedExprs.size() == 0);
+}
+
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformSubstNonTypeTemplateParmPackExpr(
@@ -14575,9 +15609,9 @@ TransformObjCBridgedCastExpr(ObjCBridgedCastExpr *E) {
Result.get() == E->getSubExpr())
return E;
- return SemaRef.BuildObjCBridgedCast(E->getLParenLoc(), E->getBridgeKind(),
- E->getBridgeKeywordLoc(), TSInfo,
- Result.get());
+ return SemaRef.ObjC().BuildObjCBridgedCast(
+ E->getLParenLoc(), E->getBridgeKind(), E->getBridgeKeywordLoc(), TSInfo,
+ Result.get());
}
template <typename Derived>
@@ -14965,10 +15999,9 @@ QualType TreeTransform<Derived>::RebuildObjCTypeParamType(
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc) {
- return SemaRef.BuildObjCTypeParamType(Decl,
- ProtocolLAngleLoc, Protocols,
- ProtocolLocs, ProtocolRAngleLoc,
- /*FailOnError=*/true);
+ return SemaRef.ObjC().BuildObjCTypeParamType(
+ Decl, ProtocolLAngleLoc, Protocols, ProtocolLocs, ProtocolRAngleLoc,
+ /*FailOnError=*/true);
}
template<typename Derived>
@@ -14982,11 +16015,11 @@ QualType TreeTransform<Derived>::RebuildObjCObjectType(
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc) {
- return SemaRef.BuildObjCObjectType(BaseType, Loc, TypeArgsLAngleLoc, TypeArgs,
- TypeArgsRAngleLoc, ProtocolLAngleLoc,
- Protocols, ProtocolLocs, ProtocolRAngleLoc,
- /*FailOnError=*/true,
- /*Rebuilding=*/true);
+ return SemaRef.ObjC().BuildObjCObjectType(
+ BaseType, Loc, TypeArgsLAngleLoc, TypeArgs, TypeArgsRAngleLoc,
+ ProtocolLAngleLoc, Protocols, ProtocolLocs, ProtocolRAngleLoc,
+ /*FailOnError=*/true,
+ /*Rebuilding=*/true);
}
template<typename Derived>
@@ -15207,6 +16240,15 @@ QualType TreeTransform<Derived>::RebuildDecltypeType(Expr *E, SourceLocation) {
return SemaRef.BuildDecltypeType(E);
}
+template <typename Derived>
+QualType TreeTransform<Derived>::RebuildPackIndexingType(
+ QualType Pattern, Expr *IndexExpr, SourceLocation Loc,
+ SourceLocation EllipsisLoc, bool FullySubstituted,
+ ArrayRef<QualType> Expansions) {
+ return SemaRef.BuildPackIndexingType(Pattern, IndexExpr, Loc, EllipsisLoc,
+ FullySubstituted, Expansions);
+}
+
template<typename Derived>
QualType TreeTransform<Derived>::RebuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
@@ -15310,8 +16352,8 @@ ExprResult TreeTransform<Derived>::RebuildCXXOperatorCallExpr(
if (First->getObjectKind() == OK_ObjCProperty) {
BinaryOperatorKind Opc = BinaryOperator::getOverloadedOpcode(Op);
if (BinaryOperator::isAssignmentOp(Opc))
- return SemaRef.checkPseudoObjectAssignment(/*Scope=*/nullptr, OpLoc, Opc,
- First, Second);
+ return SemaRef.PseudoObject().checkAssignment(/*Scope=*/nullptr, OpLoc,
+ Opc, First, Second);
ExprResult Result = SemaRef.CheckPlaceholderExpr(First);
if (Result.isInvalid())
return ExprError();
@@ -15350,10 +16392,11 @@ ExprResult TreeTransform<Derived>::RebuildCXXOperatorCallExpr(
return getSema().CreateBuiltinUnaryOp(OpLoc, Opc, First);
}
} else {
- if (!First->getType()->isOverloadableType() &&
+ if (!First->isTypeDependent() && !Second->isTypeDependent() &&
+ !First->getType()->isOverloadableType() &&
!Second->getType()->isOverloadableType()) {
- // Neither of the arguments is an overloadable type, so try to
- // create a built-in binary operation.
+ // Neither of the arguments is type-dependent or has an overloadable
+ // type, so try to create a built-in binary operation.
BinaryOperatorKind Opc = BinaryOperator::getOverloadedOpcode(Op);
ExprResult Result
= SemaRef.CreateBuiltinBinOp(OpLoc, Opc, First, Second);
@@ -15364,12 +16407,8 @@ ExprResult TreeTransform<Derived>::RebuildCXXOperatorCallExpr(
}
}
- // Add any functions found via argument-dependent lookup.
- Expr *Args[2] = { First, Second };
- unsigned NumArgs = 1 + (Second != nullptr);
-
// Create the overloaded operator invocation for unary operators.
- if (NumArgs == 1 || isPostIncDec) {
+ if (!Second || isPostIncDec) {
UnaryOperatorKind Opc
= UnaryOperator::getOverloadedOpcode(Op, isPostIncDec);
return SemaRef.CreateOverloadedUnaryOp(OpLoc, Opc, Functions, First,
@@ -15378,8 +16417,8 @@ ExprResult TreeTransform<Derived>::RebuildCXXOperatorCallExpr(
// Create the overloaded operator invocation for binary operators.
BinaryOperatorKind Opc = BinaryOperator::getOverloadedOpcode(Op);
- ExprResult Result = SemaRef.CreateOverloadedBinOp(
- OpLoc, Opc, Functions, Args[0], Args[1], RequiresADL);
+ ExprResult Result = SemaRef.CreateOverloadedBinOp(OpLoc, Opc, Functions,
+ First, Second, RequiresADL);
if (Result.isInvalid())
return ExprError();
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp
index 6110e287b7fb..444a8a3d3a51 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp
@@ -186,6 +186,9 @@ serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
case BuiltinType::Overload:
ID = PREDEF_TYPE_OVERLOAD_ID;
break;
+ case BuiltinType::UnresolvedTemplate:
+ ID = PREDEF_TYPE_UNRESOLVED_TEMPLATE;
+ break;
case BuiltinType::BoundMember:
ID = PREDEF_TYPE_BOUND_MEMBER;
break;
@@ -255,14 +258,19 @@ serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
ID = PREDEF_TYPE_##Id##_ID; \
break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
+#define AMDGPU_TYPE(Name, Id, SingletonId) \
+ case BuiltinType::Id: \
+ ID = PREDEF_TYPE_##Id##_ID; \
+ break;
+#include "clang/Basic/AMDGPUTypes.def"
case BuiltinType::BuiltinFn:
ID = PREDEF_TYPE_BUILTIN_FN;
break;
case BuiltinType::IncompleteMatrixIdx:
ID = PREDEF_TYPE_INCOMPLETE_MATRIX_IDX;
break;
- case BuiltinType::OMPArraySection:
- ID = PREDEF_TYPE_OMP_ARRAY_SECTION;
+ case BuiltinType::ArraySection:
+ ID = PREDEF_TYPE_ARRAY_SECTION;
break;
case BuiltinType::OMPArrayShaping:
ID = PREDEF_TYPE_OMP_ARRAY_SHAPING;
@@ -275,7 +283,7 @@ serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
break;
}
- return TypeIdx(ID);
+ return TypeIdx(0, ID);
}
unsigned serialization::ComputeHash(Selector Sel) {
@@ -284,7 +292,7 @@ unsigned serialization::ComputeHash(Selector Sel) {
++N;
unsigned R = 5381;
for (unsigned I = 0; I != N; ++I)
- if (IdentifierInfo *II = Sel.getIdentifierInfoForSlot(I))
+ if (const IdentifierInfo *II = Sel.getIdentifierInfoForSlot(I))
R = llvm::djbHash(II->getName(), R);
return R;
}
@@ -338,7 +346,7 @@ serialization::getDefinitiveDeclContext(const DeclContext *DC) {
// FIXME: These are defined in one place, but properties in class extensions
// end up being back-patched into the main interface. See
- // Sema::HandlePropertyInClassExtension for the offending code.
+ // SemaObjC::HandlePropertyInClassExtension for the offending code.
case Decl::ObjCInterface:
return nullptr;
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTCommon.h b/contrib/llvm-project/clang/lib/Serialization/ASTCommon.h
index 296642e3674a..0230908d3e05 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTCommon.h
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTCommon.h
@@ -46,30 +46,6 @@ enum DeclUpdateKind {
TypeIdx TypeIdxFromBuiltin(const BuiltinType *BT);
-template <typename IdxForTypeTy>
-TypeID MakeTypeID(ASTContext &Context, QualType T, IdxForTypeTy IdxForType) {
- if (T.isNull())
- return PREDEF_TYPE_NULL_ID;
-
- unsigned FastQuals = T.getLocalFastQualifiers();
- T.removeLocalFastQualifiers();
-
- if (T.hasLocalNonFastQualifiers())
- return IdxForType(T).asTypeID(FastQuals);
-
- assert(!T.hasLocalQualifiers());
-
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(T.getTypePtr()))
- return TypeIdxFromBuiltin(BT).asTypeID(FastQuals);
-
- if (T == Context.AutoDeductTy)
- return TypeIdx(PREDEF_TYPE_AUTO_DEDUCT).asTypeID(FastQuals);
- if (T == Context.AutoRRefDeductTy)
- return TypeIdx(PREDEF_TYPE_AUTO_RREF_DEDUCT).asTypeID(FastQuals);
-
- return IdxForType(T).asTypeID(FastQuals);
-}
-
unsigned ComputeHash(Selector Sel);
/// Retrieve the "definitive" declaration that provides all of the
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
index 490b8cb10a48..2d8f5a801f0e 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
@@ -31,7 +31,7 @@
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/ODRDiagsEmitter.h"
-#include "clang/AST/ODRHash.h"
+#include "clang/AST/OpenACCClause.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/TemplateBase.h"
@@ -40,9 +40,11 @@
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeLocVisitor.h"
#include "clang/AST/UnresolvedSet.h"
+#include "clang/Basic/ASTSourceDescriptor.h"
#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticError.h"
+#include "clang/Basic/DiagnosticIDs.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/ExceptionSpecificationType.h"
@@ -53,6 +55,7 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/ObjCRuntime.h"
+#include "clang/Basic/OpenACCKinds.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PragmaKinds.h"
@@ -76,6 +79,8 @@
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaCUDA.h"
+#include "clang/Sema/SemaObjC.h"
#include "clang/Sema/Weak.h"
#include "clang/Serialization/ASTBitCodes.h"
#include "clang/Serialization/ASTDeserializationListener.h"
@@ -827,36 +832,37 @@ bool SimpleASTReaderListener::ReadPreprocessorOptions(
OptionValidateNone);
}
-/// Check the header search options deserialized from the control block
-/// against the header search options in an existing preprocessor.
+/// Check that the specified and the existing module cache paths are equivalent.
///
/// \param Diags If non-null, produce diagnostics for any mismatches incurred.
-static bool checkHeaderSearchOptions(const HeaderSearchOptions &HSOpts,
- StringRef SpecificModuleCachePath,
- StringRef ExistingModuleCachePath,
- DiagnosticsEngine *Diags,
- const LangOptions &LangOpts,
- const PreprocessorOptions &PPOpts) {
- if (LangOpts.Modules) {
- if (SpecificModuleCachePath != ExistingModuleCachePath &&
- !PPOpts.AllowPCHWithDifferentModulesCachePath) {
- if (Diags)
- Diags->Report(diag::err_pch_modulecache_mismatch)
- << SpecificModuleCachePath << ExistingModuleCachePath;
- return true;
- }
- }
-
- return false;
+/// \returns true when the module cache paths differ.
+static bool checkModuleCachePath(llvm::vfs::FileSystem &VFS,
+ StringRef SpecificModuleCachePath,
+ StringRef ExistingModuleCachePath,
+ DiagnosticsEngine *Diags,
+ const LangOptions &LangOpts,
+ const PreprocessorOptions &PPOpts) {
+ if (!LangOpts.Modules || PPOpts.AllowPCHWithDifferentModulesCachePath ||
+ SpecificModuleCachePath == ExistingModuleCachePath)
+ return false;
+ auto EqualOrErr =
+ VFS.equivalent(SpecificModuleCachePath, ExistingModuleCachePath);
+ if (EqualOrErr && *EqualOrErr)
+ return false;
+ if (Diags)
+ Diags->Report(diag::err_pch_modulecache_mismatch)
+ << SpecificModuleCachePath << ExistingModuleCachePath;
+ return true;
}
bool PCHValidator::ReadHeaderSearchOptions(const HeaderSearchOptions &HSOpts,
StringRef SpecificModuleCachePath,
bool Complain) {
- return checkHeaderSearchOptions(HSOpts, SpecificModuleCachePath,
- PP.getHeaderSearchInfo().getModuleCachePath(),
- Complain ? &Reader.Diags : nullptr,
- PP.getLangOpts(), PP.getPreprocessorOpts());
+ return checkModuleCachePath(Reader.getFileManager().getVirtualFileSystem(),
+ SpecificModuleCachePath,
+ PP.getHeaderSearchInfo().getModuleCachePath(),
+ Complain ? &Reader.Diags : nullptr,
+ PP.getLangOpts(), PP.getPreprocessorOpts());
}
void PCHValidator::ReadCounter(const ModuleFile &M, unsigned Value) {
@@ -902,6 +908,39 @@ unsigned ASTSelectorLookupTrait::ComputeHash(Selector Sel) {
return serialization::ComputeHash(Sel);
}
+LocalDeclID LocalDeclID::get(ASTReader &Reader, ModuleFile &MF, DeclID Value) {
+ LocalDeclID ID(Value);
+#ifndef NDEBUG
+ if (!MF.ModuleOffsetMap.empty())
+ Reader.ReadModuleOffsetMap(MF);
+
+ unsigned ModuleFileIndex = ID.getModuleFileIndex();
+ unsigned LocalDeclID = ID.getLocalDeclIndex();
+
+ assert(ModuleFileIndex <= MF.TransitiveImports.size());
+
+ ModuleFile *OwningModuleFile =
+ ModuleFileIndex == 0 ? &MF : MF.TransitiveImports[ModuleFileIndex - 1];
+ assert(OwningModuleFile);
+
+ unsigned LocalNumDecls = OwningModuleFile->LocalNumDecls;
+
+ if (!ModuleFileIndex)
+ LocalNumDecls += NUM_PREDEF_DECL_IDS;
+
+ assert(LocalDeclID < LocalNumDecls);
+#endif
+ (void)Reader;
+ (void)MF;
+ return ID;
+}
+
+LocalDeclID LocalDeclID::get(ASTReader &Reader, ModuleFile &MF,
+ unsigned ModuleFileIndex, unsigned LocalDeclID) {
+ DeclID Value = (DeclID)ModuleFileIndex << 32 | (DeclID)LocalDeclID;
+ return LocalDeclID::get(Reader, MF, Value);
+}
+
std::pair<unsigned, unsigned>
ASTSelectorLookupTrait::ReadKeyDataLength(const unsigned char*& d) {
return readULEBKeyDataLength(d);
@@ -912,20 +951,19 @@ ASTSelectorLookupTrait::ReadKey(const unsigned char* d, unsigned) {
using namespace llvm::support;
SelectorTable &SelTable = Reader.getContext().Selectors;
- unsigned N =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(d);
- IdentifierInfo *FirstII = Reader.getLocalIdentifier(
- F, endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d));
+ unsigned N = endian::readNext<uint16_t, llvm::endianness::little>(d);
+ const IdentifierInfo *FirstII = Reader.getLocalIdentifier(
+ F, endian::readNext<IdentifierID, llvm::endianness::little>(d));
if (N == 0)
return SelTable.getNullarySelector(FirstII);
else if (N == 1)
return SelTable.getUnarySelector(FirstII);
- SmallVector<IdentifierInfo *, 16> Args;
+ SmallVector<const IdentifierInfo *, 16> Args;
Args.push_back(FirstII);
for (unsigned I = 1; I != N; ++I)
Args.push_back(Reader.getLocalIdentifier(
- F, endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d)));
+ F, endian::readNext<IdentifierID, llvm::endianness::little>(d)));
return SelTable.getSelector(N, Args.data());
}
@@ -938,11 +976,11 @@ ASTSelectorLookupTrait::ReadData(Selector, const unsigned char* d,
data_type Result;
Result.ID = Reader.getGlobalSelectorID(
- F, endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d));
+ F, endian::readNext<uint32_t, llvm::endianness::little>(d));
unsigned FullInstanceBits =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(d);
+ endian::readNext<uint16_t, llvm::endianness::little>(d);
unsigned FullFactoryBits =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(d);
+ endian::readNext<uint16_t, llvm::endianness::little>(d);
Result.InstanceBits = FullInstanceBits & 0x3;
Result.InstanceHasMoreThanOneDecl = (FullInstanceBits >> 2) & 0x1;
Result.FactoryBits = FullFactoryBits & 0x3;
@@ -953,16 +991,18 @@ ASTSelectorLookupTrait::ReadData(Selector, const unsigned char* d,
// Load instance methods
for (unsigned I = 0; I != NumInstanceMethods; ++I) {
if (ObjCMethodDecl *Method = Reader.GetLocalDeclAs<ObjCMethodDecl>(
- F,
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d)))
+ F, LocalDeclID::get(
+ Reader, F,
+ endian::readNext<DeclID, llvm::endianness::little>(d))))
Result.Instance.push_back(Method);
}
// Load factory methods
for (unsigned I = 0; I != NumFactoryMethods; ++I) {
if (ObjCMethodDecl *Method = Reader.GetLocalDeclAs<ObjCMethodDecl>(
- F,
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d)))
+ F, LocalDeclID::get(
+ Reader, F,
+ endian::readNext<DeclID, llvm::endianness::little>(d))))
Result.Factory.push_back(Method);
}
@@ -985,11 +1025,14 @@ ASTIdentifierLookupTraitBase::ReadKey(const unsigned char* d, unsigned n) {
}
/// Whether the given identifier is "interesting".
-static bool isInterestingIdentifier(ASTReader &Reader, IdentifierInfo &II,
+static bool isInterestingIdentifier(ASTReader &Reader, const IdentifierInfo &II,
bool IsModule) {
+ bool IsInteresting =
+ II.getNotableIdentifierID() != tok::NotableIdentifierKind::not_notable ||
+ II.getBuiltinID() != Builtin::ID::NotBuiltin ||
+ II.getObjCKeywordID() != tok::ObjCKeywordKind::objc_not_keyword;
return II.hadMacroDefinition() || II.isPoisoned() ||
- (!IsModule && II.getObjCOrBuiltinID()) ||
- II.hasRevertedTokenIDToIdentifier() ||
+ (!IsModule && IsInteresting) || II.hasRevertedTokenIDToIdentifier() ||
(!(IsModule && Reader.getPreprocessor().getLangOpts().CPlusPlus) &&
II.getFETokenInfo());
}
@@ -1000,11 +1043,11 @@ static bool readBit(unsigned &Bits) {
return Value;
}
-IdentID ASTIdentifierLookupTrait::ReadIdentifierID(const unsigned char *d) {
+IdentifierID ASTIdentifierLookupTrait::ReadIdentifierID(const unsigned char *d) {
using namespace llvm::support;
- unsigned RawID =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d);
+ IdentifierID RawID =
+ endian::readNext<IdentifierID, llvm::endianness::little>(d);
return Reader.getGlobalIdentifierID(F, RawID >> 1);
}
@@ -1022,10 +1065,12 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
unsigned DataLen) {
using namespace llvm::support;
- unsigned RawID =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d);
+ IdentifierID RawID =
+ endian::readNext<IdentifierID, llvm::endianness::little>(d);
bool IsInteresting = RawID & 0x01;
+ DataLen -= sizeof(IdentifierID);
+
// Wipe out the "is interesting" bit.
RawID = RawID >> 1;
@@ -1038,7 +1083,7 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
markIdentifierFromAST(Reader, *II);
Reader.markIdentifierUpToDate(II);
- IdentID ID = Reader.getGlobalIdentifierID(F, RawID);
+ IdentifierID ID = Reader.getGlobalIdentifierID(F, RawID);
if (!IsInteresting) {
// For uninteresting identifiers, there's nothing else to do. Just notify
// the reader that we've finished loading this identifier.
@@ -1047,9 +1092,8 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
}
unsigned ObjCOrBuiltinID =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(d);
- unsigned Bits =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(d);
+ endian::readNext<uint16_t, llvm::endianness::little>(d);
+ unsigned Bits = endian::readNext<uint16_t, llvm::endianness::little>(d);
bool CPlusPlusOperatorKeyword = readBit(Bits);
bool HasRevertedTokenIDToIdentifier = readBit(Bits);
bool Poisoned = readBit(Bits);
@@ -1057,7 +1101,7 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
bool HadMacroDefinition = readBit(Bits);
assert(Bits == 0 && "Extra bits in the identifier?");
- DataLen -= 8;
+ DataLen -= sizeof(uint16_t) * 2;
// Set or check the various bits in the IdentifierInfo structure.
// Token IDs are read-only.
@@ -1078,7 +1122,7 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
// definition.
if (HadMacroDefinition) {
uint32_t MacroDirectivesOffset =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d);
+ endian::readNext<uint32_t, llvm::endianness::little>(d);
DataLen -= 4;
Reader.addPendingMacro(II, &F, MacroDirectivesOffset);
@@ -1089,11 +1133,12 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
// Read all of the declarations visible at global scope with this
// name.
if (DataLen > 0) {
- SmallVector<uint32_t, 4> DeclIDs;
- for (; DataLen > 0; DataLen -= 4)
+ SmallVector<GlobalDeclID, 4> DeclIDs;
+ for (; DataLen > 0; DataLen -= sizeof(DeclID))
DeclIDs.push_back(Reader.getGlobalDeclID(
- F,
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d)));
+ F, LocalDeclID::get(
+ Reader, F,
+ endian::readNext<DeclID, llvm::endianness::little>(d))));
Reader.SetGloballyVisibleDecls(II, DeclIDs);
}
@@ -1155,7 +1200,7 @@ unsigned DeclarationNameKey::getHash() const {
break;
}
- return ID.ComputeHash();
+ return ID.computeStableHash();
}
ModuleFile *
@@ -1163,7 +1208,7 @@ ASTDeclContextNameLookupTrait::ReadFileRef(const unsigned char *&d) {
using namespace llvm::support;
uint32_t ModuleFileID =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d);
+ endian::readNext<uint32_t, llvm::endianness::little>(d);
return Reader.getLocalModuleFile(F, ModuleFileID);
}
@@ -1183,18 +1228,15 @@ ASTDeclContextNameLookupTrait::ReadKey(const unsigned char *d, unsigned) {
case DeclarationName::CXXLiteralOperatorName:
case DeclarationName::CXXDeductionGuideName:
Data = (uint64_t)Reader.getLocalIdentifier(
- F, endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d));
+ F, endian::readNext<IdentifierID, llvm::endianness::little>(d));
break;
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
- Data =
- (uint64_t)Reader
- .getLocalSelector(
- F,
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(
- d))
- .getAsOpaquePtr();
+ Data = (uint64_t)Reader
+ .getLocalSelector(
+ F, endian::readNext<uint32_t, llvm::endianness::little>(d))
+ .getAsOpaquePtr();
break;
case DeclarationName::CXXOperatorName:
Data = *d++; // OverloadedOperatorKind
@@ -1216,10 +1258,10 @@ void ASTDeclContextNameLookupTrait::ReadDataInto(internal_key_type,
data_type_builder &Val) {
using namespace llvm::support;
- for (unsigned NumDecls = DataLen / 4; NumDecls; --NumDecls) {
- uint32_t LocalID =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d);
- Val.insert(Reader.getGlobalDeclID(F, LocalID));
+ for (unsigned NumDecls = DataLen / sizeof(DeclID); NumDecls; --NumDecls) {
+ LocalDeclID ID = LocalDeclID::get(
+ Reader, F, endian::readNext<DeclID, llvm::endianness::little>(d));
+ Val.insert(Reader.getGlobalDeclID(F, ID));
}
}
@@ -1265,9 +1307,8 @@ bool ASTReader::ReadLexicalDeclContextStorage(ModuleFile &M,
if (!Lex.first) {
Lex = std::make_pair(
&M, llvm::ArrayRef(
- reinterpret_cast<const llvm::support::unaligned_uint32_t *>(
- Blob.data()),
- Blob.size() / 4));
+ reinterpret_cast<const unaligned_decl_id_t *>(Blob.data()),
+ Blob.size() / sizeof(DeclID)));
}
DC->setHasExternalLexicalStorage(true);
return false;
@@ -1276,7 +1317,7 @@ bool ASTReader::ReadLexicalDeclContextStorage(ModuleFile &M,
bool ASTReader::ReadVisibleDeclContextStorage(ModuleFile &M,
BitstreamCursor &Cursor,
uint64_t Offset,
- DeclID ID) {
+ GlobalDeclID ID) {
assert(Offset != 0);
SavedStreamPosition SavedPosition(Cursor);
@@ -1651,15 +1692,14 @@ bool ASTReader::ReadSLocEntry(int ID) {
FileCharacter = (SrcMgr::CharacteristicKind)Record[2];
FileID FID = SourceMgr.createFileID(*File, IncludeLoc, FileCharacter, ID,
BaseOffset + Record[0]);
- SrcMgr::FileInfo &FileInfo =
- const_cast<SrcMgr::FileInfo&>(SourceMgr.getSLocEntry(FID).getFile());
+ SrcMgr::FileInfo &FileInfo = SourceMgr.getSLocEntry(FID).getFile();
FileInfo.NumCreatedFIDs = Record[5];
if (Record[3])
FileInfo.setHasLineDirectives();
unsigned NumFileDecls = Record[7];
if (NumFileDecls && ContextObj) {
- const DeclID *FirstDecl = F->FileSortedDecls + Record[6];
+ const unaligned_decl_id_t *FirstDecl = F->FileSortedDecls + Record[6];
assert(F->FileSortedDecls && "FILE_SORTED_DECLS not encountered yet ?");
FileDeclIDs[FID] =
FileDeclsInfo(F, llvm::ArrayRef(FirstDecl, NumFileDecls));
@@ -1695,8 +1735,7 @@ bool ASTReader::ReadSLocEntry(int ID) {
FileID FID = SourceMgr.createFileID(std::move(Buffer), FileCharacter, ID,
BaseOffset + Offset, IncludeLoc);
if (Record[3]) {
- auto &FileInfo =
- const_cast<SrcMgr::FileInfo &>(SourceMgr.getSLocEntry(FID).getFile());
+ auto &FileInfo = SourceMgr.getSLocEntry(FID).getFile();
FileInfo.setHasLineDirectives();
}
break;
@@ -1993,7 +2032,10 @@ const FileEntry *HeaderFileInfoTrait::getFile(const internal_key_type &Key) {
}
unsigned HeaderFileInfoTrait::ComputeHash(internal_key_ref ikey) {
- return llvm::hash_combine(ikey.Size, ikey.ModTime);
+ uint8_t buf[sizeof(ikey.Size) + sizeof(ikey.ModTime)];
+ memcpy(buf, &ikey.Size, sizeof(ikey.Size));
+ memcpy(buf + sizeof(ikey.Size), &ikey.ModTime, sizeof(ikey.ModTime));
+ return llvm::xxh3_64bits(buf);
}
HeaderFileInfoTrait::internal_key_type
@@ -2027,10 +2069,9 @@ HeaderFileInfoTrait::ReadKey(const unsigned char *d, unsigned) {
using namespace llvm::support;
internal_key_type ikey;
- ikey.Size =
- off_t(endian::readNext<uint64_t, llvm::endianness::little, unaligned>(d));
- ikey.ModTime = time_t(
- endian::readNext<uint64_t, llvm::endianness::little, unaligned>(d));
+ ikey.Size = off_t(endian::readNext<uint64_t, llvm::endianness::little>(d));
+ ikey.ModTime =
+ time_t(endian::readNext<uint64_t, llvm::endianness::little>(d));
ikey.Filename = (const char *)d;
ikey.Imported = true;
return ikey;
@@ -2057,10 +2098,10 @@ HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
HFI.isPragmaOnce |= (Flags >> 4) & 0x01;
HFI.DirInfo = (Flags >> 1) & 0x07;
HFI.IndexHeaderMapHeader = Flags & 0x01;
- HFI.ControllingMacroID = Reader.getGlobalIdentifierID(
- M, endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d));
+ HFI.LazyControllingMacro = Reader.getGlobalIdentifierID(
+ M, endian::readNext<IdentifierID, llvm::endianness::little>(d));
if (unsigned FrameworkOffset =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d)) {
+ endian::readNext<uint32_t, llvm::endianness::little>(d)) {
// The framework offset is 1 greater than the actual offset,
// since 0 is used as an indicator for "no framework name".
StringRef FrameworkName(FrameworkStrings + FrameworkOffset - 1);
@@ -2071,7 +2112,7 @@ HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
"Wrong data length in HeaderFileInfo deserialization");
while (d != End) {
uint32_t LocalSMID =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d);
+ endian::readNext<uint32_t, llvm::endianness::little>(d);
auto HeaderRole = static_cast<ModuleMap::ModuleHeaderRole>(LocalSMID & 7);
LocalSMID >>= 3;
@@ -2091,7 +2132,7 @@ HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
Module::Header H = {std::string(key.Filename), "", *FE};
ModMap.addHeader(Mod, H, HeaderRole, /*Imported=*/true);
}
- HFI.isModuleHeader |= ModuleMap::isModular(HeaderRole);
+ HFI.mergeModuleMembership(HeaderRole);
}
// This HeaderFileInfo was externally loaded.
@@ -2224,7 +2265,7 @@ namespace {
} // namespace
-void ASTReader::updateOutOfDateIdentifier(IdentifierInfo &II) {
+void ASTReader::updateOutOfDateIdentifier(const IdentifierInfo &II) {
// Note that we are loading an identifier.
Deserializing AnIdentifier(this);
@@ -2249,11 +2290,11 @@ void ASTReader::updateOutOfDateIdentifier(IdentifierInfo &II) {
markIdentifierUpToDate(&II);
}
-void ASTReader::markIdentifierUpToDate(IdentifierInfo *II) {
+void ASTReader::markIdentifierUpToDate(const IdentifierInfo *II) {
if (!II)
return;
- II->setOutOfDate(false);
+ const_cast<IdentifierInfo *>(II)->setOutOfDate(false);
// Update the generation for this identifier.
if (getContext().getLangOpts().Modules)
@@ -2587,7 +2628,7 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
"We should only check the content of the inputs with "
"ValidateASTInputFilesContent enabled.");
- if (StoredContentHash == static_cast<uint64_t>(llvm::hash_code(-1)))
+ if (StoredContentHash == 0)
return OriginalChange;
auto MemBuffOrError = FileMgr.getBufferForFile(*File);
@@ -2601,8 +2642,7 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
return OriginalChange;
}
- // FIXME: hash_value is not guaranteed to be stable!
- auto ContentHash = hash_value(MemBuffOrError.get()->getBuffer());
+ auto ContentHash = xxh3_64bits(MemBuffOrError.get()->getBuffer());
if (StoredContentHash == static_cast<uint64_t>(ContentHash))
return Change{Change::None};
@@ -2635,6 +2675,14 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
F.StandardCXXModule && FileChange.Kind == Change::None)
FileChange = HasInputContentChanged(FileChange);
+ // When we have StoredTime equal to zero and ValidateASTInputFilesContent,
+ // it is better to check the content of the input files because we cannot rely
+ // on the file modification time, which will be the same (zero) for these
+ // files.
+ if (!StoredTime && ValidateASTInputFilesContent &&
+ FileChange.Kind == Change::None)
+ FileChange = HasInputContentChanged(FileChange);
+
// For an overridden file, there is nothing to validate.
if (!Overridden && FileChange.Kind != Change::None) {
if (Complain && !Diags.isDiagnosticInFlight()) {
@@ -3035,8 +3083,10 @@ ASTReader::ReadControlBlock(ModuleFile &F,
// The import location will be the local one for now; we will adjust
// all import locations of module imports after the global source
// location info are setup, in ReadAST.
- SourceLocation ImportLoc =
+ auto [ImportLoc, ImportModuleFileIndex] =
ReadUntranslatedSourceLocation(Record[Idx++]);
+ // The import location must belong to the current module file itself.
+ assert(ImportModuleFileIndex == 0);
off_t StoredSize = !IsImportingStdCXXModule ? (off_t)Record[Idx++] : 0;
time_t StoredModTime =
!IsImportingStdCXXModule ? (time_t)Record[Idx++] : 0;
@@ -3344,22 +3394,13 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
return llvm::createStringError(
std::errc::illegal_byte_sequence,
"duplicate TYPE_OFFSET record in AST file");
- F.TypeOffsets = reinterpret_cast<const UnderalignedInt64 *>(Blob.data());
+ F.TypeOffsets = reinterpret_cast<const UnalignedUInt64 *>(Blob.data());
F.LocalNumTypes = Record[0];
- unsigned LocalBaseTypeIndex = Record[1];
F.BaseTypeIndex = getTotalNumTypes();
- if (F.LocalNumTypes > 0) {
- // Introduce the global -> local mapping for types within this module.
- GlobalTypeMap.insert(std::make_pair(getTotalNumTypes(), &F));
-
- // Introduce the local -> global mapping for types within this module.
- F.TypeRemap.insertOrReplace(
- std::make_pair(LocalBaseTypeIndex,
- F.BaseTypeIndex - LocalBaseTypeIndex));
-
+ if (F.LocalNumTypes > 0)
TypesLoaded.resize(TypesLoaded.size() + F.LocalNumTypes);
- }
+
break;
}
@@ -3370,35 +3411,19 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
"duplicate DECL_OFFSET record in AST file");
F.DeclOffsets = (const DeclOffset *)Blob.data();
F.LocalNumDecls = Record[0];
- unsigned LocalBaseDeclID = Record[1];
- F.BaseDeclID = getTotalNumDecls();
-
- if (F.LocalNumDecls > 0) {
- // Introduce the global -> local mapping for declarations within this
- // module.
- GlobalDeclMap.insert(
- std::make_pair(getTotalNumDecls() + NUM_PREDEF_DECL_IDS, &F));
-
- // Introduce the local -> global mapping for declarations within this
- // module.
- F.DeclRemap.insertOrReplace(
- std::make_pair(LocalBaseDeclID, F.BaseDeclID - LocalBaseDeclID));
-
- // Introduce the global -> local mapping for declarations within this
- // module.
- F.GlobalToLocalDeclIDs[&F] = LocalBaseDeclID;
+ F.BaseDeclIndex = getTotalNumDecls();
+ if (F.LocalNumDecls > 0)
DeclsLoaded.resize(DeclsLoaded.size() + F.LocalNumDecls);
- }
+
break;
}
case TU_UPDATE_LEXICAL: {
DeclContext *TU = ContextObj->getTranslationUnitDecl();
LexicalContents Contents(
- reinterpret_cast<const llvm::support::unaligned_uint32_t *>(
- Blob.data()),
- static_cast<unsigned int>(Blob.size() / 4));
+ reinterpret_cast<const unaligned_decl_id_t *>(Blob.data()),
+ static_cast<unsigned int>(Blob.size() / sizeof(DeclID)));
TULexicalDecls.push_back(std::make_pair(&F, Contents));
TU->setHasExternalLexicalStorage(true);
break;
@@ -3406,7 +3431,7 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
case UPDATE_VISIBLE: {
unsigned Idx = 0;
- serialization::DeclID ID = ReadDeclID(F, Record, Idx);
+ GlobalDeclID ID = ReadDeclID(F, Record, Idx);
auto *Data = (const unsigned char*)Blob.data();
PendingVisibleUpdates[ID].push_back(PendingVisibleUpdate{&F, Data});
// If we've already loaded the decl, perform the updates when we finish
@@ -3438,24 +3463,11 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
"duplicate IDENTIFIER_OFFSET record in AST file");
F.IdentifierOffsets = (const uint32_t *)Blob.data();
F.LocalNumIdentifiers = Record[0];
- unsigned LocalBaseIdentifierID = Record[1];
F.BaseIdentifierID = getTotalNumIdentifiers();
- if (F.LocalNumIdentifiers > 0) {
- // Introduce the global -> local mapping for identifiers within this
- // module.
- GlobalIdentifierMap.insert(std::make_pair(getTotalNumIdentifiers() + 1,
- &F));
-
- // Introduce the local -> global mapping for identifiers within this
- // module.
- F.IdentifierRemap.insertOrReplace(
- std::make_pair(LocalBaseIdentifierID,
- F.BaseIdentifierID - LocalBaseIdentifierID));
-
+ if (F.LocalNumIdentifiers > 0)
IdentifiersLoaded.resize(IdentifiersLoaded.size()
+ F.LocalNumIdentifiers);
- }
break;
}
@@ -3466,8 +3478,8 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
case EAGERLY_DESERIALIZED_DECLS:
// FIXME: Skip reading this record if our ASTConsumer doesn't care
// about "interesting" decls (for instance, if we're building a module).
- for (unsigned I = 0, N = Record.size(); I != N; ++I)
- EagerlyDeserializedDecls.push_back(getGlobalDeclID(F, Record[I]));
+ for (unsigned I = 0, N = Record.size(); I != N; /*in loop*/)
+ EagerlyDeserializedDecls.push_back(ReadDeclID(F, Record, I));
break;
case MODULAR_CODEGEN_DECLS:
@@ -3475,8 +3487,8 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
// them (ie: if we're not codegenerating this module).
if (F.Kind == MK_MainFile ||
getContext().getLangOpts().BuildingPCHWithObjectFile)
- for (unsigned I = 0, N = Record.size(); I != N; ++I)
- EagerlyDeserializedDecls.push_back(getGlobalDeclID(F, Record[I]));
+ for (unsigned I = 0, N = Record.size(); I != N; /*in loop*/)
+ EagerlyDeserializedDecls.push_back(ReadDeclID(F, Record, I));
break;
case SPECIAL_TYPES:
@@ -3507,13 +3519,13 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
break;
case UNUSED_FILESCOPED_DECLS:
- for (unsigned I = 0, N = Record.size(); I != N; ++I)
- UnusedFileScopedDecls.push_back(getGlobalDeclID(F, Record[I]));
+ for (unsigned I = 0, N = Record.size(); I != N; /*in loop*/)
+ UnusedFileScopedDecls.push_back(ReadDeclID(F, Record, I));
break;
case DELEGATING_CTORS:
- for (unsigned I = 0, N = Record.size(); I != N; ++I)
- DelegatingCtorDecls.push_back(getGlobalDeclID(F, Record[I]));
+ for (unsigned I = 0, N = Record.size(); I != N; /*in loop*/)
+ DelegatingCtorDecls.push_back(ReadDeclID(F, Record, I));
break;
case WEAK_UNDECLARED_IDENTIFIERS:
@@ -3588,6 +3600,17 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
break;
}
+ case PP_UNSAFE_BUFFER_USAGE: {
+ if (!Record.empty()) {
+ SmallVector<SourceLocation, 64> SrcLocs;
+ unsigned Idx = 0;
+ while (Idx < Record.size())
+ SrcLocs.push_back(ReadSourceLocation(F, Record, Idx));
+ PP.setDeserializedSafeBufferOptOutMap(SrcLocs);
+ }
+ break;
+ }
+
case PP_CONDITIONAL_STACK:
if (!Record.empty()) {
unsigned Idx = 0, End = Record.size() - 1;
@@ -3621,7 +3644,7 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
break;
case FILE_SORTED_DECLS:
- F.FileSortedDecls = (const DeclID *)Blob.data();
+ F.FileSortedDecls = (const unaligned_decl_id_t *)Blob.data();
F.NumFileSortedDecls = Record[0];
break;
@@ -3655,13 +3678,6 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
std::make_pair(SourceManager::MaxLoadedOffset - F.SLocEntryBaseOffset
- SLocSpaceSize,&F));
- // Initialize the remapping table.
- // Invalid stays invalid.
- F.SLocRemap.insertOrReplace(std::make_pair(0U, 0));
- // This module. Base was 2 when being compiled.
- F.SLocRemap.insertOrReplace(std::make_pair(
- 2U, static_cast<SourceLocation::IntTy>(F.SLocEntryBaseOffset - 2)));
-
TotalNumSLocEntries += F.LocalNumSLocEntries;
break;
}
@@ -3675,8 +3691,8 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
break;
case EXT_VECTOR_DECLS:
- for (unsigned I = 0, N = Record.size(); I != N; ++I)
- ExtVectorDecls.push_back(getGlobalDeclID(F, Record[I]));
+ for (unsigned I = 0, N = Record.size(); I != N; /*in loop*/)
+ ExtVectorDecls.push_back(ReadDeclID(F, Record, I));
break;
case VTABLE_USES:
@@ -3690,18 +3706,14 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
VTableUses.clear();
for (unsigned Idx = 0, N = Record.size(); Idx != N; /* In loop */) {
- VTableUses.push_back(getGlobalDeclID(F, Record[Idx++]));
VTableUses.push_back(
- ReadSourceLocation(F, Record, Idx).getRawEncoding());
- VTableUses.push_back(Record[Idx++]);
+ {ReadDeclID(F, Record, Idx),
+ ReadSourceLocation(F, Record, Idx).getRawEncoding(),
+ (bool)Record[Idx++]});
}
break;
case PENDING_IMPLICIT_INSTANTIATIONS:
- if (PendingInstantiations.size() % 2 != 0)
- return llvm::createStringError(
- std::errc::illegal_byte_sequence,
- "Invalid existing PendingInstantiations");
if (Record.size() % 2 != 0)
return llvm::createStringError(
@@ -3709,9 +3721,9 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
"Invalid PENDING_IMPLICIT_INSTANTIATIONS block");
for (unsigned I = 0, N = Record.size(); I != N; /* in loop */) {
- PendingInstantiations.push_back(getGlobalDeclID(F, Record[I++]));
PendingInstantiations.push_back(
- ReadSourceLocation(F, Record, I).getRawEncoding());
+ {ReadDeclID(F, Record, I),
+ ReadSourceLocation(F, Record, I).getRawEncoding()});
}
break;
@@ -3719,8 +3731,8 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
if (Record.size() != 3)
return llvm::createStringError(std::errc::illegal_byte_sequence,
"Invalid SEMA_DECL_REFS block");
- for (unsigned I = 0, N = Record.size(); I != N; ++I)
- SemaDeclRefs.push_back(getGlobalDeclID(F, Record[I]));
+ for (unsigned I = 0, N = Record.size(); I != N; /*in loop*/)
+ SemaDeclRefs.push_back(ReadDeclID(F, Record, I));
break;
case PPD_ENTITIES_OFFSETS: {
@@ -3778,9 +3790,9 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
return llvm::createStringError(
std::errc::illegal_byte_sequence,
"invalid DECL_UPDATE_OFFSETS block in AST file");
- for (unsigned I = 0, N = Record.size(); I != N; I += 2) {
- GlobalDeclID ID = getGlobalDeclID(F, Record[I]);
- DeclUpdateOffsets[ID].push_back(std::make_pair(&F, Record[I + 1]));
+ for (unsigned I = 0, N = Record.size(); I != N; /*in loop*/) {
+ GlobalDeclID ID = ReadDeclID(F, Record, I);
+ DeclUpdateOffsets[ID].push_back(std::make_pair(&F, Record[I++]));
// If we've already loaded the decl, perform the updates when we finish
// loading this block.
@@ -3790,6 +3802,33 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
}
break;
+ case DELAYED_NAMESPACE_LEXICAL_VISIBLE_RECORD: {
+ if (Record.size() % 3 != 0)
+ return llvm::createStringError(
+ std::errc::illegal_byte_sequence,
+ "invalid DELAYED_NAMESPACE_LEXICAL_VISIBLE_RECORD block in AST "
+ "file");
+ for (unsigned I = 0, N = Record.size(); I != N; /*in loop*/) {
+ GlobalDeclID ID = ReadDeclID(F, Record, I);
+
+ uint64_t BaseOffset = F.DeclsBlockStartOffset;
+ assert(BaseOffset && "Invalid DeclsBlockStartOffset for module file!");
+ uint64_t LocalLexicalOffset = Record[I++];
+ uint64_t LexicalOffset =
+ LocalLexicalOffset ? BaseOffset + LocalLexicalOffset : 0;
+ uint64_t LocalVisibleOffset = Record[I++];
+ uint64_t VisibleOffset =
+ LocalVisibleOffset ? BaseOffset + LocalVisibleOffset : 0;
+
+ DelayedNamespaceOffsetMap[ID] = {LexicalOffset, VisibleOffset};
+
+ assert(!GetExistingDecl(ID) &&
+ "We shouldn't load the namespace in the front of delayed "
+ "namespace lexical and visible block");
+ }
+ break;
+ }
+
case OBJC_CATEGORIES_MAP:
if (F.LocalNumObjCCategoriesInMap != 0)
return llvm::createStringError(
@@ -3808,8 +3847,8 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
// Later tables overwrite earlier ones.
// FIXME: Modules will have trouble with this.
CUDASpecialDeclRefs.clear();
- for (unsigned I = 0, N = Record.size(); I != N; ++I)
- CUDASpecialDeclRefs.push_back(getGlobalDeclID(F, Record[I]));
+ for (unsigned I = 0, N = Record.size(); I != N; /*in loop*/)
+ CUDASpecialDeclRefs.push_back(ReadDeclID(F, Record, I));
break;
case HEADER_SEARCH_TABLE:
@@ -3849,33 +3888,29 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
break;
case TENTATIVE_DEFINITIONS:
- for (unsigned I = 0, N = Record.size(); I != N; ++I)
- TentativeDefinitions.push_back(getGlobalDeclID(F, Record[I]));
+ for (unsigned I = 0, N = Record.size(); I != N; /*in loop*/)
+ TentativeDefinitions.push_back(ReadDeclID(F, Record, I));
break;
case KNOWN_NAMESPACES:
- for (unsigned I = 0, N = Record.size(); I != N; ++I)
- KnownNamespaces.push_back(getGlobalDeclID(F, Record[I]));
+ for (unsigned I = 0, N = Record.size(); I != N; /*in loop*/)
+ KnownNamespaces.push_back(ReadDeclID(F, Record, I));
break;
case UNDEFINED_BUT_USED:
- if (UndefinedButUsed.size() % 2 != 0)
- return llvm::createStringError(std::errc::illegal_byte_sequence,
- "Invalid existing UndefinedButUsed");
-
if (Record.size() % 2 != 0)
return llvm::createStringError(std::errc::illegal_byte_sequence,
"invalid undefined-but-used record");
for (unsigned I = 0, N = Record.size(); I != N; /* in loop */) {
- UndefinedButUsed.push_back(getGlobalDeclID(F, Record[I++]));
UndefinedButUsed.push_back(
- ReadSourceLocation(F, Record, I).getRawEncoding());
+ {ReadDeclID(F, Record, I),
+ ReadSourceLocation(F, Record, I).getRawEncoding()});
}
break;
case DELETE_EXPRS_TO_ANALYZE:
for (unsigned I = 0, N = Record.size(); I != N;) {
- DelayedDeleteExprs.push_back(getGlobalDeclID(F, Record[I++]));
+ DelayedDeleteExprs.push_back(ReadDeclID(F, Record, I).getRawValue());
const uint64_t Count = Record[I++];
DelayedDeleteExprs.push_back(Count);
for (uint64_t C = 0; C < Count; ++C) {
@@ -3886,6 +3921,13 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
}
break;
+ case VTABLES_TO_EMIT:
+ if (F.Kind == MK_MainFile ||
+ getContext().getLangOpts().BuildingPCHWithObjectFile)
+ for (unsigned I = 0, N = Record.size(); I != N;)
+ VTablesToEmit.push_back(ReadDeclID(F, Record, I));
+ break;
+
case IMPORTED_MODULES:
if (!F.isModule()) {
// If we aren't loading a module (which has its own exports), make
@@ -3958,16 +4000,15 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
break;
case UNUSED_LOCAL_TYPEDEF_NAME_CANDIDATES:
- for (unsigned I = 0, N = Record.size(); I != N; ++I)
- UnusedLocalTypedefNameCandidates.push_back(
- getGlobalDeclID(F, Record[I]));
+ for (unsigned I = 0, N = Record.size(); I != N; /*in loop*/)
+ UnusedLocalTypedefNameCandidates.push_back(ReadDeclID(F, Record, I));
break;
case CUDA_PRAGMA_FORCE_HOST_DEVICE_DEPTH:
if (Record.size() != 1)
return llvm::createStringError(std::errc::illegal_byte_sequence,
"invalid cuda pragma options record");
- ForceCUDAHostDeviceDepth = Record[0];
+ ForceHostDeviceDepth = Record[0];
break;
case ALIGN_PACK_PRAGMA_OPTIONS: {
@@ -4015,8 +4056,8 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
}
case DECLS_TO_CHECK_FOR_DEFERRED_DIAGS:
- for (unsigned I = 0, N = Record.size(); I != N; ++I)
- DeclsToCheckForDeferredDiags.insert(getGlobalDeclID(F, Record[I]));
+ for (unsigned I = 0, N = Record.size(); I != N; /*in loop*/)
+ DeclsToCheckForDeferredDiags.insert(ReadDeclID(F, Record, I));
break;
}
}
@@ -4030,25 +4071,14 @@ void ASTReader::ReadModuleOffsetMap(ModuleFile &F) const {
const unsigned char *DataEnd = Data + F.ModuleOffsetMap.size();
F.ModuleOffsetMap = StringRef();
- // If we see this entry before SOURCE_LOCATION_OFFSETS, add placeholders.
- if (F.SLocRemap.find(0) == F.SLocRemap.end()) {
- F.SLocRemap.insert(std::make_pair(0U, 0));
- F.SLocRemap.insert(std::make_pair(2U, 1));
- }
-
- // Continuous range maps we may be updating in our module.
- using SLocRemapBuilder =
- ContinuousRangeMap<SourceLocation::UIntTy, SourceLocation::IntTy,
- 2>::Builder;
using RemapBuilder = ContinuousRangeMap<uint32_t, int, 2>::Builder;
- SLocRemapBuilder SLocRemap(F.SLocRemap);
- RemapBuilder IdentifierRemap(F.IdentifierRemap);
RemapBuilder MacroRemap(F.MacroRemap);
RemapBuilder PreprocessedEntityRemap(F.PreprocessedEntityRemap);
RemapBuilder SubmoduleRemap(F.SubmoduleRemap);
RemapBuilder SelectorRemap(F.SelectorRemap);
- RemapBuilder DeclRemap(F.DeclRemap);
- RemapBuilder TypeRemap(F.TypeRemap);
+
+ auto &ImportedModuleVector = F.TransitiveImports;
+ assert(ImportedModuleVector.empty());
while (Data < DataEnd) {
// FIXME: Looking up dependency modules by filename is horrible. Let's
@@ -4056,9 +4086,8 @@ void ASTReader::ReadModuleOffsetMap(ModuleFile &F) const {
// how it goes...
using namespace llvm::support;
ModuleKind Kind = static_cast<ModuleKind>(
- endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data));
- uint16_t Len =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint8_t, llvm::endianness::little>(Data));
+ uint16_t Len = endian::readNext<uint16_t, llvm::endianness::little>(Data);
StringRef Name = StringRef((const char*)Data, Len);
Data += Len;
ModuleFile *OM = (Kind == MK_PrebuiltModule || Kind == MK_ExplicitModule ||
@@ -4066,29 +4095,22 @@ void ASTReader::ReadModuleOffsetMap(ModuleFile &F) const {
? ModuleMgr.lookupByModuleName(Name)
: ModuleMgr.lookupByFileName(Name));
if (!OM) {
- std::string Msg =
- "SourceLocation remap refers to unknown module, cannot find ";
+ std::string Msg = "refers to unknown module, cannot find ";
Msg.append(std::string(Name));
Error(Msg);
return;
}
- SourceLocation::UIntTy SLocOffset =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
- uint32_t IdentifierIDOffset =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ ImportedModuleVector.push_back(OM);
+
uint32_t MacroIDOffset =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint32_t, llvm::endianness::little>(Data);
uint32_t PreprocessedEntityIDOffset =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint32_t, llvm::endianness::little>(Data);
uint32_t SubmoduleIDOffset =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint32_t, llvm::endianness::little>(Data);
uint32_t SelectorIDOffset =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
- uint32_t DeclIDOffset =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
- uint32_t TypeIndexOffset =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ endian::readNext<uint32_t, llvm::endianness::little>(Data);
auto mapOffset = [&](uint32_t Offset, uint32_t BaseOffset,
RemapBuilder &Remap) {
@@ -4098,24 +4120,11 @@ void ASTReader::ReadModuleOffsetMap(ModuleFile &F) const {
static_cast<int>(BaseOffset - Offset)));
};
- constexpr SourceLocation::UIntTy SLocNone =
- std::numeric_limits<SourceLocation::UIntTy>::max();
- if (SLocOffset != SLocNone)
- SLocRemap.insert(std::make_pair(
- SLocOffset, static_cast<SourceLocation::IntTy>(
- OM->SLocEntryBaseOffset - SLocOffset)));
-
- mapOffset(IdentifierIDOffset, OM->BaseIdentifierID, IdentifierRemap);
mapOffset(MacroIDOffset, OM->BaseMacroID, MacroRemap);
mapOffset(PreprocessedEntityIDOffset, OM->BasePreprocessedEntityID,
PreprocessedEntityRemap);
mapOffset(SubmoduleIDOffset, OM->BaseSubmoduleID, SubmoduleRemap);
mapOffset(SelectorIDOffset, OM->BaseSelectorID, SelectorRemap);
- mapOffset(DeclIDOffset, OM->BaseDeclID, DeclRemap);
- mapOffset(TypeIndexOffset, OM->BaseTypeIndex, TypeRemap);
-
- // Global -> local mappings.
- F.GlobalToLocalDeclIDs[OM] = DeclIDOffset;
}
}
@@ -4229,9 +4238,9 @@ ASTReader::ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
/// Move the given method to the back of the global list of methods.
static void moveMethodToBackOfGlobalList(Sema &S, ObjCMethodDecl *Method) {
// Find the entry for this selector in the method pool.
- Sema::GlobalMethodPool::iterator Known
- = S.MethodPool.find(Method->getSelector());
- if (Known == S.MethodPool.end())
+ SemaObjC::GlobalMethodPool::iterator Known =
+ S.ObjC().MethodPool.find(Method->getSelector());
+ if (Known == S.ObjC().MethodPool.end())
return;
// Retrieve the appropriate method list.
@@ -4641,8 +4650,7 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName, ModuleKind Type,
if (ContextObj) {
for (unsigned I = 0, N = ObjCClassesLoaded.size(); I != N; ++I) {
loadObjCCategories(ObjCClassesLoaded[I]->getGlobalID(),
- ObjCClassesLoaded[I],
- PreviousGeneration);
+ ObjCClassesLoaded[I], PreviousGeneration);
}
}
@@ -4977,7 +4985,7 @@ ASTReader::ASTReadResult ASTReader::readUnhashedControlBlockImpl(
}
case HEADER_SEARCH_PATHS: {
bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
- if (!AllowCompatibleConfigurationMismatch &&
+ if (Listener && !AllowCompatibleConfigurationMismatch &&
ParseHeaderSearchPaths(Record, Complain, *Listener))
Result = ConfigurationMismatch;
break;
@@ -4992,15 +5000,12 @@ ASTReader::ASTReadResult ASTReader::readUnhashedControlBlockImpl(
Record.begin(), Record.end());
break;
case HEADER_SEARCH_ENTRY_USAGE:
- if (!F)
- break;
- unsigned Count = Record[0];
- const char *Byte = Blob.data();
- F->SearchPathUsage = llvm::BitVector(Count, false);
- for (unsigned I = 0; I < Count; ++Byte)
- for (unsigned Bit = 0; Bit < 8 && I < Count; ++Bit, ++I)
- if (*Byte & (1 << Bit))
- F->SearchPathUsage[I] = true;
+ if (F)
+ F->SearchPathUsage = ReadBitVector(Record, Blob);
+ break;
+ case VFS_USAGE:
+ if (F)
+ F->VFSUsage = ReadBitVector(Record, Blob);
break;
}
}
@@ -5089,20 +5094,21 @@ void ASTReader::InitializeContext() {
// If there's a listener, notify them that we "read" the translation unit.
if (DeserializationListener)
- DeserializationListener->DeclRead(PREDEF_DECL_TRANSLATION_UNIT_ID,
- Context.getTranslationUnitDecl());
+ DeserializationListener->DeclRead(
+ GlobalDeclID(PREDEF_DECL_TRANSLATION_UNIT_ID),
+ Context.getTranslationUnitDecl());
// FIXME: Find a better way to deal with collisions between these
// built-in types. Right now, we just ignore the problem.
// Load the special types.
if (SpecialTypes.size() >= NumSpecialTypeIDs) {
- if (unsigned String = SpecialTypes[SPECIAL_TYPE_CF_CONSTANT_STRING]) {
+ if (TypeID String = SpecialTypes[SPECIAL_TYPE_CF_CONSTANT_STRING]) {
if (!Context.CFConstantStringTypeDecl)
Context.setCFConstantStringType(GetType(String));
}
- if (unsigned File = SpecialTypes[SPECIAL_TYPE_FILE]) {
+ if (TypeID File = SpecialTypes[SPECIAL_TYPE_FILE]) {
QualType FileType = GetType(File);
if (FileType.isNull()) {
Error("FILE type is NULL");
@@ -5123,7 +5129,7 @@ void ASTReader::InitializeContext() {
}
}
- if (unsigned Jmp_buf = SpecialTypes[SPECIAL_TYPE_JMP_BUF]) {
+ if (TypeID Jmp_buf = SpecialTypes[SPECIAL_TYPE_JMP_BUF]) {
QualType Jmp_bufType = GetType(Jmp_buf);
if (Jmp_bufType.isNull()) {
Error("jmp_buf type is NULL");
@@ -5144,7 +5150,7 @@ void ASTReader::InitializeContext() {
}
}
- if (unsigned Sigjmp_buf = SpecialTypes[SPECIAL_TYPE_SIGJMP_BUF]) {
+ if (TypeID Sigjmp_buf = SpecialTypes[SPECIAL_TYPE_SIGJMP_BUF]) {
QualType Sigjmp_bufType = GetType(Sigjmp_buf);
if (Sigjmp_bufType.isNull()) {
Error("sigjmp_buf type is NULL");
@@ -5162,25 +5168,24 @@ void ASTReader::InitializeContext() {
}
}
- if (unsigned ObjCIdRedef
- = SpecialTypes[SPECIAL_TYPE_OBJC_ID_REDEFINITION]) {
+ if (TypeID ObjCIdRedef = SpecialTypes[SPECIAL_TYPE_OBJC_ID_REDEFINITION]) {
if (Context.ObjCIdRedefinitionType.isNull())
Context.ObjCIdRedefinitionType = GetType(ObjCIdRedef);
}
- if (unsigned ObjCClassRedef
- = SpecialTypes[SPECIAL_TYPE_OBJC_CLASS_REDEFINITION]) {
+ if (TypeID ObjCClassRedef =
+ SpecialTypes[SPECIAL_TYPE_OBJC_CLASS_REDEFINITION]) {
if (Context.ObjCClassRedefinitionType.isNull())
Context.ObjCClassRedefinitionType = GetType(ObjCClassRedef);
}
- if (unsigned ObjCSelRedef
- = SpecialTypes[SPECIAL_TYPE_OBJC_SEL_REDEFINITION]) {
+ if (TypeID ObjCSelRedef =
+ SpecialTypes[SPECIAL_TYPE_OBJC_SEL_REDEFINITION]) {
if (Context.ObjCSelRedefinitionType.isNull())
Context.ObjCSelRedefinitionType = GetType(ObjCSelRedef);
}
- if (unsigned Ucontext_t = SpecialTypes[SPECIAL_TYPE_UCONTEXT_T]) {
+ if (TypeID Ucontext_t = SpecialTypes[SPECIAL_TYPE_UCONTEXT_T]) {
QualType Ucontext_tType = GetType(Ucontext_t);
if (Ucontext_tType.isNull()) {
Error("ucontext_t type is NULL");
@@ -5376,9 +5381,9 @@ namespace {
bool ReadHeaderSearchOptions(const HeaderSearchOptions &HSOpts,
StringRef SpecificModuleCachePath,
bool Complain) override {
- return checkHeaderSearchOptions(HSOpts, SpecificModuleCachePath,
- ExistingModuleCachePath, nullptr,
- ExistingLangOpts, ExistingPPOpts);
+ return checkModuleCachePath(
+ FileMgr.getVirtualFileSystem(), SpecificModuleCachePath,
+ ExistingModuleCachePath, nullptr, ExistingLangOpts, ExistingPPOpts);
}
bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
@@ -5398,7 +5403,8 @@ bool ASTReader::readASTFileControlBlock(
StringRef Filename, FileManager &FileMgr,
const InMemoryModuleCache &ModuleCache,
const PCHContainerReader &PCHContainerRdr, bool FindModuleFileExtensions,
- ASTReaderListener &Listener, bool ValidateDiagnosticOptions) {
+ ASTReaderListener &Listener, bool ValidateDiagnosticOptions,
+ unsigned ClientLoadCapabilities) {
// Open the AST file.
std::unique_ptr<llvm::MemoryBuffer> OwnedBuffer;
llvm::MemoryBuffer *Buffer = ModuleCache.lookupPCM(Filename);
@@ -5453,7 +5459,7 @@ bool ASTReader::readASTFileControlBlock(
switch (Entry.ID) {
case OPTIONS_BLOCK_ID: {
std::string IgnoredSuggestedPredefines;
- if (ReadOptionsBlock(Stream, ARR_ConfigurationMismatch | ARR_OutOfDate,
+ if (ReadOptionsBlock(Stream, ClientLoadCapabilities,
/*AllowCompatibleConfigurationMismatch*/ false,
Listener, IgnoredSuggestedPredefines) != Success)
return true;
@@ -5679,7 +5685,7 @@ bool ASTReader::readASTFileControlBlock(
// Scan for the UNHASHED_CONTROL_BLOCK_ID block.
if (readUnhashedControlBlockImpl(
- nullptr, Bytes, ARR_ConfigurationMismatch | ARR_OutOfDate,
+ nullptr, Bytes, ClientLoadCapabilities,
/*AllowCompatibleConfigurationMismatch*/ false, &Listener,
ValidateDiagnosticOptions) != Success)
return true;
@@ -5997,9 +6003,9 @@ llvm::Error ASTReader::ReadSubmoduleBlock(ModuleFile &F,
case SUBMODULE_INITIALIZERS: {
if (!ContextObj)
break;
- SmallVector<uint32_t, 16> Inits;
- for (auto &ID : Record)
- Inits.push_back(getGlobalDeclID(F, ID));
+ SmallVector<GlobalDeclID, 16> Inits;
+ for (unsigned I = 0; I < Record.size(); /*in loop*/)
+ Inits.push_back(ReadDeclID(F, Record, I));
ContextObj->addLazyModuleInitializers(CurrentModule, Inits);
break;
}
@@ -6242,8 +6248,8 @@ SourceRange ASTReader::ReadSkippedRange(unsigned GlobalIndex) {
unsigned LocalIndex = GlobalIndex - M->BasePreprocessedSkippedRangeID;
assert(LocalIndex < M->NumPreprocessedSkippedRanges);
PPSkippedRange RawRange = M->PreprocessedSkippedRangeOffsets[LocalIndex];
- SourceRange Range(TranslateSourceLocation(*M, RawRange.getBegin()),
- TranslateSourceLocation(*M, RawRange.getEnd()));
+ SourceRange Range(ReadSourceLocation(*M, RawRange.getBegin()),
+ ReadSourceLocation(*M, RawRange.getEnd()));
assert(Range.isValid());
return Range;
}
@@ -6262,7 +6268,7 @@ PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
SavedStreamPosition SavedPosition(M.PreprocessorDetailCursor);
if (llvm::Error Err = M.PreprocessorDetailCursor.JumpToBit(
- M.MacroOffsetsBase + PPOffs.BitOffset)) {
+ M.MacroOffsetsBase + PPOffs.getOffset())) {
Error(std::move(Err));
return nullptr;
}
@@ -6279,8 +6285,8 @@ PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
return nullptr;
// Read the record.
- SourceRange Range(TranslateSourceLocation(M, PPOffs.getBegin()),
- TranslateSourceLocation(M, PPOffs.getEnd()));
+ SourceRange Range(ReadSourceLocation(M, PPOffs.getBegin()),
+ ReadSourceLocation(M, PPOffs.getEnd()));
PreprocessingRecord &PPRec = *PP.getPreprocessingRecord();
StringRef Blob;
RecordData Record;
@@ -6392,7 +6398,7 @@ struct PPEntityComp {
}
SourceLocation getLoc(const PPEntityOffset &PPE) const {
- return Reader.TranslateSourceLocation(M, PPE.getBegin());
+ return Reader.ReadSourceLocation(M, PPE.getBegin());
}
};
@@ -6436,7 +6442,7 @@ PreprocessedEntityID ASTReader::findPreprocessedEntity(SourceLocation Loc,
PPI = First;
std::advance(PPI, Half);
if (SourceMgr.isBeforeInTranslationUnit(
- TranslateSourceLocation(M, PPI->getEnd()), Loc)) {
+ ReadSourceLocation(M, PPI->getEnd()), Loc)) {
First = PPI;
++First;
Count = Count - Half - 1;
@@ -6477,7 +6483,7 @@ std::optional<bool> ASTReader::isPreprocessedEntityInFileID(unsigned Index,
unsigned LocalIndex = PPInfo.second;
const PPEntityOffset &PPOffs = M.PreprocessedEntityOffsets[LocalIndex];
- SourceLocation Loc = TranslateSourceLocation(M, PPOffs.getBegin());
+ SourceLocation Loc = ReadSourceLocation(M, PPOffs.getBegin());
if (Loc.isInvalid())
return false;
@@ -6621,17 +6627,15 @@ void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
while (NumLocations--) {
assert(Idx < Record.size() &&
"Invalid data, missing pragma diagnostic states");
- SourceLocation Loc = ReadSourceLocation(F, Record[Idx++]);
- auto IDAndOffset = SourceMgr.getDecomposedLoc(Loc);
- assert(IDAndOffset.first.isValid() && "invalid FileID for transition");
- assert(IDAndOffset.second == 0 && "not a start location for a FileID");
+ FileID FID = ReadFileID(F, Record, Idx);
+ assert(FID.isValid() && "invalid FileID for transition");
unsigned Transitions = Record[Idx++];
// Note that we don't need to set up Parent/ParentOffset here, because
// we won't be changing the diagnostic state within imported FileIDs
// (other than perhaps appending to the main source file, which has no
// parent).
- auto &F = Diag.DiagStatesByLoc.Files[IDAndOffset.first];
+ auto &F = Diag.DiagStatesByLoc.Files[FID];
F.StateTransitions.reserve(F.StateTransitions.size() + Transitions);
for (unsigned I = 0; I != Transitions; ++I) {
unsigned Offset = Record[Idx++];
@@ -6666,13 +6670,10 @@ void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
}
/// Get the correct cursor and offset for loading a type.
-ASTReader::RecordLocation ASTReader::TypeCursorForIndex(unsigned Index) {
- GlobalTypeMapType::iterator I = GlobalTypeMap.find(Index);
- assert(I != GlobalTypeMap.end() && "Corrupted global type map");
- ModuleFile *M = I->second;
- return RecordLocation(
- M, M->TypeOffsets[Index - M->BaseTypeIndex].getBitOffset() +
- M->DeclsBlockStartOffset);
+ASTReader::RecordLocation ASTReader::TypeCursorForIndex(TypeID ID) {
+ auto [M, Index] = translateTypeIDToIndex(ID);
+ return RecordLocation(M, M->TypeOffsets[Index - M->BaseTypeIndex].get() +
+ M->DeclsBlockStartOffset);
}
static std::optional<Type::TypeClass> getTypeClassForCode(TypeCode code) {
@@ -6691,10 +6692,10 @@ static std::optional<Type::TypeClass> getTypeClassForCode(TypeCode code) {
/// routine actually reads the record corresponding to the type at the given
/// location. It is a helper routine for GetType, which deals with reading type
/// IDs.
-QualType ASTReader::readTypeRecord(unsigned Index) {
+QualType ASTReader::readTypeRecord(TypeID ID) {
assert(ContextObj && "reading type with no AST context");
ASTContext &Context = *ContextObj;
- RecordLocation Loc = TypeCursorForIndex(Index);
+ RecordLocation Loc = TypeCursorForIndex(ID);
BitstreamCursor &DeclsCursor = Loc.F->DeclsCursor;
// Keep track of where we are in the stream, then jump back there
@@ -6809,6 +6810,10 @@ void TypeLocReader::VisitAdjustedTypeLoc(AdjustedTypeLoc TL) {
// nothing to do
}
+void TypeLocReader::VisitArrayParameterTypeLoc(ArrayParameterTypeLoc TL) {
+ // nothing to do
+}
+
void TypeLocReader::VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc TL) {
TL.setExpansionLoc(readSourceLocation());
}
@@ -6946,6 +6951,10 @@ void TypeLocReader::VisitDecltypeTypeLoc(DecltypeTypeLoc TL) {
TL.setRParenLoc(readSourceLocation());
}
+void TypeLocReader::VisitPackIndexingTypeLoc(PackIndexingTypeLoc TL) {
+ TL.setEllipsisLoc(readSourceLocation());
+}
+
void TypeLocReader::VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
TL.setKWLoc(readSourceLocation());
TL.setLParenLoc(readSourceLocation());
@@ -6990,6 +6999,10 @@ void TypeLocReader::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
TL.setAttr(ReadAttr());
}
+void TypeLocReader::VisitCountAttributedTypeLoc(CountAttributedTypeLoc TL) {
+ // Nothing to do
+}
+
void TypeLocReader::VisitBTFTagAttributedTypeLoc(BTFTagAttributedTypeLoc TL) {
// Nothing to do.
}
@@ -7123,15 +7136,44 @@ TypeSourceInfo *ASTRecordReader::readTypeSourceInfo() {
return TInfo;
}
+static unsigned getIndexForTypeID(serialization::TypeID ID) {
+ return (ID & llvm::maskTrailingOnes<TypeID>(32)) >> Qualifiers::FastWidth;
+}
+
+static unsigned getModuleFileIndexForTypeID(serialization::TypeID ID) {
+ return ID >> 32;
+}
+
+static bool isPredefinedType(serialization::TypeID ID) {
+ // We don't need to erase the higher bits since if these bits are not 0,
+ // it must be larger than NUM_PREDEF_TYPE_IDS.
+ return (ID >> Qualifiers::FastWidth) < NUM_PREDEF_TYPE_IDS;
+}
+
+std::pair<ModuleFile *, unsigned>
+ASTReader::translateTypeIDToIndex(serialization::TypeID ID) const {
+ assert(!isPredefinedType(ID) &&
+ "Predefined type shouldn't be in TypesLoaded");
+ unsigned ModuleFileIndex = getModuleFileIndexForTypeID(ID);
+ assert(ModuleFileIndex && "Untranslated Local Decl?");
+
+ ModuleFile *OwningModuleFile = &getModuleManager()[ModuleFileIndex - 1];
+ assert(OwningModuleFile &&
+ "untranslated type ID or local type ID shouldn't be in TypesLoaded");
+
+ return {OwningModuleFile,
+ OwningModuleFile->BaseTypeIndex + getIndexForTypeID(ID)};
+}
+
QualType ASTReader::GetType(TypeID ID) {
assert(ContextObj && "reading type with no AST context");
ASTContext &Context = *ContextObj;
unsigned FastQuals = ID & Qualifiers::FastMask;
- unsigned Index = ID >> Qualifiers::FastWidth;
- if (Index < NUM_PREDEF_TYPE_IDS) {
+ if (isPredefinedType(ID)) {
QualType T;
+ unsigned Index = getIndexForTypeID(ID);
switch ((PredefinedTypeIDs)Index) {
case PREDEF_TYPE_LAST_ID:
// We should never use this one.
@@ -7288,6 +7330,9 @@ QualType ASTReader::GetType(TypeID ID) {
case PREDEF_TYPE_OVERLOAD_ID:
T = Context.OverloadTy;
break;
+ case PREDEF_TYPE_UNRESOLVED_TEMPLATE:
+ T = Context.UnresolvedTemplateTy;
+ break;
case PREDEF_TYPE_BOUND_MEMBER:
T = Context.BoundMemberTy;
break;
@@ -7361,11 +7406,11 @@ QualType ASTReader::GetType(TypeID ID) {
case PREDEF_TYPE_INCOMPLETE_MATRIX_IDX:
T = Context.IncompleteMatrixIdxTy;
break;
- case PREDEF_TYPE_OMP_ARRAY_SECTION:
- T = Context.OMPArraySectionTy;
+ case PREDEF_TYPE_ARRAY_SECTION:
+ T = Context.ArraySectionTy;
break;
case PREDEF_TYPE_OMP_ARRAY_SHAPING:
- T = Context.OMPArraySectionTy;
+ T = Context.OMPArrayShapingTy;
break;
case PREDEF_TYPE_OMP_ITERATOR:
T = Context.OMPIteratorTy;
@@ -7390,16 +7435,22 @@ QualType ASTReader::GetType(TypeID ID) {
T = Context.SingletonId; \
break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
+#define AMDGPU_TYPE(Name, Id, SingletonId) \
+ case PREDEF_TYPE_##Id##_ID: \
+ T = Context.SingletonId; \
+ break;
+#include "clang/Basic/AMDGPUTypes.def"
}
assert(!T.isNull() && "Unknown predefined type");
return T.withFastQualifiers(FastQuals);
}
- Index -= NUM_PREDEF_TYPE_IDS;
+ unsigned Index = translateTypeIDToIndex(ID).second;
+
assert(Index < TypesLoaded.size() && "Type index out-of-range");
if (TypesLoaded[Index].isNull()) {
- TypesLoaded[Index] = readTypeRecord(Index);
+ TypesLoaded[Index] = readTypeRecord(ID);
if (TypesLoaded[Index].isNull())
return QualType();
@@ -7412,27 +7463,28 @@ QualType ASTReader::GetType(TypeID ID) {
return TypesLoaded[Index].withFastQualifiers(FastQuals);
}
-QualType ASTReader::getLocalType(ModuleFile &F, unsigned LocalID) {
+QualType ASTReader::getLocalType(ModuleFile &F, LocalTypeID LocalID) {
return GetType(getGlobalTypeID(F, LocalID));
}
-serialization::TypeID
-ASTReader::getGlobalTypeID(ModuleFile &F, unsigned LocalID) const {
- unsigned FastQuals = LocalID & Qualifiers::FastMask;
- unsigned LocalIndex = LocalID >> Qualifiers::FastWidth;
-
- if (LocalIndex < NUM_PREDEF_TYPE_IDS)
+serialization::TypeID ASTReader::getGlobalTypeID(ModuleFile &F,
+ LocalTypeID LocalID) const {
+ if (isPredefinedType(LocalID))
return LocalID;
if (!F.ModuleOffsetMap.empty())
ReadModuleOffsetMap(F);
- ContinuousRangeMap<uint32_t, int, 2>::iterator I
- = F.TypeRemap.find(LocalIndex - NUM_PREDEF_TYPE_IDS);
- assert(I != F.TypeRemap.end() && "Invalid index into type index remap");
+ unsigned ModuleFileIndex = getModuleFileIndexForTypeID(LocalID);
+ LocalID &= llvm::maskTrailingOnes<TypeID>(32);
+
+ if (ModuleFileIndex == 0)
+ LocalID -= NUM_PREDEF_TYPE_IDS << Qualifiers::FastWidth;
- unsigned GlobalIndex = LocalIndex + I->second;
- return (GlobalIndex << Qualifiers::FastWidth) | FastQuals;
+ ModuleFile &MF =
+ ModuleFileIndex ? *F.TransitiveImports[ModuleFileIndex - 1] : F;
+ ModuleFileIndex = MF.Index + 1;
+ return ((uint64_t)ModuleFileIndex << 32) | LocalID;
}
TemplateArgumentLocInfo
@@ -7494,9 +7546,7 @@ ASTRecordReader::readASTTemplateArgumentListInfo() {
return ASTTemplateArgumentListInfo::Create(getContext(), Result);
}
-Decl *ASTReader::GetExternalDecl(uint32_t ID) {
- return GetDecl(ID);
-}
+Decl *ASTReader::GetExternalDecl(GlobalDeclID ID) { return GetDecl(ID); }
void ASTReader::CompleteRedeclChain(const Decl *D) {
if (NumCurrentElementsDeserializing) {
@@ -7629,51 +7679,61 @@ CXXBaseSpecifier *ASTReader::GetExternalCXXBaseSpecifiers(uint64_t Offset) {
return Bases;
}
-serialization::DeclID
-ASTReader::getGlobalDeclID(ModuleFile &F, LocalDeclID LocalID) const {
+GlobalDeclID ASTReader::getGlobalDeclID(ModuleFile &F,
+ LocalDeclID LocalID) const {
if (LocalID < NUM_PREDEF_DECL_IDS)
- return LocalID;
+ return GlobalDeclID(LocalID.getRawValue());
+
+ unsigned OwningModuleFileIndex = LocalID.getModuleFileIndex();
+ DeclID ID = LocalID.getLocalDeclIndex();
if (!F.ModuleOffsetMap.empty())
ReadModuleOffsetMap(F);
- ContinuousRangeMap<uint32_t, int, 2>::iterator I
- = F.DeclRemap.find(LocalID - NUM_PREDEF_DECL_IDS);
- assert(I != F.DeclRemap.end() && "Invalid index into decl index remap");
+ ModuleFile *OwningModuleFile =
+ OwningModuleFileIndex == 0
+ ? &F
+ : F.TransitiveImports[OwningModuleFileIndex - 1];
- return LocalID + I->second;
+ if (OwningModuleFileIndex == 0)
+ ID -= NUM_PREDEF_DECL_IDS;
+
+ uint64_t NewModuleFileIndex = OwningModuleFile->Index + 1;
+ return GlobalDeclID(NewModuleFileIndex, ID);
}
-bool ASTReader::isDeclIDFromModule(serialization::GlobalDeclID ID,
- ModuleFile &M) const {
+bool ASTReader::isDeclIDFromModule(GlobalDeclID ID, ModuleFile &M) const {
// Predefined decls aren't from any module.
if (ID < NUM_PREDEF_DECL_IDS)
return false;
- return ID - NUM_PREDEF_DECL_IDS >= M.BaseDeclID &&
- ID - NUM_PREDEF_DECL_IDS < M.BaseDeclID + M.LocalNumDecls;
+ unsigned ModuleFileIndex = ID.getModuleFileIndex();
+ return M.Index == ModuleFileIndex - 1;
+}
+
+ModuleFile *ASTReader::getOwningModuleFile(GlobalDeclID ID) const {
+ // Predefined decls aren't from any module.
+ if (ID < NUM_PREDEF_DECL_IDS)
+ return nullptr;
+
+ uint64_t ModuleFileIndex = ID.getModuleFileIndex();
+ assert(ModuleFileIndex && "Untranslated Local Decl?");
+
+ return &getModuleManager()[ModuleFileIndex - 1];
}
-ModuleFile *ASTReader::getOwningModuleFile(const Decl *D) {
+ModuleFile *ASTReader::getOwningModuleFile(const Decl *D) const {
if (!D->isFromASTFile())
return nullptr;
- GlobalDeclMapType::const_iterator I = GlobalDeclMap.find(D->getGlobalID());
- assert(I != GlobalDeclMap.end() && "Corrupted global declaration map");
- return I->second;
+
+ return getOwningModuleFile(D->getGlobalID());
}
SourceLocation ASTReader::getSourceLocationForDeclID(GlobalDeclID ID) {
if (ID < NUM_PREDEF_DECL_IDS)
return SourceLocation();
- unsigned Index = ID - NUM_PREDEF_DECL_IDS;
-
- if (Index > DeclsLoaded.size()) {
- Error("declaration ID out-of-range for AST file");
- return SourceLocation();
- }
-
- if (Decl *D = DeclsLoaded[Index])
+ if (Decl *D = GetExistingDecl(ID))
return D->getLocation();
SourceLocation Loc;
@@ -7681,7 +7741,10 @@ SourceLocation ASTReader::getSourceLocationForDeclID(GlobalDeclID ID) {
return Loc;
}
-static Decl *getPredefinedDecl(ASTContext &Context, PredefinedDeclIDs ID) {
+Decl *ASTReader::getPredefinedDecl(PredefinedDeclIDs ID) {
+ assert(ContextObj && "reading predefined decl without AST context");
+ ASTContext &Context = *ContextObj;
+ Decl *NewLoaded = nullptr;
switch (ID) {
case PREDEF_DECL_NULL_ID:
return nullptr;
@@ -7690,60 +7753,123 @@ static Decl *getPredefinedDecl(ASTContext &Context, PredefinedDeclIDs ID) {
return Context.getTranslationUnitDecl();
case PREDEF_DECL_OBJC_ID_ID:
- return Context.getObjCIdDecl();
+ if (Context.ObjCIdDecl)
+ return Context.ObjCIdDecl;
+ NewLoaded = Context.getObjCIdDecl();
+ break;
case PREDEF_DECL_OBJC_SEL_ID:
- return Context.getObjCSelDecl();
+ if (Context.ObjCSelDecl)
+ return Context.ObjCSelDecl;
+ NewLoaded = Context.getObjCSelDecl();
+ break;
case PREDEF_DECL_OBJC_CLASS_ID:
- return Context.getObjCClassDecl();
+ if (Context.ObjCClassDecl)
+ return Context.ObjCClassDecl;
+ NewLoaded = Context.getObjCClassDecl();
+ break;
case PREDEF_DECL_OBJC_PROTOCOL_ID:
- return Context.getObjCProtocolDecl();
+ if (Context.ObjCProtocolClassDecl)
+ return Context.ObjCProtocolClassDecl;
+ NewLoaded = Context.getObjCProtocolDecl();
+ break;
case PREDEF_DECL_INT_128_ID:
- return Context.getInt128Decl();
+ if (Context.Int128Decl)
+ return Context.Int128Decl;
+ NewLoaded = Context.getInt128Decl();
+ break;
case PREDEF_DECL_UNSIGNED_INT_128_ID:
- return Context.getUInt128Decl();
+ if (Context.UInt128Decl)
+ return Context.UInt128Decl;
+ NewLoaded = Context.getUInt128Decl();
+ break;
case PREDEF_DECL_OBJC_INSTANCETYPE_ID:
- return Context.getObjCInstanceTypeDecl();
+ if (Context.ObjCInstanceTypeDecl)
+ return Context.ObjCInstanceTypeDecl;
+ NewLoaded = Context.getObjCInstanceTypeDecl();
+ break;
case PREDEF_DECL_BUILTIN_VA_LIST_ID:
- return Context.getBuiltinVaListDecl();
+ if (Context.BuiltinVaListDecl)
+ return Context.BuiltinVaListDecl;
+ NewLoaded = Context.getBuiltinVaListDecl();
+ break;
case PREDEF_DECL_VA_LIST_TAG:
- return Context.getVaListTagDecl();
+ if (Context.VaListTagDecl)
+ return Context.VaListTagDecl;
+ NewLoaded = Context.getVaListTagDecl();
+ break;
case PREDEF_DECL_BUILTIN_MS_VA_LIST_ID:
- return Context.getBuiltinMSVaListDecl();
+ if (Context.BuiltinMSVaListDecl)
+ return Context.BuiltinMSVaListDecl;
+ NewLoaded = Context.getBuiltinMSVaListDecl();
+ break;
case PREDEF_DECL_BUILTIN_MS_GUID_ID:
+ // ASTContext::getMSGuidTagDecl won't create MSGuidTagDecl conditionally.
return Context.getMSGuidTagDecl();
case PREDEF_DECL_EXTERN_C_CONTEXT_ID:
- return Context.getExternCContextDecl();
+ if (Context.ExternCContext)
+ return Context.ExternCContext;
+ NewLoaded = Context.getExternCContextDecl();
+ break;
case PREDEF_DECL_MAKE_INTEGER_SEQ_ID:
- return Context.getMakeIntegerSeqDecl();
+ if (Context.MakeIntegerSeqDecl)
+ return Context.MakeIntegerSeqDecl;
+ NewLoaded = Context.getMakeIntegerSeqDecl();
+ break;
case PREDEF_DECL_CF_CONSTANT_STRING_ID:
- return Context.getCFConstantStringDecl();
+ if (Context.CFConstantStringTypeDecl)
+ return Context.CFConstantStringTypeDecl;
+ NewLoaded = Context.getCFConstantStringDecl();
+ break;
case PREDEF_DECL_CF_CONSTANT_STRING_TAG_ID:
- return Context.getCFConstantStringTagDecl();
+ if (Context.CFConstantStringTagDecl)
+ return Context.CFConstantStringTagDecl;
+ NewLoaded = Context.getCFConstantStringTagDecl();
+ break;
case PREDEF_DECL_TYPE_PACK_ELEMENT_ID:
- return Context.getTypePackElementDecl();
+ if (Context.TypePackElementDecl)
+ return Context.TypePackElementDecl;
+ NewLoaded = Context.getTypePackElementDecl();
+ break;
+ }
+
+ assert(NewLoaded && "Failed to load predefined decl?");
+
+ if (DeserializationListener)
+ DeserializationListener->PredefinedDeclBuilt(ID, NewLoaded);
+
+ return NewLoaded;
+}
+
+unsigned ASTReader::translateGlobalDeclIDToIndex(GlobalDeclID GlobalID) const {
+ ModuleFile *OwningModuleFile = getOwningModuleFile(GlobalID);
+ if (!OwningModuleFile) {
+ assert(GlobalID < NUM_PREDEF_DECL_IDS && "Untransalted Global ID?");
+ return GlobalID.getRawValue();
}
- llvm_unreachable("PredefinedDeclIDs unknown enum value");
+
+ return OwningModuleFile->BaseDeclIndex + GlobalID.getLocalDeclIndex();
}
-Decl *ASTReader::GetExistingDecl(DeclID ID) {
+Decl *ASTReader::GetExistingDecl(GlobalDeclID ID) {
assert(ContextObj && "reading decl with no AST context");
+
if (ID < NUM_PREDEF_DECL_IDS) {
- Decl *D = getPredefinedDecl(*ContextObj, (PredefinedDeclIDs)ID);
+ Decl *D = getPredefinedDecl((PredefinedDeclIDs)ID);
if (D) {
// Track that we have merged the declaration with ID \p ID into the
// pre-existing predefined declaration \p D.
@@ -7754,7 +7880,7 @@ Decl *ASTReader::GetExistingDecl(DeclID ID) {
return D;
}
- unsigned Index = ID - NUM_PREDEF_DECL_IDS;
+ unsigned Index = translateGlobalDeclIDToIndex(ID);
if (Index >= DeclsLoaded.size()) {
assert(0 && "declaration ID out-of-range for AST file");
@@ -7765,11 +7891,11 @@ Decl *ASTReader::GetExistingDecl(DeclID ID) {
return DeclsLoaded[Index];
}
-Decl *ASTReader::GetDecl(DeclID ID) {
+Decl *ASTReader::GetDecl(GlobalDeclID ID) {
if (ID < NUM_PREDEF_DECL_IDS)
return GetExistingDecl(ID);
- unsigned Index = ID - NUM_PREDEF_DECL_IDS;
+ unsigned Index = translateGlobalDeclIDToIndex(ID);
if (Index >= DeclsLoaded.size()) {
assert(0 && "declaration ID out-of-range for AST file");
@@ -7786,32 +7912,43 @@ Decl *ASTReader::GetDecl(DeclID ID) {
return DeclsLoaded[Index];
}
-DeclID ASTReader::mapGlobalIDToModuleFileGlobalID(ModuleFile &M,
- DeclID GlobalID) {
+LocalDeclID ASTReader::mapGlobalIDToModuleFileGlobalID(ModuleFile &M,
+ GlobalDeclID GlobalID) {
if (GlobalID < NUM_PREDEF_DECL_IDS)
- return GlobalID;
+ return LocalDeclID::get(*this, M, GlobalID.getRawValue());
- GlobalDeclMapType::const_iterator I = GlobalDeclMap.find(GlobalID);
- assert(I != GlobalDeclMap.end() && "Corrupted global declaration map");
- ModuleFile *Owner = I->second;
+ if (!M.ModuleOffsetMap.empty())
+ ReadModuleOffsetMap(M);
- llvm::DenseMap<ModuleFile *, serialization::DeclID>::iterator Pos
- = M.GlobalToLocalDeclIDs.find(Owner);
- if (Pos == M.GlobalToLocalDeclIDs.end())
- return 0;
+ ModuleFile *Owner = getOwningModuleFile(GlobalID);
+ DeclID ID = GlobalID.getLocalDeclIndex();
- return GlobalID - Owner->BaseDeclID + Pos->second;
+ if (Owner == &M) {
+ ID += NUM_PREDEF_DECL_IDS;
+ return LocalDeclID::get(*this, M, ID);
+ }
+
+ uint64_t OrignalModuleFileIndex = 0;
+ for (unsigned I = 0; I < M.TransitiveImports.size(); I++)
+ if (M.TransitiveImports[I] == Owner) {
+ OrignalModuleFileIndex = I + 1;
+ break;
+ }
+
+ if (!OrignalModuleFileIndex)
+ return LocalDeclID();
+
+ return LocalDeclID::get(*this, M, OrignalModuleFileIndex, ID);
}
-serialization::DeclID ASTReader::ReadDeclID(ModuleFile &F,
- const RecordData &Record,
- unsigned &Idx) {
+GlobalDeclID ASTReader::ReadDeclID(ModuleFile &F, const RecordDataImpl &Record,
+ unsigned &Idx) {
if (Idx >= Record.size()) {
Error("Corrupted AST file");
- return 0;
+ return GlobalDeclID(0);
}
- return getGlobalDeclID(F, Record[Idx++]);
+ return getGlobalDeclID(F, LocalDeclID::get(*this, F, Record[Idx++]));
}
/// Resolve the offset of a statement into a statement.
@@ -7847,7 +7984,7 @@ void ASTReader::FindExternalLexicalDecls(
if (!IsKindWeWant(K))
continue;
- auto ID = (serialization::DeclID)+LexicalDecls[I + 1];
+ auto ID = (DeclID) + LexicalDecls[I + 1];
// Don't add predefined declarations to the lexical context more
// than once.
@@ -7858,7 +7995,7 @@ void ASTReader::FindExternalLexicalDecls(
PredefsVisited[ID] = true;
}
- if (Decl *D = GetLocalDecl(*M, ID)) {
+ if (Decl *D = GetLocalDecl(*M, LocalDeclID::get(*this, *M, ID))) {
assert(D->getKind() == K && "wrong kind for lexical decl");
if (!DC->isDeclInLexicalTraversal(D))
Decls.push_back(D);
@@ -7880,32 +8017,34 @@ void ASTReader::FindExternalLexicalDecls(
namespace {
-class DeclIDComp {
+class UnalignedDeclIDComp {
ASTReader &Reader;
ModuleFile &Mod;
public:
- DeclIDComp(ASTReader &Reader, ModuleFile &M) : Reader(Reader), Mod(M) {}
+ UnalignedDeclIDComp(ASTReader &Reader, ModuleFile &M)
+ : Reader(Reader), Mod(M) {}
- bool operator()(LocalDeclID L, LocalDeclID R) const {
+ bool operator()(unaligned_decl_id_t L, unaligned_decl_id_t R) const {
SourceLocation LHS = getLocation(L);
SourceLocation RHS = getLocation(R);
return Reader.getSourceManager().isBeforeInTranslationUnit(LHS, RHS);
}
- bool operator()(SourceLocation LHS, LocalDeclID R) const {
+ bool operator()(SourceLocation LHS, unaligned_decl_id_t R) const {
SourceLocation RHS = getLocation(R);
return Reader.getSourceManager().isBeforeInTranslationUnit(LHS, RHS);
}
- bool operator()(LocalDeclID L, SourceLocation RHS) const {
+ bool operator()(unaligned_decl_id_t L, SourceLocation RHS) const {
SourceLocation LHS = getLocation(L);
return Reader.getSourceManager().isBeforeInTranslationUnit(LHS, RHS);
}
- SourceLocation getLocation(LocalDeclID ID) const {
+ SourceLocation getLocation(unaligned_decl_id_t ID) const {
return Reader.getSourceManager().getFileLoc(
- Reader.getSourceLocationForDeclID(Reader.getGlobalDeclID(Mod, ID)));
+ Reader.getSourceLocationForDeclID(
+ Reader.getGlobalDeclID(Mod, LocalDeclID::get(Reader, Mod, ID))));
}
};
@@ -7928,8 +8067,8 @@ void ASTReader::FindFileRegionDecls(FileID File,
BeginLoc = SM.getLocForStartOfFile(File).getLocWithOffset(Offset);
SourceLocation EndLoc = BeginLoc.getLocWithOffset(Length);
- DeclIDComp DIDComp(*this, *DInfo.Mod);
- ArrayRef<serialization::LocalDeclID>::iterator BeginIt =
+ UnalignedDeclIDComp DIDComp(*this, *DInfo.Mod);
+ ArrayRef<unaligned_decl_id_t>::iterator BeginIt =
llvm::lower_bound(DInfo.Decls, BeginLoc, DIDComp);
if (BeginIt != DInfo.Decls.begin())
--BeginIt;
@@ -7938,18 +8077,20 @@ void ASTReader::FindFileRegionDecls(FileID File,
// to backtrack until we find it otherwise we will fail to report that the
// region overlaps with an objc container.
while (BeginIt != DInfo.Decls.begin() &&
- GetDecl(getGlobalDeclID(*DInfo.Mod, *BeginIt))
+ GetDecl(getGlobalDeclID(*DInfo.Mod,
+ LocalDeclID::get(*this, *DInfo.Mod, *BeginIt)))
->isTopLevelDeclInObjCContainer())
--BeginIt;
- ArrayRef<serialization::LocalDeclID>::iterator EndIt =
+ ArrayRef<unaligned_decl_id_t>::iterator EndIt =
llvm::upper_bound(DInfo.Decls, EndLoc, DIDComp);
if (EndIt != DInfo.Decls.end())
++EndIt;
- for (ArrayRef<serialization::LocalDeclID>::iterator
- DIt = BeginIt; DIt != EndIt; ++DIt)
- Decls.push_back(GetDecl(getGlobalDeclID(*DInfo.Mod, *DIt)));
+ for (ArrayRef<unaligned_decl_id_t>::iterator DIt = BeginIt; DIt != EndIt;
+ ++DIt)
+ Decls.push_back(GetDecl(getGlobalDeclID(
+ *DInfo.Mod, LocalDeclID::get(*this, *DInfo.Mod, *DIt))));
}
bool
@@ -7969,7 +8110,8 @@ ASTReader::FindExternalVisibleDeclsByName(const DeclContext *DC,
// Load the list of declarations.
SmallVector<NamedDecl *, 64> Decls;
llvm::SmallPtrSet<NamedDecl *, 8> Found;
- for (DeclID ID : It->second.Table.find(Name)) {
+
+ for (GlobalDeclID ID : It->second.Table.find(Name)) {
NamedDecl *ND = cast<NamedDecl>(GetDecl(ID));
if (ND->getDeclName() == Name && Found.insert(ND).second)
Decls.push_back(ND);
@@ -7990,7 +8132,7 @@ void ASTReader::completeVisibleDeclsMap(const DeclContext *DC) {
DeclsMap Decls;
- for (DeclID ID : It->second.Table.findAll()) {
+ for (GlobalDeclID ID : It->second.Table.findAll()) {
NamedDecl *ND = cast<NamedDecl>(GetDecl(ID));
Decls[ND->getDeclName()].push_back(ND);
}
@@ -8030,6 +8172,10 @@ void ASTReader::PassInterestingDeclToConsumer(Decl *D) {
Consumer->HandleInterestingDecl(DeclGroupRef(D));
}
+void ASTReader::PassVTableToConsumer(CXXRecordDecl *RD) {
+ Consumer->HandleVTable(RD);
+}
+
void ASTReader::StartTranslationUnit(ASTConsumer *Consumer) {
this->Consumer = Consumer;
@@ -8140,19 +8286,15 @@ dumpModuleIDMap(StringRef Name,
llvm::errs() << Name << ":\n";
for (typename MapType::const_iterator I = Map.begin(), IEnd = Map.end();
- I != IEnd; ++I) {
- llvm::errs() << " " << I->first << " -> " << I->second->FileName
- << "\n";
- }
+ I != IEnd; ++I)
+ llvm::errs() << " " << (DeclID)I->first << " -> " << I->second->FileName
+ << "\n";
}
LLVM_DUMP_METHOD void ASTReader::dump() {
llvm::errs() << "*** PCH/ModuleFile Remappings:\n";
dumpModuleIDMap("Global bit offset map", GlobalBitOffsetsMap);
dumpModuleIDMap("Global source location entry map", GlobalSLocEntryMap);
- dumpModuleIDMap("Global type map", GlobalTypeMap);
- dumpModuleIDMap("Global declaration map", GlobalDeclMap);
- dumpModuleIDMap("Global identifier map", GlobalIdentifierMap);
dumpModuleIDMap("Global macro map", GlobalMacroMap);
dumpModuleIDMap("Global submodule map", GlobalSubmoduleMap);
dumpModuleIDMap("Global selector map", GlobalSelectorMap);
@@ -8188,7 +8330,7 @@ void ASTReader::InitializeSema(Sema &S) {
// Makes sure any declarations that were deserialized "too early"
// still get added to the identifier's declaration chains.
- for (uint64_t ID : PreloadedDeclIDs) {
+ for (GlobalDeclID ID : PreloadedDeclIDs) {
NamedDecl *D = cast<NamedDecl>(GetDecl(ID));
pushExternalDeclIntoScope(D, D->getDeclName());
}
@@ -8217,11 +8359,11 @@ void ASTReader::UpdateSema() {
assert(SemaDeclRefs.size() % 3 == 0);
for (unsigned I = 0; I != SemaDeclRefs.size(); I += 3) {
if (!SemaObj->StdNamespace)
- SemaObj->StdNamespace = SemaDeclRefs[I];
+ SemaObj->StdNamespace = SemaDeclRefs[I].getRawValue();
if (!SemaObj->StdBadAlloc)
- SemaObj->StdBadAlloc = SemaDeclRefs[I+1];
+ SemaObj->StdBadAlloc = SemaDeclRefs[I + 1].getRawValue();
if (!SemaObj->StdAlignValT)
- SemaObj->StdAlignValT = SemaDeclRefs[I+2];
+ SemaObj->StdAlignValT = SemaDeclRefs[I + 2].getRawValue();
}
SemaDeclRefs.clear();
}
@@ -8238,7 +8380,7 @@ void ASTReader::UpdateSema() {
PragmaMSPointersToMembersState,
PointersToMembersPragmaLocation);
}
- SemaObj->ForceCUDAHostDeviceDepth = ForceCUDAHostDeviceDepth;
+ SemaObj->CUDA().ForceHostDeviceDepth = ForceHostDeviceDepth;
if (PragmaAlignPackCurrentValue) {
// The bottom of the stack might have a default value. It must be adjusted
@@ -8534,7 +8676,7 @@ namespace serialization {
static void addMethodsToPool(Sema &S, ArrayRef<ObjCMethodDecl *> Methods,
ObjCMethodList &List) {
for (ObjCMethodDecl *M : llvm::reverse(Methods))
- S.addMethodToGlobalList(&List, M);
+ S.ObjC().addMethodToGlobalList(&List, M);
}
void ASTReader::ReadMethodPool(Selector Sel) {
@@ -8559,8 +8701,10 @@ void ASTReader::ReadMethodPool(Selector Sel) {
return;
Sema &S = *getSema();
- Sema::GlobalMethodPool::iterator Pos =
- S.MethodPool.insert(std::make_pair(Sel, Sema::GlobalMethodPool::Lists()))
+ SemaObjC::GlobalMethodPool::iterator Pos =
+ S.ObjC()
+ .MethodPool
+ .insert(std::make_pair(Sel, SemaObjC::GlobalMethodPool::Lists()))
.first;
Pos->second.first.setBits(Visitor.getInstanceBits());
@@ -8594,18 +8738,20 @@ void ASTReader::ReadKnownNamespaces(
void ASTReader::ReadUndefinedButUsed(
llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {
for (unsigned Idx = 0, N = UndefinedButUsed.size(); Idx != N;) {
- NamedDecl *D = cast<NamedDecl>(GetDecl(UndefinedButUsed[Idx++]));
- SourceLocation Loc =
- SourceLocation::getFromRawEncoding(UndefinedButUsed[Idx++]);
+ UndefinedButUsedDecl &U = UndefinedButUsed[Idx++];
+ NamedDecl *D = cast<NamedDecl>(GetDecl(U.ID));
+ SourceLocation Loc = SourceLocation::getFromRawEncoding(U.RawLoc);
Undefined.insert(std::make_pair(D, Loc));
}
+ UndefinedButUsed.clear();
}
void ASTReader::ReadMismatchingDeleteExpressions(llvm::MapVector<
FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &
Exprs) {
for (unsigned Idx = 0, N = DelayedDeleteExprs.size(); Idx != N;) {
- FieldDecl *FD = cast<FieldDecl>(GetDecl(DelayedDeleteExprs[Idx++]));
+ FieldDecl *FD =
+ cast<FieldDecl>(GetDecl(GlobalDeclID(DelayedDeleteExprs[Idx++])));
uint64_t Count = DelayedDeleteExprs[Idx++];
for (uint64_t C = 0; C < Count; ++C) {
SourceLocation DeleteLoc =
@@ -8719,9 +8865,10 @@ void ASTReader::ReadWeakUndeclaredIdentifiers(
void ASTReader::ReadUsedVTables(SmallVectorImpl<ExternalVTableUse> &VTables) {
for (unsigned Idx = 0, N = VTableUses.size(); Idx < N; /* In loop */) {
ExternalVTableUse VT;
- VT.Record = dyn_cast_or_null<CXXRecordDecl>(GetDecl(VTableUses[Idx++]));
- VT.Location = SourceLocation::getFromRawEncoding(VTableUses[Idx++]);
- VT.DefinitionRequired = VTableUses[Idx++];
+ VTableUse &TableInfo = VTableUses[Idx++];
+ VT.Record = dyn_cast_or_null<CXXRecordDecl>(GetDecl(TableInfo.ID));
+ VT.Location = SourceLocation::getFromRawEncoding(TableInfo.RawLoc);
+ VT.DefinitionRequired = TableInfo.Used;
VTables.push_back(VT);
}
@@ -8731,9 +8878,9 @@ void ASTReader::ReadUsedVTables(SmallVectorImpl<ExternalVTableUse> &VTables) {
void ASTReader::ReadPendingInstantiations(
SmallVectorImpl<std::pair<ValueDecl *, SourceLocation>> &Pending) {
for (unsigned Idx = 0, N = PendingInstantiations.size(); Idx < N;) {
- ValueDecl *D = cast<ValueDecl>(GetDecl(PendingInstantiations[Idx++]));
- SourceLocation Loc
- = SourceLocation::getFromRawEncoding(PendingInstantiations[Idx++]);
+ PendingInstantiation &Inst = PendingInstantiations[Idx++];
+ ValueDecl *D = cast<ValueDecl>(GetDecl(Inst.ID));
+ SourceLocation Loc = SourceLocation::getFromRawEncoding(Inst.RawLoc);
Pending.push_back(std::make_pair(D, Loc));
}
@@ -8748,11 +8895,10 @@ void ASTReader::ReadLateParsedTemplates(
RecordDataImpl &LateParsed = LPT.second;
for (unsigned Idx = 0, N = LateParsed.size(); Idx < N;
/* In loop */) {
- FunctionDecl *FD =
- cast<FunctionDecl>(GetLocalDecl(*FMod, LateParsed[Idx++]));
+ FunctionDecl *FD = ReadDeclAs<FunctionDecl>(*FMod, LateParsed, Idx);
auto LT = std::make_unique<LateParsedTemplate>();
- LT->D = GetLocalDecl(*FMod, LateParsed[Idx++]);
+ LT->D = ReadDecl(*FMod, LateParsed, Idx);
LT->FPO = FPOptions::getFromOpaqueInt(LateParsed[Idx++]);
ModuleFile *F = getOwningModuleFile(LT->D);
@@ -8788,8 +8934,9 @@ void ASTReader::LoadSelector(Selector Sel) {
void ASTReader::SetIdentifierInfo(IdentifierID ID, IdentifierInfo *II) {
assert(ID && "Non-zero identifier ID required");
- assert(ID <= IdentifiersLoaded.size() && "identifier ID out of range");
- IdentifiersLoaded[ID - 1] = II;
+ unsigned Index = translateIdentifierIDToIndex(ID).second;
+ assert(Index < IdentifiersLoaded.size() && "identifier ID out of range");
+ IdentifiersLoaded[Index] = II;
if (DeserializationListener)
DeserializationListener->IdentifierRead(ID, II);
}
@@ -8810,10 +8957,9 @@ void ASTReader::SetIdentifierInfo(IdentifierID ID, IdentifierInfo *II) {
/// \param Decls if non-null, this vector will be populated with the set of
/// deserialized declarations. These declarations will not be pushed into
/// scope.
-void
-ASTReader::SetGloballyVisibleDecls(IdentifierInfo *II,
- const SmallVectorImpl<uint32_t> &DeclIDs,
- SmallVectorImpl<Decl *> *Decls) {
+void ASTReader::SetGloballyVisibleDecls(
+ IdentifierInfo *II, const SmallVectorImpl<GlobalDeclID> &DeclIDs,
+ SmallVectorImpl<Decl *> *Decls) {
if (NumCurrentElementsDeserializing && !Decls) {
PendingIdentifierInfos[II].append(DeclIDs.begin(), DeclIDs.end());
return;
@@ -8843,6 +8989,22 @@ ASTReader::SetGloballyVisibleDecls(IdentifierInfo *II,
}
}
+std::pair<ModuleFile *, unsigned>
+ASTReader::translateIdentifierIDToIndex(IdentifierID ID) const {
+ if (ID == 0)
+ return {nullptr, 0};
+
+ unsigned ModuleFileIndex = ID >> 32;
+ unsigned LocalID = ID & llvm::maskTrailingOnes<IdentifierID>(32);
+
+ assert(ModuleFileIndex && "not translating loaded IdentifierID?");
+ assert(getModuleManager().size() > ModuleFileIndex - 1);
+
+ ModuleFile &MF = getModuleManager()[ModuleFileIndex - 1];
+ assert(LocalID < MF.LocalNumIdentifiers);
+ return {&MF, MF.BaseIdentifierID + LocalID};
+}
+
IdentifierInfo *ASTReader::DecodeIdentifierInfo(IdentifierID ID) {
if (ID == 0)
return nullptr;
@@ -8852,45 +9014,48 @@ IdentifierInfo *ASTReader::DecodeIdentifierInfo(IdentifierID ID) {
return nullptr;
}
- ID -= 1;
- if (!IdentifiersLoaded[ID]) {
- GlobalIdentifierMapType::iterator I = GlobalIdentifierMap.find(ID + 1);
- assert(I != GlobalIdentifierMap.end() && "Corrupted global identifier map");
- ModuleFile *M = I->second;
- unsigned Index = ID - M->BaseIdentifierID;
+ auto [M, Index] = translateIdentifierIDToIndex(ID);
+ if (!IdentifiersLoaded[Index]) {
+ assert(M != nullptr && "Untranslated Identifier ID?");
+ assert(Index >= M->BaseIdentifierID);
+ unsigned LocalIndex = Index - M->BaseIdentifierID;
const unsigned char *Data =
- M->IdentifierTableData + M->IdentifierOffsets[Index];
+ M->IdentifierTableData + M->IdentifierOffsets[LocalIndex];
ASTIdentifierLookupTrait Trait(*this, *M);
auto KeyDataLen = Trait.ReadKeyDataLength(Data);
auto Key = Trait.ReadKey(Data, KeyDataLen.first);
auto &II = PP.getIdentifierTable().get(Key);
- IdentifiersLoaded[ID] = &II;
+ IdentifiersLoaded[Index] = &II;
markIdentifierFromAST(*this, II);
if (DeserializationListener)
- DeserializationListener->IdentifierRead(ID + 1, &II);
+ DeserializationListener->IdentifierRead(ID, &II);
}
- return IdentifiersLoaded[ID];
+ return IdentifiersLoaded[Index];
}
-IdentifierInfo *ASTReader::getLocalIdentifier(ModuleFile &M, unsigned LocalID) {
+IdentifierInfo *ASTReader::getLocalIdentifier(ModuleFile &M, uint64_t LocalID) {
return DecodeIdentifierInfo(getGlobalIdentifierID(M, LocalID));
}
-IdentifierID ASTReader::getGlobalIdentifierID(ModuleFile &M, unsigned LocalID) {
+IdentifierID ASTReader::getGlobalIdentifierID(ModuleFile &M, uint64_t LocalID) {
if (LocalID < NUM_PREDEF_IDENT_IDS)
return LocalID;
if (!M.ModuleOffsetMap.empty())
ReadModuleOffsetMap(M);
- ContinuousRangeMap<uint32_t, int, 2>::iterator I
- = M.IdentifierRemap.find(LocalID - NUM_PREDEF_IDENT_IDS);
- assert(I != M.IdentifierRemap.end()
- && "Invalid index into identifier index remap");
+ unsigned ModuleFileIndex = LocalID >> 32;
+ LocalID &= llvm::maskTrailingOnes<IdentifierID>(32);
+ ModuleFile *MF =
+ ModuleFileIndex ? M.TransitiveImports[ModuleFileIndex - 1] : &M;
+ assert(MF && "malformed identifier ID encoding?");
- return LocalID + I->second;
+ if (!ModuleFileIndex)
+ LocalID -= NUM_PREDEF_IDENT_IDS;
+
+ return ((IdentifierID)(MF->Index + 1) << 32) | LocalID;
}
MacroInfo *ASTReader::getMacro(MacroID ID) {
@@ -8935,7 +9100,7 @@ MacroID ASTReader::getGlobalMacroID(ModuleFile &M, unsigned LocalID) {
}
serialization::SubmoduleID
-ASTReader::getGlobalSubmoduleID(ModuleFile &M, unsigned LocalID) {
+ASTReader::getGlobalSubmoduleID(ModuleFile &M, unsigned LocalID) const {
if (LocalID < NUM_PREDEF_SUBMODULE_IDS)
return LocalID;
@@ -8968,7 +9133,7 @@ Module *ASTReader::getModule(unsigned ID) {
return getSubmodule(ID);
}
-ModuleFile *ASTReader::getLocalModuleFile(ModuleFile &M, unsigned ID) {
+ModuleFile *ASTReader::getLocalModuleFile(ModuleFile &M, unsigned ID) const {
if (ID & 1) {
// It's a module, look it up by submodule ID.
auto I = GlobalSubmoduleMap.find(getGlobalSubmoduleID(M, ID >> 1));
@@ -9111,6 +9276,10 @@ DeclarationNameInfo ASTRecordReader::readDeclarationNameInfo() {
return NameInfo;
}
+TypeCoupledDeclRefInfo ASTRecordReader::readTypeCoupledDeclRefInfo() {
+ return TypeCoupledDeclRefInfo(readDeclAs<ValueDecl>(), readBool());
+}
+
void ASTRecordReader::readQualifierInfo(QualifierInfo &Info) {
Info.QualifierLoc = readNestedNameSpecifierLoc();
unsigned NumTPLists = readInt();
@@ -9157,7 +9326,7 @@ void ASTRecordReader::readUnresolvedSet(LazyASTUnresolvedSet &Set) {
unsigned NumDecls = readInt();
Set.reserve(getContext(), NumDecls);
while (NumDecls--) {
- DeclID ID = readDeclID();
+ GlobalDeclID ID = readDeclID();
AccessSpecifier AS = (AccessSpecifier) readInt();
Set.addLazyDecl(getContext(), ID, AS);
}
@@ -9312,6 +9481,18 @@ SourceRange ASTReader::ReadSourceRange(ModuleFile &F, const RecordData &Record,
return SourceRange(beg, end);
}
+llvm::BitVector ASTReader::ReadBitVector(const RecordData &Record,
+ const StringRef Blob) {
+ unsigned Count = Record[0];
+ const char *Byte = Blob.data();
+ llvm::BitVector Ret = llvm::BitVector(Count, false);
+ for (unsigned I = 0; I < Count; ++Byte)
+ for (unsigned Bit = 0; Bit < 8 && I < Count; ++Bit, ++I)
+ if (*Byte & (1 << Bit))
+ Ret[I] = true;
+ return Ret;
+}
+
/// Read a floating-point value
llvm::APFloat ASTRecordReader::readAPFloat(const llvm::fltSemantics &Sem) {
return llvm::APFloat(Sem, readAPInt());
@@ -9367,6 +9548,20 @@ DiagnosticBuilder ASTReader::Diag(SourceLocation Loc, unsigned DiagID) const {
return Diags.Report(Loc, DiagID);
}
+void ASTReader::warnStackExhausted(SourceLocation Loc) {
+ // When Sema is available, avoid duplicate errors.
+ if (SemaObj) {
+ SemaObj->warnStackExhausted(Loc);
+ return;
+ }
+
+ if (WarnedStackExhausted)
+ return;
+ WarnedStackExhausted = true;
+
+ Diag(Loc, diag::warn_stack_exhausted);
+}
+
/// Retrieve the identifier table associated with the
/// preprocessor.
IdentifierTable &ASTReader::getIdentifierTable() {
@@ -9521,7 +9716,7 @@ void ASTReader::finishPendingActions() {
while (!PendingIdentifierInfos.empty()) {
IdentifierInfo *II = PendingIdentifierInfos.back().first;
- SmallVector<uint32_t, 4> DeclIDs =
+ SmallVector<GlobalDeclID, 4> DeclIDs =
std::move(PendingIdentifierInfos.back().second);
PendingIdentifierInfos.pop_back();
@@ -9745,7 +9940,8 @@ void ASTReader::finishPendingActions() {
!NonConstDefn->isLateTemplateParsed() &&
// We only perform ODR checks for decls not in the explicit
// global module fragment.
- !FD->shouldSkipCheckingODR() &&
+ !shouldSkipCheckingODR(FD) &&
+ !shouldSkipCheckingODR(NonConstDefn) &&
FD->getODRHash() != NonConstDefn->getODRHash()) {
if (!isa<CXXMethodDecl>(FD)) {
PendingFunctionOdrMergeFailures[FD].push_back(NonConstDefn);
@@ -10139,7 +10335,7 @@ void ASTReader::FinishedDeserializing() {
}
void ASTReader::pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name) {
- if (IdentifierInfo *II = Name.getAsIdentifierInfo()) {
+ if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
// Remove any fake results before adding any real ones.
auto It = PendingFakeLookupResults.find(II);
if (It != PendingFakeLookupResults.end()) {
@@ -10335,6 +10531,9 @@ OMPClause *OMPClauseReader::readClause() {
case llvm::omp::OMPC_relaxed:
C = new (Context) OMPRelaxedClause();
break;
+ case llvm::omp::OMPC_weak:
+ C = new (Context) OMPWeakClause();
+ break;
case llvm::omp::OMPC_threads:
C = new (Context) OMPThreadsClause();
break;
@@ -10733,6 +10932,8 @@ void OMPClauseReader::VisitOMPReleaseClause(OMPReleaseClause *) {}
void OMPClauseReader::VisitOMPRelaxedClause(OMPRelaxedClause *) {}
+void OMPClauseReader::VisitOMPWeakClause(OMPWeakClause *) {}
+
void OMPClauseReader::VisitOMPThreadsClause(OMPThreadsClause *) {}
void OMPClauseReader::VisitOMPSIMDClause(OMPSIMDClause *) {}
@@ -11717,3 +11918,209 @@ void ASTRecordReader::readOMPChildren(OMPChildren *Data) {
for (unsigned I = 0, E = Data->getNumChildren(); I < E; ++I)
Data->getChildren()[I] = readStmt();
}
+
+SmallVector<Expr *> ASTRecordReader::readOpenACCVarList() {
+ unsigned NumVars = readInt();
+ llvm::SmallVector<Expr *> VarList;
+ for (unsigned I = 0; I < NumVars; ++I)
+ VarList.push_back(readSubExpr());
+ return VarList;
+}
+
+SmallVector<Expr *> ASTRecordReader::readOpenACCIntExprList() {
+ unsigned NumExprs = readInt();
+ llvm::SmallVector<Expr *> ExprList;
+ for (unsigned I = 0; I < NumExprs; ++I)
+ ExprList.push_back(readSubExpr());
+ return ExprList;
+}
+
+OpenACCClause *ASTRecordReader::readOpenACCClause() {
+ OpenACCClauseKind ClauseKind = readEnum<OpenACCClauseKind>();
+ SourceLocation BeginLoc = readSourceLocation();
+ SourceLocation EndLoc = readSourceLocation();
+
+ switch (ClauseKind) {
+ case OpenACCClauseKind::Default: {
+ SourceLocation LParenLoc = readSourceLocation();
+ OpenACCDefaultClauseKind DCK = readEnum<OpenACCDefaultClauseKind>();
+ return OpenACCDefaultClause::Create(getContext(), DCK, BeginLoc, LParenLoc,
+ EndLoc);
+ }
+ case OpenACCClauseKind::If: {
+ SourceLocation LParenLoc = readSourceLocation();
+ Expr *CondExpr = readSubExpr();
+ return OpenACCIfClause::Create(getContext(), BeginLoc, LParenLoc, CondExpr,
+ EndLoc);
+ }
+ case OpenACCClauseKind::Self: {
+ SourceLocation LParenLoc = readSourceLocation();
+ Expr *CondExpr = readBool() ? readSubExpr() : nullptr;
+ return OpenACCSelfClause::Create(getContext(), BeginLoc, LParenLoc,
+ CondExpr, EndLoc);
+ }
+ case OpenACCClauseKind::NumGangs: {
+ SourceLocation LParenLoc = readSourceLocation();
+ unsigned NumClauses = readInt();
+ llvm::SmallVector<Expr *> IntExprs;
+ for (unsigned I = 0; I < NumClauses; ++I)
+ IntExprs.push_back(readSubExpr());
+ return OpenACCNumGangsClause::Create(getContext(), BeginLoc, LParenLoc,
+ IntExprs, EndLoc);
+ }
+ case OpenACCClauseKind::NumWorkers: {
+ SourceLocation LParenLoc = readSourceLocation();
+ Expr *IntExpr = readSubExpr();
+ return OpenACCNumWorkersClause::Create(getContext(), BeginLoc, LParenLoc,
+ IntExpr, EndLoc);
+ }
+ case OpenACCClauseKind::VectorLength: {
+ SourceLocation LParenLoc = readSourceLocation();
+ Expr *IntExpr = readSubExpr();
+ return OpenACCVectorLengthClause::Create(getContext(), BeginLoc, LParenLoc,
+ IntExpr, EndLoc);
+ }
+ case OpenACCClauseKind::Private: {
+ SourceLocation LParenLoc = readSourceLocation();
+ llvm::SmallVector<Expr *> VarList = readOpenACCVarList();
+ return OpenACCPrivateClause::Create(getContext(), BeginLoc, LParenLoc,
+ VarList, EndLoc);
+ }
+ case OpenACCClauseKind::FirstPrivate: {
+ SourceLocation LParenLoc = readSourceLocation();
+ llvm::SmallVector<Expr *> VarList = readOpenACCVarList();
+ return OpenACCFirstPrivateClause::Create(getContext(), BeginLoc, LParenLoc,
+ VarList, EndLoc);
+ }
+ case OpenACCClauseKind::Attach: {
+ SourceLocation LParenLoc = readSourceLocation();
+ llvm::SmallVector<Expr *> VarList = readOpenACCVarList();
+ return OpenACCAttachClause::Create(getContext(), BeginLoc, LParenLoc,
+ VarList, EndLoc);
+ }
+ case OpenACCClauseKind::DevicePtr: {
+ SourceLocation LParenLoc = readSourceLocation();
+ llvm::SmallVector<Expr *> VarList = readOpenACCVarList();
+ return OpenACCDevicePtrClause::Create(getContext(), BeginLoc, LParenLoc,
+ VarList, EndLoc);
+ }
+ case OpenACCClauseKind::NoCreate: {
+ SourceLocation LParenLoc = readSourceLocation();
+ llvm::SmallVector<Expr *> VarList = readOpenACCVarList();
+ return OpenACCNoCreateClause::Create(getContext(), BeginLoc, LParenLoc,
+ VarList, EndLoc);
+ }
+ case OpenACCClauseKind::Present: {
+ SourceLocation LParenLoc = readSourceLocation();
+ llvm::SmallVector<Expr *> VarList = readOpenACCVarList();
+ return OpenACCPresentClause::Create(getContext(), BeginLoc, LParenLoc,
+ VarList, EndLoc);
+ }
+ case OpenACCClauseKind::PCopy:
+ case OpenACCClauseKind::PresentOrCopy:
+ case OpenACCClauseKind::Copy: {
+ SourceLocation LParenLoc = readSourceLocation();
+ llvm::SmallVector<Expr *> VarList = readOpenACCVarList();
+ return OpenACCCopyClause::Create(getContext(), ClauseKind, BeginLoc,
+ LParenLoc, VarList, EndLoc);
+ }
+ case OpenACCClauseKind::CopyIn:
+ case OpenACCClauseKind::PCopyIn:
+ case OpenACCClauseKind::PresentOrCopyIn: {
+ SourceLocation LParenLoc = readSourceLocation();
+ bool IsReadOnly = readBool();
+ llvm::SmallVector<Expr *> VarList = readOpenACCVarList();
+ return OpenACCCopyInClause::Create(getContext(), ClauseKind, BeginLoc,
+ LParenLoc, IsReadOnly, VarList, EndLoc);
+ }
+ case OpenACCClauseKind::CopyOut:
+ case OpenACCClauseKind::PCopyOut:
+ case OpenACCClauseKind::PresentOrCopyOut: {
+ SourceLocation LParenLoc = readSourceLocation();
+ bool IsZero = readBool();
+ llvm::SmallVector<Expr *> VarList = readOpenACCVarList();
+ return OpenACCCopyOutClause::Create(getContext(), ClauseKind, BeginLoc,
+ LParenLoc, IsZero, VarList, EndLoc);
+ }
+ case OpenACCClauseKind::Create:
+ case OpenACCClauseKind::PCreate:
+ case OpenACCClauseKind::PresentOrCreate: {
+ SourceLocation LParenLoc = readSourceLocation();
+ bool IsZero = readBool();
+ llvm::SmallVector<Expr *> VarList = readOpenACCVarList();
+ return OpenACCCreateClause::Create(getContext(), ClauseKind, BeginLoc,
+ LParenLoc, IsZero, VarList, EndLoc);
+ }
+ case OpenACCClauseKind::Async: {
+ SourceLocation LParenLoc = readSourceLocation();
+ Expr *AsyncExpr = readBool() ? readSubExpr() : nullptr;
+ return OpenACCAsyncClause::Create(getContext(), BeginLoc, LParenLoc,
+ AsyncExpr, EndLoc);
+ }
+ case OpenACCClauseKind::Wait: {
+ SourceLocation LParenLoc = readSourceLocation();
+ Expr *DevNumExpr = readBool() ? readSubExpr() : nullptr;
+ SourceLocation QueuesLoc = readSourceLocation();
+ llvm::SmallVector<Expr *> QueueIdExprs = readOpenACCIntExprList();
+ return OpenACCWaitClause::Create(getContext(), BeginLoc, LParenLoc,
+ DevNumExpr, QueuesLoc, QueueIdExprs,
+ EndLoc);
+ }
+ case OpenACCClauseKind::DeviceType:
+ case OpenACCClauseKind::DType: {
+ SourceLocation LParenLoc = readSourceLocation();
+ llvm::SmallVector<DeviceTypeArgument> Archs;
+ unsigned NumArchs = readInt();
+
+ for (unsigned I = 0; I < NumArchs; ++I) {
+ IdentifierInfo *Ident = readBool() ? readIdentifier() : nullptr;
+ SourceLocation Loc = readSourceLocation();
+ Archs.emplace_back(Ident, Loc);
+ }
+
+ return OpenACCDeviceTypeClause::Create(getContext(), ClauseKind, BeginLoc,
+ LParenLoc, Archs, EndLoc);
+ }
+ case OpenACCClauseKind::Reduction: {
+ SourceLocation LParenLoc = readSourceLocation();
+ OpenACCReductionOperator Op = readEnum<OpenACCReductionOperator>();
+ llvm::SmallVector<Expr *> VarList = readOpenACCVarList();
+ return OpenACCReductionClause::Create(getContext(), BeginLoc, LParenLoc, Op,
+ VarList, EndLoc);
+ }
+ case OpenACCClauseKind::Seq:
+ return OpenACCSeqClause::Create(getContext(), BeginLoc, EndLoc);
+ case OpenACCClauseKind::Independent:
+ return OpenACCIndependentClause::Create(getContext(), BeginLoc, EndLoc);
+ case OpenACCClauseKind::Auto:
+ return OpenACCAutoClause::Create(getContext(), BeginLoc, EndLoc);
+
+ case OpenACCClauseKind::Finalize:
+ case OpenACCClauseKind::IfPresent:
+ case OpenACCClauseKind::Worker:
+ case OpenACCClauseKind::Vector:
+ case OpenACCClauseKind::NoHost:
+ case OpenACCClauseKind::UseDevice:
+ case OpenACCClauseKind::Delete:
+ case OpenACCClauseKind::Detach:
+ case OpenACCClauseKind::Device:
+ case OpenACCClauseKind::DeviceResident:
+ case OpenACCClauseKind::Host:
+ case OpenACCClauseKind::Link:
+ case OpenACCClauseKind::Collapse:
+ case OpenACCClauseKind::Bind:
+ case OpenACCClauseKind::DeviceNum:
+ case OpenACCClauseKind::DefaultAsync:
+ case OpenACCClauseKind::Tile:
+ case OpenACCClauseKind::Gang:
+ case OpenACCClauseKind::Invalid:
+ llvm_unreachable("Clause serialization not yet implemented");
+ }
+ llvm_unreachable("Invalid Clause Kind");
+}
+
+void ASTRecordReader::readOpenACCClauseList(
+ MutableArrayRef<const OpenACCClause *> Clauses) {
+ for (unsigned I = 0; I < Clauses.size(); ++I)
+ Clauses[I] = readOpenACCClause();
+}
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
index 110f55f8c0f4..154acdfbe032 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -49,6 +49,7 @@
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/Stack.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Serialization/ASTBitCodes.h"
#include "clang/Serialization/ASTRecordReader.h"
@@ -84,18 +85,16 @@ namespace clang {
ASTReader &Reader;
ASTRecordReader &Record;
ASTReader::RecordLocation Loc;
- const DeclID ThisDeclID;
+ const GlobalDeclID ThisDeclID;
const SourceLocation ThisDeclLoc;
using RecordData = ASTReader::RecordData;
TypeID DeferredTypeID = 0;
unsigned AnonymousDeclNumber = 0;
- GlobalDeclID NamedDeclForTagDecl = 0;
+ GlobalDeclID NamedDeclForTagDecl = GlobalDeclID();
IdentifierInfo *TypedefNameForLinkage = nullptr;
- bool HasPendingBody = false;
-
///A flag to carry the information for a decl from the entity is
/// used. We use it to delay the marking of the canonical decl as used until
/// the entire declaration is deserialized and merged.
@@ -126,15 +125,13 @@ namespace clang {
return Record.readTypeSourceInfo();
}
- serialization::DeclID readDeclID() {
- return Record.readDeclID();
- }
+ GlobalDeclID readDeclID() { return Record.readDeclID(); }
std::string readString() {
return Record.readString();
}
- void readDeclIDList(SmallVectorImpl<DeclID> &IDs) {
+ void readDeclIDList(SmallVectorImpl<GlobalDeclID> &IDs) {
for (unsigned I = 0, Size = Record.readInt(); I != Size; ++I)
IDs.push_back(readDeclID());
}
@@ -260,14 +257,14 @@ namespace clang {
public:
ASTDeclReader(ASTReader &Reader, ASTRecordReader &Record,
- ASTReader::RecordLocation Loc,
- DeclID thisDeclID, SourceLocation ThisDeclLoc)
+ ASTReader::RecordLocation Loc, GlobalDeclID thisDeclID,
+ SourceLocation ThisDeclLoc)
: Reader(Reader), Record(Record), Loc(Loc), ThisDeclID(thisDeclID),
ThisDeclLoc(ThisDeclLoc) {}
- template <typename T> static
- void AddLazySpecializations(T *D,
- SmallVectorImpl<serialization::DeclID>& IDs) {
+ template <typename T>
+ static void AddLazySpecializations(T *D,
+ SmallVectorImpl<GlobalDeclID> &IDs) {
if (IDs.empty())
return;
@@ -277,13 +274,14 @@ namespace clang {
auto *&LazySpecializations = D->getCommonPtr()->LazySpecializations;
if (auto &Old = LazySpecializations) {
- IDs.insert(IDs.end(), Old + 1, Old + 1 + Old[0]);
+ IDs.insert(IDs.end(), Old + 1, Old + 1 + Old[0].getRawValue());
llvm::sort(IDs);
IDs.erase(std::unique(IDs.begin(), IDs.end()), IDs.end());
}
- auto *Result = new (C) serialization::DeclID[1 + IDs.size()];
- *Result = IDs.size();
+ auto *Result = new (C) GlobalDeclID[1 + IDs.size()];
+ *Result = GlobalDeclID(IDs.size());
+
std::copy(IDs.begin(), IDs.end(), Result + 1);
LazySpecializations = Result;
@@ -314,13 +312,10 @@ namespace clang {
static void markIncompleteDeclChainImpl(Redeclarable<DeclT> *D);
static void markIncompleteDeclChainImpl(...);
- /// Determine whether this declaration has a pending body.
- bool hasPendingBody() const { return HasPendingBody; }
-
void ReadFunctionDefinition(FunctionDecl *FD);
void Visit(Decl *D);
- void UpdateDecl(Decl *D, SmallVectorImpl<serialization::DeclID> &);
+ void UpdateDecl(Decl *D, SmallVectorImpl<GlobalDeclID> &);
static void setNextObjCCategory(ObjCCategoryDecl *Cat,
ObjCCategoryDecl *Next) {
@@ -541,7 +536,6 @@ void ASTDeclReader::ReadFunctionDefinition(FunctionDecl *FD) {
}
// Store the offset of the body so we can lazily load it later.
Reader.PendingBodies[FD] = GetCurrentCursorOffset();
- HasPendingBody = true;
}
void ASTDeclReader::Visit(Decl *D) {
@@ -563,7 +557,7 @@ void ASTDeclReader::Visit(Decl *D) {
// If this is a tag declaration with a typedef name for linkage, it's safe
// to load that typedef now.
- if (NamedDeclForTagDecl)
+ if (NamedDeclForTagDecl.isValid())
cast<TagDecl>(D)->TypedefNameDeclOrQualifier =
cast<TypedefNameDecl>(Reader.GetDecl(NamedDeclForTagDecl));
} else if (auto *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
@@ -607,8 +601,8 @@ void ASTDeclReader::VisitDecl(Decl *D) {
// placeholder.
GlobalDeclID SemaDCIDForTemplateParmDecl = readDeclID();
GlobalDeclID LexicalDCIDForTemplateParmDecl =
- HasStandaloneLexicalDC ? readDeclID() : 0;
- if (!LexicalDCIDForTemplateParmDecl)
+ HasStandaloneLexicalDC ? readDeclID() : GlobalDeclID();
+ if (LexicalDCIDForTemplateParmDecl.isInvalid())
LexicalDCIDForTemplateParmDecl = SemaDCIDForTemplateParmDecl;
Reader.addPendingDeclContextInfo(D,
SemaDCIDForTemplateParmDecl,
@@ -800,21 +794,16 @@ void ASTDeclReader::VisitEnumDecl(EnumDecl *ED) {
BitsUnpacker EnumDeclBits(Record.readInt());
ED->setNumPositiveBits(EnumDeclBits.getNextBits(/*Width=*/8));
ED->setNumNegativeBits(EnumDeclBits.getNextBits(/*Width=*/8));
- bool ShouldSkipCheckingODR = EnumDeclBits.getNextBit();
ED->setScoped(EnumDeclBits.getNextBit());
ED->setScopedUsingClassTag(EnumDeclBits.getNextBit());
ED->setFixed(EnumDeclBits.getNextBit());
- if (!ShouldSkipCheckingODR) {
- ED->setHasODRHash(true);
- ED->ODRHash = Record.readInt();
- }
+ ED->setHasODRHash(true);
+ ED->ODRHash = Record.readInt();
// If this is a definition subject to the ODR, and we already have a
// definition, merge this one into it.
- if (ED->isCompleteDefinition() &&
- Reader.getContext().getLangOpts().Modules &&
- Reader.getContext().getLangOpts().CPlusPlus) {
+ if (ED->isCompleteDefinition() && Reader.getContext().getLangOpts().Modules) {
EnumDecl *&OldDef = Reader.EnumDefinitions[ED->getCanonicalDecl()];
if (!OldDef) {
// This is the first time we've seen an imported definition. Look for a
@@ -832,7 +821,7 @@ void ASTDeclReader::VisitEnumDecl(EnumDecl *ED) {
Reader.mergeDefinitionVisibility(OldDef, ED);
// We don't want to check the ODR hash value for declarations from global
// module fragment.
- if (!ED->shouldSkipCheckingODR() &&
+ if (!shouldSkipCheckingODR(ED) && !shouldSkipCheckingODR(OldDef) &&
OldDef->getODRHash() != ED->getODRHash())
Reader.PendingEnumOdrMergeFailures[OldDef].push_back(ED);
} else {
@@ -872,9 +861,6 @@ ASTDeclReader::VisitRecordDeclImpl(RecordDecl *RD) {
void ASTDeclReader::VisitRecordDecl(RecordDecl *RD) {
VisitRecordDeclImpl(RD);
- // We should only reach here if we're in C/Objective-C. There is no
- // global module fragment.
- assert(!RD->shouldSkipCheckingODR());
RD->setODRHash(Record.readInt());
// Maintain the invariant of a redeclaration chain containing only
@@ -1074,7 +1060,6 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
FD->setCachedLinkage((Linkage)FunctionDeclBits.getNextBits(/*Width=*/3));
FD->setStorageClass((StorageClass)FunctionDeclBits.getNextBits(/*Width=*/3));
- bool ShouldSkipCheckingODR = FunctionDeclBits.getNextBit();
FD->setInlineSpecified(FunctionDeclBits.getNextBit());
FD->setImplicitlyInline(FunctionDeclBits.getNextBit());
FD->setHasSkippedBody(FunctionDeclBits.getNextBit());
@@ -1104,21 +1089,29 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
if (FD->isExplicitlyDefaulted())
FD->setDefaultLoc(readSourceLocation());
- if (!ShouldSkipCheckingODR) {
- FD->ODRHash = Record.readInt();
- FD->setHasODRHash(true);
- }
+ FD->ODRHash = Record.readInt();
+ FD->setHasODRHash(true);
- if (FD->isDefaulted()) {
- if (unsigned NumLookups = Record.readInt()) {
+ if (FD->isDefaulted() || FD->isDeletedAsWritten()) {
+ // If 'Info' is nonzero, we need to read an DefaultedOrDeletedInfo; if,
+ // additionally, the second bit is also set, we also need to read
+ // a DeletedMessage for the DefaultedOrDeletedInfo.
+ if (auto Info = Record.readInt()) {
+ bool HasMessage = Info & 2;
+ StringLiteral *DeletedMessage =
+ HasMessage ? cast<StringLiteral>(Record.readExpr()) : nullptr;
+
+ unsigned NumLookups = Record.readInt();
SmallVector<DeclAccessPair, 8> Lookups;
for (unsigned I = 0; I != NumLookups; ++I) {
NamedDecl *ND = Record.readDeclAs<NamedDecl>();
AccessSpecifier AS = (AccessSpecifier)Record.readInt();
Lookups.push_back(DeclAccessPair::make(ND, AS));
}
- FD->setDefaultedFunctionInfo(FunctionDecl::DefaultedFunctionInfo::Create(
- Reader.getContext(), Lookups));
+
+ FD->setDefaultedOrDeletedInfo(
+ FunctionDecl::DefaultedOrDeletedFunctionInfo::Create(
+ Reader.getContext(), Lookups, DeletedMessage));
}
}
@@ -1164,7 +1157,6 @@ void ASTDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) {
// Load the body on-demand. Most clients won't care, because method
// definitions rarely show up in headers.
Reader.PendingBodies[MD] = GetCurrentCursorOffset();
- HasPendingBody = true;
}
MD->setSelfDecl(readDeclAs<ImplicitParamDecl>());
MD->setCmdDecl(readDeclAs<ImplicitParamDecl>());
@@ -1845,18 +1837,13 @@ void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) {
// this namespace; loading it might load a later declaration of the
// same namespace, and we have an invariant that older declarations
// get merged before newer ones try to merge.
- GlobalDeclID AnonNamespace = 0;
- if (Redecl.getFirstID() == ThisDeclID) {
+ GlobalDeclID AnonNamespace;
+ if (Redecl.getFirstID() == ThisDeclID)
AnonNamespace = readDeclID();
- } else {
- // Link this namespace back to the first declaration, which has already
- // been deserialized.
- D->AnonOrFirstNamespaceAndFlags.setPointer(D->getFirstDecl());
- }
mergeRedeclarable(D, Redecl);
- if (AnonNamespace) {
+ if (AnonNamespace.isValid()) {
// Each module has its own anonymous namespace, which is disjoint from
// any other module's anonymous namespaces, so don't attach the anonymous
// namespace at all.
@@ -1975,8 +1962,6 @@ void ASTDeclReader::ReadCXXDefinitionData(
BitsUnpacker CXXRecordDeclBits = Record.readInt();
- bool ShouldSkipCheckingODR = CXXRecordDeclBits.getNextBit();
-
#define FIELD(Name, Width, Merge) \
if (!CXXRecordDeclBits.canGetNextNBits(Width)) \
CXXRecordDeclBits.updateValue(Record.readInt()); \
@@ -1985,12 +1970,9 @@ void ASTDeclReader::ReadCXXDefinitionData(
#include "clang/AST/CXXRecordDeclDefinitionBits.def"
#undef FIELD
- // We only perform ODR checks for decls not in GMF.
- if (!ShouldSkipCheckingODR) {
- // Note: the caller has deserialized the IsLambda bit already.
- Data.ODRHash = Record.readInt();
- Data.HasODRHash = true;
- }
+ // Note: the caller has deserialized the IsLambda bit already.
+ Data.ODRHash = Record.readInt();
+ Data.HasODRHash = true;
if (Record.readInt()) {
Reader.DefinitionSource[D] =
@@ -2016,7 +1998,7 @@ void ASTDeclReader::ReadCXXDefinitionData(
if (Data.NumVBases)
Data.VBases = ReadGlobalOffset();
- Data.FirstFriend = readDeclID();
+ Data.FirstFriend = readDeclID().getRawValue();
} else {
using Capture = LambdaCapture;
@@ -2152,7 +2134,7 @@ void ASTDeclReader::MergeDefinitionData(
}
// We don't want to check ODR for decls in the global module fragment.
- if (MergeDD.Definition->shouldSkipCheckingODR())
+ if (shouldSkipCheckingODR(MergeDD.Definition) || shouldSkipCheckingODR(D))
return;
if (D->getODRHash() != MergeDD.ODRHash) {
@@ -2275,12 +2257,12 @@ ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
// Lazily load the key function to avoid deserializing every method so we can
// compute it.
if (WasDefinition) {
- DeclID KeyFn = readDeclID();
- if (KeyFn && D->isCompleteDefinition())
+ GlobalDeclID KeyFn = readDeclID();
+ if (KeyFn.isValid() && D->isCompleteDefinition())
// FIXME: This is wrong for the ARM ABI, where some other module may have
// made this function no longer be a key function. We need an update
// record or similar for that case.
- C.KeyFunctions[D] = KeyFn;
+ C.KeyFunctions[D] = KeyFn.getRawValue();
}
return Redecl;
@@ -2369,7 +2351,7 @@ void ASTDeclReader::VisitFriendDecl(FriendDecl *D) {
for (unsigned i = 0; i != D->NumTPLists; ++i)
D->getTrailingObjects<TemplateParameterList *>()[i] =
Record.readTemplateParameterList();
- D->NextFriend = readDeclID();
+ D->NextFriend = readDeclID().getRawValue();
D->UnsupportedFriend = (Record.readInt() != 0);
D->FriendLoc = readSourceLocation();
}
@@ -2454,7 +2436,7 @@ void ASTDeclReader::VisitClassTemplateDecl(ClassTemplateDecl *D) {
if (ThisDeclID == Redecl.getFirstID()) {
// This ClassTemplateDecl owns a CommonPtr; read it to keep track of all of
// the specializations.
- SmallVector<serialization::DeclID, 32> SpecIDs;
+ SmallVector<GlobalDeclID, 32> SpecIDs;
readDeclIDList(SpecIDs);
ASTDeclReader::AddLazySpecializations(D, SpecIDs);
}
@@ -2482,7 +2464,7 @@ void ASTDeclReader::VisitVarTemplateDecl(VarTemplateDecl *D) {
if (ThisDeclID == Redecl.getFirstID()) {
// This VarTemplateDecl owns a CommonPtr; read it to keep track of all of
// the specializations.
- SmallVector<serialization::DeclID, 32> SpecIDs;
+ SmallVector<GlobalDeclID, 32> SpecIDs;
readDeclIDList(SpecIDs);
ASTDeclReader::AddLazySpecializations(D, SpecIDs);
}
@@ -2548,16 +2530,17 @@ ASTDeclReader::VisitClassTemplateSpecializationDeclImpl(
}
}
- // Explicit info.
- if (TypeSourceInfo *TyInfo = readTypeSourceInfo()) {
- auto *ExplicitInfo =
- new (C) ClassTemplateSpecializationDecl::ExplicitSpecializationInfo;
- ExplicitInfo->TypeAsWritten = TyInfo;
- ExplicitInfo->ExternLoc = readSourceLocation();
+ // extern/template keyword locations for explicit instantiations
+ if (Record.readBool()) {
+ auto *ExplicitInfo = new (C) ExplicitInstantiationInfo;
+ ExplicitInfo->ExternKeywordLoc = readSourceLocation();
ExplicitInfo->TemplateKeywordLoc = readSourceLocation();
D->ExplicitInfo = ExplicitInfo;
}
+ if (Record.readBool())
+ D->setTemplateArgsAsWritten(Record.readASTTemplateArgumentListInfo());
+
return Redecl;
}
@@ -2567,7 +2550,6 @@ void ASTDeclReader::VisitClassTemplatePartialSpecializationDecl(
// need them for profiling
TemplateParameterList *Params = Record.readTemplateParameterList();
D->TemplateParams = Params;
- D->ArgsAsWritten = Record.readASTTemplateArgumentListInfo();
RedeclarableResult Redecl = VisitClassTemplateSpecializationDeclImpl(D);
@@ -2584,7 +2566,7 @@ void ASTDeclReader::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
if (ThisDeclID == Redecl.getFirstID()) {
// This FunctionTemplateDecl owns a CommonPtr; read it.
- SmallVector<serialization::DeclID, 32> SpecIDs;
+ SmallVector<GlobalDeclID, 32> SpecIDs;
readDeclIDList(SpecIDs);
ASTDeclReader::AddLazySpecializations(D, SpecIDs);
}
@@ -2617,16 +2599,17 @@ ASTDeclReader::VisitVarTemplateSpecializationDeclImpl(
}
}
- // Explicit info.
- if (TypeSourceInfo *TyInfo = readTypeSourceInfo()) {
- auto *ExplicitInfo =
- new (C) VarTemplateSpecializationDecl::ExplicitSpecializationInfo;
- ExplicitInfo->TypeAsWritten = TyInfo;
- ExplicitInfo->ExternLoc = readSourceLocation();
+ // extern/template keyword locations for explicit instantiations
+ if (Record.readBool()) {
+ auto *ExplicitInfo = new (C) ExplicitInstantiationInfo;
+ ExplicitInfo->ExternKeywordLoc = readSourceLocation();
ExplicitInfo->TemplateKeywordLoc = readSourceLocation();
D->ExplicitInfo = ExplicitInfo;
}
+ if (Record.readBool())
+ D->setTemplateArgsAsWritten(Record.readASTTemplateArgumentListInfo());
+
SmallVector<TemplateArgument, 8> TemplArgs;
Record.readTemplateArgumentList(TemplArgs, /*Canonicalize*/ true);
D->TemplateArgs = TemplateArgumentList::CreateCopy(C, TemplArgs);
@@ -2666,7 +2649,6 @@ void ASTDeclReader::VisitVarTemplatePartialSpecializationDecl(
VarTemplatePartialSpecializationDecl *D) {
TemplateParameterList *Params = Record.readTemplateParameterList();
D->TemplateParams = Params;
- D->ArgsAsWritten = Record.readASTTemplateArgumentListInfo();
RedeclarableResult Redecl = VisitVarTemplateSpecializationDeclImpl(D);
@@ -2683,7 +2665,8 @@ void ASTDeclReader::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
D->setDeclaredWithTypename(Record.readInt());
- if (D->hasTypeConstraint()) {
+ const bool TypeConstraintInitialized = Record.readBool();
+ if (TypeConstraintInitialized && D->hasTypeConstraint()) {
ConceptReference *CR = nullptr;
if (Record.readBool())
CR = Record.readConceptReference();
@@ -2695,7 +2678,8 @@ void ASTDeclReader::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
}
if (Record.readInt())
- D->setDefaultArgument(readTypeSourceInfo());
+ D->setDefaultArgument(Reader.getContext(),
+ Record.readTemplateArgumentLoc());
}
void ASTDeclReader::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
@@ -2716,12 +2700,14 @@ void ASTDeclReader::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
// Rest of NonTypeTemplateParmDecl.
D->ParameterPack = Record.readInt();
if (Record.readInt())
- D->setDefaultArgument(Record.readExpr());
+ D->setDefaultArgument(Reader.getContext(),
+ Record.readTemplateArgumentLoc());
}
}
void ASTDeclReader::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
VisitTemplateDecl(D);
+ D->setDeclaredWithTypename(Record.readBool());
// TemplateParmPosition.
D->setDepth(Record.readInt());
D->setPosition(Record.readInt());
@@ -2779,7 +2765,7 @@ ASTDeclReader::VisitDeclContext(DeclContext *DC) {
template <typename T>
ASTDeclReader::RedeclarableResult
ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
- DeclID FirstDeclID = readDeclID();
+ GlobalDeclID FirstDeclID = readDeclID();
Decl *MergeWith = nullptr;
bool IsKeyDecl = ThisDeclID == FirstDeclID;
@@ -2787,9 +2773,9 @@ ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
uint64_t RedeclOffset = 0;
- // 0 indicates that this declaration was the only declaration of its entity,
- // and is used for space optimization.
- if (FirstDeclID == 0) {
+ // invalid FirstDeclID indicates that this declaration was the only
+ // declaration of its entity, and is used for space optimization.
+ if (FirstDeclID.isInvalid()) {
FirstDeclID = ThisDeclID;
IsKeyDecl = true;
IsFirstLocalDecl = true;
@@ -2918,9 +2904,9 @@ void ASTDeclReader::mergeTemplatePattern(RedeclarableTemplateDecl *D,
bool IsKeyDecl) {
auto *DPattern = D->getTemplatedDecl();
auto *ExistingPattern = Existing->getTemplatedDecl();
- RedeclarableResult Result(/*MergeWith*/ ExistingPattern,
- DPattern->getCanonicalDecl()->getGlobalID(),
- IsKeyDecl);
+ RedeclarableResult Result(
+ /*MergeWith*/ ExistingPattern,
+ DPattern->getCanonicalDecl()->getGlobalID(), IsKeyDecl);
if (auto *DClass = dyn_cast<CXXRecordDecl>(DPattern)) {
// Merge with any existing definition.
@@ -2970,13 +2956,6 @@ void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *DBase, T *Existing,
ExistingCanon->Used |= D->Used;
D->Used = false;
- // When we merge a namespace, update its pointer to the first namespace.
- // We cannot have loaded any redeclarations of this declaration yet, so
- // there's nothing else that needs to be updated.
- if (auto *Namespace = dyn_cast<NamespaceDecl>(D))
- Namespace->AnonOrFirstNamespaceAndFlags.setPointer(
- assert_cast<NamespaceDecl *>(ExistingCanon));
-
// When we merge a template, merge its pattern.
if (auto *DTemplate = dyn_cast<RedeclarableTemplateDecl>(D))
mergeTemplatePattern(
@@ -3075,14 +3054,14 @@ void ASTDeclReader::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
Expr *Init = Record.readExpr();
auto IK = static_cast<OMPDeclareReductionInitKind>(Record.readInt());
D->setInitializer(Init, IK);
- D->PrevDeclInScope = readDeclID();
+ D->PrevDeclInScope = readDeclID().getRawValue();
}
void ASTDeclReader::VisitOMPDeclareMapperDecl(OMPDeclareMapperDecl *D) {
Record.readOMPChildren(D->Data);
VisitValueDecl(D);
D->VarName = Record.readDeclarationName();
- D->PrevDeclInScope = readDeclID();
+ D->PrevDeclInScope = readDeclID().getRawValue();
}
void ASTDeclReader::VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D) {
@@ -3136,9 +3115,7 @@ public:
OMPTraitInfo *readOMPTraitInfo() { return Reader.readOMPTraitInfo(); }
- template <typename T> T *GetLocalDeclAs(uint32_t LocalID) {
- return Reader.GetLocalDeclAs<T>(LocalID);
- }
+ template <typename T> T *readDeclAs() { return Reader.readDeclAs<T>(); }
};
}
@@ -3205,7 +3182,7 @@ inline void ASTReader::LoadedDecl(unsigned Index, Decl *D) {
/// This routine should return true for anything that might affect
/// code generation, e.g., inline function definitions, Objective-C
/// declarations with metadata, etc.
-static bool isConsumerInterestedIn(ASTContext &Ctx, Decl *D, bool HasBody) {
+bool ASTReader::isConsumerInterestedIn(Decl *D) {
// An ObjCMethodDecl is never considered as "interesting" because its
// implementation container always is.
@@ -3214,7 +3191,7 @@ static bool isConsumerInterestedIn(ASTContext &Ctx, Decl *D, bool HasBody) {
if (isPartOfPerModuleInitializer(D)) {
auto *M = D->getImportedOwningModule();
if (M && M->Kind == Module::ModuleMapModule &&
- Ctx.DeclMustBeEmitted(D))
+ getContext().DeclMustBeEmitted(D))
return false;
}
@@ -3229,7 +3206,7 @@ static bool isConsumerInterestedIn(ASTContext &Ctx, Decl *D, bool HasBody) {
(Var->isThisDeclarationADefinition() == VarDecl::Definition ||
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(Var));
if (const auto *Func = dyn_cast<FunctionDecl>(D))
- return Func->doesThisDeclarationHaveABody() || HasBody;
+ return Func->doesThisDeclarationHaveABody() || PendingBodies.count(D);
if (auto *ES = D->getASTContext().getExternalSource())
if (ES->hasExternalDefinitions(D) == ExternalASTSource::EK_Never)
@@ -3239,14 +3216,13 @@ static bool isConsumerInterestedIn(ASTContext &Ctx, Decl *D, bool HasBody) {
}
/// Get the correct cursor and offset for loading a declaration.
-ASTReader::RecordLocation
-ASTReader::DeclCursorForID(DeclID ID, SourceLocation &Loc) {
- GlobalDeclMapType::iterator I = GlobalDeclMap.find(ID);
- assert(I != GlobalDeclMap.end() && "Corrupted global declaration map");
- ModuleFile *M = I->second;
- const DeclOffset &DOffs =
- M->DeclOffsets[ID - M->BaseDeclID - NUM_PREDEF_DECL_IDS];
- Loc = TranslateSourceLocation(*M, DOffs.getLocation());
+ASTReader::RecordLocation ASTReader::DeclCursorForID(GlobalDeclID ID,
+ SourceLocation &Loc) {
+ ModuleFile *M = getOwningModuleFile(ID);
+ assert(M);
+ unsigned LocalDeclIndex = ID.getLocalDeclIndex();
+ const DeclOffset &DOffs = M->DeclOffsets[LocalDeclIndex];
+ Loc = ReadSourceLocation(*M, DOffs.getRawLoc());
return RecordLocation(M, DOffs.getBitOffset(M->DeclsBlockStartOffset));
}
@@ -3292,7 +3268,7 @@ ASTDeclReader::getOrFakePrimaryClassDefinition(ASTReader &Reader,
DeclContext *ASTDeclReader::getPrimaryContextForMerging(ASTReader &Reader,
DeclContext *DC) {
if (auto *ND = dyn_cast<NamespaceDecl>(DC))
- return ND->getOriginalNamespace();
+ return ND->getFirstDecl();
if (auto *RD = dyn_cast<CXXRecordDecl>(DC))
return getOrFakePrimaryClassDefinition(Reader, RD);
@@ -3301,16 +3277,15 @@ DeclContext *ASTDeclReader::getPrimaryContextForMerging(ASTReader &Reader,
return RD->getDefinition();
if (auto *ED = dyn_cast<EnumDecl>(DC))
- return ED->getASTContext().getLangOpts().CPlusPlus? ED->getDefinition()
- : nullptr;
+ return ED->getDefinition();
if (auto *OID = dyn_cast<ObjCInterfaceDecl>(DC))
return OID->getDefinition();
- // We can see the TU here only if we have no Sema object. In that case,
- // there's no TU scope to look in, so using the DC alone is sufficient.
+ // We can see the TU here only if we have no Sema object. It is possible
+ // we're in clang-repl so we still need to get the primary context.
if (auto *TU = dyn_cast<TranslationUnitDecl>(DC))
- return TU;
+ return TU->getPrimaryContext();
return nullptr;
}
@@ -3526,7 +3501,7 @@ ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) {
// same template specialization into the same CXXRecordDecl.
auto MergedDCIt = Reader.MergedDeclContexts.find(D->getLexicalDeclContext());
if (MergedDCIt != Reader.MergedDeclContexts.end() &&
- !D->shouldSkipCheckingODR() && MergedDCIt->second == D->getDeclContext())
+ !shouldSkipCheckingODR(D) && MergedDCIt->second == D->getDeclContext())
Reader.PendingOdrMergeChecks.push_back(D);
return FindExistingResult(Reader, D, /*Existing=*/nullptr,
@@ -3710,6 +3685,54 @@ static void inheritDefaultTemplateArguments(ASTContext &Context,
}
}
+// [basic.link]/p10:
+// If two declarations of an entity are attached to different modules,
+// the program is ill-formed;
+static void checkMultipleDefinitionInNamedModules(ASTReader &Reader, Decl *D,
+ Decl *Previous) {
+ Module *M = Previous->getOwningModule();
+
+ // We only care about the case in named modules.
+ if (!M || !M->isNamedModule())
+ return;
+
+ // If it is previous implcitly introduced, it is not meaningful to
+ // diagnose it.
+ if (Previous->isImplicit())
+ return;
+
+ // FIXME: Get rid of the enumeration of decl types once we have an appropriate
+ // abstract for decls of an entity. e.g., the namespace decl and using decl
+ // doesn't introduce an entity.
+ if (!isa<VarDecl, FunctionDecl, TagDecl, RedeclarableTemplateDecl>(Previous))
+ return;
+
+ // Skip implicit instantiations since it may give false positive diagnostic
+ // messages.
+ // FIXME: Maybe this shows the implicit instantiations may have incorrect
+ // module owner ships. But given we've finished the compilation of a module,
+ // how can we add new entities to that module?
+ if (auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(Previous);
+ VTSD && !VTSD->isExplicitSpecialization())
+ return;
+ if (auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(Previous);
+ CTSD && !CTSD->isExplicitSpecialization())
+ return;
+ if (auto *Func = dyn_cast<FunctionDecl>(Previous))
+ if (auto *FTSI = Func->getTemplateSpecializationInfo();
+ FTSI && !FTSI->isExplicitSpecialization())
+ return;
+
+ // It is fine if they are in the same module.
+ if (Reader.getContext().isInSameModule(M, D->getOwningModule()))
+ return;
+
+ Reader.Diag(Previous->getLocation(),
+ diag::err_multiple_decl_in_different_modules)
+ << cast<NamedDecl>(Previous) << M->Name;
+ Reader.Diag(D->getLocation(), diag::note_also_found);
+}
+
void ASTDeclReader::attachPreviousDecl(ASTReader &Reader, Decl *D,
Decl *Previous, Decl *Canon) {
assert(D && Previous);
@@ -3723,6 +3746,8 @@ void ASTDeclReader::attachPreviousDecl(ASTReader &Reader, Decl *D,
#include "clang/AST/DeclNodes.inc"
}
+ checkMultipleDefinitionInNamedModules(Reader, D, Previous);
+
// If the declaration was visible in one module, a redeclaration of it in
// another module remains visible even if it wouldn't be visible by itself.
//
@@ -3788,8 +3813,7 @@ void ASTReader::markIncompleteDeclChain(Decl *D) {
}
/// Read the declaration at the given offset from the AST file.
-Decl *ASTReader::ReadDeclRecord(DeclID ID) {
- unsigned Index = ID - NUM_PREDEF_DECL_IDS;
+Decl *ASTReader::ReadDeclRecord(GlobalDeclID ID) {
SourceLocation DeclLoc;
RecordLocation Loc = DeclCursorForID(ID, DeclLoc);
llvm::BitstreamCursor &DeclsCursor = Loc.F->DeclsCursor;
@@ -3823,6 +3847,7 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
llvm::report_fatal_error(
Twine("ASTReader::readDeclRecord failed reading decl code: ") +
toString(MaybeDeclCode.takeError()));
+
switch ((DeclCode)MaybeDeclCode.get()) {
case DECL_CONTEXT_LEXICAL:
case DECL_CONTEXT_VISIBLE:
@@ -3949,9 +3974,8 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
}
case DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK: {
bool HasTypeConstraint = Record.readInt();
- D = NonTypeTemplateParmDecl::CreateDeserialized(Context, ID,
- Record.readInt(),
- HasTypeConstraint);
+ D = NonTypeTemplateParmDecl::CreateDeserialized(
+ Context, ID, Record.readInt(), HasTypeConstraint);
break;
}
case DECL_TEMPLATE_TEMPLATE_PARM:
@@ -4120,17 +4144,29 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
}
assert(D && "Unknown declaration reading AST file");
- LoadedDecl(Index, D);
+ LoadedDecl(translateGlobalDeclIDToIndex(ID), D);
// Set the DeclContext before doing any deserialization, to make sure internal
// calls to Decl::getASTContext() by Decl's methods will find the
// TranslationUnitDecl without crashing.
D->setDeclContext(Context.getTranslationUnitDecl());
- Reader.Visit(D);
+
+ // Reading some declarations can result in deep recursion.
+ clang::runWithSufficientStackSpace([&] { warnStackExhausted(DeclLoc); },
+ [&] { Reader.Visit(D); });
// If this declaration is also a declaration context, get the
// offsets for its tables of lexical and visible declarations.
if (auto *DC = dyn_cast<DeclContext>(D)) {
std::pair<uint64_t, uint64_t> Offsets = Reader.VisitDeclContext(DC);
+
+ // Get the lexical and visible block for the delayed namespace.
+ // It is sufficient to judge if ID is in DelayedNamespaceOffsetMap.
+ // But it may be more efficient to filter the other cases.
+ if (!Offsets.first && !Offsets.second && isa<NamespaceDecl>(D))
+ if (auto Iter = DelayedNamespaceOffsetMap.find(ID);
+ Iter != DelayedNamespaceOffsetMap.end())
+ Offsets = Iter->second;
+
if (Offsets.first &&
ReadLexicalDeclContextStorage(*Loc.F, DeclsCursor, Offsets.first, DC))
return nullptr;
@@ -4156,8 +4192,7 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
// AST consumer might need to know about, queue it.
// We don't pass it to the consumer immediately because we may be in recursive
// loading, and some declarations may still be initializing.
- PotentiallyInterestingDecls.push_back(
- InterestingDecl(D, Reader.hasPendingBody()));
+ PotentiallyInterestingDecls.push_back(D);
return D;
}
@@ -4178,24 +4213,55 @@ void ASTReader::PassInterestingDeclsToConsumer() {
GetDecl(ID);
EagerlyDeserializedDecls.clear();
- while (!PotentiallyInterestingDecls.empty()) {
- InterestingDecl D = PotentiallyInterestingDecls.front();
- PotentiallyInterestingDecls.pop_front();
- if (isConsumerInterestedIn(getContext(), D.getDecl(), D.hasPendingBody()))
- PassInterestingDeclToConsumer(D.getDecl());
+ auto ConsumingPotentialInterestingDecls = [this]() {
+ while (!PotentiallyInterestingDecls.empty()) {
+ Decl *D = PotentiallyInterestingDecls.front();
+ PotentiallyInterestingDecls.pop_front();
+ if (isConsumerInterestedIn(D))
+ PassInterestingDeclToConsumer(D);
+ }
+ };
+ std::deque<Decl *> MaybeInterestingDecls =
+ std::move(PotentiallyInterestingDecls);
+ PotentiallyInterestingDecls.clear();
+ assert(PotentiallyInterestingDecls.empty());
+ while (!MaybeInterestingDecls.empty()) {
+ Decl *D = MaybeInterestingDecls.front();
+ MaybeInterestingDecls.pop_front();
+ // Since we load the variable's initializers lazily, it'd be problematic
+ // if the initializers dependent on each other. So here we try to load the
+ // initializers of static variables to make sure they are passed to code
+ // generator by order. If we read anything interesting, we would consume
+ // that before emitting the current declaration.
+ if (auto *VD = dyn_cast<VarDecl>(D);
+ VD && VD->isFileVarDecl() && !VD->isExternallyVisible())
+ VD->getInit();
+ ConsumingPotentialInterestingDecls();
+ if (isConsumerInterestedIn(D))
+ PassInterestingDeclToConsumer(D);
+ }
+
+ // If we add any new potential interesting decl in the last call, consume it.
+ ConsumingPotentialInterestingDecls();
+
+ for (GlobalDeclID ID : VTablesToEmit) {
+ auto *RD = cast<CXXRecordDecl>(GetDecl(ID));
+ assert(!RD->shouldEmitInExternalSource());
+ PassVTableToConsumer(RD);
}
+ VTablesToEmit.clear();
}
void ASTReader::loadDeclUpdateRecords(PendingUpdateRecord &Record) {
// The declaration may have been modified by files later in the chain.
// If this is the case, read the record containing the updates from each file
// and pass it to ASTDeclReader to make the modifications.
- serialization::GlobalDeclID ID = Record.ID;
+ GlobalDeclID ID = Record.ID;
Decl *D = Record.D;
ProcessingUpdatesRAIIObj ProcessingUpdates(*this);
DeclUpdateOffsetsMap::iterator UpdI = DeclUpdateOffsets.find(ID);
- SmallVector<serialization::DeclID, 8> PendingLazySpecializationIDs;
+ SmallVector<GlobalDeclID, 8> PendingLazySpecializationIDs;
if (UpdI != DeclUpdateOffsets.end()) {
auto UpdateOffsets = std::move(UpdI->second);
@@ -4205,8 +4271,7 @@ void ASTReader::loadDeclUpdateRecords(PendingUpdateRecord &Record) {
// the declaration, then we know it was interesting and we skip the call
// to isConsumerInterestedIn because it is unsafe to call in the
// current ASTReader state.
- bool WasInteresting =
- Record.JustLoaded || isConsumerInterestedIn(getContext(), D, false);
+ bool WasInteresting = Record.JustLoaded || isConsumerInterestedIn(D);
for (auto &FileAndOffset : UpdateOffsets) {
ModuleFile *F = FileAndOffset.first;
uint64_t Offset = FileAndOffset.second;
@@ -4238,10 +4303,8 @@ void ASTReader::loadDeclUpdateRecords(PendingUpdateRecord &Record) {
// We might have made this declaration interesting. If so, remember that
// we need to hand it off to the consumer.
- if (!WasInteresting &&
- isConsumerInterestedIn(getContext(), D, Reader.hasPendingBody())) {
- PotentiallyInterestingDecls.push_back(
- InterestingDecl(D, Reader.hasPendingBody()));
+ if (!WasInteresting && isConsumerInterestedIn(D)) {
+ PotentiallyInterestingDecls.push_back(D);
WasInteresting = true;
}
}
@@ -4318,7 +4381,8 @@ void ASTReader::loadPendingDeclChain(Decl *FirstLocal, uint64_t LocalOffset) {
// we should instead generate one loop per kind and dispatch up-front?
Decl *MostRecent = FirstLocal;
for (unsigned I = 0, N = Record.size(); I != N; ++I) {
- auto *D = GetLocalDecl(*M, Record[N - I - 1]);
+ unsigned Idx = N - I - 1;
+ auto *D = ReadDecl(*M, Record, Idx);
ASTDeclReader::attachPreviousDecl(*this, D, MostRecent, CanonDecl);
MostRecent = D;
}
@@ -4335,7 +4399,7 @@ namespace {
llvm::SmallPtrSetImpl<ObjCCategoryDecl *> &Deserialized;
ObjCCategoryDecl *Tail = nullptr;
llvm::DenseMap<DeclarationName, ObjCCategoryDecl *> NameCategoryMap;
- serialization::GlobalDeclID InterfaceID;
+ GlobalDeclID InterfaceID;
unsigned PreviousGeneration;
void add(ObjCCategoryDecl *Cat) {
@@ -4377,11 +4441,10 @@ namespace {
}
public:
- ObjCCategoriesVisitor(ASTReader &Reader,
- ObjCInterfaceDecl *Interface,
- llvm::SmallPtrSetImpl<ObjCCategoryDecl *> &Deserialized,
- serialization::GlobalDeclID InterfaceID,
- unsigned PreviousGeneration)
+ ObjCCategoriesVisitor(
+ ASTReader &Reader, ObjCInterfaceDecl *Interface,
+ llvm::SmallPtrSetImpl<ObjCCategoryDecl *> &Deserialized,
+ GlobalDeclID InterfaceID, unsigned PreviousGeneration)
: Reader(Reader), Interface(Interface), Deserialized(Deserialized),
InterfaceID(InterfaceID), PreviousGeneration(PreviousGeneration) {
// Populate the name -> category map with the set of known categories.
@@ -4403,8 +4466,9 @@ namespace {
// Map global ID of the definition down to the local ID used in this
// module file. If there is no such mapping, we'll find nothing here
// (or in any module it imports).
- DeclID LocalID = Reader.mapGlobalIDToModuleFileGlobalID(M, InterfaceID);
- if (!LocalID)
+ LocalDeclID LocalID =
+ Reader.mapGlobalIDToModuleFileGlobalID(M, InterfaceID);
+ if (LocalID.isInvalid())
return true;
// Perform a binary search to find the local redeclarations for this
@@ -4415,7 +4479,7 @@ namespace {
M.ObjCCategoriesMap + M.LocalNumObjCCategoriesInMap,
Compare);
if (Result == M.ObjCCategoriesMap + M.LocalNumObjCCategoriesInMap ||
- Result->DefinitionID != LocalID) {
+ LocalID != Result->getDefinitionID()) {
// We didn't find anything. If the class definition is in this module
// file, then the module files it depends on cannot have any categories,
// so suppress further lookup.
@@ -4427,16 +4491,14 @@ namespace {
unsigned N = M.ObjCCategories[Offset];
M.ObjCCategories[Offset++] = 0; // Don't try to deserialize again
for (unsigned I = 0; I != N; ++I)
- add(cast_or_null<ObjCCategoryDecl>(
- Reader.GetLocalDecl(M, M.ObjCCategories[Offset++])));
+ add(Reader.ReadDeclAs<ObjCCategoryDecl>(M, M.ObjCCategories, Offset));
return true;
}
};
} // namespace
-void ASTReader::loadObjCCategories(serialization::GlobalDeclID ID,
- ObjCInterfaceDecl *D,
+void ASTReader::loadObjCCategories(GlobalDeclID ID, ObjCInterfaceDecl *D,
unsigned PreviousGeneration) {
ObjCCategoriesVisitor Visitor(*this, D, CategoriesDeserialized, ID,
PreviousGeneration);
@@ -4464,8 +4526,9 @@ static void forAllLaterRedecls(DeclT *D, Fn F) {
}
}
-void ASTDeclReader::UpdateDecl(Decl *D,
- llvm::SmallVectorImpl<serialization::DeclID> &PendingLazySpecializationIDs) {
+void ASTDeclReader::UpdateDecl(
+ Decl *D,
+ llvm::SmallVectorImpl<GlobalDeclID> &PendingLazySpecializationIDs) {
while (Record.getIdx() < Record.size()) {
switch ((DeclUpdateKind)Record.readInt()) {
case UPD_CXX_ADDED_IMPLICIT_MEMBER: {
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReaderInternals.h b/contrib/llvm-project/clang/lib/Serialization/ASTReaderInternals.h
index 25a46ddabcb7..536b19f91691 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReaderInternals.h
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReaderInternals.h
@@ -49,15 +49,15 @@ public:
static const int MaxTables = 4;
/// The lookup result is a list of global declaration IDs.
- using data_type = SmallVector<DeclID, 4>;
+ using data_type = SmallVector<GlobalDeclID, 4>;
struct data_type_builder {
data_type &Data;
- llvm::DenseSet<DeclID> Found;
+ llvm::DenseSet<GlobalDeclID> Found;
data_type_builder(data_type &D) : Data(D) {}
- void insert(DeclID ID) {
+ void insert(GlobalDeclID ID) {
// Just use a linear scan unless we have more than a few IDs.
if (Found.empty() && !Data.empty()) {
if (Data.size() <= 4) {
@@ -108,7 +108,7 @@ public:
static void MergeDataInto(const data_type &From, data_type_builder &To) {
To.Data.reserve(To.Data.size() + From.size());
- for (DeclID ID : From)
+ for (GlobalDeclID ID : From)
To.insert(ID);
}
@@ -175,7 +175,7 @@ public:
const unsigned char* d,
unsigned DataLen);
- IdentID ReadIdentifierID(const unsigned char *d);
+ IdentifierID ReadIdentifierID(const unsigned char *d);
ASTReader &getReader() const { return Reader; }
};
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp
index 85ecfa1a1a0b..c1361efd8c5f 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -790,19 +790,19 @@ readConstraintSatisfaction(ASTRecordReader &Record) {
ConstraintSatisfaction Satisfaction;
Satisfaction.IsSatisfied = Record.readInt();
Satisfaction.ContainsErrors = Record.readInt();
+ const ASTContext &C = Record.getContext();
if (!Satisfaction.IsSatisfied) {
unsigned NumDetailRecords = Record.readInt();
for (unsigned i = 0; i != NumDetailRecords; ++i) {
- Expr *ConstraintExpr = Record.readExpr();
if (/* IsDiagnostic */Record.readInt()) {
SourceLocation DiagLocation = Record.readSourceLocation();
- std::string DiagMessage = Record.readString();
+ StringRef DiagMessage = C.backupStr(Record.readString());
+
Satisfaction.Details.emplace_back(
- ConstraintExpr, new (Record.getContext())
- ConstraintSatisfaction::SubstitutionDiagnostic{
- DiagLocation, DiagMessage});
+ new (C) ConstraintSatisfaction::SubstitutionDiagnostic(
+ DiagLocation, DiagMessage));
} else
- Satisfaction.Details.emplace_back(ConstraintExpr, Record.readExpr());
+ Satisfaction.Details.emplace_back(Record.readExpr());
}
}
return Satisfaction;
@@ -821,9 +821,11 @@ void ASTStmtReader::VisitConceptSpecializationExpr(
static concepts::Requirement::SubstitutionDiagnostic *
readSubstitutionDiagnostic(ASTRecordReader &Record) {
- std::string SubstitutedEntity = Record.readString();
+ const ASTContext &C = Record.getContext();
+ StringRef SubstitutedEntity = C.backupStr(Record.readString());
SourceLocation DiagLoc = Record.readSourceLocation();
- std::string DiagMessage = Record.readString();
+ StringRef DiagMessage = C.backupStr(Record.readString());
+
return new (Record.getContext())
concepts::Requirement::SubstitutionDiagnostic{SubstitutedEntity, DiagLoc,
DiagMessage};
@@ -908,26 +910,21 @@ void ASTStmtReader::VisitRequiresExpr(RequiresExpr *E) {
std::move(*Req));
} break;
case concepts::Requirement::RK_Nested: {
+ ASTContext &C = Record.getContext();
bool HasInvalidConstraint = Record.readInt();
if (HasInvalidConstraint) {
- std::string InvalidConstraint = Record.readString();
- char *InvalidConstraintBuf =
- new (Record.getContext()) char[InvalidConstraint.size()];
- std::copy(InvalidConstraint.begin(), InvalidConstraint.end(),
- InvalidConstraintBuf);
- R = new (Record.getContext()) concepts::NestedRequirement(
- Record.getContext(),
- StringRef(InvalidConstraintBuf, InvalidConstraint.size()),
+ StringRef InvalidConstraint = C.backupStr(Record.readString());
+ R = new (C) concepts::NestedRequirement(
+ Record.getContext(), InvalidConstraint,
readConstraintSatisfaction(Record));
break;
}
Expr *E = Record.readExpr();
if (E->isInstantiationDependent())
- R = new (Record.getContext()) concepts::NestedRequirement(E);
+ R = new (C) concepts::NestedRequirement(E);
else
- R = new (Record.getContext())
- concepts::NestedRequirement(Record.getContext(), E,
- readConstraintSatisfaction(Record));
+ R = new (C) concepts::NestedRequirement(
+ C, E, readConstraintSatisfaction(Record));
} break;
}
if (!R)
@@ -956,14 +953,22 @@ void ASTStmtReader::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
E->setRBracketLoc(readSourceLocation());
}
-void ASTStmtReader::VisitOMPArraySectionExpr(OMPArraySectionExpr *E) {
+void ASTStmtReader::VisitArraySectionExpr(ArraySectionExpr *E) {
VisitExpr(E);
+ E->ASType = Record.readEnum<ArraySectionExpr::ArraySectionType>();
+
E->setBase(Record.readSubExpr());
E->setLowerBound(Record.readSubExpr());
E->setLength(Record.readSubExpr());
- E->setStride(Record.readSubExpr());
+
+ if (E->isOMPArraySection())
+ E->setStride(Record.readSubExpr());
+
E->setColonLocFirst(readSourceLocation());
- E->setColonLocSecond(readSourceLocation());
+
+ if (E->isOMPArraySection())
+ E->setColonLocSecond(readSourceLocation());
+
E->setRBracketLoc(readSourceLocation());
}
@@ -1047,30 +1052,22 @@ void ASTStmtReader::VisitMemberExpr(MemberExpr *E) {
E->MemberDNLoc = Record.readDeclarationNameLoc(E->MemberDecl->getDeclName());
E->MemberLoc = Record.readSourceLocation();
E->MemberExprBits.IsArrow = CurrentUnpackingBits->getNextBit();
- E->MemberExprBits.HasQualifierOrFoundDecl = HasQualifier || HasFoundDecl;
+ E->MemberExprBits.HasQualifier = HasQualifier;
+ E->MemberExprBits.HasFoundDecl = HasFoundDecl;
E->MemberExprBits.HasTemplateKWAndArgsInfo = HasTemplateInfo;
E->MemberExprBits.HadMultipleCandidates = CurrentUnpackingBits->getNextBit();
E->MemberExprBits.NonOdrUseReason =
CurrentUnpackingBits->getNextBits(/*Width=*/2);
E->MemberExprBits.OperatorLoc = Record.readSourceLocation();
- if (HasQualifier || HasFoundDecl) {
- DeclAccessPair FoundDecl;
- if (HasFoundDecl) {
- auto *FoundD = Record.readDeclAs<NamedDecl>();
- auto AS = (AccessSpecifier)CurrentUnpackingBits->getNextBits(/*Width=*/2);
- FoundDecl = DeclAccessPair::make(FoundD, AS);
- } else {
- FoundDecl = DeclAccessPair::make(E->MemberDecl,
- E->MemberDecl->getAccess());
- }
- E->getTrailingObjects<MemberExprNameQualifier>()->FoundDecl = FoundDecl;
+ if (HasQualifier)
+ new (E->getTrailingObjects<NestedNameSpecifierLoc>())
+ NestedNameSpecifierLoc(Record.readNestedNameSpecifierLoc());
- NestedNameSpecifierLoc QualifierLoc;
- if (HasQualifier)
- QualifierLoc = Record.readNestedNameSpecifierLoc();
- E->getTrailingObjects<MemberExprNameQualifier>()->QualifierLoc =
- QualifierLoc;
+ if (HasFoundDecl) {
+ auto *FoundD = Record.readDeclAs<NamedDecl>();
+ auto AS = (AccessSpecifier)CurrentUnpackingBits->getNextBits(/*Width=*/2);
+ *E->getTrailingObjects<DeclAccessPair>() = DeclAccessPair::make(FoundD, AS);
}
if (HasTemplateInfo)
@@ -1323,6 +1320,16 @@ void ASTStmtReader::VisitSourceLocExpr(SourceLocExpr *E) {
E->SourceLocExprBits.Kind = Record.readInt();
}
+void ASTStmtReader::VisitEmbedExpr(EmbedExpr *E) {
+ VisitExpr(E);
+ E->EmbedKeywordLoc = readSourceLocation();
+ EmbedDataStorage *Data = new (Record.getContext()) EmbedDataStorage;
+ Data->BinaryData = cast<StringLiteral>(Record.readSubStmt());
+ E->Data = Data;
+ E->Begin = Record.readInt();
+ E->NumOfElements = Record.readInt();
+}
+
void ASTStmtReader::VisitAddrLabelExpr(AddrLabelExpr *E) {
VisitExpr(E);
E->setAmpAmpLoc(readSourceLocation());
@@ -1849,6 +1856,7 @@ void ASTStmtReader::VisitCXXThisExpr(CXXThisExpr *E) {
VisitExpr(E);
E->setLocation(readSourceLocation());
E->setImplicit(Record.readInt());
+ E->setCapturedByCopyInLambdaWithExplicitObjectParameter(Record.readInt());
}
void ASTStmtReader::VisitCXXThrowExpr(CXXThrowExpr *E) {
@@ -2103,7 +2111,6 @@ void ASTStmtReader::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
void ASTStmtReader::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
VisitOverloadExpr(E);
E->UnresolvedLookupExprBits.RequiresADL = CurrentUnpackingBits->getNextBit();
- E->UnresolvedLookupExprBits.Overloaded = CurrentUnpackingBits->getNextBit();
E->NamingClass = readDeclAs<CXXRecordDecl>();
}
@@ -2174,6 +2181,19 @@ void ASTStmtReader::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
}
}
+void ASTStmtReader::VisitPackIndexingExpr(PackIndexingExpr *E) {
+ VisitExpr(E);
+ E->TransformedExpressions = Record.readInt();
+ E->ExpandedToEmptyPack = Record.readInt();
+ E->EllipsisLoc = readSourceLocation();
+ E->RSquareLoc = readSourceLocation();
+ E->SubExprs[0] = Record.readStmt();
+ E->SubExprs[1] = Record.readStmt();
+ auto **Exprs = E->getTrailingObjects<Expr *>();
+ for (unsigned I = 0; I < E->TransformedExpressions; ++I)
+ Exprs[I] = Record.readExpr();
+}
+
void ASTStmtReader::VisitSubstNonTypeTemplateParmExpr(
SubstNonTypeTemplateParmExpr *E) {
VisitExpr(E);
@@ -2415,6 +2435,14 @@ void ASTStmtReader::VisitOMPUnrollDirective(OMPUnrollDirective *D) {
VisitOMPLoopTransformationDirective(D);
}
+void ASTStmtReader::VisitOMPReverseDirective(OMPReverseDirective *D) {
+ VisitOMPLoopTransformationDirective(D);
+}
+
+void ASTStmtReader::VisitOMPInterchangeDirective(OMPInterchangeDirective *D) {
+ VisitOMPLoopTransformationDirective(D);
+}
+
void ASTStmtReader::VisitOMPForDirective(OMPForDirective *D) {
VisitOMPLoopDirective(D);
D->setHasCancel(Record.readBool());
@@ -2764,6 +2792,7 @@ void ASTStmtReader::VisitOMPTeamsGenericLoopDirective(
void ASTStmtReader::VisitOMPTargetTeamsGenericLoopDirective(
OMPTargetTeamsGenericLoopDirective *D) {
VisitOMPLoopDirective(D);
+ D->setCanBeParallelFor(Record.readBool());
}
void ASTStmtReader::VisitOMPParallelGenericLoopDirective(
@@ -2777,6 +2806,34 @@ void ASTStmtReader::VisitOMPTargetParallelGenericLoopDirective(
}
//===----------------------------------------------------------------------===//
+// OpenACC Constructs/Directives.
+//===----------------------------------------------------------------------===//
+void ASTStmtReader::VisitOpenACCConstructStmt(OpenACCConstructStmt *S) {
+ (void)Record.readInt();
+ S->Kind = Record.readEnum<OpenACCDirectiveKind>();
+ S->Range = Record.readSourceRange();
+ S->DirectiveLoc = Record.readSourceLocation();
+ Record.readOpenACCClauseList(S->Clauses);
+}
+
+void ASTStmtReader::VisitOpenACCAssociatedStmtConstruct(
+ OpenACCAssociatedStmtConstruct *S) {
+ VisitOpenACCConstructStmt(S);
+ S->setAssociatedStmt(Record.readSubStmt());
+}
+
+void ASTStmtReader::VisitOpenACCComputeConstruct(OpenACCComputeConstruct *S) {
+ VisitStmt(S);
+ VisitOpenACCAssociatedStmtConstruct(S);
+ S->findAndSetChildLoops();
+}
+
+void ASTStmtReader::VisitOpenACCLoopConstruct(OpenACCLoopConstruct *S) {
+ VisitStmt(S);
+ VisitOpenACCAssociatedStmtConstruct(S);
+}
+
+//===----------------------------------------------------------------------===//
// ASTReader Implementation
//===----------------------------------------------------------------------===//
@@ -3064,8 +3121,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = new (Context) MatrixSubscriptExpr(Empty);
break;
- case EXPR_OMP_ARRAY_SECTION:
- S = new (Context) OMPArraySectionExpr(Empty);
+ case EXPR_ARRAY_SECTION:
+ S = new (Context) ArraySectionExpr(Empty);
break;
case EXPR_OMP_ARRAY_SHAPING:
@@ -3191,6 +3248,10 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = new (Context) SourceLocExpr(Empty);
break;
+ case EXPR_BUILTIN_PP_EMBED:
+ S = new (Context) EmbedExpr(Empty);
+ break;
+
case EXPR_ADDR_LABEL:
S = new (Context) AddrLabelExpr(Empty);
break;
@@ -3401,6 +3462,22 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
}
+ case STMT_OMP_REVERSE_DIRECTIVE: {
+ assert(Record[ASTStmtReader::NumStmtFields] == 1 &&
+ "Reverse directive accepts only a single loop");
+ assert(Record[ASTStmtReader::NumStmtFields + 1] == 0 &&
+ "Reverse directive has no clauses");
+ S = OMPReverseDirective::CreateEmpty(Context);
+ break;
+ }
+
+ case STMT_OMP_INTERCHANGE_DIRECTIVE: {
+ unsigned NumLoops = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPInterchangeDirective::CreateEmpty(Context, NumClauses, NumLoops);
+ break;
+ }
+
case STMT_OMP_FOR_DIRECTIVE: {
unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
@@ -4102,6 +4179,12 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
/*NumPartialArgs=*/Record[ASTStmtReader::NumExprFields]);
break;
+ case EXPR_PACK_INDEXING:
+ S = PackIndexingExpr::CreateDeserialized(
+ Context,
+ /*TransformedExprs=*/Record[ASTStmtReader::NumExprFields]);
+ break;
+
case EXPR_SUBST_NON_TYPE_TEMPLATE_PARM:
S = new (Context) SubstNonTypeTemplateParmExpr(Empty);
break;
@@ -4188,7 +4271,16 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = new (Context) ConceptSpecializationExpr(Empty);
break;
}
-
+ case STMT_OPENACC_COMPUTE_CONSTRUCT: {
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
+ S = OpenACCComputeConstruct::CreateEmpty(Context, NumClauses);
+ break;
+ }
+ case STMT_OPENACC_LOOP_CONSTRUCT: {
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
+ S = OpenACCLoopConstruct::CreateEmpty(Context, NumClauses);
+ break;
+ }
case EXPR_REQUIRES:
unsigned numLocalParameters = Record[ASTStmtReader::NumExprFields];
unsigned numRequirement = Record[ASTStmtReader::NumExprFields + 1];
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
index 378a1f86bd53..e907ddb88949 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
@@ -29,6 +29,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/OpenACCClause.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/TemplateName.h"
@@ -44,6 +45,7 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/ObjCRuntime.h"
+#include "clang/Basic/OpenACCKinds.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
@@ -63,6 +65,8 @@
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaCUDA.h"
+#include "clang/Sema/SemaObjC.h"
#include "clang/Sema/Weak.h"
#include "clang/Serialization/ASTBitCodes.h"
#include "clang/Serialization/ASTReader.h"
@@ -161,79 +165,108 @@ static TypeCode getTypeCodeForTypeClass(Type::TypeClass id) {
namespace {
-std::set<const FileEntry *> GetAffectingModuleMaps(const Preprocessor &PP,
- Module *RootModule) {
- SmallVector<const Module *> ModulesToProcess{RootModule};
+std::optional<std::set<const FileEntry *>>
+GetAffectingModuleMaps(const Preprocessor &PP, Module *RootModule) {
+ if (!PP.getHeaderSearchInfo()
+ .getHeaderSearchOpts()
+ .ModulesPruneNonAffectingModuleMaps)
+ return std::nullopt;
const HeaderSearch &HS = PP.getHeaderSearchInfo();
-
- SmallVector<OptionalFileEntryRef, 16> FilesByUID;
- HS.getFileMgr().GetUniqueIDMapping(FilesByUID);
-
- if (FilesByUID.size() > HS.header_file_size())
- FilesByUID.resize(HS.header_file_size());
-
- for (unsigned UID = 0, LastUID = FilesByUID.size(); UID != LastUID; ++UID) {
- OptionalFileEntryRef File = FilesByUID[UID];
- if (!File)
- continue;
-
- const HeaderFileInfo *HFI =
- HS.getExistingFileInfo(*File, /*WantExternal*/ false);
- if (!HFI || (HFI->isModuleHeader && !HFI->isCompilingModuleHeader))
- continue;
-
- for (const auto &KH : HS.findResolvedModulesForHeader(*File)) {
- if (!KH.getModule())
- continue;
- ModulesToProcess.push_back(KH.getModule());
- }
- }
-
const ModuleMap &MM = HS.getModuleMap();
- SourceManager &SourceMgr = PP.getSourceManager();
- std::set<const FileEntry *> ModuleMaps{};
- auto CollectIncludingModuleMaps = [&](FileEntryRef F) {
- if (!ModuleMaps.insert(F).second)
+ std::set<const FileEntry *> ModuleMaps;
+ std::set<const Module *> ProcessedModules;
+ auto CollectModuleMapsForHierarchy = [&](const Module *M) {
+ M = M->getTopLevelModule();
+
+ if (!ProcessedModules.insert(M).second)
return;
- FileID FID = SourceMgr.translateFile(F);
- SourceLocation Loc = SourceMgr.getIncludeLoc(FID);
- // The include location of inferred module maps can point into the header
- // file that triggered the inferring. Cut off the walk if that's the case.
- while (Loc.isValid() && isModuleMap(SourceMgr.getFileCharacteristic(Loc))) {
- FID = SourceMgr.getFileID(Loc);
- if (!ModuleMaps.insert(*SourceMgr.getFileEntryRefForID(FID)).second)
- break;
- Loc = SourceMgr.getIncludeLoc(FID);
- }
- };
- std::set<const Module *> ProcessedModules;
- auto CollectIncludingMapsFromAncestors = [&](const Module *M) {
- for (const Module *Mod = M; Mod; Mod = Mod->Parent) {
- if (!ProcessedModules.insert(Mod).second)
- break;
+ std::queue<const Module *> Q;
+ Q.push(M);
+ while (!Q.empty()) {
+ const Module *Mod = Q.front();
+ Q.pop();
+
// The containing module map is affecting, because it's being pointed
// into by Module::DefinitionLoc.
- if (auto ModuleMapFile = MM.getContainingModuleMapFile(Mod))
- CollectIncludingModuleMaps(*ModuleMapFile);
- // For inferred modules, the module map that allowed inferring is not in
- // the include chain of the virtual containing module map file. It did
- // affect the compilation, though.
- if (auto ModuleMapFile = MM.getModuleMapFileForUniquing(Mod))
- CollectIncludingModuleMaps(*ModuleMapFile);
+ if (auto FE = MM.getContainingModuleMapFile(Mod))
+ ModuleMaps.insert(*FE);
+ // For inferred modules, the module map that allowed inferring is not
+ // related to the virtual containing module map file. It did affect the
+ // compilation, though.
+ if (auto FE = MM.getModuleMapFileForUniquing(Mod))
+ ModuleMaps.insert(*FE);
+
+ for (auto *SubM : Mod->submodules())
+ Q.push(SubM);
}
};
- for (const Module *CurrentModule : ModulesToProcess) {
- CollectIncludingMapsFromAncestors(CurrentModule);
+ // Handle all the affecting modules referenced from the root module.
+
+ CollectModuleMapsForHierarchy(RootModule);
+
+ std::queue<const Module *> Q;
+ Q.push(RootModule);
+ while (!Q.empty()) {
+ const Module *CurrentModule = Q.front();
+ Q.pop();
+
for (const Module *ImportedModule : CurrentModule->Imports)
- CollectIncludingMapsFromAncestors(ImportedModule);
+ CollectModuleMapsForHierarchy(ImportedModule);
for (const Module *UndeclaredModule : CurrentModule->UndeclaredUses)
- CollectIncludingMapsFromAncestors(UndeclaredModule);
+ CollectModuleMapsForHierarchy(UndeclaredModule);
+
+ for (auto *M : CurrentModule->submodules())
+ Q.push(M);
}
+ // Handle textually-included headers that belong to other modules.
+
+ SmallVector<OptionalFileEntryRef, 16> FilesByUID;
+ HS.getFileMgr().GetUniqueIDMapping(FilesByUID);
+
+ if (FilesByUID.size() > HS.header_file_size())
+ FilesByUID.resize(HS.header_file_size());
+
+ for (unsigned UID = 0, LastUID = FilesByUID.size(); UID != LastUID; ++UID) {
+ OptionalFileEntryRef File = FilesByUID[UID];
+ if (!File)
+ continue;
+
+ const HeaderFileInfo *HFI = HS.getExistingLocalFileInfo(*File);
+ if (!HFI)
+ continue; // We have no information on this being a header file.
+ if (!HFI->isCompilingModuleHeader && HFI->isModuleHeader)
+ continue; // Modular header, handled in the above module-based loop.
+ if (!HFI->isCompilingModuleHeader && !HFI->IsLocallyIncluded)
+ continue; // Non-modular header not included locally is not affecting.
+
+ for (const auto &KH : HS.findResolvedModulesForHeader(*File))
+ if (const Module *M = KH.getModule())
+ CollectModuleMapsForHierarchy(M);
+ }
+
+ // FIXME: This algorithm is not correct for module map hierarchies where
+ // module map file defining a (sub)module of a top-level module X includes
+ // a module map file that defines a (sub)module of another top-level module Y.
+ // Whenever X is affecting and Y is not, "replaying" this PCM file will fail
+ // when parsing module map files for X due to not knowing about the `extern`
+ // module map for Y.
+ //
+ // We don't have a good way to fix it here. We could mark all children of
+ // affecting module map files as being affecting as well, but that's
+ // expensive. SourceManager does not model the edge from parent to child
+ // SLocEntries, so instead, we would need to iterate over leaf module map
+ // files, walk up their include hierarchy and check whether we arrive at an
+ // affecting module map.
+ //
+ // Instead of complicating and slowing down this function, we should probably
+ // just ban module map hierarchies where module map defining a (sub)module X
+ // includes a module map defining a module that's not a submodule of X.
+
return ModuleMaps;
}
@@ -318,6 +351,10 @@ void TypeLocWriter::VisitAdjustedTypeLoc(AdjustedTypeLoc TL) {
// nothing to do
}
+void TypeLocWriter::VisitArrayParameterTypeLoc(ArrayParameterTypeLoc TL) {
+ // nothing to do
+}
+
void TypeLocWriter::VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) {
addSourceLocation(TL.getCaretLoc());
}
@@ -482,6 +519,10 @@ void ASTRecordWriter::AddConceptReference(const ConceptReference *CR) {
AddASTTemplateArgumentListInfo(CR->getTemplateArgsAsWritten());
}
+void TypeLocWriter::VisitPackIndexingTypeLoc(PackIndexingTypeLoc TL) {
+ addSourceLocation(TL.getEllipsisLoc());
+}
+
void TypeLocWriter::VisitAutoTypeLoc(AutoTypeLoc TL) {
addSourceLocation(TL.getNameLoc());
auto *CR = TL.getConceptReference();
@@ -510,6 +551,10 @@ void TypeLocWriter::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
Record.AddAttr(TL.getAttr());
}
+void TypeLocWriter::VisitCountAttributedTypeLoc(CountAttributedTypeLoc TL) {
+ // Nothing to do
+}
+
void TypeLocWriter::VisitBTFTagAttributedTypeLoc(BTFTagAttributedTypeLoc TL) {
// Nothing to do.
}
@@ -784,6 +829,7 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
RECORD(EXPR_ARRAY_TYPE_TRAIT);
RECORD(EXPR_PACK_EXPANSION);
RECORD(EXPR_SIZEOF_PACK);
+ RECORD(EXPR_PACK_INDEXING);
RECORD(EXPR_SUBST_NON_TYPE_TEMPLATE_PARM);
RECORD(EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK);
RECORD(EXPR_FUNCTION_PARM_PACK);
@@ -850,6 +896,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(WEAK_UNDECLARED_IDENTIFIERS);
RECORD(PENDING_IMPLICIT_INSTANTIATIONS);
RECORD(UPDATE_VISIBLE);
+ RECORD(DELAYED_NAMESPACE_LEXICAL_VISIBLE_RECORD);
RECORD(DECL_UPDATE_OFFSETS);
RECORD(DECL_UPDATES);
RECORD(CUDA_SPECIAL_DECL_REFS);
@@ -879,6 +926,8 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(PP_CONDITIONAL_STACK);
RECORD(DECLS_TO_CHECK_FOR_DEFERRED_DIAGS);
RECORD(PP_ASSUME_NONNULL_LOC);
+ RECORD(PP_UNSAFE_BUFFER_USAGE);
+ RECORD(VTABLES_TO_EMIT);
// SourceManager Block.
BLOCK(SOURCE_MANAGER_BLOCK);
@@ -1002,6 +1051,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(DECL_UNRESOLVED_USING_VALUE);
RECORD(DECL_UNRESOLVED_USING_TYPENAME);
RECORD(DECL_LINKAGE_SPEC);
+ RECORD(DECL_EXPORT);
RECORD(DECL_CXX_RECORD);
RECORD(DECL_CXX_METHOD);
RECORD(DECL_CXX_CONSTRUCTOR);
@@ -1142,26 +1192,72 @@ ASTWriter::createSignature() const {
return std::make_pair(ASTBlockHash, Signature);
}
+ASTFileSignature ASTWriter::createSignatureForNamedModule() const {
+ llvm::SHA1 Hasher;
+ Hasher.update(StringRef(Buffer.data(), Buffer.size()));
+
+ assert(WritingModule);
+ assert(WritingModule->isNamedModule());
+
+  // We need to combine all the export-imported modules no matter
+  // whether we used them or not.
+ for (auto [ExportImported, _] : WritingModule->Exports)
+ Hasher.update(ExportImported->Signature);
+
+ // We combine all the used modules to make sure the signature is precise.
+ // Consider the case like:
+ //
+ // // a.cppm
+ // export module a;
+ // export inline int a() { ... }
+ //
+ // // b.cppm
+ // export module b;
+ // import a;
+ // export inline int b() { return a(); }
+ //
+ // Since both `a()` and `b()` are inline, we need to make sure the BMI of
+ // `b.pcm` will change after the implementation of `a()` changes. We can't
+ // get that naturally since we won't record the body of `a()` during the
+ // writing process. We can't reuse ODRHash here since ODRHash won't calculate
+ // the called function recursively. So ODRHash will be problematic if `a()`
+ // calls other inline functions.
+ //
+  // Probably we can solve this by a new hash mechanism, but the safety and
+  // efficiency may be a problem too. Here we just combine the hash value of
+  // the used modules conservatively.
+ for (Module *M : TouchedTopLevelModules)
+ Hasher.update(M->Signature);
+
+ return ASTFileSignature::create(Hasher.result());
+}
+
+static void BackpatchSignatureAt(llvm::BitstreamWriter &Stream,
+ const ASTFileSignature &S, uint64_t BitNo) {
+ for (uint8_t Byte : S) {
+ Stream.BackpatchByte(BitNo, Byte);
+ BitNo += 8;
+ }
+}
+
ASTFileSignature ASTWriter::backpatchSignature() {
+ if (isWritingStdCXXNamedModules()) {
+ ASTFileSignature Signature = createSignatureForNamedModule();
+ BackpatchSignatureAt(Stream, Signature, SignatureOffset);
+ return Signature;
+ }
+
if (!WritingModule ||
!PP->getHeaderSearchInfo().getHeaderSearchOpts().ModulesHashContent)
return {};
// For implicit modules, write the hash of the PCM as its signature.
-
- auto BackpatchSignatureAt = [&](const ASTFileSignature &S, uint64_t BitNo) {
- for (uint8_t Byte : S) {
- Stream.BackpatchByte(BitNo, Byte);
- BitNo += 8;
- }
- };
-
ASTFileSignature ASTBlockHash;
ASTFileSignature Signature;
std::tie(ASTBlockHash, Signature) = createSignature();
- BackpatchSignatureAt(ASTBlockHash, ASTBlockHashOffset);
- BackpatchSignatureAt(Signature, SignatureOffset);
+ BackpatchSignatureAt(Stream, ASTBlockHash, ASTBlockHashOffset);
+ BackpatchSignatureAt(Stream, Signature, SignatureOffset);
return Signature;
}
@@ -1178,9 +1274,11 @@ void ASTWriter::writeUnhashedControlBlock(Preprocessor &PP,
RecordData Record;
Stream.EnterSubblock(UNHASHED_CONTROL_BLOCK_ID, 5);
- // For implicit modules, write the hash of the PCM as its signature.
- if (WritingModule &&
- PP.getHeaderSearchInfo().getHeaderSearchOpts().ModulesHashContent) {
+ // For implicit modules and C++20 named modules, write the hash of the PCM as
+ // its signature.
+ if (isWritingStdCXXNamedModules() ||
+ (WritingModule &&
+ PP.getHeaderSearchInfo().getHeaderSearchOpts().ModulesHashContent)) {
// At this point, we don't know the actual signature of the file or the AST
// block - we're only able to compute those at the end of the serialization
// process. Let's store dummy signatures for now, and replace them with the
@@ -1191,21 +1289,24 @@ void ASTWriter::writeUnhashedControlBlock(Preprocessor &PP,
auto Dummy = ASTFileSignature::createDummy();
SmallString<128> Blob{Dummy.begin(), Dummy.end()};
- auto Abbrev = std::make_shared<BitCodeAbbrev>();
- Abbrev->Add(BitCodeAbbrevOp(AST_BLOCK_HASH));
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
- unsigned ASTBlockHashAbbrev = Stream.EmitAbbrev(std::move(Abbrev));
+ // We don't need AST Block hash in named modules.
+ if (!isWritingStdCXXNamedModules()) {
+ auto Abbrev = std::make_shared<BitCodeAbbrev>();
+ Abbrev->Add(BitCodeAbbrevOp(AST_BLOCK_HASH));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned ASTBlockHashAbbrev = Stream.EmitAbbrev(std::move(Abbrev));
- Abbrev = std::make_shared<BitCodeAbbrev>();
+ Record.push_back(AST_BLOCK_HASH);
+ Stream.EmitRecordWithBlob(ASTBlockHashAbbrev, Record, Blob);
+ ASTBlockHashOffset = Stream.GetCurrentBitNo() - Blob.size() * 8;
+ Record.clear();
+ }
+
+ auto Abbrev = std::make_shared<BitCodeAbbrev>();
Abbrev->Add(BitCodeAbbrevOp(SIGNATURE));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
unsigned SignatureAbbrev = Stream.EmitAbbrev(std::move(Abbrev));
- Record.push_back(AST_BLOCK_HASH);
- Stream.EmitRecordWithBlob(ASTBlockHashAbbrev, Record, Blob);
- ASTBlockHashOffset = Stream.GetCurrentBitNo() - Blob.size() * 8;
- Record.clear();
-
Record.push_back(SIGNATURE);
Stream.EmitRecordWithBlob(SignatureAbbrev, Record, Blob);
SignatureOffset = Stream.GetCurrentBitNo() - Blob.size() * 8;
@@ -1265,18 +1366,30 @@ void ASTWriter::writeUnhashedControlBlock(Preprocessor &PP,
WritePragmaDiagnosticMappings(Diags, /* isModule = */ WritingModule);
// Header search entry usage.
- auto HSEntryUsage = PP.getHeaderSearchInfo().computeUserEntryUsage();
- auto Abbrev = std::make_shared<BitCodeAbbrev>();
- Abbrev->Add(BitCodeAbbrevOp(HEADER_SEARCH_ENTRY_USAGE));
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Number of bits.
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Bit vector.
- unsigned HSUsageAbbrevCode = Stream.EmitAbbrev(std::move(Abbrev));
{
+ auto HSEntryUsage = PP.getHeaderSearchInfo().computeUserEntryUsage();
+ auto Abbrev = std::make_shared<BitCodeAbbrev>();
+ Abbrev->Add(BitCodeAbbrevOp(HEADER_SEARCH_ENTRY_USAGE));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Number of bits.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Bit vector.
+ unsigned HSUsageAbbrevCode = Stream.EmitAbbrev(std::move(Abbrev));
RecordData::value_type Record[] = {HEADER_SEARCH_ENTRY_USAGE,
HSEntryUsage.size()};
Stream.EmitRecordWithBlob(HSUsageAbbrevCode, Record, bytes(HSEntryUsage));
}
+ // VFS usage.
+ {
+ auto VFSUsage = PP.getHeaderSearchInfo().collectVFSUsageAndClear();
+ auto Abbrev = std::make_shared<BitCodeAbbrev>();
+ Abbrev->Add(BitCodeAbbrevOp(VFS_USAGE));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Number of bits.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Bit vector.
+ unsigned VFSUsageAbbrevCode = Stream.EmitAbbrev(std::move(Abbrev));
+ RecordData::value_type Record[] = {VFS_USAGE, VFSUsage.size()};
+ Stream.EmitRecordWithBlob(VFSUsageAbbrevCode, Record, bytes(VFSUsage));
+ }
+
// Leave the options block.
Stream.ExitBlock();
UnhashedControlBlockRange.second = Stream.GetCurrentBitNo() >> 3;
@@ -1350,7 +1463,7 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
(!PP.getHeaderSearchInfo()
.getHeaderSearchOpts()
.ModuleMapFileHomeIsCwd ||
- WritingModule->Directory->getName() != StringRef("."))) {
+ WritingModule->Directory->getName() != ".")) {
// Module directory.
auto Abbrev = std::make_shared<BitCodeAbbrev>();
Abbrev->Add(BitCodeAbbrevOp(MODULE_DIRECTORY));
@@ -1595,6 +1708,18 @@ struct InputFileEntry {
} // namespace
+SourceLocation ASTWriter::getAffectingIncludeLoc(const SourceManager &SourceMgr,
+ const SrcMgr::FileInfo &File) {
+ SourceLocation IncludeLoc = File.getIncludeLoc();
+ if (IncludeLoc.isValid()) {
+ FileID IncludeFID = SourceMgr.getFileID(IncludeLoc);
+ assert(IncludeFID.isValid() && "IncludeLoc in invalid file");
+ if (!IsSLocAffecting[IncludeFID.ID])
+ IncludeLoc = SourceLocation();
+ }
+ return IncludeLoc;
+}
+
void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
HeaderSearchOptions &HSOpts) {
using namespace llvm;
@@ -1648,26 +1773,22 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
Entry.IsSystemFile = isSystem(File.getFileCharacteristic());
Entry.IsTransient = Cache->IsTransient;
Entry.BufferOverridden = Cache->BufferOverridden;
- Entry.IsTopLevel = File.getIncludeLoc().isInvalid();
+ Entry.IsTopLevel = getAffectingIncludeLoc(SourceMgr, File).isInvalid();
Entry.IsModuleMap = isModuleMap(File.getFileCharacteristic());
- auto ContentHash = hash_code(-1);
+ uint64_t ContentHash = 0;
if (PP->getHeaderSearchInfo()
.getHeaderSearchOpts()
.ValidateASTInputFilesContent) {
auto MemBuff = Cache->getBufferIfLoaded();
if (MemBuff)
- ContentHash = hash_value(MemBuff->getBuffer());
+ ContentHash = xxh3_64bits(MemBuff->getBuffer());
else
PP->Diag(SourceLocation(), diag::err_module_unable_to_hash_content)
<< Entry.File.getName();
}
- auto CH = llvm::APInt(64, ContentHash);
- Entry.ContentHash[0] =
- static_cast<uint32_t>(CH.getLoBits(32).getZExtValue());
- Entry.ContentHash[1] =
- static_cast<uint32_t>(CH.getHiBits(32).getZExtValue());
-
+ Entry.ContentHash[0] = uint32_t(ContentHash);
+ Entry.ContentHash[1] = uint32_t(ContentHash >> 32);
if (Entry.IsSystemFile)
SystemFiles.push_back(Entry);
else
@@ -1848,9 +1969,15 @@ namespace {
llvm::PointerIntPair<Module *, 2, ModuleMap::ModuleHeaderRole>;
struct data_type {
- const HeaderFileInfo &HFI;
+ data_type(const HeaderFileInfo &HFI, bool AlreadyIncluded,
+ ArrayRef<ModuleMap::KnownHeader> KnownHeaders,
+ UnresolvedModule Unresolved)
+ : HFI(HFI), AlreadyIncluded(AlreadyIncluded),
+ KnownHeaders(KnownHeaders), Unresolved(Unresolved) {}
+
+ HeaderFileInfo HFI;
bool AlreadyIncluded;
- ArrayRef<ModuleMap::KnownHeader> KnownHeaders;
+ SmallVector<ModuleMap::KnownHeader, 1> KnownHeaders;
UnresolvedModule Unresolved;
};
using data_type_ref = const data_type &;
@@ -1862,13 +1989,16 @@ namespace {
// The hash is based only on size/time of the file, so that the reader can
// match even when symlinking or excess path elements ("foo/../", "../")
// change the form of the name. However, complete path is still the key.
- return llvm::hash_combine(key.Size, key.ModTime);
+ uint8_t buf[sizeof(key.Size) + sizeof(key.ModTime)];
+ memcpy(buf, &key.Size, sizeof(key.Size));
+ memcpy(buf + sizeof(key.Size), &key.ModTime, sizeof(key.ModTime));
+ return llvm::xxh3_64bits(buf);
}
std::pair<unsigned, unsigned>
EmitKeyDataLength(raw_ostream& Out, key_type_ref key, data_type_ref Data) {
unsigned KeyLen = key.Filename.size() + 1 + 8 + 8;
- unsigned DataLen = 1 + 4 + 4;
+ unsigned DataLen = 1 + sizeof(IdentifierID) + 4;
for (auto ModInfo : Data.KnownHeaders)
if (Writer.getLocalOrImportedSubmoduleID(ModInfo.getModule()))
DataLen += 4;
@@ -1903,10 +2033,11 @@ namespace {
| Data.HFI.IndexHeaderMapHeader;
LE.write<uint8_t>(Flags);
- if (!Data.HFI.ControllingMacro)
- LE.write<uint32_t>(Data.HFI.ControllingMacroID);
+ if (Data.HFI.LazyControllingMacro.isID())
+ LE.write<IdentifierID>(Data.HFI.LazyControllingMacro.getID());
else
- LE.write<uint32_t>(Writer.getIdentifierRef(Data.HFI.ControllingMacro));
+ LE.write<IdentifierID>(
+ Writer.getIdentifierRef(Data.HFI.LazyControllingMacro.getPtr()));
unsigned Offset = 0;
if (!Data.HFI.Framework.empty()) {
@@ -2020,16 +2151,13 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
if (!File)
continue;
- // Get the file info. This will load info from the external source if
- // necessary. Skip emitting this file if we have no information on it
- // as a header file (in which case HFI will be null) or if it hasn't
- // changed since it was loaded. Also skip it if it's for a modular header
- // from a different module; in that case, we rely on the module(s)
- // containing the header to provide this information.
- const HeaderFileInfo *HFI =
- HS.getExistingFileInfo(*File, /*WantExternal*/!Chain);
- if (!HFI || (HFI->isModuleHeader && !HFI->isCompilingModuleHeader))
- continue;
+ const HeaderFileInfo *HFI = HS.getExistingLocalFileInfo(*File);
+ if (!HFI)
+ continue; // We have no information on this being a header file.
+ if (!HFI->isCompilingModuleHeader && HFI->isModuleHeader)
+ continue; // Header file info is tracked by the owning module file.
+ if (!HFI->isCompilingModuleHeader && !PP->alreadyIncluded(*File))
+ continue; // Non-modular header not included is not needed.
// Massage the file path into an appropriate form.
StringRef Filename = File->getName();
@@ -2178,7 +2306,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
SLocEntryOffsets.push_back(Offset);
// Starting offset of this entry within this module, so skip the dummy.
Record.push_back(getAdjustedOffset(SLoc->getOffset()) - 2);
- AddSourceLocation(File.getIncludeLoc(), Record);
+ AddSourceLocation(getAffectingIncludeLoc(SourceMgr, File), Record);
Record.push_back(File.getFileCharacteristic()); // FIXME: stable encoding
Record.push_back(File.hasLineDirectives());
@@ -2398,6 +2526,12 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
Record.clear();
}
+ // Write the safe buffer opt-out region map in PP
+ for (SourceLocation &S : PP.serializeSafeBufferOptOutMap())
+ AddSourceLocation(S, Record);
+ Stream.EmitRecord(PP_UNSAFE_BUFFER_USAGE, Record);
+ Record.clear();
+
// Enter the preprocessor block.
Stream.EnterSubblock(PREPROCESSOR_BLOCK_ID, 3);
@@ -2636,8 +2770,10 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec,
uint64_t Offset = Stream.GetCurrentBitNo() - MacroOffsetsBase;
assert((Offset >> 32) == 0 && "Preprocessed entity offset too large");
- PreprocessedEntityOffsets.push_back(
- PPEntityOffset(getAdjustedRange((*E)->getSourceRange()), Offset));
+ SourceRange R = getAdjustedRange((*E)->getSourceRange());
+ PreprocessedEntityOffsets.emplace_back(
+ getRawSourceLocationEncoding(R.getBegin()),
+ getRawSourceLocationEncoding(R.getEnd()), Offset);
if (auto *MD = dyn_cast<MacroDefinitionRecord>(*E)) {
// Record this macro definition's ID.
@@ -2704,7 +2840,9 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec,
std::vector<PPSkippedRange> SerializedSkippedRanges;
SerializedSkippedRanges.reserve(SkippedRanges.size());
for (auto const& Range : SkippedRanges)
- SerializedSkippedRanges.emplace_back(Range);
+ SerializedSkippedRanges.emplace_back(
+ getRawSourceLocationEncoding(Range.getBegin()),
+ getRawSourceLocationEncoding(Range.getEnd()));
using namespace llvm;
auto Abbrev = std::make_shared<BitCodeAbbrev>();
@@ -2870,8 +3008,8 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
ParentID = SubmoduleIDs[Mod->Parent];
}
- uint64_t DefinitionLoc =
- SourceLocationEncoding::encode(getAdjustedLocation(Mod->DefinitionLoc));
+ SourceLocationEncoding::RawLocEncoding DefinitionLoc =
+ getRawSourceLocationEncoding(getAdjustedLocation(Mod->DefinitionLoc));
// Emit the definition of the block.
{
@@ -2895,8 +3033,8 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
// Emit the requirements.
for (const auto &R : Mod->Requirements) {
- RecordData::value_type Record[] = {SUBMODULE_REQUIRES, R.second};
- Stream.EmitRecordWithBlob(RequiresAbbrev, Record, R.first);
+ RecordData::value_type Record[] = {SUBMODULE_REQUIRES, R.RequiredState};
+ Stream.EmitRecordWithBlob(RequiresAbbrev, Record, R.FeatureName);
}
// Emit the umbrella header, if there is one.
@@ -2997,10 +3135,12 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
Stream.EmitRecordWithBlob(ConfigMacroAbbrev, Record, CM);
}
- // Emit the initializers, if any.
+ // Emit the reachable initializers.
+ // The initializer may only be unreachable in reduced BMI.
RecordData Inits;
for (Decl *D : Context->getModuleInitializers(Mod))
- Inits.push_back(GetDeclRef(D));
+ if (wasDeclEmitted(D))
+ AddDeclRef(D, Inits);
if (!Inits.empty())
Stream.EmitRecord(SUBMODULE_INITIALIZERS, Inits);
@@ -3079,9 +3219,7 @@ void ASTWriter::WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag,
}
// Sort by diag::kind for deterministic output.
- llvm::sort(Mappings, [](const auto &LHS, const auto &RHS) {
- return LHS.first < RHS.first;
- });
+ llvm::sort(Mappings, llvm::less_first());
for (const auto &I : Mappings) {
Record.push_back(I.first);
@@ -3106,9 +3244,7 @@ void ASTWriter::WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag,
continue;
++NumLocations;
- SourceLocation Loc = Diag.SourceMgr->getComposedLoc(FileIDAndFile.first, 0);
- assert(!Loc.isInvalid() && "start loc for valid FileID is invalid");
- AddSourceLocation(Loc, Record);
+ AddFileID(FileIDAndFile.first, Record);
Record.push_back(FileIDAndFile.second.StateTransitions.size());
for (auto &StatePoint : FileIDAndFile.second.StateTransitions) {
@@ -3139,22 +3275,23 @@ void ASTWriter::WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag,
/// Write the representation of a type to the AST stream.
void ASTWriter::WriteType(QualType T) {
TypeIdx &IdxRef = TypeIdxs[T];
- if (IdxRef.getIndex() == 0) // we haven't seen this type before.
- IdxRef = TypeIdx(NextTypeID++);
+ if (IdxRef.getValue() == 0) // we haven't seen this type before.
+ IdxRef = TypeIdx(0, NextTypeID++);
TypeIdx Idx = IdxRef;
- assert(Idx.getIndex() >= FirstTypeID && "Re-writing a type from a prior AST");
+ assert(Idx.getModuleFileIndex() == 0 && "Re-writing a type from a prior AST");
+ assert(Idx.getValue() >= FirstTypeID && "Writing predefined type");
// Emit the type's representation.
uint64_t Offset = ASTTypeWriter(*this).write(T) - DeclTypesBlockStartOffset;
// Record the offset for this type.
- unsigned Index = Idx.getIndex() - FirstTypeID;
+ uint64_t Index = Idx.getValue() - FirstTypeID;
if (TypeOffsets.size() == Index)
TypeOffsets.emplace_back(Offset);
else if (TypeOffsets.size() < Index) {
TypeOffsets.resize(Index + 1);
- TypeOffsets[Index].setBitOffset(Offset);
+ TypeOffsets[Index].set(Offset);
} else {
llvm_unreachable("Types emitted in wrong order");
}
@@ -3164,21 +3301,48 @@ void ASTWriter::WriteType(QualType T) {
// Declaration Serialization
//===----------------------------------------------------------------------===//
+static bool IsInternalDeclFromFileContext(const Decl *D) {
+ auto *ND = dyn_cast<NamedDecl>(D);
+ if (!ND)
+ return false;
+
+ if (!D->getDeclContext()->getRedeclContext()->isFileContext())
+ return false;
+
+ return ND->getFormalLinkage() == Linkage::Internal;
+}
+
/// Write the block containing all of the declaration IDs
/// lexically declared within the given DeclContext.
///
/// \returns the offset of the DECL_CONTEXT_LEXICAL block within the
/// bitstream, or 0 if no block was written.
uint64_t ASTWriter::WriteDeclContextLexicalBlock(ASTContext &Context,
- DeclContext *DC) {
+ const DeclContext *DC) {
if (DC->decls_empty())
return 0;
+  // In a reduced BMI, we don't care about the declarations in functions.
+ if (GeneratingReducedBMI && DC->isFunctionOrMethod())
+ return 0;
+
uint64_t Offset = Stream.GetCurrentBitNo();
- SmallVector<uint32_t, 128> KindDeclPairs;
+ SmallVector<DeclID, 128> KindDeclPairs;
for (const auto *D : DC->decls()) {
+ if (DoneWritingDeclsAndTypes && !wasDeclEmitted(D))
+ continue;
+
+    // We don't need to write decls with internal linkage into a reduced BMI.
+    // If such decls get emitted because they are used from inline functions,
+    // the program is ill-formed. However, there are too many uses of static
+    // inline functions in the global module fragment, and it would be a
+    // breaking change to forbid that, so we allow emitting them from the GMF.
+ if (GeneratingReducedBMI && !D->isFromExplicitGlobalModule() &&
+ IsInternalDeclFromFileContext(D))
+ continue;
+
KindDeclPairs.push_back(D->getKind());
- KindDeclPairs.push_back(GetDeclRef(D));
+ KindDeclPairs.push_back(GetDeclRef(D).getRawValue());
}
++NumLexicalDeclContexts;
@@ -3195,12 +3359,10 @@ void ASTWriter::WriteTypeDeclOffsets() {
auto Abbrev = std::make_shared<BitCodeAbbrev>();
Abbrev->Add(BitCodeAbbrevOp(TYPE_OFFSET));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of types
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // base type index
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // types block
unsigned TypeOffsetAbbrev = Stream.EmitAbbrev(std::move(Abbrev));
{
- RecordData::value_type Record[] = {TYPE_OFFSET, TypeOffsets.size(),
- FirstTypeID - NUM_PREDEF_TYPE_IDS};
+ RecordData::value_type Record[] = {TYPE_OFFSET, TypeOffsets.size()};
Stream.EmitRecordWithBlob(TypeOffsetAbbrev, Record, bytes(TypeOffsets));
}
@@ -3208,12 +3370,10 @@ void ASTWriter::WriteTypeDeclOffsets() {
Abbrev = std::make_shared<BitCodeAbbrev>();
Abbrev->Add(BitCodeAbbrevOp(DECL_OFFSET));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of declarations
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // base decl ID
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // declarations block
unsigned DeclOffsetAbbrev = Stream.EmitAbbrev(std::move(Abbrev));
{
- RecordData::value_type Record[] = {DECL_OFFSET, DeclOffsets.size(),
- FirstDeclID - NUM_PREDEF_DECL_IDS};
+ RecordData::value_type Record[] = {DECL_OFFSET, DeclOffsets.size()};
Stream.EmitRecordWithBlob(DeclOffsetAbbrev, Record, bytes(DeclOffsets));
}
}
@@ -3234,7 +3394,7 @@ void ASTWriter::WriteFileDeclIDsMap() {
Info.FirstDeclIndex = FileGroupedDeclIDs.size();
llvm::stable_sort(Info.DeclIDs);
for (auto &LocDeclEntry : Info.DeclIDs)
- FileGroupedDeclIDs.push_back(LocDeclEntry.second);
+ FileGroupedDeclIDs.push_back(LocDeclEntry.second.getRawValue());
}
auto Abbrev = std::make_shared<BitCodeAbbrev>();
@@ -3305,16 +3465,18 @@ public:
std::pair<unsigned, unsigned>
EmitKeyDataLength(raw_ostream& Out, Selector Sel,
data_type_ref Methods) {
- unsigned KeyLen = 2 + (Sel.getNumArgs()? Sel.getNumArgs() * 4 : 4);
+ unsigned KeyLen =
+ 2 + (Sel.getNumArgs() ? Sel.getNumArgs() * sizeof(IdentifierID)
+ : sizeof(IdentifierID));
unsigned DataLen = 4 + 2 + 2; // 2 bytes for each of the method counts
for (const ObjCMethodList *Method = &Methods.Instance; Method;
Method = Method->getNext())
if (ShouldWriteMethodListNode(Method))
- DataLen += 4;
+ DataLen += sizeof(DeclID);
for (const ObjCMethodList *Method = &Methods.Factory; Method;
Method = Method->getNext())
if (ShouldWriteMethodListNode(Method))
- DataLen += 4;
+ DataLen += sizeof(DeclID);
return emitULEBKeyDataLength(KeyLen, DataLen, Out);
}
@@ -3330,7 +3492,7 @@ public:
if (N == 0)
N = 1;
for (unsigned I = 0; I != N; ++I)
- LE.write<uint32_t>(
+ LE.write<IdentifierID>(
Writer.getIdentifierRef(Sel.getIdentifierInfoForSlot(I)));
}
@@ -3372,11 +3534,11 @@ public:
for (const ObjCMethodList *Method = &Methods.Instance; Method;
Method = Method->getNext())
if (ShouldWriteMethodListNode(Method))
- LE.write<uint32_t>(Writer.getDeclID(Method->getMethod()));
+ LE.write<DeclID>((DeclID)Writer.getDeclID(Method->getMethod()));
for (const ObjCMethodList *Method = &Methods.Factory; Method;
Method = Method->getNext())
if (ShouldWriteMethodListNode(Method))
- LE.write<uint32_t>(Writer.getDeclID(Method->getMethod()));
+ LE.write<DeclID>((DeclID)Writer.getDeclID(Method->getMethod()));
assert(Out.tell() - Start == DataLen && "Data length is wrong");
}
@@ -3398,7 +3560,7 @@ void ASTWriter::WriteSelectors(Sema &SemaRef) {
using namespace llvm;
// Do we have to do anything at all?
- if (SemaRef.MethodPool.empty() && SelectorIDs.empty())
+ if (SemaRef.ObjC().MethodPool.empty() && SelectorIDs.empty())
return;
unsigned NumTableEntries = 0;
// Create and write out the blob that contains selectors and the method pool.
@@ -3412,13 +3574,14 @@ void ASTWriter::WriteSelectors(Sema &SemaRef) {
for (auto &SelectorAndID : SelectorIDs) {
Selector S = SelectorAndID.first;
SelectorID ID = SelectorAndID.second;
- Sema::GlobalMethodPool::iterator F = SemaRef.MethodPool.find(S);
+ SemaObjC::GlobalMethodPool::iterator F =
+ SemaRef.ObjC().MethodPool.find(S);
ASTMethodPoolTrait::data_type Data = {
ID,
ObjCMethodList(),
ObjCMethodList()
};
- if (F != SemaRef.MethodPool.end()) {
+ if (F != SemaRef.ObjC().MethodPool.end()) {
Data.Instance = F->second.first;
Data.Factory = F->second.second;
}
@@ -3503,7 +3666,7 @@ void ASTWriter::WriteSelectors(Sema &SemaRef) {
void ASTWriter::WriteReferencedSelectorsPool(Sema &SemaRef) {
using namespace llvm;
- if (SemaRef.ReferencedSelectors.empty())
+ if (SemaRef.ObjC().ReferencedSelectors.empty())
return;
RecordData Record;
@@ -3512,7 +3675,7 @@ void ASTWriter::WriteReferencedSelectorsPool(Sema &SemaRef) {
// Note: this writes out all references even for a dependent AST. But it is
// very tricky to fix, and given that @selector shouldn't really appear in
// headers, probably not worth it. It's not a correctness issue.
- for (auto &SelectorAndLocation : SemaRef.ReferencedSelectors) {
+ for (auto &SelectorAndLocation : SemaRef.ObjC().ReferencedSelectors) {
Selector Sel = SelectorAndLocation.first;
SourceLocation Loc = SelectorAndLocation.second;
Writer.AddSelectorRef(Sel);
@@ -3567,6 +3730,29 @@ static NamedDecl *getDeclForLocalLookup(const LangOptions &LangOpts,
namespace {
+bool IsInterestingIdentifier(const IdentifierInfo *II, uint64_t MacroOffset,
+ bool IsModule, bool IsCPlusPlus) {
+ bool NeedDecls = !IsModule || !IsCPlusPlus;
+
+ bool IsInteresting =
+ II->getNotableIdentifierID() != tok::NotableIdentifierKind::not_notable ||
+ II->getBuiltinID() != Builtin::ID::NotBuiltin ||
+ II->getObjCKeywordID() != tok::ObjCKeywordKind::objc_not_keyword;
+ if (MacroOffset || II->isPoisoned() || (!IsModule && IsInteresting) ||
+ II->hasRevertedTokenIDToIdentifier() ||
+ (NeedDecls && II->getFETokenInfo()))
+ return true;
+
+ return false;
+}
+
+bool IsInterestingNonMacroIdentifier(const IdentifierInfo *II,
+ ASTWriter &Writer) {
+ bool IsModule = Writer.isWritingModule();
+ bool IsCPlusPlus = Writer.getLangOpts().CPlusPlus;
+ return IsInterestingIdentifier(II, /*MacroOffset=*/0, IsModule, IsCPlusPlus);
+}
+
class ASTIdentifierTableTrait {
ASTWriter &Writer;
Preprocessor &PP;
@@ -3580,20 +3766,15 @@ class ASTIdentifierTableTrait {
/// doesn't check whether the name has macros defined; use PublicMacroIterator
/// to check that.
bool isInterestingIdentifier(const IdentifierInfo *II, uint64_t MacroOffset) {
- if (MacroOffset || II->isPoisoned() ||
- (!IsModule && II->getObjCOrBuiltinID()) ||
- II->hasRevertedTokenIDToIdentifier() ||
- (NeedDecls && II->getFETokenInfo()))
- return true;
-
- return false;
+ return IsInterestingIdentifier(II, MacroOffset, IsModule,
+ Writer.getLangOpts().CPlusPlus);
}
public:
- using key_type = IdentifierInfo *;
+ using key_type = const IdentifierInfo *;
using key_type_ref = key_type;
- using data_type = IdentID;
+ using data_type = IdentifierID;
using data_type_ref = data_type;
using hash_value_type = unsigned;
@@ -3617,12 +3798,8 @@ public:
return isInterestingIdentifier(II, MacroOffset);
}
- bool isInterestingNonMacroIdentifier(const IdentifierInfo *II) {
- return isInterestingIdentifier(II, 0);
- }
-
std::pair<unsigned, unsigned>
- EmitKeyDataLength(raw_ostream& Out, IdentifierInfo* II, IdentID ID) {
+ EmitKeyDataLength(raw_ostream &Out, const IdentifierInfo *II, IdentifierID ID) {
// Record the location of the identifier data. This is used when generating
// the mapping from persistent IDs to strings.
Writer.SetIdentifierOffset(II, Out.tell());
@@ -3636,7 +3813,7 @@ public:
InterestingIdentifierOffsets->push_back(Out.tell());
unsigned KeyLen = II->getLength() + 1;
- unsigned DataLen = 4; // 4 bytes for the persistent ID << 1
+ unsigned DataLen = sizeof(IdentifierID); // bytes for the persistent ID << 1
if (isInterestingIdentifier(II, MacroOffset)) {
DataLen += 2; // 2 bytes for builtin ID
DataLen += 2; // 2 bytes for flags
@@ -3644,29 +3821,29 @@ public:
DataLen += 4; // MacroDirectives offset.
if (NeedDecls)
- DataLen += std::distance(IdResolver.begin(II), IdResolver.end()) * 4;
+ DataLen += std::distance(IdResolver.begin(II), IdResolver.end()) *
+ sizeof(DeclID);
}
return emitULEBKeyDataLength(KeyLen, DataLen, Out);
}
- void EmitKey(raw_ostream& Out, const IdentifierInfo* II,
- unsigned KeyLen) {
+ void EmitKey(raw_ostream &Out, const IdentifierInfo *II, unsigned KeyLen) {
Out.write(II->getNameStart(), KeyLen);
}
- void EmitData(raw_ostream& Out, IdentifierInfo* II,
- IdentID ID, unsigned) {
+ void EmitData(raw_ostream &Out, const IdentifierInfo *II, IdentifierID ID,
+ unsigned) {
using namespace llvm::support;
endian::Writer LE(Out, llvm::endianness::little);
auto MacroOffset = Writer.getMacroDirectivesOffset(II);
if (!isInterestingIdentifier(II, MacroOffset)) {
- LE.write<uint32_t>(ID << 1);
+ LE.write<IdentifierID>(ID << 1);
return;
}
- LE.write<uint32_t>((ID << 1) | 0x01);
+ LE.write<IdentifierID>((ID << 1) | 0x01);
uint32_t Bits = (uint32_t)II->getObjCOrBuiltinID();
assert((Bits & 0xffff) == Bits && "ObjCOrBuiltinID too big for ASTReader.");
LE.write<uint16_t>(Bits);
@@ -3691,14 +3868,18 @@ public:
// Only emit declarations that aren't from a chained PCH, though.
SmallVector<NamedDecl *, 16> Decls(IdResolver.decls(II));
for (NamedDecl *D : llvm::reverse(Decls))
- LE.write<uint32_t>(
- Writer.getDeclID(getDeclForLocalLookup(PP.getLangOpts(), D)));
+ LE.write<DeclID>((DeclID)Writer.getDeclID(
+ getDeclForLocalLookup(PP.getLangOpts(), D)));
}
}
};
} // namespace
+/// Return true if the given \p ID is a local identifier ID. If the higher
+/// 32 bits of the ID are 0, the ID doesn't come from AST files.
+static bool isLocalIdentifierID(IdentifierID ID) { return !(ID >> 32); }
+
/// Write the identifier table into the AST file.
///
/// The identifier table consists of a blob containing string data
@@ -3718,32 +3899,17 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
ASTIdentifierTableTrait Trait(*this, PP, IdResolver, IsModule,
IsModule ? &InterestingIdents : nullptr);
- // Look for any identifiers that were named while processing the
- // headers, but are otherwise not needed. We add these to the hash
- // table to enable checking of the predefines buffer in the case
- // where the user adds new macro definitions when building the AST
- // file.
- SmallVector<const IdentifierInfo *, 128> IIs;
- for (const auto &ID : PP.getIdentifierTable())
- if (Trait.isInterestingNonMacroIdentifier(ID.second))
- IIs.push_back(ID.second);
- // Sort the identifiers lexicographically before getting the references so
- // that their order is stable.
- llvm::sort(IIs, llvm::deref<std::less<>>());
- for (const IdentifierInfo *II : IIs)
- getIdentifierRef(II);
-
// Create the on-disk hash table representation. We only store offsets
// for identifiers that appear here for the first time.
IdentifierOffsets.resize(NextIdentID - FirstIdentID);
for (auto IdentIDPair : IdentifierIDs) {
- auto *II = const_cast<IdentifierInfo *>(IdentIDPair.first);
- IdentID ID = IdentIDPair.second;
+ const IdentifierInfo *II = IdentIDPair.first;
+ IdentifierID ID = IdentIDPair.second;
assert(II && "NULL identifier in identifier table");
+
// Write out identifiers if either the ID is local or the identifier has
// changed since it was loaded.
- if (ID >= FirstIdentID || !Chain || !II->isFromAST()
- || II->hasChangedSinceDeserialization() ||
+ if (isLocalIdentifierID(ID) || II->hasChangedSinceDeserialization() ||
(Trait.needDecls() &&
II->hasFETokenInfoChangedSinceDeserialization()))
Generator.insert(II, ID, Trait);
@@ -3777,7 +3943,6 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
auto Abbrev = std::make_shared<BitCodeAbbrev>();
Abbrev->Add(BitCodeAbbrevOp(IDENTIFIER_OFFSET));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of identifiers
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // first ID
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
unsigned IdentifierOffsetAbbrev = Stream.EmitAbbrev(std::move(Abbrev));
@@ -3787,8 +3952,7 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
#endif
RecordData::value_type Record[] = {IDENTIFIER_OFFSET,
- IdentifierOffsets.size(),
- FirstIdentID - NUM_PREDEF_IDENT_IDS};
+ IdentifierOffsets.size()};
Stream.EmitRecordWithBlob(IdentifierOffsetAbbrev, Record,
bytes(IdentifierOffsets));
@@ -3798,6 +3962,13 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
Stream.EmitRecord(INTERESTING_IDENTIFIERS, InterestingIdents);
}
+void ASTWriter::handleVTable(CXXRecordDecl *RD) {
+ if (!RD->isInNamedModule())
+ return;
+
+ PendingEmittingVTables.push_back(RD);
+}
+
//===----------------------------------------------------------------------===//
// DeclContext's Name Lookup Table Serialization
//===----------------------------------------------------------------------===//
@@ -3807,7 +3978,7 @@ namespace {
// Trait used for the on-disk hash table used in the method pool.
class ASTDeclContextNameLookupTrait {
ASTWriter &Writer;
- llvm::SmallVector<DeclID, 64> DeclIDs;
+ llvm::SmallVector<LocalDeclID, 64> DeclIDs;
public:
using key_type = DeclarationNameKey;
@@ -3826,15 +3997,31 @@ public:
data_type getData(const Coll &Decls) {
unsigned Start = DeclIDs.size();
for (NamedDecl *D : Decls) {
- DeclIDs.push_back(
- Writer.GetDeclRef(getDeclForLocalLookup(Writer.getLangOpts(), D)));
+ NamedDecl *DeclForLocalLookup =
+ getDeclForLocalLookup(Writer.getLangOpts(), D);
+
+ if (Writer.getDoneWritingDeclsAndTypes() &&
+ !Writer.wasDeclEmitted(DeclForLocalLookup))
+ continue;
+
+ // Try to avoid writing internal decls to reduced BMI.
+ // See comments in ASTWriter::WriteDeclContextLexicalBlock for details.
+ if (Writer.isGeneratingReducedBMI() &&
+ !DeclForLocalLookup->isFromExplicitGlobalModule() &&
+ IsInternalDeclFromFileContext(DeclForLocalLookup))
+ continue;
+
+ DeclIDs.push_back(Writer.GetDeclRef(DeclForLocalLookup));
}
return std::make_pair(Start, DeclIDs.size());
}
data_type ImportData(const reader::ASTDeclContextNameLookupTrait::data_type &FromReader) {
unsigned Start = DeclIDs.size();
- llvm::append_range(DeclIDs, FromReader);
+ DeclIDs.insert(
+ DeclIDs.end(),
+ DeclIDIterator<GlobalDeclID, LocalDeclID>(FromReader.begin()),
+ DeclIDIterator<GlobalDeclID, LocalDeclID>(FromReader.end()));
return std::make_pair(Start, DeclIDs.size());
}
@@ -3862,11 +4049,13 @@ public:
unsigned KeyLen = 1;
switch (Name.getKind()) {
case DeclarationName::Identifier:
+ case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXDeductionGuideName:
+ KeyLen += sizeof(IdentifierID);
+ break;
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
- case DeclarationName::CXXLiteralOperatorName:
- case DeclarationName::CXXDeductionGuideName:
KeyLen += 4;
break;
case DeclarationName::CXXOperatorName:
@@ -3879,8 +4068,8 @@ public:
break;
}
- // 4 bytes for each DeclID.
- unsigned DataLen = 4 * (Lookup.second - Lookup.first);
+ // length of DeclIDs.
+ unsigned DataLen = sizeof(DeclID) * (Lookup.second - Lookup.first);
return emitULEBKeyDataLength(KeyLen, DataLen, Out);
}
@@ -3894,7 +4083,7 @@ public:
case DeclarationName::Identifier:
case DeclarationName::CXXLiteralOperatorName:
case DeclarationName::CXXDeductionGuideName:
- LE.write<uint32_t>(Writer.getIdentifierRef(Name.getIdentifier()));
+ LE.write<IdentifierID>(Writer.getIdentifierRef(Name.getIdentifier()));
return;
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
@@ -3923,7 +4112,7 @@ public:
endian::Writer LE(Out, llvm::endianness::little);
uint64_t Start = Out.tell(); (void)Start;
for (unsigned I = Lookup.first, N = Lookup.second; I != N; ++I)
- LE.write<uint32_t>(DeclIDs[I]);
+ LE.write<DeclID>((DeclID)DeclIDs[I]);
assert(Out.tell() - Start == DataLen && "Data length is wrong");
}
};
@@ -3936,11 +4125,28 @@ bool ASTWriter::isLookupResultExternal(StoredDeclsList &Result,
DC->hasNeedToReconcileExternalVisibleStorage();
}
-bool ASTWriter::isLookupResultEntirelyExternal(StoredDeclsList &Result,
- DeclContext *DC) {
- for (auto *D : Result.getLookupResult())
- if (!getDeclForLocalLookup(getLangOpts(), D)->isFromASTFile())
- return false;
+/// Returns true if all of the lookup results are either external, not emitted
+/// or predefined. In such cases, the lookup result is not interesting and we
+/// don't need to record the result in the module currently being written.
+/// Returns false otherwise.
+static bool isLookupResultNotInteresting(ASTWriter &Writer,
+ StoredDeclsList &Result) {
+ for (auto *D : Result.getLookupResult()) {
+ auto *LocalD = getDeclForLocalLookup(Writer.getLangOpts(), D);
+ if (LocalD->isFromASTFile())
+ continue;
+
+ // We can only be sure whether the local declaration is reachable
+ // after we done writing the declarations and types.
+ if (Writer.getDoneWritingDeclsAndTypes() && !Writer.wasDeclEmitted(LocalD))
+ continue;
+
+ // We don't need to emit the predefined decls.
+ if (Writer.isDeclPredefined(LocalD))
+ continue;
+
+ return false;
+ }
return true;
}
@@ -3978,8 +4184,17 @@ ASTWriter::GenerateNameLookupTable(const DeclContext *ConstDC,
// don't need to write an entry for the name at all. If we can't
// write out a lookup set without performing more deserialization,
// just skip this entry.
- if (isLookupResultExternal(Result, DC) &&
- isLookupResultEntirelyExternal(Result, DC))
+ //
+ // Also in reduced BMI, we'd like to avoid writing unreachable
+ // declarations in GMF, so we need to avoid writing declarations
+ // that are entirely external or unreachable.
+ //
+ // FIXME: It looks sufficient to test
+ // isLookupResultNotInteresting here. But due to a bug we have
+ // to test isLookupResultExternal here. See
+ // https://github.com/llvm/llvm-project/issues/61065 for details.
+ if ((GeneratingReducedBMI || isLookupResultExternal(Result, DC)) &&
+ isLookupResultNotInteresting(*this, Result))
continue;
// We also skip empty results. If any of the results could be external and
@@ -4170,9 +4385,21 @@ uint64_t ASTWriter::WriteDeclContextVisibleBlock(ASTContext &Context,
continue;
}
- for (NamedDecl *ND : Result)
- if (!ND->isFromASTFile())
- GetDeclRef(ND);
+ for (NamedDecl *ND : Result) {
+ if (ND->isFromASTFile())
+ continue;
+
+ if (DoneWritingDeclsAndTypes && !wasDeclEmitted(ND))
+ continue;
+
+ // We don't need to force emitting internal decls into reduced BMI.
+ // See comments in ASTWriter::WriteDeclContextLexicalBlock for details.
+ if (GeneratingReducedBMI && !ND->isFromExplicitGlobalModule() &&
+ IsInternalDeclFromFileContext(ND))
+ continue;
+
+ GetDeclRef(ND);
+ }
}
return 0;
@@ -4233,7 +4460,8 @@ void ASTWriter::WriteDeclContextVisibleUpdate(const DeclContext *DC) {
DC = cast<DeclContext>(Chain->getKeyDeclaration(cast<Decl>(DC)));
// Write the lookup table
- RecordData::value_type Record[] = {UPDATE_VISIBLE, getDeclID(cast<Decl>(DC))};
+ RecordData::value_type Record[] = {UPDATE_VISIBLE,
+ getDeclID(cast<Decl>(DC)).getRawValue()};
Stream.EmitRecordWithBlob(UpdateVisibleAbbrev, Record, LookupTable);
}
@@ -4263,8 +4491,8 @@ void ASTWriter::WriteOpenCLExtensions(Sema &SemaRef) {
Stream.EmitRecord(OPENCL_EXTENSIONS, Record);
}
void ASTWriter::WriteCUDAPragmas(Sema &SemaRef) {
- if (SemaRef.ForceCUDAHostDeviceDepth > 0) {
- RecordData::value_type Record[] = {SemaRef.ForceCUDAHostDeviceDepth};
+ if (SemaRef.CUDA().ForceHostDeviceDepth > 0) {
+ RecordData::value_type Record[] = {SemaRef.CUDA().ForceHostDeviceDepth};
Stream.EmitRecord(CUDA_PRAGMA_FORCE_HOST_DEVICE_DEPTH, Record);
}
}
@@ -4287,7 +4515,7 @@ void ASTWriter::WriteObjCCategories() {
Cat = Class->known_categories_begin(),
CatEnd = Class->known_categories_end();
Cat != CatEnd; ++Cat, ++Size) {
- assert(getDeclID(*Cat) != 0 && "Bogus category");
+ assert(getDeclID(*Cat).isValid() && "Bogus category");
AddDeclRef(*Cat, Categories);
}
@@ -4578,11 +4806,18 @@ void ASTWriter::AddVersionTuple(const VersionTuple &Version,
/// Note that the identifier II occurs at the given offset
/// within the identifier table.
void ASTWriter::SetIdentifierOffset(const IdentifierInfo *II, uint32_t Offset) {
- IdentID ID = IdentifierIDs[II];
+ IdentifierID ID = IdentifierIDs[II];
// Only store offsets new to this AST file. Other identifier names are looked
// up earlier in the chain and thus don't need an offset.
- if (ID >= FirstIdentID)
- IdentifierOffsets[ID - FirstIdentID] = Offset;
+ if (!isLocalIdentifierID(ID))
+ return;
+
+ // For local identifiers, the module file index must be 0.
+
+ assert(ID != 0);
+ ID -= NUM_PREDEF_IDENT_IDS;
+ assert(ID < IdentifierOffsets.size());
+ IdentifierOffsets[ID] = Offset;
}
/// Note that the selector Sel occurs at the given offset
@@ -4601,10 +4836,12 @@ ASTWriter::ASTWriter(llvm::BitstreamWriter &Stream,
SmallVectorImpl<char> &Buffer,
InMemoryModuleCache &ModuleCache,
ArrayRef<std::shared_ptr<ModuleFileExtension>> Extensions,
- bool IncludeTimestamps, bool BuildingImplicitModule)
+ bool IncludeTimestamps, bool BuildingImplicitModule,
+ bool GeneratingReducedBMI)
: Stream(Stream), Buffer(Buffer), ModuleCache(ModuleCache),
IncludeTimestamps(IncludeTimestamps),
- BuildingImplicitModule(BuildingImplicitModule) {
+ BuildingImplicitModule(BuildingImplicitModule),
+ GeneratingReducedBMI(GeneratingReducedBMI) {
for (const auto &Ext : Extensions) {
if (auto Writer = Ext->createExtensionWriter(*this))
ModuleFileExtensionWriters.push_back(std::move(Writer));
@@ -4659,15 +4896,23 @@ ASTFileSignature ASTWriter::WriteAST(Sema &SemaRef, StringRef OutputFile,
}
template<typename Vector>
-static void AddLazyVectorDecls(ASTWriter &Writer, Vector &Vec,
- ASTWriter::RecordData &Record) {
+static void AddLazyVectorDecls(ASTWriter &Writer, Vector &Vec) {
+ for (typename Vector::iterator I = Vec.begin(nullptr, true), E = Vec.end();
+ I != E; ++I) {
+ Writer.GetDeclRef(*I);
+ }
+}
+
+template <typename Vector>
+static void AddLazyVectorEmiitedDecls(ASTWriter &Writer, Vector &Vec,
+ ASTWriter::RecordData &Record) {
for (typename Vector::iterator I = Vec.begin(nullptr, true), E = Vec.end();
I != E; ++I) {
- Writer.AddDeclRef(*I, Record);
+ Writer.AddEmittedDeclRef(*I, Record);
}
}
-void ASTWriter::collectNonAffectingInputFiles() {
+void ASTWriter::computeNonAffectingInputFiles() {
SourceManager &SrcMgr = PP->getSourceManager();
unsigned N = SrcMgr.local_sloc_entry_size();
@@ -4699,9 +4944,16 @@ void ASTWriter::collectNonAffectingInputFiles() {
if (!Cache->OrigEntry)
continue;
- if (!isModuleMap(File.getFileCharacteristic()) ||
- AffectingModuleMaps.empty() ||
- llvm::is_contained(AffectingModuleMaps, *Cache->OrigEntry))
+ // Don't prune anything other than module maps.
+ if (!isModuleMap(File.getFileCharacteristic()))
+ continue;
+
+ // Don't prune module maps if all are guaranteed to be affecting.
+ if (!AffectingModuleMaps)
+ continue;
+
+ // Don't prune module maps that are affecting.
+ if (llvm::is_contained(*AffectingModuleMaps, *Cache->OrigEntry))
continue;
IsSLocAffecting[I] = false;
@@ -4727,32 +4979,43 @@ void ASTWriter::collectNonAffectingInputFiles() {
NonAffectingFileIDAdjustments.push_back(FileIDAdjustment);
NonAffectingOffsetAdjustments.push_back(OffsetAdjustment);
}
-}
-
-ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
- Module *WritingModule) {
- using namespace llvm;
- bool isModule = WritingModule != nullptr;
+ if (!PP->getHeaderSearchInfo().getHeaderSearchOpts().ModulesIncludeVFSUsage)
+ return;
- // Make sure that the AST reader knows to finalize itself.
- if (Chain)
- Chain->finalizeForWriting();
+ FileManager &FileMgr = PP->getFileManager();
+ FileMgr.trackVFSUsage(true);
+ // Lookup the paths in the VFS to trigger `-ivfsoverlay` usage tracking.
+ for (StringRef Path :
+ PP->getHeaderSearchInfo().getHeaderSearchOpts().VFSOverlayFiles)
+ FileMgr.getVirtualFileSystem().exists(Path);
+ for (unsigned I = 1; I != N; ++I) {
+ if (IsSLocAffecting[I]) {
+ const SrcMgr::SLocEntry *SLoc = &SrcMgr.getLocalSLocEntry(I);
+ if (!SLoc->isFile())
+ continue;
+ const SrcMgr::FileInfo &File = SLoc->getFile();
+ const SrcMgr::ContentCache *Cache = &File.getContentCache();
+ if (!Cache->OrigEntry)
+ continue;
+ FileMgr.getVirtualFileSystem().exists(
+ Cache->OrigEntry->getNameAsRequested());
+ }
+ }
+ FileMgr.trackVFSUsage(false);
+}
+void ASTWriter::PrepareWritingSpecialDecls(Sema &SemaRef) {
ASTContext &Context = SemaRef.Context;
- Preprocessor &PP = SemaRef.PP;
-
- // This needs to be done very early, since everything that writes
- // SourceLocations or FileIDs depends on it.
- collectNonAffectingInputFiles();
- writeUnhashedControlBlock(PP, Context);
+ bool isModule = WritingModule != nullptr;
// Set up predefined declaration IDs.
auto RegisterPredefDecl = [&] (Decl *D, PredefinedDeclIDs ID) {
if (D) {
assert(D->isCanonicalDecl() && "predefined decl is not canonical");
DeclIDs[D] = ID;
+ PredefinedDecls.insert(D);
}
};
RegisterPredefDecl(Context.getTranslationUnitDecl(),
@@ -4782,103 +5045,283 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
RegisterPredefDecl(Context.TypePackElementDecl,
PREDEF_DECL_TYPE_PACK_ELEMENT_ID);
- // Build a record containing all of the tentative definitions in this file, in
+ const TranslationUnitDecl *TU = Context.getTranslationUnitDecl();
+
+ // Force all top level declarations to be emitted.
+ //
+ // We start emitting top level declarations from the module purview to
+ // implement the eliding unreachable declaration feature.
+ for (const auto *D : TU->noload_decls()) {
+ if (D->isFromASTFile())
+ continue;
+
+ if (GeneratingReducedBMI) {
+ if (D->isFromExplicitGlobalModule())
+ continue;
+
+ // Don't force emitting static entities.
+ //
+ // Technically, all static entities shouldn't be in reduced BMI. The
+ // language also specifies that a program that exposes TU-local entities
+ // is ill-formed. However, in practice, there are a lot of projects that
+ // use `static inline` in the headers. So we can't get rid of all
+ // static entities in reduced BMI now.
+ if (IsInternalDeclFromFileContext(D))
+ continue;
+ }
+
+ // If we're writing C++ named modules, don't emit declarations which are
+ // not from modules by default. They may be builtin declarations (handled
+ // above) or implicit declarations (see the implementation of
+ // `Sema::Initialize()` for example).
+ if (isWritingStdCXXNamedModules() && !D->getOwningModule() &&
+ D->isImplicit())
+ continue;
+
+ GetDeclRef(D);
+ }
+
+ if (GeneratingReducedBMI)
+ return;
+
+ // Writing all of the tentative definitions in this file, in
// TentativeDefinitions order. Generally, this record will be empty for
// headers.
RecordData TentativeDefinitions;
- AddLazyVectorDecls(*this, SemaRef.TentativeDefinitions, TentativeDefinitions);
+ AddLazyVectorDecls(*this, SemaRef.TentativeDefinitions);
- // Build a record containing all of the file scoped decls in this file.
- RecordData UnusedFileScopedDecls;
+ // Writing all of the file scoped decls in this file.
if (!isModule)
- AddLazyVectorDecls(*this, SemaRef.UnusedFileScopedDecls,
- UnusedFileScopedDecls);
+ AddLazyVectorDecls(*this, SemaRef.UnusedFileScopedDecls);
- // Build a record containing all of the delegating constructors we still need
+ // Writing all of the delegating constructors we still need
// to resolve.
- RecordData DelegatingCtorDecls;
if (!isModule)
- AddLazyVectorDecls(*this, SemaRef.DelegatingCtorDecls, DelegatingCtorDecls);
+ AddLazyVectorDecls(*this, SemaRef.DelegatingCtorDecls);
- // Write the set of weak, undeclared identifiers. We always write the
- // entire table, since later PCH files in a PCH chain are only interested in
- // the results at the end of the chain.
- RecordData WeakUndeclaredIdentifiers;
- for (const auto &WeakUndeclaredIdentifierList :
- SemaRef.WeakUndeclaredIdentifiers) {
- const IdentifierInfo *const II = WeakUndeclaredIdentifierList.first;
- for (const auto &WI : WeakUndeclaredIdentifierList.second) {
- AddIdentifierRef(II, WeakUndeclaredIdentifiers);
- AddIdentifierRef(WI.getAlias(), WeakUndeclaredIdentifiers);
- AddSourceLocation(WI.getLocation(), WeakUndeclaredIdentifiers);
+ // Writing all of the ext_vector declarations.
+ AddLazyVectorDecls(*this, SemaRef.ExtVectorDecls);
+
+ // Writing all of the VTable uses information.
+ if (!SemaRef.VTableUses.empty())
+ for (unsigned I = 0, N = SemaRef.VTableUses.size(); I != N; ++I)
+ GetDeclRef(SemaRef.VTableUses[I].first);
+
+ // Writing all of the UnusedLocalTypedefNameCandidates.
+ for (const TypedefNameDecl *TD : SemaRef.UnusedLocalTypedefNameCandidates)
+ GetDeclRef(TD);
+
+ // Writing all of pending implicit instantiations.
+ for (const auto &I : SemaRef.PendingInstantiations)
+ GetDeclRef(I.first);
+ assert(SemaRef.PendingLocalImplicitInstantiations.empty() &&
+ "There are local ones at end of translation unit!");
+
+ // Writing some declaration references.
+ if (SemaRef.StdNamespace || SemaRef.StdBadAlloc || SemaRef.StdAlignValT) {
+ GetDeclRef(SemaRef.getStdNamespace());
+ GetDeclRef(SemaRef.getStdBadAlloc());
+ GetDeclRef(SemaRef.getStdAlignValT());
+ }
+
+ if (Context.getcudaConfigureCallDecl())
+ GetDeclRef(Context.getcudaConfigureCallDecl());
+
+ // Writing all of the known namespaces.
+ for (const auto &I : SemaRef.KnownNamespaces)
+ if (!I.second)
+ GetDeclRef(I.first);
+
+ // Writing all used, undefined objects that require definitions.
+ SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
+ SemaRef.getUndefinedButUsed(Undefined);
+ for (const auto &I : Undefined)
+ GetDeclRef(I.first);
+
+ // Writing all delete-expressions that we would like to
+ // analyze later in AST.
+ if (!isModule)
+ for (const auto &DeleteExprsInfo :
+ SemaRef.getMismatchingDeleteExpressions())
+ GetDeclRef(DeleteExprsInfo.first);
+
+ // Make sure visible decls, added to DeclContexts previously loaded from
+ // an AST file, are registered for serialization. Likewise for template
+ // specializations added to imported templates.
+ for (const auto *I : DeclsToEmitEvenIfUnreferenced)
+ GetDeclRef(I);
+ DeclsToEmitEvenIfUnreferenced.clear();
+
+ // Make sure all decls associated with an identifier are registered for
+ // serialization, if we're storing decls with identifiers.
+ if (!WritingModule || !getLangOpts().CPlusPlus) {
+ llvm::SmallVector<const IdentifierInfo*, 256> IIs;
+ for (const auto &ID : SemaRef.PP.getIdentifierTable()) {
+ const IdentifierInfo *II = ID.second;
+ if (!Chain || !II->isFromAST() || II->hasChangedSinceDeserialization())
+ IIs.push_back(II);
}
+ // Sort the identifiers to visit based on their name.
+ llvm::sort(IIs, llvm::deref<std::less<>>());
+ for (const IdentifierInfo *II : IIs)
+ for (const Decl *D : SemaRef.IdResolver.decls(II))
+ GetDeclRef(D);
}
- // Build a record containing all of the ext_vector declarations.
+ // Write all of the DeclsToCheckForDeferredDiags.
+ for (auto *D : SemaRef.DeclsToCheckForDeferredDiags)
+ GetDeclRef(D);
+
+ // Write all classes that need to emit the vtable definitions if required.
+ if (isWritingStdCXXNamedModules())
+ for (CXXRecordDecl *RD : PendingEmittingVTables)
+ GetDeclRef(RD);
+ else
+ PendingEmittingVTables.clear();
+}
+
+void ASTWriter::WriteSpecialDeclRecords(Sema &SemaRef) {
+ ASTContext &Context = SemaRef.Context;
+
+ bool isModule = WritingModule != nullptr;
+
+ // Write the record containing external, unnamed definitions.
+ if (!EagerlyDeserializedDecls.empty())
+ Stream.EmitRecord(EAGERLY_DESERIALIZED_DECLS, EagerlyDeserializedDecls);
+
+ if (!ModularCodegenDecls.empty())
+ Stream.EmitRecord(MODULAR_CODEGEN_DECLS, ModularCodegenDecls);
+
+ // Write the record containing tentative definitions.
+ RecordData TentativeDefinitions;
+ AddLazyVectorEmiitedDecls(*this, SemaRef.TentativeDefinitions,
+ TentativeDefinitions);
+ if (!TentativeDefinitions.empty())
+ Stream.EmitRecord(TENTATIVE_DEFINITIONS, TentativeDefinitions);
+
+ // Write the record containing unused file scoped decls.
+ RecordData UnusedFileScopedDecls;
+ if (!isModule)
+ AddLazyVectorEmiitedDecls(*this, SemaRef.UnusedFileScopedDecls,
+ UnusedFileScopedDecls);
+ if (!UnusedFileScopedDecls.empty())
+ Stream.EmitRecord(UNUSED_FILESCOPED_DECLS, UnusedFileScopedDecls);
+
+ // Write the record containing ext_vector type names.
RecordData ExtVectorDecls;
- AddLazyVectorDecls(*this, SemaRef.ExtVectorDecls, ExtVectorDecls);
+ AddLazyVectorEmiitedDecls(*this, SemaRef.ExtVectorDecls, ExtVectorDecls);
+ if (!ExtVectorDecls.empty())
+ Stream.EmitRecord(EXT_VECTOR_DECLS, ExtVectorDecls);
- // Build a record containing all of the VTable uses information.
+ // Write the record containing VTable uses information.
RecordData VTableUses;
if (!SemaRef.VTableUses.empty()) {
for (unsigned I = 0, N = SemaRef.VTableUses.size(); I != N; ++I) {
- AddDeclRef(SemaRef.VTableUses[I].first, VTableUses);
+ CXXRecordDecl *D = SemaRef.VTableUses[I].first;
+ if (!wasDeclEmitted(D))
+ continue;
+
+ AddDeclRef(D, VTableUses);
AddSourceLocation(SemaRef.VTableUses[I].second, VTableUses);
- VTableUses.push_back(SemaRef.VTablesUsed[SemaRef.VTableUses[I].first]);
+ VTableUses.push_back(SemaRef.VTablesUsed[D]);
}
+ Stream.EmitRecord(VTABLE_USES, VTableUses);
}
- // Build a record containing all of the UnusedLocalTypedefNameCandidates.
+ // Write the record containing potentially unused local typedefs.
RecordData UnusedLocalTypedefNameCandidates;
for (const TypedefNameDecl *TD : SemaRef.UnusedLocalTypedefNameCandidates)
- AddDeclRef(TD, UnusedLocalTypedefNameCandidates);
+ AddEmittedDeclRef(TD, UnusedLocalTypedefNameCandidates);
+ if (!UnusedLocalTypedefNameCandidates.empty())
+ Stream.EmitRecord(UNUSED_LOCAL_TYPEDEF_NAME_CANDIDATES,
+ UnusedLocalTypedefNameCandidates);
- // Build a record containing all of pending implicit instantiations.
+ // Write the record containing pending implicit instantiations.
RecordData PendingInstantiations;
for (const auto &I : SemaRef.PendingInstantiations) {
+ if (!wasDeclEmitted(I.first))
+ continue;
+
AddDeclRef(I.first, PendingInstantiations);
AddSourceLocation(I.second, PendingInstantiations);
}
- assert(SemaRef.PendingLocalImplicitInstantiations.empty() &&
- "There are local ones at end of translation unit!");
+ if (!PendingInstantiations.empty())
+ Stream.EmitRecord(PENDING_IMPLICIT_INSTANTIATIONS, PendingInstantiations);
- // Build a record containing some declaration references.
+ // Write the record containing declaration references of Sema.
RecordData SemaDeclRefs;
if (SemaRef.StdNamespace || SemaRef.StdBadAlloc || SemaRef.StdAlignValT) {
- AddDeclRef(SemaRef.getStdNamespace(), SemaDeclRefs);
- AddDeclRef(SemaRef.getStdBadAlloc(), SemaDeclRefs);
- AddDeclRef(SemaRef.getStdAlignValT(), SemaDeclRefs);
+ auto AddEmittedDeclRefOrZero = [this, &SemaDeclRefs](Decl *D) {
+ if (!D || !wasDeclEmitted(D))
+ SemaDeclRefs.push_back(0);
+ else
+ AddDeclRef(D, SemaDeclRefs);
+ };
+
+ AddEmittedDeclRefOrZero(SemaRef.getStdNamespace());
+ AddEmittedDeclRefOrZero(SemaRef.getStdBadAlloc());
+ AddEmittedDeclRefOrZero(SemaRef.getStdAlignValT());
}
+ if (!SemaDeclRefs.empty())
+ Stream.EmitRecord(SEMA_DECL_REFS, SemaDeclRefs);
+ // Write the record containing decls to be checked for deferred diags.
+ RecordData DeclsToCheckForDeferredDiags;
+ for (auto *D : SemaRef.DeclsToCheckForDeferredDiags)
+ if (wasDeclEmitted(D))
+ AddDeclRef(D, DeclsToCheckForDeferredDiags);
+ if (!DeclsToCheckForDeferredDiags.empty())
+ Stream.EmitRecord(DECLS_TO_CHECK_FOR_DEFERRED_DIAGS,
+ DeclsToCheckForDeferredDiags);
+
+ // Write the record containing CUDA-specific declaration references.
RecordData CUDASpecialDeclRefs;
- if (Context.getcudaConfigureCallDecl()) {
- AddDeclRef(Context.getcudaConfigureCallDecl(), CUDASpecialDeclRefs);
+ if (auto *CudaCallDecl = Context.getcudaConfigureCallDecl();
+ CudaCallDecl && wasDeclEmitted(CudaCallDecl)) {
+ AddDeclRef(CudaCallDecl, CUDASpecialDeclRefs);
+ Stream.EmitRecord(CUDA_SPECIAL_DECL_REFS, CUDASpecialDeclRefs);
}
- // Build a record containing all of the known namespaces.
+ // Write the delegating constructors.
+ RecordData DelegatingCtorDecls;
+ if (!isModule)
+ AddLazyVectorEmiitedDecls(*this, SemaRef.DelegatingCtorDecls,
+ DelegatingCtorDecls);
+ if (!DelegatingCtorDecls.empty())
+ Stream.EmitRecord(DELEGATING_CTORS, DelegatingCtorDecls);
+
+ // Write the known namespaces.
RecordData KnownNamespaces;
for (const auto &I : SemaRef.KnownNamespaces) {
- if (!I.second)
+ if (!I.second && wasDeclEmitted(I.first))
AddDeclRef(I.first, KnownNamespaces);
}
+ if (!KnownNamespaces.empty())
+ Stream.EmitRecord(KNOWN_NAMESPACES, KnownNamespaces);
- // Build a record of all used, undefined objects that require definitions.
+ // Write the undefined internal functions and variables, and inline functions.
RecordData UndefinedButUsed;
-
SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
SemaRef.getUndefinedButUsed(Undefined);
for (const auto &I : Undefined) {
+ if (!wasDeclEmitted(I.first))
+ continue;
+
AddDeclRef(I.first, UndefinedButUsed);
AddSourceLocation(I.second, UndefinedButUsed);
}
+ if (!UndefinedButUsed.empty())
+ Stream.EmitRecord(UNDEFINED_BUT_USED, UndefinedButUsed);
- // Build a record containing all delete-expressions that we would like to
+ // Write all delete-expressions that we would like to
// analyze later in AST.
RecordData DeleteExprsToAnalyze;
-
if (!isModule) {
for (const auto &DeleteExprsInfo :
SemaRef.getMismatchingDeleteExpressions()) {
+ if (!wasDeclEmitted(DeleteExprsInfo.first))
+ continue;
+
AddDeclRef(DeleteExprsInfo.first, DeleteExprsToAnalyze);
DeleteExprsToAnalyze.push_back(DeleteExprsInfo.second.size());
for (const auto &DeleteLoc : DeleteExprsInfo.second) {
@@ -4887,6 +5330,98 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
}
}
}
+ if (!DeleteExprsToAnalyze.empty())
+ Stream.EmitRecord(DELETE_EXPRS_TO_ANALYZE, DeleteExprsToAnalyze);
+
+ RecordData VTablesToEmit;
+ for (CXXRecordDecl *RD : PendingEmittingVTables) {
+ if (!wasDeclEmitted(RD))
+ continue;
+
+ AddDeclRef(RD, VTablesToEmit);
+ }
+
+ if (!VTablesToEmit.empty())
+ Stream.EmitRecord(VTABLES_TO_EMIT, VTablesToEmit);
+}
+
+ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
+ Module *WritingModule) {
+ using namespace llvm;
+
+ bool isModule = WritingModule != nullptr;
+
+ // Make sure that the AST reader knows to finalize itself.
+ if (Chain)
+ Chain->finalizeForWriting();
+
+ ASTContext &Context = SemaRef.Context;
+ Preprocessor &PP = SemaRef.PP;
+
+ // This needs to be done very early, since everything that writes
+ // SourceLocations or FileIDs depends on it.
+ computeNonAffectingInputFiles();
+
+ writeUnhashedControlBlock(PP, Context);
+
+ // Don't reuse type ID and Identifier ID from readers for C++ standard named
+ // modules since we want to support no-transitive-change model for named
+ // modules. The theory for no-transitive-change model is,
+ // for a user of a named module, the user can only access the indirectly
+ // imported decls via the directly imported module. So that it is possible to
+ // control what matters to the users when writing the module. It would be
+ // problematic if the users can reuse the type IDs and identifier IDs from
+  // indirectly imported modules arbitrarily. So we choose to clear these IDs
+  // here.
+ if (isWritingStdCXXNamedModules()) {
+ TypeIdxs.clear();
+ IdentifierIDs.clear();
+ }
+
+ // Look for any identifiers that were named while processing the
+ // headers, but are otherwise not needed. We add these to the hash
+ // table to enable checking of the predefines buffer in the case
+ // where the user adds new macro definitions when building the AST
+ // file.
+ //
+ // We do this before emitting any Decl and Types to make sure the
+ // Identifier ID is stable.
+ SmallVector<const IdentifierInfo *, 128> IIs;
+ for (const auto &ID : PP.getIdentifierTable())
+ if (IsInterestingNonMacroIdentifier(ID.second, *this))
+ IIs.push_back(ID.second);
+ // Sort the identifiers lexicographically before getting the references so
+ // that their order is stable.
+ llvm::sort(IIs, llvm::deref<std::less<>>());
+ for (const IdentifierInfo *II : IIs)
+ getIdentifierRef(II);
+
+ // Write the set of weak, undeclared identifiers. We always write the
+ // entire table, since later PCH files in a PCH chain are only interested in
+ // the results at the end of the chain.
+ RecordData WeakUndeclaredIdentifiers;
+ for (const auto &WeakUndeclaredIdentifierList :
+ SemaRef.WeakUndeclaredIdentifiers) {
+ const IdentifierInfo *const II = WeakUndeclaredIdentifierList.first;
+ for (const auto &WI : WeakUndeclaredIdentifierList.second) {
+ AddIdentifierRef(II, WeakUndeclaredIdentifiers);
+ AddIdentifierRef(WI.getAlias(), WeakUndeclaredIdentifiers);
+ AddSourceLocation(WI.getLocation(), WeakUndeclaredIdentifiers);
+ }
+ }
+
+ // Form the record of special types.
+ RecordData SpecialTypes;
+ AddTypeRef(Context.getRawCFConstantStringType(), SpecialTypes);
+ AddTypeRef(Context.getFILEType(), SpecialTypes);
+ AddTypeRef(Context.getjmp_bufType(), SpecialTypes);
+ AddTypeRef(Context.getsigjmp_bufType(), SpecialTypes);
+ AddTypeRef(Context.ObjCIdRedefinitionType, SpecialTypes);
+ AddTypeRef(Context.ObjCClassRedefinitionType, SpecialTypes);
+ AddTypeRef(Context.ObjCSelRedefinitionType, SpecialTypes);
+ AddTypeRef(Context.getucontext_tType(), SpecialTypes);
+
+ PrepareWritingSpecialDecls(SemaRef);
// Write the control block
WriteControlBlock(PP, Context, isysroot);
@@ -4904,83 +5439,6 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
Stream.EmitRecord(METADATA_OLD_FORMAT, Record);
}
- // Create a lexical update block containing all of the declarations in the
- // translation unit that do not come from other AST files.
- const TranslationUnitDecl *TU = Context.getTranslationUnitDecl();
- SmallVector<uint32_t, 128> NewGlobalKindDeclPairs;
- for (const auto *D : TU->noload_decls()) {
- if (!D->isFromASTFile()) {
- NewGlobalKindDeclPairs.push_back(D->getKind());
- NewGlobalKindDeclPairs.push_back(GetDeclRef(D));
- }
- }
-
- auto Abv = std::make_shared<BitCodeAbbrev>();
- Abv->Add(llvm::BitCodeAbbrevOp(TU_UPDATE_LEXICAL));
- Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Blob));
- unsigned TuUpdateLexicalAbbrev = Stream.EmitAbbrev(std::move(Abv));
- {
- RecordData::value_type Record[] = {TU_UPDATE_LEXICAL};
- Stream.EmitRecordWithBlob(TuUpdateLexicalAbbrev, Record,
- bytes(NewGlobalKindDeclPairs));
- }
-
- // And a visible updates block for the translation unit.
- Abv = std::make_shared<BitCodeAbbrev>();
- Abv->Add(llvm::BitCodeAbbrevOp(UPDATE_VISIBLE));
- Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, 6));
- Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Blob));
- UpdateVisibleAbbrev = Stream.EmitAbbrev(std::move(Abv));
- WriteDeclContextVisibleUpdate(TU);
-
- // If we have any extern "C" names, write out a visible update for them.
- if (Context.ExternCContext)
- WriteDeclContextVisibleUpdate(Context.ExternCContext);
-
- // If the translation unit has an anonymous namespace, and we don't already
- // have an update block for it, write it as an update block.
- // FIXME: Why do we not do this if there's already an update block?
- if (NamespaceDecl *NS = TU->getAnonymousNamespace()) {
- ASTWriter::UpdateRecord &Record = DeclUpdates[TU];
- if (Record.empty())
- Record.push_back(DeclUpdate(UPD_CXX_ADDED_ANONYMOUS_NAMESPACE, NS));
- }
-
- // Add update records for all mangling numbers and static local numbers.
- // These aren't really update records, but this is a convenient way of
- // tagging this rare extra data onto the declarations.
- for (const auto &Number : Context.MangleNumbers)
- if (!Number.first->isFromASTFile())
- DeclUpdates[Number.first].push_back(DeclUpdate(UPD_MANGLING_NUMBER,
- Number.second));
- for (const auto &Number : Context.StaticLocalNumbers)
- if (!Number.first->isFromASTFile())
- DeclUpdates[Number.first].push_back(DeclUpdate(UPD_STATIC_LOCAL_NUMBER,
- Number.second));
-
- // Make sure visible decls, added to DeclContexts previously loaded from
- // an AST file, are registered for serialization. Likewise for template
- // specializations added to imported templates.
- for (const auto *I : DeclsToEmitEvenIfUnreferenced) {
- GetDeclRef(I);
- }
-
- // Make sure all decls associated with an identifier are registered for
- // serialization, if we're storing decls with identifiers.
- if (!WritingModule || !getLangOpts().CPlusPlus) {
- llvm::SmallVector<const IdentifierInfo*, 256> IIs;
- for (const auto &ID : PP.getIdentifierTable()) {
- const IdentifierInfo *II = ID.second;
- if (!Chain || !II->isFromAST() || II->hasChangedSinceDeserialization())
- IIs.push_back(II);
- }
- // Sort the identifiers to visit based on their name.
- llvm::sort(IIs, llvm::deref<std::less<>>());
- for (const IdentifierInfo *II : IIs)
- for (const Decl *D : SemaRef.IdResolver.decls(II))
- GetDeclRef(D);
- }
-
// For method pool in the module, if it contains an entry for a selector,
// the entry should be complete, containing everything introduced by that
// module and all modules it imports. It's possible that the entry is out of
@@ -4992,18 +5450,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
for (auto &SelectorAndID : SelectorIDs)
AllSelectors.push_back(SelectorAndID.first);
for (auto &Selector : AllSelectors)
- SemaRef.updateOutOfDateSelector(Selector);
-
- // Form the record of special types.
- RecordData SpecialTypes;
- AddTypeRef(Context.getRawCFConstantStringType(), SpecialTypes);
- AddTypeRef(Context.getFILEType(), SpecialTypes);
- AddTypeRef(Context.getjmp_bufType(), SpecialTypes);
- AddTypeRef(Context.getsigjmp_bufType(), SpecialTypes);
- AddTypeRef(Context.ObjCIdRedefinitionType, SpecialTypes);
- AddTypeRef(Context.ObjCClassRedefinitionType, SpecialTypes);
- AddTypeRef(Context.ObjCSelRedefinitionType, SpecialTypes);
- AddTypeRef(Context.getucontext_tType(), SpecialTypes);
+ SemaRef.ObjC().updateOutOfDateSelector(Selector);
if (Chain) {
// Write the mapping information describing our module dependencies and how
@@ -5055,15 +5502,11 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
// These values should be unique within a chain, since they will be read
// as keys into ContinuousRangeMaps.
- writeBaseIDOrNone(M.SLocEntryBaseOffset, M.LocalNumSLocEntries);
- writeBaseIDOrNone(M.BaseIdentifierID, M.LocalNumIdentifiers);
writeBaseIDOrNone(M.BaseMacroID, M.LocalNumMacros);
writeBaseIDOrNone(M.BasePreprocessedEntityID,
M.NumPreprocessedEntities);
writeBaseIDOrNone(M.BaseSubmoduleID, M.LocalNumSubmodules);
writeBaseIDOrNone(M.BaseSelectorID, M.LocalNumSelectors);
- writeBaseIDOrNone(M.BaseDeclID, M.LocalNumDecls);
- writeBaseIDOrNone(M.BaseTypeIndex, M.LocalNumTypes);
}
}
RecordData::value_type Record[] = {MODULE_OFFSET_MAP};
@@ -5071,38 +5514,8 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
Buffer.data(), Buffer.size());
}
- // Build a record containing all of the DeclsToCheckForDeferredDiags.
- SmallVector<serialization::DeclID, 64> DeclsToCheckForDeferredDiags;
- for (auto *D : SemaRef.DeclsToCheckForDeferredDiags)
- DeclsToCheckForDeferredDiags.push_back(GetDeclRef(D));
-
- RecordData DeclUpdatesOffsetsRecord;
-
- // Keep writing types, declarations, and declaration update records
- // until we've emitted all of them.
- Stream.EnterSubblock(DECLTYPES_BLOCK_ID, /*bits for abbreviations*/5);
- DeclTypesBlockStartOffset = Stream.GetCurrentBitNo();
- WriteTypeAbbrevs();
- WriteDeclAbbrevs();
- do {
- WriteDeclUpdatesBlocks(DeclUpdatesOffsetsRecord);
- while (!DeclTypesToEmit.empty()) {
- DeclOrType DOT = DeclTypesToEmit.front();
- DeclTypesToEmit.pop();
- if (DOT.isType())
- WriteType(DOT.getType());
- else
- WriteDecl(Context, DOT.getDecl());
- }
- } while (!DeclUpdates.empty());
- Stream.ExitBlock();
-
- DoneWritingDeclsAndTypes = true;
+ WriteDeclAndTypes(Context);
- // These things can only be done once we've written out decls and types.
- WriteTypeDeclOffsets();
- if (!DeclUpdatesOffsetsRecord.empty())
- Stream.EmitRecord(DECL_UPDATE_OFFSETS, DeclUpdatesOffsetsRecord);
WriteFileDeclIDsMap();
WriteSourceManagerBlock(Context.getSourceManager(), PP);
WriteComments();
@@ -5122,75 +5535,13 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
Stream.EmitRecord(SPECIAL_TYPES, SpecialTypes);
- // Write the record containing external, unnamed definitions.
- if (!EagerlyDeserializedDecls.empty())
- Stream.EmitRecord(EAGERLY_DESERIALIZED_DECLS, EagerlyDeserializedDecls);
-
- if (!ModularCodegenDecls.empty())
- Stream.EmitRecord(MODULAR_CODEGEN_DECLS, ModularCodegenDecls);
-
- // Write the record containing tentative definitions.
- if (!TentativeDefinitions.empty())
- Stream.EmitRecord(TENTATIVE_DEFINITIONS, TentativeDefinitions);
-
- // Write the record containing unused file scoped decls.
- if (!UnusedFileScopedDecls.empty())
- Stream.EmitRecord(UNUSED_FILESCOPED_DECLS, UnusedFileScopedDecls);
+ WriteSpecialDeclRecords(SemaRef);
// Write the record containing weak undeclared identifiers.
if (!WeakUndeclaredIdentifiers.empty())
Stream.EmitRecord(WEAK_UNDECLARED_IDENTIFIERS,
WeakUndeclaredIdentifiers);
- // Write the record containing ext_vector type names.
- if (!ExtVectorDecls.empty())
- Stream.EmitRecord(EXT_VECTOR_DECLS, ExtVectorDecls);
-
- // Write the record containing VTable uses information.
- if (!VTableUses.empty())
- Stream.EmitRecord(VTABLE_USES, VTableUses);
-
- // Write the record containing potentially unused local typedefs.
- if (!UnusedLocalTypedefNameCandidates.empty())
- Stream.EmitRecord(UNUSED_LOCAL_TYPEDEF_NAME_CANDIDATES,
- UnusedLocalTypedefNameCandidates);
-
- // Write the record containing pending implicit instantiations.
- if (!PendingInstantiations.empty())
- Stream.EmitRecord(PENDING_IMPLICIT_INSTANTIATIONS, PendingInstantiations);
-
- // Write the record containing declaration references of Sema.
- if (!SemaDeclRefs.empty())
- Stream.EmitRecord(SEMA_DECL_REFS, SemaDeclRefs);
-
- // Write the record containing decls to be checked for deferred diags.
- if (!DeclsToCheckForDeferredDiags.empty())
- Stream.EmitRecord(DECLS_TO_CHECK_FOR_DEFERRED_DIAGS,
- DeclsToCheckForDeferredDiags);
-
- // Write the record containing CUDA-specific declaration references.
- if (!CUDASpecialDeclRefs.empty())
- Stream.EmitRecord(CUDA_SPECIAL_DECL_REFS, CUDASpecialDeclRefs);
-
- // Write the delegating constructors.
- if (!DelegatingCtorDecls.empty())
- Stream.EmitRecord(DELEGATING_CTORS, DelegatingCtorDecls);
-
- // Write the known namespaces.
- if (!KnownNamespaces.empty())
- Stream.EmitRecord(KNOWN_NAMESPACES, KnownNamespaces);
-
- // Write the undefined internal functions and variables, and inline functions.
- if (!UndefinedButUsed.empty())
- Stream.EmitRecord(UNDEFINED_BUT_USED, UndefinedButUsed);
-
- if (!DeleteExprsToAnalyze.empty())
- Stream.EmitRecord(DELETE_EXPRS_TO_ANALYZE, DeleteExprsToAnalyze);
-
- // Write the visible updates to DeclContexts.
- for (auto *DC : UpdatedDeclContexts)
- WriteDeclContextVisibleUpdate(DC);
-
if (!WritingModule) {
// Write the submodules that were imported, if any.
struct ModuleInfo {
@@ -5255,6 +5606,142 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
return backpatchSignature();
}
+void ASTWriter::EnteringModulePurview() {
+  // In C++20 named modules, all entities declared before entering the module
+  // purview live in the Global Module Fragment (GMF).
+ if (GeneratingReducedBMI)
+ DeclUpdatesFromGMF.swap(DeclUpdates);
+}
+
+// Add update records for all mangling numbers and static local numbers.
+// These aren't really update records, but this is a convenient way of
+// tagging this rare extra data onto the declarations.
+void ASTWriter::AddedManglingNumber(const Decl *D, unsigned Number) {
+ if (D->isFromASTFile())
+ return;
+
+ DeclUpdates[D].push_back(DeclUpdate(UPD_MANGLING_NUMBER, Number));
+}
+void ASTWriter::AddedStaticLocalNumbers(const Decl *D, unsigned Number) {
+ if (D->isFromASTFile())
+ return;
+
+ DeclUpdates[D].push_back(DeclUpdate(UPD_STATIC_LOCAL_NUMBER, Number));
+}
+
+void ASTWriter::AddedAnonymousNamespace(const TranslationUnitDecl *TU,
+ NamespaceDecl *AnonNamespace) {
+ // If the translation unit has an anonymous namespace, and we don't already
+ // have an update block for it, write it as an update block.
+ // FIXME: Why do we not do this if there's already an update block?
+ if (NamespaceDecl *NS = TU->getAnonymousNamespace()) {
+ ASTWriter::UpdateRecord &Record = DeclUpdates[TU];
+ if (Record.empty())
+ Record.push_back(DeclUpdate(UPD_CXX_ADDED_ANONYMOUS_NAMESPACE, NS));
+ }
+}
+
+void ASTWriter::WriteDeclAndTypes(ASTContext &Context) {
+ // Keep writing types, declarations, and declaration update records
+ // until we've emitted all of them.
+ RecordData DeclUpdatesOffsetsRecord;
+ Stream.EnterSubblock(DECLTYPES_BLOCK_ID, /*bits for abbreviations*/5);
+ DeclTypesBlockStartOffset = Stream.GetCurrentBitNo();
+ WriteTypeAbbrevs();
+ WriteDeclAbbrevs();
+ do {
+ WriteDeclUpdatesBlocks(DeclUpdatesOffsetsRecord);
+ while (!DeclTypesToEmit.empty()) {
+ DeclOrType DOT = DeclTypesToEmit.front();
+ DeclTypesToEmit.pop();
+ if (DOT.isType())
+ WriteType(DOT.getType());
+ else
+ WriteDecl(Context, DOT.getDecl());
+ }
+ } while (!DeclUpdates.empty());
+
+ DoneWritingDeclsAndTypes = true;
+
+ // DelayedNamespace is only meaningful in reduced BMI.
+ // See the comments of DelayedNamespace for details.
+ assert(DelayedNamespace.empty() || GeneratingReducedBMI);
+ RecordData DelayedNamespaceRecord;
+ for (NamespaceDecl *NS : DelayedNamespace) {
+ uint64_t LexicalOffset = WriteDeclContextLexicalBlock(Context, NS);
+ uint64_t VisibleOffset = WriteDeclContextVisibleBlock(Context, NS);
+
+ // Write the offset relative to current block.
+ if (LexicalOffset)
+ LexicalOffset -= DeclTypesBlockStartOffset;
+
+ if (VisibleOffset)
+ VisibleOffset -= DeclTypesBlockStartOffset;
+
+ AddDeclRef(NS, DelayedNamespaceRecord);
+ DelayedNamespaceRecord.push_back(LexicalOffset);
+ DelayedNamespaceRecord.push_back(VisibleOffset);
+ }
+
+ // The process of writing lexical and visible block for delayed namespace
+ // shouldn't introduce any new decls, types or update to emit.
+ assert(DeclTypesToEmit.empty());
+ assert(DeclUpdates.empty());
+
+ Stream.ExitBlock();
+
+ // These things can only be done once we've written out decls and types.
+ WriteTypeDeclOffsets();
+ if (!DeclUpdatesOffsetsRecord.empty())
+ Stream.EmitRecord(DECL_UPDATE_OFFSETS, DeclUpdatesOffsetsRecord);
+
+ if (!DelayedNamespaceRecord.empty())
+ Stream.EmitRecord(DELAYED_NAMESPACE_LEXICAL_VISIBLE_RECORD,
+ DelayedNamespaceRecord);
+
+ const TranslationUnitDecl *TU = Context.getTranslationUnitDecl();
+ // Create a lexical update block containing all of the declarations in the
+ // translation unit that do not come from other AST files.
+ SmallVector<DeclID, 128> NewGlobalKindDeclPairs;
+ for (const auto *D : TU->noload_decls()) {
+ if (D->isFromASTFile())
+ continue;
+
+ // In reduced BMI, skip unreached declarations.
+ if (!wasDeclEmitted(D))
+ continue;
+
+ NewGlobalKindDeclPairs.push_back(D->getKind());
+ NewGlobalKindDeclPairs.push_back(GetDeclRef(D).getRawValue());
+ }
+
+ auto Abv = std::make_shared<llvm::BitCodeAbbrev>();
+ Abv->Add(llvm::BitCodeAbbrevOp(TU_UPDATE_LEXICAL));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Blob));
+ unsigned TuUpdateLexicalAbbrev = Stream.EmitAbbrev(std::move(Abv));
+
+ RecordData::value_type Record[] = {TU_UPDATE_LEXICAL};
+ Stream.EmitRecordWithBlob(TuUpdateLexicalAbbrev, Record,
+ bytes(NewGlobalKindDeclPairs));
+
+ Abv = std::make_shared<llvm::BitCodeAbbrev>();
+ Abv->Add(llvm::BitCodeAbbrevOp(UPDATE_VISIBLE));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, 6));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Blob));
+ UpdateVisibleAbbrev = Stream.EmitAbbrev(std::move(Abv));
+
+ // And a visible updates block for the translation unit.
+ WriteDeclContextVisibleUpdate(TU);
+
+ // If we have any extern "C" names, write out a visible update for them.
+ if (Context.ExternCContext)
+ WriteDeclContextVisibleUpdate(Context.ExternCContext);
+
+ // Write the visible updates to DeclContexts.
+ for (auto *DC : UpdatedDeclContexts)
+ WriteDeclContextVisibleUpdate(DC);
+}
+
void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
if (DeclUpdates.empty())
return;
@@ -5286,7 +5773,7 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
case UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION:
case UPD_CXX_ADDED_ANONYMOUS_NAMESPACE:
assert(Update.getDecl() && "no decl to add?");
- Record.push_back(GetDeclRef(Update.getDecl()));
+ Record.AddDeclRef(Update.getDecl());
break;
case UPD_CXX_ADDED_FUNCTION_DEFINITION:
@@ -5299,8 +5786,8 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
break;
case UPD_CXX_INSTANTIATED_DEFAULT_ARGUMENT:
- Record.AddStmt(const_cast<Expr *>(
- cast<ParmVarDecl>(Update.getDecl())->getDefaultArg()));
+ Record.writeStmtRef(
+ cast<ParmVarDecl>(Update.getDecl())->getDefaultArg());
break;
case UPD_CXX_INSTANTIATED_DEFAULT_MEMBER_INITIALIZER:
@@ -5314,8 +5801,7 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
Record.push_back(RD->isParamDestroyedInCallee());
Record.push_back(llvm::to_underlying(RD->getArgPassingRestrictions()));
Record.AddCXXDefinitionData(RD);
- Record.AddOffset(WriteDeclContextLexicalBlock(
- *Context, const_cast<CXXRecordDecl *>(RD)));
+ Record.AddOffset(WriteDeclContextLexicalBlock(*Context, RD));
// This state is sometimes updated by template instantiation, when we
// switch from the specialization referring to the template declaration
@@ -5411,21 +5897,23 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
// Add a trailing update record, if any. These must go last because we
// lazily load their attached statement.
- if (HasUpdatedBody) {
- const auto *Def = cast<FunctionDecl>(D);
- Record.push_back(UPD_CXX_ADDED_FUNCTION_DEFINITION);
- Record.push_back(Def->isInlined());
- Record.AddSourceLocation(Def->getInnerLocStart());
- Record.AddFunctionDefinition(Def);
- } else if (HasAddedVarDefinition) {
- const auto *VD = cast<VarDecl>(D);
- Record.push_back(UPD_CXX_ADDED_VAR_DEFINITION);
- Record.push_back(VD->isInline());
- Record.push_back(VD->isInlineSpecified());
- Record.AddVarDeclInit(VD);
+ if (!GeneratingReducedBMI || !CanElideDeclDef(D)) {
+ if (HasUpdatedBody) {
+ const auto *Def = cast<FunctionDecl>(D);
+ Record.push_back(UPD_CXX_ADDED_FUNCTION_DEFINITION);
+ Record.push_back(Def->isInlined());
+ Record.AddSourceLocation(Def->getInnerLocStart());
+ Record.AddFunctionDefinition(Def);
+ } else if (HasAddedVarDefinition) {
+ const auto *VD = cast<VarDecl>(D);
+ Record.push_back(UPD_CXX_ADDED_VAR_DEFINITION);
+ Record.push_back(VD->isInline());
+ Record.push_back(VD->isInlineSpecified());
+ Record.AddVarDeclInit(VD);
+ }
}
- OffsetsRecord.push_back(GetDeclRef(D));
+ AddDeclRef(D, OffsetsRecord);
OffsetsRecord.push_back(Record.Emit(DECL_UPDATES));
}
}
@@ -5502,10 +5990,34 @@ void ASTWriter::AddFileID(FileID FID, RecordDataImpl &Record) {
Record.push_back(getAdjustedFileID(FID).getOpaqueValue());
}
+SourceLocationEncoding::RawLocEncoding
+ASTWriter::getRawSourceLocationEncoding(SourceLocation Loc, LocSeq *Seq) {
+ unsigned BaseOffset = 0;
+ unsigned ModuleFileIndex = 0;
+
+ // See SourceLocationEncoding.h for the encoding details.
+ if (Context->getSourceManager().isLoadedSourceLocation(Loc) &&
+ Loc.isValid()) {
+ assert(getChain());
+ auto SLocMapI = getChain()->GlobalSLocOffsetMap.find(
+ SourceManager::MaxLoadedOffset - Loc.getOffset() - 1);
+ assert(SLocMapI != getChain()->GlobalSLocOffsetMap.end() &&
+ "Corrupted global sloc offset map");
+ ModuleFile *F = SLocMapI->second;
+ BaseOffset = F->SLocEntryBaseOffset - 2;
+    // A module file index of 0 means the location is not loaded, so we add 1
+    // to the real index to disambiguate it.
+ ModuleFileIndex = F->Index + 1;
+ assert(&getChain()->getModuleManager()[F->Index] == F);
+ }
+
+ return SourceLocationEncoding::encode(Loc, BaseOffset, ModuleFileIndex, Seq);
+}
+
void ASTWriter::AddSourceLocation(SourceLocation Loc, RecordDataImpl &Record,
SourceLocationSequence *Seq) {
Loc = getAdjustedLocation(Loc);
- Record.push_back(SourceLocationEncoding::encode(Loc, Seq));
+ Record.push_back(getRawSourceLocationEncoding(Loc, Seq));
}
void ASTWriter::AddSourceRange(SourceRange Range, RecordDataImpl &Record,
@@ -5522,11 +6034,11 @@ void ASTWriter::AddIdentifierRef(const IdentifierInfo *II, RecordDataImpl &Recor
Record.push_back(getIdentifierRef(II));
}
-IdentID ASTWriter::getIdentifierRef(const IdentifierInfo *II) {
+IdentifierID ASTWriter::getIdentifierRef(const IdentifierInfo *II) {
if (!II)
return 0;
- IdentID &ID = IdentifierIDs[II];
+ IdentifierID &ID = IdentifierIDs[II];
if (ID == 0)
ID = NextIdentID++;
return ID;
@@ -5650,6 +6162,31 @@ void ASTWriter::AddTypeRef(QualType T, RecordDataImpl &Record) {
Record.push_back(GetOrCreateTypeID(T));
}
+template <typename IdxForTypeTy>
+static TypeID MakeTypeID(ASTContext &Context, QualType T,
+ IdxForTypeTy IdxForType) {
+ if (T.isNull())
+ return PREDEF_TYPE_NULL_ID;
+
+ unsigned FastQuals = T.getLocalFastQualifiers();
+ T.removeLocalFastQualifiers();
+
+ if (T.hasLocalNonFastQualifiers())
+ return IdxForType(T).asTypeID(FastQuals);
+
+ assert(!T.hasLocalQualifiers());
+
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(T.getTypePtr()))
+ return TypeIdxFromBuiltin(BT).asTypeID(FastQuals);
+
+ if (T == Context.AutoDeductTy)
+ return TypeIdx(0, PREDEF_TYPE_AUTO_DEDUCT).asTypeID(FastQuals);
+ if (T == Context.AutoRRefDeductTy)
+ return TypeIdx(0, PREDEF_TYPE_AUTO_RREF_DEDUCT).asTypeID(FastQuals);
+
+ return IdxForType(T).asTypeID(FastQuals);
+}
+
TypeID ASTWriter::GetOrCreateTypeID(QualType T) {
assert(Context);
return MakeTypeID(*Context, T, [&](QualType T) -> TypeIdx {
@@ -5658,7 +6195,7 @@ TypeID ASTWriter::GetOrCreateTypeID(QualType T) {
assert(!T.getLocalFastQualifiers());
TypeIdx &Idx = TypeIdxs[T];
- if (Idx.getIndex() == 0) {
+ if (Idx.getValue() == 0) {
if (DoneWritingDeclsAndTypes) {
assert(0 && "New type seen after serializing all the types to emit!");
return TypeIdx();
@@ -5666,48 +6203,54 @@ TypeID ASTWriter::GetOrCreateTypeID(QualType T) {
// We haven't seen this type before. Assign it a new ID and put it
// into the queue of types to emit.
- Idx = TypeIdx(NextTypeID++);
+ Idx = TypeIdx(0, NextTypeID++);
DeclTypesToEmit.push(T);
}
return Idx;
});
}
-TypeID ASTWriter::getTypeID(QualType T) const {
- assert(Context);
- return MakeTypeID(*Context, T, [&](QualType T) -> TypeIdx {
- if (T.isNull())
- return TypeIdx();
- assert(!T.getLocalFastQualifiers());
+void ASTWriter::AddEmittedDeclRef(const Decl *D, RecordDataImpl &Record) {
+ if (!wasDeclEmitted(D))
+ return;
- TypeIdxMap::const_iterator I = TypeIdxs.find(T);
- assert(I != TypeIdxs.end() && "Type not emitted!");
- return I->second;
- });
+ AddDeclRef(D, Record);
}
void ASTWriter::AddDeclRef(const Decl *D, RecordDataImpl &Record) {
- Record.push_back(GetDeclRef(D));
+ Record.push_back(GetDeclRef(D).getRawValue());
}
-DeclID ASTWriter::GetDeclRef(const Decl *D) {
+LocalDeclID ASTWriter::GetDeclRef(const Decl *D) {
assert(WritingAST && "Cannot request a declaration ID before AST writing");
if (!D) {
- return 0;
+ return LocalDeclID();
+ }
+
+ // If the DeclUpdate from the GMF gets touched, emit it.
+ if (auto *Iter = DeclUpdatesFromGMF.find(D);
+ Iter != DeclUpdatesFromGMF.end()) {
+ for (DeclUpdate &Update : Iter->second)
+ DeclUpdates[D].push_back(Update);
+ DeclUpdatesFromGMF.erase(Iter);
}
// If D comes from an AST file, its declaration ID is already known and
// fixed.
- if (D->isFromASTFile())
- return D->getGlobalID();
+ if (D->isFromASTFile()) {
+ if (isWritingStdCXXNamedModules() && D->getOwningModule())
+ TouchedTopLevelModules.insert(D->getOwningModule()->getTopLevelModule());
+
+ return LocalDeclID(D->getGlobalID());
+ }
assert(!(reinterpret_cast<uintptr_t>(D) & 0x01) && "Invalid decl pointer");
- DeclID &ID = DeclIDs[D];
- if (ID == 0) {
+ LocalDeclID &ID = DeclIDs[D];
+ if (ID.isInvalid()) {
if (DoneWritingDeclsAndTypes) {
assert(0 && "New decl seen after serializing all the decls to emit!");
- return 0;
+ return LocalDeclID();
}
// We haven't seen this declaration before. Give it a new ID and
@@ -5719,21 +6262,37 @@ DeclID ASTWriter::GetDeclRef(const Decl *D) {
return ID;
}
-DeclID ASTWriter::getDeclID(const Decl *D) {
+LocalDeclID ASTWriter::getDeclID(const Decl *D) {
if (!D)
- return 0;
+ return LocalDeclID();
// If D comes from an AST file, its declaration ID is already known and
// fixed.
if (D->isFromASTFile())
- return D->getGlobalID();
+ return LocalDeclID(D->getGlobalID());
assert(DeclIDs.contains(D) && "Declaration not emitted!");
return DeclIDs[D];
}
-void ASTWriter::associateDeclWithFile(const Decl *D, DeclID ID) {
- assert(ID);
+bool ASTWriter::wasDeclEmitted(const Decl *D) const {
+ assert(D);
+
+ assert(DoneWritingDeclsAndTypes &&
+ "wasDeclEmitted should only be called after writing declarations");
+
+ if (D->isFromASTFile())
+ return true;
+
+ bool Emitted = DeclIDs.contains(D);
+ assert((Emitted || (!D->getOwningModule() && isWritingStdCXXNamedModules()) ||
+ GeneratingReducedBMI) &&
+ "The declaration within modules can only be omitted in reduced BMI.");
+ return Emitted;
+}
+
+void ASTWriter::associateDeclWithFile(const Decl *D, LocalDeclID ID) {
+ assert(ID.isValid());
assert(D);
SourceLocation Loc = D->getLocation();
@@ -5765,7 +6324,7 @@ void ASTWriter::associateDeclWithFile(const Decl *D, DeclID ID) {
if (!Info)
Info = std::make_unique<DeclIDInFileInfo>();
- std::pair<unsigned, serialization::DeclID> LocDecl(Offset, ID);
+ std::pair<unsigned, LocalDeclID> LocDecl(Offset, ID);
LocDeclIDsTy &Decls = Info->DeclIDs;
Decls.push_back(LocDecl);
}
@@ -5898,7 +6457,7 @@ void ASTRecordWriter::AddTemplateParameterList(
AddDeclRef(P);
if (const Expr *RequiresClause = TemplateParams->getRequiresClause()) {
Record->push_back(true);
- AddStmt(const_cast<Expr*>(RequiresClause));
+ writeStmtRef(RequiresClause);
} else {
Record->push_back(false);
}
@@ -6010,9 +6569,6 @@ void ASTRecordWriter::AddCXXDefinitionData(const CXXRecordDecl *D) {
BitsPacker DefinitionBits;
- bool ShouldSkipCheckingODR = D->shouldSkipCheckingODR();
- DefinitionBits.addBit(ShouldSkipCheckingODR);
-
#define FIELD(Name, Width, Merge) \
if (!DefinitionBits.canWriteNextNBits(Width)) { \
Record->push_back(DefinitionBits); \
@@ -6025,17 +6581,17 @@ void ASTRecordWriter::AddCXXDefinitionData(const CXXRecordDecl *D) {
Record->push_back(DefinitionBits);
- // We only perform ODR checks for decls not in GMF.
- if (!ShouldSkipCheckingODR)
- // getODRHash will compute the ODRHash if it has not been previously
- // computed.
- Record->push_back(D->getODRHash());
+ // getODRHash will compute the ODRHash if it has not been previously
+ // computed.
+ Record->push_back(D->getODRHash());
- bool ModulesDebugInfo =
- Writer->Context->getLangOpts().ModulesDebugInfo && !D->isDependentType();
- Record->push_back(ModulesDebugInfo);
- if (ModulesDebugInfo)
- Writer->ModularCodegenDecls.push_back(Writer->GetDeclRef(D));
+ bool ModulesCodegen =
+ !D->isDependentType() &&
+ (Writer->Context->getLangOpts().ModulesDebugInfo ||
+ D->isInNamedModule());
+ Record->push_back(ModulesCodegen);
+ if (ModulesCodegen)
+ Writer->AddDeclRef(D, Writer->ModularCodegenDecls);
// IsLambda bit is already saved.
@@ -6139,23 +6695,30 @@ void ASTWriter::ReaderInitialized(ASTReader *Reader) {
// Note, this will get called multiple times, once one the reader starts up
// and again each time it's done reading a PCH or module.
- FirstDeclID = NUM_PREDEF_DECL_IDS + Chain->getTotalNumDecls();
- FirstTypeID = NUM_PREDEF_TYPE_IDS + Chain->getTotalNumTypes();
- FirstIdentID = NUM_PREDEF_IDENT_IDS + Chain->getTotalNumIdentifiers();
FirstMacroID = NUM_PREDEF_MACRO_IDS + Chain->getTotalNumMacros();
FirstSubmoduleID = NUM_PREDEF_SUBMODULE_IDS + Chain->getTotalNumSubmodules();
FirstSelectorID = NUM_PREDEF_SELECTOR_IDS + Chain->getTotalNumSelectors();
- NextDeclID = FirstDeclID;
- NextTypeID = FirstTypeID;
- NextIdentID = FirstIdentID;
NextMacroID = FirstMacroID;
NextSelectorID = FirstSelectorID;
NextSubmoduleID = FirstSubmoduleID;
}
-void ASTWriter::IdentifierRead(IdentID ID, IdentifierInfo *II) {
- // Always keep the highest ID. See \p TypeRead() for more information.
- IdentID &StoredID = IdentifierIDs[II];
+void ASTWriter::IdentifierRead(IdentifierID ID, IdentifierInfo *II) {
+ // Don't reuse Type ID from external modules for named modules. See the
+ // comments in WriteASTCore for details.
+ if (isWritingStdCXXNamedModules())
+ return;
+
+ IdentifierID &StoredID = IdentifierIDs[II];
+ unsigned OriginalModuleFileIndex = StoredID >> 32;
+
+ // Always keep the local identifier ID. See \p TypeRead() for more
+ // information.
+ if (OriginalModuleFileIndex == 0 && StoredID)
+ return;
+
+  // Otherwise, keep the highest ID, since a module file that comes later has
+  // a higher module file index.
if (ID > StoredID)
StoredID = ID;
}
@@ -6168,16 +6731,38 @@ void ASTWriter::MacroRead(serialization::MacroID ID, MacroInfo *MI) {
}
void ASTWriter::TypeRead(TypeIdx Idx, QualType T) {
- // Always take the highest-numbered type index. This copes with an interesting
+ // Don't reuse Type ID from external modules for named modules. See the
+ // comments in WriteASTCore for details.
+ if (isWritingStdCXXNamedModules())
+ return;
+
+ // Always take the type index that comes in later module files.
+ // This copes with an interesting
// case for chained AST writing where we schedule writing the type and then,
// later, deserialize the type from another AST. In this case, we want to
- // keep the higher-numbered entry so that we can properly write it out to
+ // keep the entry from a later module so that we can properly write it out to
// the AST file.
TypeIdx &StoredIdx = TypeIdxs[T];
- if (Idx.getIndex() >= StoredIdx.getIndex())
+
+ // Ignore it if the type comes from the module file currently being written,
+ // since the module file being written logically has the highest
+ // index.
+ unsigned ModuleFileIndex = StoredIdx.getModuleFileIndex();
+ if (ModuleFileIndex == 0 && StoredIdx.getValue())
+ return;
+
+ // Otherwise, keep the highest ID, since a module file that comes later has
+ // a higher module file index.
+ if (Idx.getModuleFileIndex() >= StoredIdx.getModuleFileIndex())
StoredIdx = Idx;
}
+void ASTWriter::PredefinedDeclBuilt(PredefinedDeclIDs ID, const Decl *D) {
+ assert(D->isCanonicalDecl() && "predefined decl is not canonical");
+ DeclIDs[D] = LocalDeclID(ID);
+ PredefinedDecls.insert(D);
+}
+
void ASTWriter::SelectorRead(SelectorID ID, Selector S) {
// Always keep the highest ID. See \p TypeRead() for more information.
SelectorID &StoredID = SelectorIDs[S];
@@ -6659,6 +7244,8 @@ void OMPClauseWriter::VisitOMPReleaseClause(OMPReleaseClause *) {}
void OMPClauseWriter::VisitOMPRelaxedClause(OMPRelaxedClause *) {}
+void OMPClauseWriter::VisitOMPWeakClause(OMPWeakClause *) {}
+
void OMPClauseWriter::VisitOMPThreadsClause(OMPThreadsClause *) {}
void OMPClauseWriter::VisitOMPSIMDClause(OMPSIMDClause *) {}
@@ -7325,3 +7912,208 @@ void ASTRecordWriter::writeOMPChildren(OMPChildren *Data) {
for (unsigned I = 0, E = Data->getNumChildren(); I < E; ++I)
AddStmt(Data->getChildren()[I]);
}
+
+void ASTRecordWriter::writeOpenACCVarList(const OpenACCClauseWithVarList *C) {
+ writeUInt32(C->getVarList().size());
+ for (Expr *E : C->getVarList())
+ AddStmt(E);
+}
+
+void ASTRecordWriter::writeOpenACCIntExprList(ArrayRef<Expr *> Exprs) {
+ writeUInt32(Exprs.size());
+ for (Expr *E : Exprs)
+ AddStmt(E);
+}
+
+void ASTRecordWriter::writeOpenACCClause(const OpenACCClause *C) {
+ writeEnum(C->getClauseKind());
+ writeSourceLocation(C->getBeginLoc());
+ writeSourceLocation(C->getEndLoc());
+
+ switch (C->getClauseKind()) {
+ case OpenACCClauseKind::Default: {
+ const auto *DC = cast<OpenACCDefaultClause>(C);
+ writeSourceLocation(DC->getLParenLoc());
+ writeEnum(DC->getDefaultClauseKind());
+ return;
+ }
+ case OpenACCClauseKind::If: {
+ const auto *IC = cast<OpenACCIfClause>(C);
+ writeSourceLocation(IC->getLParenLoc());
+ AddStmt(const_cast<Expr*>(IC->getConditionExpr()));
+ return;
+ }
+ case OpenACCClauseKind::Self: {
+ const auto *SC = cast<OpenACCSelfClause>(C);
+ writeSourceLocation(SC->getLParenLoc());
+ writeBool(SC->hasConditionExpr());
+ if (SC->hasConditionExpr())
+ AddStmt(const_cast<Expr*>(SC->getConditionExpr()));
+ return;
+ }
+ case OpenACCClauseKind::NumGangs: {
+ const auto *NGC = cast<OpenACCNumGangsClause>(C);
+ writeSourceLocation(NGC->getLParenLoc());
+ writeUInt32(NGC->getIntExprs().size());
+ for (Expr *E : NGC->getIntExprs())
+ AddStmt(E);
+ return;
+ }
+ case OpenACCClauseKind::NumWorkers: {
+ const auto *NWC = cast<OpenACCNumWorkersClause>(C);
+ writeSourceLocation(NWC->getLParenLoc());
+ AddStmt(const_cast<Expr*>(NWC->getIntExpr()));
+ return;
+ }
+ case OpenACCClauseKind::VectorLength: {
+ const auto *NWC = cast<OpenACCVectorLengthClause>(C);
+ writeSourceLocation(NWC->getLParenLoc());
+ AddStmt(const_cast<Expr*>(NWC->getIntExpr()));
+ return;
+ }
+ case OpenACCClauseKind::Private: {
+ const auto *PC = cast<OpenACCPrivateClause>(C);
+ writeSourceLocation(PC->getLParenLoc());
+ writeOpenACCVarList(PC);
+ return;
+ }
+ case OpenACCClauseKind::FirstPrivate: {
+ const auto *FPC = cast<OpenACCFirstPrivateClause>(C);
+ writeSourceLocation(FPC->getLParenLoc());
+ writeOpenACCVarList(FPC);
+ return;
+ }
+ case OpenACCClauseKind::Attach: {
+ const auto *AC = cast<OpenACCAttachClause>(C);
+ writeSourceLocation(AC->getLParenLoc());
+ writeOpenACCVarList(AC);
+ return;
+ }
+ case OpenACCClauseKind::DevicePtr: {
+ const auto *DPC = cast<OpenACCDevicePtrClause>(C);
+ writeSourceLocation(DPC->getLParenLoc());
+ writeOpenACCVarList(DPC);
+ return;
+ }
+ case OpenACCClauseKind::NoCreate: {
+ const auto *NCC = cast<OpenACCNoCreateClause>(C);
+ writeSourceLocation(NCC->getLParenLoc());
+ writeOpenACCVarList(NCC);
+ return;
+ }
+ case OpenACCClauseKind::Present: {
+ const auto *PC = cast<OpenACCPresentClause>(C);
+ writeSourceLocation(PC->getLParenLoc());
+ writeOpenACCVarList(PC);
+ return;
+ }
+ case OpenACCClauseKind::Copy:
+ case OpenACCClauseKind::PCopy:
+ case OpenACCClauseKind::PresentOrCopy: {
+ const auto *CC = cast<OpenACCCopyClause>(C);
+ writeSourceLocation(CC->getLParenLoc());
+ writeOpenACCVarList(CC);
+ return;
+ }
+ case OpenACCClauseKind::CopyIn:
+ case OpenACCClauseKind::PCopyIn:
+ case OpenACCClauseKind::PresentOrCopyIn: {
+ const auto *CIC = cast<OpenACCCopyInClause>(C);
+ writeSourceLocation(CIC->getLParenLoc());
+ writeBool(CIC->isReadOnly());
+ writeOpenACCVarList(CIC);
+ return;
+ }
+ case OpenACCClauseKind::CopyOut:
+ case OpenACCClauseKind::PCopyOut:
+ case OpenACCClauseKind::PresentOrCopyOut: {
+ const auto *COC = cast<OpenACCCopyOutClause>(C);
+ writeSourceLocation(COC->getLParenLoc());
+ writeBool(COC->isZero());
+ writeOpenACCVarList(COC);
+ return;
+ }
+ case OpenACCClauseKind::Create:
+ case OpenACCClauseKind::PCreate:
+ case OpenACCClauseKind::PresentOrCreate: {
+ const auto *CC = cast<OpenACCCreateClause>(C);
+ writeSourceLocation(CC->getLParenLoc());
+ writeBool(CC->isZero());
+ writeOpenACCVarList(CC);
+ return;
+ }
+ case OpenACCClauseKind::Async: {
+ const auto *AC = cast<OpenACCAsyncClause>(C);
+ writeSourceLocation(AC->getLParenLoc());
+ writeBool(AC->hasIntExpr());
+ if (AC->hasIntExpr())
+ AddStmt(const_cast<Expr*>(AC->getIntExpr()));
+ return;
+ }
+ case OpenACCClauseKind::Wait: {
+ const auto *WC = cast<OpenACCWaitClause>(C);
+ writeSourceLocation(WC->getLParenLoc());
+ writeBool(WC->getDevNumExpr());
+ if (Expr *DNE = WC->getDevNumExpr())
+ AddStmt(DNE);
+ writeSourceLocation(WC->getQueuesLoc());
+
+ writeOpenACCIntExprList(WC->getQueueIdExprs());
+ return;
+ }
+ case OpenACCClauseKind::DeviceType:
+ case OpenACCClauseKind::DType: {
+ const auto *DTC = cast<OpenACCDeviceTypeClause>(C);
+ writeSourceLocation(DTC->getLParenLoc());
+ writeUInt32(DTC->getArchitectures().size());
+ for (const DeviceTypeArgument &Arg : DTC->getArchitectures()) {
+ writeBool(Arg.first);
+ if (Arg.first)
+ AddIdentifierRef(Arg.first);
+ writeSourceLocation(Arg.second);
+ }
+ return;
+ }
+ case OpenACCClauseKind::Reduction: {
+ const auto *RC = cast<OpenACCReductionClause>(C);
+ writeSourceLocation(RC->getLParenLoc());
+ writeEnum(RC->getReductionOp());
+ writeOpenACCVarList(RC);
+ return;
+ }
+ case OpenACCClauseKind::Seq:
+ case OpenACCClauseKind::Independent:
+ case OpenACCClauseKind::Auto:
+ // Nothing to do here, there is no additional information beyond the
+ // begin/end loc and clause kind.
+ return;
+
+ case OpenACCClauseKind::Finalize:
+ case OpenACCClauseKind::IfPresent:
+ case OpenACCClauseKind::Worker:
+ case OpenACCClauseKind::Vector:
+ case OpenACCClauseKind::NoHost:
+ case OpenACCClauseKind::UseDevice:
+ case OpenACCClauseKind::Delete:
+ case OpenACCClauseKind::Detach:
+ case OpenACCClauseKind::Device:
+ case OpenACCClauseKind::DeviceResident:
+ case OpenACCClauseKind::Host:
+ case OpenACCClauseKind::Link:
+ case OpenACCClauseKind::Collapse:
+ case OpenACCClauseKind::Bind:
+ case OpenACCClauseKind::DeviceNum:
+ case OpenACCClauseKind::DefaultAsync:
+ case OpenACCClauseKind::Tile:
+ case OpenACCClauseKind::Gang:
+ case OpenACCClauseKind::Invalid:
+ llvm_unreachable("Clause serialization not yet implemented");
+ }
+ llvm_unreachable("Invalid Clause Kind");
+}
+
+void ASTRecordWriter::writeOpenACCClauseList(
+ ArrayRef<const OpenACCClause *> Clauses) {
+ for (const OpenACCClause *Clause : Clauses)
+ writeOpenACCClause(Clause);
+}
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp
index 42583c09f009..ff1334340874 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -40,11 +40,14 @@ namespace clang {
serialization::DeclCode Code;
unsigned AbbrevToUse;
+ bool GeneratingReducedBMI = false;
+
public:
ASTDeclWriter(ASTWriter &Writer, ASTContext &Context,
- ASTWriter::RecordDataImpl &Record)
+ ASTWriter::RecordDataImpl &Record, bool GeneratingReducedBMI)
: Writer(Writer), Context(Context), Record(Writer, Record),
- Code((serialization::DeclCode)0), AbbrevToUse(0) {}
+ Code((serialization::DeclCode)0), AbbrevToUse(0),
+ GeneratingReducedBMI(GeneratingReducedBMI) {}
uint64_t Emit(Decl *D) {
if (!Code)
@@ -220,9 +223,9 @@ namespace clang {
assert(!Common->LazySpecializations);
}
- ArrayRef<DeclID> LazySpecializations;
+ ArrayRef<GlobalDeclID> LazySpecializations;
if (auto *LS = Common->LazySpecializations)
- LazySpecializations = llvm::ArrayRef(LS + 1, LS[0]);
+ LazySpecializations = llvm::ArrayRef(LS + 1, LS[0].getRawValue());
// Add a slot to the record for the number of specializations.
unsigned I = Record.size();
@@ -240,7 +243,9 @@ namespace clang {
assert(D->isCanonicalDecl() && "non-canonical decl in set");
AddFirstDeclFromEachModule(D, /*IncludeLocal*/true);
}
- Record.append(LazySpecializations.begin(), LazySpecializations.end());
+ Record.append(
+ DeclIDIterator<GlobalDeclID, DeclID>(LazySpecializations.begin()),
+ DeclIDIterator<GlobalDeclID, DeclID>(LazySpecializations.end()));
// Update the size entry we added earlier.
Record[I] = Record.size() - I - 1;
@@ -270,6 +275,34 @@ namespace clang {
};
}
+bool clang::CanElideDeclDef(const Decl *D) {
+ if (auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isInlined() || FD->isConstexpr())
+ return false;
+
+ if (FD->isDependentContext())
+ return false;
+
+ if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
+ return false;
+ }
+
+ if (auto *VD = dyn_cast<VarDecl>(D)) {
+ if (!VD->getDeclContext()->getRedeclContext()->isFileContext() ||
+ VD->isInline() || VD->isConstexpr() || isa<ParmVarDecl>(VD) ||
+ // Constant-initialized variables may not affect the ABI, but they
+ // may be used in constant evaluation in the frontend, so we have
+ // to retain them.
+ VD->hasConstantInitialization())
+ return false;
+
+ if (VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
+ return false;
+ }
+
+ return true;
+}
+
void ASTDeclWriter::Visit(Decl *D) {
DeclVisitor<ASTDeclWriter>::Visit(D);
@@ -285,9 +318,12 @@ void ASTDeclWriter::Visit(Decl *D) {
// have been written. We want it last because we will not read it back when
// retrieving it from the AST, we'll just lazily set the offset.
if (auto *FD = dyn_cast<FunctionDecl>(D)) {
- Record.push_back(FD->doesThisDeclarationHaveABody());
- if (FD->doesThisDeclarationHaveABody())
- Record.AddFunctionDefinition(FD);
+ if (!GeneratingReducedBMI || !CanElideDeclDef(FD)) {
+ Record.push_back(FD->doesThisDeclarationHaveABody());
+ if (FD->doesThisDeclarationHaveABody())
+ Record.AddFunctionDefinition(FD);
+ } else
+ Record.push_back(0);
}
// Similar to FunctionDecls, handle VarDecl's initializer here and write it
@@ -295,7 +331,10 @@ void ASTDeclWriter::Visit(Decl *D) {
// we have finished recursive deserialization, because it can recursively
// refer back to the variable.
if (auto *VD = dyn_cast<VarDecl>(D)) {
- Record.AddVarDeclInit(VD);
+ if (!GeneratingReducedBMI || !CanElideDeclDef(VD))
+ Record.AddVarDeclInit(VD);
+ else
+ Record.push_back(0);
}
// And similarly for FieldDecls. We already serialized whether there is a
@@ -488,16 +527,12 @@ void ASTDeclWriter::VisitEnumDecl(EnumDecl *D) {
BitsPacker EnumDeclBits;
EnumDeclBits.addBits(D->getNumPositiveBits(), /*BitWidth=*/8);
EnumDeclBits.addBits(D->getNumNegativeBits(), /*BitWidth=*/8);
- bool ShouldSkipCheckingODR = D->shouldSkipCheckingODR();
- EnumDeclBits.addBit(ShouldSkipCheckingODR);
EnumDeclBits.addBit(D->isScoped());
EnumDeclBits.addBit(D->isScopedUsingClassTag());
EnumDeclBits.addBit(D->isFixed());
Record.push_back(EnumDeclBits);
- // We only perform ODR checks for decls not in GMF.
- if (!ShouldSkipCheckingODR)
- Record.push_back(D->getODRHash());
+ Record.push_back(D->getODRHash());
if (MemberSpecializationInfo *MemberInfo = D->getMemberSpecializationInfo()) {
Record.AddDeclRef(MemberInfo->getInstantiatedFrom());
@@ -514,7 +549,7 @@ void ASTDeclWriter::VisitEnumDecl(EnumDecl *D) {
!D->isTopLevelDeclInObjCContainer() &&
!CXXRecordDecl::classofKind(D->getKind()) &&
!D->getIntegerTypeSourceInfo() && !D->getMemberSpecializationInfo() &&
- !needsAnonymousDeclarationNumber(D) && !D->shouldSkipCheckingODR() &&
+ !needsAnonymousDeclarationNumber(D) &&
D->getDeclName().getNameKind() == DeclarationName::Identifier)
AbbrevToUse = Writer.getDeclEnumAbbrev();
@@ -680,8 +715,6 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
// FIXME: stable encoding
FunctionDeclBits.addBits(llvm::to_underlying(D->getLinkageInternal()), 3);
FunctionDeclBits.addBits((uint32_t)D->getStorageClass(), /*BitWidth=*/3);
- bool ShouldSkipCheckingODR = D->shouldSkipCheckingODR();
- FunctionDeclBits.addBit(ShouldSkipCheckingODR);
FunctionDeclBits.addBit(D->isInlineSpecified());
FunctionDeclBits.addBit(D->isInlined());
FunctionDeclBits.addBit(D->hasSkippedBody());
@@ -707,12 +740,17 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
if (D->isExplicitlyDefaulted())
Record.AddSourceLocation(D->getDefaultLoc());
- // We only perform ODR checks for decls not in GMF.
- if (!ShouldSkipCheckingODR)
- Record.push_back(D->getODRHash());
+ Record.push_back(D->getODRHash());
+
+ if (D->isDefaulted() || D->isDeletedAsWritten()) {
+ if (auto *FDI = D->getDefalutedOrDeletedInfo()) {
+ // Store both that there is a DefaultedOrDeletedInfo and whether it
+ // contains a DeletedMessage.
+ StringLiteral *DeletedMessage = FDI->getDeletedMessage();
+ Record.push_back(1 | (DeletedMessage ? 2 : 0));
+ if (DeletedMessage)
+ Record.AddStmt(DeletedMessage);
- if (D->isDefaulted()) {
- if (auto *FDI = D->getDefaultedFunctionInfo()) {
Record.push_back(FDI->getUnqualifiedLookups().size());
for (DeclAccessPair P : FDI->getUnqualifiedLookups()) {
Record.AddDeclRef(P.getDecl());
@@ -1122,7 +1160,7 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
Record.push_back(VarDeclBits);
if (ModulesCodegen)
- Writer.ModularCodegenDecls.push_back(Writer.GetDeclRef(D));
+ Writer.AddDeclRef(D, Writer.ModularCodegenDecls);
if (D->hasAttr<BlocksAttr>()) {
BlockVarCopyInit Init = Writer.Context->getBlockVarCopyInit(D);
@@ -1337,7 +1375,7 @@ void ASTDeclWriter::VisitNamespaceDecl(NamespaceDecl *D) {
Record.AddSourceLocation(D->getBeginLoc());
Record.AddSourceLocation(D->getRBraceLoc());
- if (D->isOriginalNamespace())
+ if (D->isFirstDecl())
Record.AddDeclRef(D->getAnonymousNamespace());
Code = serialization::DECL_NAMESPACE;
@@ -1491,8 +1529,14 @@ void ASTDeclWriter::VisitCXXRecordDecl(CXXRecordDecl *D) {
if (D->isThisDeclarationADefinition())
Record.AddCXXDefinitionData(D);
+ if (D->isCompleteDefinition() && D->isInNamedModule())
+ Writer.AddDeclRef(D, Writer.ModularCodegenDecls);
+
// Store (what we currently believe to be) the key function to avoid
// deserializing every method so we can compute it.
+ //
+ // FIXME: Avoid adding the key function if the class is defined in
+ // module purview since in that case the key function is meaningless.
if (D->isCompleteDefinition())
Record.AddDeclRef(Context.getCurrentKeyFunction(D));
@@ -1514,8 +1558,7 @@ void ASTDeclWriter::VisitCXXMethodDecl(CXXMethodDecl *D) {
D->getFirstDecl() == D->getMostRecentDecl() && !D->isInvalidDecl() &&
!D->hasAttrs() && !D->isTopLevelDeclInObjCContainer() &&
D->getDeclName().getNameKind() == DeclarationName::Identifier &&
- !D->shouldSkipCheckingODR() && !D->hasExtInfo() &&
- !D->isExplicitlyDefaulted()) {
+ !D->hasExtInfo() && !D->isExplicitlyDefaulted()) {
if (D->getTemplatedKind() == FunctionDecl::TK_NonTemplate ||
D->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate ||
D->getTemplatedKind() == FunctionDecl::TK_MemberSpecialization ||
@@ -1681,6 +1724,15 @@ void ASTDeclWriter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
if (D->isFirstDecl())
AddTemplateSpecializations(D);
+
+ // Force emitting the corresponding deduction guide in reduced BMI mode.
+ // Otherwise, the deduction guide may be optimized out incorrectly.
+ if (Writer.isGeneratingReducedBMI()) {
+ auto Name = Context.DeclarationNames.getCXXDeductionGuideName(D);
+ for (auto *DG : D->getDeclContext()->noload_lookup(Name))
+ Writer.GetDeclRef(DG->getCanonicalDecl());
+ }
+
Code = serialization::DECL_CLASS_TEMPLATE;
}
@@ -1710,20 +1762,28 @@ void ASTDeclWriter::VisitClassTemplateSpecializationDecl(
Record.AddDeclRef(D->getSpecializedTemplate()->getCanonicalDecl());
}
- // Explicit info.
- Record.AddTypeSourceInfo(D->getTypeAsWritten());
- if (D->getTypeAsWritten()) {
- Record.AddSourceLocation(D->getExternLoc());
+ bool ExplicitInstantiation =
+ D->getTemplateSpecializationKind() ==
+ TSK_ExplicitInstantiationDeclaration ||
+ D->getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition;
+ Record.push_back(ExplicitInstantiation);
+ if (ExplicitInstantiation) {
+ Record.AddSourceLocation(D->getExternKeywordLoc());
Record.AddSourceLocation(D->getTemplateKeywordLoc());
}
+ const ASTTemplateArgumentListInfo *ArgsWritten =
+ D->getTemplateArgsAsWritten();
+ Record.push_back(!!ArgsWritten);
+ if (ArgsWritten)
+ Record.AddASTTemplateArgumentListInfo(ArgsWritten);
+
Code = serialization::DECL_CLASS_TEMPLATE_SPECIALIZATION;
}
void ASTDeclWriter::VisitClassTemplatePartialSpecializationDecl(
ClassTemplatePartialSpecializationDecl *D) {
Record.AddTemplateParameterList(D->getTemplateParameters());
- Record.AddASTTemplateArgumentListInfo(D->getTemplateArgsAsWritten());
VisitClassTemplateSpecializationDecl(D);
@@ -1757,13 +1817,22 @@ void ASTDeclWriter::VisitVarTemplateSpecializationDecl(
Record.AddTemplateArgumentList(&D->getTemplateInstantiationArgs());
}
- // Explicit info.
- Record.AddTypeSourceInfo(D->getTypeAsWritten());
- if (D->getTypeAsWritten()) {
- Record.AddSourceLocation(D->getExternLoc());
+ bool ExplicitInstantiation =
+ D->getTemplateSpecializationKind() ==
+ TSK_ExplicitInstantiationDeclaration ||
+ D->getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition;
+ Record.push_back(ExplicitInstantiation);
+ if (ExplicitInstantiation) {
+ Record.AddSourceLocation(D->getExternKeywordLoc());
Record.AddSourceLocation(D->getTemplateKeywordLoc());
}
+ const ASTTemplateArgumentListInfo *ArgsWritten =
+ D->getTemplateArgsAsWritten();
+ Record.push_back(!!ArgsWritten);
+ if (ArgsWritten)
+ Record.AddASTTemplateArgumentListInfo(ArgsWritten);
+
Record.AddTemplateArgumentList(&D->getTemplateArgs());
Record.AddSourceLocation(D->getPointOfInstantiation());
Record.push_back(D->getSpecializationKind());
@@ -1784,7 +1853,6 @@ void ASTDeclWriter::VisitVarTemplateSpecializationDecl(
void ASTDeclWriter::VisitVarTemplatePartialSpecializationDecl(
VarTemplatePartialSpecializationDecl *D) {
Record.AddTemplateParameterList(D->getTemplateParameters());
- Record.AddASTTemplateArgumentListInfo(D->getTemplateArgsAsWritten());
VisitVarTemplateSpecializationDecl(D);
@@ -1812,7 +1880,7 @@ void ASTDeclWriter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
Record.push_back(D->wasDeclaredWithTypename());
const TypeConstraint *TC = D->getTypeConstraint();
- assert((bool)TC == D->hasTypeConstraint());
+ Record.push_back(/*TypeConstraintInitialized=*/TC != nullptr);
if (TC) {
auto *CR = TC->getConceptReference();
Record.push_back(CR != nullptr);
@@ -1828,9 +1896,9 @@ void ASTDeclWriter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
!D->defaultArgumentWasInherited();
Record.push_back(OwnsDefaultArg);
if (OwnsDefaultArg)
- Record.AddTypeSourceInfo(D->getDefaultArgumentInfo());
+ Record.AddTemplateArgumentLoc(D->getDefaultArgument());
- if (!TC && !OwnsDefaultArg &&
+ if (!D->hasTypeConstraint() && !OwnsDefaultArg &&
D->getDeclContext() == D->getLexicalDeclContext() &&
!D->isInvalidDecl() && !D->hasAttrs() &&
!D->isTopLevelDeclInObjCContainer() && !D->isImplicit() &&
@@ -1870,7 +1938,7 @@ void ASTDeclWriter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
!D->defaultArgumentWasInherited();
Record.push_back(OwnsDefaultArg);
if (OwnsDefaultArg)
- Record.AddStmt(D->getDefaultArgument());
+ Record.AddTemplateArgumentLoc(D->getDefaultArgument());
Code = serialization::DECL_NON_TYPE_TEMPLATE_PARM;
}
}
@@ -1883,6 +1951,7 @@ void ASTDeclWriter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
Record.push_back(D->getNumExpansionTemplateParameters());
VisitTemplateDecl(D);
+ Record.push_back(D->wasDeclaredWithTypename());
// TemplateParmPosition.
Record.push_back(D->getDepth());
Record.push_back(D->getPosition());
@@ -1924,8 +1993,22 @@ void ASTDeclWriter::VisitDeclContext(DeclContext *DC) {
"You need to update the serializer after you change the "
"DeclContextBits");
- Record.AddOffset(Writer.WriteDeclContextLexicalBlock(Context, DC));
- Record.AddOffset(Writer.WriteDeclContextVisibleBlock(Context, DC));
+ uint64_t LexicalOffset = 0;
+ uint64_t VisibleOffset = 0;
+
+ if (Writer.isGeneratingReducedBMI() && isa<NamespaceDecl>(DC) &&
+ cast<NamespaceDecl>(DC)->isFromExplicitGlobalModule()) {
+ // In reduced BMI, delay writing lexical and visible block for namespace
+ // in the global module fragment. See the comments of DelayedNamespace for
+ // details.
+ Writer.DelayedNamespace.push_back(cast<NamespaceDecl>(DC));
+ } else {
+ LexicalOffset = Writer.WriteDeclContextLexicalBlock(Context, DC);
+ VisibleOffset = Writer.WriteDeclContextVisibleBlock(Context, DC);
+ }
+
+ Record.AddOffset(LexicalOffset);
+ Record.AddOffset(VisibleOffset);
}
const Decl *ASTWriter::getFirstLocalDecl(const Decl *D) {
@@ -2478,6 +2561,7 @@ void ASTWriter::WriteDeclAbbrevs() {
// TemplateTypeParmDecl
Abv->Add(
BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // wasDeclaredWithTypename
+ Abv->Add(BitCodeAbbrevOp(0)); // TypeConstraintInitialized
Abv->Add(BitCodeAbbrevOp(0)); // OwnsDefaultArg
DeclTemplateTypeParmAbbrev = Stream.EmitAbbrev(std::move(Abv));
@@ -2718,10 +2802,10 @@ void ASTWriter::WriteDecl(ASTContext &Context, Decl *D) {
"serializing");
// Determine the ID for this declaration.
- serialization::DeclID ID;
+ LocalDeclID ID;
assert(!D->isFromASTFile() && "should not be emitting imported decl");
- serialization::DeclID &IDR = DeclIDs[D];
- if (IDR == 0)
+ LocalDeclID &IDR = DeclIDs[D];
+ if (IDR.isInvalid())
IDR = NextDeclID++;
ID = IDR;
@@ -2729,7 +2813,7 @@ void ASTWriter::WriteDecl(ASTContext &Context, Decl *D) {
assert(ID >= FirstDeclID && "invalid decl ID");
RecordData Record;
- ASTDeclWriter W(*this, Context, Record);
+ ASTDeclWriter W(*this, Context, Record, GeneratingReducedBMI);
// Build a record for this declaration
W.Visit(D);
@@ -2739,14 +2823,16 @@ void ASTWriter::WriteDecl(ASTContext &Context, Decl *D) {
// Record the offset for this declaration
SourceLocation Loc = D->getLocation();
- unsigned Index = ID - FirstDeclID;
+ SourceLocationEncoding::RawLocEncoding RawLoc =
+ getRawSourceLocationEncoding(getAdjustedLocation(Loc));
+
+ unsigned Index = ID.getRawValue() - FirstDeclID.getRawValue();
if (DeclOffsets.size() == Index)
- DeclOffsets.emplace_back(getAdjustedLocation(Loc), Offset,
- DeclTypesBlockStartOffset);
+ DeclOffsets.emplace_back(RawLoc, Offset, DeclTypesBlockStartOffset);
else if (DeclOffsets.size() < Index) {
// FIXME: Can/should this happen?
DeclOffsets.resize(Index+1);
- DeclOffsets[Index].setLocation(getAdjustedLocation(Loc));
+ DeclOffsets[Index].setRawLoc(RawLoc);
DeclOffsets[Index].setBitOffset(Offset, DeclTypesBlockStartOffset);
} else {
llvm_unreachable("declarations should be emitted in ID order");
@@ -2759,7 +2845,7 @@ void ASTWriter::WriteDecl(ASTContext &Context, Decl *D) {
// Note declarations that should be deserialized eagerly so that we can add
// them to a record in the AST file later.
if (isRequiredDecl(D, Context, WritingModule))
- EagerlyDeserializedDecls.push_back(ID);
+ AddDeclRef(D, EagerlyDeserializedDecls);
}
void ASTRecordWriter::AddFunctionDefinition(const FunctionDecl *FD) {
@@ -2795,7 +2881,7 @@ void ASTRecordWriter::AddFunctionDefinition(const FunctionDecl *FD) {
}
Record->push_back(ModulesCodegen);
if (ModulesCodegen)
- Writer->ModularCodegenDecls.push_back(Writer->GetDeclRef(FD));
+ Writer->AddDeclRef(FD, Writer->ModularCodegenDecls);
if (auto *CD = dyn_cast<CXXConstructorDecl>(FD)) {
Record->push_back(CD->getNumCtorInitializers());
if (CD->getNumCtorInitializers())
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp
index e5836f5dcbe9..caa222277f06 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -19,7 +19,7 @@
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Lex/Token.h"
-#include "clang/Sema/DeclSpec.h"
+#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/ASTRecordWriter.h"
#include "llvm/Bitstream/BitstreamWriter.h"
using namespace clang;
@@ -38,7 +38,7 @@ namespace clang {
unsigned AbbrevToUse;
/// A helper that can help us to write a packed bit across function
- /// calls. For example, we may write seperate bits in seperate functions:
+ /// calls. For example, we may write separate bits in separate functions:
///
/// void VisitA(A* a) {
/// Record.push_back(a->isSomething());
@@ -474,14 +474,12 @@ addConstraintSatisfaction(ASTRecordWriter &Record,
if (!Satisfaction.IsSatisfied) {
Record.push_back(Satisfaction.NumRecords);
for (const auto &DetailRecord : Satisfaction) {
- Record.AddStmt(const_cast<Expr *>(DetailRecord.first));
- auto *E = DetailRecord.second.dyn_cast<Expr *>();
- Record.push_back(E == nullptr);
+ auto *E = DetailRecord.dyn_cast<Expr *>();
+ Record.push_back(/* IsDiagnostic */ E == nullptr);
if (E)
Record.AddStmt(E);
else {
- auto *Diag = DetailRecord.second.get<std::pair<SourceLocation,
- StringRef> *>();
+ auto *Diag = DetailRecord.get<std::pair<SourceLocation, StringRef> *>();
Record.AddSourceLocation(Diag->first);
Record.AddString(Diag->second);
}
@@ -881,16 +879,21 @@ void ASTStmtWriter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
Code = serialization::EXPR_ARRAY_SUBSCRIPT;
}
-void ASTStmtWriter::VisitOMPArraySectionExpr(OMPArraySectionExpr *E) {
+void ASTStmtWriter::VisitArraySectionExpr(ArraySectionExpr *E) {
VisitExpr(E);
+ Record.writeEnum(E->ASType);
Record.AddStmt(E->getBase());
Record.AddStmt(E->getLowerBound());
Record.AddStmt(E->getLength());
- Record.AddStmt(E->getStride());
+ if (E->isOMPArraySection())
+ Record.AddStmt(E->getStride());
Record.AddSourceLocation(E->getColonLocFirst());
- Record.AddSourceLocation(E->getColonLocSecond());
+
+ if (E->isOMPArraySection())
+ Record.AddSourceLocation(E->getColonLocSecond());
+
Record.AddSourceLocation(E->getRBracketLoc());
- Code = serialization::EXPR_OMP_ARRAY_SECTION;
+ Code = serialization::EXPR_ARRAY_SECTION;
}
void ASTStmtWriter::VisitOMPArrayShapingExpr(OMPArrayShapingExpr *E) {
@@ -970,10 +973,7 @@ void ASTStmtWriter::VisitMemberExpr(MemberExpr *E) {
VisitExpr(E);
bool HasQualifier = E->hasQualifier();
- bool HasFoundDecl =
- E->hasQualifierOrFoundDecl() &&
- (E->getFoundDecl().getDecl() != E->getMemberDecl() ||
- E->getFoundDecl().getAccess() != E->getMemberDecl()->getAccess());
+ bool HasFoundDecl = E->hasFoundDecl();
bool HasTemplateInfo = E->hasTemplateKWAndArgsInfo();
unsigned NumTemplateArgs = E->getNumTemplateArgs();
@@ -995,15 +995,15 @@ void ASTStmtWriter::VisitMemberExpr(MemberExpr *E) {
CurrentPackingBits.addBits(E->isNonOdrUse(), /*Width=*/2);
Record.AddSourceLocation(E->getOperatorLoc());
+ if (HasQualifier)
+ Record.AddNestedNameSpecifierLoc(E->getQualifierLoc());
+
if (HasFoundDecl) {
DeclAccessPair FoundDecl = E->getFoundDecl();
Record.AddDeclRef(FoundDecl.getDecl());
CurrentPackingBits.addBits(FoundDecl.getAccess(), /*BitWidth=*/2);
}
- if (HasQualifier)
- Record.AddNestedNameSpecifierLoc(E->getQualifierLoc());
-
if (HasTemplateInfo)
AddTemplateKWAndArgsInfo(*E->getTrailingObjects<ASTTemplateKWAndArgsInfo>(),
E->getTrailingObjects<TemplateArgumentLoc>());
@@ -1261,6 +1261,16 @@ void ASTStmtWriter::VisitSourceLocExpr(SourceLocExpr *E) {
Code = serialization::EXPR_SOURCE_LOC;
}
+void ASTStmtWriter::VisitEmbedExpr(EmbedExpr *E) {
+ VisitExpr(E);
+ Record.AddSourceLocation(E->getBeginLoc());
+ Record.AddSourceLocation(E->getEndLoc());
+ Record.AddStmt(E->getDataStringLiteral());
+ Record.writeUInt32(E->getStartingElementPos());
+ Record.writeUInt32(E->getDataElementCount());
+ Code = serialization::EXPR_BUILTIN_PP_EMBED;
+}
+
void ASTStmtWriter::VisitAddrLabelExpr(AddrLabelExpr *E) {
VisitExpr(E);
Record.AddSourceLocation(E->getAmpAmpLoc());
@@ -1842,6 +1852,7 @@ void ASTStmtWriter::VisitCXXThisExpr(CXXThisExpr *E) {
VisitExpr(E);
Record.AddSourceLocation(E->getLocation());
Record.push_back(E->isImplicit());
+ Record.push_back(E->isCapturedByCopyInLambdaWithExplicitObjectParameter());
Code = serialization::EXPR_CXX_THIS;
}
@@ -2085,9 +2096,24 @@ void ASTStmtWriter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
void ASTStmtWriter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
VisitOverloadExpr(E);
CurrentPackingBits.addBit(E->requiresADL());
- CurrentPackingBits.addBit(E->isOverloaded());
Record.AddDeclRef(E->getNamingClass());
Code = serialization::EXPR_CXX_UNRESOLVED_LOOKUP;
+
+ if (Writer.isWritingStdCXXNamedModules() && Writer.getChain()) {
+ // Reference all the possible declarations to make sure the changes get
+ // propagated.
+ DeclarationName Name = E->getName();
+ for (auto *Found :
+ Writer.getASTContext().getTranslationUnitDecl()->lookup(Name))
+ if (Found->isFromASTFile())
+ Writer.GetDeclRef(Found);
+
+ llvm::SmallVector<NamespaceDecl *> ExternalNSs;
+ Writer.getChain()->ReadKnownNamespaces(ExternalNSs);
+ for (auto *NS : ExternalNSs)
+ for (auto *Found : NS->lookup(Name))
+ Writer.GetDeclRef(Found);
+ }
}
void ASTStmtWriter::VisitTypeTraitExpr(TypeTraitExpr *E) {
@@ -2153,6 +2179,19 @@ void ASTStmtWriter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
Code = serialization::EXPR_SIZEOF_PACK;
}
+void ASTStmtWriter::VisitPackIndexingExpr(PackIndexingExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->TransformedExpressions);
+ Record.push_back(E->ExpandedToEmptyPack);
+ Record.AddSourceLocation(E->getEllipsisLoc());
+ Record.AddSourceLocation(E->getRSquareLoc());
+ Record.AddStmt(E->getPackIdExpression());
+ Record.AddStmt(E->getIndexExpr());
+ for (Expr *Sub : E->getExpressions())
+ Record.AddStmt(Sub);
+ Code = serialization::EXPR_PACK_INDEXING;
+}
+
void ASTStmtWriter::VisitSubstNonTypeTemplateParmExpr(
SubstNonTypeTemplateParmExpr *E) {
VisitExpr(E);
@@ -2398,6 +2437,16 @@ void ASTStmtWriter::VisitOMPUnrollDirective(OMPUnrollDirective *D) {
Code = serialization::STMT_OMP_UNROLL_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPReverseDirective(OMPReverseDirective *D) {
+ VisitOMPLoopTransformationDirective(D);
+ Code = serialization::STMT_OMP_REVERSE_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPInterchangeDirective(OMPInterchangeDirective *D) {
+ VisitOMPLoopTransformationDirective(D);
+ Code = serialization::STMT_OMP_INTERCHANGE_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPForDirective(OMPForDirective *D) {
VisitOMPLoopDirective(D);
Record.writeBool(D->hasCancel());
@@ -2810,6 +2859,7 @@ void ASTStmtWriter::VisitOMPTeamsGenericLoopDirective(
void ASTStmtWriter::VisitOMPTargetTeamsGenericLoopDirective(
OMPTargetTeamsGenericLoopDirective *D) {
VisitOMPLoopDirective(D);
+ Record.writeBool(D->canBeParallelFor());
Code = serialization::STMT_OMP_TARGET_TEAMS_GENERIC_LOOP_DIRECTIVE;
}
@@ -2826,6 +2876,35 @@ void ASTStmtWriter::VisitOMPTargetParallelGenericLoopDirective(
}
//===----------------------------------------------------------------------===//
+// OpenACC Constructs/Directives.
+//===----------------------------------------------------------------------===//
+void ASTStmtWriter::VisitOpenACCConstructStmt(OpenACCConstructStmt *S) {
+ Record.push_back(S->clauses().size());
+ Record.writeEnum(S->Kind);
+ Record.AddSourceRange(S->Range);
+ Record.AddSourceLocation(S->DirectiveLoc);
+ Record.writeOpenACCClauseList(S->clauses());
+}
+
+void ASTStmtWriter::VisitOpenACCAssociatedStmtConstruct(
+ OpenACCAssociatedStmtConstruct *S) {
+ VisitOpenACCConstructStmt(S);
+ Record.AddStmt(S->getAssociatedStmt());
+}
+
+void ASTStmtWriter::VisitOpenACCComputeConstruct(OpenACCComputeConstruct *S) {
+ VisitStmt(S);
+ VisitOpenACCAssociatedStmtConstruct(S);
+ Code = serialization::STMT_OPENACC_COMPUTE_CONSTRUCT;
+}
+
+void ASTStmtWriter::VisitOpenACCLoopConstruct(OpenACCLoopConstruct *S) {
+ VisitStmt(S);
+ VisitOpenACCAssociatedStmtConstruct(S);
+ Code = serialization::STMT_OPENACC_LOOP_CONSTRUCT;
+}
+
+//===----------------------------------------------------------------------===//
// ASTWriter Implementation
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/clang/lib/Serialization/GeneratePCH.cpp b/contrib/llvm-project/clang/lib/Serialization/GeneratePCH.cpp
index cf8084333811..cc06106a4770 100644
--- a/contrib/llvm-project/clang/lib/Serialization/GeneratePCH.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/GeneratePCH.cpp
@@ -12,7 +12,9 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/SemaConsumer.h"
#include "clang/Serialization/ASTWriter.h"
@@ -21,15 +23,16 @@
using namespace clang;
PCHGenerator::PCHGenerator(
- const Preprocessor &PP, InMemoryModuleCache &ModuleCache,
- StringRef OutputFile, StringRef isysroot, std::shared_ptr<PCHBuffer> Buffer,
+ Preprocessor &PP, InMemoryModuleCache &ModuleCache, StringRef OutputFile,
+ StringRef isysroot, std::shared_ptr<PCHBuffer> Buffer,
ArrayRef<std::shared_ptr<ModuleFileExtension>> Extensions,
bool AllowASTWithErrors, bool IncludeTimestamps,
- bool BuildingImplicitModule, bool ShouldCacheASTInMemory)
+ bool BuildingImplicitModule, bool ShouldCacheASTInMemory,
+ bool GeneratingReducedBMI)
: PP(PP), OutputFile(OutputFile), isysroot(isysroot.str()),
SemaPtr(nullptr), Buffer(std::move(Buffer)), Stream(this->Buffer->Data),
Writer(Stream, this->Buffer->Data, ModuleCache, Extensions,
- IncludeTimestamps, BuildingImplicitModule),
+ IncludeTimestamps, BuildingImplicitModule, GeneratingReducedBMI),
AllowASTWithErrors(AllowASTWithErrors),
ShouldCacheASTInMemory(ShouldCacheASTInMemory) {
this->Buffer->IsComplete = false;
@@ -38,6 +41,21 @@ PCHGenerator::PCHGenerator(
PCHGenerator::~PCHGenerator() {
}
+Module *PCHGenerator::getEmittingModule(ASTContext &) {
+ Module *M = nullptr;
+
+ if (PP.getLangOpts().isCompilingModule()) {
+ M = PP.getHeaderSearchInfo().lookupModule(PP.getLangOpts().CurrentModule,
+ SourceLocation(),
+ /*AllowSearch*/ false);
+ if (!M)
+ assert(PP.getDiagnostics().hasErrorOccurred() &&
+ "emitting module but current module doesn't exist");
+ }
+
+ return M;
+}
+
void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) {
// Don't create a PCH if there were fatal failures during module loading.
if (PP.getModuleLoader().HadFatalFailure)
@@ -47,16 +65,7 @@ void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) {
if (hasErrors && !AllowASTWithErrors)
return;
- Module *Module = nullptr;
- if (PP.getLangOpts().isCompilingModule()) {
- Module = PP.getHeaderSearchInfo().lookupModule(
- PP.getLangOpts().CurrentModule, SourceLocation(),
- /*AllowSearch*/ false);
- if (!Module) {
- assert(hasErrors && "emitting module but current module doesn't exist");
- return;
- }
- }
+ Module *Module = getEmittingModule(Ctx);
// Errors that do not prevent the PCH from being written should not cause the
// overall compilation to fail either.
@@ -78,3 +87,53 @@ ASTMutationListener *PCHGenerator::GetASTMutationListener() {
ASTDeserializationListener *PCHGenerator::GetASTDeserializationListener() {
return &Writer;
}
+
+void PCHGenerator::anchor() {}
+
+CXX20ModulesGenerator::CXX20ModulesGenerator(Preprocessor &PP,
+ InMemoryModuleCache &ModuleCache,
+ StringRef OutputFile,
+ bool GeneratingReducedBMI)
+ : PCHGenerator(
+ PP, ModuleCache, OutputFile, llvm::StringRef(),
+ std::make_shared<PCHBuffer>(),
+ /*Extensions=*/ArrayRef<std::shared_ptr<ModuleFileExtension>>(),
+ /*AllowASTWithErrors*/ false, /*IncludeTimestamps=*/false,
+ /*BuildingImplicitModule=*/false, /*ShouldCacheASTInMemory=*/false,
+ GeneratingReducedBMI) {}
+
+Module *CXX20ModulesGenerator::getEmittingModule(ASTContext &Ctx) {
+ Module *M = Ctx.getCurrentNamedModule();
+ assert(M && M->isNamedModuleUnit() &&
+ "CXX20ModulesGenerator should only be used with C++20 Named modules.");
+ return M;
+}
+
+void CXX20ModulesGenerator::HandleTranslationUnit(ASTContext &Ctx) {
+ // FIXME: We'd better wrap such options in a new class ASTWriterOptions,
+ // since this is not really about searching headers.
+ HeaderSearchOptions &HSOpts =
+ getPreprocessor().getHeaderSearchInfo().getHeaderSearchOpts();
+ HSOpts.ModulesSkipDiagnosticOptions = true;
+ HSOpts.ModulesSkipHeaderSearchPaths = true;
+
+ PCHGenerator::HandleTranslationUnit(Ctx);
+
+ if (!isComplete())
+ return;
+
+ std::error_code EC;
+ auto OS = std::make_unique<llvm::raw_fd_ostream>(getOutputFile(), EC);
+ if (EC) {
+ getDiagnostics().Report(diag::err_fe_unable_to_open_output)
+ << getOutputFile() << EC.message() << "\n";
+ return;
+ }
+
+ *OS << getBufferPtr()->Data;
+ OS->flush();
+}
+
+void CXX20ModulesGenerator::anchor() {}
+
+void ReducedBMIGenerator::anchor() {}
diff --git a/contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp b/contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp
index dd4fc3e00905..1163943c5dff 100644
--- a/contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp
@@ -13,7 +13,6 @@
#include "clang/Serialization/GlobalModuleIndex.h"
#include "ASTReaderInternals.h"
#include "clang/Basic/FileManager.h"
-#include "clang/Lex/HeaderSearch.h"
#include "clang/Serialization/ASTBitCodes.h"
#include "clang/Serialization/ModuleFile.h"
#include "clang/Serialization/PCHContainerOperations.h"
@@ -89,10 +88,8 @@ public:
static std::pair<unsigned, unsigned>
ReadKeyDataLength(const unsigned char*& d) {
using namespace llvm::support;
- unsigned KeyLen =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(d);
- unsigned DataLen =
- endian::readNext<uint16_t, llvm::endianness::little, unaligned>(d);
+ unsigned KeyLen = endian::readNext<uint16_t, llvm::endianness::little>(d);
+ unsigned DataLen = endian::readNext<uint16_t, llvm::endianness::little>(d);
return std::make_pair(KeyLen, DataLen);
}
@@ -113,8 +110,7 @@ public:
data_type Result;
while (DataLen > 0) {
- unsigned ID =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d);
+ unsigned ID = endian::readNext<uint32_t, llvm::endianness::little>(d);
Result.push_back(ID);
DataLen -= 4;
}
@@ -514,8 +510,8 @@ namespace {
// The first bit indicates whether this identifier is interesting.
// That's all we care about.
using namespace llvm::support;
- unsigned RawID =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d);
+ IdentifierID RawID =
+ endian::readNext<IdentifierID, llvm::endianness::little>(d);
bool IsInteresting = RawID & 0x01;
return std::make_pair(k, IsInteresting);
}
diff --git a/contrib/llvm-project/clang/lib/Serialization/ModuleFile.cpp b/contrib/llvm-project/clang/lib/Serialization/ModuleFile.cpp
index db896fd36115..4858cdbda554 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ModuleFile.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ModuleFile.cpp
@@ -59,11 +59,9 @@ LLVM_DUMP_METHOD void ModuleFile::dump() {
// Remapping tables.
llvm::errs() << " Base source location offset: " << SLocEntryBaseOffset
<< '\n';
- dumpLocalRemap("Source location offset local -> global map", SLocRemap);
llvm::errs() << " Base identifier ID: " << BaseIdentifierID << '\n'
<< " Number of identifiers: " << LocalNumIdentifiers << '\n';
- dumpLocalRemap("Identifier ID local -> global map", IdentifierRemap);
llvm::errs() << " Base macro ID: " << BaseMacroID << '\n'
<< " Number of macros: " << LocalNumMacros << '\n';
@@ -86,9 +84,7 @@ LLVM_DUMP_METHOD void ModuleFile::dump() {
llvm::errs() << " Base type index: " << BaseTypeIndex << '\n'
<< " Number of types: " << LocalNumTypes << '\n';
- dumpLocalRemap("Type index local -> global map", TypeRemap);
- llvm::errs() << " Base decl ID: " << BaseDeclID << '\n'
+ llvm::errs() << " Base decl index: " << BaseDeclIndex << '\n'
<< " Number of decls: " << LocalNumDecls << '\n';
- dumpLocalRemap("Decl ID local -> global map", DeclRemap);
}
diff --git a/contrib/llvm-project/clang/lib/Serialization/ModuleFileExtension.cpp b/contrib/llvm-project/clang/lib/Serialization/ModuleFileExtension.cpp
index 95fff41e0d7a..729529b5fca1 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ModuleFileExtension.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ModuleFileExtension.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
#include "clang/Serialization/ModuleFileExtension.h"
-#include "llvm/ADT/Hashing.h"
+
using namespace clang;
char ModuleFileExtension::ID = 0;
diff --git a/contrib/llvm-project/clang/lib/Serialization/MultiOnDiskHashTable.h b/contrib/llvm-project/clang/lib/Serialization/MultiOnDiskHashTable.h
index 2402a628b512..a0d75ec3a9e7 100644
--- a/contrib/llvm-project/clang/lib/Serialization/MultiOnDiskHashTable.h
+++ b/contrib/llvm-project/clang/lib/Serialization/MultiOnDiskHashTable.h
@@ -200,11 +200,11 @@ public:
storage_type Ptr = Data;
uint32_t BucketOffset =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Ptr);
+ endian::readNext<uint32_t, llvm::endianness::little>(Ptr);
// Read the list of overridden files.
uint32_t NumFiles =
- endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Ptr);
+ endian::readNext<uint32_t, llvm::endianness::little>(Ptr);
// FIXME: Add a reserve() to TinyPtrVector so that we don't need to make
// an additional copy.
llvm::SmallVector<file_type, 16> OverriddenFiles;
diff --git a/contrib/llvm-project/clang/lib/Serialization/PCHContainerOperations.cpp b/contrib/llvm-project/clang/lib/Serialization/PCHContainerOperations.cpp
index 56ca3394385b..4aedb7debcff 100644
--- a/contrib/llvm-project/clang/lib/Serialization/PCHContainerOperations.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/PCHContainerOperations.cpp
@@ -12,8 +12,6 @@
#include "clang/Serialization/PCHContainerOperations.h"
#include "clang/AST/ASTConsumer.h"
-#include "clang/Lex/ModuleLoader.h"
-#include "llvm/Bitstream/BitstreamReader.h"
#include "llvm/Support/raw_ostream.h"
#include <utility>
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
index 6c7a1601402e..3f837564cf47 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -33,7 +33,84 @@ using namespace taint;
using llvm::formatv;
namespace {
-enum OOB_Kind { OOB_Precedes, OOB_Exceeds, OOB_Taint };
+/// If `E` is a "clean" array subscript expression, return the type of the
+/// accessed element. If the base of the subscript expression is modified by
+/// pointer arithmetic (and not the beginning of a "full" memory region), this
+/// always returns nullopt because that's the right (or the least bad) thing to
+/// do for the diagnostic output that's relying on this.
+static std::optional<QualType> determineElementType(const Expr *E,
+ const CheckerContext &C) {
+ const auto *ASE = dyn_cast<ArraySubscriptExpr>(E);
+ if (!ASE)
+ return std::nullopt;
+
+ const MemRegion *SubscriptBaseReg = C.getSVal(ASE->getBase()).getAsRegion();
+ if (!SubscriptBaseReg)
+ return std::nullopt;
+
+ // The base of the subscript expression is affected by pointer arithmetic,
+ // so we want to report byte offsets instead of indices.
+ if (isa<ElementRegion>(SubscriptBaseReg->StripCasts()))
+ return std::nullopt;
+
+ return ASE->getType();
+}
+
+static std::optional<int64_t>
+determineElementSize(const std::optional<QualType> T, const CheckerContext &C) {
+ if (!T)
+ return std::nullopt;
+ return C.getASTContext().getTypeSizeInChars(*T).getQuantity();
+}
+
+class StateUpdateReporter {
+ const SubRegion *Reg;
+ const NonLoc ByteOffsetVal;
+ const std::optional<QualType> ElementType;
+ const std::optional<int64_t> ElementSize;
+ bool AssumedNonNegative = false;
+ std::optional<NonLoc> AssumedUpperBound = std::nullopt;
+
+public:
+ StateUpdateReporter(const SubRegion *R, NonLoc ByteOffsVal, const Expr *E,
+ CheckerContext &C)
+ : Reg(R), ByteOffsetVal(ByteOffsVal),
+ ElementType(determineElementType(E, C)),
+ ElementSize(determineElementSize(ElementType, C)) {}
+
+ void recordNonNegativeAssumption() { AssumedNonNegative = true; }
+ void recordUpperBoundAssumption(NonLoc UpperBoundVal) {
+ AssumedUpperBound = UpperBoundVal;
+ }
+
+ bool assumedNonNegative() { return AssumedNonNegative; }
+
+ const NoteTag *createNoteTag(CheckerContext &C) const;
+
+private:
+ std::string getMessage(PathSensitiveBugReport &BR) const;
+
+ /// Return true if information about the value of `Sym` can put constraints
+ /// on some symbol which is interesting within the bug report `BR`.
+ /// In particular, this returns true when `Sym` is interesting within `BR`;
+ /// but it also returns true if `Sym` is an expression that contains integer
+ /// constants and a single symbolic operand which is interesting (in `BR`).
+ /// We need to use this instead of plain `BR.isInteresting()` because if we
+ /// are analyzing code like
+ /// int array[10];
+ /// int f(int arg) {
+ /// return array[arg] && array[arg + 10];
+ /// }
+ /// then the byte offsets are `arg * 4` and `(arg + 10) * 4`, which are not
+ /// sub-expressions of each other (but `getSimplifiedOffsets` is smart enough
+ /// to detect this out of bounds access).
+ static bool providesInformationAboutInteresting(SymbolRef Sym,
+ PathSensitiveBugReport &BR);
+ static bool providesInformationAboutInteresting(SVal SV,
+ PathSensitiveBugReport &BR) {
+ return providesInformationAboutInteresting(SV.getAsSymbol(), BR);
+ }
+};
struct Messages {
std::string Short, Full;
@@ -54,11 +131,19 @@ class ArrayBoundCheckerV2 : public Checker<check::PostStmt<ArraySubscriptExpr>,
void performCheck(const Expr *E, CheckerContext &C) const;
- void reportOOB(CheckerContext &C, ProgramStateRef ErrorState, OOB_Kind Kind,
- NonLoc Offset, Messages Msgs) const;
+ void reportOOB(CheckerContext &C, ProgramStateRef ErrorState, Messages Msgs,
+ NonLoc Offset, std::optional<NonLoc> Extent,
+ bool IsTaintBug = false) const;
+
+ static void markPartsInteresting(PathSensitiveBugReport &BR,
+ ProgramStateRef ErrorState, NonLoc Val,
+ bool MarkTaint);
static bool isFromCtypeMacro(const Stmt *S, ASTContext &AC);
+ static bool isIdiomaticPastTheEndPtr(const Expr *E, ProgramStateRef State,
+ NonLoc Offset, NonLoc Limit,
+ CheckerContext &C);
static bool isInAddressOf(const Stmt *S, ASTContext &AC);
public:
@@ -133,12 +218,26 @@ computeOffset(ProgramStateRef State, SValBuilder &SVB, SVal Location) {
return std::nullopt;
}
-// TODO: once the constraint manager is smart enough to handle non simplified
-// symbolic expressions remove this function. Note that this can not be used in
-// the constraint manager as is, since this does not handle overflows. It is
-// safe to assume, however, that memory offsets will not overflow.
-// NOTE: callers of this function need to be aware of the effects of overflows
-// and signed<->unsigned conversions!
+// NOTE: This function is the "heart" of this checker. It simplifies
+// inequalities with transformations that are valid (and very elementary) in
+// pure mathematics, but become invalid if we use them in C++ number model
+// where the calculations may overflow.
+// Due to the overflow issues I think it's impossible (or at least not
+// practical) to integrate this kind of simplification into the resolution of
+// arbitrary inequalities (i.e. the code of `evalBinOp`); but this function
+// produces valid results when the calculations are handling memory offsets
+// and every value is well below SIZE_MAX.
+// TODO: This algorithm should be moved to a central location where it's
+// available for other checkers that need to compare memory offsets.
+// NOTE: the simplification preserves the order of the two operands in a
+// mathematical sense, but it may change the result produced by a C++
+// comparison operator (and the automatic type conversions).
+// For example, consider a comparison "X+1 < 0", where the LHS is stored as a
+// size_t and the RHS is stored in an int. (As size_t is unsigned, this
+// comparison is false for all values of "X".) However, the simplification may
+// turn it into "X < -1", which is still always false in a mathematical sense,
+// but can produce a true result when evaluated by `evalBinOp` (which follows
+// the rules of C++ and casts -1 to SIZE_MAX).
static std::pair<NonLoc, nonloc::ConcreteInt>
getSimplifiedOffsets(NonLoc offset, nonloc::ConcreteInt extent,
SValBuilder &svalBuilder) {
@@ -171,6 +270,16 @@ getSimplifiedOffsets(NonLoc offset, nonloc::ConcreteInt extent,
return std::pair<NonLoc, nonloc::ConcreteInt>(offset, extent);
}
+static bool isNegative(SValBuilder &SVB, ProgramStateRef State, NonLoc Value) {
+ const llvm::APSInt *MaxV = SVB.getMaxValue(State, Value);
+ return MaxV && MaxV->isNegative();
+}
+
+static bool isUnsigned(SValBuilder &SVB, NonLoc Value) {
+ QualType T = Value.getType(SVB.getContext());
+ return T->isUnsignedIntegerType();
+}
+
// Evaluate the comparison Value < Threshold with the help of the custom
// simplification algorithm defined for this checker. Return a pair of states,
// where the first one corresponds to "value below threshold" and the second
@@ -184,18 +293,38 @@ compareValueToThreshold(ProgramStateRef State, NonLoc Value, NonLoc Threshold,
if (auto ConcreteThreshold = Threshold.getAs<nonloc::ConcreteInt>()) {
std::tie(Value, Threshold) = getSimplifiedOffsets(Value, *ConcreteThreshold, SVB);
}
- if (auto ConcreteThreshold = Threshold.getAs<nonloc::ConcreteInt>()) {
- QualType T = Value.getType(SVB.getContext());
- if (T->isUnsignedIntegerType() && ConcreteThreshold->getValue().isNegative()) {
- // In this case we reduced the bound check to a comparison of the form
- // (symbol or value with unsigned type) < (negative number)
- // which is always false. We are handling these cases separately because
- // evalBinOpNN can perform a signed->unsigned conversion that turns the
- // negative number into a huge positive value and leads to wildly
- // inaccurate conclusions.
+
+ // We want to perform a _mathematical_ comparison between the numbers `Value`
+ // and `Threshold`; but `evalBinOpNN` evaluates a C/C++ operator that may
+ // perform automatic conversions. For example the number -1 is less than the
+ // number 1000, but -1 < `1000ull` will evaluate to `false` because the `int`
+ // -1 is converted to ULONGLONG_MAX.
+ // To avoid automatic conversions, we evaluate the "obvious" cases without
+ // calling `evalBinOpNN`:
+ if (isNegative(SVB, State, Value) && isUnsigned(SVB, Threshold)) {
+ if (CheckEquality) {
+ // negative_value == unsigned_threshold is always false
return {nullptr, State};
}
+ // negative_value < unsigned_threshold is always true
+ return {State, nullptr};
+ }
+ if (isUnsigned(SVB, Value) && isNegative(SVB, State, Threshold)) {
+ // unsigned_value == negative_threshold and
+ // unsigned_value < negative_threshold are both always false
+ return {nullptr, State};
}
+ // FIXME: These special cases are sufficient for handling real-world
+ // comparisons, but in theory there could be contrived situations where
+ // automatic conversion of a symbolic value (which can be negative and can be
+ // positive) leads to incorrect results.
+ // NOTE: We NEED to use the `evalBinOpNN` call in the "common" case, because
+ // we want to ensure that assumptions coming from this precondition and
+ // assumptions coming from regular C/C++ operator calls are represented by
+ // constraints on the same symbolic expression. A solution that would
+ // evaluate these "mathematical" comparisons through a separate pathway would
+ // be a step backwards in this sense.
+
const BinaryOperatorKind OpKind = CheckEquality ? BO_EQ : BO_LT;
auto BelowThreshold =
SVB.evalBinOpNN(State, OpKind, Value, Threshold, SVB.getConditionType())
@@ -239,27 +368,44 @@ static std::optional<int64_t> getConcreteValue(NonLoc SV) {
return std::nullopt;
}
-static std::string getShortMsg(OOB_Kind Kind, std::string RegName) {
- static const char *ShortMsgTemplates[] = {
- "Out of bound access to memory preceding {0}",
- "Out of bound access to memory after the end of {0}",
- "Potential out of bound access to {0} with tainted offset"};
-
- return formatv(ShortMsgTemplates[Kind], RegName);
+static std::optional<int64_t> getConcreteValue(std::optional<NonLoc> SV) {
+ return SV ? getConcreteValue(*SV) : std::nullopt;
}
static Messages getPrecedesMsgs(const SubRegion *Region, NonLoc Offset) {
- std::string RegName = getRegionName(Region);
- SmallString<128> Buf;
- llvm::raw_svector_ostream Out(Buf);
- Out << "Access of " << RegName << " at negative byte offset";
- if (auto ConcreteIdx = Offset.getAs<nonloc::ConcreteInt>())
- Out << ' ' << ConcreteIdx->getValue();
- return {getShortMsg(OOB_Precedes, RegName), std::string(Buf)};
+ std::string RegName = getRegionName(Region), OffsetStr = "";
+
+ if (auto ConcreteOffset = getConcreteValue(Offset))
+ OffsetStr = formatv(" {0}", ConcreteOffset);
+
+ return {
+ formatv("Out of bound access to memory preceding {0}", RegName),
+ formatv("Access of {0} at negative byte offset{1}", RegName, OffsetStr)};
+}
+
+/// Try to divide `Val1` and `Val2` (in place) by `Divisor` and return true if
+/// it can be performed (`Divisor` is nonzero and there is no remainder). The
+/// values `Val1` and `Val2` may be nullopt and in that case the corresponding
+/// division is considered to be successful.
+static bool tryDividePair(std::optional<int64_t> &Val1,
+ std::optional<int64_t> &Val2, int64_t Divisor) {
+ if (!Divisor)
+ return false;
+ const bool Val1HasRemainder = Val1 && *Val1 % Divisor;
+ const bool Val2HasRemainder = Val2 && *Val2 % Divisor;
+ if (!Val1HasRemainder && !Val2HasRemainder) {
+ if (Val1)
+ *Val1 /= Divisor;
+ if (Val2)
+ *Val2 /= Divisor;
+ return true;
+ }
+ return false;
}
static Messages getExceedsMsgs(ASTContext &ACtx, const SubRegion *Region,
- NonLoc Offset, NonLoc Extent, SVal Location) {
+ NonLoc Offset, NonLoc Extent, SVal Location,
+ bool AlsoMentionUnderflow) {
std::string RegName = getRegionName(Region);
const auto *EReg = Location.getAsRegion()->getAs<ElementRegion>();
assert(EReg && "this checker only handles element access");
@@ -268,18 +414,10 @@ static Messages getExceedsMsgs(ASTContext &ACtx, const SubRegion *Region,
std::optional<int64_t> OffsetN = getConcreteValue(Offset);
std::optional<int64_t> ExtentN = getConcreteValue(Extent);
- bool UseByteOffsets = true;
- if (int64_t ElemSize = ACtx.getTypeSizeInChars(ElemType).getQuantity()) {
- const bool OffsetHasRemainder = OffsetN && *OffsetN % ElemSize;
- const bool ExtentHasRemainder = ExtentN && *ExtentN % ElemSize;
- if (!OffsetHasRemainder && !ExtentHasRemainder) {
- UseByteOffsets = false;
- if (OffsetN)
- *OffsetN /= ElemSize;
- if (ExtentN)
- *ExtentN /= ElemSize;
- }
- }
+ int64_t ElemSize = ACtx.getTypeSizeInChars(ElemType).getQuantity();
+
+ bool UseByteOffsets = !tryDividePair(OffsetN, ExtentN, ElemSize);
+ const char *OffsetOrIndex = UseByteOffsets ? "byte offset" : "index";
SmallString<256> Buf;
llvm::raw_svector_ostream Out(Buf);
@@ -287,10 +425,12 @@ static Messages getExceedsMsgs(ASTContext &ACtx, const SubRegion *Region,
if (!ExtentN && !UseByteOffsets)
Out << "'" << ElemType.getAsString() << "' element in ";
Out << RegName << " at ";
- if (OffsetN) {
- Out << (UseByteOffsets ? "byte offset " : "index ") << *OffsetN;
+ if (AlsoMentionUnderflow) {
+ Out << "a negative or overflowing " << OffsetOrIndex;
+ } else if (OffsetN) {
+ Out << OffsetOrIndex << " " << *OffsetN;
} else {
- Out << "an overflowing " << (UseByteOffsets ? "byte offset" : "index");
+ Out << "an overflowing " << OffsetOrIndex;
}
if (ExtentN) {
Out << ", while it holds only ";
@@ -307,28 +447,107 @@ static Messages getExceedsMsgs(ASTContext &ACtx, const SubRegion *Region,
Out << "s";
}
- return {getShortMsg(OOB_Exceeds, RegName), std::string(Buf)};
+ return {formatv("Out of bound access to memory {0} {1}",
+ AlsoMentionUnderflow ? "around" : "after the end of",
+ RegName),
+ std::string(Buf)};
}
-static Messages getTaintMsgs(const SubRegion *Region, const char *OffsetName) {
+static Messages getTaintMsgs(const SubRegion *Region, const char *OffsetName,
+ bool AlsoMentionUnderflow) {
std::string RegName = getRegionName(Region);
return {formatv("Potential out of bound access to {0} with tainted {1}",
RegName, OffsetName),
- formatv("Access of {0} with a tainted {1} that may be too large",
- RegName, OffsetName)};
+ formatv("Access of {0} with a tainted {1} that may be {2}too large",
+ RegName, OffsetName,
+ AlsoMentionUnderflow ? "negative or " : "")};
}
-void ArrayBoundCheckerV2::performCheck(const Expr *E, CheckerContext &C) const {
- // NOTE: Instead of using ProgramState::assumeInBound(), we are prototyping
- // some new logic here that reasons directly about memory region extents.
- // Once that logic is more mature, we can bring it back to assumeInBound()
- // for all clients to use.
- //
- // The algorithm we are using here for bounds checking is to see if the
- // memory access is within the extent of the base region. Since we
- // have some flexibility in defining the base region, we can achieve
- // various levels of conservatism in our buffer overflow checking.
+const NoteTag *StateUpdateReporter::createNoteTag(CheckerContext &C) const {
+ // Don't create a note tag if we didn't assume anything:
+ if (!AssumedNonNegative && !AssumedUpperBound)
+ return nullptr;
+
+ return C.getNoteTag([*this](PathSensitiveBugReport &BR) -> std::string {
+ return getMessage(BR);
+ });
+}
+std::string StateUpdateReporter::getMessage(PathSensitiveBugReport &BR) const {
+ bool ShouldReportNonNegative = AssumedNonNegative;
+ if (!providesInformationAboutInteresting(ByteOffsetVal, BR)) {
+ if (AssumedUpperBound &&
+ providesInformationAboutInteresting(*AssumedUpperBound, BR)) {
+ // Even if the byte offset isn't interesting (e.g. it's a constant value),
+ // the assumption can still be interesting if it provides information
+ // about an interesting symbolic upper bound.
+ ShouldReportNonNegative = false;
+ } else {
+ // We don't have anything interesting, don't report the assumption.
+ return "";
+ }
+ }
+
+ std::optional<int64_t> OffsetN = getConcreteValue(ByteOffsetVal);
+ std::optional<int64_t> ExtentN = getConcreteValue(AssumedUpperBound);
+
+ const bool UseIndex =
+ ElementSize && tryDividePair(OffsetN, ExtentN, *ElementSize);
+
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream Out(Buf);
+ Out << "Assuming ";
+ if (UseIndex) {
+ Out << "index ";
+ if (OffsetN)
+ Out << "'" << OffsetN << "' ";
+ } else if (AssumedUpperBound) {
+ Out << "byte offset ";
+ if (OffsetN)
+ Out << "'" << OffsetN << "' ";
+ } else {
+ Out << "offset ";
+ }
+
+ Out << "is";
+ if (ShouldReportNonNegative) {
+ Out << " non-negative";
+ }
+ if (AssumedUpperBound) {
+ if (ShouldReportNonNegative)
+ Out << " and";
+ Out << " less than ";
+ if (ExtentN)
+ Out << *ExtentN << ", ";
+ if (UseIndex && ElementType)
+ Out << "the number of '" << ElementType->getAsString()
+ << "' elements in ";
+ else
+ Out << "the extent of ";
+ Out << getRegionName(Reg);
+ }
+ return std::string(Out.str());
+}
+
+bool StateUpdateReporter::providesInformationAboutInteresting(
+ SymbolRef Sym, PathSensitiveBugReport &BR) {
+ if (!Sym)
+ return false;
+ for (SymbolRef PartSym : Sym->symbols()) {
+ // The interestingness mark may appear on any layer as we're stripping off
+ // the SymIntExpr, UnarySymExpr etc. layers...
+ if (BR.isInteresting(PartSym))
+ return true;
+ // ...but if both sides of the expression are symbolic, then there is no
+ // practical algorithm to produce separate constraints for the two
+ // operands (from the single combined result).
+ if (isa<SymSymExpr>(PartSym))
+ return false;
+ }
+ return false;
+}
+
+void ArrayBoundCheckerV2::performCheck(const Expr *E, CheckerContext &C) const {
const SVal Location = C.getSVal(E);
// The header ctype.h (from e.g. glibc) implements the isXXXXX() macros as
@@ -350,6 +569,10 @@ void ArrayBoundCheckerV2::performCheck(const Expr *E, CheckerContext &C) const {
auto [Reg, ByteOffset] = *RawOffset;
+ // The state updates will be reported as a single note tag, which will be
+ // composed by this helper class.
+ StateUpdateReporter SUR(Reg, ByteOffset, E, C);
+
// CHECK LOWER BOUND
const MemSpaceRegion *Space = Reg->getMemorySpace();
if (!(isa<SymbolicRegion>(Reg) && isa<UnknownSpaceRegion>(Space))) {
@@ -363,13 +586,22 @@ void ArrayBoundCheckerV2::performCheck(const Expr *E, CheckerContext &C) const {
auto [PrecedesLowerBound, WithinLowerBound] = compareValueToThreshold(
State, ByteOffset, SVB.makeZeroArrayIndex(), SVB);
- if (PrecedesLowerBound && !WithinLowerBound) {
- // We know that the index definitely precedes the lower bound.
- Messages Msgs = getPrecedesMsgs(Reg, ByteOffset);
- reportOOB(C, PrecedesLowerBound, OOB_Precedes, ByteOffset, Msgs);
- return;
+ if (PrecedesLowerBound) {
+ // The offset may be invalid (negative)...
+ if (!WithinLowerBound) {
+ // ...and it cannot be valid (>= 0), so report an error.
+ Messages Msgs = getPrecedesMsgs(Reg, ByteOffset);
+ reportOOB(C, PrecedesLowerBound, Msgs, ByteOffset, std::nullopt);
+ return;
+ }
+ // ...but it can be valid as well, so the checker will (optimistically)
+ // assume that it's valid and mention this in the note tag.
+ SUR.recordNonNegativeAssumption();
}
+ // Actually update the state. The "if" only fails in the extremely unlikely
+ // case when compareValueToThreshold returns {nullptr, nullptr} because
+ // evalBinOpNN fails to evaluate the less-than operator.
if (WithinLowerBound)
State = WithinLowerBound;
}
@@ -377,70 +609,118 @@ void ArrayBoundCheckerV2::performCheck(const Expr *E, CheckerContext &C) const {
// CHECK UPPER BOUND
DefinedOrUnknownSVal Size = getDynamicExtent(State, Reg, SVB);
if (auto KnownSize = Size.getAs<NonLoc>()) {
+ // In a situation where both underflow and overflow are possible (but the
+ // index is either tainted or known to be invalid), the logic of this
+ // checker will first assume that the offset is non-negative, and then
+ // (with this additional assumption) it will detect an overflow error.
+ // In this situation the warning message should mention both possibilities.
+ bool AlsoMentionUnderflow = SUR.assumedNonNegative();
+
auto [WithinUpperBound, ExceedsUpperBound] =
compareValueToThreshold(State, ByteOffset, *KnownSize, SVB);
if (ExceedsUpperBound) {
+ // The offset may be invalid (>= Size)...
if (!WithinUpperBound) {
- // We know that the index definitely exceeds the upper bound.
- if (isa<ArraySubscriptExpr>(E) && isInAddressOf(E, C.getASTContext())) {
- // ...but this is within an addressof expression, so we need to check
- // for the exceptional case that `&array[size]` is valid.
- auto [EqualsToThreshold, NotEqualToThreshold] =
- compareValueToThreshold(ExceedsUpperBound, ByteOffset, *KnownSize,
- SVB, /*CheckEquality=*/true);
- if (EqualsToThreshold && !NotEqualToThreshold) {
- // We are definitely in the exceptional case, so return early
- // instead of reporting a bug.
- C.addTransition(EqualsToThreshold);
- return;
- }
+ // ...and it cannot be within bounds, so report an error, unless we can
+ // definitely determine that this is an idiomatic `&array[size]`
+ // expression that calculates the past-the-end pointer.
+ if (isIdiomaticPastTheEndPtr(E, ExceedsUpperBound, ByteOffset,
+ *KnownSize, C)) {
+ C.addTransition(ExceedsUpperBound, SUR.createNoteTag(C));
+ return;
}
- Messages Msgs = getExceedsMsgs(C.getASTContext(), Reg, ByteOffset,
- *KnownSize, Location);
- reportOOB(C, ExceedsUpperBound, OOB_Exceeds, ByteOffset, Msgs);
+
+ Messages Msgs =
+ getExceedsMsgs(C.getASTContext(), Reg, ByteOffset, *KnownSize,
+ Location, AlsoMentionUnderflow);
+ reportOOB(C, ExceedsUpperBound, Msgs, ByteOffset, KnownSize);
return;
}
+ // ...and it can be valid as well...
if (isTainted(State, ByteOffset)) {
- // Both cases are possible, but the offset is tainted, so report.
- std::string RegName = getRegionName(Reg);
+ // ...but it's tainted, so report an error.
- // Diagnostic detail: "tainted offset" is always correct, but the
- // common case is that 'idx' is tainted in 'arr[idx]' and then it's
+ // Diagnostic detail: saying "tainted offset" is always correct, but
+ // the common case is that 'idx' is tainted in 'arr[idx]' and then it's
// nicer to say "tainted index".
const char *OffsetName = "offset";
if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(E))
if (isTainted(State, ASE->getIdx(), C.getLocationContext()))
OffsetName = "index";
- Messages Msgs = getTaintMsgs(Reg, OffsetName);
- reportOOB(C, ExceedsUpperBound, OOB_Taint, ByteOffset, Msgs);
+ Messages Msgs = getTaintMsgs(Reg, OffsetName, AlsoMentionUnderflow);
+ reportOOB(C, ExceedsUpperBound, Msgs, ByteOffset, KnownSize,
+ /*IsTaintBug=*/true);
return;
}
+ // ...and it isn't tainted, so the checker will (optimistically) assume
+ // that the offset is in bounds and mention this in the note tag.
+ SUR.recordUpperBoundAssumption(*KnownSize);
}
+ // Actually update the state. The "if" only fails in the extremely unlikely
+    // case when compareValueToThreshold returns {nullptr, nullptr} because
+ // evalBinOpNN fails to evaluate the less-than operator.
if (WithinUpperBound)
State = WithinUpperBound;
}
- C.addTransition(State);
+ // Add a transition, reporting the state updates that we accumulated.
+ C.addTransition(State, SUR.createNoteTag(C));
+}
+
+void ArrayBoundCheckerV2::markPartsInteresting(PathSensitiveBugReport &BR,
+ ProgramStateRef ErrorState,
+ NonLoc Val, bool MarkTaint) {
+ if (SymbolRef Sym = Val.getAsSymbol()) {
+ // If the offset is a symbolic value, iterate over its "parts" with
+ // `SymExpr::symbols()` and mark each of them as interesting.
+ // For example, if the offset is `x*4 + y` then we put interestingness onto
+ // the SymSymExpr `x*4 + y`, the SymIntExpr `x*4` and the two data symbols
+ // `x` and `y`.
+ for (SymbolRef PartSym : Sym->symbols())
+ BR.markInteresting(PartSym);
+ }
+
+ if (MarkTaint) {
+ // If the issue that we're reporting depends on the taintedness of the
+ // offset, then put interestingness onto symbols that could be the origin
+ // of the taint. Note that this may find symbols that did not appear in
+ // `Sym->symbols()` (because they're only loosely connected to `Val`).
+ for (SymbolRef Sym : getTaintedSymbols(ErrorState, Val))
+ BR.markInteresting(Sym);
+ }
}
void ArrayBoundCheckerV2::reportOOB(CheckerContext &C,
- ProgramStateRef ErrorState, OOB_Kind Kind,
- NonLoc Offset, Messages Msgs) const {
+ ProgramStateRef ErrorState, Messages Msgs,
+ NonLoc Offset, std::optional<NonLoc> Extent,
+ bool IsTaintBug /*=false*/) const {
ExplodedNode *ErrorNode = C.generateErrorNode(ErrorState);
if (!ErrorNode)
return;
auto BR = std::make_unique<PathSensitiveBugReport>(
- Kind == OOB_Taint ? TaintBT : BT, Msgs.Short, Msgs.Full, ErrorNode);
-
- // Track back the propagation of taintedness.
- if (Kind == OOB_Taint)
- for (SymbolRef Sym : getTaintedSymbols(ErrorState, Offset))
- BR->markInteresting(Sym);
+ IsTaintBug ? TaintBT : BT, Msgs.Short, Msgs.Full, ErrorNode);
+
+ // FIXME: ideally we would just call trackExpressionValue() and that would
+ // "do the right thing": mark the relevant symbols as interesting, track the
+ // control dependencies and statements storing the relevant values and add
+ // helpful diagnostic pieces. However, right now trackExpressionValue() is
+ // a heap of unreliable heuristics, so it would cause several issues:
+ // - Interestingness is not applied consistently, e.g. if `array[x+10]`
+ // causes an overflow, then `x` is not marked as interesting.
+ // - We get irrelevant diagnostic pieces, e.g. in the code
+ // `int *p = (int*)malloc(2*sizeof(int)); p[3] = 0;`
+ // it places a "Storing uninitialized value" note on the `malloc` call
+ // (which is technically true, but irrelevant).
+ // If trackExpressionValue() becomes reliable, it should be applied instead
+ // of this custom markPartsInteresting().
+ markPartsInteresting(*BR, ErrorState, Offset, IsTaintBug);
+ if (Extent)
+ markPartsInteresting(*BR, ErrorState, *Extent, IsTaintBug);
C.emitReport(std::move(BR));
}
@@ -476,6 +756,18 @@ bool ArrayBoundCheckerV2::isInAddressOf(const Stmt *S, ASTContext &ACtx) {
return UnaryOp && UnaryOp->getOpcode() == UO_AddrOf;
}
+bool ArrayBoundCheckerV2::isIdiomaticPastTheEndPtr(const Expr *E,
+ ProgramStateRef State,
+ NonLoc Offset, NonLoc Limit,
+ CheckerContext &C) {
+ if (isa<ArraySubscriptExpr>(E) && isInAddressOf(E, C.getASTContext())) {
+ auto [EqualsToThreshold, NotEqualToThreshold] = compareValueToThreshold(
+ State, Offset, Limit, C.getSValBuilder(), /*CheckEquality=*/true);
+ return EqualsToThreshold && !NotEqualToThreshold;
+ }
+ return false;
+}
+
void ento::registerArrayBoundCheckerV2(CheckerManager &mgr) {
mgr.registerChecker<ArrayBoundCheckerV2>();
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
index c72a97cc01e9..80f128b917b2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -542,10 +542,10 @@ namespace {
class CFRetainReleaseChecker : public Checker<check::PreCall> {
mutable APIMisuse BT{this, "null passed to CF memory management function"};
const CallDescriptionSet ModelledCalls = {
- {{"CFRetain"}, 1},
- {{"CFRelease"}, 1},
- {{"CFMakeCollectable"}, 1},
- {{"CFAutorelease"}, 1},
+ {CDM::CLibrary, {"CFRetain"}, 1},
+ {CDM::CLibrary, {"CFRelease"}, 1},
+ {CDM::CLibrary, {"CFMakeCollectable"}, 1},
+ {CDM::CLibrary, {"CFAutorelease"}, 1},
};
public:
@@ -555,10 +555,6 @@ public:
void CFRetainReleaseChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
- // TODO: Make this check part of CallDescription.
- if (!Call.isGlobalCFunction())
- return;
-
// Check if we called CFRetain/CFRelease/CFMakeCollectable/CFAutorelease.
if (!ModelledCalls.contains(Call))
return;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
index 66e080adb138..4cd2f2802f30 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
@@ -20,48 +20,186 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+
+#include <iterator>
+#include <utility>
+#include <variant>
using namespace clang;
using namespace ento;
namespace {
+
+struct CritSectionMarker {
+ const Expr *LockExpr{};
+ const MemRegion *LockReg{};
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.Add(LockExpr);
+ ID.Add(LockReg);
+ }
+
+ [[nodiscard]] constexpr bool
+ operator==(const CritSectionMarker &Other) const noexcept {
+ return LockExpr == Other.LockExpr && LockReg == Other.LockReg;
+ }
+ [[nodiscard]] constexpr bool
+ operator!=(const CritSectionMarker &Other) const noexcept {
+ return !(*this == Other);
+ }
+};
+
+class CallDescriptionBasedMatcher {
+ CallDescription LockFn;
+ CallDescription UnlockFn;
+
+public:
+ CallDescriptionBasedMatcher(CallDescription &&LockFn,
+ CallDescription &&UnlockFn)
+ : LockFn(std::move(LockFn)), UnlockFn(std::move(UnlockFn)) {}
+ [[nodiscard]] bool matches(const CallEvent &Call, bool IsLock) const {
+ if (IsLock) {
+ return LockFn.matches(Call);
+ }
+ return UnlockFn.matches(Call);
+ }
+};
+
+class FirstArgMutexDescriptor : public CallDescriptionBasedMatcher {
+public:
+ FirstArgMutexDescriptor(CallDescription &&LockFn, CallDescription &&UnlockFn)
+ : CallDescriptionBasedMatcher(std::move(LockFn), std::move(UnlockFn)) {}
+
+ [[nodiscard]] const MemRegion *getRegion(const CallEvent &Call, bool) const {
+ return Call.getArgSVal(0).getAsRegion();
+ }
+};
+
+class MemberMutexDescriptor : public CallDescriptionBasedMatcher {
+public:
+ MemberMutexDescriptor(CallDescription &&LockFn, CallDescription &&UnlockFn)
+ : CallDescriptionBasedMatcher(std::move(LockFn), std::move(UnlockFn)) {}
+
+ [[nodiscard]] const MemRegion *getRegion(const CallEvent &Call, bool) const {
+ return cast<CXXMemberCall>(Call).getCXXThisVal().getAsRegion();
+ }
+};
+
+class RAIIMutexDescriptor {
+ mutable const IdentifierInfo *Guard{};
+ mutable bool IdentifierInfoInitialized{};
+ mutable llvm::SmallString<32> GuardName{};
+
+ void initIdentifierInfo(const CallEvent &Call) const {
+ if (!IdentifierInfoInitialized) {
+ // In case of checking C code, or when the corresponding headers are not
+      // included, we might end up querying the identifier table every time
+      // this function is called instead of returning early. To avoid this, a
+ // bool variable (IdentifierInfoInitialized) is used and the function will
+ // be run only once.
+ const auto &ASTCtx = Call.getState()->getStateManager().getContext();
+ Guard = &ASTCtx.Idents.get(GuardName);
+ }
+ }
+
+ template <typename T> bool matchesImpl(const CallEvent &Call) const {
+ const T *C = dyn_cast<T>(&Call);
+ if (!C)
+ return false;
+ const IdentifierInfo *II =
+ cast<CXXRecordDecl>(C->getDecl()->getParent())->getIdentifier();
+ return II == Guard;
+ }
+
+public:
+ RAIIMutexDescriptor(StringRef GuardName) : GuardName(GuardName) {}
+ [[nodiscard]] bool matches(const CallEvent &Call, bool IsLock) const {
+ initIdentifierInfo(Call);
+ if (IsLock) {
+ return matchesImpl<CXXConstructorCall>(Call);
+ }
+ return matchesImpl<CXXDestructorCall>(Call);
+ }
+ [[nodiscard]] const MemRegion *getRegion(const CallEvent &Call,
+ bool IsLock) const {
+ const MemRegion *LockRegion = nullptr;
+ if (IsLock) {
+ if (std::optional<SVal> Object = Call.getReturnValueUnderConstruction()) {
+ LockRegion = Object->getAsRegion();
+ }
+ } else {
+ LockRegion = cast<CXXDestructorCall>(Call).getCXXThisVal().getAsRegion();
+ }
+ return LockRegion;
+ }
+};
+
+using MutexDescriptor =
+ std::variant<FirstArgMutexDescriptor, MemberMutexDescriptor,
+ RAIIMutexDescriptor>;
+
class BlockInCriticalSectionChecker : public Checker<check::PostCall> {
- mutable IdentifierInfo *IILockGuard = nullptr;
- mutable IdentifierInfo *IIUniqueLock = nullptr;
- mutable bool IdentifierInfoInitialized = false;
-
- const CallDescription LockFn{{"lock"}};
- const CallDescription UnlockFn{{"unlock"}};
- const CallDescription SleepFn{{"sleep"}};
- const CallDescription GetcFn{{"getc"}};
- const CallDescription FgetsFn{{"fgets"}};
- const CallDescription ReadFn{{"read"}};
- const CallDescription RecvFn{{"recv"}};
- const CallDescription PthreadLockFn{{"pthread_mutex_lock"}};
- const CallDescription PthreadTryLockFn{{"pthread_mutex_trylock"}};
- const CallDescription PthreadUnlockFn{{"pthread_mutex_unlock"}};
- const CallDescription MtxLock{{"mtx_lock"}};
- const CallDescription MtxTimedLock{{"mtx_timedlock"}};
- const CallDescription MtxTryLock{{"mtx_trylock"}};
- const CallDescription MtxUnlock{{"mtx_unlock"}};
-
- const llvm::StringLiteral ClassLockGuard{"lock_guard"};
- const llvm::StringLiteral ClassUniqueLock{"unique_lock"};
+private:
+ const std::array<MutexDescriptor, 8> MutexDescriptors{
+ // NOTE: There are standard library implementations where some methods
+ // of `std::mutex` are inherited from an implementation detail base
+ // class, and those aren't matched by the name specification {"std",
+ // "mutex", "lock"}.
+ // As a workaround here we omit the class name and only require the
+ // presence of the name parts "std" and "lock"/"unlock".
+ // TODO: Ensure that CallDescription understands inherited methods.
+ MemberMutexDescriptor(
+ {/*MatchAs=*/CDM::CXXMethod,
+ /*QualifiedName=*/{"std", /*"mutex",*/ "lock"},
+ /*RequiredArgs=*/0},
+ {CDM::CXXMethod, {"std", /*"mutex",*/ "unlock"}, 0}),
+ FirstArgMutexDescriptor({CDM::CLibrary, {"pthread_mutex_lock"}, 1},
+ {CDM::CLibrary, {"pthread_mutex_unlock"}, 1}),
+ FirstArgMutexDescriptor({CDM::CLibrary, {"mtx_lock"}, 1},
+ {CDM::CLibrary, {"mtx_unlock"}, 1}),
+ FirstArgMutexDescriptor({CDM::CLibrary, {"pthread_mutex_trylock"}, 1},
+ {CDM::CLibrary, {"pthread_mutex_unlock"}, 1}),
+ FirstArgMutexDescriptor({CDM::CLibrary, {"mtx_trylock"}, 1},
+ {CDM::CLibrary, {"mtx_unlock"}, 1}),
+ FirstArgMutexDescriptor({CDM::CLibrary, {"mtx_timedlock"}, 1},
+ {CDM::CLibrary, {"mtx_unlock"}, 1}),
+ RAIIMutexDescriptor("lock_guard"),
+ RAIIMutexDescriptor("unique_lock")};
+
+ const CallDescriptionSet BlockingFunctions{{CDM::CLibrary, {"sleep"}},
+ {CDM::CLibrary, {"getc"}},
+ {CDM::CLibrary, {"fgets"}},
+ {CDM::CLibrary, {"read"}},
+ {CDM::CLibrary, {"recv"}}};
const BugType BlockInCritSectionBugType{
this, "Call to blocking function in critical section", "Blocking Error"};
- void initIdentifierInfo(ASTContext &Ctx) const;
+ void reportBlockInCritSection(const CallEvent &call, CheckerContext &C) const;
- void reportBlockInCritSection(SymbolRef FileDescSym,
- const CallEvent &call,
- CheckerContext &C) const;
+ [[nodiscard]] const NoteTag *createCritSectionNote(CritSectionMarker M,
+ CheckerContext &C) const;
-public:
- bool isBlockingFunction(const CallEvent &Call) const;
- bool isLockFunction(const CallEvent &Call) const;
- bool isUnlockFunction(const CallEvent &Call) const;
+ [[nodiscard]] std::optional<MutexDescriptor>
+ checkDescriptorMatch(const CallEvent &Call, CheckerContext &C,
+ bool IsLock) const;
+
+ void handleLock(const MutexDescriptor &Mutex, const CallEvent &Call,
+ CheckerContext &C) const;
+ void handleUnlock(const MutexDescriptor &Mutex, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ [[nodiscard]] bool isBlockingInCritSection(const CallEvent &Call,
+ CheckerContext &C) const;
+
+public:
/// Process unlock.
/// Process lock.
/// Process blocking functions (sleep, getc, fgets, read, recv)
@@ -70,73 +208,115 @@ public:
} // end anonymous namespace
-REGISTER_TRAIT_WITH_PROGRAMSTATE(MutexCounter, unsigned)
-
-void BlockInCriticalSectionChecker::initIdentifierInfo(ASTContext &Ctx) const {
- if (!IdentifierInfoInitialized) {
- /* In case of checking C code, or when the corresponding headers are not
- * included, we might end up query the identifier table every time when this
- * function is called instead of early returning it. To avoid this, a bool
- * variable (IdentifierInfoInitialized) is used and the function will be run
- * only once. */
- IILockGuard = &Ctx.Idents.get(ClassLockGuard);
- IIUniqueLock = &Ctx.Idents.get(ClassUniqueLock);
- IdentifierInfoInitialized = true;
- }
+REGISTER_LIST_WITH_PROGRAMSTATE(ActiveCritSections, CritSectionMarker)
+
+// Iterator traits for ImmutableList data structure
+// that enable the use of STL algorithms.
+// TODO: Move these to llvm::ImmutableList when overhauling immutable data
+// structures for proper iterator concept support.
+template <>
+struct std::iterator_traits<
+ typename llvm::ImmutableList<CritSectionMarker>::iterator> {
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = CritSectionMarker;
+ using difference_type = std::ptrdiff_t;
+ using reference = CritSectionMarker &;
+ using pointer = CritSectionMarker *;
+};
+
+std::optional<MutexDescriptor>
+BlockInCriticalSectionChecker::checkDescriptorMatch(const CallEvent &Call,
+ CheckerContext &C,
+ bool IsLock) const {
+ const auto Descriptor =
+ llvm::find_if(MutexDescriptors, [&Call, IsLock](auto &&Descriptor) {
+ return std::visit(
+ [&Call, IsLock](auto &&DescriptorImpl) {
+ return DescriptorImpl.matches(Call, IsLock);
+ },
+ Descriptor);
+ });
+ if (Descriptor != MutexDescriptors.end())
+ return *Descriptor;
+ return std::nullopt;
}
-bool BlockInCriticalSectionChecker::isBlockingFunction(const CallEvent &Call) const {
- return matchesAny(Call, SleepFn, GetcFn, FgetsFn, ReadFn, RecvFn);
+static const MemRegion *getRegion(const CallEvent &Call,
+ const MutexDescriptor &Descriptor,
+ bool IsLock) {
+ return std::visit(
+ [&Call, IsLock](auto &&Descriptor) {
+ return Descriptor.getRegion(Call, IsLock);
+ },
+ Descriptor);
}
-bool BlockInCriticalSectionChecker::isLockFunction(const CallEvent &Call) const {
- if (const auto *Ctor = dyn_cast<CXXConstructorCall>(&Call)) {
- auto IdentifierInfo = Ctor->getDecl()->getParent()->getIdentifier();
- if (IdentifierInfo == IILockGuard || IdentifierInfo == IIUniqueLock)
- return true;
- }
+void BlockInCriticalSectionChecker::handleLock(
+ const MutexDescriptor &LockDescriptor, const CallEvent &Call,
+ CheckerContext &C) const {
+ const MemRegion *MutexRegion =
+ getRegion(Call, LockDescriptor, /*IsLock=*/true);
+ if (!MutexRegion)
+ return;
- return matchesAny(Call, LockFn, PthreadLockFn, PthreadTryLockFn, MtxLock,
- MtxTimedLock, MtxTryLock);
+ const CritSectionMarker MarkToAdd{Call.getOriginExpr(), MutexRegion};
+ ProgramStateRef StateWithLockEvent =
+ C.getState()->add<ActiveCritSections>(MarkToAdd);
+ C.addTransition(StateWithLockEvent, createCritSectionNote(MarkToAdd, C));
}
-bool BlockInCriticalSectionChecker::isUnlockFunction(const CallEvent &Call) const {
- if (const auto *Dtor = dyn_cast<CXXDestructorCall>(&Call)) {
- const auto *DRecordDecl = cast<CXXRecordDecl>(Dtor->getDecl()->getParent());
- auto IdentifierInfo = DRecordDecl->getIdentifier();
- if (IdentifierInfo == IILockGuard || IdentifierInfo == IIUniqueLock)
- return true;
+void BlockInCriticalSectionChecker::handleUnlock(
+ const MutexDescriptor &UnlockDescriptor, const CallEvent &Call,
+ CheckerContext &C) const {
+ const MemRegion *MutexRegion =
+ getRegion(Call, UnlockDescriptor, /*IsLock=*/false);
+ if (!MutexRegion)
+ return;
+
+ ProgramStateRef State = C.getState();
+ const auto ActiveSections = State->get<ActiveCritSections>();
+ const auto MostRecentLock =
+ llvm::find_if(ActiveSections, [MutexRegion](auto &&Marker) {
+ return Marker.LockReg == MutexRegion;
+ });
+ if (MostRecentLock == ActiveSections.end())
+ return;
+
+ // Build a new ImmutableList without this element.
+ auto &Factory = State->get_context<ActiveCritSections>();
+ llvm::ImmutableList<CritSectionMarker> NewList = Factory.getEmptyList();
+ for (auto It = ActiveSections.begin(), End = ActiveSections.end(); It != End;
+ ++It) {
+ if (It != MostRecentLock)
+ NewList = Factory.add(*It, NewList);
}
- return matchesAny(Call, UnlockFn, PthreadUnlockFn, MtxUnlock);
+ State = State->set<ActiveCritSections>(NewList);
+ C.addTransition(State);
+}
+
+bool BlockInCriticalSectionChecker::isBlockingInCritSection(
+ const CallEvent &Call, CheckerContext &C) const {
+ return BlockingFunctions.contains(Call) &&
+ !C.getState()->get<ActiveCritSections>().isEmpty();
}
void BlockInCriticalSectionChecker::checkPostCall(const CallEvent &Call,
CheckerContext &C) const {
- initIdentifierInfo(C.getASTContext());
-
- if (!isBlockingFunction(Call)
- && !isLockFunction(Call)
- && !isUnlockFunction(Call))
- return;
-
- ProgramStateRef State = C.getState();
- unsigned mutexCount = State->get<MutexCounter>();
- if (isUnlockFunction(Call) && mutexCount > 0) {
- State = State->set<MutexCounter>(--mutexCount);
- C.addTransition(State);
- } else if (isLockFunction(Call)) {
- State = State->set<MutexCounter>(++mutexCount);
- C.addTransition(State);
- } else if (mutexCount > 0) {
- SymbolRef BlockDesc = Call.getReturnValue().getAsSymbol();
- reportBlockInCritSection(BlockDesc, Call, C);
+ if (isBlockingInCritSection(Call, C)) {
+ reportBlockInCritSection(Call, C);
+ } else if (std::optional<MutexDescriptor> LockDesc =
+ checkDescriptorMatch(Call, C, /*IsLock=*/true)) {
+ handleLock(*LockDesc, Call, C);
+ } else if (std::optional<MutexDescriptor> UnlockDesc =
+ checkDescriptorMatch(Call, C, /*IsLock=*/false)) {
+ handleUnlock(*UnlockDesc, Call, C);
}
}
void BlockInCriticalSectionChecker::reportBlockInCritSection(
- SymbolRef BlockDescSym, const CallEvent &Call, CheckerContext &C) const {
- ExplodedNode *ErrNode = C.generateNonFatalErrorNode();
+ const CallEvent &Call, CheckerContext &C) const {
+ ExplodedNode *ErrNode = C.generateNonFatalErrorNode(C.getState());
if (!ErrNode)
return;
@@ -147,14 +327,63 @@ void BlockInCriticalSectionChecker::reportBlockInCritSection(
auto R = std::make_unique<PathSensitiveBugReport>(BlockInCritSectionBugType,
os.str(), ErrNode);
R->addRange(Call.getSourceRange());
- R->markInteresting(BlockDescSym);
+ R->markInteresting(Call.getReturnValue());
C.emitReport(std::move(R));
}
+const NoteTag *
+BlockInCriticalSectionChecker::createCritSectionNote(CritSectionMarker M,
+ CheckerContext &C) const {
+ const BugType *BT = &this->BlockInCritSectionBugType;
+ return C.getNoteTag([M, BT](PathSensitiveBugReport &BR,
+ llvm::raw_ostream &OS) {
+ if (&BR.getBugType() != BT)
+ return;
+
+ // Get the lock events for the mutex of the current line's lock event.
+ const auto CritSectionBegins =
+ BR.getErrorNode()->getState()->get<ActiveCritSections>();
+ llvm::SmallVector<CritSectionMarker, 4> LocksForMutex;
+ llvm::copy_if(
+ CritSectionBegins, std::back_inserter(LocksForMutex),
+ [M](const auto &Marker) { return Marker.LockReg == M.LockReg; });
+ if (LocksForMutex.empty())
+ return;
+
+ // As the ImmutableList builds the locks by prepending them, we
+ // reverse the list to get the correct order.
+ std::reverse(LocksForMutex.begin(), LocksForMutex.end());
+
+ // Find the index of the lock expression in the list of all locks for a
+ // given mutex (in acquisition order).
+ const auto Position =
+ llvm::find_if(std::as_const(LocksForMutex), [M](const auto &Marker) {
+ return Marker.LockExpr == M.LockExpr;
+ });
+ if (Position == LocksForMutex.end())
+ return;
+
+ // If there is only one lock event, we don't need to specify how many times
+ // the critical section was entered.
+ if (LocksForMutex.size() == 1) {
+ OS << "Entering critical section here";
+ return;
+ }
+
+ const auto IndexOfLock =
+ std::distance(std::as_const(LocksForMutex).begin(), Position);
+
+ const auto OrdinalOfLock = IndexOfLock + 1;
+ OS << "Entering critical section for the " << OrdinalOfLock
+ << llvm::getOrdinalSuffix(OrdinalOfLock) << " time here";
+ });
+}
+
void ento::registerBlockInCriticalSectionChecker(CheckerManager &mgr) {
mgr.registerChecker<BlockInCriticalSectionChecker>();
}
-bool ento::shouldRegisterBlockInCriticalSectionChecker(const CheckerManager &mgr) {
+bool ento::shouldRegisterBlockInCriticalSectionChecker(
+ const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
index a09db6d2d0ec..837cbbce8f45 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
@@ -23,19 +23,19 @@ using namespace clang;
using namespace ento;
namespace {
- class BoolAssignmentChecker : public Checker< check::Bind > {
- const BugType BT{this, "Assignment of a non-Boolean value"};
- void emitReport(ProgramStateRef state, CheckerContext &C,
- bool IsTainted = false) const;
-
- public:
- void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
- };
+class BoolAssignmentChecker : public Checker<check::Bind> {
+ const BugType BT{this, "Assignment of a non-Boolean value"};
+ void emitReport(ProgramStateRef State, CheckerContext &C,
+ bool IsTainted = false) const;
+
+public:
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const;
+};
} // end anonymous namespace
-void BoolAssignmentChecker::emitReport(ProgramStateRef state, CheckerContext &C,
+void BoolAssignmentChecker::emitReport(ProgramStateRef State, CheckerContext &C,
bool IsTainted) const {
- if (ExplodedNode *N = C.generateNonFatalErrorNode(state)) {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(State)) {
StringRef Msg = IsTainted ? "Might assign a tainted non-Boolean value"
: "Assignment of a non-Boolean value";
C.emitReport(std::make_unique<PathSensitiveBugReport>(BT, Msg, N));
@@ -47,59 +47,58 @@ static bool isBooleanType(QualType Ty) {
return true;
if (const TypedefType *TT = Ty->getAs<TypedefType>())
- return TT->getDecl()->getName() == "BOOL" || // Objective-C
- TT->getDecl()->getName() == "_Bool" || // stdbool.h < C99
- TT->getDecl()->getName() == "Boolean"; // MacTypes.h
+ return TT->getDecl()->getName() == "BOOL" || // Objective-C
+ TT->getDecl()->getName() == "_Bool" || // stdbool.h < C99
+ TT->getDecl()->getName() == "Boolean"; // MacTypes.h
return false;
}
-void BoolAssignmentChecker::checkBind(SVal loc, SVal val, const Stmt *S,
+void BoolAssignmentChecker::checkBind(SVal Loc, SVal Val, const Stmt *S,
CheckerContext &C) const {
// We are only interested in stores into Booleans.
const TypedValueRegion *TR =
- dyn_cast_or_null<TypedValueRegion>(loc.getAsRegion());
+ dyn_cast_or_null<TypedValueRegion>(Loc.getAsRegion());
if (!TR)
return;
- QualType valTy = TR->getValueType();
+ QualType RegTy = TR->getValueType();
- if (!isBooleanType(valTy))
+ if (!isBooleanType(RegTy))
return;
// Get the value of the right-hand side. We only care about values
// that are defined (UnknownVals and UndefinedVals are handled by other
// checkers).
- std::optional<NonLoc> NV = val.getAs<NonLoc>();
+ std::optional<NonLoc> NV = Val.getAs<NonLoc>();
if (!NV)
return;
// Check if the assigned value meets our criteria for correctness. It must
// be a value that is either 0 or 1. One way to check this is to see if
// the value is possibly < 0 (for a negative value) or greater than 1.
- ProgramStateRef state = C.getState();
- SValBuilder &svalBuilder = C.getSValBuilder();
- BasicValueFactory &BVF = svalBuilder.getBasicValueFactory();
+ ProgramStateRef State = C.getState();
+ BasicValueFactory &BVF = C.getSValBuilder().getBasicValueFactory();
ConstraintManager &CM = C.getConstraintManager();
- llvm::APSInt Zero = BVF.getValue(0, valTy);
- llvm::APSInt One = BVF.getValue(1, valTy);
+ llvm::APSInt Zero = BVF.getValue(0, RegTy);
+ llvm::APSInt One = BVF.getValue(1, RegTy);
ProgramStateRef StIn, StOut;
- std::tie(StIn, StOut) = CM.assumeInclusiveRangeDual(state, *NV, Zero, One);
+ std::tie(StIn, StOut) = CM.assumeInclusiveRangeDual(State, *NV, Zero, One);
if (!StIn)
emitReport(StOut, C);
- if (StIn && StOut && taint::isTainted(state, *NV))
+ if (StIn && StOut && taint::isTainted(State, *NV))
emitReport(StOut, C, /*IsTainted=*/true);
}
-void ento::registerBoolAssignmentChecker(CheckerManager &mgr) {
- mgr.registerChecker<BoolAssignmentChecker>();
+void ento::registerBoolAssignmentChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<BoolAssignmentChecker>();
}
-bool ento::shouldRegisterBoolAssignmentChecker(const CheckerManager &mgr) {
+bool ento::shouldRegisterBoolAssignmentChecker(const CheckerManager &Mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
index 61521c259ca9..b198b1c2ff4d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
@@ -6,7 +6,11 @@
//
//===----------------------------------------------------------------------===//
//
-// This checker evaluates clang builtin functions.
+// This checker evaluates "standalone" clang builtin functions that are not
+// just special-cased variants of well-known non-builtin functions.
+// Builtin functions like __builtin_memcpy and __builtin_alloca should be
+// evaluated by the same checker that handles their non-builtin variant to
+// ensure that the two variants are handled consistently.
//
//===----------------------------------------------------------------------===//
@@ -14,6 +18,7 @@
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
@@ -26,8 +31,40 @@ namespace {
class BuiltinFunctionChecker : public Checker<eval::Call> {
public:
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
+
+private:
+ // From: clang/include/clang/Basic/Builtins.def
+ // C++ standard library builtins in namespace 'std'.
+ const CallDescriptionSet BuiltinLikeStdFunctions{
+ {CDM::SimpleFunc, {"std", "addressof"}}, //
+ {CDM::SimpleFunc, {"std", "__addressof"}}, //
+ {CDM::SimpleFunc, {"std", "as_const"}}, //
+ {CDM::SimpleFunc, {"std", "forward"}}, //
+ {CDM::SimpleFunc, {"std", "forward_like"}}, //
+ {CDM::SimpleFunc, {"std", "move"}}, //
+ {CDM::SimpleFunc, {"std", "move_if_noexcept"}}, //
+ };
+
+ bool isBuiltinLikeFunction(const CallEvent &Call) const;
};
+} // namespace
+
+bool BuiltinFunctionChecker::isBuiltinLikeFunction(
+ const CallEvent &Call) const {
+ const auto *FD = llvm::dyn_cast_or_null<FunctionDecl>(Call.getDecl());
+ if (!FD || FD->getNumParams() != 1)
+ return false;
+
+ if (QualType RetTy = FD->getReturnType();
+ !RetTy->isPointerType() && !RetTy->isReferenceType())
+ return false;
+
+ if (QualType ParmTy = FD->getParamDecl(0)->getType();
+ !ParmTy->isPointerType() && !ParmTy->isReferenceType())
+ return false;
+
+ return BuiltinLikeStdFunctions.contains(Call);
}
bool BuiltinFunctionChecker::evalCall(const CallEvent &Call,
@@ -40,11 +77,17 @@ bool BuiltinFunctionChecker::evalCall(const CallEvent &Call,
const LocationContext *LCtx = C.getLocationContext();
const Expr *CE = Call.getOriginExpr();
+ if (isBuiltinLikeFunction(Call)) {
+ C.addTransition(state->BindExpr(CE, LCtx, Call.getArgSVal(0)));
+ return true;
+ }
+
switch (FD->getBuiltinID()) {
default:
return false;
- case Builtin::BI__builtin_assume: {
+ case Builtin::BI__builtin_assume:
+ case Builtin::BI__assume: {
assert (Call.getNumArgs() > 0);
SVal Arg = Call.getArgSVal(0);
if (Arg.isUndef())
@@ -79,25 +122,6 @@ bool BuiltinFunctionChecker::evalCall(const CallEvent &Call,
return true;
}
- case Builtin::BI__builtin_alloca_with_align:
- case Builtin::BI__builtin_alloca: {
- SValBuilder &SVB = C.getSValBuilder();
- const loc::MemRegionVal R =
- SVB.getAllocaRegionVal(CE, C.getLocationContext(), C.blockCount());
-
- // Set the extent of the region in bytes. This enables us to use the SVal
- // of the argument directly. If we saved the extent in bits, it'd be more
- // difficult to reason about values like symbol*8.
- auto Size = Call.getArgSVal(0);
- if (auto DefSize = Size.getAs<DefinedOrUnknownSVal>()) {
- // This `getAs()` is mostly paranoia, because core.CallAndMessage reports
- // undefined function arguments (unless it's disabled somehow).
- state = setDynamicExtent(state, R.getRegion(), *DefSize, SVB);
- }
- C.addTransition(state->BindExpr(CE, LCtx, R));
- return true;
- }
-
case Builtin::BI__builtin_dynamic_object_size:
case Builtin::BI__builtin_object_size:
case Builtin::BI__builtin_constant_p: {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index b7b64c3da4f6..8dd08f14b272 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "InterCheckerAPI.h"
+#include "clang/AST/OperationKinds.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CharInfo.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
@@ -22,10 +23,13 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include <functional>
#include <optional>
@@ -124,53 +128,73 @@ public:
const CallEvent &)>;
CallDescriptionMap<FnCheck> Callbacks = {
- {{CDF_MaybeBuiltin, {"memcpy"}, 3},
+ {{CDM::CLibraryMaybeHardened, {"memcpy"}, 3},
std::bind(&CStringChecker::evalMemcpy, _1, _2, _3, CK_Regular)},
- {{CDF_MaybeBuiltin, {"wmemcpy"}, 3},
+ {{CDM::CLibraryMaybeHardened, {"wmemcpy"}, 3},
std::bind(&CStringChecker::evalMemcpy, _1, _2, _3, CK_Wide)},
- {{CDF_MaybeBuiltin, {"mempcpy"}, 3},
+ {{CDM::CLibraryMaybeHardened, {"mempcpy"}, 3},
std::bind(&CStringChecker::evalMempcpy, _1, _2, _3, CK_Regular)},
- {{CDF_None, {"wmempcpy"}, 3},
+ {{CDM::CLibraryMaybeHardened, {"wmempcpy"}, 3},
std::bind(&CStringChecker::evalMempcpy, _1, _2, _3, CK_Wide)},
- {{CDF_MaybeBuiltin, {"memcmp"}, 3},
+ {{CDM::CLibrary, {"memcmp"}, 3},
std::bind(&CStringChecker::evalMemcmp, _1, _2, _3, CK_Regular)},
- {{CDF_MaybeBuiltin, {"wmemcmp"}, 3},
+ {{CDM::CLibrary, {"wmemcmp"}, 3},
std::bind(&CStringChecker::evalMemcmp, _1, _2, _3, CK_Wide)},
- {{CDF_MaybeBuiltin, {"memmove"}, 3},
+ {{CDM::CLibraryMaybeHardened, {"memmove"}, 3},
std::bind(&CStringChecker::evalMemmove, _1, _2, _3, CK_Regular)},
- {{CDF_MaybeBuiltin, {"wmemmove"}, 3},
+ {{CDM::CLibraryMaybeHardened, {"wmemmove"}, 3},
std::bind(&CStringChecker::evalMemmove, _1, _2, _3, CK_Wide)},
- {{CDF_MaybeBuiltin, {"memset"}, 3}, &CStringChecker::evalMemset},
- {{CDF_MaybeBuiltin, {"explicit_memset"}, 3}, &CStringChecker::evalMemset},
- {{CDF_MaybeBuiltin, {"strcpy"}, 2}, &CStringChecker::evalStrcpy},
- {{CDF_MaybeBuiltin, {"strncpy"}, 3}, &CStringChecker::evalStrncpy},
- {{CDF_MaybeBuiltin, {"stpcpy"}, 2}, &CStringChecker::evalStpcpy},
- {{CDF_MaybeBuiltin, {"strlcpy"}, 3}, &CStringChecker::evalStrlcpy},
- {{CDF_MaybeBuiltin, {"strcat"}, 2}, &CStringChecker::evalStrcat},
- {{CDF_MaybeBuiltin, {"strncat"}, 3}, &CStringChecker::evalStrncat},
- {{CDF_MaybeBuiltin, {"strlcat"}, 3}, &CStringChecker::evalStrlcat},
- {{CDF_MaybeBuiltin, {"strlen"}, 1}, &CStringChecker::evalstrLength},
- {{CDF_MaybeBuiltin, {"wcslen"}, 1}, &CStringChecker::evalstrLength},
- {{CDF_MaybeBuiltin, {"strnlen"}, 2}, &CStringChecker::evalstrnLength},
- {{CDF_MaybeBuiltin, {"wcsnlen"}, 2}, &CStringChecker::evalstrnLength},
- {{CDF_MaybeBuiltin, {"strcmp"}, 2}, &CStringChecker::evalStrcmp},
- {{CDF_MaybeBuiltin, {"strncmp"}, 3}, &CStringChecker::evalStrncmp},
- {{CDF_MaybeBuiltin, {"strcasecmp"}, 2}, &CStringChecker::evalStrcasecmp},
- {{CDF_MaybeBuiltin, {"strncasecmp"}, 3},
- &CStringChecker::evalStrncasecmp},
- {{CDF_MaybeBuiltin, {"strsep"}, 2}, &CStringChecker::evalStrsep},
- {{CDF_MaybeBuiltin, {"bcopy"}, 3}, &CStringChecker::evalBcopy},
- {{CDF_MaybeBuiltin, {"bcmp"}, 3},
+ {{CDM::CLibraryMaybeHardened, {"memset"}, 3},
+ &CStringChecker::evalMemset},
+ {{CDM::CLibrary, {"explicit_memset"}, 3}, &CStringChecker::evalMemset},
+ // FIXME: C23 introduces 'memset_explicit', maybe also model that
+ {{CDM::CLibraryMaybeHardened, {"strcpy"}, 2},
+ &CStringChecker::evalStrcpy},
+ {{CDM::CLibraryMaybeHardened, {"strncpy"}, 3},
+ &CStringChecker::evalStrncpy},
+ {{CDM::CLibraryMaybeHardened, {"stpcpy"}, 2},
+ &CStringChecker::evalStpcpy},
+ {{CDM::CLibraryMaybeHardened, {"strlcpy"}, 3},
+ &CStringChecker::evalStrlcpy},
+ {{CDM::CLibraryMaybeHardened, {"strcat"}, 2},
+ &CStringChecker::evalStrcat},
+ {{CDM::CLibraryMaybeHardened, {"strncat"}, 3},
+ &CStringChecker::evalStrncat},
+ {{CDM::CLibraryMaybeHardened, {"strlcat"}, 3},
+ &CStringChecker::evalStrlcat},
+ {{CDM::CLibraryMaybeHardened, {"strlen"}, 1},
+ &CStringChecker::evalstrLength},
+ {{CDM::CLibrary, {"wcslen"}, 1}, &CStringChecker::evalstrLength},
+ {{CDM::CLibraryMaybeHardened, {"strnlen"}, 2},
+ &CStringChecker::evalstrnLength},
+ {{CDM::CLibrary, {"wcsnlen"}, 2}, &CStringChecker::evalstrnLength},
+ {{CDM::CLibrary, {"strcmp"}, 2}, &CStringChecker::evalStrcmp},
+ {{CDM::CLibrary, {"strncmp"}, 3}, &CStringChecker::evalStrncmp},
+ {{CDM::CLibrary, {"strcasecmp"}, 2}, &CStringChecker::evalStrcasecmp},
+ {{CDM::CLibrary, {"strncasecmp"}, 3}, &CStringChecker::evalStrncasecmp},
+ {{CDM::CLibrary, {"strsep"}, 2}, &CStringChecker::evalStrsep},
+ {{CDM::CLibrary, {"bcopy"}, 3}, &CStringChecker::evalBcopy},
+ {{CDM::CLibrary, {"bcmp"}, 3},
std::bind(&CStringChecker::evalMemcmp, _1, _2, _3, CK_Regular)},
- {{CDF_MaybeBuiltin, {"bzero"}, 2}, &CStringChecker::evalBzero},
- {{CDF_MaybeBuiltin, {"explicit_bzero"}, 2}, &CStringChecker::evalBzero},
- {{CDF_MaybeBuiltin, {"sprintf"}, 2}, &CStringChecker::evalSprintf},
- {{CDF_MaybeBuiltin, {"snprintf"}, 2}, &CStringChecker::evalSnprintf},
+ {{CDM::CLibrary, {"bzero"}, 2}, &CStringChecker::evalBzero},
+ {{CDM::CLibraryMaybeHardened, {"explicit_bzero"}, 2},
+ &CStringChecker::evalBzero},
+
+ // When recognizing calls to the following variadic functions, we accept
+ // any number of arguments in the call (std::nullopt = accept any
+ // number), but check that in the declaration there are 2 and 3
+ // parameters respectively. (Note that the parameter count does not
+ // include the "...". Calls where the number of arguments is too small
+ // will be discarded by the callback.)
+ {{CDM::CLibraryMaybeHardened, {"sprintf"}, std::nullopt, 2},
+ &CStringChecker::evalSprintf},
+ {{CDM::CLibraryMaybeHardened, {"snprintf"}, std::nullopt, 3},
+ &CStringChecker::evalSnprintf},
};
// These require a bit of special handling.
- CallDescription StdCopy{{"std", "copy"}, 3},
- StdCopyBackward{{"std", "copy_backward"}, 3};
+ CallDescription StdCopy{CDM::SimpleFunc, {"std", "copy"}, 3},
+ StdCopyBackward{CDM::SimpleFunc, {"std", "copy_backward"}, 3};
FnCheck identifyCall(const CallEvent &Call, CheckerContext &C) const;
void evalMemcpy(CheckerContext &C, const CallEvent &Call, CharKind CK) const;
@@ -219,7 +243,7 @@ public:
void evalSprintf(CheckerContext &C, const CallEvent &Call) const;
void evalSnprintf(CheckerContext &C, const CallEvent &Call) const;
void evalSprintfCommon(CheckerContext &C, const CallEvent &Call,
- bool IsBounded, bool IsBuiltin) const;
+ bool IsBounded) const;
// Utility methods
std::pair<ProgramStateRef , ProgramStateRef >
@@ -284,6 +308,10 @@ public:
// Re-usable checks
ProgramStateRef checkNonNull(CheckerContext &C, ProgramStateRef State,
AnyArgExpr Arg, SVal l) const;
+ // Check whether the origin region behind \p Element (like the actual array
+ // region \p Element is from) is initialized.
+ ProgramStateRef checkInit(CheckerContext &C, ProgramStateRef state,
+ AnyArgExpr Buffer, SVal Element, SVal Size) const;
ProgramStateRef CheckLocation(CheckerContext &C, ProgramStateRef state,
AnyArgExpr Buffer, SVal Element,
AccessKind Access,
@@ -309,7 +337,7 @@ public:
const Stmt *S, StringRef WarningMsg) const;
void emitAdditionOverflowBug(CheckerContext &C, ProgramStateRef State) const;
void emitUninitializedReadBug(CheckerContext &C, ProgramStateRef State,
- const Expr *E) const;
+ const Expr *E, StringRef Msg) const;
ProgramStateRef checkAdditionOverflow(CheckerContext &C,
ProgramStateRef state,
NonLoc left,
@@ -331,16 +359,16 @@ REGISTER_MAP_WITH_PROGRAMSTATE(CStringLength, const MemRegion *, SVal)
// Individual checks and utility methods.
//===----------------------------------------------------------------------===//
-std::pair<ProgramStateRef , ProgramStateRef >
-CStringChecker::assumeZero(CheckerContext &C, ProgramStateRef state, SVal V,
+std::pair<ProgramStateRef, ProgramStateRef>
+CStringChecker::assumeZero(CheckerContext &C, ProgramStateRef State, SVal V,
QualType Ty) {
std::optional<DefinedSVal> val = V.getAs<DefinedSVal>();
if (!val)
- return std::pair<ProgramStateRef , ProgramStateRef >(state, state);
+ return std::pair<ProgramStateRef, ProgramStateRef>(State, State);
SValBuilder &svalBuilder = C.getSValBuilder();
DefinedOrUnknownSVal zero = svalBuilder.makeZeroVal(Ty);
- return state->assume(svalBuilder.evalEQ(state, *val, zero));
+ return State->assume(svalBuilder.evalEQ(State, *val, zero));
}
ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C,
@@ -373,6 +401,149 @@ ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C,
return stateNonNull;
}
+static std::optional<NonLoc> getIndex(ProgramStateRef State,
+ const ElementRegion *ER, CharKind CK) {
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ ASTContext &Ctx = SVB.getContext();
+
+ if (CK == CharKind::Regular) {
+ if (ER->getValueType() != Ctx.CharTy)
+ return {};
+ return ER->getIndex();
+ }
+
+ if (ER->getValueType() != Ctx.WideCharTy)
+ return {};
+
+ QualType SizeTy = Ctx.getSizeType();
+ NonLoc WideSize =
+ SVB.makeIntVal(Ctx.getTypeSizeInChars(Ctx.WideCharTy).getQuantity(),
+ SizeTy)
+ .castAs<NonLoc>();
+ SVal Offset =
+ SVB.evalBinOpNN(State, BO_Mul, ER->getIndex(), WideSize, SizeTy);
+ if (Offset.isUnknown())
+ return {};
+ return Offset.castAs<NonLoc>();
+}
+
+// Basically 1 -> 1st, 12 -> 12th, etc.
+static void printIdxWithOrdinalSuffix(llvm::raw_ostream &Os, unsigned Idx) {
+ Os << Idx << llvm::getOrdinalSuffix(Idx);
+}
+
+ProgramStateRef CStringChecker::checkInit(CheckerContext &C,
+ ProgramStateRef State,
+ AnyArgExpr Buffer, SVal Element,
+ SVal Size) const {
+
+ // If a previous check has failed, propagate the failure.
+ if (!State)
+ return nullptr;
+
+ const MemRegion *R = Element.getAsRegion();
+ const auto *ER = dyn_cast_or_null<ElementRegion>(R);
+ if (!ER)
+ return State;
+
+ const auto *SuperR = ER->getSuperRegion()->getAs<TypedValueRegion>();
+ if (!SuperR)
+ return State;
+
+ // FIXME: We ought to able to check objects as well. Maybe
+ // UninitializedObjectChecker could help?
+ if (!SuperR->getValueType()->isArrayType())
+ return State;
+
+ SValBuilder &SVB = C.getSValBuilder();
+ ASTContext &Ctx = SVB.getContext();
+
+ const QualType ElemTy = Ctx.getBaseElementType(SuperR->getValueType());
+ const NonLoc Zero = SVB.makeZeroArrayIndex();
+
+ std::optional<Loc> FirstElementVal =
+ State->getLValue(ElemTy, Zero, loc::MemRegionVal(SuperR)).getAs<Loc>();
+ if (!FirstElementVal)
+ return State;
+
+ // Ensure that we wouldn't read uninitialized value.
+ if (Filter.CheckCStringUninitializedRead &&
+ State->getSVal(*FirstElementVal).isUndef()) {
+ llvm::SmallString<258> Buf;
+ llvm::raw_svector_ostream OS(Buf);
+ OS << "The first element of the ";
+ printIdxWithOrdinalSuffix(OS, Buffer.ArgumentIndex + 1);
+ OS << " argument is undefined";
+ emitUninitializedReadBug(C, State, Buffer.Expression, OS.str());
+ return nullptr;
+ }
+
+ // We won't check whether the entire region is fully initialized -- lets just
+ // check that the first and the last element is. So, onto checking the last
+ // element:
+ const QualType IdxTy = SVB.getArrayIndexType();
+
+ NonLoc ElemSize =
+ SVB.makeIntVal(Ctx.getTypeSizeInChars(ElemTy).getQuantity(), IdxTy)
+ .castAs<NonLoc>();
+
+ // FIXME: Check that the size arg to the cstring function is divisible by
+ // size of the actual element type?
+
+ // The type of the argument to the cstring function is either char or wchar,
+ // but thats not the type of the original array (or memory region).
+ // Suppose the following:
+ // int t[5];
+ // memcpy(dst, t, sizeof(t) / sizeof(t[0]));
+ // When checking whether t is fully initialized, we see it as char array of
+ // size sizeof(int)*5. If we check the last element as a character, we read
+ // the last byte of an integer, which will be undefined. But just because
+ // that value is undefined, it doesn't mean that the element is uninitialized!
+ // For this reason, we need to retrieve the actual last element with the
+ // correct type.
+
+ // Divide the size argument to the cstring function by the actual element
+ // type. This value will be size of the array, or the index to the
+ // past-the-end element.
+ std::optional<NonLoc> Offset =
+ SVB.evalBinOpNN(State, clang::BO_Div, Size.castAs<NonLoc>(), ElemSize,
+ IdxTy)
+ .getAs<NonLoc>();
+
+ // Retrieve the index of the last element.
+ const NonLoc One = SVB.makeIntVal(1, IdxTy).castAs<NonLoc>();
+ SVal LastIdx = SVB.evalBinOpNN(State, BO_Sub, *Offset, One, IdxTy);
+
+ if (!Offset)
+ return State;
+
+ SVal LastElementVal =
+ State->getLValue(ElemTy, LastIdx, loc::MemRegionVal(SuperR));
+ if (!isa<Loc>(LastElementVal))
+ return State;
+
+ if (Filter.CheckCStringUninitializedRead &&
+ State->getSVal(LastElementVal.castAs<Loc>()).isUndef()) {
+ const llvm::APSInt *IdxInt = LastIdx.getAsInteger();
+ // If we can't get emit a sensible last element index, just bail out --
+ // prefer to emit nothing in favour of emitting garbage quality reports.
+ if (!IdxInt) {
+ C.addSink();
+ return nullptr;
+ }
+ llvm::SmallString<258> Buf;
+ llvm::raw_svector_ostream OS(Buf);
+ OS << "The last accessed element (at index ";
+ OS << IdxInt->getExtValue();
+ OS << ") in the ";
+ printIdxWithOrdinalSuffix(OS, Buffer.ArgumentIndex + 1);
+ OS << " argument is undefined";
+ emitUninitializedReadBug(C, State, Buffer.Expression, OS.str());
+ return nullptr;
+ }
+ return State;
+}
+
// FIXME: This was originally copied from ArrayBoundChecker.cpp. Refactor?
ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
ProgramStateRef state,
@@ -393,38 +564,17 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
if (!ER)
return state;
- SValBuilder &svalBuilder = C.getSValBuilder();
- ASTContext &Ctx = svalBuilder.getContext();
-
// Get the index of the accessed element.
- NonLoc Idx = ER->getIndex();
-
- if (CK == CharKind::Regular) {
- if (ER->getValueType() != Ctx.CharTy)
- return state;
- } else {
- if (ER->getValueType() != Ctx.WideCharTy)
- return state;
-
- QualType SizeTy = Ctx.getSizeType();
- NonLoc WideSize =
- svalBuilder
- .makeIntVal(Ctx.getTypeSizeInChars(Ctx.WideCharTy).getQuantity(),
- SizeTy)
- .castAs<NonLoc>();
- SVal Offset = svalBuilder.evalBinOpNN(state, BO_Mul, Idx, WideSize, SizeTy);
- if (Offset.isUnknown())
- return state;
- Idx = Offset.castAs<NonLoc>();
- }
+ std::optional<NonLoc> Idx = getIndex(state, ER, CK);
+ if (!Idx)
+ return state;
// Get the size of the array.
const auto *superReg = cast<SubRegion>(ER->getSuperRegion());
DefinedOrUnknownSVal Size =
getDynamicExtent(state, superReg, C.getSValBuilder());
- ProgramStateRef StInBound, StOutBound;
- std::tie(StInBound, StOutBound) = state->assumeInBoundDual(Idx, Size);
+ auto [StInBound, StOutBound] = state->assumeInBoundDual(*Idx, Size);
if (StOutBound && !StInBound) {
// These checks are either enabled by the CString out-of-bounds checker
// explicitly or implicitly by the Malloc checker.
@@ -439,15 +589,6 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
return nullptr;
}
- // Ensure that we wouldn't read uninitialized value.
- if (Access == AccessKind::read) {
- if (Filter.CheckCStringUninitializedRead &&
- StInBound->getSVal(ER).isUndef()) {
- emitUninitializedReadBug(C, StInBound, Buffer.Expression);
- return nullptr;
- }
- }
-
// Array bound check succeeded. From this point forward the array bound
// should always succeed.
return StInBound;
@@ -482,6 +623,7 @@ CStringChecker::CheckBufferAccess(CheckerContext &C, ProgramStateRef State,
// Check if the first byte of the buffer is accessible.
State = CheckLocation(C, State, Buffer, BufStart, Access, CK);
+
if (!State)
return nullptr;
@@ -506,6 +648,8 @@ CStringChecker::CheckBufferAccess(CheckerContext &C, ProgramStateRef State,
SVal BufEnd =
svalBuilder.evalBinOpLN(State, BO_Add, *BufLoc, LastOffset, PtrTy);
State = CheckLocation(C, State, Buffer, BufEnd, Access, CK);
+ if (Access == AccessKind::read)
+ State = checkInit(C, State, Buffer, BufEnd, *Length);
// If the buffer isn't large enough, abort.
if (!State)
@@ -674,16 +818,17 @@ void CStringChecker::emitNullArgBug(CheckerContext &C, ProgramStateRef State,
void CStringChecker::emitUninitializedReadBug(CheckerContext &C,
ProgramStateRef State,
- const Expr *E) const {
+ const Expr *E,
+ StringRef Msg) const {
if (ExplodedNode *N = C.generateErrorNode(State)) {
- const char *Msg =
- "Bytes string function accesses uninitialized/garbage values";
if (!BT_UninitRead)
BT_UninitRead.reset(new BugType(Filter.CheckNameCStringUninitializedRead,
"Accessing unitialized/garbage values"));
auto Report =
std::make_unique<PathSensitiveBugReport>(*BT_UninitRead, Msg, N);
+ Report->addNote("Other elements might also be undefined",
+ Report->getLocation());
Report->addRange(E->getSourceRange());
bugreporter::trackExpressionValue(N, E, *Report);
C.emitReport(std::move(Report));
@@ -1318,6 +1463,9 @@ void CStringChecker::evalCopyCommon(CheckerContext &C, const CallEvent &Call,
// If the size can be nonzero, we have to check the other arguments.
if (stateNonZeroSize) {
+ // TODO: If Size is tainted and we cannot prove that it is smaller or equal
+ // to the size of the destination buffer, then emit a warning
+ // that an attacker may provoke a buffer overflow error.
state = stateNonZeroSize;
// Ensure the destination is not null. If it is NULL there will be a
@@ -2468,27 +2616,26 @@ void CStringChecker::evalBzero(CheckerContext &C, const CallEvent &Call) const {
void CStringChecker::evalSprintf(CheckerContext &C,
const CallEvent &Call) const {
CurrentFunctionDescription = "'sprintf'";
- const auto *CE = cast<CallExpr>(Call.getOriginExpr());
- bool IsBI = CE->getBuiltinCallee() == Builtin::BI__builtin___sprintf_chk;
- evalSprintfCommon(C, Call, /* IsBounded */ false, IsBI);
+ evalSprintfCommon(C, Call, /* IsBounded = */ false);
}
void CStringChecker::evalSnprintf(CheckerContext &C,
const CallEvent &Call) const {
CurrentFunctionDescription = "'snprintf'";
- const auto *CE = cast<CallExpr>(Call.getOriginExpr());
- bool IsBI = CE->getBuiltinCallee() == Builtin::BI__builtin___snprintf_chk;
- evalSprintfCommon(C, Call, /* IsBounded */ true, IsBI);
+ evalSprintfCommon(C, Call, /* IsBounded = */ true);
}
void CStringChecker::evalSprintfCommon(CheckerContext &C, const CallEvent &Call,
- bool IsBounded, bool IsBuiltin) const {
+ bool IsBounded) const {
ProgramStateRef State = C.getState();
const auto *CE = cast<CallExpr>(Call.getOriginExpr());
DestinationArgExpr Dest = {{Call.getArgExpr(0), 0}};
const auto NumParams = Call.parameters().size();
- assert(CE->getNumArgs() >= NumParams);
+ if (CE->getNumArgs() < NumParams) {
+ // This is an invalid call, let's just ignore it.
+ return;
+ }
const auto AllArguments =
llvm::make_range(CE->getArgs(), CE->getArgs() + CE->getNumArgs());
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CXXDeleteChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CXXDeleteChecker.cpp
index b4dee1e300e8..1b1226a7f1a7 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CXXDeleteChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CXXDeleteChecker.cpp
@@ -220,11 +220,11 @@ CXXDeleteChecker::PtrCastVisitor::VisitNode(const ExplodedNode *N,
/*addPosRange=*/true);
}
-void ento::registerCXXArrayDeleteChecker(CheckerManager &mgr) {
+void ento::registerArrayDeleteChecker(CheckerManager &mgr) {
mgr.registerChecker<CXXArrayDeleteChecker>();
}
-bool ento::shouldRegisterCXXArrayDeleteChecker(const CheckerManager &mgr) {
+bool ento::shouldRegisterArrayDeleteChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
index a50772f881f7..2cff97a591b8 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
@@ -68,7 +68,7 @@ static bool evenFlexibleArraySize(ASTContext &Ctx, CharUnits RegionSize,
FlexSize = Ctx.getTypeSizeInChars(ElemType);
if (ArrayTy->getSize() == 1 && TypeSize > FlexSize)
TypeSize -= FlexSize;
- else if (ArrayTy->getSize() != 0)
+ else if (!ArrayTy->isZeroSize())
return false;
} else if (RD->hasFlexibleArrayMember()) {
FlexSize = Ctx.getTypeSizeInChars(ElemType);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp
index f02d20d45678..c7479d74eafc 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp
@@ -56,23 +56,23 @@ public:
private:
// These are known in the LLVM project. The pairs are in the following form:
- // {{{namespace, call}, argument-count}, {callback, kind}}
+ // {{match-mode, {namespace, call}, argument-count}, {callback, kind}}
const CallDescriptionMap<std::pair<CastCheck, CallKind>> CDM = {
- {{{"llvm", "cast"}, 1},
+ {{CDM::SimpleFunc, {"llvm", "cast"}, 1},
{&CastValueChecker::evalCast, CallKind::Function}},
- {{{"llvm", "dyn_cast"}, 1},
+ {{CDM::SimpleFunc, {"llvm", "dyn_cast"}, 1},
{&CastValueChecker::evalDynCast, CallKind::Function}},
- {{{"llvm", "cast_or_null"}, 1},
+ {{CDM::SimpleFunc, {"llvm", "cast_or_null"}, 1},
{&CastValueChecker::evalCastOrNull, CallKind::Function}},
- {{{"llvm", "dyn_cast_or_null"}, 1},
+ {{CDM::SimpleFunc, {"llvm", "dyn_cast_or_null"}, 1},
{&CastValueChecker::evalDynCastOrNull, CallKind::Function}},
- {{{"clang", "castAs"}, 0},
+ {{CDM::CXXMethod, {"clang", "castAs"}, 0},
{&CastValueChecker::evalCastAs, CallKind::Method}},
- {{{"clang", "getAs"}, 0},
+ {{CDM::CXXMethod, {"clang", "getAs"}, 0},
{&CastValueChecker::evalGetAs, CallKind::Method}},
- {{{"llvm", "isa"}, 1},
+ {{CDM::SimpleFunc, {"llvm", "isa"}, 1},
{&CastValueChecker::evalIsa, CallKind::InstanceOf}},
- {{{"llvm", "isa_and_nonnull"}, 1},
+ {{CDM::SimpleFunc, {"llvm", "isa_and_nonnull"}, 1},
{&CastValueChecker::evalIsaAndNonNull, CallKind::InstanceOf}}};
void evalCast(const CallEvent &Call, DefinedOrUnknownSVal DV,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index 978bc0bb082f..9d3aeff465ca 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -247,8 +247,8 @@ void ObjCDeallocChecker::checkASTDecl(const ObjCImplementationDecl *D,
PathDiagnosticLocation DLoc =
PathDiagnosticLocation::createBegin(D, BR.getSourceManager());
- BR.EmitBasicReport(D, this, Name, categories::CoreFoundationObjectiveC,
- OS.str(), DLoc);
+ BR.EmitBasicReport(D, this, Name, categories::CoreFoundationObjectiveC, Buf,
+ DLoc);
return;
}
}
@@ -585,7 +585,7 @@ void ObjCDeallocChecker::diagnoseMissingReleases(CheckerContext &C) const {
" before '[super dealloc]'";
auto BR = std::make_unique<PathSensitiveBugReport>(MissingReleaseBugType,
- OS.str(), ErrNode);
+ Buf, ErrNode);
C.emitReport(std::move(BR));
}
@@ -706,8 +706,8 @@ bool ObjCDeallocChecker::diagnoseExtraRelease(SymbolRef ReleasedValue,
OS << " property but was released in 'dealloc'";
}
- auto BR = std::make_unique<PathSensitiveBugReport>(ExtraReleaseBugType,
- OS.str(), ErrNode);
+ auto BR = std::make_unique<PathSensitiveBugReport>(ExtraReleaseBugType, Buf,
+ ErrNode);
BR->addRange(M.getOriginExpr()->getSourceRange());
C.emitReport(std::move(BR));
@@ -749,7 +749,7 @@ bool ObjCDeallocChecker::diagnoseMistakenDealloc(SymbolRef DeallocedValue,
<< "' should be released rather than deallocated";
auto BR = std::make_unique<PathSensitiveBugReport>(MistakenDeallocBugType,
- OS.str(), ErrNode);
+ Buf, ErrNode);
BR->addRange(M.getOriginExpr()->getSourceRange());
C.emitReport(std::move(BR));
@@ -768,8 +768,8 @@ void ObjCDeallocChecker::initIdentifierInfoAndSelectors(
Block_releaseII = &Ctx.Idents.get("_Block_release");
CIFilterII = &Ctx.Idents.get("CIFilter");
- IdentifierInfo *DeallocII = &Ctx.Idents.get("dealloc");
- IdentifierInfo *ReleaseII = &Ctx.Idents.get("release");
+ const IdentifierInfo *DeallocII = &Ctx.Idents.get("dealloc");
+ const IdentifierInfo *ReleaseII = &Ctx.Idents.get("release");
DeallocSel = Ctx.Selectors.getSelector(0, &DeallocII);
ReleaseSel = Ctx.Selectors.getSelector(0, &ReleaseII);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
deleted file mode 100644
index 0d2551f11583..000000000000
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
+++ /dev/null
@@ -1,96 +0,0 @@
-//==- CheckSizeofPointer.cpp - Check for sizeof on pointers ------*- C++ -*-==//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines a check for unintended use of sizeof() on pointer
-// expressions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
-#include "clang/AST/StmtVisitor.h"
-#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
-#include "clang/StaticAnalyzer/Core/Checker.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
-
-using namespace clang;
-using namespace ento;
-
-namespace {
-class WalkAST : public StmtVisitor<WalkAST> {
- BugReporter &BR;
- const CheckerBase *Checker;
- AnalysisDeclContext* AC;
-
-public:
- WalkAST(BugReporter &br, const CheckerBase *checker, AnalysisDeclContext *ac)
- : BR(br), Checker(checker), AC(ac) {}
- void VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E);
- void VisitStmt(Stmt *S) { VisitChildren(S); }
- void VisitChildren(Stmt *S);
-};
-}
-
-void WalkAST::VisitChildren(Stmt *S) {
- for (Stmt *Child : S->children())
- if (Child)
- Visit(Child);
-}
-
-// CWE-467: Use of sizeof() on a Pointer Type
-void WalkAST::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
- if (E->getKind() != UETT_SizeOf)
- return;
-
- // If an explicit type is used in the code, usually the coder knows what they are
- // doing.
- if (E->isArgumentType())
- return;
-
- QualType T = E->getTypeOfArgument();
- if (T->isPointerType()) {
-
- // Many false positives have the form 'sizeof *p'. This is reasonable
- // because people know what they are doing when they intentionally
- // dereference the pointer.
- Expr *ArgEx = E->getArgumentExpr();
- if (!isa<DeclRefExpr>(ArgEx->IgnoreParens()))
- return;
-
- PathDiagnosticLocation ELoc =
- PathDiagnosticLocation::createBegin(E, BR.getSourceManager(), AC);
- BR.EmitBasicReport(AC->getDecl(), Checker,
- "Potential unintended use of sizeof() on pointer type",
- categories::LogicError,
- "The code calls sizeof() on a pointer type. "
- "This can produce an unexpected result.",
- ELoc, ArgEx->getSourceRange());
- }
-}
-
-//===----------------------------------------------------------------------===//
-// SizeofPointerChecker
-//===----------------------------------------------------------------------===//
-
-namespace {
-class SizeofPointerChecker : public Checker<check::ASTCodeBody> {
-public:
- void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
- BugReporter &BR) const {
- WalkAST walker(BR, this, mgr.getAnalysisDeclContext(D));
- walker.Visit(D->getBody());
- }
-};
-}
-
-void ento::registerSizeofPointerChecker(CheckerManager &mgr) {
- mgr.registerChecker<SizeofPointerChecker>();
-}
-
-bool ento::shouldRegisterSizeofPointerChecker(const CheckerManager &mgr) {
- return true;
-}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
index 3e5e2b913914..153a1b1acbfa 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
@@ -33,30 +33,36 @@ namespace ento {
/// checking.
///
/// \sa CheckerContext
-class CheckerDocumentation : public Checker< check::PreStmt<ReturnStmt>,
- check::PostStmt<DeclStmt>,
- check::PreObjCMessage,
- check::PostObjCMessage,
- check::ObjCMessageNil,
- check::PreCall,
- check::PostCall,
- check::BranchCondition,
- check::NewAllocator,
- check::Location,
- check::Bind,
- check::DeadSymbols,
- check::BeginFunction,
- check::EndFunction,
- check::EndAnalysis,
- check::EndOfTranslationUnit,
- eval::Call,
- eval::Assume,
- check::LiveSymbols,
- check::RegionChanges,
- check::PointerEscape,
- check::ConstPointerEscape,
- check::Event<ImplicitNullDerefEvent>,
- check::ASTDecl<FunctionDecl> > {
+class CheckerDocumentation
+ : public Checker<
+ // clang-format off
+ check::ASTCodeBody,
+ check::ASTDecl<FunctionDecl>,
+ check::BeginFunction,
+ check::Bind,
+ check::BranchCondition,
+ check::ConstPointerEscape,
+ check::DeadSymbols,
+ check::EndAnalysis,
+ check::EndFunction,
+ check::EndOfTranslationUnit,
+ check::Event<ImplicitNullDerefEvent>,
+ check::LiveSymbols,
+ check::Location,
+ check::NewAllocator,
+ check::ObjCMessageNil,
+ check::PointerEscape,
+ check::PostCall,
+ check::PostObjCMessage,
+ check::PostStmt<DeclStmt>,
+ check::PreCall,
+ check::PreObjCMessage,
+ check::PreStmt<ReturnStmt>,
+ check::RegionChanges,
+ eval::Assume,
+ eval::Call
+ // clang-format on
+ > {
public:
/// Pre-visit the Statement.
///
@@ -137,10 +143,7 @@ public:
/// (2) and (3). Post-call for the allocator is called after step (1).
/// Pre-statement for the new-expression is called on step (4) when the value
/// of the expression is evaluated.
- /// \param NE The C++ new-expression that triggered the allocation.
- /// \param Target The allocated region, casted to the class type.
- void checkNewAllocator(const CXXNewExpr *NE, SVal Target,
- CheckerContext &) const {}
+ void checkNewAllocator(const CXXAllocatorCall &, CheckerContext &) const {}
/// Called on a load from and a store to a location.
///
@@ -226,7 +229,7 @@ public:
/// first one wins.
///
/// eval::Call
- bool evalCall(const CallExpr *CE, CheckerContext &C) const { return true; }
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const { return true; }
/// Handles assumptions on symbolic values.
///
@@ -324,11 +327,26 @@ public:
void checkASTDecl(const FunctionDecl *D,
AnalysisManager &Mgr,
BugReporter &BR) const {}
+
+ /// Check every declaration that has a statement body in the AST.
+ ///
+ /// As AST traversal callback, which should only be used when the checker is
+ /// not path sensitive. It will be called for every Declaration in the AST.
+ void checkASTCodeBody(const Decl *D, AnalysisManager &Mgr,
+ BugReporter &BR) const {}
};
void CheckerDocumentation::checkPostStmt(const DeclStmt *DS,
CheckerContext &C) const {
}
+void registerCheckerDocumentationChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<CheckerDocumentation>();
+}
+
+bool shouldRegisterCheckerDocumentationChecker(const CheckerManager &) {
+ return false;
+}
+
} // end namespace ento
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
index be7be15022d3..3a0a01c23de0 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
@@ -43,7 +43,8 @@ class ChrootChecker : public Checker<eval::Call, check::PreCall> {
// This bug refers to possibly break out of a chroot() jail.
const BugType BT_BreakJail{this, "Break out of jail"};
- const CallDescription Chroot{{"chroot"}, 1}, Chdir{{"chdir"}, 1};
+ const CallDescription Chroot{CDM::CLibrary, {"chroot"}, 1},
+ Chdir{CDM::CLibrary, {"chdir"}, 1};
public:
ChrootChecker() {}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp
index 65a2ec4076fd..55ed809bfed6 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp
@@ -72,26 +72,31 @@ public:
SVal) const;
CallDescriptionMap<NoItParamFn> NoIterParamFunctions = {
- {{{"clear"}, 0}, &ContainerModeling::handleClear},
- {{{"assign"}, 2}, &ContainerModeling::handleAssign},
- {{{"push_back"}, 1}, &ContainerModeling::handlePushBack},
- {{{"emplace_back"}, 1}, &ContainerModeling::handlePushBack},
- {{{"pop_back"}, 0}, &ContainerModeling::handlePopBack},
- {{{"push_front"}, 1}, &ContainerModeling::handlePushFront},
- {{{"emplace_front"}, 1}, &ContainerModeling::handlePushFront},
- {{{"pop_front"}, 0}, &ContainerModeling::handlePopFront},
+ {{CDM::CXXMethod, {"clear"}, 0}, &ContainerModeling::handleClear},
+ {{CDM::CXXMethod, {"assign"}, 2}, &ContainerModeling::handleAssign},
+ {{CDM::CXXMethod, {"push_back"}, 1}, &ContainerModeling::handlePushBack},
+ {{CDM::CXXMethod, {"emplace_back"}, 1},
+ &ContainerModeling::handlePushBack},
+ {{CDM::CXXMethod, {"pop_back"}, 0}, &ContainerModeling::handlePopBack},
+ {{CDM::CXXMethod, {"push_front"}, 1},
+ &ContainerModeling::handlePushFront},
+ {{CDM::CXXMethod, {"emplace_front"}, 1},
+ &ContainerModeling::handlePushFront},
+ {{CDM::CXXMethod, {"pop_front"}, 0}, &ContainerModeling::handlePopFront},
};
CallDescriptionMap<OneItParamFn> OneIterParamFunctions = {
- {{{"insert"}, 2}, &ContainerModeling::handleInsert},
- {{{"emplace"}, 2}, &ContainerModeling::handleInsert},
- {{{"erase"}, 1}, &ContainerModeling::handleErase},
- {{{"erase_after"}, 1}, &ContainerModeling::handleEraseAfter},
+ {{CDM::CXXMethod, {"insert"}, 2}, &ContainerModeling::handleInsert},
+ {{CDM::CXXMethod, {"emplace"}, 2}, &ContainerModeling::handleInsert},
+ {{CDM::CXXMethod, {"erase"}, 1}, &ContainerModeling::handleErase},
+ {{CDM::CXXMethod, {"erase_after"}, 1},
+ &ContainerModeling::handleEraseAfter},
};
CallDescriptionMap<TwoItParamFn> TwoIterParamFunctions = {
- {{{"erase"}, 2}, &ContainerModeling::handleErase},
- {{{"erase_after"}, 2}, &ContainerModeling::handleEraseAfter},
+ {{CDM::CXXMethod, {"erase"}, 2}, &ContainerModeling::handleErase},
+ {{CDM::CXXMethod, {"erase_after"}, 2},
+ &ContainerModeling::handleEraseAfter},
};
};
@@ -770,6 +775,10 @@ const CXXRecordDecl *getCXXRecordDecl(ProgramStateRef State,
Type = RefT->getPointeeType();
}
+ if (const auto *PtrT = Type->getAs<PointerType>()) {
+ Type = PtrT->getPointeeType();
+ }
+
return Type->getUnqualifiedDesugaredType()->getAsCXXRecordDecl();
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugContainerModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugContainerModeling.cpp
index 97f769b1c451..d3830a01dd0c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugContainerModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugContainerModeling.cpp
@@ -42,9 +42,9 @@ class DebugContainerModeling
CheckerContext &) const;
CallDescriptionMap<FnCheck> Callbacks = {
- {{{"clang_analyzer_container_begin"}, 1},
+ {{CDM::SimpleFunc, {"clang_analyzer_container_begin"}, 1},
&DebugContainerModeling::analyzerContainerBegin},
- {{{"clang_analyzer_container_end"}, 1},
+ {{CDM::SimpleFunc, {"clang_analyzer_container_end"}, 1},
&DebugContainerModeling::analyzerContainerEnd},
};
@@ -52,7 +52,7 @@ public:
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
};
-} //namespace
+} // namespace
bool DebugContainerModeling::evalCall(const CallEvent &Call,
CheckerContext &C) const {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugIteratorModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugIteratorModeling.cpp
index ff479c7b0ac8..203743dacda6 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugIteratorModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugIteratorModeling.cpp
@@ -43,11 +43,11 @@ class DebugIteratorModeling
CheckerContext &) const;
CallDescriptionMap<FnCheck> Callbacks = {
- {{{"clang_analyzer_iterator_position"}, 1},
+ {{CDM::SimpleFunc, {"clang_analyzer_iterator_position"}, 1},
&DebugIteratorModeling::analyzerIteratorPosition},
- {{{"clang_analyzer_iterator_container"}, 1},
+ {{CDM::SimpleFunc, {"clang_analyzer_iterator_container"}, 1},
&DebugIteratorModeling::analyzerIteratorContainer},
- {{{"clang_analyzer_iterator_validity"}, 1},
+ {{CDM::SimpleFunc, {"clang_analyzer_iterator_validity"}, 1},
&DebugIteratorModeling::analyzerIteratorValidity},
};
@@ -55,7 +55,7 @@ public:
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
};
-} //namespace
+} // namespace
bool DebugIteratorModeling::evalCall(const CallEvent &Call,
CheckerContext &C) const {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
index a678c3827e7f..0355eede75ea 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -31,11 +31,13 @@ class DereferenceChecker
: public Checker< check::Location,
check::Bind,
EventDispatcher<ImplicitNullDerefEvent> > {
- enum DerefKind { NullPointer, UndefinedPointerValue };
+ enum DerefKind { NullPointer, UndefinedPointerValue, AddressOfLabel };
BugType BT_Null{this, "Dereference of null pointer", categories::LogicError};
BugType BT_Undef{this, "Dereference of undefined pointer value",
categories::LogicError};
+ BugType BT_Label{this, "Dereference of the address of a label",
+ categories::LogicError};
void reportBug(DerefKind K, ProgramStateRef State, const Stmt *S,
CheckerContext &C) const;
@@ -167,6 +169,11 @@ void DereferenceChecker::reportBug(DerefKind K, ProgramStateRef State,
DerefStr1 = " results in an undefined pointer dereference";
DerefStr2 = " results in a dereference of an undefined pointer value";
break;
+ case DerefKind::AddressOfLabel:
+ BT = &BT_Label;
+ DerefStr1 = " results in an undefined pointer dereference";
+ DerefStr2 = " results in a dereference of an address of a label";
+ break;
};
// Generate an error node.
@@ -188,9 +195,9 @@ void DereferenceChecker::reportBug(DerefKind K, ProgramStateRef State,
os << DerefStr1;
break;
}
- case Stmt::OMPArraySectionExprClass: {
+ case Stmt::ArraySectionExprClass: {
os << "Array access";
- const OMPArraySectionExpr *AE = cast<OMPArraySectionExpr>(S);
+ const ArraySectionExpr *AE = cast<ArraySectionExpr>(S);
AddDerefSource(os, Ranges, AE->getBase()->IgnoreParenCasts(),
State.get(), N->getLocationContext());
os << DerefStr1;
@@ -287,6 +294,12 @@ void DereferenceChecker::checkBind(SVal L, SVal V, const Stmt *S,
if (V.isUndef())
return;
+ // One should never write to label addresses.
+ if (auto Label = L.getAs<loc::GotoLabel>()) {
+ reportBug(DerefKind::AddressOfLabel, C.getState(), S, C);
+ return;
+ }
+
const MemRegion *MR = L.getAsRegion();
const TypedValueRegion *TVR = dyn_cast_or_null<TypedValueRegion>(MR);
if (!TVR)
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
index 49486ea796c2..fc174e29be47 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
@@ -1,4 +1,4 @@
-//=- DirectIvarAssignment.cpp - Check rules on ObjC properties -*- C++ ----*-==//
+//===- DirectIvarAssignment.cpp - Check rules on ObjC properties -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoChecker.cpp
index 265185e64107..72fd6781a756 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoChecker.cpp
@@ -17,7 +17,7 @@
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
@@ -205,7 +205,7 @@ void ErrnoChecker::checkPreCall(const CallEvent &Call,
// Probably 'strerror'?
if (CallF->isExternC() && CallF->isGlobal() &&
C.getSourceManager().isInSystemHeader(CallF->getLocation()) &&
- !isErrno(CallF)) {
+ !isErrnoLocationCall(Call)) {
if (getErrnoState(C.getState()) == MustBeChecked) {
std::optional<ento::Loc> ErrnoLoc = getErrnoLoc(C.getState());
assert(ErrnoLoc && "ErrnoLoc should exist if an errno state is set.");
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp
index 1b34ea0e056e..6ffc05f06742 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp
@@ -39,10 +39,15 @@ namespace {
// Name of the "errno" variable.
// FIXME: Is there a system where it is not called "errno" but is a variable?
const char *ErrnoVarName = "errno";
+
// Names of functions that return a location of the "errno" value.
// FIXME: Are there other similar function names?
-const char *ErrnoLocationFuncNames[] = {"__errno_location", "___errno",
- "__errno", "_errno", "__error"};
+CallDescriptionSet ErrnoLocationCalls{
+ {CDM::CLibrary, {"__errno_location"}, 0, 0},
+ {CDM::CLibrary, {"___errno"}, 0, 0},
+ {CDM::CLibrary, {"__errno"}, 0, 0},
+ {CDM::CLibrary, {"_errno"}, 0, 0},
+ {CDM::CLibrary, {"__error"}, 0, 0}};
class ErrnoModeling
: public Checker<check::ASTDecl<TranslationUnitDecl>, check::BeginFunction,
@@ -54,16 +59,10 @@ public:
void checkLiveSymbols(ProgramStateRef State, SymbolReaper &SR) const;
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
- // The declaration of an "errno" variable or "errno location" function.
- mutable const Decl *ErrnoDecl = nullptr;
-
private:
- // FIXME: Names from `ErrnoLocationFuncNames` are used to build this set.
- CallDescriptionSet ErrnoLocationCalls{{{"__errno_location"}, 0, 0},
- {{"___errno"}, 0, 0},
- {{"__errno"}, 0, 0},
- {{"_errno"}, 0, 0},
- {{"__error"}, 0, 0}};
+ // The declaration of an "errno" variable on systems where errno is
+ // represented by a variable (and not a function that queries its location).
+ mutable const VarDecl *ErrnoDecl = nullptr;
};
} // namespace
@@ -74,9 +73,13 @@ REGISTER_TRAIT_WITH_PROGRAMSTATE(ErrnoRegion, const MemRegion *)
REGISTER_TRAIT_WITH_PROGRAMSTATE(ErrnoState, errno_modeling::ErrnoCheckState)
-/// Search for a variable called "errno" in the AST.
-/// Return nullptr if not found.
-static const VarDecl *getErrnoVar(ASTContext &ACtx) {
+void ErrnoModeling::checkASTDecl(const TranslationUnitDecl *D,
+ AnalysisManager &Mgr, BugReporter &BR) const {
+ // Try to find the declaration of the external variable `int errno;`.
+ // There are also C library implementations, where the `errno` location is
+ // accessed via a function that returns its address; in those environments
+ // this callback has no effect.
+ ASTContext &ACtx = Mgr.getASTContext();
IdentifierInfo &II = ACtx.Idents.get(ErrnoVarName);
auto LookupRes = ACtx.getTranslationUnitDecl()->lookup(&II);
auto Found = llvm::find_if(LookupRes, [&ACtx](const Decl *D) {
@@ -86,47 +89,8 @@ static const VarDecl *getErrnoVar(ASTContext &ACtx) {
VD->getType().getCanonicalType() == ACtx.IntTy;
return false;
});
- if (Found == LookupRes.end())
- return nullptr;
-
- return cast<VarDecl>(*Found);
-}
-
-/// Search for a function with a specific name that is used to return a pointer
-/// to "errno".
-/// Return nullptr if no such function was found.
-static const FunctionDecl *getErrnoFunc(ASTContext &ACtx) {
- SmallVector<const Decl *> LookupRes;
- for (StringRef ErrnoName : ErrnoLocationFuncNames) {
- IdentifierInfo &II = ACtx.Idents.get(ErrnoName);
- llvm::append_range(LookupRes, ACtx.getTranslationUnitDecl()->lookup(&II));
- }
-
- auto Found = llvm::find_if(LookupRes, [&ACtx](const Decl *D) {
- if (auto *FD = dyn_cast<FunctionDecl>(D))
- return ACtx.getSourceManager().isInSystemHeader(FD->getLocation()) &&
- FD->isExternC() && FD->getNumParams() == 0 &&
- FD->getReturnType().getCanonicalType() ==
- ACtx.getPointerType(ACtx.IntTy);
- return false;
- });
- if (Found == LookupRes.end())
- return nullptr;
-
- return cast<FunctionDecl>(*Found);
-}
-
-void ErrnoModeling::checkASTDecl(const TranslationUnitDecl *D,
- AnalysisManager &Mgr, BugReporter &BR) const {
- // Try to find an usable `errno` value.
- // It can be an external variable called "errno" or a function that returns a
- // pointer to the "errno" value. This function can have different names.
- // The actual case is dependent on the C library implementation, we
- // can only search for a match in one of these variations.
- // We assume that exactly one of these cases might be true.
- ErrnoDecl = getErrnoVar(Mgr.getASTContext());
- if (!ErrnoDecl)
- ErrnoDecl = getErrnoFunc(Mgr.getASTContext());
+ if (Found != LookupRes.end())
+ ErrnoDecl = cast<VarDecl>(*Found);
}
void ErrnoModeling::checkBeginFunction(CheckerContext &C) const {
@@ -136,25 +100,18 @@ void ErrnoModeling::checkBeginFunction(CheckerContext &C) const {
ASTContext &ACtx = C.getASTContext();
ProgramStateRef State = C.getState();
- if (const auto *ErrnoVar = dyn_cast_or_null<VarDecl>(ErrnoDecl)) {
- // There is an external 'errno' variable.
- // Use its memory region.
- // The memory region for an 'errno'-like variable is allocated in system
- // space by MemRegionManager.
- const MemRegion *ErrnoR =
- State->getRegion(ErrnoVar, C.getLocationContext());
+ const MemRegion *ErrnoR = nullptr;
+
+ if (ErrnoDecl) {
+ // There is an external 'errno' variable, so we can simply use the memory
+ // region that's associated with it.
+ ErrnoR = State->getRegion(ErrnoDecl, C.getLocationContext());
assert(ErrnoR && "Memory region should exist for the 'errno' variable.");
- State = State->set<ErrnoRegion>(ErrnoR);
- State =
- errno_modeling::setErrnoValue(State, C, 0, errno_modeling::Irrelevant);
- C.addTransition(State);
- } else if (ErrnoDecl) {
- assert(isa<FunctionDecl>(ErrnoDecl) && "Invalid errno location function.");
- // There is a function that returns the location of 'errno'.
- // We must create a memory region for it in system space.
- // Currently a symbolic region is used with an artifical symbol.
- // FIXME: It is better to have a custom (new) kind of MemRegion for such
- // cases.
+ } else {
+ // There is no 'errno' variable, so create a new symbolic memory region
+ // that can be used to model the return value of the "get the location of
+ // errno" internal functions.
+ // NOTE: this `SVal` is created even if errno is not defined or used.
SValBuilder &SVB = C.getSValBuilder();
MemRegionManager &RMgr = C.getStateManager().getRegionManager();
@@ -162,27 +119,31 @@ void ErrnoModeling::checkBeginFunction(CheckerContext &C) const {
RMgr.getGlobalsRegion(MemRegion::GlobalSystemSpaceRegionKind);
// Create an artifical symbol for the region.
- // It is not possible to associate a statement or expression in this case.
+ // Note that it is not possible to associate a statement or expression in
+ // this case and the `symbolTag` (opaque pointer tag) is just the address
+ // of the data member `ErrnoDecl` of the singleton `ErrnoModeling` checker
+ // object.
const SymbolConjured *Sym = SVB.conjureSymbol(
nullptr, C.getLocationContext(),
ACtx.getLValueReferenceType(ACtx.IntTy), C.blockCount(), &ErrnoDecl);
// The symbolic region is untyped, create a typed sub-region in it.
// The ElementRegion is used to make the errno region a typed region.
- const MemRegion *ErrnoR = RMgr.getElementRegion(
+ ErrnoR = RMgr.getElementRegion(
ACtx.IntTy, SVB.makeZeroArrayIndex(),
RMgr.getSymbolicRegion(Sym, GlobalSystemSpace), C.getASTContext());
- State = State->set<ErrnoRegion>(ErrnoR);
- State =
- errno_modeling::setErrnoValue(State, C, 0, errno_modeling::Irrelevant);
- C.addTransition(State);
}
+ assert(ErrnoR);
+ State = State->set<ErrnoRegion>(ErrnoR);
+ State =
+ errno_modeling::setErrnoValue(State, C, 0, errno_modeling::Irrelevant);
+ C.addTransition(State);
}
bool ErrnoModeling::evalCall(const CallEvent &Call, CheckerContext &C) const {
// Return location of "errno" at a call to an "errno address returning"
// function.
- if (ErrnoLocationCalls.contains(Call)) {
+ if (errno_modeling::isErrnoLocationCall(Call)) {
ProgramStateRef State = C.getState();
const MemRegion *ErrnoR = State->get<ErrnoRegion>();
@@ -260,14 +221,8 @@ ProgramStateRef clearErrnoState(ProgramStateRef State) {
return setErrnoState(State, Irrelevant);
}
-bool isErrno(const Decl *D) {
- if (const auto *VD = dyn_cast_or_null<VarDecl>(D))
- if (const IdentifierInfo *II = VD->getIdentifier())
- return II->getName() == ErrnoVarName;
- if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
- if (const IdentifierInfo *II = FD->getIdentifier())
- return llvm::is_contained(ErrnoLocationFuncNames, II->getName());
- return false;
+bool isErrnoLocationCall(const CallEvent &CE) {
+ return ErrnoLocationCalls.contains(CE);
}
const NoteTag *getErrnoNoteTag(CheckerContext &C, const std::string &Message) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h
index 6b53572fe5e2..95da8a28d325 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h
@@ -71,12 +71,9 @@ ProgramStateRef setErrnoState(ProgramStateRef State, ErrnoCheckState EState);
/// Clear state of errno (make it irrelevant).
ProgramStateRef clearErrnoState(ProgramStateRef State);
-/// Determine if a `Decl` node related to 'errno'.
-/// This is true if the declaration is the errno variable or a function
-/// that returns a pointer to the 'errno' value (usually the 'errno' macro is
-/// defined with this function). \p D is not required to be a canonical
-/// declaration.
-bool isErrno(const Decl *D);
+/// Determine if `Call` is a call to an internal function that returns the
+/// location of `errno` (in environments where errno is accessed this way).
+bool isErrnoLocationCall(const CallEvent &Call);
/// Create a NoteTag that displays the message if the 'errno' memory region is
/// marked as interesting, and resets the interestingness.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoTesterChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoTesterChecker.cpp
index c46ebee0c94f..6076a6bc7897 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoTesterChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoTesterChecker.cpp
@@ -70,13 +70,15 @@ private:
using EvalFn = std::function<void(CheckerContext &, const CallEvent &)>;
const CallDescriptionMap<EvalFn> TestCalls{
- {{{"ErrnoTesterChecker_setErrno"}, 1}, &ErrnoTesterChecker::evalSetErrno},
- {{{"ErrnoTesterChecker_getErrno"}, 0}, &ErrnoTesterChecker::evalGetErrno},
- {{{"ErrnoTesterChecker_setErrnoIfError"}, 0},
+ {{CDM::SimpleFunc, {"ErrnoTesterChecker_setErrno"}, 1},
+ &ErrnoTesterChecker::evalSetErrno},
+ {{CDM::SimpleFunc, {"ErrnoTesterChecker_getErrno"}, 0},
+ &ErrnoTesterChecker::evalGetErrno},
+ {{CDM::SimpleFunc, {"ErrnoTesterChecker_setErrnoIfError"}, 0},
&ErrnoTesterChecker::evalSetErrnoIfError},
- {{{"ErrnoTesterChecker_setErrnoIfErrorRange"}, 0},
+ {{CDM::SimpleFunc, {"ErrnoTesterChecker_setErrnoIfErrorRange"}, 0},
&ErrnoTesterChecker::evalSetErrnoIfErrorRange},
- {{{"ErrnoTesterChecker_setErrnoCheckState"}, 0},
+ {{CDM::SimpleFunc, {"ErrnoTesterChecker_setErrnoCheckState"}, 0},
&ErrnoTesterChecker::evalSetErrnoCheckState}};
};
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
index 4ceaf933d0bf..b89a6e2588c9 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
@@ -27,6 +27,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/YAMLTraits.h"
#include <limits>
@@ -59,13 +60,6 @@ constexpr llvm::StringLiteral MsgSanitizeSystemArgs =
"Untrusted data is passed to a system call "
"(CERT/STR02-C. Sanitize data passed to complex subsystems)";
-/// Check if tainted data is used as a buffer size in strn.. functions,
-/// and allocators.
-constexpr llvm::StringLiteral MsgTaintedBufferSize =
- "Untrusted data is used to specify the buffer size "
- "(CERT/STR31-C. Guarantee that storage for strings has sufficient space "
- "for character data and the null terminator)";
-
/// Check if tainted data is used as a custom sink's parameter.
constexpr llvm::StringLiteral MsgCustomSink =
"Untrusted data is passed to a user-defined sink";
@@ -298,14 +292,6 @@ public:
return {{}, {}, std::move(SrcArgs), std::move(DstArgs)};
}
- /// Make a rule that taints all PropDstArgs if any of PropSrcArgs is tainted.
- static GenericTaintRule
- SinkProp(ArgSet &&SinkArgs, ArgSet &&SrcArgs, ArgSet &&DstArgs,
- std::optional<StringRef> Msg = std::nullopt) {
- return {
- std::move(SinkArgs), {}, std::move(SrcArgs), std::move(DstArgs), Msg};
- }
-
/// Process a function which could either be a taint source, a taint sink, a
/// taint filter or a taint propagator.
void process(const GenericTaintChecker &Checker, const CallEvent &Call,
@@ -406,26 +392,24 @@ public:
bool generateReportIfTainted(const Expr *E, StringRef Msg,
CheckerContext &C) const;
-private:
- const BugType BT{this, "Use of Untrusted Data", categories::TaintedData};
+ bool isTaintReporterCheckerEnabled = false;
+ std::optional<BugType> BT;
+private:
bool checkUncontrolledFormatString(const CallEvent &Call,
CheckerContext &C) const;
void taintUnsafeSocketProtocol(const CallEvent &Call,
CheckerContext &C) const;
- /// Default taint rules are initalized with the help of a CheckerContext to
- /// access the names of built-in functions like memcpy.
+ /// The taint rules are initalized with the help of a CheckerContext to
+ /// access user-provided configuration.
void initTaintRules(CheckerContext &C) const;
- /// CallDescription currently cannot restrict matches to the global namespace
- /// only, which is why multiple CallDescriptionMaps are used, as we want to
- /// disambiguate global C functions from functions inside user-defined
- /// namespaces.
- // TODO: Remove separation to simplify matching logic once CallDescriptions
- // are more expressive.
-
+ // TODO: The two separate `CallDescriptionMap`s were introduced when
+ // `CallDescription` was unable to restrict matches to the global namespace
+ // only. This limitation no longer exists, so the following two maps should
+ // be unified.
mutable std::optional<RuleLookupTy> StaticTaintRules;
mutable std::optional<RuleLookupTy> DynamicTaintRules;
};
@@ -521,7 +505,8 @@ void GenericTaintRuleParser::consumeRulesFromConfig(const Config &C,
GenericTaintRule &&Rule,
RulesContTy &Rules) {
NamePartsTy NameParts = parseNameParts(C);
- Rules.emplace_back(CallDescription(NameParts), std::move(Rule));
+ Rules.emplace_back(CallDescription(CDM::Unspecified, NameParts),
+ std::move(Rule));
}
void GenericTaintRuleParser::parseConfig(const std::string &Option,
@@ -587,205 +572,236 @@ void GenericTaintChecker::initTaintRules(CheckerContext &C) const {
std::vector<std::pair<CallDescription, GenericTaintRule>>;
using TR = GenericTaintRule;
- const Builtin::Context &BI = C.getASTContext().BuiltinInfo;
-
RulesConstructionTy GlobalCRules{
// Sources
- {{{"fdopen"}}, TR::Source({{ReturnValueIndex}})},
- {{{"fopen"}}, TR::Source({{ReturnValueIndex}})},
- {{{"freopen"}}, TR::Source({{ReturnValueIndex}})},
- {{{"getch"}}, TR::Source({{ReturnValueIndex}})},
- {{{"getchar"}}, TR::Source({{ReturnValueIndex}})},
- {{{"getchar_unlocked"}}, TR::Source({{ReturnValueIndex}})},
- {{{"gets"}}, TR::Source({{0}, ReturnValueIndex})},
- {{{"gets_s"}}, TR::Source({{0}, ReturnValueIndex})},
- {{{"scanf"}}, TR::Source({{}, 1})},
- {{{"scanf_s"}}, TR::Source({{}, {1}})},
- {{{"wgetch"}}, TR::Source({{}, ReturnValueIndex})},
+ {{CDM::CLibrary, {"fdopen"}}, TR::Source({{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"fopen"}}, TR::Source({{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"freopen"}}, TR::Source({{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"getch"}}, TR::Source({{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"getchar"}}, TR::Source({{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"getchar_unlocked"}}, TR::Source({{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"gets"}}, TR::Source({{0, ReturnValueIndex}})},
+ {{CDM::CLibrary, {"gets_s"}}, TR::Source({{0, ReturnValueIndex}})},
+ {{CDM::CLibrary, {"scanf"}}, TR::Source({{}, 1})},
+ {{CDM::CLibrary, {"scanf_s"}}, TR::Source({{}, 1})},
+ {{CDM::CLibrary, {"wgetch"}}, TR::Source({{ReturnValueIndex}})},
// Sometimes the line between taint sources and propagators is blurry.
// _IO_getc is choosen to be a source, but could also be a propagator.
// This way it is simpler, as modeling it as a propagator would require
// to model the possible sources of _IO_FILE * values, which the _IO_getc
// function takes as parameters.
- {{{"_IO_getc"}}, TR::Source({{ReturnValueIndex}})},
- {{{"getcwd"}}, TR::Source({{0, ReturnValueIndex}})},
- {{{"getwd"}}, TR::Source({{0, ReturnValueIndex}})},
- {{{"readlink"}}, TR::Source({{1, ReturnValueIndex}})},
- {{{"readlinkat"}}, TR::Source({{2, ReturnValueIndex}})},
- {{{"get_current_dir_name"}}, TR::Source({{ReturnValueIndex}})},
- {{{"gethostname"}}, TR::Source({{0}})},
- {{{"getnameinfo"}}, TR::Source({{2, 4}})},
- {{{"getseuserbyname"}}, TR::Source({{1, 2}})},
- {{{"getgroups"}}, TR::Source({{1, ReturnValueIndex}})},
- {{{"getlogin"}}, TR::Source({{ReturnValueIndex}})},
- {{{"getlogin_r"}}, TR::Source({{0}})},
+ {{CDM::CLibrary, {"_IO_getc"}}, TR::Source({{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"getcwd"}}, TR::Source({{0, ReturnValueIndex}})},
+ {{CDM::CLibrary, {"getwd"}}, TR::Source({{0, ReturnValueIndex}})},
+ {{CDM::CLibrary, {"readlink"}}, TR::Source({{1, ReturnValueIndex}})},
+ {{CDM::CLibrary, {"readlinkat"}}, TR::Source({{2, ReturnValueIndex}})},
+ {{CDM::CLibrary, {"get_current_dir_name"}},
+ TR::Source({{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"gethostname"}}, TR::Source({{0}})},
+ {{CDM::CLibrary, {"getnameinfo"}}, TR::Source({{2, 4}})},
+ {{CDM::CLibrary, {"getseuserbyname"}}, TR::Source({{1, 2}})},
+ {{CDM::CLibrary, {"getgroups"}}, TR::Source({{1, ReturnValueIndex}})},
+ {{CDM::CLibrary, {"getlogin"}}, TR::Source({{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"getlogin_r"}}, TR::Source({{0}})},
// Props
- {{{"accept"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"atoi"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"atol"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"atoll"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"fgetc"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"fgetln"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"fgets"}}, TR::Prop({{2}}, {{0, ReturnValueIndex}})},
- {{{"fgetws"}}, TR::Prop({{2}}, {{0, ReturnValueIndex}})},
- {{{"fscanf"}}, TR::Prop({{0}}, {{}, 2})},
- {{{"fscanf_s"}}, TR::Prop({{0}}, {{}, {2}})},
- {{{"sscanf"}}, TR::Prop({{0}}, {{}, 2})},
-
- {{{"getc"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"getc_unlocked"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"getdelim"}}, TR::Prop({{3}}, {{0}})},
- {{{"getline"}}, TR::Prop({{2}}, {{0}})},
- {{{"getw"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"pread"}}, TR::Prop({{0, 1, 2, 3}}, {{1, ReturnValueIndex}})},
- {{{"read"}}, TR::Prop({{0, 2}}, {{1, ReturnValueIndex}})},
- {{{"strchr"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"strrchr"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"tolower"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"toupper"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"fread"}}, TR::Prop({{3}}, {{0, ReturnValueIndex}})},
- {{{"recv"}}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
- {{{"recvfrom"}}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
-
- {{{"ttyname"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"ttyname_r"}}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
-
- {{{"basename"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"dirname"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"fnmatch"}}, TR::Prop({{1}}, {{ReturnValueIndex}})},
- {{{"memchr"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"memrchr"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"rawmemchr"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
-
- {{{"mbtowc"}}, TR::Prop({{1}}, {{0, ReturnValueIndex}})},
- {{{"wctomb"}}, TR::Prop({{1}}, {{0, ReturnValueIndex}})},
- {{{"wcwidth"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
-
- {{{"memcmp"}}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
- {{{"memcpy"}}, TR::Prop({{1}}, {{0, ReturnValueIndex}})},
- {{{"memmove"}}, TR::Prop({{1}}, {{0, ReturnValueIndex}})},
- // If memmem was called with a tainted needle and the search was
- // successful, that would mean that the value pointed by the return value
- // has the same content as the needle. If we choose to go by the policy of
- // content equivalence implies taintedness equivalence, that would mean
- // haystack should be considered a propagation source argument.
- {{{"memmem"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
-
- // The comment for memmem above also applies to strstr.
- {{{"strstr"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"strcasestr"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
-
- {{{"strchrnul"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
-
- {{{"index"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"rindex"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"accept"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"atoi"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"atol"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"atoll"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"fgetc"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"fgetln"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibraryMaybeHardened, {"fgets"}},
+ TR::Prop({{2}}, {{0, ReturnValueIndex}})},
+ {{CDM::CLibraryMaybeHardened, {"fgetws"}},
+ TR::Prop({{2}}, {{0, ReturnValueIndex}})},
+ {{CDM::CLibrary, {"fscanf"}}, TR::Prop({{0}}, {{}, 2})},
+ {{CDM::CLibrary, {"fscanf_s"}}, TR::Prop({{0}}, {{}, 2})},
+ {{CDM::CLibrary, {"sscanf"}}, TR::Prop({{0}}, {{}, 2})},
+ {{CDM::CLibrary, {"sscanf_s"}}, TR::Prop({{0}}, {{}, 2})},
+
+ {{CDM::CLibrary, {"getc"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"getc_unlocked"}},
+ TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"getdelim"}}, TR::Prop({{3}}, {{0}})},
+ // TODO: this intends to match the C function `getline()`, but the call
+ // description also matches the C++ function `std::getline()`; it should
+ // be ruled out by some additional logic.
+ {{CDM::CLibrary, {"getline"}}, TR::Prop({{2}}, {{0}})},
+ {{CDM::CLibrary, {"getw"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibraryMaybeHardened, {"pread"}},
+ TR::Prop({{0, 1, 2, 3}}, {{1, ReturnValueIndex}})},
+ {{CDM::CLibraryMaybeHardened, {"read"}},
+ TR::Prop({{0, 2}}, {{1, ReturnValueIndex}})},
+ {{CDM::CLibraryMaybeHardened, {"fread"}},
+ TR::Prop({{3}}, {{0, ReturnValueIndex}})},
+ {{CDM::CLibraryMaybeHardened, {"recv"}},
+ TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+ {{CDM::CLibraryMaybeHardened, {"recvfrom"}},
+ TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+
+ {{CDM::CLibrary, {"ttyname"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"ttyname_r"}},
+ TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+
+ {{CDM::CLibrary, {"basename"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"dirname"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"fnmatch"}}, TR::Prop({{1}}, {{ReturnValueIndex}})},
+
+ {{CDM::CLibrary, {"mbtowc"}}, TR::Prop({{1}}, {{0, ReturnValueIndex}})},
+ {{CDM::CLibrary, {"wctomb"}}, TR::Prop({{1}}, {{0, ReturnValueIndex}})},
+ {{CDM::CLibrary, {"wcwidth"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ {{CDM::CLibrary, {"memcmp"}},
+ TR::Prop({{0, 1, 2}}, {{ReturnValueIndex}})},
+ {{CDM::CLibraryMaybeHardened, {"memcpy"}},
+ TR::Prop({{1, 2}}, {{0, ReturnValueIndex}})},
+ {{CDM::CLibraryMaybeHardened, {"memmove"}},
+ TR::Prop({{1, 2}}, {{0, ReturnValueIndex}})},
+ {{CDM::CLibraryMaybeHardened, {"bcopy"}}, TR::Prop({{0, 2}}, {{1}})},
+
+ // Note: "memmem" and its variants search for a byte sequence ("needle")
+ // in a larger area ("haystack"). Currently we only propagate taint from
+ // the haystack to the result, but in theory tampering with the needle
+ // could also produce incorrect results.
+ {{CDM::CLibrary, {"memmem"}}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"strstr"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"strcasestr"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ // Analogously, the following functions search for a byte within a buffer
+ // and we only propagate taint from the buffer to the result.
+ {{CDM::CLibraryMaybeHardened, {"memchr"}},
+ TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibraryMaybeHardened, {"memrchr"}},
+ TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"rawmemchr"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibraryMaybeHardened, {"strchr"}},
+ TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibraryMaybeHardened, {"strrchr"}},
+ TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibraryMaybeHardened, {"strchrnul"}},
+ TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"index"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"rindex"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
// FIXME: In case of arrays, only the first element of the array gets
// tainted.
- {{{"qsort"}}, TR::Prop({{0}}, {{0}})},
- {{{"qsort_r"}}, TR::Prop({{0}}, {{0}})},
-
- {{{"strcmp"}}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
- {{{"strcasecmp"}}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
- {{{"strncmp"}}, TR::Prop({{0, 1, 2}}, {{ReturnValueIndex}})},
- {{{"strncasecmp"}}, TR::Prop({{0, 1, 2}}, {{ReturnValueIndex}})},
- {{{"strspn"}}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
- {{{"strcspn"}}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
- {{{"strpbrk"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"strndup"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"strndupa"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"qsort"}}, TR::Prop({{0}}, {{0}})},
+ {{CDM::CLibrary, {"qsort_r"}}, TR::Prop({{0}}, {{0}})},
+
+ {{CDM::CLibrary, {"strcmp"}}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"strcasecmp"}},
+ TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"strncmp"}},
+ TR::Prop({{0, 1, 2}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"strncasecmp"}},
+ TR::Prop({{0, 1, 2}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"strspn"}}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"strcspn"}}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"strpbrk"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ {{CDM::CLibrary, {"strndup"}}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"strndupa"}}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"strdup"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"strdupa"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"wcsdup"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
// strlen, wcslen, strnlen and alike intentionally don't propagate taint.
// See the details here: https://github.com/llvm/llvm-project/pull/66086
- {{{"strtol"}}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
- {{{"strtoll"}}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
- {{{"strtoul"}}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
- {{{"strtoull"}}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
-
- {{{"isalnum"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"isalpha"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"isascii"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"isblank"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"iscntrl"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"isdigit"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"isgraph"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"islower"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"isprint"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"ispunct"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"isspace"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"isupper"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{{"isxdigit"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
-
- {{CDF_MaybeBuiltin, {BI.getName(Builtin::BIstrncat)}},
- TR::Prop({{1, 2}}, {{0, ReturnValueIndex}})},
- {{CDF_MaybeBuiltin, {BI.getName(Builtin::BIstrlcpy)}},
- TR::Prop({{1, 2}}, {{0}})},
- {{CDF_MaybeBuiltin, {BI.getName(Builtin::BIstrlcat)}},
- TR::Prop({{1, 2}}, {{0}})},
- {{CDF_MaybeBuiltin, {{"snprintf"}}},
- TR::Prop({{1}, 3}, {{0, ReturnValueIndex}})},
- {{CDF_MaybeBuiltin, {{"sprintf"}}},
- TR::Prop({{1}, 2}, {{0, ReturnValueIndex}})},
- {{CDF_MaybeBuiltin, {{"strcpy"}}},
- TR::Prop({{1}}, {{0, ReturnValueIndex}})},
- {{CDF_MaybeBuiltin, {{"stpcpy"}}},
+ {{CDM::CLibrary, {"strtol"}}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+ {{CDM::CLibrary, {"strtoll"}}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+ {{CDM::CLibrary, {"strtoul"}}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+ {{CDM::CLibrary, {"strtoull"}}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+
+ {{CDM::CLibrary, {"tolower"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"toupper"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ {{CDM::CLibrary, {"isalnum"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"isalpha"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"isascii"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"isblank"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"iscntrl"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"isdigit"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"isgraph"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"islower"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"isprint"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"ispunct"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"isspace"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"isupper"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibrary, {"isxdigit"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ {{CDM::CLibraryMaybeHardened, {"strcpy"}},
TR::Prop({{1}}, {{0, ReturnValueIndex}})},
- {{CDF_MaybeBuiltin, {{"strcat"}}},
+ {{CDM::CLibraryMaybeHardened, {"stpcpy"}},
TR::Prop({{1}}, {{0, ReturnValueIndex}})},
- {{CDF_MaybeBuiltin, {{"wcsncat"}}},
- TR::Prop({{1}}, {{0, ReturnValueIndex}})},
- {{CDF_MaybeBuiltin, {{"strdup"}}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{CDF_MaybeBuiltin, {{"strdupa"}}},
- TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{CDF_MaybeBuiltin, {{"wcsdup"}}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDM::CLibraryMaybeHardened, {"strcat"}},
+ TR::Prop({{0, 1}}, {{0, ReturnValueIndex}})},
+ {{CDM::CLibraryMaybeHardened, {"wcsncat"}},
+ TR::Prop({{0, 1}}, {{0, ReturnValueIndex}})},
+ {{CDM::CLibraryMaybeHardened, {"strncpy"}},
+ TR::Prop({{1, 2}}, {{0, ReturnValueIndex}})},
+ {{CDM::CLibraryMaybeHardened, {"strncat"}},
+ TR::Prop({{0, 1, 2}}, {{0, ReturnValueIndex}})},
+ {{CDM::CLibraryMaybeHardened, {"strlcpy"}}, TR::Prop({{1, 2}}, {{0}})},
+ {{CDM::CLibraryMaybeHardened, {"strlcat"}}, TR::Prop({{0, 1, 2}}, {{0}})},
+
+ // Usually the matching mode `CDM::CLibraryMaybeHardened` is sufficient
+ // for unified handling of a function `FOO()` and its hardened variant
+ // `__FOO_chk()`, but in the "sprintf" family the extra parameters of the
+ // hardened variants are inserted into the middle of the parameter list,
+ // so that would not work in their case.
+ // int snprintf(char * str, size_t maxlen, const char * format, ...);
+ {{CDM::CLibrary, {"snprintf"}},
+ TR::Prop({{1, 2}, 3}, {{0, ReturnValueIndex}})},
+ // int sprintf(char * str, const char * format, ...);
+ {{CDM::CLibrary, {"sprintf"}},
+ TR::Prop({{1}, 2}, {{0, ReturnValueIndex}})},
+ // int __snprintf_chk(char * str, size_t maxlen, int flag, size_t strlen,
+ // const char * format, ...);
+ {{CDM::CLibrary, {"__snprintf_chk"}},
+ TR::Prop({{1, 4}, 5}, {{0, ReturnValueIndex}})},
+ // int __sprintf_chk(char * str, int flag, size_t strlen, const char *
+ // format, ...);
+ {{CDM::CLibrary, {"__sprintf_chk"}},
+ TR::Prop({{3}, 4}, {{0, ReturnValueIndex}})},
// Sinks
- {{{"system"}}, TR::Sink({{0}}, MsgSanitizeSystemArgs)},
- {{{"popen"}}, TR::Sink({{0}}, MsgSanitizeSystemArgs)},
- {{{"execl"}}, TR::Sink({{}, {0}}, MsgSanitizeSystemArgs)},
- {{{"execle"}}, TR::Sink({{}, {0}}, MsgSanitizeSystemArgs)},
- {{{"execlp"}}, TR::Sink({{}, {0}}, MsgSanitizeSystemArgs)},
- {{{"execv"}}, TR::Sink({{0, 1}}, MsgSanitizeSystemArgs)},
- {{{"execve"}}, TR::Sink({{0, 1, 2}}, MsgSanitizeSystemArgs)},
- {{{"fexecve"}}, TR::Sink({{0, 1, 2}}, MsgSanitizeSystemArgs)},
- {{{"execvp"}}, TR::Sink({{0, 1}}, MsgSanitizeSystemArgs)},
- {{{"execvpe"}}, TR::Sink({{0, 1, 2}}, MsgSanitizeSystemArgs)},
- {{{"dlopen"}}, TR::Sink({{0}}, MsgSanitizeSystemArgs)},
- {{CDF_MaybeBuiltin, {{"malloc"}}}, TR::Sink({{0}}, MsgTaintedBufferSize)},
- {{CDF_MaybeBuiltin, {{"calloc"}}}, TR::Sink({{0}}, MsgTaintedBufferSize)},
- {{CDF_MaybeBuiltin, {{"alloca"}}}, TR::Sink({{0}}, MsgTaintedBufferSize)},
- {{CDF_MaybeBuiltin, {{"memccpy"}}},
- TR::Sink({{3}}, MsgTaintedBufferSize)},
- {{CDF_MaybeBuiltin, {{"realloc"}}},
- TR::Sink({{1}}, MsgTaintedBufferSize)},
- {{{{"setproctitle"}}}, TR::Sink({{0}, 1}, MsgUncontrolledFormatString)},
- {{{{"setproctitle_fast"}}},
+ {{CDM::CLibrary, {"system"}}, TR::Sink({{0}}, MsgSanitizeSystemArgs)},
+ {{CDM::CLibrary, {"popen"}}, TR::Sink({{0}}, MsgSanitizeSystemArgs)},
+ {{CDM::CLibrary, {"execl"}}, TR::Sink({{}, {0}}, MsgSanitizeSystemArgs)},
+ {{CDM::CLibrary, {"execle"}}, TR::Sink({{}, {0}}, MsgSanitizeSystemArgs)},
+ {{CDM::CLibrary, {"execlp"}}, TR::Sink({{}, {0}}, MsgSanitizeSystemArgs)},
+ {{CDM::CLibrary, {"execv"}}, TR::Sink({{0, 1}}, MsgSanitizeSystemArgs)},
+ {{CDM::CLibrary, {"execve"}},
+ TR::Sink({{0, 1, 2}}, MsgSanitizeSystemArgs)},
+ {{CDM::CLibrary, {"fexecve"}},
+ TR::Sink({{0, 1, 2}}, MsgSanitizeSystemArgs)},
+ {{CDM::CLibrary, {"execvp"}}, TR::Sink({{0, 1}}, MsgSanitizeSystemArgs)},
+ {{CDM::CLibrary, {"execvpe"}},
+ TR::Sink({{0, 1, 2}}, MsgSanitizeSystemArgs)},
+ {{CDM::CLibrary, {"dlopen"}}, TR::Sink({{0}}, MsgSanitizeSystemArgs)},
+
+ // malloc, calloc, alloca, realloc, memccpy
+ // are intentionally not marked as taint sinks because unconditional
+ // reporting for these functions generates many false positives.
+ // These taint sinks should be implemented in other checkers with more
+ // sophisticated sanitation heuristics.
+
+ {{CDM::CLibrary, {"setproctitle"}},
TR::Sink({{0}, 1}, MsgUncontrolledFormatString)},
+ {{CDM::CLibrary, {"setproctitle_fast"}},
+ TR::Sink({{0}, 1}, MsgUncontrolledFormatString)}};
- // SinkProps
- {{CDF_MaybeBuiltin, BI.getName(Builtin::BImemcpy)},
- TR::SinkProp({{2}}, {{1, 2}}, {{0, ReturnValueIndex}},
- MsgTaintedBufferSize)},
- {{CDF_MaybeBuiltin, {BI.getName(Builtin::BImemmove)}},
- TR::SinkProp({{2}}, {{1, 2}}, {{0, ReturnValueIndex}},
- MsgTaintedBufferSize)},
- {{CDF_MaybeBuiltin, {BI.getName(Builtin::BIstrncpy)}},
- TR::SinkProp({{2}}, {{1, 2}}, {{0, ReturnValueIndex}},
- MsgTaintedBufferSize)},
- {{CDF_MaybeBuiltin, {BI.getName(Builtin::BIstrndup)}},
- TR::SinkProp({{1}}, {{0, 1}}, {{ReturnValueIndex}},
- MsgTaintedBufferSize)},
- {{CDF_MaybeBuiltin, {{"bcopy"}}},
- TR::SinkProp({{2}}, {{0, 2}}, {{1}}, MsgTaintedBufferSize)}};
-
- // `getenv` returns taint only in untrusted environments.
if (TR::UntrustedEnv(C)) {
// void setproctitle_init(int argc, char *argv[], char *envp[])
+ // TODO: replace `MsgCustomSink` with a message that fits this situation.
+ GlobalCRules.push_back({{CDM::CLibrary, {"setproctitle_init"}},
+ TR::Sink({{1, 2}}, MsgCustomSink)});
+
+ // `getenv` returns taint only in untrusted environments.
GlobalCRules.push_back(
- {{{"setproctitle_init"}}, TR::Sink({{1, 2}}, MsgCustomSink)});
- GlobalCRules.push_back({{{"getenv"}}, TR::Source({{ReturnValueIndex}})});
+ {{CDM::CLibrary, {"getenv"}}, TR::Source({{ReturnValueIndex}})});
}
StaticTaintRules.emplace(std::make_move_iterator(GlobalCRules.begin()),
@@ -1019,6 +1035,8 @@ bool GenericTaintRule::UntrustedEnv(CheckerContext &C) {
bool GenericTaintChecker::generateReportIfTainted(const Expr *E, StringRef Msg,
CheckerContext &C) const {
assert(E);
+ if (!isTaintReporterCheckerEnabled)
+ return false;
std::optional<SVal> TaintedSVal =
getTaintedPointeeOrPointer(C.getState(), C.getSVal(E));
@@ -1026,13 +1044,14 @@ bool GenericTaintChecker::generateReportIfTainted(const Expr *E, StringRef Msg,
return false;
// Generate diagnostic.
- if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
- auto report = std::make_unique<PathSensitiveBugReport>(BT, Msg, N);
+ assert(BT);
+ static CheckerProgramPointTag Tag(BT->getCheckerName(), Msg);
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(C.getState(), &Tag)) {
+ auto report = std::make_unique<PathSensitiveBugReport>(*BT, Msg, N);
report->addRange(E->getSourceRange());
for (auto TaintedSym : getTaintedSymbols(C.getState(), *TaintedSVal)) {
report->markInteresting(TaintedSym);
}
-
C.emitReport(std::move(report));
return true;
}
@@ -1089,15 +1108,14 @@ void GenericTaintChecker::taintUnsafeSocketProtocol(const CallEvent &Call,
const IdentifierInfo *ID = Call.getCalleeIdentifier();
if (!ID)
return;
- if (!ID->getName().equals("socket"))
+ if (ID->getName() != "socket")
return;
SourceLocation DomLoc = Call.getArgExpr(0)->getExprLoc();
StringRef DomName = C.getMacroNameOrSpelling(DomLoc);
// Allow internal communication protocols.
- bool SafeProtocol = DomName.equals("AF_SYSTEM") ||
- DomName.equals("AF_LOCAL") || DomName.equals("AF_UNIX") ||
- DomName.equals("AF_RESERVED_36");
+ bool SafeProtocol = DomName == "AF_SYSTEM" || DomName == "AF_LOCAL" ||
+ DomName == "AF_UNIX" || DomName == "AF_RESERVED_36";
if (SafeProtocol)
return;
@@ -1109,10 +1127,21 @@ void GenericTaintChecker::taintUnsafeSocketProtocol(const CallEvent &Call,
}
/// Checker registration
-void ento::registerGenericTaintChecker(CheckerManager &Mgr) {
+void ento::registerTaintPropagationChecker(CheckerManager &Mgr) {
Mgr.registerChecker<GenericTaintChecker>();
}
+bool ento::shouldRegisterTaintPropagationChecker(const CheckerManager &mgr) {
+ return true;
+}
+
+void ento::registerGenericTaintChecker(CheckerManager &Mgr) {
+ GenericTaintChecker *checker = Mgr.getChecker<GenericTaintChecker>();
+ checker->isTaintReporterCheckerEnabled = true;
+ checker->BT.emplace(Mgr.getCurrentCheckerName(), "Use of Untrusted Data",
+ categories::TaintedData);
+}
+
bool ento::shouldRegisterGenericTaintChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
index 1cf81b54e77d..7ac34ef8164e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
@@ -350,7 +350,7 @@ static bool isIdenticalStmt(const ASTContext &Ctx, const Stmt *Stmt1,
return false;
case Stmt::CallExprClass:
case Stmt::ArraySubscriptExprClass:
- case Stmt::OMPArraySectionExprClass:
+ case Stmt::ArraySectionExprClass:
case Stmt::OMPArrayShapingExprClass:
case Stmt::OMPIteratorExprClass:
case Stmt::ImplicitCastExprClass:
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
index b673b51c4623..261db2b2a704 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
@@ -35,9 +35,28 @@ namespace {
class InnerPointerChecker
: public Checker<check::DeadSymbols, check::PostCall> {
- CallDescription AppendFn, AssignFn, AddressofFn, AddressofFn_, ClearFn,
- CStrFn, DataFn, DataMemberFn, EraseFn, InsertFn, PopBackFn, PushBackFn,
- ReplaceFn, ReserveFn, ResizeFn, ShrinkToFitFn, SwapFn;
+ CallDescriptionSet InvalidatingMemberFunctions{
+ CallDescription(CDM::CXXMethod, {"std", "basic_string", "append"}),
+ CallDescription(CDM::CXXMethod, {"std", "basic_string", "assign"}),
+ CallDescription(CDM::CXXMethod, {"std", "basic_string", "clear"}),
+ CallDescription(CDM::CXXMethod, {"std", "basic_string", "erase"}),
+ CallDescription(CDM::CXXMethod, {"std", "basic_string", "insert"}),
+ CallDescription(CDM::CXXMethod, {"std", "basic_string", "pop_back"}),
+ CallDescription(CDM::CXXMethod, {"std", "basic_string", "push_back"}),
+ CallDescription(CDM::CXXMethod, {"std", "basic_string", "replace"}),
+ CallDescription(CDM::CXXMethod, {"std", "basic_string", "reserve"}),
+ CallDescription(CDM::CXXMethod, {"std", "basic_string", "resize"}),
+ CallDescription(CDM::CXXMethod, {"std", "basic_string", "shrink_to_fit"}),
+ CallDescription(CDM::CXXMethod, {"std", "basic_string", "swap"})};
+
+ CallDescriptionSet AddressofFunctions{
+ CallDescription(CDM::SimpleFunc, {"std", "addressof"}),
+ CallDescription(CDM::SimpleFunc, {"std", "__addressof"})};
+
+ CallDescriptionSet InnerPointerAccessFunctions{
+ CallDescription(CDM::CXXMethod, {"std", "basic_string", "c_str"}),
+ CallDescription(CDM::SimpleFunc, {"std", "data"}, 1),
+ CallDescription(CDM::CXXMethod, {"std", "basic_string", "data"})};
public:
class InnerPointerBRVisitor : public BugReporterVisitor {
@@ -71,30 +90,10 @@ public:
}
};
- InnerPointerChecker()
- : AppendFn({"std", "basic_string", "append"}),
- AssignFn({"std", "basic_string", "assign"}),
- AddressofFn({"std", "addressof"}), AddressofFn_({"std", "__addressof"}),
- ClearFn({"std", "basic_string", "clear"}),
- CStrFn({"std", "basic_string", "c_str"}), DataFn({"std", "data"}, 1),
- DataMemberFn({"std", "basic_string", "data"}),
- EraseFn({"std", "basic_string", "erase"}),
- InsertFn({"std", "basic_string", "insert"}),
- PopBackFn({"std", "basic_string", "pop_back"}),
- PushBackFn({"std", "basic_string", "push_back"}),
- ReplaceFn({"std", "basic_string", "replace"}),
- ReserveFn({"std", "basic_string", "reserve"}),
- ResizeFn({"std", "basic_string", "resize"}),
- ShrinkToFitFn({"std", "basic_string", "shrink_to_fit"}),
- SwapFn({"std", "basic_string", "swap"}) {}
-
/// Check whether the called member function potentially invalidates
/// pointers referring to the container object's inner buffer.
bool isInvalidatingMemberFunction(const CallEvent &Call) const;
- /// Check whether the called function returns a raw inner pointer.
- bool isInnerPointerAccessFunction(const CallEvent &Call) const;
-
/// Mark pointer symbols associated with the given memory region released
/// in the program state.
void markPtrSymbolsReleased(const CallEvent &Call, ProgramStateRef State,
@@ -127,14 +126,7 @@ bool InnerPointerChecker::isInvalidatingMemberFunction(
return false;
}
return isa<CXXDestructorCall>(Call) ||
- matchesAny(Call, AppendFn, AssignFn, ClearFn, EraseFn, InsertFn,
- PopBackFn, PushBackFn, ReplaceFn, ReserveFn, ResizeFn,
- ShrinkToFitFn, SwapFn);
-}
-
-bool InnerPointerChecker::isInnerPointerAccessFunction(
- const CallEvent &Call) const {
- return matchesAny(Call, CStrFn, DataFn, DataMemberFn);
+ InvalidatingMemberFunctions.contains(Call);
}
void InnerPointerChecker::markPtrSymbolsReleased(const CallEvent &Call,
@@ -181,7 +173,7 @@ void InnerPointerChecker::checkFunctionArguments(const CallEvent &Call,
// std::addressof functions accepts a non-const reference as an argument,
// but doesn't modify it.
- if (matchesAny(Call, AddressofFn, AddressofFn_))
+ if (AddressofFunctions.contains(Call))
continue;
markPtrSymbolsReleased(Call, State, ArgRegion, C);
@@ -221,7 +213,7 @@ void InnerPointerChecker::checkPostCall(const CallEvent &Call,
}
}
- if (isInnerPointerAccessFunction(Call)) {
+ if (InnerPointerAccessFunctions.contains(Call)) {
if (isa<SimpleFunctionCall>(Call)) {
// NOTE: As of now, we only have one free access function: std::data.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InvalidatedIteratorChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InvalidatedIteratorChecker.cpp
index 3f5856a3efbe..2e21f619a133 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InvalidatedIteratorChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InvalidatedIteratorChecker.cpp
@@ -47,7 +47,7 @@ public:
};
-} //namespace
+} // namespace
void InvalidatedIteratorChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
index a95e811c2a41..5649454b4cd4 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
@@ -129,19 +129,20 @@ class IteratorModeling
CallDescriptionMap<AdvanceFn> AdvanceLikeFunctions = {
// template<class InputIt, class Distance>
// void advance(InputIt& it, Distance n);
- {{{"std", "advance"}, 2}, &IteratorModeling::handleAdvance},
+ {{CDM::SimpleFunc, {"std", "advance"}, 2},
+ &IteratorModeling::handleAdvance},
// template<class BidirIt>
// BidirIt prev(
// BidirIt it,
// typename std::iterator_traits<BidirIt>::difference_type n = 1);
- {{{"std", "prev"}, 2}, &IteratorModeling::handlePrev},
+ {{CDM::SimpleFunc, {"std", "prev"}, 2}, &IteratorModeling::handlePrev},
// template<class ForwardIt>
// ForwardIt next(
// ForwardIt it,
// typename std::iterator_traits<ForwardIt>::difference_type n = 1);
- {{{"std", "next"}, 2}, &IteratorModeling::handleNext},
+ {{CDM::SimpleFunc, {"std", "next"}, 2}, &IteratorModeling::handleNext},
};
public:
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp
index c8828219dd73..4dd2f700a2a0 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp
@@ -56,10 +56,15 @@ public:
using AdvanceFn = void (IteratorRangeChecker::*)(CheckerContext &, SVal,
SVal) const;
+ // FIXME: these three functions are also listed in IteratorModeling.cpp,
+ // perhaps unify their handling?
CallDescriptionMap<AdvanceFn> AdvanceFunctions = {
- {{{"std", "advance"}, 2}, &IteratorRangeChecker::verifyAdvance},
- {{{"std", "prev"}, 2}, &IteratorRangeChecker::verifyPrev},
- {{{"std", "next"}, 2}, &IteratorRangeChecker::verifyNext},
+ {{CDM::SimpleFunc, {"std", "advance"}, 2},
+ &IteratorRangeChecker::verifyAdvance},
+ {{CDM::SimpleFunc, {"std", "prev"}, 2},
+ &IteratorRangeChecker::verifyPrev},
+ {{CDM::SimpleFunc, {"std", "next"}, 2},
+ &IteratorRangeChecker::verifyNext},
};
};
@@ -68,7 +73,7 @@ bool isAheadOfRange(ProgramStateRef State, const IteratorPosition &Pos);
bool isBehindPastTheEnd(ProgramStateRef State, const IteratorPosition &Pos);
bool isZero(ProgramStateRef State, NonLoc Val);
-} //namespace
+} // namespace
void IteratorRangeChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
index fa51aa80216b..1cb3848cfed2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
@@ -41,7 +41,7 @@ static bool InNamespace(const Decl *D, StringRef NS) {
if (!ND)
return false;
const IdentifierInfo *II = ND->getIdentifier();
- if (!II || !II->getName().equals(NS))
+ if (!II || II->getName() != NS)
return false;
return isa<TranslationUnitDecl>(ND->getDeclContext());
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
index 812d787e2e37..f524c4c067c8 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
@@ -154,11 +154,11 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(UISearchDisplayController, setSearchResultsTitle, 0)
NEW_RECEIVER(UITabBarItem)
- IdentifierInfo *initWithTitleUITabBarItemTag[] = {
+ const IdentifierInfo *initWithTitleUITabBarItemTag[] = {
&Ctx.Idents.get("initWithTitle"), &Ctx.Idents.get("image"),
&Ctx.Idents.get("tag")};
ADD_METHOD(UITabBarItem, initWithTitleUITabBarItemTag, 3, 0)
- IdentifierInfo *initWithTitleUITabBarItemImage[] = {
+ const IdentifierInfo *initWithTitleUITabBarItemImage[] = {
&Ctx.Idents.get("initWithTitle"), &Ctx.Idents.get("image"),
&Ctx.Idents.get("selectedImage")};
ADD_METHOD(UITabBarItem, initWithTitleUITabBarItemImage, 3, 0)
@@ -171,7 +171,7 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(NSStatusItem, setToolTip, 0)
NEW_RECEIVER(UITableViewRowAction)
- IdentifierInfo *rowActionWithStyleUITableViewRowAction[] = {
+ const IdentifierInfo *rowActionWithStyleUITableViewRowAction[] = {
&Ctx.Idents.get("rowActionWithStyle"), &Ctx.Idents.get("title"),
&Ctx.Idents.get("handler")};
ADD_METHOD(UITableViewRowAction, rowActionWithStyleUITableViewRowAction, 3, 1)
@@ -183,19 +183,19 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
NEW_RECEIVER(NSButton)
ADD_UNARY_METHOD(NSButton, setTitle, 0)
ADD_UNARY_METHOD(NSButton, setAlternateTitle, 0)
- IdentifierInfo *radioButtonWithTitleNSButton[] = {
+ const IdentifierInfo *radioButtonWithTitleNSButton[] = {
&Ctx.Idents.get("radioButtonWithTitle"), &Ctx.Idents.get("target"),
&Ctx.Idents.get("action")};
ADD_METHOD(NSButton, radioButtonWithTitleNSButton, 3, 0)
- IdentifierInfo *buttonWithTitleNSButtonImage[] = {
+ const IdentifierInfo *buttonWithTitleNSButtonImage[] = {
&Ctx.Idents.get("buttonWithTitle"), &Ctx.Idents.get("image"),
&Ctx.Idents.get("target"), &Ctx.Idents.get("action")};
ADD_METHOD(NSButton, buttonWithTitleNSButtonImage, 4, 0)
- IdentifierInfo *checkboxWithTitleNSButton[] = {
+ const IdentifierInfo *checkboxWithTitleNSButton[] = {
&Ctx.Idents.get("checkboxWithTitle"), &Ctx.Idents.get("target"),
&Ctx.Idents.get("action")};
ADD_METHOD(NSButton, checkboxWithTitleNSButton, 3, 0)
- IdentifierInfo *buttonWithTitleNSButtonTarget[] = {
+ const IdentifierInfo *buttonWithTitleNSButtonTarget[] = {
&Ctx.Idents.get("buttonWithTitle"), &Ctx.Idents.get("target"),
&Ctx.Idents.get("action")};
ADD_METHOD(NSButton, buttonWithTitleNSButtonTarget, 3, 0)
@@ -215,8 +215,8 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(NSTabViewItem, setToolTip, 0)
NEW_RECEIVER(NSBrowser)
- IdentifierInfo *setTitleNSBrowser[] = {&Ctx.Idents.get("setTitle"),
- &Ctx.Idents.get("ofColumn")};
+ const IdentifierInfo *setTitleNSBrowser[] = {&Ctx.Idents.get("setTitle"),
+ &Ctx.Idents.get("ofColumn")};
ADD_METHOD(NSBrowser, setTitleNSBrowser, 2, 0)
NEW_RECEIVER(UIAccessibilityElement)
@@ -225,14 +225,14 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(UIAccessibilityElement, setAccessibilityValue, 0)
NEW_RECEIVER(UIAlertAction)
- IdentifierInfo *actionWithTitleUIAlertAction[] = {
+ const IdentifierInfo *actionWithTitleUIAlertAction[] = {
&Ctx.Idents.get("actionWithTitle"), &Ctx.Idents.get("style"),
&Ctx.Idents.get("handler")};
ADD_METHOD(UIAlertAction, actionWithTitleUIAlertAction, 3, 0)
NEW_RECEIVER(NSPopUpButton)
ADD_UNARY_METHOD(NSPopUpButton, addItemWithTitle, 0)
- IdentifierInfo *insertItemWithTitleNSPopUpButton[] = {
+ const IdentifierInfo *insertItemWithTitleNSPopUpButton[] = {
&Ctx.Idents.get("insertItemWithTitle"), &Ctx.Idents.get("atIndex")};
ADD_METHOD(NSPopUpButton, insertItemWithTitleNSPopUpButton, 2, 0)
ADD_UNARY_METHOD(NSPopUpButton, removeItemWithTitle, 0)
@@ -240,7 +240,7 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(NSPopUpButton, setTitle, 0)
NEW_RECEIVER(NSTableViewRowAction)
- IdentifierInfo *rowActionWithStyleNSTableViewRowAction[] = {
+ const IdentifierInfo *rowActionWithStyleNSTableViewRowAction[] = {
&Ctx.Idents.get("rowActionWithStyle"), &Ctx.Idents.get("title"),
&Ctx.Idents.get("handler")};
ADD_METHOD(NSTableViewRowAction, rowActionWithStyleNSTableViewRowAction, 3, 1)
@@ -273,10 +273,10 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(NSTableColumn, setHeaderToolTip, 0)
NEW_RECEIVER(NSSegmentedControl)
- IdentifierInfo *setLabelNSSegmentedControl[] = {
+ const IdentifierInfo *setLabelNSSegmentedControl[] = {
&Ctx.Idents.get("setLabel"), &Ctx.Idents.get("forSegment")};
ADD_METHOD(NSSegmentedControl, setLabelNSSegmentedControl, 2, 0)
- IdentifierInfo *setToolTipNSSegmentedControl[] = {
+ const IdentifierInfo *setToolTipNSSegmentedControl[] = {
&Ctx.Idents.get("setToolTip"), &Ctx.Idents.get("forSegment")};
ADD_METHOD(NSSegmentedControl, setToolTipNSSegmentedControl, 2, 0)
@@ -301,8 +301,8 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(NSAccessibility, setAccessibilityHelp, 0)
NEW_RECEIVER(NSMatrix)
- IdentifierInfo *setToolTipNSMatrix[] = {&Ctx.Idents.get("setToolTip"),
- &Ctx.Idents.get("forCell")};
+ const IdentifierInfo *setToolTipNSMatrix[] = {&Ctx.Idents.get("setToolTip"),
+ &Ctx.Idents.get("forCell")};
ADD_METHOD(NSMatrix, setToolTipNSMatrix, 2, 0)
NEW_RECEIVER(NSPrintPanel)
@@ -317,13 +317,13 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(NSSlider, setTitle, 0)
NEW_RECEIVER(UIMenuItem)
- IdentifierInfo *initWithTitleUIMenuItem[] = {&Ctx.Idents.get("initWithTitle"),
- &Ctx.Idents.get("action")};
+ const IdentifierInfo *initWithTitleUIMenuItem[] = {
+ &Ctx.Idents.get("initWithTitle"), &Ctx.Idents.get("action")};
ADD_METHOD(UIMenuItem, initWithTitleUIMenuItem, 2, 0)
ADD_UNARY_METHOD(UIMenuItem, setTitle, 0)
NEW_RECEIVER(UIAlertController)
- IdentifierInfo *alertControllerWithTitleUIAlertController[] = {
+ const IdentifierInfo *alertControllerWithTitleUIAlertController[] = {
&Ctx.Idents.get("alertControllerWithTitle"), &Ctx.Idents.get("message"),
&Ctx.Idents.get("preferredStyle")};
ADD_METHOD(UIAlertController, alertControllerWithTitleUIAlertController, 3, 1)
@@ -331,19 +331,19 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(UIAlertController, setMessage, 0)
NEW_RECEIVER(UIApplicationShortcutItem)
- IdentifierInfo *initWithTypeUIApplicationShortcutItemIcon[] = {
+ const IdentifierInfo *initWithTypeUIApplicationShortcutItemIcon[] = {
&Ctx.Idents.get("initWithType"), &Ctx.Idents.get("localizedTitle"),
&Ctx.Idents.get("localizedSubtitle"), &Ctx.Idents.get("icon"),
&Ctx.Idents.get("userInfo")};
ADD_METHOD(UIApplicationShortcutItem,
initWithTypeUIApplicationShortcutItemIcon, 5, 1)
- IdentifierInfo *initWithTypeUIApplicationShortcutItem[] = {
+ const IdentifierInfo *initWithTypeUIApplicationShortcutItem[] = {
&Ctx.Idents.get("initWithType"), &Ctx.Idents.get("localizedTitle")};
ADD_METHOD(UIApplicationShortcutItem, initWithTypeUIApplicationShortcutItem,
2, 1)
NEW_RECEIVER(UIActionSheet)
- IdentifierInfo *initWithTitleUIActionSheet[] = {
+ const IdentifierInfo *initWithTitleUIActionSheet[] = {
&Ctx.Idents.get("initWithTitle"), &Ctx.Idents.get("delegate"),
&Ctx.Idents.get("cancelButtonTitle"),
&Ctx.Idents.get("destructiveButtonTitle"),
@@ -353,7 +353,7 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(UIActionSheet, setTitle, 0)
NEW_RECEIVER(UIAccessibilityCustomAction)
- IdentifierInfo *initWithNameUIAccessibilityCustomAction[] = {
+ const IdentifierInfo *initWithNameUIAccessibilityCustomAction[] = {
&Ctx.Idents.get("initWithName"), &Ctx.Idents.get("target"),
&Ctx.Idents.get("selector")};
ADD_METHOD(UIAccessibilityCustomAction,
@@ -382,7 +382,7 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
NEW_RECEIVER(NSAttributedString)
ADD_UNARY_METHOD(NSAttributedString, initWithString, 0)
- IdentifierInfo *initWithStringNSAttributedString[] = {
+ const IdentifierInfo *initWithStringNSAttributedString[] = {
&Ctx.Idents.get("initWithString"), &Ctx.Idents.get("attributes")};
ADD_METHOD(NSAttributedString, initWithStringNSAttributedString, 2, 0)
@@ -390,7 +390,7 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(NSText, setString, 0)
NEW_RECEIVER(UIKeyCommand)
- IdentifierInfo *keyCommandWithInputUIKeyCommand[] = {
+ const IdentifierInfo *keyCommandWithInputUIKeyCommand[] = {
&Ctx.Idents.get("keyCommandWithInput"), &Ctx.Idents.get("modifierFlags"),
&Ctx.Idents.get("action"), &Ctx.Idents.get("discoverabilityTitle")};
ADD_METHOD(UIKeyCommand, keyCommandWithInputUIKeyCommand, 4, 3)
@@ -400,7 +400,7 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(UILabel, setText, 0)
NEW_RECEIVER(NSAlert)
- IdentifierInfo *alertWithMessageTextNSAlert[] = {
+ const IdentifierInfo *alertWithMessageTextNSAlert[] = {
&Ctx.Idents.get("alertWithMessageText"), &Ctx.Idents.get("defaultButton"),
&Ctx.Idents.get("alternateButton"), &Ctx.Idents.get("otherButton"),
&Ctx.Idents.get("informativeTextWithFormat")};
@@ -415,13 +415,13 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(UIMutableApplicationShortcutItem, setLocalizedSubtitle, 0)
NEW_RECEIVER(UIButton)
- IdentifierInfo *setTitleUIButton[] = {&Ctx.Idents.get("setTitle"),
- &Ctx.Idents.get("forState")};
+ const IdentifierInfo *setTitleUIButton[] = {&Ctx.Idents.get("setTitle"),
+ &Ctx.Idents.get("forState")};
ADD_METHOD(UIButton, setTitleUIButton, 2, 0)
NEW_RECEIVER(NSWindow)
ADD_UNARY_METHOD(NSWindow, setTitle, 0)
- IdentifierInfo *minFrameWidthWithTitleNSWindow[] = {
+ const IdentifierInfo *minFrameWidthWithTitleNSWindow[] = {
&Ctx.Idents.get("minFrameWidthWithTitle"), &Ctx.Idents.get("styleMask")};
ADD_METHOD(NSWindow, minFrameWidthWithTitleNSWindow, 2, 0)
ADD_UNARY_METHOD(NSWindow, setMiniwindowTitle, 0)
@@ -430,7 +430,7 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(NSPathCell, setPlaceholderString, 0)
NEW_RECEIVER(UIDocumentMenuViewController)
- IdentifierInfo *addOptionWithTitleUIDocumentMenuViewController[] = {
+ const IdentifierInfo *addOptionWithTitleUIDocumentMenuViewController[] = {
&Ctx.Idents.get("addOptionWithTitle"), &Ctx.Idents.get("image"),
&Ctx.Idents.get("order"), &Ctx.Idents.get("handler")};
ADD_METHOD(UIDocumentMenuViewController,
@@ -442,7 +442,7 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(UINavigationItem, setPrompt, 0)
NEW_RECEIVER(UIAlertView)
- IdentifierInfo *initWithTitleUIAlertView[] = {
+ const IdentifierInfo *initWithTitleUIAlertView[] = {
&Ctx.Idents.get("initWithTitle"), &Ctx.Idents.get("message"),
&Ctx.Idents.get("delegate"), &Ctx.Idents.get("cancelButtonTitle"),
&Ctx.Idents.get("otherButtonTitles")};
@@ -474,11 +474,11 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(NSProgress, setLocalizedAdditionalDescription, 0)
NEW_RECEIVER(NSSegmentedCell)
- IdentifierInfo *setLabelNSSegmentedCell[] = {&Ctx.Idents.get("setLabel"),
- &Ctx.Idents.get("forSegment")};
+ const IdentifierInfo *setLabelNSSegmentedCell[] = {
+ &Ctx.Idents.get("setLabel"), &Ctx.Idents.get("forSegment")};
ADD_METHOD(NSSegmentedCell, setLabelNSSegmentedCell, 2, 0)
- IdentifierInfo *setToolTipNSSegmentedCell[] = {&Ctx.Idents.get("setToolTip"),
- &Ctx.Idents.get("forSegment")};
+ const IdentifierInfo *setToolTipNSSegmentedCell[] = {
+ &Ctx.Idents.get("setToolTip"), &Ctx.Idents.get("forSegment")};
ADD_METHOD(NSSegmentedCell, setToolTipNSSegmentedCell, 2, 0)
NEW_RECEIVER(NSUndoManager)
@@ -487,7 +487,7 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(NSUndoManager, redoMenuTitleForUndoActionName, 0)
NEW_RECEIVER(NSMenuItem)
- IdentifierInfo *initWithTitleNSMenuItem[] = {
+ const IdentifierInfo *initWithTitleNSMenuItem[] = {
&Ctx.Idents.get("initWithTitle"), &Ctx.Idents.get("action"),
&Ctx.Idents.get("keyEquivalent")};
ADD_METHOD(NSMenuItem, initWithTitleNSMenuItem, 3, 0)
@@ -495,11 +495,11 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(NSMenuItem, setToolTip, 0)
NEW_RECEIVER(NSPopUpButtonCell)
- IdentifierInfo *initTextCellNSPopUpButtonCell[] = {
+ const IdentifierInfo *initTextCellNSPopUpButtonCell[] = {
&Ctx.Idents.get("initTextCell"), &Ctx.Idents.get("pullsDown")};
ADD_METHOD(NSPopUpButtonCell, initTextCellNSPopUpButtonCell, 2, 0)
ADD_UNARY_METHOD(NSPopUpButtonCell, addItemWithTitle, 0)
- IdentifierInfo *insertItemWithTitleNSPopUpButtonCell[] = {
+ const IdentifierInfo *insertItemWithTitleNSPopUpButtonCell[] = {
&Ctx.Idents.get("insertItemWithTitle"), &Ctx.Idents.get("atIndex")};
ADD_METHOD(NSPopUpButtonCell, insertItemWithTitleNSPopUpButtonCell, 2, 0)
ADD_UNARY_METHOD(NSPopUpButtonCell, removeItemWithTitle, 0)
@@ -511,11 +511,11 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
NEW_RECEIVER(NSMenu)
ADD_UNARY_METHOD(NSMenu, initWithTitle, 0)
- IdentifierInfo *insertItemWithTitleNSMenu[] = {
+ const IdentifierInfo *insertItemWithTitleNSMenu[] = {
&Ctx.Idents.get("insertItemWithTitle"), &Ctx.Idents.get("action"),
&Ctx.Idents.get("keyEquivalent"), &Ctx.Idents.get("atIndex")};
ADD_METHOD(NSMenu, insertItemWithTitleNSMenu, 4, 0)
- IdentifierInfo *addItemWithTitleNSMenu[] = {
+ const IdentifierInfo *addItemWithTitleNSMenu[] = {
&Ctx.Idents.get("addItemWithTitle"), &Ctx.Idents.get("action"),
&Ctx.Idents.get("keyEquivalent")};
ADD_METHOD(NSMenu, addItemWithTitleNSMenu, 3, 0)
@@ -526,15 +526,15 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
NEW_RECEIVER(NSForm)
ADD_UNARY_METHOD(NSForm, addEntry, 0)
- IdentifierInfo *insertEntryNSForm[] = {&Ctx.Idents.get("insertEntry"),
- &Ctx.Idents.get("atIndex")};
+ const IdentifierInfo *insertEntryNSForm[] = {&Ctx.Idents.get("insertEntry"),
+ &Ctx.Idents.get("atIndex")};
ADD_METHOD(NSForm, insertEntryNSForm, 2, 0)
NEW_RECEIVER(NSTextFieldCell)
ADD_UNARY_METHOD(NSTextFieldCell, setPlaceholderString, 0)
NEW_RECEIVER(NSUserNotificationAction)
- IdentifierInfo *actionWithIdentifierNSUserNotificationAction[] = {
+ const IdentifierInfo *actionWithIdentifierNSUserNotificationAction[] = {
&Ctx.Idents.get("actionWithIdentifier"), &Ctx.Idents.get("title")};
ADD_METHOD(NSUserNotificationAction,
actionWithIdentifierNSUserNotificationAction, 2, 1)
@@ -544,7 +544,7 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(UITextField, setPlaceholder, 0)
NEW_RECEIVER(UIBarButtonItem)
- IdentifierInfo *initWithTitleUIBarButtonItem[] = {
+ const IdentifierInfo *initWithTitleUIBarButtonItem[] = {
&Ctx.Idents.get("initWithTitle"), &Ctx.Idents.get("style"),
&Ctx.Idents.get("target"), &Ctx.Idents.get("action")};
ADD_METHOD(UIBarButtonItem, initWithTitleUIBarButtonItem, 4, 0)
@@ -553,16 +553,16 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(UIViewController, setTitle, 0)
NEW_RECEIVER(UISegmentedControl)
- IdentifierInfo *insertSegmentWithTitleUISegmentedControl[] = {
+ const IdentifierInfo *insertSegmentWithTitleUISegmentedControl[] = {
&Ctx.Idents.get("insertSegmentWithTitle"), &Ctx.Idents.get("atIndex"),
&Ctx.Idents.get("animated")};
ADD_METHOD(UISegmentedControl, insertSegmentWithTitleUISegmentedControl, 3, 0)
- IdentifierInfo *setTitleUISegmentedControl[] = {
+ const IdentifierInfo *setTitleUISegmentedControl[] = {
&Ctx.Idents.get("setTitle"), &Ctx.Idents.get("forSegmentAtIndex")};
ADD_METHOD(UISegmentedControl, setTitleUISegmentedControl, 2, 0)
NEW_RECEIVER(NSAccessibilityCustomRotorItemResult)
- IdentifierInfo
+ const IdentifierInfo
*initWithItemLoadingTokenNSAccessibilityCustomRotorItemResult[] = {
&Ctx.Idents.get("initWithItemLoadingToken"),
&Ctx.Idents.get("customLabel")};
@@ -571,7 +571,7 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(NSAccessibilityCustomRotorItemResult, setCustomLabel, 0)
NEW_RECEIVER(UIContextualAction)
- IdentifierInfo *contextualActionWithStyleUIContextualAction[] = {
+ const IdentifierInfo *contextualActionWithStyleUIContextualAction[] = {
&Ctx.Idents.get("contextualActionWithStyle"), &Ctx.Idents.get("title"),
&Ctx.Idents.get("handler")};
ADD_METHOD(UIContextualAction, contextualActionWithStyleUIContextualAction, 3,
@@ -579,7 +579,7 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(UIContextualAction, setTitle, 0)
NEW_RECEIVER(NSAccessibilityCustomRotor)
- IdentifierInfo *initWithLabelNSAccessibilityCustomRotor[] = {
+ const IdentifierInfo *initWithLabelNSAccessibilityCustomRotor[] = {
&Ctx.Idents.get("initWithLabel"), &Ctx.Idents.get("itemSearchDelegate")};
ADD_METHOD(NSAccessibilityCustomRotor,
initWithLabelNSAccessibilityCustomRotor, 2, 0)
@@ -590,11 +590,11 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(NSWindowTab, setToolTip, 0)
NEW_RECEIVER(NSAccessibilityCustomAction)
- IdentifierInfo *initWithNameNSAccessibilityCustomAction[] = {
+ const IdentifierInfo *initWithNameNSAccessibilityCustomAction[] = {
&Ctx.Idents.get("initWithName"), &Ctx.Idents.get("handler")};
ADD_METHOD(NSAccessibilityCustomAction,
initWithNameNSAccessibilityCustomAction, 2, 0)
- IdentifierInfo *initWithNameTargetNSAccessibilityCustomAction[] = {
+ const IdentifierInfo *initWithNameTargetNSAccessibilityCustomAction[] = {
&Ctx.Idents.get("initWithName"), &Ctx.Idents.get("target"),
&Ctx.Idents.get("selector")};
ADD_METHOD(NSAccessibilityCustomAction,
@@ -618,12 +618,12 @@ void NonLocalizedStringChecker::initLocStringsMethods(ASTContext &Ctx) const {
if (!LSM.empty())
return;
- IdentifierInfo *LocalizedStringMacro[] = {
+ const IdentifierInfo *LocalizedStringMacro[] = {
&Ctx.Idents.get("localizedStringForKey"), &Ctx.Idents.get("value"),
&Ctx.Idents.get("table")};
LSM_INSERT_SELECTOR("NSBundle", LocalizedStringMacro, 3)
LSM_INSERT_UNARY("NSDateFormatter", "stringFromDate")
- IdentifierInfo *LocalizedStringFromDate[] = {
+ const IdentifierInfo *LocalizedStringFromDate[] = {
&Ctx.Idents.get("localizedStringFromDate"), &Ctx.Idents.get("dateStyle"),
&Ctx.Idents.get("timeStyle")};
LSM_INSERT_SELECTOR("NSDateFormatter", LocalizedStringFromDate, 3)
@@ -903,7 +903,7 @@ static inline bool isNSStringType(QualType T, ASTContext &Ctx) {
if (!Cls)
return false;
- IdentifierInfo *ClsName = Cls->getIdentifier();
+ const IdentifierInfo *ClsName = Cls->getIdentifier();
// FIXME: Should we walk the chain of classes?
return ClsName == &Ctx.Idents.get("NSString") ||
@@ -1159,7 +1159,7 @@ void EmptyLocalizationContextChecker::MethodCrawler::VisitObjCMessageExpr(
}
if (isAnyIdentifier(Result.getKind())) {
- if (Result.getRawIdentifier().equals("nil")) {
+ if (Result.getRawIdentifier() == "nil") {
reportEmptyContextError(ME);
return;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp
index 153a0a51e980..9757a00f1fb2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp
@@ -46,13 +46,13 @@ class MIGChecker : public Checker<check::PostCall, check::PreStmt<ReturnStmt>,
// additionally an argument of a MIG routine, the checker keeps track of that
// information and issues a warning when an error is returned from the
// respective routine.
- std::vector<std::pair<CallDescription, unsigned>> Deallocators = {
+ CallDescriptionMap<unsigned> Deallocators = {
#define CALL(required_args, deallocated_arg, ...) \
- {{{__VA_ARGS__}, required_args}, deallocated_arg}
- // E.g., if the checker sees a C function 'vm_deallocate' that is
- // defined on class 'IOUserClient' that has exactly 3 parameters, it knows
- // that argument #1 (starting from 0, i.e. the second argument) is going
- // to be consumed in the sense of the MIG consume-on-success convention.
+ {{CDM::SimpleFunc, {__VA_ARGS__}, required_args}, deallocated_arg}
+ // E.g., if the checker sees a C function 'vm_deallocate' that has
+ // exactly 3 parameters, it knows that argument #1 (starting from 0, i.e.
+ // the second argument) is going to be consumed in the sense of the MIG
+ // consume-on-success convention.
CALL(3, 1, "vm_deallocate"),
CALL(3, 1, "mach_vm_deallocate"),
CALL(2, 0, "mig_deallocate"),
@@ -78,6 +78,9 @@ class MIGChecker : public Checker<check::PostCall, check::PreStmt<ReturnStmt>,
CALL(1, 0, "thread_inspect_deallocate"),
CALL(1, 0, "upl_deallocate"),
CALL(1, 0, "vm_map_deallocate"),
+#undef CALL
+#define CALL(required_args, deallocated_arg, ...) \
+ {{CDM::CXXMethod, {__VA_ARGS__}, required_args}, deallocated_arg}
// E.g., if the checker sees a method 'releaseAsyncReference64()' that is
// defined on class 'IOUserClient' that takes exactly 1 argument, it knows
// that the argument is going to be consumed in the sense of the MIG
@@ -87,7 +90,7 @@ class MIGChecker : public Checker<check::PostCall, check::PreStmt<ReturnStmt>,
#undef CALL
};
- CallDescription OsRefRetain{{"os_ref_retain"}, 1};
+ CallDescription OsRefRetain{CDM::SimpleFunc, {"os_ref_retain"}, 1};
void checkReturnAux(const ReturnStmt *RS, CheckerContext &C) const;
@@ -198,15 +201,12 @@ void MIGChecker::checkPostCall(const CallEvent &Call, CheckerContext &C) const {
if (!isInMIGCall(C))
return;
- auto I = llvm::find_if(Deallocators,
- [&](const std::pair<CallDescription, unsigned> &Item) {
- return Item.first.matches(Call);
- });
- if (I == Deallocators.end())
+ const unsigned *ArgIdxPtr = Deallocators.lookup(Call);
+ if (!ArgIdxPtr)
return;
ProgramStateRef State = C.getState();
- unsigned ArgIdx = I->second;
+ unsigned ArgIdx = *ArgIdxPtr;
SVal Arg = Call.getArgSVal(ArgIdx);
const ParmVarDecl *PVD = getOriginParam(Arg, C);
if (!PVD || State->contains<RefCountedParameters>(PVD))
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index 79ab05f2c786..fe202c79ed62 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -46,6 +46,7 @@
#include "AllocationState.h"
#include "InterCheckerAPI.h"
+#include "NoOwnershipChangeVisitor.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
@@ -60,6 +61,7 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Lexer.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Checkers/Taint.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -78,13 +80,11 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include <climits>
#include <functional>
#include <optional>
#include <utility>
@@ -322,6 +322,7 @@ public:
CK_NewDeleteLeaksChecker,
CK_MismatchedDeallocatorChecker,
CK_InnerPointerChecker,
+ CK_TaintedAllocChecker,
CK_NumCheckKinds
};
@@ -365,6 +366,7 @@ private:
mutable std::unique_ptr<BugType> BT_MismatchedDealloc;
mutable std::unique_ptr<BugType> BT_OffsetFree[CK_NumCheckKinds];
mutable std::unique_ptr<BugType> BT_UseZerroAllocated[CK_NumCheckKinds];
+ mutable std::unique_ptr<BugType> BT_TaintedAlloc;
#define CHECK_FN(NAME) \
void NAME(const CallEvent &Call, CheckerContext &C) const;
@@ -382,6 +384,8 @@ private:
CHECK_FN(checkGMemdup)
CHECK_FN(checkGMallocN)
CHECK_FN(checkGMallocN0)
+ CHECK_FN(preGetdelim)
+ CHECK_FN(checkGetdelim)
CHECK_FN(checkReallocN)
CHECK_FN(checkOwnershipAttr)
@@ -391,57 +395,82 @@ private:
using CheckFn = std::function<void(const MallocChecker *,
const CallEvent &Call, CheckerContext &C)>;
+ const CallDescriptionMap<CheckFn> PreFnMap{
+ // NOTE: the following CallDescription also matches the C++ standard
+ // library function std::getline(); the callback will filter it out.
+ {{CDM::CLibrary, {"getline"}, 3}, &MallocChecker::preGetdelim},
+ {{CDM::CLibrary, {"getdelim"}, 4}, &MallocChecker::preGetdelim},
+ };
+
const CallDescriptionMap<CheckFn> FreeingMemFnMap{
- {{{"free"}, 1}, &MallocChecker::checkFree},
- {{{"if_freenameindex"}, 1}, &MallocChecker::checkIfFreeNameIndex},
- {{{"kfree"}, 1}, &MallocChecker::checkFree},
- {{{"g_free"}, 1}, &MallocChecker::checkFree},
+ {{CDM::CLibrary, {"free"}, 1}, &MallocChecker::checkFree},
+ {{CDM::CLibrary, {"if_freenameindex"}, 1},
+ &MallocChecker::checkIfFreeNameIndex},
+ {{CDM::CLibrary, {"kfree"}, 1}, &MallocChecker::checkFree},
+ {{CDM::CLibrary, {"g_free"}, 1}, &MallocChecker::checkFree},
};
bool isFreeingCall(const CallEvent &Call) const;
static bool isFreeingOwnershipAttrCall(const FunctionDecl *Func);
- friend class NoOwnershipChangeVisitor;
+ friend class NoMemOwnershipChangeVisitor;
CallDescriptionMap<CheckFn> AllocatingMemFnMap{
- {{{"alloca"}, 1}, &MallocChecker::checkAlloca},
- {{{"_alloca"}, 1}, &MallocChecker::checkAlloca},
- {{{"malloc"}, 1}, &MallocChecker::checkBasicAlloc},
- {{{"malloc"}, 3}, &MallocChecker::checkKernelMalloc},
- {{{"calloc"}, 2}, &MallocChecker::checkCalloc},
- {{{"valloc"}, 1}, &MallocChecker::checkBasicAlloc},
- {{CDF_MaybeBuiltin, {"strndup"}, 2}, &MallocChecker::checkStrdup},
- {{CDF_MaybeBuiltin, {"strdup"}, 1}, &MallocChecker::checkStrdup},
- {{{"_strdup"}, 1}, &MallocChecker::checkStrdup},
- {{{"kmalloc"}, 2}, &MallocChecker::checkKernelMalloc},
- {{{"if_nameindex"}, 1}, &MallocChecker::checkIfNameIndex},
- {{CDF_MaybeBuiltin, {"wcsdup"}, 1}, &MallocChecker::checkStrdup},
- {{CDF_MaybeBuiltin, {"_wcsdup"}, 1}, &MallocChecker::checkStrdup},
- {{{"g_malloc"}, 1}, &MallocChecker::checkBasicAlloc},
- {{{"g_malloc0"}, 1}, &MallocChecker::checkGMalloc0},
- {{{"g_try_malloc"}, 1}, &MallocChecker::checkBasicAlloc},
- {{{"g_try_malloc0"}, 1}, &MallocChecker::checkGMalloc0},
- {{{"g_memdup"}, 2}, &MallocChecker::checkGMemdup},
- {{{"g_malloc_n"}, 2}, &MallocChecker::checkGMallocN},
- {{{"g_malloc0_n"}, 2}, &MallocChecker::checkGMallocN0},
- {{{"g_try_malloc_n"}, 2}, &MallocChecker::checkGMallocN},
- {{{"g_try_malloc0_n"}, 2}, &MallocChecker::checkGMallocN0},
+ {{CDM::CLibrary, {"alloca"}, 1}, &MallocChecker::checkAlloca},
+ {{CDM::CLibrary, {"_alloca"}, 1}, &MallocChecker::checkAlloca},
+ // The line for "alloca" also covers "__builtin_alloca", but the
+ // _with_align variant must be listed separately because it takes an
+ // extra argument:
+ {{CDM::CLibrary, {"__builtin_alloca_with_align"}, 2},
+ &MallocChecker::checkAlloca},
+ {{CDM::CLibrary, {"malloc"}, 1}, &MallocChecker::checkBasicAlloc},
+ {{CDM::CLibrary, {"malloc"}, 3}, &MallocChecker::checkKernelMalloc},
+ {{CDM::CLibrary, {"calloc"}, 2}, &MallocChecker::checkCalloc},
+ {{CDM::CLibrary, {"valloc"}, 1}, &MallocChecker::checkBasicAlloc},
+ {{CDM::CLibrary, {"strndup"}, 2}, &MallocChecker::checkStrdup},
+ {{CDM::CLibrary, {"strdup"}, 1}, &MallocChecker::checkStrdup},
+ {{CDM::CLibrary, {"_strdup"}, 1}, &MallocChecker::checkStrdup},
+ {{CDM::CLibrary, {"kmalloc"}, 2}, &MallocChecker::checkKernelMalloc},
+ {{CDM::CLibrary, {"if_nameindex"}, 1}, &MallocChecker::checkIfNameIndex},
+ {{CDM::CLibrary, {"wcsdup"}, 1}, &MallocChecker::checkStrdup},
+ {{CDM::CLibrary, {"_wcsdup"}, 1}, &MallocChecker::checkStrdup},
+ {{CDM::CLibrary, {"g_malloc"}, 1}, &MallocChecker::checkBasicAlloc},
+ {{CDM::CLibrary, {"g_malloc0"}, 1}, &MallocChecker::checkGMalloc0},
+ {{CDM::CLibrary, {"g_try_malloc"}, 1}, &MallocChecker::checkBasicAlloc},
+ {{CDM::CLibrary, {"g_try_malloc0"}, 1}, &MallocChecker::checkGMalloc0},
+ {{CDM::CLibrary, {"g_memdup"}, 2}, &MallocChecker::checkGMemdup},
+ {{CDM::CLibrary, {"g_malloc_n"}, 2}, &MallocChecker::checkGMallocN},
+ {{CDM::CLibrary, {"g_malloc0_n"}, 2}, &MallocChecker::checkGMallocN0},
+ {{CDM::CLibrary, {"g_try_malloc_n"}, 2}, &MallocChecker::checkGMallocN},
+ {{CDM::CLibrary, {"g_try_malloc0_n"}, 2}, &MallocChecker::checkGMallocN0},
};
CallDescriptionMap<CheckFn> ReallocatingMemFnMap{
- {{{"realloc"}, 2},
+ {{CDM::CLibrary, {"realloc"}, 2},
std::bind(&MallocChecker::checkRealloc, _1, _2, _3, false)},
- {{{"reallocf"}, 2},
+ {{CDM::CLibrary, {"reallocf"}, 2},
std::bind(&MallocChecker::checkRealloc, _1, _2, _3, true)},
- {{{"g_realloc"}, 2},
+ {{CDM::CLibrary, {"g_realloc"}, 2},
std::bind(&MallocChecker::checkRealloc, _1, _2, _3, false)},
- {{{"g_try_realloc"}, 2},
+ {{CDM::CLibrary, {"g_try_realloc"}, 2},
std::bind(&MallocChecker::checkRealloc, _1, _2, _3, false)},
- {{{"g_realloc_n"}, 3}, &MallocChecker::checkReallocN},
- {{{"g_try_realloc_n"}, 3}, &MallocChecker::checkReallocN},
+ {{CDM::CLibrary, {"g_realloc_n"}, 3}, &MallocChecker::checkReallocN},
+ {{CDM::CLibrary, {"g_try_realloc_n"}, 3}, &MallocChecker::checkReallocN},
+
+ // NOTE: the following CallDescription also matches the C++ standard
+ // library function std::getline(); the callback will filter it out.
+ {{CDM::CLibrary, {"getline"}, 3}, &MallocChecker::checkGetdelim},
+ {{CDM::CLibrary, {"getdelim"}, 4}, &MallocChecker::checkGetdelim},
};
bool isMemCall(const CallEvent &Call) const;
+ void reportTaintBug(StringRef Msg, ProgramStateRef State, CheckerContext &C,
+ llvm::ArrayRef<SymbolRef> TaintedSyms,
+ AllocationFamily Family) const;
+
+ void checkTaintedness(CheckerContext &C, const CallEvent &Call,
+ const SVal SizeSVal, ProgramStateRef State,
+ AllocationFamily Family) const;
// TODO: Remove mutable by moving the initializtaion to the registry function.
mutable std::optional<uint64_t> KernelZeroFlagVal;
@@ -501,9 +530,9 @@ private:
/// malloc leaves it undefined.
/// \param [in] State The \c ProgramState right before allocation.
/// \returns The ProgramState right after allocation.
- [[nodiscard]] static ProgramStateRef
+ [[nodiscard]] ProgramStateRef
MallocMemAux(CheckerContext &C, const CallEvent &Call, const Expr *SizeEx,
- SVal Init, ProgramStateRef State, AllocationFamily Family);
+ SVal Init, ProgramStateRef State, AllocationFamily Family) const;
/// Models memory allocation.
///
@@ -514,9 +543,10 @@ private:
/// malloc leaves it undefined.
/// \param [in] State The \c ProgramState right before allocation.
/// \returns The ProgramState right after allocation.
- [[nodiscard]] static ProgramStateRef
- MallocMemAux(CheckerContext &C, const CallEvent &Call, SVal Size, SVal Init,
- ProgramStateRef State, AllocationFamily Family);
+ [[nodiscard]] ProgramStateRef MallocMemAux(CheckerContext &C,
+ const CallEvent &Call, SVal Size,
+ SVal Init, ProgramStateRef State,
+ AllocationFamily Family) const;
// Check if this malloc() for special flags. At present that means M_ZERO or
// __GFP_ZERO (in which case, treat it like calloc).
@@ -588,11 +618,14 @@ private:
/// }
/// \param [in] ReturnsNullOnFailure Whether the memory deallocation function
/// we're modeling returns with Null on failure.
+ /// \param [in] ArgValOpt Optional value to use for the argument instead of
+ /// the one obtained from ArgExpr.
/// \returns The ProgramState right after deallocation.
[[nodiscard]] ProgramStateRef
FreeMemAux(CheckerContext &C, const Expr *ArgExpr, const CallEvent &Call,
ProgramStateRef State, bool Hold, bool &IsKnownToBeAllocated,
- AllocationFamily Family, bool ReturnsNullOnFailure = false) const;
+ AllocationFamily Family, bool ReturnsNullOnFailure = false,
+ std::optional<SVal> ArgValOpt = {}) const;
// TODO: Needs some refactoring, as all other deallocation modeling
// functions are suffering from out parameters and messy code due to how
@@ -626,8 +659,9 @@ private:
/// \param [in] Call The expression that reallocated memory
/// \param [in] State The \c ProgramState right before reallocation.
/// \returns The ProgramState right after allocation.
- [[nodiscard]] static ProgramStateRef
- CallocMem(CheckerContext &C, const CallEvent &Call, ProgramStateRef State);
+ [[nodiscard]] ProgramStateRef CallocMem(CheckerContext &C,
+ const CallEvent &Call,
+ ProgramStateRef State) const;
/// See if deallocation happens in a suspicious context. If so, escape the
/// pointers that otherwise would have been deallocated and return true.
@@ -730,61 +764,8 @@ private:
//===----------------------------------------------------------------------===//
namespace {
-class NoOwnershipChangeVisitor final : public NoStateChangeFuncVisitor {
- // The symbol whose (lack of) ownership change we are interested in.
- SymbolRef Sym;
- const MallocChecker &Checker;
- using OwnerSet = llvm::SmallPtrSet<const MemRegion *, 8>;
-
- // Collect which entities point to the allocated memory, and could be
- // responsible for deallocating it.
- class OwnershipBindingsHandler : public StoreManager::BindingsHandler {
- SymbolRef Sym;
- OwnerSet &Owners;
-
- public:
- OwnershipBindingsHandler(SymbolRef Sym, OwnerSet &Owners)
- : Sym(Sym), Owners(Owners) {}
-
- bool HandleBinding(StoreManager &SMgr, Store Store, const MemRegion *Region,
- SVal Val) override {
- if (Val.getAsSymbol() == Sym)
- Owners.insert(Region);
- return true;
- }
-
- LLVM_DUMP_METHOD void dump() const { dumpToStream(llvm::errs()); }
- LLVM_DUMP_METHOD void dumpToStream(llvm::raw_ostream &out) const {
- out << "Owners: {\n";
- for (const MemRegion *Owner : Owners) {
- out << " ";
- Owner->dumpToStream(out);
- out << ",\n";
- }
- out << "}\n";
- }
- };
-
+class NoMemOwnershipChangeVisitor final : public NoOwnershipChangeVisitor {
protected:
- OwnerSet getOwnersAtNode(const ExplodedNode *N) {
- OwnerSet Ret;
-
- ProgramStateRef State = N->getState();
- OwnershipBindingsHandler Handler{Sym, Ret};
- State->getStateManager().getStoreManager().iterBindings(State->getStore(),
- Handler);
- return Ret;
- }
-
- LLVM_DUMP_METHOD static std::string
- getFunctionName(const ExplodedNode *CallEnterN) {
- if (const CallExpr *CE = llvm::dyn_cast_or_null<CallExpr>(
- CallEnterN->getLocationAs<CallEnter>()->getCallExpr()))
- if (const FunctionDecl *FD = CE->getDirectCallee())
- return FD->getQualifiedNameAsString();
- return "";
- }
-
/// Syntactically checks whether the callee is a deallocating function. Since
/// we have no path-sensitive information on this call (we would need a
/// CallEvent instead of a CallExpr for that), its possible that a
@@ -793,8 +774,9 @@ protected:
/// See namespace `memory_passed_to_fn_call_free_through_fn_ptr` in
/// clang/test/Analysis/NewDeleteLeaks.cpp.
bool isFreeingCallAsWritten(const CallExpr &Call) const {
- if (Checker.FreeingMemFnMap.lookupAsWritten(Call) ||
- Checker.ReallocatingMemFnMap.lookupAsWritten(Call))
+ const auto *MallocChk = static_cast<const MallocChecker *>(&Checker);
+ if (MallocChk->FreeingMemFnMap.lookupAsWritten(Call) ||
+ MallocChk->ReallocatingMemFnMap.lookupAsWritten(Call))
return true;
if (const auto *Func =
@@ -804,23 +786,21 @@ protected:
return false;
}
+ bool hasResourceStateChanged(ProgramStateRef CallEnterState,
+ ProgramStateRef CallExitEndState) final {
+ return CallEnterState->get<RegionState>(Sym) !=
+ CallExitEndState->get<RegionState>(Sym);
+ }
+
/// Heuristically guess whether the callee intended to free memory. This is
/// done syntactically, because we are trying to argue about alternative
/// paths of execution, and as a consequence we don't have path-sensitive
/// information.
- bool doesFnIntendToHandleOwnership(const Decl *Callee, ASTContext &ACtx) {
+ bool doesFnIntendToHandleOwnership(const Decl *Callee,
+ ASTContext &ACtx) final {
using namespace clang::ast_matchers;
const FunctionDecl *FD = dyn_cast<FunctionDecl>(Callee);
- // Given that the stack frame was entered, the body should always be
- // theoretically obtainable. In case of body farms, the synthesized body
- // is not attached to declaration, thus triggering the '!FD->hasBody()'
- // branch. That said, would a synthesized body ever intend to handle
- // ownership? As of today they don't. And if they did, how would we
- // put notes inside it, given that it doesn't match any source locations?
- if (!FD || !FD->hasBody())
- return false;
-
auto Matches = match(findAll(stmt(anyOf(cxxDeleteExpr().bind("delete"),
callExpr().bind("call")))),
*FD->getBody(), ACtx);
@@ -838,30 +818,7 @@ protected:
return false;
}
- bool wasModifiedInFunction(const ExplodedNode *CallEnterN,
- const ExplodedNode *CallExitEndN) override {
- if (!doesFnIntendToHandleOwnership(
- CallExitEndN->getFirstPred()->getLocationContext()->getDecl(),
- CallExitEndN->getState()->getAnalysisManager().getASTContext()))
- return true;
-
- if (CallEnterN->getState()->get<RegionState>(Sym) !=
- CallExitEndN->getState()->get<RegionState>(Sym))
- return true;
-
- OwnerSet CurrOwners = getOwnersAtNode(CallEnterN);
- OwnerSet ExitOwners = getOwnersAtNode(CallExitEndN);
-
- // Owners in the current set may be purged from the analyzer later on.
- // If a variable is dead (is not referenced directly or indirectly after
- // some point), it will be removed from the Store before the end of its
- // actual lifetime.
- // This means that if the ownership status didn't change, CurrOwners
- // must be a superset of, but not necessarily equal to ExitOwners.
- return !llvm::set_is_subset(ExitOwners, CurrOwners);
- }
-
- static PathDiagnosticPieceRef emitNote(const ExplodedNode *N) {
+ PathDiagnosticPieceRef emitNote(const ExplodedNode *N) final {
PathDiagnosticLocation L = PathDiagnosticLocation::create(
N->getLocation(),
N->getState()->getStateManager().getContext().getSourceManager());
@@ -870,42 +827,9 @@ protected:
"later deallocation");
}
- PathDiagnosticPieceRef
- maybeEmitNoteForObjCSelf(PathSensitiveBugReport &R,
- const ObjCMethodCall &Call,
- const ExplodedNode *N) override {
- // TODO: Implement.
- return nullptr;
- }
-
- PathDiagnosticPieceRef
- maybeEmitNoteForCXXThis(PathSensitiveBugReport &R,
- const CXXConstructorCall &Call,
- const ExplodedNode *N) override {
- // TODO: Implement.
- return nullptr;
- }
-
- PathDiagnosticPieceRef
- maybeEmitNoteForParameters(PathSensitiveBugReport &R, const CallEvent &Call,
- const ExplodedNode *N) override {
- // TODO: Factor the logic of "what constitutes as an entity being passed
- // into a function call" out by reusing the code in
- // NoStoreFuncVisitor::maybeEmitNoteForParameters, maybe by incorporating
- // the printing technology in UninitializedObject's FieldChainInfo.
- ArrayRef<ParmVarDecl *> Parameters = Call.parameters();
- for (unsigned I = 0; I < Call.getNumArgs() && I < Parameters.size(); ++I) {
- SVal V = Call.getArgSVal(I);
- if (V.getAsSymbol() == Sym)
- return emitNote(N);
- }
- return nullptr;
- }
-
public:
- NoOwnershipChangeVisitor(SymbolRef Sym, const MallocChecker *Checker)
- : NoStateChangeFuncVisitor(bugreporter::TrackingKind::Thorough), Sym(Sym),
- Checker(*Checker) {}
+ NoMemOwnershipChangeVisitor(SymbolRef Sym, const MallocChecker *Checker)
+ : NoOwnershipChangeVisitor(Sym, Checker) {}
void Profile(llvm::FoldingSetNodeID &ID) const override {
static int Tag = 0;
@@ -1242,9 +1166,6 @@ static bool isStandardRealloc(const CallEvent &Call) {
assert(FD);
ASTContext &AC = FD->getASTContext();
- if (isa<CXXMethodDecl>(FD))
- return false;
-
return FD->getDeclaredReturnType().getDesugaredType(AC) == AC.VoidPtrTy &&
FD->getParamDecl(0)->getType().getDesugaredType(AC) == AC.VoidPtrTy &&
FD->getParamDecl(1)->getType().getDesugaredType(AC) ==
@@ -1256,9 +1177,6 @@ static bool isGRealloc(const CallEvent &Call) {
assert(FD);
ASTContext &AC = FD->getASTContext();
- if (isa<CXXMethodDecl>(FD))
- return false;
-
return FD->getDeclaredReturnType().getDesugaredType(AC) == AC.VoidPtrTy &&
FD->getParamDecl(0)->getType().getDesugaredType(AC) == AC.VoidPtrTy &&
FD->getParamDecl(1)->getType().getDesugaredType(AC) ==
@@ -1267,14 +1185,14 @@ static bool isGRealloc(const CallEvent &Call) {
void MallocChecker::checkRealloc(const CallEvent &Call, CheckerContext &C,
bool ShouldFreeOnFail) const {
- // HACK: CallDescription currently recognizes non-standard realloc functions
- // as standard because it doesn't check the type, or wether its a non-method
- // function. This should be solved by making CallDescription smarter.
- // Mind that this came from a bug report, and all other functions suffer from
- // this.
- // https://bugs.llvm.org/show_bug.cgi?id=46253
+ // Ignore calls to functions whose type does not match the expected type of
+ // either the standard realloc or g_realloc from GLib.
+ // FIXME: Should we perform this kind of checking consistently for each
+ // function? If yes, then perhaps extend the `CallDescription` interface to
+ // handle this.
if (!isStandardRealloc(Call) && !isGRealloc(Call))
return;
+
ProgramStateRef State = C.getState();
State = ReallocMemAux(C, Call, ShouldFreeOnFail, State, AF_Malloc);
State = ProcessZeroAllocCheck(Call, 1, State);
@@ -1423,6 +1341,62 @@ void MallocChecker::checkGMallocN0(const CallEvent &Call,
C.addTransition(State);
}
+static bool isFromStdNamespace(const CallEvent &Call) {
+ const Decl *FD = Call.getDecl();
+ assert(FD && "a CallDescription cannot match a call without a Decl");
+ return FD->isInStdNamespace();
+}
+
+void MallocChecker::preGetdelim(const CallEvent &Call,
+ CheckerContext &C) const {
+ // Discard calls to the C++ standard library function std::getline(), which
+ // is completely unrelated to the POSIX getline() that we're checking.
+ if (isFromStdNamespace(Call))
+ return;
+
+ ProgramStateRef State = C.getState();
+ const auto LinePtr = getPointeeVal(Call.getArgSVal(0), State);
+ if (!LinePtr)
+ return;
+
+ // FreeMemAux takes IsKnownToBeAllocated as an output parameter, and it will
+ // be true after the call if the symbol was registered by this checker.
+ // We do not need this value here, as FreeMemAux will take care
+ // of reporting any violation of the preconditions.
+ bool IsKnownToBeAllocated = false;
+ State = FreeMemAux(C, Call.getArgExpr(0), Call, State, false,
+ IsKnownToBeAllocated, AF_Malloc, false, LinePtr);
+ if (State)
+ C.addTransition(State);
+}
+
+void MallocChecker::checkGetdelim(const CallEvent &Call,
+ CheckerContext &C) const {
+ // Discard calls to the C++ standard library function std::getline(), which
+ // is completely unrelated to the POSIX getline() that we're checking.
+ if (isFromStdNamespace(Call))
+ return;
+
+ ProgramStateRef State = C.getState();
+ // Handle the post-conditions of getline and getdelim:
+ // Register the new conjured value as an allocated buffer.
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+
+ SValBuilder &SVB = C.getSValBuilder();
+
+ const auto LinePtr =
+ getPointeeVal(Call.getArgSVal(0), State)->getAs<DefinedSVal>();
+ const auto Size =
+ getPointeeVal(Call.getArgSVal(1), State)->getAs<DefinedSVal>();
+ if (!LinePtr || !Size || !LinePtr->getAsRegion())
+ return;
+
+ State = setDynamicExtent(State, LinePtr->getAsRegion(), *Size, SVB);
+ C.addTransition(MallocUpdateRefState(C, CE, State, AF_Malloc, *LinePtr));
+}
+
void MallocChecker::checkReallocN(const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
@@ -1622,6 +1596,11 @@ MallocChecker::processNewAllocation(const CXXAllocatorCall &Call,
// MallocUpdateRefState() instead of MallocMemAux() which breaks the
// existing binding.
SVal Target = Call.getObjectUnderConstruction();
+ if (Call.getOriginExpr()->isArray()) {
+ if (auto SizeEx = NE->getArraySize())
+ checkTaintedness(C, Call, C.getSVal(*SizeEx), State, AF_CXXNewArray);
+ }
+
State = MallocUpdateRefState(C, NE, State, Family, Target);
State = ProcessZeroAllocCheck(Call, 0, State, Target);
return State;
@@ -1654,7 +1633,7 @@ static std::optional<bool> getFreeWhenDoneArg(const ObjCMethodCall &Call) {
// FIXME: We should not rely on fully-constrained symbols being folded.
for (unsigned i = 1; i < S.getNumArgs(); ++i)
- if (S.getNameForSlot(i).equals("freeWhenDone"))
+ if (S.getNameForSlot(i) == "freeWhenDone")
return !Call.getArgSVal(i).isZeroConstant();
return std::nullopt;
@@ -1706,7 +1685,7 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
const CallEvent &Call,
const Expr *SizeEx, SVal Init,
ProgramStateRef State,
- AllocationFamily Family) {
+ AllocationFamily Family) const {
if (!State)
return nullptr;
@@ -1714,10 +1693,66 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
return MallocMemAux(C, Call, C.getSVal(SizeEx), Init, State, Family);
}
+void MallocChecker::reportTaintBug(StringRef Msg, ProgramStateRef State,
+ CheckerContext &C,
+ llvm::ArrayRef<SymbolRef> TaintedSyms,
+ AllocationFamily Family) const {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(State, this)) {
+ if (!BT_TaintedAlloc)
+ BT_TaintedAlloc.reset(new BugType(CheckNames[CK_TaintedAllocChecker],
+ "Tainted Memory Allocation",
+ categories::TaintedData));
+ auto R = std::make_unique<PathSensitiveBugReport>(*BT_TaintedAlloc, Msg, N);
+ for (auto TaintedSym : TaintedSyms) {
+ R->markInteresting(TaintedSym);
+ }
+ C.emitReport(std::move(R));
+ }
+}
+
+void MallocChecker::checkTaintedness(CheckerContext &C, const CallEvent &Call,
+ const SVal SizeSVal, ProgramStateRef State,
+ AllocationFamily Family) const {
+ if (!ChecksEnabled[CK_TaintedAllocChecker])
+ return;
+ std::vector<SymbolRef> TaintedSyms =
+ taint::getTaintedSymbols(State, SizeSVal);
+ if (TaintedSyms.empty())
+ return;
+
+ SValBuilder &SVB = C.getSValBuilder();
+ QualType SizeTy = SVB.getContext().getSizeType();
+ QualType CmpTy = SVB.getConditionType();
+ // In case the symbol is tainted, we give a warning if the
+ // size is larger than SIZE_MAX/4
+ BasicValueFactory &BVF = SVB.getBasicValueFactory();
+ const llvm::APSInt MaxValInt = BVF.getMaxValue(SizeTy);
+ NonLoc MaxLength =
+ SVB.makeIntVal(MaxValInt / APSIntType(MaxValInt).getValue(4));
+ std::optional<NonLoc> SizeNL = SizeSVal.getAs<NonLoc>();
+ auto Cmp = SVB.evalBinOpNN(State, BO_GE, *SizeNL, MaxLength, CmpTy)
+ .getAs<DefinedOrUnknownSVal>();
+ if (!Cmp)
+ return;
+ auto [StateTooLarge, StateNotTooLarge] = State->assume(*Cmp);
+ if (!StateTooLarge && StateNotTooLarge) {
+ // We can prove that size is not too large so there is no issue.
+ return;
+ }
+
+ std::string Callee = "Memory allocation function";
+ if (Call.getCalleeIdentifier())
+ Callee = Call.getCalleeIdentifier()->getName().str();
+ reportTaintBug(
+ Callee + " is called with a tainted (potentially attacker controlled) "
+ "value. Make sure the value is bound checked.",
+ State, C, TaintedSyms, Family);
+}
+
ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
const CallEvent &Call, SVal Size,
SVal Init, ProgramStateRef State,
- AllocationFamily Family) {
+ AllocationFamily Family) const {
if (!State)
return nullptr;
@@ -1746,6 +1781,8 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
if (Size.isUndef())
Size = UnknownVal();
+ checkTaintedness(C, Call, Size, State, AF_Malloc);
+
// Set the region's extent.
State = setDynamicExtent(State, RetVal.getAsRegion(),
Size.castAs<DefinedOrUnknownSVal>(), SVB);
@@ -1769,9 +1806,18 @@ static ProgramStateRef MallocUpdateRefState(CheckerContext &C, const Expr *E,
return nullptr;
SymbolRef Sym = RetVal->getAsLocSymbol();
+
// This is a return value of a function that was not inlined, such as malloc()
// or new(). We've checked that in the caller. Therefore, it must be a symbol.
assert(Sym);
+ // FIXME: In theory this assertion should fail for `alloca()` calls (because
+ // `AllocaRegion`s are not symbolic); but in practice this does not happen.
+ // As the current code appears to work correctly, I'm not touching this issue
+ // now, but it would be good to investigate and clarify this.
+ // Also note that perhaps the special `AllocaRegion` should be replaced by
+ // `SymbolicRegion` (or turned into a subclass of `SymbolicRegion`) to enable
+ // proper tracking of memory allocated by `alloca()` -- and after that change
+ // this assertion would become valid again.
// Set the symbol's state to Allocated.
return State->set<RegionState>(Sym, RefState::getAllocated(Family, E));
@@ -1895,15 +1941,17 @@ static void printExpectedDeallocName(raw_ostream &os, AllocationFamily Family) {
}
}
-ProgramStateRef MallocChecker::FreeMemAux(
- CheckerContext &C, const Expr *ArgExpr, const CallEvent &Call,
- ProgramStateRef State, bool Hold, bool &IsKnownToBeAllocated,
- AllocationFamily Family, bool ReturnsNullOnFailure) const {
+ProgramStateRef
+MallocChecker::FreeMemAux(CheckerContext &C, const Expr *ArgExpr,
+ const CallEvent &Call, ProgramStateRef State,
+ bool Hold, bool &IsKnownToBeAllocated,
+ AllocationFamily Family, bool ReturnsNullOnFailure,
+ std::optional<SVal> ArgValOpt) const {
if (!State)
return nullptr;
- SVal ArgVal = C.getSVal(ArgExpr);
+ SVal ArgVal = ArgValOpt.value_or(C.getSVal(ArgExpr));
if (!isa<DefinedOrUnknownSVal>(ArgVal))
return nullptr;
DefinedOrUnknownSVal location = ArgVal.castAs<DefinedOrUnknownSVal>();
@@ -2673,7 +2721,7 @@ MallocChecker::ReallocMemAux(CheckerContext &C, const CallEvent &Call,
ProgramStateRef MallocChecker::CallocMem(CheckerContext &C,
const CallEvent &Call,
- ProgramStateRef State) {
+ ProgramStateRef State) const {
if (!State)
return nullptr;
@@ -2790,7 +2838,7 @@ void MallocChecker::HandleLeak(SymbolRef Sym, ExplodedNode *N,
R->markInteresting(Sym);
R->addVisitor<MallocBugVisitor>(Sym, true);
if (ShouldRegisterNoOwnershipChangeVisitor)
- R->addVisitor<NoOwnershipChangeVisitor>(Sym, this);
+ R->addVisitor<NoMemOwnershipChangeVisitor>(Sym, this);
C.emitReport(std::move(R));
}
@@ -2881,6 +2929,13 @@ void MallocChecker::checkPreCall(const CallEvent &Call,
return;
}
+ // We need to handle getline pre-conditions here before the pointed region
+ // gets invalidated by StreamChecker
+ if (const auto *PreFN = PreFnMap.lookup(Call)) {
+ (*PreFN)(this, Call, C);
+ return;
+ }
+
// We will check for double free in the post visit.
if (const AnyFunctionCall *FC = dyn_cast<AnyFunctionCall>(&Call)) {
const FunctionDecl *FD = FC->getDecl();
@@ -3160,7 +3215,7 @@ bool MallocChecker::mayFreeAnyEscapedMemoryOrIsModeledExplicitly(
if (FirstSlot.starts_with("addPointer") ||
FirstSlot.starts_with("insertPointer") ||
FirstSlot.starts_with("replacePointer") ||
- FirstSlot.equals("valueWithPointer")) {
+ FirstSlot == "valueWithPointer") {
return true;
}
@@ -3356,7 +3411,7 @@ static bool isReferenceCountingPointerDestructor(const CXXDestructorDecl *DD) {
if (N.contains_insensitive("ptr") || N.contains_insensitive("pointer")) {
if (N.contains_insensitive("ref") || N.contains_insensitive("cnt") ||
N.contains_insensitive("intrusive") ||
- N.contains_insensitive("shared")) {
+ N.contains_insensitive("shared") || N.ends_with_insensitive("rc")) {
return true;
}
}
@@ -3388,13 +3443,24 @@ PathDiagnosticPieceRef MallocBugVisitor::VisitNode(const ExplodedNode *N,
// original reference count is positive, we should not report use-after-frees
// on objects deleted in such destructors. This can probably be improved
// through better shared pointer modeling.
- if (ReleaseDestructorLC) {
+ if (ReleaseDestructorLC && (ReleaseDestructorLC == CurrentLC ||
+ ReleaseDestructorLC->isParentOf(CurrentLC))) {
if (const auto *AE = dyn_cast<AtomicExpr>(S)) {
+ // Check for manual use of atomic builtins.
AtomicExpr::AtomicOp Op = AE->getOp();
if (Op == AtomicExpr::AO__c11_atomic_fetch_add ||
Op == AtomicExpr::AO__c11_atomic_fetch_sub) {
- if (ReleaseDestructorLC == CurrentLC ||
- ReleaseDestructorLC->isParentOf(CurrentLC)) {
+ BR.markInvalid(getTag(), S);
+ }
+ } else if (const auto *CE = dyn_cast<CallExpr>(S)) {
+ // Check for `std::atomic` and such. This covers both regular method calls
+ // and operator calls.
+ if (const auto *MD =
+ dyn_cast_or_null<CXXMethodDecl>(CE->getDirectCallee())) {
+ const CXXRecordDecl *RD = MD->getParent();
+ // A bit wobbly with ".contains()" because it may be like
+ // "__atomic_base" or something.
+ if (StringRef(RD->getNameAsString()).contains("atomic")) {
BR.markInvalid(getTag(), S);
}
}
@@ -3628,3 +3694,4 @@ REGISTER_CHECKER(MallocChecker)
REGISTER_CHECKER(NewDeleteChecker)
REGISTER_CHECKER(NewDeleteLeaksChecker)
REGISTER_CHECKER(MismatchedDeallocatorChecker)
+REGISTER_CHECKER(TaintedAllocChecker)
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
index 2e31c16e457c..cd1dd1b2fc51 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
@@ -27,8 +27,8 @@ using namespace ento;
namespace {
class MmapWriteExecChecker : public Checker<check::PreCall> {
- CallDescription MmapFn;
- CallDescription MprotectFn;
+ CallDescription MmapFn{CDM::CLibrary, {"mmap"}, 6};
+ CallDescription MprotectFn{CDM::CLibrary, {"mprotect"}, 3};
static int ProtWrite;
static int ProtExec;
static int ProtRead;
@@ -36,7 +36,6 @@ class MmapWriteExecChecker : public Checker<check::PreCall> {
"Security"};
public:
- MmapWriteExecChecker() : MmapFn({"mmap"}, 6), MprotectFn({"mprotect"}, 3) {}
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
int ProtExecOv;
int ProtReadOv;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NoOwnershipChangeVisitor.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NoOwnershipChangeVisitor.cpp
new file mode 100644
index 000000000000..22b5ebfd6fab
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NoOwnershipChangeVisitor.cpp
@@ -0,0 +1,118 @@
+//===--------------------------------------------------------------*- C++ -*--//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "NoOwnershipChangeVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
+#include "llvm/ADT/SetOperations.h"
+
+using namespace clang;
+using namespace ento;
+using OwnerSet = NoOwnershipChangeVisitor::OwnerSet;
+
+namespace {
+// Collect which entities point to the allocated memory, and could be
+// responsible for deallocating it.
+class OwnershipBindingsHandler : public StoreManager::BindingsHandler {
+ SymbolRef Sym;
+ OwnerSet &Owners;
+
+public:
+ OwnershipBindingsHandler(SymbolRef Sym, OwnerSet &Owners)
+ : Sym(Sym), Owners(Owners) {}
+
+ bool HandleBinding(StoreManager &SMgr, Store Store, const MemRegion *Region,
+ SVal Val) override {
+ if (Val.getAsSymbol() == Sym)
+ Owners.insert(Region);
+ return true;
+ }
+
+ LLVM_DUMP_METHOD void dump() const { dumpToStream(llvm::errs()); }
+ LLVM_DUMP_METHOD void dumpToStream(llvm::raw_ostream &out) const {
+ out << "Owners: {\n";
+ for (const MemRegion *Owner : Owners) {
+ out << " ";
+ Owner->dumpToStream(out);
+ out << ",\n";
+ }
+ out << "}\n";
+ }
+};
+} // namespace
+
+OwnerSet NoOwnershipChangeVisitor::getOwnersAtNode(const ExplodedNode *N) {
+ OwnerSet Ret;
+
+ ProgramStateRef State = N->getState();
+ OwnershipBindingsHandler Handler{Sym, Ret};
+ State->getStateManager().getStoreManager().iterBindings(State->getStore(),
+ Handler);
+ return Ret;
+}
+
+LLVM_DUMP_METHOD std::string
+NoOwnershipChangeVisitor::getFunctionName(const ExplodedNode *CallEnterN) {
+ if (const CallExpr *CE = llvm::dyn_cast_or_null<CallExpr>(
+ CallEnterN->getLocationAs<CallEnter>()->getCallExpr()))
+ if (const FunctionDecl *FD = CE->getDirectCallee())
+ return FD->getQualifiedNameAsString();
+ return "";
+}
+
+bool NoOwnershipChangeVisitor::wasModifiedInFunction(
+ const ExplodedNode *CallEnterN, const ExplodedNode *CallExitEndN) {
+ const Decl *Callee =
+ CallExitEndN->getFirstPred()->getLocationContext()->getDecl();
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(Callee);
+
+ // Given that the stack frame was entered, the body should always be
+ // theoretically obtainable. In case of body farms, the synthesized body
+ // is not attached to declaration, thus triggering the '!FD->hasBody()'
+ // branch. That said, would a synthesized body ever intend to handle
+ // ownership? As of today they don't. And if they did, how would we
+ // put notes inside it, given that it doesn't match any source locations?
+ if (!FD || !FD->hasBody())
+ return false;
+ if (!doesFnIntendToHandleOwnership(
+ Callee,
+ CallExitEndN->getState()->getAnalysisManager().getASTContext()))
+ return true;
+
+ if (hasResourceStateChanged(CallEnterN->getState(), CallExitEndN->getState()))
+ return true;
+
+ OwnerSet CurrOwners = getOwnersAtNode(CallEnterN);
+ OwnerSet ExitOwners = getOwnersAtNode(CallExitEndN);
+
+ // Owners in the current set may be purged from the analyzer later on.
+ // If a variable is dead (is not referenced directly or indirectly after
+ // some point), it will be removed from the Store before the end of its
+ // actual lifetime.
+ // This means that if the ownership status didn't change, CurrOwners
+ // must be a superset of, but not necessarily equal to ExitOwners.
+ return !llvm::set_is_subset(ExitOwners, CurrOwners);
+}
+
+PathDiagnosticPieceRef NoOwnershipChangeVisitor::maybeEmitNoteForParameters(
+ PathSensitiveBugReport &R, const CallEvent &Call, const ExplodedNode *N) {
+  // TODO: Factor the logic of "what counts as an entity being passed
+ // into a function call" out by reusing the code in
+ // NoStoreFuncVisitor::maybeEmitNoteForParameters, maybe by incorporating
+ // the printing technology in UninitializedObject's FieldChainInfo.
+ ArrayRef<ParmVarDecl *> Parameters = Call.parameters();
+ for (unsigned I = 0; I < Call.getNumArgs() && I < Parameters.size(); ++I) {
+ SVal V = Call.getArgSVal(I);
+ if (V.getAsSymbol() == Sym)
+ return emitNote(N);
+ }
+ return nullptr;
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NoOwnershipChangeVisitor.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NoOwnershipChangeVisitor.h
new file mode 100644
index 000000000000..027f1a156a7c
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NoOwnershipChangeVisitor.h
@@ -0,0 +1,77 @@
+//===--------------------------------------------------------------*- C++ -*--//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
+
+namespace clang {
+namespace ento {
+
+class NoOwnershipChangeVisitor : public NoStateChangeFuncVisitor {
+protected:
+ // The symbol whose (lack of) ownership change we are interested in.
+ SymbolRef Sym;
+ const CheckerBase &Checker;
+
+ LLVM_DUMP_METHOD static std::string
+ getFunctionName(const ExplodedNode *CallEnterN);
+
+ /// Heuristically guess whether the callee intended to free the resource. This
+ /// is done syntactically, because we are trying to argue about alternative
+ /// paths of execution, and as a consequence we don't have path-sensitive
+ /// information.
+ virtual bool doesFnIntendToHandleOwnership(const Decl *Callee,
+ ASTContext &ACtx) = 0;
+
+ virtual bool hasResourceStateChanged(ProgramStateRef CallEnterState,
+ ProgramStateRef CallExitEndState) = 0;
+
+ bool wasModifiedInFunction(const ExplodedNode *CallEnterN,
+ const ExplodedNode *CallExitEndN) final;
+
+ virtual PathDiagnosticPieceRef emitNote(const ExplodedNode *N) = 0;
+
+ PathDiagnosticPieceRef maybeEmitNoteForObjCSelf(PathSensitiveBugReport &R,
+ const ObjCMethodCall &Call,
+ const ExplodedNode *N) final {
+ // TODO: Implement.
+ return nullptr;
+ }
+
+ PathDiagnosticPieceRef maybeEmitNoteForCXXThis(PathSensitiveBugReport &R,
+ const CXXConstructorCall &Call,
+ const ExplodedNode *N) final {
+ // TODO: Implement.
+ return nullptr;
+ }
+
+  // Marked final; effectively dispatches to the virtual emitNote() hook.
+ PathDiagnosticPieceRef
+ maybeEmitNoteForParameters(PathSensitiveBugReport &R, const CallEvent &Call,
+ const ExplodedNode *N) final;
+
+public:
+ using OwnerSet = llvm::SmallPtrSet<const MemRegion *, 8>;
+
+private:
+ OwnerSet getOwnersAtNode(const ExplodedNode *N);
+
+public:
+ NoOwnershipChangeVisitor(SymbolRef Sym, const CheckerBase *Checker)
+ : NoStateChangeFuncVisitor(bugreporter::TrackingKind::Thorough), Sym(Sym),
+ Checker(*Checker) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ static int Tag = 0;
+ ID.AddPointer(&Tag);
+ ID.AddPointer(Sym);
+ }
+};
+} // namespace ento
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
index 06f1ad00eaf2..60934e51febe 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
@@ -1082,7 +1082,8 @@ void NullabilityChecker::checkPostObjCMessage(const ObjCMethodCall &M,
M.getMessageKind() == OCM_PropertyAccess && !C.wasInlined) {
bool LookupResolved = false;
if (const MemRegion *ReceiverRegion = getTrackRegion(M.getReceiverSVal())) {
- if (IdentifierInfo *Ident = M.getSelector().getIdentifierInfoForSlot(0)) {
+ if (const IdentifierInfo *Ident =
+ M.getSelector().getIdentifierInfoForSlot(0)) {
LookupResolved = true;
ObjectPropPair Key = std::make_pair(ReceiverRegion, Ident);
const ConstrainedPropertyVal *PrevPropVal =
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
index 514f53b4804f..e7fd14d4558b 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
@@ -1,4 +1,4 @@
-//===- ObjCAutoreleaseWriteChecker.cpp ----------------------------*- C++ -*-==//
+//===- ObjCAutoreleaseWriteChecker.cpp ---------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
index 2b008d1c775a..6978d81faf1c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
@@ -101,14 +101,14 @@ void WalkAST::VisitCallExpr(CallExpr *CE) {
const Expr *Arg = nullptr;
unsigned ArgNum;
- if (Name.equals("CFArrayCreate") || Name.equals("CFSetCreate")) {
+ if (Name == "CFArrayCreate" || Name == "CFSetCreate") {
if (CE->getNumArgs() != 4)
return;
ArgNum = 1;
Arg = CE->getArg(ArgNum)->IgnoreParenCasts();
if (hasPointerToPointerSizedType(Arg))
return;
- } else if (Name.equals("CFDictionaryCreate")) {
+ } else if (Name == "CFDictionaryCreate") {
if (CE->getNumArgs() != 6)
return;
// Check first argument.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
index 28e88245ca95..4937af3b91c2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
@@ -82,7 +82,7 @@ void ObjCContainersChecker::checkPostStmt(const CallExpr *CE,
return;
// Add array size information to the state.
- if (Name.equals("CFArrayCreate")) {
+ if (Name == "CFArrayCreate") {
if (CE->getNumArgs() < 3)
return;
// Note, we can visit the Create method in the post-visit because
@@ -92,7 +92,7 @@ void ObjCContainersChecker::checkPostStmt(const CallExpr *CE,
return;
}
- if (Name.equals("CFArrayGetCount")) {
+ if (Name == "CFArrayGetCount") {
addSizeInfo(CE->getArg(0), CE, C);
return;
}
@@ -105,7 +105,7 @@ void ObjCContainersChecker::checkPreStmt(const CallExpr *CE,
return;
// Check the array access.
- if (Name.equals("CFArrayGetValueAtIndex")) {
+ if (Name == "CFArrayGetValueAtIndex") {
ProgramStateRef State = C.getState();
// Retrieve the size.
// Find out if we saw this array symbol before and have information about
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
index 598b368e74d4..03dab4f7ada7 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
@@ -107,7 +107,7 @@ void ObjCSuperCallChecker::fillSelectors(ASTContext &Ctx,
assert(Descriptor.ArgumentCount <= 1); // No multi-argument selectors yet.
// Get the selector.
- IdentifierInfo *II = &Ctx.Idents.get(Descriptor.SelectorName);
+ const IdentifierInfo *II = &Ctx.Idents.get(Descriptor.SelectorName);
Selector Sel = Ctx.Selectors.getSelector(Descriptor.ArgumentCount, &II);
ClassSelectors.insert(Sel);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
index eb40711812e1..a6c4186cb15b 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
@@ -26,8 +26,8 @@ namespace {
class ObjCSuperDeallocChecker
: public Checker<check::PostObjCMessage, check::PreObjCMessage,
check::PreCall, check::Location> {
- mutable IdentifierInfo *IIdealloc = nullptr;
- mutable IdentifierInfo *IINSObject = nullptr;
+ mutable const IdentifierInfo *IIdealloc = nullptr;
+ mutable const IdentifierInfo *IINSObject = nullptr;
mutable Selector SELdealloc;
const BugType DoubleSuperDeallocBugType{
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
index 1c2d84254d46..23014ff95487 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
@@ -118,8 +118,7 @@ static void checkObjCUnusedIvar(const ObjCImplementationDecl *D,
// (d) are unnamed bitfields
if (Ivar->getAccessControl() != ObjCIvarDecl::Private ||
Ivar->hasAttr<UnusedAttr>() || Ivar->hasAttr<IBOutletAttr>() ||
- Ivar->hasAttr<IBOutletCollectionAttr>() ||
- Ivar->isUnnamedBitfield())
+ Ivar->hasAttr<IBOutletCollectionAttr>() || Ivar->isUnnamedBitField())
continue;
M[Ivar] = Unused;
@@ -161,8 +160,8 @@ static void checkObjCUnusedIvar(const ObjCImplementationDecl *D,
PathDiagnosticLocation L =
PathDiagnosticLocation::create(Ivar, BR.getSourceManager());
- BR.EmitBasicReport(D, Checker, "Unused instance variable", "Optimization",
- os.str(), L);
+ BR.EmitBasicReport(ID, Checker, "Unused instance variable",
+ "Optimization", os.str(), L);
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
index eee9449f3180..4f35d9442ad9 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -117,7 +117,7 @@ public:
return;
uint64_t Elts = 0;
if (const ConstantArrayType *CArrTy = dyn_cast<ConstantArrayType>(ArrTy))
- Elts = CArrTy->getSize().getZExtValue();
+ Elts = CArrTy->getZExtSize();
if (Elts == 0)
return;
const RecordType *RT = ArrTy->getElementType()->getAs<RecordType>();
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
index 2438cf30b39b..eea93a41f138 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
@@ -17,6 +17,7 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "llvm/ADT/StringRef.h"
using namespace clang;
@@ -26,16 +27,85 @@ namespace {
class PointerSubChecker
: public Checker< check::PreStmt<BinaryOperator> > {
const BugType BT{this, "Pointer subtraction"};
+ const llvm::StringLiteral Msg_MemRegionDifferent =
+ "Subtraction of two pointers that do not point into the same array "
+ "is undefined behavior.";
+ const llvm::StringLiteral Msg_LargeArrayIndex =
+ "Using an array index greater than the array size at pointer subtraction "
+ "is undefined behavior.";
+ const llvm::StringLiteral Msg_NegativeArrayIndex =
+ "Using a negative array index at pointer subtraction "
+ "is undefined behavior.";
+ const llvm::StringLiteral Msg_BadVarIndex =
+ "Indexing the address of a variable with other than 1 at this place "
+ "is undefined behavior.";
+
+ bool checkArrayBounds(CheckerContext &C, const Expr *E,
+ const ElementRegion *ElemReg,
+ const MemRegion *Reg) const;
public:
void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
};
}
+bool PointerSubChecker::checkArrayBounds(CheckerContext &C, const Expr *E,
+ const ElementRegion *ElemReg,
+ const MemRegion *Reg) const {
+ if (!ElemReg)
+ return true;
+
+ auto ReportBug = [&](const llvm::StringLiteral &Msg) {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
+ auto R = std::make_unique<PathSensitiveBugReport>(BT, Msg, N);
+ R->addRange(E->getSourceRange());
+ C.emitReport(std::move(R));
+ }
+ };
+
+ ProgramStateRef State = C.getState();
+ const MemRegion *SuperReg = ElemReg->getSuperRegion();
+ SValBuilder &SVB = C.getSValBuilder();
+
+ if (SuperReg == Reg) {
+ if (const llvm::APSInt *I = SVB.getKnownValue(State, ElemReg->getIndex());
+ I && (!I->isOne() && !I->isZero()))
+ ReportBug(Msg_BadVarIndex);
+ return false;
+ }
+
+ DefinedOrUnknownSVal ElemCount =
+ getDynamicElementCount(State, SuperReg, SVB, ElemReg->getElementType());
+ auto IndexTooLarge = SVB.evalBinOp(C.getState(), BO_GT, ElemReg->getIndex(),
+ ElemCount, SVB.getConditionType())
+ .getAs<DefinedOrUnknownSVal>();
+ if (IndexTooLarge) {
+ ProgramStateRef S1, S2;
+ std::tie(S1, S2) = C.getState()->assume(*IndexTooLarge);
+ if (S1 && !S2) {
+ ReportBug(Msg_LargeArrayIndex);
+ return false;
+ }
+ }
+ auto IndexTooSmall = SVB.evalBinOp(State, BO_LT, ElemReg->getIndex(),
+ SVB.makeZeroVal(SVB.getArrayIndexType()),
+ SVB.getConditionType())
+ .getAs<DefinedOrUnknownSVal>();
+ if (IndexTooSmall) {
+ ProgramStateRef S1, S2;
+ std::tie(S1, S2) = State->assume(*IndexTooSmall);
+ if (S1 && !S2) {
+ ReportBug(Msg_NegativeArrayIndex);
+ return false;
+ }
+ }
+ return true;
+}
+
void PointerSubChecker::checkPreStmt(const BinaryOperator *B,
CheckerContext &C) const {
// When doing pointer subtraction, if the two pointers do not point to the
- // same memory chunk, emit a warning.
+ // same array, emit a warning.
if (B->getOpcode() != BO_Sub)
return;
@@ -44,26 +114,58 @@ void PointerSubChecker::checkPreStmt(const BinaryOperator *B,
const MemRegion *LR = LV.getAsRegion();
const MemRegion *RR = RV.getAsRegion();
-
- if (!(LR && RR))
+ if (!LR || !RR)
return;
- const MemRegion *BaseLR = LR->getBaseRegion();
- const MemRegion *BaseRR = RR->getBaseRegion();
+ // Allow subtraction of identical pointers.
+ if (LR == RR)
+ return;
- if (BaseLR == BaseRR)
+ // No warning if one operand is unknown.
+ if (isa<SymbolicRegion>(LR) || isa<SymbolicRegion>(RR))
return;
- // Allow arithmetic on different symbolic regions.
- if (isa<SymbolicRegion>(BaseLR) || isa<SymbolicRegion>(BaseRR))
+ const auto *ElemLR = dyn_cast<ElementRegion>(LR);
+ const auto *ElemRR = dyn_cast<ElementRegion>(RR);
+
+ if (!checkArrayBounds(C, B->getLHS(), ElemLR, RR))
return;
+ if (!checkArrayBounds(C, B->getRHS(), ElemRR, LR))
+ return;
+
+ const ValueDecl *DiffDeclL = nullptr;
+ const ValueDecl *DiffDeclR = nullptr;
+
+ if (ElemLR && ElemRR) {
+ const MemRegion *SuperLR = ElemLR->getSuperRegion();
+ const MemRegion *SuperRR = ElemRR->getSuperRegion();
+ if (SuperLR == SuperRR)
+ return;
+ // Allow arithmetic on different symbolic regions.
+ if (isa<SymbolicRegion>(SuperLR) || isa<SymbolicRegion>(SuperRR))
+ return;
+ if (const auto *SuperDLR = dyn_cast<DeclRegion>(SuperLR))
+ DiffDeclL = SuperDLR->getDecl();
+ if (const auto *SuperDRR = dyn_cast<DeclRegion>(SuperRR))
+ DiffDeclR = SuperDRR->getDecl();
+ }
if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
- constexpr llvm::StringLiteral Msg =
- "Subtraction of two pointers that do not point to the same memory "
- "chunk may cause incorrect result.";
- auto R = std::make_unique<PathSensitiveBugReport>(BT, Msg, N);
+ auto R =
+ std::make_unique<PathSensitiveBugReport>(BT, Msg_MemRegionDifferent, N);
R->addRange(B->getSourceRange());
+ // The declarations may be identical even if the regions are different:
+ // struct { int array[10]; } a, b;
+ // do_something(&a.array[5] - &b.array[5]);
+ // In this case don't emit notes.
+ if (DiffDeclL != DiffDeclR) {
+ if (DiffDeclL)
+ R->addNote("Array at the left-hand side of subtraction",
+ {DiffDeclL, C.getSourceManager()});
+ if (DiffDeclR)
+ R->addNote("Array at the right-hand side of subtraction",
+ {DiffDeclR, C.getSourceManager()});
+ }
C.emitReport(std::move(R));
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
index fa8572cf85ed..86530086ff1b 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -87,7 +87,8 @@ private:
CheckerKind CheckKind) const;
CallDescriptionMap<FnCheck> PThreadCallbacks = {
// Init.
- {{{"pthread_mutex_init"}, 2}, &PthreadLockChecker::InitAnyLock},
+ {{CDM::CLibrary, {"pthread_mutex_init"}, 2},
+ &PthreadLockChecker::InitAnyLock},
// TODO: pthread_rwlock_init(2 arguments).
// TODO: lck_mtx_init(3 arguments).
// TODO: lck_mtx_alloc_init(2 arguments) => returns the mutex.
@@ -95,74 +96,106 @@ private:
// TODO: lck_rw_alloc_init(2 arguments) => returns the mutex.
// Acquire.
- {{{"pthread_mutex_lock"}, 1}, &PthreadLockChecker::AcquirePthreadLock},
- {{{"pthread_rwlock_rdlock"}, 1}, &PthreadLockChecker::AcquirePthreadLock},
- {{{"pthread_rwlock_wrlock"}, 1}, &PthreadLockChecker::AcquirePthreadLock},
- {{{"lck_mtx_lock"}, 1}, &PthreadLockChecker::AcquireXNULock},
- {{{"lck_rw_lock_exclusive"}, 1}, &PthreadLockChecker::AcquireXNULock},
- {{{"lck_rw_lock_shared"}, 1}, &PthreadLockChecker::AcquireXNULock},
+ {{CDM::CLibrary, {"pthread_mutex_lock"}, 1},
+ &PthreadLockChecker::AcquirePthreadLock},
+ {{CDM::CLibrary, {"pthread_rwlock_rdlock"}, 1},
+ &PthreadLockChecker::AcquirePthreadLock},
+ {{CDM::CLibrary, {"pthread_rwlock_wrlock"}, 1},
+ &PthreadLockChecker::AcquirePthreadLock},
+ {{CDM::CLibrary, {"lck_mtx_lock"}, 1},
+ &PthreadLockChecker::AcquireXNULock},
+ {{CDM::CLibrary, {"lck_rw_lock_exclusive"}, 1},
+ &PthreadLockChecker::AcquireXNULock},
+ {{CDM::CLibrary, {"lck_rw_lock_shared"}, 1},
+ &PthreadLockChecker::AcquireXNULock},
// Try.
- {{{"pthread_mutex_trylock"}, 1}, &PthreadLockChecker::TryPthreadLock},
- {{{"pthread_rwlock_tryrdlock"}, 1}, &PthreadLockChecker::TryPthreadLock},
- {{{"pthread_rwlock_trywrlock"}, 1}, &PthreadLockChecker::TryPthreadLock},
- {{{"lck_mtx_try_lock"}, 1}, &PthreadLockChecker::TryXNULock},
- {{{"lck_rw_try_lock_exclusive"}, 1}, &PthreadLockChecker::TryXNULock},
- {{{"lck_rw_try_lock_shared"}, 1}, &PthreadLockChecker::TryXNULock},
+ {{CDM::CLibrary, {"pthread_mutex_trylock"}, 1},
+ &PthreadLockChecker::TryPthreadLock},
+ {{CDM::CLibrary, {"pthread_rwlock_tryrdlock"}, 1},
+ &PthreadLockChecker::TryPthreadLock},
+ {{CDM::CLibrary, {"pthread_rwlock_trywrlock"}, 1},
+ &PthreadLockChecker::TryPthreadLock},
+ {{CDM::CLibrary, {"lck_mtx_try_lock"}, 1},
+ &PthreadLockChecker::TryXNULock},
+ {{CDM::CLibrary, {"lck_rw_try_lock_exclusive"}, 1},
+ &PthreadLockChecker::TryXNULock},
+ {{CDM::CLibrary, {"lck_rw_try_lock_shared"}, 1},
+ &PthreadLockChecker::TryXNULock},
// Release.
- {{{"pthread_mutex_unlock"}, 1}, &PthreadLockChecker::ReleaseAnyLock},
- {{{"pthread_rwlock_unlock"}, 1}, &PthreadLockChecker::ReleaseAnyLock},
- {{{"lck_mtx_unlock"}, 1}, &PthreadLockChecker::ReleaseAnyLock},
- {{{"lck_rw_unlock_exclusive"}, 1}, &PthreadLockChecker::ReleaseAnyLock},
- {{{"lck_rw_unlock_shared"}, 1}, &PthreadLockChecker::ReleaseAnyLock},
- {{{"lck_rw_done"}, 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{CDM::CLibrary, {"pthread_mutex_unlock"}, 1},
+ &PthreadLockChecker::ReleaseAnyLock},
+ {{CDM::CLibrary, {"pthread_rwlock_unlock"}, 1},
+ &PthreadLockChecker::ReleaseAnyLock},
+ {{CDM::CLibrary, {"lck_mtx_unlock"}, 1},
+ &PthreadLockChecker::ReleaseAnyLock},
+ {{CDM::CLibrary, {"lck_rw_unlock_exclusive"}, 1},
+ &PthreadLockChecker::ReleaseAnyLock},
+ {{CDM::CLibrary, {"lck_rw_unlock_shared"}, 1},
+ &PthreadLockChecker::ReleaseAnyLock},
+ {{CDM::CLibrary, {"lck_rw_done"}, 1},
+ &PthreadLockChecker::ReleaseAnyLock},
// Destroy.
- {{{"pthread_mutex_destroy"}, 1}, &PthreadLockChecker::DestroyPthreadLock},
- {{{"lck_mtx_destroy"}, 2}, &PthreadLockChecker::DestroyXNULock},
+ {{CDM::CLibrary, {"pthread_mutex_destroy"}, 1},
+ &PthreadLockChecker::DestroyPthreadLock},
+ {{CDM::CLibrary, {"lck_mtx_destroy"}, 2},
+ &PthreadLockChecker::DestroyXNULock},
// TODO: pthread_rwlock_destroy(1 argument).
// TODO: lck_rw_destroy(2 arguments).
};
CallDescriptionMap<FnCheck> FuchsiaCallbacks = {
// Init.
- {{{"spin_lock_init"}, 1}, &PthreadLockChecker::InitAnyLock},
+ {{CDM::CLibrary, {"spin_lock_init"}, 1},
+ &PthreadLockChecker::InitAnyLock},
// Acquire.
- {{{"spin_lock"}, 1}, &PthreadLockChecker::AcquirePthreadLock},
- {{{"spin_lock_save"}, 3}, &PthreadLockChecker::AcquirePthreadLock},
- {{{"sync_mutex_lock"}, 1}, &PthreadLockChecker::AcquirePthreadLock},
- {{{"sync_mutex_lock_with_waiter"}, 1},
+ {{CDM::CLibrary, {"spin_lock"}, 1},
+ &PthreadLockChecker::AcquirePthreadLock},
+ {{CDM::CLibrary, {"spin_lock_save"}, 3},
+ &PthreadLockChecker::AcquirePthreadLock},
+ {{CDM::CLibrary, {"sync_mutex_lock"}, 1},
+ &PthreadLockChecker::AcquirePthreadLock},
+ {{CDM::CLibrary, {"sync_mutex_lock_with_waiter"}, 1},
&PthreadLockChecker::AcquirePthreadLock},
// Try.
- {{{"spin_trylock"}, 1}, &PthreadLockChecker::TryFuchsiaLock},
- {{{"sync_mutex_trylock"}, 1}, &PthreadLockChecker::TryFuchsiaLock},
- {{{"sync_mutex_timedlock"}, 2}, &PthreadLockChecker::TryFuchsiaLock},
+ {{CDM::CLibrary, {"spin_trylock"}, 1},
+ &PthreadLockChecker::TryFuchsiaLock},
+ {{CDM::CLibrary, {"sync_mutex_trylock"}, 1},
+ &PthreadLockChecker::TryFuchsiaLock},
+ {{CDM::CLibrary, {"sync_mutex_timedlock"}, 2},
+ &PthreadLockChecker::TryFuchsiaLock},
// Release.
- {{{"spin_unlock"}, 1}, &PthreadLockChecker::ReleaseAnyLock},
- {{{"spin_unlock_restore"}, 3}, &PthreadLockChecker::ReleaseAnyLock},
- {{{"sync_mutex_unlock"}, 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{CDM::CLibrary, {"spin_unlock"}, 1},
+ &PthreadLockChecker::ReleaseAnyLock},
+ {{CDM::CLibrary, {"spin_unlock_restore"}, 3},
+ &PthreadLockChecker::ReleaseAnyLock},
+ {{CDM::CLibrary, {"sync_mutex_unlock"}, 1},
+ &PthreadLockChecker::ReleaseAnyLock},
};
CallDescriptionMap<FnCheck> C11Callbacks = {
// Init.
- {{{"mtx_init"}, 2}, &PthreadLockChecker::InitAnyLock},
+ {{CDM::CLibrary, {"mtx_init"}, 2}, &PthreadLockChecker::InitAnyLock},
// Acquire.
- {{{"mtx_lock"}, 1}, &PthreadLockChecker::AcquirePthreadLock},
+ {{CDM::CLibrary, {"mtx_lock"}, 1},
+ &PthreadLockChecker::AcquirePthreadLock},
// Try.
- {{{"mtx_trylock"}, 1}, &PthreadLockChecker::TryC11Lock},
- {{{"mtx_timedlock"}, 2}, &PthreadLockChecker::TryC11Lock},
+ {{CDM::CLibrary, {"mtx_trylock"}, 1}, &PthreadLockChecker::TryC11Lock},
+ {{CDM::CLibrary, {"mtx_timedlock"}, 2}, &PthreadLockChecker::TryC11Lock},
// Release.
- {{{"mtx_unlock"}, 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{CDM::CLibrary, {"mtx_unlock"}, 1}, &PthreadLockChecker::ReleaseAnyLock},
// Destroy
- {{{"mtx_destroy"}, 1}, &PthreadLockChecker::DestroyPthreadLock},
+ {{CDM::CLibrary, {"mtx_destroy"}, 1},
+ &PthreadLockChecker::DestroyPthreadLock},
};
ProgramStateRef resolvePossiblyDestroyedMutex(ProgramStateRef state,
@@ -258,13 +291,9 @@ REGISTER_MAP_WITH_PROGRAMSTATE(DestroyRetVal, const MemRegion *, SymbolRef)
void PthreadLockChecker::checkPostCall(const CallEvent &Call,
CheckerContext &C) const {
- // An additional umbrella check that all functions modeled by this checker
- // are global C functions.
- // TODO: Maybe make this the default behavior of CallDescription
- // with exactly one identifier?
// FIXME: Try to handle cases when the implementation was inlined rather
// than just giving up.
- if (!Call.isGlobalCFunction() || C.wasInlined)
+ if (C.wasInlined)
return;
if (const FnCheck *Callback = PThreadCallbacks.lookup(Call))
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/PutenvWithAutoChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PutenvStackArrayChecker.cpp
index eae162cda693..bf81d57bf82f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/PutenvWithAutoChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PutenvStackArrayChecker.cpp
@@ -1,4 +1,4 @@
-//== PutenvWithAutoChecker.cpp --------------------------------- -*- C++ -*--=//
+//== PutenvStackArrayChecker.cpp ------------------------------- -*- C++ -*--=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,13 +6,13 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines PutenvWithAutoChecker which finds calls of ``putenv``
-// function with automatic variable as the argument.
+// This file defines PutenvStackArrayChecker which finds calls of ``putenv``
+// function with automatic array variable as the argument.
// https://wiki.sei.cmu.edu/confluence/x/6NYxBQ
//
//===----------------------------------------------------------------------===//
-#include "../AllocationState.h"
+#include "AllocationState.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -26,31 +26,36 @@ using namespace clang;
using namespace ento;
namespace {
-class PutenvWithAutoChecker : public Checker<check::PostCall> {
+class PutenvStackArrayChecker : public Checker<check::PostCall> {
private:
- BugType BT{this, "'putenv' function should not be called with auto variables",
+ BugType BT{this, "'putenv' called with stack-allocated string",
categories::SecurityError};
- const CallDescription Putenv{{"putenv"}, 1};
+ const CallDescription Putenv{CDM::CLibrary, {"putenv"}, 1};
public:
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
};
} // namespace
-void PutenvWithAutoChecker::checkPostCall(const CallEvent &Call,
- CheckerContext &C) const {
+void PutenvStackArrayChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
if (!Putenv.matches(Call))
return;
SVal ArgV = Call.getArgSVal(0);
const Expr *ArgExpr = Call.getArgExpr(0);
- const MemSpaceRegion *MSR = ArgV.getAsRegion()->getMemorySpace();
- if (!isa<StackSpaceRegion>(MSR))
+ const auto *SSR =
+ dyn_cast<StackSpaceRegion>(ArgV.getAsRegion()->getMemorySpace());
+ if (!SSR)
+ return;
+ const auto *StackFrameFuncD =
+ dyn_cast_or_null<FunctionDecl>(SSR->getStackFrame()->getDecl());
+ if (StackFrameFuncD && StackFrameFuncD->isMain())
return;
StringRef ErrorMsg = "The 'putenv' function should not be called with "
- "arguments that have automatic storage";
+ "arrays that have automatic storage";
ExplodedNode *N = C.generateErrorNode();
auto Report = std::make_unique<PathSensitiveBugReport>(BT, ErrorMsg, N);
@@ -60,8 +65,10 @@ void PutenvWithAutoChecker::checkPostCall(const CallEvent &Call,
C.emitReport(std::move(Report));
}
-void ento::registerPutenvWithAuto(CheckerManager &Mgr) {
- Mgr.registerChecker<PutenvWithAutoChecker>();
+void ento::registerPutenvStackArray(CheckerManager &Mgr) {
+ Mgr.registerChecker<PutenvStackArrayChecker>();
}
-bool ento::shouldRegisterPutenvWithAuto(const CheckerManager &) { return true; }
+bool ento::shouldRegisterPutenvStackArray(const CheckerManager &) {
+ return true;
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
index c3acb73ba717..f73c9007c183 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
@@ -411,11 +411,11 @@ annotateConsumedSummaryMismatch(const ExplodedNode *N,
}
}
- if (os.str().empty())
+ if (sbuf.empty())
return nullptr;
PathDiagnosticLocation L = PathDiagnosticLocation::create(CallExitLoc, SM);
- return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
+ return std::make_shared<PathDiagnosticEventPiece>(L, sbuf);
}
/// Annotate the parameter at the analysis entry point.
@@ -446,7 +446,7 @@ annotateStartParameter(const ExplodedNode *N, SymbolRef Sym,
assert(CurrT->getCount() == 0);
os << "0";
}
- return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
+ return std::make_shared<PathDiagnosticEventPiece>(L, s);
}
PathDiagnosticPieceRef
@@ -493,7 +493,7 @@ RefCountReportVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
if (PrevT && IsFreeUnowned && CurrV.isNotOwned() && PrevT->isOwned()) {
os << "Object is now not exclusively owned";
auto Pos = PathDiagnosticLocation::create(N->getLocation(), SM);
- return std::make_shared<PathDiagnosticEventPiece>(Pos, os.str());
+ return std::make_shared<PathDiagnosticEventPiece>(Pos, sbuf);
}
// This is the allocation site since the previous node had no bindings
@@ -535,7 +535,7 @@ RefCountReportVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
}
PathDiagnosticLocation Pos(S, SM, N->getLocationContext());
- return std::make_shared<PathDiagnosticEventPiece>(Pos, os.str());
+ return std::make_shared<PathDiagnosticEventPiece>(Pos, sbuf);
}
// Gather up the effects that were performed on the object at this
@@ -582,13 +582,13 @@ RefCountReportVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
if (!shouldGenerateNote(os, PrevT, CurrV, DeallocSent))
return nullptr;
- if (os.str().empty())
+ if (sbuf.empty())
return nullptr; // We have nothing to say!
const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
N->getLocationContext());
- auto P = std::make_shared<PathDiagnosticEventPiece>(Pos, os.str());
+ auto P = std::make_shared<PathDiagnosticEventPiece>(Pos, sbuf);
// Add the range by scanning the children of the statement for any bindings
// to Sym.
@@ -831,7 +831,7 @@ RefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
<< RV->getCount();
}
- return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
+ return std::make_shared<PathDiagnosticEventPiece>(L, sbuf);
}
RefCountReport::RefCountReport(const RefCountBug &D, const LangOptions &LOpts,
@@ -977,7 +977,7 @@ void RefLeakReport::findBindingToReport(CheckerContext &Ctx,
// something like derived regions if we want to construct SVal from
// Sym. Instead, we take the value that is definitely stored in that
// region, thus guaranteeing that trackStoredValue will work.
- bugreporter::trackStoredValue(AllVarBindings[0].second.castAs<KnownSVal>(),
+ bugreporter::trackStoredValue(AllVarBindings[0].second,
AllocBindingToReport, *this);
} else {
AllocBindingToReport = AllocFirstBinding;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp
index c3112ebe4e79..3da571adfa44 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp
@@ -1,4 +1,4 @@
-//===- ReturnValueChecker - Applies guaranteed return values ----*- C++ -*-===//
+//===- ReturnValueChecker - Check methods always returning true -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,8 +6,13 @@
//
//===----------------------------------------------------------------------===//
//
-// This defines ReturnValueChecker, which checks for calls with guaranteed
-// boolean return value. It ensures the return value of each function call.
+// This defines ReturnValueChecker, which models a very specific coding
+// convention within the LLVM/Clang codebase: there several classes that have
+// Error() methods which always return true.
+// This checker was introduced to eliminate false positives caused by this
+// peculiar "always returns true" invariant. (Normally, the analyzer assumes
+// that a function returning `bool` can return both `true` and `false`, because
+// otherwise it could've been a `void` function.)
//
//===----------------------------------------------------------------------===//
@@ -18,43 +23,40 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/FormatVariadic.h"
#include <optional>
using namespace clang;
using namespace ento;
+using llvm::formatv;
namespace {
-class ReturnValueChecker : public Checker<check::PostCall, check::EndFunction> {
+class ReturnValueChecker : public Checker<check::PostCall> {
public:
- // It sets the predefined invariant ('CDM') if the current call not break it.
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
- // It reports whether a predefined invariant ('CDM') is broken.
- void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const;
-
private:
- // The pairs are in the following form: {{{class, call}}, return value}
- const CallDescriptionMap<bool> CDM = {
+ const CallDescriptionSet Methods = {
// These are known in the LLVM project: 'Error()'
- {{{"ARMAsmParser", "Error"}}, true},
- {{{"HexagonAsmParser", "Error"}}, true},
- {{{"LLLexer", "Error"}}, true},
- {{{"LLParser", "Error"}}, true},
- {{{"MCAsmParser", "Error"}}, true},
- {{{"MCAsmParserExtension", "Error"}}, true},
- {{{"TGParser", "Error"}}, true},
- {{{"X86AsmParser", "Error"}}, true},
+ {CDM::CXXMethod, {"ARMAsmParser", "Error"}},
+ {CDM::CXXMethod, {"HexagonAsmParser", "Error"}},
+ {CDM::CXXMethod, {"LLLexer", "Error"}},
+ {CDM::CXXMethod, {"LLParser", "Error"}},
+ {CDM::CXXMethod, {"MCAsmParser", "Error"}},
+ {CDM::CXXMethod, {"MCAsmParserExtension", "Error"}},
+ {CDM::CXXMethod, {"TGParser", "Error"}},
+ {CDM::CXXMethod, {"X86AsmParser", "Error"}},
// 'TokError()'
- {{{"LLParser", "TokError"}}, true},
- {{{"MCAsmParser", "TokError"}}, true},
- {{{"MCAsmParserExtension", "TokError"}}, true},
- {{{"TGParser", "TokError"}}, true},
+ {CDM::CXXMethod, {"LLParser", "TokError"}},
+ {CDM::CXXMethod, {"MCAsmParser", "TokError"}},
+ {CDM::CXXMethod, {"MCAsmParserExtension", "TokError"}},
+ {CDM::CXXMethod, {"TGParser", "TokError"}},
// 'error()'
- {{{"MIParser", "error"}}, true},
- {{{"WasmAsmParser", "error"}}, true},
- {{{"WebAssemblyAsmParser", "error"}}, true},
+ {CDM::CXXMethod, {"MIParser", "error"}},
+ {CDM::CXXMethod, {"WasmAsmParser", "error"}},
+ {CDM::CXXMethod, {"WebAssemblyAsmParser", "error"}},
// Other
- {{{"AsmParser", "printError"}}, true}};
+ {CDM::CXXMethod, {"AsmParser", "printError"}}};
};
} // namespace
@@ -68,100 +70,32 @@ static std::string getName(const CallEvent &Call) {
return Name;
}
-// The predefinitions ('CDM') could break due to the ever growing code base.
-// Check for the expected invariants and see whether they apply.
-static std::optional<bool> isInvariantBreak(bool ExpectedValue, SVal ReturnV,
- CheckerContext &C) {
- auto ReturnDV = ReturnV.getAs<DefinedOrUnknownSVal>();
- if (!ReturnDV)
- return std::nullopt;
-
- if (ExpectedValue)
- return C.getState()->isNull(*ReturnDV).isConstrainedTrue();
-
- return C.getState()->isNull(*ReturnDV).isConstrainedFalse();
-}
-
void ReturnValueChecker::checkPostCall(const CallEvent &Call,
CheckerContext &C) const {
- const bool *RawExpectedValue = CDM.lookup(Call);
- if (!RawExpectedValue)
+ if (!Methods.contains(Call))
return;
- SVal ReturnV = Call.getReturnValue();
- bool ExpectedValue = *RawExpectedValue;
- std::optional<bool> IsInvariantBreak =
- isInvariantBreak(ExpectedValue, ReturnV, C);
- if (!IsInvariantBreak)
- return;
+ auto ReturnV = Call.getReturnValue().getAs<DefinedOrUnknownSVal>();
- // If the invariant is broken it is reported by 'checkEndFunction()'.
- if (*IsInvariantBreak)
+ if (!ReturnV)
return;
- std::string Name = getName(Call);
- const NoteTag *CallTag = C.getNoteTag(
- [Name, ExpectedValue](PathSensitiveBugReport &) -> std::string {
- SmallString<128> Msg;
- llvm::raw_svector_ostream Out(Msg);
-
- Out << '\'' << Name << "' returns "
- << (ExpectedValue ? "true" : "false");
- return std::string(Out.str());
- },
- /*IsPrunable=*/true);
-
ProgramStateRef State = C.getState();
- State = State->assume(ReturnV.castAs<DefinedOrUnknownSVal>(), ExpectedValue);
- C.addTransition(State, CallTag);
-}
-
-void ReturnValueChecker::checkEndFunction(const ReturnStmt *RS,
- CheckerContext &C) const {
- if (!RS || !RS->getRetValue())
+ if (ProgramStateRef StTrue = State->assume(*ReturnV, true)) {
+ // The return value can be true, so transition to a state where it's true.
+ std::string Msg =
+ formatv("'{0}' returns true (by convention)", getName(Call));
+ C.addTransition(StTrue, C.getNoteTag(Msg, /*IsPrunable=*/true));
return;
-
- // We cannot get the caller in the top-frame.
- const StackFrameContext *SFC = C.getStackFrame();
- if (C.getStackFrame()->inTopFrame())
- return;
-
- ProgramStateRef State = C.getState();
- CallEventManager &CMgr = C.getStateManager().getCallEventManager();
- CallEventRef<> Call = CMgr.getCaller(SFC, State);
- if (!Call)
- return;
-
- const bool *RawExpectedValue = CDM.lookup(*Call);
- if (!RawExpectedValue)
- return;
-
- SVal ReturnV = State->getSVal(RS->getRetValue(), C.getLocationContext());
- bool ExpectedValue = *RawExpectedValue;
- std::optional<bool> IsInvariantBreak =
- isInvariantBreak(ExpectedValue, ReturnV, C);
- if (!IsInvariantBreak)
- return;
-
- // If the invariant is appropriate it is reported by 'checkPostCall()'.
- if (!*IsInvariantBreak)
- return;
-
- std::string Name = getName(*Call);
- const NoteTag *CallTag = C.getNoteTag(
- [Name, ExpectedValue](BugReport &BR) -> std::string {
- SmallString<128> Msg;
- llvm::raw_svector_ostream Out(Msg);
-
- // The following is swapped because the invariant is broken.
- Out << '\'' << Name << "' returns "
- << (ExpectedValue ? "false" : "true");
-
- return std::string(Out.str());
- },
- /*IsPrunable=*/false);
-
- C.addTransition(State, CallTag);
+ }
+ // Paranoia: if the return value is known to be false (which is highly
+ // unlikely, it's easy to ensure that the method always returns true), then
+ // produce a note that highlights that this unusual situation.
+ // Note that this checker is 'hidden' so it cannot produce a bug report.
+ std::string Msg = formatv("'{0}' returned false, breaking the convention "
+ "that it always returns true",
+ getName(Call));
+ C.addTransition(State, C.getNoteTag(Msg, /*IsPrunable=*/true));
}
void ento::registerReturnValueChecker(CheckerManager &Mgr) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp
index 788f2875863c..e037719b9029 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp
@@ -1,4 +1,4 @@
-//===-- STLAlgorithmModeling.cpp -----------------------------------*- C++ -*--//
+//===-- STLAlgorithmModeling.cpp ----------------------------------*- C++ -*--//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -33,29 +33,50 @@ class STLAlgorithmModeling : public Checker<eval::Call> {
const CallExpr *) const;
const CallDescriptionMap<FnCheck> Callbacks = {
- {{{"std", "find"}, 3}, &STLAlgorithmModeling::evalFind},
- {{{"std", "find"}, 4}, &STLAlgorithmModeling::evalFind},
- {{{"std", "find_if"}, 3}, &STLAlgorithmModeling::evalFind},
- {{{"std", "find_if"}, 4}, &STLAlgorithmModeling::evalFind},
- {{{"std", "find_if_not"}, 3}, &STLAlgorithmModeling::evalFind},
- {{{"std", "find_if_not"}, 4}, &STLAlgorithmModeling::evalFind},
- {{{"std", "find_first_of"}, 4}, &STLAlgorithmModeling::evalFind},
- {{{"std", "find_first_of"}, 5}, &STLAlgorithmModeling::evalFind},
- {{{"std", "find_first_of"}, 6}, &STLAlgorithmModeling::evalFind},
- {{{"std", "find_end"}, 4}, &STLAlgorithmModeling::evalFind},
- {{{"std", "find_end"}, 5}, &STLAlgorithmModeling::evalFind},
- {{{"std", "find_end"}, 6}, &STLAlgorithmModeling::evalFind},
- {{{"std", "lower_bound"}, 3}, &STLAlgorithmModeling::evalFind},
- {{{"std", "lower_bound"}, 4}, &STLAlgorithmModeling::evalFind},
- {{{"std", "upper_bound"}, 3}, &STLAlgorithmModeling::evalFind},
- {{{"std", "upper_bound"}, 4}, &STLAlgorithmModeling::evalFind},
- {{{"std", "search"}, 3}, &STLAlgorithmModeling::evalFind},
- {{{"std", "search"}, 4}, &STLAlgorithmModeling::evalFind},
- {{{"std", "search"}, 5}, &STLAlgorithmModeling::evalFind},
- {{{"std", "search"}, 6}, &STLAlgorithmModeling::evalFind},
- {{{"std", "search_n"}, 4}, &STLAlgorithmModeling::evalFind},
- {{{"std", "search_n"}, 5}, &STLAlgorithmModeling::evalFind},
- {{{"std", "search_n"}, 6}, &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "find"}, 3}, &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "find"}, 4}, &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "find_if"}, 3},
+ &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "find_if"}, 4},
+ &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "find_if_not"}, 3},
+ &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "find_if_not"}, 4},
+ &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "find_first_of"}, 4},
+ &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "find_first_of"}, 5},
+ &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "find_first_of"}, 6},
+ &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "find_end"}, 4},
+ &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "find_end"}, 5},
+ &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "find_end"}, 6},
+ &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "lower_bound"}, 3},
+ &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "lower_bound"}, 4},
+ &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "upper_bound"}, 3},
+ &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "upper_bound"}, 4},
+ &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "search"}, 3},
+ &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "search"}, 4},
+ &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "search"}, 5},
+ &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "search"}, 6},
+ &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "search_n"}, 4},
+ &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "search_n"}, 5},
+ &STLAlgorithmModeling::evalFind},
+ {{CDM::SimpleFunc, {"std", "search_n"}, 6},
+ &STLAlgorithmModeling::evalFind},
};
public:
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SetgidSetuidOrderChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SetgidSetuidOrderChecker.cpp
new file mode 100644
index 000000000000..dbe3fd33a6b4
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SetgidSetuidOrderChecker.cpp
@@ -0,0 +1,196 @@
+//===-- SetgidSetuidOrderChecker.cpp - check privilege revocation calls ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a checker to detect possible reversed order of privilege
+// revocations when 'setgid' and 'setuid' is used.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+enum SetPrivilegeFunctionKind { Irrelevant, Setuid, Setgid };
+
+class SetgidSetuidOrderChecker : public Checker<check::PostCall, eval::Assume> {
+ const BugType BT{this, "Possible wrong order of privilege revocation"};
+
+ const CallDescription SetuidDesc{CDM::CLibrary, {"setuid"}, 1};
+ const CallDescription SetgidDesc{CDM::CLibrary, {"setgid"}, 1};
+
+ const CallDescription GetuidDesc{CDM::CLibrary, {"getuid"}, 0};
+ const CallDescription GetgidDesc{CDM::CLibrary, {"getgid"}, 0};
+
+ const CallDescriptionSet OtherSetPrivilegeDesc{
+ {CDM::CLibrary, {"seteuid"}, 1}, {CDM::CLibrary, {"setegid"}, 1},
+ {CDM::CLibrary, {"setreuid"}, 2}, {CDM::CLibrary, {"setregid"}, 2},
+ {CDM::CLibrary, {"setresuid"}, 3}, {CDM::CLibrary, {"setresgid"}, 3}};
+
+public:
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+ ProgramStateRef evalAssume(ProgramStateRef State, SVal Cond,
+ bool Assumption) const;
+
+private:
+ void processSetuid(ProgramStateRef State, const CallEvent &Call,
+ CheckerContext &C) const;
+ void processSetgid(ProgramStateRef State, const CallEvent &Call,
+ CheckerContext &C) const;
+ void processOther(ProgramStateRef State, const CallEvent &Call,
+ CheckerContext &C) const;
+ /// Check if a function like \c getuid or \c getgid is called directly from
+ /// the first argument of the function called from \a Call.
+ bool isFunctionCalledInArg(const CallDescription &Desc,
+ const CallEvent &Call) const;
+ void emitReport(ProgramStateRef State, CheckerContext &C) const;
+};
+
+} // end anonymous namespace
+
+/// Store if there was a call to 'setuid(getuid())' or 'setgid(getgid())' not
+/// followed by other different privilege-change functions.
+/// If the value \c Setuid is stored and a 'setgid(getgid())' call is found we
+/// have found the bug to be reported. Value \c Setgid is used too to prevent
+/// warnings at a setgid-setuid-setgid sequence.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(LastSetPrivilegeCall, SetPrivilegeFunctionKind)
+/// Store the symbol value of the last 'setuid(getuid())' call. This is used to
+/// detect if the result is compared to -1 and avoid warnings on that branch
+/// (which is the failure branch of the call), and for identification of note
+/// tags.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(LastSetuidCallSVal, SymbolRef)
+
+void SetgidSetuidOrderChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ if (SetuidDesc.matches(Call)) {
+ processSetuid(State, Call, C);
+ } else if (SetgidDesc.matches(Call)) {
+ processSetgid(State, Call, C);
+ } else if (OtherSetPrivilegeDesc.contains(Call)) {
+ processOther(State, Call, C);
+ }
+}
+
+ProgramStateRef SetgidSetuidOrderChecker::evalAssume(ProgramStateRef State,
+ SVal Cond,
+ bool Assumption) const {
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ SymbolRef LastSetuidSym = State->get<LastSetuidCallSVal>();
+ if (!LastSetuidSym)
+ return State;
+
+ // Check if the most recent call to 'setuid(getuid())' is assumed to be != 0.
+ // It should be only -1 at failure, but we want to accept a "!= 0" check too.
+ // (But now an invalid failure check like "!= 1" will be recognized as correct
+ // too. The "invalid failure check" is a different bug that is not the scope
+ // of this checker.)
+ auto FailComparison =
+ SVB.evalBinOpNN(State, BO_NE, nonloc::SymbolVal(LastSetuidSym),
+ SVB.makeIntVal(0, /*isUnsigned=*/false),
+ SVB.getConditionType())
+ .getAs<DefinedOrUnknownSVal>();
+ if (!FailComparison)
+ return State;
+ if (auto IsFailBranch = State->assume(*FailComparison);
+ IsFailBranch.first && !IsFailBranch.second) {
+ // This is the 'setuid(getuid())' != 0 case.
+ // On this branch we do not want to emit warning.
+ State = State->set<LastSetPrivilegeCall>(Irrelevant);
+ State = State->set<LastSetuidCallSVal>(SymbolRef{});
+ }
+ return State;
+}
+
+void SetgidSetuidOrderChecker::processSetuid(ProgramStateRef State,
+ const CallEvent &Call,
+ CheckerContext &C) const {
+ bool IsSetuidWithGetuid = isFunctionCalledInArg(GetuidDesc, Call);
+ if (State->get<LastSetPrivilegeCall>() != Setgid && IsSetuidWithGetuid) {
+ SymbolRef RetSym = Call.getReturnValue().getAsSymbol();
+ State = State->set<LastSetPrivilegeCall>(Setuid);
+ State = State->set<LastSetuidCallSVal>(RetSym);
+ const NoteTag *Note = C.getNoteTag([this,
+ RetSym](PathSensitiveBugReport &BR) {
+ if (!BR.isInteresting(RetSym) || &BR.getBugType() != &this->BT)
+ return "";
+ return "Call to 'setuid' found here that removes superuser privileges";
+ });
+ C.addTransition(State, Note);
+ return;
+ }
+ State = State->set<LastSetPrivilegeCall>(Irrelevant);
+ State = State->set<LastSetuidCallSVal>(SymbolRef{});
+ C.addTransition(State);
+}
+
+void SetgidSetuidOrderChecker::processSetgid(ProgramStateRef State,
+ const CallEvent &Call,
+ CheckerContext &C) const {
+ bool IsSetgidWithGetgid = isFunctionCalledInArg(GetgidDesc, Call);
+ if (State->get<LastSetPrivilegeCall>() == Setuid) {
+ if (IsSetgidWithGetgid) {
+ State = State->set<LastSetPrivilegeCall>(Irrelevant);
+ emitReport(State, C);
+ return;
+ }
+ State = State->set<LastSetPrivilegeCall>(Irrelevant);
+ } else {
+ State = State->set<LastSetPrivilegeCall>(IsSetgidWithGetgid ? Setgid
+ : Irrelevant);
+ }
+ State = State->set<LastSetuidCallSVal>(SymbolRef{});
+ C.addTransition(State);
+}
+
+void SetgidSetuidOrderChecker::processOther(ProgramStateRef State,
+ const CallEvent &Call,
+ CheckerContext &C) const {
+ State = State->set<LastSetuidCallSVal>(SymbolRef{});
+ State = State->set<LastSetPrivilegeCall>(Irrelevant);
+ C.addTransition(State);
+}
+
+bool SetgidSetuidOrderChecker::isFunctionCalledInArg(
+ const CallDescription &Desc, const CallEvent &Call) const {
+ if (const auto *CallInArg0 =
+ dyn_cast<CallExpr>(Call.getArgExpr(0)->IgnoreParenImpCasts()))
+ return Desc.matchesAsWritten(*CallInArg0);
+ return false;
+}
+
+void SetgidSetuidOrderChecker::emitReport(ProgramStateRef State,
+ CheckerContext &C) const {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(State)) {
+ llvm::StringLiteral Msg =
+ "A 'setgid(getgid())' call following a 'setuid(getuid())' "
+ "call is likely to fail; probably the order of these "
+ "statements is wrong";
+ auto Report = std::make_unique<PathSensitiveBugReport>(BT, Msg, N);
+ Report->markInteresting(State->get<LastSetuidCallSVal>());
+ C.emitReport(std::move(Report));
+ }
+}
+
+void ento::registerSetgidSetuidOrderChecker(CheckerManager &mgr) {
+ mgr.registerChecker<SetgidSetuidOrderChecker>();
+}
+
+bool ento::shouldRegisterSetgidSetuidOrderChecker(const CheckerManager &mgr) {
+ return true;
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
index 7cbe271dfbf9..5152624d00f4 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
@@ -1,4 +1,4 @@
-//===-- SimpleStreamChecker.cpp -----------------------------------------*- C++ -*--//
+//===-- SimpleStreamChecker.cpp -----------------------------------*- C++ -*--//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -52,8 +52,8 @@ class SimpleStreamChecker : public Checker<check::PostCall,
check::PreCall,
check::DeadSymbols,
check::PointerEscape> {
- const CallDescription OpenFn{{"fopen"}, 2};
- const CallDescription CloseFn{{"fclose"}, 1};
+ const CallDescription OpenFn{CDM::CLibrary, {"fopen"}, 2};
+ const CallDescription CloseFn{CDM::CLibrary, {"fclose"}, 1};
const BugType DoubleCloseBugType{this, "Double fclose",
"Unix Stream API Error"};
@@ -92,9 +92,6 @@ REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState)
void SimpleStreamChecker::checkPostCall(const CallEvent &Call,
CheckerContext &C) const {
- if (!Call.isGlobalCFunction())
- return;
-
if (!OpenFn.matches(Call))
return;
@@ -111,9 +108,6 @@ void SimpleStreamChecker::checkPostCall(const CallEvent &Call,
void SimpleStreamChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
- if (!Call.isGlobalCFunction())
- return;
-
if (!CloseFn.matches(Call))
return;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
index 268fc742f050..505020d4bb39 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
@@ -86,14 +86,14 @@ private:
using SmartPtrMethodHandlerFn =
void (SmartPtrModeling::*)(const CallEvent &Call, CheckerContext &) const;
CallDescriptionMap<SmartPtrMethodHandlerFn> SmartPtrMethodHandlers{
- {{{"reset"}}, &SmartPtrModeling::handleReset},
- {{{"release"}}, &SmartPtrModeling::handleRelease},
- {{{"swap"}, 1}, &SmartPtrModeling::handleSwapMethod},
- {{{"get"}}, &SmartPtrModeling::handleGet}};
- const CallDescription StdSwapCall{{"std", "swap"}, 2};
- const CallDescription StdMakeUniqueCall{{"std", "make_unique"}};
- const CallDescription StdMakeUniqueForOverwriteCall{
- {"std", "make_unique_for_overwrite"}};
+ {{CDM::CXXMethod, {"reset"}}, &SmartPtrModeling::handleReset},
+ {{CDM::CXXMethod, {"release"}}, &SmartPtrModeling::handleRelease},
+ {{CDM::CXXMethod, {"swap"}, 1}, &SmartPtrModeling::handleSwapMethod},
+ {{CDM::CXXMethod, {"get"}}, &SmartPtrModeling::handleGet}};
+ const CallDescription StdSwapCall{CDM::SimpleFunc, {"std", "swap"}, 2};
+ const CallDescriptionSet MakeUniqueVariants{
+ {CDM::SimpleFunc, {"std", "make_unique"}},
+ {CDM::SimpleFunc, {"std", "make_unique_for_overwrite"}}};
};
} // end of anonymous namespace
@@ -296,7 +296,7 @@ bool SmartPtrModeling::evalCall(const CallEvent &Call,
return handleSwap(State, Call.getArgSVal(0), Call.getArgSVal(1), C);
}
- if (matchesAny(Call, StdMakeUniqueCall, StdMakeUniqueForOverwriteCall)) {
+ if (MakeUniqueVariants.contains(Call)) {
if (!ModelSmartPtrDereference)
return false;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
index fcd907a9bb0d..8f4bd17afc85 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
@@ -30,7 +30,7 @@
// was not consciously intended, and therefore it might have been unreachable.
//
// This checker uses eval::Call for modeling pure functions (functions without
-// side effets), for which their `Summary' is a precise model. This avoids
+// side effects), for which their `Summary' is a precise model. This avoids
// unnecessary invalidation passes. Conflicts with other checkers are unlikely
// because if the function has no other effects, other checkers would probably
// never want to improve upon the modeling done by this checker.
@@ -672,7 +672,7 @@ class StdLibraryFunctionsChecker
StringRef getNote() const { return Note; }
};
- using ArgTypes = std::vector<std::optional<QualType>>;
+ using ArgTypes = ArrayRef<std::optional<QualType>>;
using RetType = std::optional<QualType>;
// A placeholder type, we use it whenever we do not care about the concrete
@@ -1746,7 +1746,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
}
// Add the same summary for different names with the Signature explicitly
// given.
- void operator()(std::vector<StringRef> Names, Signature Sign, Summary Sum) {
+ void operator()(ArrayRef<StringRef> Names, Signature Sign, Summary Sum) {
for (StringRef Name : Names)
operator()(Name, Sign, Sum);
}
@@ -2023,13 +2023,6 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
{{EOFv, EOFv}, {0, UCharRangeMax}},
"an unsigned char value or EOF")));
- // The getc() family of functions that returns either a char or an EOF.
- addToFunctionSummaryMap(
- {"getc", "fgetc"}, Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case({ReturnValueCondition(WithinRange,
- {{EOFv, EOFv}, {0, UCharRangeMax}})},
- ErrnoIrrelevant));
addToFunctionSummaryMap(
"getchar", Signature(ArgTypes{}, RetType{IntTy}),
Summary(NoEvalCall)
@@ -2139,7 +2132,17 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
std::move(GetenvSummary));
}
- if (ModelPOSIX) {
+ if (!ModelPOSIX) {
+ // Without POSIX use of 'errno' is not specified (in these cases).
+ // Add these functions without 'errno' checks.
+ addToFunctionSummaryMap(
+ {"getc", "fgetc"}, Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(WithinRange,
+ {{EOFv, EOFv}, {0, UCharRangeMax}})},
+ ErrnoIrrelevant)
+ .ArgConstraint(NotNull(ArgNo(0))));
+ } else {
const auto ReturnsZeroOrMinusOne =
ConstraintSet{ReturnValueCondition(WithinRange, Range(-1, 0))};
const auto ReturnsZero =
@@ -2204,6 +2207,16 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
.ArgConstraint(NotNull(ArgNo(1)))
.ArgConstraint(NotNull(ArgNo(2))));
+ // FILE *popen(const char *command, const char *type);
+ addToFunctionSummaryMap(
+ "popen",
+ Signature(ArgTypes{ConstCharPtrTy, ConstCharPtrTy}, RetType{FilePtrTy}),
+ Summary(NoEvalCall)
+ .Case({NotNull(Ret)}, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({IsNull(Ret)}, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
// int fclose(FILE *stream);
addToFunctionSummaryMap(
"fclose", Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
@@ -2212,6 +2225,72 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
.Case(ReturnsEOF, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
+ // int pclose(FILE *stream);
+ addToFunctionSummaryMap(
+ "pclose", Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(WithinRange, {{0, IntMax}})},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ std::optional<QualType> Off_tTy = lookupTy("off_t");
+ std::optional<RangeInt> Off_tMax = getMaxValue(Off_tTy);
+
+ // int fgetc(FILE *stream);
+ // 'getc' is the same as 'fgetc' but may be a macro
+ addToFunctionSummaryMap(
+ {"getc", "fgetc"}, Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(WithinRange, {{0, UCharRangeMax}})},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({ReturnValueCondition(WithinRange, SingleValue(EOFv))},
+ ErrnoIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int fputc(int c, FILE *stream);
+ // 'putc' is the same as 'fputc' but may be a macro
+ addToFunctionSummaryMap(
+ {"putc", "fputc"},
+ Signature(ArgTypes{IntTy, FilePtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case({ArgumentCondition(0, WithinRange, Range(0, UCharRangeMax)),
+ ReturnValueCondition(BO_EQ, ArgNo(0))},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({ArgumentCondition(0, OutOfRange, Range(0, UCharRangeMax)),
+ ReturnValueCondition(WithinRange, Range(0, UCharRangeMax))},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({ReturnValueCondition(WithinRange, SingleValue(EOFv))},
+ ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // char *fgets(char *restrict s, int n, FILE *restrict stream);
+ addToFunctionSummaryMap(
+ "fgets",
+ Signature(ArgTypes{CharPtrRestrictTy, IntTy, FilePtrRestrictTy},
+ RetType{CharPtrTy}),
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(BO_EQ, ArgNo(0))},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({IsNull(Ret)}, ErrnoIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(ArgumentCondition(1, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(
+ BufferSize(/*Buffer=*/ArgNo(0), /*BufSize=*/ArgNo(1)))
+ .ArgConstraint(NotNull(ArgNo(2))));
+
+ // int fputs(const char *restrict s, FILE *restrict stream);
+ addToFunctionSummaryMap(
+ "fputs",
+ Signature(ArgTypes{ConstCharPtrRestrictTy, FilePtrRestrictTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsNonnegative, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({ReturnValueCondition(WithinRange, SingleValue(EOFv))},
+ ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
// int ungetc(int c, FILE *stream);
addToFunctionSummaryMap(
"ungetc", Signature(ArgTypes{IntTy, FilePtrTy}, RetType{IntTy}),
@@ -2231,9 +2310,6 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
0, WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}}))
.ArgConstraint(NotNull(ArgNo(1))));
- std::optional<QualType> Off_tTy = lookupTy("off_t");
- std::optional<RangeInt> Off_tMax = getMaxValue(Off_tTy);
-
// int fseek(FILE *stream, long offset, int whence);
// FIXME: It can be possible to get the 'SEEK_' values (like EOFv) and use
// these for condition of arg 2.
@@ -2312,12 +2388,15 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
.ArgConstraint(NotNull(ArgNo(0))));
// int fileno(FILE *stream);
+ // According to POSIX 'fileno' may fail and set 'errno'.
+ // But in Linux it may fail only if the specified file pointer is invalid.
+ // At many places 'fileno' is used without check for failure and a failure
+ // case here would produce a large amount of likely false positive warnings.
+ // To avoid this, we assume here that it does not fail.
addToFunctionSummaryMap(
"fileno", Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
- GenericSuccessMsg)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .Case(ReturnsValidFileDescriptor, ErrnoUnchanged, GenericSuccessMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// void rewind(FILE *stream);
@@ -2827,21 +2906,6 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
- // FILE *popen(const char *command, const char *type);
- // FIXME: Improve for errno modeling.
- addToFunctionSummaryMap(
- "popen",
- Signature(ArgTypes{ConstCharPtrTy, ConstCharPtrTy}, RetType{FilePtrTy}),
- Summary(NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0)))
- .ArgConstraint(NotNull(ArgNo(1))));
-
- // int pclose(FILE *stream);
- // FIXME: Improve for errno modeling.
- addToFunctionSummaryMap(
- "pclose", Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
- Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
-
// int close(int fildes);
addToFunctionSummaryMap(
"close", Signature(ArgTypes{IntTy}, RetType{IntTy}),
@@ -2988,12 +3052,16 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
// char *realpath(const char *restrict file_name,
// char *restrict resolved_name);
- // FIXME: Improve for errno modeling.
+ // FIXME: If the argument 'resolved_name' is not NULL, macro 'PATH_MAX'
+ // should be defined in "limits.h" to guarantee success.
addToFunctionSummaryMap(
"realpath",
Signature(ArgTypes{ConstCharPtrRestrictTy, CharPtrRestrictTy},
RetType{CharPtrTy}),
- Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
+ Summary(NoEvalCall)
+ .Case({NotNull(Ret)}, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({IsNull(Ret)}, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
QualType CharPtrConstPtr = getPointerTy(getConstTy(CharPtrTy));
@@ -3002,7 +3070,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"execv",
Signature(ArgTypes{ConstCharPtrTy, CharPtrConstPtr}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsMinusOne, ErrnoIrrelevant)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// int execvp(const char *file, char *const argv[]);
@@ -3010,7 +3078,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"execvp",
Signature(ArgTypes{ConstCharPtrTy, CharPtrConstPtr}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsMinusOne, ErrnoIrrelevant)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// int getopt(int argc, char * const argv[], const char *optstring);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp
index f7b7befe28ee..19877964bd90 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp
@@ -129,9 +129,11 @@ static llvm::StringRef indefiniteArticleBasedOnVowel(char a) {
class StdVariantChecker : public Checker<eval::Call, check::RegionChanges> {
// Call descriptors to find relevant calls
- CallDescription VariantConstructor{{"std", "variant", "variant"}};
- CallDescription VariantAssignmentOperator{{"std", "variant", "operator="}};
- CallDescription StdGet{{"std", "get"}, 1, 1};
+ CallDescription VariantConstructor{CDM::CXXMethod,
+ {"std", "variant", "variant"}};
+ CallDescription VariantAssignmentOperator{CDM::CXXMethod,
+ {"std", "variant", "operator="}};
+ CallDescription StdGet{CDM::SimpleFunc, {"std", "get"}, 1, 1};
BugType BadVariantType{this, "BadVariantType", "BadVariantType"};
@@ -295,4 +297,4 @@ bool clang::ento::shouldRegisterStdVariantChecker(
void clang::ento::registerStdVariantChecker(clang::ento::CheckerManager &mgr) {
mgr.registerChecker<StdVariantChecker>();
-} \ No newline at end of file
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index 07727b339d96..e8d538388e56 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -10,6 +10,9 @@
//
//===----------------------------------------------------------------------===//
+#include "NoOwnershipChangeVisitor.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -21,6 +24,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/Sequence.h"
#include <functional>
#include <optional>
@@ -73,6 +77,12 @@ struct StreamErrorState {
/// Returns if the StreamErrorState is a valid object.
operator bool() const { return NoError || FEof || FError; }
+ LLVM_DUMP_METHOD void dump() const { dumpToStream(llvm::errs()); }
+ LLVM_DUMP_METHOD void dumpToStream(llvm::raw_ostream &os) const {
+ os << "NoError: " << NoError << ", FEof: " << FEof
+ << ", FError: " << FError;
+ }
+
void Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddBoolean(NoError);
ID.AddBoolean(FEof);
@@ -97,6 +107,18 @@ struct StreamState {
OpenFailed /// The last open operation has failed.
} State;
+ StringRef getKindStr() const {
+ switch (State) {
+ case Opened:
+ return "Opened";
+ case Closed:
+ return "Closed";
+ case OpenFailed:
+ return "OpenFailed";
+ }
+ llvm_unreachable("Unknown StreamState!");
+ }
+
/// State of the error flags.
/// Ignored in non-opened stream state but must be NoError.
StreamErrorState const ErrorState;
@@ -145,6 +167,9 @@ struct StreamState {
return StreamState{L, OpenFailed, {}, false};
}
+ LLVM_DUMP_METHOD void dump() const { dumpToStream(llvm::errs()); }
+ LLVM_DUMP_METHOD void dumpToStream(llvm::raw_ostream &os) const;
+
void Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddPointer(LastOperation);
ID.AddInteger(State);
@@ -155,6 +180,11 @@ struct StreamState {
} // namespace
+// This map holds the state of a stream.
+// The stream is identified with a SymbolRef that is created when a stream
+// opening function is modeled by the checker.
+REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState)
+
//===----------------------------------------------------------------------===//
// StreamChecker class and utility functions.
//===----------------------------------------------------------------------===//
@@ -168,12 +198,23 @@ using FnCheck = std::function<void(const StreamChecker *, const FnDescription *,
using ArgNoTy = unsigned int;
static const ArgNoTy ArgNone = std::numeric_limits<ArgNoTy>::max();
+const char *FeofNote = "Assuming stream reaches end-of-file here";
+const char *FerrorNote = "Assuming this stream operation fails";
+
struct FnDescription {
FnCheck PreFn;
FnCheck EvalFn;
ArgNoTy StreamArgNo;
};
+LLVM_DUMP_METHOD void StreamState::dumpToStream(llvm::raw_ostream &os) const {
+ os << "{Kind: " << getKindStr() << ", Last operation: " << LastOperation
+ << ", ErrorState: ";
+ ErrorState.dumpToStream(os);
+ os << ", FilePos: " << (FilePositionIndeterminate ? "Indeterminate" : "OK")
+ << '}';
+}
+
/// Get the value of the stream argument out of the passed call event.
/// The call should contain a function that is described by Desc.
SVal getStreamArg(const FnDescription *Desc, const CallEvent &Call) {
@@ -208,6 +249,10 @@ ProgramStateRef bindInt(uint64_t Value, ProgramStateRef State,
return State;
}
+inline void assertStreamStateOpened(const StreamState *SS) {
+ assert(SS->isOpened() && "Stream is expected to be opened");
+}
+
class StreamChecker : public Checker<check::PreCall, eval::Call,
check::DeadSymbols, check::PointerEscape> {
BugType BT_FileNull{this, "NULL stream pointer", "Stream handling error"};
@@ -231,91 +276,167 @@ public:
const CallEvent *Call,
PointerEscapeKind Kind) const;
+ const BugType *getBT_StreamEof() const { return &BT_StreamEof; }
+ const BugType *getBT_IndeterminatePosition() const {
+ return &BT_IndeterminatePosition;
+ }
+
+ const NoteTag *constructSetEofNoteTag(CheckerContext &C,
+ SymbolRef StreamSym) const {
+ return C.getNoteTag([this, StreamSym](PathSensitiveBugReport &BR) {
+ if (!BR.isInteresting(StreamSym) ||
+ &BR.getBugType() != this->getBT_StreamEof())
+ return "";
+
+ BR.markNotInteresting(StreamSym);
+
+ return FeofNote;
+ });
+ }
+
+ const NoteTag *constructSetErrorNoteTag(CheckerContext &C,
+ SymbolRef StreamSym) const {
+ return C.getNoteTag([this, StreamSym](PathSensitiveBugReport &BR) {
+ if (!BR.isInteresting(StreamSym) ||
+ &BR.getBugType() != this->getBT_IndeterminatePosition())
+ return "";
+
+ BR.markNotInteresting(StreamSym);
+
+ return FerrorNote;
+ });
+ }
+
+ const NoteTag *constructSetEofOrErrorNoteTag(CheckerContext &C,
+ SymbolRef StreamSym) const {
+ return C.getNoteTag([this, StreamSym](PathSensitiveBugReport &BR) {
+ if (!BR.isInteresting(StreamSym))
+ return "";
+
+ if (&BR.getBugType() == this->getBT_StreamEof()) {
+ BR.markNotInteresting(StreamSym);
+ return FeofNote;
+ }
+ if (&BR.getBugType() == this->getBT_IndeterminatePosition()) {
+ BR.markNotInteresting(StreamSym);
+ return FerrorNote;
+ }
+
+ return "";
+ });
+ }
+
/// If true, evaluate special testing stream functions.
bool TestMode = false;
- const BugType *getBT_StreamEof() const { return &BT_StreamEof; }
+ /// If true, generate failure branches for cases that are often not checked.
+ bool PedanticMode = false;
+
+ const CallDescription FCloseDesc = {CDM::CLibrary, {"fclose"}, 1};
private:
CallDescriptionMap<FnDescription> FnDescriptions = {
- {{{"fopen"}, 2}, {nullptr, &StreamChecker::evalFopen, ArgNone}},
- {{{"fdopen"}, 2}, {nullptr, &StreamChecker::evalFopen, ArgNone}},
- {{{"freopen"}, 3},
+ {{CDM::CLibrary, {"fopen"}, 2},
+ {nullptr, &StreamChecker::evalFopen, ArgNone}},
+ {{CDM::CLibrary, {"fdopen"}, 2},
+ {nullptr, &StreamChecker::evalFopen, ArgNone}},
+ {{CDM::CLibrary, {"freopen"}, 3},
{&StreamChecker::preFreopen, &StreamChecker::evalFreopen, 2}},
- {{{"tmpfile"}, 0}, {nullptr, &StreamChecker::evalFopen, ArgNone}},
- {{{"fclose"}, 1},
- {&StreamChecker::preDefault, &StreamChecker::evalFclose, 0}},
- {{{"fread"}, 4},
- {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, true),
+ {{CDM::CLibrary, {"tmpfile"}, 0},
+ {nullptr, &StreamChecker::evalFopen, ArgNone}},
+ {FCloseDesc, {&StreamChecker::preDefault, &StreamChecker::evalFclose, 0}},
+ {{CDM::CLibrary, {"fread"}, 4},
+ {&StreamChecker::preRead,
std::bind(&StreamChecker::evalFreadFwrite, _1, _2, _3, _4, true), 3}},
- {{{"fwrite"}, 4},
- {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, false),
+ {{CDM::CLibrary, {"fwrite"}, 4},
+ {&StreamChecker::preWrite,
std::bind(&StreamChecker::evalFreadFwrite, _1, _2, _3, _4, false), 3}},
- {{{"fgetc"}, 1},
- {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, true),
+ {{CDM::CLibrary, {"fgetc"}, 1},
+ {&StreamChecker::preRead,
std::bind(&StreamChecker::evalFgetx, _1, _2, _3, _4, true), 0}},
- {{{"fgets"}, 3},
- {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, true),
+ {{CDM::CLibrary, {"fgets"}, 3},
+ {&StreamChecker::preRead,
std::bind(&StreamChecker::evalFgetx, _1, _2, _3, _4, false), 2}},
- {{{"fputc"}, 2},
- {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, false),
+ {{CDM::CLibrary, {"getc"}, 1},
+ {&StreamChecker::preRead,
+ std::bind(&StreamChecker::evalFgetx, _1, _2, _3, _4, true), 0}},
+ {{CDM::CLibrary, {"fputc"}, 2},
+ {&StreamChecker::preWrite,
std::bind(&StreamChecker::evalFputx, _1, _2, _3, _4, true), 1}},
- {{{"fputs"}, 2},
- {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, false),
+ {{CDM::CLibrary, {"fputs"}, 2},
+ {&StreamChecker::preWrite,
std::bind(&StreamChecker::evalFputx, _1, _2, _3, _4, false), 1}},
- {{{"fprintf"}},
- {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, false),
+ {{CDM::CLibrary, {"putc"}, 2},
+ {&StreamChecker::preWrite,
+ std::bind(&StreamChecker::evalFputx, _1, _2, _3, _4, true), 1}},
+ {{CDM::CLibrary, {"fprintf"}},
+ {&StreamChecker::preWrite,
+ std::bind(&StreamChecker::evalFprintf, _1, _2, _3, _4), 0}},
+ {{CDM::CLibrary, {"vfprintf"}, 3},
+ {&StreamChecker::preWrite,
std::bind(&StreamChecker::evalFprintf, _1, _2, _3, _4), 0}},
- {{{"fscanf"}},
- {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, true),
+ {{CDM::CLibrary, {"fscanf"}},
+ {&StreamChecker::preRead,
std::bind(&StreamChecker::evalFscanf, _1, _2, _3, _4), 0}},
- {{{"ungetc"}, 2},
- {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, false),
+ {{CDM::CLibrary, {"vfscanf"}, 3},
+ {&StreamChecker::preRead,
+ std::bind(&StreamChecker::evalFscanf, _1, _2, _3, _4), 0}},
+ {{CDM::CLibrary, {"ungetc"}, 2},
+ {&StreamChecker::preWrite,
std::bind(&StreamChecker::evalUngetc, _1, _2, _3, _4), 1}},
- {{{"getdelim"}, 4},
- {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, true),
+ {{CDM::CLibrary, {"getdelim"}, 4},
+ {&StreamChecker::preRead,
std::bind(&StreamChecker::evalGetdelim, _1, _2, _3, _4), 3}},
- {{{"getline"}, 3},
- {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, true),
+ {{CDM::CLibrary, {"getline"}, 3},
+ {&StreamChecker::preRead,
std::bind(&StreamChecker::evalGetdelim, _1, _2, _3, _4), 2}},
- {{{"fseek"}, 3},
+ {{CDM::CLibrary, {"fseek"}, 3},
{&StreamChecker::preFseek, &StreamChecker::evalFseek, 0}},
- {{{"fseeko"}, 3},
+ {{CDM::CLibrary, {"fseeko"}, 3},
{&StreamChecker::preFseek, &StreamChecker::evalFseek, 0}},
- {{{"ftell"}, 1},
- {&StreamChecker::preDefault, &StreamChecker::evalFtell, 0}},
- {{{"ftello"}, 1},
- {&StreamChecker::preDefault, &StreamChecker::evalFtell, 0}},
- {{{"fflush"}, 1},
+ {{CDM::CLibrary, {"ftell"}, 1},
+ {&StreamChecker::preWrite, &StreamChecker::evalFtell, 0}},
+ {{CDM::CLibrary, {"ftello"}, 1},
+ {&StreamChecker::preWrite, &StreamChecker::evalFtell, 0}},
+ {{CDM::CLibrary, {"fflush"}, 1},
{&StreamChecker::preFflush, &StreamChecker::evalFflush, 0}},
- {{{"rewind"}, 1},
+ {{CDM::CLibrary, {"rewind"}, 1},
{&StreamChecker::preDefault, &StreamChecker::evalRewind, 0}},
- {{{"fgetpos"}, 2},
- {&StreamChecker::preDefault, &StreamChecker::evalFgetpos, 0}},
- {{{"fsetpos"}, 2},
+ {{CDM::CLibrary, {"fgetpos"}, 2},
+ {&StreamChecker::preWrite, &StreamChecker::evalFgetpos, 0}},
+ {{CDM::CLibrary, {"fsetpos"}, 2},
{&StreamChecker::preDefault, &StreamChecker::evalFsetpos, 0}},
- {{{"clearerr"}, 1},
+ {{CDM::CLibrary, {"clearerr"}, 1},
{&StreamChecker::preDefault, &StreamChecker::evalClearerr, 0}},
- {{{"feof"}, 1},
+ {{CDM::CLibrary, {"feof"}, 1},
{&StreamChecker::preDefault,
std::bind(&StreamChecker::evalFeofFerror, _1, _2, _3, _4, ErrorFEof),
0}},
- {{{"ferror"}, 1},
+ {{CDM::CLibrary, {"ferror"}, 1},
{&StreamChecker::preDefault,
std::bind(&StreamChecker::evalFeofFerror, _1, _2, _3, _4, ErrorFError),
0}},
- {{{"fileno"}, 1}, {&StreamChecker::preDefault, nullptr, 0}},
+ {{CDM::CLibrary, {"fileno"}, 1},
+ {&StreamChecker::preDefault, &StreamChecker::evalFileno, 0}},
};
CallDescriptionMap<FnDescription> FnTestDescriptions = {
- {{{"StreamTesterChecker_make_feof_stream"}, 1},
+ {{CDM::SimpleFunc, {"StreamTesterChecker_make_feof_stream"}, 1},
+ {nullptr,
+ std::bind(&StreamChecker::evalSetFeofFerror, _1, _2, _3, _4, ErrorFEof,
+ false),
+ 0}},
+ {{CDM::SimpleFunc, {"StreamTesterChecker_make_ferror_stream"}, 1},
{nullptr,
- std::bind(&StreamChecker::evalSetFeofFerror, _1, _2, _3, _4, ErrorFEof),
+ std::bind(&StreamChecker::evalSetFeofFerror, _1, _2, _3, _4,
+ ErrorFError, false),
0}},
- {{{"StreamTesterChecker_make_ferror_stream"}, 1},
+ {{CDM::SimpleFunc,
+ {"StreamTesterChecker_make_ferror_indeterminate_stream"},
+ 1},
{nullptr,
std::bind(&StreamChecker::evalSetFeofFerror, _1, _2, _3, _4,
- ErrorFError),
+ ErrorFError, true),
0}},
};
@@ -327,6 +448,8 @@ private:
mutable int SeekCurVal = 1;
/// Expanded value of SEEK_END, 2 if not found.
mutable int SeekEndVal = 2;
+ /// The built-in va_list type is platform-specific
+ mutable QualType VaListType;
void evalFopen(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const;
@@ -339,8 +462,11 @@ private:
void evalFclose(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const;
- void preReadWrite(const FnDescription *Desc, const CallEvent &Call,
- CheckerContext &C, bool IsRead) const;
+ void preRead(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void preWrite(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
void evalFreadFwrite(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C, bool IsFread) const;
@@ -391,8 +517,8 @@ private:
const StreamErrorState &ErrorKind) const;
void evalSetFeofFerror(const FnDescription *Desc, const CallEvent &Call,
- CheckerContext &C,
- const StreamErrorState &ErrorKind) const;
+ CheckerContext &C, const StreamErrorState &ErrorKind,
+ bool Indeterminate) const;
void preFflush(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const;
@@ -400,6 +526,9 @@ private:
void evalFflush(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const;
+ void evalFileno(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
/// Check that the stream (in StreamVal) is not NULL.
/// If it can only be NULL a fatal error is emitted and nullptr returned.
/// Otherwise the return value is a new state where the stream is constrained
@@ -449,11 +578,10 @@ private:
const FnDescription *lookupFn(const CallEvent &Call) const {
// Recognize "global C functions" with only integral or pointer arguments
// (and matching name) as stream functions.
- if (!Call.isGlobalCFunction())
- return nullptr;
for (auto *P : Call.parameters()) {
QualType T = P->getType();
- if (!T->isIntegralOrEnumerationType() && !T->isPointerType())
+ if (!T->isIntegralOrEnumerationType() && !T->isPointerType() &&
+ T.getCanonicalType() != VaListType)
return nullptr;
}
@@ -462,8 +590,8 @@ private:
/// Generate a message for BugReporterVisitor if the stored symbol is
/// marked as interesting by the actual bug report.
- const NoteTag *constructNoteTag(CheckerContext &C, SymbolRef StreamSym,
- const std::string &Message) const {
+ const NoteTag *constructLeakNoteTag(CheckerContext &C, SymbolRef StreamSym,
+ const std::string &Message) const {
return C.getNoteTag([this, StreamSym,
Message](PathSensitiveBugReport &BR) -> std::string {
if (BR.isInteresting(StreamSym) && &BR.getBugType() == &BT_ResourceLeak)
@@ -472,19 +600,6 @@ private:
});
}
- const NoteTag *constructSetEofNoteTag(CheckerContext &C,
- SymbolRef StreamSym) const {
- return C.getNoteTag([this, StreamSym](PathSensitiveBugReport &BR) {
- if (!BR.isInteresting(StreamSym) ||
- &BR.getBugType() != this->getBT_StreamEof())
- return "";
-
- BR.markNotInteresting(StreamSym);
-
- return "Assuming stream reaches end-of-file here";
- });
- }
-
void initMacroValues(CheckerContext &C) const {
if (EofVal)
return;
@@ -505,6 +620,10 @@ private:
SeekCurVal = *OptInt;
}
+ void initVaListType(CheckerContext &C) const {
+ VaListType = C.getASTContext().getBuiltinVaListType().getCanonicalType();
+ }
+
/// Searches for the ExplodedNode where the file descriptor was acquired for
/// StreamSym.
static const ExplodedNode *getAcquisitionSite(const ExplodedNode *N,
@@ -512,16 +631,159 @@ private:
CheckerContext &C);
};
+struct StreamOperationEvaluator {
+ SValBuilder &SVB;
+ const ASTContext &ACtx;
+
+ SymbolRef StreamSym = nullptr;
+ const StreamState *SS = nullptr;
+ const CallExpr *CE = nullptr;
+ StreamErrorState NewES;
+
+ StreamOperationEvaluator(CheckerContext &C)
+ : SVB(C.getSValBuilder()), ACtx(C.getASTContext()) {
+ ;
+ }
+
+ bool Init(const FnDescription *Desc, const CallEvent &Call, CheckerContext &C,
+ ProgramStateRef State) {
+ StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!StreamSym)
+ return false;
+ SS = State->get<StreamMap>(StreamSym);
+ if (!SS)
+ return false;
+ NewES = SS->ErrorState;
+ CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return false;
+
+ assertStreamStateOpened(SS);
+
+ return true;
+ }
+
+ bool isStreamEof() const { return SS->ErrorState == ErrorFEof; }
+
+ NonLoc getZeroVal(const CallEvent &Call) {
+ return *SVB.makeZeroVal(Call.getResultType()).getAs<NonLoc>();
+ }
+
+ ProgramStateRef setStreamState(ProgramStateRef State,
+ const StreamState &NewSS) {
+ NewES = NewSS.ErrorState;
+ return State->set<StreamMap>(StreamSym, NewSS);
+ }
+
+ ProgramStateRef makeAndBindRetVal(ProgramStateRef State, CheckerContext &C) {
+ NonLoc RetVal = makeRetVal(C, CE).castAs<NonLoc>();
+ return State->BindExpr(CE, C.getLocationContext(), RetVal);
+ }
+
+ ProgramStateRef bindReturnValue(ProgramStateRef State, CheckerContext &C,
+ uint64_t Val) {
+ return State->BindExpr(CE, C.getLocationContext(),
+ SVB.makeIntVal(Val, CE->getCallReturnType(ACtx)));
+ }
+
+ ProgramStateRef bindReturnValue(ProgramStateRef State, CheckerContext &C,
+ SVal Val) {
+ return State->BindExpr(CE, C.getLocationContext(), Val);
+ }
+
+ ProgramStateRef bindNullReturnValue(ProgramStateRef State,
+ CheckerContext &C) {
+ return State->BindExpr(CE, C.getLocationContext(),
+ C.getSValBuilder().makeNullWithType(CE->getType()));
+ }
+
+ ProgramStateRef assumeBinOpNN(ProgramStateRef State,
+ BinaryOperator::Opcode Op, NonLoc LHS,
+ NonLoc RHS) {
+ auto Cond = SVB.evalBinOpNN(State, Op, LHS, RHS, SVB.getConditionType())
+ .getAs<DefinedOrUnknownSVal>();
+ if (!Cond)
+ return nullptr;
+ return State->assume(*Cond, true);
+ }
+
+ ConstraintManager::ProgramStatePair
+ makeRetValAndAssumeDual(ProgramStateRef State, CheckerContext &C) {
+ DefinedSVal RetVal = makeRetVal(C, CE);
+ State = State->BindExpr(CE, C.getLocationContext(), RetVal);
+ return C.getConstraintManager().assumeDual(State, RetVal);
+ }
+
+ const NoteTag *getFailureNoteTag(const StreamChecker *Ch, CheckerContext &C) {
+ bool SetFeof = NewES.FEof && !SS->ErrorState.FEof;
+ bool SetFerror = NewES.FError && !SS->ErrorState.FError;
+ if (SetFeof && !SetFerror)
+ return Ch->constructSetEofNoteTag(C, StreamSym);
+ if (!SetFeof && SetFerror)
+ return Ch->constructSetErrorNoteTag(C, StreamSym);
+ if (SetFeof && SetFerror)
+ return Ch->constructSetEofOrErrorNoteTag(C, StreamSym);
+ return nullptr;
+ }
+};
+
} // end anonymous namespace
-// This map holds the state of a stream.
-// The stream is identified with a SymbolRef that is created when a stream
-// opening function is modeled by the checker.
-REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState)
+//===----------------------------------------------------------------------===//
+// Definition of NoStreamStateChangeVisitor.
+//===----------------------------------------------------------------------===//
-inline void assertStreamStateOpened(const StreamState *SS) {
- assert(SS->isOpened() && "Stream is expected to be opened");
-}
+namespace {
+class NoStreamStateChangeVisitor final : public NoOwnershipChangeVisitor {
+protected:
+ /// Syntactically checks whether the callee is a closing function. Since
+ /// we have no path-sensitive information on this call (we would need a
+ /// CallEvent instead of a CallExpr for that), its possible that a
+ /// closing function was called indirectly through a function pointer,
+ /// but we are not able to tell, so this is a best effort analysis.
+ bool isClosingCallAsWritten(const CallExpr &Call) const {
+ const auto *StreamChk = static_cast<const StreamChecker *>(&Checker);
+ return StreamChk->FCloseDesc.matchesAsWritten(Call);
+ }
+
+ bool doesFnIntendToHandleOwnership(const Decl *Callee,
+ ASTContext &ACtx) final {
+ using namespace clang::ast_matchers;
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(Callee);
+
+ auto Matches =
+ match(findAll(callExpr().bind("call")), *FD->getBody(), ACtx);
+ for (BoundNodes Match : Matches) {
+ if (const auto *Call = Match.getNodeAs<CallExpr>("call"))
+ if (isClosingCallAsWritten(*Call))
+ return true;
+ }
+ // TODO: Ownership might change with an attempt to store stream object, not
+ // only through closing it. Check for attempted stores as well.
+ return false;
+ }
+
+ bool hasResourceStateChanged(ProgramStateRef CallEnterState,
+ ProgramStateRef CallExitEndState) final {
+ return CallEnterState->get<StreamMap>(Sym) !=
+ CallExitEndState->get<StreamMap>(Sym);
+ }
+
+ PathDiagnosticPieceRef emitNote(const ExplodedNode *N) override {
+ PathDiagnosticLocation L = PathDiagnosticLocation::create(
+ N->getLocation(),
+ N->getState()->getStateManager().getContext().getSourceManager());
+ return std::make_shared<PathDiagnosticEventPiece>(
+ L, "Returning without closing stream object or storing it for later "
+ "release");
+ }
+
+public:
+ NoStreamStateChangeVisitor(SymbolRef Sym, const StreamChecker *Checker)
+ : NoOwnershipChangeVisitor(Sym, Checker) {}
+};
+
+} // end anonymous namespace
const ExplodedNode *StreamChecker::getAcquisitionSite(const ExplodedNode *N,
SymbolRef StreamSym,
@@ -544,6 +806,59 @@ const ExplodedNode *StreamChecker::getAcquisitionSite(const ExplodedNode *N,
return nullptr;
}
+static std::optional<int64_t> getKnownValue(ProgramStateRef State, SVal V) {
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ if (const llvm::APSInt *Int = SVB.getKnownValue(State, V))
+ return Int->tryExtValue();
+ return std::nullopt;
+}
+
+/// Invalidate only the requested elements instead of the whole buffer.
+/// This is basically a refinement of the more generic 'escapeArgs' or
+/// the plain old 'invalidateRegions'.
+static ProgramStateRef
+escapeByStartIndexAndCount(ProgramStateRef State, const CallEvent &Call,
+ unsigned BlockCount, const SubRegion *Buffer,
+ QualType ElemType, int64_t StartIndex,
+ int64_t ElementCount) {
+ constexpr auto DoNotInvalidateSuperRegion =
+ RegionAndSymbolInvalidationTraits::InvalidationKinds::
+ TK_DoNotInvalidateSuperRegion;
+
+ const LocationContext *LCtx = Call.getLocationContext();
+ const ASTContext &Ctx = State->getStateManager().getContext();
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ auto &RegionManager = Buffer->getMemRegionManager();
+
+ SmallVector<SVal> EscapingVals;
+ EscapingVals.reserve(ElementCount);
+
+ RegionAndSymbolInvalidationTraits ITraits;
+ for (auto Idx : llvm::seq(StartIndex, StartIndex + ElementCount)) {
+ NonLoc Index = SVB.makeArrayIndex(Idx);
+ const auto *Element =
+ RegionManager.getElementRegion(ElemType, Index, Buffer, Ctx);
+ EscapingVals.push_back(loc::MemRegionVal(Element));
+ ITraits.setTrait(Element, DoNotInvalidateSuperRegion);
+ }
+ return State->invalidateRegions(
+ EscapingVals, Call.getOriginExpr(), BlockCount, LCtx,
+ /*CausesPointerEscape=*/false,
+ /*InvalidatedSymbols=*/nullptr, &Call, &ITraits);
+}
+
+static ProgramStateRef escapeArgs(ProgramStateRef State, CheckerContext &C,
+ const CallEvent &Call,
+ ArrayRef<unsigned int> EscapingArgs) {
+ auto GetArgSVal = [&Call](int Idx) { return Call.getArgSVal(Idx); };
+ auto EscapingVals = to_vector(map_range(EscapingArgs, GetArgSVal));
+ State = State->invalidateRegions(EscapingVals, Call.getOriginExpr(),
+ C.blockCount(), C.getLocationContext(),
+ /*CausesPointerEscape=*/false,
+ /*InvalidatedSymbols=*/nullptr);
+ return State;
+}
+
//===----------------------------------------------------------------------===//
// Methods of StreamChecker.
//===----------------------------------------------------------------------===//
@@ -551,6 +866,7 @@ const ExplodedNode *StreamChecker::getAcquisitionSite(const ExplodedNode *N,
void StreamChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
initMacroValues(C);
+ initVaListType(C);
const FnDescription *Desc = lookupFn(Call);
if (!Desc || !Desc->PreFn)
@@ -596,7 +912,7 @@ void StreamChecker::evalFopen(const FnDescription *Desc, const CallEvent &Call,
StateNull->set<StreamMap>(RetSym, StreamState::getOpenFailed(Desc));
C.addTransition(StateNotNull,
- constructNoteTag(C, RetSym, "Stream opened here"));
+ constructLeakNoteTag(C, RetSym, "Stream opened here"));
C.addTransition(StateNull);
}
@@ -654,47 +970,29 @@ void StreamChecker::evalFreopen(const FnDescription *Desc,
StateRetNull->set<StreamMap>(StreamSym, StreamState::getOpenFailed(Desc));
C.addTransition(StateRetNotNull,
- constructNoteTag(C, StreamSym, "Stream reopened here"));
+ constructLeakNoteTag(C, StreamSym, "Stream reopened here"));
C.addTransition(StateRetNull);
}
void StreamChecker::evalFclose(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
- SymbolRef Sym = getStreamArg(Desc, Call).getAsSymbol();
- if (!Sym)
- return;
-
- const StreamState *SS = State->get<StreamMap>(Sym);
- if (!SS)
- return;
-
- auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
- if (!CE)
+ StreamOperationEvaluator E(C);
+ if (!E.Init(Desc, Call, C, State))
return;
- assertStreamStateOpened(SS);
-
// Close the File Descriptor.
// Regardless if the close fails or not, stream becomes "closed"
// and can not be used any more.
- State = State->set<StreamMap>(Sym, StreamState::getClosed(Desc));
+ State = E.setStreamState(State, StreamState::getClosed(Desc));
// Return 0 on success, EOF on failure.
- SValBuilder &SVB = C.getSValBuilder();
- ProgramStateRef StateSuccess = State->BindExpr(
- CE, C.getLocationContext(), SVB.makeIntVal(0, C.getASTContext().IntTy));
- ProgramStateRef StateFailure =
- State->BindExpr(CE, C.getLocationContext(),
- SVB.makeIntVal(*EofVal, C.getASTContext().IntTy));
-
- C.addTransition(StateSuccess);
- C.addTransition(StateFailure);
+ C.addTransition(E.bindReturnValue(State, C, 0));
+ C.addTransition(E.bindReturnValue(State, C, *EofVal));
}
-void StreamChecker::preReadWrite(const FnDescription *Desc,
- const CallEvent &Call, CheckerContext &C,
- bool IsRead) const {
+void StreamChecker::preRead(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
ProgramStateRef State = C.getState();
SVal StreamVal = getStreamArg(Desc, Call);
State = ensureStreamNonNull(StreamVal, Call.getArgExpr(Desc->StreamArgNo), C,
@@ -708,11 +1006,6 @@ void StreamChecker::preReadWrite(const FnDescription *Desc,
if (!State)
return;
- if (!IsRead) {
- C.addTransition(State);
- return;
- }
-
SymbolRef Sym = StreamVal.getAsSymbol();
if (Sym && State->get<StreamMap>(Sym)) {
const StreamState *SS = State->get<StreamMap>(Sym);
@@ -723,16 +1016,103 @@ void StreamChecker::preReadWrite(const FnDescription *Desc,
}
}
+void StreamChecker::preWrite(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SVal StreamVal = getStreamArg(Desc, Call);
+ State = ensureStreamNonNull(StreamVal, Call.getArgExpr(Desc->StreamArgNo), C,
+ State);
+ if (!State)
+ return;
+ State = ensureStreamOpened(StreamVal, C, State);
+ if (!State)
+ return;
+ State = ensureNoFilePositionIndeterminate(StreamVal, C, State);
+ if (!State)
+ return;
+
+ C.addTransition(State);
+}
+
+static QualType getPointeeType(const MemRegion *R) {
+ if (!R)
+ return {};
+ if (const auto *ER = dyn_cast<ElementRegion>(R))
+ return ER->getElementType();
+ if (const auto *TR = dyn_cast<TypedValueRegion>(R))
+ return TR->getValueType();
+ if (const auto *SR = dyn_cast<SymbolicRegion>(R))
+ return SR->getPointeeStaticType();
+ return {};
+}
+
+static std::optional<NonLoc> getStartIndex(SValBuilder &SVB,
+ const MemRegion *R) {
+ if (!R)
+ return std::nullopt;
+
+ auto Zero = [&SVB] {
+ BasicValueFactory &BVF = SVB.getBasicValueFactory();
+ return nonloc::ConcreteInt(BVF.getIntValue(0, /*isUnsigned=*/false));
+ };
+
+ if (const auto *ER = dyn_cast<ElementRegion>(R))
+ return ER->getIndex();
+ if (isa<TypedValueRegion>(R))
+ return Zero();
+ if (isa<SymbolicRegion>(R))
+ return Zero();
+ return std::nullopt;
+}
+
+static ProgramStateRef
+tryToInvalidateFReadBufferByElements(ProgramStateRef State, CheckerContext &C,
+ const CallEvent &Call, NonLoc SizeVal,
+ NonLoc NMembVal) {
+ // Try to invalidate the individual elements.
+ const auto *Buffer =
+ dyn_cast_or_null<SubRegion>(Call.getArgSVal(0).getAsRegion());
+
+ const ASTContext &Ctx = C.getASTContext();
+ QualType ElemTy = getPointeeType(Buffer);
+ std::optional<SVal> StartElementIndex =
+ getStartIndex(C.getSValBuilder(), Buffer);
+
+ // Drop the outermost ElementRegion to get the buffer.
+ if (const auto *ER = dyn_cast_or_null<ElementRegion>(Buffer))
+ Buffer = dyn_cast<SubRegion>(ER->getSuperRegion());
+
+ std::optional<int64_t> CountVal = getKnownValue(State, NMembVal);
+ std::optional<int64_t> Size = getKnownValue(State, SizeVal);
+ std::optional<int64_t> StartIndexVal =
+ getKnownValue(State, StartElementIndex.value_or(UnknownVal()));
+
+ if (!ElemTy.isNull() && CountVal && Size && StartIndexVal) {
+ int64_t NumBytesRead = Size.value() * CountVal.value();
+ int64_t ElemSizeInChars = Ctx.getTypeSizeInChars(ElemTy).getQuantity();
+ if (ElemSizeInChars == 0)
+ return nullptr;
+
+ bool IncompleteLastElement = (NumBytesRead % ElemSizeInChars) != 0;
+ int64_t NumCompleteOrIncompleteElementsRead =
+ NumBytesRead / ElemSizeInChars + IncompleteLastElement;
+
+ constexpr int MaxInvalidatedElementsLimit = 64;
+ if (NumCompleteOrIncompleteElementsRead <= MaxInvalidatedElementsLimit) {
+ return escapeByStartIndexAndCount(State, Call, C.blockCount(), Buffer,
+ ElemTy, *StartIndexVal,
+ NumCompleteOrIncompleteElementsRead);
+ }
+ }
+ return nullptr;
+}
+
void StreamChecker::evalFreadFwrite(const FnDescription *Desc,
const CallEvent &Call, CheckerContext &C,
bool IsFread) const {
ProgramStateRef State = C.getState();
- SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
- if (!StreamSym)
- return;
-
- const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
- if (!CE)
+ StreamOperationEvaluator E(C);
+ if (!E.Init(Desc, Call, C, State))
return;
std::optional<NonLoc> SizeVal = Call.getArgSVal(1).getAs<NonLoc>();
@@ -742,115 +1122,90 @@ void StreamChecker::evalFreadFwrite(const FnDescription *Desc,
if (!NMembVal)
return;
- const StreamState *OldSS = State->get<StreamMap>(StreamSym);
- if (!OldSS)
- return;
-
- assertStreamStateOpened(OldSS);
-
// C'99 standard, §7.19.8.1.3, the return value of fread:
// The fread function returns the number of elements successfully read, which
// may be less than nmemb if a read error or end-of-file is encountered. If
// size or nmemb is zero, fread returns zero and the contents of the array and
// the state of the stream remain unchanged.
-
if (State->isNull(*SizeVal).isConstrainedTrue() ||
State->isNull(*NMembVal).isConstrainedTrue()) {
// This is the "size or nmemb is zero" case.
// Just return 0, do nothing more (not clear the error flags).
- State = bindInt(0, State, C, CE);
- C.addTransition(State);
+ C.addTransition(E.bindReturnValue(State, C, 0));
return;
}
+ // At read, invalidate the buffer in any case of error or success,
+ // except if EOF was already present.
+ if (IsFread && !E.isStreamEof()) {
+ // Try to invalidate the individual elements.
+ // Otherwise just fall back to invalidating the whole buffer.
+ ProgramStateRef InvalidatedState = tryToInvalidateFReadBufferByElements(
+ State, C, Call, *SizeVal, *NMembVal);
+ State =
+ InvalidatedState ? InvalidatedState : escapeArgs(State, C, Call, {0});
+ }
+
// Generate a transition for the success state.
// If we know the state to be FEOF at fread, do not add a success state.
- if (!IsFread || (OldSS->ErrorState != ErrorFEof)) {
+ if (!IsFread || !E.isStreamEof()) {
ProgramStateRef StateNotFailed =
- State->BindExpr(CE, C.getLocationContext(), *NMembVal);
+ State->BindExpr(E.CE, C.getLocationContext(), *NMembVal);
StateNotFailed =
- StateNotFailed->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
+ E.setStreamState(StateNotFailed, StreamState::getOpened(Desc));
C.addTransition(StateNotFailed);
}
// Add transition for the failed state.
- NonLoc RetVal = makeRetVal(C, CE).castAs<NonLoc>();
- ProgramStateRef StateFailed =
- State->BindExpr(CE, C.getLocationContext(), RetVal);
- SValBuilder &SVB = C.getSValBuilder();
- auto Cond =
- SVB.evalBinOpNN(State, BO_LT, RetVal, *NMembVal, SVB.getConditionType())
- .getAs<DefinedOrUnknownSVal>();
- if (!Cond)
+ // At write, add failure case only if "pedantic mode" is on.
+ if (!IsFread && !PedanticMode)
return;
- StateFailed = StateFailed->assume(*Cond, true);
+
+ NonLoc RetVal = makeRetVal(C, E.CE).castAs<NonLoc>();
+ ProgramStateRef StateFailed =
+ State->BindExpr(E.CE, C.getLocationContext(), RetVal);
+ StateFailed = E.assumeBinOpNN(StateFailed, BO_LT, RetVal, *NMembVal);
if (!StateFailed)
return;
StreamErrorState NewES;
if (IsFread)
- NewES =
- (OldSS->ErrorState == ErrorFEof) ? ErrorFEof : ErrorFEof | ErrorFError;
+ NewES = E.isStreamEof() ? ErrorFEof : ErrorFEof | ErrorFError;
else
NewES = ErrorFError;
// If a (non-EOF) error occurs, the resulting value of the file position
// indicator for the stream is indeterminate.
- StreamState NewSS = StreamState::getOpened(Desc, NewES, !NewES.isFEof());
- StateFailed = StateFailed->set<StreamMap>(StreamSym, NewSS);
- if (IsFread && OldSS->ErrorState != ErrorFEof)
- C.addTransition(StateFailed, constructSetEofNoteTag(C, StreamSym));
- else
- C.addTransition(StateFailed);
+ StateFailed = E.setStreamState(
+ StateFailed, StreamState::getOpened(Desc, NewES, !NewES.isFEof()));
+ C.addTransition(StateFailed, E.getFailureNoteTag(this, C));
}
void StreamChecker::evalFgetx(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C, bool SingleChar) const {
- ProgramStateRef State = C.getState();
- SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
- if (!StreamSym)
- return;
-
- const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
- if (!CE)
- return;
-
- const StreamState *OldSS = State->get<StreamMap>(StreamSym);
- if (!OldSS)
- return;
-
- assertStreamStateOpened(OldSS);
-
// `fgetc` returns the read character on success, otherwise returns EOF.
// `fgets` returns the read buffer address on success, otherwise returns NULL.
- if (OldSS->ErrorState != ErrorFEof) {
+ ProgramStateRef State = C.getState();
+ StreamOperationEvaluator E(C);
+ if (!E.Init(Desc, Call, C, State))
+ return;
+
+ if (!E.isStreamEof()) {
+ // If there was already EOF, assume that read buffer is not changed.
+ // Otherwise it may change at success or failure.
+ State = escapeArgs(State, C, Call, {0});
if (SingleChar) {
// Generate a transition for the success state of `fgetc`.
- NonLoc RetVal = makeRetVal(C, CE).castAs<NonLoc>();
+ NonLoc RetVal = makeRetVal(C, E.CE).castAs<NonLoc>();
ProgramStateRef StateNotFailed =
- State->BindExpr(CE, C.getLocationContext(), RetVal);
- SValBuilder &SVB = C.getSValBuilder();
- ASTContext &ASTC = C.getASTContext();
+ State->BindExpr(E.CE, C.getLocationContext(), RetVal);
// The returned 'unsigned char' of `fgetc` is converted to 'int',
// so we need to check if it is in range [0, 255].
- auto CondLow =
- SVB.evalBinOp(State, BO_GE, RetVal, SVB.makeZeroVal(ASTC.IntTy),
- SVB.getConditionType())
- .getAs<DefinedOrUnknownSVal>();
- auto CondHigh =
- SVB.evalBinOp(State, BO_LE, RetVal,
- SVB.makeIntVal(SVB.getBasicValueFactory()
- .getMaxValue(ASTC.UnsignedCharTy)
- .getLimitedValue(),
- ASTC.IntTy),
- SVB.getConditionType())
- .getAs<DefinedOrUnknownSVal>();
- if (!CondLow || !CondHigh)
- return;
- StateNotFailed = StateNotFailed->assume(*CondLow, true);
- if (!StateNotFailed)
- return;
- StateNotFailed = StateNotFailed->assume(*CondHigh, true);
+ StateNotFailed = StateNotFailed->assumeInclusiveRange(
+ RetVal,
+ E.SVB.getBasicValueFactory().getValue(0, E.ACtx.UnsignedCharTy),
+ E.SVB.getBasicValueFactory().getMaxValue(E.ACtx.UnsignedCharTy),
+ true);
if (!StateNotFailed)
return;
C.addTransition(StateNotFailed);
@@ -861,9 +1216,9 @@ void StreamChecker::evalFgetx(const FnDescription *Desc, const CallEvent &Call,
if (!GetBuf)
return;
ProgramStateRef StateNotFailed =
- State->BindExpr(CE, C.getLocationContext(), *GetBuf);
- StateNotFailed = StateNotFailed->set<StreamMap>(
- StreamSym, StreamState::getOpened(Desc));
+ State->BindExpr(E.CE, C.getLocationContext(), *GetBuf);
+ StateNotFailed =
+ E.setStreamState(StateNotFailed, StreamState::getOpened(Desc));
C.addTransition(StateNotFailed);
}
}
@@ -871,147 +1226,111 @@ void StreamChecker::evalFgetx(const FnDescription *Desc, const CallEvent &Call,
// Add transition for the failed state.
ProgramStateRef StateFailed;
if (SingleChar)
- StateFailed = bindInt(*EofVal, State, C, CE);
+ StateFailed = E.bindReturnValue(State, C, *EofVal);
else
- StateFailed =
- State->BindExpr(CE, C.getLocationContext(),
- C.getSValBuilder().makeNullWithType(CE->getType()));
+ StateFailed = E.bindNullReturnValue(State, C);
// If a (non-EOF) error occurs, the resulting value of the file position
// indicator for the stream is indeterminate.
StreamErrorState NewES =
- OldSS->ErrorState == ErrorFEof ? ErrorFEof : ErrorFEof | ErrorFError;
- StreamState NewSS = StreamState::getOpened(Desc, NewES, !NewES.isFEof());
- StateFailed = StateFailed->set<StreamMap>(StreamSym, NewSS);
- if (OldSS->ErrorState != ErrorFEof)
- C.addTransition(StateFailed, constructSetEofNoteTag(C, StreamSym));
- else
- C.addTransition(StateFailed);
+ E.isStreamEof() ? ErrorFEof : ErrorFEof | ErrorFError;
+ StateFailed = E.setStreamState(
+ StateFailed, StreamState::getOpened(Desc, NewES, !NewES.isFEof()));
+ C.addTransition(StateFailed, E.getFailureNoteTag(this, C));
}
void StreamChecker::evalFputx(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C, bool IsSingleChar) const {
- ProgramStateRef State = C.getState();
- SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
- if (!StreamSym)
- return;
-
- const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
- if (!CE)
- return;
+ // `fputc` returns the written character on success, otherwise returns EOF.
+ // `fputs` returns a nonnegative value on success, otherwise returns EOF.
- const StreamState *OldSS = State->get<StreamMap>(StreamSym);
- if (!OldSS)
+ ProgramStateRef State = C.getState();
+ StreamOperationEvaluator E(C);
+ if (!E.Init(Desc, Call, C, State))
return;
- assertStreamStateOpened(OldSS);
-
- // `fputc` returns the written character on success, otherwise returns EOF.
- // `fputs` returns a non negative value on sucecess, otherwise returns EOF.
-
if (IsSingleChar) {
// Generate a transition for the success state of `fputc`.
std::optional<NonLoc> PutVal = Call.getArgSVal(0).getAs<NonLoc>();
if (!PutVal)
return;
ProgramStateRef StateNotFailed =
- State->BindExpr(CE, C.getLocationContext(), *PutVal);
+ State->BindExpr(E.CE, C.getLocationContext(), *PutVal);
StateNotFailed =
- StateNotFailed->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
+ E.setStreamState(StateNotFailed, StreamState::getOpened(Desc));
C.addTransition(StateNotFailed);
} else {
// Generate a transition for the success state of `fputs`.
- NonLoc RetVal = makeRetVal(C, CE).castAs<NonLoc>();
+ NonLoc RetVal = makeRetVal(C, E.CE).castAs<NonLoc>();
ProgramStateRef StateNotFailed =
- State->BindExpr(CE, C.getLocationContext(), RetVal);
- SValBuilder &SVB = C.getSValBuilder();
- auto &ASTC = C.getASTContext();
- auto Cond = SVB.evalBinOp(State, BO_GE, RetVal, SVB.makeZeroVal(ASTC.IntTy),
- SVB.getConditionType())
- .getAs<DefinedOrUnknownSVal>();
- if (!Cond)
- return;
- StateNotFailed = StateNotFailed->assume(*Cond, true);
+ State->BindExpr(E.CE, C.getLocationContext(), RetVal);
+ StateNotFailed =
+ E.assumeBinOpNN(StateNotFailed, BO_GE, RetVal, E.getZeroVal(Call));
if (!StateNotFailed)
return;
StateNotFailed =
- StateNotFailed->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
+ E.setStreamState(StateNotFailed, StreamState::getOpened(Desc));
C.addTransition(StateNotFailed);
}
+ if (!PedanticMode)
+ return;
+
// Add transition for the failed state. The resulting value of the file
// position indicator for the stream is indeterminate.
- ProgramStateRef StateFailed = bindInt(*EofVal, State, C, CE);
- StreamState NewSS = StreamState::getOpened(Desc, ErrorFError, true);
- StateFailed = StateFailed->set<StreamMap>(StreamSym, NewSS);
- C.addTransition(StateFailed);
+ ProgramStateRef StateFailed = E.bindReturnValue(State, C, *EofVal);
+ StateFailed = E.setStreamState(
+ StateFailed, StreamState::getOpened(Desc, ErrorFError, true));
+ C.addTransition(StateFailed, E.getFailureNoteTag(this, C));
}
void StreamChecker::evalFprintf(const FnDescription *Desc,
const CallEvent &Call,
CheckerContext &C) const {
- ProgramStateRef State = C.getState();
if (Call.getNumArgs() < 2)
return;
- SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
- if (!StreamSym)
- return;
-
- const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
- if (!CE)
- return;
- const StreamState *OldSS = State->get<StreamMap>(StreamSym);
- if (!OldSS)
+ ProgramStateRef State = C.getState();
+ StreamOperationEvaluator E(C);
+ if (!E.Init(Desc, Call, C, State))
return;
- assertStreamStateOpened(OldSS);
-
- NonLoc RetVal = makeRetVal(C, CE).castAs<NonLoc>();
- State = State->BindExpr(CE, C.getLocationContext(), RetVal);
- SValBuilder &SVB = C.getSValBuilder();
- auto &ACtx = C.getASTContext();
- auto Cond = SVB.evalBinOp(State, BO_GE, RetVal, SVB.makeZeroVal(ACtx.IntTy),
- SVB.getConditionType())
- .getAs<DefinedOrUnknownSVal>();
+ NonLoc RetVal = makeRetVal(C, E.CE).castAs<NonLoc>();
+ State = State->BindExpr(E.CE, C.getLocationContext(), RetVal);
+ auto Cond =
+ E.SVB
+ .evalBinOp(State, BO_GE, RetVal, E.SVB.makeZeroVal(E.ACtx.IntTy),
+ E.SVB.getConditionType())
+ .getAs<DefinedOrUnknownSVal>();
if (!Cond)
return;
ProgramStateRef StateNotFailed, StateFailed;
std::tie(StateNotFailed, StateFailed) = State->assume(*Cond);
StateNotFailed =
- StateNotFailed->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
+ E.setStreamState(StateNotFailed, StreamState::getOpened(Desc));
C.addTransition(StateNotFailed);
+ if (!PedanticMode)
+ return;
+
// Add transition for the failed state. The resulting value of the file
// position indicator for the stream is indeterminate.
- StateFailed = StateFailed->set<StreamMap>(
- StreamSym, StreamState::getOpened(Desc, ErrorFError, true));
- C.addTransition(StateFailed);
+ StateFailed = E.setStreamState(
+ StateFailed, StreamState::getOpened(Desc, ErrorFError, true));
+ C.addTransition(StateFailed, E.getFailureNoteTag(this, C));
}
void StreamChecker::evalFscanf(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const {
- ProgramStateRef State = C.getState();
if (Call.getNumArgs() < 2)
return;
- SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
- if (!StreamSym)
- return;
- const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
- if (!CE)
- return;
-
- const StreamState *OldSS = State->get<StreamMap>(StreamSym);
- if (!OldSS)
+ ProgramStateRef State = C.getState();
+ StreamOperationEvaluator E(C);
+ if (!E.Init(Desc, Call, C, State))
return;
- assertStreamStateOpened(OldSS);
-
- SValBuilder &SVB = C.getSValBuilder();
- ASTContext &ACtx = C.getASTContext();
-
// Add the success state.
// In this context "success" means there is not an EOF or other read error
// before any item is matched in 'fscanf'. But there may be match failure,
@@ -1020,19 +1339,25 @@ void StreamChecker::evalFscanf(const FnDescription *Desc, const CallEvent &Call,
// then EOF or read error happens. Now this case is handled like a "success"
// case, and no error flags are set on the stream. This is probably not
// accurate, and the POSIX documentation does not tell more.
- if (OldSS->ErrorState != ErrorFEof) {
- NonLoc RetVal = makeRetVal(C, CE).castAs<NonLoc>();
+ if (!E.isStreamEof()) {
+ NonLoc RetVal = makeRetVal(C, E.CE).castAs<NonLoc>();
ProgramStateRef StateNotFailed =
- State->BindExpr(CE, C.getLocationContext(), RetVal);
- auto RetGeZero =
- SVB.evalBinOp(StateNotFailed, BO_GE, RetVal,
- SVB.makeZeroVal(ACtx.IntTy), SVB.getConditionType())
- .getAs<DefinedOrUnknownSVal>();
- if (!RetGeZero)
+ State->BindExpr(E.CE, C.getLocationContext(), RetVal);
+ StateNotFailed =
+ E.assumeBinOpNN(StateNotFailed, BO_GE, RetVal, E.getZeroVal(Call));
+ if (!StateNotFailed)
return;
- StateNotFailed = StateNotFailed->assume(*RetGeZero, true);
- C.addTransition(StateNotFailed);
+ if (auto const *Callee = Call.getCalleeIdentifier();
+ !Callee || Callee->getName() != "vfscanf") {
+ SmallVector<unsigned int> EscArgs;
+ for (auto EscArg : llvm::seq(2u, Call.getNumArgs()))
+ EscArgs.push_back(EscArg);
+ StateNotFailed = escapeArgs(StateNotFailed, C, Call, EscArgs);
+ }
+
+ if (StateNotFailed)
+ C.addTransition(StateNotFailed);
}
// Add transition for the failed state.
@@ -1041,43 +1366,28 @@ void StreamChecker::evalFscanf(const FnDescription *Desc, const CallEvent &Call,
// be set but it is not further specified if all are required to be set.
// Documentation does not mention, but file position will be set to
// indeterminate similarly as at 'fread'.
- ProgramStateRef StateFailed = bindInt(*EofVal, State, C, CE);
- StreamErrorState NewES = (OldSS->ErrorState == ErrorFEof)
- ? ErrorFEof
- : ErrorNone | ErrorFEof | ErrorFError;
- StreamState NewSS = StreamState::getOpened(Desc, NewES, !NewES.isFEof());
- StateFailed = StateFailed->set<StreamMap>(StreamSym, NewSS);
- if (OldSS->ErrorState != ErrorFEof)
- C.addTransition(StateFailed, constructSetEofNoteTag(C, StreamSym));
- else
- C.addTransition(StateFailed);
+ ProgramStateRef StateFailed = E.bindReturnValue(State, C, *EofVal);
+ StreamErrorState NewES =
+ E.isStreamEof() ? ErrorFEof : ErrorNone | ErrorFEof | ErrorFError;
+ StateFailed = E.setStreamState(
+ StateFailed, StreamState::getOpened(Desc, NewES, !NewES.isFEof()));
+ C.addTransition(StateFailed, E.getFailureNoteTag(this, C));
}
void StreamChecker::evalUngetc(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
- SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
- if (!StreamSym)
+ StreamOperationEvaluator E(C);
+ if (!E.Init(Desc, Call, C, State))
return;
- const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
- if (!CE)
- return;
-
- const StreamState *OldSS = State->get<StreamMap>(StreamSym);
- if (!OldSS)
- return;
-
- assertStreamStateOpened(OldSS);
-
// Generate a transition for the success state.
std::optional<NonLoc> PutVal = Call.getArgSVal(0).getAs<NonLoc>();
if (!PutVal)
return;
- ProgramStateRef StateNotFailed =
- State->BindExpr(CE, C.getLocationContext(), *PutVal);
+ ProgramStateRef StateNotFailed = E.bindReturnValue(State, C, *PutVal);
StateNotFailed =
- StateNotFailed->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
+ E.setStreamState(StateNotFailed, StreamState::getOpened(Desc));
C.addTransition(StateNotFailed);
// Add transition for the failed state.
@@ -1086,9 +1396,8 @@ void StreamChecker::evalUngetc(const FnDescription *Desc, const CallEvent &Call,
// the same transition as the success state.
// In this case only one state transition is added by the analyzer (the two
// new states may be similar).
- ProgramStateRef StateFailed = bindInt(*EofVal, State, C, CE);
- StateFailed =
- StateFailed->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
+ ProgramStateRef StateFailed = E.bindReturnValue(State, C, *EofVal);
+ StateFailed = E.setStreamState(StateFailed, StreamState::getOpened(Desc));
C.addTransition(StateFailed);
}
@@ -1096,39 +1405,42 @@ void StreamChecker::evalGetdelim(const FnDescription *Desc,
const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
- SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
- if (!StreamSym)
+ StreamOperationEvaluator E(C);
+ if (!E.Init(Desc, Call, C, State))
return;
- const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
- if (!CE)
- return;
-
- const StreamState *OldSS = State->get<StreamMap>(StreamSym);
- if (!OldSS)
- return;
-
- assertStreamStateOpened(OldSS);
-
// Upon successful completion, the getline() and getdelim() functions shall
// return the number of bytes written into the buffer.
// If the end-of-file indicator for the stream is set, the function shall
// return -1.
// If an error occurs, the function shall return -1 and set 'errno'.
- // Add transition for the successful state.
- if (OldSS->ErrorState != ErrorFEof) {
- NonLoc RetVal = makeRetVal(C, CE).castAs<NonLoc>();
- ProgramStateRef StateNotFailed =
- State->BindExpr(CE, C.getLocationContext(), RetVal);
- SValBuilder &SVB = C.getSValBuilder();
- auto Cond =
- SVB.evalBinOp(State, BO_GE, RetVal, SVB.makeZeroVal(CE->getType()),
- SVB.getConditionType())
- .getAs<DefinedOrUnknownSVal>();
- if (!Cond)
- return;
- StateNotFailed = StateNotFailed->assume(*Cond, true);
+ if (!E.isStreamEof()) {
+ // Escape buffer and size (may change by the call).
+ // May happen even at error (partial read?).
+ State = escapeArgs(State, C, Call, {0, 1});
+
+ // Add transition for the successful state.
+ NonLoc RetVal = makeRetVal(C, E.CE).castAs<NonLoc>();
+ ProgramStateRef StateNotFailed = E.bindReturnValue(State, C, RetVal);
+ StateNotFailed =
+ E.assumeBinOpNN(StateNotFailed, BO_GE, RetVal, E.getZeroVal(Call));
+
+ // On success, a buffer is allocated.
+ auto NewLinePtr = getPointeeVal(Call.getArgSVal(0), State);
+ if (NewLinePtr && isa<DefinedOrUnknownSVal>(*NewLinePtr))
+ StateNotFailed = StateNotFailed->assume(
+ NewLinePtr->castAs<DefinedOrUnknownSVal>(), true);
+
+ // The buffer size `*n` must be enough to hold the whole line, and
+ // greater than the return value, since it has to account for '\0'.
+ SVal SizePtrSval = Call.getArgSVal(1);
+ auto NVal = getPointeeVal(SizePtrSval, State);
+ if (NVal && isa<NonLoc>(*NVal)) {
+ StateNotFailed = E.assumeBinOpNN(StateNotFailed, BO_GT,
+ NVal->castAs<NonLoc>(), RetVal);
+ StateNotFailed = E.bindReturnValue(StateNotFailed, C, RetVal);
+ }
if (!StateNotFailed)
return;
C.addTransition(StateNotFailed);
@@ -1137,15 +1449,16 @@ void StreamChecker::evalGetdelim(const FnDescription *Desc,
// Add transition for the failed state.
// If a (non-EOF) error occurs, the resulting value of the file position
// indicator for the stream is indeterminate.
- ProgramStateRef StateFailed = bindInt(-1, State, C, CE);
+ ProgramStateRef StateFailed = E.bindReturnValue(State, C, -1);
StreamErrorState NewES =
- OldSS->ErrorState == ErrorFEof ? ErrorFEof : ErrorFEof | ErrorFError;
- StreamState NewSS = StreamState::getOpened(Desc, NewES, !NewES.isFEof());
- StateFailed = StateFailed->set<StreamMap>(StreamSym, NewSS);
- if (OldSS->ErrorState != ErrorFEof)
- C.addTransition(StateFailed, constructSetEofNoteTag(C, StreamSym));
- else
- C.addTransition(StateFailed);
+ E.isStreamEof() ? ErrorFEof : ErrorFEof | ErrorFError;
+ StateFailed = E.setStreamState(
+ StateFailed, StreamState::getOpened(Desc, NewES, !NewES.isFEof()));
+ // On failure, the content of the buffer is undefined.
+ if (auto NewLinePtr = getPointeeVal(Call.getArgSVal(0), State))
+ StateFailed = StateFailed->bindLoc(*NewLinePtr, UndefinedVal(),
+ C.getLocationContext());
+ C.addTransition(StateFailed, E.getFailureNoteTag(this, C));
}
void StreamChecker::preFseek(const FnDescription *Desc, const CallEvent &Call,
@@ -1169,73 +1482,43 @@ void StreamChecker::preFseek(const FnDescription *Desc, const CallEvent &Call,
void StreamChecker::evalFseek(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
- SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
- if (!StreamSym)
+ StreamOperationEvaluator E(C);
+ if (!E.Init(Desc, Call, C, State))
return;
- const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
- if (!CE)
- return;
+ // Add success state.
+ ProgramStateRef StateNotFailed = E.bindReturnValue(State, C, 0);
+ // No failure: Reset the state to opened with no error.
+ StateNotFailed =
+ E.setStreamState(StateNotFailed, StreamState::getOpened(Desc));
+ C.addTransition(StateNotFailed);
- // Ignore the call if the stream is not tracked.
- if (!State->get<StreamMap>(StreamSym))
+ if (!PedanticMode)
return;
- const llvm::APSInt *PosV =
- C.getSValBuilder().getKnownValue(State, Call.getArgSVal(1));
- const llvm::APSInt *WhenceV =
- C.getSValBuilder().getKnownValue(State, Call.getArgSVal(2));
-
- DefinedSVal RetVal = makeRetVal(C, CE);
-
- // Make expression result.
- State = State->BindExpr(CE, C.getLocationContext(), RetVal);
-
- // Bifurcate the state into failed and non-failed.
- // Return zero on success, nonzero on error.
- ProgramStateRef StateNotFailed, StateFailed;
- std::tie(StateFailed, StateNotFailed) =
- C.getConstraintManager().assumeDual(State, RetVal);
-
- // Reset the state to opened with no error.
- StateNotFailed =
- StateNotFailed->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
- // We get error.
- // It is possible that fseek fails but sets none of the error flags.
+ // Add failure state.
+ // At error it is possible that fseek fails but sets none of the error flags.
// If fseek failed, assume that the file position becomes indeterminate in any
// case.
- StreamErrorState NewErrS = ErrorNone | ErrorFError;
- // Setting the position to start of file never produces EOF error.
- if (!(PosV && *PosV == 0 && WhenceV && *WhenceV == SeekSetVal))
- NewErrS = NewErrS | ErrorFEof;
- StateFailed = StateFailed->set<StreamMap>(
- StreamSym, StreamState::getOpened(Desc, NewErrS, true));
-
- C.addTransition(StateNotFailed);
- C.addTransition(StateFailed, constructSetEofNoteTag(C, StreamSym));
+ // It is allowed to set the position beyond the end of the file. EOF error
+ // should not occur.
+ ProgramStateRef StateFailed = E.bindReturnValue(State, C, -1);
+ StateFailed = E.setStreamState(
+ StateFailed, StreamState::getOpened(Desc, ErrorNone | ErrorFError, true));
+ C.addTransition(StateFailed, E.getFailureNoteTag(this, C));
}
void StreamChecker::evalFgetpos(const FnDescription *Desc,
const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
- SymbolRef Sym = getStreamArg(Desc, Call).getAsSymbol();
- if (!Sym)
+ StreamOperationEvaluator E(C);
+ if (!E.Init(Desc, Call, C, State))
return;
- // Do not evaluate if stream is not found.
- if (!State->get<StreamMap>(Sym))
- return;
-
- auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
- if (!CE)
- return;
-
- DefinedSVal RetVal = makeRetVal(C, CE);
- State = State->BindExpr(CE, C.getLocationContext(), RetVal);
ProgramStateRef StateNotFailed, StateFailed;
- std::tie(StateFailed, StateNotFailed) =
- C.getConstraintManager().assumeDual(State, RetVal);
+ std::tie(StateFailed, StateNotFailed) = E.makeRetValAndAssumeDual(State, C);
+ StateNotFailed = escapeArgs(StateNotFailed, C, Call, {1});
// This function does not affect the stream state.
// Still we add success and failure state with the appropriate return value.
@@ -1248,70 +1531,46 @@ void StreamChecker::evalFsetpos(const FnDescription *Desc,
const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
- SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
- if (!StreamSym)
- return;
-
- const StreamState *SS = State->get<StreamMap>(StreamSym);
- if (!SS)
+ StreamOperationEvaluator E(C);
+ if (!E.Init(Desc, Call, C, State))
return;
- auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
- if (!CE)
- return;
-
- assertStreamStateOpened(SS);
-
- DefinedSVal RetVal = makeRetVal(C, CE);
- State = State->BindExpr(CE, C.getLocationContext(), RetVal);
ProgramStateRef StateNotFailed, StateFailed;
- std::tie(StateFailed, StateNotFailed) =
- C.getConstraintManager().assumeDual(State, RetVal);
+ std::tie(StateFailed, StateNotFailed) = E.makeRetValAndAssumeDual(State, C);
- StateNotFailed = StateNotFailed->set<StreamMap>(
- StreamSym, StreamState::getOpened(Desc, ErrorNone, false));
+ StateNotFailed = E.setStreamState(
+ StateNotFailed, StreamState::getOpened(Desc, ErrorNone, false));
+ C.addTransition(StateNotFailed);
+
+ if (!PedanticMode)
+ return;
// At failure ferror could be set.
// The standards do not tell what happens with the file position at failure.
// But we can assume that it is dangerous to make a next I/O operation after
// the position was not set correctly (similar to 'fseek').
- StateFailed = StateFailed->set<StreamMap>(
- StreamSym, StreamState::getOpened(Desc, ErrorNone | ErrorFError, true));
+ StateFailed = E.setStreamState(
+ StateFailed, StreamState::getOpened(Desc, ErrorNone | ErrorFError, true));
- C.addTransition(StateNotFailed);
- C.addTransition(StateFailed);
+ C.addTransition(StateFailed, E.getFailureNoteTag(this, C));
}
void StreamChecker::evalFtell(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
- SymbolRef Sym = getStreamArg(Desc, Call).getAsSymbol();
- if (!Sym)
- return;
-
- if (!State->get<StreamMap>(Sym))
+ StreamOperationEvaluator E(C);
+ if (!E.Init(Desc, Call, C, State))
return;
- auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
- if (!CE)
- return;
-
- SValBuilder &SVB = C.getSValBuilder();
- NonLoc RetVal = makeRetVal(C, CE).castAs<NonLoc>();
+ NonLoc RetVal = makeRetVal(C, E.CE).castAs<NonLoc>();
ProgramStateRef StateNotFailed =
- State->BindExpr(CE, C.getLocationContext(), RetVal);
- auto Cond =
- SVB.evalBinOp(State, BO_GE, RetVal, SVB.makeZeroVal(Call.getResultType()),
- SVB.getConditionType())
- .getAs<DefinedOrUnknownSVal>();
- if (!Cond)
- return;
- StateNotFailed = StateNotFailed->assume(*Cond, true);
+ State->BindExpr(E.CE, C.getLocationContext(), RetVal);
+ StateNotFailed =
+ E.assumeBinOpNN(StateNotFailed, BO_GE, RetVal, E.getZeroVal(Call));
if (!StateNotFailed)
return;
- ProgramStateRef StateFailed = State->BindExpr(
- CE, C.getLocationContext(), SVB.makeIntVal(-1, Call.getResultType()));
+ ProgramStateRef StateFailed = E.bindReturnValue(State, C, -1);
// This function does not affect the stream state.
// Still we add success and failure state with the appropriate return value.
@@ -1323,112 +1582,12 @@ void StreamChecker::evalFtell(const FnDescription *Desc, const CallEvent &Call,
void StreamChecker::evalRewind(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
- SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
- if (!StreamSym)
- return;
-
- const StreamState *SS = State->get<StreamMap>(StreamSym);
- if (!SS)
+ StreamOperationEvaluator E(C);
+ if (!E.Init(Desc, Call, C, State))
return;
- auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
- if (!CE)
- return;
-
- assertStreamStateOpened(SS);
-
- State = State->set<StreamMap>(StreamSym,
- StreamState::getOpened(Desc, ErrorNone, false));
-
- C.addTransition(State);
-}
-
-void StreamChecker::evalClearerr(const FnDescription *Desc,
- const CallEvent &Call,
- CheckerContext &C) const {
- ProgramStateRef State = C.getState();
- SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
- if (!StreamSym)
- return;
-
- const StreamState *SS = State->get<StreamMap>(StreamSym);
- if (!SS)
- return;
-
- assertStreamStateOpened(SS);
-
- // FilePositionIndeterminate is not cleared.
- State = State->set<StreamMap>(
- StreamSym,
- StreamState::getOpened(Desc, ErrorNone, SS->FilePositionIndeterminate));
- C.addTransition(State);
-}
-
-void StreamChecker::evalFeofFerror(const FnDescription *Desc,
- const CallEvent &Call, CheckerContext &C,
- const StreamErrorState &ErrorKind) const {
- ProgramStateRef State = C.getState();
- SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
- if (!StreamSym)
- return;
-
- const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
- if (!CE)
- return;
-
- const StreamState *SS = State->get<StreamMap>(StreamSym);
- if (!SS)
- return;
-
- assertStreamStateOpened(SS);
-
- if (SS->ErrorState & ErrorKind) {
- // Execution path with error of ErrorKind.
- // Function returns true.
- // From now on it is the only one error state.
- ProgramStateRef TrueState = bindAndAssumeTrue(State, C, CE);
- C.addTransition(TrueState->set<StreamMap>(
- StreamSym, StreamState::getOpened(Desc, ErrorKind,
- SS->FilePositionIndeterminate &&
- !ErrorKind.isFEof())));
- }
- if (StreamErrorState NewES = SS->ErrorState & (~ErrorKind)) {
- // Execution path(s) with ErrorKind not set.
- // Function returns false.
- // New error state is everything before minus ErrorKind.
- ProgramStateRef FalseState = bindInt(0, State, C, CE);
- C.addTransition(FalseState->set<StreamMap>(
- StreamSym,
- StreamState::getOpened(
- Desc, NewES, SS->FilePositionIndeterminate && !NewES.isFEof())));
- }
-}
-
-void StreamChecker::preDefault(const FnDescription *Desc, const CallEvent &Call,
- CheckerContext &C) const {
- ProgramStateRef State = C.getState();
- SVal StreamVal = getStreamArg(Desc, Call);
- State = ensureStreamNonNull(StreamVal, Call.getArgExpr(Desc->StreamArgNo), C,
- State);
- if (!State)
- return;
- State = ensureStreamOpened(StreamVal, C, State);
- if (!State)
- return;
-
- C.addTransition(State);
-}
-
-void StreamChecker::evalSetFeofFerror(const FnDescription *Desc,
- const CallEvent &Call, CheckerContext &C,
- const StreamErrorState &ErrorKind) const {
- ProgramStateRef State = C.getState();
- SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
- assert(StreamSym && "Operation not permitted on non-symbolic stream value.");
- const StreamState *SS = State->get<StreamMap>(StreamSym);
- assert(SS && "Stream should be tracked by the checker.");
- State = State->set<StreamMap>(
- StreamSym, StreamState::getOpened(SS->LastOperation, ErrorKind));
+ State =
+ E.setStreamState(State, StreamState::getOpened(Desc, ErrorNone, false));
C.addTransition(State);
}
@@ -1510,6 +1669,106 @@ void StreamChecker::evalFflush(const FnDescription *Desc, const CallEvent &Call,
C.addTransition(StateFailed);
}
+void StreamChecker::evalClearerr(const FnDescription *Desc,
+ const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ StreamOperationEvaluator E(C);
+ if (!E.Init(Desc, Call, C, State))
+ return;
+
+ // FilePositionIndeterminate is not cleared.
+ State = E.setStreamState(
+ State,
+ StreamState::getOpened(Desc, ErrorNone, E.SS->FilePositionIndeterminate));
+ C.addTransition(State);
+}
+
+void StreamChecker::evalFeofFerror(const FnDescription *Desc,
+ const CallEvent &Call, CheckerContext &C,
+ const StreamErrorState &ErrorKind) const {
+ ProgramStateRef State = C.getState();
+ StreamOperationEvaluator E(C);
+ if (!E.Init(Desc, Call, C, State))
+ return;
+
+ if (E.SS->ErrorState & ErrorKind) {
+ // Execution path with error of ErrorKind.
+ // Function returns true.
+ // From now on it is the only one error state.
+ ProgramStateRef TrueState = bindAndAssumeTrue(State, C, E.CE);
+ C.addTransition(E.setStreamState(
+ TrueState, StreamState::getOpened(Desc, ErrorKind,
+ E.SS->FilePositionIndeterminate &&
+ !ErrorKind.isFEof())));
+ }
+ if (StreamErrorState NewES = E.SS->ErrorState & (~ErrorKind)) {
+ // Execution path(s) with ErrorKind not set.
+ // Function returns false.
+ // New error state is everything before minus ErrorKind.
+ ProgramStateRef FalseState = E.bindReturnValue(State, C, 0);
+ C.addTransition(E.setStreamState(
+ FalseState,
+ StreamState::getOpened(
+ Desc, NewES, E.SS->FilePositionIndeterminate && !NewES.isFEof())));
+ }
+}
+
+void StreamChecker::evalFileno(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ // Fileno should fail only if the passed pointer is invalid.
+ // Some of the preconditions are checked already in preDefault.
+ // Here we can assume that the operation does not fail, because if we
+ // introduced a separate branch where fileno() returns -1, then it would cause
+ // many unexpected and unwanted warnings in situations where fileno() is
+ // called on valid streams.
+ // The stream error states are not modified by 'fileno', and 'errno' is also
+ // left unchanged (so this evalCall does not invalidate it, but we have a
+ // custom evalCall instead of the default that would invalidate it).
+ ProgramStateRef State = C.getState();
+ StreamOperationEvaluator E(C);
+ if (!E.Init(Desc, Call, C, State))
+ return;
+
+ NonLoc RetVal = makeRetVal(C, E.CE).castAs<NonLoc>();
+ State = State->BindExpr(E.CE, C.getLocationContext(), RetVal);
+ State = E.assumeBinOpNN(State, BO_GE, RetVal, E.getZeroVal(Call));
+ if (!State)
+ return;
+
+ C.addTransition(State);
+}
+
+void StreamChecker::preDefault(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SVal StreamVal = getStreamArg(Desc, Call);
+ State = ensureStreamNonNull(StreamVal, Call.getArgExpr(Desc->StreamArgNo), C,
+ State);
+ if (!State)
+ return;
+ State = ensureStreamOpened(StreamVal, C, State);
+ if (!State)
+ return;
+
+ C.addTransition(State);
+}
+
+void StreamChecker::evalSetFeofFerror(const FnDescription *Desc,
+ const CallEvent &Call, CheckerContext &C,
+ const StreamErrorState &ErrorKind,
+ bool Indeterminate) const {
+ ProgramStateRef State = C.getState();
+ SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ assert(StreamSym && "Operation not permitted on non-symbolic stream value.");
+ const StreamState *SS = State->get<StreamMap>(StreamSym);
+ assert(SS && "Stream should be tracked by the checker.");
+ State = State->set<StreamMap>(
+ StreamSym,
+ StreamState::getOpened(SS->LastOperation, ErrorKind, Indeterminate));
+ C.addTransition(State);
+}
+
ProgramStateRef
StreamChecker::ensureStreamNonNull(SVal StreamVal, const Expr *StreamE,
CheckerContext &C,
@@ -1608,18 +1867,22 @@ ProgramStateRef StreamChecker::ensureNoFilePositionIndeterminate(
if (!N)
return nullptr;
- C.emitReport(std::make_unique<PathSensitiveBugReport>(
- BT_IndeterminatePosition, BugMessage, N));
+ auto R = std::make_unique<PathSensitiveBugReport>(
+ BT_IndeterminatePosition, BugMessage, N);
+ R->markInteresting(Sym);
+ C.emitReport(std::move(R));
return State->set<StreamMap>(
Sym, StreamState::getOpened(SS->LastOperation, ErrorFEof, false));
}
// Known or unknown error state without FEOF possible.
// Stop analysis, report error.
- ExplodedNode *N = C.generateErrorNode(State);
- if (N)
- C.emitReport(std::make_unique<PathSensitiveBugReport>(
- BT_IndeterminatePosition, BugMessage, N));
+ if (ExplodedNode *N = C.generateErrorNode(State)) {
+ auto R = std::make_unique<PathSensitiveBugReport>(
+ BT_IndeterminatePosition, BugMessage, N);
+ R->markInteresting(Sym);
+ C.emitReport(std::move(R));
+ }
return nullptr;
}
@@ -1701,6 +1964,7 @@ StreamChecker::reportLeaks(const SmallVector<SymbolRef, 2> &LeakedSyms,
LocUsedForUniqueing,
StreamOpenNode->getLocationContext()->getDecl());
R->markInteresting(LeakSym);
+ R->addVisitor<NoStreamStateChangeVisitor>(LeakSym, this);
C.emitReport(std::move(R));
}
@@ -1758,7 +2022,9 @@ ProgramStateRef StreamChecker::checkPointerEscape(
//===----------------------------------------------------------------------===//
void ento::registerStreamChecker(CheckerManager &Mgr) {
- Mgr.registerChecker<StreamChecker>();
+ auto *Checker = Mgr.registerChecker<StreamChecker>();
+ Checker->PedanticMode =
+ Mgr.getAnalyzerOptions().getCheckerBooleanOption(Checker, "Pedantic");
}
bool ento::shouldRegisterStreamChecker(const CheckerManager &Mgr) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StringChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StringChecker.cpp
index 2dc9e29ca906..8f1c31763e21 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StringChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StringChecker.cpp
@@ -27,7 +27,7 @@ class StringChecker : public Checker<check::PreCall> {
mutable const FunctionDecl *StringConstCharPtrCtor = nullptr;
mutable CanQualType SizeTypeTy;
const CallDescription TwoParamStdStringCtor = {
- {"std", "basic_string", "basic_string"}, 2, 2};
+ CDM::CXXMethod, {"std", "basic_string", "basic_string"}, 2, 2};
bool isCharToStringCtor(const CallEvent &Call, const ASTContext &ACtx) const;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h
index 6de33da107a3..dec461296fed 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h
@@ -13,7 +13,6 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "llvm/ADT/FoldingSet.h"
@@ -96,4 +95,4 @@ void handleConstructorAndAssignment(const CallEvent &Call, CheckerContext &C,
} // namespace clang::ento::tagged_union_modeling
-#endif // LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_TAGGEDUNIONMODELING_H \ No newline at end of file
+#endif // LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_TAGGEDUNIONMODELING_H
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.cpp
index 4edb671753bf..0bb5739db4b7 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.cpp
@@ -12,6 +12,7 @@
#include "clang/StaticAnalyzer/Checkers/Taint.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include <optional>
@@ -216,21 +217,17 @@ std::vector<SymbolRef> taint::getTaintedSymbolsImpl(ProgramStateRef State,
std::vector<SymbolRef> TaintedSymbols;
if (!Reg)
return TaintedSymbols;
- // Element region (array element) is tainted if either the base or the offset
- // are tainted.
+
+ // Element region (array element) is tainted if the offset is tainted.
if (const ElementRegion *ER = dyn_cast<ElementRegion>(Reg)) {
std::vector<SymbolRef> TaintedIndex =
getTaintedSymbolsImpl(State, ER->getIndex(), K, returnFirstOnly);
llvm::append_range(TaintedSymbols, TaintedIndex);
if (returnFirstOnly && !TaintedSymbols.empty())
return TaintedSymbols; // return early if needed
- std::vector<SymbolRef> TaintedSuperRegion =
- getTaintedSymbolsImpl(State, ER->getSuperRegion(), K, returnFirstOnly);
- llvm::append_range(TaintedSymbols, TaintedSuperRegion);
- if (returnFirstOnly && !TaintedSymbols.empty())
- return TaintedSymbols; // return early if needed
}
+ // Symbolic region is tainted if the corresponding symbol is tainted.
if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Reg)) {
std::vector<SymbolRef> TaintedRegions =
getTaintedSymbolsImpl(State, SR->getSymbol(), K, returnFirstOnly);
@@ -239,6 +236,8 @@ std::vector<SymbolRef> taint::getTaintedSymbolsImpl(ProgramStateRef State,
return TaintedSymbols; // return early if needed
}
+ // Any subregion (including Element and Symbolic regions) is tainted if its
+ // super-region is tainted.
if (const SubRegion *ER = dyn_cast<SubRegion>(Reg)) {
std::vector<SymbolRef> TaintedSubRegions =
getTaintedSymbolsImpl(State, ER->getSuperRegion(), K, returnFirstOnly);
@@ -258,6 +257,12 @@ std::vector<SymbolRef> taint::getTaintedSymbolsImpl(ProgramStateRef State,
if (!Sym)
return TaintedSymbols;
+ // HACK:https://discourse.llvm.org/t/rfc-make-istainted-and-complex-symbols-friends/79570
+ if (const auto &Opts = State->getAnalysisManager().getAnalyzerOptions();
+ Sym->computeComplexity() > Opts.MaxTaintedSymbolComplexity) {
+ return {};
+ }
+
// Traverse all the symbols this symbol depends on to see if any are tainted.
for (SymbolRef SubSym : Sym->symbols()) {
if (!isa<SymbolData>(SubSym))
@@ -318,4 +323,4 @@ std::vector<SymbolRef> taint::getTaintedSymbolsImpl(ProgramStateRef State,
}
}
return TaintedSymbols;
-} \ No newline at end of file
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
index b05ce610067c..da2d16ca9b5d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -17,7 +17,10 @@
#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
@@ -39,27 +42,42 @@ enum class OpenVariant {
namespace {
-class UnixAPIMisuseChecker : public Checker< check::PreStmt<CallExpr> > {
+class UnixAPIMisuseChecker
+ : public Checker<check::PreCall, check::ASTDecl<TranslationUnitDecl>> {
const BugType BT_open{this, "Improper use of 'open'", categories::UnixAPI};
+ const BugType BT_getline{this, "Improper use of getdelim",
+ categories::UnixAPI};
const BugType BT_pthreadOnce{this, "Improper use of 'pthread_once'",
categories::UnixAPI};
+ const BugType BT_ArgumentNull{this, "NULL pointer", categories::UnixAPI};
mutable std::optional<uint64_t> Val_O_CREAT;
+ ProgramStateRef
+ EnsurePtrNotNull(SVal PtrVal, const Expr *PtrExpr, CheckerContext &C,
+ ProgramStateRef State, const StringRef PtrDescr,
+ std::optional<std::reference_wrapper<const BugType>> BT =
+ std::nullopt) const;
+
+ ProgramStateRef EnsureGetdelimBufferAndSizeCorrect(
+ SVal LinePtrPtrSVal, SVal SizePtrSVal, const Expr *LinePtrPtrExpr,
+ const Expr *SizePtrExpr, CheckerContext &C, ProgramStateRef State) const;
+
public:
- void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkASTDecl(const TranslationUnitDecl *TU, AnalysisManager &Mgr,
+ BugReporter &BR) const;
- void CheckOpen(CheckerContext &C, const CallExpr *CE) const;
- void CheckOpenAt(CheckerContext &C, const CallExpr *CE) const;
- void CheckPthreadOnce(CheckerContext &C, const CallExpr *CE) const;
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
- void CheckOpenVariant(CheckerContext &C,
- const CallExpr *CE, OpenVariant Variant) const;
+ void CheckOpen(CheckerContext &C, const CallEvent &Call) const;
+ void CheckOpenAt(CheckerContext &C, const CallEvent &Call) const;
+ void CheckGetDelim(CheckerContext &C, const CallEvent &Call) const;
+ void CheckPthreadOnce(CheckerContext &C, const CallEvent &Call) const;
- void ReportOpenBug(CheckerContext &C,
- ProgramStateRef State,
- const char *Msg,
- SourceRange SR) const;
+ void CheckOpenVariant(CheckerContext &C, const CallEvent &Call,
+ OpenVariant Variant) const;
+ void ReportOpenBug(CheckerContext &C, ProgramStateRef State, const char *Msg,
+ SourceRange SR) const;
};
class UnixAPIPortabilityChecker : public Checker< check::PreStmt<CallExpr> > {
@@ -90,15 +108,53 @@ private:
const char *fn) const;
};
-} //end anonymous namespace
+} // end anonymous namespace
+
+ProgramStateRef UnixAPIMisuseChecker::EnsurePtrNotNull(
+ SVal PtrVal, const Expr *PtrExpr, CheckerContext &C, ProgramStateRef State,
+ const StringRef PtrDescr,
+ std::optional<std::reference_wrapper<const BugType>> BT) const {
+ const auto Ptr = PtrVal.getAs<DefinedSVal>();
+ if (!Ptr)
+ return State;
+
+ const auto [PtrNotNull, PtrNull] = State->assume(*Ptr);
+ if (!PtrNotNull && PtrNull) {
+ if (ExplodedNode *N = C.generateErrorNode(PtrNull)) {
+ auto R = std::make_unique<PathSensitiveBugReport>(
+ BT.value_or(std::cref(BT_ArgumentNull)),
+ (PtrDescr + " pointer might be NULL.").str(), N);
+ if (PtrExpr)
+ bugreporter::trackExpressionValue(N, PtrExpr, *R);
+ C.emitReport(std::move(R));
+ }
+ return nullptr;
+ }
+
+ return PtrNotNull;
+}
+
+void UnixAPIMisuseChecker::checkASTDecl(const TranslationUnitDecl *TU,
+ AnalysisManager &Mgr,
+ BugReporter &) const {
+ // The definition of O_CREAT is platform specific.
+ // Try to get the macro value from the preprocessor.
+ Val_O_CREAT = tryExpandAsInteger("O_CREAT", Mgr.getPreprocessor());
+ // If we failed, fall-back to known values.
+ if (!Val_O_CREAT) {
+ if (TU->getASTContext().getTargetInfo().getTriple().getVendor() ==
+ llvm::Triple::Apple)
+ Val_O_CREAT = 0x0200;
+ }
+}
//===----------------------------------------------------------------------===//
// "open" (man 2 open)
//===----------------------------------------------------------------------===/
-void UnixAPIMisuseChecker::checkPreStmt(const CallExpr *CE,
+void UnixAPIMisuseChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
- const FunctionDecl *FD = C.getCalleeDecl(CE);
+ const FunctionDecl *FD = dyn_cast_if_present<FunctionDecl>(Call.getDecl());
if (!FD || FD->getKind() != Decl::Function)
return;
@@ -113,13 +169,16 @@ void UnixAPIMisuseChecker::checkPreStmt(const CallExpr *CE,
return;
if (FName == "open")
- CheckOpen(C, CE);
+ CheckOpen(C, Call);
else if (FName == "openat")
- CheckOpenAt(C, CE);
+ CheckOpenAt(C, Call);
else if (FName == "pthread_once")
- CheckPthreadOnce(C, CE);
+ CheckPthreadOnce(C, Call);
+
+ else if (is_contained({"getdelim", "getline"}, FName))
+ CheckGetDelim(C, Call);
}
void UnixAPIMisuseChecker::ReportOpenBug(CheckerContext &C,
ProgramStateRef State,
@@ -135,17 +194,17 @@ void UnixAPIMisuseChecker::ReportOpenBug(CheckerContext &C,
}
void UnixAPIMisuseChecker::CheckOpen(CheckerContext &C,
- const CallExpr *CE) const {
- CheckOpenVariant(C, CE, OpenVariant::Open);
+ const CallEvent &Call) const {
+ CheckOpenVariant(C, Call, OpenVariant::Open);
}
void UnixAPIMisuseChecker::CheckOpenAt(CheckerContext &C,
- const CallExpr *CE) const {
- CheckOpenVariant(C, CE, OpenVariant::OpenAt);
+ const CallEvent &Call) const {
+ CheckOpenVariant(C, Call, OpenVariant::OpenAt);
}
void UnixAPIMisuseChecker::CheckOpenVariant(CheckerContext &C,
- const CallExpr *CE,
+ const CallEvent &Call,
OpenVariant Variant) const {
// The index of the argument taking the flags open flags (O_RDONLY,
// O_WRONLY, O_CREAT, etc.),
@@ -174,11 +233,11 @@ void UnixAPIMisuseChecker::CheckOpenVariant(CheckerContext &C,
ProgramStateRef state = C.getState();
- if (CE->getNumArgs() < MinArgCount) {
+ if (Call.getNumArgs() < MinArgCount) {
// The frontend should issue a warning for this case. Just return.
return;
- } else if (CE->getNumArgs() == MaxArgCount) {
- const Expr *Arg = CE->getArg(CreateModeArgIndex);
+ } else if (Call.getNumArgs() == MaxArgCount) {
+ const Expr *Arg = Call.getArgExpr(CreateModeArgIndex);
QualType QT = Arg->getType();
if (!QT->isIntegerType()) {
SmallString<256> SBuf;
@@ -192,36 +251,24 @@ void UnixAPIMisuseChecker::CheckOpenVariant(CheckerContext &C,
Arg->getSourceRange());
return;
}
- } else if (CE->getNumArgs() > MaxArgCount) {
+ } else if (Call.getNumArgs() > MaxArgCount) {
SmallString<256> SBuf;
llvm::raw_svector_ostream OS(SBuf);
OS << "Call to '" << VariantName << "' with more than " << MaxArgCount
<< " arguments";
- ReportOpenBug(C, state,
- SBuf.c_str(),
- CE->getArg(MaxArgCount)->getSourceRange());
+ ReportOpenBug(C, state, SBuf.c_str(),
+ Call.getArgExpr(MaxArgCount)->getSourceRange());
return;
}
- // The definition of O_CREAT is platform specific. We need a better way
- // of querying this information from the checking environment.
if (!Val_O_CREAT) {
- if (C.getASTContext().getTargetInfo().getTriple().getVendor()
- == llvm::Triple::Apple)
- Val_O_CREAT = 0x0200;
- else {
- // FIXME: We need a more general way of getting the O_CREAT value.
- // We could possibly grovel through the preprocessor state, but
- // that would require passing the Preprocessor object to the ExprEngine.
- // See also: MallocChecker.cpp / M_ZERO.
- return;
- }
+ return;
}
// Now check if oflags has O_CREAT set.
- const Expr *oflagsEx = CE->getArg(FlagsArgIndex);
- const SVal V = C.getSVal(oflagsEx);
+ const Expr *oflagsEx = Call.getArgExpr(FlagsArgIndex);
+ const SVal V = Call.getArgSVal(FlagsArgIndex);
if (!isa<NonLoc>(V)) {
// The case where 'V' can be a location can only be due to a bad header,
// so in this case bail out.
@@ -247,7 +294,7 @@ void UnixAPIMisuseChecker::CheckOpenVariant(CheckerContext &C,
if (!(trueState && !falseState))
return;
- if (CE->getNumArgs() < MaxArgCount) {
+ if (Call.getNumArgs() < MaxArgCount) {
SmallString<256> SBuf;
llvm::raw_svector_ostream OS(SBuf);
OS << "Call to '" << VariantName << "' requires a "
@@ -261,22 +308,109 @@ void UnixAPIMisuseChecker::CheckOpenVariant(CheckerContext &C,
}
//===----------------------------------------------------------------------===//
+// getdelim and getline
+//===----------------------------------------------------------------------===//
+
+ProgramStateRef UnixAPIMisuseChecker::EnsureGetdelimBufferAndSizeCorrect(
+ SVal LinePtrPtrSVal, SVal SizePtrSVal, const Expr *LinePtrPtrExpr,
+ const Expr *SizePtrExpr, CheckerContext &C, ProgramStateRef State) const {
+ static constexpr llvm::StringLiteral SizeGreaterThanBufferSize =
+ "The buffer from the first argument is smaller than the size "
+ "specified by the second parameter";
+ static constexpr llvm::StringLiteral SizeUndef =
+ "The buffer from the first argument is not NULL, but the size specified "
+ "by the second parameter is undefined.";
+
+ auto EmitBugReport = [this, &C, SizePtrExpr, LinePtrPtrExpr](
+ ProgramStateRef BugState, StringRef ErrMsg) {
+ if (ExplodedNode *N = C.generateErrorNode(BugState)) {
+ auto R = std::make_unique<PathSensitiveBugReport>(BT_getline, ErrMsg, N);
+ bugreporter::trackExpressionValue(N, SizePtrExpr, *R);
+ bugreporter::trackExpressionValue(N, LinePtrPtrExpr, *R);
+ C.emitReport(std::move(R));
+ }
+ };
+
+ // We have a pointer to a pointer to the buffer, and a pointer to the size.
+ // We want what they point at.
+ auto LinePtrSVal = getPointeeVal(LinePtrPtrSVal, State)->getAs<DefinedSVal>();
+ auto NSVal = getPointeeVal(SizePtrSVal, State);
+ if (!LinePtrSVal || !NSVal || NSVal->isUnknown())
+ return nullptr;
+
+ assert(LinePtrPtrExpr && SizePtrExpr);
+
+ const auto [LinePtrNotNull, LinePtrNull] = State->assume(*LinePtrSVal);
+ if (LinePtrNotNull && !LinePtrNull) {
+ // If `*lineptr` is not null, but `*n` is undefined, there is UB.
+ if (NSVal->isUndef()) {
+ EmitBugReport(LinePtrNotNull, SizeUndef);
+ return nullptr;
+ }
+
+ // If it is defined, and known, its size must be less than or equal to
+ // the buffer size.
+ auto NDefSVal = NSVal->getAs<DefinedSVal>();
+ auto &SVB = C.getSValBuilder();
+ auto LineBufSize =
+ getDynamicExtent(LinePtrNotNull, LinePtrSVal->getAsRegion(), SVB);
+ auto LineBufSizeGtN = SVB.evalBinOp(LinePtrNotNull, BO_GE, LineBufSize,
+ *NDefSVal, SVB.getConditionType())
+ .getAs<DefinedOrUnknownSVal>();
+ if (!LineBufSizeGtN)
+ return LinePtrNotNull;
+ if (auto LineBufSizeOk = LinePtrNotNull->assume(*LineBufSizeGtN, true))
+ return LineBufSizeOk;
+
+ EmitBugReport(LinePtrNotNull, SizeGreaterThanBufferSize);
+ return nullptr;
+ }
+ return State;
+}
+
+void UnixAPIMisuseChecker::CheckGetDelim(CheckerContext &C,
+ const CallEvent &Call) const {
+ ProgramStateRef State = C.getState();
+
+ // The parameter `n` must not be NULL.
+ SVal SizePtrSval = Call.getArgSVal(1);
+ State = EnsurePtrNotNull(SizePtrSval, Call.getArgExpr(1), C, State, "Size");
+ if (!State)
+ return;
+
+ // The parameter `lineptr` must not be NULL.
+ SVal LinePtrPtrSVal = Call.getArgSVal(0);
+ State =
+ EnsurePtrNotNull(LinePtrPtrSVal, Call.getArgExpr(0), C, State, "Line");
+ if (!State)
+ return;
+
+ State = EnsureGetdelimBufferAndSizeCorrect(LinePtrPtrSVal, SizePtrSval,
+ Call.getArgExpr(0),
+ Call.getArgExpr(1), C, State);
+ if (!State)
+ return;
+
+ C.addTransition(State);
+}
+
+//===----------------------------------------------------------------------===//
// pthread_once
//===----------------------------------------------------------------------===//
void UnixAPIMisuseChecker::CheckPthreadOnce(CheckerContext &C,
- const CallExpr *CE) const {
+ const CallEvent &Call) const {
// This is similar to 'CheckDispatchOnce' in the MacOSXAPIChecker.
// They can possibly be refactored.
- if (CE->getNumArgs() < 1)
+ if (Call.getNumArgs() < 1)
return;
// Check if the first argument is stack allocated. If so, issue a warning
// because that's likely to be bad news.
ProgramStateRef state = C.getState();
- const MemRegion *R = C.getSVal(CE->getArg(0)).getAsRegion();
+ const MemRegion *R = Call.getArgSVal(0).getAsRegion();
if (!R || !isa<StackSpaceRegion>(R->getMemorySpace()))
return;
@@ -298,7 +432,7 @@ void UnixAPIMisuseChecker::CheckPthreadOnce(CheckerContext &C,
auto report =
std::make_unique<PathSensitiveBugReport>(BT_pthreadOnce, os.str(), N);
- report->addRange(CE->getArg(0)->getSourceRange());
+ report->addRange(Call.getArgExpr(0)->getSourceRange());
C.emitReport(std::move(report));
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
index d24a124f5ffe..7ce9a5b5bb6d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
@@ -159,6 +159,8 @@ void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
SL = DL.asLocation();
if (SR.isInvalid() || !SL.isValid())
continue;
+ if (isa<CXXTryStmt>(S))
+ continue;
}
else
continue;
@@ -254,4 +256,4 @@ void ento::registerUnreachableCodeChecker(CheckerManager &mgr) {
bool ento::shouldRegisterUnreachableCodeChecker(const CheckerManager &mgr) {
return true;
-}
+} \ No newline at end of file
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
index d76fe4991869..87d255eeffc1 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -164,12 +164,6 @@ ProgramStateRef VLASizeChecker::checkVLAIndexSize(CheckerContext &C,
if (SizeV.isUnknown())
return nullptr;
- // Check if the size is tainted.
- if (isTainted(State, SizeV)) {
- reportTaintBug(SizeE, State, C, SizeV);
- return nullptr;
- }
-
// Check if the size is zero.
DefinedSVal SizeD = SizeV.castAs<DefinedSVal>();
@@ -192,10 +186,10 @@ ProgramStateRef VLASizeChecker::checkVLAIndexSize(CheckerContext &C,
SVal LessThanZeroVal =
SVB.evalBinOp(State, BO_LT, SizeD, Zero, SVB.getConditionType());
+ ProgramStateRef StatePos, StateNeg;
if (std::optional<DefinedSVal> LessThanZeroDVal =
LessThanZeroVal.getAs<DefinedSVal>()) {
ConstraintManager &CM = C.getConstraintManager();
- ProgramStateRef StatePos, StateNeg;
std::tie(StateNeg, StatePos) = CM.assumeDual(State, *LessThanZeroDVal);
if (StateNeg && !StatePos) {
@@ -205,6 +199,12 @@ ProgramStateRef VLASizeChecker::checkVLAIndexSize(CheckerContext &C,
State = StatePos;
}
+ // Check if the size is tainted.
+ if ((StateNeg || StateZero) && isTainted(State, SizeV)) {
+ reportTaintBug(SizeE, State, C, SizeV);
+ return nullptr;
+ }
+
return State;
}
@@ -218,7 +218,7 @@ void VLASizeChecker::reportTaintBug(const Expr *SizeE, ProgramStateRef State,
SmallString<256> buf;
llvm::raw_svector_ostream os(buf);
os << "Declared variable-length array (VLA) ";
- os << "has tainted size";
+ os << "has tainted (attacker controlled) size that can be 0 or negative";
auto report = std::make_unique<PathSensitiveBugReport>(TaintBT, os.str(), N);
report->addRange(SizeE->getSourceRange());
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
index 2d1b873abf73..28320f46f237 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
@@ -100,32 +100,31 @@ private:
};
const SmallVector<ValistChecker::VAListAccepter, 15>
- ValistChecker::VAListAccepters = {{{{"vfprintf"}, 3}, 2},
- {{{"vfscanf"}, 3}, 2},
- {{{"vprintf"}, 2}, 1},
- {{{"vscanf"}, 2}, 1},
- {{{"vsnprintf"}, 4}, 3},
- {{{"vsprintf"}, 3}, 2},
- {{{"vsscanf"}, 3}, 2},
- {{{"vfwprintf"}, 3}, 2},
- {{{"vfwscanf"}, 3}, 2},
- {{{"vwprintf"}, 2}, 1},
- {{{"vwscanf"}, 2}, 1},
- {{{"vswprintf"}, 4}, 3},
+ ValistChecker::VAListAccepters = {{{CDM::CLibrary, {"vfprintf"}, 3}, 2},
+ {{CDM::CLibrary, {"vfscanf"}, 3}, 2},
+ {{CDM::CLibrary, {"vprintf"}, 2}, 1},
+ {{CDM::CLibrary, {"vscanf"}, 2}, 1},
+ {{CDM::CLibrary, {"vsnprintf"}, 4}, 3},
+ {{CDM::CLibrary, {"vsprintf"}, 3}, 2},
+ {{CDM::CLibrary, {"vsscanf"}, 3}, 2},
+ {{CDM::CLibrary, {"vfwprintf"}, 3}, 2},
+ {{CDM::CLibrary, {"vfwscanf"}, 3}, 2},
+ {{CDM::CLibrary, {"vwprintf"}, 2}, 1},
+ {{CDM::CLibrary, {"vwscanf"}, 2}, 1},
+ {{CDM::CLibrary, {"vswprintf"}, 4}, 3},
// vswprintf is the wide version of
// vsnprintf, vsprintf has no wide version
- {{{"vswscanf"}, 3}, 2}};
+ {{CDM::CLibrary, {"vswscanf"}, 3}, 2}};
-const CallDescription ValistChecker::VaStart({"__builtin_va_start"}, /*Args=*/2,
+const CallDescription ValistChecker::VaStart(CDM::CLibrary,
+ {"__builtin_va_start"}, /*Args=*/2,
/*Params=*/1),
- ValistChecker::VaCopy({"__builtin_va_copy"}, 2),
- ValistChecker::VaEnd({"__builtin_va_end"}, 1);
+ ValistChecker::VaCopy(CDM::CLibrary, {"__builtin_va_copy"}, 2),
+ ValistChecker::VaEnd(CDM::CLibrary, {"__builtin_va_end"}, 1);
} // end anonymous namespace
void ValistChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
- if (!Call.isGlobalCFunction())
- return;
if (VaStart.matches(Call))
checkVAListStartCall(Call, C, false);
else if (VaCopy.matches(Call))
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp
index 64028b277021..be07cf51eefb 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp
@@ -16,15 +16,41 @@
namespace clang {
-std::pair<const Expr *, bool>
-tryToFindPtrOrigin(const Expr *E, bool StopAtFirstRefCountedObj) {
+bool tryToFindPtrOrigin(
+ const Expr *E, bool StopAtFirstRefCountedObj,
+ std::function<bool(const clang::Expr *, bool)> callback) {
while (E) {
+ if (auto *tempExpr = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = tempExpr->getSubExpr();
+ continue;
+ }
+ if (auto *tempExpr = dyn_cast<CXXBindTemporaryExpr>(E)) {
+ E = tempExpr->getSubExpr();
+ continue;
+ }
+ if (auto *tempExpr = dyn_cast<CXXTemporaryObjectExpr>(E)) {
+ if (auto *C = tempExpr->getConstructor()) {
+ if (auto *Class = C->getParent(); Class && isRefCounted(Class))
+ return callback(E, true);
+ break;
+ }
+ }
+ if (auto *tempExpr = dyn_cast<ParenExpr>(E)) {
+ E = tempExpr->getSubExpr();
+ continue;
+ }
+ if (auto *Expr = dyn_cast<ConditionalOperator>(E)) {
+ return tryToFindPtrOrigin(Expr->getTrueExpr(), StopAtFirstRefCountedObj,
+ callback) &&
+ tryToFindPtrOrigin(Expr->getFalseExpr(), StopAtFirstRefCountedObj,
+ callback);
+ }
if (auto *cast = dyn_cast<CastExpr>(E)) {
if (StopAtFirstRefCountedObj) {
if (auto *ConversionFunc =
dyn_cast_or_null<FunctionDecl>(cast->getConversionFunction())) {
if (isCtorOfRefCounted(ConversionFunc))
- return {E, true};
+ return callback(E, true);
}
}
// FIXME: This can give false "origin" that would lead to false negatives
@@ -34,13 +60,15 @@ tryToFindPtrOrigin(const Expr *E, bool StopAtFirstRefCountedObj) {
}
if (auto *call = dyn_cast<CallExpr>(E)) {
if (auto *memberCall = dyn_cast<CXXMemberCallExpr>(call)) {
- std::optional<bool> IsGetterOfRefCt = isGetterOfRefCounted(memberCall->getMethodDecl());
- if (IsGetterOfRefCt && *IsGetterOfRefCt) {
- E = memberCall->getImplicitObjectArgument();
- if (StopAtFirstRefCountedObj) {
- return {E, true};
+ if (auto *decl = memberCall->getMethodDecl()) {
+ std::optional<bool> IsGetterOfRefCt = isGetterOfRefCounted(decl);
+ if (IsGetterOfRefCt && *IsGetterOfRefCt) {
+ E = memberCall->getImplicitObjectArgument();
+ if (StopAtFirstRefCountedObj) {
+ return callback(E, true);
+ }
+ continue;
}
- continue;
}
}
@@ -54,12 +82,18 @@ tryToFindPtrOrigin(const Expr *E, bool StopAtFirstRefCountedObj) {
if (auto *callee = call->getDirectCallee()) {
if (isCtorOfRefCounted(callee)) {
if (StopAtFirstRefCountedObj)
- return {E, true};
+ return callback(E, true);
E = call->getArg(0);
continue;
}
+ if (isReturnValueRefCounted(callee))
+ return callback(E, true);
+
+ if (isSingleton(callee))
+ return callback(E, true);
+
if (isPtrConversion(callee)) {
E = call->getArg(0);
continue;
@@ -75,7 +109,7 @@ tryToFindPtrOrigin(const Expr *E, bool StopAtFirstRefCountedObj) {
break;
}
// Some other expression.
- return {E, false};
+ return callback(E, false);
}
bool isASafeCallArg(const Expr *E) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h
index e35ea4ef05dd..e972924e0c52 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h
@@ -13,6 +13,7 @@
#include "llvm/ADT/APInt.h"
#include "llvm/Support/Casting.h"
+#include <functional>
#include <string>
#include <utility>
@@ -48,10 +49,12 @@ class Expr;
/// represents ref-counted object during the traversal we return relevant
/// sub-expression and true.
///
-/// \returns subexpression that we traversed to and if \p
-/// StopAtFirstRefCountedObj is true we also return whether we stopped early.
-std::pair<const clang::Expr *, bool>
-tryToFindPtrOrigin(const clang::Expr *E, bool StopAtFirstRefCountedObj);
+/// Calls \p callback with the subexpression that we traversed to and if \p
+/// StopAtFirstRefCountedObj is true we also specify whether we stopped early.
+/// Returns false if any of calls to callbacks returned false. Otherwise true.
+bool tryToFindPtrOrigin(
+ const clang::Expr *E, bool StopAtFirstRefCountedObj,
+ std::function<bool(const clang::Expr *, bool)> callback);
/// For \p E referring to a ref-countable/-counted pointer/reference we return
/// whether it's a safe call argument. Examples: function parameter or
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp
index c753ed84a700..69a0eb3086ab 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp
@@ -34,7 +34,7 @@ private:
public:
NoUncountedMemberChecker()
: Bug(this,
- "Member variable is a raw-poiner/reference to reference-countable "
+ "Member variable is a raw-pointer/reference to reference-countable "
"type",
"WebKit coding guidelines") {}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
index d2b663410580..49bbff194216 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
@@ -12,6 +12,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/StmtVisitor.h"
#include <optional>
using namespace clang;
@@ -84,6 +85,7 @@ std::optional<bool> isRefCountable(const CXXRecordDecl* R)
if (AnyInconclusiveBase)
return std::nullopt;
+ Paths.clear();
const auto hasPublicDerefInBase =
[&AnyInconclusiveBase](const CXXBaseSpecifier *Base, CXXBasePath &) {
auto hasDerefInBase = clang::hasPublicMethodInBase(Base, "deref");
@@ -101,15 +103,18 @@ std::optional<bool> isRefCountable(const CXXRecordDecl* R)
return hasRef && hasDeref;
}
+bool isRefType(const std::string &Name) {
+ return Name == "Ref" || Name == "RefAllowingPartiallyDestroyed" ||
+ Name == "RefPtr" || Name == "RefPtrAllowingPartiallyDestroyed";
+}
+
bool isCtorOfRefCounted(const clang::FunctionDecl *F) {
assert(F);
- const auto &FunctionName = safeGetName(F);
-
- return FunctionName == "Ref" || FunctionName == "makeRef"
+ const std::string &FunctionName = safeGetName(F);
- || FunctionName == "RefPtr" || FunctionName == "makeRefPtr"
-
- || FunctionName == "UniqueRef" || FunctionName == "makeUniqueRef" ||
+ return isRefType(FunctionName) || FunctionName == "makeRef" ||
+ FunctionName == "makeRefPtr" || FunctionName == "UniqueRef" ||
+ FunctionName == "makeUniqueRef" ||
FunctionName == "makeUniqueRefWithoutFastMallocCheck"
|| FunctionName == "String" || FunctionName == "AtomString" ||
@@ -118,6 +123,26 @@ bool isCtorOfRefCounted(const clang::FunctionDecl *F) {
|| FunctionName == "Identifier";
}
+bool isReturnValueRefCounted(const clang::FunctionDecl *F) {
+ assert(F);
+ QualType type = F->getReturnType();
+ while (!type.isNull()) {
+ if (auto *elaboratedT = type->getAs<ElaboratedType>()) {
+ type = elaboratedT->desugar();
+ continue;
+ }
+ if (auto *specialT = type->getAs<TemplateSpecializationType>()) {
+ if (auto *decl = specialT->getTemplateName().getAsTemplateDecl()) {
+ auto name = decl->getNameAsString();
+ return isRefType(name);
+ }
+ return false;
+ }
+ return false;
+ }
+ return false;
+}
+
std::optional<bool> isUncounted(const CXXRecordDecl* Class)
{
// Keep isRefCounted first as it's cheaper.
@@ -150,19 +175,18 @@ std::optional<bool> isGetterOfRefCounted(const CXXMethodDecl* M)
if (isa<CXXMethodDecl>(M)) {
const CXXRecordDecl *calleeMethodsClass = M->getParent();
auto className = safeGetName(calleeMethodsClass);
- auto methodName = safeGetName(M);
+ auto method = safeGetName(M);
- if (((className == "Ref" || className == "RefPtr") &&
- methodName == "get") ||
+ if ((isRefType(className) && (method == "get" || method == "ptr")) ||
((className == "String" || className == "AtomString" ||
className == "AtomStringImpl" || className == "UniqueString" ||
className == "UniqueStringImpl" || className == "Identifier") &&
- methodName == "impl"))
+ method == "impl"))
return true;
// Ref<T> -> T conversion
// FIXME: Currently allowing any Ref<T> -> whatever cast.
- if (className == "Ref" || className == "RefPtr") {
+ if (isRefType(className)) {
if (auto *maybeRefToRawOperator = dyn_cast<CXXConversionDecl>(M)) {
if (auto *targetConversionType =
maybeRefToRawOperator->getConversionType().getTypePtrOrNull()) {
@@ -179,7 +203,7 @@ bool isRefCounted(const CXXRecordDecl *R) {
if (auto *TmplR = R->getTemplateInstantiationPattern()) {
// FIXME: String/AtomString/UniqueString
const auto &ClassName = safeGetName(TmplR);
- return ClassName == "RefPtr" || ClassName == "Ref";
+ return isRefType(ClassName);
}
return false;
}
@@ -192,11 +216,360 @@ bool isPtrConversion(const FunctionDecl *F) {
// FIXME: check # of params == 1
const auto FunctionName = safeGetName(F);
if (FunctionName == "getPtr" || FunctionName == "WeakPtr" ||
- FunctionName == "dynamicDowncast"
- || FunctionName == "downcast" || FunctionName == "bitwise_cast")
+ FunctionName == "dynamicDowncast" || FunctionName == "downcast" ||
+ FunctionName == "checkedDowncast" ||
+ FunctionName == "uncheckedDowncast" || FunctionName == "bitwise_cast")
return true;
return false;
}
+bool isSingleton(const FunctionDecl *F) {
+ assert(F);
+ // FIXME: check # of params == 1
+ if (auto *MethodDecl = dyn_cast<CXXMethodDecl>(F)) {
+ if (!MethodDecl->isStatic())
+ return false;
+ }
+ const auto &Name = safeGetName(F);
+ std::string SingletonStr = "singleton";
+ auto index = Name.find(SingletonStr);
+ return index != std::string::npos &&
+ index == Name.size() - SingletonStr.size();
+}
+
+// We only care about statements so let's use the simple
+// (non-recursive) visitor.
+class TrivialFunctionAnalysisVisitor
+ : public ConstStmtVisitor<TrivialFunctionAnalysisVisitor, bool> {
+
+ // Returns false if at least one child is non-trivial.
+ bool VisitChildren(const Stmt *S) {
+ for (const Stmt *Child : S->children()) {
+ if (Child && !Visit(Child))
+ return false;
+ }
+
+ return true;
+ }
+
+ template <typename CheckFunction>
+ bool WithCachedResult(const Stmt *S, CheckFunction Function) {
+ // If the statement isn't in the cache, conservatively assume that
+ // it's not trivial until analysis completes. Insert false to the cache
+ // first to avoid infinite recursion.
+ auto [It, IsNew] = Cache.insert(std::make_pair(S, false));
+ if (!IsNew)
+ return It->second;
+ bool Result = Function();
+ Cache[S] = Result;
+ return Result;
+ }
+
+public:
+ using CacheTy = TrivialFunctionAnalysis::CacheTy;
+
+ TrivialFunctionAnalysisVisitor(CacheTy &Cache) : Cache(Cache) {}
+
+ bool IsFunctionTrivial(const Decl *D) {
+ auto CacheIt = Cache.find(D);
+ if (CacheIt != Cache.end())
+ return CacheIt->second;
+
+ // Treat a recursive function call to be trivial until proven otherwise.
+ auto [RecursiveIt, IsNew] = RecursiveFn.insert(std::make_pair(D, true));
+ if (!IsNew)
+ return RecursiveIt->second;
+
+ bool Result = [&]() {
+ if (auto *CtorDecl = dyn_cast<CXXConstructorDecl>(D)) {
+ for (auto *CtorInit : CtorDecl->inits()) {
+ if (!Visit(CtorInit->getInit()))
+ return false;
+ }
+ }
+ const Stmt *Body = D->getBody();
+ if (!Body)
+ return false;
+ return Visit(Body);
+ }();
+
+ if (!Result) {
+ // D and its mutually recursive callers are all non-trivial.
+ for (auto &It : RecursiveFn)
+ It.second = false;
+ }
+ RecursiveIt = RecursiveFn.find(D);
+ assert(RecursiveIt != RecursiveFn.end());
+ Result = RecursiveIt->second;
+ RecursiveFn.erase(RecursiveIt);
+ Cache[D] = Result;
+
+ return Result;
+ }
+
+ bool VisitStmt(const Stmt *S) {
+ // All statements are non-trivial unless overriden later.
+ // Don't even recurse into children by default.
+ return false;
+ }
+
+ bool VisitCompoundStmt(const CompoundStmt *CS) {
+ // A compound statement is allowed as long each individual sub-statement
+ // is trivial.
+ return WithCachedResult(CS, [&]() { return VisitChildren(CS); });
+ }
+
+ bool VisitReturnStmt(const ReturnStmt *RS) {
+ // A return statement is allowed as long as the return value is trivial.
+ if (auto *RV = RS->getRetValue())
+ return Visit(RV);
+ return true;
+ }
+
+ bool VisitDeclStmt(const DeclStmt *DS) { return VisitChildren(DS); }
+ bool VisitDoStmt(const DoStmt *DS) { return VisitChildren(DS); }
+ bool VisitIfStmt(const IfStmt *IS) {
+ return WithCachedResult(IS, [&]() { return VisitChildren(IS); });
+ }
+ bool VisitForStmt(const ForStmt *FS) {
+ return WithCachedResult(FS, [&]() { return VisitChildren(FS); });
+ }
+ bool VisitCXXForRangeStmt(const CXXForRangeStmt *FS) {
+ return WithCachedResult(FS, [&]() { return VisitChildren(FS); });
+ }
+ bool VisitWhileStmt(const WhileStmt *WS) {
+ return WithCachedResult(WS, [&]() { return VisitChildren(WS); });
+ }
+ bool VisitSwitchStmt(const SwitchStmt *SS) { return VisitChildren(SS); }
+ bool VisitCaseStmt(const CaseStmt *CS) { return VisitChildren(CS); }
+ bool VisitDefaultStmt(const DefaultStmt *DS) { return VisitChildren(DS); }
+
+ // break, continue, goto, and label statements are always trivial.
+ bool VisitBreakStmt(const BreakStmt *) { return true; }
+ bool VisitContinueStmt(const ContinueStmt *) { return true; }
+ bool VisitGotoStmt(const GotoStmt *) { return true; }
+ bool VisitLabelStmt(const LabelStmt *) { return true; }
+
+ bool VisitUnaryOperator(const UnaryOperator *UO) {
+ // Unary operators are trivial if its operand is trivial except co_await.
+ return UO->getOpcode() != UO_Coawait && Visit(UO->getSubExpr());
+ }
+
+ bool VisitBinaryOperator(const BinaryOperator *BO) {
+ // Binary operators are trivial if their operands are trivial.
+ return Visit(BO->getLHS()) && Visit(BO->getRHS());
+ }
+
+ bool VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) {
+ // Compound assignment operator such as |= is trivial if its
+ // subexpresssions are trivial.
+ return VisitChildren(CAO);
+ }
+
+ bool VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) {
+ return VisitChildren(ASE);
+ }
+
+ bool VisitConditionalOperator(const ConditionalOperator *CO) {
+ // Ternary operators are trivial if their conditions & values are trivial.
+ return VisitChildren(CO);
+ }
+
+ bool VisitAtomicExpr(const AtomicExpr *E) { return VisitChildren(E); }
+
+ bool VisitStaticAssertDecl(const StaticAssertDecl *SAD) {
+ // Any static_assert is considered trivial.
+ return true;
+ }
+
+ bool VisitCallExpr(const CallExpr *CE) {
+ if (!checkArguments(CE))
+ return false;
+
+ auto *Callee = CE->getDirectCallee();
+ if (!Callee)
+ return false;
+ const auto &Name = safeGetName(Callee);
+
+ if (Callee->isInStdNamespace() &&
+ (Name == "addressof" || Name == "forward" || Name == "move"))
+ return true;
+
+ if (Name == "WTFCrashWithInfo" || Name == "WTFBreakpointTrap" ||
+ Name == "WTFCrashWithSecurityImplication" || Name == "WTFCrash" ||
+ Name == "WTFReportAssertionFailure" || Name == "isMainThread" ||
+ Name == "isMainThreadOrGCThread" || Name == "isMainRunLoop" ||
+ Name == "isWebThread" || Name == "isUIThread" ||
+ Name == "mayBeGCThread" || Name == "compilerFenceForCrash" ||
+ Name == "bitwise_cast" || Name.find("__builtin") == 0)
+ return true;
+
+ return IsFunctionTrivial(Callee);
+ }
+
+ bool
+ VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E) {
+ // Non-type template paramter is compile time constant and trivial.
+ return true;
+ }
+
+ bool VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E) {
+ return VisitChildren(E);
+ }
+
+ bool VisitPredefinedExpr(const PredefinedExpr *E) {
+ // A predefined identifier such as "func" is considered trivial.
+ return true;
+ }
+
+ bool VisitCXXMemberCallExpr(const CXXMemberCallExpr *MCE) {
+ if (!checkArguments(MCE))
+ return false;
+
+ bool TrivialThis = Visit(MCE->getImplicitObjectArgument());
+ if (!TrivialThis)
+ return false;
+
+ auto *Callee = MCE->getMethodDecl();
+ if (!Callee)
+ return false;
+
+ std::optional<bool> IsGetterOfRefCounted = isGetterOfRefCounted(Callee);
+ if (IsGetterOfRefCounted && *IsGetterOfRefCounted)
+ return true;
+
+ // Recursively descend into the callee to confirm that it's trivial as well.
+ return IsFunctionTrivial(Callee);
+ }
+
+ bool VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *OCE) {
+ if (!checkArguments(OCE))
+ return false;
+ auto *Callee = OCE->getCalleeDecl();
+ if (!Callee)
+ return false;
+ // Recursively descend into the callee to confirm that it's trivial as well.
+ return IsFunctionTrivial(Callee);
+ }
+
+ bool VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E) {
+ if (auto *Expr = E->getExpr()) {
+ if (!Visit(Expr))
+ return false;
+ }
+ return true;
+ }
+
+ bool checkArguments(const CallExpr *CE) {
+ for (const Expr *Arg : CE->arguments()) {
+ if (Arg && !Visit(Arg))
+ return false;
+ }
+ return true;
+ }
+
+ bool VisitCXXConstructExpr(const CXXConstructExpr *CE) {
+ for (const Expr *Arg : CE->arguments()) {
+ if (Arg && !Visit(Arg))
+ return false;
+ }
+
+ // Recursively descend into the callee to confirm that it's trivial.
+ return IsFunctionTrivial(CE->getConstructor());
+ }
+
+ bool VisitCXXNewExpr(const CXXNewExpr *NE) { return VisitChildren(NE); }
+
+ bool VisitImplicitCastExpr(const ImplicitCastExpr *ICE) {
+ return Visit(ICE->getSubExpr());
+ }
+
+ bool VisitExplicitCastExpr(const ExplicitCastExpr *ECE) {
+ return Visit(ECE->getSubExpr());
+ }
+
+ bool VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *VMT) {
+ return Visit(VMT->getSubExpr());
+ }
+
+ bool VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *BTE) {
+ if (auto *Temp = BTE->getTemporary()) {
+ if (!TrivialFunctionAnalysis::isTrivialImpl(Temp->getDestructor(), Cache))
+ return false;
+ }
+ return Visit(BTE->getSubExpr());
+ }
+
+ bool VisitExprWithCleanups(const ExprWithCleanups *EWC) {
+ return Visit(EWC->getSubExpr());
+ }
+
+ bool VisitParenExpr(const ParenExpr *PE) { return Visit(PE->getSubExpr()); }
+
+ bool VisitInitListExpr(const InitListExpr *ILE) {
+ for (const Expr *Child : ILE->inits()) {
+ if (Child && !Visit(Child))
+ return false;
+ }
+ return true;
+ }
+
+ bool VisitMemberExpr(const MemberExpr *ME) {
+ // Field access is allowed but the base pointer may itself be non-trivial.
+ return Visit(ME->getBase());
+ }
+
+ bool VisitCXXThisExpr(const CXXThisExpr *CTE) {
+ // The expression 'this' is always trivial, be it explicit or implicit.
+ return true;
+ }
+
+ bool VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
+ // nullptr is trivial.
+ return true;
+ }
+
+ bool VisitDeclRefExpr(const DeclRefExpr *DRE) {
+ // The use of a variable is trivial.
+ return true;
+ }
+
+ // Constant literal expressions are always trivial
+ bool VisitIntegerLiteral(const IntegerLiteral *E) { return true; }
+ bool VisitFloatingLiteral(const FloatingLiteral *E) { return true; }
+ bool VisitFixedPointLiteral(const FixedPointLiteral *E) { return true; }
+ bool VisitCharacterLiteral(const CharacterLiteral *E) { return true; }
+ bool VisitStringLiteral(const StringLiteral *E) { return true; }
+ bool VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { return true; }
+
+ bool VisitConstantExpr(const ConstantExpr *CE) {
+ // Constant expressions are trivial.
+ return true;
+ }
+
+private:
+ CacheTy &Cache;
+ CacheTy RecursiveFn;
+};
+
+bool TrivialFunctionAnalysis::isTrivialImpl(
+ const Decl *D, TrivialFunctionAnalysis::CacheTy &Cache) {
+ TrivialFunctionAnalysisVisitor V(Cache);
+ return V.IsFunctionTrivial(D);
+}
+
+bool TrivialFunctionAnalysis::isTrivialImpl(
+ const Stmt *S, TrivialFunctionAnalysis::CacheTy &Cache) {
+ // If the statement isn't in the cache, conservatively assume that
+ // it's not trivial until analysis completes. Unlike a function case,
+ // we don't insert an entry into the cache until Visit returns
+ // since Visit* functions themselves make use of the cache.
+
+ TrivialFunctionAnalysisVisitor V(Cache);
+ bool Result = V.Visit(S);
+ assert(Cache.contains(S) && "Top-level statement not properly cached!");
+ return Result;
+}
+
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h
index 45b21cc09184..ec1db1cc3358 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h
@@ -10,13 +10,17 @@
#define LLVM_CLANG_ANALYZER_WEBKIT_PTRTYPESEMANTICS_H
#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerUnion.h"
#include <optional>
namespace clang {
class CXXBaseSpecifier;
class CXXMethodDecl;
class CXXRecordDecl;
+class Decl;
class FunctionDecl;
+class Stmt;
class Type;
// Ref-countability of a type is implicitly defined by Ref<T> and RefPtr<T>
@@ -46,10 +50,16 @@ std::optional<bool> isUncounted(const clang::CXXRecordDecl* Class);
/// class, false if not, std::nullopt if inconclusive.
std::optional<bool> isUncountedPtr(const clang::Type* T);
+/// \returns true if Name is a RefPtr, Ref, or its variant, false if not.
+bool isRefType(const std::string &Name);
+
/// \returns true if \p F creates ref-countable object from uncounted parameter,
/// false if not.
bool isCtorOfRefCounted(const clang::FunctionDecl *F);
+/// \returns true if \p F returns a ref-counted object, false if not.
+bool isReturnValueRefCounted(const clang::FunctionDecl *F);
+
/// \returns true if \p M is getter of a ref-counted class, false if not.
std::optional<bool> isGetterOfRefCounted(const clang::CXXMethodDecl* Method);
@@ -57,6 +67,28 @@ std::optional<bool> isGetterOfRefCounted(const clang::CXXMethodDecl* Method);
/// pointer types.
bool isPtrConversion(const FunctionDecl *F);
+/// \returns true if \p F is a static singleton function.
+bool isSingleton(const FunctionDecl *F);
+
+/// An inter-procedural analysis facility that detects functions with "trivial"
+/// behavior with respect to reference counting, such as simple field getters.
+class TrivialFunctionAnalysis {
+public:
+ /// \returns true if \p D is a "trivial" function.
+ bool isTrivial(const Decl *D) const { return isTrivialImpl(D, TheCache); }
+ bool isTrivial(const Stmt *S) const { return isTrivialImpl(S, TheCache); }
+
+private:
+ friend class TrivialFunctionAnalysisVisitor;
+
+ using CacheTy =
+ llvm::DenseMap<llvm::PointerUnion<const Decl *, const Stmt *>, bool>;
+ mutable CacheTy TheCache{};
+
+ static bool isTrivialImpl(const Decl *D, CacheTy &Cache);
+ static bool isTrivialImpl(const Stmt *S, CacheTy &Cache);
+};
+
} // namespace clang
#endif
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
index d879c110b75d..9df108e28ecd 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
@@ -6,20 +6,121 @@
//
//===----------------------------------------------------------------------===//
+#include "ASTUtils.h"
#include "DiagOutputUtils.h"
#include "PtrTypesSemantics.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/StmtVisitor.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
#include <optional>
using namespace clang;
using namespace ento;
namespace {
+
+class DerefFuncDeleteExprVisitor
+ : public ConstStmtVisitor<DerefFuncDeleteExprVisitor, bool> {
+ // Returns true if any of child statements return true.
+ bool VisitChildren(const Stmt *S) {
+ for (const Stmt *Child : S->children()) {
+ if (Child && Visit(Child))
+ return true;
+ }
+ return false;
+ }
+
+ bool VisitBody(const Stmt *Body) {
+ if (!Body)
+ return false;
+
+ auto [It, IsNew] = VisitedBody.insert(Body);
+ if (!IsNew) // This body is recursive
+ return false;
+
+ return Visit(Body);
+ }
+
+public:
+ DerefFuncDeleteExprVisitor(const TemplateArgumentList &ArgList,
+ const CXXRecordDecl *ClassDecl)
+ : ArgList(&ArgList), ClassDecl(ClassDecl) {}
+
+ DerefFuncDeleteExprVisitor(const CXXRecordDecl *ClassDecl)
+ : ClassDecl(ClassDecl) {}
+
+ std::optional<bool> HasSpecializedDelete(CXXMethodDecl *Decl) {
+ if (auto *Body = Decl->getBody())
+ return VisitBody(Body);
+ if (Decl->getTemplateInstantiationPattern())
+ return std::nullopt; // Indeterminate. There was no concrete instance.
+ return false;
+ }
+
+ bool VisitCallExpr(const CallExpr *CE) {
+ const Decl *D = CE->getCalleeDecl();
+ if (D && D->hasBody())
+ return VisitBody(D->getBody());
+ return false;
+ }
+
+ bool VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
+ auto *Arg = E->getArgument();
+ while (Arg) {
+ if (auto *Paren = dyn_cast<ParenExpr>(Arg))
+ Arg = Paren->getSubExpr();
+ else if (auto *Cast = dyn_cast<CastExpr>(Arg)) {
+ Arg = Cast->getSubExpr();
+ auto CastType = Cast->getType();
+ if (auto *PtrType = dyn_cast<PointerType>(CastType)) {
+ auto PointeeType = PtrType->getPointeeType();
+ while (auto *ET = dyn_cast<ElaboratedType>(PointeeType)) {
+ if (ET->isSugared())
+ PointeeType = ET->desugar();
+ }
+ if (auto *ParmType = dyn_cast<TemplateTypeParmType>(PointeeType)) {
+ if (ArgList) {
+ auto ParmIndex = ParmType->getIndex();
+ auto Type = ArgList->get(ParmIndex).getAsType();
+ if (Type->getAsCXXRecordDecl() == ClassDecl)
+ return true;
+ }
+ } else if (auto *RD = dyn_cast<RecordType>(PointeeType)) {
+ if (RD->getDecl() == ClassDecl)
+ return true;
+ } else if (auto *ST =
+ dyn_cast<SubstTemplateTypeParmType>(PointeeType)) {
+ auto Type = ST->getReplacementType();
+ if (auto *RD = dyn_cast<RecordType>(Type)) {
+ if (RD->getDecl() == ClassDecl)
+ return true;
+ }
+ }
+ }
+ } else
+ break;
+ }
+ return false;
+ }
+
+ bool VisitStmt(const Stmt *S) { return VisitChildren(S); }
+
+ // Return false since the contents of lambda isn't necessarily executed.
+ // If it is executed, VisitCallExpr above will visit its body.
+ bool VisitLambdaExpr(const LambdaExpr *) { return false; }
+
+private:
+ const TemplateArgumentList *ArgList{nullptr};
+ const CXXRecordDecl *ClassDecl;
+ llvm::DenseSet<const Stmt *> VisitedBody;
+};
+
class RefCntblBaseVirtualDtorChecker
: public Checker<check::ASTDecl<TranslationUnitDecl>> {
private:
@@ -50,60 +151,93 @@ public:
bool shouldVisitImplicitCode() const { return false; }
bool VisitCXXRecordDecl(const CXXRecordDecl *RD) {
- Checker->visitCXXRecordDecl(RD);
+ if (!RD->hasDefinition())
+ return true;
+
+ Decls.insert(RD);
+
+ for (auto &Base : RD->bases()) {
+ const auto AccSpec = Base.getAccessSpecifier();
+ if (AccSpec == AS_protected || AccSpec == AS_private ||
+ (AccSpec == AS_none && RD->isClass()))
+ continue;
+
+ QualType T = Base.getType();
+ if (T.isNull())
+ continue;
+
+ const CXXRecordDecl *C = T->getAsCXXRecordDecl();
+ if (!C)
+ continue;
+
+ if (auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(C)) {
+ for (auto &Arg : CTSD->getTemplateArgs().asArray()) {
+ if (Arg.getKind() != TemplateArgument::Type)
+ continue;
+ auto TemplT = Arg.getAsType();
+ if (TemplT.isNull())
+ continue;
+
+ bool IsCRTP = TemplT->getAsCXXRecordDecl() == RD;
+ if (!IsCRTP)
+ continue;
+ CRTPs.insert(C);
+ }
+ }
+ }
+
return true;
}
+
+ llvm::SetVector<const CXXRecordDecl *> Decls;
+ llvm::DenseSet<const CXXRecordDecl *> CRTPs;
};
LocalVisitor visitor(this);
visitor.TraverseDecl(const_cast<TranslationUnitDecl *>(TUD));
+ for (auto *RD : visitor.Decls) {
+ if (visitor.CRTPs.contains(RD))
+ continue;
+ visitCXXRecordDecl(RD);
+ }
}
void visitCXXRecordDecl(const CXXRecordDecl *RD) const {
if (shouldSkipDecl(RD))
return;
- CXXBasePaths Paths;
- Paths.setOrigin(RD);
-
- const CXXBaseSpecifier *ProblematicBaseSpecifier = nullptr;
- const CXXRecordDecl *ProblematicBaseClass = nullptr;
+ for (auto &Base : RD->bases()) {
+ const auto AccSpec = Base.getAccessSpecifier();
+ if (AccSpec == AS_protected || AccSpec == AS_private ||
+ (AccSpec == AS_none && RD->isClass()))
+ continue;
- const auto IsPublicBaseRefCntblWOVirtualDtor =
- [RD, &ProblematicBaseSpecifier,
- &ProblematicBaseClass](const CXXBaseSpecifier *Base, CXXBasePath &) {
- const auto AccSpec = Base->getAccessSpecifier();
- if (AccSpec == AS_protected || AccSpec == AS_private ||
- (AccSpec == AS_none && RD->isClass()))
- return false;
+ auto hasRefInBase = clang::hasPublicMethodInBase(&Base, "ref");
+ auto hasDerefInBase = clang::hasPublicMethodInBase(&Base, "deref");
- auto hasRefInBase = clang::hasPublicMethodInBase(Base, "ref");
- auto hasDerefInBase = clang::hasPublicMethodInBase(Base, "deref");
+ bool hasRef = hasRefInBase && *hasRefInBase != nullptr;
+ bool hasDeref = hasDerefInBase && *hasDerefInBase != nullptr;
- bool hasRef = hasRefInBase && *hasRefInBase != nullptr;
- bool hasDeref = hasDerefInBase && *hasDerefInBase != nullptr;
+ QualType T = Base.getType();
+ if (T.isNull())
+ continue;
- QualType T = Base->getType();
- if (T.isNull())
- return false;
+ const CXXRecordDecl *C = T->getAsCXXRecordDecl();
+ if (!C)
+ continue;
- const CXXRecordDecl *C = T->getAsCXXRecordDecl();
- if (!C)
- return false;
- bool AnyInconclusiveBase = false;
- const auto hasPublicRefInBase =
- [&AnyInconclusiveBase](const CXXBaseSpecifier *Base,
- CXXBasePath &) {
- auto hasRefInBase = clang::hasPublicMethodInBase(Base, "ref");
- if (!hasRefInBase) {
- AnyInconclusiveBase = true;
- return false;
- }
- return (*hasRefInBase) != nullptr;
- };
- const auto hasPublicDerefInBase = [&AnyInconclusiveBase](
- const CXXBaseSpecifier *Base,
- CXXBasePath &) {
+ bool AnyInconclusiveBase = false;
+ const auto hasPublicRefInBase =
+ [&AnyInconclusiveBase](const CXXBaseSpecifier *Base, CXXBasePath &) {
+ auto hasRefInBase = clang::hasPublicMethodInBase(Base, "ref");
+ if (!hasRefInBase) {
+ AnyInconclusiveBase = true;
+ return false;
+ }
+ return (*hasRefInBase) != nullptr;
+ };
+ const auto hasPublicDerefInBase =
+ [&AnyInconclusiveBase](const CXXBaseSpecifier *Base, CXXBasePath &) {
auto hasDerefInBase = clang::hasPublicMethodInBase(Base, "deref");
if (!hasDerefInBase) {
AnyInconclusiveBase = true;
@@ -111,28 +245,42 @@ public:
}
return (*hasDerefInBase) != nullptr;
};
- CXXBasePaths Paths;
- Paths.setOrigin(C);
- hasRef = hasRef || C->lookupInBases(hasPublicRefInBase, Paths,
+ CXXBasePaths Paths;
+ Paths.setOrigin(C);
+ hasRef = hasRef || C->lookupInBases(hasPublicRefInBase, Paths,
+ /*LookupInDependent =*/true);
+ hasDeref = hasDeref || C->lookupInBases(hasPublicDerefInBase, Paths,
/*LookupInDependent =*/true);
- hasDeref = hasDeref || C->lookupInBases(hasPublicDerefInBase, Paths,
- /*LookupInDependent =*/true);
- if (AnyInconclusiveBase || !hasRef || !hasDeref)
- return false;
-
- const auto *Dtor = C->getDestructor();
- if (!Dtor || !Dtor->isVirtual()) {
- ProblematicBaseSpecifier = Base;
- ProblematicBaseClass = C;
- return true;
- }
-
- return false;
- };
-
- if (RD->lookupInBases(IsPublicBaseRefCntblWOVirtualDtor, Paths,
- /*LookupInDependent =*/true)) {
- reportBug(RD, ProblematicBaseSpecifier, ProblematicBaseClass);
+ if (AnyInconclusiveBase || !hasRef || !hasDeref)
+ continue;
+
+ auto HasSpecializedDelete = isClassWithSpecializedDelete(C, RD);
+ if (!HasSpecializedDelete || *HasSpecializedDelete)
+ continue;
+ if (C->lookupInBases(
+ [&](const CXXBaseSpecifier *Base, CXXBasePath &) {
+ auto *T = Base->getType().getTypePtrOrNull();
+ if (!T)
+ return false;
+ auto *R = T->getAsCXXRecordDecl();
+ if (!R)
+ return false;
+ auto Result = isClassWithSpecializedDelete(R, RD);
+ if (!Result)
+ AnyInconclusiveBase = true;
+ return Result && *Result;
+ },
+ Paths, /*LookupInDependent =*/true))
+ continue;
+ if (AnyInconclusiveBase)
+ continue;
+
+ const auto *Dtor = C->getDestructor();
+ if (!Dtor || !Dtor->isVirtual()) {
+ auto *ProblematicBaseSpecifier = &Base;
+ auto *ProblematicBaseClass = C;
+ reportBug(RD, ProblematicBaseSpecifier, ProblematicBaseClass);
+ }
}
}
@@ -164,6 +312,46 @@ public:
return false;
}
+ static bool isRefCountedClass(const CXXRecordDecl *D) {
+ if (!D->getTemplateInstantiationPattern())
+ return false;
+ auto *NsDecl = D->getParent();
+ if (!NsDecl || !isa<NamespaceDecl>(NsDecl))
+ return false;
+ auto NamespaceName = safeGetName(NsDecl);
+ auto ClsNameStr = safeGetName(D);
+ StringRef ClsName = ClsNameStr; // FIXME: Make safeGetName return StringRef.
+ return NamespaceName == "WTF" &&
+ (ClsName.ends_with("RefCounted") ||
+ ClsName == "ThreadSafeRefCountedAndCanMakeThreadSafeWeakPtr");
+ }
+
+ static std::optional<bool>
+ isClassWithSpecializedDelete(const CXXRecordDecl *C,
+ const CXXRecordDecl *DerivedClass) {
+ if (auto *ClsTmplSpDecl = dyn_cast<ClassTemplateSpecializationDecl>(C)) {
+ for (auto *MethodDecl : C->methods()) {
+ if (safeGetName(MethodDecl) == "deref") {
+ DerefFuncDeleteExprVisitor Visitor(ClsTmplSpDecl->getTemplateArgs(),
+ DerivedClass);
+ auto Result = Visitor.HasSpecializedDelete(MethodDecl);
+ if (!Result || *Result)
+ return Result;
+ }
+ }
+ return false;
+ }
+ for (auto *MethodDecl : C->methods()) {
+ if (safeGetName(MethodDecl) == "deref") {
+ DerefFuncDeleteExprVisitor Visitor(DerivedClass);
+ auto Result = Visitor.HasSpecializedDelete(MethodDecl);
+ if (!Result || *Result)
+ return Result;
+ }
+ }
+ return false;
+ }
+
void reportBug(const CXXRecordDecl *DerivedClass,
const CXXBaseSpecifier *BaseSpec,
const CXXRecordDecl *ProblematicBaseClass) const {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp
index 31ccae8b097b..704c082a4d1d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp
@@ -32,6 +32,8 @@ class UncountedCallArgsChecker
"WebKit coding guidelines"};
mutable BugReporter *BR;
+ TrivialFunctionAnalysis TFA;
+
public:
void checkASTDecl(const TranslationUnitDecl *TUD, AnalysisManager &MGR,
@@ -51,6 +53,13 @@ public:
bool shouldVisitTemplateInstantiations() const { return true; }
bool shouldVisitImplicitCode() const { return false; }
+ bool TraverseClassTemplateDecl(ClassTemplateDecl *Decl) {
+ if (isRefType(safeGetName(Decl)))
+ return true;
+ return RecursiveASTVisitor<LocalVisitor>::TraverseClassTemplateDecl(
+ Decl);
+ }
+
bool VisitCallExpr(const CallExpr *CE) {
Checker->visitCallExpr(CE);
return true;
@@ -70,6 +79,20 @@ public:
// or std::function call operator).
unsigned ArgIdx = isa<CXXOperatorCallExpr>(CE) && isa_and_nonnull<CXXMethodDecl>(F);
+ if (auto *MemberCallExpr = dyn_cast<CXXMemberCallExpr>(CE)) {
+ if (auto *MD = MemberCallExpr->getMethodDecl()) {
+ auto name = safeGetName(MD);
+ if (name == "ref" || name == "deref")
+ return;
+ }
+ auto *E = MemberCallExpr->getImplicitObjectArgument();
+ QualType ArgType = MemberCallExpr->getObjectType();
+ std::optional<bool> IsUncounted =
+ isUncounted(ArgType->getAsCXXRecordDecl());
+ if (IsUncounted && *IsUncounted && !isPtrOriginSafe(E))
+ reportBugOnThis(E);
+ }
+
for (auto P = F->param_begin();
// FIXME: Also check variadic function parameters.
// FIXME: Also check default function arguments. Probably a different
@@ -91,25 +114,10 @@ public:
const auto *Arg = CE->getArg(ArgIdx);
- std::pair<const clang::Expr *, bool> ArgOrigin =
- tryToFindPtrOrigin(Arg, true);
+ if (auto *defaultArg = dyn_cast<CXXDefaultArgExpr>(Arg))
+ Arg = defaultArg->getExpr();
- // Temporary ref-counted object created as part of the call argument
- // would outlive the call.
- if (ArgOrigin.second)
- continue;
-
- if (isa<CXXNullPtrLiteralExpr>(ArgOrigin.first)) {
- // foo(nullptr)
- continue;
- }
- if (isa<IntegerLiteral>(ArgOrigin.first)) {
- // FIXME: Check the value.
- // foo(NULL)
- continue;
- }
-
- if (isASafeCallArg(ArgOrigin.first))
+ if (isPtrOriginSafe(Arg))
continue;
reportBug(Arg, *P);
@@ -117,7 +125,35 @@ public:
}
}
+ bool isPtrOriginSafe(const Expr *Arg) const {
+ return tryToFindPtrOrigin(Arg, /*StopAtFirstRefCountedObj=*/true,
+ [](const clang::Expr *ArgOrigin, bool IsSafe) {
+ if (IsSafe)
+ return true;
+ if (isa<CXXNullPtrLiteralExpr>(ArgOrigin)) {
+ // foo(nullptr)
+ return true;
+ }
+ if (isa<IntegerLiteral>(ArgOrigin)) {
+ // FIXME: Check the value.
+ // foo(NULL)
+ return true;
+ }
+ if (isASafeCallArg(ArgOrigin))
+ return true;
+ return false;
+ });
+ }
+
bool shouldSkipCall(const CallExpr *CE) const {
+ const auto *Callee = CE->getDirectCallee();
+
+ if (BR->getSourceManager().isInSystemHeader(CE->getExprLoc()))
+ return true;
+
+ if (Callee && TFA.isTrivial(Callee))
+ return true;
+
if (CE->getNumArgs() == 0)
return false;
@@ -125,14 +161,26 @@ public:
// of object on LHS.
if (auto *MemberOp = dyn_cast<CXXOperatorCallExpr>(CE)) {
// Note: assignemnt to built-in type isn't derived from CallExpr.
+ if (MemberOp->getOperator() ==
+ OO_Equal) { // Ignore assignment to Ref/RefPtr.
+ auto *callee = MemberOp->getDirectCallee();
+ if (auto *calleeDecl = dyn_cast<CXXMethodDecl>(callee)) {
+ if (const CXXRecordDecl *classDecl = calleeDecl->getParent()) {
+ if (isRefCounted(classDecl))
+ return true;
+ }
+ }
+ }
if (MemberOp->isAssignmentOp())
return false;
}
- const auto *Callee = CE->getDirectCallee();
if (!Callee)
return false;
+ if (isMethodOnWTFContainerType(Callee))
+ return true;
+
auto overloadedOperatorType = Callee->getOverloadedOperator();
if (overloadedOperatorType == OO_EqualEqual ||
overloadedOperatorType == OO_ExclaimEqual ||
@@ -148,18 +196,51 @@ public:
auto name = safeGetName(Callee);
if (name == "adoptRef" || name == "getPtr" || name == "WeakPtr" ||
- name == "dynamicDowncast" || name == "downcast" || name == "bitwise_cast" ||
- name == "is" || name == "equal" || name == "hash" ||
- name == "isType"
+ name == "dynamicDowncast" || name == "downcast" ||
+ name == "checkedDowncast" || name == "uncheckedDowncast" ||
+ name == "bitwise_cast" || name == "is" || name == "equal" ||
+ name == "hash" || name == "isType" ||
// FIXME: Most/all of these should be implemented via attributes.
- || name == "equalIgnoringASCIICase" ||
+ name == "equalIgnoringASCIICase" ||
name == "equalIgnoringASCIICaseCommon" ||
- name == "equalIgnoringNullity")
+ name == "equalIgnoringNullity" || name == "toString")
return true;
return false;
}
+ bool isMethodOnWTFContainerType(const FunctionDecl *Decl) const {
+ if (!isa<CXXMethodDecl>(Decl))
+ return false;
+ auto *ClassDecl = Decl->getParent();
+ if (!ClassDecl || !isa<CXXRecordDecl>(ClassDecl))
+ return false;
+
+ auto *NsDecl = ClassDecl->getParent();
+ if (!NsDecl || !isa<NamespaceDecl>(NsDecl))
+ return false;
+
+ auto MethodName = safeGetName(Decl);
+ auto ClsNameStr = safeGetName(ClassDecl);
+ StringRef ClsName = ClsNameStr; // FIXME: Make safeGetName return StringRef.
+ auto NamespaceName = safeGetName(NsDecl);
+ // FIXME: These should be implemented via attributes.
+ return NamespaceName == "WTF" &&
+ (MethodName == "find" || MethodName == "findIf" ||
+ MethodName == "reverseFind" || MethodName == "reverseFindIf" ||
+ MethodName == "findIgnoringASCIICase" || MethodName == "get" ||
+ MethodName == "inlineGet" || MethodName == "contains" ||
+ MethodName == "containsIf" ||
+ MethodName == "containsIgnoringASCIICase" ||
+ MethodName == "startsWith" || MethodName == "endsWith" ||
+ MethodName == "startsWithIgnoringASCIICase" ||
+ MethodName == "endsWithIgnoringASCIICase" ||
+ MethodName == "substring") &&
+ (ClsName.ends_with("Vector") || ClsName.ends_with("Set") ||
+ ClsName.ends_with("Map") || ClsName == "StringImpl" ||
+ ClsName.ends_with("String"));
+ }
+
void reportBug(const Expr *CallArg, const ParmVarDecl *Param) const {
assert(CallArg);
@@ -183,6 +264,19 @@ public:
Report->addRange(CallArg->getSourceRange());
BR->emitReport(std::move(Report));
}
+
+ void reportBugOnThis(const Expr *CallArg) const {
+ assert(CallArg);
+
+ const SourceLocation SrcLocToReport = CallArg->getSourceRange().getBegin();
+
+ PathDiagnosticLocation BSLoc(SrcLocToReport, BR->getSourceManager());
+ auto Report = std::make_unique<BasicBugReport>(
+ Bug, "Call argument for 'this' parameter is uncounted and unsafe.",
+ BSLoc);
+ Report->addRange(CallArg->getSourceRange());
+ BR->emitReport(std::move(Report));
+ }
};
} // namespace
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLocalVarsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLocalVarsChecker.cpp
index 5a72f53b12ed..274da0baf2ce 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLocalVarsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLocalVarsChecker.cpp
@@ -26,28 +26,6 @@ using namespace ento;
namespace {
-// for ( int a = ...) ... true
-// for ( int a : ...) ... true
-// if ( int* a = ) ... true
-// anything else ... false
-bool isDeclaredInForOrIf(const VarDecl *Var) {
- assert(Var);
- auto &ASTCtx = Var->getASTContext();
- auto parent = ASTCtx.getParents(*Var);
-
- if (parent.size() == 1) {
- if (auto *DS = parent.begin()->get<DeclStmt>()) {
- DynTypedNodeList grandParent = ASTCtx.getParents(*DS);
- if (grandParent.size() == 1) {
- return grandParent.begin()->get<ForStmt>() ||
- grandParent.begin()->get<IfStmt>() ||
- grandParent.begin()->get<CXXForRangeStmt>();
- }
- }
- }
- return false;
-}
-
// FIXME: should be defined by anotations in the future
bool isRefcountedStringsHack(const VarDecl *V) {
assert(V);
@@ -143,6 +121,11 @@ public:
// want to visit those, so we make our own RecursiveASTVisitor.
struct LocalVisitor : public RecursiveASTVisitor<LocalVisitor> {
const UncountedLocalVarsChecker *Checker;
+
+ TrivialFunctionAnalysis TFA;
+
+ using Base = RecursiveASTVisitor<LocalVisitor>;
+
explicit LocalVisitor(const UncountedLocalVarsChecker *Checker)
: Checker(Checker) {
assert(Checker);
@@ -152,7 +135,49 @@ public:
bool shouldVisitImplicitCode() const { return false; }
bool VisitVarDecl(VarDecl *V) {
- Checker->visitVarDecl(V);
+ auto *Init = V->getInit();
+ if (Init && V->isLocalVarDecl())
+ Checker->visitVarDecl(V, Init);
+ return true;
+ }
+
+ bool VisitBinaryOperator(const BinaryOperator *BO) {
+ if (BO->isAssignmentOp()) {
+ if (auto *VarRef = dyn_cast<DeclRefExpr>(BO->getLHS())) {
+ if (auto *V = dyn_cast<VarDecl>(VarRef->getDecl()))
+ Checker->visitVarDecl(V, BO->getRHS());
+ }
+ }
+ return true;
+ }
+
+ bool TraverseIfStmt(IfStmt *IS) {
+ if (!TFA.isTrivial(IS))
+ return Base::TraverseIfStmt(IS);
+ return true;
+ }
+
+ bool TraverseForStmt(ForStmt *FS) {
+ if (!TFA.isTrivial(FS))
+ return Base::TraverseForStmt(FS);
+ return true;
+ }
+
+ bool TraverseCXXForRangeStmt(CXXForRangeStmt *FRS) {
+ if (!TFA.isTrivial(FRS))
+ return Base::TraverseCXXForRangeStmt(FRS);
+ return true;
+ }
+
+ bool TraverseWhileStmt(WhileStmt *WS) {
+ if (!TFA.isTrivial(WS))
+ return Base::TraverseWhileStmt(WS);
+ return true;
+ }
+
+ bool TraverseCompoundStmt(CompoundStmt *CS) {
+ if (!TFA.isTrivial(CS))
+ return Base::TraverseCompoundStmt(CS);
return true;
}
};
@@ -161,7 +186,7 @@ public:
visitor.TraverseDecl(const_cast<TranslationUnitDecl *>(TUD));
}
- void visitVarDecl(const VarDecl *V) const {
+ void visitVarDecl(const VarDecl *V, const Expr *Value) const {
if (shouldSkipVarDecl(V))
return;
@@ -171,73 +196,90 @@ public:
std::optional<bool> IsUncountedPtr = isUncountedPtr(ArgType);
if (IsUncountedPtr && *IsUncountedPtr) {
- const Expr *const InitExpr = V->getInit();
- if (!InitExpr)
- return; // FIXME: later on we might warn on uninitialized vars too
-
- const clang::Expr *const InitArgOrigin =
- tryToFindPtrOrigin(InitExpr, /*StopAtFirstRefCountedObj=*/false)
- .first;
- if (!InitArgOrigin)
- return;
-
- if (isa<CXXThisExpr>(InitArgOrigin))
+ if (tryToFindPtrOrigin(
+ Value, /*StopAtFirstRefCountedObj=*/false,
+ [&](const clang::Expr *InitArgOrigin, bool IsSafe) {
+ if (!InitArgOrigin)
+ return true;
+
+ if (isa<CXXThisExpr>(InitArgOrigin))
+ return true;
+
+ if (isa<CXXNullPtrLiteralExpr>(InitArgOrigin))
+ return true;
+
+ if (isa<IntegerLiteral>(InitArgOrigin))
+ return true;
+
+ if (auto *Ref = llvm::dyn_cast<DeclRefExpr>(InitArgOrigin)) {
+ if (auto *MaybeGuardian =
+ dyn_cast_or_null<VarDecl>(Ref->getFoundDecl())) {
+ const auto *MaybeGuardianArgType =
+ MaybeGuardian->getType().getTypePtr();
+ if (MaybeGuardianArgType) {
+ const CXXRecordDecl *const MaybeGuardianArgCXXRecord =
+ MaybeGuardianArgType->getAsCXXRecordDecl();
+ if (MaybeGuardianArgCXXRecord) {
+ if (MaybeGuardian->isLocalVarDecl() &&
+ (isRefCounted(MaybeGuardianArgCXXRecord) ||
+ isRefcountedStringsHack(MaybeGuardian)) &&
+ isGuardedScopeEmbeddedInGuardianScope(
+ V, MaybeGuardian))
+ return true;
+ }
+ }
+
+ // Parameters are guaranteed to be safe for the duration of
+ // the call by another checker.
+ if (isa<ParmVarDecl>(MaybeGuardian))
+ return true;
+ }
+ }
+
+ return false;
+ }))
return;
- if (auto *Ref = llvm::dyn_cast<DeclRefExpr>(InitArgOrigin)) {
- if (auto *MaybeGuardian =
- dyn_cast_or_null<VarDecl>(Ref->getFoundDecl())) {
- const auto *MaybeGuardianArgType =
- MaybeGuardian->getType().getTypePtr();
- if (!MaybeGuardianArgType)
- return;
- const CXXRecordDecl *const MaybeGuardianArgCXXRecord =
- MaybeGuardianArgType->getAsCXXRecordDecl();
- if (!MaybeGuardianArgCXXRecord)
- return;
-
- if (MaybeGuardian->isLocalVarDecl() &&
- (isRefCounted(MaybeGuardianArgCXXRecord) ||
- isRefcountedStringsHack(MaybeGuardian)) &&
- isGuardedScopeEmbeddedInGuardianScope(V, MaybeGuardian)) {
- return;
- }
-
- // Parameters are guaranteed to be safe for the duration of the call
- // by another checker.
- if (isa<ParmVarDecl>(MaybeGuardian))
- return;
- }
- }
-
- reportBug(V);
+ reportBug(V, Value);
}
}
bool shouldSkipVarDecl(const VarDecl *V) const {
assert(V);
- if (!V->isLocalVarDecl())
- return true;
-
- if (isDeclaredInForOrIf(V))
- return true;
-
- return false;
+ return BR->getSourceManager().isInSystemHeader(V->getLocation());
}
- void reportBug(const VarDecl *V) const {
+ void reportBug(const VarDecl *V, const Expr *Value) const {
assert(V);
SmallString<100> Buf;
llvm::raw_svector_ostream Os(Buf);
- Os << "Local variable ";
- printQuotedQualifiedName(Os, V);
- Os << " is uncounted and unsafe.";
-
- PathDiagnosticLocation BSLoc(V->getLocation(), BR->getSourceManager());
- auto Report = std::make_unique<BasicBugReport>(Bug, Os.str(), BSLoc);
- Report->addRange(V->getSourceRange());
- BR->emitReport(std::move(Report));
+ if (dyn_cast<ParmVarDecl>(V)) {
+ Os << "Assignment to an uncounted parameter ";
+ printQuotedQualifiedName(Os, V);
+ Os << " is unsafe.";
+
+ PathDiagnosticLocation BSLoc(Value->getExprLoc(), BR->getSourceManager());
+ auto Report = std::make_unique<BasicBugReport>(Bug, Os.str(), BSLoc);
+ Report->addRange(Value->getSourceRange());
+ BR->emitReport(std::move(Report));
+ } else {
+ if (V->hasLocalStorage())
+ Os << "Local variable ";
+ else if (V->isStaticLocal())
+ Os << "Static local variable ";
+ else if (V->hasGlobalStorage())
+ Os << "Global variable ";
+ else
+ Os << "Variable ";
+ printQuotedQualifiedName(Os, V);
+ Os << " is uncounted and unsafe.";
+
+ PathDiagnosticLocation BSLoc(V->getLocation(), BR->getSourceManager());
+ auto Report = std::make_unique<BasicBugReport>(Bug, Os.str(), BSLoc);
+ Report->addRange(V->getSourceRange());
+ BR->emitReport(std::move(Report));
+ }
}
};
} // namespace
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/InvalidPtrChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/InvalidPtrChecker.cpp
index e5dd907c660d..fefe846b6911 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/InvalidPtrChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/InvalidPtrChecker.cpp
@@ -48,14 +48,19 @@ private:
bool InvalidatingGetEnv = false;
// GetEnv can be treated invalidating and non-invalidating as well.
- const CallDescription GetEnvCall{{"getenv"}, 1};
+ const CallDescription GetEnvCall{CDM::CLibrary, {"getenv"}, 1};
const CallDescriptionMap<HandlerFn> EnvpInvalidatingFunctions = {
- {{{"setenv"}, 3}, &InvalidPtrChecker::EnvpInvalidatingCall},
- {{{"unsetenv"}, 1}, &InvalidPtrChecker::EnvpInvalidatingCall},
- {{{"putenv"}, 1}, &InvalidPtrChecker::EnvpInvalidatingCall},
- {{{"_putenv_s"}, 2}, &InvalidPtrChecker::EnvpInvalidatingCall},
- {{{"_wputenv_s"}, 2}, &InvalidPtrChecker::EnvpInvalidatingCall},
+ {{CDM::CLibrary, {"setenv"}, 3},
+ &InvalidPtrChecker::EnvpInvalidatingCall},
+ {{CDM::CLibrary, {"unsetenv"}, 1},
+ &InvalidPtrChecker::EnvpInvalidatingCall},
+ {{CDM::CLibrary, {"putenv"}, 1},
+ &InvalidPtrChecker::EnvpInvalidatingCall},
+ {{CDM::CLibrary, {"_putenv_s"}, 2},
+ &InvalidPtrChecker::EnvpInvalidatingCall},
+ {{CDM::CLibrary, {"_wputenv_s"}, 2},
+ &InvalidPtrChecker::EnvpInvalidatingCall},
};
void postPreviousReturnInvalidatingCall(const CallEvent &Call,
@@ -63,13 +68,13 @@ private:
// SEI CERT ENV34-C
const CallDescriptionMap<HandlerFn> PreviousCallInvalidatingFunctions = {
- {{{"setlocale"}, 2},
+ {{CDM::CLibrary, {"setlocale"}, 2},
&InvalidPtrChecker::postPreviousReturnInvalidatingCall},
- {{{"strerror"}, 1},
+ {{CDM::CLibrary, {"strerror"}, 1},
&InvalidPtrChecker::postPreviousReturnInvalidatingCall},
- {{{"localeconv"}, 0},
+ {{CDM::CLibrary, {"localeconv"}, 0},
&InvalidPtrChecker::postPreviousReturnInvalidatingCall},
- {{{"asctime"}, 1},
+ {{CDM::CLibrary, {"asctime"}, 1},
&InvalidPtrChecker::postPreviousReturnInvalidatingCall},
};
@@ -205,8 +210,12 @@ void InvalidPtrChecker::postPreviousReturnInvalidatingCall(
CE, LCtx, CE->getType(), C.blockCount());
State = State->BindExpr(CE, LCtx, RetVal);
+ const auto *SymRegOfRetVal =
+ dyn_cast_or_null<SymbolicRegion>(RetVal.getAsRegion());
+ if (!SymRegOfRetVal)
+ return;
+
// Remember to this region.
- const auto *SymRegOfRetVal = cast<SymbolicRegion>(RetVal.getAsRegion());
const MemRegion *MR = SymRegOfRetVal->getBaseRegion();
State = State->set<PreviousCallResultMap>(FD, MR);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
index f3e0a5f9f314..d73dc40cf03f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -35,6 +35,7 @@
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/Z3CrosscheckVisitor.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/CheckerRegistryData.h"
@@ -86,6 +87,14 @@ STATISTIC(MaxValidBugClassSize,
"The maximum number of bug reports in the same equivalence class "
"where at least one report is valid (not suppressed)");
+STATISTIC(NumTimesReportPassesZ3, "Number of reports passed Z3");
+STATISTIC(NumTimesReportRefuted, "Number of reports refuted by Z3");
+STATISTIC(NumTimesReportEQClassAborted,
+ "Number of times a report equivalence class was aborted by the Z3 "
+ "oracle heuristic");
+STATISTIC(NumTimesReportEQClassWasExhausted,
+ "Number of times all reports of an equivalence class was refuted");
+
BugReporterVisitor::~BugReporterVisitor() = default;
void BugReporterContext::anchor() {}
@@ -138,7 +147,8 @@ public:
public:
PathDiagnosticConstruct(const PathDiagnosticConsumer *PDC,
const ExplodedNode *ErrorNode,
- const PathSensitiveBugReport *R);
+ const PathSensitiveBugReport *R,
+ const Decl *AnalysisEntryPoint);
/// \returns the location context associated with the current position in the
/// bug path.
@@ -1323,24 +1333,26 @@ void PathDiagnosticBuilder::generatePathDiagnosticsForNode(
}
static std::unique_ptr<PathDiagnostic>
-generateDiagnosticForBasicReport(const BasicBugReport *R) {
+generateDiagnosticForBasicReport(const BasicBugReport *R,
+ const Decl *AnalysisEntryPoint) {
const BugType &BT = R->getBugType();
return std::make_unique<PathDiagnostic>(
BT.getCheckerName(), R->getDeclWithIssue(), BT.getDescription(),
R->getDescription(), R->getShortDescription(/*UseFallback=*/false),
BT.getCategory(), R->getUniqueingLocation(), R->getUniqueingDecl(),
- std::make_unique<FilesToLineNumsMap>());
+ AnalysisEntryPoint, std::make_unique<FilesToLineNumsMap>());
}
static std::unique_ptr<PathDiagnostic>
generateEmptyDiagnosticForReport(const PathSensitiveBugReport *R,
- const SourceManager &SM) {
+ const SourceManager &SM,
+ const Decl *AnalysisEntryPoint) {
const BugType &BT = R->getBugType();
return std::make_unique<PathDiagnostic>(
BT.getCheckerName(), R->getDeclWithIssue(), BT.getDescription(),
R->getDescription(), R->getShortDescription(/*UseFallback=*/false),
BT.getCategory(), R->getUniqueingLocation(), R->getUniqueingDecl(),
- findExecutedLines(SM, R->getErrorNode()));
+ AnalysisEntryPoint, findExecutedLines(SM, R->getErrorNode()));
}
static const Stmt *getStmtParent(const Stmt *S, const ParentMap &PM) {
@@ -1976,10 +1988,11 @@ static void updateExecutedLinesWithDiagnosticPieces(PathDiagnostic &PD) {
PathDiagnosticConstruct::PathDiagnosticConstruct(
const PathDiagnosticConsumer *PDC, const ExplodedNode *ErrorNode,
- const PathSensitiveBugReport *R)
+ const PathSensitiveBugReport *R, const Decl *AnalysisEntryPoint)
: Consumer(PDC), CurrentNode(ErrorNode),
SM(CurrentNode->getCodeDecl().getASTContext().getSourceManager()),
- PD(generateEmptyDiagnosticForReport(R, getSourceManager())) {
+ PD(generateEmptyDiagnosticForReport(R, getSourceManager(),
+ AnalysisEntryPoint)) {
LCM[&PD->getActivePath()] = ErrorNode->getLocationContext();
}
@@ -1993,13 +2006,14 @@ PathDiagnosticBuilder::PathDiagnosticBuilder(
std::unique_ptr<PathDiagnostic>
PathDiagnosticBuilder::generate(const PathDiagnosticConsumer *PDC) const {
- PathDiagnosticConstruct Construct(PDC, ErrorNode, R);
+ const Decl *EntryPoint = getBugReporter().getAnalysisEntryPoint();
+ PathDiagnosticConstruct Construct(PDC, ErrorNode, R, EntryPoint);
const SourceManager &SM = getSourceManager();
const AnalyzerOptions &Opts = getAnalyzerOptions();
if (!PDC->shouldGenerateDiagnostics())
- return generateEmptyDiagnosticForReport(R, getSourceManager());
+ return generateEmptyDiagnosticForReport(R, getSourceManager(), EntryPoint);
// Construct the final (warning) event for the bug report.
auto EndNotes = VisitorsDiagnostics->find(ErrorNode);
@@ -2184,7 +2198,7 @@ const Decl *PathSensitiveBugReport::getDeclWithIssue() const {
void BasicBugReport::Profile(llvm::FoldingSetNodeID& hash) const {
hash.AddInteger(static_cast<int>(getKind()));
hash.AddPointer(&BT);
- hash.AddString(Description);
+ hash.AddString(getShortDescription());
assert(Location.isValid());
Location.Profile(hash);
@@ -2199,7 +2213,7 @@ void BasicBugReport::Profile(llvm::FoldingSetNodeID& hash) const {
void PathSensitiveBugReport::Profile(llvm::FoldingSetNodeID &hash) const {
hash.AddInteger(static_cast<int>(getKind()));
hash.AddPointer(&BT);
- hash.AddString(Description);
+ hash.AddString(getShortDescription());
PathDiagnosticLocation UL = getUniqueingLocation();
if (UL.isValid()) {
UL.Profile(hash);
@@ -2467,7 +2481,9 @@ ProgramStateManager &PathSensitiveBugReporter::getStateManager() const {
return Eng.getStateManager();
}
-BugReporter::BugReporter(BugReporterData &d) : D(d) {}
+BugReporter::BugReporter(BugReporterData &D)
+ : D(D), UserSuppressions(D.getASTContext()) {}
+
BugReporter::~BugReporter() {
// Make sure reports are flushed.
assert(StrBugTypes.empty() &&
@@ -2827,6 +2843,7 @@ generateVisitorsDiagnostics(PathSensitiveBugReport *R,
std::optional<PathDiagnosticBuilder> PathDiagnosticBuilder::findValidReport(
ArrayRef<PathSensitiveBugReport *> &bugReports,
PathSensitiveBugReporter &Reporter) {
+ Z3CrosscheckOracle Z3Oracle(Reporter.getAnalyzerOptions());
BugPathGetter BugGraph(&Reporter.getGraph(), bugReports);
@@ -2857,21 +2874,35 @@ std::optional<PathDiagnosticBuilder> PathDiagnosticBuilder::findValidReport(
// If crosscheck is enabled, remove all visitors, add the refutation
// visitor and check again
R->clearVisitors();
- R->addVisitor<FalsePositiveRefutationBRVisitor>();
+ Z3CrosscheckVisitor::Z3Result CrosscheckResult;
+ R->addVisitor<Z3CrosscheckVisitor>(CrosscheckResult,
+ Reporter.getAnalyzerOptions());
// We don't overwrite the notes inserted by other visitors because the
// refutation manager does not add any new note to the path
generateVisitorsDiagnostics(R, BugPath->ErrorNode, BRC);
+ switch (Z3Oracle.interpretQueryResult(CrosscheckResult)) {
+ case Z3CrosscheckOracle::RejectReport:
+ ++NumTimesReportRefuted;
+ R->markInvalid("Infeasible constraints", /*Data=*/nullptr);
+ continue;
+ case Z3CrosscheckOracle::RejectEQClass:
+ ++NumTimesReportEQClassAborted;
+ return {};
+ case Z3CrosscheckOracle::AcceptReport:
+ ++NumTimesReportPassesZ3;
+ break;
+ }
}
- // Check if the bug is still valid
- if (R->isValid())
- return PathDiagnosticBuilder(
- std::move(BRC), std::move(BugPath->BugPath), BugPath->Report,
- BugPath->ErrorNode, std::move(visitorNotes));
+ assert(R->isValid());
+ return PathDiagnosticBuilder(std::move(BRC), std::move(BugPath->BugPath),
+ BugPath->Report, BugPath->ErrorNode,
+ std::move(visitorNotes));
}
}
+ ++NumTimesReportEQClassWasExhausted;
return {};
}
@@ -3121,6 +3152,16 @@ void BugReporter::FlushReport(BugReportEquivClass& EQ) {
Pieces.back()->addFixit(I);
updateExecutedLinesWithDiagnosticPieces(*PD);
+
+ // If we are debugging, let's have the entry point as the first note.
+ if (getAnalyzerOptions().AnalyzerDisplayProgress ||
+ getAnalyzerOptions().AnalyzerNoteAnalysisEntryPoints) {
+ const Decl *EntryPoint = getAnalysisEntryPoint();
+ Pieces.push_front(std::make_shared<PathDiagnosticEventPiece>(
+ PathDiagnosticLocation{EntryPoint->getLocation(), getSourceManager()},
+ "[debug] analyzing from " +
+ AnalysisDeclContext::getFunctionName(EntryPoint)));
+ }
Consumer->HandlePathDiagnostic(std::move(PD));
}
}
@@ -3209,7 +3250,8 @@ BugReporter::generateDiagnosticForConsumerMap(
auto *basicReport = cast<BasicBugReport>(exampleReport);
auto Out = std::make_unique<DiagnosticForConsumerMapTy>();
for (auto *Consumer : consumers)
- (*Out)[Consumer] = generateDiagnosticForBasicReport(basicReport);
+ (*Out)[Consumer] =
+ generateDiagnosticForBasicReport(basicReport, AnalysisEntryPoint);
return Out;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index 2f9965036b9e..7102bf51a57e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -59,6 +59,7 @@
#include <deque>
#include <memory>
#include <optional>
+#include <stack>
#include <string>
#include <utility>
@@ -113,6 +114,9 @@ const Expr *bugreporter::getDerefExpr(const Stmt *S) {
// Pointer arithmetic: '*(x + 2)' -> 'x') etc.
if (const Expr *Inner = peelOffPointerArithmetic(B)) {
E = Inner;
+ } else if (B->isAssignmentOp()) {
+ // Follow LHS of assignments: '*p = 404' -> 'p'.
+ E = B->getLHS();
} else {
// Probably more arithmetic can be pattern-matched here,
// but for now give up.
@@ -1238,7 +1242,7 @@ public:
/// changes to its value in a nested stackframe could be pruned, and
/// this visitor can prevent that without polluting the bugpath too
/// much.
- StoreSiteFinder(bugreporter::TrackerRef ParentTracker, KnownSVal V,
+ StoreSiteFinder(bugreporter::TrackerRef ParentTracker, SVal V,
const MemRegion *R, TrackingOptions Options,
const StackFrameContext *OriginSFC = nullptr)
: TrackingBugReporterVisitor(ParentTracker), R(R), V(V), Options(Options),
@@ -2539,9 +2543,9 @@ public:
Report.addVisitor<UndefOrNullArgVisitor>(L->getRegion());
Result.FoundSomethingToTrack = true;
- if (auto KV = RVal.getAs<KnownSVal>())
+ if (!RVal.isUnknown())
Result.combineWith(
- getParentTracker().track(*KV, L->getRegion(), Opts, SFC));
+ getParentTracker().track(RVal, L->getRegion(), Opts, SFC));
}
const MemRegion *RegionRVal = RVal.getAsRegion();
@@ -2663,8 +2667,8 @@ Tracker::Result Tracker::track(const Expr *E, const ExplodedNode *N,
Tracker::Result Tracker::track(SVal V, const MemRegion *R, TrackingOptions Opts,
const StackFrameContext *Origin) {
- if (auto KV = V.getAs<KnownSVal>()) {
- Report.addVisitor<StoreSiteFinder>(this, *KV, R, Opts, Origin);
+ if (!V.isUnknown()) {
+ Report.addVisitor<StoreSiteFinder>(this, V, R, Opts, Origin);
return {true};
}
return {};
@@ -2692,7 +2696,7 @@ bool bugreporter::trackExpressionValue(const ExplodedNode *InputNode,
.FoundSomethingToTrack;
}
-void bugreporter::trackStoredValue(KnownSVal V, const MemRegion *R,
+void bugreporter::trackStoredValue(SVal V, const MemRegion *R,
PathSensitiveBugReport &Report,
TrackingOptions Opts,
const StackFrameContext *Origin) {
@@ -2883,6 +2887,16 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond, BugReporterContext &BRC,
// previous program state we assuming the newly seen constraint information.
// If we cannot evaluate the condition (and the constraints are the same)
// the analyzer has no information about the value and just assuming it.
+ // FIXME: This logic is not entirely correct, because e.g. in code like
+ // void f(unsigned arg) {
+ // if (arg >= 0) {
+ // // ...
+ // }
+ // }
+ // it will say that the "arg >= 0" check is _assuming_ something new because
+ // the constraint that "$arg >= 0" is 1 was added to the list of known
+ // constraints. However, the unsigned value is always >= 0 so semantically
+ // this is not a "real" assumption.
bool IsAssuming =
!BRC.getStateManager().haveEqualConstraints(CurrentState, PrevState) ||
CurrentState->getSVal(Cond, LCtx).isUnknownOrUndef();
@@ -3434,82 +3448,6 @@ UndefOrNullArgVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
}
//===----------------------------------------------------------------------===//
-// Implementation of FalsePositiveRefutationBRVisitor.
-//===----------------------------------------------------------------------===//
-
-FalsePositiveRefutationBRVisitor::FalsePositiveRefutationBRVisitor()
- : Constraints(ConstraintMap::Factory().getEmptyMap()) {}
-
-void FalsePositiveRefutationBRVisitor::finalizeVisitor(
- BugReporterContext &BRC, const ExplodedNode *EndPathNode,
- PathSensitiveBugReport &BR) {
- // Collect new constraints
- addConstraints(EndPathNode, /*OverwriteConstraintsOnExistingSyms=*/true);
-
- // Create a refutation manager
- llvm::SMTSolverRef RefutationSolver = llvm::CreateZ3Solver();
- ASTContext &Ctx = BRC.getASTContext();
-
- // Add constraints to the solver
- for (const auto &I : Constraints) {
- const SymbolRef Sym = I.first;
- auto RangeIt = I.second.begin();
-
- llvm::SMTExprRef SMTConstraints = SMTConv::getRangeExpr(
- RefutationSolver, Ctx, Sym, RangeIt->From(), RangeIt->To(),
- /*InRange=*/true);
- while ((++RangeIt) != I.second.end()) {
- SMTConstraints = RefutationSolver->mkOr(
- SMTConstraints, SMTConv::getRangeExpr(RefutationSolver, Ctx, Sym,
- RangeIt->From(), RangeIt->To(),
- /*InRange=*/true));
- }
-
- RefutationSolver->addConstraint(SMTConstraints);
- }
-
- // And check for satisfiability
- std::optional<bool> IsSAT = RefutationSolver->check();
- if (!IsSAT)
- return;
-
- if (!*IsSAT)
- BR.markInvalid("Infeasible constraints", EndPathNode->getLocationContext());
-}
-
-void FalsePositiveRefutationBRVisitor::addConstraints(
- const ExplodedNode *N, bool OverwriteConstraintsOnExistingSyms) {
- // Collect new constraints
- ConstraintMap NewCs = getConstraintMap(N->getState());
- ConstraintMap::Factory &CF = N->getState()->get_context<ConstraintMap>();
-
- // Add constraints if we don't have them yet
- for (auto const &C : NewCs) {
- const SymbolRef &Sym = C.first;
- if (!Constraints.contains(Sym)) {
- // This symbol is new, just add the constraint.
- Constraints = CF.add(Constraints, Sym, C.second);
- } else if (OverwriteConstraintsOnExistingSyms) {
- // Overwrite the associated constraint of the Symbol.
- Constraints = CF.remove(Constraints, Sym);
- Constraints = CF.add(Constraints, Sym, C.second);
- }
- }
-}
-
-PathDiagnosticPieceRef FalsePositiveRefutationBRVisitor::VisitNode(
- const ExplodedNode *N, BugReporterContext &, PathSensitiveBugReport &) {
- addConstraints(N, /*OverwriteConstraintsOnExistingSyms=*/false);
- return nullptr;
-}
-
-void FalsePositiveRefutationBRVisitor::Profile(
- llvm::FoldingSetNodeID &ID) const {
- static int Tag = 0;
- ID.AddPointer(&Tag);
-}
-
-//===----------------------------------------------------------------------===//
// Implementation of TagVisitor.
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugSuppression.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugSuppression.cpp
index b5991e47a538..84004b8e5c1c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugSuppression.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugSuppression.cpp
@@ -82,12 +82,12 @@ public:
CacheInitializer(ToInit).TraverseDecl(const_cast<Decl *>(D));
}
- bool VisitVarDecl(VarDecl *VD) {
+ bool VisitDecl(Decl *D) {
// Bug location could be somewhere in the init value of
// a freshly declared variable. Even though it looks like the
// user applied attribute to a statement, it will apply to a
// variable declaration, and this is where we check for it.
- return VisitAttributedNode(VD);
+ return VisitAttributedNode(D);
}
bool VisitAttributedStmt(AttributedStmt *AS) {
@@ -138,9 +138,31 @@ bool BugSuppression::isSuppressed(const BugReport &R) {
bool BugSuppression::isSuppressed(const PathDiagnosticLocation &Location,
const Decl *DeclWithIssue,
DiagnosticIdentifierList Hashtags) {
- if (!Location.isValid() || DeclWithIssue == nullptr)
+ if (!Location.isValid())
return false;
+ if (!DeclWithIssue) {
+ // FIXME: This defeats the purpose of passing DeclWithIssue to begin with.
+ // If this branch is ever hit, we're re-doing all the work we've already
+ // done as well as perform a lot of work we'll never need.
+ // Gladly, none of our on-by-default checkers currently need it.
+ DeclWithIssue = ACtx.getTranslationUnitDecl();
+ } else {
+ // This is the fast path. However, we should still consider the topmost
+ // declaration that isn't TranslationUnitDecl, because we should respect
+ // attributes on the entire declaration chain.
+ while (true) {
+ // Use the "lexical" parent. Eg., if the attribute is on a class, suppress
+ // warnings in inline methods but not in out-of-line methods.
+ const Decl *Parent =
+ dyn_cast_or_null<Decl>(DeclWithIssue->getLexicalDeclContext());
+ if (Parent == nullptr || isa<TranslationUnitDecl>(Parent))
+ break;
+
+ DeclWithIssue = Parent;
+ }
+ }
+
// While some warnings are attached to AST nodes (mostly path-sensitive
// checks), others are simply associated with a plain source location
// or range. Figuring out the node based on locations can be tricky,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallDescription.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallDescription.cpp
index 94b2fde0a6f3..cd23b381f879 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallDescription.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallDescription.cpp
@@ -35,25 +35,19 @@ static MaybeCount readRequiredParams(MaybeCount RequiredArgs,
return std::nullopt;
}
-ento::CallDescription::CallDescription(CallDescriptionFlags Flags,
+ento::CallDescription::CallDescription(Mode MatchAs,
ArrayRef<StringRef> QualifiedName,
MaybeCount RequiredArgs /*= None*/,
MaybeCount RequiredParams /*= None*/)
: RequiredArgs(RequiredArgs),
RequiredParams(readRequiredParams(RequiredArgs, RequiredParams)),
- Flags(Flags) {
+ MatchAs(MatchAs) {
assert(!QualifiedName.empty());
this->QualifiedName.reserve(QualifiedName.size());
llvm::transform(QualifiedName, std::back_inserter(this->QualifiedName),
[](StringRef From) { return From.str(); });
}
-/// Construct a CallDescription with default flags.
-ento::CallDescription::CallDescription(ArrayRef<StringRef> QualifiedName,
- MaybeCount RequiredArgs /*= None*/,
- MaybeCount RequiredParams /*= None*/)
- : CallDescription(CDF_None, QualifiedName, RequiredArgs, RequiredParams) {}
-
bool ento::CallDescription::matches(const CallEvent &Call) const {
// FIXME: Add ObjC Message support.
if (Call.getKind() == CE_ObjCMessage)
@@ -74,83 +68,89 @@ bool ento::CallDescription::matchesAsWritten(const CallExpr &CE) const {
return matchesImpl(FD, CE.getNumArgs(), FD->param_size());
}
-bool ento::CallDescription::matchesImpl(const FunctionDecl *Callee,
- size_t ArgCount,
- size_t ParamCount) const {
- const auto *FD = Callee;
- if (!FD)
- return false;
+bool ento::CallDescription::matchNameOnly(const NamedDecl *ND) const {
+ DeclarationName Name = ND->getDeclName();
+ if (const auto *NameII = Name.getAsIdentifierInfo()) {
+ if (!II)
+ II = &ND->getASTContext().Idents.get(getFunctionName());
- if (Flags & CDF_MaybeBuiltin) {
- return CheckerContext::isCLibraryFunction(FD, getFunctionName()) &&
- (!RequiredArgs || *RequiredArgs <= ArgCount) &&
- (!RequiredParams || *RequiredParams <= ParamCount);
+ return NameII == *II; // Fast case.
}
- if (!II) {
- II = &FD->getASTContext().Idents.get(getFunctionName());
- }
+ // Fallback to the slow stringification and comparison for:
+ // C++ overloaded operators, constructors, destructors, etc.
+ // FIXME This comparison is way SLOWER than comparing pointers.
+ // At some point in the future, we should compare FunctionDecl pointers.
+ return Name.getAsString() == getFunctionName();
+}
- const auto MatchNameOnly = [](const CallDescription &CD,
- const NamedDecl *ND) -> bool {
- DeclarationName Name = ND->getDeclName();
- if (const auto *II = Name.getAsIdentifierInfo())
- return II == *CD.II; // Fast case.
-
- // Fallback to the slow stringification and comparison for:
- // C++ overloaded operators, constructors, destructors, etc.
- // FIXME This comparison is way SLOWER than comparing pointers.
- // At some point in the future, we should compare FunctionDecl pointers.
- return Name.getAsString() == CD.getFunctionName();
+bool ento::CallDescription::matchQualifiedNameParts(const Decl *D) const {
+ const auto FindNextNamespaceOrRecord =
+ [](const DeclContext *Ctx) -> const DeclContext * {
+ while (Ctx && !isa<NamespaceDecl, RecordDecl>(Ctx))
+ Ctx = Ctx->getParent();
+ return Ctx;
};
- const auto ExactMatchArgAndParamCounts =
- [](size_t ArgCount, size_t ParamCount,
- const CallDescription &CD) -> bool {
- const bool ArgsMatch = !CD.RequiredArgs || *CD.RequiredArgs == ArgCount;
- const bool ParamsMatch =
- !CD.RequiredParams || *CD.RequiredParams == ParamCount;
- return ArgsMatch && ParamsMatch;
- };
+ auto QualifierPartsIt = begin_qualified_name_parts();
+ const auto QualifierPartsEndIt = end_qualified_name_parts();
+
+ // Match namespace and record names. Skip unrelated names if they don't
+ // match.
+ const DeclContext *Ctx = FindNextNamespaceOrRecord(D->getDeclContext());
+ for (; Ctx && QualifierPartsIt != QualifierPartsEndIt;
+ Ctx = FindNextNamespaceOrRecord(Ctx->getParent())) {
+ // If not matched just continue and try matching for the next one.
+ if (cast<NamedDecl>(Ctx)->getName() != *QualifierPartsIt)
+ continue;
+ ++QualifierPartsIt;
+ }
- const auto MatchQualifiedNameParts = [](const CallDescription &CD,
- const Decl *D) -> bool {
- const auto FindNextNamespaceOrRecord =
- [](const DeclContext *Ctx) -> const DeclContext * {
- while (Ctx && !isa<NamespaceDecl, RecordDecl>(Ctx))
- Ctx = Ctx->getParent();
- return Ctx;
- };
-
- auto QualifierPartsIt = CD.begin_qualified_name_parts();
- const auto QualifierPartsEndIt = CD.end_qualified_name_parts();
-
- // Match namespace and record names. Skip unrelated names if they don't
- // match.
- const DeclContext *Ctx = FindNextNamespaceOrRecord(D->getDeclContext());
- for (; Ctx && QualifierPartsIt != QualifierPartsEndIt;
- Ctx = FindNextNamespaceOrRecord(Ctx->getParent())) {
- // If not matched just continue and try matching for the next one.
- if (cast<NamedDecl>(Ctx)->getName() != *QualifierPartsIt)
- continue;
- ++QualifierPartsIt;
- }
+ // We matched if we consumed all expected qualifier segments.
+ return QualifierPartsIt == QualifierPartsEndIt;
+}
- // We matched if we consumed all expected qualifier segments.
- return QualifierPartsIt == QualifierPartsEndIt;
- };
+bool ento::CallDescription::matchesImpl(const FunctionDecl *FD, size_t ArgCount,
+ size_t ParamCount) const {
+ if (!FD)
+ return false;
+
+ const bool isMethod = isa<CXXMethodDecl>(FD);
+
+ if (MatchAs == Mode::SimpleFunc && isMethod)
+ return false;
- // Let's start matching...
- if (!ExactMatchArgAndParamCounts(ArgCount, ParamCount, *this))
+ if (MatchAs == Mode::CXXMethod && !isMethod)
return false;
- if (!MatchNameOnly(*this, FD))
+ if (MatchAs == Mode::CLibraryMaybeHardened) {
+ // In addition to accepting FOO() with CLibrary rules, we also want to
+ // accept calls to __FOO_chk() and __builtin___FOO_chk().
+ if (CheckerContext::isCLibraryFunction(FD) &&
+ CheckerContext::isHardenedVariantOf(FD, getFunctionName())) {
+ // Check that the actual argument/parameter counts are greater or equal
+ // to the required counts. (Setting a requirement to std::nullopt matches
+ // anything, so in that case value_or ensures that the value is compared
+ // with itself.)
+ return (RequiredArgs.value_or(ArgCount) <= ArgCount &&
+ RequiredParams.value_or(ParamCount) <= ParamCount);
+ }
+ }
+
+ if (RequiredArgs.value_or(ArgCount) != ArgCount ||
+ RequiredParams.value_or(ParamCount) != ParamCount)
+ return false;
+
+ if (MatchAs == Mode::CLibrary || MatchAs == Mode::CLibraryMaybeHardened)
+ return CheckerContext::isCLibraryFunction(FD, getFunctionName());
+
+ if (!matchNameOnly(FD))
return false;
if (!hasQualifiedNameParts())
return true;
- return MatchQualifiedNameParts(*this, FD);
+ return matchQualifiedNameParts(FD);
}
ento::CallDescriptionSet::CallDescriptionSet(
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
index bc14aea27f67..0e317ec765ec 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -1408,9 +1408,12 @@ CallEventManager::getSimpleCall(const CallExpr *CE, ProgramStateRef State,
if (const auto *OpCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
const FunctionDecl *DirectCallee = OpCE->getDirectCallee();
- if (const auto *MD = dyn_cast<CXXMethodDecl>(DirectCallee))
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(DirectCallee)) {
if (MD->isImplicitObjectMemberFunction())
return create<CXXMemberOperatorCall>(OpCE, State, LCtx, ElemRef);
+ if (MD->isStatic())
+ return create<CXXStaticOperatorCall>(OpCE, State, LCtx, ElemRef);
+ }
} else if (CE->getCallee()->getType()->isBlockPointerType()) {
return create<BlockCall>(CE, State, LCtx, ElemRef);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
index d6d4cec9dd3d..96464b30c078 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
@@ -87,9 +87,11 @@ bool CheckerContext::isCLibraryFunction(const FunctionDecl *FD,
if (!II)
return false;
- // Look through 'extern "C"' and anything similar invented in the future.
- // If this function is not in TU directly, it is not a C library function.
- if (!FD->getDeclContext()->getRedeclContext()->isTranslationUnit())
+ // C library functions are either declared directly within a TU (the common
+ // case) or they are accessed through the namespace `std` (when they are used
+ // in C++ via headers like <cstdlib>).
+ const DeclContext *DC = FD->getDeclContext()->getRedeclContext();
+ if (!(DC->isTranslationUnit() || DC->isStdNamespace()))
return false;
// If this function is not externally visible, it is not a C library function.
@@ -102,19 +104,30 @@ bool CheckerContext::isCLibraryFunction(const FunctionDecl *FD,
return true;
StringRef FName = II->getName();
- if (FName.equals(Name))
+ if (FName == Name)
return true;
if (FName.starts_with("__inline") && FName.contains(Name))
return true;
- if (FName.starts_with("__") && FName.ends_with("_chk") &&
- FName.contains(Name))
- return true;
-
return false;
}
+bool CheckerContext::isHardenedVariantOf(const FunctionDecl *FD,
+ StringRef Name) {
+ const IdentifierInfo *II = FD->getIdentifier();
+ if (!II)
+ return false;
+
+ auto CompletelyMatchesParts = [II](auto... Parts) -> bool {
+ StringRef FName = II->getName();
+ return (FName.consume_front(Parts) && ...) && FName.empty();
+ };
+
+ return CompletelyMatchesParts("__", Name, "_chk") ||
+ CompletelyMatchesParts("__builtin_", "__", Name, "_chk");
+}
+
StringRef CheckerContext::getMacroNameOrSpelling(SourceLocation &Loc) {
if (Loc.isMacroID())
return Lexer::getImmediateMacroName(Loc, getSourceManager(),
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
index 84ad20a54807..d7137a915b3d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
@@ -14,6 +14,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include <optional>
namespace clang {
@@ -182,5 +183,12 @@ OperatorKind operationKindFromOverloadedOperator(OverloadedOperatorKind OOK,
}
}
+std::optional<SVal> getPointeeVal(SVal PtrSVal, ProgramStateRef State) {
+ if (const auto *Ptr = PtrSVal.getAsRegion()) {
+ return State->getSVal(Ptr);
+ }
+ return std::nullopt;
+}
+
} // namespace ento
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
index d3499e7a917d..8605fa149e4f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -222,18 +222,6 @@ void CoreEngine::dispatchWorkItem(ExplodedNode* Pred, ProgramPoint Loc,
}
}
-bool CoreEngine::ExecuteWorkListWithInitialState(const LocationContext *L,
- unsigned Steps,
- ProgramStateRef InitState,
- ExplodedNodeSet &Dst) {
- bool DidNotFinish = ExecuteWorkList(L, Steps, InitState);
- for (ExplodedGraph::eop_iterator I = G.eop_begin(), E = G.eop_end(); I != E;
- ++I) {
- Dst.Add(*I);
- }
- return DidNotFinish;
-}
-
void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
const CFGBlock *Blk = L.getDst();
NodeBuilderContext BuilderCtx(*this, Blk, Pred);
@@ -637,8 +625,8 @@ ExplodedNode* NodeBuilder::generateNodeImpl(const ProgramPoint &Loc,
bool MarkAsSink) {
HasGeneratedNodes = true;
bool IsNew;
- ExplodedNode *N = C.Eng.G.getNode(Loc, State, MarkAsSink, &IsNew);
- N->addPredecessor(FromN, C.Eng.G);
+ ExplodedNode *N = C.getEngine().G.getNode(Loc, State, MarkAsSink, &IsNew);
+ N->addPredecessor(FromN, C.getEngine().G);
Frontier.erase(FromN);
if (!IsNew)
@@ -667,7 +655,7 @@ ExplodedNode *BranchNodeBuilder::generateNode(ProgramStateRef State,
if (!isFeasible(branch))
return nullptr;
- ProgramPoint Loc = BlockEdge(C.Block, branch ? DstT:DstF,
+ ProgramPoint Loc = BlockEdge(C.getBlock(), branch ? DstT : DstF,
NodePred->getLocationContext());
ExplodedNode *Succ = generateNodeImpl(Loc, State, NodePred);
return Succ;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index 24e91a22fd68..c11468a08ae5 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -1737,6 +1737,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::RecoveryExprClass:
case Stmt::CXXNoexceptExprClass:
case Stmt::PackExpansionExprClass:
+ case Stmt::PackIndexingExprClass:
case Stmt::SubstNonTypeTemplateParmPackExprClass:
case Stmt::FunctionParmPackExprClass:
case Stmt::CoroutineBodyStmtClass:
@@ -1810,7 +1811,9 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
+ case Stmt::OMPReverseDirectiveClass:
case Stmt::OMPTileDirectiveClass:
+ case Stmt::OMPInterchangeDirectiveClass:
case Stmt::OMPInteropDirectiveClass:
case Stmt::OMPDispatchDirectiveClass:
case Stmt::OMPMaskedDirectiveClass:
@@ -1820,6 +1823,8 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPParallelGenericLoopDirectiveClass:
case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
case Stmt::CapturedStmtClass:
+ case Stmt::OpenACCComputeConstructClass:
+ case Stmt::OpenACCLoopConstructClass:
case Stmt::OMPUnrollDirectiveClass:
case Stmt::OMPMetaDirectiveClass: {
const ExplodedNode *node = Bldr.generateSink(S, Pred, Pred->getState());
@@ -1923,6 +1928,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::CXXRewrittenBinaryOperatorClass:
case Stmt::RequiresExprClass:
case Expr::CXXParenListInitExprClass:
+ case Stmt::EmbedExprClass:
// Fall through.
// Cases we intentionally don't evaluate, since they don't need
@@ -1946,7 +1952,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::CXXPseudoDestructorExprClass:
case Stmt::SubstNonTypeTemplateParmExprClass:
case Stmt::CXXNullPtrLiteralExprClass:
- case Stmt::OMPArraySectionExprClass:
+ case Stmt::ArraySectionExprClass:
case Stmt::OMPArrayShapingExprClass:
case Stmt::OMPIteratorExprClass:
case Stmt::SYCLUniqueStableNameExprClass:
@@ -2054,11 +2060,17 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
llvm_unreachable("Support for MatrixSubscriptExpr is not implemented.");
break;
- case Stmt::GCCAsmStmtClass:
+ case Stmt::GCCAsmStmtClass: {
Bldr.takeNodes(Pred);
- VisitGCCAsmStmt(cast<GCCAsmStmt>(S), Pred, Dst);
+ ExplodedNodeSet PreVisit;
+ getCheckerManager().runCheckersForPreStmt(PreVisit, Pred, S, *this);
+ ExplodedNodeSet PostVisit;
+ for (ExplodedNode *const N : PreVisit)
+ VisitGCCAsmStmt(cast<GCCAsmStmt>(S), N, PostVisit);
+ getCheckerManager().runCheckersForPostStmt(Dst, PostVisit, S, *this);
Bldr.addNodes(Dst);
break;
+ }
case Stmt::MSAsmStmtClass:
Bldr.takeNodes(Pred);
@@ -3893,7 +3905,7 @@ struct DOTGraphTraits<ExplodedGraph*> : public DefaultDOTGraphTraits {
State->printDOT(Out, N->getLocationContext(), Space);
Out << "\\l}\\l";
- return Out.str();
+ return Buf;
}
};
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index 7e431f7e598c..7a900780384a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -330,7 +330,8 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_ConstructorConversion:
case CK_UserDefinedConversion:
case CK_FunctionToPointerDecay:
- case CK_BuiltinFnToFnPtr: {
+ case CK_BuiltinFnToFnPtr:
+ case CK_HLSLArrayRValue: {
// Copy the SVal of Ex to CastE.
ProgramStateRef state = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
@@ -520,7 +521,8 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
// Various C++ casts that are not handled yet.
case CK_ToUnion:
case CK_MatrixCast:
- case CK_VectorSplat: {
+ case CK_VectorSplat:
+ case CK_HLSLVectorTruncation: {
QualType resultType = CastE->getType();
if (CastE->isGLValue())
resultType = getContext().getPointerType(resultType);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index 504fd7f05e0f..c50db1e0e2f8 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -229,7 +229,7 @@ SVal ExprEngine::computeObjectUnderConstruction(
// We are on the top frame of the analysis. We do not know where is the
// object returned to. Conjure a symbolic region for the return value.
// TODO: We probably need a new MemRegion kind to represent the storage
- // of that SymbolicRegion, so that we cound produce a fancy symbol
+ // of that SymbolicRegion, so that we could produce a fancy symbol
// instead of an anonymous conjured symbol.
// TODO: Do we need to track the region to avoid having it dead
// too early? It does die too early, at least in C++17, but because
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index 4755b6bfa6dc..9d3e4fc944fb 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -846,6 +846,7 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
const StackFrameContext *CallerSFC = CurLC->getStackFrame();
switch (Call.getKind()) {
case CE_Function:
+ case CE_CXXStaticOperator:
case CE_Block:
break;
case CE_CXXMember:
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index 86947b7929e9..fb5030d373c2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -69,6 +69,8 @@ class HTMLDiagnostics : public PathDiagnosticConsumer {
const Preprocessor &PP;
const bool SupportsCrossFileDiagnostics;
llvm::StringSet<> EmittedHashes;
+ html::RelexRewriteCacheRef RewriterCache =
+ html::instantiateRelexRewriteCache();
public:
HTMLDiagnostics(PathDiagnosticConsumerOptions DiagOpts,
@@ -309,10 +311,6 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
return;
}
- // FIXME: This causes each file to be re-parsed and syntax-highlighted
- // and macro-expanded separately for each report. We could cache such rewrites
- // across all reports and only re-do the part that's actually different:
- // the warning/note bubbles.
std::string report = GenerateHTML(D, R, SMgr, path, declName.c_str());
if (report.empty()) {
llvm::errs() << "warning: no diagnostics generated for main file.\n";
@@ -882,8 +880,8 @@ void HTMLDiagnostics::RewriteFile(Rewriter &R, const PathPieces &path,
// If we have a preprocessor, relex the file and syntax highlight.
// We might not have a preprocessor if we come from a deserialized AST file,
// for example.
- html::SyntaxHighlight(R, FID, PP);
- html::HighlightMacros(R, FID, PP);
+ html::SyntaxHighlight(R, FID, PP, RewriterCache);
+ html::HighlightMacros(R, FID, PP, RewriterCache);
}
void HTMLDiagnostics::HandlePiece(Rewriter &R, FileID BugFileID,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
index a80352816be6..7042f1aeb803 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
@@ -190,6 +190,17 @@ static bool isCapturedByReference(ExplodedNode *N, const DeclRefExpr *DR) {
return FD->getType()->isReferenceType();
}
+static bool isFoundInStmt(const Stmt *S, const VarDecl *VD) {
+ if (const DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
+ for (const Decl *D : DS->decls()) {
+ // Once we reach the declaration of the VD we can return.
+ if (D->getCanonicalDecl() == VD)
+ return true;
+ }
+ }
+ return false;
+}
+
// A loop counter is considered escaped if:
// case 1: It is a global variable.
// case 2: It is a reference parameter or a reference capture.
@@ -219,13 +230,19 @@ static bool isPossiblyEscaped(ExplodedNode *N, const DeclRefExpr *DR) {
continue;
}
- if (const DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
- for (const Decl *D : DS->decls()) {
- // Once we reach the declaration of the VD we can return.
- if (D->getCanonicalDecl() == VD)
- return false;
+ if (isFoundInStmt(S, VD)) {
+ return false;
+ }
+
+ if (const auto *SS = dyn_cast<SwitchStmt>(S)) {
+ if (const auto *CST = dyn_cast<CompoundStmt>(SS->getBody())) {
+ for (const Stmt *CB : CST->body()) {
+ if (isFoundInStmt(CB, VD))
+ return false;
+ }
}
}
+
// Check the usage of the pass-by-ref function calls and adress-of operator
// on VD and reference initialized by VD.
ASTContext &ASTCtx =
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
index 16db6b249dc9..693791c3aee8 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -630,6 +630,17 @@ bool MemRegion::canPrintPrettyAsExpr() const {
return false;
}
+StringRef MemRegion::getKindStr() const {
+ switch (getKind()) {
+#define REGION(Id, Parent) \
+ case Id##Kind: \
+ return #Id;
+#include "clang/StaticAnalyzer/Core/PathSensitive/Regions.def"
+#undef REGION
+ }
+ llvm_unreachable("Unkown kind!");
+}
+
void MemRegion::printPretty(raw_ostream &os) const {
assert(canPrintPretty() && "This region cannot be printed pretty.");
os << "'";
@@ -720,13 +731,21 @@ std::string MemRegion::getDescriptiveName(bool UseQuotes) const {
CI->getValue().toString(Idx);
ArrayIndices = (llvm::Twine("[") + Idx.str() + "]" + ArrayIndices).str();
}
- // If not a ConcreteInt, try to obtain the variable
- // name by calling 'getDescriptiveName' recursively.
+ // Index is symbolic, but may have a descriptive name.
else {
- std::string Idx = ER->getDescriptiveName(false);
- if (!Idx.empty()) {
- ArrayIndices = (llvm::Twine("[") + Idx + "]" + ArrayIndices).str();
- }
+ auto SI = ER->getIndex().getAs<nonloc::SymbolVal>();
+ if (!SI)
+ return "";
+
+ const MemRegion *OR = SI->getAsSymbol()->getOriginRegion();
+ if (!OR)
+ return "";
+
+ std::string Idx = OR->getDescriptiveName(false);
+ if (Idx.empty())
+ return "";
+
+ ArrayIndices = (llvm::Twine("[") + Idx + "]" + ArrayIndices).str();
}
R = ER->getSuperRegion();
}
@@ -817,7 +836,7 @@ DefinedOrUnknownSVal MemRegionManager::getStaticSize(const MemRegion *MR,
};
auto IsArrayOfZero = [](const ArrayType *AT) {
const auto *CAT = dyn_cast<ConstantArrayType>(AT);
- return CAT && CAT->getSize() == 0;
+ return CAT && CAT->isZeroSize();
};
auto IsArrayOfOne = [](const ArrayType *AT) {
const auto *CAT = dyn_cast<ConstantArrayType>(AT);
@@ -1147,10 +1166,10 @@ MemRegionManager::getCompoundLiteralRegion(const CompoundLiteralExpr *CL,
return getSubRegion<CompoundLiteralRegion>(CL, sReg);
}
-const ElementRegion*
+const ElementRegion *
MemRegionManager::getElementRegion(QualType elementType, NonLoc Idx,
- const SubRegion* superRegion,
- ASTContext &Ctx){
+ const SubRegion *superRegion,
+ const ASTContext &Ctx) {
QualType T = Ctx.getCanonicalType(elementType).getUnqualifiedType();
llvm::FoldingSetNodeID ID;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
index f12f1a5ac970..f82cd944750a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -226,6 +226,20 @@ ProgramStateRef ProgramState::killBinding(Loc LV) const {
return makeWithStore(newStore);
}
+/// SymbolicRegions are expected to be wrapped by an ElementRegion as a
+/// canonical representation. As a canonical representation, SymbolicRegions
+/// should be wrapped by ElementRegions before getting a FieldRegion.
+/// See f8643a9b31c4029942f67d4534c9139b45173504 why.
+SVal ProgramState::wrapSymbolicRegion(SVal Val) const {
+ const auto *BaseReg = dyn_cast_or_null<SymbolicRegion>(Val.getAsRegion());
+ if (!BaseReg)
+ return Val;
+
+ StoreManager &SM = getStateManager().getStoreManager();
+ QualType ElemTy = BaseReg->getPointeeStaticType();
+ return loc::MemRegionVal{SM.GetElementZeroRegion(BaseReg, ElemTy)};
+}
+
ProgramStateRef
ProgramState::enterStackFrame(const CallEvent &Call,
const StackFrameContext *CalleeCtx) const {
@@ -451,6 +465,24 @@ void ProgramState::setStore(const StoreRef &newStore) {
store = newStoreStore;
}
+SVal ProgramState::getLValue(const FieldDecl *D, SVal Base) const {
+ Base = wrapSymbolicRegion(Base);
+ return getStateManager().StoreMgr->getLValueField(D, Base);
+}
+
+SVal ProgramState::getLValue(const IndirectFieldDecl *D, SVal Base) const {
+ StoreManager &SM = *getStateManager().StoreMgr;
+ Base = wrapSymbolicRegion(Base);
+
+ // FIXME: This should work with `SM.getLValueField(D->getAnonField(), Base)`,
+ // but that would break some tests. There is probably a bug somewhere that it
+ // would expose.
+ for (const auto *I : D->chain()) {
+ Base = SM.getLValueField(cast<FieldDecl>(I), Base);
+ }
+ return Base;
+}
+
//===----------------------------------------------------------------------===//
// State pretty-printing.
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index 25d066c4652f..fab8e35962d7 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -2333,7 +2333,8 @@ inline ProgramStateRef EquivalenceClass::merge(RangeSet::Factory &F,
//
// The moment we introduce symbolic casts, this restriction can be
// lifted.
- if (getType() != Other.getType())
+ if (getType()->getCanonicalTypeUnqualified() !=
+ Other.getType()->getCanonicalTypeUnqualified())
return State;
SymbolSet Members = getClassMembers(State);
@@ -3038,7 +3039,7 @@ ProgramStateRef RangeConstraintManager::setRange(ProgramStateRef State,
//===------------------------------------------------------------------------===
// assumeSymX methods: protected interface for RangeConstraintManager.
-//===------------------------------------------------------------------------===/
+//===------------------------------------------------------------------------===
// The syntax for ranges below is mathematical, using [x, y] for closed ranges
// and (x, y) for open ranges. These ranges are modular, corresponding with
@@ -3270,6 +3271,10 @@ void RangeConstraintManager::printJson(raw_ostream &Out, ProgramStateRef State,
void RangeConstraintManager::printValue(raw_ostream &Out, ProgramStateRef State,
SymbolRef Sym) {
const RangeSet RS = getRange(State, Sym);
+ if (RS.isEmpty()) {
+ Out << "<empty rangeset>";
+ return;
+ }
Out << RS.getBitWidth() << (RS.isUnsigned() ? "u:" : "s:");
RS.dump(Out);
}
@@ -3278,7 +3283,7 @@ static std::string toString(const SymbolRef &Sym) {
std::string S;
llvm::raw_string_ostream O(S);
Sym->dumpToStream(O);
- return O.str();
+ return S;
}
void RangeConstraintManager::printConstraints(raw_ostream &Out,
@@ -3349,7 +3354,7 @@ static std::string toString(ProgramStateRef State, EquivalenceClass Class) {
Out << "\"" << ClassMember << "\"";
}
Out << " ]";
- return Out.str();
+ return Str;
}
void RangeConstraintManager::printEquivalenceClasses(raw_ostream &Out,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
index da9a1a1a4d1f..ba29c1231390 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -1166,7 +1166,7 @@ void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
// Compute lower and upper offsets for region within array.
if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
- NumElements = CAT->getSize().getZExtValue();
+ NumElements = CAT->getZExtSize();
if (!NumElements) // We are not dealing with a constant size array
goto conjure_default;
QualType ElementTy = AT->getElementType();
@@ -1613,7 +1613,7 @@ getConstantArrayExtents(const ConstantArrayType *CAT) {
CAT = cast<ConstantArrayType>(CAT->getCanonicalTypeInternal());
SmallVector<uint64_t, 2> Extents;
do {
- Extents.push_back(CAT->getSize().getZExtValue());
+ Extents.push_back(CAT->getZExtSize());
} while ((CAT = dyn_cast<ConstantArrayType>(CAT->getElementType())));
return Extents;
}
@@ -2358,11 +2358,12 @@ StoreRef RegionStoreManager::killBinding(Store ST, Loc L) {
RegionBindingsRef
RegionStoreManager::bind(RegionBindingsConstRef B, Loc L, SVal V) {
- if (L.getAs<loc::ConcreteInt>())
+ // We only care about region locations.
+ auto MemRegVal = L.getAs<loc::MemRegionVal>();
+ if (!MemRegVal)
return B;
- // If we get here, the location should be a region.
- const MemRegion *R = L.castAs<loc::MemRegionVal>().getRegion();
+ const MemRegion *R = MemRegVal->getRegion();
// Check if the region is a struct region.
if (const TypedValueRegion* TR = dyn_cast<TypedValueRegion>(R)) {
@@ -2436,7 +2437,7 @@ std::optional<RegionBindingsRef> RegionStoreManager::tryBindSmallArray(
return std::nullopt;
// If the array is too big, create a LCV instead.
- uint64_t ArrSize = CAT->getSize().getLimitedValue();
+ uint64_t ArrSize = CAT->getLimitedSize();
if (ArrSize > SmallArrayLimit)
return std::nullopt;
@@ -2465,7 +2466,7 @@ RegionStoreManager::bindArray(RegionBindingsConstRef B,
std::optional<uint64_t> Size;
if (const ConstantArrayType* CAT = dyn_cast<ConstantArrayType>(AT))
- Size = CAT->getSize().getZExtValue();
+ Size = CAT->getZExtSize();
// Check if the init expr is a literal. If so, bind the rvalue instead.
// FIXME: It's not responsibility of the Store to transform this lvalue
@@ -2570,7 +2571,7 @@ std::optional<RegionBindingsRef> RegionStoreManager::tryBindSmallStruct(
return std::nullopt;
for (const auto *FD : RD->fields()) {
- if (FD->isUnnamedBitfield())
+ if (FD->isUnnamedBitField())
continue;
// If there are too many fields, or if any of the fields are aggregates,
@@ -2697,7 +2698,7 @@ RegionBindingsRef RegionStoreManager::bindStruct(RegionBindingsConstRef B,
break;
// Skip any unnamed bitfields to stay in sync with the initializers.
- if (FI->isUnnamedBitfield())
+ if (FI->isUnnamedBitField())
continue;
QualType FTy = FI->getType();
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SVals.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SVals.cpp
index 0e1351215bb4..291e4fa752a8 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SVals.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SVals.cpp
@@ -271,7 +271,7 @@ void SVal::printJson(raw_ostream &Out, bool AddQuotes) const {
dumpToStream(TempOut);
- Out << JsonFormat(TempOut.str(), AddQuotes);
+ Out << JsonFormat(Buf, AddQuotes);
}
void SVal::dumpToStream(raw_ostream &os) const {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp
index 67ca61bb56ba..b436dd746d21 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp
@@ -472,7 +472,17 @@ SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
const auto *ElemR = dyn_cast<ElementRegion>(BaseRegion);
// Convert the offset to the appropriate size and signedness.
- Offset = svalBuilder.convertToArrayIndex(Offset).castAs<NonLoc>();
+ auto Off = svalBuilder.convertToArrayIndex(Offset).getAs<NonLoc>();
+ if (!Off) {
+ // Handle cases when LazyCompoundVal is used for an array index.
+ // Such case is possible if code does:
+ // char b[4];
+ // a[__builtin_bitcast(int, b)];
+ // Return UnknownVal, since we cannot model it.
+ return UnknownVal();
+ }
+
+ Offset = Off.value();
if (!ElemR) {
// If the base region is not an ElementRegion, create one.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Z3CrosscheckVisitor.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Z3CrosscheckVisitor.cpp
new file mode 100644
index 000000000000..739db951b3e1
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Z3CrosscheckVisitor.cpp
@@ -0,0 +1,160 @@
+//===- Z3CrosscheckVisitor.cpp - Crosscheck reports with Z3 -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the visitor and utilities around it for Z3 report
+// refutation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/BugReporter/Z3CrosscheckVisitor.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/SMTAPI.h"
+#include "llvm/Support/Timer.h"
+
+#define DEBUG_TYPE "Z3CrosscheckOracle"
+
+STATISTIC(NumZ3QueriesDone, "Number of Z3 queries done");
+STATISTIC(NumTimesZ3TimedOut, "Number of times Z3 query timed out");
+STATISTIC(NumTimesZ3ExhaustedRLimit,
+ "Number of times Z3 query exhausted the rlimit");
+STATISTIC(NumTimesZ3SpendsTooMuchTimeOnASingleEQClass,
+ "Number of times report equivalenece class was cut because it spent "
+ "too much time in Z3");
+
+STATISTIC(NumTimesZ3QueryAcceptsReport,
+ "Number of Z3 queries accepting a report");
+STATISTIC(NumTimesZ3QueryRejectReport,
+ "Number of Z3 queries rejecting a report");
+STATISTIC(NumTimesZ3QueryRejectEQClass,
+ "Number of times rejecting an report equivalenece class");
+
+using namespace clang;
+using namespace ento;
+
+Z3CrosscheckVisitor::Z3CrosscheckVisitor(Z3CrosscheckVisitor::Z3Result &Result,
+ const AnalyzerOptions &Opts)
+ : Constraints(ConstraintMap::Factory().getEmptyMap()), Result(Result),
+ Opts(Opts) {}
+
+void Z3CrosscheckVisitor::finalizeVisitor(BugReporterContext &BRC,
+ const ExplodedNode *EndPathNode,
+ PathSensitiveBugReport &BR) {
+ // Collect new constraints
+ addConstraints(EndPathNode, /*OverwriteConstraintsOnExistingSyms=*/true);
+
+ // Create a refutation manager
+ llvm::SMTSolverRef RefutationSolver = llvm::CreateZ3Solver();
+ if (Opts.Z3CrosscheckRLimitThreshold)
+ RefutationSolver->setUnsignedParam("rlimit",
+ Opts.Z3CrosscheckRLimitThreshold);
+ if (Opts.Z3CrosscheckTimeoutThreshold)
+ RefutationSolver->setUnsignedParam("timeout",
+ Opts.Z3CrosscheckTimeoutThreshold); // ms
+
+ ASTContext &Ctx = BRC.getASTContext();
+
+ // Add constraints to the solver
+ for (const auto &[Sym, Range] : Constraints) {
+ auto RangeIt = Range.begin();
+
+ llvm::SMTExprRef SMTConstraints = SMTConv::getRangeExpr(
+ RefutationSolver, Ctx, Sym, RangeIt->From(), RangeIt->To(),
+ /*InRange=*/true);
+ while ((++RangeIt) != Range.end()) {
+ SMTConstraints = RefutationSolver->mkOr(
+ SMTConstraints, SMTConv::getRangeExpr(RefutationSolver, Ctx, Sym,
+ RangeIt->From(), RangeIt->To(),
+ /*InRange=*/true));
+ }
+ RefutationSolver->addConstraint(SMTConstraints);
+ }
+
+ // And check for satisfiability
+ llvm::TimeRecord Start = llvm::TimeRecord::getCurrentTime(/*Start=*/true);
+ std::optional<bool> IsSAT = RefutationSolver->check();
+ llvm::TimeRecord Diff = llvm::TimeRecord::getCurrentTime(/*Start=*/false);
+ Diff -= Start;
+ Result = Z3Result{
+ IsSAT,
+ static_cast<unsigned>(Diff.getWallTime() * 1000),
+ RefutationSolver->getStatistics()->getUnsigned("rlimit count"),
+ };
+}
+
+void Z3CrosscheckVisitor::addConstraints(
+ const ExplodedNode *N, bool OverwriteConstraintsOnExistingSyms) {
+ // Collect new constraints
+ ConstraintMap NewCs = getConstraintMap(N->getState());
+ ConstraintMap::Factory &CF = N->getState()->get_context<ConstraintMap>();
+
+ // Add constraints if we don't have them yet
+ for (auto const &[Sym, Range] : NewCs) {
+ if (!Constraints.contains(Sym)) {
+ // This symbol is new, just add the constraint.
+ Constraints = CF.add(Constraints, Sym, Range);
+ } else if (OverwriteConstraintsOnExistingSyms) {
+ // Overwrite the associated constraint of the Symbol.
+ Constraints = CF.remove(Constraints, Sym);
+ Constraints = CF.add(Constraints, Sym, Range);
+ }
+ }
+}
+
+PathDiagnosticPieceRef
+Z3CrosscheckVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &,
+ PathSensitiveBugReport &) {
+ addConstraints(N, /*OverwriteConstraintsOnExistingSyms=*/false);
+ return nullptr;
+}
+
+void Z3CrosscheckVisitor::Profile(llvm::FoldingSetNodeID &ID) const {
+ static int Tag = 0;
+ ID.AddPointer(&Tag);
+}
+
+Z3CrosscheckOracle::Z3Decision Z3CrosscheckOracle::interpretQueryResult(
+ const Z3CrosscheckVisitor::Z3Result &Query) {
+ ++NumZ3QueriesDone;
+ AccumulatedZ3QueryTimeInEqClass += Query.Z3QueryTimeMilliseconds;
+
+ if (Query.IsSAT && Query.IsSAT.value()) {
+ ++NumTimesZ3QueryAcceptsReport;
+ return AcceptReport;
+ }
+
+ // Suggest cutting the EQClass if certain heuristics trigger.
+ if (Opts.Z3CrosscheckTimeoutThreshold &&
+ Query.Z3QueryTimeMilliseconds >= Opts.Z3CrosscheckTimeoutThreshold) {
+ ++NumTimesZ3TimedOut;
+ ++NumTimesZ3QueryRejectEQClass;
+ return RejectEQClass;
+ }
+
+ if (Opts.Z3CrosscheckRLimitThreshold &&
+ Query.UsedRLimit >= Opts.Z3CrosscheckRLimitThreshold) {
+ ++NumTimesZ3ExhaustedRLimit;
+ ++NumTimesZ3QueryRejectEQClass;
+ return RejectEQClass;
+ }
+
+ if (Opts.Z3CrosscheckEQClassTimeoutThreshold &&
+ AccumulatedZ3QueryTimeInEqClass >
+ Opts.Z3CrosscheckEQClassTimeoutThreshold) {
+ ++NumTimesZ3SpendsTooMuchTimeOnASingleEQClass;
+ ++NumTimesZ3QueryRejectEQClass;
+ return RejectEQClass;
+ }
+
+ // If no cutoff heuristics trigger, and the report is "unsat" or "undef",
+ // then reject the report.
+ ++NumTimesZ3QueryRejectReport;
+ return RejectReport;
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index b6ef40595e3c..03bc40804d73 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -527,7 +527,8 @@ static void reportAnalyzerFunctionMisuse(const AnalyzerOptions &Opts,
void AnalysisConsumer::runAnalysisOnTranslationUnit(ASTContext &C) {
BugReporter BR(*Mgr);
- TranslationUnitDecl *TU = C.getTranslationUnitDecl();
+ const TranslationUnitDecl *TU = C.getTranslationUnitDecl();
+ BR.setAnalysisEntryPoint(TU);
if (SyntaxCheckTimer)
SyntaxCheckTimer->startTimer();
checkerMgr->runCheckersOnASTDecl(TU, *Mgr, BR);
@@ -675,6 +676,7 @@ void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
DisplayFunction(D, Mode, IMode);
BugReporter BR(*Mgr);
+ BR.setAnalysisEntryPoint(D);
if (Mode & AM_Syntax) {
llvm::TimeRecord CheckerStartTime;
diff --git a/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index 2de977a3dc72..e3718130ca06 100644
--- a/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -978,8 +978,7 @@ RVVIntrinsic::RVVIntrinsic(
bool HasMaskedOffOperand, bool HasVL, PolicyScheme Scheme,
bool SupportOverloading, bool HasBuiltinAlias, StringRef ManualCodegen,
const RVVTypes &OutInTypes, const std::vector<int64_t> &NewIntrinsicTypes,
- const std::vector<StringRef> &RequiredFeatures, unsigned NF,
- Policy NewPolicyAttrs, bool HasFRMRoundModeOp)
+ unsigned NF, Policy NewPolicyAttrs, bool HasFRMRoundModeOp)
: IRName(IRName), IsMasked(IsMasked),
HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL), Scheme(Scheme),
SupportOverloading(SupportOverloading), HasBuiltinAlias(HasBuiltinAlias),
@@ -1150,11 +1149,6 @@ void RVVIntrinsic::updateNamesAndPolicy(
OverloadedName += suffix;
};
- // This follows the naming guideline under riscv-c-api-doc to add the
- // `__riscv_` suffix for all RVV intrinsics.
- Name = "__riscv_" + Name;
- OverloadedName = "__riscv_" + OverloadedName;
-
if (HasFRMRoundModeOp) {
Name += "_rm";
BuiltinName += "_rm";
diff --git a/contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp b/contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp
index 0da087c33e3f..3abc689b93e8 100644
--- a/contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp
@@ -37,6 +37,9 @@ std::vector<std::string> getCommandLineArgsForTesting(TestLanguage Lang) {
case Lang_CXX20:
Args = {"-std=c++20", "-frtti"};
break;
+ case Lang_CXX23:
+ Args = {"-std=c++23", "-frtti"};
+ break;
case Lang_OBJC:
Args = {"-x", "objective-c", "-frtti", "-fobjc-nonfragile-abi"};
break;
@@ -73,6 +76,9 @@ std::vector<std::string> getCC1ArgsForTesting(TestLanguage Lang) {
case Lang_CXX20:
Args = {"-std=c++20"};
break;
+ case Lang_CXX23:
+ Args = {"-std=c++23"};
+ break;
case Lang_OBJC:
Args = {"-xobjective-c"};
break;
@@ -96,6 +102,7 @@ StringRef getFilenameForTesting(TestLanguage Lang) {
case Lang_CXX14:
case Lang_CXX17:
case Lang_CXX20:
+ case Lang_CXX23:
return "input.cc";
case Lang_OpenCL:
diff --git a/contrib/llvm-project/clang/lib/Testing/TestAST.cpp b/contrib/llvm-project/clang/lib/Testing/TestAST.cpp
index 3a50c2d9b5d0..fe8b93851613 100644
--- a/contrib/llvm-project/clang/lib/Testing/TestAST.cpp
+++ b/contrib/llvm-project/clang/lib/Testing/TestAST.cpp
@@ -13,6 +13,7 @@
#include "clang/Frontend/TextDiagnostic.h"
#include "clang/Testing/CommandLineArgs.h"
#include "llvm/ADT/ScopeExit.h"
+#include "llvm/Support/Error.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "gtest/gtest.h"
@@ -106,6 +107,8 @@ TestAST::TestAST(const TestInputs &In) {
// Set up a VFS with only the virtual file visible.
auto VFS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
+ if (auto Err = VFS->setCurrentWorkingDirectory(In.WorkingDir))
+ ADD_FAILURE() << "Failed to setWD: " << Err.message();
VFS->addFile(Filename, /*ModificationTime=*/0,
llvm::MemoryBuffer::getMemBufferCopy(In.Code, Filename));
for (const auto &Extra : In.ExtraFiles)
diff --git a/contrib/llvm-project/clang/lib/Tooling/AllTUsExecution.cpp b/contrib/llvm-project/clang/lib/Tooling/AllTUsExecution.cpp
index f327d0139941..9cad8680447b 100644
--- a/contrib/llvm-project/clang/lib/Tooling/AllTUsExecution.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/AllTUsExecution.cpp
@@ -115,7 +115,7 @@ llvm::Error AllTUsToolExecutor::execute(
auto &Action = Actions.front();
{
- llvm::ThreadPool Pool(llvm::hardware_concurrency(ThreadCount));
+ llvm::DefaultThreadPool Pool(llvm::hardware_concurrency(ThreadCount));
for (std::string File : Files) {
Pool.async(
[&](std::string Path) {
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
index 6f71650a3982..0cab17a34244 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
@@ -41,24 +41,25 @@ DependencyScanningWorkerFilesystem::readFile(StringRef Filename) {
return TentativeEntry(Stat, std::move(Buffer));
}
-EntryRef DependencyScanningWorkerFilesystem::scanForDirectivesIfNecessary(
- const CachedFileSystemEntry &Entry, StringRef Filename, bool Disable) {
- if (Entry.isError() || Entry.isDirectory() || Disable ||
- !shouldScanForDirectives(Filename))
- return EntryRef(Filename, Entry);
+bool DependencyScanningWorkerFilesystem::ensureDirectiveTokensArePopulated(
+ EntryRef Ref) {
+ auto &Entry = Ref.Entry;
+
+ if (Entry.isError() || Entry.isDirectory())
+ return false;
CachedFileContents *Contents = Entry.getCachedContents();
assert(Contents && "contents not initialized");
// Double-checked locking.
if (Contents->DepDirectives.load())
- return EntryRef(Filename, Entry);
+ return true;
std::lock_guard<std::mutex> GuardLock(Contents->ValueLock);
// Double-checked locking.
if (Contents->DepDirectives.load())
- return EntryRef(Filename, Entry);
+ return true;
SmallVector<dependency_directives_scan::Directive, 64> Directives;
// Scan the file for preprocessor directives that might affect the
@@ -69,16 +70,16 @@ EntryRef DependencyScanningWorkerFilesystem::scanForDirectivesIfNecessary(
Contents->DepDirectiveTokens.clear();
// FIXME: Propagate the diagnostic if desired by the client.
Contents->DepDirectives.store(new std::optional<DependencyDirectivesTy>());
- return EntryRef(Filename, Entry);
+ return false;
}
// This function performed double-checked locking using `DepDirectives`.
// Assigning it must be the last thing this function does, otherwise other
- // threads may skip the
- // critical section (`DepDirectives != nullptr`), leading to a data race.
+ // threads may skip the critical section (`DepDirectives != nullptr`), leading
+ // to a data race.
Contents->DepDirectives.store(
new std::optional<DependencyDirectivesTy>(std::move(Directives)));
- return EntryRef(Filename, Entry);
+ return true;
}
DependencyScanningFilesystemSharedCache::
@@ -112,8 +113,8 @@ DependencyScanningFilesystemSharedCache::CacheShard::findEntryByFilename(
StringRef Filename) const {
assert(llvm::sys::path::is_absolute_gnu(Filename));
std::lock_guard<std::mutex> LockGuard(CacheLock);
- auto It = EntriesByFilename.find(Filename);
- return It == EntriesByFilename.end() ? nullptr : It->getValue();
+ auto It = CacheByFilename.find(Filename);
+ return It == CacheByFilename.end() ? nullptr : It->getValue().first;
}
const CachedFileSystemEntry *
@@ -129,11 +130,16 @@ DependencyScanningFilesystemSharedCache::CacheShard::
getOrEmplaceEntryForFilename(StringRef Filename,
llvm::ErrorOr<llvm::vfs::Status> Stat) {
std::lock_guard<std::mutex> LockGuard(CacheLock);
- auto Insertion = EntriesByFilename.insert({Filename, nullptr});
- if (Insertion.second)
- Insertion.first->second =
+ auto [It, Inserted] = CacheByFilename.insert({Filename, {nullptr, nullptr}});
+ auto &[CachedEntry, CachedRealPath] = It->getValue();
+ if (!CachedEntry) {
+ // The entry is not present in the shared cache. Either the cache doesn't
+ // know about the file at all, or it only knows about its real path.
+ assert((Inserted || CachedRealPath) && "existing file with empty pair");
+ CachedEntry =
new (EntryStorage.Allocate()) CachedFileSystemEntry(std::move(Stat));
- return *Insertion.first->second;
+ }
+ return *CachedEntry;
}
const CachedFileSystemEntry &
@@ -141,16 +147,17 @@ DependencyScanningFilesystemSharedCache::CacheShard::getOrEmplaceEntryForUID(
llvm::sys::fs::UniqueID UID, llvm::vfs::Status Stat,
std::unique_ptr<llvm::MemoryBuffer> Contents) {
std::lock_guard<std::mutex> LockGuard(CacheLock);
- auto Insertion = EntriesByUID.insert({UID, nullptr});
- if (Insertion.second) {
+ auto [It, Inserted] = EntriesByUID.insert({UID, nullptr});
+ auto &CachedEntry = It->getSecond();
+ if (Inserted) {
CachedFileContents *StoredContents = nullptr;
if (Contents)
StoredContents = new (ContentsStorage.Allocate())
CachedFileContents(std::move(Contents));
- Insertion.first->second = new (EntryStorage.Allocate())
+ CachedEntry = new (EntryStorage.Allocate())
CachedFileSystemEntry(std::move(Stat), StoredContents);
}
- return *Insertion.first->second;
+ return *CachedEntry;
}
const CachedFileSystemEntry &
@@ -158,52 +165,59 @@ DependencyScanningFilesystemSharedCache::CacheShard::
getOrInsertEntryForFilename(StringRef Filename,
const CachedFileSystemEntry &Entry) {
std::lock_guard<std::mutex> LockGuard(CacheLock);
- return *EntriesByFilename.insert({Filename, &Entry}).first->getValue();
+ auto [It, Inserted] = CacheByFilename.insert({Filename, {&Entry, nullptr}});
+ auto &[CachedEntry, CachedRealPath] = It->getValue();
+ if (!Inserted || !CachedEntry)
+ CachedEntry = &Entry;
+ return *CachedEntry;
}
-/// Whitelist file extensions that should be minimized, treating no extension as
-/// a source file that should be minimized.
-///
-/// This is kinda hacky, it would be better if we knew what kind of file Clang
-/// was expecting instead.
-static bool shouldScanForDirectivesBasedOnExtension(StringRef Filename) {
- StringRef Ext = llvm::sys::path::extension(Filename);
- if (Ext.empty())
- return true; // C++ standard library
- return llvm::StringSwitch<bool>(Ext)
- .CasesLower(".c", ".cc", ".cpp", ".c++", ".cxx", true)
- .CasesLower(".h", ".hh", ".hpp", ".h++", ".hxx", true)
- .CasesLower(".m", ".mm", true)
- .CasesLower(".i", ".ii", ".mi", ".mmi", true)
- .CasesLower(".def", ".inc", true)
- .Default(false);
+const CachedRealPath *
+DependencyScanningFilesystemSharedCache::CacheShard::findRealPathByFilename(
+ StringRef Filename) const {
+ assert(llvm::sys::path::is_absolute_gnu(Filename));
+ std::lock_guard<std::mutex> LockGuard(CacheLock);
+ auto It = CacheByFilename.find(Filename);
+ return It == CacheByFilename.end() ? nullptr : It->getValue().second;
+}
+
+const CachedRealPath &DependencyScanningFilesystemSharedCache::CacheShard::
+ getOrEmplaceRealPathForFilename(StringRef Filename,
+ llvm::ErrorOr<llvm::StringRef> RealPath) {
+ std::lock_guard<std::mutex> LockGuard(CacheLock);
+
+ const CachedRealPath *&StoredRealPath = CacheByFilename[Filename].second;
+ if (!StoredRealPath) {
+ auto OwnedRealPath = [&]() -> CachedRealPath {
+ if (!RealPath)
+ return RealPath.getError();
+ return RealPath->str();
+ }();
+
+ StoredRealPath = new (RealPathStorage.Allocate())
+ CachedRealPath(std::move(OwnedRealPath));
+ }
+
+ return *StoredRealPath;
}
static bool shouldCacheStatFailures(StringRef Filename) {
StringRef Ext = llvm::sys::path::extension(Filename);
if (Ext.empty())
return false; // This may be the module cache directory.
- // Only cache stat failures on files that are not expected to change during
- // the build.
- StringRef FName = llvm::sys::path::filename(Filename);
- if (FName == "module.modulemap" || FName == "module.map")
- return true;
- return shouldScanForDirectivesBasedOnExtension(Filename);
+ return true;
}
DependencyScanningWorkerFilesystem::DependencyScanningWorkerFilesystem(
DependencyScanningFilesystemSharedCache &SharedCache,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS)
- : ProxyFileSystem(std::move(FS)), SharedCache(SharedCache),
+ : llvm::RTTIExtends<DependencyScanningWorkerFilesystem,
+ llvm::vfs::ProxyFileSystem>(std::move(FS)),
+ SharedCache(SharedCache),
WorkingDirForCacheLookup(llvm::errc::invalid_argument) {
updateWorkingDirForCacheLookup();
}
-bool DependencyScanningWorkerFilesystem::shouldScanForDirectives(
- StringRef Filename) {
- return shouldScanForDirectivesBasedOnExtension(Filename);
-}
-
const CachedFileSystemEntry &
DependencyScanningWorkerFilesystem::getOrEmplaceSharedEntryForUID(
TentativeEntry TEntry) {
@@ -257,32 +271,19 @@ DependencyScanningWorkerFilesystem::computeAndStoreResult(
llvm::ErrorOr<EntryRef>
DependencyScanningWorkerFilesystem::getOrCreateFileSystemEntry(
- StringRef OriginalFilename, bool DisableDirectivesScanning) {
- StringRef FilenameForLookup;
+ StringRef OriginalFilename) {
SmallString<256> PathBuf;
- if (llvm::sys::path::is_absolute_gnu(OriginalFilename)) {
- FilenameForLookup = OriginalFilename;
- } else if (!WorkingDirForCacheLookup) {
- return WorkingDirForCacheLookup.getError();
- } else {
- StringRef RelFilename = OriginalFilename;
- RelFilename.consume_front("./");
- PathBuf = *WorkingDirForCacheLookup;
- llvm::sys::path::append(PathBuf, RelFilename);
- FilenameForLookup = PathBuf.str();
- }
- assert(llvm::sys::path::is_absolute_gnu(FilenameForLookup));
+ auto FilenameForLookup = tryGetFilenameForLookup(OriginalFilename, PathBuf);
+ if (!FilenameForLookup)
+ return FilenameForLookup.getError();
+
if (const auto *Entry =
- findEntryByFilenameWithWriteThrough(FilenameForLookup))
- return scanForDirectivesIfNecessary(*Entry, OriginalFilename,
- DisableDirectivesScanning)
- .unwrapError();
- auto MaybeEntry = computeAndStoreResult(OriginalFilename, FilenameForLookup);
+ findEntryByFilenameWithWriteThrough(*FilenameForLookup))
+ return EntryRef(OriginalFilename, *Entry).unwrapError();
+ auto MaybeEntry = computeAndStoreResult(OriginalFilename, *FilenameForLookup);
if (!MaybeEntry)
return MaybeEntry.getError();
- return scanForDirectivesIfNecessary(*MaybeEntry, OriginalFilename,
- DisableDirectivesScanning)
- .unwrapError();
+ return EntryRef(OriginalFilename, *MaybeEntry).unwrapError();
}
llvm::ErrorOr<llvm::vfs::Status>
@@ -299,6 +300,17 @@ DependencyScanningWorkerFilesystem::status(const Twine &Path) {
return Result->getStatus();
}
+bool DependencyScanningWorkerFilesystem::exists(const Twine &Path) {
+ // While some VFS overlay filesystems may implement more-efficient
+ // mechanisms for `exists` queries, `DependencyScanningWorkerFilesystem`
+ // typically wraps `RealFileSystem` which does not specialize `exists`,
+ // so it is not likely to benefit from such optimizations. Instead,
+ // it is more-valuable to have this query go through the
+ // cached-`status` code-path of the `DependencyScanningWorkerFilesystem`.
+ llvm::ErrorOr<llvm::vfs::Status> Status = status(Path);
+ return Status && Status->exists();
+}
+
namespace {
/// The VFS that is used by clang consumes the \c CachedFileSystemEntry using
@@ -359,6 +371,54 @@ DependencyScanningWorkerFilesystem::openFileForRead(const Twine &Path) {
return DepScanFile::create(Result.get());
}
+std::error_code
+DependencyScanningWorkerFilesystem::getRealPath(const Twine &Path,
+ SmallVectorImpl<char> &Output) {
+ SmallString<256> OwnedFilename;
+ StringRef OriginalFilename = Path.toStringRef(OwnedFilename);
+
+ SmallString<256> PathBuf;
+ auto FilenameForLookup = tryGetFilenameForLookup(OriginalFilename, PathBuf);
+ if (!FilenameForLookup)
+ return FilenameForLookup.getError();
+
+ auto HandleCachedRealPath =
+ [&Output](const CachedRealPath &RealPath) -> std::error_code {
+ if (!RealPath)
+ return RealPath.getError();
+ Output.assign(RealPath->begin(), RealPath->end());
+ return {};
+ };
+
+ // If we already have the result in local cache, no work required.
+ if (const auto *RealPath =
+ LocalCache.findRealPathByFilename(*FilenameForLookup))
+ return HandleCachedRealPath(*RealPath);
+
+ // If we have the result in the shared cache, cache it locally.
+ auto &Shard = SharedCache.getShardForFilename(*FilenameForLookup);
+ if (const auto *ShardRealPath =
+ Shard.findRealPathByFilename(*FilenameForLookup)) {
+ const auto &RealPath = LocalCache.insertRealPathForFilename(
+ *FilenameForLookup, *ShardRealPath);
+ return HandleCachedRealPath(RealPath);
+ }
+
+ // If we don't know the real path, compute it...
+ std::error_code EC = getUnderlyingFS().getRealPath(OriginalFilename, Output);
+ llvm::ErrorOr<llvm::StringRef> ComputedRealPath = EC;
+ if (!EC)
+ ComputedRealPath = StringRef{Output.data(), Output.size()};
+
+ // ...and try to write it into the shared cache. In case some other thread won
+ // this race and already wrote its own result there, just adopt it. Write
+ // whatever is in the shared cache into the local one.
+ const auto &RealPath = Shard.getOrEmplaceRealPathForFilename(
+ *FilenameForLookup, ComputedRealPath);
+ return HandleCachedRealPath(
+ LocalCache.insertRealPathForFilename(*FilenameForLookup, RealPath));
+}
+
std::error_code DependencyScanningWorkerFilesystem::setCurrentWorkingDirectory(
const Twine &Path) {
std::error_code EC = ProxyFileSystem::setCurrentWorkingDirectory(Path);
@@ -379,3 +439,25 @@ void DependencyScanningWorkerFilesystem::updateWorkingDirForCacheLookup() {
assert(!WorkingDirForCacheLookup ||
llvm::sys::path::is_absolute_gnu(*WorkingDirForCacheLookup));
}
+
+llvm::ErrorOr<StringRef>
+DependencyScanningWorkerFilesystem::tryGetFilenameForLookup(
+ StringRef OriginalFilename, llvm::SmallVectorImpl<char> &PathBuf) const {
+ StringRef FilenameForLookup;
+ if (llvm::sys::path::is_absolute_gnu(OriginalFilename)) {
+ FilenameForLookup = OriginalFilename;
+ } else if (!WorkingDirForCacheLookup) {
+ return WorkingDirForCacheLookup.getError();
+ } else {
+ StringRef RelFilename = OriginalFilename;
+ RelFilename.consume_front("./");
+ PathBuf.assign(WorkingDirForCacheLookup->begin(),
+ WorkingDirForCacheLookup->end());
+ llvm::sys::path::append(PathBuf, RelFilename);
+ FilenameForLookup = StringRef{PathBuf.begin(), PathBuf.size()};
+ }
+ assert(llvm::sys::path::is_absolute_gnu(FilenameForLookup));
+ return FilenameForLookup;
+}
+
+const char DependencyScanningWorkerFilesystem::ID = 0;
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
index 7ab4a699af6d..0f82f22d8b9a 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
@@ -9,6 +9,7 @@
#include "clang/Tooling/DependencyScanning/DependencyScanningWorker.h"
#include "clang/Basic/DiagnosticDriver.h"
#include "clang/Basic/DiagnosticFrontend.h"
+#include "clang/Basic/DiagnosticSerialization.h"
#include "clang/CodeGen/ObjectFilePCHContainerOperations.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
@@ -23,6 +24,7 @@
#include "clang/Tooling/DependencyScanning/DependencyScanningService.h"
#include "clang/Tooling/DependencyScanning/ModuleDepCollector.h"
#include "clang/Tooling/Tooling.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Error.h"
#include "llvm/TargetParser/Host.h"
@@ -59,6 +61,30 @@ private:
DependencyConsumer &C;
};
+static bool checkHeaderSearchPaths(const HeaderSearchOptions &HSOpts,
+ const HeaderSearchOptions &ExistingHSOpts,
+ DiagnosticsEngine *Diags,
+ const LangOptions &LangOpts) {
+ if (LangOpts.Modules) {
+ if (HSOpts.VFSOverlayFiles != ExistingHSOpts.VFSOverlayFiles) {
+ if (Diags) {
+ Diags->Report(diag::warn_pch_vfsoverlay_mismatch);
+ auto VFSNote = [&](int Type, ArrayRef<std::string> VFSOverlays) {
+ if (VFSOverlays.empty()) {
+ Diags->Report(diag::note_pch_vfsoverlay_empty) << Type;
+ } else {
+ std::string Files = llvm::join(VFSOverlays, "\n");
+ Diags->Report(diag::note_pch_vfsoverlay_files) << Type << Files;
+ }
+ };
+ VFSNote(0, HSOpts.VFSOverlayFiles);
+ VFSNote(1, ExistingHSOpts.VFSOverlayFiles);
+ }
+ }
+ }
+ return false;
+}
+
using PrebuiltModuleFilesT = decltype(HeaderSearchOptions::PrebuiltModuleFiles);
/// A listener that collects the imported modules and optionally the input
@@ -66,9 +92,14 @@ using PrebuiltModuleFilesT = decltype(HeaderSearchOptions::PrebuiltModuleFiles);
class PrebuiltModuleListener : public ASTReaderListener {
public:
PrebuiltModuleListener(PrebuiltModuleFilesT &PrebuiltModuleFiles,
- llvm::SmallVector<std::string> &NewModuleFiles)
+ llvm::SmallVector<std::string> &NewModuleFiles,
+ PrebuiltModuleVFSMapT &PrebuiltModuleVFSMap,
+ const HeaderSearchOptions &HSOpts,
+ const LangOptions &LangOpts, DiagnosticsEngine &Diags)
: PrebuiltModuleFiles(PrebuiltModuleFiles),
- NewModuleFiles(NewModuleFiles) {}
+ NewModuleFiles(NewModuleFiles),
+ PrebuiltModuleVFSMap(PrebuiltModuleVFSMap), ExistingHSOpts(HSOpts),
+ ExistingLangOpts(LangOpts), Diags(Diags) {}
bool needsImportVisitation() const override { return true; }
@@ -77,26 +108,62 @@ public:
NewModuleFiles.push_back(Filename.str());
}
+ void visitModuleFile(StringRef Filename,
+ serialization::ModuleKind Kind) override {
+ CurrentFile = Filename;
+ }
+
+ bool ReadHeaderSearchPaths(const HeaderSearchOptions &HSOpts,
+ bool Complain) override {
+ std::vector<std::string> VFSOverlayFiles = HSOpts.VFSOverlayFiles;
+ PrebuiltModuleVFSMap.insert(
+ {CurrentFile, llvm::StringSet<>(VFSOverlayFiles)});
+ return checkHeaderSearchPaths(
+ HSOpts, ExistingHSOpts, Complain ? &Diags : nullptr, ExistingLangOpts);
+ }
+
private:
PrebuiltModuleFilesT &PrebuiltModuleFiles;
llvm::SmallVector<std::string> &NewModuleFiles;
+ PrebuiltModuleVFSMapT &PrebuiltModuleVFSMap;
+ const HeaderSearchOptions &ExistingHSOpts;
+ const LangOptions &ExistingLangOpts;
+ DiagnosticsEngine &Diags;
+ std::string CurrentFile;
};
/// Visit the given prebuilt module and collect all of the modules it
/// transitively imports and contributing input files.
-static void visitPrebuiltModule(StringRef PrebuiltModuleFilename,
+static bool visitPrebuiltModule(StringRef PrebuiltModuleFilename,
CompilerInstance &CI,
- PrebuiltModuleFilesT &ModuleFiles) {
+ PrebuiltModuleFilesT &ModuleFiles,
+ PrebuiltModuleVFSMapT &PrebuiltModuleVFSMap,
+ DiagnosticsEngine &Diags) {
// List of module files to be processed.
- llvm::SmallVector<std::string> Worklist{PrebuiltModuleFilename.str()};
- PrebuiltModuleListener Listener(ModuleFiles, Worklist);
-
- while (!Worklist.empty())
- ASTReader::readASTFileControlBlock(
- Worklist.pop_back_val(), CI.getFileManager(), CI.getModuleCache(),
- CI.getPCHContainerReader(),
- /*FindModuleFileExtensions=*/false, Listener,
- /*ValidateDiagnosticOptions=*/false);
+ llvm::SmallVector<std::string> Worklist;
+ PrebuiltModuleListener Listener(ModuleFiles, Worklist, PrebuiltModuleVFSMap,
+ CI.getHeaderSearchOpts(), CI.getLangOpts(),
+ Diags);
+
+ Listener.visitModuleFile(PrebuiltModuleFilename,
+ serialization::MK_ExplicitModule);
+ if (ASTReader::readASTFileControlBlock(
+ PrebuiltModuleFilename, CI.getFileManager(), CI.getModuleCache(),
+ CI.getPCHContainerReader(),
+ /*FindModuleFileExtensions=*/false, Listener,
+ /*ValidateDiagnosticOptions=*/false, ASTReader::ARR_OutOfDate))
+ return true;
+
+ while (!Worklist.empty()) {
+ Listener.visitModuleFile(Worklist.back(), serialization::MK_ExplicitModule);
+ if (ASTReader::readASTFileControlBlock(
+ Worklist.pop_back_val(), CI.getFileManager(), CI.getModuleCache(),
+ CI.getPCHContainerReader(),
+ /*FindModuleFileExtensions=*/false, Listener,
+ /*ValidateDiagnosticOptions=*/false))
+ return true;
+ }
+ return false;
}
/// Transform arbitrary file name into an object-like file name.
@@ -125,8 +192,89 @@ static void sanitizeDiagOpts(DiagnosticOptions &DiagOpts) {
DiagOpts.ShowCarets = false;
// Don't write out diagnostic file.
DiagOpts.DiagnosticSerializationFile.clear();
- // Don't emit warnings as errors (and all other warnings too).
- DiagOpts.IgnoreWarnings = true;
+ // Don't emit warnings except for scanning specific warnings.
+ // TODO: It would be useful to add a more principled way to ignore all
+ // warnings that come from source code. The issue is that we need to
+ // ignore warnings that could be suppressed by
+ // `#pragma clang diagnostic`, while still allowing some scanning
+ // warnings for things we're not ready to turn into errors yet.
+ // See `test/ClangScanDeps/diagnostic-pragmas.c` for an example.
+ llvm::erase_if(DiagOpts.Warnings, [](StringRef Warning) {
+ return llvm::StringSwitch<bool>(Warning)
+ .Cases("pch-vfs-diff", "error=pch-vfs-diff", false)
+ .StartsWith("no-error=", false)
+ .Default(true);
+ });
+}
+
+// Clang implements -D and -U by splatting text into a predefines buffer. This
+// allows constructs such as `-DFඞ=3 "-D F\u{0D9E} 4 3 2”` to be accepted and
+// define the same macro, or adding C++ style comments before the macro name.
+//
+// This function checks that the first non-space characters in the macro
+// obviously form an identifier that can be uniqued on without lexing. Failing
+// to do this could lead to changing the final definition of a macro.
+//
+// We could set up a preprocessor and actually lex the name, but that's very
+// heavyweight for a situation that will almost never happen in practice.
+static std::optional<StringRef> getSimpleMacroName(StringRef Macro) {
+ StringRef Name = Macro.split("=").first.ltrim(" \t");
+ std::size_t I = 0;
+
+ auto FinishName = [&]() -> std::optional<StringRef> {
+ StringRef SimpleName = Name.slice(0, I);
+ if (SimpleName.empty())
+ return std::nullopt;
+ return SimpleName;
+ };
+
+ for (; I != Name.size(); ++I) {
+ switch (Name[I]) {
+ case '(': // Start of macro parameter list
+ case ' ': // End of macro name
+ case '\t':
+ return FinishName();
+ case '_':
+ continue;
+ default:
+ if (llvm::isAlnum(Name[I]))
+ continue;
+ return std::nullopt;
+ }
+ }
+ return FinishName();
+}
+
+static void canonicalizeDefines(PreprocessorOptions &PPOpts) {
+ using MacroOpt = std::pair<StringRef, std::size_t>;
+ std::vector<MacroOpt> SimpleNames;
+ SimpleNames.reserve(PPOpts.Macros.size());
+ std::size_t Index = 0;
+ for (const auto &M : PPOpts.Macros) {
+ auto SName = getSimpleMacroName(M.first);
+ // Skip optimizing if we can't guarantee we can preserve relative order.
+ if (!SName)
+ return;
+ SimpleNames.emplace_back(*SName, Index);
+ ++Index;
+ }
+
+ llvm::stable_sort(SimpleNames, llvm::less_first());
+ // Keep the last instance of each macro name by going in reverse
+ auto NewEnd = std::unique(
+ SimpleNames.rbegin(), SimpleNames.rend(),
+ [](const MacroOpt &A, const MacroOpt &B) { return A.first == B.first; });
+ SimpleNames.erase(SimpleNames.begin(), NewEnd.base());
+
+ // Apply permutation.
+ decltype(PPOpts.Macros) NewMacros;
+ NewMacros.reserve(SimpleNames.size());
+ for (std::size_t I = 0, E = SimpleNames.size(); I != E; ++I) {
+ std::size_t OriginalIndex = SimpleNames[I].second;
+ // We still emit undefines here as they may be undefining a predefined macro
+ NewMacros.push_back(std::move(PPOpts.Macros[OriginalIndex]));
+ }
+ std::swap(PPOpts.Macros, NewMacros);
}
/// A clang tool that runs the preprocessor in a mode that's optimized for
@@ -146,13 +294,15 @@ public:
DisableFree(DisableFree), ModuleName(ModuleName) {}
bool runInvocation(std::shared_ptr<CompilerInvocation> Invocation,
- FileManager *FileMgr,
+ FileManager *DriverFileMgr,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
DiagnosticConsumer *DiagConsumer) override {
// Make a deep copy of the original Clang invocation.
CompilerInvocation OriginalInvocation(*Invocation);
// Restore the value of DisableFree, which may be modified by Tooling.
OriginalInvocation.getFrontendOpts().DisableFree = DisableFree;
+ if (any(OptimizeArgs & ScanningOptimizations::Macros))
+ canonicalizeDefines(OriginalInvocation.getPreprocessorOpts());
if (Scanned) {
// Scanning runs once for the first -cc1 invocation in a chain of driver
@@ -176,6 +326,10 @@ public:
if (!ScanInstance.hasDiagnostics())
return false;
+ // Some DiagnosticConsumers require that finish() is called.
+ auto DiagConsumerFinisher =
+ llvm::make_scope_exit([DiagConsumer]() { DiagConsumer->finish(); });
+
ScanInstance.getPreprocessorOpts().AllowPCHWithDifferentModulesCachePath =
true;
@@ -183,36 +337,41 @@ public:
ScanInstance.getFrontendOpts().UseGlobalModuleIndex = false;
ScanInstance.getFrontendOpts().ModulesShareFileManager = false;
ScanInstance.getHeaderSearchOpts().ModuleFormat = "raw";
+ ScanInstance.getHeaderSearchOpts().ModulesIncludeVFSUsage =
+ any(OptimizeArgs & ScanningOptimizations::VFS);
- ScanInstance.setFileManager(FileMgr);
// Support for virtual file system overlays.
- FileMgr->setVirtualFileSystem(createVFSFromCompilerInvocation(
+ auto FS = createVFSFromCompilerInvocation(
ScanInstance.getInvocation(), ScanInstance.getDiagnostics(),
- FileMgr->getVirtualFileSystemPtr()));
+ DriverFileMgr->getVirtualFileSystemPtr());
+ // Create a new FileManager to match the invocation's FileSystemOptions.
+ auto *FileMgr = ScanInstance.createFileManager(FS);
ScanInstance.createSourceManager(*FileMgr);
// Store the list of prebuilt module files into header search options. This
// will prevent the implicit build to create duplicate modules and will
// force reuse of the existing prebuilt module files instead.
+ PrebuiltModuleVFSMapT PrebuiltModuleVFSMap;
if (!ScanInstance.getPreprocessorOpts().ImplicitPCHInclude.empty())
- visitPrebuiltModule(
- ScanInstance.getPreprocessorOpts().ImplicitPCHInclude, ScanInstance,
- ScanInstance.getHeaderSearchOpts().PrebuiltModuleFiles);
+ if (visitPrebuiltModule(
+ ScanInstance.getPreprocessorOpts().ImplicitPCHInclude,
+ ScanInstance,
+ ScanInstance.getHeaderSearchOpts().PrebuiltModuleFiles,
+ PrebuiltModuleVFSMap, ScanInstance.getDiagnostics()))
+ return false;
// Use the dependency scanning optimized file system if requested to do so.
- if (DepFS) {
- llvm::IntrusiveRefCntPtr<DependencyScanningWorkerFilesystem> LocalDepFS =
- DepFS;
+ if (DepFS)
ScanInstance.getPreprocessorOpts().DependencyDirectivesForFile =
- [LocalDepFS = std::move(LocalDepFS)](FileEntryRef File)
+ [LocalDepFS = DepFS](FileEntryRef File)
-> std::optional<ArrayRef<dependency_directives_scan::Directive>> {
if (llvm::ErrorOr<EntryRef> Entry =
LocalDepFS->getOrCreateFileSystemEntry(File.getName()))
- return Entry->getDirectiveTokens();
+ if (LocalDepFS->ensureDirectiveTokensArePopulated(*Entry))
+ return Entry->getDirectiveTokens();
return std::nullopt;
};
- }
// Create the dependency collector that will collect the produced
// dependencies.
@@ -241,8 +400,8 @@ public:
case ScanningOutputFormat::Full:
MDC = std::make_shared<ModuleDepCollector>(
std::move(Opts), ScanInstance, Consumer, Controller,
- OriginalInvocation, OptimizeArgs, EagerLoadModules,
- Format == ScanningOutputFormat::P1689);
+ OriginalInvocation, std::move(PrebuiltModuleVFSMap), OptimizeArgs,
+ EagerLoadModules, Format == ScanningOutputFormat::P1689);
ScanInstance.addDependencyCollector(MDC);
break;
}
@@ -268,11 +427,19 @@ public:
else
Action = std::make_unique<ReadPCHAndPreprocessAction>();
+ if (ScanInstance.getDiagnostics().hasErrorOccurred())
+ return false;
+
+ // Each action is responsible for calling finish.
+ DiagConsumerFinisher.release();
const bool Result = ScanInstance.ExecuteAction(*Action);
if (Result)
setLastCC1Arguments(std::move(OriginalInvocation));
+ // Propagate the statistics to the parent FileManager.
+ DriverFileMgr->AddStats(ScanInstance.getFileManager());
+
return Result;
}
@@ -457,9 +624,8 @@ bool DependencyScanningWorker::computeDependencies(
ModifiedCommandLine ? *ModifiedCommandLine : CommandLine;
auto &FinalFS = ModifiedFS ? ModifiedFS : BaseFS;
- FileSystemOptions FSOpts;
- FSOpts.WorkingDir = WorkingDirectory.str();
- auto FileMgr = llvm::makeIntrusiveRefCnt<FileManager>(FSOpts, FinalFS);
+ auto FileMgr =
+ llvm::makeIntrusiveRefCnt<FileManager>(FileSystemOptions{}, FinalFS);
std::vector<const char *> FinalCCommandLine(FinalCommandLine.size(), nullptr);
llvm::transform(FinalCommandLine, FinalCCommandLine.begin(),
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
index bfaa89785104..370d83484685 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
@@ -29,27 +29,72 @@ const std::vector<std::string> &ModuleDeps::getBuildArguments() {
return std::get<std::vector<std::string>>(BuildInfo);
}
-static void optimizeHeaderSearchOpts(HeaderSearchOptions &Opts,
- ASTReader &Reader,
- const serialization::ModuleFile &MF) {
- // Only preserve search paths that were used during the dependency scan.
- std::vector<HeaderSearchOptions::Entry> Entries = Opts.UserEntries;
- Opts.UserEntries.clear();
-
- llvm::BitVector SearchPathUsage(Entries.size());
- llvm::DenseSet<const serialization::ModuleFile *> Visited;
- std::function<void(const serialization::ModuleFile *)> VisitMF =
- [&](const serialization::ModuleFile *MF) {
- SearchPathUsage |= MF->SearchPathUsage;
- Visited.insert(MF);
- for (const serialization::ModuleFile *Import : MF->Imports)
- if (!Visited.contains(Import))
- VisitMF(Import);
- };
- VisitMF(&MF);
-
- for (auto Idx : SearchPathUsage.set_bits())
- Opts.UserEntries.push_back(Entries[Idx]);
+static void
+optimizeHeaderSearchOpts(HeaderSearchOptions &Opts, ASTReader &Reader,
+ const serialization::ModuleFile &MF,
+ const PrebuiltModuleVFSMapT &PrebuiltModuleVFSMap,
+ ScanningOptimizations OptimizeArgs) {
+ if (any(OptimizeArgs & ScanningOptimizations::HeaderSearch)) {
+ // Only preserve search paths that were used during the dependency scan.
+ std::vector<HeaderSearchOptions::Entry> Entries;
+ std::swap(Opts.UserEntries, Entries);
+
+ llvm::BitVector SearchPathUsage(Entries.size());
+ llvm::DenseSet<const serialization::ModuleFile *> Visited;
+ std::function<void(const serialization::ModuleFile *)> VisitMF =
+ [&](const serialization::ModuleFile *MF) {
+ SearchPathUsage |= MF->SearchPathUsage;
+ Visited.insert(MF);
+ for (const serialization::ModuleFile *Import : MF->Imports)
+ if (!Visited.contains(Import))
+ VisitMF(Import);
+ };
+ VisitMF(&MF);
+
+ if (SearchPathUsage.size() != Entries.size())
+ llvm::report_fatal_error(
+ "Inconsistent search path options between modules detected");
+
+ for (auto Idx : SearchPathUsage.set_bits())
+ Opts.UserEntries.push_back(std::move(Entries[Idx]));
+ }
+ if (any(OptimizeArgs & ScanningOptimizations::VFS)) {
+ std::vector<std::string> VFSOverlayFiles;
+ std::swap(Opts.VFSOverlayFiles, VFSOverlayFiles);
+
+ llvm::BitVector VFSUsage(VFSOverlayFiles.size());
+ llvm::DenseSet<const serialization::ModuleFile *> Visited;
+ std::function<void(const serialization::ModuleFile *)> VisitMF =
+ [&](const serialization::ModuleFile *MF) {
+ Visited.insert(MF);
+ if (MF->Kind == serialization::MK_ImplicitModule) {
+ VFSUsage |= MF->VFSUsage;
+ // We only need to recurse into implicit modules. Other module types
+ // will have the correct set of VFSs for anything they depend on.
+ for (const serialization::ModuleFile *Import : MF->Imports)
+ if (!Visited.contains(Import))
+ VisitMF(Import);
+ } else {
+ // This is not an implicitly built module, so it may have different
+ // VFS options. Fall back to a string comparison instead.
+ auto VFSMap = PrebuiltModuleVFSMap.find(MF->FileName);
+ if (VFSMap == PrebuiltModuleVFSMap.end())
+ return;
+ for (std::size_t I = 0, E = VFSOverlayFiles.size(); I != E; ++I) {
+ if (VFSMap->second.contains(VFSOverlayFiles[I]))
+ VFSUsage[I] = true;
+ }
+ }
+ };
+ VisitMF(&MF);
+
+ if (VFSUsage.size() != VFSOverlayFiles.size())
+ llvm::report_fatal_error(
+ "Inconsistent -ivfsoverlay options between modules detected");
+
+ for (auto Idx : VFSUsage.set_bits())
+ Opts.VFSOverlayFiles.push_back(std::move(VFSOverlayFiles[Idx]));
+ }
}
static void optimizeDiagnosticOpts(DiagnosticOptions &Opts,
@@ -109,11 +154,36 @@ void ModuleDepCollector::addOutputPaths(CowCompilerInvocation &CI,
}
}
+void dependencies::resetBenignCodeGenOptions(frontend::ActionKind ProgramAction,
+ const LangOptions &LangOpts,
+ CodeGenOptions &CGOpts) {
+ // TODO: Figure out better way to set options to their default value.
+ if (ProgramAction == frontend::GenerateModule) {
+ CGOpts.MainFileName.clear();
+ CGOpts.DwarfDebugFlags.clear();
+ }
+ if (ProgramAction == frontend::GeneratePCH ||
+ (ProgramAction == frontend::GenerateModule && !LangOpts.ModulesCodegen)) {
+ CGOpts.DebugCompilationDir.clear();
+ CGOpts.CoverageCompilationDir.clear();
+ CGOpts.CoverageDataFile.clear();
+ CGOpts.CoverageNotesFile.clear();
+ CGOpts.ProfileInstrumentUsePath.clear();
+ CGOpts.SampleProfileFile.clear();
+ CGOpts.ProfileRemappingFile.clear();
+ }
+}
+
static CowCompilerInvocation
makeCommonInvocationForModuleBuild(CompilerInvocation CI) {
CI.resetNonModularOptions();
CI.clearImplicitModuleBuildOptions();
+ // The scanner takes care to avoid passing non-affecting module maps to the
+ // explicit compiles. No need to do extra work just to find out there are no
+ // module map files to prune.
+ CI.getHeaderSearchOpts().ModulesPruneNonAffectingModuleMaps = false;
+
// Remove options incompatible with explicit module build or are likely to
// differ between identical modules discovered from different translation
// units.
@@ -122,15 +192,8 @@ makeCommonInvocationForModuleBuild(CompilerInvocation CI) {
// LLVM options are not going to affect the AST
CI.getFrontendOpts().LLVMArgs.clear();
- // TODO: Figure out better way to set options to their default value.
- CI.getCodeGenOpts().MainFileName.clear();
- CI.getCodeGenOpts().DwarfDebugFlags.clear();
- if (!CI.getLangOpts().ModulesCodegen) {
- CI.getCodeGenOpts().DebugCompilationDir.clear();
- CI.getCodeGenOpts().CoverageCompilationDir.clear();
- CI.getCodeGenOpts().CoverageDataFile.clear();
- CI.getCodeGenOpts().CoverageNotesFile.clear();
- }
+ resetBenignCodeGenOptions(frontend::GenerateModule, CI.getLangOpts(),
+ CI.getCodeGenOpts());
// Map output paths that affect behaviour to "-" so their existence is in the
// context hash. The final path will be computed in addOutputPaths.
@@ -181,6 +244,10 @@ ModuleDepCollector::getInvocationAdjustedForModuleBuildWithoutOutputs(
ScanInstance.getFileManager().getFile(Deps.ClangModuleMapFile);
assert(CurrentModuleMapEntry && "module map file entry not found");
+ // Remove directly passed modulemap files. They will get added back if they
+ // were actually used.
+ CI.getMutFrontendOpts().ModuleMapFiles.clear();
+
auto DepModuleMapFiles = collectModuleMapFiles(Deps.ClangModuleDeps);
for (StringRef ModuleMapFile : Deps.ModuleMapFileDeps) {
// TODO: Track these as `FileEntryRef` to simplify the equality check below.
@@ -290,6 +357,8 @@ static bool needsModules(FrontendInputFile FIF) {
void ModuleDepCollector::applyDiscoveredDependencies(CompilerInvocation &CI) {
CI.clearImplicitModuleBuildOptions();
+ resetBenignCodeGenOptions(CI.getFrontendOpts().ProgramAction,
+ CI.getLangOpts(), CI.getCodeGenOpts());
if (llvm::any_of(CI.getFrontendOpts().Inputs, needsModules)) {
Preprocessor &PP = ScanInstance.getPreprocessor();
@@ -396,14 +465,14 @@ void ModuleDepCollectorPP::LexedFileChanged(FileID FID,
void ModuleDepCollectorPP::InclusionDirective(
SourceLocation HashLoc, const Token &IncludeTok, StringRef FileName,
bool IsAngled, CharSourceRange FilenameRange, OptionalFileEntryRef File,
- StringRef SearchPath, StringRef RelativePath, const Module *Imported,
- SrcMgr::CharacteristicKind FileType) {
- if (!File && !Imported) {
+ StringRef SearchPath, StringRef RelativePath, const Module *SuggestedModule,
+ bool ModuleImported, SrcMgr::CharacteristicKind FileType) {
+ if (!File && !ModuleImported) {
// This is a non-modular include that HeaderSearch failed to find. Add it
// here as `FileChanged` will never see it.
MDC.addFileDep(FileName);
}
- handleImport(Imported);
+ handleImport(SuggestedModule);
}
void ModuleDepCollectorPP::moduleImport(SourceLocation ImportLoc,
@@ -509,6 +578,10 @@ ModuleDepCollectorPP::handleTopLevelModule(const Module *M) {
MD.ID.ModuleName = M->getFullModuleName();
MD.IsSystem = M->IsSystem;
+ // For modules which use export_as link name, the linked product is that of
+ // the corresponding export_as-named module.
+ if (!M->UseExportAsModuleLinkName)
+ MD.LinkLibraries = M->LinkLibraries;
ModuleMap &ModMapInfo =
MDC.ScanInstance.getPreprocessor().getHeaderSearchInfo().getModuleMap();
@@ -558,9 +631,12 @@ ModuleDepCollectorPP::handleTopLevelModule(const Module *M) {
CowCompilerInvocation CI =
MDC.getInvocationAdjustedForModuleBuildWithoutOutputs(
MD, [&](CowCompilerInvocation &BuildInvocation) {
- if (any(MDC.OptimizeArgs & ScanningOptimizations::HeaderSearch))
+ if (any(MDC.OptimizeArgs & (ScanningOptimizations::HeaderSearch |
+ ScanningOptimizations::VFS)))
optimizeHeaderSearchOpts(BuildInvocation.getMutHeaderSearchOpts(),
- *MDC.ScanInstance.getASTReader(), *MF);
+ *MDC.ScanInstance.getASTReader(), *MF,
+ MDC.PrebuiltModuleVFSMap,
+ MDC.OptimizeArgs);
if (any(MDC.OptimizeArgs & ScanningOptimizations::SystemWarnings))
optimizeDiagnosticOpts(
BuildInvocation.getMutDiagnosticOpts(),
@@ -661,9 +737,11 @@ ModuleDepCollector::ModuleDepCollector(
std::unique_ptr<DependencyOutputOptions> Opts,
CompilerInstance &ScanInstance, DependencyConsumer &C,
DependencyActionController &Controller, CompilerInvocation OriginalCI,
+ PrebuiltModuleVFSMapT PrebuiltModuleVFSMap,
ScanningOptimizations OptimizeArgs, bool EagerLoadModules,
bool IsStdModuleP1689Format)
: ScanInstance(ScanInstance), Consumer(C), Controller(Controller),
+ PrebuiltModuleVFSMap(std::move(PrebuiltModuleVFSMap)),
Opts(std::move(Opts)),
CommonInvocation(
makeCommonInvocationForModuleBuild(std::move(OriginalCI))),
diff --git a/contrib/llvm-project/clang/lib/Tooling/DumpTool/APIData.h b/contrib/llvm-project/clang/lib/Tooling/DumpTool/APIData.h
deleted file mode 100644
index 03e247a8bd95..000000000000
--- a/contrib/llvm-project/clang/lib/Tooling/DumpTool/APIData.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//===- APIData.h ---------------------------------------------*- C++ -*----===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_TOOLING_DUMPTOOL_APIDATA_H
-#define LLVM_CLANG_LIB_TOOLING_DUMPTOOL_APIDATA_H
-
-#include <string>
-#include <vector>
-
-namespace clang {
-namespace tooling {
-
-struct ClassData {
- std::vector<std::string> ASTClassLocations;
- std::vector<std::string> ASTClassRanges;
- std::vector<std::string> TemplateParms;
- std::vector<std::string> TypeSourceInfos;
- std::vector<std::string> TypeLocs;
- std::vector<std::string> NestedNameLocs;
- std::vector<std::string> DeclNameInfos;
-};
-
-} // namespace tooling
-} // namespace clang
-
-#endif
diff --git a/contrib/llvm-project/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.cpp b/contrib/llvm-project/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.cpp
deleted file mode 100644
index 42691d556d98..000000000000
--- a/contrib/llvm-project/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.cpp
+++ /dev/null
@@ -1,271 +0,0 @@
-//===- ASTSrcLocProcessor.cpp --------------------------------*- C++ -*----===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "ASTSrcLocProcessor.h"
-
-#include "clang/Frontend/CompilerInstance.h"
-#include "llvm/Support/JSON.h"
-#include "llvm/Support/MemoryBuffer.h"
-
-using namespace clang::tooling;
-using namespace llvm;
-using namespace clang::ast_matchers;
-
-ASTSrcLocProcessor::ASTSrcLocProcessor(StringRef JsonPath)
- : JsonPath(JsonPath) {
-
- MatchFinder::MatchFinderOptions FinderOptions;
-
- Finder = std::make_unique<MatchFinder>(std::move(FinderOptions));
- Finder->addMatcher(
- cxxRecordDecl(
- isDefinition(),
- isSameOrDerivedFrom(
- namedDecl(
- hasAnyName(
- "clang::Stmt", "clang::Decl", "clang::CXXCtorInitializer",
- "clang::NestedNameSpecifierLoc",
- "clang::TemplateArgumentLoc", "clang::CXXBaseSpecifier",
- "clang::DeclarationNameInfo", "clang::TypeLoc"))
- .bind("nodeClade")),
- optionally(isDerivedFrom(cxxRecordDecl().bind("derivedFrom"))))
- .bind("className"),
- this);
- Finder->addMatcher(
- cxxRecordDecl(isDefinition(), hasAnyName("clang::PointerLikeTypeLoc",
- "clang::TypeofLikeTypeLoc"))
- .bind("templateName"),
- this);
-}
-
-std::unique_ptr<clang::ASTConsumer>
-ASTSrcLocProcessor::createASTConsumer(clang::CompilerInstance &Compiler,
- StringRef File) {
- return Finder->newASTConsumer();
-}
-
-llvm::json::Object toJSON(llvm::StringMap<std::vector<StringRef>> const &Obj) {
- using llvm::json::toJSON;
-
- llvm::json::Object JsonObj;
- for (const auto &Item : Obj) {
- JsonObj[Item.first()] = Item.second;
- }
- return JsonObj;
-}
-
-llvm::json::Object toJSON(llvm::StringMap<std::string> const &Obj) {
- using llvm::json::toJSON;
-
- llvm::json::Object JsonObj;
- for (const auto &Item : Obj) {
- JsonObj[Item.first()] = Item.second;
- }
- return JsonObj;
-}
-
-llvm::json::Object toJSON(ClassData const &Obj) {
- llvm::json::Object JsonObj;
-
- if (!Obj.ASTClassLocations.empty())
- JsonObj["sourceLocations"] = Obj.ASTClassLocations;
- if (!Obj.ASTClassRanges.empty())
- JsonObj["sourceRanges"] = Obj.ASTClassRanges;
- if (!Obj.TemplateParms.empty())
- JsonObj["templateParms"] = Obj.TemplateParms;
- if (!Obj.TypeSourceInfos.empty())
- JsonObj["typeSourceInfos"] = Obj.TypeSourceInfos;
- if (!Obj.TypeLocs.empty())
- JsonObj["typeLocs"] = Obj.TypeLocs;
- if (!Obj.NestedNameLocs.empty())
- JsonObj["nestedNameLocs"] = Obj.NestedNameLocs;
- if (!Obj.DeclNameInfos.empty())
- JsonObj["declNameInfos"] = Obj.DeclNameInfos;
- return JsonObj;
-}
-
-llvm::json::Object toJSON(llvm::StringMap<ClassData> const &Obj) {
- using llvm::json::toJSON;
-
- llvm::json::Object JsonObj;
- for (const auto &Item : Obj)
- JsonObj[Item.first()] = ::toJSON(Item.second);
- return JsonObj;
-}
-
-void WriteJSON(StringRef JsonPath, llvm::json::Object &&ClassInheritance,
- llvm::json::Object &&ClassesInClade,
- llvm::json::Object &&ClassEntries) {
- llvm::json::Object JsonObj;
-
- using llvm::json::toJSON;
-
- JsonObj["classInheritance"] = std::move(ClassInheritance);
- JsonObj["classesInClade"] = std::move(ClassesInClade);
- JsonObj["classEntries"] = std::move(ClassEntries);
-
- llvm::json::Value JsonVal(std::move(JsonObj));
-
- bool WriteChange = false;
- std::string OutString;
- if (auto ExistingOrErr = MemoryBuffer::getFile(JsonPath, /*IsText=*/true)) {
- raw_string_ostream Out(OutString);
- Out << formatv("{0:2}", JsonVal);
- if (ExistingOrErr.get()->getBuffer() == Out.str())
- return;
- WriteChange = true;
- }
-
- std::error_code EC;
- llvm::raw_fd_ostream JsonOut(JsonPath, EC, llvm::sys::fs::OF_Text);
- if (EC)
- return;
-
- if (WriteChange)
- JsonOut << OutString;
- else
- JsonOut << formatv("{0:2}", JsonVal);
-}
-
-void ASTSrcLocProcessor::generate() {
- WriteJSON(JsonPath, ::toJSON(ClassInheritance), ::toJSON(ClassesInClade),
- ::toJSON(ClassEntries));
-}
-
-void ASTSrcLocProcessor::generateEmpty() { WriteJSON(JsonPath, {}, {}, {}); }
-
-std::vector<std::string>
-CaptureMethods(std::string TypeString, const clang::CXXRecordDecl *ASTClass,
- const MatchFinder::MatchResult &Result) {
-
- auto publicAccessor = [](auto... InnerMatcher) {
- return cxxMethodDecl(isPublic(), parameterCountIs(0), isConst(),
- InnerMatcher...);
- };
-
- auto BoundNodesVec = match(
- findAll(
- publicAccessor(
- ofClass(cxxRecordDecl(
- equalsNode(ASTClass),
- optionally(isDerivedFrom(
- cxxRecordDecl(hasAnyName("clang::Stmt", "clang::Decl"))
- .bind("stmtOrDeclBase"))),
- optionally(isDerivedFrom(
- cxxRecordDecl(hasName("clang::Expr")).bind("exprBase"))),
- optionally(
- isDerivedFrom(cxxRecordDecl(hasName("clang::TypeLoc"))
- .bind("typeLocBase"))))),
- returns(hasCanonicalType(asString(TypeString))))
- .bind("classMethod")),
- *ASTClass, *Result.Context);
-
- std::vector<std::string> Methods;
- for (const auto &BN : BoundNodesVec) {
- if (const auto *Node = BN.getNodeAs<clang::NamedDecl>("classMethod")) {
- const auto *StmtOrDeclBase =
- BN.getNodeAs<clang::CXXRecordDecl>("stmtOrDeclBase");
- const auto *TypeLocBase =
- BN.getNodeAs<clang::CXXRecordDecl>("typeLocBase");
- const auto *ExprBase = BN.getNodeAs<clang::CXXRecordDecl>("exprBase");
- // The clang AST has several methods on base classes which are overriden
- // pseudo-virtually by derived classes.
- // We record only the pseudo-virtual methods on the base classes to
- // avoid duplication.
- if (StmtOrDeclBase &&
- (Node->getName() == "getBeginLoc" || Node->getName() == "getEndLoc" ||
- Node->getName() == "getSourceRange"))
- continue;
- if (ExprBase && Node->getName() == "getExprLoc")
- continue;
- if (TypeLocBase && Node->getName() == "getLocalSourceRange")
- continue;
- if ((ASTClass->getName() == "PointerLikeTypeLoc" ||
- ASTClass->getName() == "TypeofLikeTypeLoc") &&
- Node->getName() == "getLocalSourceRange")
- continue;
- Methods.push_back(Node->getName().str());
- }
- }
- return Methods;
-}
-
-void ASTSrcLocProcessor::run(const MatchFinder::MatchResult &Result) {
-
- const auto *ASTClass =
- Result.Nodes.getNodeAs<clang::CXXRecordDecl>("className");
-
- StringRef CladeName;
- if (ASTClass) {
- if (const auto *NodeClade =
- Result.Nodes.getNodeAs<clang::CXXRecordDecl>("nodeClade"))
- CladeName = NodeClade->getName();
- } else {
- ASTClass = Result.Nodes.getNodeAs<clang::CXXRecordDecl>("templateName");
- CladeName = "TypeLoc";
- }
-
- StringRef ClassName = ASTClass->getName();
-
- ClassData CD;
-
- CD.ASTClassLocations =
- CaptureMethods("class clang::SourceLocation", ASTClass, Result);
- CD.ASTClassRanges =
- CaptureMethods("class clang::SourceRange", ASTClass, Result);
- CD.TypeSourceInfos =
- CaptureMethods("class clang::TypeSourceInfo *", ASTClass, Result);
- CD.TypeLocs = CaptureMethods("class clang::TypeLoc", ASTClass, Result);
- CD.NestedNameLocs =
- CaptureMethods("class clang::NestedNameSpecifierLoc", ASTClass, Result);
- CD.DeclNameInfos =
- CaptureMethods("struct clang::DeclarationNameInfo", ASTClass, Result);
- auto DI = CaptureMethods("const struct clang::DeclarationNameInfo &",
- ASTClass, Result);
- CD.DeclNameInfos.insert(CD.DeclNameInfos.end(), DI.begin(), DI.end());
-
- if (const auto *DerivedFrom =
- Result.Nodes.getNodeAs<clang::CXXRecordDecl>("derivedFrom")) {
-
- if (const auto *Templ =
- llvm::dyn_cast<clang::ClassTemplateSpecializationDecl>(
- DerivedFrom)) {
-
- const auto &TArgs = Templ->getTemplateArgs();
-
- SmallString<256> TArgsString;
- llvm::raw_svector_ostream OS(TArgsString);
- OS << DerivedFrom->getName() << '<';
-
- clang::PrintingPolicy PPol(Result.Context->getLangOpts());
- PPol.TerseOutput = true;
-
- for (unsigned I = 0; I < TArgs.size(); ++I) {
- if (I > 0)
- OS << ", ";
- TArgs.get(I).getAsType().print(OS, PPol);
- }
- OS << '>';
-
- ClassInheritance[ClassName] = TArgsString.str().str();
- } else {
- ClassInheritance[ClassName] = DerivedFrom->getName().str();
- }
- }
-
- if (const auto *Templ = ASTClass->getDescribedClassTemplate()) {
- if (auto *TParams = Templ->getTemplateParameters()) {
- for (const auto &TParam : *TParams) {
- CD.TemplateParms.push_back(TParam->getName().str());
- }
- }
- }
-
- ClassEntries[ClassName] = CD;
- ClassesInClade[CladeName].push_back(ClassName);
-}
diff --git a/contrib/llvm-project/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.h b/contrib/llvm-project/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.h
deleted file mode 100644
index 5f2b48173f28..000000000000
--- a/contrib/llvm-project/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.h
+++ /dev/null
@@ -1,53 +0,0 @@
-//===- ASTSrcLocProcessor.h ---------------------------------*- C++ -*-----===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_TOOLING_DUMPTOOL_ASTSRCLOCPROCESSOR_H
-#define LLVM_CLANG_TOOLING_DUMPTOOL_ASTSRCLOCPROCESSOR_H
-
-#include "APIData.h"
-#include "clang/ASTMatchers/ASTMatchFinder.h"
-#include "llvm/ADT/StringRef.h"
-#include <memory>
-#include <string>
-#include <vector>
-
-namespace clang {
-
-class CompilerInstance;
-
-namespace tooling {
-
-class ASTSrcLocProcessor : public ast_matchers::MatchFinder::MatchCallback {
-public:
- explicit ASTSrcLocProcessor(StringRef JsonPath);
-
- std::unique_ptr<ASTConsumer> createASTConsumer(CompilerInstance &Compiler,
- StringRef File);
-
- void generate();
- void generateEmpty();
-
-private:
- void run(const ast_matchers::MatchFinder::MatchResult &Result) override;
-
- std::optional<TraversalKind> getCheckTraversalKind() const override {
- return TK_IgnoreUnlessSpelledInSource;
- }
-
- llvm::StringMap<std::string> ClassInheritance;
- llvm::StringMap<std::vector<StringRef>> ClassesInClade;
- llvm::StringMap<ClassData> ClassEntries;
-
- std::string JsonPath;
- std::unique_ptr<clang::ast_matchers::MatchFinder> Finder;
-};
-
-} // namespace tooling
-} // namespace clang
-
-#endif
diff --git a/contrib/llvm-project/clang/lib/Tooling/DumpTool/ClangSrcLocDump.cpp b/contrib/llvm-project/clang/lib/Tooling/DumpTool/ClangSrcLocDump.cpp
deleted file mode 100644
index 1529bfa75d6d..000000000000
--- a/contrib/llvm-project/clang/lib/Tooling/DumpTool/ClangSrcLocDump.cpp
+++ /dev/null
@@ -1,155 +0,0 @@
-//===- ClangSrcLocDump.cpp ------------------------------------*- C++ -*---===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Basic/Diagnostic.h"
-#include "clang/Driver/Compilation.h"
-#include "clang/Driver/Driver.h"
-#include "clang/Driver/Job.h"
-#include "clang/Driver/Options.h"
-#include "clang/Driver/Tool.h"
-#include "clang/Frontend/CompilerInstance.h"
-#include "clang/Frontend/TextDiagnosticPrinter.h"
-#include "clang/Lex/PreprocessorOptions.h"
-#include "clang/Tooling/Tooling.h"
-#include "llvm/Option/ArgList.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/JSON.h"
-#include "llvm/TargetParser/Host.h"
-
-#include "ASTSrcLocProcessor.h"
-
-using namespace clang::tooling;
-using namespace clang;
-using namespace llvm;
-
-static cl::list<std::string> IncludeDirectories(
- "I", cl::desc("Include directories to use while compiling"),
- cl::value_desc("directory"), cl::Required, cl::OneOrMore, cl::Prefix);
-
-static cl::opt<bool>
- SkipProcessing("skip-processing",
- cl::desc("Avoid processing the AST header file"),
- cl::Required, cl::value_desc("bool"));
-
-static cl::opt<std::string> JsonOutputPath("json-output-path",
- cl::desc("json output path"),
- cl::Required,
- cl::value_desc("path"));
-
-class ASTSrcLocGenerationAction : public clang::ASTFrontendAction {
-public:
- ASTSrcLocGenerationAction() : Processor(JsonOutputPath) {}
-
- void ExecuteAction() override {
- clang::ASTFrontendAction::ExecuteAction();
- if (getCompilerInstance().getDiagnostics().getNumErrors() > 0)
- Processor.generateEmpty();
- else
- Processor.generate();
- }
-
- std::unique_ptr<clang::ASTConsumer>
- CreateASTConsumer(clang::CompilerInstance &Compiler,
- llvm::StringRef File) override {
- return Processor.createASTConsumer(Compiler, File);
- }
-
-private:
- ASTSrcLocProcessor Processor;
-};
-
-static const char Filename[] = "ASTTU.cpp";
-
-int main(int argc, const char **argv) {
-
- cl::ParseCommandLineOptions(argc, argv);
-
- if (SkipProcessing) {
- std::error_code EC;
- llvm::raw_fd_ostream JsonOut(JsonOutputPath, EC, llvm::sys::fs::OF_Text);
- if (EC)
- return 1;
- JsonOut << formatv("{0:2}", llvm::json::Value(llvm::json::Object()));
- return 0;
- }
-
- std::vector<std::string> Args;
- Args.push_back("-cc1");
-
- llvm::transform(IncludeDirectories, std::back_inserter(Args),
- [](const std::string &IncDir) { return "-I" + IncDir; });
-
- Args.push_back("-fsyntax-only");
- Args.push_back(Filename);
-
- std::vector<const char *> Argv(Args.size(), nullptr);
- llvm::transform(Args, Argv.begin(),
- [](const std::string &Arg) { return Arg.c_str(); });
-
- IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts =
- CreateAndPopulateDiagOpts(Argv);
-
- // Don't output diagnostics, because common scenarios such as
- // cross-compiling fail with diagnostics. This is not fatal, but
- // just causes attempts to use the introspection API to return no data.
- TextDiagnosticPrinter DiagnosticPrinter(llvm::nulls(), &*DiagOpts);
- DiagnosticsEngine Diagnostics(
- IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs()), &*DiagOpts,
- &DiagnosticPrinter, false);
-
- auto *OFS = new llvm::vfs::OverlayFileSystem(vfs::getRealFileSystem());
-
- auto *MemFS = new llvm::vfs::InMemoryFileSystem();
- OFS->pushOverlay(MemFS);
- MemFS->addFile(Filename, 0,
- MemoryBuffer::getMemBuffer("#include \"clang/AST/AST.h\"\n"));
-
- auto Files = llvm::makeIntrusiveRefCnt<FileManager>(FileSystemOptions(), OFS);
-
- auto Driver = std::make_unique<clang::driver::Driver>(
- "clang", llvm::sys::getDefaultTargetTriple(), Diagnostics,
- "ast-api-dump-tool", OFS);
-
- std::unique_ptr<clang::driver::Compilation> Comp(
- Driver->BuildCompilation(llvm::ArrayRef(Argv)));
- if (!Comp)
- return 1;
-
- const auto &Jobs = Comp->getJobs();
- if (Jobs.size() != 1 || !isa<clang::driver::Command>(*Jobs.begin())) {
- SmallString<256> error_msg;
- llvm::raw_svector_ostream error_stream(error_msg);
- Jobs.Print(error_stream, "; ", true);
- return 1;
- }
-
- const auto &Cmd = cast<clang::driver::Command>(*Jobs.begin());
- const llvm::opt::ArgStringList &CC1Args = Cmd.getArguments();
-
- auto Invocation = std::make_unique<CompilerInvocation>();
- CompilerInvocation::CreateFromArgs(*Invocation, CC1Args, Diagnostics);
-
- CompilerInstance Compiler(std::make_shared<clang::PCHContainerOperations>());
- Compiler.setInvocation(std::move(Invocation));
-
- Compiler.createDiagnostics(&DiagnosticPrinter, false);
- if (!Compiler.hasDiagnostics())
- return 1;
-
- // Suppress "2 errors generated" or similar messages
- Compiler.getDiagnosticOpts().ShowCarets = false;
- Compiler.createSourceManager(*Files);
- Compiler.setFileManager(Files.get());
-
- ASTSrcLocGenerationAction ScopedToolAction;
- Compiler.ExecuteAction(ScopedToolAction);
-
- Files->clearStatCache();
-
- return 0;
-}
diff --git a/contrib/llvm-project/clang/lib/Tooling/EmptyNodeIntrospection.inc.in b/contrib/llvm-project/clang/lib/Tooling/EmptyNodeIntrospection.inc.in
deleted file mode 100644
index 2071c34cbd04..000000000000
--- a/contrib/llvm-project/clang/lib/Tooling/EmptyNodeIntrospection.inc.in
+++ /dev/null
@@ -1,48 +0,0 @@
-//===- EmptyNodeIntrospection.inc.in --------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-namespace clang {
-namespace tooling {
-bool NodeIntrospection::hasIntrospectionSupport() { return false; }
-
-NodeLocationAccessors NodeIntrospection::GetLocations(clang::Stmt const *) {
- return {};
-}
-NodeLocationAccessors NodeIntrospection::GetLocations(clang::Decl const *) {
- return {};
-}
-NodeLocationAccessors NodeIntrospection::GetLocations(
- clang::CXXCtorInitializer const *) {
- return {};
-}
-NodeLocationAccessors NodeIntrospection::GetLocations(
- clang::NestedNameSpecifierLoc const&) {
- return {};
-}
-NodeLocationAccessors NodeIntrospection::GetLocations(
- clang::TemplateArgumentLoc const&) {
- return {};
-}
-NodeLocationAccessors NodeIntrospection::GetLocations(
- clang::CXXBaseSpecifier const*) {
- return {};
-}
-NodeLocationAccessors NodeIntrospection::GetLocations(
- clang::TypeLoc const&) {
- return {};
-}
-NodeLocationAccessors NodeIntrospection::GetLocations(
- clang::DeclarationNameInfo const&) {
- return {};
-}
-NodeLocationAccessors
-NodeIntrospection::GetLocations(clang::DynTypedNode const &) {
- return {};
-}
-} // namespace tooling
-} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp b/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
index d275222ac6b5..4313da66efc0 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
@@ -234,8 +234,18 @@ int IncludeCategoryManager::getSortIncludePriority(StringRef IncludeName,
return Ret;
}
bool IncludeCategoryManager::isMainHeader(StringRef IncludeName) const {
- if (!IncludeName.starts_with("\""))
- return false;
+ switch (Style.MainIncludeChar) {
+ case IncludeStyle::MICD_Quote:
+ if (!IncludeName.starts_with("\""))
+ return false;
+ break;
+ case IncludeStyle::MICD_AngleBracket:
+ if (!IncludeName.starts_with("<"))
+ return false;
+ break;
+ case IncludeStyle::MICD_Any:
+ break;
+ }
IncludeName =
IncludeName.drop_front(1).drop_back(1); // remove the surrounding "" or <>
diff --git a/contrib/llvm-project/clang/lib/Tooling/Inclusions/IncludeStyle.cpp b/contrib/llvm-project/clang/lib/Tooling/Inclusions/IncludeStyle.cpp
index da5bb00d1013..05dfb50589de 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Inclusions/IncludeStyle.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/IncludeStyle.cpp
@@ -28,5 +28,12 @@ void ScalarEnumerationTraits<IncludeStyle::IncludeBlocksStyle>::enumeration(
IO.enumCase(Value, "Regroup", IncludeStyle::IBS_Regroup);
}
+void ScalarEnumerationTraits<IncludeStyle::MainIncludeCharDiscriminator>::
+ enumeration(IO &IO, IncludeStyle::MainIncludeCharDiscriminator &Value) {
+ IO.enumCase(Value, "Quote", IncludeStyle::MICD_Quote);
+ IO.enumCase(Value, "AngleBracket", IncludeStyle::MICD_AngleBracket);
+ IO.enumCase(Value, "Any", IncludeStyle::MICD_Any);
+}
+
} // namespace yaml
} // namespace llvm
diff --git a/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/CSpecialSymbolMap.inc b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/CSpecialSymbolMap.inc
new file mode 100644
index 000000000000..a515f69ea6a8
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/CSpecialSymbolMap.inc
@@ -0,0 +1,14 @@
+//===-- StdSpecialSymbolMap.inc -----------------------------------*- C -*-===//
+//
+// This is a hand-curated list for C symbols that cannot be parsed/extracted
+// via the include-mapping tool (gen_std.py).
+//
+//===----------------------------------------------------------------------===//
+
+SYMBOL(size_t, None, <stddef.h>)
+SYMBOL(size_t, None, <stdio.h>)
+SYMBOL(size_t, None, <stdlib.h>)
+SYMBOL(size_t, None, <string.h>)
+SYMBOL(size_t, None, <time.h>)
+SYMBOL(size_t, None, <uchar.h>)
+SYMBOL(size_t, None, <wchar.h>)
diff --git a/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StandardLibrary.cpp b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StandardLibrary.cpp
index adf1b230ff03..0832bcf66145 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StandardLibrary.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StandardLibrary.cpp
@@ -55,11 +55,12 @@ static const SymbolHeaderMapping *getMappingPerLang(Lang L) {
}
static int countSymbols(Lang Language) {
- ArrayRef<const char*> Symbols;
+ ArrayRef<const char *> Symbols;
#define SYMBOL(Name, NS, Header) #NS #Name,
switch (Language) {
case Lang::C: {
static constexpr const char *CSymbols[] = {
+#include "CSpecialSymbolMap.inc"
#include "CSymbolMap.inc"
};
Symbols = CSymbols;
@@ -147,6 +148,7 @@ static int initialize(Lang Language) {
switch (Language) {
case Lang::C: {
static constexpr Symbol CSymbols[] = {
+#include "CSpecialSymbolMap.inc"
#include "CSymbolMap.inc"
};
for (const Symbol &S : CSymbols)
diff --git a/contrib/llvm-project/clang/lib/Tooling/JSONCompilationDatabase.cpp b/contrib/llvm-project/clang/lib/Tooling/JSONCompilationDatabase.cpp
index a77686996879..5ecba5dfece3 100644
--- a/contrib/llvm-project/clang/lib/Tooling/JSONCompilationDatabase.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/JSONCompilationDatabase.cpp
@@ -260,7 +260,7 @@ static llvm::StringRef stripExecutableExtension(llvm::StringRef Name) {
return Name;
}
-// There are compiler-wrappers (ccache, distcc, gomacc) that take the "real"
+// There are compiler-wrappers (ccache, distcc) that take the "real"
// compiler as an argument, e.g. distcc gcc -O3 foo.c.
// These end up in compile_commands.json when people set CC="distcc gcc".
// Clang's driver doesn't understand this, so we need to unwrap.
@@ -269,8 +269,7 @@ static bool unwrapCommand(std::vector<std::string> &Args) {
return false;
StringRef Wrapper =
stripExecutableExtension(llvm::sys::path::filename(Args.front()));
- if (Wrapper == "distcc" || Wrapper == "gomacc" || Wrapper == "ccache" ||
- Wrapper == "sccache") {
+ if (Wrapper == "distcc" || Wrapper == "ccache" || Wrapper == "sccache") {
// Most of these wrappers support being invoked 3 ways:
// `distcc g++ file.c` This is the mode we're trying to match.
// We need to drop `distcc`.
diff --git a/contrib/llvm-project/clang/lib/Tooling/LocateToolCompilationDatabase.cpp b/contrib/llvm-project/clang/lib/Tooling/LocateToolCompilationDatabase.cpp
new file mode 100644
index 000000000000..033f69f3760c
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Tooling/LocateToolCompilationDatabase.cpp
@@ -0,0 +1,71 @@
+//===- GuessTargetAndModeCompilationDatabase.cpp --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/CompilationDatabase.h"
+#include "clang/Tooling/Tooling.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
+#include <memory>
+
+namespace clang {
+namespace tooling {
+
+namespace {
+class LocationAdderDatabase : public CompilationDatabase {
+public:
+ LocationAdderDatabase(std::unique_ptr<CompilationDatabase> Base)
+ : Base(std::move(Base)) {
+ assert(this->Base != nullptr);
+ }
+
+ std::vector<std::string> getAllFiles() const override {
+ return Base->getAllFiles();
+ }
+
+ std::vector<CompileCommand> getAllCompileCommands() const override {
+ return addLocation(Base->getAllCompileCommands());
+ }
+
+ std::vector<CompileCommand>
+ getCompileCommands(StringRef FilePath) const override {
+ return addLocation(Base->getCompileCommands(FilePath));
+ }
+
+private:
+ std::vector<CompileCommand>
+ addLocation(std::vector<CompileCommand> Cmds) const {
+ for (auto &Cmd : Cmds) {
+ if (Cmd.CommandLine.empty())
+ continue;
+ std::string &Driver = Cmd.CommandLine.front();
+ // If the driver name already is absolute, we don't need to do anything.
+ if (llvm::sys::path::is_absolute(Driver))
+ continue;
+ // If the name is a relative path, like bin/clang, we assume it's
+ // possible to resolve it and don't do anything about it either.
+ if (llvm::any_of(Driver,
+ [](char C) { return llvm::sys::path::is_separator(C); }))
+ continue;
+ auto Absolute = llvm::sys::findProgramByName(Driver);
+ // If we found it in path, update the entry in Cmd.CommandLine
+ if (Absolute && llvm::sys::path::is_absolute(*Absolute))
+ Driver = std::move(*Absolute);
+ }
+ return Cmds;
+ }
+ std::unique_ptr<CompilationDatabase> Base;
+};
+} // namespace
+
+std::unique_ptr<CompilationDatabase>
+inferToolLocation(std::unique_ptr<CompilationDatabase> Base) {
+ return std::make_unique<LocationAdderDatabase>(std::move(Base));
+}
+
+} // namespace tooling
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Tooling/NodeIntrospection.cpp b/contrib/llvm-project/clang/lib/Tooling/NodeIntrospection.cpp
deleted file mode 100644
index f01bb1cb9c3c..000000000000
--- a/contrib/llvm-project/clang/lib/Tooling/NodeIntrospection.cpp
+++ /dev/null
@@ -1,88 +0,0 @@
-//===- NodeIntrospection.h -----------------------------------*- C++ -*----===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the implementation of the NodeIntrospection.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Tooling/NodeIntrospection.h"
-
-#include "clang/AST/AST.h"
-#include "llvm/Support/raw_ostream.h"
-
-namespace clang {
-
-namespace tooling {
-
-void LocationCallFormatterCpp::print(const LocationCall &Call,
- llvm::raw_ostream &OS) {
- if (const LocationCall *On = Call.on()) {
- print(*On, OS);
- if (On->returnsPointer())
- OS << "->";
- else
- OS << '.';
- }
-
- OS << Call.name() << "()";
-}
-
-std::string LocationCallFormatterCpp::format(const LocationCall &Call) {
- std::string Result;
- llvm::raw_string_ostream OS(Result);
- print(Call, OS);
- OS.flush();
- return Result;
-}
-
-namespace internal {
-
-static bool locationCallLessThan(const LocationCall *LHS,
- const LocationCall *RHS) {
- if (!LHS && !RHS)
- return false;
- if (LHS && !RHS)
- return true;
- if (!LHS && RHS)
- return false;
- auto compareResult = LHS->name().compare(RHS->name());
- if (compareResult < 0)
- return true;
- if (compareResult > 0)
- return false;
- return locationCallLessThan(LHS->on(), RHS->on());
-}
-
-bool RangeLessThan::operator()(
- std::pair<SourceRange, SharedLocationCall> const &LHS,
- std::pair<SourceRange, SharedLocationCall> const &RHS) const {
- if (LHS.first.getBegin() < RHS.first.getBegin())
- return true;
- else if (LHS.first.getBegin() != RHS.first.getBegin())
- return false;
-
- if (LHS.first.getEnd() < RHS.first.getEnd())
- return true;
- else if (LHS.first.getEnd() != RHS.first.getEnd())
- return false;
-
- return locationCallLessThan(LHS.second.get(), RHS.second.get());
-}
-bool RangeLessThan::operator()(
- std::pair<SourceLocation, SharedLocationCall> const &LHS,
- std::pair<SourceLocation, SharedLocationCall> const &RHS) const {
- if (LHS.first == RHS.first)
- return locationCallLessThan(LHS.second.get(), RHS.second.get());
- return LHS.first < RHS.first;
-}
-} // namespace internal
-
-} // namespace tooling
-} // namespace clang
-
-#include "clang/Tooling/NodeIntrospection.inc"
diff --git a/contrib/llvm-project/clang/lib/Tooling/Refactoring/AtomicChange.cpp b/contrib/llvm-project/clang/lib/Tooling/Refactoring/AtomicChange.cpp
index 3d5ae2fed014..dfc98355c664 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Refactoring/AtomicChange.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Refactoring/AtomicChange.cpp
@@ -1,4 +1,4 @@
-//===--- AtomicChange.cpp - AtomicChange implementation -----------------*- C++ -*-===//
+//===--- AtomicChange.cpp - AtomicChange implementation ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/clang/lib/Tooling/Syntax/BuildTree.cpp b/contrib/llvm-project/clang/lib/Tooling/Syntax/BuildTree.cpp
index cd0261989495..3e50d67f4d6e 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Syntax/BuildTree.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Syntax/BuildTree.cpp
@@ -735,7 +735,8 @@ public:
auto *Declaration =
cast<syntax::SimpleDeclaration>(handleFreeStandingTagDecl(C));
foldExplicitTemplateInstantiation(
- Builder.getTemplateRange(C), Builder.findToken(C->getExternLoc()),
+ Builder.getTemplateRange(C),
+ Builder.findToken(C->getExternKeywordLoc()),
Builder.findToken(C->getTemplateKeywordLoc()), Declaration, C);
return true;
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp b/contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp
index 8d32c45a4a70..0a656dff3842 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp
@@ -383,12 +383,13 @@ llvm::ArrayRef<syntax::Token> TokenBuffer::spelledTokens(FileID FID) const {
return It->second.SpelledTokens;
}
-const syntax::Token *TokenBuffer::spelledTokenAt(SourceLocation Loc) const {
+const syntax::Token *
+TokenBuffer::spelledTokenContaining(SourceLocation Loc) const {
assert(Loc.isFileID());
const auto *Tok = llvm::partition_point(
spelledTokens(SourceMgr->getFileID(Loc)),
- [&](const syntax::Token &Tok) { return Tok.location() < Loc; });
- if (!Tok || Tok->location() != Loc)
+ [&](const syntax::Token &Tok) { return Tok.endLocation() <= Loc; });
+ if (!Tok || Loc < Tok->location())
return nullptr;
return Tok;
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Tooling.cpp b/contrib/llvm-project/clang/lib/Tooling/Tooling.cpp
index c5c3cdb47e92..ffacf9cf1f78 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Tooling.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Tooling.cpp
@@ -293,7 +293,7 @@ void addTargetAndModeForProgramName(std::vector<std::string> &CommandLine,
++Token) {
StringRef TokenRef(*Token);
ShouldAddTarget = ShouldAddTarget && !TokenRef.starts_with(TargetOPT) &&
- !TokenRef.equals(TargetOPTLegacy);
+ TokenRef != TargetOPTLegacy;
ShouldAddMode = ShouldAddMode && !TokenRef.starts_with(DriverModeOPT);
}
if (ShouldAddMode) {
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/RangeSelector.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/RangeSelector.cpp
index 7370baf01083..e84ddde74a70 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/RangeSelector.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/RangeSelector.cpp
@@ -96,13 +96,6 @@ static SourceLocation findPreviousTokenKind(SourceLocation Start,
}
}
-static SourceLocation findOpenParen(const CallExpr &E, const SourceManager &SM,
- const LangOptions &LangOpts) {
- SourceLocation EndLoc =
- E.getNumArgs() == 0 ? E.getRParenLoc() : E.getArg(0)->getBeginLoc();
- return findPreviousTokenKind(EndLoc, SM, LangOpts, tok::TokenKind::l_paren);
-}
-
RangeSelector transformer::before(RangeSelector Selector) {
return [Selector](const MatchResult &Result) -> Expected<CharSourceRange> {
Expected<CharSourceRange> SelectedRange = Selector(Result);
@@ -287,18 +280,50 @@ RangeSelector transformer::statements(std::string ID) {
}
namespace {
-// Returns the range of the source between the call's parentheses.
-CharSourceRange getCallArgumentsRange(const MatchResult &Result,
- const CallExpr &CE) {
+
+SourceLocation getRLoc(const CallExpr &E) { return E.getRParenLoc(); }
+
+SourceLocation getRLoc(const CXXConstructExpr &E) {
+ return E.getParenOrBraceRange().getEnd();
+}
+
+tok::TokenKind getStartToken(const CallExpr &E) {
+ return tok::TokenKind::l_paren;
+}
+
+tok::TokenKind getStartToken(const CXXConstructExpr &E) {
+ return isa<CXXTemporaryObjectExpr>(E) ? tok::TokenKind::l_paren
+ : tok::TokenKind::l_brace;
+}
+
+template <typename ExprWithArgs>
+SourceLocation findArgStartDelimiter(const ExprWithArgs &E, SourceLocation RLoc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ SourceLocation Loc = E.getNumArgs() == 0 ? RLoc : E.getArg(0)->getBeginLoc();
+ return findPreviousTokenKind(Loc, SM, LangOpts, getStartToken(E));
+}
+// Returns the range of the source between the call's or construct expr's
+// parentheses/braces.
+template <typename ExprWithArgs>
+CharSourceRange getArgumentsRange(const MatchResult &Result,
+ const ExprWithArgs &CE) {
+ const SourceLocation RLoc = getRLoc(CE);
return CharSourceRange::getCharRange(
- findOpenParen(CE, *Result.SourceManager, Result.Context->getLangOpts())
+ findArgStartDelimiter(CE, RLoc, *Result.SourceManager,
+ Result.Context->getLangOpts())
.getLocWithOffset(1),
- CE.getRParenLoc());
+ RLoc);
}
} // namespace
RangeSelector transformer::callArgs(std::string ID) {
- return RelativeSelector<CallExpr, getCallArgumentsRange>(std::move(ID));
+ return RelativeSelector<CallExpr, getArgumentsRange<CallExpr>>(std::move(ID));
+}
+
+RangeSelector transformer::constructExprArgs(std::string ID) {
+ return RelativeSelector<CXXConstructExpr,
+ getArgumentsRange<CXXConstructExpr>>(std::move(ID));
}
namespace {
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCode.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCode.cpp
index 6aae834b0db5..ab7184c2c069 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCode.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCode.cpp
@@ -101,6 +101,54 @@ static bool spelledInMacroDefinition(SourceLocation Loc,
return false;
}
+// Returns the expansion char-range of `Loc` if `Loc` is a split token. For
+// example, `>>` in nested templates needs the first `>` to be split, otherwise
+// the `SourceLocation` of the token would lex as `>>` instead of `>`.
+static std::optional<CharSourceRange>
+getExpansionForSplitToken(SourceLocation Loc, const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ if (Loc.isMacroID()) {
+ bool Invalid = false;
+ auto &SLoc = SM.getSLocEntry(SM.getFileID(Loc), &Invalid);
+ if (Invalid)
+ return std::nullopt;
+ if (auto &Expansion = SLoc.getExpansion();
+ !Expansion.isExpansionTokenRange()) {
+ // A char-range expansion is only used where a token-range would be
+ // incorrect, and so identifies this as a split token (and importantly,
+ // not as a macro).
+ return Expansion.getExpansionLocRange();
+ }
+ }
+ return std::nullopt;
+}
+
+// If `Range` covers a split token, returns the expansion range, otherwise
+// returns `Range`.
+static CharSourceRange getRangeForSplitTokens(CharSourceRange Range,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ if (Range.isTokenRange()) {
+ auto BeginToken = getExpansionForSplitToken(Range.getBegin(), SM, LangOpts);
+ auto EndToken = getExpansionForSplitToken(Range.getEnd(), SM, LangOpts);
+ if (EndToken) {
+ SourceLocation BeginLoc =
+ BeginToken ? BeginToken->getBegin() : Range.getBegin();
+ // We can't use the expansion location with a token-range, because that
+ // will incorrectly lex the end token, so use a char-range that ends at
+ // the split.
+ return CharSourceRange::getCharRange(BeginLoc, EndToken->getEnd());
+ } else if (BeginToken) {
+ // Since the end token is not split, the whole range covers the split, so
+ // the only adjustment we make is to use the expansion location of the
+ // begin token.
+ return CharSourceRange::getTokenRange(BeginToken->getBegin(),
+ Range.getEnd());
+ }
+ }
+ return Range;
+}
+
static CharSourceRange getRange(const CharSourceRange &EditRange,
const SourceManager &SM,
const LangOptions &LangOpts,
@@ -109,13 +157,14 @@ static CharSourceRange getRange(const CharSourceRange &EditRange,
if (IncludeMacroExpansion) {
Range = Lexer::makeFileCharRange(EditRange, SM, LangOpts);
} else {
- if (spelledInMacroDefinition(EditRange.getBegin(), SM) ||
- spelledInMacroDefinition(EditRange.getEnd(), SM))
+ auto AdjustedRange = getRangeForSplitTokens(EditRange, SM, LangOpts);
+ if (spelledInMacroDefinition(AdjustedRange.getBegin(), SM) ||
+ spelledInMacroDefinition(AdjustedRange.getEnd(), SM))
return {};
- auto B = SM.getSpellingLoc(EditRange.getBegin());
- auto E = SM.getSpellingLoc(EditRange.getEnd());
- if (EditRange.isTokenRange())
+ auto B = SM.getSpellingLoc(AdjustedRange.getBegin());
+ auto E = SM.getSpellingLoc(AdjustedRange.getEnd());
+ if (AdjustedRange.isTokenRange())
E = Lexer::getLocForEndOfToken(E, 0, SM, LangOpts);
Range = CharSourceRange::getCharRange(B, E);
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp
index d91c9e0a20cc..bc4fa6e36057 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp
@@ -51,7 +51,7 @@ static Error printNode(StringRef Id, const MatchFinder::MatchResult &Match,
if (auto Err = NodeOrErr.takeError())
return Err;
NodeOrErr->print(Os, PrintingPolicy(Match.Context->getLangOpts()));
- *Result += Os.str();
+ *Result += Output;
return Error::success();
}
@@ -371,7 +371,7 @@ public:
Stream << ", " << DefaultStencil->toString();
}
Stream << ")";
- return Stream.str();
+ return Buffer;
}
private: